使用JavaScript修剪音频文件(前3秒)

2eafrhcq  于 12个月前  发布在  Java
关注(0)|答案(2)|浏览(172)

我有一个问题：能否用 JavaScript 修剪我录制的音频文件？比如我想剪掉前 3 秒。我用 p5.js 录制了音频文件，并用 AudioContext() 把录音和卡拉OK伴奏合并在一起；因为开头的声音不太好听，所以我想把它剪掉。

cmssoen2

cmssoen21#

您可能需要使用类似AudioContext.decodeAudioData()的东西将音频读取到AudioBuffer中,将AudioBuffer插入AudioBufferSourceNode。然后,您可以使用AudioBufferSourceNode.start()的offset参数跳过前3秒,并记录结果输出流。
示例代码:

// Create the AudioContext up front — the original snippet referenced
// `audioCtx` without ever defining it, which throws a ReferenceError.
var audioCtx = new (window.AudioContext || window.webkitAudioContext)();

var source = audioCtx.createBufferSource();
var dest = audioCtx.createMediaStreamDestination();
var mediaRecorder = new MediaRecorder(dest.stream);

// Collect recorded chunks so the trimmed result can be assembled into a
// Blob when recording stops (the original never stopped the recorder,
// so its output was unreachable).
var chunks = [];
mediaRecorder.ondataavailable = function (e) {
  chunks.push(e.data);
};
mediaRecorder.onstop = function () {
  var trimmedBlob = new Blob(chunks, { type: mediaRecorder.mimeType });
  // ... use trimmedBlob, e.g. URL.createObjectURL(trimmedBlob) ...
};

// Fetch the source audio as raw bytes for decoding.
var request = new XMLHttpRequest();
request.open('GET', 'your.ogg', true);
request.responseType = 'arraybuffer';

request.onload = function () {
  var audioData = request.response;
  audioCtx.decodeAudioData(
    audioData,
    function (buffer) {
      source.buffer = buffer;
      source.connect(dest);
      mediaRecorder.start();
      // Second argument (offset, in seconds) skips the first 3 seconds.
      source.start(audioCtx.currentTime, 3);
      // Stop recording once playback of the trimmed region finishes.
      source.onended = function () {
        mediaRecorder.stop();
      };
    },
    function (e) {
      console.log("Error with decoding audio data" + e.err);
    }
  );
};

request.send();
jogvjijk

jogvjijk2#

<!DOCTYPE html>
<html>
<head>
    <title>Audio Trimmer</title>
</head>
<body>
    <input type="file" id="audioFile" accept="audio/*">
    <button onclick="trimAudio()">Trim Audio</button>

    <script>
        /**
         * Read the selected file, cut it down to the [startTime, endTime]
         * window, and append a playable <audio> element with the result.
         *
         * Fixes over the original: guard clauses instead of nesting, the
         * end time is clamped to the real clip duration (short files no
         * longer produce an out-of-range window), and the decoding
         * AudioContext is closed afterwards — browsers cap the number of
         * live contexts, and the original leaked one per click.
         */
        async function trimAudio() {
            const audioFileInput = document.getElementById('audioFile');
            const audioFile = audioFileInput.files[0];
            const startTime = 10; // Start time in seconds
            const endTime = 30; // End time in seconds

            if (!audioFile) {
                console.error('Please select an audio file.');
                return;
            }

            const audioContext = new (window.AudioContext || window.webkitAudioContext)();
            try {
                const fileBuffer = await audioFile.arrayBuffer();
                const audioBuffer = await audioContext.decodeAudioData(fileBuffer);

                // Clamp the trim window to the actual clip length.
                const effectiveEnd = Math.min(endTime, audioBuffer.duration);
                if (effectiveEnd <= startTime) {
                    console.error('Audio is shorter than the requested start time.');
                    return;
                }

                // Trim the audio buffer
                const trimmedBuffer = trimAudioBuffer(audioBuffer, startTime, effectiveEnd);

                // Create a Blob from the trimmed buffer
                const trimmedAudioBlob = await createBlobFromAudioBuffer(trimmedBuffer);

                // Create an <audio> element to play the trimmed audio
                const audioElement = document.createElement('audio');
                audioElement.controls = true;
                audioElement.src = URL.createObjectURL(trimmedAudioBlob);
                document.body.appendChild(audioElement);
            } catch (error) {
                console.error('Error trimming audio:', error);
            } finally {
                // Release decoder resources; the context is only needed
                // for decodeAudioData here.
                await audioContext.close();
            }
        }

        /**
         * Copy the [startTime, endTime) window of an AudioBuffer into a
         * new AudioBuffer.
         *
         * @param {AudioBuffer} buffer - decoded source audio.
         * @param {number} startTime - window start, in seconds.
         * @param {number} endTime - window end, in seconds.
         * @returns {AudioBuffer} a new buffer holding only the window.
         */
        function trimAudioBuffer(buffer, startTime, endTime) {
            const sampleRate = buffer.sampleRate;
            // Frame indices must be integers — seconds * rate can be
            // fractional — and must not run past the end of the buffer.
            const startFrame = Math.floor(startTime * sampleRate);
            const endFrame = Math.min(Math.floor(endTime * sampleRate), buffer.length);
            const channels = buffer.numberOfChannels;

            // Construct the target buffer directly. The original called
            // `audioContext.createBuffer(...)`, but `audioContext` is not
            // in scope here (the context in trimAudio() is local to that
            // function), which throws a ReferenceError at runtime.
            const trimmedBuffer = new AudioBuffer({
                numberOfChannels: channels,
                length: endFrame - startFrame,
                sampleRate,
            });

            for (let channel = 0; channel < channels; channel++) {
                const sourceData = buffer.getChannelData(channel).subarray(startFrame, endFrame);
                trimmedBuffer.getChannelData(channel).set(sourceData);
            }

            return trimmedBuffer;
        }

        /**
         * Encode an AudioBuffer as a 16-bit PCM WAV Blob.
         *
         * The original wrapped raw Float32 sample data in a Blob labeled
         * "audio/wav" — not a valid WAV file, so the <audio> element could
         * not play it — and audibly played the whole buffer in real time
         * just to wait for `onended`. This version writes a proper
         * RIFF/WAVE header plus interleaved 16-bit samples synchronously.
         *
         * @param {AudioBuffer} audioBuffer - audio to encode.
         * @returns {Promise<Blob>} resolves with an "audio/wav" Blob
         *   (kept Promise-returning for caller compatibility).
         */
        function createBlobFromAudioBuffer(audioBuffer) {
            const numChannels = audioBuffer.numberOfChannels;
            const sampleRate = audioBuffer.sampleRate;
            const numFrames = audioBuffer.length;
            const bytesPerSample = 2; // 16-bit PCM
            const blockAlign = numChannels * bytesPerSample;
            const dataSize = numFrames * blockAlign;

            const view = new DataView(new ArrayBuffer(44 + dataSize));

            // Write an ASCII chunk tag into the header.
            function writeAscii(dv, pos, text) {
                for (let i = 0; i < text.length; i++) {
                    dv.setUint8(pos + i, text.charCodeAt(i));
                }
            }

            // --- RIFF/WAVE header (44 bytes, little-endian fields) ---
            writeAscii(view, 0, 'RIFF');
            view.setUint32(4, 36 + dataSize, true); // remaining chunk size
            writeAscii(view, 8, 'WAVE');
            writeAscii(view, 12, 'fmt ');
            view.setUint32(16, 16, true);  // fmt chunk size
            view.setUint16(20, 1, true);   // audio format: PCM
            view.setUint16(22, numChannels, true);
            view.setUint32(24, sampleRate, true);
            view.setUint32(28, sampleRate * blockAlign, true); // byte rate
            view.setUint16(32, blockAlign, true);
            view.setUint16(34, 8 * bytesPerSample, true); // bits per sample
            writeAscii(view, 36, 'data');
            view.setUint32(40, dataSize, true);

            // Hoist per-channel data lookups out of the sample loop.
            const channelData = [];
            for (let ch = 0; ch < numChannels; ch++) {
                channelData.push(audioBuffer.getChannelData(ch));
            }

            // --- interleaved samples, clamped to [-1, 1] ---
            let offset = 44;
            for (let frame = 0; frame < numFrames; frame++) {
                for (let ch = 0; ch < numChannels; ch++) {
                    const sample = Math.max(-1, Math.min(1, channelData[ch][frame]));
                    view.setInt16(offset, sample < 0 ? sample * 0x8000 : sample * 0x7fff, true);
                    offset += bytesPerSample;
                }
            }

            return Promise.resolve(new Blob([view], { type: 'audio/wav' }));
        }
    </script>
</body>
</html>

相关问题