@meziantou
Last active April 7, 2024 14:02
JavaScript - Record audio
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8" />
<title></title>
</head>
<body>
<h1>Audio</h1>
<button id="startRecordingButton">Start recording</button>
<button id="stopRecordingButton">Stop recording</button>
<button id="playButton">Play</button>
<button id="downloadButton">Download</button>
<script>
var startRecordingButton = document.getElementById("startRecordingButton");
var stopRecordingButton = document.getElementById("stopRecordingButton");
var playButton = document.getElementById("playButton");
var downloadButton = document.getElementById("downloadButton");
var leftchannel = [];
var rightchannel = [];
var recorder = null;
var recordingLength = 0;
var volume = null;
var mediaStream = null;
var sampleRate = 44100; // default; overwritten with the AudioContext's actual rate when recording starts
var context = null;
var blob = null;
startRecordingButton.addEventListener("click", function () {
// Initialize recorder
// legacy callback-style API; modern browsers expose navigator.mediaDevices.getUserMedia instead
navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia || navigator.msGetUserMedia;
navigator.getUserMedia(
{
audio: true
},
function (e) {
console.log("user consent");
// creates the audio context
window.AudioContext = window.AudioContext || window.webkitAudioContext;
context = new AudioContext();
sampleRate = context.sampleRate; // use the context's real rate (often 48000, not 44100) so the WAV header matches the recorded samples
// creates an audio node from the microphone incoming stream
mediaStream = context.createMediaStreamSource(e);
// https://developer.mozilla.org/en-US/docs/Web/API/AudioContext/createScriptProcessor
// bufferSize: the onaudioprocess event is called when the buffer is full
var bufferSize = 2048;
var numberOfInputChannels = 2;
var numberOfOutputChannels = 2;
if (context.createScriptProcessor) {
recorder = context.createScriptProcessor(bufferSize, numberOfInputChannels, numberOfOutputChannels);
} else {
recorder = context.createJavaScriptNode(bufferSize, numberOfInputChannels, numberOfOutputChannels);
}
recorder.onaudioprocess = function (e) {
// copy the buffers: the Float32Arrays returned by getChannelData are reused between events
leftchannel.push(new Float32Array(e.inputBuffer.getChannelData(0)));
rightchannel.push(new Float32Array(e.inputBuffer.getChannelData(1)));
recordingLength += bufferSize;
};
// we connect the recorder
mediaStream.connect(recorder);
recorder.connect(context.destination);
},
function (e) {
console.error(e);
});
});
stopRecordingButton.addEventListener("click", function () {
// stop recording
recorder.disconnect(context.destination);
mediaStream.disconnect(recorder);
// we flat the left and right channels down
// Float32Array[] => Float32Array
var leftBuffer = flattenArray(leftchannel, recordingLength);
var rightBuffer = flattenArray(rightchannel, recordingLength);
// we interleave both channels together
// [left[0],right[0],left[1],right[1],...]
var interleaved = interleave(leftBuffer, rightBuffer);
// we create our wav file
var buffer = new ArrayBuffer(44 + interleaved.length * 2);
var view = new DataView(buffer);
// RIFF chunk descriptor
writeUTFBytes(view, 0, 'RIFF');
view.setUint32(4, 36 + interleaved.length * 2, true); // file length minus the 8-byte RIFF header
writeUTFBytes(view, 8, 'WAVE');
// FMT sub-chunk
writeUTFBytes(view, 12, 'fmt ');
view.setUint32(16, 16, true); // chunkSize
view.setUint16(20, 1, true); // wFormatTag
view.setUint16(22, 2, true); // wChannels: stereo (2 channels)
view.setUint32(24, sampleRate, true); // dwSamplesPerSec
view.setUint32(28, sampleRate * 4, true); // dwAvgBytesPerSec
view.setUint16(32, 4, true); // wBlockAlign
view.setUint16(34, 16, true); // wBitsPerSample
// data sub-chunk
writeUTFBytes(view, 36, 'data');
view.setUint32(40, interleaved.length * 2, true);
// write the PCM samples
var index = 44;
var volume = 1;
for (var i = 0; i < interleaved.length; i++) {
// clamp to [-1, 1] before scaling so loud samples cannot overflow the Int16 range
var sample = Math.max(-1, Math.min(1, interleaved[i]));
view.setInt16(index, sample * (0x7FFF * volume), true);
index += 2;
}
// our final blob
blob = new Blob([view], { type: 'audio/wav' });
});
playButton.addEventListener("click", function () {
if (blob == null) {
return;
}
var url = window.URL.createObjectURL(blob);
var audio = new Audio(url);
audio.play();
});
downloadButton.addEventListener("click", function () {
if (blob == null) {
return;
}
var url = URL.createObjectURL(blob);
var a = document.createElement("a");
document.body.appendChild(a);
a.style = "display: none";
a.href = url;
a.download = "sample.wav";
a.click();
window.URL.revokeObjectURL(url);
});
function flattenArray(channelBuffer, recordingLength) {
var result = new Float32Array(recordingLength);
var offset = 0;
for (var i = 0; i < channelBuffer.length; i++) {
var buffer = channelBuffer[i];
result.set(buffer, offset);
offset += buffer.length;
}
return result;
}
function interleave(leftChannel, rightChannel) {
var length = leftChannel.length + rightChannel.length;
var result = new Float32Array(length);
var inputIndex = 0;
for (var index = 0; index < length;) {
result[index++] = leftChannel[inputIndex];
result[index++] = rightChannel[inputIndex];
inputIndex++;
}
return result;
}
function writeUTFBytes(view, offset, string) {
for (var i = 0; i < string.length; i++) {
view.setUint8(offset + i, string.charCodeAt(i));
}
}
</script>
</body>
</html>
@prasanna-code

Hi guys,

Can anyone help me record WAV audio in a 24-bit format? I have it working in 16-bit and 32-bit.

Thanks in advance.

Thanks,
PJ
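
For anyone after the same thing, a minimal sketch of a 24-bit sample loop, assuming the same interleaved Float32Array as in the gist above. The header has to change too: wBitsPerSample = 24, wBlockAlign = 6 for stereo, dwAvgBytesPerSec = sampleRate * 6, and every size field computed with * 3 instead of * 2, including the ArrayBuffer allocation:

    // sketch only: replaces the 16-bit sample loop; the header changes above are assumed
    var index = 44;
    for (var i = 0; i < interleaved.length; i++) {
        var s = Math.max(-1, Math.min(1, interleaved[i]));       // clamp to [-1, 1]
        var v = Math.round(s < 0 ? s * 0x800000 : s * 0x7FFFFF); // scale to signed 24-bit
        view.setUint8(index, v & 0xFF);                          // little-endian: low byte first
        view.setUint8(index + 1, (v >> 8) & 0xFF);
        view.setUint8(index + 2, (v >> 16) & 0xFF);
        index += 3;
    }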

@gauravgargtech

Thanks for sharing the cool snippet. Can we detect silence and automatically stop the recording, then restart it when the person starts speaking again?
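
One way to approximate this inside onaudioprocess is to measure each buffer's RMS level and skip buffers below a threshold; the threshold value here is a guess you would need to tune:

    // sketch: drop buffers whose RMS level is below a (hypothetical, tunable) threshold
    var SILENCE_THRESHOLD = 0.01; // assumed value; tune for your microphone and room
    recorder.onaudioprocess = function (e) {
        var left = e.inputBuffer.getChannelData(0);
        var sum = 0;
        for (var i = 0; i < left.length; i++) {
            sum += left[i] * left[i];
        }
        var rms = Math.sqrt(sum / left.length);
        if (rms < SILENCE_THRESHOLD) {
            return; // treat this buffer as silence and do not record it
        }
        leftchannel.push(new Float32Array(left));
        rightchannel.push(new Float32Array(e.inputBuffer.getChannelData(1)));
        recordingLength += bufferSize;
    };

This removes silent stretches from the output rather than truly stopping and restarting the recorder, but for many use cases the result is the same.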

@guest271314

How do you reverse the procedure to get the samples back as a Float32Array from the WAV file?

Skip the 44-byte header with wav.slice(44); see https://stackoverflow.com/a/35248852
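
Spelling that out for the 16-bit stereo WAV this gist produces (a sketch; it assumes the fixed 44-byte header written above):

    // sketch: decode the 16-bit stereo WAV produced above back into two Float32Arrays
    function wavToFloat32(arrayBuffer) {
        var view = new DataView(arrayBuffer);
        var frameCount = (arrayBuffer.byteLength - 44) / 4; // 4 bytes per stereo frame
        var left = new Float32Array(frameCount);
        var right = new Float32Array(frameCount);
        for (var i = 0; i < frameCount; i++) {
            left[i] = view.getInt16(44 + i * 4, true) / 0x7FFF;      // even samples: left
            right[i] = view.getInt16(44 + i * 4 + 2, true) / 0x7FFF; // odd samples: right
        }
        return { left: left, right: right };
    }
    // usage: blob.arrayBuffer().then(function (buf) { console.log(wavToFloat32(buf)); });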

@Bufarhan commented Apr 2, 2021

Hi guys, can anyone tell me how to send the blob or the ArrayBuffer to Blazor?
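
One possible route is Blazor's JS interop: encode the blob as base64 and pass the string to a static [JSInvokable] .NET method, then decode it in C# with Convert.FromBase64String. The assembly name 'MyAssembly' and method name 'ReceiveWavBase64' below are placeholders:

    // sketch: send the recorded blob to .NET via Blazor JS interop
    function sendBlobToBlazor(blob) {
        var reader = new FileReader();
        reader.onloadend = function () {
            // reader.result is "data:audio/wav;base64,..."; keep only the payload
            var base64 = reader.result.split(',')[1];
            DotNet.invokeMethodAsync('MyAssembly', 'ReceiveWavBase64', base64);
        };
        reader.readAsDataURL(blob);
    }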

@pradeep1983

Hi, I tried the same approach as above with multiple channels:

var buffer = [];
for (var channel = 0; channel < 2; channel++) {
    buffer.push(e.inputBuffer.getChannelData(channel));
}

I am using a single audio device and expect different data per channel, e.g. buffer[0] = [0, 1, 2, 3, 2] and buffer[1] = [0, 0, 0, 0, 0], but I receive similar data from both channels, e.g. buffer[0] = [0, 1, 2, 3, 2] and buffer[1] = [0, 1, 2, 3, 2].

Please help me get the buffer separated by channel.

Thanks
Pradeep
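
For what it's worth, identical data on both channels usually means the capture device is mono and the browser upmixes the single channel into both. A sketch to check what you are actually getting and to ask for a true stereo capture (channelCount support varies by browser):

    // sketch: request a 2-channel capture and inspect what the browser actually granted
    navigator.mediaDevices.getUserMedia({ audio: { channelCount: { ideal: 2 } } })
        .then(function (stream) {
            var track = stream.getAudioTracks()[0];
            console.log("actual channel count:", track.getSettings().channelCount);
        });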

@ariel-bentu

Hello, this has been very helpful.

One piece of feedback: while using it, I noticed that the microphone is not released unless, on top of the disconnects you already have in the stop handler:

// stop recording
recorder.disconnect(context.destination);
mediaStream.disconnect(recorder);

you also stop the tracks and close the context (note that in the gist, mediaStream is the source node, so the underlying stream is mediaStream.mediaStream):

mediaStream.mediaStream.getAudioTracks().forEach(function (track) {
    track.stop();
});
context.close();

I hope this can be my small contribution back :)
