|
// This file is required by the index.html file and will |
|
// be executed in the renderer process for that window. |
|
// All of the Node.js APIs are available in this process. |
|
|
|
// Source for the AudioWorklet processor, kept inline as a string so it can
// be loaded via a Blob URL (the renderer has no separate worklet file).
// The processor forwards each render quantum's first-channel PCM samples
// to the main thread over its MessagePort.
const workletString = `
class PcmAudioWorkletProcessorWorklet extends AudioWorkletProcessor {

  process(inputs, outputs, parameters) {
    if (inputs.length === 0) {
      throw new Error('PcmAudioWorkletProcessorWorklet has no inputs');
    }

    const input = inputs[0];

    if (input.length === 0) {
      // No channels in this quantum (e.g. the source is not connected yet).
      // Keep the processor alive and wait for data; returning false here
      // would terminate it permanently on a transient empty quantum.
      return true;
    }

    const firstChannelSamples = input[0];

    // Copy the samples before transferring: the input Float32Array's buffer
    // belongs to the audio engine and may be reused on the next render
    // quantum, so detaching it via transfer could corrupt later callbacks.
    const samples = new Float32Array(firstChannelSamples);
    this.port.postMessage(samples, [samples.buffer]);

    return true;
  }
}

registerProcessor('pcm-audio-worklet-processor', PcmAudioWorkletProcessorWorklet);
`;
|
|
|
/**
 * Enumerate every available audio input (microphone) device.
 *
 * @returns {Promise<MediaDeviceInfo[]>} devices whose kind is 'audioinput'.
 */
async function listAudioInputs() {
  const allDevices = await navigator.mediaDevices.enumerateDevices();
  const audioInputs = [];
  for (const device of allDevices) {
    if (device.kind === 'audioinput') {
      audioInputs.push(device);
    }
  }
  return audioInputs;
}
|
|
|
/**
 * Resolve the preferred audio input device.
 *
 * Prefers the device whose deviceId is the literal string 'default'; when
 * no such device exists, optionally falls back to the first available input.
 *
 * @param {boolean} [fallbackToFirstInput=true] - when no 'default' device is
 *   found, return the first input instead of undefined.
 * @returns {Promise<MediaDeviceInfo|undefined>} the chosen device, or
 *   undefined when none qualifies.
 */
async function getDefaultInput(fallbackToFirstInput = true) {
  const inputs = await listAudioInputs();
  for (const candidate of inputs) {
    if (candidate.deviceId === 'default') {
      return candidate;
    }
  }
  if (fallbackToFirstInput && inputs.length > 0) {
    return inputs[0];
  }
  return undefined;
}
|
|
|
/**
 * Open a microphone MediaStream for the given input device.
 *
 * @param {MediaDeviceInfo} device - audio input whose deviceId is requested.
 * @returns {Promise<MediaStream>} the captured audio stream.
 * @throws rethrows any getUserMedia failure after reporting it via trackError.
 */
async function getAudioStream(device) {
  try {
    const constraints = {
      audio: {
        deviceId: device.deviceId,
      },
    };
    // `await` is required here: returning the bare promise would let a
    // rejection escape this try/catch, so trackError would never run.
    return await navigator.mediaDevices.getUserMedia(constraints);
  } catch (e) {
    trackError(e, 'get-microphone-stream');
    throw e;
  }
}
|
|
|
/**
 * Build a capture pipeline: microphone stream -> AudioContext ->
 * AudioWorklet that posts each render quantum's first-channel PCM samples
 * back over its MessagePort. An onmessage handler logs the effective
 * sample throughput roughly once per second.
 *
 * @param {MediaDeviceInfo} device - audio input device to record from.
 * @returns {Promise<{context: AudioContext, stream: MediaStream, workletNode: AudioWorkletNode}>}
 *   the live pipeline pieces, so the caller can later disconnect/close them.
 *   (Previously returned undefined; callers that ignore the return value
 *   are unaffected.)
 * @throws {Error} when the stream has no audio tracks or reports no sample rate.
 */
async function createRecordingPipeline(device) {
  const stream = await getAudioStream(device);
  const audioTracks = stream.getAudioTracks();
  if (audioTracks.length === 0) {
    throw new Error('Microphone stream has no audio tracks');
  }
  const sampleRate = audioTracks[0].getSettings().sampleRate;
  if (sampleRate === undefined) {
    throw new Error('Audio input has undefined sample rate');
  }

  // Ask the context to run at the track's native rate to avoid resampling;
  // the browser may still pick a different rate, hence the warning below.
  const context = new AudioContext({ sampleRate, latencyHint: 'interactive' });
  if (context.sampleRate !== sampleRate) {
    console.warn(
      `AudioContext.sampleRate (${
        context.sampleRate
      }) differs from requested sampleRate (${sampleRate})`,
    );
  }
  console.log(`requested sample rate: ${sampleRate}`);
  console.log(`audio context sample rate: ${context.sampleRate}`);

  const sourceNode = context.createMediaStreamSource(stream);

  // The worklet source lives in a string; serve it through a temporary
  // Blob URL so audioWorklet.addModule can fetch it.
  const blob = new Blob([workletString], { type: 'text/javascript' });
  const workletUrl = URL.createObjectURL(blob);
  try {
    await context.audioWorklet.addModule(workletUrl);
  } finally {
    // Release the Blob URL once the module load settles; otherwise the
    // blob leaks for the lifetime of the page.
    URL.revokeObjectURL(workletUrl);
  }
  const workletNode = new AudioWorkletNode(context, 'pcm-audio-worklet-processor');

  sourceNode.connect(workletNode);
  const destinationNode = context.createMediaStreamDestination();
  workletNode.connect(destinationNode);

  // Count samples delivered by the worklet and log the effective rate
  // roughly once per second.
  let samplesReceived = 0;
  let lastLogAt = performance.now();
  workletNode.port.onmessage = (event) => {
    const data = event.data;
    samplesReceived += data.length;

    const now = performance.now();
    if (now > lastLogAt + 1000) {
      console.log(`${samplesReceived} samples/sec (effective sample rate)`);
      console.log(data.length);

      lastLogAt = now;
      samplesReceived = 0;
    }
  };

  return { context, stream, workletNode };
}
|
|
|
/**
 * Entry point: pick the default microphone and start recording from it.
 *
 * @returns {Promise<void>} resolves once the pipeline is running.
 */
async function goGoGo() {
  const inputDevice = await getDefaultInput();
  await createRecordingPipeline(inputDevice);
}
|
|
|
goGoGo(); |