Created February 18, 2024 16:32
MP3 encoder
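Captures system audio output, including speechSynthesis.speak(), with getDisplayMedia() on Chromium, streams 128-sample render quanta out of an AudioWorkletProcessor, encodes them to MP3 with lamejs, and logs a blob: URL for the finished file when the AudioContext closes.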
/*
var url = URL.createObjectURL(new Blob([await (await fetch("https://raw.githubusercontent.com/guest271314/captureSystemAudio/master/native_messaging/capture_system_audio/lame.min.js")).text()], {
  type: "text/javascript",
}));
*/
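// Cache lame.min.js in the Origin Private File System so repeat runs import it without refetching.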
var dir = await navigator.storage.getDirectory();
var handle;
try {
  handle = await dir.getFileHandle("lame.js", {
    create: false,
  });
} catch (e) {
  console.log(e);
} finally {
  if (!handle) {
    handle = await dir.getFileHandle("lame.js", {
      create: true,
    });
    // Await the pipe; otherwise getFile() below can read the file before the write commits.
    await new Blob([
      await (await fetch("https://raw.githubusercontent.com/guest271314/captureSystemAudio/master/native_messaging/capture_system_audio/lame.min.js")).arrayBuffer(),
    ], {
      type: "text/javascript",
    }).stream().pipeTo(await handle.createWritable());
  }
}
var file = await handle.getFile();
var url = URL.createObjectURL(file);
var { lamejs } = await import(url);
var mp3encoder = new lamejs.Mp3Encoder(2, 44100, 128);
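// lamejs Mp3Encoder(channels, sampleRate, kbps): stereo 44.1 kHz input encoded at 128 kbps.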
var mp3controller = void 0;
var mp3stream = new ReadableStream({
  start(controller) {
    mp3controller = controller;
  },
});
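// Keep a reference to the stream's controller so MP3 chunks can be enqueued from the worklet's message handler below.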
var worklet = URL.createObjectURL(new Blob([`class AudioWorkletStream extends AudioWorkletProcessor {
  process(inputs, outputs) {
    const channels = inputs.flat();
    this.port.postMessage(channels);
    return true;
  }
}
registerProcessor(
  'audio-worklet-stream',
  AudioWorkletStream
);`], {
  type: "text/javascript",
}));
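// The processor posts one render quantum (128 frames per channel) per process() call; inputs.flat() turns [[left, right]] into [left, right].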
// Finally possible to capture speechSynthesis.speak() on Chromium and Chrome.
// Enable Speech Dispatcher and PulseAudio loopback for screen capture, and disable
// the default WebRTC input volume adjustment that reduces capture volume from 100% to 8%:
// chrome --enable-speech-dispatcher --enable-features=PulseaudioLoopbackForScreenShare --disable-features=WebRtcAllowInputVolumeAdjustment
// "Share system audio" still has to be selected manually in the picker even with systemAudio set to "include".
// https://issues.chromium.org/issues/40155218
let stream = await navigator.mediaDevices.getDisplayMedia({
  // We're not going to be using the video track
  video: {
    width: 0,
    height: 0,
    frameRate: 0,
    displaySurface: "monitor",
  },
  audio: {
    suppressLocalAudioPlayback: false,
    // Speech synthesis audio output is generally 1 channel
    channelCount: 2,
    noiseSuppression: false,
    autoGainControl: false,
    echoCancellation: false,
  },
  systemAudio: "include",
  // Doesn't work for tab capture
  // preferCurrentTab: true
});
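// getDisplayMedia() requires a video track per spec even though only audio is used here; the video track is stopped immediately below.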
function log(e, ...args) {
  if (e?.target) {
    console.log(e.target.constructor.name, e.type);
  } else {
    console.log(...args);
  }
}
let [videoTrack] = stream.getVideoTracks();
videoTrack.stop();
let [audioTrack] = stream.getAudioTracks();
log(null, audioTrack.constructor.name, audioTrack.kind, audioTrack.getSettings().deviceId);
// getConstraints() is synchronous; no await needed
console.log(audioTrack.getConstraints());
/*
let { readable } = new MediaStreamTrackProcessor({ track: audioTrack });
readable.pipeTo(new WritableStream({
  write(v) {
    console.log(v);
  },
  close() {
    console.log(audioTrack);
  }
}));
*/
let ac = new AudioContext({
  latencyHint: 0,
  // AudioContextOptions has no numberOfChannels member; channel counts are set on the nodes below
  sampleRate: 44100,
});
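// The 44100 Hz context sample rate matches the Mp3Encoder above, so no resampling happens between the graph and the encoder.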
ac.onstatechange = async (e) => {
  console.log(e);
  if (ac.state === "closed") {
    const mp3buf = mp3encoder.flush();
    if (mp3buf.length > 0) {
      mp3controller.enqueue(new Uint8Array(mp3buf));
    }
    // Close unconditionally so new Response(mp3stream) settles even when flush() is empty
    mp3controller.close();
    const blob = new Blob([await new Response(mp3stream).arrayBuffer()], {
      type: "audio/mp3",
    });
    console.log(URL.createObjectURL(blob));
  }
};
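// flush() emits lamejs' final buffered frames; closing the controller lets new Response(mp3stream).arrayBuffer() above resolve with the complete MP3.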
await ac.audioWorklet.addModule(worklet);
// The processor reads no options, so no processorOptions are passed
const aw = new AudioWorkletNode(ac, "audio-worklet-stream", {
  numberOfInputs: 1,
  numberOfOutputs: 2,
  outputChannelCount: [2, 2],
});
aw.onprocessorerror = (e) => {
  console.error(e);
  console.trace();
};
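// Message protocol from the worklet: an array of Float32Array channel data per render quantum; null means end-of-stream.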
aw.port.onmessage = async (e) => {
  // null from utterance.onend or audioTrack.onended signals end-of-stream
  if (!Array.isArray(e.data)) {
    msd.disconnect();
    aw.disconnect();
    if (ac.state !== "closed") {
      await ac.close();
    }
    return;
  }
  // Fall back to the left channel if the capture turns out to be mono
  const [left, right = left] = e.data;
  if (!left) {
    return;
  }
  // Scale Float32 samples in [-1, 1] to the signed 16-bit range lamejs expects
  const leftChannel = new Int16Array(left.length);
  const rightChannel = new Int16Array(right.length);
  for (let i = 0; i < left.length; i++) {
    leftChannel[i] = left[i] < 0 ? left[i] * 32768 : left[i] * 32767;
    rightChannel[i] = right[i] < 0 ? right[i] * 32768 : right[i] * 32767;
  }
  const mp3buf = mp3encoder.encodeBuffer(leftChannel, rightChannel);
  if (mp3buf.length > 0) {
    mp3controller.enqueue(new Uint8Array(mp3buf));
  }
};
let msd = new MediaStreamAudioSourceNode(ac, {
  mediaStream: stream,
});
msd.connect(aw);
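// Feed the captured MediaStream into the worklet. Nothing connects to ac.destination because the samples are only read, not played back.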
//let recorder = new MediaRecorder(stream);
//recorder.onstart = log;
/*
recorder.onstop = async (e) => {
  recorder.stream.getTracks().forEach((track) => track.stop());
  await ac.close();
  log(e);
};
recorder.ondataavailable = (e) => {
  console.log(URL.createObjectURL(e.data));
  log(e);
};
*/
let utterance = new SpeechSynthesisUtterance(`Test, test, test. Test to the point it breaks`);
utterance.onstart = async (e) => {
  if (ac.state === "suspended") {
    await ac.resume();
  }
  log(e);
};
utterance.onend = async (e) => {
  stream.getTracks().forEach((track) => track.stop());
  if (ac.state === "running") {
    aw.port.postMessage(null);
    msd.disconnect();
    aw.disconnect();
    await ac.close();
  }
  log(e);
};
audioTrack.onended = async (e) => {
  console.log(e);
  aw.port.postMessage(null);
  msd.disconnect();
  aw.disconnect();
  await ac.close();
};
globalThis.speechSynthesis.speak(utterance);
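After the utterance ends, the tracks are stopped, the encoder is flushed, and a blob: URL for the finished MP3 is logged to the console; open it in a new tab to play or save the recording.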