-
-
Save halcy/d20b0bc2de82ceae2f6ba8a83901b265 to your computer and use it in GitHub Desktop.
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
<!DOCTYPE html>
<html lang="en">
<head>
  <meta charset="UTF-8">
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  <title>Ultrasound FSK Transceiver</title>
  <style>
    body {
      font-family: sans-serif;
      display: flex;
      flex-direction: column;
      align-items: center;
      padding: 2rem;
    }
    textarea,
    select,
    .debug {
      width: 80%;
      margin-bottom: 1rem;
    }
    textarea {
      height: 150px;
      font-size: 1.2rem;
      padding: 0.5rem;
    }
    select {
      height: 2rem;
      font-size: 1rem;
    }
    button {
      padding: 0.5rem 1rem;
      font-size: 1rem;
      margin: 0.5rem;
    }
    /* Scrolling diagnostics panel filled by the script's log() helper */
    .debug {
      height: 100px;
      overflow-y: auto;
      border: 1px solid #ccc;
      padding: 0.5rem;
      background: #f9f9f9;
    }
    /* Live FFT spectrum display, drawn by drawSpectrum() */
    canvas {
      margin-bottom: 1rem;
      background: #000;
    }
  </style>
</head>
<body>
  <h1>Ultrasound RZ-FSK Transmitter & Receiver</h1>
  <!-- Outgoing message; pressing Enter transmits -->
  <textarea id="send-input" placeholder="Type message and press Enter to send"></textarea>
  <!-- Incoming decoded text (read-only) -->
  <textarea id="recv-output" placeholder="Decoded message" readonly></textarea>
  <label for="mic-select">Select Microphone:</label>
  <select id="mic-select"></select>
  <div>
    <button id="start-listen">Start Listening</button>
    <button id="stop-listen" disabled>Stop Listening</button>
  </div>
  <canvas id="spectrum" width="800" height="150"></canvas>
  <div class="debug" id="debug"></div>
| <script> | |
// DOM element references (ids match the markup above)
const sendInput = document.getElementById('send-input');     // TX message entry
const recvOutput = document.getElementById('recv-output');   // RX decoded output
const micSelect = document.getElementById('mic-select');     // microphone picker
const startListen = document.getElementById('start-listen');
const stopListen = document.getElementById('stop-listen');
const debugEl = document.getElementById('debug');            // scrolling debug log
const canvas = document.getElementById('spectrum');
const canvasCtx = canvas.getContext('2d');                   // spectrum drawing context
// ---- FSK modulation parameters ----
const f0 = 17000; // Hz, tone for a '0' bit
const f1 = 18000; // Hz, tone for a '1' bit
const fp = 17500; // Hz, pilot tone (midway between f0 and f1)
const baud = 10; // bits per second, baud rate for transmission
const bitDuration = 1 / baud; // seconds, duration of one bit slot
const pilotFrac = 0.2; // fraction of a bit slot spent on the pilot tone
const dataFrac = 0.6; // fraction of a bit slot spent on the data tone
const pilotDur = bitDuration * pilotFrac; // seconds, pilot pulse duration
const dataDur = bitDuration * dataFrac; // seconds, data pulse duration
const ramp = 0.0005; // seconds, frequency glide time between tones
const fadeTime = 0.1; // seconds, initial fade in/out to avoid clicks
const sampleRate = 48000; // requested rate; browsers treat this as a suggestion
// ---- Detector parameters ----
const threshold = -70; // dBFS minimum band level to count as signal
const margin = 6; // dB a band must exceed the pilot (or vice versa) to register
// ---- Audio context and shared transceiver state ----
const audioCtx = new (window.AudioContext || window.webkitAudioContext)({ sampleRate });
let sendQueue = []; // queued bit strings awaiting transmission
let sending = false; // true while transmit() is draining the queue
let rxState = 'pilot'; // receiver edge-detector state: 'pilot' | 'data'
let bufferBits = ''; // received bits awaiting framing/decoding
// Receiver graph nodes, assigned in the start-listen handler and read by the
// stop handler, process() and drawSpectrum(). Declared here so those
// assignments do not create implicit globals.
let streamSource;
let filterP;
let filter0;
let filter1;
let analyserP;
let analyser0;
let analyser1;
let scriptNode;
// Append one line to the on-page debug panel and keep it scrolled to the end.
function log(msg) {
  const line = document.createElement('div');
  line.textContent = msg;
  debugEl.appendChild(line);
  debugEl.scrollTop = debugEl.scrollHeight;
}
// Promise-based delay so async code can pause with `await sleep(ms)`.
function sleep(ms) {
  return new Promise((resolve) => {
    setTimeout(resolve, ms);
  });
}
// Populate the microphone selector.
// Real device labels are only exposed by some browsers (Chrome); others may
// only report generic names, hence the `Mic N` fallback.
// The chain previously floated with no rejection handler; failures now land
// in the debug log instead of an unhandled-rejection event.
audioCtx.resume()
  .then(() => navigator.mediaDevices.enumerateDevices())
  .then(devs => {
    devs.filter(d => d.kind === 'audioinput').forEach(d =>
      micSelect.add(new Option(d.label || `Mic${micSelect.length + 1}`, d.deviceId))
    );
  })
  .catch(err => log('Device enumeration failed: ' + err));
// Transmit the typed message when Enter is pressed in the send box.
sendInput.addEventListener('keydown', (event) => {
  if (event.key !== 'Enter') return;
  event.preventDefault();
  const message = sendInput.value;
  sendInput.value = '';
  queueSend(message);
});
// Build the on-air bit string for a message: a ten-zero preamble for receiver
// sync, then one 10-bit frame per character of the form
// start('1') + 8 ASCII data bits (LSB first) + stop('1').
function encodeMessage(msg) {
  let bits = '0000000000'; // preamble: ten pilot-only slots
  for (const ch of msg) {
    const code = ch.charCodeAt(0);
    bits += '1'; // start bit
    for (let i = 0; i < 8; i++) bits += ((code >> i) & 1) ? '1' : '0';
    bits += '1'; // stop bit
  }
  return bits;
}
// Queue a message for transmission and kick the transmitter if it is idle.
function queueSend(msg) {
  sendQueue.push(encodeMessage(msg));
  // transmit() is async; report failures instead of leaving a floating promise.
  if (!sending) transmit().catch(err => log('TX error: ' + err));
}
// Drain the send queue one bit string at a time, waiting out each
// transmission's estimated air time plus a short inter-message gap.
async function transmit() {
  sending = true;
  while (sendQueue.length) {
    const bits = sendQueue.shift();
    log('TX bits:' + bits);
    playSequence(bits);
    // Fade-in + all bit slots + fade-out, converted to milliseconds.
    await sleep((fadeTime * 2 + bits.length * bitDuration) * 1000);
    // Small gap between consecutive messages.
    await sleep(50);
  }
  sending = false;
}
// Render a bit string as return-to-zero FSK audio: the oscillator rests on the
// pilot tone fp and glides to f0/f1 for the data portion of each bit slot.
function playSequence(bits) {
  const now = audioCtx.currentTime;
  // Oscillator -> gain -> speakers; the gain stage handles fades.
  const oscillator = audioCtx.createOscillator();
  const gain = audioCtx.createGain();
  oscillator.connect(gain).connect(audioCtx.destination);
  // Start on the pilot tone and fade in to avoid a turn-on click.
  oscillator.frequency.setValueAtTime(fp, now);
  gain.gain.setValueAtTime(0, now);
  gain.gain.linearRampToValueAtTime(1, now + fadeTime);
  // Schedule every bit slot relative to the end of the fade-in.
  const firstBitAt = now + fadeTime;
  bits.split('').forEach((bit, i) => {
    const toneFreq = bit === '1' ? f1 : f0;
    const slotStart = firstBitAt + i * bitDuration;
    const dataStart = slotStart + pilotDur;
    const dataEnd = dataStart + dataDur;
    // Glide pilot -> data tone just before the data segment begins...
    oscillator.frequency.setValueAtTime(fp, dataStart - ramp);
    oscillator.frequency.linearRampToValueAtTime(toneFreq, dataStart);
    // ...and glide back to the pilot at the end of the data segment.
    oscillator.frequency.setValueAtTime(toneFreq, dataEnd - ramp);
    oscillator.frequency.linearRampToValueAtTime(fp, dataEnd);
  });
  // Fade out to avoid a turn-off click, then stop shortly after.
  const lastBitEnds = firstBitAt + bits.length * bitDuration;
  gain.gain.setValueAtTime(1, lastBitEnds);
  gain.gain.linearRampToValueAtTime(0, lastBitEnds + fadeTime);
  oscillator.start(now);
  oscillator.stop(lastBitEnds + fadeTime + 0.001);
}
// Receiver start: open the selected microphone and build the analysis graph:
//   mic -> bandpass(fp|f0|f1) -> analyser(x3) -> script processor -> process()
startListen.addEventListener('click', async () => {
  let stream;
  try {
    // Disable browser DSP (AGC, echo cancellation, noise suppression) —
    // all of it mangles a narrowband ultrasound signal.
    stream = await navigator.mediaDevices.getUserMedia({
      audio: {
        deviceId: micSelect.value,
        echoCancellation: false,
        noiseSuppression: false,
        autoGainControl: false
      }
    });
  } catch (err) {
    // Permission denied / no device: report it instead of an unhandled rejection.
    log('RX error: could not open microphone: ' + err);
    return;
  }
  // Sanity-check the capture rate; some browsers do not report it at all,
  // in which case no warning is emitted (nothing meaningful to compare).
  const [track] = stream.getAudioTracks();
  const settings = track.getSettings();
  console.log('Device capture sampleRate:', settings.sampleRate ?? 'not reported');
  if (settings.sampleRate !== undefined && settings.sampleRate !== sampleRate) {
    log('Warning: Device sample rate (' + settings.sampleRate + ') does not match expected (' + sampleRate + ')');
    log('Probably nothing will work');
  }
  streamSource = audioCtx.createMediaStreamSource(stream);
  // Narrow bandpass filters isolate the pilot and the two data tones.
  filterP = audioCtx.createBiquadFilter(); filterP.type = 'bandpass'; filterP.frequency.value = fp; filterP.Q.value = 200;
  filter0 = audioCtx.createBiquadFilter(); filter0.type = 'bandpass'; filter0.frequency.value = f0; filter0.Q.value = 200;
  filter1 = audioCtx.createBiquadFilter(); filter1.type = 'bandpass'; filter1.frequency.value = f1; filter1.Q.value = 200;
  // One FFT analyser per band; smoothing off so bit edges stay sharp.
  analyserP = audioCtx.createAnalyser(); analyserP.fftSize = 256; analyserP.smoothingTimeConstant = 0.0;
  analyser0 = audioCtx.createAnalyser(); analyser0.fftSize = 256; analyser0.smoothingTimeConstant = 0.0;
  analyser1 = audioCtx.createAnalyser(); analyser1.fftSize = 256; analyser1.smoothingTimeConstant = 0.0;
  // Periodic callback in the audio pipeline's cadence (256-sample buffers).
  scriptNode = audioCtx.createScriptProcessor(256, 1, 1);
  // Wire up the graph.
  streamSource.connect(filterP); filterP.connect(analyserP);
  streamSource.connect(filter0); filter0.connect(analyser0);
  streamSource.connect(filter1); filter1.connect(analyser1);
  analyserP.connect(scriptNode);
  analyser0.connect(scriptNode);
  analyser1.connect(scriptNode);
  scriptNode.connect(audioCtx.destination);
  scriptNode.onaudioprocess = process;
  // Reset the receiver state machine and flip the buttons.
  rxState = 'pilot';
  bufferBits = '';
  startListen.disabled = true;
  stopListen.disabled = false;
  log('RX: Listening...');
  drawSpectrum();
});
// Receiver stop: tear down the analysis graph and re-enable the start button.
// NOTE(review): the bandpass filters stay connected to the (now disconnected)
// stream source, and the MediaStream tracks are not stopped here, so the
// browser's microphone indicator may remain lit — confirm this is intended.
stopListen.addEventListener('click', () => {
  scriptNode.disconnect(); analyserP.disconnect(); analyser0.disconnect(); analyser1.disconnect(); streamSource.disconnect();
  startListen.disabled = false; stopListen.disabled = true;
  log('RX: Stopped');
});
// Per-audio-buffer detector: compares the three band energies and runs a
// two-state edge detector — leaving the pilot tone emits a bit ('pilot' ->
// 'data'), returning to the pilot re-arms it ('data' -> 'pilot').
function process() {
  // Snapshot the latest spectrum from each band analyser.
  const specP = new Float32Array(analyserP.frequencyBinCount);
  const spec0 = new Float32Array(analyser0.frequencyBinCount);
  const spec1 = new Float32Array(analyser1.frequencyBinCount);
  analyserP.getFloatFrequencyData(specP);
  analyser0.getFloatFrequencyData(spec0);
  analyser1.getFloatFrequencyData(spec1);
  // Level (dBFS) in the single FFT bin closest to each tone frequency.
  const levelP = specP[Math.round(fp / (audioCtx.sampleRate / analyserP.fftSize))];
  const level0 = spec0[Math.round(f0 / (audioCtx.sampleRate / analyser0.fftSize))];
  const level1 = spec1[Math.round(f1 / (audioCtx.sampleRate / analyser1.fftSize))];
  if (rxState === 'pilot') {
    // Whichever data tone clearly dominates the pilot becomes the next bit
    // ('0' wins ties by check order, as before).
    if (level0 > levelP + margin && level0 > threshold) {
      bufferBits += '0';
      rxState = 'data';
    } else if (level1 > levelP + margin && level1 > threshold) {
      bufferBits += '1';
      rxState = 'data';
    }
  } else if (rxState === 'data') {
    // Pilot back on top of both data tones: ready for the next bit.
    if (levelP > level0 + margin && levelP > level1 + margin && levelP > threshold) {
      rxState = 'pilot';
    }
  }
  decodeBuffer();
}
// Frame decoder: resyncs on the ten-zero preamble when present, otherwise
// parses one 10-bit frame (start '1', 8 ASCII bits LSB-first, stop '1') from
// the front of bufferBits per call. Called from every process() invocation,
// so frames drain quickly even one at a time.
function decodeBuffer() {
  const preamble = '0000000000';
  const syncAt = bufferBits.indexOf(preamble);
  if (syncAt >= 0) {
    // Found a preamble: discard everything up to and including it.
    bufferBits = bufferBits.slice(syncAt + preamble.length);
    log('Syncing to preamble at index ' + syncAt + ', remaining bits: ' + bufferBits.length);
    return;
  }
  if (bufferBits.length < 10) return; // a full frame is 10 bits
  log('Buffer bits: ' + bufferBits);
  const frame = bufferBits.slice(0, 10);
  if (frame[0] !== '1' || frame[9] !== '1') {
    // Bad framing: shift by one bit and retry on the next call.
    log('Invalid frame start or end: ' + frame);
    bufferBits = bufferBits.slice(1);
    return;
  }
  // Reassemble the character from its LSB-first data bits.
  let code = 0;
  for (let i = 0; i < 8; i++) {
    if (frame[1 + i] === '1') code |= 1 << i;
  }
  const char = String.fromCharCode(code);
  recvOutput.value += char;
  log('Decoded char: ' + char);
  bufferBits = bufferBits.slice(10);
}
// Spectrum visualizer: overlays the three band analysers' byte spectra on the
// canvas every animation frame (pilot grey, f0 green, f1 blue).
// Useful for debugging and also looks cool.
function drawSpectrum() {
  requestAnimationFrame(drawSpectrum);
  if (!analyserP) return; // nothing to draw until listening has started
  // Draw one analyser's byte spectrum as vertical bars in the given color.
  const drawBars = (analyser, color) => {
    const bins = new Uint8Array(analyser.frequencyBinCount);
    analyser.getByteFrequencyData(bins);
    const barWidth = canvas.width / bins.length;
    canvasCtx.fillStyle = color; // set once per pass, not per bar
    bins.forEach((v, i) => canvasCtx.fillRect(i * barWidth, canvas.height - v, barWidth, v));
  };
  canvasCtx.clearRect(0, 0, canvas.width, canvas.height);
  drawBars(analyserP, 'rgb(200,200,200)');
  drawBars(analyser0, 'rgb(50,255,50)');
  drawBars(analyser1, 'rgb(50,50,255)');
}
// Browsers block audio until a user gesture; resume the context on the first
// click anywhere on the page.
document.body.addEventListener('click', () => {
  if (audioCtx.state === 'suspended') {
    audioCtx.resume();
  }
});
| </script> | |
| </body> | |
| </html> |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment