Create audio data in WebGPU compute shaders
@JolifantoBambla · Last active April 25, 2024
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <title>WebGPU Audio</title>
</head>
<body>
<button type="button" id="play">Play</button>
<script type="module">
const code = `
override WORKGROUP_SIZE: u32 = 256;
override SAMPLING_RATE: f32 = 44100.0;

struct TimeInfo {
    // time since song start in seconds
    offset: f32,
}

@group(0) @binding(0) var<uniform> time_info: TimeInfo;
@group(0) @binding(1) var<storage, read_write> song_chunk: array<vec2<f32>>; // 2-channel PCM data

@compute
@workgroup_size(WORKGROUP_SIZE)
fn synthesize(@builtin(global_invocation_id) global_id: vec3<u32>) {
    // each invocation computes one stereo sample of the chunk
    let sample = global_id.x;
    if sample >= arrayLength(&song_chunk) {
        return;
    }
    let t = f32(sample) / SAMPLING_RATE;
    song_chunk[sample] = song(time_info.offset + t);
}
// -------------------------------------------------------------------------------------------------
// The rest of the shader is copied & ported from https://www.shadertoy.com/view/7lyfWR
// -------------------------------------------------------------------------------------------------
const PI: f32 = 3.141592654;
const TAU: f32 = 6.283185307179586476925286766559;
// length of string (approx. 25 inches, a typical guitar scale length)
const L: f32 = 0.635;
// height of pluck (12.5 cm, just a random number to make it clearly audible)
const h: f32 = 0.125;
// position of pluck along string (15 cm, approx. 6 inches from the lower bridge)
const d: f32 = 0.15;
// damping coefficient (bigger = shorter decay)
const GAMMA: f32 = 2.5;
// string stiffness coefficient
const b: f32 = 0.008;
const MAX_HARMONICS: u32 = 50u;
// fundamental frequencies of each string
// changing these will "tune" the guitar
const FUNDAMENTAL: array<f32, 6> = array<f32, 6>(
    329.63, // E4
    246.94, // B3
    196.00, // G3
    146.83, // D3
    110.00, // A2
    82.41   // E2
);
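// For reference, the additive model implemented in song() below: each string is a damped
// sum of harmonics of an ideal plucked string. With pluck height h at position d on a
// string of length L, harmonic n (1-based) has amplitude
//     a_n = (2 h L^2) / (PI^2 d (L - d) n^2) * sin(n PI d / L)
// and, with stiffness coefficient b, the slightly inharmonic frequency
//     f_n = n * f_0 * sqrt(1 + b^2 n^2),
// each harmonic decaying like exp(-n * GAMMA * f_0 / 200 * t).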
fn song(time: f32) -> vec2<f32> {
    var sig = 0.0;
    // for each string
    for (var s = 0u; s < 6u; s += 1u) {
        // repeat at a different offset
        let t = (time + f32(s) / 6.) % (8. / 6.);
        // for each harmonic
        for (var n = 0u; n < MAX_HARMONICS; n += 1u) {
            // amplitude of this harmonic
            let a_n: f32 = ((2. * h * L * L) / (PI * PI * d * (L - d) * f32(n + 1u) * f32(n + 1u))) * sin((f32(n + 1u) * PI * d) / L);
            // frequency of this harmonic
            let f_n = f32(n + 1u) * FUNDAMENTAL[s] * sqrt(1. + b * b * f32(n + 1u) * f32(n + 1u));
            // add to the total signal, with exponential falloff
            sig += a_n * sin(TAU * f_n * t) * exp(-f32(n + 1u) * GAMMA * FUNDAMENTAL[s] / 200.0 * t);
        }
    }
    // x = left channel, y = right channel
    return vec2(sig);
}
`;
const shaderModuleDescriptor = {code};
async function playSong() {
    // CONFIG STUFF
    const chunkDurationSeconds = 1;
    const numChannels = 2; // currently only two channels are allowed (the shader uses vec2)
    const workgroupSize = 256;
    const maxBufferedChunks = 5;

    if (numChannels !== 2) {
        throw new Error('Currently the number of channels has to be 2, sorry :/');
    }

    // CREATE AUDIO CONTEXT
    const audioCtx = new (window.AudioContext || window.webkitAudioContext)();

    // SET UP WEBGPU + BUFFERS + RESOURCES
    const adapter = await navigator.gpu.requestAdapter();
    if (!adapter) {
        throw new Error('WebGPU is not supported in this browser');
    }
    const device = await adapter.requestDevice();

    const chunkNumSamplesPerChannel = audioCtx.sampleRate * chunkDurationSeconds;
    const chunkNumSamples = numChannels * chunkNumSamplesPerChannel;
    const chunkBufferSize = Float32Array.BYTES_PER_ELEMENT * chunkNumSamples;

    const chunkBuffer = device.createBuffer({
        size: chunkBufferSize,
        usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC,
    });
    // We did not cover this in the workshop: storage & uniform buffers can't be mapped for reading on the CPU directly.
    // Instead, we need to copy the data to an extra buffer with the MAP_READ & COPY_DST usages.
    // To get the data to the CPU, we map that buffer into CPU memory and then copy the mapped memory into a JavaScript ArrayBuffer.
    // Note that a mapped buffer must not be used in a command encoder.
    const chunkMapBuffer = device.createBuffer({
        size: chunkBufferSize,
        usage: GPUBufferUsage.MAP_READ | GPUBufferUsage.COPY_DST,
    });
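    // The same pattern generalizes to reading back any buffer. A minimal helper sketch
    // (not used below; `buffer` is any buffer created with COPY_SRC, `size` its byte size):
    async function readBufferToCpu(device, buffer, size) {
        // staging buffer that can be mapped for reading on the CPU
        const staging = device.createBuffer({size, usage: GPUBufferUsage.MAP_READ | GPUBufferUsage.COPY_DST});
        const encoder = device.createCommandEncoder();
        encoder.copyBufferToBuffer(buffer, 0, staging, 0, size);
        device.queue.submit([encoder.finish()]);
        // resolves once the copy has finished and the buffer is mapped
        await staging.mapAsync(GPUMapMode.READ, 0, size);
        // copy the mapped memory before unmapping, since the mapped range is detached on unmap
        const data = new Float32Array(staging.getMappedRange().slice(0));
        staging.unmap();
        return data;
    }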
    const timeInfoBuffer = device.createBuffer({
        size: Float32Array.BYTES_PER_ELEMENT * 1,
        usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
    });

    const shaderModule = device.createShaderModule(shaderModuleDescriptor);
    const pipeline = device.createComputePipeline({
        layout: 'auto',
        compute: {
            module: shaderModule,
            entryPoint: 'synthesize',
            // specialize the WGSL override constants declared at the top of the shader
            constants: {
                SAMPLING_RATE: audioCtx.sampleRate,
                WORKGROUP_SIZE: workgroupSize,
            },
        },
    });
    const bindGroup = device.createBindGroup({
        layout: pipeline.getBindGroupLayout(0),
        entries: [
            {binding: 0, resource: {buffer: timeInfoBuffer}},
            {binding: 1, resource: {buffer: chunkBuffer}},
        ],
    });
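    // Note: with layout 'auto', the pipeline derives its bind group layout from the shader,
    // so the entries above must match the @group(0) @binding(n) declarations in the WGSL code.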
    // CHUNK CREATION

    // state tracking
    const startTime = performance.now() / 1000.0;
    let nextChunkOffset = 0.0;
    // AudioContext time at which the first chunk is scheduled (set once it is known)
    let songStartTime;
    // create sound data on the GPU, read it back to the CPU, and schedule it for playback
    async function createSongChunk() {
        // if we've already scheduled `maxBufferedChunks` of sound data for playback, reschedule sound data creation for later
        const bufferedSeconds = (startTime + nextChunkOffset) - (performance.now() / 1000.0);
        const numBufferedChunks = Math.floor(bufferedSeconds / chunkDurationSeconds);
        if (numBufferedChunks > maxBufferedChunks) {
            const timeout = chunkDurationSeconds * 0.9;
            setTimeout(createSongChunk, timeout * 1000.0);
            console.log(`buffered chunks ${numBufferedChunks} (${bufferedSeconds} seconds), next chunk creation starts in ${timeout} seconds`);
            return;
        }
        // update the uniform buffer: set the new chunk's offset in seconds from t = 0
        console.log('writing nextChunkOffset', nextChunkOffset);
        device.queue.writeBuffer(timeInfoBuffer, 0, new Float32Array([nextChunkOffset]));

        const commandEncoder = device.createCommandEncoder();

        // encode the compute pass, i.e., sound chunk creation;
        // one invocation per stereo sample, the shader's bounds check handles the overshoot
        const pass = commandEncoder.beginComputePass();
        pass.setPipeline(pipeline);
        pass.setBindGroup(0, bindGroup);
        pass.dispatchWorkgroups(Math.ceil(chunkNumSamplesPerChannel / workgroupSize));
        pass.end();

        // copy the sound chunk to the map buffer
        commandEncoder.copyBufferToBuffer(chunkBuffer, 0, chunkMapBuffer, 0, chunkBufferSize);
        device.queue.submit([commandEncoder.finish()]);
        // after submitting(!) the chunk creation & copy commands, map chunkMapBuffer's memory to CPU memory for reading
        // Note: a mapped buffer is not allowed to be used in a command encoder.
        // To avoid an illegal use of the map buffer in a command encoder (i.e., while copying the data from the storage buffer),
        // we wait for the buffer's memory to be mapped.
        // In this case, this is okay, because we have a couple of seconds of sound data buffered in the audio context's destination,
        // so we can easily afford to wait for the GPU commands to finish and the buffer to be mapped.
        // However, doing this within the render loop of a real-time renderer is usually a bad idea, since it forces a CPU-GPU sync.
        // In such cases, it might be a good idea to use a ring buffer of map buffers, so that the same map buffer is not used in
        // every frame (see the sketch after this function).
        await chunkMapBuffer.mapAsync(GPUMapMode.READ, 0, chunkBufferSize);

        // once the buffer's memory is mapped, copy it to a JavaScript array and unmap the buffer
        const chunkData = new Float32Array(chunkNumSamples);
        chunkData.set(new Float32Array(chunkMapBuffer.getMappedRange()));
        chunkMapBuffer.unmap();
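        // The shader writes interleaved stereo samples ([left, right, left, right, ...],
        // one vec2 per sample), while Web Audio expects planar per-channel data,
        // so the loops below de-interleave the chunk.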
        // copy chunk data to an audio buffer
        const audioBuffer = audioCtx.createBuffer(
            numChannels,
            chunkNumSamplesPerChannel,
            audioCtx.sampleRate
        );
        const channels = [];
        for (let i = 0; i < numChannels; ++i) {
            channels.push(audioBuffer.getChannelData(i));
        }
        for (let i = 0; i < audioBuffer.length; ++i) {
            for (const [offset, channel] of channels.entries()) {
                channel[i] = chunkData[i * numChannels + offset];
            }
        }
        // create a new audio source from the audio buffer and schedule it for playback
        const audioSource = audioCtx.createBufferSource();
        audioSource.buffer = audioBuffer;
        audioSource.connect(audioCtx.destination);

        // schedule playback on the AudioContext's own timeline: its clock starts ticking when
        // the context is created, not when the first chunk is ready, so we anchor all chunks
        // to the context time at which the first chunk is scheduled
        if (songStartTime === undefined) {
            songStartTime = audioCtx.currentTime;
        }
        audioSource.start(songStartTime + nextChunkOffset);
        console.log(`created new chunk, starts at ${startTime + nextChunkOffset}`);

        // schedule the next chunk creation
        nextChunkOffset += audioSource.buffer.duration;
        await createSongChunk();
    }
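    // Minimal sketch of the map-buffer ring mentioned above (not used in this demo; the
    // buffer count is an assumption): cycling through several map buffers avoids waiting
    // on a buffer that is still mapped or being mapped.
    function createMapBufferRing(device, size, count = 3) {
        const buffers = Array.from({length: count}, () => device.createBuffer({
            size,
            usage: GPUBufferUsage.MAP_READ | GPUBufferUsage.COPY_DST,
        }));
        let next = 0;
        // returns the next buffer in the ring; with enough buffers in flight, it has
        // already been unmapped by the time it comes around again
        return () => {
            const buffer = buffers[next];
            next = (next + 1) % buffers.length;
            return buffer;
        };
    }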
    createSongChunk();
}
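// Note: browsers' autoplay policies generally require a user gesture before an
// AudioContext may produce sound, which is why playback starts from a click handler.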
const playButton = document.getElementById('play');
playButton.addEventListener('click', () => {
    playButton.remove();
    playSong();
}, {once: true});
</script>
</body>
</html>