@bellbind
Last active May 9, 2022 03:40
[WebGPU] Draw a square with WebGPU for chrome-103
// Compute example for WebGPU API for Chrome-103: https://www.w3.org/TR/webgpu/
// [Usage] Paste the whole code into the Web Console of the WebGPU demo page; it logs a 1024-element Int32Array of squares
const adapter = await navigator.gpu.requestAdapter();
const device = await adapter.requestDevice();
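// (Note, not in the original gist: navigator.gpu is undefined and requestAdapter() can resolve to null
//  where WebGPU is unavailable, so a robust page would check both before calling requestDevice().)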
// WGSL shaders: https://www.w3.org/TR/WGSL/
const workgroupSize = device.limits?.maxComputeInvocationsPerWorkgroup ?? 64;
const computeWgsl = `
struct IO {
values: array<i32>,
};
@binding(0) @group(0) var<storage, read> input: IO;
@binding(1) @group(0) var<storage, write> output: IO;
//override workgroupSize = 16; // 'override' constants are not yet implemented as of Chrome-101, so the value is inlined with a JS template literal on the next line
let workgroupSize = ${workgroupSize};
@stage(compute) @workgroup_size(workgroupSize) fn square(@builtin(global_invocation_id) giid: vec3<u32>) {
output.values[giid.x] = input.values[giid.x] * input.values[giid.x];
}
`;
const computeShader = device.createShaderModule({code: computeWgsl});
// pipeline
const pipeline = device.createComputePipeline({
compute: {module: computeShader, entryPoint: "square"}, // workgroupSize is already inlined into the WGSL above, so no 'constants' override is passed
});
// data
const count = 1024;
const input = new Int32Array([...Array(count).keys()]);
// buffers
const inputBuffer = device.createBuffer({size: input.byteLength, usage: GPUBufferUsage.STORAGE, mappedAtCreation: true});
new Int32Array(inputBuffer.getMappedRange()).set(input);
inputBuffer.unmap();
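// (Alternative sketch: with usage GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST on the buffer,
//  device.queue.writeBuffer(inputBuffer, 0, input) would replace the mappedAtCreation upload above.)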
const outputBuffer = device.createBuffer({size: input.byteLength, usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC});
const readBuffer = device.createBuffer({size: input.byteLength, usage: GPUBufferUsage.MAP_READ | GPUBufferUsage.COPY_DST});
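// (Note: MAP_READ usage may only be combined with COPY_DST, which is why the result is copied from the
//  STORAGE output buffer into this separate read-back buffer before being mapped on the CPU.)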
// bind group
const bindGroupLayout = pipeline.getBindGroupLayout(0);
const bindGroup = device.createBindGroup({
layout: bindGroupLayout,
entries: [
{binding: 0, resource: {buffer: inputBuffer}},
{binding: 1, resource: {buffer: outputBuffer}},
]
});
// command encoder
const commandEncoder = device.createCommandEncoder();
const passEncoder = commandEncoder.beginComputePass();
passEncoder.setPipeline(pipeline);
passEncoder.setBindGroup(0, bindGroup);
passEncoder.dispatchWorkgroups(count / workgroupSize); // before Chrome-102: dispatch(count / workgroupSize)
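// (Note: this assumes count is a multiple of workgroupSize; otherwise dispatch Math.ceil(count / workgroupSize)
//  workgroups and guard giid.x against count in the shader.)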
passEncoder.end();
commandEncoder.copyBufferToBuffer(outputBuffer, 0, readBuffer, 0, input.byteLength);
device.queue.submit([commandEncoder.finish()]);
// read and copy
await readBuffer.mapAsync(GPUMapMode.READ);
const output = new Int32Array(readBuffer.getMappedRange().slice());
readBuffer.unmap();
console.log(output); // 1024 squared values: 0 to 1046529 (1023 * 1023)
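// (Optional check, not in the original gist: verify the GPU result against the CPU computation.)
console.assert(output.every((v, i) => v === i * i), "GPU squares should match i * i");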
<!doctype html>
<html>
<head>
<!-- IMPORTANT: Current Chrome requires an origin-trial token in a <meta> tag.
Origins can be registered under the "WebGPU REGISTER" trial at https://developer.chrome.com/origintrials/
The first token below is for the Web origin "https://gist.githack.com" (it may have expired on Mar 31, 2022).
A localhost origin such as "http://localhost:8000" can also be registered (second token below).
-->
<meta http-equiv="origin-trial"
content="Akv07qcAop5MFaZYxJtHHjUuM8eV3GpbHkTeuhZo/4wsNjYnQ7GSGJyo7hRVZvpvyjYwilbJ8KbFVchI4O1DpA0AAABQeyJvcmlnaW4iOiJodHRwczovL2dpc3QuZ2l0aGFjay5jb206NDQzIiwiZmVhdHVyZSI6IldlYkdQVSIsImV4cGlyeSI6MTY1MjgzMTk5OX0=" />
<meta http-equiv="origin-trial"
content="AkIL+/THBoi1QEsWbX5SOuMpL6+KGAXKrZE5Bz6yHTuijzvKz2MznuLqE+MH4YSqRi/v1fDK/6JyFzgibTTeNAsAAABJeyJvcmlnaW4iOiJodHRwOi8vbG9jYWxob3N0OjgwMDAiLCJmZWF0dXJlIjoiV2ViR1BVIiwiZXhwaXJ5IjoxNjUyODMxOTk5fQ==" />
<script src="./main.js" type="module"></script>
<style>@media(prefers-color-scheme: dark){:root {color-scheme: dark;}}</style>
<link rel="icon" href="data:image/x-icon;," />
</head>
<body>
<h1>(Notice: The origin-trial token in this page expires on May 15, 2022)</h1>
<canvas style="width: 80vmin; height: 80vmin; border: solid;" id="canvas"></canvas>
</body>
</html>
// Simple example for WebGPU API for Chrome-102: https://www.w3.org/TR/webgpu/
const adapter = await navigator.gpu.requestAdapter();
const device = await adapter.requestDevice();
// triangle-strip square: 4-(x,y)
const square = new Float32Array([-1/2, -1/2, -1/2, +1/2, +1/2, -1/2, +1/2, +1/2,]);
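// (Note: in triangle-strip order these are bottom-left, top-left, bottom-right, top-right,
//  so the two triangles share the top-left/bottom-right diagonal.)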
const vertexBuffer = device.createBuffer({size: square.byteLength, usage: GPUBufferUsage.VERTEX, mappedAtCreation: true});
new Float32Array(vertexBuffer.getMappedRange()).set(square);
vertexBuffer.unmap();
const stride = {arrayStride: 2 * square.BYTES_PER_ELEMENT, attributes: [{shaderLocation: 0, offset: 0, format: "float32x2"}]};
// WGSL shaders: https://www.w3.org/TR/WGSL/
const vertexWgsl = `
struct Out {
@builtin(position) pos: vec4<f32>,
@location(0) vert: vec2<f32>,
};
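// (Note: xy ranges over [-0.5, 0.5], so xy + 0.5 maps it into [0, 1] for use as a color in the fragment shader.)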
@stage(vertex) fn main(@location(0) xy: vec2<f32>) -> Out {
return Out(vec4<f32>(xy, 0.0, 1.0), xy + 0.5);
}
`;
const vertexShader = device.createShaderModule({code: vertexWgsl});
const fragmentWgsl = `
//@stage(fragment) fn main(@builtin(position) pos: vec4<f32>, @location(0) vert: vec2<f32>) -> @location(0) vec4<f32> {
@stage(fragment) fn main(@location(0) vert: vec2<f32>) -> @location(0) vec4<f32> {
return vec4<f32>(vert, 0.0, 1.0);
}
`;
const fragmentShader = device.createShaderModule({code: fragmentWgsl});
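// (Note: vert spans [0, 1] across the square, so the square is shaded with a red/green gradient: red along x, green along y, no blue.)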
// gpu config for canvas
const canvas = document.getElementById("canvas");
const gpu = canvas.getContext("webgpu");
const format = gpu.getPreferredFormat(adapter);
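// (Note: getPreferredFormat(adapter) matches the Chrome-102 API targeted here; newer Chrome replaces it
//  with navigator.gpu.getPreferredCanvasFormat().)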
const compositingAlphaMode = "premultiplied";
gpu.configure({device, format, compositingAlphaMode, size: [canvas.width, canvas.height]});
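// (Optional, not in the original gist: setting canvas.width/height from clientWidth/clientHeight * devicePixelRatio
//  before configure() would avoid stretching the default 300x150 backing store to the 80vmin CSS box.)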
// pipeline
const pipeline = device.createRenderPipeline({
primitive: {topology: "triangle-strip"},
vertex: {module: vertexShader, entryPoint: "main", buffers: [stride]},
fragment: {module: fragmentShader, entryPoint: "main", targets: [{format}]},
});
// render
const render = () => {
const view = gpu.getCurrentTexture().createView();
const renderPass = {colorAttachments: [{view, loadOp: "clear", storeOp: "store"}]};
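// (Note: clearValue defaults to transparent black, which composites as transparent under the
//  "premultiplied" compositingAlphaMode configured above.)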
const commandEncoder = device.createCommandEncoder();
const passEncoder = commandEncoder.beginRenderPass(renderPass);
passEncoder.setVertexBuffer(0, vertexBuffer);
passEncoder.setPipeline(pipeline);
passEncoder.draw(4, 1);
passEncoder.end();
device.queue.submit([commandEncoder.finish()]);
};
(function loop() {
render();
requestAnimationFrame(loop);
})();