[WebGPU] Rendering an animated 3D object for Chrome-100
<!doctype html>
<html>
<head>
<!-- IMPORTANT: Current Chrome requires an origin-trial token in a <meta> element.
Register origins via the "WebGPU" REGISTER entry at https://developer.chrome.com/origintrials/
This token is for the Web Origin "http://localhost:8000" (may have expired on Mar 31, 2022)
-->
<meta http-equiv="origin-trial"
content="AkIL+/THBoi1QEsWbX5SOuMpL6+KGAXKrZE5Bz6yHTuijzvKz2MznuLqE+MH4YSqRi/v1fDK/6JyFzgibTTeNAsAAABJeyJvcmlnaW4iOiJodHRwOi8vbG9jYWxob3N0OjgwMDAiLCJmZWF0dXJlIjoiV2ViR1BVIiwiZXhwaXJ5IjoxNjUyODMxOTk5fQ==" />
<meta http-equiv="origin-trial"
content="Akv07qcAop5MFaZYxJtHHjUuM8eV3GpbHkTeuhZo/4wsNjYnQ7GSGJyo7hRVZvpvyjYwilbJ8KbFVchI4O1DpA0AAABQeyJvcmlnaW4iOiJodHRwczovL2dpc3QuZ2l0aGFjay5jb206NDQzIiwiZmVhdHVyZSI6IldlYkdQVSIsImV4cGlyeSI6MTY1MjgzMTk5OX0=" />
<script src="./main.js" type="module"></script>
<style>@media(prefers-color-scheme: dark){:root {color-scheme: dark;}}</style>
<link rel="icon" href="data:image/x-icon;," />
</head>
<body>
<h1>(Notice: The origin-trial token in this page will expire on May 15, 2022)</h1>
<canvas style="width: 80vmin; height: 80vmin; border: solid;" id="canvas"></canvas>
</body>
</html>
// Simple example of the WebGPU API for Chrome-100: https://www.w3.org/TR/webgpu/
const adapter = await navigator.gpu.requestAdapter();
const device = await adapter.requestDevice();
// utility
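// createBuffer: allocates a GPUBuffer sized to the typed array, copies the data in
// through mappedAtCreation, then unmaps it so the GPU can use the buffer.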
const createBuffer = (device, desc, data) => {
const buffer = device.createBuffer(Object.assign({size: data.byteLength, mappedAtCreation: true}, desc));
new Uint8Array(buffer.getMappedRange()).set(new Uint8Array(data.buffer, data.byteOffset, data.byteLength));
buffer.unmap();
return buffer;
};
// vertex stride
const stride = {arrayStride: 6 * Float32Array.BYTES_PER_ELEMENT, attributes: [
{shaderLocation: 0, offset: 0, format: "float32x3"},
{shaderLocation: 1, offset: 3 * Float32Array.BYTES_PER_ELEMENT, format: "float32x3"},
]};
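// Each vertex is 6 floats: position xyz at shaderLocation 0, normal xyz at shaderLocation 1,
// matching the @location(0)/@location(1) vertex shader inputs below.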
// WGSL shaders: https://www.w3.org/TR/WGSL/
const vertexWgsl = `
struct Perspective {
fov: f32;
aspect: f32;
near: f32;
far: f32;
};
@group(0) @binding(0) var<uniform> perspective: Perspective;
fn perspectiveMatrix(perspective: Perspective) -> mat4x4<f32> {
let zoom = 1.0 / tan(perspective.fov / 2.0);
return mat4x4<f32>(
zoom / perspective.aspect, 0.0, 0.0, 0.0,
0.0, zoom, 0.0, 0.0,
0.0, 0.0, perspective.far / (perspective.near - perspective.far), -1.0,
0.0, 0.0, perspective.near * perspective.far / (perspective.near - perspective.far), 0.0
);
};
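// NOTE: mat4x4 constructor arguments are column-major; this matrix maps view-space
// depth between -near and -far onto WebGPU's 0..1 clip-space depth range.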
struct LookAt {
eye: vec3<f32>;
target: vec3<f32>;
up: vec3<f32>;
}
@group(0) @binding(1) var<uniform> lookAt: LookAt;
fn lookAtMatrix(lookAt: LookAt) -> mat4x4<f32> {
let lz = normalize(lookAt.eye - lookAt.target);
let lx = normalize(cross(lookAt.up, lz));
let ly = cross(lz, lx);
return mat4x4<f32>(
lx.x, ly.x, lz.x, 0.0,
lx.y, ly.y, lz.y, 0.0,
lx.z, ly.z, lz.z, 0.0,
-dot(lookAt.eye, lx), -dot(lookAt.eye, ly), -dot(lookAt.eye, lz), 1.0
);
};
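// NOTE: builds the view matrix from the camera basis (lx, ly, lz) and the eye position,
// i.e. the inverse of the camera's rigid transform.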
struct Light {
color: vec3<f32>;
dir: vec3<f32>;
};
@group(0) @binding(2) var<uniform> light: Light;
struct ModelView {
m: mat4x4<f32>;
};
@group(1) @binding(0) var<uniform> modelView: ModelView;
struct Material {
color: vec3<f32>;
ambient: f32;
diffuse: f32;
specular: f32;
};
@group(1) @binding(1) var<uniform> material: Material;
fn lighting(light: Light, material: Material, pos: vec3<f32>, norm: vec3<f32>) -> vec3<f32> {
let refl = normalize(reflect(-light.dir, norm));
let dir = normalize(-pos);
let spec = material.specular * max(dot(dir, refl), 0.0);
let diff = material.diffuse * max(dot(norm, light.dir), 0.0);
let d = material.ambient + diff;
return d * material.color + spec * light.color;
}
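// NOTE: lighting is computed per vertex and interpolated (Gouraud shading):
// ambient + diffuse scale the material color, and a Phong specular term adds the light color.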
struct Out {
@builtin(position) pos: vec4<f32>;
@location(0) color: vec3<f32>;
};
@stage(vertex) fn main(@location(0) pos: vec3<f32>, @location(1) norm: vec3<f32>) -> Out {
return Out(
perspectiveMatrix(perspective) * lookAtMatrix(lookAt) * modelView.m * vec4<f32>(pos, 1.0),
lighting(light, material, pos, normalize(norm)));
}
`;
const vertexShader = device.createShaderModule({code: vertexWgsl});
const fragmentWgsl = `
@stage(fragment) fn main(@location(0) color: vec3<f32>) -> @location(0) vec4<f32> {
return vec4<f32>(color, 1.0);
}
`;
const fragmentShader = device.createShaderModule({code: fragmentWgsl});
// gpu config for canvas
const canvas = document.getElementById("canvas");
const size = [canvas.width, canvas.height];
const gpu = canvas.getContext("webgpu");
const format = gpu.getPreferredFormat(adapter);
gpu.configure({device, format, size});
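// NOTE: canvas.width/height are the defaults (300x150) because the HTML sets only the CSS size,
// so that is the drawing-buffer size shared by the canvas, MSAA, and depth textures.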
// Texture for multisample anti-aliasing (MSAA)
const sampleCount = 4; // NOTE: currently only 4 is supported
const msaaTexture = device.createTexture({usage: GPUTextureUsage.RENDER_ATTACHMENT, format, size, sampleCount});
const msaaView = msaaTexture.createView();
// depth buffer preparation
const depthFormat = "depth24plus-stencil8";
const depthTexture = device.createTexture({usage: GPUTextureUsage.RENDER_ATTACHMENT, format: depthFormat, size, sampleCount});
const depthView = depthTexture.createView();
const depthStencilAttachment = {view: depthView, depthLoadOp: "clear", depthClearValue: 1, depthStoreOp: "store", stencilLoadOp: "clear", stencilStoreOp: "store"};
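// NOTE: the depth texture must match the MSAA color target's size and sampleCount,
// and its format must match depthStencil.format in the pipeline below.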
// blend
const blend = {
color: {srcFactor: "src-alpha", dstFactor: "one-minus-src-alpha", operation: "add"},
alpha: {srcFactor: "one", dstFactor: "one", operation: "add"},
};
// pipeline
const pipeline = device.createRenderPipeline({
primitive: {topology: "triangle-list", cullMode: "back"},
vertex: {module: vertexShader, entryPoint: "main", buffers: [stride]},
fragment: {module: fragmentShader, entryPoint: "main", targets: [{format, blend}]},
depthStencil: {depthWriteEnabled: true, depthCompare: "less", format: depthFormat},
multisample: {count: sampleCount},
});
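// NOTE: cullMode "back" relies on the default frontFace "ccw"; bind group layouts are
// derived automatically from the shaders and fetched with getBindGroupLayout() below.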
// [scene]
// camera
const perspective = new Float32Array([Math.PI / 2, 1.0, 0.1, 12.0]); // fov, aspect, near, far,
const perspectiveBuffer = createBuffer(device, {usage: GPUBufferUsage.UNIFORM}, perspective);
// NOTE: uniform-buffer fields have alignment requirements: f32 = 4, vec2<f32> = 8, vec3<f32>/vec4<f32> = 16
const lookAt = new Float32Array([
0, 0, -6, // eye at
0, // padding for next vec3's alignment 16
0, 0, 0, // target at
0, // padding for next vec3's alignment 16
0, 1, 0, // upside direction
]);
const lookAtBuffer = createBuffer(device, {usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST}, lookAt);
// light setting
const light = new Float32Array([
0.5, 0.5, 0.5, // RGB
0, // padding for next vec3's alignment 16
5, 15, 0, // direction
]);
const lightBuffer = createBuffer(device, {usage: GPUBufferUsage.UNIFORM}, light);
// bind group
const bindGroup = device.createBindGroup({
layout: pipeline.getBindGroupLayout(0),
entries: [
{binding: 0, resource: {buffer: perspectiveBuffer}},
{binding: 1, resource: {buffer: lookAtBuffer}},
{binding: 2, resource: {buffer: lightBuffer}},
]
});
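// NOTE: group(0) holds the per-scene uniforms (perspective, lookAt, light);
// group(1) below holds the per-object uniforms (modelView, material).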
// Tetrahedron data
const Tetrahedron = (color, ambient, diffuse, specular) => {
// [cube index: vertex]
const cube = [
[-1, +1, -1],
[-1, -1, -1],
[+1, -1, -1],
[+1, +1, -1],
[+1, +1, +1],
[+1, -1, +1],
[-1, -1, +1],
[-1, +1, +1],
];
// [layout of cube index]
// rear =) front face = rear face (= front
// - 7 =) 0 - 3 = 4 - 7 (= 0 -
// | ) | | | | ( |
// - 6 =) 1 - 2 = 5 - 6 (= 1 -
//
// [tetrahedron faces]
// normal cube-index: (ccw) triangle cube-index list
// 0: 1-3-7
// 2: 3-1-5
// 4: 5-7-3
// 6: 7-5-1
const vertex = new Float32Array([
[1, 3, 7], [3, 1, 5], [5, 7, 3], [7, 5, 1]
].flatMap((face, fid) => face.flatMap(v => [v, fid * 2])).flatMap(v => cube[v]));
// uniform
const modelView = new Float32Array([
1, 0, 0, 0,
0, 1, 0, 0,
0, 0, 1, 0,
0, 0, 0, 1,
]);
const material = new Float32Array([
color[0], color[1], color[2], // RGB
ambient, diffuse, specular, // ambient, diffuse, specular
]);
return {vertex, modelView, material, count: 12};
};
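// NOTE: the tetrahedron uses the 4 alternating cube corners 1, 3, 5, 7 as vertices,
// while corners 0, 2, 4, 6 double as the flat outward face normals (normalized in the shader).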
const prepareBindGroup = obj => {
obj.vertexBuffer = createBuffer(device, {usage: GPUBufferUsage.VERTEX}, obj.vertex);
obj.modelViewBuffer = createBuffer(device, {usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST}, obj.modelView);
obj.materialBuffer = createBuffer(device, {usage: GPUBufferUsage.UNIFORM}, obj.material);
obj.bindGroup = device.createBindGroup({
layout: pipeline.getBindGroupLayout(1),
entries: [
{binding: 0, resource: {buffer: obj.modelViewBuffer}},
{binding: 1, resource: {buffer: obj.materialBuffer}},
]
});
return obj;
};
const tetrahedron0 = prepareBindGroup(Tetrahedron([1, 0, 0], 0.1, 0.3, 0.5));
const tetrahedron1 = prepareBindGroup(Tetrahedron([0, 1, 0], 0.3, 0.5, 0.1));
const tetrahedron2 = prepareBindGroup(Tetrahedron([0, 0, 1], 0.5, 0.1, 0.3));
// render
const render = (t) => {
// update uniform values with queue.writeBuffer
lookAt[0] = lookAt[1] = 6 * Math.sin(Math.PI * t / 720) / (2 ** 0.5);
lookAt[2] = -6 * Math.cos(Math.PI * t / 720);
tetrahedron0.modelView[12] = tetrahedron1.modelView[13] = tetrahedron2.modelView[14] = 3 * Math.sin(Math.PI * t / 180);
device.queue.writeBuffer(lookAtBuffer, 0, lookAt.buffer);
device.queue.writeBuffer(tetrahedron0.modelViewBuffer, 0, tetrahedron0.modelView.buffer);
device.queue.writeBuffer(tetrahedron1.modelViewBuffer, 0, tetrahedron1.modelView.buffer);
device.queue.writeBuffer(tetrahedron2.modelViewBuffer, 0, tetrahedron2.modelView.buffer);
// NOTE: must call getCurrentTexture().createView() every frame when animating
const resolveTarget = gpu.getCurrentTexture().createView();
const colorAttachment = {view: msaaView, resolveTarget, loadOp: "clear", clearValue: {r: 0, g: 0, b:0, a: 0.5}, storeOp: "discard"};
const renderPass = {colorAttachments: [colorAttachment], depthStencilAttachment};
const commandEncoder = device.createCommandEncoder();
const passEncoder = commandEncoder.beginRenderPass(renderPass);
passEncoder.setPipeline(pipeline);
passEncoder.setBindGroup(0, bindGroup);
passEncoder.setBindGroup(1, tetrahedron0.bindGroup);
passEncoder.setVertexBuffer(0, tetrahedron0.vertexBuffer);
passEncoder.draw(tetrahedron0.count, 1);
passEncoder.setBindGroup(1, tetrahedron1.bindGroup);
passEncoder.setVertexBuffer(0, tetrahedron1.vertexBuffer);
passEncoder.draw(tetrahedron1.count, 1);
passEncoder.setBindGroup(1, tetrahedron2.bindGroup);
passEncoder.setVertexBuffer(0, tetrahedron2.vertexBuffer);
passEncoder.draw(tetrahedron2.count, 1);
passEncoder.end();
device.queue.submit([commandEncoder.finish()]);
};
(function loop(t) {
render(t);
requestAnimationFrame(() => loop((t + 1) % 1440));
})(0);
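// NOTE: t wraps at 1440 frames: the camera completes one orbit every 1440 frames
// (Math.PI * t / 720 spans 2*PI) while the tetrahedrons oscillate with a 360-frame period.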