Skip to content

Instantly share code, notes, and snippets.

@greggman
Last active July 12, 2024 08:57
Show Gist options
  • Save greggman/19d21672ec057eb688a9a43f687deb42 to your computer and use it in GitHub Desktop.
WebGPU Cube (with depth texture visualization - as texture_2d<f32>)

WebGPU Cube (with depth texture visualization - as texture_2d)

view on jsgist

html, body { margin: 0; height: 100% }
canvas { width: 100%; height: 100%; display: block; }
#fail {
position: fixed;
left: 0;
top: 0;
width: 100%;
height: 100%;
display: flex;
justify-content: center;
align-items: center;
background: red;
color: white;
font-weight: bold;
font-family: monospace;
font-size: 16pt;
text-align: center;
}
<canvas></canvas>
<div id="fail" style="display: none">
<div class="content"></div>
</div>
// WebGPU Cube
/* global GPUBufferUsage */
/* global GPUTextureUsage */
import {vec3, mat4} from 'https://webgpufundamentals.org/3rdparty/wgpu-matrix.module.js';
async function main() {
  // Acquire a GPU device; optional chaining makes both requests null-safe
  // so unsupported browsers fall through to the error overlay.
  const adapter = await navigator.gpu?.requestAdapter();
  const device = await adapter?.requestDevice();
  if (!device) {
    fail('need webgpu');
    return;
  }

  const canvas = document.querySelector('canvas');
  const context = canvas.getContext('webgpu');
  // FIX: getPreferredCanvasFormat() takes no arguments; the adapter that was
  // previously passed was silently ignored.
  const presentationFormat = navigator.gpu.getPreferredCanvasFormat();
  context.configure({
    alphaMode: "opaque",
    format: presentationFormat,
    device,
  });

  // Shader for the lit, textured cube: matrices in a vertex uniform block,
  // light direction in a fragment uniform block, plus sampler + texture.
  const litShaderModule = device.createShaderModule({code: `
struct VSUniforms {
worldViewProjection: mat4x4f,
worldInverseTranspose: mat4x4f,
};
@group(0) @binding(0) var<uniform> vsUniforms: VSUniforms;
struct MyVSInput {
@location(0) position: vec4f,
@location(1) normal: vec3f,
@location(2) texcoord: vec2f,
};
struct MyVSOutput {
@builtin(position) position: vec4f,
@location(0) normal: vec3f,
@location(1) texcoord: vec2f,
};
@vertex
fn myVSMain(v: MyVSInput) -> MyVSOutput {
var vsOut: MyVSOutput;
vsOut.position = vsUniforms.worldViewProjection * v.position;
vsOut.normal = (vsUniforms.worldInverseTranspose * vec4f(v.normal, 0.0)).xyz;
vsOut.texcoord = v.texcoord;
return vsOut;
}
struct FSUniforms {
lightDirection: vec3f,
};
@group(0) @binding(1) var<uniform> fsUniforms: FSUniforms;
@group(0) @binding(2) var diffuseSampler: sampler;
@group(0) @binding(3) var diffuseTexture: texture_2d<f32>;
@fragment
fn myFSMain(v: MyVSOutput) -> @location(0) vec4f {
var diffuseColor = textureSample(diffuseTexture, diffuseSampler, v.texcoord);
var a_normal = normalize(v.normal);
var l = dot(a_normal, fsUniforms.lightDirection) * 0.5 + 0.5;
return vec4f(diffuseColor.rgb * l, diffuseColor.a);
}
`});

  // Shader for the small quad that displays the depth texture. Note it
  // declares the depth texture as texture_2d<f32>, which is why the bind
  // group layout below must use sampleType 'unfilterable-float'.
  const planeShaderModule = device.createShaderModule({code: `
struct Uniforms {
matrix: mat4x4f,
};
struct MyVSOutput {
@builtin(position) position: vec4f,
@location(1) texcoord: vec2f,
};
@vertex
fn myVSMain(@builtin(vertex_index) vNdx: u32) -> MyVSOutput {
let points = array(vec2f(0, 0), vec2f(0, 1), vec2f(1, 0), vec2f(1, 1));
var vsOut: MyVSOutput;
let p = points[vNdx];
vsOut.position = uni.matrix * vec4f(p, 0, 1);
vsOut.texcoord = p;
return vsOut;
}
@group(0) @binding(0) var<uniform> uni: Uniforms;
@group(0) @binding(1) var diffuseSampler: sampler;
@group(0) @binding(2) var diffuseTexture: texture_2d<f32>;
@fragment
fn myFSMain(v: MyVSOutput) -> @location(0) vec4f {
return textureSample(diffuseTexture, diffuseSampler, v.texcoord);
}
`});

  // Uniform buffers for the cube. Sizes must match the WGSL struct layouts.
  const vUniformBufferSize = 2 * 16 * 4; // 2 mat4s * 16 floats per mat * 4 bytes per float
  const fUniformBufferSize = 3 * 4 + 4; // 1 vec3 * 3 floats per vec3 * 4 bytes per float + pad
  const vsUniformBuffer = device.createBuffer({
    size: vUniformBufferSize,
    usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
  });
  const fsUniformBuffer = device.createBuffer({
    size: fUniformBufferSize,
    usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
  });
  // CPU-side staging arrays; subarray views alias into the same storage so a
  // single writeBuffer uploads everything.
  const vsUniformValues = new Float32Array(vUniformBufferSize / 4); // 2 mat4s
  const worldViewProjection = vsUniformValues.subarray(0, 16);
  const worldInverseTranspose = vsUniformValues.subarray(16, 32);
  const fsUniformValues = new Float32Array(fUniformBufferSize / 4); // 1 vec3
  const lightDirection = fsUniformValues.subarray(0, 3);

  // Uniform buffer for the depth-visualization quad (one mat4).
  const planeUniformBuffer = device.createBuffer({
    size: 64,
    usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
  });
  const planeUniformValues = new Float32Array(planeUniformBuffer.size / 4);
  const matrix = planeUniformValues.subarray(0, 16);

  // Create a GPU buffer pre-filled with `data` (any TypedArray) via
  // mappedAtCreation, which avoids a separate queue upload.
  function createBuffer(device, data, usage) {
    const buffer = device.createBuffer({
      size: data.byteLength,
      usage,
      mappedAtCreation: true,
    });
    // data.constructor gives the matching TypedArray type for the view.
    const dst = new data.constructor(buffer.getMappedRange());
    dst.set(data);
    buffer.unmap();
    return buffer;
  }

  // Cube geometry: 24 vertices (4 per face, so normals/texcoords are flat
  // per face), 36 indices (2 triangles per face).
  const positions = new Float32Array([1, 1, -1, 1, 1, 1, 1, -1, 1, 1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, -1, -1, 1, -1, 1, 1, 1, 1, 1, 1, 1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, -1]);
  const normals = new Float32Array([1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1]);
  const texcoords = new Float32Array([1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1]);
  const indices = new Uint16Array([0, 1, 2, 0, 2, 3, 4, 5, 6, 4, 6, 7, 8, 9, 10, 8, 10, 11, 12, 13, 14, 12, 14, 15, 16, 17, 18, 16, 18, 19, 20, 21, 22, 20, 22, 23]);
  const positionBuffer = createBuffer(device, positions, GPUBufferUsage.VERTEX);
  const normalBuffer = createBuffer(device, normals, GPUBufferUsage.VERTEX);
  const texcoordBuffer = createBuffer(device, texcoords, GPUBufferUsage.VERTEX);
  const indicesBuffer = createBuffer(device, indices, GPUBufferUsage.INDEX);

  // A tiny 2x2 checker-ish texture for the cube faces.
  const tex = device.createTexture({
    size: [2, 2, 1],
    format: 'rgba8unorm',
    usage:
      GPUTextureUsage.TEXTURE_BINDING |
      GPUTextureUsage.COPY_DST,
  });
  device.queue.writeTexture(
    { texture: tex },
    new Uint8Array([
      255, 255, 128, 255,
      128, 255, 255, 255,
      255, 128, 255, 255,
      255, 128, 128, 255,
    ]),
    { bytesPerRow: 8, rowsPerImage: 2 }, // 2 texels * 4 bytes per row
    { width: 2, height: 2 },
  );
  const sampler = device.createSampler({
    magFilter: 'nearest',
    minFilter: 'nearest',
  });

  // Pipeline for the cube; 'auto' layout is derived from the lit shader.
  const litPipeline = device.createRenderPipeline({
    layout: 'auto',
    vertex: {
      module: litShaderModule,
      buffers: [
        // position
        {
          arrayStride: 3 * 4, // 3 floats, 4 bytes each
          attributes: [
            {shaderLocation: 0, offset: 0, format: 'float32x3'},
          ],
        },
        // normals
        {
          arrayStride: 3 * 4, // 3 floats, 4 bytes each
          attributes: [
            {shaderLocation: 1, offset: 0, format: 'float32x3'},
          ],
        },
        // texcoords
        {
          arrayStride: 2 * 4, // 2 floats, 4 bytes each
          attributes: [
            {shaderLocation: 2, offset: 0, format: 'float32x2',},
          ],
        },
      ],
    },
    fragment: {
      module: litShaderModule,
      targets: [
        {format: presentationFormat},
      ],
    },
    primitive: {
      topology: 'triangle-list',
      cullMode: 'back',
    },
    depthStencil: {
      depthWriteEnabled: true,
      depthCompare: 'less',
      format: 'depth24plus',
    },
  });

  // Explicit layout for the depth-visualization quad. A depth texture bound
  // as texture_2d<f32> is only legal with sampleType 'unfilterable-float',
  // which in turn requires a 'non-filtering' sampler — 'auto' layout would
  // default to 'float'/'filtering' and fail validation.
  const ufBindGroupLayout = device.createBindGroupLayout({
    label: 'unfilterable-bgl',
    entries: [
      {
        binding: 0,
        visibility: GPUShaderStage.VERTEX,
        buffer: {
          minBindingSize: 4 * 4 * 4, // one mat4x4f
        },
      },
      {
        binding: 1,
        visibility: GPUShaderStage.FRAGMENT,
        sampler: {
          type: "non-filtering", // <---------
        },
      },
      {
        binding: 2,
        visibility: GPUShaderStage.FRAGMENT,
        texture: {
          sampleType: "unfilterable-float", // <---------
        },
      },
    ],
  });
  const planePipelineLayout = device.createPipelineLayout({
    bindGroupLayouts: [ufBindGroupLayout],
  });
  const planePipeline = device.createRenderPipeline({
    layout: planePipelineLayout,
    vertex: {
      module: planeShaderModule,
    },
    fragment: {
      module: planeShaderModule,
      targets: [
        {format: presentationFormat},
      ],
    },
    primitive: {
      topology: 'triangle-strip', // 4 vertices generated in the shader
    },
  });
  // Must be non-filtering to match the 'non-filtering' layout entry;
  // 'nearest' for both filters satisfies that.
  const depthSampler = device.createSampler({
    magFilter: 'nearest',
    minFilter: 'nearest',
  });

  const cubeBindGroup = device.createBindGroup({
    layout: litPipeline.getBindGroupLayout(0),
    entries: [
      { binding: 0, resource: { buffer: vsUniformBuffer } },
      { binding: 1, resource: { buffer: fsUniformBuffer } },
      { binding: 2, resource: sampler },
      { binding: 3, resource: tex.createView() },
    ],
  });

  // Pass 1: draw the cube to the canvas, writing depth.
  const renderPassDescriptor = {
    colorAttachments: [
      {
        // view: undefined, // Assigned later
        clearValue: { r: 0.5, g: 0.5, b: 0.5, a: 1.0 },
        loadOp: 'clear',
        storeOp: 'store',
      },
    ],
    depthStencilAttachment: {
      // view: undefined, // Assigned later
      depthClearValue: 1.0,
      depthLoadOp: 'clear',
      depthStoreOp: 'store',
    },
  };
  // Pass 2: draw the depth visualization. We need a separate render pass
  // because pass 1 renders TO the depth texture, and a texture can't be an
  // attachment and a sampled binding in the same pass. 'load' keeps pass 1's
  // colors.
  const depthRenderPassDescriptor = {
    colorAttachments: [
      {
        // view: undefined, // Assigned later
        loadOp: 'load',
        storeOp: 'store',
      },
    ],
  };

  // Sync the canvas backing-store size to its CSS size, clamped to the
  // device's maximum 2D texture dimension. Returns whether a resize happened.
  function resizeToDisplaySize(device, canvas) {
    const width = Math.min(device.limits.maxTextureDimension2D, canvas.clientWidth);
    const height = Math.min(device.limits.maxTextureDimension2D, canvas.clientHeight);
    const needResize = width !== canvas.width || height !== canvas.height;
    if (needResize) {
      canvas.width = width;
      canvas.height = height;
    }
    return needResize;
  }

  // Recreated lazily in render() whenever the canvas size changes.
  let depthTexture;
  let planeBindGroup;

  function render(time) {
    time *= 0.001; // ms -> seconds

    resizeToDisplaySize(device, canvas);

    // Camera + per-frame cube uniforms.
    const projection = mat4.perspective(30 * Math.PI / 180, canvas.clientWidth / canvas.clientHeight, 3, 8);
    const eye = [1, 4, -6];
    const target = [0, 0, 0];
    const up = [0, 1, 0];
    const view = mat4.lookAt(eye, target, up);
    const viewProjection = mat4.multiply(projection, view);
    const world = mat4.rotationY(time);
    // These write directly into vsUniformValues/fsUniformValues via the
    // subarray views created above.
    mat4.transpose(mat4.inverse(world), worldInverseTranspose);
    mat4.multiply(viewProjection, world, worldViewProjection);
    vec3.normalize([1, 8, -10], lightDirection);
    device.queue.writeBuffer(vsUniformBuffer, 0, vsUniformValues);
    device.queue.writeBuffer(fsUniformBuffer, 0, fsUniformValues);

    // FIX: removed a stray double semicolon here.
    const canvasTexture = context.getCurrentTexture();
    // (Re)create the depth texture — and the bind group that samples it —
    // whenever the canvas size changes.
    if (!depthTexture || depthTexture.width !== canvasTexture.width || depthTexture.height !== canvasTexture.height) {
      depthTexture?.destroy();
      depthTexture = device.createTexture({
        size: canvasTexture, // canvasTexture has width, height, and depthOrArrayLayers properties
        format: 'depth24plus',
        usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.TEXTURE_BINDING,
      });
      planeBindGroup = device.createBindGroup({
        layout: ufBindGroupLayout,
        entries: [
          { binding: 0, resource: { buffer: planeUniformBuffer } },
          { binding: 1, resource: depthSampler },
          { binding: 2, resource: depthTexture.createView() },
        ],
      });
    }

    // FIX: getCurrentTexture() returns the same texture for the whole frame,
    // so reuse canvasTexture instead of calling it a second time.
    renderPassDescriptor.colorAttachments[0].view = canvasTexture.createView();
    renderPassDescriptor.depthStencilAttachment.view = depthTexture.createView();
    depthRenderPassDescriptor.colorAttachments[0].view = canvasTexture.createView();

    const encoder = device.createCommandEncoder();
    {
      // Pass 1: lit cube.
      const pass = encoder.beginRenderPass(renderPassDescriptor);
      pass.setPipeline(litPipeline);
      pass.setBindGroup(0, cubeBindGroup);
      pass.setVertexBuffer(0, positionBuffer);
      pass.setVertexBuffer(1, normalBuffer);
      pass.setVertexBuffer(2, texcoordBuffer);
      pass.setIndexBuffer(indicesBuffer, 'uint16');
      pass.drawIndexed(indices.length);
      pass.end();
    }

    // Place the visualization quad in the lower-left quarter of the canvas
    // (pixel-space ortho projection, then translate/scale the unit quad).
    mat4.ortho(0, canvas.width, canvas.height, 0, -1, 1, matrix);
    mat4.translate(matrix, [0, canvas.height * 3 / 4, 0], matrix);
    mat4.scale(matrix, [canvas.width / 4, canvas.height / 4, 1], matrix);
    device.queue.writeBuffer(planeUniformBuffer, 0, planeUniformValues);
    {
      // Pass 2: depth texture visualization.
      const pass = encoder.beginRenderPass(depthRenderPassDescriptor);
      pass.setPipeline(planePipeline);
      pass.setBindGroup(0, planeBindGroup);
      pass.draw(4);
      pass.end();
    }
    device.queue.submit([encoder.finish()]);

    requestAnimationFrame(render);
  }
  requestAnimationFrame(render);
}
// Display `msg` in the full-screen error overlay (the #fail div that the
// page ships hidden via an inline display:none).
function fail(msg) {
  const overlay = document.querySelector('#fail');
  overlay.style.display = '';
  overlay.querySelector('.content').textContent = msg;
}
main();
{"name":"WebGPU Cube (with depth texture visualization - as texture_2d<f32>)","settings":{},"filenames":["index.html","index.css","index.js"]}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment