WebGPU Cube using 32 textures (16 per stage)

A spinning-cube demo that binds the device's full per-stage sampled-texture limit (device.limits.maxSampledTexturesPerShaderStage, 16 by default) in both the vertex and fragment stages: 32 texture bindings in total.
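The gist runs with the default device limits. As a hedged aside (not part of the original code), one could instead request the adapter's full limit when creating the device, making more textures bindable on hardware that reports a higher limit:

// Hypothetical variant: request the adapter's maximum per-stage
// sampled-texture limit rather than the default 16. requestDevice() rejects
// if a requested limit exceeds what the adapter supports, so we pass exactly
// what the adapter reports. (Optional chaining skips the call, including
// argument evaluation, when no adapter is available.)
const adapter = await navigator.gpu?.requestAdapter();
const device = await adapter?.requestDevice({
  requiredLimits: {
    maxSampledTexturesPerShaderStage:
      adapter.limits.maxSampledTexturesPerShaderStage,
  },
});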
index.css:
html, body { margin: 0; height: 100% }
canvas { width: 100%; height: 100%; display: block; }
#fail {
  position: fixed;
  left: 0;
  top: 0;
  width: 100%;
  height: 100%;
  display: flex;
  justify-content: center;
  align-items: center;
  background: red;
  color: white;
  font-weight: bold;
  font-family: monospace;
  font-size: 16pt;
  text-align: center;
}
index.html:
<canvas></canvas>
<div id="fail" style="display: none">
  <div class="content"></div>
</div>
index.js:
// WebGPU Cube
// from http://localhost:8080/webgpu/webgpu-cube.html
/* global GPUBufferUsage */
/* global GPUTextureUsage */
import {vec3, mat4} from 'https://webgpufundamentals.org/3rdparty/wgpu-matrix.module.js';

// helper: make an array of length i holding [fn(0), ..., fn(i - 1)]
const range = (i, fn) => new Array(i).fill(0).map((_, i) => fn(i));
async function main() {
  const adapter = await navigator.gpu?.requestAdapter();
  const device = await adapter?.requestDevice();
  if (!device) {
    fail('need webgpu');
    return;
  }
  console.log(device.limits.maxSampledTexturesPerShaderStage); // how many sampled textures each stage may use

  const canvas = document.querySelector('canvas');
  const context = canvas.getContext('webgpu');
  const presentationFormat = navigator.gpu.getPreferredCanvasFormat(); // takes no arguments
  const presentationSize = [300, 150];  // default canvas size
  context.configure({
    alphaMode: "opaque",
    format: presentationFormat,
    device,
  });

  const canvasInfo = {
    canvas,
    context,
    presentationSize,
    presentationFormat,
    // these are filled out in resizeToDisplaySize
    renderTarget: undefined,
    renderTargetView: undefined,
    depthTexture: undefined,
    depthTextureView: undefined,
    sampleCount: 4, // can be 1 or 4
  };

  const shaderSrc = `
struct VSUniforms {
  worldViewProjection: mat4x4<f32>,
  worldInverseTranspose: mat4x4<f32>,
};
@group(0) @binding(0) var<uniform> vsUniforms: VSUniforms;
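// One texture declaration per available slot. Binding 1 is intentionally
// skipped; WebGPU bindings need not be contiguous.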
${range(device.limits.maxSampledTexturesPerShaderStage, i => `@group(0) @binding(${2 + i}) var diffuseTextureVS${i}: texture_2d<f32>;`).join('\n')}
struct MyVSInput {
  @location(0) position: vec4<f32>,
  @location(1) normal: vec3<f32>,
  @location(2) texcoord: vec2<f32>,
};

struct MyVSOutput {
  @builtin(position) position: vec4<f32>,
  @location(0) normal: vec3<f32>,
  @location(1) texcoord: vec2<f32>,
  @location(2) diffuse: vec4f,
};

@vertex
fn myVSMain(v: MyVSInput) -> MyVSOutput {
  var vsOut: MyVSOutput;
  vsOut.position = vsUniforms.worldViewProjection * v.position;
  vsOut.normal = (vsUniforms.worldInverseTranspose * vec4<f32>(v.normal, 0.0)).xyz;
  vsOut.texcoord = v.texcoord;
  var diffuseColor = vec4f(0);
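  // Load one texel from every vertex-stage texture and average them;
  // textureLoad needs no sampler, so it is usable in the vertex stage.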
  ${range(device.limits.maxSampledTexturesPerShaderStage, i => `diffuseColor += textureLoad(diffuseTextureVS${i}, vec2u(0), 0);`).join('\n')}
  diffuseColor /= f32(${device.limits.maxSampledTexturesPerShaderStage});
  vsOut.diffuse = diffuseColor;
  return vsOut;
}

struct FSUniforms {
  lightDirection: vec3<f32>,
};
@group(1) @binding(0) var<uniform> fsUniforms: FSUniforms;
@group(1) @binding(1) var diffuseSampler: sampler;
${range(device.limits.maxSampledTexturesPerShaderStage, i => `@group(1) @binding(${2 + i}) var diffuseTexture${i}: texture_2d<f32>;`).join('\n')}

@fragment
fn myFSMain(v: MyVSOutput) -> @location(0) vec4<f32> {
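  // Sample and average all fragment-stage textures.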
  var diffuseColor = vec4f(0);
  ${range(device.limits.maxSampledTexturesPerShaderStage, i => `diffuseColor += textureSample(diffuseTexture${i}, diffuseSampler, v.texcoord);`).join('\n')}
  diffuseColor /= f32(${device.limits.maxSampledTexturesPerShaderStage});
  var a_normal = normalize(v.normal);
  var l = dot(a_normal, fsUniforms.lightDirection) * 0.5 + 0.5;
  return vec4<f32>(diffuseColor.rgb * l, diffuseColor.a);
}
`;
  const shaderModule = device.createShaderModule({code: shaderSrc});
  const vUniformBufferSize = 2 * 16 * 4; // 2 mat4s * 16 floats per mat * 4 bytes per float
  const fUniformBufferSize = 3 * 4 + 4;  // 1 vec3 (3 floats * 4 bytes) + 4 bytes pad
  const vsUniformBuffer = device.createBuffer({
    size: vUniformBufferSize,
    usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
  });
  const fsUniformBuffer = device.createBuffer({
    size: fUniformBufferSize,
    usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
  });
  const vsUniformValues = new Float32Array(vUniformBufferSize / 4); // 2 mat4s
  const worldViewProjection = vsUniformValues.subarray(0, 16);
  const worldInverseTranspose = vsUniformValues.subarray(16, 32);
  const fsUniformValues = new Float32Array(fUniformBufferSize / 4); // 1 vec3 + pad
  const lightDirection = fsUniformValues.subarray(0, 3);

  // create a GPU buffer and copy `data` into it via a mapped range
  function createBuffer(device, data, usage) {
    const buffer = device.createBuffer({
      size: data.byteLength,
      usage,
      mappedAtCreation: true,
    });
    const dst = new data.constructor(buffer.getMappedRange());
    dst.set(data);
    buffer.unmap();
    return buffer;
  }

  const positions = new Float32Array([1, 1, -1, 1, 1, 1, 1, -1, 1, 1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, -1, -1, 1, -1, 1, 1, 1, 1, 1, 1, 1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, -1]);
  const normals = new Float32Array([1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1]);
  const texcoords = new Float32Array([1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1]);
  const indices = new Uint16Array([0, 1, 2, 0, 2, 3, 4, 5, 6, 4, 6, 7, 8, 9, 10, 8, 10, 11, 12, 13, 14, 12, 14, 15, 16, 17, 18, 16, 18, 19, 20, 21, 22, 20, 22, 23]);

  const positionBuffer = createBuffer(device, positions, GPUBufferUsage.VERTEX);
  const normalBuffer = createBuffer(device, normals, GPUBufferUsage.VERTEX);
  const texcoordBuffer = createBuffer(device, texcoords, GPUBufferUsage.VERTEX);
  const indicesBuffer = createBuffer(device, indices, GPUBufferUsage.INDEX);
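  // Create two full sets of tiny 2x2 textures: the first half is bound to the
  // fragment stage, the second half to the vertex stage (see the bind groups below).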
  const texs = range(device.limits.maxSampledTexturesPerShaderStage * 2, i => {
    const tex = device.createTexture({
      size: [2, 2, 1],
      format: 'rgba8unorm',
      usage:
        GPUTextureUsage.TEXTURE_BINDING |
        GPUTextureUsage.COPY_DST,
    });
    // fill each texture with 4 solid-color texels
    device.queue.writeTexture(
      { texture: tex },
      new Uint8Array([
        255, 255, 128, 255,
        128, 255, 255, 255,
        255, 128, 255, 255,
        255, 128, 128, 255,
      ]),
      { bytesPerRow: 8, rowsPerImage: 2 },
      { width: 2, height: 2 },
    );
    return tex;
  });

  const sampler = device.createSampler({
    magFilter: 'nearest',
    minFilter: 'nearest',
  });
  const pipeline = device.createRenderPipeline({
    layout: 'auto',
    vertex: {
      module: shaderModule,
      entryPoint: 'myVSMain',
      buffers: [
        // position
        {
          arrayStride: 3 * 4, // 3 floats, 4 bytes each
          attributes: [
            {shaderLocation: 0, offset: 0, format: 'float32x3'},
          ],
        },
        // normals
        {
          arrayStride: 3 * 4, // 3 floats, 4 bytes each
          attributes: [
            {shaderLocation: 1, offset: 0, format: 'float32x3'},
          ],
        },
        // texcoords
        {
          arrayStride: 2 * 4, // 2 floats, 4 bytes each
          attributes: [
            {shaderLocation: 2, offset: 0, format: 'float32x2'},
          ],
        },
      ],
    },
    fragment: {
      module: shaderModule,
      entryPoint: 'myFSMain',
      targets: [
        {format: presentationFormat},
      ],
    },
    primitive: {
      topology: 'triangle-list',
      cullMode: 'back',
    },
    depthStencil: {
      depthWriteEnabled: true,
      depthCompare: 'less',
      format: 'depth24plus',
    },
    // only add a multisample state when MSAA is on
    ...(canvasInfo.sampleCount > 1 && {
      multisample: {
        count: canvasInfo.sampleCount,
      },
    }),
  });
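  // group 0 (vertex stage): uniform buffer + the second half of texs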
  const bindGroupVS = device.createBindGroup({
    layout: pipeline.getBindGroupLayout(0),
    entries: [
      { binding: 0, resource: { buffer: vsUniformBuffer } },
      ...range(device.limits.maxSampledTexturesPerShaderStage, i =>
        ({ binding: 2 + i, resource: texs[i + device.limits.maxSampledTexturesPerShaderStage].createView() })),
    ],
  });
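  // group 1 (fragment stage): uniform buffer, sampler, and the first half of texs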
  const bindGroupFS = device.createBindGroup({
    layout: pipeline.getBindGroupLayout(1),
    entries: [
      { binding: 0, resource: { buffer: fsUniformBuffer } },
      { binding: 1, resource: sampler },
      ...range(device.limits.maxSampledTexturesPerShaderStage, i =>
        ({ binding: 2 + i, resource: texs[i].createView() })),
    ],
  });
  const renderPassDescriptor = {
    colorAttachments: [
      {
        // view: undefined, // Assigned later
        // resolveTarget: undefined, // Assigned later
        clearValue: { r: 0.5, g: 0.5, b: 0.5, a: 1.0 },
        loadOp: 'clear',
        storeOp: 'store',
      },
    ],
    depthStencilAttachment: {
      // view: undefined, // Assigned later
      depthClearValue: 1.0,
      depthLoadOp: 'clear',
      depthStoreOp: 'store',
    },
  };

  // (re)create the render targets whenever the canvas's displayed size changes
  function resizeToDisplaySize(device, canvasInfo) {
    const {
      canvas,
      context,
      renderTarget,
      presentationSize,
      presentationFormat,
      depthTexture,
      sampleCount,
    } = canvasInfo;
    const width = Math.min(device.limits.maxTextureDimension2D, canvas.clientWidth);
    const height = Math.min(device.limits.maxTextureDimension2D, canvas.clientHeight);
    // check depthTexture (always created) rather than renderTarget (only
    // created when sampleCount > 1) so the sampleCount === 1 path doesn't
    // recreate its textures every frame
    const needResize = !canvasInfo.depthTexture ||
        width !== presentationSize[0] ||
        height !== presentationSize[1];
    if (needResize) {
      if (renderTarget) {
        renderTarget.destroy();
      }
      if (depthTexture) {
        depthTexture.destroy();
      }
      canvas.width = width;
      canvas.height = height;
      presentationSize[0] = width;
      presentationSize[1] = height;
      if (sampleCount > 1) {
        const newRenderTarget = device.createTexture({
          size: presentationSize,
          format: presentationFormat,
          sampleCount,
          usage: GPUTextureUsage.RENDER_ATTACHMENT,
        });
        canvasInfo.renderTarget = newRenderTarget;
        canvasInfo.renderTargetView = newRenderTarget.createView();
      }
      const newDepthTexture = device.createTexture({
        size: presentationSize,
        format: 'depth24plus',
        sampleCount,
        usage: GPUTextureUsage.RENDER_ATTACHMENT,
      });
      canvasInfo.depthTexture = newDepthTexture;
      canvasInfo.depthTextureView = newDepthTexture.createView();
    }
    return needResize;
  }

  function render(time) {
    time *= 0.001;
    resizeToDisplaySize(device, canvasInfo);

    const projection = mat4.perspective(30 * Math.PI / 180, canvas.clientWidth / canvas.clientHeight, 0.5, 10);
    const eye = [1, 4, -6];
    const target = [0, 0, 0];
    const up = [0, 1, 0];
    const view = mat4.lookAt(eye, target, up);
    const viewProjection = mat4.multiply(projection, view);
    const world = mat4.rotationY(time);
    mat4.transpose(mat4.inverse(world), worldInverseTranspose);
    mat4.multiply(viewProjection, world, worldViewProjection);
    vec3.normalize([1, 8, -10], lightDirection);

    device.queue.writeBuffer(vsUniformBuffer, 0, vsUniformValues);
    device.queue.writeBuffer(fsUniformBuffer, 0, fsUniformValues);
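    // with MSAA, render into the multisampled target and resolve into the
    // canvas texture; with sampleCount 1, render straight to the canvas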
    if (canvasInfo.sampleCount === 1) {
      const colorTexture = context.getCurrentTexture();
      renderPassDescriptor.colorAttachments[0].view = colorTexture.createView();
    } else {
      renderPassDescriptor.colorAttachments[0].view = canvasInfo.renderTargetView;
      renderPassDescriptor.colorAttachments[0].resolveTarget = context.getCurrentTexture().createView();
    }
    renderPassDescriptor.depthStencilAttachment.view = canvasInfo.depthTextureView;

    const commandEncoder = device.createCommandEncoder();
    const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor);
    passEncoder.setPipeline(pipeline);
    passEncoder.setBindGroup(0, bindGroupVS);
    passEncoder.setBindGroup(1, bindGroupFS);
    passEncoder.setVertexBuffer(0, positionBuffer);
    passEncoder.setVertexBuffer(1, normalBuffer);
    passEncoder.setVertexBuffer(2, texcoordBuffer);
    passEncoder.setIndexBuffer(indicesBuffer, 'uint16');
    passEncoder.drawIndexed(indices.length);
    passEncoder.end();
    device.queue.submit([commandEncoder.finish()]);

    requestAnimationFrame(render);
  }
  requestAnimationFrame(render);
}

// show the full-screen red error overlay with `msg`
function fail(msg) {
  const elem = document.querySelector('#fail');
  const contentElem = elem.querySelector('.content');
  elem.style.display = '';
  contentElem.textContent = msg;
}

main();
gist metadata:
{"name":"WebGPU Cube using 32 textures (16 per stage)","settings":{},"filenames":["index.html","index.css","index.js"]} |