Created
May 22, 2023 07:56
-
-
Save greggman/b2b125ec5dc51345d7d85a3ba0fc666b to your computer and use it in GitHub Desktop.
WebGPU Simple Textured Quad Mipmap (use views with mips)
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
/* Shared lesson stylesheet from webgpufundamentals.org. */
@import url(https://webgpufundamentals.org/webgpu/resources/webgpu-lesson.css);
html, body {
  margin: 0; /* remove the default margin */
  height: 100%; /* make the html,body fill the page */
}
canvas {
  display: block; /* make the canvas act like a block */
  width: 100%; /* make the canvas fill its container */
  height: 100%;
  /* The canvas backing store is deliberately low-resolution (see the
     ResizeObserver in index.js); disable smoothing when CSS scales it up
     so individual texels stay visible. Browsers apply the last value they
     support, so crisp-edges acts as the fallback/override spelling. */
  image-rendering: pixelated;
  image-rendering: crisp-edges;
}
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
<!-- WebGPU render target; stretched to fill the page by index.css and given
     its device-pixel size by the ResizeObserver in index.js. -->
<canvas></canvas>
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
// WebGPU Simple Textured Quad Mipmap | |
// from https://webgpufundamentals.org/webgpu/webgpu-simple-textured-quad-mipmap.html | |
import GUI from 'https://webgpufundamentals.org/3rdparty/muigui-0.x.module.js'; | |
async function main() {
  // WebGPU is not available everywhere; the `?.` chain leaves `device`
  // undefined if either the API or a suitable adapter is missing.
  const adapter = await navigator.gpu?.requestAdapter();
  const device = await adapter?.requestDevice();
  if (!device) {
    fail('need a browser that supports WebGPU');
    return;
  }

  // Get a WebGPU context from the canvas and configure it
  const canvas = document.querySelector('canvas');
  // Placeholder size; the ResizeObserver at the bottom of main() sets the
  // real resolution (1 canvas pixel per 64 CSS pixels).
  canvas.width = 10;
  canvas.height = 10;
  const context = canvas.getContext('webgpu');
  const presentationFormat = navigator.gpu.getPreferredCanvasFormat();
  context.configure({
    device,
    format: presentationFormat,
  });

  // Vertex stage: a unit quad in [0..1] positioned by per-frame
  // scale/offset uniforms (binding 3). Fragment stage: samples the SAME
  // texture through two single-mip views (bindings 1 and 2) and picks one
  // per pixel in a checkerboard pattern via select() on (x + y) % 2.
  const module = device.createShaderModule({
    label: 'our hardcoded textured quad shaders',
    code: `
      struct OurVertexShaderOutput {
        @builtin(position) position: vec4f,
        @location(0) texcoord: vec2f,
      };
      struct Uniforms {
        scale: vec2f,
        offset: vec2f,
      };
      @group(0) @binding(3) var<uniform> uni: Uniforms;
      @vertex fn vs(
        @builtin(vertex_index) vertexIndex : u32
      ) -> OurVertexShaderOutput {
        var pos = array<vec2f, 6>(
          // 1st triangle
          vec2f( 0.0, 0.0), // center
          vec2f( 1.0, 0.0), // right, center
          vec2f( 0.0, 1.0), // center, top
          // 2st triangle
          vec2f( 0.0, 1.0), // center, top
          vec2f( 1.0, 0.0), // right, center
          vec2f( 1.0, 1.0), // right, top
        );
        var vsOutput: OurVertexShaderOutput;
        let xy = pos[vertexIndex];
        vsOutput.position = vec4f(xy * uni.scale + uni.offset, 0.0, 1.0);
        vsOutput.texcoord = xy;
        return vsOutput;
      }
      @group(0) @binding(0) var ourSampler: sampler;
      @group(0) @binding(1) var ourTexture1: texture_2d<f32>;
      @group(0) @binding(2) var ourTexture2: texture_2d<f32>;
      @fragment fn fs(fsInput: OurVertexShaderOutput) -> @location(0) vec4f {
        let c1 = textureSample(ourTexture1, ourSampler, fsInput.texcoord);
        let c2 = textureSample(ourTexture2, ourSampler, fsInput.texcoord);
        return select(c1, c2, i32(fsInput.position.x + fsInput.position.y) % 2 == 1);
      }
    `,
  });

  const pipeline = device.createRenderPipeline({
    label: 'hardcoded textured quad pipeline',
    layout: 'auto',
    vertex: {
      module,
      entryPoint: 'vs',
    },
    fragment: {
      module,
      entryPoint: 'fs',
      targets: [{ format: presentationFormat }],
    },
  });

  // Hand-authored rgba8 pixel data for the two mip levels created below.
  const kTextureWidth = 2; // only used by the commented-out generateMips path
  const _ = [255, 0, 0, 255]; // red (unused here; kept from the original sample)
  const y = [255, 255, 0, 255]; // yellow
  const b = [ 0, 0, 255, 255]; // blue
  // 2x2 solid yellow — uploaded as mip level 0.
  const textureData = new Uint8Array([
    y, y,
    y, y,
  ].flat());
  // 1x1 solid blue — uploaded as mip level 1.
  const textureData2 = new Uint8Array([
    b,
  ].flat())
const lerp = (a, b, t) => a + (b - a) * t; | |
const mix = (a, b, t) => a.map((v, i) => lerp(v, b[i], t)); | |
const bilinearFilter = (tl, tr, bl, br, t1, t2) => { | |
const t = mix(tl, tr, t1); | |
const b = mix(bl, br, t1); | |
return mix(t, b, t2); | |
}; | |
const createNextMipLevelRgba8Unorm = ({data: src, width: srcWidth, height: srcHeight}) => { | |
// compute the size of the next mip | |
const dstWidth = Math.max(1, srcWidth / 2 | 0); | |
const dstHeight = Math.max(1, srcHeight / 2 | 0); | |
const dst = new Uint8Array(dstWidth * dstHeight * 4); | |
const getSrcPixel = (x, y) => { | |
const offset = (y * srcWidth + x) * 4; | |
return src.subarray(offset, offset + 4); | |
}; | |
for (let y = 0; y < dstHeight; ++y) { | |
for (let x = 0; x < dstWidth; ++x) { | |
// compute texcoord of the center of the destination texel | |
const u = (x + 0.5) / dstWidth; | |
const v = (y + 0.5) / dstHeight; | |
// compute the same texcoord in the source - 0.5 a pixel | |
const au = (u * srcWidth - 0.5); | |
const av = (v * srcHeight - 0.5); | |
// compute the src top left texel coord (not texcoord) | |
const tx = au | 0; | |
const ty = av | 0; | |
// compute the mix amounts between pixels | |
const t1 = au % 1; | |
const t2 = av % 1; | |
// get the 4 pixels | |
const tl = getSrcPixel(tx, ty); | |
const tr = getSrcPixel(tx + 1, ty); | |
const bl = getSrcPixel(tx, ty); | |
const br = getSrcPixel(tx + 1, ty + 1); | |
// copy the "sampled" result into the dest. | |
const dstOffset = (y * dstWidth + x) * 4; | |
dst.set(bilinearFilter(tl, tr, bl, br, t1, t2), dstOffset); | |
} | |
} | |
return { data: dst, width: dstWidth, height: dstHeight }; | |
}; | |
const generateMips = (src, srcWidth) => { | |
const srcHeight = src.length / 4 / srcWidth; | |
// populate with first mip level (base level) | |
let mip = { data: src, width: srcWidth, height: srcHeight, }; | |
const mips = [mip]; | |
while (mip.width > 1 || mip.height > 1) { | |
mip = createNextMipLevelRgba8Unorm(mip); | |
mips.push(mip); | |
} | |
return mips; | |
}; | |
  //const mips = generateMips(textureData, kTextureWidth);
  // Hand-built mip chain instead of a generated one: level 0 is 2x2 solid
  // yellow, level 1 is 1x1 solid blue, so it's obvious which level the
  // sampler would pick at any size.
  const mips = [
    { width: 2, height: 2, data: textureData },
    { width: 1, height: 1, data: textureData2 },
  ];
  const texture = device.createTexture({
    label: 'yellow F on red',
    size: [mips[0].width, mips[0].height],
    mipLevelCount: 2,
    format: 'rgba8unorm',
    usage:
      GPUTextureUsage.TEXTURE_BINDING |
      GPUTextureUsage.COPY_DST,
  });
  // Upload each level's pixels into the matching mip level of the texture.
  mips.forEach(({data, width, height}, mipLevel) => {
    device.queue.writeTexture(
      { texture, mipLevel },
      data,
      { bytesPerRow: width * 4 },
      { width, height },
    );
  });

  // create a buffer for the uniform values
  const uniformBufferSize =
    2 * 4 + // scale is 2 32bit floats (4bytes each)
    2 * 4; // offset is 2 32bit floats (4bytes each)
  const uniformBuffer = device.createBuffer({
    label: 'uniforms for quad',
    size: uniformBufferSize,
    usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
  });

  // create a typedarray to hold the values for the uniforms in JavaScript
  const uniformValues = new Float32Array(uniformBufferSize / 4);
  // offsets to the various uniform values in float32 indices
  const kScaleOffset = 0;
  const kOffsetOffset = 2;

  // One pre-made bind group per sampler combination. The 4 bits of i select
  // addressModeU, addressModeV, magFilter, minFilter respectively — render()
  // rebuilds the same index from the GUI settings to pick a group.
  const bindGroups = [];
  for (let i = 0; i < 16; ++i) {
    const sampler = device.createSampler({
      addressModeU: (i & 1) ? 'repeat' : 'clamp-to-edge',
      addressModeV: (i & 2) ? 'repeat' : 'clamp-to-edge',
      magFilter: (i & 4) ? 'linear' : 'nearest',
      minFilter: (i & 8) ? 'linear' : 'nearest',
    });
    const bindGroup = device.createBindGroup({
      layout: pipeline.getBindGroupLayout(0),
      entries: [
        { binding: 0, resource: sampler },
        // The point of this example: bind each mip level of the SAME
        // texture as its own single-level view.
        { binding: 1, resource: texture.createView({baseMipLevel: 0, mipLevelCount: 1}) },
        { binding: 2, resource: texture.createView({baseMipLevel: 1, mipLevelCount: 1}) },
        { binding: 3, resource: { buffer: uniformBuffer }},
      ],
    });
    bindGroups.push(bindGroup);
  }

  const renderPassDescriptor = {
    label: 'our basic canvas renderPass',
    colorAttachments: [
      {
        // view: <- to be filled out when we render
        clearValue: [0.3, 0.3, 0.3, 1],
        loadOp: 'clear',
        storeOp: 'store',
      },
    ],
  };

  // GUI-controlled sampler settings; defaults match bindGroups[15].
  const settings = {
    addressModeU: 'repeat',
    addressModeV: 'repeat',
    magFilter: 'linear',
    minFilter: 'linear',
    scale: 1,
  };
  const addressOptions = ['repeat', 'clamp-to-edge'];
  const filterOptions = ['nearest', 'linear'];
  const gui = new GUI();
  // Pin the GUI to the left edge instead of its default right-side spot.
  Object.assign(gui.domElement.style, {right: '', left: '15px'});
  gui.add(settings, 'addressModeU', addressOptions);
  gui.add(settings, 'addressModeV', addressOptions);
  gui.add(settings, 'magFilter', filterOptions);
  gui.add(settings, 'minFilter', filterOptions);
  gui.add(settings, 'scale', 0.5, 6);
  // Per-frame callback: picks the bind group matching the GUI settings,
  // updates the uniforms, and draws the quad. Reschedules itself.
  function render(time) {
    time *= 0.001; // convert milliseconds to seconds

    // Same bit layout used when the 16 bind groups were created above.
    const ndx = (settings.addressModeU === 'repeat' ? 1 : 0) +
                (settings.addressModeV === 'repeat' ? 2 : 0) +
                (settings.magFilter === 'linear' ? 4 : 0) +
                (settings.minFilter === 'linear' ? 8 : 0);
    const bindGroup = bindGroups[ndx];

    // Quad size in clip space; since clip space spans 2 units across the
    // canvas, this works out to 2 * settings.scale canvas pixels per axis.
    const scaleX = 4 / canvas.width * settings.scale;
    const scaleY = 4 / canvas.height * settings.scale;

    uniformValues.set([scaleX, scaleY], kScaleOffset); // set the scale
    // Drift horizontally over time, pinned near the bottom of clip space.
    uniformValues.set([Math.sin(time * 0.25) * 0.9, -0.8], kOffsetOffset); // set the offset
    // copy the values from JavaScript to the GPU
    device.queue.writeBuffer(uniformBuffer, 0, uniformValues);

    // Get the current texture from the canvas context and
    // set it as the texture to render to.
    renderPassDescriptor.colorAttachments[0].view =
        context.getCurrentTexture().createView();

    const encoder = device.createCommandEncoder({
      label: 'render quad encoder',
    });
    const pass = encoder.beginRenderPass(renderPassDescriptor);
    pass.setPipeline(pipeline);
    pass.setBindGroup(0, bindGroup);
    pass.draw(6); // call our vertex shader 6 times
    pass.end();

    const commandBuffer = encoder.finish();
    device.queue.submit([commandBuffer]);

    requestAnimationFrame(render);
  }
requestAnimationFrame(render); | |
const observer = new ResizeObserver(entries => { | |
for (const entry of entries) { | |
const canvas = entry.target; | |
const width = entry.contentBoxSize[0].inlineSize / 64 | 0; | |
const height = entry.contentBoxSize[0].blockSize / 64 | 0; | |
canvas.width = Math.min(width, device.limits.maxTextureDimension2D); | |
canvas.height = Math.min(height, device.limits.maxTextureDimension2D); | |
} | |
}); | |
observer.observe(canvas); | |
} | |
// Report an unrecoverable startup problem (e.g. no WebGPU support) to the
// user with a blocking alert.
function fail(msg) {
  // eslint-disable-next-line no-alert
  alert(msg);
}

main();
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
{"name":"WebGPU Simple Textured Quad Mipmap (use views with mips)","settings":{},"filenames":["index.html","index.css","index.js"]} |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment