|
import canvasSketch from "canvas-sketch"; |
|
import fastpng from "fast-png"; |
|
|
|
// canvas-sketch settings: render a single square 1024x1024 frame.
const settings = {
  dimensions: [1024, 1024],
};
|
|
|
// canvas-sketch "sketch" factory: builds a WebGPU render pipeline that draws
// one gradient triangle and returns the per-frame render callback.
// `context` is a GPUCanvasContext on the web, or the Deno stand-in created by
// the runner below; `data` carries the GPUDevice the runner injects.
const sketch = ({ context, data }) => {
  const { device } = data;

  // Swap-chain texture format. Under Deno, getPreferredCanvasFormat is
  // monkey-patched by createDenoContext() below to return "rgba8unorm-srgb",
  // matching the capture texture's format.
  const presentationFormat = navigator.gpu.getPreferredCanvasFormat();
  // Binds device + format to the canvas on the web; a no-op for the Deno stub.
  context.configure({
    device,
    format: presentationFormat,
    alphaMode: "opaque",
  });

  // WGSL: the vertex stage synthesizes the triangle from vertex_index alone
  // (no vertex buffers) — indices 0,1,2 give x in {-1,0,1}, y in {-1,1,-1} —
  // and maps clip-space position into the red/green channels; the fragment
  // stage passes the interpolated color through with alpha forced to 1.
  const shaderCode = `
struct VertexOut {
  @builtin(position) position : vec4<f32>,
  @location(0) @interpolate(linear) color : vec4<f32>,
};

@vertex
fn vs_main(@builtin(vertex_index) in_vertex_index: u32) -> VertexOut {
  let x = f32(i32(in_vertex_index) - 1);
  let y = f32(i32(in_vertex_index & 1u) * 2 - 1);

  var output : VertexOut;
  output.position = vec4<f32>(x, y, 0.0, 1.0);
  output.color = vec4<f32>(x*0.5+0.5, y*0.5+0.5, 1.0, 1.0);
  return output;
}

@fragment
fn fs_main(fragData: VertexOut) -> @location(0) vec4<f32> {
  return vec4<f32>(fragData.color.r, fragData.color.g, fragData.color.b, 1.0);
}
`;

  const shaderModule = device.createShaderModule({
    code: shaderCode,
  });

  // layout: "auto" derives the (empty) pipeline layout from the shaders —
  // this sketch uses no bind groups.
  const pipeline = device.createRenderPipeline({
    layout: "auto",
    vertex: {
      module: shaderModule,
      entryPoint: "vs_main",
    },
    fragment: {
      module: shaderModule,
      entryPoint: "fs_main",
      targets: [
        {
          format: presentationFormat,
        },
      ],
    },
  });

  // Per-frame render: clear to opaque black, draw the triangle in one pass,
  // submit the command buffer.
  return () => {
    const commandEncoder = device.createCommandEncoder();
    const textureView = context.getCurrentTexture().createView();

    const renderPassDescriptor = {
      colorAttachments: [
        {
          view: textureView,
          clearValue: {
            r: 0,
            g: 0,
            b: 0,
            a: 1,
          },
          loadOp: "clear",
          storeOp: "store",
        },
      ],
    };

    const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor);
    passEncoder.setPipeline(pipeline);
    // 3 vertices, 1 instance, no offsets — positions come from vertex_index.
    passEncoder.draw(3, 1, 0, 0);
    passEncoder.end();
    device.queue.submit([commandEncoder.finish()]);
  };
};
|
|
|
// canvas-sketch runner, agnostic to web & deno.
// On the web it renders into a real <canvas>; under Deno it renders into an
// offscreen capture texture and writes the frame to test.png.
(async () => {
  const isWeb = typeof Deno === "undefined";

  // WebGPU may be unavailable (older browser, disabled flag, headless env)
  // and requestAdapter() resolves to null in that case — fail with a clear
  // message instead of a TypeError on adapter.requestDevice().
  if (!navigator.gpu) {
    throw new Error("WebGPU is not supported in this environment");
  }
  const adapter = await navigator.gpu.requestAdapter();
  if (!adapter) {
    throw new Error("WebGPU: no suitable GPU adapter found");
  }
  const device = await adapter.requestDevice();

  // Builds minimal canvas + GPUCanvasContext stand-ins for Deno, where no DOM
  // canvas exists. Rendering targets an offscreen texture that can be read
  // back and PNG-encoded through canvas.toBuffer().
  const createDenoContext = () => {
    // getPreferredCanvasFormat does not exist in Deno; patch it so the sketch
    // gets a concrete format matching the capture texture created below.
    navigator.gpu.getPreferredCanvasFormat = () => "rgba8unorm-srgb";

    // Lazily created render target + readback buffer (see getCurrentTexture).
    let texture, outputBuffer;

    const canvas = {
      // Encodes the last rendered frame as a PNG byte buffer.
      // NOTE(review): relies on canvas-sketch assigning canvas.width/height
      // from `dimensions` before the first render — confirm against the
      // canvas-sketch version in use.
      toBuffer: async () => {
        const dimensions = {
          width: canvas.width,
          height: canvas.height,
        };
        const encoder = device.createCommandEncoder();
        copyToBuffer(encoder, texture, outputBuffer, dimensions);
        device.queue.submit([encoder.finish()]);
        const pixels = await createPixelBuffer(outputBuffer, dimensions);
        return fastpng.encode({
          ...dimensions,
          data: pixels,
        });
      },
    };

    const context = {
      // Mimics GPUCanvasContext.getCurrentTexture(): allocate the capture
      // texture and readback buffer on first use, then reuse them.
      getCurrentTexture() {
        if (!texture) {
          const r = createCapture(device, canvas);
          texture = r.texture;
          outputBuffer = r.outputBuffer;
        }
        return texture;
      },
      canvas,
      configure: () => {},
    };

    return {
      canvas,
      context,
    };
  };

  const env = {
    ...(isWeb
      ? {
          context: "webgpu",
        }
      : createDenoContext()),
    // Handed to the sketch via its `data` prop.
    data: {
      adapter,
      device,
    },
  };

  const manager = await canvasSketch(sketch, {
    ...settings,
    ...env,
  });

  // Under Deno there is no browser to display the canvas — persist the frame
  // to disk instead.
  if (!isWeb) {
    const buffer = await manager.props.canvas.toBuffer();
    Deno.writeFileSync("test.png", buffer);
  }
})();
|
|
|
/////////////////// |
|
////// Utils ////// |
|
/////////////////// |
|
|
|
/**
 * WebGPU requires bytesPerRow in texture/buffer copies to be a multiple of
 * COPY_BYTES_PER_ROW_ALIGNMENT (256). For an RGBA8 image of the given pixel
 * width, return both the tight (unpadded) row size and the 256-byte-aligned
 * (padded) row size.
 * https://en.wikipedia.org/wiki/Data_structure_alignment#Computing_padding
 *
 * @param width Image width in pixels (4 bytes per pixel).
 * @returns {{ unpadded: number, padded: number }} row sizes in bytes.
 */
function getRowPadding(width) {
  const BYTES_PER_PIXEL = 4;
  const ALIGNMENT = 256;

  const tightRowBytes = width * BYTES_PER_PIXEL;
  // Round up to the next multiple of ALIGNMENT (a no-op when already aligned).
  const alignedRowBytes = Math.ceil(tightRowBytes / ALIGNMENT) * ALIGNMENT;

  return {
    unpadded: tightRowBytes,
    padded: alignedRowBytes,
  };
}
|
|
|
/**
 * Allocates the offscreen capture pair: a render-target texture and a
 * mappable readback buffer whose rows are padded to the mandatory 256-byte
 * alignment used by copyTextureToBuffer.
 *
 * @param device GPUDevice to allocate on.
 * @param dimensions Object exposing `width`/`height` in pixels (the Deno
 *   runner passes its canvas stand-in here and reads the size off it).
 * @returns {{ outputBuffer, texture }}
 */
function createCapture(device, dimensions) {
  const { height } = dimensions;
  const { padded: paddedRowBytes } = getRowPadding(dimensions.width);

  const texture = device.createTexture({
    label: "Capture",
    size: dimensions,
    format: "rgba8unorm-srgb",
    usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.COPY_SRC,
  });

  const outputBuffer = device.createBuffer({
    label: "Capture",
    size: paddedRowBytes * height,
    usage: GPUBufferUsage.MAP_READ | GPUBufferUsage.COPY_DST,
  });

  return { outputBuffer, texture };
}
|
|
|
/**
 * Records a texture-to-buffer copy of one frame so its pixels can be read
 * back on the CPU. Rows in the destination buffer are padded to the mandatory
 * 256-byte alignment; callers strip that padding afterwards (see
 * createPixelBuffer).
 *
 * @param encoder GPUCommandEncoder to record the copy into.
 * @param texture Source GPUTexture (the capture render target).
 * @param outputBuffer Destination GPUBuffer created with MAP_READ | COPY_DST.
 * @param dimensions `{ width, height }` of the copy in pixels.
 */
function copyToBuffer(encoder, texture, outputBuffer, dimensions) {
  const { padded } = getRowPadding(dimensions.width);

  encoder.copyTextureToBuffer(
    {
      texture,
    },
    {
      buffer: outputBuffer,
      bytesPerRow: padded,
      // Fix: was 0, which fails validation in spec-conformant WebGPU
      // implementations — when rowsPerImage is specified it must be >= the
      // copy height. For this single-image copy it is simply the height.
      rowsPerImage: dimensions.height,
    },
    dimensions
  );
}
|
|
|
/**
 * Maps the readback buffer and returns a tightly-packed Uint8Array of RGBA
 * pixels, stripping the per-row alignment padding that the GPU copy required.
 * The data is copied out before unmap(), so the result stays valid after the
 * mapped range is released.
 *
 * @param buffer GPUBuffer with MAP_READ usage, already filled by the copy.
 * @param dimensions `{ width, height }` of the image in pixels.
 * @returns Promise<Uint8Array> of length width * height * 4.
 */
async function createPixelBuffer(buffer, dimensions) {
  // 1 === GPUMapMode.READ (numeric literal kept so this also runs where the
  // GPUMapMode global is not exposed).
  await buffer.mapAsync(1);
  const inputBuffer = new Uint8Array(buffer.getMappedRange());
  const { padded, unpadded } = getRowPadding(dimensions.width);
  const outputBuffer = new Uint8Array(unpadded * dimensions.height);
  for (let i = 0; i < dimensions.height; i++) {
    // subarray is a zero-copy view; the previous slice().slice() allocated
    // two temporary arrays per row. set() then copies into the output.
    const start = i * padded;
    outputBuffer.set(inputBuffer.subarray(start, start + unpadded), i * unpadded);
  }
  buffer.unmap();
  return outputBuffer;
}