@itamarst
Created February 14, 2024 13:37
wgpu example
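# Compare element-wise addition of two 1,000,000-element int32 arrays on the
# GPU (via a WGSL compute shader run through wgpu) with plain NumPy on the CPU.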
from wgpu.utils.compute import compute_with_buffers
from wgpu.utils import get_default_device
import numpy as np
from time import time
# A "shader"" is a program that will run on the GPU:
SHADER = """
@group(0) @binding(0)
var<storage,read> input1: array<i32>;
@group(0) @binding(1)
var<storage,read> input2: array<i32>;
@group(0) @binding(2)
var<storage,read_write> output: array<i32>;
@compute
@workgroup_size(50)
fn main(@builtin(global_invocation_id) index: vec3<u32>) {
    let i: u32 = index.x;
    output[i] = input1[i] + input2[i];
}
"""
INPUT1 = np.arange(0, 1_000_000, dtype=np.int32)
INPUT2 = np.arange(2, 1_000_002, dtype=np.int32)
assert INPUT1.shape == INPUT2.shape
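# Note: 1,000,000 is evenly divisible by the workgroup size (50), so the
# dispatch below covers every element and the shader needs no bounds check.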

def run_on_gpu():
    # Map from input binding number to the relevant NumPy array:
    inputs = {0: INPUT1, 1: INPUT2}
    # Map from output binding number to the length and type of the
    # output. "i" means signed 32-bit integer.
    outputs = {2: (len(INPUT1), "i")}
    # The workgroup size is 50, so dispatch len(INPUT1) // 50 workgroups to
    # get one shader invocation per array element:
    result = compute_with_buffers(
        inputs, outputs, SHADER, n=len(INPUT1) // 50
    )
    # The result maps binding numbers to memoryviews:
    return np.frombuffer(result[2], np.int32)

def run_on_cpu():
    return INPUT1 + INPUT2

def main():
    device = get_default_device()
    print("GPU:", device._adapter.request_adapter_info()["device"])
    # Run once to prep everything:
    run_on_gpu()
    start = time()
    for _ in range(10):
        gpu_result = run_on_gpu()
    print("GPU mean elapsed:", (time() - start) / 10)
    start = time()
    for _ in range(10):
        cpu_result = run_on_cpu()
    print("CPU mean elapsed:", (time() - start) / 10)
    assert np.array_equal(gpu_result, cpu_result)


if __name__ == "__main__":
    main()
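
To try this out you'll need the wgpu and numpy packages installed (for example, pip install wgpu numpy) plus a GPU driver with Vulkan, Metal, or DX12 support; exact timings will of course vary with your hardware.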