// Some WGPU code, very cool
use winit::{
window::Window,
event::{Event, WindowEvent, self},
event_loop,
};
use wgpu::util::DeviceExt;
use anyhow::Result;
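// #[repr(C)] plus bytemuck's Pod/Zeroable derives give Vertex a stable layout,
// so &[Vertex] can be reinterpreted as raw bytes (bytemuck::cast_slice) when
// filling the GPU buffers below.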
#[repr(C)]
#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
struct Vertex {
position: [f32; 3],
color: [f32; 3],
}
const VERTICES: &[Vertex] = &[
// triangle:
// Vertex { position: [0.0, 0.5, 0.0], color: [1.0, 0.0, 0.0] },
// Vertex { position: [-0.5, -0.5, 0.0], color: [0.0, 1.0, 0.0] },
// Vertex { position: [0.5, -0.5, 0.0], color: [0.0, 0.0, 1.0] },
Vertex { position: [-0.0868241, 0.49240386, 0.0], color: [0.5, 0.0, 0.5] }, // A
Vertex { position: [-0.49513406, 0.06958647, 0.0], color: [0.5, 0.0, 0.5] }, // B
Vertex { position: [-0.21918549, -0.44939706, 0.0], color: [0.5, 0.0, 0.5] }, // C
Vertex { position: [0.35966998, -0.3473291, 0.0], color: [0.5, 0.0, 0.5] }, // D
Vertex { position: [0.44147372, 0.2347359, 0.0], color: [0.5, 0.0, 0.5] }, // E
];
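// Index buffer: each triple indexes into VERTICES, so the three triangles
// (A-B-E, B-C-E, C-D-E) together fill the pentagon.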
const INDICES: &[u16] = &[
0, 1, 4,
1, 2, 4,
2, 3, 4,
];
struct State {
// GPU
surface: wgpu::Surface,
device: wgpu::Device,
queue: wgpu::Queue,
surface_config: wgpu::SurfaceConfiguration,
render_pipeline: wgpu::RenderPipeline,
vertex_buffer: wgpu::Buffer,
index_buffer: wgpu::Buffer,
// Window
size: winit::dpi::PhysicalSize<u32>,
window: Window
}
impl State {
async fn new(window: Window) -> Self {
let size = window.inner_size();
// handle to the GPU -> used to create Adapters and Surfaces
let instance = wgpu::Instance::new(wgpu::InstanceDescriptor {
backends: wgpu::Backends::all(), // select a backend for the platform
..Default::default()
});
// Safety: surface needs to live as long as the window that created it
// surface is part of the window we draw to
let surface = unsafe { instance.create_surface(&window) }.unwrap();
// handle to the actual graphics card
let adapter = instance.request_adapter(
&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::default(), // HighPerformance or LowPower
compatible_surface: Some(&surface),
force_fallback_adapter: false, // when true, forces wgpu to pick an adapter that works on
// all hardware (a software renderer)
},
).await.unwrap(); // replace unwrap: https://sotrh.github.io/learn-wgpu/beginner/tutorial2-surface/#state-new
let (device, queue) = adapter.request_device(
&wgpu::DeviceDescriptor {
features: wgpu::Features::empty(),
limits: if cfg!(target_arch = "wasm32") {
// limits for webgl
wgpu::Limits::downlevel_webgl2_defaults()
} else {
// we don't require extra features
wgpu::Limits::default()
},
label: Some("device")
},
None // trace path
).await.unwrap(); // https://sotrh.github.io/learn-wgpu/beginner/tutorial2-surface/#state-new
let surface_capabilities = surface.get_capabilities(&adapter);
// assuming an sRGB surface!
let surface_format = surface_capabilities.formats.iter()
.copied()
.find(|f| f.is_srgb())
.unwrap_or(surface_capabilities.formats[0]);
let surface_config = wgpu::SurfaceConfiguration {
usage: wgpu::TextureUsages::RENDER_ATTACHMENT, // textures will be used to draw to the screen
format: surface_format, // how textures will be stored on the GPU
width: size.width,
height: size.height,
present_mode: surface_capabilities.present_modes[0], // Fifo ~= VSync
alpha_mode: surface_capabilities.alpha_modes[0],
view_formats: Vec::new() // list of formats that we can use when creating TextureViews
};
surface.configure(&device, &surface_config);
let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
label: Some("Shader"),
source: wgpu::ShaderSource::Wgsl(include_str!("shader.wgsl").into())
});
let render_pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Render Pipeline Layout"),
bind_group_layouts: &[],
push_constant_ranges: &[],
});
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("Render Pipeline"),
layout: Some(&render_pipeline_layout),
vertex: wgpu::VertexState {
module: &shader,
entry_point: "vs_main",
buffers: &[
// how to read the buffer
wgpu::VertexBufferLayout {
array_stride: std::mem::size_of::<Vertex>() as wgpu::BufferAddress, // Width of a vertex
step_mode: wgpu::VertexStepMode::Vertex,
attributes: &wgpu::vertex_attr_array![0 => Float32x3, 1 => Float32x3] //&[
// wgpu::VertexAttribute {
// offset: 0,
// shader_location: 0,
// format: wgpu::VertexFormat::Float32x3,
// },
// wgpu::VertexAttribute {
// offset: std::mem::size_of::<[f32; 3]>() as wgpu::BufferAddress,
// shader_location: 1,
// format: wgpu::VertexFormat::Float32x3,
// }
// ]
}
],
},
fragment: Some(wgpu::FragmentState {
module: &shader,
entry_point: "fs_main",
// Color outputs to set up
targets: &[Some(wgpu::ColorTargetState {
format: surface_config.format,
blend: Some(wgpu::BlendState::REPLACE),
write_mask: wgpu::ColorWrites::ALL, // write to a, r, g & b
})],
}),
// How to interpret vertices when converting them to triangles
primitive: wgpu::PrimitiveState {
topology: wgpu::PrimitiveTopology::TriangleList,
strip_index_format: None,
front_face: wgpu::FrontFace::Ccw,
cull_mode: Some(wgpu::Face::Back),
// \/ Requires additional features \/
polygon_mode: wgpu::PolygonMode::Fill,
unclipped_depth: false,
conservative: false,
},
depth_stencil: None,
multisample: wgpu::MultisampleState {
count: 1, // sample count
mask: !0, // all samples active
alpha_to_coverage_enabled: false, // anti-aliasing
},
multiview: None, // for array textures
});
let vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Vertex buffer"),
contents: bytemuck::cast_slice(VERTICES),
usage: wgpu::BufferUsages::VERTEX,
});
let index_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Index Buffer"),
contents: bytemuck::cast_slice(INDICES),
usage: wgpu::BufferUsages::INDEX,
}
);
Self {
surface_config,
surface,
device,
queue,
render_pipeline,
vertex_buffer,
index_buffer,
size,
window,
}
}
pub fn window(&self) -> &Window {
&self.window
}
fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
if new_size.width > 0 && new_size.height > 0 {
self.size = new_size;
self.surface_config.width = new_size.width;
self.surface_config.height = new_size.height;
self.surface.configure(&self.device, &self.surface_config);
}
}
/// Processes input.
/// Returns true when the event has been fully processed and doesn't need further processing by
/// the main event loop.
fn input(&mut self, event: &WindowEvent) -> bool {
_ = event;
false
}
fn update(&mut self) {
}
fn render(&mut self) -> Result<(), wgpu::SurfaceError> {
// get frame to render to; texture and view to the screen
let output = self.surface.get_current_texture()?;
let view = output.texture.create_view(&wgpu::TextureViewDescriptor::default());
let mut encoder = self.device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Render Encoder"),
});
// Render pass (scoped in a block so the mutable borrow of `encoder` ends before `encoder.finish()`)
{
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
label: Some("Render Pass"),
// where we are going to draw our color to
color_attachments: &[
Some(wgpu::RenderPassColorAttachment {
// render to the screen
view: &view,
resolve_target: None, // for multisampling
ops: wgpu::Operations {
// Screen clear color
load: wgpu::LoadOp::Clear(wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
}),
store: wgpu::StoreOp::Store,
},
}
)],
depth_stencil_attachment: None,
occlusion_query_set: None,
timestamp_writes: None,
});
render_pass.set_pipeline(&self.render_pipeline);
render_pass.set_vertex_buffer(0, self.vertex_buffer.slice(..));
render_pass.set_index_buffer(self.index_buffer.slice(..), wgpu::IndexFormat::Uint16);
// draw 1 instance using the index buffer; the current index is exposed to the shader as @builtin(vertex_index)
// vertex buffer only (no indices): render_pass.draw(0..VERTICES.len() as u32, 0..1);
render_pass.draw_indexed(0..INDICES.len() as u32, 0, 0..1);
}
// Finish the command buffer and submit to the GPU's render queue
self.queue.submit(std::iter::once(encoder.finish()));
output.present();
Ok(())
}
}
#[allow(unused)]
async fn run() {
env_logger::init();
let event_loop = event_loop::EventLoop::new(); // loop provided by winit to handle window events
let window = winit::window::WindowBuilder::new()
.build(&event_loop).unwrap();
let mut state = State::new(window).await;
// Open the window and start processing events
event_loop.run(move |event, _, control_flow| {
match event {
Event::WindowEvent { ref event, window_id } if window_id == state.window.id()
=> if !state.input(event) {
match event {
WindowEvent::CloseRequested | WindowEvent::KeyboardInput {
input: event::KeyboardInput {
state: event::ElementState::Pressed,
virtual_keycode: Some(event::VirtualKeyCode::Escape),
..
},
..
} => *control_flow = event_loop::ControlFlow::Exit,
WindowEvent::Resized(physical_size) => {
state.resize(*physical_size);
},
WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
state.resize(**new_inner_size);
},
_ => {}
}
},
Event::RedrawRequested(window_id) if window_id == state.window.id() => {
state.update();
match state.render() {
Ok(_) => {},
// Reconfigure surface when lost
Err(wgpu::SurfaceError::Lost) => state.resize(state.size),
Err(wgpu::SurfaceError::OutOfMemory) => *control_flow = event_loop::ControlFlow::Exit,
// Other errors should be resolved by the next frame
Err(e) => eprintln!("[ERROR] {:?}", e),
}
},
Event::MainEventsCleared => {
// RedrawRequested will only trigger once unless we manually request it
state.window().request_redraw();
}
_ => {}
}
});
}
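// pollster::block_on drives the async run() future to completion on the current thread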
fn main() {
pollster::block_on(run());
}
// shader.wgsl

// Vertex shader
struct VertexInput {
@location(0) position: vec3<f32>,
@location(1) color: vec3<f32>,
}
struct VertexOutput {
// values within framebuffer dimensions
@builtin(position) clip_position: vec4<f32>,
// position in world
//@location(0) vert_pos: vec3<f32>,
@location(0) color: vec3<f32>,
}
@vertex // mark as vertex entry point
fn vs_main(
// param gets its value from @builtin(vertex_index)
//@builtin(vertex_index) in_vertex_index: u32,
model: VertexInput,
) -> VertexOutput {
var out: VertexOutput;
out.color = model.color;
out.clip_position = vec4<f32>(model.position, 1.0);
// let x = f32(1 - i32(in_vertex_index)) * 0.5;
// let y = f32(i32(in_vertex_index & 1u) * 2 - 1) * 0.5;
// out.clip_position = vec4<f32>(x, y, 0., 1.);
//out.vert_pos = out.clip_position.xyz;
return out;
}
// Fragment shader
@fragment // store output in first color target
fn fs_main(in: VertexOutput) -> @location(0) vec4<f32> {
// Color
return vec4<f32>(in.color, 1.0);
}