diff --git a/Cargo.toml b/Cargo.toml index 57365350d..1006b4c02 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,28 +21,23 @@ exclude = ["etc/**/*", "examples/**/*", "tests/**/*", "Cargo.lock", "target/**/* [features] default = [] # Make Vulkan backend available on platforms where it is by default not, e.g. macOS -vulkan = ["wgn/vulkan-portability"] - -[target.'cfg(not(target_arch = "wasm32"))'.dependencies.wgn] -package = "wgpu-native" -version = "0.5" -git = "https://github.com/gfx-rs/wgpu" -rev = "49dbe08f37f8396cff0d6672667a48116ec487f5" +vulkan = ["wgc/gfx-backend-vulkan"] [target.'cfg(not(target_arch = "wasm32"))'.dependencies.wgc] package = "wgpu-core" version = "0.5" git = "https://github.com/gfx-rs/wgpu" -rev = "49dbe08f37f8396cff0d6672667a48116ec487f5" +rev = "5c172dd4756aa152b4f3350e624d7b1b5d24ddda" [dependencies.wgt] package = "wgpu-types" version = "0.5" git = "https://github.com/gfx-rs/wgpu" -rev = "49dbe08f37f8396cff0d6672667a48116ec487f5" +rev = "5c172dd4756aa152b4f3350e624d7b1b5d24ddda" [dependencies] arrayvec = "0.5" +futures = "0.3" smallvec = "1" raw-window-handle = "0.3" parking_lot = "0.10" @@ -54,7 +49,6 @@ png = "0.15" winit = { version = "0.22.1", features = ["web-sys"] } rand = { version = "0.7.2", features = ["wasm-bindgen"] } bytemuck = "1" -futures = "0.3" [[example]] name="hello-compute" @@ -64,7 +58,6 @@ test = true [patch.crates-io] #wgpu-types = { version = "0.5.0", path = "../wgpu/wgpu-types" } #wgpu-core = { version = "0.5.0", path = "../wgpu/wgpu-core" } -#wgpu-native = { version = "0.5.0", path = "../wgpu/wgpu-native" } #gfx-hal = { version = "0.5.0", path = "../gfx/src/hal" } #gfx-backend-empty = { version = "0.5.0", path = "../gfx/src/backend/empty" } #gfx-backend-vulkan = { version = "0.5.0", path = "../gfx/src/backend/vulkan" } @@ -78,6 +71,9 @@ wasm-bindgen-futures = { git = "https://github.com/rustwasm/wasm-bindgen" } web-sys = { git = "https://github.com/rustwasm/wasm-bindgen" } js-sys = { git = "https://github.com/rustwasm/wasm-bindgen" } +[target.'cfg(target_os = "macos")'.dependencies] +objc = "0.2.7" + [target.'cfg(not(target_arch = "wasm32"))'.dependencies] env_logger = "0.7" diff --git a/README.md b/README.md index 640610b11..d27ad0cff 100644 --- a/README.md +++ b/README.md @@ -8,7 +8,7 @@ wgpu-rs is an idiomatic Rust wrapper over [wgpu-core](https://github.com/gfx-rs/wgpu). It's designed to be suitable for general purpose graphics and computation needs of Rust community. -Currently wgpu-rs works on native platforms, but [WASM support is currently being added](https://github.com/gfx-rs/wgpu-rs/issues/101) as well. +wgpu-rs can target both the natively supported backends and WASM directly. 
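The manifest drops the `wgpu-native` (`wgn`) dependency and builds on `wgpu-core` (`wgc`) directly, with the `vulkan` feature now forwarding to `wgc/gfx-backend-vulkan`. The example diffs below all follow the same migration: an explicit `wgpu::Instance` owns surface creation (now `unsafe`) and adapter requests, and `request_device` resolves to a `Result`. A minimal sketch of the new initialization flow, assuming a winit `Window` and the wgpu 0.5 `DeviceDescriptor` fields (`extensions`, `limits`) that the hunks below truncate:

```rust
// Sketch only: mirrors the pattern adopted by the updated examples.
async fn init(window: &winit::window::Window) -> (wgpu::Surface, wgpu::Device, wgpu::Queue) {
    let instance = wgpu::Instance::new();
    // Surface creation moved onto the instance and is now unsafe.
    let surface = unsafe { instance.create_surface(window) };

    let adapter = instance
        .request_adapter(
            &wgpu::RequestAdapterOptions {
                power_preference: wgpu::PowerPreference::Default,
                compatible_surface: Some(&surface),
            },
            wgpu::BackendBit::PRIMARY,
        )
        .await
        .expect("no suitable adapter");

    // `request_device` now returns a `Result`, hence the extra unwrap/expect in the examples.
    let (device, queue) = adapter
        .request_device(&wgpu::DeviceDescriptor {
            extensions: wgpu::Extensions {
                anisotropic_filtering: false, // assumed wgpu 0.5 field; the diff context above elides it
            },
            limits: wgpu::Limits::default(),
        })
        .await
        .expect("failed to request device");

    (surface, device, queue)
}
```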
## Gallery diff --git a/examples/capture/main.rs b/examples/capture/main.rs index 5f3cc8429..1034a178d 100644 --- a/examples/capture/main.rs +++ b/examples/capture/main.rs @@ -5,15 +5,16 @@ use std::fs::File; use std::mem::size_of; async fn run() { - let adapter = wgpu::Adapter::request( - &wgpu::RequestAdapterOptions { - power_preference: wgpu::PowerPreference::Default, - compatible_surface: None, - }, - wgpu::BackendBit::PRIMARY, - ) - .await - .unwrap(); + let adapter = wgpu::Instance::new() + .request_adapter( + &wgpu::RequestAdapterOptions { + power_preference: wgpu::PowerPreference::Default, + compatible_surface: None, + }, + wgpu::BackendBit::PRIMARY, + ) + .await + .unwrap(); let (device, queue) = adapter .request_device(&wgpu::DeviceDescriptor { @@ -22,7 +23,8 @@ async fn run() { }, limits: wgpu::Limits::default(), }) - .await; + .await + .unwrap(); // Rendered image is 256×256 with 32-bit RGBA color let size = 256u32; @@ -86,7 +88,7 @@ async fn run() { encoder.finish() }; - queue.submit(&[command_buffer]); + queue.submit(Some(command_buffer)); // Note that we're not calling `.await` here. let buffer_future = output_buffer.map_read(0, (size * size) as u64 * size_of::() as u64); diff --git a/examples/describe/main.rs b/examples/describe/main.rs index 2b5d75939..8b123cfd7 100644 --- a/examples/describe/main.rs +++ b/examples/describe/main.rs @@ -1,14 +1,15 @@ /// This example shows how to describe the adapter in use. async fn run() { - let adapter = wgpu::Adapter::request( - &wgpu::RequestAdapterOptions { - power_preference: wgpu::PowerPreference::Default, - compatible_surface: None, - }, - wgpu::BackendBit::PRIMARY, - ) - .await - .unwrap(); + let adapter = wgpu::Instance::new() + .request_adapter( + &wgpu::RequestAdapterOptions { + power_preference: wgpu::PowerPreference::Default, + compatible_surface: None, + }, + wgpu::BackendBit::PRIMARY, + ) + .await + .unwrap(); #[cfg(not(target_arch = "wasm32"))] println!("{:?}", adapter.get_info()) diff --git a/examples/framework.rs b/examples/framework.rs index 221dfbe77..84c55ddbf 100644 --- a/examples/framework.rs +++ b/examples/framework.rs @@ -49,21 +49,23 @@ pub trait Example: 'static + Sized { async fn run_async(event_loop: EventLoop<()>, window: Window) { log::info!("Initializing the surface..."); - let (size, surface) = { + let instance = wgpu::Instance::new(); + let (size, surface) = unsafe { let size = window.inner_size(); - let surface = wgpu::Surface::create(&window); + let surface = instance.create_surface(&window); (size, surface) }; - let adapter = wgpu::Adapter::request( - &wgpu::RequestAdapterOptions { - power_preference: wgpu::PowerPreference::Default, - compatible_surface: Some(&surface), - }, - wgpu::BackendBit::PRIMARY, - ) - .await - .unwrap(); + let adapter = instance + .request_adapter( + &wgpu::RequestAdapterOptions { + power_preference: wgpu::PowerPreference::Default, + compatible_surface: Some(&surface), + }, + wgpu::BackendBit::PRIMARY, + ) + .await + .unwrap(); let (device, queue) = adapter .request_device(&wgpu::DeviceDescriptor { @@ -72,7 +74,8 @@ async fn run_async(event_loop: EventLoop<()>, window: Window) { }, limits: wgpu::Limits::default(), }) - .await; + .await + .unwrap(); let mut sc_desc = wgpu::SwapChainDescriptor { usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT, @@ -90,8 +93,8 @@ async fn run_async(event_loop: EventLoop<()>, window: Window) { log::info!("Initializing the example..."); let (mut example, init_command_buf) = E::init(&sc_desc, &device); - if let Some(command_buf) = init_command_buf { - 
queue.submit(&[command_buf]); + if init_command_buf.is_some() { + queue.submit(init_command_buf); } log::info!("Entering render loop..."); @@ -112,8 +115,8 @@ async fn run_async(event_loop: EventLoop<()>, window: Window) { sc_desc.height = size.height; swap_chain = device.create_swap_chain(&surface, &sc_desc); let command_buf = example.resize(&sc_desc, &device); - if let Some(command_buf) = command_buf { - queue.submit(&[command_buf]); + if command_buf.is_some() { + queue.submit(command_buf); } } event::Event::WindowEvent { event, .. } => match event { @@ -138,7 +141,7 @@ async fn run_async(event_loop: EventLoop<()>, window: Window) { .get_next_texture() .expect("Timeout when acquiring next swap chain texture"); let command_buf = example.render(&frame, &device); - queue.submit(&[command_buf]); + queue.submit(Some(command_buf)); } _ => {} } diff --git a/examples/hello-compute/main.rs b/examples/hello-compute/main.rs index 2afafe2ec..9e60f9920 100644 --- a/examples/hello-compute/main.rs +++ b/examples/hello-compute/main.rs @@ -20,15 +20,17 @@ async fn execute_gpu(numbers: Vec<u32>) -> Vec<u32> { let slice_size = numbers.len() * std::mem::size_of::<u32>(); let size = slice_size as wgpu::BufferAddress; - let adapter = wgpu::Adapter::request( - &wgpu::RequestAdapterOptions { - power_preference: wgpu::PowerPreference::Default, - compatible_surface: None, - }, - wgpu::BackendBit::PRIMARY, - ) - .await - .unwrap(); + let instance = wgpu::Instance::new(); + let adapter = instance + .request_adapter( + &wgpu::RequestAdapterOptions { + power_preference: wgpu::PowerPreference::Default, + compatible_surface: None, + }, + wgpu::BackendBit::PRIMARY, + ) + .await + .unwrap(); let (device, queue) = adapter .request_device(&wgpu::DeviceDescriptor { @@ -37,7 +39,8 @@ async fn execute_gpu(numbers: Vec<u32>) -> Vec<u32> { }, limits: wgpu::Limits::default(), }) - .await; + .await + .unwrap(); let cs = include_bytes!("shader.comp.spv"); let cs_module = @@ -103,7 +106,7 @@ async fn execute_gpu(numbers: Vec<u32>) -> Vec<u32> { } encoder.copy_buffer_to_buffer(&storage_buffer, 0, &staging_buffer, 0, size); - queue.submit(Some(encoder.finish())); // Note that we're not calling `.await` here. 
let buffer_future = staging_buffer.map_read(0, size); diff --git a/examples/hello-triangle/main.rs b/examples/hello-triangle/main.rs index 9eefcbae2..7df29c320 100644 --- a/examples/hello-triangle/main.rs +++ b/examples/hello-triangle/main.rs @@ -6,17 +6,18 @@ use winit::{ async fn run(event_loop: EventLoop<()>, window: Window, swapchain_format: wgpu::TextureFormat) { let size = window.inner_size(); - let surface = wgpu::Surface::create(&window); - - let adapter = wgpu::Adapter::request( - &wgpu::RequestAdapterOptions { - power_preference: wgpu::PowerPreference::Default, - compatible_surface: Some(&surface), - }, - wgpu::BackendBit::PRIMARY, - ) - .await - .unwrap(); + let instance = wgpu::Instance::new(); + let surface = unsafe { instance.create_surface(&window) }; + let adapter = instance + .request_adapter( + &wgpu::RequestAdapterOptions { + power_preference: wgpu::PowerPreference::Default, + compatible_surface: Some(&surface), + }, + wgpu::BackendBit::PRIMARY, + ) + .await + .unwrap(); let (device, queue) = adapter .request_device(&wgpu::DeviceDescriptor { @@ -25,7 +26,8 @@ async fn run(event_loop: EventLoop<()>, window: Window, swapchain_format: wgpu:: }, limits: wgpu::Limits::default(), }) - .await; + .await + .unwrap(); let vs = include_bytes!("shader.vert.spv"); let vs_module = @@ -116,7 +118,7 @@ async fn run(event_loop: EventLoop<()>, window: Window, swapchain_format: wgpu:: rpass.draw(0..3, 0..1); } - queue.submit(&[encoder.finish()]); + queue.submit(Some(encoder.finish())); } Event::WindowEvent { event: WindowEvent::CloseRequested, @@ -150,10 +152,6 @@ fn main() { .ok() }) .expect("couldn't append canvas to document body"); - wasm_bindgen_futures::spawn_local(run( - event_loop, - window, - wgpu::TextureFormat::Bgra8Unorm, - )); + wasm_bindgen_futures::spawn_local(run(event_loop, window, wgpu::TextureFormat::Bgra8Unorm)); } } diff --git a/src/backend/direct.rs b/src/backend/direct.rs new file mode 100644 index 000000000..fed7fc352 --- /dev/null +++ b/src/backend/direct.rs @@ -0,0 +1,1011 @@ +use crate::{ + backend::native_gpu_future, BindGroupDescriptor, BindGroupLayoutDescriptor, BindingResource, + BindingType, BufferDescriptor, CommandEncoderDescriptor, ComputePipelineDescriptor, + PipelineLayoutDescriptor, RenderPipelineDescriptor, SamplerDescriptor, TextureDescriptor, + TextureViewDescriptor, TextureViewDimension, +}; + +use arrayvec::ArrayVec; +use futures::future::{ready, Ready}; +use smallvec::SmallVec; +use std::{ffi::CString, marker::PhantomData, ptr, slice}; + +macro_rules! 
gfx_select { + ($id:expr => $global:ident.$method:ident( $($param:expr),+ )) => { + match $id.backend() { + #[cfg(any(not(any(target_os = "ios", target_os = "macos")), feature = "gfx-backend-vulkan"))] + wgt::Backend::Vulkan => $global.$method::( $($param),+ ), + #[cfg(any(target_os = "ios", target_os = "macos"))] + wgt::Backend::Metal => $global.$method::( $($param),+ ), + #[cfg(windows)] + wgt::Backend::Dx12 => $global.$method::( $($param),+ ), + #[cfg(windows)] + wgt::Backend::Dx11 => $global.$method::( $($param),+ ), + _ => unreachable!() + } + }; +} + +pub type Context = wgc::hub::Global; + +mod pass_impl { + use super::Context; + use std::ops::Range; + use wgc::command::{compute_ffi::*, render_ffi::*}; + + impl crate::ComputePassInner for wgc::command::RawPass { + fn set_pipeline(&mut self, pipeline: &wgc::id::ComputePipelineId) { + unsafe { wgpu_compute_pass_set_pipeline(self, *pipeline) } + } + fn set_bind_group( + &mut self, + index: u32, + bind_group: &wgc::id::BindGroupId, + offsets: &[wgt::DynamicOffset], + ) { + unsafe { + wgpu_compute_pass_set_bind_group( + self, + index, + *bind_group, + offsets.as_ptr(), + offsets.len(), + ) + } + } + fn dispatch(&mut self, x: u32, y: u32, z: u32) { + unsafe { wgpu_compute_pass_dispatch(self, x, y, z) } + } + fn dispatch_indirect( + &mut self, + indirect_buffer: &wgc::id::BufferId, + indirect_offset: wgt::BufferAddress, + ) { + unsafe { wgpu_compute_pass_dispatch_indirect(self, *indirect_buffer, indirect_offset) } + } + } + + impl crate::RenderPassInner for wgc::command::RawPass { + fn set_pipeline(&mut self, pipeline: &wgc::id::RenderPipelineId) { + unsafe { wgpu_render_pass_set_pipeline(self, *pipeline) } + } + fn set_bind_group( + &mut self, + index: u32, + bind_group: &wgc::id::BindGroupId, + offsets: &[wgt::DynamicOffset], + ) { + unsafe { + wgpu_render_pass_set_bind_group( + self, + index, + *bind_group, + offsets.as_ptr(), + offsets.len(), + ) + } + } + fn set_index_buffer( + &mut self, + buffer: &wgc::id::BufferId, + offset: wgt::BufferAddress, + size: wgt::BufferAddress, + ) { + unsafe { wgpu_render_pass_set_index_buffer(self, *buffer, offset, size) } + } + fn set_vertex_buffer( + &mut self, + slot: u32, + buffer: &wgc::id::BufferId, + offset: wgt::BufferAddress, + size: wgt::BufferAddress, + ) { + unsafe { wgpu_render_pass_set_vertex_buffer(self, slot, *buffer, offset, size) } + } + fn set_blend_color(&mut self, color: wgt::Color) { + unsafe { wgpu_render_pass_set_blend_color(self, &color) } + } + fn set_scissor_rect(&mut self, x: u32, y: u32, width: u32, height: u32) { + unsafe { wgpu_render_pass_set_scissor_rect(self, x, y, width, height) } + } + fn set_viewport( + &mut self, + x: f32, + y: f32, + width: f32, + height: f32, + min_depth: f32, + max_depth: f32, + ) { + unsafe { + wgpu_render_pass_set_viewport(self, x, y, width, height, min_depth, max_depth) + } + } + fn set_stencil_reference(&mut self, reference: u32) { + unsafe { wgpu_render_pass_set_stencil_reference(self, reference) } + } + fn draw(&mut self, vertices: Range, instances: Range) { + unsafe { + wgpu_render_pass_draw( + self, + vertices.end - vertices.start, + instances.end - instances.start, + vertices.start, + instances.start, + ) + } + } + fn draw_indexed(&mut self, indices: Range, base_vertex: i32, instances: Range) { + unsafe { + wgpu_render_pass_draw_indexed( + self, + indices.end - indices.start, + instances.end - instances.start, + indices.start, + base_vertex, + instances.start, + ) + } + } + fn draw_indirect( + &mut self, + indirect_buffer: 
&wgc::id::BufferId, + indirect_offset: wgt::BufferAddress, + ) { + unsafe { wgpu_render_pass_draw_indirect(self, *indirect_buffer, indirect_offset) } + } + fn draw_indexed_indirect( + &mut self, + indirect_buffer: &wgc::id::BufferId, + indirect_offset: wgt::BufferAddress, + ) { + unsafe { + wgpu_render_pass_draw_indexed_indirect(self, *indirect_buffer, indirect_offset) + } + } + } +} + +fn map_buffer_copy_view(view: crate::BufferCopyView<'_>) -> wgc::command::BufferCopyView { + wgc::command::BufferCopyView { + buffer: view.buffer.id, + offset: view.offset, + bytes_per_row: view.bytes_per_row, + rows_per_image: view.rows_per_image, + } +} + +fn map_texture_copy_view<'a>(view: crate::TextureCopyView<'a>) -> wgc::command::TextureCopyView { + wgc::command::TextureCopyView { + texture: view.texture.id, + mip_level: view.mip_level, + array_layer: view.array_layer, + origin: view.origin, + } +} + +impl crate::Context for Context { + type AdapterId = wgc::id::AdapterId; + type DeviceId = wgc::id::DeviceId; + type QueueId = wgc::id::QueueId; + type ShaderModuleId = wgc::id::ShaderModuleId; + type BindGroupLayoutId = wgc::id::BindGroupLayoutId; + type BindGroupId = wgc::id::BindGroupId; + type TextureViewId = wgc::id::TextureViewId; + type SamplerId = wgc::id::SamplerId; + type BufferId = wgc::id::BufferId; + type TextureId = wgc::id::TextureId; + type PipelineLayoutId = wgc::id::PipelineLayoutId; + type RenderPipelineId = wgc::id::RenderPipelineId; + type ComputePipelineId = wgc::id::ComputePipelineId; + type CommandEncoderId = wgc::id::CommandEncoderId; + type ComputePassId = wgc::command::RawPass; + type CommandBufferId = wgc::id::CommandBufferId; + type SurfaceId = wgc::id::SurfaceId; + type SwapChainId = wgc::id::SwapChainId; + type RenderPassId = wgc::command::RawPass; + + type CreateBufferMappedDetail = CreateBufferMappedDetail; + type BufferReadMappingDetail = BufferReadMappingDetail; + type BufferWriteMappingDetail = BufferWriteMappingDetail; + type SwapChainOutputDetail = SwapChainOutputDetail; + + type RequestAdapterFuture = Ready>; + type RequestDeviceFuture = + Ready>; + type MapReadFuture = + native_gpu_future::GpuFuture>; + type MapWriteFuture = + native_gpu_future::GpuFuture>; + + fn init() -> Self { + wgc::hub::Global::new("wgpu", wgc::hub::IdentityManagerFactory) + } + + fn instance_create_surface( + &self, + window: &W, + ) -> Self::SurfaceId { + use raw_window_handle::RawWindowHandle as Rwh; + + let surface = match window.raw_window_handle() { + #[cfg(target_os = "ios")] + Rwh::IOS(h) => wgc::instance::Surface { + #[cfg(feature = "vulkan-portability")] + vulkan: None, + metal: self + .instance + .metal + .create_surface_from_uiview(h.ui_view, cfg!(debug_assertions)), + }, + #[cfg(target_os = "macos")] + Rwh::MacOS(h) => { + use objc::{msg_send, runtime::Object, sel, sel_impl}; + let ns_view = if h.ns_view.is_null() { + let ns_window = h.ns_window as *mut Object; + unsafe { msg_send![ns_window, contentView] } + } else { + h.ns_view + }; + wgc::instance::Surface { + #[cfg(feature = "vulkan-portability")] + vulkan: self + .instance + .vulkan + .as_ref() + .map(|inst| inst.create_surface_from_ns_view(ns_view)), + metal: self + .instance + .metal + .create_surface_from_nsview(ns_view, cfg!(debug_assertions)), + } + } + #[cfg(all(unix, not(target_os = "ios"), not(target_os = "macos")))] + Rwh::Xlib(h) => wgc::instance::Surface { + vulkan: self + .instance + .vulkan + .as_ref() + .map(|inst| inst.create_surface_from_xlib(h.display as _, h.window as _)), + }, + #[cfg(all(unix, 
not(target_os = "ios"), not(target_os = "macos")))] + Rwh::Wayland(h) => wgc::instance::Surface { + vulkan: self + .instance + .vulkan + .as_ref() + .map(|inst| inst.create_surface_from_wayland(h.display, h.surface)), + }, + #[cfg(windows)] + Rwh::Windows(h) => wgc::instance::Surface { + vulkan: self + .instance + .vulkan + .as_ref() + .map(|inst| inst.create_surface_from_hwnd(std::ptr::null_mut(), h.hwnd)), + dx12: self + .instance + .dx12 + .as_ref() + .map(|inst| inst.create_surface_from_hwnd(h.hwnd)), + dx11: self.instance.dx11.create_surface_from_hwnd(h.hwnd), + }, + _ => panic!("Unsupported window handle"), + }; + + let mut token = wgc::hub::Token::root(); + self.surfaces + .register_identity(PhantomData, surface, &mut token) + } + + fn instance_request_adapter( + &self, + options: &crate::RequestAdapterOptions<'_>, + backends: wgt::BackendBit, + ) -> Self::RequestAdapterFuture { + let id = self.pick_adapter( + &wgc::instance::RequestAdapterOptions { + power_preference: options.power_preference, + compatible_surface: options.compatible_surface.map(|surface| surface.id), + }, + wgc::instance::AdapterInputs::Mask(backends, || PhantomData), + ); + ready(id) + } + + fn adapter_request_device( + &self, + adapter: &Self::AdapterId, + desc: &crate::DeviceDescriptor, + ) -> Self::RequestDeviceFuture { + let device_id = + gfx_select!(*adapter => self.adapter_request_device(*adapter, desc, PhantomData)); + ready(Ok((device_id, device_id))) + } + + fn device_create_swap_chain( + &self, + device: &Self::DeviceId, + surface: &Self::SurfaceId, + desc: &wgt::SwapChainDescriptor, + ) -> Self::SwapChainId { + gfx_select!(*device => self.device_create_swap_chain(*device, *surface, desc)) + } + + fn device_create_shader_module( + &self, + device: &Self::DeviceId, + spv: &[u32], + ) -> Self::ShaderModuleId { + let desc = wgc::pipeline::ShaderModuleDescriptor { + code: wgc::U32Array { + bytes: spv.as_ptr(), + length: spv.len(), + }, + }; + gfx_select!(*device => self.device_create_shader_module(*device, &desc, PhantomData)) + } + + fn device_create_bind_group_layout( + &self, + device: &Self::DeviceId, + desc: &BindGroupLayoutDescriptor, + ) -> Self::BindGroupLayoutId { + use wgc::binding_model as bm; + + let temp_layouts = desc + .bindings + .iter() + .map(|bind| bm::BindGroupLayoutEntry { + binding: bind.binding, + visibility: bind.visibility, + ty: match bind.ty { + BindingType::UniformBuffer { .. } => bm::BindingType::UniformBuffer, + BindingType::StorageBuffer { + readonly: false, .. + } => bm::BindingType::StorageBuffer, + BindingType::StorageBuffer { readonly: true, .. } => { + bm::BindingType::ReadonlyStorageBuffer + } + BindingType::Sampler { comparison: false } => bm::BindingType::Sampler, + BindingType::Sampler { .. } => bm::BindingType::ComparisonSampler, + BindingType::SampledTexture { .. } => bm::BindingType::SampledTexture, + BindingType::StorageTexture { readonly: true, .. } => { + bm::BindingType::ReadonlyStorageTexture + } + BindingType::StorageTexture { .. } => bm::BindingType::WriteonlyStorageTexture, + }, + has_dynamic_offset: match bind.ty { + BindingType::UniformBuffer { dynamic } + | BindingType::StorageBuffer { dynamic, .. } => dynamic, + _ => false, + }, + multisampled: match bind.ty { + BindingType::SampledTexture { multisampled, .. } => multisampled, + _ => false, + }, + view_dimension: match bind.ty { + BindingType::SampledTexture { dimension, .. } + | BindingType::StorageTexture { dimension, .. 
} => dimension, + _ => TextureViewDimension::D2, + }, + texture_component_type: match bind.ty { + BindingType::SampledTexture { component_type, .. } + | BindingType::StorageTexture { component_type, .. } => component_type, + _ => wgt::TextureComponentType::Float, + }, + storage_texture_format: match bind.ty { + BindingType::StorageTexture { format, .. } => format, + _ => wgt::TextureFormat::Rgb10a2Unorm, // doesn't matter + }, + }) + .collect::>(); + + let owned_label = OwnedLabel::new(desc.label.as_deref()); + gfx_select!(*device => self.device_create_bind_group_layout( + *device, + &bm::BindGroupLayoutDescriptor { + entries: temp_layouts.as_ptr(), + entries_length: temp_layouts.len(), + label: owned_label.as_ptr(), + }, + PhantomData + )) + } + + fn device_create_bind_group( + &self, + device: &Self::DeviceId, + desc: &BindGroupDescriptor, + ) -> Self::BindGroupId { + use wgc::binding_model as bm; + + let bindings = desc + .bindings + .iter() + .map(|binding| bm::BindGroupEntry { + binding: binding.binding, + resource: match binding.resource { + BindingResource::Buffer { + ref buffer, + ref range, + } => bm::BindingResource::Buffer(bm::BufferBinding { + buffer: buffer.id, + offset: range.start, + size: range.end - range.start, + }), + BindingResource::Sampler(ref sampler) => { + bm::BindingResource::Sampler(sampler.id) + } + BindingResource::TextureView(ref texture_view) => { + bm::BindingResource::TextureView(texture_view.id) + } + }, + }) + .collect::>(); + + let owned_label = OwnedLabel::new(desc.label.as_deref()); + gfx_select!(*device => self.device_create_bind_group( + *device, + &bm::BindGroupDescriptor { + layout: desc.layout.id, + entries: bindings.as_ptr(), + entries_length: bindings.len(), + label: owned_label.as_ptr(), + }, + PhantomData + )) + } + + fn device_create_pipeline_layout( + &self, + device: &Self::DeviceId, + desc: &PipelineLayoutDescriptor, + ) -> Self::PipelineLayoutId { + //TODO: avoid allocation here + let temp_layouts = desc + .bind_group_layouts + .iter() + .map(|bgl| bgl.id) + .collect::>(); + + gfx_select!(*device => self.device_create_pipeline_layout( + *device, + &wgc::binding_model::PipelineLayoutDescriptor { + bind_group_layouts: temp_layouts.as_ptr(), + bind_group_layouts_length: temp_layouts.len(), + }, + PhantomData + )) + } + + fn device_create_render_pipeline( + &self, + device: &Self::DeviceId, + desc: &RenderPipelineDescriptor, + ) -> Self::RenderPipelineId { + use wgc::pipeline as pipe; + + let vertex_entry_point = CString::new(desc.vertex_stage.entry_point).unwrap(); + let vertex_stage = pipe::ProgrammableStageDescriptor { + module: desc.vertex_stage.module.id, + entry_point: vertex_entry_point.as_ptr(), + }; + let (_fragment_entry_point, fragment_stage) = + if let Some(fragment_stage) = &desc.fragment_stage { + let fragment_entry_point = CString::new(fragment_stage.entry_point).unwrap(); + let fragment_stage = pipe::ProgrammableStageDescriptor { + module: fragment_stage.module.id, + entry_point: fragment_entry_point.as_ptr(), + }; + (fragment_entry_point, Some(fragment_stage)) + } else { + (CString::default(), None) + }; + + let temp_color_states = desc.color_states.to_vec(); + let temp_vertex_buffers = desc + .vertex_state + .vertex_buffers + .iter() + .map(|vbuf| pipe::VertexBufferLayoutDescriptor { + array_stride: vbuf.stride, + step_mode: vbuf.step_mode, + attributes: vbuf.attributes.as_ptr(), + attributes_length: vbuf.attributes.len(), + }) + .collect::>(); + + gfx_select!(*device => self.device_create_render_pipeline( + *device, + 
&pipe::RenderPipelineDescriptor { + layout: desc.layout.id, + vertex_stage, + fragment_stage: fragment_stage + .as_ref() + .map_or(ptr::null(), |fs| fs as *const _), + rasterization_state: desc + .rasterization_state + .as_ref() + .map_or(ptr::null(), |p| p as *const _), + primitive_topology: desc.primitive_topology, + color_states: temp_color_states.as_ptr(), + color_states_length: temp_color_states.len(), + depth_stencil_state: desc + .depth_stencil_state + .as_ref() + .map_or(ptr::null(), |p| p as *const _), + vertex_state: pipe::VertexStateDescriptor { + index_format: desc.vertex_state.index_format, + vertex_buffers: temp_vertex_buffers.as_ptr(), + vertex_buffers_length: temp_vertex_buffers.len(), + }, + sample_count: desc.sample_count, + sample_mask: desc.sample_mask, + alpha_to_coverage_enabled: desc.alpha_to_coverage_enabled, + }, + PhantomData + )) + } + + fn device_create_compute_pipeline( + &self, + device: &Self::DeviceId, + desc: &ComputePipelineDescriptor, + ) -> Self::ComputePipelineId { + use wgc::pipeline as pipe; + + let entry_point = CString::new(desc.compute_stage.entry_point).unwrap(); + + gfx_select!(*device => self.device_create_compute_pipeline( + *device, + &pipe::ComputePipelineDescriptor { + layout: desc.layout.id, + compute_stage: pipe::ProgrammableStageDescriptor { + module: desc.compute_stage.module.id, + entry_point: entry_point.as_ptr(), + }, + }, + PhantomData + )) + } + + fn device_create_buffer_mapped<'a>( + &self, + device: &Self::DeviceId, + desc: &BufferDescriptor, + ) -> (Self::BufferId, &'a mut [u8], Self::CreateBufferMappedDetail) { + let owned_label = OwnedLabel::new(desc.label.as_deref()); + unsafe { + let (id, ptr) = gfx_select!(*device => self.device_create_buffer_mapped( + *device, + &wgt::BufferDescriptor { + label: owned_label.as_ptr(), + size: desc.size, + usage: desc.usage, + }, + PhantomData + )); + let mapped_data = std::slice::from_raw_parts_mut(ptr, desc.size as usize); + (id, mapped_data, CreateBufferMappedDetail) + } + } + + fn device_create_buffer( + &self, + device: &Self::DeviceId, + desc: &BufferDescriptor, + ) -> Self::BufferId { + let owned_label = OwnedLabel::new(desc.label.as_deref()); + gfx_select!(*device => self.device_create_buffer( + *device, + &wgt::BufferDescriptor { + label: owned_label.as_ptr(), + size: desc.size, + usage: desc.usage, + }, + PhantomData + )) + } + + fn device_create_texture( + &self, + device: &Self::DeviceId, + desc: &TextureDescriptor, + ) -> Self::TextureId { + let owned_label = OwnedLabel::new(desc.label.as_deref()); + gfx_select!(*device => self.device_create_texture( + *device, + &wgt::TextureDescriptor { + label: owned_label.as_ptr(), + size: desc.size, + mip_level_count: desc.mip_level_count, + sample_count: desc.sample_count, + dimension: desc.dimension, + format: desc.format, + usage: desc.usage, + }, + PhantomData + )) + } + + fn device_create_sampler( + &self, + device: &Self::DeviceId, + desc: &SamplerDescriptor, + ) -> Self::SamplerId { + gfx_select!(*device => self.device_create_sampler(*device, desc, PhantomData)) + } + + fn device_create_command_encoder( + &self, + device: &Self::DeviceId, + desc: &CommandEncoderDescriptor, + ) -> Self::CommandEncoderId { + let owned_label = OwnedLabel::new(desc.label.as_deref()); + gfx_select!(*device => self.device_create_command_encoder( + *device, + &wgt::CommandEncoderDescriptor { + label: owned_label.as_ptr(), + }, + PhantomData + )) + } + + fn device_drop(&self, device: &Self::DeviceId) { + #[cfg(not(target_arch = "wasm32"))] + 
gfx_select!(*device => self.device_poll(*device, true)); + //TODO: make this work in general + #[cfg(not(target_arch = "wasm32"))] + #[cfg(feature = "metal-auto-capture")] + gfx_select!(*device => self.device_destroy(*device)); + } + + fn device_poll(&self, device: &Self::DeviceId, maintain: crate::Maintain) { + gfx_select!(*device => self.device_poll( + *device, + match maintain { + crate::Maintain::Poll => false, + crate::Maintain::Wait => true, + } + )); + } + + fn buffer_map_read( + &self, + buffer: &Self::BufferId, + start: wgt::BufferAddress, + size: wgt::BufferAddress, + ) -> Self::MapReadFuture { + let (future, completion) = native_gpu_future::new_gpu_future(*buffer, size); + + extern "C" fn buffer_map_read_future_wrapper( + status: wgc::resource::BufferMapAsyncStatus, + data: *const u8, + user_data: *mut u8, + ) { + let completion = + unsafe { native_gpu_future::GpuFutureCompletion::from_raw(user_data as _) }; + let (buffer_id, size) = completion.get_buffer_info(); + + if let wgc::resource::BufferMapAsyncStatus::Success = status { + completion.complete(Ok(BufferReadMappingDetail { + data, + size: size as usize, + buffer_id, + })); + } else { + completion.complete(Err(crate::BufferAsyncError)); + } + } + + let operation = wgc::resource::BufferMapOperation::Read { + callback: buffer_map_read_future_wrapper, + userdata: completion.to_raw() as _, + }; + gfx_select!(*buffer => self.buffer_map_async(*buffer, start .. start + size, operation)); + + future + } + + fn buffer_map_write( + &self, + buffer: &Self::BufferId, + start: wgt::BufferAddress, + size: wgt::BufferAddress, + ) -> Self::MapWriteFuture { + let (future, completion) = native_gpu_future::new_gpu_future(*buffer, size); + + extern "C" fn buffer_map_write_future_wrapper( + status: wgc::resource::BufferMapAsyncStatus, + data: *mut u8, + user_data: *mut u8, + ) { + let completion = + unsafe { native_gpu_future::GpuFutureCompletion::from_raw(user_data as _) }; + let (buffer_id, size) = completion.get_buffer_info(); + + if let wgc::resource::BufferMapAsyncStatus::Success = status { + completion.complete(Ok(BufferWriteMappingDetail { + data, + size: size as usize, + buffer_id, + })); + } else { + completion.complete(Err(crate::BufferAsyncError)); + } + } + + let operation = wgc::resource::BufferMapOperation::Write { + callback: buffer_map_write_future_wrapper, + userdata: completion.to_raw() as _, + }; + gfx_select!(*buffer => self.buffer_map_async(*buffer, start .. 
start + size, operation)); + + future + } + + fn buffer_unmap(&self, buffer: &Self::BufferId) { + gfx_select!(*buffer => self.buffer_unmap(*buffer)) + } + + fn swap_chain_get_next_texture( + &self, + swap_chain: &Self::SwapChainId, + ) -> Result<(Self::TextureViewId, Self::SwapChainOutputDetail), crate::TimeOut> { + gfx_select!(*swap_chain => self.swap_chain_get_next_texture(*swap_chain, PhantomData)) + .map(|output| { + ( + output.view_id.unwrap(), + SwapChainOutputDetail { + swap_chain_id: *swap_chain, + }, + ) + }) + .map_err(|_| crate::TimeOut) + } + + fn swap_chain_present(&self, view: &Self::TextureViewId, detail: &Self::SwapChainOutputDetail) { + gfx_select!(*view => self.swap_chain_present(detail.swap_chain_id)) + } + + fn texture_create_view( + &self, + texture: &Self::TextureId, + desc: Option<&TextureViewDescriptor>, + ) -> Self::TextureViewId { + gfx_select!(*texture => self.texture_create_view(*texture, desc, PhantomData)) + } + + fn texture_drop(&self, texture: &Self::TextureId) { + gfx_select!(*texture => self.texture_destroy(*texture)) + } + fn texture_view_drop(&self, texture_view: &Self::TextureViewId) { + gfx_select!(*texture_view => self.texture_view_destroy(*texture_view)) + } + fn sampler_drop(&self, sampler: &Self::SamplerId) { + gfx_select!(*sampler => self.sampler_destroy(*sampler)) + } + fn buffer_drop(&self, buffer: &Self::BufferId) { + gfx_select!(*buffer => self.buffer_destroy(*buffer)) + } + fn bind_group_drop(&self, bind_group: &Self::BindGroupId) { + gfx_select!(*bind_group => self.bind_group_destroy(*bind_group)) + } + fn bind_group_layout_drop(&self, bind_group_layout: &Self::BindGroupLayoutId) { + gfx_select!(*bind_group_layout => self.bind_group_layout_destroy(*bind_group_layout)) + } + fn pipeline_layout_drop(&self, pipeline_layout: &Self::PipelineLayoutId) { + gfx_select!(*pipeline_layout => self.pipeline_layout_destroy(*pipeline_layout)) + } + fn shader_module_drop(&self, shader_module: &Self::ShaderModuleId) { + gfx_select!(*shader_module => self.shader_module_destroy(*shader_module)) + } + fn command_buffer_drop(&self, command_buffer: &Self::CommandBufferId) { + gfx_select!(*command_buffer => self.command_buffer_destroy(*command_buffer)) + } + fn compute_pipeline_drop(&self, pipeline: &Self::ComputePipelineId) { + gfx_select!(*pipeline => self.compute_pipeline_destroy(*pipeline)) + } + fn render_pipeline_drop(&self, pipeline: &Self::RenderPipelineId) { + gfx_select!(*pipeline => self.render_pipeline_destroy(*pipeline)) + } + + fn flush_mapped_data(_data: &mut [u8], _detail: CreateBufferMappedDetail) {} + + fn encoder_copy_buffer_to_buffer( + &self, + encoder: &Self::CommandEncoderId, + source: &Self::BufferId, + source_offset: wgt::BufferAddress, + destination: &Self::BufferId, + destination_offset: wgt::BufferAddress, + copy_size: wgt::BufferAddress, + ) { + gfx_select!(*encoder => self.command_encoder_copy_buffer_to_buffer( + *encoder, + *source, + source_offset, + *destination, + destination_offset, + copy_size + )) + } + + fn encoder_copy_buffer_to_texture( + &self, + encoder: &Self::CommandEncoderId, + source: crate::BufferCopyView, + destination: crate::TextureCopyView, + copy_size: wgt::Extent3d, + ) { + gfx_select!(*encoder => self.command_encoder_copy_buffer_to_texture( + *encoder, + &map_buffer_copy_view(source), + &map_texture_copy_view(destination), + copy_size + )) + } + + fn encoder_copy_texture_to_buffer( + &self, + encoder: &Self::CommandEncoderId, + source: crate::TextureCopyView, + destination: crate::BufferCopyView, + copy_size: 
wgt::Extent3d, + ) { + gfx_select!(*encoder => self.command_encoder_copy_texture_to_buffer( + *encoder, + &map_texture_copy_view(source), + &map_buffer_copy_view(destination), + copy_size + )) + } + + fn encoder_copy_texture_to_texture( + &self, + encoder: &Self::CommandEncoderId, + source: crate::TextureCopyView, + destination: crate::TextureCopyView, + copy_size: wgt::Extent3d, + ) { + gfx_select!(*encoder => self.command_encoder_copy_texture_to_texture( + *encoder, + &map_texture_copy_view(source), + &map_texture_copy_view(destination), + copy_size + )) + } + + fn encoder_begin_compute_pass(&self, encoder: &Self::CommandEncoderId) -> Self::ComputePassId { + unsafe { wgc::command::RawPass::new_compute(*encoder) } + } + + fn encoder_end_compute_pass( + &self, + encoder: &Self::CommandEncoderId, + pass: &mut Self::ComputePassId, + ) { + let data = unsafe { + let mut length = 0; + let ptr = wgc::command::compute_ffi::wgpu_compute_pass_finish(pass, &mut length); + slice::from_raw_parts(ptr, length) + }; + gfx_select!(*encoder => self.command_encoder_run_compute_pass(*encoder, data)) + } + + fn encoder_begin_render_pass<'a>( + &self, + encoder: &Self::CommandEncoderId, + desc: &crate::RenderPassDescriptor<'a, '_>, + ) -> Self::RenderPassId { + let colors = desc + .color_attachments + .iter() + .map(|ca| wgc::command::RenderPassColorAttachmentDescriptor { + attachment: ca.attachment.id, + resolve_target: ca.resolve_target.map(|rt| rt.id), + load_op: ca.load_op, + store_op: ca.store_op, + clear_color: ca.clear_color, + }) + .collect::>(); + + let depth_stencil = desc.depth_stencil_attachment.as_ref().map(|dsa| { + wgc::command::RenderPassDepthStencilAttachmentDescriptor { + attachment: dsa.attachment.id, + depth_load_op: dsa.depth_load_op, + depth_store_op: dsa.depth_store_op, + clear_depth: dsa.clear_depth, + stencil_load_op: dsa.stencil_load_op, + stencil_store_op: dsa.stencil_store_op, + clear_stencil: dsa.clear_stencil, + } + }); + + unsafe { + wgc::command::RawPass::new_render( + *encoder, + &wgc::command::RenderPassDescriptor { + color_attachments: colors.as_ptr(), + color_attachments_length: colors.len(), + depth_stencil_attachment: depth_stencil.as_ref(), + }, + ) + } + } + + fn encoder_end_render_pass( + &self, + encoder: &Self::CommandEncoderId, + pass: &mut Self::RenderPassId, + ) { + let data = unsafe { + let mut length = 0; + let ptr = wgc::command::render_ffi::wgpu_render_pass_finish(pass, &mut length); + slice::from_raw_parts(ptr, length) + }; + gfx_select!(*encoder => self.command_encoder_run_render_pass(*encoder, data)) + } + + fn encoder_finish(&self, encoder: &Self::CommandEncoderId) -> Self::CommandBufferId { + let desc = wgt::CommandBufferDescriptor::default(); + gfx_select!(*encoder => self.command_encoder_finish(*encoder, &desc)) + } + + fn queue_submit>( + &self, + queue: &Self::QueueId, + command_buffers: I, + ) { + let temp_command_buffers = command_buffers.collect::>(); + + gfx_select!(*queue => self.queue_submit(*queue, &temp_command_buffers)) + } +} + +pub(crate) struct CreateBufferMappedDetail; + +pub(crate) struct BufferReadMappingDetail { + pub(crate) buffer_id: wgc::id::BufferId, + data: *const u8, + size: usize, +} + +impl BufferReadMappingDetail { + pub(crate) fn as_slice(&self) -> &[u8] { + unsafe { slice::from_raw_parts(self.data as *const u8, self.size) } + } +} + +pub(crate) struct BufferWriteMappingDetail { + pub(crate) buffer_id: wgc::id::BufferId, + data: *mut u8, + size: usize, +} + +impl BufferWriteMappingDetail { + pub(crate) fn as_slice(&mut self) 
-> &mut [u8] { + unsafe { slice::from_raw_parts_mut(self.data as *mut u8, self.size) } + } +} + +#[derive(Debug)] +pub(crate) struct SwapChainOutputDetail { + swap_chain_id: wgc::id::SwapChainId, +} + +struct OwnedLabel(Option); + +impl OwnedLabel { + fn new(text: Option<&str>) -> Self { + Self(text.map(|t| CString::new(t).expect("invalid label"))) + } + + fn as_ptr(&self) -> *const std::os::raw::c_char { + match self.0 { + Some(ref c_string) => c_string.as_ptr(), + None => ptr::null(), + } + } +} diff --git a/src/backend/mod.rs b/src/backend/mod.rs index e447274f6..854488de9 100644 --- a/src/backend/mod.rs +++ b/src/backend/mod.rs @@ -1,13 +1,13 @@ #[cfg(target_arch = "wasm32")] mod web; #[cfg(target_arch = "wasm32")] -pub use web::*; +pub(crate) use web::Context; #[cfg(not(target_arch = "wasm32"))] -mod native; +mod direct; #[cfg(not(target_arch = "wasm32"))] -pub use native::*; +pub(crate) use direct::Context; #[cfg(not(target_arch = "wasm32"))] mod native_gpu_future; diff --git a/src/backend/native.rs b/src/backend/native.rs deleted file mode 100644 index b6f46bd0b..000000000 --- a/src/backend/native.rs +++ /dev/null @@ -1,941 +0,0 @@ -use wgn; - -use crate::{ - backend::native_gpu_future, BindGroupDescriptor, BindGroupLayoutDescriptor, BindingResource, - BindingType, BufferDescriptor, CommandEncoderDescriptor, ComputePipelineDescriptor, - PipelineLayoutDescriptor, RenderPipelineDescriptor, SamplerDescriptor, TextureDescriptor, - TextureViewDescriptor, TextureViewDimension, -}; - -use arrayvec::ArrayVec; -use smallvec::SmallVec; -use std::{ffi::CString, future::Future, ops::Range, ptr, slice}; - -pub type AdapterId = wgc::id::AdapterId; -pub type DeviceId = wgc::id::DeviceId; -pub type QueueId = wgc::id::QueueId; -pub type ShaderModuleId = wgc::id::ShaderModuleId; -pub type BindGroupLayoutId = wgc::id::BindGroupLayoutId; -pub type BindGroupId = wgc::id::BindGroupId; -pub type TextureViewId = wgc::id::TextureViewId; -pub type SamplerId = wgc::id::SamplerId; -pub type BufferId = wgc::id::BufferId; -pub type TextureId = wgc::id::TextureId; -pub type PipelineLayoutId = wgc::id::PipelineLayoutId; -pub type RenderPipelineId = wgc::id::RenderPipelineId; -pub type ComputePipelineId = wgc::id::ComputePipelineId; -pub type CommandEncoderId = wgc::id::CommandEncoderId; -pub type ComputePassId = wgc::id::ComputePassId; -pub type CommandBufferId = wgc::id::CommandBufferId; -pub type SurfaceId = wgc::id::SurfaceId; -pub type SwapChainId = wgc::id::SwapChainId; -pub type RenderPassEncoderId = wgc::id::RenderPassId; - -fn map_buffer_copy_view(view: crate::BufferCopyView<'_>) -> wgc::command::BufferCopyView { - wgc::command::BufferCopyView { - buffer: view.buffer.id, - offset: view.offset, - bytes_per_row: view.bytes_per_row, - rows_per_image: view.rows_per_image, - } -} - -fn map_texture_copy_view<'a>(view: crate::TextureCopyView<'a>) -> wgc::command::TextureCopyView { - wgc::command::TextureCopyView { - texture: view.texture.id, - mip_level: view.mip_level, - array_layer: view.array_layer, - origin: view.origin, - } -} - -pub(crate) async fn request_adapter( - options: &crate::RequestAdapterOptions<'_>, - backends: wgt::BackendBit, -) -> Option { - unsafe extern "C" fn adapter_callback( - id: Option, - user_data: *mut std::ffi::c_void, - ) { - *(user_data as *mut Option) = id; - } - - let mut id_maybe = None; - unsafe { - wgn::wgpu_request_adapter_async( - Some(&wgc::instance::RequestAdapterOptions { - power_preference: options.power_preference, - compatible_surface: 
options.compatible_surface.map(|surface| surface.id), - }), - backends, - adapter_callback, - &mut id_maybe as *mut _ as *mut std::ffi::c_void, - ) - }; - id_maybe -} - -pub(crate) async fn request_device_and_queue( - adapter: &AdapterId, - desc: Option<&wgt::DeviceDescriptor>, -) -> (DeviceId, QueueId) { - let device_id = wgn::wgpu_adapter_request_device(*adapter, desc); - (device_id, wgn::wgpu_device_get_default_queue(device_id)) -} - -pub(crate) fn create_shader_module(device: &DeviceId, spv: &[u32]) -> ShaderModuleId { - let desc = wgc::pipeline::ShaderModuleDescriptor { - code: wgc::U32Array { - bytes: spv.as_ptr(), - length: spv.len(), - }, - }; - wgn::wgpu_device_create_shader_module(*device, &desc) -} - -pub(crate) fn create_bind_group_layout( - device: &DeviceId, - desc: &BindGroupLayoutDescriptor, -) -> BindGroupLayoutId { - use wgc::binding_model as bm; - - let temp_layouts = desc - .bindings - .iter() - .map(|bind| bm::BindGroupLayoutEntry { - binding: bind.binding, - visibility: bind.visibility, - ty: match bind.ty { - BindingType::UniformBuffer { .. } => bm::BindingType::UniformBuffer, - BindingType::StorageBuffer { - readonly: false, .. - } => bm::BindingType::StorageBuffer, - BindingType::StorageBuffer { readonly: true, .. } => { - bm::BindingType::ReadonlyStorageBuffer - } - BindingType::Sampler { comparison: false } => bm::BindingType::Sampler, - BindingType::Sampler { .. } => bm::BindingType::ComparisonSampler, - BindingType::SampledTexture { .. } => bm::BindingType::SampledTexture, - BindingType::StorageTexture { readonly: true, .. } => { - bm::BindingType::ReadonlyStorageTexture - } - BindingType::StorageTexture { .. } => bm::BindingType::WriteonlyStorageTexture, - }, - has_dynamic_offset: match bind.ty { - BindingType::UniformBuffer { dynamic } - | BindingType::StorageBuffer { dynamic, .. } => dynamic, - _ => false, - }, - multisampled: match bind.ty { - BindingType::SampledTexture { multisampled, .. } => multisampled, - _ => false, - }, - view_dimension: match bind.ty { - BindingType::SampledTexture { dimension, .. } - | BindingType::StorageTexture { dimension, .. } => dimension, - _ => TextureViewDimension::D2, - }, - texture_component_type: match bind.ty { - BindingType::SampledTexture { component_type, .. } - | BindingType::StorageTexture { component_type, .. } => component_type, - _ => wgt::TextureComponentType::Float, - }, - storage_texture_format: match bind.ty { - BindingType::StorageTexture { format, .. 
} => format, - _ => wgt::TextureFormat::Rgb10a2Unorm, // doesn't matter - }, - }) - .collect::>(); - - let owned_label = OwnedLabel::new(desc.label.as_deref()); - wgn::wgpu_device_create_bind_group_layout( - *device, - &bm::BindGroupLayoutDescriptor { - entries: temp_layouts.as_ptr(), - entries_length: temp_layouts.len(), - label: owned_label.as_ptr(), - }, - ) -} - -pub(crate) fn create_bind_group(device: &DeviceId, desc: &BindGroupDescriptor) -> BindGroupId { - use wgc::binding_model as bm; - - let bindings = desc - .bindings - .iter() - .map(|binding| bm::BindGroupEntry { - binding: binding.binding, - resource: match binding.resource { - BindingResource::Buffer { - ref buffer, - ref range, - } => bm::BindingResource::Buffer(bm::BufferBinding { - buffer: buffer.id, - offset: range.start, - size: range.end - range.start, - }), - BindingResource::Sampler(ref sampler) => bm::BindingResource::Sampler(sampler.id), - BindingResource::TextureView(ref texture_view) => { - bm::BindingResource::TextureView(texture_view.id) - } - }, - }) - .collect::>(); - - let owned_label = OwnedLabel::new(desc.label.as_deref()); - wgn::wgpu_device_create_bind_group( - *device, - &bm::BindGroupDescriptor { - layout: desc.layout.id, - entries: bindings.as_ptr(), - entries_length: bindings.len(), - label: owned_label.as_ptr(), - }, - ) -} - -pub(crate) fn create_pipeline_layout( - device: &DeviceId, - desc: &PipelineLayoutDescriptor, -) -> PipelineLayoutId { - //TODO: avoid allocation here - let temp_layouts = desc - .bind_group_layouts - .iter() - .map(|bgl| bgl.id) - .collect::>(); - wgn::wgpu_device_create_pipeline_layout( - *device, - &wgc::binding_model::PipelineLayoutDescriptor { - bind_group_layouts: temp_layouts.as_ptr(), - bind_group_layouts_length: temp_layouts.len(), - }, - ) -} - -pub(crate) fn create_render_pipeline( - device: &DeviceId, - desc: &RenderPipelineDescriptor, -) -> RenderPipelineId { - use wgc::pipeline as pipe; - - let vertex_entry_point = CString::new(desc.vertex_stage.entry_point).unwrap(); - let vertex_stage = pipe::ProgrammableStageDescriptor { - module: desc.vertex_stage.module.id, - entry_point: vertex_entry_point.as_ptr(), - }; - let (_fragment_entry_point, fragment_stage) = if let Some(fragment_stage) = &desc.fragment_stage - { - let fragment_entry_point = CString::new(fragment_stage.entry_point).unwrap(); - let fragment_stage = pipe::ProgrammableStageDescriptor { - module: fragment_stage.module.id, - entry_point: fragment_entry_point.as_ptr(), - }; - (fragment_entry_point, Some(fragment_stage)) - } else { - (CString::default(), None) - }; - - let temp_color_states = desc.color_states.to_vec(); - let temp_vertex_buffers = desc - .vertex_state - .vertex_buffers - .iter() - .map(|vbuf| pipe::VertexBufferLayoutDescriptor { - array_stride: vbuf.stride, - step_mode: vbuf.step_mode, - attributes: vbuf.attributes.as_ptr(), - attributes_length: vbuf.attributes.len(), - }) - .collect::>(); - - wgn::wgpu_device_create_render_pipeline( - *device, - &pipe::RenderPipelineDescriptor { - layout: desc.layout.id, - vertex_stage, - fragment_stage: fragment_stage - .as_ref() - .map_or(ptr::null(), |fs| fs as *const _), - rasterization_state: desc - .rasterization_state - .as_ref() - .map_or(ptr::null(), |p| p as *const _), - primitive_topology: desc.primitive_topology, - color_states: temp_color_states.as_ptr(), - color_states_length: temp_color_states.len(), - depth_stencil_state: desc - .depth_stencil_state - .as_ref() - .map_or(ptr::null(), |p| p as *const _), - vertex_state: 
pipe::VertexStateDescriptor { - index_format: desc.vertex_state.index_format, - vertex_buffers: temp_vertex_buffers.as_ptr(), - vertex_buffers_length: temp_vertex_buffers.len(), - }, - sample_count: desc.sample_count, - sample_mask: desc.sample_mask, - alpha_to_coverage_enabled: desc.alpha_to_coverage_enabled, - }, - ) -} - -pub(crate) fn create_compute_pipeline( - device: &DeviceId, - desc: &ComputePipelineDescriptor, -) -> ComputePipelineId { - use wgc::pipeline as pipe; - - let entry_point = CString::new(desc.compute_stage.entry_point).unwrap(); - - wgn::wgpu_device_create_compute_pipeline( - *device, - &pipe::ComputePipelineDescriptor { - layout: desc.layout.id, - compute_stage: pipe::ProgrammableStageDescriptor { - module: desc.compute_stage.module.id, - entry_point: entry_point.as_ptr(), - }, - }, - ) -} - -pub(crate) type CreateBufferMappedDetail = BufferDetail; - -pub(crate) fn device_create_buffer_mapped<'a>( - device: &DeviceId, - desc: &BufferDescriptor, -) -> crate::CreateBufferMapped<'a> { - let owned_label = OwnedLabel::new(desc.label.as_deref()); - let mut data_ptr: *mut u8 = std::ptr::null_mut(); - unsafe { - let id = wgn::wgpu_device_create_buffer_mapped( - *device, - &wgt::BufferDescriptor { - label: owned_label.as_ptr(), - size: desc.size, - usage: desc.usage, - }, - &mut data_ptr as *mut *mut u8, - ); - let mapped_data = std::slice::from_raw_parts_mut(data_ptr as *mut u8, desc.size as usize); - crate::CreateBufferMapped { - id, - mapped_data, - detail: CreateBufferMappedDetail { device_id: *device }, - } - } -} - -#[derive(Debug, Hash, PartialEq)] -pub(crate) struct BufferDetail { - /// On native we need to track the device in order to later destroy the - /// buffer. - device_id: DeviceId, -} - -pub(crate) fn device_create_buffer_mapped_finish( - create_buffer_mapped: crate::CreateBufferMapped<'_>, -) -> crate::Buffer { - buffer_unmap(&create_buffer_mapped.id); - crate::Buffer { - id: create_buffer_mapped.id, - detail: BufferDetail { - device_id: create_buffer_mapped.detail.device_id, - }, - } -} - -pub(crate) fn buffer_unmap(buffer: &BufferId) { - wgn::wgpu_buffer_unmap(*buffer); -} - -pub(crate) fn buffer_drop(buffer: &BufferId) { - wgn::wgpu_buffer_destroy(*buffer); -} - -pub(crate) fn device_create_buffer(device: &DeviceId, desc: &BufferDescriptor) -> crate::Buffer { - let owned_label = OwnedLabel::new(desc.label.as_deref()); - crate::Buffer { - id: wgn::wgpu_device_create_buffer( - *device, - &wgt::BufferDescriptor { - label: owned_label.as_ptr(), - size: desc.size, - usage: desc.usage, - }, - ), - detail: BufferDetail { device_id: *device }, - } -} - -pub(crate) fn device_create_texture(device: &DeviceId, desc: &TextureDescriptor) -> TextureId { - let owned_label = OwnedLabel::new(desc.label.as_deref()); - wgn::wgpu_device_create_texture( - *device, - &wgt::TextureDescriptor { - label: owned_label.as_ptr(), - size: desc.size, - mip_level_count: desc.mip_level_count, - sample_count: desc.sample_count, - dimension: desc.dimension, - format: desc.format, - usage: desc.usage, - }, - ) -} - -pub(crate) fn device_create_sampler(device: &DeviceId, desc: &SamplerDescriptor) -> SamplerId { - wgn::wgpu_device_create_sampler(*device, desc) -} - -pub(crate) fn create_command_encoder( - device: &DeviceId, - desc: &CommandEncoderDescriptor, -) -> CommandEncoderId { - let owned_label = OwnedLabel::new(desc.label.as_deref()); - wgn::wgpu_device_create_command_encoder( - *device, - Some(&wgt::CommandEncoderDescriptor { - label: owned_label.as_ptr(), - }), - ) -} - -pub(crate) fn 
command_encoder_copy_buffer_to_buffer( - command_encoder: &CommandEncoderId, - source: &crate::Buffer, - source_offset: wgt::BufferAddress, - destination: &crate::Buffer, - destination_offset: wgt::BufferAddress, - copy_size: wgt::BufferAddress, -) { - wgn::wgpu_command_encoder_copy_buffer_to_buffer( - *command_encoder, - source.id, - source_offset, - destination.id, - destination_offset, - copy_size, - ); -} - -pub(crate) fn command_encoder_copy_buffer_to_texture( - command_encoder: &CommandEncoderId, - source: crate::BufferCopyView, - destination: crate::TextureCopyView, - copy_size: wgt::Extent3d, -) { - wgn::wgpu_command_encoder_copy_buffer_to_texture( - *command_encoder, - &map_buffer_copy_view(source), - &map_texture_copy_view(destination), - copy_size, - ); -} - -pub(crate) fn command_encoder_copy_texture_to_buffer( - command_encoder: &CommandEncoderId, - source: crate::TextureCopyView, - destination: crate::BufferCopyView, - copy_size: wgt::Extent3d, -) { - wgn::wgpu_command_encoder_copy_texture_to_buffer( - *command_encoder, - &map_texture_copy_view(source), - &map_buffer_copy_view(destination), - copy_size, - ); -} - -pub(crate) fn command_encoder_copy_texture_to_texture( - command_encoder: &CommandEncoderId, - source: crate::TextureCopyView, - destination: crate::TextureCopyView, - copy_size: wgt::Extent3d, -) { - wgn::wgpu_command_encoder_copy_texture_to_texture( - *command_encoder, - &map_texture_copy_view(source), - &map_texture_copy_view(destination), - copy_size, - ); -} - -pub(crate) fn begin_compute_pass(command_encoder: &CommandEncoderId) -> ComputePassId { - unsafe { wgn::wgpu_command_encoder_begin_compute_pass(*command_encoder, None) } -} - -pub(crate) fn compute_pass_set_pipeline( - compute_pass: &ComputePassId, - pipeline: &ComputePipelineId, -) { - unsafe { - wgn::wgpu_compute_pass_set_pipeline(compute_pass.as_mut().unwrap(), *pipeline); - } -} - -pub(crate) fn compute_pass_set_bind_group<'a>( - compute_pass: &ComputePassId, - index: u32, - bind_group: &BindGroupId, - offsets: &[wgt::DynamicOffset], -) { - unsafe { - wgn::wgpu_compute_pass_set_bind_group( - compute_pass.as_mut().unwrap(), - index, - *bind_group, - offsets.as_ptr(), - offsets.len(), - ); - } -} - -pub(crate) fn compute_pass_dispatch(compute_pass: &ComputePassId, x: u32, y: u32, z: u32) { - unsafe { - wgn::wgpu_compute_pass_dispatch(compute_pass.as_mut().unwrap(), x, y, z); - } -} - -pub(crate) fn compute_pass_dispatch_indirect( - compute_pass: &ComputePassId, - indirect_buffer: &BufferId, - indirect_offset: wgt::BufferAddress, -) { - unsafe { - wgn::wgpu_compute_pass_dispatch_indirect( - compute_pass.as_mut().unwrap(), - *indirect_buffer, - indirect_offset, - ); - } -} - -pub(crate) fn compute_pass_end_pass(compute_pass: &ComputePassId) { - unsafe { - wgn::wgpu_compute_pass_end_pass(*compute_pass); - } -} - -pub(crate) fn command_encoder_finish(command_encoder: &CommandEncoderId) -> CommandBufferId { - wgn::wgpu_command_encoder_finish(*command_encoder, None) -} - -pub(crate) fn queue_submit(queue: &QueueId, command_buffers: &[crate::CommandBuffer]) { - let temp_command_buffers = command_buffers - .iter() - .map(|cb| cb.id) - .collect::>(); - - unsafe { wgn::wgpu_queue_submit(*queue, temp_command_buffers.as_ptr(), command_buffers.len()) }; -} - -pub(crate) fn buffer_map_read( - buffer: &crate::Buffer, - start: wgt::BufferAddress, - size: wgt::BufferAddress, -) -> impl Future> { - let (future, completion) = native_gpu_future::new_gpu_future(buffer.id, size); - - extern "C" fn 
buffer_map_read_future_wrapper( - status: wgc::resource::BufferMapAsyncStatus, - data: *const u8, - user_data: *mut u8, - ) { - let completion = - unsafe { native_gpu_future::GpuFutureCompletion::from_raw(user_data as _) }; - let (buffer_id, size) = completion.get_buffer_info(); - - if let wgc::resource::BufferMapAsyncStatus::Success = status { - completion.complete(Ok(crate::BufferReadMapping { - detail: BufferReadMappingDetail { - data, - size: size as usize, - buffer_id, - }, - })); - } else { - completion.complete(Err(crate::BufferAsyncErr)); - } - } - - wgn::wgpu_buffer_map_read_async( - buffer.id, - start, - size, - buffer_map_read_future_wrapper, - completion.to_raw() as _, - ); - - future -} - -pub(crate) fn buffer_map_write( - buffer: &crate::Buffer, - start: wgt::BufferAddress, - size: wgt::BufferAddress, -) -> impl Future> { - let (future, completion) = native_gpu_future::new_gpu_future(buffer.id, size); - - extern "C" fn buffer_map_write_future_wrapper( - status: wgc::resource::BufferMapAsyncStatus, - data: *mut u8, - user_data: *mut u8, - ) { - let completion = - unsafe { native_gpu_future::GpuFutureCompletion::from_raw(user_data as _) }; - let (buffer_id, size) = completion.get_buffer_info(); - - if let wgc::resource::BufferMapAsyncStatus::Success = status { - completion.complete(Ok(crate::BufferWriteMapping { - detail: BufferWriteMappingDetail { - data, - size: size as usize, - buffer_id, - }, - })); - } else { - completion.complete(Err(crate::BufferAsyncErr)); - } - } - - wgn::wgpu_buffer_map_write_async( - buffer.id, - start, - size, - buffer_map_write_future_wrapper, - completion.to_raw() as _, - ); - - future -} - -pub(crate) struct BufferReadMappingDetail { - pub(crate) buffer_id: BufferId, - data: *const u8, - size: usize, -} - -impl BufferReadMappingDetail { - pub(crate) fn as_slice(&self) -> &[u8] { - unsafe { slice::from_raw_parts(self.data as *const u8, self.size) } - } -} - -pub(crate) struct BufferWriteMappingDetail { - pub(crate) buffer_id: BufferId, - data: *mut u8, - size: usize, -} - -impl BufferWriteMappingDetail { - pub(crate) fn as_slice(&mut self) -> &mut [u8] { - unsafe { slice::from_raw_parts_mut(self.data as *mut u8, self.size) } - } -} - -pub(crate) fn device_create_surface( - window: &W, -) -> SurfaceId { - wgn::wgpu_create_surface(window.raw_window_handle()) -} - -pub(crate) fn device_create_swap_chain( - device: &DeviceId, - surface: &SurfaceId, - desc: &wgt::SwapChainDescriptor, -) -> SwapChainId { - wgn::wgpu_device_create_swap_chain(*device, *surface, desc) -} - -pub(crate) fn device_drop(device: &DeviceId) { - #[cfg(not(target_arch = "wasm32"))] - wgn::wgpu_device_poll(*device, true); - //TODO: make this work in general - #[cfg(not(target_arch = "wasm32"))] - #[cfg(feature = "metal-auto-capture")] - wgn::wgpu_device_destroy(*device); -} - -pub(crate) fn swap_chain_get_next_texture( - swap_chain: &SwapChainId, -) -> Result { - match wgn::wgpu_swap_chain_get_next_texture(*swap_chain).view_id { - Some(id) => Ok(crate::SwapChainOutput { - view: crate::TextureView { id, owned: false }, - detail: SwapChainOutputDetail { - swap_chain_id: *swap_chain, - }, - }), - None => Err(crate::TimeOut), - } -} - -#[derive(Debug)] -pub(crate) struct SwapChainOutputDetail { - swap_chain_id: SwapChainId, -} - -pub(crate) fn command_encoder_begin_render_pass<'a>( - command_encoder: &CommandEncoderId, - desc: &crate::RenderPassDescriptor<'a, '_>, -) -> RenderPassEncoderId { - let colors = desc - .color_attachments - .iter() - .map(|ca| 
wgc::command::RenderPassColorAttachmentDescriptor { - attachment: ca.attachment.id, - resolve_target: ca.resolve_target.map(|rt| rt.id), - load_op: ca.load_op, - store_op: ca.store_op, - clear_color: ca.clear_color, - }) - .collect::>(); - - let depth_stencil = desc.depth_stencil_attachment.as_ref().map(|dsa| { - wgc::command::RenderPassDepthStencilAttachmentDescriptor { - attachment: dsa.attachment.id, - depth_load_op: dsa.depth_load_op, - depth_store_op: dsa.depth_store_op, - clear_depth: dsa.clear_depth, - stencil_load_op: dsa.stencil_load_op, - stencil_store_op: dsa.stencil_store_op, - clear_stencil: dsa.clear_stencil, - } - }); - - unsafe { - wgn::wgpu_command_encoder_begin_render_pass( - *command_encoder, - &wgc::command::RenderPassDescriptor { - color_attachments: colors.as_ptr(), - color_attachments_length: colors.len(), - depth_stencil_attachment: depth_stencil.as_ref(), - }, - ) - } -} - -pub(crate) fn render_pass_set_pipeline( - render_pass: &RenderPassEncoderId, - pipeline: &RenderPipelineId, -) { - unsafe { - wgn::wgpu_render_pass_set_pipeline(render_pass.as_mut().unwrap(), *pipeline); - } -} - -pub(crate) fn render_pass_set_blend_color(render_pass: &RenderPassEncoderId, color: wgt::Color) { - unsafe { - wgn::wgpu_render_pass_set_blend_color(render_pass.as_mut().unwrap(), &color); - } -} - -pub(crate) fn render_pass_set_bind_group( - render_pass: &RenderPassEncoderId, - index: u32, - bind_group: &BindGroupId, - offsets: &[wgt::DynamicOffset], -) { - unsafe { - wgn::wgpu_render_pass_set_bind_group( - render_pass.as_mut().unwrap(), - index, - *bind_group, - offsets.as_ptr(), - offsets.len(), - ); - } -} - -pub(crate) fn render_pass_set_index_buffer<'a>( - render_pass: &RenderPassEncoderId, - buffer: &'a crate::Buffer, - offset: wgt::BufferAddress, - size: wgt::BufferAddress, -) { - unsafe { - wgn::wgpu_render_pass_set_index_buffer( - render_pass.as_mut().unwrap(), - buffer.id, - offset, - size, - ); - } -} - -pub(crate) fn render_pass_set_vertex_buffer<'a>( - render_pass: &RenderPassEncoderId, - slot: u32, - buffer: &'a crate::Buffer, - offset: wgt::BufferAddress, - size: wgt::BufferAddress, -) { - unsafe { - wgn::wgpu_render_pass_set_vertex_buffer( - render_pass.as_mut().unwrap(), - slot, - buffer.id, - offset, - size, - ) - }; -} - -pub(crate) fn render_pass_set_scissor_rect( - render_pass: &RenderPassEncoderId, - x: u32, - y: u32, - width: u32, - height: u32, -) { - unsafe { - wgn::wgpu_render_pass_set_scissor_rect(render_pass.as_mut().unwrap(), x, y, width, height); - } -} - -pub(crate) fn render_pass_set_viewport( - render_pass: &RenderPassEncoderId, - x: f32, - y: f32, - width: f32, - height: f32, - min_depth: f32, - max_depth: f32, -) { - unsafe { - wgn::wgpu_render_pass_set_viewport( - render_pass.as_mut().unwrap(), - x, - y, - width, - height, - min_depth, - max_depth, - ); - } -} - -pub(crate) fn render_pass_set_stencil_reference(render_pass: &RenderPassEncoderId, reference: u32) { - unsafe { - wgn::wgpu_render_pass_set_stencil_reference(render_pass.as_mut().unwrap(), reference); - } -} - -pub(crate) fn render_pass_draw( - render_pass: &RenderPassEncoderId, - vertices: Range, - instances: Range, -) { - unsafe { - wgn::wgpu_render_pass_draw( - render_pass.as_mut().unwrap(), - vertices.end - vertices.start, - instances.end - instances.start, - vertices.start, - instances.start, - ); - } -} - -pub(crate) fn render_pass_draw_indexed( - render_pass: &RenderPassEncoderId, - indices: Range, - base_vertex: i32, - instances: Range, -) { - unsafe { - 
wgn::wgpu_render_pass_draw_indexed( - render_pass.as_mut().unwrap(), - indices.end - indices.start, - instances.end - instances.start, - indices.start, - base_vertex, - instances.start, - ); - } -} - -pub(crate) fn render_pass_draw_indirect<'a>( - render_pass: &RenderPassEncoderId, - indirect_buffer: &'a crate::Buffer, - indirect_offset: wgt::BufferAddress, -) { - unsafe { - wgn::wgpu_render_pass_draw_indirect( - render_pass.as_mut().unwrap(), - indirect_buffer.id, - indirect_offset, - ); - } -} - -pub(crate) fn render_pass_draw_indexed_indirect<'a>( - render_pass: &RenderPassEncoderId, - indirect_buffer: &'a crate::Buffer, - indirect_offset: wgt::BufferAddress, -) { - unsafe { - wgn::wgpu_render_pass_draw_indexed_indirect( - render_pass.as_mut().unwrap(), - indirect_buffer.id, - indirect_offset, - ); - } -} - -pub(crate) fn render_pass_end_pass(render_pass: &RenderPassEncoderId) { - unsafe { - wgn::wgpu_render_pass_end_pass(*render_pass); - } -} - -pub(crate) fn texture_create_view( - texture: &TextureId, - desc: Option<&TextureViewDescriptor>, -) -> TextureViewId { - wgn::wgpu_texture_create_view(*texture, desc) -} - -pub(crate) fn texture_drop(texture: &TextureId) { - wgn::wgpu_texture_destroy(*texture); -} - -pub(crate) fn texture_view_drop(texture_view: &TextureViewId) { - wgn::wgpu_texture_view_destroy(*texture_view); -} - -pub(crate) fn bind_group_drop(bind_group: &BindGroupId) { - wgn::wgpu_bind_group_destroy(*bind_group); -} - -pub(crate) fn swap_chain_present(swap_chain_output: &crate::SwapChainOutput) { - wgn::wgpu_swap_chain_present(swap_chain_output.detail.swap_chain_id); -} - -pub(crate) fn device_poll(device: &DeviceId, maintain: crate::Maintain) { - wgn::wgpu_device_poll( - *device, - match maintain { - crate::Maintain::Poll => false, - crate::Maintain::Wait => true, - }, - ); -} - -struct OwnedLabel(Option); - -impl OwnedLabel { - fn new(text: Option<&str>) -> Self { - Self(text.map(|t| CString::new(t).expect("invalid label"))) - } - - fn as_ptr(&self) -> *const std::os::raw::c_char { - match self.0 { - Some(ref c_string) => c_string.as_ptr(), - None => ptr::null(), - } - } -} diff --git a/src/backend/web.rs b/src/backend/web.rs index 8c9fe3cb0..f8a815c08 100644 --- a/src/backend/web.rs +++ b/src/backend/web.rs @@ -5,203 +5,141 @@ use crate::{ TextureViewDescriptor, TextureViewDimension, }; -use std::ops::Range; +use futures::FutureExt; +use std::{marker::PhantomData, ops::Range}; use wasm_bindgen::prelude::*; -pub type AdapterId = web_sys::GpuAdapter; -pub type DeviceId = web_sys::GpuDevice; -pub type QueueId = web_sys::GpuQueue; -pub type ShaderModuleId = web_sys::GpuShaderModule; -pub type BindGroupLayoutId = web_sys::GpuBindGroupLayout; -pub type BindGroupId = web_sys::GpuBindGroup; -pub type TextureViewId = web_sys::GpuTextureView; -pub type SamplerId = web_sys::GpuSampler; -pub type BufferId = web_sys::GpuBuffer; -pub type TextureId = web_sys::GpuTexture; -pub type PipelineLayoutId = web_sys::GpuPipelineLayout; -pub type RenderPipelineId = web_sys::GpuRenderPipeline; -pub type ComputePipelineId = web_sys::GpuComputePipeline; -pub type CommandEncoderId = web_sys::GpuCommandEncoder; -pub type ComputePassId = web_sys::GpuComputePassEncoder; -pub type CommandBufferId = web_sys::GpuCommandBuffer; -pub type SurfaceId = web_sys::GpuCanvasContext; -pub type SwapChainId = web_sys::GpuSwapChain; -pub type RenderPassEncoderId = web_sys::GpuRenderPassEncoder; - -fn gpu() -> web_sys::Gpu { - web_sys::window().unwrap().navigator().gpu() -} +pub type Context = web_sys::Gpu; 
+pub(crate) struct ComputePass(web_sys::GpuComputePassEncoder); +pub(crate) struct RenderPass(web_sys::GpuRenderPassEncoder); -pub(crate) async fn request_adapter( - options: &crate::RequestAdapterOptions<'_>, - backends: wgt::BackendBit, -) -> Option { - if !backends.contains(wgt::BackendBit::BROWSER_WEBGPU) { - return None; +impl crate::ComputePassInner for ComputePass { + fn set_pipeline(&mut self, pipeline: &web_sys::GpuComputePipeline) { + self.0.set_pipeline(pipeline); } - - let mut mapped_options = web_sys::GpuRequestAdapterOptions::new(); - let mapped_power_preference = match options.power_preference { - wgt::PowerPreference::LowPower => web_sys::GpuPowerPreference::LowPower, - wgt::PowerPreference::HighPerformance | wgt::PowerPreference::Default => { - web_sys::GpuPowerPreference::HighPerformance - } - }; - mapped_options.power_preference(mapped_power_preference); - let adapter_promise = gpu().request_adapter_with_options(&mapped_options); - Some( - wasm_bindgen_futures::JsFuture::from(adapter_promise) - .await - .expect("Unable to get adapter") - .into(), - ) -} - -pub(crate) async fn request_device_and_queue( - adapter: &AdapterId, - desc: Option<&wgt::DeviceDescriptor>, -) -> (DeviceId, QueueId) { - let device_promise = match desc { - Some(d) => { - let mut mapped_desc = web_sys::GpuDeviceDescriptor::new(); - // TODO: label, extensions - let mut mapped_limits = web_sys::GpuLimits::new(); - mapped_limits.max_bind_groups(d.limits.max_bind_groups); - mapped_desc.limits(&mapped_limits); - adapter.request_device_with_descriptor(&mapped_desc) - } - None => adapter.request_device(), - }; - let js_value = wasm_bindgen_futures::JsFuture::from(device_promise) - .await - .expect("Unable to get device"); - let device_id = DeviceId::from(js_value); - let queue_id = device_id.default_queue(); - (device_id, queue_id) -} - -pub(crate) fn create_shader_module(device: &DeviceId, spv: &[u32]) -> ShaderModuleId { - let desc = web_sys::GpuShaderModuleDescriptor::new(&js_sys::Uint32Array::from(spv)); - // TODO: label - device.create_shader_module(&desc) -} - -pub(crate) fn create_bind_group_layout( - device: &DeviceId, - desc: &BindGroupLayoutDescriptor, -) -> BindGroupLayoutId { - use web_sys::GpuBindingType as bt; - - let mapped_bindings = desc - .bindings - .iter() - .map(|bind| { - let mapped_type = match bind.ty { - BindingType::UniformBuffer { .. } => bt::UniformBuffer, - BindingType::StorageBuffer { - readonly: false, .. - } => bt::StorageBuffer, - BindingType::StorageBuffer { readonly: true, .. } => bt::ReadonlyStorageBuffer, - BindingType::Sampler { comparison: false } => bt::Sampler, - BindingType::Sampler { .. } => bt::ComparisonSampler, - BindingType::SampledTexture { .. } => bt::SampledTexture, - BindingType::StorageTexture { readonly: true, .. } => bt::ReadonlyStorageTexture, - BindingType::StorageTexture { .. } => bt::WriteonlyStorageTexture, - }; - - let mut mapped_entry = web_sys::GpuBindGroupLayoutEntry::new( - bind.binding, - mapped_type, - bind.visibility.bits(), + fn set_bind_group( + &mut self, + index: u32, + bind_group: &web_sys::GpuBindGroup, + offsets: &[wgt::DynamicOffset], + ) { + self.0 + .set_bind_group_with_u32_array_and_f64_and_dynamic_offsets_data_length( + index, + bind_group, + offsets, + 0f64, + offsets.len() as u32, ); - - match bind.ty { - BindingType::UniformBuffer { dynamic } - | BindingType::StorageBuffer { dynamic, .. } => { - mapped_entry.has_dynamic_offset(dynamic); - } - _ => {} - } - - if let BindingType::SampledTexture { multisampled, .. 
} = bind.ty { - mapped_entry.multisampled(multisampled); - } - - match bind.ty { - BindingType::SampledTexture { dimension, .. } - | BindingType::StorageTexture { dimension, .. } => { - mapped_entry.view_dimension(map_texture_view_dimension(dimension)); - } - _ => {} - } - - if let BindingType::StorageTexture { format, .. } = bind.ty { - mapped_entry.storage_texture_format(map_texture_format(format)); - } - - match bind.ty { - BindingType::SampledTexture { component_type, .. } - | BindingType::StorageTexture { component_type, .. } => { - mapped_entry.texture_component_type(map_texture_component_type(component_type)); - } - _ => {} - } - - mapped_entry - }) - .collect::(); - - let mut mapped_desc = web_sys::GpuBindGroupLayoutDescriptor::new(&mapped_bindings); - if let Some(label) = desc.label { - mapped_desc.label(label); } - device.create_bind_group_layout(&mapped_desc) -} - -pub(crate) fn create_bind_group(device: &DeviceId, desc: &BindGroupDescriptor) -> BindGroupId { - let mapped_entries = desc - .bindings - .iter() - .map(|binding| { - let mapped_resource = match binding.resource { - BindingResource::Buffer { - ref buffer, - ref range, - } => { - let mut mapped_buffer_binding = web_sys::GpuBufferBinding::new(&buffer.id); - mapped_buffer_binding.offset(range.start as f64); - mapped_buffer_binding.size((range.end - range.start) as f64); - JsValue::from(mapped_buffer_binding.clone()) - } - BindingResource::Sampler(ref sampler) => JsValue::from(sampler.id.clone()), - BindingResource::TextureView(ref texture_view) => { - JsValue::from(texture_view.id.clone()) - } - }; - - web_sys::GpuBindGroupEntry::new(binding.binding, &mapped_resource) - }) - .collect::(); - - let mut mapped_desc = web_sys::GpuBindGroupDescriptor::new(&mapped_entries, &desc.layout.id); - if let Some(label) = desc.label { - mapped_desc.label(label); + fn dispatch(&mut self, x: u32, y: u32, z: u32) { + self.0.dispatch_with_y_and_z(x, y, z); + } + fn dispatch_indirect( + &mut self, + indirect_buffer: &web_sys::GpuBuffer, + indirect_offset: wgt::BufferAddress, + ) { + self.0 + .dispatch_indirect_with_f64(indirect_buffer, indirect_offset as f64); } - device.create_bind_group(&mapped_desc) } -pub(crate) fn create_pipeline_layout( - device: &DeviceId, - desc: &PipelineLayoutDescriptor, -) -> PipelineLayoutId { - let temp_layouts = desc - .bind_group_layouts - .iter() - .map(|bgl| bgl.id.clone()) - .collect::(); - let mapped_desc = web_sys::GpuPipelineLayoutDescriptor::new(&temp_layouts); - // TODO: label - device.create_pipeline_layout(&mapped_desc) +impl crate::RenderPassInner for RenderPass { + fn set_pipeline(&mut self, pipeline: &web_sys::GpuRenderPipeline) { + self.0.set_pipeline(pipeline); + } + fn set_bind_group( + &mut self, + index: u32, + bind_group: &web_sys::GpuBindGroup, + offsets: &[wgt::DynamicOffset], + ) { + self.0 + .set_bind_group_with_u32_array_and_f64_and_dynamic_offsets_data_length( + index, + bind_group, + offsets, + 0f64, + offsets.len() as u32, + ); + } + fn set_index_buffer( + &mut self, + buffer: &web_sys::GpuBuffer, + offset: wgt::BufferAddress, + size: wgt::BufferAddress, + ) { + self.0 + .set_index_buffer_with_f64_and_f64(buffer, offset as f64, size as f64); + } + fn set_vertex_buffer( + &mut self, + slot: u32, + buffer: &web_sys::GpuBuffer, + offset: wgt::BufferAddress, + size: wgt::BufferAddress, + ) { + self.0 + .set_vertex_buffer_with_f64_and_f64(slot, buffer, offset as f64, size as f64); + } + fn set_blend_color(&mut self, color: wgt::Color) { + self.0 + 
.set_blend_color_with_gpu_color_dict(&map_color(color)); + } + fn set_scissor_rect(&mut self, x: u32, y: u32, width: u32, height: u32) { + self.0.set_scissor_rect(x, y, width, height); + } + fn set_viewport( + &mut self, + x: f32, + y: f32, + width: f32, + height: f32, + min_depth: f32, + max_depth: f32, + ) { + self.0 + .set_viewport(x, y, width, height, min_depth, max_depth); + } + fn set_stencil_reference(&mut self, reference: u32) { + self.0.set_stencil_reference(reference); + } + fn draw(&mut self, vertices: Range, instances: Range) { + self.0 + .draw_with_instance_count_and_first_vertex_and_first_instance( + vertices.end - vertices.start, + instances.end - instances.start, + vertices.start, + instances.start, + ); + } + fn draw_indexed(&mut self, indices: Range, base_vertex: i32, instances: Range) { + self.0 + .draw_indexed_with_instance_count_and_first_index_and_base_vertex_and_first_instance( + indices.end - indices.start, + instances.end - instances.start, + indices.start, + base_vertex, + instances.start, + ); + } + fn draw_indirect( + &mut self, + indirect_buffer: &web_sys::GpuBuffer, + indirect_offset: wgt::BufferAddress, + ) { + self.0 + .draw_indirect_with_f64(indirect_buffer, indirect_offset as f64); + } + fn draw_indexed_indirect( + &mut self, + indirect_buffer: &web_sys::GpuBuffer, + indirect_offset: wgt::BufferAddress, + ) { + self.0 + .draw_indexed_indirect_with_f64(indirect_buffer, indirect_offset as f64); + } } fn map_texture_format(texture_format: wgt::TextureFormat) -> web_sys::GpuTextureFormat { @@ -561,653 +499,804 @@ fn map_color(color: wgt::Color) -> web_sys::GpuColorDict { web_sys::GpuColorDict::new(color.a, color.b, color.g, color.r) } -pub(crate) fn create_render_pipeline( - device: &DeviceId, - desc: &RenderPipelineDescriptor, -) -> RenderPipelineId { - use web_sys::GpuPrimitiveTopology as pt; - - let mapped_color_states = desc - .color_states - .iter() - .map(|color_state_desc| { - let mapped_format = map_texture_format(color_state_desc.format); - let mut mapped_color_state_desc = web_sys::GpuColorStateDescriptor::new(mapped_format); - mapped_color_state_desc - .alpha_blend(&map_blend_descriptor(&color_state_desc.alpha_blend)); - mapped_color_state_desc - .color_blend(&map_blend_descriptor(&color_state_desc.color_blend)); - mapped_color_state_desc.write_mask(color_state_desc.write_mask.bits()); - mapped_color_state_desc - }) - .collect::(); - - let mapped_primitive_topology = match desc.primitive_topology { - wgt::PrimitiveTopology::PointList => pt::PointList, - wgt::PrimitiveTopology::LineList => pt::LineList, - wgt::PrimitiveTopology::LineStrip => pt::LineStrip, - wgt::PrimitiveTopology::TriangleList => pt::TriangleList, - wgt::PrimitiveTopology::TriangleStrip => pt::TriangleStrip, - }; - - let mapped_vertex_stage = map_stage_descriptor(&desc.vertex_stage); - - let mut mapped_desc = web_sys::GpuRenderPipelineDescriptor::new( - &desc.layout.id, - &mapped_color_states, - mapped_primitive_topology, - &mapped_vertex_stage, - ); - - // TODO: label - - if let Some(ref frag) = desc.fragment_stage { - mapped_desc.fragment_stage(&map_stage_descriptor(frag)); +fn map_store_op(op: wgt::StoreOp) -> web_sys::GpuStoreOp { + match op { + wgt::StoreOp::Clear => web_sys::GpuStoreOp::Clear, + wgt::StoreOp::Store => web_sys::GpuStoreOp::Store, } +} - if let Some(ref rasterization) = desc.rasterization_state { - mapped_desc.rasterization_state(&map_rasterization_state_descriptor(rasterization)); - } +type JsFutureResult = Result; +type FutureMap = futures::future::Map T>; - 
if let Some(ref depth_stencil) = desc.depth_stencil_state { - mapped_desc.depth_stencil_state(&map_depth_stencil_state_descriptor(depth_stencil)); +fn future_request_adapter(result: JsFutureResult) -> Option { + match result { + Ok(js_value) => Some(web_sys::GpuAdapter::from(js_value)), + Err(_) => None, } - - mapped_desc.vertex_state(&map_vertex_state_descriptor(&desc)); - mapped_desc.sample_count(desc.sample_count); - mapped_desc.sample_mask(desc.sample_mask); - mapped_desc.alpha_to_coverage_enabled(desc.alpha_to_coverage_enabled); - - device.create_render_pipeline(&mapped_desc) } - -pub(crate) fn create_compute_pipeline( - device: &DeviceId, - desc: &ComputePipelineDescriptor, -) -> ComputePipelineId { - let mapped_compute_stage = map_stage_descriptor(&desc.compute_stage); - let mapped_desc = - web_sys::GpuComputePipelineDescriptor::new(&desc.layout.id, &mapped_compute_stage); - // TODO: label - device.create_compute_pipeline(&mapped_desc) +fn future_request_device( + result: JsFutureResult, +) -> Result<(web_sys::GpuDevice, web_sys::GpuQueue), crate::RequestDeviceError> { + result + .map(|js_value| { + let device_id = web_sys::GpuDevice::from(js_value); + let queue_id = device_id.default_queue(); + (device_id, queue_id) + }) + .map_err(|_| crate::RequestDeviceError) } -pub(crate) struct CreateBufferMappedDetail { - /// On wasm we need to allocate our own temporary storage for `data`. Later - /// we copy this temporary storage into the `Uint8Array` which was returned - /// by the browser originally. - array_buffer: js_sys::ArrayBuffer, +pub(crate) struct MapFuture { + child: wasm_bindgen_futures::JsFuture, + buffer: Option, + marker: PhantomData, } - -pub(crate) fn device_create_buffer_mapped<'a>( - device: &DeviceId, - desc: &BufferDescriptor, -) -> crate::CreateBufferMapped<'a> { - let mut mapped_desc = web_sys::GpuBufferDescriptor::new(desc.size as f64, desc.usage.bits()); - if let Some(label) = desc.label { - mapped_desc.label(label); - } - unsafe { - let pair = device.create_buffer_mapped(&mapped_desc); - let id = pair.get(0).into(); - let array_buffer = pair.get(1).into(); - // TODO: Use `Vec::from_raw_parts` once it's stable - let memory = vec![0; desc.size as usize].into_boxed_slice(); - let mapped_data = - std::slice::from_raw_parts_mut(Box::into_raw(memory) as *mut u8, desc.size as usize); - crate::CreateBufferMapped { - id, - mapped_data, - detail: CreateBufferMappedDetail { array_buffer }, - } +impl Unpin for MapFuture {} +type MapData = (web_sys::GpuBuffer, Vec); +impl From for BufferReadMappingDetail { + fn from((buffer_id, mapped): MapData) -> Self { + BufferReadMappingDetail { buffer_id, mapped } } } - -pub type BufferDetail = (); - -pub(crate) fn device_create_buffer_mapped_finish( - create_buffer_mapped: crate::CreateBufferMapped<'_>, -) -> crate::Buffer { - unsafe { - // Convert the `mapped_data` slice back into a `Vec`. This should be - // safe because `mapped_data` is no longer accessible beyond this - // function. - let memory: Vec = Box::<[u8]>::from_raw(create_buffer_mapped.mapped_data).into(); - - // Create a view into the mapped `ArrayBuffer` that was provided by the - // browser - let mapped = js_sys::Uint8Array::new(&create_buffer_mapped.detail.array_buffer); - - // Convert `memory` into a temporary `Uint8Array` view. This should be - // safe as long as the backing wasm memory is not resized. 
- let memory_view = js_sys::Uint8Array::view(&memory[..]); - - // Finally copy into `mapped` and let `memory` drop - mapped.set(&memory_view, 0); - } - - buffer_unmap(&create_buffer_mapped.id); - - crate::Buffer { - id: create_buffer_mapped.id, - detail: (), +impl From for BufferWriteMappingDetail { + fn from((buffer_id, mapped): MapData) -> Self { + BufferWriteMappingDetail { buffer_id, mapped } } } - -pub(crate) fn buffer_unmap(buffer: &BufferId) { - buffer.unmap(); +impl> std::future::Future for MapFuture { + type Output = Result; + fn poll( + mut self: std::pin::Pin<&mut Self>, + context: &mut std::task::Context, + ) -> std::task::Poll { + std::future::Future::poll( + std::pin::Pin::new(&mut self.as_mut().get_mut().child), + context, + ) + .map(|result| { + let buffer = self.buffer.take().unwrap(); + result + .map(|js_value| { + let array_buffer = js_sys::ArrayBuffer::from(js_value); + let view = js_sys::Uint8Array::new(&array_buffer); + T::from((buffer, view.to_vec())) + }) + .map_err(|_| crate::BufferAsyncError) + }) + } } -pub(crate) fn buffer_drop(_buffer: &BufferId) { - // Buffer is dropped automatically -} +impl crate::Context for Context { + type AdapterId = web_sys::GpuAdapter; + type DeviceId = web_sys::GpuDevice; + type QueueId = web_sys::GpuQueue; + type ShaderModuleId = web_sys::GpuShaderModule; + type BindGroupLayoutId = web_sys::GpuBindGroupLayout; + type BindGroupId = web_sys::GpuBindGroup; + type TextureViewId = web_sys::GpuTextureView; + type SamplerId = web_sys::GpuSampler; + type BufferId = web_sys::GpuBuffer; + type TextureId = web_sys::GpuTexture; + type PipelineLayoutId = web_sys::GpuPipelineLayout; + type RenderPipelineId = web_sys::GpuRenderPipeline; + type ComputePipelineId = web_sys::GpuComputePipeline; + type CommandEncoderId = web_sys::GpuCommandEncoder; + type ComputePassId = ComputePass; + type CommandBufferId = web_sys::GpuCommandBuffer; + type SurfaceId = web_sys::GpuCanvasContext; + type SwapChainId = web_sys::GpuSwapChain; + type RenderPassId = RenderPass; + + type CreateBufferMappedDetail = CreateBufferMappedDetail; + type BufferReadMappingDetail = BufferReadMappingDetail; + type BufferWriteMappingDetail = BufferWriteMappingDetail; + type SwapChainOutputDetail = SwapChainOutputDetail; + + type RequestAdapterFuture = FutureMap>; + type RequestDeviceFuture = + FutureMap>; + type MapReadFuture = MapFuture; + type MapWriteFuture = MapFuture; + + fn init() -> Self { + web_sys::window().unwrap().navigator().gpu() + } -pub(crate) fn device_create_buffer(device: &DeviceId, desc: &BufferDescriptor) -> crate::Buffer { - let mut mapped_desc = web_sys::GpuBufferDescriptor::new(desc.size as f64, desc.usage.bits()); - if let Some(label) = desc.label { - mapped_desc.label(label); + fn instance_create_surface( + &self, + window: &W, + ) -> Self::SurfaceId { + let handle = window.raw_window_handle(); + let canvas_attribute = match handle { + raw_window_handle::RawWindowHandle::Web(web_handle) => web_handle.id, + _ => panic!("expected valid handle for canvas"), + }; + let canvas_node: wasm_bindgen::JsValue = web_sys::window() + .and_then(|win| win.document()) + .and_then(|doc| { + doc.query_selector_all(&format!("[data-raw-handle=\"{}\"]", canvas_attribute)) + .ok() + }) + .and_then(|nodes| nodes.get(0)) + .expect("expected to find single canvas") + .into(); + let canvas_element: web_sys::HtmlCanvasElement = canvas_node.into(); + let context: wasm_bindgen::JsValue = match canvas_element.get_context("gpupresent") { + Ok(Some(ctx)) => ctx.into(), + _ => panic!("expected 
to get context from canvas"), + }; + context.into() } - crate::Buffer { - id: device.create_buffer(&mapped_desc), - detail: (), + + fn instance_request_adapter( + &self, + options: &crate::RequestAdapterOptions<'_>, + _backends: wgt::BackendBit, + ) -> Self::RequestAdapterFuture { + //TODO: support this check, return `None` if the flag is not set. + // It's not trivial, since we need the Future logic to have this check, + // and currently the Future her has no room for extra parameter `backends`. + //assert!(backends.contains(wgt::BackendBit::BROWSER_WEBGPU)); + let mut mapped_options = web_sys::GpuRequestAdapterOptions::new(); + let mapped_power_preference = match options.power_preference { + wgt::PowerPreference::LowPower => web_sys::GpuPowerPreference::LowPower, + wgt::PowerPreference::HighPerformance | wgt::PowerPreference::Default => { + web_sys::GpuPowerPreference::HighPerformance + } + }; + mapped_options.power_preference(mapped_power_preference); + let adapter_promise = self.request_adapter_with_options(&mapped_options); + wasm_bindgen_futures::JsFuture::from(adapter_promise).map(future_request_adapter) } -} -pub(crate) fn device_create_texture(device: &DeviceId, desc: &TextureDescriptor) -> TextureId { - let mut mapped_desc = web_sys::GpuTextureDescriptor::new( - map_texture_format(desc.format), - &map_extent_3d(desc.size), - desc.usage.bits(), - ); - if let Some(label) = desc.label { - mapped_desc.label(label); - } - mapped_desc.dimension(map_texture_dimension(desc.dimension)); - mapped_desc.mip_level_count(desc.mip_level_count); - mapped_desc.sample_count(desc.sample_count); - device.create_texture(&mapped_desc) -} + fn adapter_request_device( + &self, + adapter: &Self::AdapterId, + desc: &crate::DeviceDescriptor, + ) -> Self::RequestDeviceFuture { + let mut mapped_desc = web_sys::GpuDeviceDescriptor::new(); + // TODO: label, extensions + let mut mapped_limits = web_sys::GpuLimits::new(); + mapped_limits.max_bind_groups(desc.limits.max_bind_groups); + mapped_desc.limits(&mapped_limits); + let device_promise = adapter.request_device_with_descriptor(&mapped_desc); + + wasm_bindgen_futures::JsFuture::from(device_promise).map(future_request_device) + } -pub(crate) fn device_create_sampler(device: &DeviceId, desc: &SamplerDescriptor) -> SamplerId { - let mut mapped_desc = web_sys::GpuSamplerDescriptor::new(); - // TODO: label - mapped_desc.address_mode_u(map_address_mode(desc.address_mode_u)); - mapped_desc.address_mode_v(map_address_mode(desc.address_mode_v)); - mapped_desc.address_mode_w(map_address_mode(desc.address_mode_w)); - if let Some(compare) = map_compare_function(desc.compare) { - mapped_desc.compare(compare); - } - mapped_desc.lod_max_clamp(desc.lod_max_clamp); - mapped_desc.lod_min_clamp(desc.lod_min_clamp); - mapped_desc.mag_filter(map_filter_mode(desc.mag_filter)); - mapped_desc.min_filter(map_filter_mode(desc.min_filter)); - mapped_desc.mipmap_filter(map_filter_mode(desc.mipmap_filter)); - device.create_sampler_with_descriptor(&mapped_desc) -} + fn device_create_swap_chain( + &self, + device: &Self::DeviceId, + surface: &Self::SurfaceId, + desc: &wgt::SwapChainDescriptor, + ) -> Self::SwapChainId { + let mut mapped = + web_sys::GpuSwapChainDescriptor::new(device, map_texture_format(desc.format)); + mapped.usage(desc.usage.bits()); + surface.configure_swap_chain(&mapped) + } -pub(crate) fn create_command_encoder( - device: &DeviceId, - desc: &CommandEncoderDescriptor, -) -> CommandEncoderId { - let mut mapped_desc = web_sys::GpuCommandEncoderDescriptor::new(); - if let 
Some(label) = desc.label { - mapped_desc.label(label); + fn device_create_shader_module( + &self, + device: &Self::DeviceId, + spv: &[u32], + ) -> Self::ShaderModuleId { + let desc = web_sys::GpuShaderModuleDescriptor::new(&js_sys::Uint32Array::from(spv)); + // TODO: label + device.create_shader_module(&desc) } - device.create_command_encoder_with_descriptor(&mapped_desc) -} -pub(crate) fn command_encoder_copy_buffer_to_buffer( - command_encoder: &CommandEncoderId, - source: &crate::Buffer, - source_offset: wgt::BufferAddress, - destination: &crate::Buffer, - destination_offset: wgt::BufferAddress, - copy_size: wgt::BufferAddress, -) { - command_encoder.copy_buffer_to_buffer_with_f64_and_f64_and_f64( - &source.id, - source_offset as f64, - &destination.id, - destination_offset as f64, - copy_size as f64, - ); -} + fn device_create_bind_group_layout( + &self, + device: &Self::DeviceId, + desc: &BindGroupLayoutDescriptor, + ) -> Self::BindGroupLayoutId { + use web_sys::GpuBindingType as bt; + + let mapped_bindings = desc + .bindings + .iter() + .map(|bind| { + let mapped_type = match bind.ty { + BindingType::UniformBuffer { .. } => bt::UniformBuffer, + BindingType::StorageBuffer { + readonly: false, .. + } => bt::StorageBuffer, + BindingType::StorageBuffer { readonly: true, .. } => bt::ReadonlyStorageBuffer, + BindingType::Sampler { comparison: false } => bt::Sampler, + BindingType::Sampler { .. } => bt::ComparisonSampler, + BindingType::SampledTexture { .. } => bt::SampledTexture, + BindingType::StorageTexture { readonly: true, .. } => { + bt::ReadonlyStorageTexture + } + BindingType::StorageTexture { .. } => bt::WriteonlyStorageTexture, + }; + + let mut mapped_entry = web_sys::GpuBindGroupLayoutEntry::new( + bind.binding, + mapped_type, + bind.visibility.bits(), + ); + + match bind.ty { + BindingType::UniformBuffer { dynamic } + | BindingType::StorageBuffer { dynamic, .. } => { + mapped_entry.has_dynamic_offset(dynamic); + } + _ => {} + } -pub(crate) fn command_encoder_copy_buffer_to_texture( - command_encoder: &CommandEncoderId, - source: crate::BufferCopyView, - destination: crate::TextureCopyView, - copy_size: wgt::Extent3d, -) { - command_encoder.copy_buffer_to_texture_with_gpu_extent_3d_dict( - &map_buffer_copy_view(source), - &map_texture_copy_view(destination), - &map_extent_3d(copy_size), - ); -} + if let BindingType::SampledTexture { multisampled, .. } = bind.ty { + mapped_entry.multisampled(multisampled); + } -pub(crate) fn command_encoder_copy_texture_to_buffer( - command_encoder: &CommandEncoderId, - source: crate::TextureCopyView, - destination: crate::BufferCopyView, - copy_size: wgt::Extent3d, -) { - command_encoder.copy_texture_to_buffer_with_gpu_extent_3d_dict( - &map_texture_copy_view(source), - &map_buffer_copy_view(destination), - &map_extent_3d(copy_size), - ); -} + match bind.ty { + BindingType::SampledTexture { dimension, .. } + | BindingType::StorageTexture { dimension, .. } => { + mapped_entry.view_dimension(map_texture_view_dimension(dimension)); + } + _ => {} + } -pub(crate) fn command_encoder_copy_texture_to_texture( - command_encoder: &CommandEncoderId, - source: crate::TextureCopyView, - destination: crate::TextureCopyView, - copy_size: wgt::Extent3d, -) { - command_encoder.copy_texture_to_texture_with_gpu_extent_3d_dict( - &map_texture_copy_view(source), - &map_texture_copy_view(destination), - &map_extent_3d(copy_size), - ); -} + if let BindingType::StorageTexture { format, .. 
} = bind.ty { + mapped_entry.storage_texture_format(map_texture_format(format)); + } -pub(crate) fn begin_compute_pass(command_encoder: &CommandEncoderId) -> ComputePassId { - let mut mapped_desc = web_sys::GpuComputePassDescriptor::new(); - if let Some(ref label) = command_encoder.label() { - mapped_desc.label(label); - } - command_encoder.begin_compute_pass_with_descriptor(&mapped_desc) -} + match bind.ty { + BindingType::SampledTexture { component_type, .. } + | BindingType::StorageTexture { component_type, .. } => { + mapped_entry + .texture_component_type(map_texture_component_type(component_type)); + } + _ => {} + } -pub(crate) fn compute_pass_set_pipeline( - compute_pass: &ComputePassId, - pipeline: &ComputePipelineId, -) { - compute_pass.set_pipeline(&pipeline); -} + mapped_entry + }) + .collect::(); -pub(crate) fn compute_pass_set_bind_group<'a>( - compute_pass: &ComputePassId, - index: u32, - bind_group: &BindGroupId, - offsets: &[wgt::DynamicOffset], -) { - compute_pass.set_bind_group_with_u32_array_and_f64_and_dynamic_offsets_data_length( - index, - bind_group, - offsets, - 0f64, - offsets.len() as u32, - ); -} + let mut mapped_desc = web_sys::GpuBindGroupLayoutDescriptor::new(&mapped_bindings); + if let Some(label) = desc.label { + mapped_desc.label(label); + } + device.create_bind_group_layout(&mapped_desc) + } -pub(crate) fn compute_pass_dispatch(compute_pass: &ComputePassId, x: u32, y: u32, z: u32) { - compute_pass.dispatch_with_y_and_z(x, y, z); -} + fn device_create_bind_group( + &self, + device: &Self::DeviceId, + desc: &BindGroupDescriptor, + ) -> Self::BindGroupId { + let mapped_entries = desc + .bindings + .iter() + .map(|binding| { + let mapped_resource = match binding.resource { + BindingResource::Buffer { + ref buffer, + ref range, + } => { + let mut mapped_buffer_binding = web_sys::GpuBufferBinding::new(&buffer.id); + mapped_buffer_binding.offset(range.start as f64); + mapped_buffer_binding.size((range.end - range.start) as f64); + JsValue::from(mapped_buffer_binding.clone()) + } + BindingResource::Sampler(ref sampler) => JsValue::from(sampler.id.clone()), + BindingResource::TextureView(ref texture_view) => { + JsValue::from(texture_view.id.clone()) + } + }; + + web_sys::GpuBindGroupEntry::new(binding.binding, &mapped_resource) + }) + .collect::(); + + let mut mapped_desc = + web_sys::GpuBindGroupDescriptor::new(&mapped_entries, &desc.layout.id); + if let Some(label) = desc.label { + mapped_desc.label(label); + } + device.create_bind_group(&mapped_desc) + } -pub(crate) fn compute_pass_dispatch_indirect( - compute_pass: &ComputePassId, - indirect_buffer: &BufferId, - indirect_offset: wgt::BufferAddress, -) { - compute_pass.dispatch_indirect_with_f64(indirect_buffer, indirect_offset as f64); -} + fn device_create_pipeline_layout( + &self, + device: &Self::DeviceId, + desc: &PipelineLayoutDescriptor, + ) -> Self::PipelineLayoutId { + let temp_layouts = desc + .bind_group_layouts + .iter() + .map(|bgl| bgl.id.clone()) + .collect::(); + let mapped_desc = web_sys::GpuPipelineLayoutDescriptor::new(&temp_layouts); + // TODO: label + device.create_pipeline_layout(&mapped_desc) + } -pub(crate) fn compute_pass_end_pass(compute_pass: &ComputePassId) { - compute_pass.end_pass(); -} + fn device_create_render_pipeline( + &self, + device: &Self::DeviceId, + desc: &RenderPipelineDescriptor, + ) -> Self::RenderPipelineId { + use web_sys::GpuPrimitiveTopology as pt; + + let mapped_color_states = desc + .color_states + .iter() + .map(|color_state_desc| { + let mapped_format = 
map_texture_format(color_state_desc.format); + let mut mapped_color_state_desc = + web_sys::GpuColorStateDescriptor::new(mapped_format); + mapped_color_state_desc + .alpha_blend(&map_blend_descriptor(&color_state_desc.alpha_blend)); + mapped_color_state_desc + .color_blend(&map_blend_descriptor(&color_state_desc.color_blend)); + mapped_color_state_desc.write_mask(color_state_desc.write_mask.bits()); + mapped_color_state_desc + }) + .collect::(); + + let mapped_primitive_topology = match desc.primitive_topology { + wgt::PrimitiveTopology::PointList => pt::PointList, + wgt::PrimitiveTopology::LineList => pt::LineList, + wgt::PrimitiveTopology::LineStrip => pt::LineStrip, + wgt::PrimitiveTopology::TriangleList => pt::TriangleList, + wgt::PrimitiveTopology::TriangleStrip => pt::TriangleStrip, + }; + + let mapped_vertex_stage = map_stage_descriptor(&desc.vertex_stage); + + let mut mapped_desc = web_sys::GpuRenderPipelineDescriptor::new( + &desc.layout.id, + &mapped_color_states, + mapped_primitive_topology, + &mapped_vertex_stage, + ); -pub(crate) fn command_encoder_finish(command_encoder: &CommandEncoderId) -> CommandBufferId { - let mut mapped_desc = web_sys::GpuCommandBufferDescriptor::new(); - if let Some(ref label) = command_encoder.label() { - mapped_desc.label(label); - } - command_encoder.finish_with_descriptor(&mapped_desc) -} + // TODO: label -pub(crate) fn queue_submit(queue: &QueueId, command_buffers: &[crate::CommandBuffer]) { - let temp_command_buffers = command_buffers - .iter() - .map(|cb| &cb.id) - .collect::(); + if let Some(ref frag) = desc.fragment_stage { + mapped_desc.fragment_stage(&map_stage_descriptor(frag)); + } - queue.submit(&temp_command_buffers); -} + if let Some(ref rasterization) = desc.rasterization_state { + mapped_desc.rasterization_state(&map_rasterization_state_descriptor(rasterization)); + } -pub(crate) async fn buffer_map_read( - buffer: &crate::Buffer, - _start: wgt::BufferAddress, - _size: wgt::BufferAddress, -) -> Result { - let array_buffer_promise = buffer.id.map_read_async(); - let array_buffer: js_sys::ArrayBuffer = - wasm_bindgen_futures::JsFuture::from(array_buffer_promise) - .await - .expect("Unable to map buffer") - .into(); - let view = js_sys::Uint8Array::new(&array_buffer); - Ok(crate::BufferReadMapping { - detail: BufferReadMappingDetail { - buffer_id: buffer.id.clone(), - mapped: view.to_vec(), - }, - }) -} + if let Some(ref depth_stencil) = desc.depth_stencil_state { + mapped_desc.depth_stencil_state(&map_depth_stencil_state_descriptor(depth_stencil)); + } -pub(crate) async fn buffer_map_write( - buffer: &crate::Buffer, - _start: wgt::BufferAddress, - _size: wgt::BufferAddress, -) -> Result { - let array_buffer_promise = buffer.id.map_write_async(); - let array_buffer: js_sys::ArrayBuffer = - wasm_bindgen_futures::JsFuture::from(array_buffer_promise) - .await - .expect("Unable to map buffer") - .into(); - let view = js_sys::Uint8Array::new(&array_buffer); - Ok(crate::BufferWriteMapping { - detail: BufferWriteMappingDetail { - buffer_id: buffer.id.clone(), - mapped: view.to_vec(), - }, - }) -} + mapped_desc.vertex_state(&map_vertex_state_descriptor(&desc)); + mapped_desc.sample_count(desc.sample_count); + mapped_desc.sample_mask(desc.sample_mask); + mapped_desc.alpha_to_coverage_enabled(desc.alpha_to_coverage_enabled); -pub(crate) struct BufferReadMappingDetail { - pub(crate) buffer_id: BufferId, - mapped: Vec, -} + device.create_render_pipeline(&mapped_desc) + } -impl BufferReadMappingDetail { - pub(crate) fn as_slice(&self) -> &[u8] { - 
&self.mapped[..] + fn device_create_compute_pipeline( + &self, + device: &Self::DeviceId, + desc: &ComputePipelineDescriptor, + ) -> Self::ComputePipelineId { + let mapped_compute_stage = map_stage_descriptor(&desc.compute_stage); + let mapped_desc = + web_sys::GpuComputePipelineDescriptor::new(&desc.layout.id, &mapped_compute_stage); + // TODO: label + device.create_compute_pipeline(&mapped_desc) } -} -pub(crate) struct BufferWriteMappingDetail { - pub(crate) buffer_id: BufferId, - mapped: Vec, -} + fn device_create_buffer_mapped<'a>( + &self, + device: &Self::DeviceId, + desc: &BufferDescriptor, + ) -> (Self::BufferId, &'a mut [u8], Self::CreateBufferMappedDetail) { + let mut mapped_desc = + web_sys::GpuBufferDescriptor::new(desc.size as f64, desc.usage.bits()); + if let Some(label) = desc.label { + mapped_desc.label(label); + } + unsafe { + let pair = device.create_buffer_mapped(&mapped_desc); + let id = pair.get(0).into(); + let array_buffer = pair.get(1).into(); + // TODO: Use `Vec::from_raw_parts` once it's stable + let memory = vec![0; desc.size as usize].into_boxed_slice(); + let mapped_data = std::slice::from_raw_parts_mut( + Box::into_raw(memory) as *mut u8, + desc.size as usize, + ); + (id, mapped_data, CreateBufferMappedDetail { array_buffer }) + } + } -impl BufferWriteMappingDetail { - pub(crate) fn as_slice(&mut self) -> &mut [u8] { - &mut self.mapped[..] + fn device_create_buffer( + &self, + device: &Self::DeviceId, + desc: &BufferDescriptor, + ) -> Self::BufferId { + let mut mapped_desc = + web_sys::GpuBufferDescriptor::new(desc.size as f64, desc.usage.bits()); + if let Some(label) = desc.label { + mapped_desc.label(label); + } + device.create_buffer(&mapped_desc) } -} -pub(crate) fn device_create_surface( - window: &W, -) -> SurfaceId { - let handle = window.raw_window_handle(); - let canvas_attribute = match handle { - raw_window_handle::RawWindowHandle::Web(web_handle) => web_handle.id, - _ => panic!("expected valid handle for canvas"), - }; - let canvas_node: wasm_bindgen::JsValue = web_sys::window() - .and_then(|win| win.document()) - .and_then(|doc| { - doc.query_selector_all(&format!("[data-raw-handle=\"{}\"]", canvas_attribute)) - .ok() - }) - .and_then(|nodes| nodes.get(0)) - .expect("expected to find single canvas") - .into(); - let canvas_element: web_sys::HtmlCanvasElement = canvas_node.into(); - let context: wasm_bindgen::JsValue = match canvas_element.get_context("gpupresent") { - Ok(Some(ctx)) => ctx.into(), - _ => panic!("expected to get context from canvas"), - }; - context.into() -} + fn device_create_texture( + &self, + device: &Self::DeviceId, + desc: &TextureDescriptor, + ) -> Self::TextureId { + let mut mapped_desc = web_sys::GpuTextureDescriptor::new( + map_texture_format(desc.format), + &map_extent_3d(desc.size), + desc.usage.bits(), + ); + if let Some(label) = desc.label { + mapped_desc.label(label); + } + mapped_desc.dimension(map_texture_dimension(desc.dimension)); + mapped_desc.mip_level_count(desc.mip_level_count); + mapped_desc.sample_count(desc.sample_count); + device.create_texture(&mapped_desc) + } -pub(crate) fn device_create_swap_chain( - device: &DeviceId, - surface: &SurfaceId, - desc: &wgt::SwapChainDescriptor, -) -> SwapChainId { - let mut mapped = web_sys::GpuSwapChainDescriptor::new(device, map_texture_format(desc.format)); - mapped.usage(desc.usage.bits()); - surface.configure_swap_chain(&mapped) -} + fn device_create_sampler( + &self, + device: &Self::DeviceId, + desc: &SamplerDescriptor, + ) -> Self::SamplerId { + let mut 
mapped_desc = web_sys::GpuSamplerDescriptor::new(); + // TODO: label + mapped_desc.address_mode_u(map_address_mode(desc.address_mode_u)); + mapped_desc.address_mode_v(map_address_mode(desc.address_mode_v)); + mapped_desc.address_mode_w(map_address_mode(desc.address_mode_w)); + if let Some(compare) = map_compare_function(desc.compare) { + mapped_desc.compare(compare); + } + mapped_desc.lod_max_clamp(desc.lod_max_clamp); + mapped_desc.lod_min_clamp(desc.lod_min_clamp); + mapped_desc.mag_filter(map_filter_mode(desc.mag_filter)); + mapped_desc.min_filter(map_filter_mode(desc.min_filter)); + mapped_desc.mipmap_filter(map_filter_mode(desc.mipmap_filter)); + device.create_sampler_with_descriptor(&mapped_desc) + } -pub(crate) fn device_drop(_device: &DeviceId) { - // Device is dropped automatically -} + fn device_create_command_encoder( + &self, + device: &Self::DeviceId, + desc: &CommandEncoderDescriptor, + ) -> Self::CommandEncoderId { + let mut mapped_desc = web_sys::GpuCommandEncoderDescriptor::new(); + if let Some(label) = desc.label { + mapped_desc.label(label); + } + device.create_command_encoder_with_descriptor(&mapped_desc) + } -pub(crate) fn swap_chain_get_next_texture( - swap_chain: &SwapChainId, -) -> Result { - // TODO: Should we pass a descriptor here? - // Or is the default view always correct? - Ok(crate::SwapChainOutput { - view: crate::TextureView { - id: swap_chain.get_current_texture().create_view(), - owned: false, - }, - detail: (), - }) -} + fn device_drop(&self, _device: &Self::DeviceId) { + // Device is dropped automatically + } -pub(crate) type SwapChainOutputDetail = (); + fn device_poll(&self, _device: &Self::DeviceId, _maintain: crate::Maintain) { + // Device is polled automatically + } -fn map_store_op(op: wgt::StoreOp) -> web_sys::GpuStoreOp { - match op { - wgt::StoreOp::Clear => web_sys::GpuStoreOp::Clear, - wgt::StoreOp::Store => web_sys::GpuStoreOp::Store, + fn buffer_map_read( + &self, + buffer: &Self::BufferId, + _start: wgt::BufferAddress, + _size: wgt::BufferAddress, + ) -> Self::MapReadFuture { + MapFuture { + child: wasm_bindgen_futures::JsFuture::from(buffer.map_read_async()), + buffer: Some(buffer.clone()), + marker: PhantomData, + } } -} -pub(crate) fn command_encoder_begin_render_pass<'a>( - command_encoder: &CommandEncoderId, - desc: &crate::RenderPassDescriptor<'a, '_>, -) -> RenderPassEncoderId { - let mapped_color_attachments = desc - .color_attachments - .iter() - .map(|ca| { - let mut mapped_color_attachment = web_sys::GpuRenderPassColorAttachmentDescriptor::new( - &ca.attachment.id, - &match ca.load_op { - wgt::LoadOp::Clear => wasm_bindgen::JsValue::from(map_color(ca.clear_color)), - wgt::LoadOp::Load => wasm_bindgen::JsValue::from(web_sys::GpuLoadOp::Load), - }, - ); + fn buffer_map_write( + &self, + buffer: &Self::BufferId, + _start: wgt::BufferAddress, + _size: wgt::BufferAddress, + ) -> Self::MapWriteFuture { + MapFuture { + child: wasm_bindgen_futures::JsFuture::from(buffer.map_write_async()), + buffer: Some(buffer.clone()), + marker: PhantomData, + } + } - if let Some(rt) = ca.resolve_target { - mapped_color_attachment.resolve_target(&rt.id); - } + fn buffer_unmap(&self, buffer: &Self::BufferId) { + buffer.unmap(); + } - mapped_color_attachment.store_op(map_store_op(ca.store_op)); + fn swap_chain_get_next_texture( + &self, + swap_chain: &Self::SwapChainId, + ) -> Result<(Self::TextureViewId, Self::SwapChainOutputDetail), crate::TimeOut> { + // TODO: Should we pass a descriptor here? + // Or is the default view always correct? 
+ Ok((swap_chain.get_current_texture().create_view(), ())) + } - mapped_color_attachment - }) - .collect::(); + fn swap_chain_present( + &self, + _view: &Self::TextureViewId, + _detail: &Self::SwapChainOutputDetail, + ) { + // Swapchain is presented automatically + } - let mut mapped_desc = web_sys::GpuRenderPassDescriptor::new(&mapped_color_attachments); - - // TODO: label - - if let Some(dsa) = &desc.depth_stencil_attachment { - let mapped_depth_stencil_attachment = - web_sys::GpuRenderPassDepthStencilAttachmentDescriptor::new( - &dsa.attachment.id, - &match dsa.depth_load_op { - wgt::LoadOp::Clear => wasm_bindgen::JsValue::from(dsa.clear_depth), - wgt::LoadOp::Load => wasm_bindgen::JsValue::from(web_sys::GpuLoadOp::Load), - }, - map_store_op(dsa.depth_store_op), - &match dsa.stencil_load_op { - wgt::LoadOp::Clear => wasm_bindgen::JsValue::from(dsa.clear_stencil), - wgt::LoadOp::Load => wasm_bindgen::JsValue::from(web_sys::GpuLoadOp::Load), - }, - map_store_op(dsa.stencil_store_op), - ); + fn texture_create_view( + &self, + texture: &Self::TextureId, + desc: Option<&TextureViewDescriptor>, + ) -> Self::TextureViewId { + match desc { + Some(d) => { + let mut mapped_desc = web_sys::GpuTextureViewDescriptor::new(); + mapped_desc.array_layer_count(d.array_layer_count); + mapped_desc.aspect(map_texture_aspect(d.aspect)); + mapped_desc.base_array_layer(d.base_array_layer); + mapped_desc.base_mip_level(d.base_mip_level); + mapped_desc.dimension(map_texture_view_dimension(d.dimension)); + mapped_desc.format(map_texture_format(d.format)); + mapped_desc.mip_level_count(d.level_count); + // TODO: label + texture.create_view_with_descriptor(&mapped_desc) + } + None => texture.create_view(), + } + } - mapped_desc.depth_stencil_attachment(&mapped_depth_stencil_attachment); + fn texture_drop(&self, _texture: &Self::TextureId) { + // Buffer is dropped automatically + } + fn texture_view_drop(&self, _texture_view: &Self::TextureViewId) { + // Buffer is dropped automatically + } + fn sampler_drop(&self, _sampler: &Self::SamplerId) { + // Buffer is dropped automatically + } + fn buffer_drop(&self, _buffer: &Self::BufferId) { + // Buffer is dropped automatically + } + fn bind_group_drop(&self, _bind_group: &Self::BindGroupId) { + // Buffer is dropped automatically + } + fn bind_group_layout_drop(&self, _bind_group_layout: &Self::BindGroupLayoutId) { + // Buffer is dropped automatically + } + fn pipeline_layout_drop(&self, _pipeline_layout: &Self::PipelineLayoutId) { + // Buffer is dropped automatically + } + fn shader_module_drop(&self, _shader_module: &Self::ShaderModuleId) { + // Buffer is dropped automatically + } + fn command_buffer_drop(&self, _command_buffer: &Self::CommandBufferId) { + // Buffer is dropped automatically + } + fn compute_pipeline_drop(&self, _pipeline: &Self::ComputePipelineId) { + // Buffer is dropped automatically + } + fn render_pipeline_drop(&self, _pipeline: &Self::RenderPipelineId) { + // Buffer is dropped automatically } - command_encoder.begin_render_pass(&mapped_desc) -} + fn flush_mapped_data(data: &mut [u8], detail: CreateBufferMappedDetail) { + unsafe { + // Convert the `mapped_data` slice back into a `Vec`. This should be + // safe because `mapped_data` is no longer accessible beyond this + // function. 
+ let memory: Vec = Box::<[u8]>::from_raw(data).into(); -pub(crate) fn render_pass_set_pipeline( - render_pass: &RenderPassEncoderId, - pipeline: &RenderPipelineId, -) { - render_pass.set_pipeline(&pipeline); -} + // Create a view into the mapped `ArrayBuffer` that was provided by the + // browser + let mapped = js_sys::Uint8Array::new(&detail.array_buffer); -pub(crate) fn render_pass_set_blend_color(render_pass: &RenderPassEncoderId, color: wgt::Color) { - render_pass.set_blend_color_with_gpu_color_dict(&map_color(color)); -} + // Convert `memory` into a temporary `Uint8Array` view. This should be + // safe as long as the backing wasm memory is not resized. + let memory_view = js_sys::Uint8Array::view(&memory[..]); -pub(crate) fn render_pass_set_bind_group( - render_pass: &RenderPassEncoderId, - index: u32, - bind_group: &BindGroupId, - offsets: &[wgt::DynamicOffset], -) { - render_pass.set_bind_group_with_u32_array_and_f64_and_dynamic_offsets_data_length( - index, - bind_group, - offsets, - 0f64, - offsets.len() as u32, - ); -} + // Finally copy into `mapped` and let `memory` drop + mapped.set(&memory_view, 0); + } + } -pub(crate) fn render_pass_set_index_buffer<'a>( - render_pass: &RenderPassEncoderId, - buffer: &'a crate::Buffer, - offset: wgt::BufferAddress, - size: wgt::BufferAddress, -) { - render_pass.set_index_buffer_with_f64_and_f64(&buffer.id, offset as f64, size as f64); -} + fn encoder_copy_buffer_to_buffer( + &self, + encoder: &Self::CommandEncoderId, + source: &Self::BufferId, + source_offset: wgt::BufferAddress, + destination: &Self::BufferId, + destination_offset: wgt::BufferAddress, + copy_size: wgt::BufferAddress, + ) { + encoder.copy_buffer_to_buffer_with_f64_and_f64_and_f64( + source, + source_offset as f64, + destination, + destination_offset as f64, + copy_size as f64, + ) + } -pub(crate) fn render_pass_set_vertex_buffer<'a>( - render_pass: &RenderPassEncoderId, - slot: u32, - buffer: &'a crate::Buffer, - offset: wgt::BufferAddress, - size: wgt::BufferAddress, -) { - render_pass.set_vertex_buffer_with_f64_and_f64(slot, &buffer.id, offset as f64, size as f64); -} + fn encoder_copy_buffer_to_texture( + &self, + encoder: &Self::CommandEncoderId, + source: crate::BufferCopyView, + destination: crate::TextureCopyView, + copy_size: wgt::Extent3d, + ) { + encoder.copy_buffer_to_texture_with_gpu_extent_3d_dict( + &map_buffer_copy_view(source), + &map_texture_copy_view(destination), + &map_extent_3d(copy_size), + ) + } -pub(crate) fn render_pass_set_scissor_rect( - render_pass: &RenderPassEncoderId, - x: u32, - y: u32, - width: u32, - height: u32, -) { - render_pass.set_scissor_rect(x, y, width, height); -} + fn encoder_copy_texture_to_buffer( + &self, + encoder: &Self::CommandEncoderId, + source: crate::TextureCopyView, + destination: crate::BufferCopyView, + copy_size: wgt::Extent3d, + ) { + encoder.copy_texture_to_buffer_with_gpu_extent_3d_dict( + &map_texture_copy_view(source), + &map_buffer_copy_view(destination), + &map_extent_3d(copy_size), + ) + } -pub(crate) fn render_pass_set_viewport( - render_pass: &RenderPassEncoderId, - x: f32, - y: f32, - width: f32, - height: f32, - min_depth: f32, - max_depth: f32, -) { - render_pass.set_viewport(x, y, width, height, min_depth, max_depth); -} + fn encoder_copy_texture_to_texture( + &self, + encoder: &Self::CommandEncoderId, + source: crate::TextureCopyView, + destination: crate::TextureCopyView, + copy_size: wgt::Extent3d, + ) { + encoder.copy_texture_to_texture_with_gpu_extent_3d_dict( + &map_texture_copy_view(source), + 
&map_texture_copy_view(destination), + &map_extent_3d(copy_size), + ) + } -pub(crate) fn render_pass_set_stencil_reference(render_pass: &RenderPassEncoderId, reference: u32) { - render_pass.set_stencil_reference(reference); -} + fn encoder_begin_compute_pass(&self, encoder: &Self::CommandEncoderId) -> Self::ComputePassId { + let mut mapped_desc = web_sys::GpuComputePassDescriptor::new(); + if let Some(ref label) = encoder.label() { + mapped_desc.label(label); + } + ComputePass(encoder.begin_compute_pass_with_descriptor(&mapped_desc)) + } -pub(crate) fn render_pass_draw( - render_pass: &RenderPassEncoderId, - vertices: Range, - instances: Range, -) { - render_pass.draw_with_instance_count_and_first_vertex_and_first_instance( - vertices.end - vertices.start, - instances.end - instances.start, - vertices.start, - instances.start, - ) -} + fn encoder_end_compute_pass( + &self, + _encoder: &Self::CommandEncoderId, + pass: &mut Self::ComputePassId, + ) { + pass.0.end_pass(); + } -pub(crate) fn render_pass_draw_indexed( - render_pass: &RenderPassEncoderId, - indices: Range, - base_vertex: i32, - instances: Range, -) { - render_pass - .draw_indexed_with_instance_count_and_first_index_and_base_vertex_and_first_instance( - indices.end - indices.start, - instances.end - instances.start, - indices.start, - base_vertex, - instances.start, - ); -} + fn encoder_begin_render_pass<'a>( + &self, + encoder: &Self::CommandEncoderId, + desc: &crate::RenderPassDescriptor<'a, '_>, + ) -> Self::RenderPassId { + let mapped_color_attachments = desc + .color_attachments + .iter() + .map(|ca| { + let mut mapped_color_attachment = + web_sys::GpuRenderPassColorAttachmentDescriptor::new( + &ca.attachment.id, + &match ca.load_op { + wgt::LoadOp::Clear => { + wasm_bindgen::JsValue::from(map_color(ca.clear_color)) + } + wgt::LoadOp::Load => { + wasm_bindgen::JsValue::from(web_sys::GpuLoadOp::Load) + } + }, + ); + + if let Some(rt) = ca.resolve_target { + mapped_color_attachment.resolve_target(&rt.id); + } -pub(crate) fn render_pass_draw_indirect<'a>( - render_pass: &RenderPassEncoderId, - indirect_buffer: &'a crate::Buffer, - indirect_offset: wgt::BufferAddress, -) { - render_pass.draw_indirect_with_f64(&indirect_buffer.id, indirect_offset as f64); -} + mapped_color_attachment.store_op(map_store_op(ca.store_op)); + + mapped_color_attachment + }) + .collect::(); + + let mut mapped_desc = web_sys::GpuRenderPassDescriptor::new(&mapped_color_attachments); + + // TODO: label + + if let Some(dsa) = &desc.depth_stencil_attachment { + let mapped_depth_stencil_attachment = + web_sys::GpuRenderPassDepthStencilAttachmentDescriptor::new( + &dsa.attachment.id, + &match dsa.depth_load_op { + wgt::LoadOp::Clear => wasm_bindgen::JsValue::from(dsa.clear_depth), + wgt::LoadOp::Load => wasm_bindgen::JsValue::from(web_sys::GpuLoadOp::Load), + }, + map_store_op(dsa.depth_store_op), + &match dsa.stencil_load_op { + wgt::LoadOp::Clear => wasm_bindgen::JsValue::from(dsa.clear_stencil), + wgt::LoadOp::Load => wasm_bindgen::JsValue::from(web_sys::GpuLoadOp::Load), + }, + map_store_op(dsa.stencil_store_op), + ); + + mapped_desc.depth_stencil_attachment(&mapped_depth_stencil_attachment); + } -pub(crate) fn render_pass_draw_indexed_indirect<'a>( - render_pass: &RenderPassEncoderId, - indirect_buffer: &'a crate::Buffer, - indirect_offset: wgt::BufferAddress, -) { - render_pass.draw_indexed_indirect_with_f64(&indirect_buffer.id, indirect_offset as f64); -} + RenderPass(encoder.begin_render_pass(&mapped_desc)) + } -pub(crate) fn 
render_pass_end_pass(render_pass: &RenderPassEncoderId) {
-    render_pass.end_pass();
-}
+    fn encoder_end_render_pass(
+        &self,
+        _encoder: &Self::CommandEncoderId,
+        pass: &mut Self::RenderPassId,
+    ) {
+        pass.0.end_pass();
+    }
 
-pub(crate) fn texture_create_view(
-    texture: &TextureId,
-    desc: Option<&TextureViewDescriptor>,
-) -> TextureViewId {
-    match desc {
-        Some(d) => {
-            let mut mapped_desc = web_sys::GpuTextureViewDescriptor::new();
-            mapped_desc.array_layer_count(d.array_layer_count);
-            mapped_desc.aspect(map_texture_aspect(d.aspect));
-            mapped_desc.base_array_layer(d.base_array_layer);
-            mapped_desc.base_mip_level(d.base_mip_level);
-            mapped_desc.dimension(map_texture_view_dimension(d.dimension));
-            mapped_desc.format(map_texture_format(d.format));
-            mapped_desc.mip_level_count(d.level_count);
-            // TODO: label
-            texture.create_view_with_descriptor(&mapped_desc)
+    fn encoder_finish(&self, encoder: &Self::CommandEncoderId) -> Self::CommandBufferId {
+        let mut mapped_desc = web_sys::GpuCommandBufferDescriptor::new();
+        if let Some(ref label) = encoder.label() {
+            mapped_desc.label(label);
         }
-        None => texture.create_view(),
+        encoder.finish_with_descriptor(&mapped_desc)
+    }
+
+    fn queue_submit<I: Iterator<Item = Self::CommandBufferId>>(
+        &self,
+        queue: &Self::QueueId,
+        command_buffers: I,
+    ) {
+        let temp_command_buffers = command_buffers.collect::<js_sys::Array>();
+
+        queue.submit(&temp_command_buffers);
     }
 }
 
-pub(crate) fn texture_drop(_texture: &TextureId) {
-    // Texture is dropped automatically
+pub(crate) struct CreateBufferMappedDetail {
+    /// On wasm we need to allocate our own temporary storage for `data`. Later
+    /// we copy this temporary storage into the `Uint8Array` which was returned
+    /// by the browser originally.
+    array_buffer: js_sys::ArrayBuffer,
 }
 
-pub(crate) fn texture_view_drop(_texture_view: &TextureViewId) {
-    // Texture view is dropped automatically
+pub(crate) struct BufferReadMappingDetail {
+    pub(crate) buffer_id: web_sys::GpuBuffer,
+    mapped: Vec<u8>,
 }
 
-pub(crate) fn bind_group_drop(_bind_group: &BindGroupId) {
-    // Bind group is dropped automatically
+impl BufferReadMappingDetail {
+    pub(crate) fn as_slice(&self) -> &[u8] {
+        &self.mapped[..]
+    }
 }
 
-pub(crate) fn swap_chain_present(_swap_chain_output: &crate::SwapChainOutput) {
-    // Swapchain is presented automatically
+pub(crate) struct BufferWriteMappingDetail {
+    pub(crate) buffer_id: web_sys::GpuBuffer,
+    mapped: Vec<u8>,
 }
 
-pub(crate) fn device_poll(_device: &DeviceId, _maintain: crate::Maintain) {
-    // Device is polled automatically
+impl BufferWriteMappingDetail {
+    pub(crate) fn as_slice(&mut self) -> &mut [u8] {
+        &mut self.mapped[..]
+    }
 }
+
+pub(crate) type SwapChainOutputDetail = ();
diff --git a/src/lib.rs b/src/lib.rs
index 6a2f09d32..af6a7cecb 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -5,7 +5,8 @@
 mod backend;
 #[macro_use]
 mod macros;
-use std::{future::Future, ops::Range, thread};
+use futures::FutureExt as _;
+use std::{future::Future, marker::PhantomData, ops::Range, sync::Arc, thread};
 
 #[cfg(not(target_arch = "wasm32"))]
 pub use wgc::instance::{AdapterInfo, DeviceType};
@@ -21,24 +22,290 @@ pub use wgt::{
     VertexAttributeDescriptor, VertexFormat, BIND_BUFFER_ALIGNMENT, MAX_BIND_GROUPS,
 };
 
-//TODO: avoid heap allocating vectors during resource creation.
-#[derive(Default, Debug)] -struct Temp { - //bind_group_descriptors: Vec, -//vertex_buffers: Vec, +use backend::Context as C; + +trait ComputePassInner { + fn set_pipeline(&mut self, pipeline: &Ctx::ComputePipelineId); + fn set_bind_group( + &mut self, + index: u32, + bind_group: &Ctx::BindGroupId, + offsets: &[DynamicOffset], + ); + fn dispatch(&mut self, x: u32, y: u32, z: u32); + fn dispatch_indirect( + &mut self, + indirect_buffer: &Ctx::BufferId, + indirect_offset: BufferAddress, + ); +} + +trait RenderPassInner { + fn set_pipeline(&mut self, pipeline: &Ctx::RenderPipelineId); + fn set_bind_group( + &mut self, + index: u32, + bind_group: &Ctx::BindGroupId, + offsets: &[DynamicOffset], + ); + fn set_index_buffer( + &mut self, + buffer: &Ctx::BufferId, + offset: BufferAddress, + size: BufferAddress, + ); + fn set_vertex_buffer( + &mut self, + slot: u32, + buffer: &Ctx::BufferId, + offset: BufferAddress, + size: BufferAddress, + ); + fn set_blend_color(&mut self, color: wgt::Color); + fn set_scissor_rect(&mut self, x: u32, y: u32, width: u32, height: u32); + fn set_viewport( + &mut self, + x: f32, + y: f32, + width: f32, + height: f32, + min_depth: f32, + max_depth: f32, + ); + fn set_stencil_reference(&mut self, reference: u32); + fn draw(&mut self, vertices: Range, instances: Range); + fn draw_indexed(&mut self, indices: Range, base_vertex: i32, instances: Range); + fn draw_indirect(&mut self, indirect_buffer: &Ctx::BufferId, indirect_offset: BufferAddress); + fn draw_indexed_indirect( + &mut self, + indirect_buffer: &Ctx::BufferId, + indirect_offset: BufferAddress, + ); +} + +trait Context: Sized { + type AdapterId; + type DeviceId; + type QueueId; + type ShaderModuleId; + type BindGroupLayoutId; + type BindGroupId; + type TextureViewId; + type SamplerId; + type BufferId; + type TextureId; + type PipelineLayoutId; + type RenderPipelineId; + type ComputePipelineId; + type CommandEncoderId; + type ComputePassId: ComputePassInner; + type CommandBufferId; + type SurfaceId; + type SwapChainId; + type RenderPassId: RenderPassInner; + + type CreateBufferMappedDetail; + type BufferReadMappingDetail; + type BufferWriteMappingDetail; + type SwapChainOutputDetail; + + type RequestAdapterFuture: Future>; + type RequestDeviceFuture: Future< + Output = Result<(Self::DeviceId, Self::QueueId), RequestDeviceError>, + >; + type MapReadFuture: Future>; + type MapWriteFuture: Future>; + + fn init() -> Self; + fn instance_create_surface( + &self, + window: &W, + ) -> Self::SurfaceId; + fn instance_request_adapter( + &self, + options: &RequestAdapterOptions<'_>, + backends: wgt::BackendBit, + ) -> Self::RequestAdapterFuture; + fn adapter_request_device( + &self, + adapter: &Self::AdapterId, + desc: &DeviceDescriptor, + ) -> Self::RequestDeviceFuture; + + fn device_create_swap_chain( + &self, + device: &Self::DeviceId, + surface: &Self::SurfaceId, + desc: &SwapChainDescriptor, + ) -> Self::SwapChainId; + fn device_create_shader_module( + &self, + device: &Self::DeviceId, + spv: &[u32], + ) -> Self::ShaderModuleId; + fn device_create_bind_group_layout( + &self, + device: &Self::DeviceId, + desc: &BindGroupLayoutDescriptor, + ) -> Self::BindGroupLayoutId; + fn device_create_bind_group( + &self, + device: &Self::DeviceId, + desc: &BindGroupDescriptor, + ) -> Self::BindGroupId; + fn device_create_pipeline_layout( + &self, + device: &Self::DeviceId, + desc: &PipelineLayoutDescriptor, + ) -> Self::PipelineLayoutId; + fn device_create_render_pipeline( + &self, + device: &Self::DeviceId, + desc: 
&RenderPipelineDescriptor, + ) -> Self::RenderPipelineId; + fn device_create_compute_pipeline( + &self, + device: &Self::DeviceId, + desc: &ComputePipelineDescriptor, + ) -> Self::ComputePipelineId; + fn device_create_buffer_mapped<'a>( + &self, + device: &Self::DeviceId, + desc: &BufferDescriptor, + ) -> (Self::BufferId, &'a mut [u8], Self::CreateBufferMappedDetail); + fn device_create_buffer( + &self, + device: &Self::DeviceId, + desc: &BufferDescriptor, + ) -> Self::BufferId; + fn device_create_texture( + &self, + device: &Self::DeviceId, + desc: &TextureDescriptor, + ) -> Self::TextureId; + fn device_create_sampler( + &self, + device: &Self::DeviceId, + desc: &SamplerDescriptor, + ) -> Self::SamplerId; + fn device_create_command_encoder( + &self, + device: &Self::DeviceId, + desc: &CommandEncoderDescriptor, + ) -> Self::CommandEncoderId; + fn device_drop(&self, device: &Self::DeviceId); + fn device_poll(&self, device: &Self::DeviceId, maintain: Maintain); + + fn buffer_map_read( + &self, + buffer: &Self::BufferId, + start: BufferAddress, + size: BufferAddress, + ) -> Self::MapReadFuture; + fn buffer_map_write( + &self, + buffer: &Self::BufferId, + start: BufferAddress, + size: BufferAddress, + ) -> Self::MapWriteFuture; + fn buffer_unmap(&self, buffer: &Self::BufferId); + fn swap_chain_get_next_texture( + &self, + swap_chain: &Self::SwapChainId, + ) -> Result<(Self::TextureViewId, Self::SwapChainOutputDetail), TimeOut>; + fn swap_chain_present(&self, view: &Self::TextureViewId, detail: &Self::SwapChainOutputDetail); + fn texture_create_view( + &self, + texture: &Self::TextureId, + desc: Option<&TextureViewDescriptor>, + ) -> Self::TextureViewId; + fn texture_drop(&self, texture: &Self::TextureId); + fn texture_view_drop(&self, texture_view: &Self::TextureViewId); + fn sampler_drop(&self, sampler: &Self::SamplerId); + fn buffer_drop(&self, buffer: &Self::BufferId); + fn bind_group_drop(&self, bind_group: &Self::BindGroupId); + fn bind_group_layout_drop(&self, bind_group_layout: &Self::BindGroupLayoutId); + fn pipeline_layout_drop(&self, pipeline_layout: &Self::PipelineLayoutId); + fn shader_module_drop(&self, shader_module: &Self::ShaderModuleId); + fn command_buffer_drop(&self, command_buffer: &Self::CommandBufferId); + fn compute_pipeline_drop(&self, pipeline: &Self::ComputePipelineId); + fn render_pipeline_drop(&self, pipeline: &Self::RenderPipelineId); + + fn encoder_copy_buffer_to_buffer( + &self, + encoder: &Self::CommandEncoderId, + source: &Self::BufferId, + source_offset: BufferAddress, + destination: &Self::BufferId, + destination_offset: BufferAddress, + copy_size: BufferAddress, + ); + fn encoder_copy_buffer_to_texture( + &self, + encoder: &Self::CommandEncoderId, + source: BufferCopyView, + destination: TextureCopyView, + copy_size: Extent3d, + ); + fn encoder_copy_texture_to_buffer( + &self, + encoder: &Self::CommandEncoderId, + source: TextureCopyView, + destination: BufferCopyView, + copy_size: Extent3d, + ); + fn encoder_copy_texture_to_texture( + &self, + encoder: &Self::CommandEncoderId, + source: TextureCopyView, + destination: TextureCopyView, + copy_size: Extent3d, + ); + + fn flush_mapped_data(data: &mut [u8], detail: Self::CreateBufferMappedDetail); + fn encoder_begin_compute_pass(&self, encoder: &Self::CommandEncoderId) -> Self::ComputePassId; + fn encoder_end_compute_pass( + &self, + encoder: &Self::CommandEncoderId, + pass: &mut Self::ComputePassId, + ); + fn encoder_begin_render_pass<'a>( + &self, + encoder: &Self::CommandEncoderId, + desc: 
&RenderPassDescriptor<'a, '_>, + ) -> Self::RenderPassId; + fn encoder_end_render_pass( + &self, + encoder: &Self::CommandEncoderId, + pass: &mut Self::RenderPassId, + ); + fn encoder_finish(&self, encoder: &Self::CommandEncoderId) -> Self::CommandBufferId; + fn queue_submit>( + &self, + queue: &Self::QueueId, + command_buffers: I, + ); +} + +/// An instance sets up the context for all other wgpu objects. +/// +/// An `Adapter` can be used to open a connection to the corresponding device on the host system, +/// yielding a [`Device`] object. +pub struct Instance { + context: Arc, } /// A handle to a physical graphics and/or compute device. /// /// An `Adapter` can be used to open a connection to the corresponding device on the host system, /// yielding a [`Device`] object. -#[derive(Debug, PartialEq)] pub struct Adapter { - id: backend::AdapterId, + context: Arc, + id: ::AdapterId, } /// Options for requesting adapter. -#[derive(Clone, Debug)] +#[derive(Clone)] pub struct RequestAdapterOptions<'a> { /// Power preference for the adapter. pub power_preference: PowerPreference, @@ -50,10 +317,9 @@ pub struct RequestAdapterOptions<'a> { /// /// The `Device` is the responsible for the creation of most rendering and compute resources, as /// well as exposing [`Queue`] objects. -#[derive(Debug)] pub struct Device { - id: backend::DeviceId, - temp: Temp, + context: Arc, + id: ::DeviceId, } /// This is passed to `Device::poll` to control whether @@ -65,16 +331,16 @@ pub enum Maintain { } /// A handle to a GPU-accessible buffer. -#[derive(Debug, PartialEq)] pub struct Buffer { - id: backend::BufferId, - detail: backend::BufferDetail, + context: Arc, + id: ::BufferId, + //detail: ::BufferDetail, } /// A handle to a texture on the GPU. -#[derive(Debug, PartialEq)] pub struct Texture { - id: backend::TextureId, + context: Arc, + id: ::TextureId, owned: bool, } @@ -82,9 +348,9 @@ pub struct Texture { /// /// A `TextureView` object describes a texture and associated metadata needed by a /// [`RenderPipeline`] or [`BindGroup`]. -#[derive(Debug, PartialEq)] pub struct TextureView { - id: backend::TextureViewId, + context: Arc, + id: ::TextureViewId, owned: bool, } @@ -93,27 +359,32 @@ pub struct TextureView { /// A `Sampler` object defines how a pipeline will sample from a [`TextureView`]. Samplers define /// image filters (including anisotropy) and address (wrapping) modes, among other things. See /// the documentation for [`SamplerDescriptor`] for more information. -#[derive(Debug, PartialEq)] pub struct Sampler { - id: backend::SamplerId, + context: Arc, + id: ::SamplerId, +} + +impl Drop for Sampler { + fn drop(&mut self) { + self.context.sampler_drop(&self.id); + } } /// A handle to a presentable surface. /// /// A `Surface` represents a platform-specific surface (e.g. a window) to which rendered images may /// be presented. A `Surface` may be created with [`Surface::create`]. -#[derive(Debug, PartialEq)] pub struct Surface { - id: backend::SurfaceId, + id: ::SurfaceId, } /// A handle to a swap chain. /// /// A `SwapChain` represents the image or series of images that will be presented to a [`Surface`]. /// A `SwapChain` may be created with [`Device::create_swap_chain`]. -#[derive(Debug, PartialEq)] pub struct SwapChain { - id: backend::SwapChainId, + context: Arc, + id: ::SwapChainId, } /// An opaque handle to a binding group layout. 
@@ -122,9 +393,15 @@ pub struct SwapChain { /// create a [`BindGroupDescriptor`] object, which in turn can be used to create a [`BindGroup`] /// object with [`Device::create_bind_group`]. A series of `BindGroupLayout`s can also be used to /// create a [`PipelineLayoutDescriptor`], which can be used to create a [`PipelineLayout`]. -#[derive(Debug, PartialEq)] pub struct BindGroupLayout { - id: backend::BindGroupLayoutId, + context: Arc, + id: ::BindGroupLayoutId, +} + +impl Drop for BindGroupLayout { + fn drop(&mut self) { + self.context.bind_group_layout_drop(&self.id); + } } /// An opaque handle to a binding group. @@ -133,14 +410,14 @@ pub struct BindGroupLayout { /// [`BindGroupLayout`]. It can be created with [`Device::create_bind_group`]. A `BindGroup` can /// be bound to a particular [`RenderPass`] with [`RenderPass::set_bind_group`], or to a /// [`ComputePass`] with [`ComputePass::set_bind_group`]. -#[derive(Debug, PartialEq)] pub struct BindGroup { - id: backend::BindGroupId, + context: Arc, + id: ::BindGroupId, } impl Drop for BindGroup { fn drop(&mut self) { - backend::bind_group_drop(&self.id); + self.context.bind_group_drop(&self.id); } } @@ -149,32 +426,56 @@ impl Drop for BindGroup { /// A `ShaderModule` represents a compiled shader module on the GPU. It can be created by passing /// valid SPIR-V source code to [`Device::create_shader_module`]. Shader modules are used to define /// programmable stages of a pipeline. -#[derive(Debug, PartialEq)] pub struct ShaderModule { - id: backend::ShaderModuleId, + context: Arc, + id: ::ShaderModuleId, +} + +impl Drop for ShaderModule { + fn drop(&mut self) { + self.context.shader_module_drop(&self.id); + } } /// An opaque handle to a pipeline layout. /// /// A `PipelineLayout` object describes the available binding groups of a pipeline. -#[derive(Debug, PartialEq)] pub struct PipelineLayout { - id: backend::PipelineLayoutId, + context: Arc, + id: ::PipelineLayoutId, +} + +impl Drop for PipelineLayout { + fn drop(&mut self) { + self.context.pipeline_layout_drop(&self.id); + } } /// A handle to a rendering (graphics) pipeline. /// /// A `RenderPipeline` object represents a graphics pipeline and its stages, bindings, vertex /// buffers and targets. A `RenderPipeline` may be created with [`Device::create_render_pipeline`]. -#[derive(Debug, PartialEq)] pub struct RenderPipeline { - id: backend::RenderPipelineId, + context: Arc, + id: ::RenderPipelineId, +} + +impl Drop for RenderPipeline { + fn drop(&mut self) { + self.context.render_pipeline_drop(&self.id); + } } /// A handle to a compute pipeline. -#[derive(Debug, PartialEq)] pub struct ComputePipeline { - id: backend::ComputePipelineId, + context: Arc, + id: ::ComputePipelineId, +} + +impl Drop for ComputePipeline { + fn drop(&mut self) { + self.context.compute_pipeline_drop(&self.id); + } } /// An opaque handle to a command buffer on the GPU. @@ -182,9 +483,17 @@ pub struct ComputePipeline { /// A `CommandBuffer` represents a complete sequence of commands that may be submitted to a command /// queue with [`Queue::submit`]. A `CommandBuffer` is obtained by recording a series of commands to /// a [`CommandEncoder`] and then calling [`CommandEncoder::finish`]. -#[derive(Debug, PartialEq)] pub struct CommandBuffer { - id: backend::CommandBufferId, + context: Arc, + id: Option<::CommandBufferId>, +} + +impl Drop for CommandBuffer { + fn drop(&mut self) { + if let Some(ref id) = self.id { + self.context.command_buffer_drop(id); + } + } } /// An object that encodes GPU operations. 
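`CommandBuffer` now stores `Option<CommandBufferId>` so that submission can take the raw id out, leaving the `Drop` impl to release only buffers that were never submitted (see `Queue::submit` later in this patch). A toy sketch of that take-on-submit idea, using made-up types rather than the crate's own:

```rust
// Hypothetical raw handle standing in for `CommandBufferId`.
struct RawCommandBuffer(&'static str);

struct CommandBuffer {
    id: Option<RawCommandBuffer>,
}

impl Drop for CommandBuffer {
    fn drop(&mut self) {
        // Only fires for buffers whose id was never taken by submission.
        if let Some(ref id) = self.id {
            println!("freeing unsubmitted command buffer {}", id.0);
        }
    }
}

fn submit<I: IntoIterator<Item = CommandBuffer>>(command_buffers: I) {
    for mut comb in command_buffers {
        // Taking the id means the Drop impl above sees `None` afterwards.
        let raw = comb.id.take().unwrap();
        println!("submitting {}", raw.0);
    }
}

fn main() {
    let submitted = CommandBuffer { id: Some(RawCommandBuffer("a")) };
    let forgotten = CommandBuffer { id: Some(RawCommandBuffer("b")) };
    submit(Some(submitted)); // id is taken, Drop stays quiet
    drop(forgotten); // prints the "freeing unsubmitted" message
}
```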
@@ -194,38 +503,35 @@ pub struct CommandBuffer { /// /// When finished recording, call [`CommandEncoder::finish`] to obtain a [`CommandBuffer`] which may /// be submitted for execution. -#[derive(Debug)] pub struct CommandEncoder { - id: backend::CommandEncoderId, + context: Arc, + id: ::CommandEncoderId, /// This type should be !Send !Sync, because it represents an allocation on this thread's /// command buffer. - _p: std::marker::PhantomData<*const u8>, + _p: PhantomData<*const u8>, } /// An in-progress recording of a render pass. -#[derive(Debug)] pub struct RenderPass<'a> { - id: backend::RenderPassEncoderId, - _parent: &'a mut CommandEncoder, + id: ::RenderPassId, + parent: &'a mut CommandEncoder, } /// An in-progress recording of a compute pass. -#[derive(Debug)] pub struct ComputePass<'a> { - id: backend::ComputePassId, - _parent: &'a mut CommandEncoder, + id: ::ComputePassId, + parent: &'a mut CommandEncoder, } /// A handle to a command queue on a device. /// /// A `Queue` executes recorded [`CommandBuffer`] objects. -#[derive(Debug, PartialEq)] pub struct Queue { - id: backend::QueueId, + context: Arc, + id: ::QueueId, } /// A resource that can be bound to a pipeline. -#[derive(Clone, Debug)] pub enum BindingResource<'a> { Buffer { buffer: &'a Buffer, @@ -236,7 +542,6 @@ pub enum BindingResource<'a> { } /// A bindable resource and the slot to bind it to. -#[derive(Clone, Debug)] pub struct Binding<'a> { pub binding: u32, pub resource: BindingResource<'a>, @@ -249,7 +554,7 @@ pub enum BindingType { /// A buffer for uniform values. /// /// Example GLSL syntax: - /// ``` + /// ```cpp,ignore /// layout(std140, binding = 0) /// uniform Globals { /// vec2 aUniform; @@ -264,7 +569,7 @@ pub enum BindingType { /// A storage buffer. /// /// Example GLSL syntax: - /// ``` + /// ```cpp,ignore /// layout (set=0, binding=0) buffer myStorageBuffer { /// vec4 myElement[]; /// }; @@ -276,7 +581,7 @@ pub enum BindingType { /// The buffer can only be read in the shader and it must be annotated with `readonly`. /// /// Example GLSL syntax: - /// ``` + /// ```cpp,ignore /// layout (set=0, binding=0) readonly buffer myStorageBuffer { /// vec4 myElement[]; /// }; @@ -286,7 +591,7 @@ pub enum BindingType { /// A sampler that can be used to sample a texture. /// /// Example GLSL syntax: - /// ``` + /// ```cpp,ignore /// layout(binding = 0) /// uniform sampler s; /// ``` @@ -298,7 +603,7 @@ pub enum BindingType { /// A texture. /// /// Example GLSL syntax: - /// ``` + /// ```cpp,ignore /// layout(binding = 0) /// uniform texture2D t; /// ``` @@ -313,7 +618,7 @@ pub enum BindingType { }, /// A storage texture. /// Example GLSL syntax: - /// ``` + /// ```cpp,ignore /// layout(set=0, binding=0, r32f) uniform image2D myStorageImage; /// ``` /// Note that the texture format must be specified in the shader as well. @@ -329,7 +634,7 @@ pub enum BindingType { /// The texture can only be read in the shader and it must be annotated with `readonly`. /// /// Example GLSL syntax: - /// ``` + /// ```cpp,ignore /// layout(set=0, binding=0, r32f) readonly uniform image2D myStorageImage; /// ``` readonly: bool, @@ -355,7 +660,7 @@ pub struct BindGroupLayoutDescriptor<'a> { } /// A description of a group of bindings and the resources to be bound. -#[derive(Clone, Debug)] +#[derive(Clone)] pub struct BindGroupDescriptor<'a> { /// The layout for this bind group. 
pub layout: &'a BindGroupLayout, @@ -372,13 +677,13 @@ pub struct BindGroupDescriptor<'a> { /// /// A `PipelineLayoutDescriptor` can be passed to [`Device::create_pipeline_layout`] to obtain a /// [`PipelineLayout`]. -#[derive(Clone, Debug)] +#[derive(Clone)] pub struct PipelineLayoutDescriptor<'a> { pub bind_group_layouts: &'a [&'a BindGroupLayout], } /// A description of a programmable pipeline stage. -#[derive(Clone, Debug)] +#[derive(Clone)] pub struct ProgrammableStageDescriptor<'a> { /// The compiled shader module for this stage. pub module: &'a ShaderModule, @@ -410,7 +715,7 @@ pub struct VertexBufferDescriptor<'a> { } /// A complete description of a render (graphics) pipeline. -#[derive(Clone, Debug)] +#[derive(Clone)] pub struct RenderPipelineDescriptor<'a> { /// The layout of bind groups for this pipeline. pub layout: &'a PipelineLayout, @@ -451,7 +756,7 @@ pub struct RenderPipelineDescriptor<'a> { } /// A complete description of a compute pipeline. -#[derive(Clone, Debug)] +#[derive(Clone)] pub struct ComputePipelineDescriptor<'a> { /// The layout of bind groups for this pipeline. pub layout: &'a PipelineLayout, @@ -466,7 +771,6 @@ pub type RenderPassDepthStencilAttachmentDescriptor<'a> = wgt::RenderPassDepthStencilAttachmentDescriptorBase<&'a TextureView>; /// A description of all the attachments of a render pass. -#[derive(Debug)] pub struct RenderPassDescriptor<'a, 'b> { /// The color attachments of the render pass. pub color_attachments: &'b [RenderPassColorAttachmentDescriptor<'a>], @@ -524,14 +828,13 @@ pub struct TextureDescriptor<'a> { } /// A swap chain image that can be rendered to. -#[derive(Debug)] pub struct SwapChainOutput { pub view: TextureView, - detail: backend::SwapChainOutputDetail, + detail: ::SwapChainOutputDetail, } /// A view of a buffer which can be used to copy to or from a texture. -#[derive(Clone, Debug)] +#[derive(Clone)] pub struct BufferCopyView<'a> { /// The buffer to be copied to or from. pub buffer: &'a Buffer, @@ -550,7 +853,7 @@ pub struct BufferCopyView<'a> { } /// A view of a texture which can be used to copy to or from a buffer or another texture. -#[derive(Clone, Debug)] +#[derive(Clone)] pub struct TextureCopyView<'a> { /// The texture to be copied to or from. pub texture: &'a Texture, @@ -567,12 +870,13 @@ pub struct TextureCopyView<'a> { /// A buffer being created, mapped in host memory. pub struct CreateBufferMapped<'a> { - id: backend::BufferId, + context: Arc, + id: ::BufferId, /// The backing field for `data()`. This isn't `pub` because users shouldn't /// be able to replace it to point somewhere else. We rely on it pointing to /// to the correct memory later during `unmap()`. mapped_data: &'a mut [u8], - detail: backend::CreateBufferMappedDetail, + detail: ::CreateBufferMappedDetail, } impl CreateBufferMapped<'_> { @@ -583,34 +887,73 @@ impl CreateBufferMapped<'_> { /// Unmaps the buffer from host memory and returns a [`Buffer`]. pub fn finish(self) -> Buffer { - backend::device_create_buffer_mapped_finish(self) + ::flush_mapped_data(self.mapped_data, self.detail); + Context::buffer_unmap(&*self.context, &self.id); + Buffer { + context: self.context, + id: self.id, + //detail: self.detail, + } } } -impl Surface { - /// Creates a surface from a raw window handle. - pub fn create(window: &W) -> Self { - Surface { - id: backend::device_create_surface(window), +impl Instance { + /// Create an new instance. 
+ pub fn new() -> Self { + Instance { + context: Arc::new(C::init()), } } - #[cfg(any(target_os = "ios", target_os = "macos"))] - pub fn create_surface_from_core_animation_layer(layer: *mut std::ffi::c_void) -> Self { + /// Retrieves all available [`Adapter`]s that match the given backends. + #[cfg(not(target_arch = "wasm32"))] + pub fn enumerate_adapters(&self, backends: wgt::BackendBit) -> impl Iterator { + let context = Arc::clone(&self.context); + self.context + .enumerate_adapters(wgc::instance::AdapterInputs::Mask(backends, || PhantomData)) + .into_iter() + .map(move |id| crate::Adapter { + id, + context: Arc::clone(&context), + }) + } + + /// Creates a surface from a raw window handle. + pub unsafe fn create_surface( + &self, + window: &W, + ) -> Surface { Surface { - id: wgn::wgpu_create_surface_from_metal_layer(layer), + id: self.context.instance_create_surface(window), } } -} -impl Adapter { - /// Retrieves all available [`Adapter`]s that match the given backends. - #[cfg(not(target_arch = "wasm32"))] - pub fn enumerate(backends: BackendBit) -> Vec { - wgn::wgpu_enumerate_adapters(backends) - .into_iter() - .map(|id| Adapter { id }) - .collect() + #[cfg(any(target_os = "ios", target_os = "macos"))] + pub unsafe fn create_surface_from_core_animation_layer( + &self, + layer: *mut std::ffi::c_void, + ) -> Surface { + let surface = wgc::instance::Surface { + #[cfg(feature = "vulkan-portability")] + vulkan: self + .context + .instance + .vulkan + .create_surface_from_layer(layer as *mut _, cfg!(debug_assertions)), + metal: self + .context + .instance + .metal + .create_surface_from_layer(layer as *mut _, cfg!(debug_assertions)), + }; + + crate::Surface { + id: self.context.surfaces.register_identity( + PhantomData, + surface, + &mut wgc::hub::Token::root(), + ), + } } /// Retrieves an [`Adapter`] which matches the given options. @@ -618,91 +961,122 @@ impl Adapter { /// Some options are "soft", so treated as non-mandatory. Others are "hard". /// /// If no adapters are found that suffice all the "hard" options, `None` is returned. - pub async fn request( + pub fn request_adapter( + &self, options: &RequestAdapterOptions<'_>, backends: BackendBit, - ) -> Option { - backend::request_adapter(options, backends) - .await - .map(|id| Adapter { id }) + ) -> impl Future> { + let context = Arc::clone(&self.context); + self.context + .instance_request_adapter(options, backends) + .map(|option| option.map(|id| Adapter { context, id })) } +} +impl Adapter { /// Requests a connection to a physical device, creating a logical device. /// Returns the device together with a queue that executes command buffers. /// /// # Panics /// /// Panics if the extensions specified by `desc` are not supported by this adapter. 
- pub async fn request_device(&self, desc: &DeviceDescriptor) -> (Device, Queue) { - let (device_id, queue_id) = backend::request_device_and_queue(&self.id, Some(desc)).await; - let device = Device { - id: device_id, - temp: Temp::default(), - }; - let queue = Queue { id: queue_id }; - (device, queue) + pub fn request_device( + &self, + desc: &DeviceDescriptor, + ) -> impl Future> { + let context = Arc::clone(&self.context); + Context::adapter_request_device(&*self.context, &self.id, desc).map(|result| { + result.map(|(device_id, queue_id)| { + ( + Device { + context: Arc::clone(&context), + id: device_id, + }, + Queue { + context, + id: queue_id, + }, + ) + }) + }) } #[cfg(not(target_arch = "wasm32"))] pub fn get_info(&self) -> AdapterInfo { - wgn::adapter_get_info(self.id) + //wgn::adapter_get_info(self.id) + unimplemented!() } } impl Device { /// Check for resource cleanups and mapping callbacks. pub fn poll(&self, maintain: Maintain) { - backend::device_poll(&self.id, maintain); + Context::device_poll(&*self.context, &self.id, maintain); } /// Creates a shader module from SPIR-V source code. pub fn create_shader_module(&self, spv: &[u32]) -> ShaderModule { ShaderModule { - id: backend::create_shader_module(&self.id, spv), + context: Arc::clone(&self.context), + id: Context::device_create_shader_module(&*self.context, &self.id, spv), } } /// Creates an empty [`CommandEncoder`]. pub fn create_command_encoder(&self, desc: &CommandEncoderDescriptor) -> CommandEncoder { CommandEncoder { - id: backend::create_command_encoder(&self.id, desc), + context: Arc::clone(&self.context), + id: Context::device_create_command_encoder(&*self.context, &self.id, desc), _p: Default::default(), } } /// Creates a new bind group. pub fn create_bind_group(&self, desc: &BindGroupDescriptor) -> BindGroup { - let id = backend::create_bind_group(&self.id, desc); - BindGroup { id } + BindGroup { + context: Arc::clone(&self.context), + id: Context::device_create_bind_group(&*self.context, &self.id, desc), + } } /// Creates a bind group layout. pub fn create_bind_group_layout(&self, desc: &BindGroupLayoutDescriptor) -> BindGroupLayout { - let id = backend::create_bind_group_layout(&self.id, desc); - BindGroupLayout { id } + BindGroupLayout { + context: Arc::clone(&self.context), + id: Context::device_create_bind_group_layout(&*self.context, &self.id, desc), + } } /// Creates a pipeline layout. pub fn create_pipeline_layout(&self, desc: &PipelineLayoutDescriptor) -> PipelineLayout { - let id = backend::create_pipeline_layout(&self.id, desc); - PipelineLayout { id } + PipelineLayout { + context: Arc::clone(&self.context), + id: Context::device_create_pipeline_layout(&*self.context, &self.id, desc), + } } /// Creates a render pipeline. pub fn create_render_pipeline(&self, desc: &RenderPipelineDescriptor) -> RenderPipeline { - let id = backend::create_render_pipeline(&self.id, desc); - RenderPipeline { id } + RenderPipeline { + context: Arc::clone(&self.context), + id: Context::device_create_render_pipeline(&*self.context, &self.id, desc), + } } /// Creates a compute pipeline. pub fn create_compute_pipeline(&self, desc: &ComputePipelineDescriptor) -> ComputePipeline { - let id = backend::create_compute_pipeline(&self.id, desc); - ComputePipeline { id } + ComputePipeline { + context: Arc::clone(&self.context), + id: Context::device_create_compute_pipeline(&*self.context, &self.id, desc), + } } /// Creates a new buffer. 
pub fn create_buffer(&self, desc: &BufferDescriptor) -> Buffer { - backend::device_create_buffer(&self.id, desc) + Buffer { + context: Arc::clone(&self.context), + id: Context::device_create_buffer(&*self.context, &self.id, desc), + } } /// Creates a new buffer and maps it into host-visible memory. @@ -711,7 +1085,14 @@ impl Device { /// will not be created until calling [`CreateBufferMapped::finish`]. pub fn create_buffer_mapped(&self, desc: &BufferDescriptor) -> CreateBufferMapped<'_> { assert_ne!(desc.size, 0); - backend::device_create_buffer_mapped(&self.id, desc) + let (id, mapped_data, detail) = + Context::device_create_buffer_mapped(&*self.context, &self.id, desc); + CreateBufferMapped { + context: Arc::clone(&self.context), + id, + mapped_data, + detail, + } } /// Creates a new buffer, maps it into host-visible memory, copies data from the given slice, @@ -731,7 +1112,8 @@ impl Device { /// `desc` specifies the general format of the texture. pub fn create_texture(&self, desc: &TextureDescriptor) -> Texture { Texture { - id: backend::device_create_texture(&self.id, desc), + context: Arc::clone(&self.context), + id: Context::device_create_texture(&*self.context, &self.id, desc), owned: true, } } @@ -741,30 +1123,35 @@ impl Device { /// `desc` specifies the behavior of the sampler. pub fn create_sampler(&self, desc: &SamplerDescriptor) -> Sampler { Sampler { - id: backend::device_create_sampler(&self.id, desc), + context: Arc::clone(&self.context), + id: Context::device_create_sampler(&*self.context, &self.id, desc), } } /// Create a new [`SwapChain`] which targets `surface`. pub fn create_swap_chain(&self, surface: &Surface, desc: &SwapChainDescriptor) -> SwapChain { SwapChain { - id: backend::device_create_swap_chain(&self.id, &surface.id, desc), + context: Arc::clone(&self.context), + id: Context::device_create_swap_chain(&*self.context, &self.id, &surface.id, desc), } } } -// TODO impl Drop for Device { fn drop(&mut self) { - backend::device_drop(&self.id); + self.context.device_drop(&self.id); } } #[derive(Clone, PartialEq, Eq, Debug)] -pub struct BufferAsyncErr; +pub struct RequestDeviceError; + +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct BufferAsyncError; pub struct BufferReadMapping { - detail: backend::BufferReadMappingDetail, + context: Arc, + detail: ::BufferReadMappingDetail, } unsafe impl Send for BufferReadMapping {} @@ -778,12 +1165,13 @@ impl BufferReadMapping { impl Drop for BufferReadMapping { fn drop(&mut self) { - backend::buffer_unmap(&self.detail.buffer_id); + Context::buffer_unmap(&*self.context, &self.detail.buffer_id); } } pub struct BufferWriteMapping { - detail: backend::BufferWriteMappingDetail, + context: Arc, + detail: ::BufferWriteMappingDetail, } unsafe impl Send for BufferWriteMapping {} @@ -797,7 +1185,7 @@ impl BufferWriteMapping { impl Drop for BufferWriteMapping { fn drop(&mut self) { - backend::buffer_unmap(&self.detail.buffer_id); + Context::buffer_unmap(&*self.context, &self.detail.buffer_id); } } @@ -814,8 +1202,11 @@ impl Buffer { &self, start: BufferAddress, size: BufferAddress, - ) -> impl Future> + '_ { - backend::buffer_map_read(self, start, size) + ) -> impl Future> { + let context = Arc::clone(&self.context); + self.context + .buffer_map_read(&self.id, start, size) + .map(|result| result.map(|detail| BufferReadMapping { context, detail })) } /// Map the buffer for writing. The result is returned in a future. 
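With the changes above, `Buffer::map_read` (and `map_write`, next hunk) returns a plain future that carries its own `Arc` to the context instead of borrowing the buffer. A hedged usage sketch of the read path; it assumes a `device` and `buffer` created elsewhere, a public `BufferReadMapping::as_slice`, and an executor such as `futures::executor::block_on` on native targets, where the mapping resolves once the device is polled:

```rust
async fn read_back(device: &wgpu::Device, buffer: &wgpu::Buffer, size: wgpu::BufferAddress) {
    // Request the mapping first; the returned future completes via a callback.
    let mapping_future = buffer.map_read(0, size);

    // On native targets, polling with `Maintain::Wait` blocks until the GPU is
    // done with the buffer and the mapping callback has fired.
    device.poll(wgpu::Maintain::Wait);

    match mapping_future.await {
        Ok(mapping) => println!("read {} mapped bytes", mapping.as_slice().len()),
        Err(_) => eprintln!("buffer mapping failed"),
    }
}

// Hypothetical driver on native targets:
// futures::executor::block_on(read_back(&device, &buffer, 256));
```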
@@ -826,19 +1217,22 @@ impl Buffer { &self, start: BufferAddress, size: BufferAddress, - ) -> impl Future> + '_ { - backend::buffer_map_write(self, start, size) + ) -> impl Future> { + let context = Arc::clone(&self.context); + self.context + .buffer_map_write(&self.id, start, size) + .map(|result| result.map(|detail| BufferWriteMapping { context, detail })) } /// Flushes any pending write operations and unmaps the buffer from host memory. pub fn unmap(&self) { - backend::buffer_unmap(&self.id); + Context::buffer_unmap(&*self.context, &self.id); } } impl Drop for Buffer { fn drop(&mut self) { - backend::buffer_drop(&self.id); + self.context.buffer_drop(&self.id); } } @@ -846,7 +1240,8 @@ impl Texture { /// Creates a view of this texture. pub fn create_view(&self, desc: &TextureViewDescriptor) -> TextureView { TextureView { - id: backend::texture_create_view(&self.id, Some(desc)), + context: Arc::clone(&self.context), + id: Context::texture_create_view(&*self.context, &self.id, Some(desc)), owned: true, } } @@ -854,7 +1249,8 @@ impl Texture { /// Creates a default view of this whole texture. pub fn create_default_view(&self) -> TextureView { TextureView { - id: backend::texture_create_view(&self.id, None), + context: Arc::clone(&self.context), + id: Context::texture_create_view(&*self.context, &self.id, None), owned: true, } } @@ -863,7 +1259,7 @@ impl Texture { impl Drop for Texture { fn drop(&mut self) { if self.owned { - backend::texture_drop(&self.id); + self.context.texture_drop(&self.id); } } } @@ -871,7 +1267,7 @@ impl Drop for Texture { impl Drop for TextureView { fn drop(&mut self) { if self.owned { - backend::texture_view_drop(&self.id); + self.context.texture_view_drop(&self.id); } } } @@ -880,7 +1276,8 @@ impl CommandEncoder { /// Finishes recording and returns a [`CommandBuffer`] that can be submitted for execution. pub fn finish(self) -> CommandBuffer { CommandBuffer { - id: backend::command_encoder_finish(&self.id), + context: Arc::clone(&self.context), + id: Some(Context::encoder_finish(&*self.context, &self.id)), } } @@ -892,8 +1289,8 @@ impl CommandEncoder { desc: &RenderPassDescriptor<'a, '_>, ) -> RenderPass<'a> { RenderPass { - id: backend::command_encoder_begin_render_pass(&self.id, desc), - _parent: self, + id: Context::encoder_begin_render_pass(&*self.context, &self.id, desc), + parent: self, } } @@ -902,8 +1299,8 @@ impl CommandEncoder { /// This function returns a [`ComputePass`] object which records a single compute pass. pub fn begin_compute_pass(&mut self) -> ComputePass { ComputePass { - id: backend::begin_compute_pass(&self.id), - _parent: self, + id: Context::encoder_begin_compute_pass(&*self.context, &self.id), + parent: self, } } @@ -916,11 +1313,12 @@ impl CommandEncoder { destination_offset: BufferAddress, copy_size: BufferAddress, ) { - backend::command_encoder_copy_buffer_to_buffer( + Context::encoder_copy_buffer_to_buffer( + &*self.context, &self.id, - source, + &source.id, source_offset, - destination, + &destination.id, destination_offset, copy_size, ); @@ -933,7 +1331,13 @@ impl CommandEncoder { destination: TextureCopyView, copy_size: Extent3d, ) { - backend::command_encoder_copy_buffer_to_texture(&self.id, source, destination, copy_size); + Context::encoder_copy_buffer_to_texture( + &*self.context, + &self.id, + source, + destination, + copy_size, + ); } /// Copy data from a texture to a buffer. 
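The encoder methods above all forward to the `Context`, and render/compute passes now end through their parent encoder when the pass object is dropped. A short, hedged sketch of how recording looks to a caller under the new API; the encoder, pipeline, and bind group are assumed to have been created elsewhere, and `Queue::submit` accepts any `IntoIterator<Item = CommandBuffer>`, e.g. `Some(command_buffer)`:

```rust
fn record_and_submit(
    queue: &wgpu::Queue,
    mut encoder: wgpu::CommandEncoder,
    pipeline: &wgpu::ComputePipeline,
    bind_group: &wgpu::BindGroup,
) {
    {
        let mut pass = encoder.begin_compute_pass();
        pass.set_pipeline(pipeline);
        pass.set_bind_group(0, bind_group, &[]);
        pass.dispatch(1, 1, 1);
        // Leaving this scope drops `pass`, which ends the compute pass
        // via `encoder_end_compute_pass` on the shared context.
    }
    queue.submit(Some(encoder.finish()));
}
```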
@@ -943,7 +1347,13 @@ impl CommandEncoder { destination: BufferCopyView, copy_size: Extent3d, ) { - backend::command_encoder_copy_texture_to_buffer(&self.id, source, destination, copy_size); + Context::encoder_copy_texture_to_buffer( + &*self.context, + &self.id, + source, + destination, + copy_size, + ); } /// Copy data from one texture to another. @@ -953,7 +1363,13 @@ impl CommandEncoder { destination: TextureCopyView, copy_size: Extent3d, ) { - backend::command_encoder_copy_texture_to_texture(&self.id, source, destination, copy_size); + Context::encoder_copy_texture_to_texture( + &*self.context, + &self.id, + source, + destination, + copy_size, + ); } } @@ -965,18 +1381,18 @@ impl<'a> RenderPass<'a> { bind_group: &'a BindGroup, offsets: &[DynamicOffset], ) { - backend::render_pass_set_bind_group(&self.id, index, &bind_group.id, offsets) + RenderPassInner::set_bind_group(&mut self.id, index, &bind_group.id, offsets) } /// Sets the active render pipeline. /// /// Subsequent draw calls will exhibit the behavior defined by `pipeline`. pub fn set_pipeline(&mut self, pipeline: &'a RenderPipeline) { - backend::render_pass_set_pipeline(&self.id, &pipeline.id) + RenderPassInner::set_pipeline(&mut self.id, &pipeline.id) } pub fn set_blend_color(&mut self, color: Color) { - backend::render_pass_set_blend_color(&self.id, color) + self.id.set_blend_color(color) } /// Sets the active index buffer. @@ -991,7 +1407,7 @@ impl<'a> RenderPass<'a> { offset: BufferAddress, size: BufferAddress, ) { - backend::render_pass_set_index_buffer(&self.id, buffer, offset, size) + RenderPassInner::set_index_buffer(&mut self.id, &buffer.id, offset, size) } /// Assign a vertex buffer to a slot. @@ -1015,35 +1431,35 @@ impl<'a> RenderPass<'a> { offset: BufferAddress, size: BufferAddress, ) { - backend::render_pass_set_vertex_buffer(&self.id, slot, buffer, offset, size) + RenderPassInner::set_vertex_buffer(&mut self.id, slot, &buffer.id, offset, size) } /// Sets the scissor region. /// /// Subsequent draw calls will discard any fragments that fall outside this region. pub fn set_scissor_rect(&mut self, x: u32, y: u32, width: u32, height: u32) { - backend::render_pass_set_scissor_rect(&self.id, x, y, width, height); + self.id.set_scissor_rect(x, y, width, height); } /// Sets the viewport region. /// /// Subsequent draw calls will draw any fragments in this region. pub fn set_viewport(&mut self, x: f32, y: f32, w: f32, h: f32, min_depth: f32, max_depth: f32) { - backend::render_pass_set_viewport(&self.id, x, y, w, h, min_depth, max_depth); + self.id.set_viewport(x, y, w, h, min_depth, max_depth); } /// Sets the stencil reference. /// /// Subsequent stencil tests will test against this value. pub fn set_stencil_reference(&mut self, reference: u32) { - backend::render_pass_set_stencil_reference(&self.id, reference); + self.id.set_stencil_reference(reference); } /// Draws primitives from the active vertex buffer(s). /// /// The active vertex buffers can be set with [`RenderPass::set_vertex_buffer`]. pub fn draw(&mut self, vertices: Range, instances: Range) { - backend::render_pass_draw(&self.id, vertices, instances) + RenderPassInner::draw(&mut self.id, vertices, instances) } /// Draws indexed primitives using the active index buffer and the active vertex buffers. @@ -1051,7 +1467,7 @@ impl<'a> RenderPass<'a> { /// The active index buffer can be set with [`RenderPass::set_index_buffer`], while the active /// vertex buffers can be set with [`RenderPass::set_vertex_buffer`]. 
pub fn draw_indexed(&mut self, indices: Range, base_vertex: i32, instances: Range) { - backend::render_pass_draw_indexed(&self.id, indices, base_vertex, instances); + RenderPassInner::draw_indexed(&mut self.id, indices, base_vertex, instances); } /// Draws primitives from the active vertex buffer(s) based on the contents of the `indirect_buffer`. @@ -1070,7 +1486,7 @@ impl<'a> RenderPass<'a> { /// } /// ``` pub fn draw_indirect(&mut self, indirect_buffer: &'a Buffer, indirect_offset: BufferAddress) { - backend::render_pass_draw_indirect(&self.id, indirect_buffer, indirect_offset); + self.id.draw_indirect(&indirect_buffer.id, indirect_offset); } /// Draws indexed primitives using the active index buffer and the active vertex buffers, @@ -1096,14 +1512,17 @@ impl<'a> RenderPass<'a> { indirect_buffer: &'a Buffer, indirect_offset: BufferAddress, ) { - backend::render_pass_draw_indexed_indirect(&self.id, indirect_buffer, indirect_offset); + self.id + .draw_indexed_indirect(&indirect_buffer.id, indirect_offset); } } impl<'a> Drop for RenderPass<'a> { fn drop(&mut self) { if !thread::panicking() { - backend::render_pass_end_pass(&self.id); + self.parent + .context + .encoder_end_render_pass(&self.parent.id, &mut self.id); } } } @@ -1116,19 +1535,19 @@ impl<'a> ComputePass<'a> { bind_group: &'a BindGroup, offsets: &[DynamicOffset], ) { - backend::compute_pass_set_bind_group(&self.id, index, &bind_group.id, offsets); + ComputePassInner::set_bind_group(&mut self.id, index, &bind_group.id, offsets); } /// Sets the active compute pipeline. pub fn set_pipeline(&mut self, pipeline: &'a ComputePipeline) { - backend::compute_pass_set_pipeline(&self.id, &pipeline.id); + ComputePassInner::set_pipeline(&mut self.id, &pipeline.id); } /// Dispatches compute work operations. /// /// `x`, `y` and `z` denote the number of work groups to dispatch in each dimension. pub fn dispatch(&mut self, x: u32, y: u32, z: u32) { - backend::compute_pass_dispatch(&self.id, x, y, z); + ComputePassInner::dispatch(&mut self.id, x, y, z); } /// Dispatches compute work operations, based on the contents of the `indirect_buffer`. @@ -1137,29 +1556,37 @@ impl<'a> ComputePass<'a> { indirect_buffer: &'a Buffer, indirect_offset: BufferAddress, ) { - backend::compute_pass_dispatch_indirect(&self.id, &indirect_buffer.id, indirect_offset); + ComputePassInner::dispatch_indirect(&mut self.id, &indirect_buffer.id, indirect_offset); } } impl<'a> Drop for ComputePass<'a> { fn drop(&mut self) { if !thread::panicking() { - backend::compute_pass_end_pass(&self.id); + self.parent + .context + .encoder_end_compute_pass(&self.parent.id, &mut self.id); } } } impl Queue { /// Submits a series of finished command buffers for execution. - pub fn submit(&self, command_buffers: &[CommandBuffer]) { - backend::queue_submit(&self.id, command_buffers); + pub fn submit>(&self, command_buffers: I) { + Context::queue_submit( + &*self.context, + &self.id, + command_buffers + .into_iter() + .map(|mut comb| comb.id.take().unwrap()), + ); } } impl Drop for SwapChainOutput { fn drop(&mut self) { if !thread::panicking() { - backend::swap_chain_present(&self); + Context::swap_chain_present(&*self.view.context, &self.view.id, &self.detail); } } } @@ -1175,6 +1602,15 @@ impl SwapChain { /// When the [`SwapChainOutput`] returned by this method is dropped, the swapchain will present /// the texture to the associated [`Surface`]. 
    pub fn get_next_texture(&mut self) -> Result<SwapChainOutput, TimeOut> {
-        backend::swap_chain_get_next_texture(&self.id)
+        Context::swap_chain_get_next_texture(&*self.context, &self.id).map(|(id, detail)| {
+            SwapChainOutput {
+                view: TextureView {
+                    context: Arc::clone(&self.context),
+                    id,
+                    owned: false,
+                },
+                detail,
+            }
+        })
     }
 }
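Taken together, acquiring and presenting a frame under the reworked swap-chain API looks roughly like the sketch below. It is illustrative only: the `SwapChain`, `Queue`, and pre-created `CommandEncoder` are assumed to exist, and `wgpu::Color::GREEN` is an arbitrary clear color. Presentation happens implicitly when the `SwapChainOutput` is dropped.

```rust
fn render_frame(
    swap_chain: &mut wgpu::SwapChain,
    mut encoder: wgpu::CommandEncoder,
    queue: &wgpu::Queue,
) {
    let frame = swap_chain
        .get_next_texture()
        .expect("Timeout when acquiring next swap chain texture");

    {
        // Clear the frame; a real renderer would also set a pipeline and draw.
        let _pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
            color_attachments: &[wgpu::RenderPassColorAttachmentDescriptor {
                attachment: &frame.view,
                resolve_target: None,
                load_op: wgpu::LoadOp::Clear,
                store_op: wgpu::StoreOp::Store,
                clear_color: wgpu::Color::GREEN,
            }],
            depth_stencil_attachment: None,
        });
        // `_pass` drops here, ending the render pass.
    }

    queue.submit(Some(encoder.finish()));
    // Dropping `frame` presents the acquired texture to the surface.
}
```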