author     Daniel Baumann <daniel.baumann@progress-linux.org>    2024-04-19 00:47:55 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>    2024-04-19 00:47:55 +0000
commit     26a029d407be480d791972afb5975cf62c9360a6 (patch)
tree       f435a8308119effd964b339f76abb83a57c29483 /gfx/wgpu_bindings/src
parent     Initial commit. (diff)
Adding upstream version 124.0.1. (upstream/124.0.1)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'gfx/wgpu_bindings/src')
-rw-r--r--   gfx/wgpu_bindings/src/client.rs     1361
-rw-r--r--   gfx/wgpu_bindings/src/error.rs       662
-rw-r--r--   gfx/wgpu_bindings/src/identity.rs      3
-rw-r--r--   gfx/wgpu_bindings/src/lib.rs         233
-rw-r--r--   gfx/wgpu_bindings/src/server.rs     1345
5 files changed, 3604 insertions, 0 deletions
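These bindings split WebGPU between Firefox's content-process client (client.rs) and the GPU-process server (server.rs): the client allocates IDs from its IdentityManagers, encodes each action (DeviceAction, TextureAction, QueueWriteAction, ...) into a ByteBuf with bincode, and the server decodes the buffer and replays the action against wgpu_core. Below is a minimal, self-contained sketch of that encode/decode round-trip; the Action enum and the plain Vec<u8> buffer are hypothetical stand-ins for the real DeviceAction and ByteBuf types.

    use serde::{Deserialize, Serialize};

    // Hypothetical stand-in for the DeviceAction enum defined in lib.rs.
    #[derive(Serialize, Deserialize, Debug, PartialEq)]
    enum Action {
        CreateBuffer { id: u64, size: u64 },
        DestroyBuffer { id: u64 },
    }

    // Client side: serialize an action into a byte buffer
    // (client.rs does the same through make_byte_buf / bincode::serialize).
    fn encode(action: &Action) -> Vec<u8> {
        bincode::serialize(action).unwrap()
    }

    // Server side: decode the buffer and recover the action
    // (server.rs deserializes the ByteBuf the same way before replaying it).
    fn decode(bytes: &[u8]) -> Action {
        bincode::deserialize(bytes).unwrap()
    }

    fn main() {
        let action = Action::CreateBuffer { id: 1, size: 256 };
        let bytes = encode(&action);
        assert_eq!(decode(&bytes), action);
    }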
diff --git a/gfx/wgpu_bindings/src/client.rs b/gfx/wgpu_bindings/src/client.rs new file mode 100644 index 0000000000..bf53a8c871 --- /dev/null +++ b/gfx/wgpu_bindings/src/client.rs @@ -0,0 +1,1361 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +use crate::{ + cow_label, error::HasErrorBufferType, wgpu_string, AdapterInformation, ByteBuf, + CommandEncoderAction, DeviceAction, DropAction, ImageDataLayout, ImplicitLayout, + QueueWriteAction, RawString, TextureAction, +}; + +use crate::SwapChainId; + +use wgc::{id, identity::IdentityManager}; +use wgt::{Backend, TextureFormat}; + +pub use wgc::command::{compute_ffi::*, render_ffi::*}; +use wgc::id::markers; + +use parking_lot::Mutex; + +use nsstring::{nsACString, nsString}; + +use std::{borrow::Cow, ptr}; + +// we can't call `from_raw_parts` unconditionally because the caller +// may not even have a valid pointer (e.g. NULL) if the `length` is zero. +fn make_slice<'a, T>(pointer: *const T, length: usize) -> &'a [T] { + if length == 0 { + &[] + } else { + unsafe { std::slice::from_raw_parts(pointer, length) } + } +} + +fn make_byte_buf<T: serde::Serialize>(data: &T) -> ByteBuf { + let vec = bincode::serialize(data).unwrap(); + ByteBuf::from_vec(vec) +} + +#[repr(C)] +pub struct ProgrammableStageDescriptor { + module: id::ShaderModuleId, + entry_point: RawString, +} + +impl ProgrammableStageDescriptor { + fn to_wgpu(&self) -> wgc::pipeline::ProgrammableStageDescriptor { + wgc::pipeline::ProgrammableStageDescriptor { + module: self.module, + entry_point: cow_label(&self.entry_point).unwrap(), + } + } +} + +#[repr(C)] +pub struct ComputePipelineDescriptor<'a> { + label: Option<&'a nsACString>, + layout: Option<id::PipelineLayoutId>, + stage: ProgrammableStageDescriptor, +} + +#[repr(C)] +pub struct VertexBufferLayout { + array_stride: wgt::BufferAddress, + step_mode: wgt::VertexStepMode, + attributes: *const wgt::VertexAttribute, + attributes_length: usize, +} + +#[repr(C)] +pub struct VertexState { + stage: ProgrammableStageDescriptor, + buffers: *const VertexBufferLayout, + buffers_length: usize, +} + +impl VertexState { + fn to_wgpu(&self) -> wgc::pipeline::VertexState { + let buffer_layouts = make_slice(self.buffers, self.buffers_length) + .iter() + .map(|vb| wgc::pipeline::VertexBufferLayout { + array_stride: vb.array_stride, + step_mode: vb.step_mode, + attributes: Cow::Borrowed(make_slice(vb.attributes, vb.attributes_length)), + }) + .collect(); + wgc::pipeline::VertexState { + stage: self.stage.to_wgpu(), + buffers: Cow::Owned(buffer_layouts), + } + } +} + +#[repr(C)] +pub struct ColorTargetState<'a> { + format: wgt::TextureFormat, + blend: Option<&'a wgt::BlendState>, + write_mask: wgt::ColorWrites, +} + +#[repr(C)] +pub struct FragmentState<'a> { + stage: ProgrammableStageDescriptor, + targets: *const ColorTargetState<'a>, + targets_length: usize, +} + +impl FragmentState<'_> { + fn to_wgpu(&self) -> wgc::pipeline::FragmentState { + let color_targets = make_slice(self.targets, self.targets_length) + .iter() + .map(|ct| { + Some(wgt::ColorTargetState { + format: ct.format, + blend: ct.blend.cloned(), + write_mask: ct.write_mask, + }) + }) + .collect(); + wgc::pipeline::FragmentState { + stage: self.stage.to_wgpu(), + targets: Cow::Owned(color_targets), + } + } +} + +#[repr(C)] +pub struct PrimitiveState<'a> { + topology: wgt::PrimitiveTopology, + strip_index_format: 
Option<&'a wgt::IndexFormat>, + front_face: wgt::FrontFace, + cull_mode: Option<&'a wgt::Face>, + polygon_mode: wgt::PolygonMode, + unclipped_depth: bool, +} + +impl PrimitiveState<'_> { + fn to_wgpu(&self) -> wgt::PrimitiveState { + wgt::PrimitiveState { + topology: self.topology, + strip_index_format: self.strip_index_format.cloned(), + front_face: self.front_face.clone(), + cull_mode: self.cull_mode.cloned(), + polygon_mode: self.polygon_mode, + unclipped_depth: self.unclipped_depth, + conservative: false, + } + } +} + +#[repr(C)] +pub struct RenderPipelineDescriptor<'a> { + label: Option<&'a nsACString>, + layout: Option<id::PipelineLayoutId>, + vertex: &'a VertexState, + primitive: PrimitiveState<'a>, + fragment: Option<&'a FragmentState<'a>>, + depth_stencil: Option<&'a wgt::DepthStencilState>, + multisample: wgt::MultisampleState, +} + +#[repr(C)] +pub enum RawTextureSampleType { + Float, + UnfilterableFloat, + Uint, + Sint, + Depth, +} + +#[repr(C)] +pub enum RawBindingType { + UniformBuffer, + StorageBuffer, + ReadonlyStorageBuffer, + Sampler, + SampledTexture, + ReadonlyStorageTexture, + WriteonlyStorageTexture, +} + +#[repr(C)] +pub struct BindGroupLayoutEntry<'a> { + binding: u32, + visibility: wgt::ShaderStages, + ty: RawBindingType, + has_dynamic_offset: bool, + min_binding_size: Option<wgt::BufferSize>, + view_dimension: Option<&'a wgt::TextureViewDimension>, + texture_sample_type: Option<&'a RawTextureSampleType>, + multisampled: bool, + storage_texture_format: Option<&'a wgt::TextureFormat>, + sampler_filter: bool, + sampler_compare: bool, +} + +#[repr(C)] +pub struct BindGroupLayoutDescriptor<'a> { + label: Option<&'a nsACString>, + entries: *const BindGroupLayoutEntry<'a>, + entries_length: usize, +} + +#[repr(C)] +#[derive(Debug)] +pub struct BindGroupEntry { + binding: u32, + buffer: Option<id::BufferId>, + offset: wgt::BufferAddress, + size: Option<wgt::BufferSize>, + sampler: Option<id::SamplerId>, + texture_view: Option<id::TextureViewId>, +} + +#[repr(C)] +pub struct BindGroupDescriptor<'a> { + label: Option<&'a nsACString>, + layout: id::BindGroupLayoutId, + entries: *const BindGroupEntry, + entries_length: usize, +} + +#[repr(C)] +pub struct PipelineLayoutDescriptor<'a> { + label: Option<&'a nsACString>, + bind_group_layouts: *const id::BindGroupLayoutId, + bind_group_layouts_length: usize, +} + +#[repr(C)] +pub struct SamplerDescriptor<'a> { + label: Option<&'a nsACString>, + address_modes: [wgt::AddressMode; 3], + mag_filter: wgt::FilterMode, + min_filter: wgt::FilterMode, + mipmap_filter: wgt::FilterMode, + lod_min_clamp: f32, + lod_max_clamp: f32, + compare: Option<&'a wgt::CompareFunction>, + anisotropy_clamp: Option<&'a u16>, +} + +#[repr(C)] +pub struct TextureViewDescriptor<'a> { + label: Option<&'a nsACString>, + format: Option<&'a wgt::TextureFormat>, + dimension: Option<&'a wgt::TextureViewDimension>, + aspect: wgt::TextureAspect, + base_mip_level: u32, + mip_level_count: Option<&'a u32>, + base_array_layer: u32, + array_layer_count: Option<&'a u32>, +} + +#[repr(C)] +pub struct RenderBundleEncoderDescriptor<'a> { + label: Option<&'a nsACString>, + color_formats: *const wgt::TextureFormat, + color_formats_length: usize, + depth_stencil_format: Option<&'a wgt::TextureFormat>, + depth_read_only: bool, + stencil_read_only: bool, + sample_count: u32, +} + +#[derive(Debug)] +struct IdentityHub { + adapters: IdentityManager<markers::Adapter>, + devices: IdentityManager<markers::Device>, + buffers: IdentityManager<markers::Buffer>, + command_buffers: 
IdentityManager<markers::CommandBuffer>, + render_bundles: IdentityManager<markers::RenderBundle>, + bind_group_layouts: IdentityManager<markers::BindGroupLayout>, + pipeline_layouts: IdentityManager<markers::PipelineLayout>, + bind_groups: IdentityManager<markers::BindGroup>, + shader_modules: IdentityManager<markers::ShaderModule>, + compute_pipelines: IdentityManager<markers::ComputePipeline>, + render_pipelines: IdentityManager<markers::RenderPipeline>, + textures: IdentityManager<markers::Texture>, + texture_views: IdentityManager<markers::TextureView>, + samplers: IdentityManager<markers::Sampler>, +} + +impl Default for IdentityHub { + fn default() -> Self { + IdentityHub { + adapters: IdentityManager::new(), + devices: IdentityManager::new(), + buffers: IdentityManager::new(), + command_buffers: IdentityManager::new(), + render_bundles: IdentityManager::new(), + bind_group_layouts: IdentityManager::new(), + pipeline_layouts: IdentityManager::new(), + bind_groups: IdentityManager::new(), + shader_modules: IdentityManager::new(), + compute_pipelines: IdentityManager::new(), + render_pipelines: IdentityManager::new(), + textures: IdentityManager::new(), + texture_views: IdentityManager::new(), + samplers: IdentityManager::new(), + } + } +} + +impl ImplicitLayout<'_> { + fn new(identities: &mut IdentityHub, backend: Backend) -> Self { + ImplicitLayout { + pipeline: identities.pipeline_layouts.process(backend), + bind_groups: Cow::Owned( + (0..8) // hal::MAX_BIND_GROUPS + .map(|_| Some(identities.bind_group_layouts.process(backend))) + .collect(), + ), + } + } +} + +#[derive(Debug, Default)] +struct Identities { + vulkan: IdentityHub, + #[cfg(any(target_os = "ios", target_os = "macos"))] + metal: IdentityHub, + #[cfg(windows)] + dx12: IdentityHub, +} + +impl Identities { + fn select(&mut self, backend: Backend) -> &mut IdentityHub { + match backend { + Backend::Vulkan => &mut self.vulkan, + #[cfg(any(target_os = "ios", target_os = "macos"))] + Backend::Metal => &mut self.metal, + #[cfg(windows)] + Backend::Dx12 => &mut self.dx12, + _ => panic!("Unexpected backend: {:?}", backend), + } + } +} + +#[derive(Debug)] +pub struct Client { + identities: Mutex<Identities>, +} + +#[no_mangle] +pub unsafe extern "C" fn wgpu_client_drop_action(client: &mut Client, byte_buf: &ByteBuf) { + let mut cursor = std::io::Cursor::new(byte_buf.as_slice()); + let mut identities = client.identities.lock(); + while let Ok(action) = bincode::deserialize_from(&mut cursor) { + match action { + DropAction::Adapter(id) => identities.select(id.backend()).adapters.free(id), + DropAction::Device(id) => identities.select(id.backend()).devices.free(id), + DropAction::ShaderModule(id) => identities.select(id.backend()).shader_modules.free(id), + DropAction::PipelineLayout(id) => { + identities.select(id.backend()).pipeline_layouts.free(id) + } + DropAction::BindGroupLayout(id) => { + identities.select(id.backend()).bind_group_layouts.free(id) + } + DropAction::BindGroup(id) => identities.select(id.backend()).bind_groups.free(id), + DropAction::CommandBuffer(id) => { + identities.select(id.backend()).command_buffers.free(id) + } + DropAction::RenderBundle(id) => identities.select(id.backend()).render_bundles.free(id), + DropAction::RenderPipeline(id) => { + identities.select(id.backend()).render_pipelines.free(id) + } + DropAction::ComputePipeline(id) => { + identities.select(id.backend()).compute_pipelines.free(id) + } + DropAction::Buffer(id) => identities.select(id.backend()).buffers.free(id), + DropAction::Texture(id) 
=> identities.select(id.backend()).textures.free(id), + DropAction::TextureView(id) => identities.select(id.backend()).texture_views.free(id), + DropAction::Sampler(id) => identities.select(id.backend()).samplers.free(id), + } + } +} + +#[no_mangle] +pub extern "C" fn wgpu_client_kill_device_id(client: &Client, id: id::DeviceId) { + client + .identities + .lock() + .select(id.backend()) + .devices + .free(id) +} + +#[repr(C)] +#[derive(Debug)] +pub struct Infrastructure { + pub client: *mut Client, + pub error: *const u8, +} + +#[no_mangle] +pub extern "C" fn wgpu_client_new() -> Infrastructure { + log::info!("Initializing WGPU client"); + let client = Box::new(Client { + identities: Mutex::new(Identities::default()), + }); + Infrastructure { + client: Box::into_raw(client), + error: ptr::null(), + } +} + +/// # Safety +/// +/// This function is unsafe because improper use may lead to memory +/// problems. For example, a double-free may occur if the function is called +/// twice on the same raw pointer. +#[no_mangle] +pub unsafe extern "C" fn wgpu_client_delete(client: *mut Client) { + log::info!("Terminating WGPU client"); + let _client = Box::from_raw(client); +} + +/// # Safety +/// +/// This function is unsafe as there is no guarantee that the given pointer is +/// valid for `id_length` elements. +#[no_mangle] +pub unsafe extern "C" fn wgpu_client_make_adapter_ids( + client: &Client, + ids: *mut id::AdapterId, + id_length: usize, +) -> usize { + let identities = client.identities.lock(); + assert_ne!(id_length, 0); + let mut ids = std::slice::from_raw_parts_mut(ids, id_length).iter_mut(); + + *ids.next().unwrap() = identities.vulkan.adapters.process(Backend::Vulkan); + + #[cfg(any(target_os = "ios", target_os = "macos"))] + { + *ids.next().unwrap() = identities.metal.adapters.process(Backend::Metal); + } + #[cfg(windows)] + { + *ids.next().unwrap() = identities.dx12.adapters.process(Backend::Dx12); + } + + id_length - ids.len() +} + +#[no_mangle] +pub extern "C" fn wgpu_client_fill_default_limits(limits: &mut wgt::Limits) { + *limits = wgt::Limits::default(); +} + +#[no_mangle] +pub extern "C" fn wgpu_client_adapter_extract_info( + byte_buf: &ByteBuf, + info: &mut AdapterInformation<nsString>, +) { + let AdapterInformation { + backend, + device_type, + device, + driver_info, + driver, + features, + id, + limits, + name, + vendor, + } = bincode::deserialize::<AdapterInformation<String>>(unsafe { byte_buf.as_slice() }).unwrap(); + + let nss = |s: &str| { + let mut ns_string = nsString::new(); + ns_string.assign_str(s); + ns_string + }; + *info = AdapterInformation { + backend, + device_type, + device, + driver_info: nss(&driver_info), + driver: nss(&driver), + features, + id, + limits, + name: nss(&name), + vendor, + }; +} + +#[no_mangle] +pub extern "C" fn wgpu_client_serialize_device_descriptor( + desc: &wgt::DeviceDescriptor<Option<&nsACString>>, + bb: &mut ByteBuf, +) { + let label = wgpu_string(desc.label); + *bb = make_byte_buf(&desc.map_label(|_| label)); +} + +#[no_mangle] +pub extern "C" fn wgpu_client_make_device_id( + client: &Client, + adapter_id: id::AdapterId, +) -> id::DeviceId { + let backend = adapter_id.backend(); + client + .identities + .lock() + .select(backend) + .devices + .process(backend) +} + +#[no_mangle] +pub extern "C" fn wgpu_client_make_buffer_id( + client: &Client, + device_id: id::DeviceId, +) -> id::BufferId { + let backend = device_id.backend(); + client + .identities + .lock() + .select(backend) + .buffers + .process(backend) +} + +#[no_mangle] +pub 
extern "C" fn wgpu_client_create_texture( + client: &Client, + device_id: id::DeviceId, + desc: &wgt::TextureDescriptor<Option<&nsACString>, crate::FfiSlice<TextureFormat>>, + swap_chain_id: Option<&SwapChainId>, + bb: &mut ByteBuf, +) -> id::TextureId { + let label = wgpu_string(desc.label); + + let backend = device_id.backend(); + let id = client + .identities + .lock() + .select(backend) + .textures + .process(backend); + + let view_formats = unsafe { desc.view_formats.as_slice() }.to_vec(); + + let action = DeviceAction::CreateTexture( + id, + desc.map_label_and_view_formats(|_| label, |_| view_formats), + swap_chain_id.copied(), + ); + *bb = make_byte_buf(&action); + + id +} + +#[no_mangle] +pub extern "C" fn wgpu_client_create_texture_view( + client: &Client, + device_id: id::DeviceId, + desc: &TextureViewDescriptor, + bb: &mut ByteBuf, +) -> id::TextureViewId { + let label = wgpu_string(desc.label); + + let backend = device_id.backend(); + let id = client + .identities + .lock() + .select(backend) + .texture_views + .process(backend); + + let wgpu_desc = wgc::resource::TextureViewDescriptor { + label, + format: desc.format.cloned(), + dimension: desc.dimension.cloned(), + range: wgt::ImageSubresourceRange { + aspect: desc.aspect, + base_mip_level: desc.base_mip_level, + mip_level_count: desc.mip_level_count.map(|ptr| *ptr), + base_array_layer: desc.base_array_layer, + array_layer_count: desc.array_layer_count.map(|ptr| *ptr), + }, + }; + + let action = TextureAction::CreateView(id, wgpu_desc); + *bb = make_byte_buf(&action); + id +} + +#[no_mangle] +pub extern "C" fn wgpu_client_create_sampler( + client: &Client, + device_id: id::DeviceId, + desc: &SamplerDescriptor, + bb: &mut ByteBuf, +) -> id::SamplerId { + let label = wgpu_string(desc.label); + + let backend = device_id.backend(); + let id = client + .identities + .lock() + .select(backend) + .samplers + .process(backend); + + let wgpu_desc = wgc::resource::SamplerDescriptor { + label, + address_modes: desc.address_modes, + mag_filter: desc.mag_filter, + min_filter: desc.min_filter, + mipmap_filter: desc.mipmap_filter, + lod_min_clamp: desc.lod_min_clamp, + lod_max_clamp: desc.lod_max_clamp, + compare: desc.compare.cloned(), + anisotropy_clamp: *desc.anisotropy_clamp.unwrap_or(&1), + border_color: None, + }; + let action = DeviceAction::CreateSampler(id, wgpu_desc); + *bb = make_byte_buf(&action); + id +} + +#[no_mangle] +pub extern "C" fn wgpu_client_make_encoder_id( + client: &Client, + device_id: id::DeviceId, +) -> id::CommandEncoderId { + let backend = device_id.backend(); + client + .identities + .lock() + .select(backend) + .command_buffers + .process(backend) + .transmute() +} + +#[no_mangle] +pub extern "C" fn wgpu_client_create_command_encoder( + client: &Client, + device_id: id::DeviceId, + desc: &wgt::CommandEncoderDescriptor<Option<&nsACString>>, + bb: &mut ByteBuf, +) -> id::CommandEncoderId { + let label = wgpu_string(desc.label); + + let backend = device_id.backend(); + let id = client + .identities + .lock() + .select(backend) + .command_buffers + .process(backend) + .transmute(); + + let action = DeviceAction::CreateCommandEncoder(id, desc.map_label(|_| label)); + *bb = make_byte_buf(&action); + id +} + +#[no_mangle] +pub extern "C" fn wgpu_device_create_render_bundle_encoder( + device_id: id::DeviceId, + desc: &RenderBundleEncoderDescriptor, + bb: &mut ByteBuf, +) -> *mut wgc::command::RenderBundleEncoder { + let label = wgpu_string(desc.label); + + let color_formats: Vec<_> = make_slice(desc.color_formats, 
desc.color_formats_length) + .iter() + .map(|format| Some(format.clone())) + .collect(); + let descriptor = wgc::command::RenderBundleEncoderDescriptor { + label, + color_formats: Cow::Owned(color_formats), + depth_stencil: desc + .depth_stencil_format + .map(|&format| wgt::RenderBundleDepthStencil { + format, + depth_read_only: desc.depth_read_only, + stencil_read_only: desc.stencil_read_only, + }), + sample_count: desc.sample_count, + multiview: None, + }; + match wgc::command::RenderBundleEncoder::new(&descriptor, device_id, None) { + Ok(encoder) => Box::into_raw(Box::new(encoder)), + Err(e) => { + let message = format!("Error in `Device::create_render_bundle_encoder`: {}", e); + let action = DeviceAction::Error { + message, + r#type: e.error_type(), + }; + *bb = make_byte_buf(&action); + ptr::null_mut() + } + } +} + +#[no_mangle] +pub unsafe extern "C" fn wgpu_render_bundle_encoder_destroy( + pass: *mut wgc::command::RenderBundleEncoder, +) { + // The RB encoder is just a boxed Rust struct, it doesn't have any API primitives + // associated with it right now, but in the future it will. + let _ = Box::from_raw(pass); +} + +#[no_mangle] +pub unsafe extern "C" fn wgpu_client_create_render_bundle( + client: &Client, + encoder: *mut wgc::command::RenderBundleEncoder, + device_id: id::DeviceId, + desc: &wgt::RenderBundleDescriptor<Option<&nsACString>>, + bb: &mut ByteBuf, +) -> id::RenderBundleId { + let label = wgpu_string(desc.label); + + let backend = device_id.backend(); + let id = client + .identities + .lock() + .select(backend) + .render_bundles + .process(backend); + + let action = + DeviceAction::CreateRenderBundle(id, *Box::from_raw(encoder), desc.map_label(|_| label)); + *bb = make_byte_buf(&action); + id +} + +#[no_mangle] +pub unsafe extern "C" fn wgpu_client_create_render_bundle_error( + client: &Client, + device_id: id::DeviceId, + label: Option<&nsACString>, + bb: &mut ByteBuf, +) -> id::RenderBundleId { + let label = wgpu_string(label); + + let backend = device_id.backend(); + let id = client + .identities + .lock() + .select(backend) + .render_bundles + .process(backend); + + let action = DeviceAction::CreateRenderBundleError(id, label); + *bb = make_byte_buf(&action); + id +} + +#[repr(C)] +pub struct ComputePassDescriptor<'a> { + pub label: Option<&'a nsACString>, + pub timestamp_writes: Option<&'a ComputePassTimestampWrites<'a>>, +} + +#[repr(C)] +pub struct ComputePassTimestampWrites<'a> { + pub query_set: id::QuerySetId, + pub beginning_of_pass_write_index: Option<&'a u32>, + pub end_of_pass_write_index: Option<&'a u32>, +} + +#[no_mangle] +pub unsafe extern "C" fn wgpu_command_encoder_begin_compute_pass( + encoder_id: id::CommandEncoderId, + desc: &ComputePassDescriptor, +) -> *mut wgc::command::ComputePass { + let &ComputePassDescriptor { + label, + timestamp_writes, + } = desc; + + let label = wgpu_string(label); + + let timestamp_writes = timestamp_writes.map(|tsw| { + let &ComputePassTimestampWrites { + query_set, + beginning_of_pass_write_index, + end_of_pass_write_index, + } = tsw; + let beginning_of_pass_write_index = beginning_of_pass_write_index.cloned(); + let end_of_pass_write_index = end_of_pass_write_index.cloned(); + wgc::command::ComputePassTimestampWrites { + query_set, + beginning_of_pass_write_index, + end_of_pass_write_index, + } + }); + let timestamp_writes = timestamp_writes.as_ref(); + + let pass = wgc::command::ComputePass::new( + encoder_id, + &wgc::command::ComputePassDescriptor { + label, + timestamp_writes, + }, + ); + 
Box::into_raw(Box::new(pass)) +} + +#[no_mangle] +pub unsafe extern "C" fn wgpu_compute_pass_finish( + pass: *mut wgc::command::ComputePass, + output: &mut ByteBuf, +) { + let command = Box::from_raw(pass).into_command(); + *output = make_byte_buf(&command); +} + +#[no_mangle] +pub unsafe extern "C" fn wgpu_compute_pass_destroy(pass: *mut wgc::command::ComputePass) { + let _ = Box::from_raw(pass); +} + +#[repr(C)] +pub struct RenderPassDescriptor<'a> { + pub label: Option<&'a nsACString>, + pub color_attachments: *const wgc::command::RenderPassColorAttachment, + pub color_attachments_length: usize, + pub depth_stencil_attachment: *const wgc::command::RenderPassDepthStencilAttachment, + pub timestamp_writes: Option<&'a RenderPassTimestampWrites<'a>>, + pub occlusion_query_set: Option<wgc::id::QuerySetId>, +} + +#[repr(C)] +pub struct RenderPassTimestampWrites<'a> { + pub query_set: wgc::id::QuerySetId, + pub beginning_of_pass_write_index: Option<&'a u32>, + pub end_of_pass_write_index: Option<&'a u32>, +} + +#[no_mangle] +pub unsafe extern "C" fn wgpu_command_encoder_begin_render_pass( + encoder_id: id::CommandEncoderId, + desc: &RenderPassDescriptor, +) -> *mut wgc::command::RenderPass { + let &RenderPassDescriptor { + label, + color_attachments, + color_attachments_length, + depth_stencil_attachment, + timestamp_writes, + occlusion_query_set, + } = desc; + + let label = wgpu_string(label); + + let timestamp_writes = timestamp_writes.map(|tsw| { + let &RenderPassTimestampWrites { + query_set, + beginning_of_pass_write_index, + end_of_pass_write_index, + } = tsw; + let beginning_of_pass_write_index = beginning_of_pass_write_index.cloned(); + let end_of_pass_write_index = end_of_pass_write_index.cloned(); + wgc::command::RenderPassTimestampWrites { + query_set, + beginning_of_pass_write_index, + end_of_pass_write_index, + } + }); + + let timestamp_writes = timestamp_writes.as_ref(); + + let color_attachments: Vec<_> = make_slice(color_attachments, color_attachments_length) + .iter() + .map(|format| Some(format.clone())) + .collect(); + let pass = wgc::command::RenderPass::new( + encoder_id, + &wgc::command::RenderPassDescriptor { + label, + color_attachments: Cow::Owned(color_attachments), + depth_stencil_attachment: depth_stencil_attachment.as_ref(), + timestamp_writes, + occlusion_query_set, + }, + ); + Box::into_raw(Box::new(pass)) +} + +#[no_mangle] +pub unsafe extern "C" fn wgpu_render_pass_finish( + pass: *mut wgc::command::RenderPass, + output: &mut ByteBuf, +) { + let command = Box::from_raw(pass).into_command(); + *output = make_byte_buf(&command); +} + +#[no_mangle] +pub unsafe extern "C" fn wgpu_render_pass_destroy(pass: *mut wgc::command::RenderPass) { + let _ = Box::from_raw(pass); +} + +#[no_mangle] +pub unsafe extern "C" fn wgpu_client_create_bind_group_layout( + client: &Client, + device_id: id::DeviceId, + desc: &BindGroupLayoutDescriptor, + bb: &mut ByteBuf, +) -> id::BindGroupLayoutId { + let label = wgpu_string(desc.label); + + let backend = device_id.backend(); + let id = client + .identities + .lock() + .select(backend) + .bind_group_layouts + .process(backend); + + let mut entries = Vec::with_capacity(desc.entries_length); + for entry in make_slice(desc.entries, desc.entries_length) { + entries.push(wgt::BindGroupLayoutEntry { + binding: entry.binding, + visibility: entry.visibility, + count: None, + ty: match entry.ty { + RawBindingType::UniformBuffer => wgt::BindingType::Buffer { + ty: wgt::BufferBindingType::Uniform, + has_dynamic_offset: entry.has_dynamic_offset, + 
min_binding_size: entry.min_binding_size, + }, + RawBindingType::StorageBuffer => wgt::BindingType::Buffer { + ty: wgt::BufferBindingType::Storage { read_only: false }, + has_dynamic_offset: entry.has_dynamic_offset, + min_binding_size: entry.min_binding_size, + }, + RawBindingType::ReadonlyStorageBuffer => wgt::BindingType::Buffer { + ty: wgt::BufferBindingType::Storage { read_only: true }, + has_dynamic_offset: entry.has_dynamic_offset, + min_binding_size: entry.min_binding_size, + }, + RawBindingType::Sampler => wgt::BindingType::Sampler(if entry.sampler_compare { + wgt::SamplerBindingType::Comparison + } else if entry.sampler_filter { + wgt::SamplerBindingType::Filtering + } else { + wgt::SamplerBindingType::NonFiltering + }), + RawBindingType::SampledTexture => wgt::BindingType::Texture { + //TODO: the spec has a bug here + view_dimension: *entry + .view_dimension + .unwrap_or(&wgt::TextureViewDimension::D2), + sample_type: match entry.texture_sample_type { + None | Some(RawTextureSampleType::Float) => { + wgt::TextureSampleType::Float { filterable: true } + } + Some(RawTextureSampleType::UnfilterableFloat) => { + wgt::TextureSampleType::Float { filterable: false } + } + Some(RawTextureSampleType::Uint) => wgt::TextureSampleType::Uint, + Some(RawTextureSampleType::Sint) => wgt::TextureSampleType::Sint, + Some(RawTextureSampleType::Depth) => wgt::TextureSampleType::Depth, + }, + multisampled: entry.multisampled, + }, + RawBindingType::ReadonlyStorageTexture => wgt::BindingType::StorageTexture { + access: wgt::StorageTextureAccess::ReadOnly, + view_dimension: *entry.view_dimension.unwrap(), + format: *entry.storage_texture_format.unwrap(), + }, + RawBindingType::WriteonlyStorageTexture => wgt::BindingType::StorageTexture { + access: wgt::StorageTextureAccess::WriteOnly, + view_dimension: *entry.view_dimension.unwrap(), + format: *entry.storage_texture_format.unwrap(), + }, + }, + }); + } + let wgpu_desc = wgc::binding_model::BindGroupLayoutDescriptor { + label, + entries: Cow::Owned(entries), + }; + + let action = DeviceAction::CreateBindGroupLayout(id, wgpu_desc); + *bb = make_byte_buf(&action); + id +} + +#[no_mangle] +pub unsafe extern "C" fn wgpu_client_render_pipeline_get_bind_group_layout( + client: &Client, + pipeline_id: id::RenderPipelineId, + index: u32, + bb: &mut ByteBuf, +) -> id::BindGroupLayoutId { + let backend = pipeline_id.backend(); + let bgl_id = client + .identities + .lock() + .select(backend) + .bind_group_layouts + .process(backend); + + let action = DeviceAction::RenderPipelineGetBindGroupLayout(pipeline_id, index, bgl_id); + *bb = make_byte_buf(&action); + + bgl_id +} + +#[no_mangle] +pub unsafe extern "C" fn wgpu_client_compute_pipeline_get_bind_group_layout( + client: &Client, + pipeline_id: id::ComputePipelineId, + index: u32, + bb: &mut ByteBuf, +) -> id::BindGroupLayoutId { + let backend = pipeline_id.backend(); + let bgl_id = client + .identities + .lock() + .select(backend) + .bind_group_layouts + .process(backend); + + let action = DeviceAction::ComputePipelineGetBindGroupLayout(pipeline_id, index, bgl_id); + *bb = make_byte_buf(&action); + + bgl_id +} + +#[no_mangle] +pub unsafe extern "C" fn wgpu_client_create_pipeline_layout( + client: &Client, + device_id: id::DeviceId, + desc: &PipelineLayoutDescriptor, + bb: &mut ByteBuf, +) -> id::PipelineLayoutId { + let label = wgpu_string(desc.label); + + let backend = device_id.backend(); + let id = client + .identities + .lock() + .select(backend) + .pipeline_layouts + .process(backend); + + let wgpu_desc = 
wgc::binding_model::PipelineLayoutDescriptor { + label, + bind_group_layouts: Cow::Borrowed(make_slice( + desc.bind_group_layouts, + desc.bind_group_layouts_length, + )), + push_constant_ranges: Cow::Borrowed(&[]), + }; + + let action = DeviceAction::CreatePipelineLayout(id, wgpu_desc); + *bb = make_byte_buf(&action); + id +} + +#[no_mangle] +pub unsafe extern "C" fn wgpu_client_create_bind_group( + client: &Client, + device_id: id::DeviceId, + desc: &BindGroupDescriptor, + bb: &mut ByteBuf, +) -> id::BindGroupId { + let label = wgpu_string(desc.label); + + let backend = device_id.backend(); + let id = client + .identities + .lock() + .select(backend) + .bind_groups + .process(backend); + + let mut entries = Vec::with_capacity(desc.entries_length); + for entry in make_slice(desc.entries, desc.entries_length) { + entries.push(wgc::binding_model::BindGroupEntry { + binding: entry.binding, + resource: if let Some(id) = entry.buffer { + wgc::binding_model::BindingResource::Buffer(wgc::binding_model::BufferBinding { + buffer_id: id, + offset: entry.offset, + size: entry.size, + }) + } else if let Some(id) = entry.sampler { + wgc::binding_model::BindingResource::Sampler(id) + } else if let Some(id) = entry.texture_view { + wgc::binding_model::BindingResource::TextureView(id) + } else { + panic!("Unexpected binding entry {:?}", entry); + }, + }); + } + let wgpu_desc = wgc::binding_model::BindGroupDescriptor { + label, + layout: desc.layout, + entries: Cow::Owned(entries), + }; + + let action = DeviceAction::CreateBindGroup(id, wgpu_desc); + *bb = make_byte_buf(&action); + id +} + +#[no_mangle] +pub extern "C" fn wgpu_client_make_shader_module_id( + client: &Client, + device_id: id::DeviceId, +) -> id::ShaderModuleId { + let backend = device_id.backend(); + client + .identities + .lock() + .select(backend) + .shader_modules + .process(backend) +} + +#[no_mangle] +pub unsafe extern "C" fn wgpu_client_create_compute_pipeline( + client: &Client, + device_id: id::DeviceId, + desc: &ComputePipelineDescriptor, + bb: &mut ByteBuf, + implicit_pipeline_layout_id: *mut Option<id::PipelineLayoutId>, + implicit_bind_group_layout_ids: *mut Option<id::BindGroupLayoutId>, +) -> id::ComputePipelineId { + let label = wgpu_string(desc.label); + + let backend = device_id.backend(); + let mut identities = client.identities.lock(); + let id = identities + .select(backend) + .compute_pipelines + .process(backend); + + let wgpu_desc = wgc::pipeline::ComputePipelineDescriptor { + label, + layout: desc.layout, + stage: desc.stage.to_wgpu(), + }; + + let implicit = match desc.layout { + Some(_) => None, + None => { + let implicit = ImplicitLayout::new(identities.select(backend), backend); + ptr::write(implicit_pipeline_layout_id, Some(implicit.pipeline)); + for (i, bgl_id) in implicit.bind_groups.iter().enumerate() { + *implicit_bind_group_layout_ids.add(i) = *bgl_id; + } + Some(implicit) + } + }; + + let action = DeviceAction::CreateComputePipeline(id, wgpu_desc, implicit); + *bb = make_byte_buf(&action); + id +} + +#[no_mangle] +pub unsafe extern "C" fn wgpu_client_create_render_pipeline( + client: &Client, + device_id: id::DeviceId, + desc: &RenderPipelineDescriptor, + bb: &mut ByteBuf, + implicit_pipeline_layout_id: *mut Option<id::PipelineLayoutId>, + implicit_bind_group_layout_ids: *mut Option<id::BindGroupLayoutId>, +) -> id::RenderPipelineId { + let label = wgpu_string(desc.label); + + let backend = device_id.backend(); + let mut identities = client.identities.lock(); + let id = 
identities.select(backend).render_pipelines.process(backend); + + let wgpu_desc = wgc::pipeline::RenderPipelineDescriptor { + label, + layout: desc.layout, + vertex: desc.vertex.to_wgpu(), + fragment: desc.fragment.map(FragmentState::to_wgpu), + primitive: desc.primitive.to_wgpu(), + depth_stencil: desc.depth_stencil.cloned(), + multisample: desc.multisample.clone(), + multiview: None, + }; + + let implicit = match desc.layout { + Some(_) => None, + None => { + let implicit = ImplicitLayout::new(identities.select(backend), backend); + ptr::write(implicit_pipeline_layout_id, Some(implicit.pipeline)); + for (i, bgl_id) in implicit.bind_groups.iter().enumerate() { + *implicit_bind_group_layout_ids.add(i) = *bgl_id; + } + Some(implicit) + } + }; + + let action = DeviceAction::CreateRenderPipeline(id, wgpu_desc, implicit); + *bb = make_byte_buf(&action); + id +} + +#[no_mangle] +pub unsafe extern "C" fn wgpu_command_encoder_copy_buffer_to_buffer( + src: id::BufferId, + src_offset: wgt::BufferAddress, + dst: id::BufferId, + dst_offset: wgt::BufferAddress, + size: wgt::BufferAddress, + bb: &mut ByteBuf, +) { + let action = CommandEncoderAction::CopyBufferToBuffer { + src, + src_offset, + dst, + dst_offset, + size, + }; + *bb = make_byte_buf(&action); +} + +#[no_mangle] +pub unsafe extern "C" fn wgpu_command_encoder_copy_texture_to_buffer( + src: wgc::command::ImageCopyTexture, + dst_buffer: wgc::id::BufferId, + dst_layout: &ImageDataLayout, + size: wgt::Extent3d, + bb: &mut ByteBuf, +) { + let action = CommandEncoderAction::CopyTextureToBuffer { + src, + dst: wgc::command::ImageCopyBuffer { + buffer: dst_buffer, + layout: dst_layout.into_wgt(), + }, + size, + }; + *bb = make_byte_buf(&action); +} + +#[no_mangle] +pub unsafe extern "C" fn wgpu_command_encoder_copy_buffer_to_texture( + src_buffer: wgc::id::BufferId, + src_layout: &ImageDataLayout, + dst: wgc::command::ImageCopyTexture, + size: wgt::Extent3d, + bb: &mut ByteBuf, +) { + let action = CommandEncoderAction::CopyBufferToTexture { + src: wgc::command::ImageCopyBuffer { + buffer: src_buffer, + layout: src_layout.into_wgt(), + }, + dst, + size, + }; + *bb = make_byte_buf(&action); +} + +#[no_mangle] +pub unsafe extern "C" fn wgpu_command_encoder_copy_texture_to_texture( + src: wgc::command::ImageCopyTexture, + dst: wgc::command::ImageCopyTexture, + size: wgt::Extent3d, + bb: &mut ByteBuf, +) { + let action = CommandEncoderAction::CopyTextureToTexture { src, dst, size }; + *bb = make_byte_buf(&action); +} + +#[no_mangle] +pub unsafe extern "C" fn wgpu_command_encoder_clear_buffer( + dst: wgc::id::BufferId, + offset: u64, + size: Option<&u64>, + bb: &mut ByteBuf, +) { + let action = CommandEncoderAction::ClearBuffer { + dst, + offset, + size: size.cloned(), + }; + *bb = make_byte_buf(&action); +} + +#[no_mangle] +pub extern "C" fn wgpu_command_encoder_push_debug_group(marker: &nsACString, bb: &mut ByteBuf) { + let string = marker.to_string(); + let action = CommandEncoderAction::PushDebugGroup(string); + *bb = make_byte_buf(&action); +} + +#[no_mangle] +pub unsafe extern "C" fn wgpu_command_encoder_pop_debug_group(bb: &mut ByteBuf) { + let action = CommandEncoderAction::PopDebugGroup; + *bb = make_byte_buf(&action); +} + +#[no_mangle] +pub unsafe extern "C" fn wgpu_command_encoder_insert_debug_marker( + marker: &nsACString, + bb: &mut ByteBuf, +) { + let string = marker.to_string(); + let action = CommandEncoderAction::InsertDebugMarker(string); + *bb = make_byte_buf(&action); +} + +#[no_mangle] +pub unsafe extern "C" fn 
wgpu_queue_write_buffer( + dst: id::BufferId, + offset: wgt::BufferAddress, + bb: &mut ByteBuf, +) { + let action = QueueWriteAction::Buffer { dst, offset }; + *bb = make_byte_buf(&action); +} + +#[no_mangle] +pub unsafe extern "C" fn wgpu_queue_write_texture( + dst: wgt::ImageCopyTexture<id::TextureId>, + layout: ImageDataLayout, + size: wgt::Extent3d, + bb: &mut ByteBuf, +) { + let layout = layout.into_wgt(); + let action = QueueWriteAction::Texture { dst, layout, size }; + *bb = make_byte_buf(&action); +} + +/// Returns the block size or zero if the format has multiple aspects (for example depth+stencil). +#[no_mangle] +pub extern "C" fn wgpu_texture_format_block_size_single_aspect(format: wgt::TextureFormat) -> u32 { + format.block_copy_size(None).unwrap_or(0) +} + +#[no_mangle] +pub extern "C" fn wgpu_client_use_external_texture_in_swapChain( + device_id: id::DeviceId, + format: wgt::TextureFormat, +) -> bool { + if device_id.backend() != wgt::Backend::Dx12 { + return false; + } + + if !static_prefs::pref!("dom.webgpu.swap-chain.external-texture-dx12") { + return false; + } + + let supported = match format { + wgt::TextureFormat::Bgra8Unorm => true, + _ => false, + }; + + supported +} diff --git a/gfx/wgpu_bindings/src/error.rs b/gfx/wgpu_bindings/src/error.rs new file mode 100644 index 0000000000..4497f35102 --- /dev/null +++ b/gfx/wgpu_bindings/src/error.rs @@ -0,0 +1,662 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +//! Types needed to marshal [`server`](crate::server) errors back to C++ in Firefox. The main type +//! of this module is [`ErrorBuffer`](crate::server::ErrorBuffer). + +use std::{ + error::Error, + fmt::{self, Display, Formatter}, + os::raw::c_char, + ptr, +}; + +use serde::{Deserialize, Serialize}; + +/// A non-owning representation of `mozilla::webgpu::ErrorBuffer` in C++, passed as an argument to +/// other functions in [this module](self). +/// +/// C++ callers of Rust functions (presumably in `WebGPUParent.cpp`) that expect one of these +/// structs can create a `mozilla::webgpu::ErrorBuffer` object, and call its `ToFFI` method to +/// construct a value of this type, available to C++ as `mozilla::webgpu::ffi::WGPUErrorBuffer`. If +/// we catch a `Result::Err` in other functions of [this module](self), the error is converted to +/// this type. +#[repr(C)] +pub struct ErrorBuffer { + /// The type of error that `string` is associated with. If this location is set to + /// [`ErrorBufferType::None`] after being passed as an argument to a function in [this module](self), + /// then the remaining fields are guaranteed to not have been altered by that function from + /// their original state. + r#type: *mut ErrorBufferType, + /// The (potentially truncated) message associated with this error. A fixed-capacity, + /// null-terminated UTF-8 string buffer owned by C++. + /// + /// When we convert WGPU errors to this type, we render the error as a string, copying into + /// `message` up to `capacity - 1`, and null-terminate it. + message: *mut c_char, + message_capacity: usize, +} + +impl ErrorBuffer { + /// Fill this buffer with the textual representation of `error`. + /// + /// If the error message is too long, truncate it to `self.capacity`. In either case, the error + /// message is always terminated by a zero byte. 
+ /// + /// Note that there is no explicit indication of the message's length, only the terminating zero + /// byte. If the textual form of `error` itself includes a zero byte (as Rust strings can), then + /// the C++ code receiving this error message has no way to distinguish that from the + /// terminating zero byte, and will see the message as shorter than it is. + pub(crate) fn init(&mut self, error: impl HasErrorBufferType) { + use std::fmt::Write; + + let mut message = format!("{}", error); + let mut e = error.source(); + while let Some(source) = e { + write!(message, ", caused by: {}", source).unwrap(); + e = source.source(); + } + + let err_ty = error.error_type(); + // SAFETY: We presume the pointer provided by the caller is safe to write to. + unsafe { *self.r#type = err_ty }; + + if matches!(err_ty, ErrorBufferType::None) { + log::warn!("{message}"); + return; + } + + assert_ne!(self.message_capacity, 0); + let length = if message.len() >= self.message_capacity { + log::warn!( + "Error message's length {} reached capacity {}, truncating", + message.len(), + self.message_capacity + ); + self.message_capacity - 1 + } else { + message.len() + }; + unsafe { + ptr::copy_nonoverlapping(message.as_ptr(), self.message as *mut u8, length); + *self.message.add(length) = 0; + } + } +} + +/// Corresponds to an optional discriminant of [`GPUError`] type in the WebGPU API. Strongly +/// correlates to [`GPUErrorFilter`]s. +/// +/// [`GPUError`]: https://gpuweb.github.io/gpuweb/#gpuerror +/// [`GPUErrorFilter`]: https://gpuweb.github.io/gpuweb/#enumdef-gpuerrorfilter +#[repr(u8)] +#[derive(Clone, Copy, Debug, Deserialize, Serialize)] +pub(crate) enum ErrorBufferType { + None = 0, + DeviceLost = 1, + Internal = 2, + OutOfMemory = 3, + Validation = 4, +} + +/// A trait for querying the [`ErrorBufferType`] classification of an error. Used by +/// [`ErrorBuffer::init`](crate::server::ErrorBuffer::init). +pub(crate) trait HasErrorBufferType: Error { + fn error_type(&self) -> ErrorBufferType; +} + +/// Representation an error whose error message is already rendered as a [`&str`], and has no error +/// sources. Used for convenience in [`server`](crate::server) code. +#[derive(Clone, Debug)] +pub(crate) struct ErrMsg<'a> { + pub(crate) message: &'a str, + pub(crate) r#type: ErrorBufferType, +} + +impl Display for ErrMsg<'_> { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + let Self { message, r#type: _ } = self; + write!(f, "{message}") + } +} + +impl Error for ErrMsg<'_> {} + +impl HasErrorBufferType for ErrMsg<'_> { + fn error_type(&self) -> ErrorBufferType { + self.r#type + } +} + +/// Encapsulates implementations of [`HasErrorType`] for [`wgpu_core`] types. 
+mod foreign { + use wgc::{ + binding_model::{ + CreateBindGroupError, CreateBindGroupLayoutError, CreatePipelineLayoutError, + GetBindGroupLayoutError, + }, + command::{ + ClearError, CommandEncoderError, ComputePassError, CopyError, CreateRenderBundleError, + QueryError, QueryUseError, RenderBundleError, RenderPassError, ResolveError, + TransferError, + }, + device::{ + queue::{QueueSubmitError, QueueWriteError}, + DeviceError, + }, + instance::{RequestAdapterError, RequestDeviceError}, + pipeline::{ + CreateComputePipelineError, CreateRenderPipelineError, CreateShaderModuleError, + }, + resource::{ + BufferAccessError, CreateBufferError, CreateSamplerError, CreateTextureError, + CreateTextureViewError, DestroyError, + }, + }; + + use super::{ErrorBufferType, HasErrorBufferType}; + + impl HasErrorBufferType for RequestAdapterError { + fn error_type(&self) -> ErrorBufferType { + match self { + RequestAdapterError::NotFound | RequestAdapterError::InvalidSurface(_) => { + ErrorBufferType::Validation + } + + // N.B: forced non-exhaustiveness + _ => ErrorBufferType::Validation, + } + } + } + + impl HasErrorBufferType for RequestDeviceError { + fn error_type(&self) -> ErrorBufferType { + match self { + RequestDeviceError::OutOfMemory => ErrorBufferType::OutOfMemory, + + RequestDeviceError::DeviceLost => ErrorBufferType::DeviceLost, + + RequestDeviceError::Internal + | RequestDeviceError::InvalidAdapter + | RequestDeviceError::NoGraphicsQueue => ErrorBufferType::Internal, + + RequestDeviceError::UnsupportedFeature(_) + | RequestDeviceError::LimitsExceeded(_) => ErrorBufferType::Validation, + + // N.B: forced non-exhaustiveness + _ => ErrorBufferType::Validation, + } + } + } + + impl HasErrorBufferType for CreateBufferError { + fn error_type(&self) -> ErrorBufferType { + match self { + CreateBufferError::Device(e) => e.error_type(), + CreateBufferError::AccessError(e) => e.error_type(), + + CreateBufferError::UnalignedSize + | CreateBufferError::InvalidUsage(_) + | CreateBufferError::UsageMismatch(_) + | CreateBufferError::MaxBufferSize { .. } + | CreateBufferError::MissingDownlevelFlags(_) => ErrorBufferType::Validation, + + // N.B: forced non-exhaustiveness + _ => ErrorBufferType::Validation, + } + } + } + + impl HasErrorBufferType for BufferAccessError { + fn error_type(&self) -> ErrorBufferType { + match self { + BufferAccessError::Device(e) => e.error_type(), + + BufferAccessError::Failed + | BufferAccessError::Invalid + | BufferAccessError::Destroyed + | BufferAccessError::AlreadyMapped + | BufferAccessError::MapAlreadyPending + | BufferAccessError::MissingBufferUsage(_) + | BufferAccessError::NotMapped + | BufferAccessError::UnalignedRange + | BufferAccessError::UnalignedOffset { .. } + | BufferAccessError::UnalignedRangeSize { .. } + | BufferAccessError::OutOfBoundsUnderrun { .. } + | BufferAccessError::OutOfBoundsOverrun { .. } + | BufferAccessError::NegativeRange { .. } + | BufferAccessError::MapAborted => ErrorBufferType::Validation, + + // N.B: forced non-exhaustiveness + _ => ErrorBufferType::Validation, + } + } + } + + impl HasErrorBufferType for CreateTextureError { + fn error_type(&self) -> ErrorBufferType { + match self { + CreateTextureError::Device(e) => e.error_type(), + + CreateTextureError::InvalidUsage(_) + | CreateTextureError::InvalidDimension(_) + | CreateTextureError::InvalidDepthDimension(_, _) + | CreateTextureError::InvalidCompressedDimension(_, _) + | CreateTextureError::InvalidMipLevelCount { .. 
} + | CreateTextureError::InvalidFormatUsages(_, _, _) + | CreateTextureError::InvalidViewFormat(_, _) + | CreateTextureError::InvalidDimensionUsages(_, _) + | CreateTextureError::InvalidMultisampledStorageBinding + | CreateTextureError::InvalidMultisampledFormat(_) + | CreateTextureError::InvalidSampleCount(..) + | CreateTextureError::MultisampledNotRenderAttachment + | CreateTextureError::MissingFeatures(_, _) + | CreateTextureError::MissingDownlevelFlags(_) => ErrorBufferType::Validation, + + // N.B: forced non-exhaustiveness + _ => ErrorBufferType::Validation, + } + } + } + + impl HasErrorBufferType for CreateSamplerError { + fn error_type(&self) -> ErrorBufferType { + match self { + CreateSamplerError::Device(e) => e.error_type(), + + CreateSamplerError::InvalidLodMinClamp(_) + | CreateSamplerError::InvalidLodMaxClamp { .. } + | CreateSamplerError::InvalidAnisotropy(_) + | CreateSamplerError::InvalidFilterModeWithAnisotropy { .. } + | CreateSamplerError::TooManyObjects + | CreateSamplerError::MissingFeatures(_) => ErrorBufferType::Validation, + + // N.B: forced non-exhaustiveness + _ => ErrorBufferType::Validation, + } + } + } + + impl HasErrorBufferType for CreateBindGroupLayoutError { + fn error_type(&self) -> ErrorBufferType { + match self { + CreateBindGroupLayoutError::Device(e) => e.error_type(), + + CreateBindGroupLayoutError::ConflictBinding(_) + | CreateBindGroupLayoutError::Entry { .. } + | CreateBindGroupLayoutError::TooManyBindings(_) + | CreateBindGroupLayoutError::InvalidBindingIndex { .. } + | CreateBindGroupLayoutError::InvalidVisibility(_) => ErrorBufferType::Validation, + + // N.B: forced non-exhaustiveness + _ => ErrorBufferType::Validation, + } + } + } + + impl HasErrorBufferType for CreatePipelineLayoutError { + fn error_type(&self) -> ErrorBufferType { + match self { + CreatePipelineLayoutError::Device(e) => e.error_type(), + + CreatePipelineLayoutError::InvalidBindGroupLayout(_) + | CreatePipelineLayoutError::MisalignedPushConstantRange { .. } + | CreatePipelineLayoutError::MissingFeatures(_) + | CreatePipelineLayoutError::MoreThanOnePushConstantRangePerStage { .. } + | CreatePipelineLayoutError::PushConstantRangeTooLarge { .. } + | CreatePipelineLayoutError::TooManyBindings(_) + | CreatePipelineLayoutError::TooManyGroups { .. } => ErrorBufferType::Validation, + + // N.B: forced non-exhaustiveness + _ => ErrorBufferType::Validation, + } + } + } + + impl HasErrorBufferType for CreateBindGroupError { + fn error_type(&self) -> ErrorBufferType { + match self { + CreateBindGroupError::Device(e) => e.error_type(), + + CreateBindGroupError::InvalidLayout + | CreateBindGroupError::InvalidBuffer(_) + | CreateBindGroupError::InvalidTextureView(_) + | CreateBindGroupError::InvalidTexture(_) + | CreateBindGroupError::InvalidSampler(_) + | CreateBindGroupError::BindingArrayPartialLengthMismatch { .. } + | CreateBindGroupError::BindingArrayLengthMismatch { .. } + | CreateBindGroupError::BindingArrayZeroLength + | CreateBindGroupError::BindingRangeTooLarge { .. } + | CreateBindGroupError::BindingSizeTooSmall { .. } + | CreateBindGroupError::BindingZeroSize(_) + | CreateBindGroupError::BindingsNumMismatch { .. } + | CreateBindGroupError::DuplicateBinding(_) + | CreateBindGroupError::MissingBindingDeclaration(_) + | CreateBindGroupError::MissingBufferUsage(_) + | CreateBindGroupError::MissingTextureUsage(_) + | CreateBindGroupError::SingleBindingExpected + | CreateBindGroupError::UnalignedBufferOffset(_, _, _) + | CreateBindGroupError::BufferRangeTooLarge { .. 
} + | CreateBindGroupError::WrongBindingType { .. } + | CreateBindGroupError::InvalidTextureMultisample { .. } + | CreateBindGroupError::InvalidTextureSampleType { .. } + | CreateBindGroupError::InvalidTextureDimension { .. } + | CreateBindGroupError::InvalidStorageTextureFormat { .. } + | CreateBindGroupError::InvalidStorageTextureMipLevelCount { .. } + | CreateBindGroupError::WrongSamplerComparison { .. } + | CreateBindGroupError::WrongSamplerFiltering { .. } + | CreateBindGroupError::DepthStencilAspect + | CreateBindGroupError::StorageReadNotSupported(_) + | CreateBindGroupError::ResourceUsageConflict(_) => ErrorBufferType::Validation, + + // N.B: forced non-exhaustiveness + _ => ErrorBufferType::Validation, + } + } + } + + impl HasErrorBufferType for CreateShaderModuleError { + fn error_type(&self) -> ErrorBufferType { + match self { + CreateShaderModuleError::Device(e) => e.error_type(), + + CreateShaderModuleError::Generation => ErrorBufferType::Internal, + + CreateShaderModuleError::Parsing(_) + | CreateShaderModuleError::Validation(_) + | CreateShaderModuleError::MissingFeatures(_) + | CreateShaderModuleError::InvalidGroupIndex { .. } => ErrorBufferType::Validation, + + // N.B: forced non-exhaustiveness + _ => ErrorBufferType::Validation, + } + } + } + + impl HasErrorBufferType for CreateComputePipelineError { + fn error_type(&self) -> ErrorBufferType { + match self { + CreateComputePipelineError::Device(e) => e.error_type(), + + CreateComputePipelineError::Internal(_) => ErrorBufferType::Internal, + + CreateComputePipelineError::InvalidLayout + | CreateComputePipelineError::Implicit(_) + | CreateComputePipelineError::Stage(_) + | CreateComputePipelineError::MissingDownlevelFlags(_) => { + ErrorBufferType::Validation + } + + // N.B: forced non-exhaustiveness + _ => ErrorBufferType::Validation, + } + } + } + + impl HasErrorBufferType for CreateRenderPipelineError { + fn error_type(&self) -> ErrorBufferType { + match self { + CreateRenderPipelineError::Device(e) => e.error_type(), + + CreateRenderPipelineError::Internal { .. } => ErrorBufferType::Internal, + + CreateRenderPipelineError::ColorAttachment(_) + | CreateRenderPipelineError::InvalidLayout + | CreateRenderPipelineError::Implicit(_) + | CreateRenderPipelineError::ColorState(_, _) + | CreateRenderPipelineError::DepthStencilState(_) + | CreateRenderPipelineError::InvalidSampleCount(_) + | CreateRenderPipelineError::TooManyVertexBuffers { .. } + | CreateRenderPipelineError::TooManyVertexAttributes { .. } + | CreateRenderPipelineError::VertexStrideTooLarge { .. } + | CreateRenderPipelineError::UnalignedVertexStride { .. } + | CreateRenderPipelineError::InvalidVertexAttributeOffset { .. } + | CreateRenderPipelineError::ShaderLocationClash(_) + | CreateRenderPipelineError::StripIndexFormatForNonStripTopology { .. } + | CreateRenderPipelineError::ConservativeRasterizationNonFillPolygonMode + | CreateRenderPipelineError::MissingFeatures(_) + | CreateRenderPipelineError::MissingDownlevelFlags(_) + | CreateRenderPipelineError::Stage { .. } + | CreateRenderPipelineError::UnalignedShader { .. } => ErrorBufferType::Validation, + + // N.B: forced non-exhaustiveness + _ => ErrorBufferType::Validation, + } + } + } + + impl HasErrorBufferType for RenderBundleError { + fn error_type(&self) -> ErrorBufferType { + // We can't classify this ourselves, because inner error classification is private. May + // need some upstream work to do this properly. 
+ ErrorBufferType::Validation + } + } + + impl HasErrorBufferType for DeviceError { + fn error_type(&self) -> ErrorBufferType { + match self { + DeviceError::Invalid | DeviceError::WrongDevice => ErrorBufferType::Validation, + DeviceError::InvalidQueueId => ErrorBufferType::Validation, + DeviceError::Lost => ErrorBufferType::DeviceLost, + DeviceError::OutOfMemory => ErrorBufferType::OutOfMemory, + DeviceError::ResourceCreationFailed => ErrorBufferType::Internal, + _ => ErrorBufferType::Internal, + } + } + } + + impl HasErrorBufferType for CreateTextureViewError { + fn error_type(&self) -> ErrorBufferType { + match self { + CreateTextureViewError::OutOfMemory => ErrorBufferType::OutOfMemory, + + CreateTextureViewError::InvalidTexture + | CreateTextureViewError::InvalidTextureViewDimension { .. } + | CreateTextureViewError::InvalidMultisampledTextureViewDimension(_) + | CreateTextureViewError::InvalidCubemapTextureDepth { .. } + | CreateTextureViewError::InvalidCubemapArrayTextureDepth { .. } + | CreateTextureViewError::InvalidCubeTextureViewSize + | CreateTextureViewError::ZeroMipLevelCount + | CreateTextureViewError::ZeroArrayLayerCount + | CreateTextureViewError::TooManyMipLevels { .. } + | CreateTextureViewError::TooManyArrayLayers { .. } + | CreateTextureViewError::InvalidArrayLayerCount { .. } + | CreateTextureViewError::InvalidAspect { .. } + | CreateTextureViewError::FormatReinterpretation { .. } => { + ErrorBufferType::Validation + } + + // N.B: forced non-exhaustiveness + _ => ErrorBufferType::Validation, + } + } + } + + impl HasErrorBufferType for CopyError { + fn error_type(&self) -> ErrorBufferType { + match self { + CopyError::Encoder(e) => e.error_type(), + CopyError::Transfer(e) => e.error_type(), + + // N.B: forced non-exhaustiveness + _ => ErrorBufferType::Validation, + } + } + } + + impl HasErrorBufferType for TransferError { + fn error_type(&self) -> ErrorBufferType { + match self { + TransferError::MemoryInitFailure(e) => e.error_type(), + + TransferError::InvalidBuffer(_) + | TransferError::InvalidTexture(_) + | TransferError::SameSourceDestinationBuffer + | TransferError::MissingCopySrcUsageFlag + | TransferError::MissingCopyDstUsageFlag(_, _) + | TransferError::MissingRenderAttachmentUsageFlag(_) + | TransferError::BufferOverrun { .. } + | TransferError::TextureOverrun { .. } + | TransferError::InvalidTextureAspect { .. } + | TransferError::InvalidTextureMipLevel { .. } + | TransferError::InvalidDimensionExternal(_) + | TransferError::UnalignedBufferOffset(_) + | TransferError::UnalignedCopySize(_) + | TransferError::UnalignedCopyWidth + | TransferError::UnalignedCopyHeight + | TransferError::UnalignedCopyOriginX + | TransferError::UnalignedCopyOriginY + | TransferError::UnalignedBytesPerRow + | TransferError::UnspecifiedBytesPerRow + | TransferError::UnspecifiedRowsPerImage + | TransferError::InvalidBytesPerRow + | TransferError::InvalidCopySize + | TransferError::InvalidRowsPerImage + | TransferError::CopySrcMissingAspects + | TransferError::CopyDstMissingAspects + | TransferError::CopyAspectNotOne + | TransferError::CopyFromForbiddenTextureFormat { .. } + | TransferError::CopyToForbiddenTextureFormat { .. } + | TransferError::ExternalCopyToForbiddenTextureFormat(_) + | TransferError::InvalidDepthTextureExtent + | TransferError::TextureFormatsNotCopyCompatible { .. } + | TransferError::MissingDownlevelFlags(_) + | TransferError::InvalidSampleCount { .. } + | TransferError::InvalidMipLevel { .. 
} => ErrorBufferType::Validation, + + // N.B: forced non-exhaustiveness + _ => ErrorBufferType::Validation, + } + } + } + + impl HasErrorBufferType for ComputePassError { + fn error_type(&self) -> ErrorBufferType { + // We can't classify this ourselves, because inner error classification is private. We + // may need some upstream work to do this properly. For now, we trust that this opaque + // type only ever represents `Validation`. + ErrorBufferType::Validation + } + } + + impl HasErrorBufferType for QueryError { + fn error_type(&self) -> ErrorBufferType { + match self { + QueryError::Encoder(e) => e.error_type(), + QueryError::Use(e) => e.error_type(), + QueryError::Resolve(e) => e.error_type(), + + QueryError::InvalidBuffer(_) | QueryError::InvalidQuerySet(_) => { + ErrorBufferType::Validation + } + + // N.B: forced non-exhaustiveness + _ => ErrorBufferType::Validation, + } + } + } + + impl HasErrorBufferType for QueryUseError { + fn error_type(&self) -> ErrorBufferType { + // We can't classify this ourselves, because inner error classification is private. We + // may need some upstream work to do this properly. For now, we trust that this opaque + // type only ever represents `Validation`. + ErrorBufferType::Validation + } + } + + impl HasErrorBufferType for ResolveError { + fn error_type(&self) -> ErrorBufferType { + // We can't classify this ourselves, because inner error classification is private. We + // may need some upstream work to do this properly. For now, we trust that this opaque + // type only ever represents `Validation`. + ErrorBufferType::Validation + } + } + + impl HasErrorBufferType for RenderPassError { + fn error_type(&self) -> ErrorBufferType { + // TODO: This type's `inner` member has an `OutOfMemory` variant. We definitely need to + // expose this upstream, or move this implementation upstream. + // + // Bug for tracking: https://bugzilla.mozilla.org/show_bug.cgi?id=1840926 + ErrorBufferType::Validation + } + } + + impl HasErrorBufferType for ClearError { + fn error_type(&self) -> ErrorBufferType { + // We can't classify this ourselves, because inner error classification is private. We + // may need some upstream work to do this properly. For now, we trust that this opaque + // type only ever represents `Validation`. + ErrorBufferType::Validation + } + } + + impl HasErrorBufferType for CommandEncoderError { + fn error_type(&self) -> ErrorBufferType { + // We can't classify this ourselves, because inner error classification is private. We + // may need some upstream work to do this properly. For now, we trust that this opaque + // type only ever represents `Validation`. 
+ ErrorBufferType::Validation + } + } + + impl HasErrorBufferType for QueueSubmitError { + fn error_type(&self) -> ErrorBufferType { + match self { + QueueSubmitError::Queue(e) => e.error_type(), + QueueSubmitError::Unmap(e) => e.error_type(), + + QueueSubmitError::StuckGpu => ErrorBufferType::Internal, // TODO: validate + QueueSubmitError::DestroyedBuffer(_) + | QueueSubmitError::DestroyedTexture(_) + | QueueSubmitError::BufferStillMapped(_) + | QueueSubmitError::SurfaceOutputDropped + | QueueSubmitError::SurfaceUnconfigured => ErrorBufferType::Validation, + + // N.B: forced non-exhaustiveness + _ => ErrorBufferType::Validation, + } + } + } + + impl HasErrorBufferType for QueueWriteError { + fn error_type(&self) -> ErrorBufferType { + match self { + QueueWriteError::Queue(e) => e.error_type(), + QueueWriteError::Transfer(e) => e.error_type(), + QueueWriteError::MemoryInitFailure(e) => e.error_type(), + + // N.B: forced non-exhaustiveness + _ => ErrorBufferType::Validation, + } + } + } + + impl HasErrorBufferType for GetBindGroupLayoutError { + fn error_type(&self) -> ErrorBufferType { + // We can't classify this ourselves, because inner error classification is private. We + // may need some upstream work to do this properly. For now, we trust that this opaque + // type only ever represents `Validation`. + ErrorBufferType::Validation + } + } + + impl HasErrorBufferType for CreateRenderBundleError { + fn error_type(&self) -> ErrorBufferType { + // We can't classify this ourselves, because inner error classification is private. We + // may need some upstream work to do this properly. For now, we trust that this opaque + // type only ever represents `Validation`. + ErrorBufferType::Validation + } + } + + impl HasErrorBufferType for DestroyError { + fn error_type(&self) -> ErrorBufferType { + ErrorBufferType::Validation + } + } +} diff --git a/gfx/wgpu_bindings/src/identity.rs b/gfx/wgpu_bindings/src/identity.rs new file mode 100644 index 0000000000..e0032240a4 --- /dev/null +++ b/gfx/wgpu_bindings/src/identity.rs @@ -0,0 +1,3 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ diff --git a/gfx/wgpu_bindings/src/lib.rs b/gfx/wgpu_bindings/src/lib.rs new file mode 100644 index 0000000000..3299234182 --- /dev/null +++ b/gfx/wgpu_bindings/src/lib.rs @@ -0,0 +1,233 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +use crate::error::ErrorBufferType; +use wgc::id; + +pub use wgc::command::{compute_ffi::*, render_ffi::*}; + +pub mod client; +pub mod error; +pub mod server; + +pub use wgc::device::trace::Command as CommandEncoderAction; + +use std::marker::PhantomData; +use std::{borrow::Cow, mem, slice}; + +use nsstring::nsACString; + +type RawString = *const std::os::raw::c_char; + +//TODO: figure out why 'a and 'b have to be different here +//TODO: remove this +fn cow_label<'a, 'b>(raw: &'a RawString) -> Option<Cow<'b, str>> { + if raw.is_null() { + None + } else { + let cstr = unsafe { std::ffi::CStr::from_ptr(*raw) }; + cstr.to_str().ok().map(Cow::Borrowed) + } +} + +// Hides the repeated boilerplate of turning a `Option<&nsACString>` into a `Option<Cow<str>`. 
+pub fn wgpu_string(gecko_string: Option<&nsACString>) -> Option<Cow<str>> { + gecko_string.map(|s| s.to_utf8()) +} + +/// An equivalent of `&[T]` for ffi structures and function parameters. +#[repr(C)] +pub struct FfiSlice<'a, T> { + // `data` may be null. + pub data: *const T, + pub length: usize, + pub _marker: PhantomData<&'a T>, +} + +impl<'a, T> FfiSlice<'a, T> { + pub unsafe fn as_slice(&self) -> &'a [T] { + if self.data.is_null() { + // It is invalid to construct a rust slice with a null pointer. + return &[]; + } + + std::slice::from_raw_parts(self.data, self.length) + } +} + +impl<'a, T> Copy for FfiSlice<'a, T> {} +impl<'a, T> Clone for FfiSlice<'a, T> { + fn clone(&self) -> Self { + *self + } +} + +#[repr(C)] +pub struct ByteBuf { + data: *const u8, + len: usize, + capacity: usize, +} + +impl ByteBuf { + fn from_vec(vec: Vec<u8>) -> Self { + if vec.is_empty() { + ByteBuf { + data: std::ptr::null(), + len: 0, + capacity: 0, + } + } else { + let bb = ByteBuf { + data: vec.as_ptr(), + len: vec.len(), + capacity: vec.capacity(), + }; + mem::forget(vec); + bb + } + } + + unsafe fn as_slice(&self) -> &[u8] { + slice::from_raw_parts(self.data, self.len) + } +} + +#[repr(C)] +#[derive(serde::Serialize, serde::Deserialize)] +pub struct AdapterInformation<S> { + id: id::AdapterId, + limits: wgt::Limits, + features: wgt::Features, + name: S, + vendor: u32, + device: u32, + device_type: wgt::DeviceType, + driver: S, + driver_info: S, + backend: wgt::Backend, +} + +#[derive(serde::Serialize, serde::Deserialize)] +struct ImplicitLayout<'a> { + pipeline: id::PipelineLayoutId, + bind_groups: Cow<'a, [Option<id::BindGroupLayoutId>]>, +} + +#[derive(serde::Serialize, serde::Deserialize)] +enum DeviceAction<'a> { + CreateTexture( + id::TextureId, + wgc::resource::TextureDescriptor<'a>, + Option<SwapChainId>, + ), + CreateSampler(id::SamplerId, wgc::resource::SamplerDescriptor<'a>), + CreateBindGroupLayout( + id::BindGroupLayoutId, + wgc::binding_model::BindGroupLayoutDescriptor<'a>, + ), + RenderPipelineGetBindGroupLayout(id::RenderPipelineId, u32, id::BindGroupLayoutId), + ComputePipelineGetBindGroupLayout(id::ComputePipelineId, u32, id::BindGroupLayoutId), + CreatePipelineLayout( + id::PipelineLayoutId, + wgc::binding_model::PipelineLayoutDescriptor<'a>, + ), + CreateBindGroup(id::BindGroupId, wgc::binding_model::BindGroupDescriptor<'a>), + CreateShaderModule( + id::ShaderModuleId, + wgc::pipeline::ShaderModuleDescriptor<'a>, + Cow<'a, str>, + ), + CreateComputePipeline( + id::ComputePipelineId, + wgc::pipeline::ComputePipelineDescriptor<'a>, + Option<ImplicitLayout<'a>>, + ), + CreateRenderPipeline( + id::RenderPipelineId, + wgc::pipeline::RenderPipelineDescriptor<'a>, + Option<ImplicitLayout<'a>>, + ), + CreateRenderBundle( + id::RenderBundleId, + wgc::command::RenderBundleEncoder, + wgc::command::RenderBundleDescriptor<'a>, + ), + CreateRenderBundleError(id::RenderBundleId, wgc::Label<'a>), + CreateCommandEncoder( + id::CommandEncoderId, + wgt::CommandEncoderDescriptor<wgc::Label<'a>>, + ), + Error { + message: String, + r#type: ErrorBufferType, + }, +} + +#[derive(serde::Serialize, serde::Deserialize)] +enum QueueWriteAction { + Buffer { + dst: id::BufferId, + offset: wgt::BufferAddress, + }, + Texture { + dst: wgt::ImageCopyTexture<id::TextureId>, + layout: wgt::ImageDataLayout, + size: wgt::Extent3d, + }, +} + +#[derive(serde::Serialize, serde::Deserialize)] +enum TextureAction<'a> { + CreateView(id::TextureViewId, wgc::resource::TextureViewDescriptor<'a>), +} + +#[repr(C)] 
+#[derive(serde::Serialize, serde::Deserialize)] +enum DropAction { + Adapter(id::AdapterId), + Device(id::DeviceId), + ShaderModule(id::ShaderModuleId), + PipelineLayout(id::PipelineLayoutId), + BindGroupLayout(id::BindGroupLayoutId), + BindGroup(id::BindGroupId), + CommandBuffer(id::CommandBufferId), + RenderBundle(id::RenderBundleId), + RenderPipeline(id::RenderPipelineId), + ComputePipeline(id::ComputePipelineId), + Buffer(id::BufferId), + Texture(id::TextureId), + TextureView(id::TextureViewId), + Sampler(id::SamplerId), +} + +impl DropAction { + // helper function to construct byte bufs + fn to_byte_buf(&self) -> ByteBuf { + let mut data = Vec::new(); + bincode::serialize_into(&mut data, self).unwrap(); + ByteBuf::from_vec(data) + } +} + +#[repr(C)] +pub struct ImageDataLayout<'a> { + pub offset: wgt::BufferAddress, + pub bytes_per_row: Option<&'a u32>, + pub rows_per_image: Option<&'a u32>, +} + +impl<'a> ImageDataLayout<'a> { + fn into_wgt(&self) -> wgt::ImageDataLayout { + wgt::ImageDataLayout { + offset: self.offset, + bytes_per_row: self.bytes_per_row.map(|bpr| *bpr), + rows_per_image: self.rows_per_image.map(|rpi| *rpi), + } + } +} + +#[repr(C)] +#[derive(Copy, Clone, Debug, PartialEq, Eq, serde::Serialize, serde::Deserialize)] +pub struct SwapChainId(pub u64); diff --git a/gfx/wgpu_bindings/src/server.rs b/gfx/wgpu_bindings/src/server.rs new file mode 100644 index 0000000000..7a7e08aa30 --- /dev/null +++ b/gfx/wgpu_bindings/src/server.rs @@ -0,0 +1,1345 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +use crate::{ + error::{ErrMsg, ErrorBuffer, ErrorBufferType}, + wgpu_string, AdapterInformation, ByteBuf, CommandEncoderAction, DeviceAction, DropAction, + QueueWriteAction, SwapChainId, TextureAction, +}; + +use nsstring::{nsACString, nsCString, nsString}; + +use wgc::{device::DeviceError, gfx_select, id}; +use wgc::{pipeline::CreateShaderModuleError, resource::BufferAccessError}; +#[allow(unused_imports)] +use wgh::Instance; + +use std::borrow::Cow; +#[allow(unused_imports)] +use std::mem; +use std::os::raw::c_void; +use std::ptr; +use std::slice; +use std::sync::atomic::{AtomicU32, Ordering}; + +use std::ffi::{c_long, c_ulong}; +#[cfg(target_os = "windows")] +use winapi::shared::dxgi; +#[cfg(target_os = "windows")] +use winapi::um::d3d12 as d3d12_ty; +#[cfg(target_os = "windows")] +use winapi::um::winnt::GENERIC_ALL; +#[cfg(target_os = "windows")] +use winapi::Interface; + +// The seemingly redundant u64 suffixes help cbindgen with generating the right C++ code. +// See https://github.com/mozilla/cbindgen/issues/849. + +/// We limit the size of buffer allocations for stability reason. +/// We can reconsider this limit in the future. Note that some drivers (mesa for example), +/// have issues when the size of a buffer, mapping or copy command does not fit into a +/// signed 32 bits integer, so beyond a certain size, large allocations will need some form +/// of driver allow/blocklist. +pub const MAX_BUFFER_SIZE: wgt::BufferAddress = 1u64 << 30u64; +const MAX_BUFFER_SIZE_U32: u32 = MAX_BUFFER_SIZE as u32; + +// Mesa has issues with height/depth that don't fit in a 16 bits signed integers. 
+const MAX_TEXTURE_EXTENT: u32 = std::i16::MAX as u32; +// We have to restrict the number of bindings for any given resource type so that +// the sum of these limits multiplied by the number of shader stages fits +// maxBindingsPerBindGroup (1000). This restriction is arbitrary and is likely to +// change eventually. See github.com/gpuweb/gpuweb/pull/4484 +// For now it's impractical for users to have very large numbers of bindings so this +// limit should not be too restrictive until we add support for a bindless API. +// Then we may have to ignore the spec or get it changed. +const MAX_BINDINGS_PER_RESOURCE_TYPE: u32 = 64; + +fn restrict_limits(limits: wgt::Limits) -> wgt::Limits { + wgt::Limits { + max_buffer_size: limits.max_buffer_size.min(MAX_BUFFER_SIZE), + max_texture_dimension_1d: limits.max_texture_dimension_1d.min(MAX_TEXTURE_EXTENT), + max_texture_dimension_2d: limits.max_texture_dimension_2d.min(MAX_TEXTURE_EXTENT), + max_texture_dimension_3d: limits.max_texture_dimension_3d.min(MAX_TEXTURE_EXTENT), + max_sampled_textures_per_shader_stage: limits + .max_sampled_textures_per_shader_stage + .min(MAX_BINDINGS_PER_RESOURCE_TYPE), + max_samplers_per_shader_stage: limits + .max_samplers_per_shader_stage + .min(MAX_BINDINGS_PER_RESOURCE_TYPE), + max_storage_textures_per_shader_stage: limits + .max_storage_textures_per_shader_stage + .min(MAX_BINDINGS_PER_RESOURCE_TYPE), + max_uniform_buffers_per_shader_stage: limits + .max_uniform_buffers_per_shader_stage + .min(MAX_BINDINGS_PER_RESOURCE_TYPE), + max_storage_buffers_per_shader_stage: limits + .max_storage_buffers_per_shader_stage + .min(MAX_BINDINGS_PER_RESOURCE_TYPE), + max_uniform_buffer_binding_size: limits + .max_uniform_buffer_binding_size + .min(MAX_BUFFER_SIZE_U32), + max_storage_buffer_binding_size: limits + .max_storage_buffer_binding_size + .min(MAX_BUFFER_SIZE_U32), + max_non_sampler_bindings: 10_000, + ..limits + } +} + +// hide wgc's global in private +pub struct Global { + global: wgc::global::Global, + #[allow(dead_code)] + owner: *mut c_void, +} + +impl std::ops::Deref for Global { + type Target = wgc::global::Global; + fn deref(&self) -> &Self::Target { + &self.global + } +} + +#[no_mangle] +pub extern "C" fn wgpu_server_new(owner: *mut c_void) -> *mut Global { + log::info!("Initializing WGPU server"); + let backends_pref = static_prefs::pref!("dom.webgpu.wgpu-backend").to_string(); + let backends = if backends_pref.is_empty() { + #[cfg(windows)] + { + wgt::Backends::DX12 + } + #[cfg(not(windows))] + { + wgt::Backends::PRIMARY + } + } else { + log::info!( + "Selecting backends based on dom.webgpu.wgpu-backend pref: {:?}", + backends_pref + ); + wgc::instance::parse_backends_from_comma_list(&backends_pref) + }; + + let mut instance_flags = wgt::InstanceFlags::from_build_config().with_env(); + if !static_prefs::pref!("dom.webgpu.hal-labels") { + instance_flags.insert(wgt::InstanceFlags::DISCARD_HAL_LABELS); + } + + let global = wgc::global::Global::new( + "wgpu", + wgt::InstanceDescriptor { + backends, + flags: instance_flags, + dx12_shader_compiler: wgt::Dx12Compiler::Fxc, + gles_minor_version: wgt::Gles3MinorVersion::Automatic, + }, + ); + let global = Global { global, owner }; + Box::into_raw(Box::new(global)) +} + +/// # Safety +/// +/// This function is unsafe because improper use may lead to memory +/// problems. For example, a double-free may occur if the function is called +/// twice on the same raw pointer. 
+#[no_mangle] +pub unsafe extern "C" fn wgpu_server_delete(global: *mut Global) { + log::info!("Terminating WGPU server"); + let _ = Box::from_raw(global); +} + +#[no_mangle] +pub extern "C" fn wgpu_server_poll_all_devices(global: &Global, force_wait: bool) { + global.poll_all_devices(force_wait).unwrap(); +} + +#[repr(C)] +#[derive(Copy, Clone, Debug)] +pub struct FfiLUID { + low_part: c_ulong, + high_part: c_long, +} + +/// Request an adapter according to the specified options. +/// Provide the list of IDs to pick from. +/// +/// Returns the index in this list, or -1 if unable to pick. +/// +/// # Safety +/// +/// This function is unsafe as there is no guarantee that the given pointer is +/// valid for `id_length` elements. +#[allow(unused_variables)] +#[no_mangle] +pub unsafe extern "C" fn wgpu_server_instance_request_adapter( + global: &Global, + desc: &wgc::instance::RequestAdapterOptions, + ids: *const id::AdapterId, + id_length: usize, + adapter_luid: Option<&FfiLUID>, + mut error_buf: ErrorBuffer, +) -> i8 { + let ids = slice::from_raw_parts(ids, id_length); + + // Prefer to use the dx12 backend, if one exists, and use the same DXGI adapter as WebRender. + // If wgpu uses a different adapter than WebRender, textures created by + // webgpu::ExternalTexture do not work with wgpu. + #[cfg(target_os = "windows")] + if global.global.instance.dx12.is_some() && adapter_luid.is_some() { + let hal = global.global.instance_as_hal::<wgc::api::Dx12>().unwrap(); + for adapter in hal.enumerate_adapters() { + let raw_adapter = adapter.adapter.raw_adapter(); + let mut desc: dxgi::DXGI_ADAPTER_DESC = unsafe { mem::zeroed() }; + unsafe { + raw_adapter.GetDesc(&mut desc); + } + let id = ids + .iter() + .find_map(|id| (id.backend() == wgt::Backend::Dx12).then_some(id)); + if id.is_some() + && desc.AdapterLuid.LowPart == adapter_luid.unwrap().low_part + && desc.AdapterLuid.HighPart == adapter_luid.unwrap().high_part + { + let adapter_id = global + .create_adapter_from_hal::<wgh::api::Dx12>(adapter, Some(id.unwrap().clone())); + return ids.iter().position(|&i| i == adapter_id).unwrap() as i8; + } + } + error_buf.init(ErrMsg { + message: "Failed to create adapter for dx12", + r#type: ErrorBufferType::Internal, + }); + return -1; + } + + match global.request_adapter(desc, wgc::instance::AdapterInputs::IdSet(ids)) { + Ok(id) => ids.iter().position(|&i| i == id).unwrap() as i8, + Err(e) => { + error_buf.init(e); + -1 + } + } +} + +#[no_mangle] +pub unsafe extern "C" fn wgpu_server_adapter_pack_info( + global: &Global, + self_id: Option<id::AdapterId>, + byte_buf: &mut ByteBuf, +) { + let mut data = Vec::new(); + match self_id { + Some(id) => { + let wgt::AdapterInfo { + name, + vendor, + device, + device_type, + driver, + driver_info, + backend, + } = gfx_select!(id => global.adapter_get_info(id)).unwrap(); + + if static_prefs::pref!("dom.webgpu.testing.assert-hardware-adapter") { + let is_hardware = match device_type { + wgt::DeviceType::IntegratedGpu | wgt::DeviceType::DiscreteGpu => true, + _ => false, + }; + assert!( + is_hardware, + "Expected a hardware gpu adapter, got {:?}", + device_type + ); + } + + let info = AdapterInformation { + id, + limits: restrict_limits(gfx_select!(id => global.adapter_limits(id)).unwrap()), + features: gfx_select!(id => global.adapter_features(id)).unwrap(), + name, + vendor, + device, + device_type, + driver, + driver_info, + backend, + }; + bincode::serialize_into(&mut data, &info).unwrap(); + } + None => { + bincode::serialize_into(&mut data, &0u64).unwrap(); + } + } + 
*byte_buf = ByteBuf::from_vec(data); +} + +static TRACE_IDX: AtomicU32 = AtomicU32::new(0); + +#[no_mangle] +pub unsafe extern "C" fn wgpu_server_adapter_request_device( + global: &Global, + self_id: id::AdapterId, + byte_buf: &ByteBuf, + new_id: id::DeviceId, + mut error_buf: ErrorBuffer, +) { + let desc: wgc::device::DeviceDescriptor = bincode::deserialize(byte_buf.as_slice()).unwrap(); + let trace_string = std::env::var("WGPU_TRACE").ok().map(|s| { + let idx = TRACE_IDX.fetch_add(1, Ordering::Relaxed); + let path = format!("{}/{}/", s, idx); + + if std::fs::create_dir_all(&path).is_err() { + log::warn!("Failed to create directory {:?} for wgpu recording.", path); + } + + path + }); + let trace_path = trace_string + .as_ref() + .map(|string| std::path::Path::new(string.as_str())); + // TODO: in https://github.com/gfx-rs/wgpu/pull/3626/files#diff-033343814319f5a6bd781494692ea626f06f6c3acc0753a12c867b53a646c34eR97 + // which introduced the queue id parameter, the queue id is also the device id. I don't know how applicable this is to + // other situations (this one in particular). + let (_, _, error) = gfx_select!(self_id => global.adapter_request_device(self_id, &desc, trace_path, Some(new_id), Some(new_id.transmute()))); + if let Some(err) = error { + error_buf.init(err); + } +} + +#[no_mangle] +pub extern "C" fn wgpu_server_adapter_drop(global: &Global, adapter_id: id::AdapterId) { + gfx_select!(adapter_id => global.adapter_drop(adapter_id)) +} + +#[no_mangle] +pub extern "C" fn wgpu_server_device_destroy(global: &Global, self_id: id::DeviceId) { + gfx_select!(self_id => global.device_destroy(self_id)) +} + +#[no_mangle] +pub extern "C" fn wgpu_server_device_drop(global: &Global, self_id: id::DeviceId) { + gfx_select!(self_id => global.device_drop(self_id)) +} + +#[no_mangle] +pub unsafe extern "C" fn wgpu_server_set_device_lost_callback( + global: &Global, + self_id: id::DeviceId, + callback: wgc::device::DeviceLostClosureC, +) { + gfx_select!(self_id => global.device_set_device_lost_closure(self_id, wgc::device::DeviceLostClosure::from_c(callback))); +} + +impl ShaderModuleCompilationMessage { + fn set_error(&mut self, error: &CreateShaderModuleError, source: &str) { + // The WebGPU spec says that if the message doesn't point to a particular position in + // the source, the line number, position, offset and lengths should be zero. + self.line_number = 0; + self.line_pos = 0; + self.utf16_offset = 0; + self.utf16_length = 0; + + if let Some(location) = error.location(source) { + self.line_number = location.line_number as u64; + self.line_pos = location.line_position as u64; + + let start = location.offset as usize; + let end = start + location.length as usize; + self.utf16_offset = source[0..start].chars().map(|c| c.len_utf16() as u64).sum(); + self.utf16_length = source[start..end] + .chars() + .map(|c| c.len_utf16() as u64) + .sum(); + } + + let error_string = error.to_string(); + + if !error_string.is_empty() { + self.message = nsString::from(&error_string[..]); + } + } +} + +/// A compilation message representation for the ffi boundary. +/// the message is immediately copied into an equivalent C++ +/// structure that owns its strings. +#[repr(C)] +#[derive(Clone)] +pub struct ShaderModuleCompilationMessage { + pub line_number: u64, + pub line_pos: u64, + pub utf16_offset: u64, + pub utf16_length: u64, + pub message: nsString, +} + +/// Creates a shader module and returns an object describing the errors if any. +/// +/// If there was no error, the returned pointer is nil. 
+#[no_mangle] +pub extern "C" fn wgpu_server_device_create_shader_module( + global: &Global, + self_id: id::DeviceId, + module_id: id::ShaderModuleId, + label: Option<&nsACString>, + code: &nsCString, + out_message: &mut ShaderModuleCompilationMessage, + mut error_buf: ErrorBuffer, +) -> bool { + let utf8_label = label.map(|utf16| utf16.to_string()); + let label = utf8_label.as_ref().map(|s| Cow::from(&s[..])); + + let source_str = code.to_utf8(); + + let source = wgc::pipeline::ShaderModuleSource::Wgsl(Cow::from(&source_str[..])); + + let desc = wgc::pipeline::ShaderModuleDescriptor { + label, + shader_bound_checks: wgt::ShaderBoundChecks::new(), + }; + + let (_, error) = gfx_select!( + self_id => global.device_create_shader_module( + self_id, &desc, source, Some(module_id) + ) + ); + + if let Some(err) = error { + out_message.set_error(&err, &source_str[..]); + let err_type = match &err { + CreateShaderModuleError::Device(DeviceError::OutOfMemory) => ErrorBufferType::OutOfMemory, + CreateShaderModuleError::Device(DeviceError::Lost) => ErrorBufferType::DeviceLost, + _ => ErrorBufferType::Validation, + }; + + // Per spec: "User agents should not include detailed compiler error messages or + // shader text in the message text of validation errors arising here: these details + // are accessible via getCompilationInfo()" + let message = match &err { + CreateShaderModuleError::Parsing(_) => "Parsing error".to_string(), + CreateShaderModuleError::Validation(_) => "Shader validation error".to_string(), + CreateShaderModuleError::Device(device_err) => format!("{device_err:?}"), + _ => format!("{err:?}"), + }; + + error_buf.init(ErrMsg { + message: &format!("Shader module creation failed: {message}"), + r#type: err_type, + }); + return false; + } + + // Avoid allocating the structure that holds errors in the common case (no errors). + return true; +} + +#[no_mangle] +pub extern "C" fn wgpu_server_device_create_buffer( + global: &Global, + self_id: id::DeviceId, + buffer_id: id::BufferId, + label: Option<&nsACString>, + size: wgt::BufferAddress, + usage: u32, + mapped_at_creation: bool, + shm_allocation_failed: bool, + mut error_buf: ErrorBuffer, +) { + let utf8_label = label.map(|utf16| utf16.to_string()); + let label = utf8_label.as_ref().map(|s| Cow::from(&s[..])); + let usage = wgt::BufferUsages::from_bits_retain(usage); + + // Don't trust the graphics driver with buffer sizes larger than our conservative max texture size. + if shm_allocation_failed || size > MAX_BUFFER_SIZE { + error_buf.init(ErrMsg { + message: "Out of memory", + r#type: ErrorBufferType::OutOfMemory, + }); + gfx_select!(self_id => global.create_buffer_error(Some(buffer_id), label)); + return; + } + + let desc = wgc::resource::BufferDescriptor { + label, + size, + usage, + mapped_at_creation, + }; + let (_, error) = + gfx_select!(self_id => global.device_create_buffer(self_id, &desc, Some(buffer_id))); + if let Some(err) = error { + error_buf.init(err); + } +} + +/// # Safety +/// +/// Callers are responsible for ensuring `callback` is well-formed. 
+#[no_mangle] +pub unsafe extern "C" fn wgpu_server_buffer_map( + global: &Global, + buffer_id: id::BufferId, + start: wgt::BufferAddress, + size: wgt::BufferAddress, + map_mode: wgc::device::HostMap, + callback: wgc::resource::BufferMapCallbackC, + mut error_buf: ErrorBuffer, +) { + let callback = wgc::resource::BufferMapCallback::from_c(callback); + let operation = wgc::resource::BufferMapOperation { + host: map_mode, + callback: Some(callback), + }; + // All errors are also exposed to the mapping callback, so we handle them there and ignore + // the returned value of buffer_map_async. + let result = gfx_select!(buffer_id => global.buffer_map_async( + buffer_id, + start .. start + size, + operation + )); + + if let Err(error) = result { + error_buf.init(error); + } +} + +#[repr(C)] +pub struct MappedBufferSlice { + pub ptr: *mut u8, + pub length: u64, +} + +/// # Safety +/// +/// This function is unsafe as there is no guarantee that the given pointer is +/// valid for `size` elements. +#[no_mangle] +pub unsafe extern "C" fn wgpu_server_buffer_get_mapped_range( + global: &Global, + buffer_id: id::BufferId, + start: wgt::BufferAddress, + size: wgt::BufferAddress, + mut error_buf: ErrorBuffer, +) -> MappedBufferSlice { + let result = gfx_select!(buffer_id => global.buffer_get_mapped_range( + buffer_id, + start, + Some(size) + )); + + let (ptr, length) = result.unwrap_or_else(|error| { + error_buf.init(error); + (std::ptr::null_mut(), 0) + }); + MappedBufferSlice { ptr, length } +} + +#[no_mangle] +pub extern "C" fn wgpu_server_buffer_unmap( + global: &Global, + buffer_id: id::BufferId, + mut error_buf: ErrorBuffer, +) { + if let Err(e) = gfx_select!(buffer_id => global.buffer_unmap(buffer_id)) { + match e { + // NOTE: This is presumed by CTS test cases, and was even formally specified in the + // WebGPU spec. previously, but this doesn't seem formally specified now. :confused: + // + // TODO: upstream this; see <https://bugzilla.mozilla.org/show_bug.cgi?id=1842297>. + BufferAccessError::Invalid => (), + other => error_buf.init(other), + } + } +} + +#[no_mangle] +pub extern "C" fn wgpu_server_buffer_destroy(global: &Global, self_id: id::BufferId) { + // Per spec, there is no need for the buffer or even device to be in a valid state, + // even calling calling destroy multiple times is fine, so no error to push into + // an error scope. 
+ let _ = gfx_select!(self_id => global.buffer_destroy(self_id)); +} + +#[no_mangle] +pub extern "C" fn wgpu_server_buffer_drop(global: &Global, self_id: id::BufferId) { + gfx_select!(self_id => global.buffer_drop(self_id, false)); +} + +#[allow(unused_variables)] +#[no_mangle] +pub extern "C" fn wgpu_server_get_device_fence_handle( + global: &Global, + device_id: id::DeviceId, +) -> *mut c_void { + assert!(device_id.backend() == wgt::Backend::Dx12); + + #[cfg(target_os = "windows")] + if device_id.backend() == wgt::Backend::Dx12 { + let mut handle = ptr::null_mut(); + let dx12_device = unsafe { + global.device_as_hal::<wgc::api::Dx12, _, Option<d3d12::Device>>(device_id, |hal_device| { + hal_device.map(|device| device.raw_device().clone()) + }) + }; + let dx12_device = match dx12_device { + Some(device) => device, + None => { + return ptr::null_mut(); + } + }; + + let dx12_fence = unsafe { + global.device_fence_as_hal::<wgc::api::Dx12, _, Option<d3d12::Fence>>(device_id, |hal_fence| { + hal_fence.map(|fence| fence.raw_fence().clone()) + }) + }; + let dx12_fence = match dx12_fence { + Some(fence) => fence, + None => { + return ptr::null_mut(); + } + }; + + let hr = unsafe { + dx12_device.CreateSharedHandle( + dx12_fence.as_mut_ptr() as *mut winapi::um::d3d12::ID3D12DeviceChild, + std::ptr::null(), + GENERIC_ALL, + std::ptr::null(), + &mut handle, + ) + }; + + if hr != 0 { + return ptr::null_mut(); + } + + return handle; + } + ptr::null_mut() +} + +extern "C" { + #[allow(dead_code)] + fn wgpu_server_use_external_texture_for_swap_chain( + param: *mut c_void, + swap_chain_id: SwapChainId, + ) -> bool; + #[allow(dead_code)] + fn wgpu_server_ensure_external_texture_for_swap_chain( + param: *mut c_void, + swap_chain_id: SwapChainId, + device_id: id::DeviceId, + texture_id: id::TextureId, + width: u32, + height: u32, + format: wgt::TextureFormat, + usage: wgt::TextureUsages, + ) -> bool; + #[allow(dead_code)] + fn wgpu_server_get_external_texture_handle( + param: *mut c_void, + id: id::TextureId, + ) -> *mut c_void; +} + +impl Global { + fn device_action<A: wgc::hal_api::HalApi>( + &self, + self_id: id::DeviceId, + action: DeviceAction, + mut error_buf: ErrorBuffer, + ) { + match action { + #[allow(unused_variables)] + DeviceAction::CreateTexture(id, desc, swap_chain_id) => { + let max = MAX_TEXTURE_EXTENT; + if desc.size.width > max + || desc.size.height > max + || desc.size.depth_or_array_layers > max + { + gfx_select!(self_id => self.create_texture_error(Some(id), desc.label)); + error_buf.init(ErrMsg { + message: "Out of memory", + r#type: ErrorBufferType::OutOfMemory, + }); + return; + } + + #[cfg(target_os = "windows")] + { + let use_external_texture = if swap_chain_id.is_some() { + unsafe { + wgpu_server_use_external_texture_for_swap_chain( + self.owner, + swap_chain_id.unwrap(), + ) + } + } else { + false + }; + + if use_external_texture && self_id.backend() == wgt::Backend::Dx12 { + let ret = unsafe { + wgpu_server_ensure_external_texture_for_swap_chain( + self.owner, + swap_chain_id.unwrap(), + self_id, + id, + desc.size.width, + desc.size.height, + desc.format, + desc.usage, + ) + }; + if ret != true { + error_buf.init(ErrMsg { + message: "Failed to create external texture", + r#type: ErrorBufferType::Internal, + }); + return; + } + + let dx12_device = unsafe { + self.device_as_hal::<wgc::api::Dx12, _, d3d12::Device>( + self_id, + |hal_device| hal_device.unwrap().raw_device().clone(), + ) + }; + + let handle = + unsafe { wgpu_server_get_external_texture_handle(self.owner, id) }; + if 
handle.is_null() { + error_buf.init(ErrMsg { + message: "Failed to get external texture handle", + r#type: ErrorBufferType::Internal, + }); + } + let mut resource = d3d12::Resource::null(); + let hr = unsafe { + dx12_device.OpenSharedHandle( + handle, + &d3d12_ty::ID3D12Resource::uuidof(), + resource.mut_void(), + ) + }; + if hr != 0 { + error_buf.init(ErrMsg { + message: "Failed to open shared handle", + r#type: ErrorBufferType::Internal, + }); + } + + let hal_texture = unsafe { + <wgh::api::Dx12 as wgh::Api>::Device::texture_from_raw( + resource, + wgt::TextureFormat::Bgra8Unorm, + wgt::TextureDimension::D2, + desc.size, + 1, + 1, + ) + }; + let (_, error) = unsafe { + self.create_texture_from_hal::<wgh::api::Dx12>( + hal_texture, + self_id, + &desc, + Some(id), + ) + }; + if let Some(err) = error { + error_buf.init(err); + } + return; + } + } + + let (_, error) = self.device_create_texture::<A>(self_id, &desc, Some(id)); + if let Some(err) = error { + error_buf.init(err); + } + } + DeviceAction::CreateSampler(id, desc) => { + let (_, error) = self.device_create_sampler::<A>(self_id, &desc, Some(id)); + if let Some(err) = error { + error_buf.init(err); + } + } + DeviceAction::CreateBindGroupLayout(id, desc) => { + let (_, error) = + self.device_create_bind_group_layout::<A>(self_id, &desc, Some(id)); + if let Some(err) = error { + error_buf.init(err); + } + } + DeviceAction::RenderPipelineGetBindGroupLayout(pipeline_id, index, bgl_id) => { + let (_, error) = self.render_pipeline_get_bind_group_layout::<A>( + pipeline_id, + index, + Some(bgl_id), + ); + if let Some(err) = error { + error_buf.init(err); + } + } + DeviceAction::ComputePipelineGetBindGroupLayout(pipeline_id, index, bgl_id) => { + let (_, error) = self.compute_pipeline_get_bind_group_layout::<A>( + pipeline_id, + index, + Some(bgl_id), + ); + if let Some(err) = error { + error_buf.init(err); + } + } + DeviceAction::CreatePipelineLayout(id, desc) => { + let (_, error) = self.device_create_pipeline_layout::<A>(self_id, &desc, Some(id)); + if let Some(err) = error { + error_buf.init(err); + } + } + DeviceAction::CreateBindGroup(id, desc) => { + let (_, error) = self.device_create_bind_group::<A>(self_id, &desc, Some(id)); + if let Some(err) = error { + error_buf.init(err); + } + } + DeviceAction::CreateShaderModule(id, desc, code) => { + let source = wgc::pipeline::ShaderModuleSource::Wgsl(code); + let (_, error) = + self.device_create_shader_module::<A>(self_id, &desc, source, Some(id)); + if let Some(err) = error { + error_buf.init(err); + } + } + DeviceAction::CreateComputePipeline(id, desc, implicit) => { + let implicit_ids = implicit + .as_ref() + .map(|imp| wgc::device::ImplicitPipelineIds { + root_id: Some(imp.pipeline), + group_ids: &imp.bind_groups, + }); + let (_, error) = self.device_create_compute_pipeline::<A>( + self_id, + &desc, + Some(id), + implicit_ids, + ); + if let Some(err) = error { + error_buf.init(err); + } + } + DeviceAction::CreateRenderPipeline(id, desc, implicit) => { + let implicit_ids = implicit + .as_ref() + .map(|imp| wgc::device::ImplicitPipelineIds { + root_id: Some(imp.pipeline), + group_ids: &imp.bind_groups, + }); + let (_, error) = + self.device_create_render_pipeline::<A>(self_id, &desc, Some(id), implicit_ids); + if let Some(err) = error { + error_buf.init(err); + } + } + DeviceAction::CreateRenderBundle(id, encoder, desc) => { + let (_, error) = self.render_bundle_encoder_finish::<A>(encoder, &desc, Some(id)); + if let Some(err) = error { + error_buf.init(err); + } + } + 
DeviceAction::CreateRenderBundleError(buffer_id, label) => { + self.create_render_bundle_error::<A>(Some(buffer_id), label); + } + DeviceAction::CreateCommandEncoder(id, desc) => { + let (_, error) = self.device_create_command_encoder::<A>(self_id, &desc, Some(id)); + if let Some(err) = error { + error_buf.init(err); + } + } + DeviceAction::Error { message, r#type } => { + error_buf.init(ErrMsg { + message: &message, + r#type, + }); + } + } + } + + fn texture_action<A: wgc::hal_api::HalApi>( + &self, + self_id: id::TextureId, + action: TextureAction, + mut error_buf: ErrorBuffer, + ) { + match action { + TextureAction::CreateView(id, desc) => { + let (_, error) = self.texture_create_view::<A>(self_id, &desc, Some(id)); + if let Some(err) = error { + error_buf.init(err); + } + } + } + } + + fn command_encoder_action<A: wgc::hal_api::HalApi>( + &self, + self_id: id::CommandEncoderId, + action: CommandEncoderAction, + mut error_buf: ErrorBuffer, + ) { + match action { + CommandEncoderAction::CopyBufferToBuffer { + src, + src_offset, + dst, + dst_offset, + size, + } => { + if let Err(err) = self.command_encoder_copy_buffer_to_buffer::<A>( + self_id, src, src_offset, dst, dst_offset, size, + ) { + error_buf.init(err); + } + } + CommandEncoderAction::CopyBufferToTexture { src, dst, size } => { + if let Err(err) = + self.command_encoder_copy_buffer_to_texture::<A>(self_id, &src, &dst, &size) + { + error_buf.init(err); + } + } + CommandEncoderAction::CopyTextureToBuffer { src, dst, size } => { + if let Err(err) = + self.command_encoder_copy_texture_to_buffer::<A>(self_id, &src, &dst, &size) + { + error_buf.init(err); + } + } + CommandEncoderAction::CopyTextureToTexture { src, dst, size } => { + if let Err(err) = + self.command_encoder_copy_texture_to_texture::<A>(self_id, &src, &dst, &size) + { + error_buf.init(err); + } + } + CommandEncoderAction::RunComputePass { + base, + timestamp_writes, + } => { + if let Err(err) = self.command_encoder_run_compute_pass_impl::<A>( + self_id, + base.as_ref(), + timestamp_writes.as_ref(), + ) { + error_buf.init(err); + } + } + CommandEncoderAction::WriteTimestamp { + query_set_id, + query_index, + } => { + if let Err(err) = + self.command_encoder_write_timestamp::<A>(self_id, query_set_id, query_index) + { + error_buf.init(err); + } + } + CommandEncoderAction::ResolveQuerySet { + query_set_id, + start_query, + query_count, + destination, + destination_offset, + } => { + if let Err(err) = self.command_encoder_resolve_query_set::<A>( + self_id, + query_set_id, + start_query, + query_count, + destination, + destination_offset, + ) { + error_buf.init(err); + } + } + CommandEncoderAction::RunRenderPass { + base, + target_colors, + target_depth_stencil, + timestamp_writes, + occlusion_query_set_id, + } => { + if let Err(err) = self.command_encoder_run_render_pass_impl::<A>( + self_id, + base.as_ref(), + &target_colors, + target_depth_stencil.as_ref(), + timestamp_writes.as_ref(), + occlusion_query_set_id, + ) { + error_buf.init(err); + } + } + CommandEncoderAction::ClearBuffer { dst, offset, size } => { + if let Err(err) = self.command_encoder_clear_buffer::<A>(self_id, dst, offset, size) + { + error_buf.init(err); + } + } + CommandEncoderAction::ClearTexture { + dst, + ref subresource_range, + } => { + if let Err(err) = + self.command_encoder_clear_texture::<A>(self_id, dst, subresource_range) + { + error_buf.init(err); + } + } + CommandEncoderAction::PushDebugGroup(marker) => { + if let Err(err) = self.command_encoder_push_debug_group::<A>(self_id, &marker) { + 
error_buf.init(err); + } + } + CommandEncoderAction::PopDebugGroup => { + if let Err(err) = self.command_encoder_pop_debug_group::<A>(self_id) { + error_buf.init(err); + } + } + CommandEncoderAction::InsertDebugMarker(marker) => { + if let Err(err) = self.command_encoder_insert_debug_marker::<A>(self_id, &marker) { + error_buf.init(err); + } + } + } + } +} + +#[no_mangle] +pub unsafe extern "C" fn wgpu_server_device_action( + global: &Global, + self_id: id::DeviceId, + byte_buf: &ByteBuf, + error_buf: ErrorBuffer, +) { + let action = bincode::deserialize(byte_buf.as_slice()).unwrap(); + gfx_select!(self_id => global.device_action(self_id, action, error_buf)); +} + +#[no_mangle] +pub unsafe extern "C" fn wgpu_server_texture_action( + global: &Global, + self_id: id::TextureId, + byte_buf: &ByteBuf, + error_buf: ErrorBuffer, +) { + let action = bincode::deserialize(byte_buf.as_slice()).unwrap(); + gfx_select!(self_id => global.texture_action(self_id, action, error_buf)); +} + +#[no_mangle] +pub unsafe extern "C" fn wgpu_server_command_encoder_action( + global: &Global, + self_id: id::CommandEncoderId, + byte_buf: &ByteBuf, + error_buf: ErrorBuffer, +) { + let action = bincode::deserialize(byte_buf.as_slice()).unwrap(); + gfx_select!(self_id => global.command_encoder_action(self_id, action, error_buf)); +} + +#[no_mangle] +pub extern "C" fn wgpu_server_device_create_encoder( + global: &Global, + self_id: id::DeviceId, + desc: &wgt::CommandEncoderDescriptor<Option<&nsACString>>, + new_id: id::CommandEncoderId, + mut error_buf: ErrorBuffer, +) { + let utf8_label = desc.label.map(|utf16| utf16.to_string()); + let label = utf8_label.as_ref().map(|s| Cow::from(&s[..])); + + let desc = desc.map_label(|_| label); + let (_, error) = + gfx_select!(self_id => global.device_create_command_encoder(self_id, &desc, Some(new_id))); + if let Some(err) = error { + error_buf.init(err); + } +} + +#[no_mangle] +pub extern "C" fn wgpu_server_encoder_finish( + global: &Global, + self_id: id::CommandEncoderId, + desc: &wgt::CommandBufferDescriptor<Option<&nsACString>>, + mut error_buf: ErrorBuffer, +) { + let label = wgpu_string(desc.label); + let desc = desc.map_label(|_| label); + let (_, error) = gfx_select!(self_id => global.command_encoder_finish(self_id, &desc)); + if let Some(err) = error { + error_buf.init(err); + } +} + +#[no_mangle] +pub extern "C" fn wgpu_server_encoder_drop(global: &Global, self_id: id::CommandEncoderId) { + gfx_select!(self_id => global.command_encoder_drop(self_id)); +} + +#[no_mangle] +pub extern "C" fn wgpu_server_render_bundle_drop(global: &Global, self_id: id::RenderBundleId) { + gfx_select!(self_id => global.render_bundle_drop(self_id)); +} + +#[no_mangle] +pub unsafe extern "C" fn wgpu_server_encoder_copy_texture_to_buffer( + global: &Global, + self_id: id::CommandEncoderId, + source: &wgc::command::ImageCopyTexture, + dst_buffer: wgc::id::BufferId, + dst_layout: &crate::ImageDataLayout, + size: &wgt::Extent3d, + mut error_buf: ErrorBuffer, +) { + let destination = wgc::command::ImageCopyBuffer { + buffer: dst_buffer, + layout: dst_layout.into_wgt(), + }; + if let Err(err) = gfx_select!(self_id => global.command_encoder_copy_texture_to_buffer(self_id, source, &destination, size)) + { + error_buf.init(err); + } +} + +/// # Safety +/// +/// This function is unsafe as there is no guarantee that the given pointer is +/// valid for `command_buffer_id_length` elements. 
+#[no_mangle] +pub unsafe extern "C" fn wgpu_server_queue_submit( + global: &Global, + self_id: id::QueueId, + command_buffer_ids: *const id::CommandBufferId, + command_buffer_id_length: usize, + mut error_buf: ErrorBuffer, +) -> u64 { + let command_buffers = slice::from_raw_parts(command_buffer_ids, command_buffer_id_length); + let result = gfx_select!(self_id => global.queue_submit(self_id, command_buffers)); + + match result { + Err(err) => { + error_buf.init(err); + return 0; + } + Ok(wrapped_index) => wrapped_index.index, + } +} + +#[no_mangle] +pub unsafe extern "C" fn wgpu_server_on_submitted_work_done( + global: &Global, + self_id: id::QueueId, + callback: wgc::device::queue::SubmittedWorkDoneClosureC, +) { + gfx_select!(self_id => global.queue_on_submitted_work_done(self_id, wgc::device::queue::SubmittedWorkDoneClosure::from_c(callback))).unwrap(); +} + +/// # Safety +/// +/// This function is unsafe as there is no guarantee that the given pointer is +/// valid for `data_length` elements. +#[no_mangle] +pub unsafe extern "C" fn wgpu_server_queue_write_action( + global: &Global, + self_id: id::QueueId, + byte_buf: &ByteBuf, + data: *const u8, + data_length: usize, + mut error_buf: ErrorBuffer, +) { + let action: QueueWriteAction = bincode::deserialize(byte_buf.as_slice()).unwrap(); + let data = slice::from_raw_parts(data, data_length); + let result = match action { + QueueWriteAction::Buffer { dst, offset } => { + gfx_select!(self_id => global.queue_write_buffer(self_id, dst, offset, data)) + } + QueueWriteAction::Texture { dst, layout, size } => { + gfx_select!(self_id => global.queue_write_texture(self_id, &dst, data, &layout, &size)) + } + }; + if let Err(err) = result { + error_buf.init(err); + } +} + +#[no_mangle] +pub extern "C" fn wgpu_server_bind_group_layout_drop( + global: &Global, + self_id: id::BindGroupLayoutId, +) { + gfx_select!(self_id => global.bind_group_layout_drop(self_id)); +} + +#[no_mangle] +pub extern "C" fn wgpu_server_pipeline_layout_drop(global: &Global, self_id: id::PipelineLayoutId) { + gfx_select!(self_id => global.pipeline_layout_drop(self_id)); +} + +#[no_mangle] +pub extern "C" fn wgpu_server_bind_group_drop(global: &Global, self_id: id::BindGroupId) { + gfx_select!(self_id => global.bind_group_drop(self_id)); +} + +#[no_mangle] +pub extern "C" fn wgpu_server_shader_module_drop(global: &Global, self_id: id::ShaderModuleId) { + gfx_select!(self_id => global.shader_module_drop(self_id)); +} + +#[no_mangle] +pub extern "C" fn wgpu_server_compute_pipeline_drop( + global: &Global, + self_id: id::ComputePipelineId, +) { + gfx_select!(self_id => global.compute_pipeline_drop(self_id)); +} + +#[no_mangle] +pub extern "C" fn wgpu_server_render_pipeline_drop(global: &Global, self_id: id::RenderPipelineId) { + gfx_select!(self_id => global.render_pipeline_drop(self_id)); +} + +#[no_mangle] +pub extern "C" fn wgpu_server_texture_destroy(global: &Global, self_id: id::TextureId) { + let _ = gfx_select!(self_id => global.texture_destroy(self_id)); +} + +#[no_mangle] +pub extern "C" fn wgpu_server_texture_drop(global: &Global, self_id: id::TextureId) { + gfx_select!(self_id => global.texture_drop(self_id, false)); +} + +#[no_mangle] +pub extern "C" fn wgpu_server_texture_view_drop(global: &Global, self_id: id::TextureViewId) { + gfx_select!(self_id => global.texture_view_drop(self_id, false)).unwrap(); +} + +#[no_mangle] +pub extern "C" fn wgpu_server_sampler_drop(global: &Global, self_id: id::SamplerId) { + gfx_select!(self_id => global.sampler_drop(self_id)); +} + 
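Editor's note: the queue write-action entry point above (wgpu_server_queue_write_action) receives its parameters through the same bincode-encoded ByteBuf channel that lib.rs sets up (ByteBuf::from_vec, DropAction::to_byte_buf). Below is a minimal, hedged sketch of that round trip, assuming the serde (with derive) and bincode 1.x crates; DemoWriteAction is a hypothetical stand-in for QueueWriteAction, not the real type.

use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize)]
enum DemoWriteAction {
    Buffer { dst: u64, offset: u64 },
    Texture { dst: u64, size: (u32, u32, u32) },
}

fn main() {
    // Client side: encode the action into a plain Vec<u8>; ByteBuf::from_vec would wrap it.
    let action = DemoWriteAction::Buffer { dst: 7, offset: 256 };
    let bytes = bincode::serialize(&action).unwrap();

    // Server side: decode the bytes and dispatch on the variant, mirroring the
    // match inside wgpu_server_queue_write_action.
    match bincode::deserialize::<DemoWriteAction>(&bytes).unwrap() {
        DemoWriteAction::Buffer { dst, offset } => {
            println!("queue_write_buffer(dst={dst}, offset={offset})");
        }
        DemoWriteAction::Texture { dst, size } => {
            println!("queue_write_texture(dst={dst}, size={size:?})");
        }
    }
}

Shipping one serialized enum per message keeps the C ABI surface small: only an opaque byte buffer plus the raw data pointer cross the FFI boundary, while the Rust side on both ends keeps full type information.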
+#[no_mangle] +pub extern "C" fn wgpu_server_compute_pipeline_get_bind_group_layout( + global: &Global, + self_id: id::ComputePipelineId, + index: u32, + assign_id: id::BindGroupLayoutId, + mut error_buf: ErrorBuffer, +) { + let (_, error) = gfx_select!(self_id => global.compute_pipeline_get_bind_group_layout(self_id, index, Some(assign_id))); + if let Some(err) = error { + error_buf.init(err); + } +} + +#[no_mangle] +pub extern "C" fn wgpu_server_render_pipeline_get_bind_group_layout( + global: &Global, + self_id: id::RenderPipelineId, + index: u32, + assign_id: id::BindGroupLayoutId, + mut error_buf: ErrorBuffer, +) { + let (_, error) = gfx_select!(self_id => global.render_pipeline_get_bind_group_layout(self_id, index, Some(assign_id))); + if let Some(err) = error { + error_buf.init(err); + } +} + +/// Encode the freeing of the selected ID into a byte buf. +#[no_mangle] +pub extern "C" fn wgpu_server_adapter_free(id: id::AdapterId, drop_byte_buf: &mut ByteBuf) { + *drop_byte_buf = DropAction::Adapter(id).to_byte_buf(); +} +#[no_mangle] +pub extern "C" fn wgpu_server_device_free(id: id::DeviceId, drop_byte_buf: &mut ByteBuf) { + *drop_byte_buf = DropAction::Device(id).to_byte_buf(); +} +#[no_mangle] +pub extern "C" fn wgpu_server_shader_module_free( + id: id::ShaderModuleId, + drop_byte_buf: &mut ByteBuf, +) { + *drop_byte_buf = DropAction::ShaderModule(id).to_byte_buf(); +} +#[no_mangle] +pub extern "C" fn wgpu_server_pipeline_layout_free( + id: id::PipelineLayoutId, + drop_byte_buf: &mut ByteBuf, +) { + *drop_byte_buf = DropAction::PipelineLayout(id).to_byte_buf(); +} +#[no_mangle] +pub extern "C" fn wgpu_server_bind_group_layout_free( + id: id::BindGroupLayoutId, + drop_byte_buf: &mut ByteBuf, +) { + *drop_byte_buf = DropAction::BindGroupLayout(id).to_byte_buf(); +} +#[no_mangle] +pub extern "C" fn wgpu_server_bind_group_free(id: id::BindGroupId, drop_byte_buf: &mut ByteBuf) { + *drop_byte_buf = DropAction::BindGroup(id).to_byte_buf(); +} +#[no_mangle] +pub extern "C" fn wgpu_server_command_buffer_free( + id: id::CommandBufferId, + drop_byte_buf: &mut ByteBuf, +) { + *drop_byte_buf = DropAction::CommandBuffer(id).to_byte_buf(); +} +#[no_mangle] +pub extern "C" fn wgpu_server_render_bundle_free( + id: id::RenderBundleId, + drop_byte_buf: &mut ByteBuf, +) { + *drop_byte_buf = DropAction::RenderBundle(id).to_byte_buf(); +} +#[no_mangle] +pub extern "C" fn wgpu_server_render_pipeline_free( + id: id::RenderPipelineId, + drop_byte_buf: &mut ByteBuf, +) { + *drop_byte_buf = DropAction::RenderPipeline(id).to_byte_buf(); +} +#[no_mangle] +pub extern "C" fn wgpu_server_compute_pipeline_free( + id: id::ComputePipelineId, + drop_byte_buf: &mut ByteBuf, +) { + *drop_byte_buf = DropAction::ComputePipeline(id).to_byte_buf(); +} +#[no_mangle] +pub extern "C" fn wgpu_server_buffer_free(id: id::BufferId, drop_byte_buf: &mut ByteBuf) { + *drop_byte_buf = DropAction::Buffer(id).to_byte_buf(); +} +#[no_mangle] +pub extern "C" fn wgpu_server_texture_free(id: id::TextureId, drop_byte_buf: &mut ByteBuf) { + *drop_byte_buf = DropAction::Texture(id).to_byte_buf(); +} +#[no_mangle] +pub extern "C" fn wgpu_server_texture_view_free( + id: id::TextureViewId, + drop_byte_buf: &mut ByteBuf, +) { + *drop_byte_buf = DropAction::TextureView(id).to_byte_buf(); +} +#[no_mangle] +pub extern "C" fn wgpu_server_sampler_free(id: id::SamplerId, drop_byte_buf: &mut ByteBuf) { + *drop_byte_buf = DropAction::Sampler(id).to_byte_buf(); +} |
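Editor's note: the wgpu_server_*_free functions above all hand their payload back through a ByteBuf, whose from_vec constructor leaks the backing Vec so that the pointer, length, and capacity can cross the FFI boundary. The following is a minimal, self-contained sketch of that ownership hand-off; DemoByteBuf and its into_vec helper are hypothetical illustrations (in the real bindings the reclaiming side lives in C++/glue code), not part of the patch.

use std::mem;

#[repr(C)]
struct DemoByteBuf {
    data: *const u8,
    len: usize,
    capacity: usize,
}

impl DemoByteBuf {
    fn from_vec(vec: Vec<u8>) -> Self {
        if vec.is_empty() {
            // A null pointer marks the empty case, matching ByteBuf::from_vec above.
            return DemoByteBuf { data: std::ptr::null(), len: 0, capacity: 0 };
        }
        let bb = DemoByteBuf { data: vec.as_ptr(), len: vec.len(), capacity: vec.capacity() };
        mem::forget(vec); // ownership now lives in the raw parts
        bb
    }

    // Hypothetical inverse, shown only to illustrate how the allocation is reclaimed.
    unsafe fn into_vec(self) -> Vec<u8> {
        if self.data.is_null() {
            Vec::new()
        } else {
            Vec::from_raw_parts(self.data as *mut u8, self.len, self.capacity)
        }
    }
}

fn main() {
    let bb = DemoByteBuf::from_vec(vec![1, 2, 3]);
    // ... the raw parts would travel across the FFI boundary here ...
    let restored = unsafe { bb.into_vec() };
    assert_eq!(restored, vec![1, 2, 3]);
    // `restored` drops normally, freeing the allocation exactly once.
}

Preserving the capacity alongside the length is what lets the receiving side rebuild the original Vec and release it through the same allocator without copying the data.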