Diffstat (limited to 'gfx/wgpu/wgpu-core/src/device')
 gfx/wgpu/wgpu-core/src/device/alloc.rs      |  294 +
 gfx/wgpu/wgpu-core/src/device/descriptor.rs |  168 +
 gfx/wgpu/wgpu-core/src/device/life.rs       |  760 +
 gfx/wgpu/wgpu-core/src/device/mod.rs        | 4217 +
 gfx/wgpu/wgpu-core/src/device/queue.rs      |  696 +
 gfx/wgpu/wgpu-core/src/device/trace.rs      |  192 +
 6 files changed, 6327 insertions(+), 0 deletions(-)
diff --git a/gfx/wgpu/wgpu-core/src/device/alloc.rs b/gfx/wgpu/wgpu-core/src/device/alloc.rs
new file mode 100644
index 0000000000..893830ace6
--- /dev/null
+++ b/gfx/wgpu/wgpu-core/src/device/alloc.rs
@@ -0,0 +1,294 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+use super::DeviceError;
+use hal::device::Device;
+use std::{borrow::Cow, fmt, iter, ptr::NonNull, sync::Arc};
+
+pub struct MemoryAllocator<B: hal::Backend>(gpu_alloc::GpuAllocator<Arc<B::Memory>>);
+#[derive(Debug)]
+pub struct MemoryBlock<B: hal::Backend>(gpu_alloc::MemoryBlock<Arc<B::Memory>>);
+struct MemoryDevice<'a, B: hal::Backend>(&'a B::Device);
+
+//TODO: https://github.com/zakarumych/gpu-alloc/issues/9
+impl<B: hal::Backend> fmt::Debug for MemoryAllocator<B> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ write!(f, "MemoryAllocator")
+ }
+}
+
+impl<B: hal::Backend> MemoryAllocator<B> {
+ pub fn new(mem_props: hal::adapter::MemoryProperties, limits: hal::Limits) -> Self {
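+ // All the `gpu_alloc` size thresholds below are in bytes, e.g. `32 << 20` is 32 MiB.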
+ let mem_config = gpu_alloc::Config {
+ dedicated_treshold: 32 << 20,
+ preferred_dedicated_treshold: 8 << 20,
+ transient_dedicated_treshold: 128 << 20,
+ linear_chunk: 128 << 20,
+ minimal_buddy_size: 1 << 10,
+ initial_buddy_dedicated_size: 8 << 20,
+ };
+ let properties = gpu_alloc::DeviceProperties {
+ memory_types: Cow::Owned(
+ mem_props
+ .memory_types
+ .iter()
+ .map(|mt| gpu_alloc::MemoryType {
+ heap: mt.heap_index as u32,
+ props: gpu_alloc::MemoryPropertyFlags::from_bits_truncate(
+ mt.properties.bits() as u8,
+ ),
+ })
+ .collect::<Vec<_>>(),
+ ),
+ memory_heaps: Cow::Owned(
+ mem_props
+ .memory_heaps
+ .iter()
+ .map(|mh| gpu_alloc::MemoryHeap { size: mh.size })
+ .collect::<Vec<_>>(),
+ ),
+ max_memory_allocation_count: if limits.max_memory_allocation_count == 0 {
+ tracing::warn!("max_memory_allocation_count is not set by gfx-rs backend");
+ !0
+ } else {
+ limits.max_memory_allocation_count.min(!0u32 as usize) as u32
+ },
+ max_memory_allocation_size: !0,
+ non_coherent_atom_size: limits.non_coherent_atom_size as u64,
+ buffer_device_address: false,
+ };
+ MemoryAllocator(gpu_alloc::GpuAllocator::new(mem_config, properties))
+ }
+
+ pub fn allocate(
+ &mut self,
+ device: &B::Device,
+ requirements: hal::memory::Requirements,
+ usage: gpu_alloc::UsageFlags,
+ ) -> Result<MemoryBlock<B>, DeviceError> {
+ assert!(requirements.alignment.is_power_of_two());
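+ // The alignment is a power of two (asserted above), so `alignment - 1` is
+ // exactly the bit mask that `gpu_alloc` expects.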
+ let request = gpu_alloc::Request {
+ size: requirements.size,
+ align_mask: requirements.alignment - 1,
+ memory_types: requirements.type_mask,
+ usage,
+ };
+
+ unsafe { self.0.alloc(&MemoryDevice::<B>(device), request) }
+ .map(MemoryBlock)
+ .map_err(|err| match err {
+ gpu_alloc::AllocationError::OutOfHostMemory
+ | gpu_alloc::AllocationError::OutOfDeviceMemory => DeviceError::OutOfMemory,
+ _ => panic!("Unable to allocate memory: {:?}", err),
+ })
+ }
+
+ pub fn free(&mut self, device: &B::Device, block: MemoryBlock<B>) {
+ unsafe { self.0.dealloc(&MemoryDevice::<B>(device), block.0) }
+ }
+
+ pub fn clear(&mut self, device: &B::Device) {
+ unsafe { self.0.cleanup(&MemoryDevice::<B>(device)) }
+ }
+}
+
+impl<B: hal::Backend> MemoryBlock<B> {
+ pub fn bind_buffer(
+ &self,
+ device: &B::Device,
+ buffer: &mut B::Buffer,
+ ) -> Result<(), DeviceError> {
+ unsafe {
+ device
+ .bind_buffer_memory(self.0.memory(), self.0.offset(), buffer)
+ .map_err(DeviceError::from_bind)
+ }
+ }
+
+ pub fn bind_image(&self, device: &B::Device, image: &mut B::Image) -> Result<(), DeviceError> {
+ unsafe {
+ device
+ .bind_image_memory(self.0.memory(), self.0.offset(), image)
+ .map_err(DeviceError::from_bind)
+ }
+ }
+
+ pub fn is_coherent(&self) -> bool {
+ self.0
+ .props()
+ .contains(gpu_alloc::MemoryPropertyFlags::HOST_COHERENT)
+ }
+
+ pub fn map(
+ &mut self,
+ device: &B::Device,
+ inner_offset: wgt::BufferAddress,
+ size: wgt::BufferAddress,
+ ) -> Result<NonNull<u8>, DeviceError> {
+ let offset = inner_offset;
+ unsafe {
+ self.0
+ .map(&MemoryDevice::<B>(device), offset, size as usize)
+ .map_err(DeviceError::from)
+ }
+ }
+
+ pub fn unmap(&mut self, device: &B::Device) {
+ unsafe { self.0.unmap(&MemoryDevice::<B>(device)) };
+ }
+
+ pub fn write_bytes(
+ &mut self,
+ device: &B::Device,
+ inner_offset: wgt::BufferAddress,
+ data: &[u8],
+ ) -> Result<(), DeviceError> {
+ let offset = inner_offset;
+ unsafe {
+ self.0
+ .write_bytes(&MemoryDevice::<B>(device), offset, data)
+ .map_err(DeviceError::from)
+ }
+ }
+
+ pub fn read_bytes(
+ &mut self,
+ device: &B::Device,
+ inner_offset: wgt::BufferAddress,
+ data: &mut [u8],
+ ) -> Result<(), DeviceError> {
+ let offset = inner_offset;
+ unsafe {
+ self.0
+ .read_bytes(&MemoryDevice::<B>(device), offset, data)
+ .map_err(DeviceError::from)
+ }
+ }
+
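+ /// Translate a block-relative offset into a segment of the backing memory
+ /// object; a `None` size falls back to the full block size.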
+ fn segment(
+ &self,
+ inner_offset: wgt::BufferAddress,
+ size: Option<wgt::BufferAddress>,
+ ) -> hal::memory::Segment {
+ hal::memory::Segment {
+ offset: self.0.offset() + inner_offset,
+ size: size.or(Some(self.0.size())),
+ }
+ }
+
+ pub fn flush_range(
+ &self,
+ device: &B::Device,
+ inner_offset: wgt::BufferAddress,
+ size: Option<wgt::BufferAddress>,
+ ) -> Result<(), DeviceError> {
+ let segment = self.segment(inner_offset, size);
+ unsafe {
+ device
+ .flush_mapped_memory_ranges(iter::once((&**self.0.memory(), segment)))
+ .or(Err(DeviceError::OutOfMemory))
+ }
+ }
+
+ pub fn invalidate_range(
+ &self,
+ device: &B::Device,
+ inner_offset: wgt::BufferAddress,
+ size: Option<wgt::BufferAddress>,
+ ) -> Result<(), DeviceError> {
+ let segment = self.segment(inner_offset, size);
+ unsafe {
+ device
+ .invalidate_mapped_memory_ranges(iter::once((&**self.0.memory(), segment)))
+ .or(Err(DeviceError::OutOfMemory))
+ }
+ }
+}
+
+impl<B: hal::Backend> gpu_alloc::MemoryDevice<Arc<B::Memory>> for MemoryDevice<'_, B> {
+ #[cfg_attr(feature = "tracing", tracing::instrument(skip(self)))]
+ unsafe fn allocate_memory(
+ &self,
+ size: u64,
+ memory_type: u32,
+ flags: gpu_alloc::AllocationFlags,
+ ) -> Result<Arc<B::Memory>, gpu_alloc::OutOfMemory> {
+ assert!(flags.is_empty());
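+ // We advertise `buffer_device_address: false` in `MemoryAllocator::new`,
+ // so gpu-alloc should never pass any allocation flags here.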
+
+ self.0
+ .allocate_memory(hal::MemoryTypeId(memory_type as _), size)
+ .map(Arc::new)
+ .map_err(|_| gpu_alloc::OutOfMemory::OutOfDeviceMemory)
+ }
+
+ #[cfg_attr(feature = "tracing", tracing::instrument(skip(self)))]
+ unsafe fn deallocate_memory(&self, memory: Arc<B::Memory>) {
+ let memory = Arc::try_unwrap(memory).expect("Memory must not be used anywhere");
+ self.0.free_memory(memory);
+ }
+
+ #[cfg_attr(feature = "tracing", tracing::instrument(skip(self)))]
+ unsafe fn map_memory(
+ &self,
+ memory: &Arc<B::Memory>,
+ offset: u64,
+ size: u64,
+ ) -> Result<NonNull<u8>, gpu_alloc::DeviceMapError> {
+ match self.0.map_memory(
+ memory,
+ hal::memory::Segment {
+ offset,
+ size: Some(size),
+ },
+ ) {
+ Ok(ptr) => Ok(NonNull::new(ptr).expect("Pointer to memory mapping must not be null")),
+ Err(hal::device::MapError::OutOfMemory(_)) => {
+ Err(gpu_alloc::DeviceMapError::OutOfDeviceMemory)
+ }
+ Err(hal::device::MapError::MappingFailed) => Err(gpu_alloc::DeviceMapError::MapFailed),
+ Err(other) => panic!("Unexpected map error: {:?}", other),
+ }
+ }
+
+ #[cfg_attr(feature = "tracing", tracing::instrument(skip(self)))]
+ unsafe fn unmap_memory(&self, memory: &Arc<B::Memory>) {
+ self.0.unmap_memory(memory);
+ }
+
+ #[cfg_attr(feature = "tracing", tracing::instrument(skip(self)))]
+ unsafe fn invalidate_memory_ranges(
+ &self,
+ ranges: &[gpu_alloc::MappedMemoryRange<'_, Arc<B::Memory>>],
+ ) -> Result<(), gpu_alloc::OutOfMemory> {
+ self.0
+ .invalidate_mapped_memory_ranges(ranges.iter().map(|range| {
+ (
+ &**range.memory,
+ hal::memory::Segment {
+ offset: range.offset,
+ size: Some(range.size),
+ },
+ )
+ }))
+ .map_err(|_| gpu_alloc::OutOfMemory::OutOfHostMemory)
+ }
+
+ #[cfg_attr(feature = "tracing", tracing::instrument(skip(self)))]
+ unsafe fn flush_memory_ranges(
+ &self,
+ ranges: &[gpu_alloc::MappedMemoryRange<'_, Arc<B::Memory>>],
+ ) -> Result<(), gpu_alloc::OutOfMemory> {
+ self.0
+ .flush_mapped_memory_ranges(ranges.iter().map(|range| {
+ (
+ &**range.memory,
+ hal::memory::Segment {
+ offset: range.offset,
+ size: Some(range.size),
+ },
+ )
+ }))
+ .map_err(|_| gpu_alloc::OutOfMemory::OutOfHostMemory)
+ }
+}
diff --git a/gfx/wgpu/wgpu-core/src/device/descriptor.rs b/gfx/wgpu/wgpu-core/src/device/descriptor.rs
new file mode 100644
index 0000000000..92b896bd31
--- /dev/null
+++ b/gfx/wgpu/wgpu-core/src/device/descriptor.rs
@@ -0,0 +1,168 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+use super::DeviceError;
+use arrayvec::ArrayVec;
+
+pub use gpu_descriptor::DescriptorTotalCount;
+
+pub type DescriptorSet<B> = gpu_descriptor::DescriptorSet<<B as hal::Backend>::DescriptorSet>;
+
+#[derive(Debug)]
+pub struct DescriptorAllocator<B: hal::Backend>(
+ gpu_descriptor::DescriptorAllocator<B::DescriptorPool, B::DescriptorSet>,
+);
+struct DescriptorDevice<'a, B: hal::Backend>(&'a B::Device);
+
+impl<B: hal::Backend> DescriptorAllocator<B> {
+ pub fn new() -> Self {
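+ // The argument is gpu-descriptor's cap on `UPDATE_AFTER_BIND` descriptors
+ // across all pools; wgpu doesn't use those, hence 0.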
+ DescriptorAllocator(unsafe { gpu_descriptor::DescriptorAllocator::new(0) })
+ }
+
+ pub fn allocate(
+ &mut self,
+ device: &B::Device,
+ layout: &B::DescriptorSetLayout,
+ layout_descriptor_count: &DescriptorTotalCount,
+ count: u32,
+ ) -> Result<Vec<DescriptorSet<B>>, DeviceError> {
+ self.0
+ .allocate(
+ &DescriptorDevice::<B>(device),
+ layout,
+ gpu_descriptor::DescriptorSetLayoutCreateFlags::empty(),
+ layout_descriptor_count,
+ count,
+ )
+ .map_err(|err| {
+ tracing::warn!("Descriptor set allocation failed: {}", err);
+ DeviceError::OutOfMemory
+ })
+ }
+
+ pub fn free(&mut self, device: &B::Device, sets: impl IntoIterator<Item = DescriptorSet<B>>) {
+ unsafe { self.0.free(&DescriptorDevice::<B>(device), sets) }
+ }
+
+ pub fn cleanup(&mut self, device: &B::Device) {
+ self.0.cleanup(&DescriptorDevice::<B>(device))
+ }
+}
+
+impl<B: hal::Backend>
+ gpu_descriptor::DescriptorDevice<B::DescriptorSetLayout, B::DescriptorPool, B::DescriptorSet>
+ for DescriptorDevice<'_, B>
+{
+ unsafe fn create_descriptor_pool(
+ &self,
+ descriptor_count: &DescriptorTotalCount,
+ max_sets: u32,
+ flags: gpu_descriptor::DescriptorPoolCreateFlags,
+ ) -> Result<B::DescriptorPool, gpu_descriptor::CreatePoolError> {
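+ // One range per descriptor type this pool may serve; zero-count ranges
+ // are stripped below before the pool is created.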
+ let mut ranges = ArrayVec::<[_; 7]>::new();
+ ranges.push(hal::pso::DescriptorRangeDesc {
+ ty: hal::pso::DescriptorType::Sampler,
+ count: descriptor_count.sampler as _,
+ });
+ ranges.push(hal::pso::DescriptorRangeDesc {
+ ty: hal::pso::DescriptorType::Image {
+ ty: hal::pso::ImageDescriptorType::Sampled {
+ with_sampler: false,
+ },
+ },
+ count: descriptor_count.sampled_image as _,
+ });
+ ranges.push(hal::pso::DescriptorRangeDesc {
+ ty: hal::pso::DescriptorType::Image {
+ ty: hal::pso::ImageDescriptorType::Storage { read_only: false },
+ },
+ count: descriptor_count.storage_image as _,
+ });
+ ranges.push(hal::pso::DescriptorRangeDesc {
+ ty: hal::pso::DescriptorType::Buffer {
+ ty: hal::pso::BufferDescriptorType::Uniform,
+ format: hal::pso::BufferDescriptorFormat::Structured {
+ dynamic_offset: false,
+ },
+ },
+ count: descriptor_count.uniform_buffer as _,
+ });
+ ranges.push(hal::pso::DescriptorRangeDesc {
+ ty: hal::pso::DescriptorType::Buffer {
+ ty: hal::pso::BufferDescriptorType::Storage { read_only: false },
+ format: hal::pso::BufferDescriptorFormat::Structured {
+ dynamic_offset: false,
+ },
+ },
+ count: descriptor_count.storage_buffer as _,
+ });
+ ranges.push(hal::pso::DescriptorRangeDesc {
+ ty: hal::pso::DescriptorType::Buffer {
+ ty: hal::pso::BufferDescriptorType::Uniform,
+ format: hal::pso::BufferDescriptorFormat::Structured {
+ dynamic_offset: true,
+ },
+ },
+ count: descriptor_count.uniform_buffer_dynamic as _,
+ });
+ ranges.push(hal::pso::DescriptorRangeDesc {
+ ty: hal::pso::DescriptorType::Buffer {
+ ty: hal::pso::BufferDescriptorType::Storage { read_only: false },
+ format: hal::pso::BufferDescriptorFormat::Structured {
+ dynamic_offset: true,
+ },
+ },
+ count: descriptor_count.storage_buffer_dynamic as _,
+ });
+ ranges.retain(|rd| rd.count != 0);
+
+ match hal::device::Device::create_descriptor_pool(
+ self.0,
+ max_sets as usize,
+ ranges,
+ hal::pso::DescriptorPoolCreateFlags::from_bits_truncate(flags.bits() as u32),
+ ) {
+ Ok(pool) => Ok(pool),
+ Err(hal::device::OutOfMemory::Host) => {
+ Err(gpu_descriptor::CreatePoolError::OutOfHostMemory)
+ }
+ Err(hal::device::OutOfMemory::Device) => {
+ Err(gpu_descriptor::CreatePoolError::OutOfDeviceMemory)
+ }
+ }
+ }
+
+ unsafe fn destroy_descriptor_pool(&self, pool: B::DescriptorPool) {
+ hal::device::Device::destroy_descriptor_pool(self.0, pool);
+ }
+
+ unsafe fn alloc_descriptor_sets<'a>(
+ &self,
+ pool: &mut B::DescriptorPool,
+ layouts: impl Iterator<Item = &'a B::DescriptorSetLayout>,
+ sets: &mut impl Extend<B::DescriptorSet>,
+ ) -> Result<(), gpu_descriptor::DeviceAllocationError> {
+ use gpu_descriptor::DeviceAllocationError as Dae;
+ match hal::pso::DescriptorPool::allocate(pool, layouts, sets) {
+ Ok(()) => Ok(()),
+ Err(hal::pso::AllocationError::OutOfMemory(oom)) => Err(match oom {
+ hal::device::OutOfMemory::Host => Dae::OutOfHostMemory,
+ hal::device::OutOfMemory::Device => Dae::OutOfDeviceMemory,
+ }),
+ Err(hal::pso::AllocationError::OutOfPoolMemory) => Err(Dae::OutOfPoolMemory),
+ Err(hal::pso::AllocationError::FragmentedPool) => Err(Dae::FragmentedPool),
+ Err(hal::pso::AllocationError::IncompatibleLayout) => {
+ panic!("Incompatible descriptor set layout")
+ }
+ }
+ }
+
+ unsafe fn dealloc_descriptor_sets<'a>(
+ &self,
+ pool: &mut B::DescriptorPool,
+ sets: impl Iterator<Item = B::DescriptorSet>,
+ ) {
+ hal::pso::DescriptorPool::free(pool, sets)
+ }
+}
diff --git a/gfx/wgpu/wgpu-core/src/device/life.rs b/gfx/wgpu/wgpu-core/src/device/life.rs
new file mode 100644
index 0000000000..3c46b72362
--- /dev/null
+++ b/gfx/wgpu/wgpu-core/src/device/life.rs
@@ -0,0 +1,760 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#[cfg(feature = "trace")]
+use crate::device::trace;
+use crate::{
+ device::{
+ alloc,
+ descriptor::{DescriptorAllocator, DescriptorSet},
+ queue::TempResource,
+ DeviceError,
+ },
+ hub::{GfxBackend, GlobalIdentityHandlerFactory, Hub, Token},
+ id, resource,
+ track::TrackerSet,
+ FastHashMap, RefCount, Stored, SubmissionIndex,
+};
+
+use copyless::VecHelper as _;
+use hal::device::Device as _;
+use parking_lot::Mutex;
+use thiserror::Error;
+
+use std::sync::atomic::Ordering;
+
+const CLEANUP_WAIT_MS: u64 = 5000;
+
+/// A struct that keeps lists of resources that are no longer needed by the user.
+#[derive(Debug, Default)]
+pub struct SuspectedResources {
+ pub(crate) buffers: Vec<id::Valid<id::BufferId>>,
+ pub(crate) textures: Vec<id::Valid<id::TextureId>>,
+ pub(crate) texture_views: Vec<id::Valid<id::TextureViewId>>,
+ pub(crate) samplers: Vec<id::Valid<id::SamplerId>>,
+ pub(crate) bind_groups: Vec<id::Valid<id::BindGroupId>>,
+ pub(crate) compute_pipelines: Vec<id::Valid<id::ComputePipelineId>>,
+ pub(crate) render_pipelines: Vec<id::Valid<id::RenderPipelineId>>,
+ pub(crate) bind_group_layouts: Vec<id::Valid<id::BindGroupLayoutId>>,
+ pub(crate) pipeline_layouts: Vec<Stored<id::PipelineLayoutId>>,
+ pub(crate) render_bundles: Vec<id::Valid<id::RenderBundleId>>,
+}
+
+impl SuspectedResources {
+ pub(crate) fn clear(&mut self) {
+ self.buffers.clear();
+ self.textures.clear();
+ self.texture_views.clear();
+ self.samplers.clear();
+ self.bind_groups.clear();
+ self.compute_pipelines.clear();
+ self.render_pipelines.clear();
+ self.bind_group_layouts.clear();
+ self.pipeline_layouts.clear();
+ self.render_bundles.clear();
+ }
+
+ pub(crate) fn extend(&mut self, other: &Self) {
+ self.buffers.extend_from_slice(&other.buffers);
+ self.textures.extend_from_slice(&other.textures);
+ self.texture_views.extend_from_slice(&other.texture_views);
+ self.samplers.extend_from_slice(&other.samplers);
+ self.bind_groups.extend_from_slice(&other.bind_groups);
+ self.compute_pipelines
+ .extend_from_slice(&other.compute_pipelines);
+ self.render_pipelines
+ .extend_from_slice(&other.render_pipelines);
+ self.bind_group_layouts
+ .extend_from_slice(&other.bind_group_layouts);
+ self.pipeline_layouts
+ .extend_from_slice(&other.pipeline_layouts);
+ self.render_bundles.extend_from_slice(&other.render_bundles);
+ }
+
+ pub(crate) fn add_trackers(&mut self, trackers: &TrackerSet) {
+ self.buffers.extend(trackers.buffers.used());
+ self.textures.extend(trackers.textures.used());
+ self.texture_views.extend(trackers.views.used());
+ self.samplers.extend(trackers.samplers.used());
+ self.bind_groups.extend(trackers.bind_groups.used());
+ self.compute_pipelines.extend(trackers.compute_pipes.used());
+ self.render_pipelines.extend(trackers.render_pipes.used());
+ self.render_bundles.extend(trackers.bundles.used());
+ }
+}
+
+/// A struct that keeps lists of resources that are no longer needed.
+#[derive(Debug)]
+struct NonReferencedResources<B: hal::Backend> {
+ buffers: Vec<(B::Buffer, alloc::MemoryBlock<B>)>,
+ images: Vec<(B::Image, alloc::MemoryBlock<B>)>,
+ // Note: we keep the associated ID here in order to be able to check
+ // at any point what resources are used in a submission.
+ image_views: Vec<(id::Valid<id::TextureViewId>, B::ImageView)>,
+ samplers: Vec<B::Sampler>,
+ framebuffers: Vec<B::Framebuffer>,
+ desc_sets: Vec<DescriptorSet<B>>,
+ compute_pipes: Vec<B::ComputePipeline>,
+ graphics_pipes: Vec<B::GraphicsPipeline>,
+ descriptor_set_layouts: Vec<B::DescriptorSetLayout>,
+ pipeline_layouts: Vec<B::PipelineLayout>,
+}
+
+impl<B: hal::Backend> NonReferencedResources<B> {
+ fn new() -> Self {
+ Self {
+ buffers: Vec::new(),
+ images: Vec::new(),
+ image_views: Vec::new(),
+ samplers: Vec::new(),
+ framebuffers: Vec::new(),
+ desc_sets: Vec::new(),
+ compute_pipes: Vec::new(),
+ graphics_pipes: Vec::new(),
+ descriptor_set_layouts: Vec::new(),
+ pipeline_layouts: Vec::new(),
+ }
+ }
+
+ fn extend(&mut self, other: Self) {
+ self.buffers.extend(other.buffers);
+ self.images.extend(other.images);
+ self.image_views.extend(other.image_views);
+ self.samplers.extend(other.samplers);
+ self.framebuffers.extend(other.framebuffers);
+ self.desc_sets.extend(other.desc_sets);
+ self.compute_pipes.extend(other.compute_pipes);
+ self.graphics_pipes.extend(other.graphics_pipes);
+ assert!(other.descriptor_set_layouts.is_empty());
+ assert!(other.pipeline_layouts.is_empty());
+ }
+
+ unsafe fn clean(
+ &mut self,
+ device: &B::Device,
+ memory_allocator_mutex: &Mutex<alloc::MemoryAllocator<B>>,
+ descriptor_allocator_mutex: &Mutex<DescriptorAllocator<B>>,
+ ) {
+ if !self.buffers.is_empty() || !self.images.is_empty() {
+ let mut allocator = memory_allocator_mutex.lock();
+ for (raw, memory) in self.buffers.drain(..) {
+ tracing::trace!("Buffer {:?} is destroyed with memory {:?}", raw, memory);
+ device.destroy_buffer(raw);
+ allocator.free(device, memory);
+ }
+ for (raw, memory) in self.images.drain(..) {
+ tracing::trace!("Image {:?} is destroyed with memory {:?}", raw, memory);
+ device.destroy_image(raw);
+ allocator.free(device, memory);
+ }
+ }
+
+ for (_, raw) in self.image_views.drain(..) {
+ device.destroy_image_view(raw);
+ }
+ for raw in self.samplers.drain(..) {
+ device.destroy_sampler(raw);
+ }
+ for raw in self.framebuffers.drain(..) {
+ device.destroy_framebuffer(raw);
+ }
+
+ if !self.desc_sets.is_empty() {
+ descriptor_allocator_mutex
+ .lock()
+ .free(device, self.desc_sets.drain(..));
+ }
+
+ for raw in self.compute_pipes.drain(..) {
+ device.destroy_compute_pipeline(raw);
+ }
+ for raw in self.graphics_pipes.drain(..) {
+ device.destroy_graphics_pipeline(raw);
+ }
+ for raw in self.descriptor_set_layouts.drain(..) {
+ device.destroy_descriptor_set_layout(raw);
+ }
+ for raw in self.pipeline_layouts.drain(..) {
+ device.destroy_pipeline_layout(raw);
+ }
+ }
+}
+
+#[derive(Debug)]
+struct ActiveSubmission<B: hal::Backend> {
+ index: SubmissionIndex,
+ fence: B::Fence,
+ last_resources: NonReferencedResources<B>,
+ mapped: Vec<id::Valid<id::BufferId>>,
+}
+
+#[derive(Clone, Debug, Error)]
+pub enum WaitIdleError {
+ #[error(transparent)]
+ Device(#[from] DeviceError),
+ #[error("GPU got stuck :(")]
+ StuckGpu,
+}
+
+/// A struct responsible for tracking resource lifetimes.
+///
+/// Here is how host mapping is handled:
+/// 1. When a mapping is requested, we add the buffer to the life tracker's list of `mapped` buffers.
+/// 2. When `triage_suspected` is called, it checks the last submission index associated with each mapped buffer,
+/// and registers the buffer either with a submission still in flight, or straight into the `ready_to_map` vector.
+/// 3. When an `ActiveSubmission` is retired, the mapped buffers associated with it are moved to the `ready_to_map` vector.
+/// 4. Finally, `handle_mapping` issues all the callbacks.
+#[derive(Debug)]
+pub(crate) struct LifetimeTracker<B: hal::Backend> {
+ /// Resources that the user has requested be mapped, but are still in use.
+ mapped: Vec<Stored<id::BufferId>>,
+ /// Buffers can be used in a submission that is yet to be made, by means
+ /// of `write_buffer()`, so we have a special place for them.
+ pub future_suspected_buffers: Vec<Stored<id::BufferId>>,
+ /// Textures can be used in the upcoming submission by `write_texture`.
+ pub future_suspected_textures: Vec<Stored<id::TextureId>>,
+ /// Resources that are suspected for destruction.
+ pub suspected_resources: SuspectedResources,
+ /// Resources that are no longer referenced, but are still in use by the GPU.
+ /// Grouped by submission, each with an associated fence and submission index.
+ /// Active submissions are stored in FIFO order: oldest first.
+ active: Vec<ActiveSubmission<B>>,
+ /// Resources that are neither referenced nor in use, just awaiting
+ /// actual deletion.
+ free_resources: NonReferencedResources<B>,
+ ready_to_map: Vec<id::Valid<id::BufferId>>,
+}
+
+impl<B: hal::Backend> LifetimeTracker<B> {
+ pub fn new() -> Self {
+ Self {
+ mapped: Vec::new(),
+ future_suspected_buffers: Vec::new(),
+ future_suspected_textures: Vec::new(),
+ suspected_resources: SuspectedResources::default(),
+ active: Vec::new(),
+ free_resources: NonReferencedResources::new(),
+ ready_to_map: Vec::new(),
+ }
+ }
+
+ pub fn track_submission(
+ &mut self,
+ index: SubmissionIndex,
+ fence: B::Fence,
+ new_suspects: &SuspectedResources,
+ temp_resources: impl Iterator<Item = (TempResource<B>, alloc::MemoryBlock<B>)>,
+ ) {
+ let mut last_resources = NonReferencedResources::new();
+ for (res, memory) in temp_resources {
+ match res {
+ TempResource::Buffer(raw) => last_resources.buffers.push((raw, memory)),
+ TempResource::Image(raw) => last_resources.images.push((raw, memory)),
+ }
+ }
+
+ self.suspected_resources.buffers.extend(
+ self.future_suspected_buffers
+ .drain(..)
+ .map(|stored| stored.value),
+ );
+ self.suspected_resources.textures.extend(
+ self.future_suspected_textures
+ .drain(..)
+ .map(|stored| stored.value),
+ );
+ self.suspected_resources.extend(new_suspects);
+
+ self.active.alloc().init(ActiveSubmission {
+ index,
+ fence,
+ last_resources,
+ mapped: Vec::new(),
+ });
+ }
+
+ pub(crate) fn map(&mut self, value: id::Valid<id::BufferId>, ref_count: RefCount) {
+ self.mapped.push(Stored { value, ref_count });
+ }
+
+ fn wait_idle(&self, device: &B::Device) -> Result<(), WaitIdleError> {
+ if !self.active.is_empty() {
+ tracing::debug!("Waiting for IDLE...");
+ let status = unsafe {
+ device
+ .wait_for_fences(
+ self.active.iter().map(|a| &a.fence),
+ hal::device::WaitFor::All,
+ CLEANUP_WAIT_MS * 1_000_000,
+ )
+ .map_err(DeviceError::from)?
+ };
+ tracing::debug!("...Done");
+
+ if !status {
+ // We timed out while waiting for the fences
+ return Err(WaitIdleError::StuckGpu);
+ }
+ }
+ Ok(())
+ }
+
+ /// Returns the last submission index that is done.
+ pub fn triage_submissions(
+ &mut self,
+ device: &B::Device,
+ force_wait: bool,
+ ) -> Result<SubmissionIndex, WaitIdleError> {
+ if force_wait {
+ self.wait_idle(device)?;
+ }
+ //TODO: enable when `is_sorted_by_key` is stable
+ //debug_assert!(self.active.is_sorted_by_key(|a| a.index));
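+ // `active` is stored in FIFO order, so the first submission whose fence is
+ // not yet signaled marks the boundary: everything before it is done.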
+ let done_count = self
+ .active
+ .iter()
+ .position(|a| unsafe { !device.get_fence_status(&a.fence).unwrap_or(false) })
+ .unwrap_or_else(|| self.active.len());
+ let last_done = if done_count != 0 {
+ self.active[done_count - 1].index
+ } else {
+ return Ok(0);
+ };
+
+ for a in self.active.drain(..done_count) {
+ tracing::trace!("Active submission {} is done", a.index);
+ self.free_resources.extend(a.last_resources);
+ self.ready_to_map.extend(a.mapped);
+ unsafe {
+ device.destroy_fence(a.fence);
+ }
+ }
+
+ Ok(last_done)
+ }
+
+ pub fn cleanup(
+ &mut self,
+ device: &B::Device,
+ memory_allocator_mutex: &Mutex<alloc::MemoryAllocator<B>>,
+ descriptor_allocator_mutex: &Mutex<DescriptorAllocator<B>>,
+ ) {
+ unsafe {
+ self.free_resources
+ .clean(device, memory_allocator_mutex, descriptor_allocator_mutex);
+ descriptor_allocator_mutex.lock().cleanup(device);
+ }
+ }
+
+ pub fn schedule_resource_destruction(
+ &mut self,
+ temp_resource: TempResource<B>,
+ memory: alloc::MemoryBlock<B>,
+ last_submit_index: SubmissionIndex,
+ ) {
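+ // If the submission is still in flight, attach the resource to it;
+ // otherwise it can be freed at the next `cleanup` call.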
+ let resources = self
+ .active
+ .iter_mut()
+ .find(|a| a.index == last_submit_index)
+ .map_or(&mut self.free_resources, |a| &mut a.last_resources);
+ match temp_resource {
+ TempResource::Buffer(raw) => resources.buffers.push((raw, memory)),
+ TempResource::Image(raw) => resources.images.push((raw, memory)),
+ }
+ }
+}
+
+impl<B: GfxBackend> LifetimeTracker<B> {
+ pub(crate) fn triage_suspected<G: GlobalIdentityHandlerFactory>(
+ &mut self,
+ hub: &Hub<B, G>,
+ trackers: &Mutex<TrackerSet>,
+ #[cfg(feature = "trace")] trace: Option<&Mutex<trace::Trace>>,
+ token: &mut Token<super::Device<B>>,
+ ) {
+ if !self.suspected_resources.render_bundles.is_empty() {
+ let mut trackers = trackers.lock();
+ let (mut guard, _) = hub.render_bundles.write(token);
+
+ while let Some(id) = self.suspected_resources.render_bundles.pop() {
+ if trackers.bundles.remove_abandoned(id) {
+ #[cfg(feature = "trace")]
+ trace.map(|t| t.lock().add(trace::Action::DestroyRenderBundle(id.0)));
+
+ if let Some(res) = hub.render_bundles.unregister_locked(id.0, &mut *guard) {
+ self.suspected_resources.add_trackers(&res.used);
+ }
+ }
+ }
+ }
+
+ if !self.suspected_resources.bind_groups.is_empty() {
+ let mut trackers = trackers.lock();
+ let (mut guard, _) = hub.bind_groups.write(token);
+
+ while let Some(id) = self.suspected_resources.bind_groups.pop() {
+ if trackers.bind_groups.remove_abandoned(id) {
+ #[cfg(feature = "trace")]
+ trace.map(|t| t.lock().add(trace::Action::DestroyBindGroup(id.0)));
+
+ if let Some(res) = hub.bind_groups.unregister_locked(id.0, &mut *guard) {
+ self.suspected_resources.add_trackers(&res.used);
+
+ let submit_index = res.life_guard.submission_index.load(Ordering::Acquire);
+ self.active
+ .iter_mut()
+ .find(|a| a.index == submit_index)
+ .map_or(&mut self.free_resources, |a| &mut a.last_resources)
+ .desc_sets
+ .push(res.raw);
+ }
+ }
+ }
+ }
+
+ if !self.suspected_resources.texture_views.is_empty() {
+ let mut trackers = trackers.lock();
+ let (mut guard, _) = hub.texture_views.write(token);
+
+ for id in self.suspected_resources.texture_views.drain(..) {
+ if trackers.views.remove_abandoned(id) {
+ #[cfg(feature = "trace")]
+ trace.map(|t| t.lock().add(trace::Action::DestroyTextureView(id.0)));
+
+ if let Some(res) = hub.texture_views.unregister_locked(id.0, &mut *guard) {
+ let raw = match res.inner {
+ resource::TextureViewInner::Native { raw, source_id } => {
+ self.suspected_resources.textures.push(source_id.value);
+ raw
+ }
+ resource::TextureViewInner::SwapChain { .. } => unreachable!(),
+ };
+
+ let submit_index = res.life_guard.submission_index.load(Ordering::Acquire);
+ self.active
+ .iter_mut()
+ .find(|a| a.index == submit_index)
+ .map_or(&mut self.free_resources, |a| &mut a.last_resources)
+ .image_views
+ .push((id, raw));
+ }
+ }
+ }
+ }
+
+ if !self.suspected_resources.textures.is_empty() {
+ let mut trackers = trackers.lock();
+ let (mut guard, _) = hub.textures.write(token);
+
+ for id in self.suspected_resources.textures.drain(..) {
+ if trackers.textures.remove_abandoned(id) {
+ #[cfg(feature = "trace")]
+ trace.map(|t| t.lock().add(trace::Action::DestroyTexture(id.0)));
+
+ if let Some(res) = hub.textures.unregister_locked(id.0, &mut *guard) {
+ let submit_index = res.life_guard.submission_index.load(Ordering::Acquire);
+ self.active
+ .iter_mut()
+ .find(|a| a.index == submit_index)
+ .map_or(&mut self.free_resources, |a| &mut a.last_resources)
+ .images
+ .extend(res.raw);
+ }
+ }
+ }
+ }
+
+ if !self.suspected_resources.samplers.is_empty() {
+ let mut trackers = trackers.lock();
+ let (mut guard, _) = hub.samplers.write(token);
+
+ for id in self.suspected_resources.samplers.drain(..) {
+ if trackers.samplers.remove_abandoned(id) {
+ #[cfg(feature = "trace")]
+ trace.map(|t| t.lock().add(trace::Action::DestroySampler(id.0)));
+
+ if let Some(res) = hub.samplers.unregister_locked(id.0, &mut *guard) {
+ let submit_index = res.life_guard.submission_index.load(Ordering::Acquire);
+ self.active
+ .iter_mut()
+ .find(|a| a.index == submit_index)
+ .map_or(&mut self.free_resources, |a| &mut a.last_resources)
+ .samplers
+ .push(res.raw);
+ }
+ }
+ }
+ }
+
+ if !self.suspected_resources.buffers.is_empty() {
+ let mut trackers = trackers.lock();
+ let (mut guard, _) = hub.buffers.write(token);
+
+ for id in self.suspected_resources.buffers.drain(..) {
+ if trackers.buffers.remove_abandoned(id) {
+ #[cfg(feature = "trace")]
+ trace.map(|t| t.lock().add(trace::Action::DestroyBuffer(id.0)));
+ tracing::debug!("Buffer {:?} is detached", id);
+
+ if let Some(res) = hub.buffers.unregister_locked(id.0, &mut *guard) {
+ let submit_index = res.life_guard.submission_index.load(Ordering::Acquire);
+ self.active
+ .iter_mut()
+ .find(|a| a.index == submit_index)
+ .map_or(&mut self.free_resources, |a| &mut a.last_resources)
+ .buffers
+ .extend(res.raw);
+ }
+ }
+ }
+ }
+
+ if !self.suspected_resources.compute_pipelines.is_empty() {
+ let mut trackers = trackers.lock();
+ let (mut guard, _) = hub.compute_pipelines.write(token);
+
+ for id in self.suspected_resources.compute_pipelines.drain(..) {
+ if trackers.compute_pipes.remove_abandoned(id) {
+ #[cfg(feature = "trace")]
+ trace.map(|t| t.lock().add(trace::Action::DestroyComputePipeline(id.0)));
+
+ if let Some(res) = hub.compute_pipelines.unregister_locked(id.0, &mut *guard) {
+ let submit_index = res.life_guard.submission_index.load(Ordering::Acquire);
+ self.active
+ .iter_mut()
+ .find(|a| a.index == submit_index)
+ .map_or(&mut self.free_resources, |a| &mut a.last_resources)
+ .compute_pipes
+ .push(res.raw);
+ }
+ }
+ }
+ }
+
+ if !self.suspected_resources.render_pipelines.is_empty() {
+ let mut trackers = trackers.lock();
+ let (mut guard, _) = hub.render_pipelines.write(token);
+
+ for id in self.suspected_resources.render_pipelines.drain(..) {
+ if trackers.render_pipes.remove_abandoned(id) {
+ #[cfg(feature = "trace")]
+ trace.map(|t| t.lock().add(trace::Action::DestroyRenderPipeline(id.0)));
+
+ if let Some(res) = hub.render_pipelines.unregister_locked(id.0, &mut *guard) {
+ let submit_index = res.life_guard.submission_index.load(Ordering::Acquire);
+ self.active
+ .iter_mut()
+ .find(|a| a.index == submit_index)
+ .map_or(&mut self.free_resources, |a| &mut a.last_resources)
+ .graphics_pipes
+ .push(res.raw);
+ }
+ }
+ }
+ }
+
+ if !self.suspected_resources.pipeline_layouts.is_empty() {
+ let (mut guard, _) = hub.pipeline_layouts.write(token);
+
+ for Stored {
+ value: id,
+ ref_count,
+ } in self.suspected_resources.pipeline_layouts.drain(..)
+ {
+ //Note: this has to happen after all the suspected pipelines are destroyed
+ if ref_count.load() == 1 {
+ #[cfg(feature = "trace")]
+ trace.map(|t| t.lock().add(trace::Action::DestroyPipelineLayout(id.0)));
+
+ if let Some(lay) = hub.pipeline_layouts.unregister_locked(id.0, &mut *guard) {
+ self.suspected_resources
+ .bind_group_layouts
+ .extend_from_slice(&lay.bind_group_layout_ids);
+ self.free_resources.pipeline_layouts.push(lay.raw);
+ }
+ }
+ }
+ }
+
+ if !self.suspected_resources.bind_group_layouts.is_empty() {
+ let (mut guard, _) = hub.bind_group_layouts.write(token);
+
+ for id in self.suspected_resources.bind_group_layouts.drain(..) {
+ //Note: this has to happen after all the suspected pipelines are destroyed
+ //Note: nothing else can bump the refcount since the guard is locked exclusively
+ //Note: same BGL can appear multiple times in the list, but only the last
+ // encounter could drop the refcount to 0.
+ if guard[id].multi_ref_count.dec_and_check_empty() {
+ #[cfg(feature = "trace")]
+ trace.map(|t| t.lock().add(trace::Action::DestroyBindGroupLayout(id.0)));
+ if let Some(lay) = hub.bind_group_layouts.unregister_locked(id.0, &mut *guard) {
+ self.free_resources.descriptor_set_layouts.push(lay.raw);
+ }
+ }
+ }
+ }
+ }
+
+ pub(crate) fn triage_mapped<G: GlobalIdentityHandlerFactory>(
+ &mut self,
+ hub: &Hub<B, G>,
+ token: &mut Token<super::Device<B>>,
+ ) {
+ if self.mapped.is_empty() {
+ return;
+ }
+ let (buffer_guard, _) = hub.buffers.read(token);
+
+ for stored in self.mapped.drain(..) {
+ let resource_id = stored.value;
+ let buf = &buffer_guard[resource_id];
+
+ let submit_index = buf.life_guard.submission_index.load(Ordering::Acquire);
+ tracing::trace!(
+ "Mapping of {:?} at submission {:?} gets assigned to active {:?}",
+ resource_id,
+ submit_index,
+ self.active.iter().position(|a| a.index == submit_index)
+ );
+
+ self.active
+ .iter_mut()
+ .find(|a| a.index == submit_index)
+ .map_or(&mut self.ready_to_map, |a| &mut a.mapped)
+ .push(resource_id);
+ }
+ }
+
+ pub(crate) fn triage_framebuffers<G: GlobalIdentityHandlerFactory>(
+ &mut self,
+ hub: &Hub<B, G>,
+ framebuffers: &mut FastHashMap<super::FramebufferKey, B::Framebuffer>,
+ token: &mut Token<super::Device<B>>,
+ ) {
+ let (texture_view_guard, _) = hub.texture_views.read(token);
+ let remove_list = framebuffers
+ .keys()
+ .filter_map(|key| {
+ let mut last_submit = None;
+ let mut needs_cleanup = false;
+
+ // A framebuffer needs to be scheduled for cleanup if at least one of its
+ // attachments is no longer valid.
+
+ for &at in key.all() {
+ // If this attachment is still registered, it's still valid
+ if texture_view_guard.contains(at.0) {
+ continue;
+ }
+
+ // This attachment is no longer registered, this framebuffer needs cleanup
+ needs_cleanup = true;
+
+ // Check whether any active submissions still refer to this attachment;
+ // if so, take the greatest submission index, as that's the last time
+ // this attachment is still valid.
+ let mut attachment_last_submit = None;
+ for a in &self.active {
+ if a.last_resources.image_views.iter().any(|&(id, _)| id == at) {
+ let max = attachment_last_submit.unwrap_or(0).max(a.index);
+ attachment_last_submit = Some(max);
+ }
+ }
+
+ // Across all attachments we need the smallest such index, because that's
+ // the last time the framebuffer as a whole is still valid.
+ if let Some(attachment_last_submit) = attachment_last_submit {
+ let min = last_submit
+ .unwrap_or(std::usize::MAX)
+ .min(attachment_last_submit);
+ last_submit = Some(min);
+ }
+ }
+
+ if needs_cleanup {
+ Some((key.clone(), last_submit.unwrap_or(0)))
+ } else {
+ None
+ }
+ })
+ .collect::<FastHashMap<_, _>>();
+
+ if !remove_list.is_empty() {
+ tracing::debug!("Free framebuffers {:?}", remove_list);
+ for (ref key, submit_index) in remove_list {
+ let framebuffer = framebuffers.remove(key).unwrap();
+ self.active
+ .iter_mut()
+ .find(|a| a.index == submit_index)
+ .map_or(&mut self.free_resources, |a| &mut a.last_resources)
+ .framebuffers
+ .push(framebuffer);
+ }
+ }
+ }
+
+ pub(crate) fn handle_mapping<G: GlobalIdentityHandlerFactory>(
+ &mut self,
+ hub: &Hub<B, G>,
+ raw: &B::Device,
+ trackers: &Mutex<TrackerSet>,
+ token: &mut Token<super::Device<B>>,
+ ) -> Vec<super::BufferMapPendingCallback> {
+ if self.ready_to_map.is_empty() {
+ return Vec::new();
+ }
+ let (mut buffer_guard, _) = hub.buffers.write(token);
+ let mut pending_callbacks: Vec<super::BufferMapPendingCallback> =
+ Vec::with_capacity(self.ready_to_map.len());
+ let mut trackers = trackers.lock();
+ for buffer_id in self.ready_to_map.drain(..) {
+ let buffer = &mut buffer_guard[buffer_id];
+ if buffer.life_guard.ref_count.is_none() && trackers.buffers.remove_abandoned(buffer_id)
+ {
+ buffer.map_state = resource::BufferMapState::Idle;
+ tracing::debug!("Mapping request is dropped because the buffer is destroyed.");
+ if let Some(buf) = hub
+ .buffers
+ .unregister_locked(buffer_id.0, &mut *buffer_guard)
+ {
+ self.free_resources.buffers.extend(buf.raw);
+ }
+ } else {
+ let mapping = match std::mem::replace(
+ &mut buffer.map_state,
+ resource::BufferMapState::Idle,
+ ) {
+ resource::BufferMapState::Waiting(pending_mapping) => pending_mapping,
+ _ => panic!("No pending mapping."),
+ };
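+ // A zero-sized mapping request succeeds immediately without touching the device.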
+ let status = if mapping.range.start != mapping.range.end {
+ tracing::debug!("Buffer {:?} map state -> Active", buffer_id);
+ let host = mapping.op.host;
+ let size = mapping.range.end - mapping.range.start;
+ match super::map_buffer(raw, buffer, mapping.range.start, size, host) {
+ Ok(ptr) => {
+ buffer.map_state = resource::BufferMapState::Active {
+ ptr,
+ sub_range: hal::buffer::SubRange {
+ offset: mapping.range.start,
+ size: Some(size),
+ },
+ host,
+ };
+ resource::BufferMapAsyncStatus::Success
+ }
+ Err(e) => {
+ tracing::error!("Mapping failed {:?}", e);
+ resource::BufferMapAsyncStatus::Error
+ }
+ }
+ } else {
+ resource::BufferMapAsyncStatus::Success
+ };
+ pending_callbacks.push((mapping.op, status));
+ }
+ }
+ pending_callbacks
+ }
+}
diff --git a/gfx/wgpu/wgpu-core/src/device/mod.rs b/gfx/wgpu/wgpu-core/src/device/mod.rs
new file mode 100644
index 0000000000..b1b7105fe3
--- /dev/null
+++ b/gfx/wgpu/wgpu-core/src/device/mod.rs
@@ -0,0 +1,4217 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+use crate::{
+ binding_model, command, conv,
+ device::life::WaitIdleError,
+ hub::{
+ GfxBackend, Global, GlobalIdentityHandlerFactory, Hub, Input, InvalidId, Storage, Token,
+ },
+ id, pipeline, resource, span, swap_chain,
+ track::{BufferState, TextureSelector, TextureState, TrackerSet},
+ validation::{self, check_buffer_usage, check_texture_usage},
+ FastHashMap, Label, LabelHelpers, LifeGuard, MultiRefCount, PrivateFeatures, Stored,
+ SubmissionIndex, MAX_BIND_GROUPS,
+};
+
+use arrayvec::ArrayVec;
+use copyless::VecHelper as _;
+use hal::{
+ command::CommandBuffer as _,
+ device::Device as _,
+ window::{PresentationSurface as _, Surface as _},
+};
+use parking_lot::{Mutex, MutexGuard};
+use thiserror::Error;
+use wgt::{
+ BufferAddress, BufferSize, InputStepMode, TextureDimension, TextureFormat, TextureViewDimension,
+};
+
+use std::{
+ borrow::Cow,
+ collections::{hash_map::Entry, BTreeMap},
+ iter,
+ marker::PhantomData,
+ mem,
+ ops::Range,
+ ptr,
+ sync::atomic::Ordering,
+};
+
+pub mod alloc;
+pub mod descriptor;
+mod life;
+mod queue;
+#[cfg(any(feature = "trace", feature = "replay"))]
+pub mod trace;
+
+use smallvec::SmallVec;
+#[cfg(feature = "trace")]
+use trace::{Action, Trace};
+
+pub const MAX_COLOR_TARGETS: usize = 4;
+pub const MAX_MIP_LEVELS: u32 = 16;
+pub const MAX_VERTEX_BUFFERS: usize = 16;
+pub const MAX_ANISOTROPY: u8 = 16;
+pub const SHADER_STAGE_COUNT: usize = 3;
+
+pub type DeviceDescriptor<'a> = wgt::DeviceDescriptor<Label<'a>>;
+
+pub fn all_buffer_stages() -> hal::pso::PipelineStage {
+ use hal::pso::PipelineStage as Ps;
+ Ps::DRAW_INDIRECT
+ | Ps::VERTEX_INPUT
+ | Ps::VERTEX_SHADER
+ | Ps::FRAGMENT_SHADER
+ | Ps::COMPUTE_SHADER
+ | Ps::TRANSFER
+ | Ps::HOST
+}
+pub fn all_image_stages() -> hal::pso::PipelineStage {
+ use hal::pso::PipelineStage as Ps;
+ Ps::EARLY_FRAGMENT_TESTS
+ | Ps::LATE_FRAGMENT_TESTS
+ | Ps::COLOR_ATTACHMENT_OUTPUT
+ | Ps::VERTEX_SHADER
+ | Ps::FRAGMENT_SHADER
+ | Ps::COMPUTE_SHADER
+ | Ps::TRANSFER
+}
+
+#[repr(C)]
+#[derive(Clone, Copy, Debug, PartialEq)]
+#[cfg_attr(feature = "trace", derive(serde::Serialize))]
+#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
+pub enum HostMap {
+ Read,
+ Write,
+}
+
+#[derive(Clone, Debug, Hash, PartialEq)]
+#[cfg_attr(feature = "serial-pass", derive(serde::Deserialize, serde::Serialize))]
+pub(crate) struct AttachmentData<T> {
+ pub colors: ArrayVec<[T; MAX_COLOR_TARGETS]>,
+ pub resolves: ArrayVec<[T; MAX_COLOR_TARGETS]>,
+ pub depth_stencil: Option<T>,
+}
+impl<T: PartialEq> Eq for AttachmentData<T> {}
+impl<T> AttachmentData<T> {
+ pub(crate) fn all(&self) -> impl Iterator<Item = &T> {
+ self.colors
+ .iter()
+ .chain(&self.resolves)
+ .chain(&self.depth_stencil)
+ }
+}
+
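+// Enough capacity for all color attachments, their resolve targets, and a depth-stencil attachment.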
+pub(crate) type AttachmentDataVec<T> = ArrayVec<[T; MAX_COLOR_TARGETS + MAX_COLOR_TARGETS + 1]>;
+
+pub(crate) type RenderPassKey = AttachmentData<(hal::pass::Attachment, hal::image::Layout)>;
+pub(crate) type FramebufferKey = AttachmentData<id::Valid<id::TextureViewId>>;
+
+#[derive(Clone, Debug, Hash, PartialEq)]
+#[cfg_attr(feature = "serial-pass", derive(serde::Deserialize, serde::Serialize))]
+pub(crate) struct RenderPassContext {
+ pub attachments: AttachmentData<TextureFormat>,
+ pub sample_count: u8,
+}
+#[derive(Clone, Debug, Error)]
+pub enum RenderPassCompatibilityError {
+ #[error("Incompatible color attachment: {0:?} != {1:?}")]
+ IncompatibleColorAttachment(
+ ArrayVec<[TextureFormat; MAX_COLOR_TARGETS]>,
+ ArrayVec<[TextureFormat; MAX_COLOR_TARGETS]>,
+ ),
+ #[error("Incompatible depth-stencil attachment: {0:?} != {1:?}")]
+ IncompatibleDepthStencilAttachment(Option<TextureFormat>, Option<TextureFormat>),
+ #[error("Incompatible sample count: {0:?} != {1:?}")]
+ IncompatibleSampleCount(u8, u8),
+}
+
+impl RenderPassContext {
+ // Assumes the render pass only contains one subpass.
+ pub(crate) fn check_compatible(
+ &self,
+ other: &RenderPassContext,
+ ) -> Result<(), RenderPassCompatibilityError> {
+ if self.attachments.colors != other.attachments.colors {
+ return Err(RenderPassCompatibilityError::IncompatibleColorAttachment(
+ self.attachments.colors.clone(),
+ other.attachments.colors.clone(),
+ ));
+ }
+ if self.attachments.depth_stencil != other.attachments.depth_stencil {
+ return Err(
+ RenderPassCompatibilityError::IncompatibleDepthStencilAttachment(
+ self.attachments.depth_stencil.clone(),
+ other.attachments.depth_stencil.clone(),
+ ),
+ );
+ }
+ if self.sample_count != other.sample_count {
+ return Err(RenderPassCompatibilityError::IncompatibleSampleCount(
+ self.sample_count,
+ other.sample_count,
+ ));
+ }
+ Ok(())
+ }
+}
+
+type BufferMapPendingCallback = (resource::BufferMapOperation, resource::BufferMapAsyncStatus);
+
+fn map_buffer<B: hal::Backend>(
+ raw: &B::Device,
+ buffer: &mut resource::Buffer<B>,
+ offset: hal::buffer::Offset,
+ size: BufferAddress,
+ kind: HostMap,
+) -> Result<ptr::NonNull<u8>, resource::BufferAccessError> {
+ let &mut (_, ref mut block) = buffer
+ .raw
+ .as_mut()
+ .ok_or(resource::BufferAccessError::Destroyed)?;
+ let ptr = block.map(raw, offset, size).map_err(DeviceError::from)?;
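+ // Non-coherent memory needs explicit synchronization: invalidate before
+ // host reads, and flush host writes on unmap (the segment recorded in
+ // `sync_mapped_writes` is flushed by `unmap_buffer`).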
+
+ buffer.sync_mapped_writes = match kind {
+ HostMap::Read if !block.is_coherent() => {
+ block.invalidate_range(raw, offset, Some(size))?;
+ None
+ }
+ HostMap::Write if !block.is_coherent() => Some(hal::memory::Segment {
+ offset,
+ size: Some(size),
+ }),
+ _ => None,
+ };
+ Ok(ptr)
+}
+
+fn unmap_buffer<B: hal::Backend>(
+ raw: &B::Device,
+ buffer: &mut resource::Buffer<B>,
+) -> Result<(), resource::BufferAccessError> {
+ let &mut (_, ref mut block) = buffer
+ .raw
+ .as_mut()
+ .ok_or(resource::BufferAccessError::Destroyed)?;
+ if let Some(segment) = buffer.sync_mapped_writes.take() {
+ block.flush_range(raw, segment.offset, segment.size)?;
+ }
+ block.unmap(raw);
+ Ok(())
+}
+
+//Note: this logic is specifically moved out of `handle_mapping()` in order to
+// have nothing locked by the time we execute the user's callback code.
+fn fire_map_callbacks<I: IntoIterator<Item = BufferMapPendingCallback>>(callbacks: I) {
+ for (operation, status) in callbacks {
+ unsafe { (operation.callback)(status, operation.user_data) }
+ }
+}
+
+#[derive(Debug)]
+pub struct Device<B: hal::Backend> {
+ pub(crate) raw: B::Device,
+ pub(crate) adapter_id: Stored<id::AdapterId>,
+ pub(crate) queue_group: hal::queue::QueueGroup<B>,
+ pub(crate) cmd_allocator: command::CommandAllocator<B>,
+ mem_allocator: Mutex<alloc::MemoryAllocator<B>>,
+ desc_allocator: Mutex<descriptor::DescriptorAllocator<B>>,
+ //Note: The submission index here corresponds to the last submission that is done.
+ pub(crate) life_guard: LifeGuard,
+ pub(crate) active_submission_index: SubmissionIndex,
+ pub(crate) trackers: Mutex<TrackerSet>,
+ pub(crate) render_passes: Mutex<FastHashMap<RenderPassKey, B::RenderPass>>,
+ pub(crate) framebuffers: Mutex<FastHashMap<FramebufferKey, B::Framebuffer>>,
+ // Life tracker should be locked right after the device and before anything else.
+ life_tracker: Mutex<life::LifetimeTracker<B>>,
+ temp_suspected: life::SuspectedResources,
+ pub(crate) hal_limits: hal::Limits,
+ pub(crate) private_features: PrivateFeatures,
+ pub(crate) limits: wgt::Limits,
+ pub(crate) features: wgt::Features,
+ //TODO: move this behind another mutex. This would allow several methods to switch
+ // to borrow Device immutably, such as `write_buffer`, `write_texture`, and `buffer_unmap`.
+ pending_writes: queue::PendingWrites<B>,
+ #[cfg(feature = "trace")]
+ pub(crate) trace: Option<Mutex<Trace>>,
+}
+
+#[derive(Clone, Debug, Error)]
+pub enum CreateDeviceError {
+ #[error("not enough memory left")]
+ OutOfMemory,
+}
+
+impl<B: GfxBackend> Device<B> {
+ pub(crate) fn new(
+ raw: B::Device,
+ adapter_id: Stored<id::AdapterId>,
+ queue_group: hal::queue::QueueGroup<B>,
+ mem_props: hal::adapter::MemoryProperties,
+ hal_limits: hal::Limits,
+ private_features: PrivateFeatures,
+ desc: &DeviceDescriptor,
+ trace_path: Option<&std::path::Path>,
+ ) -> Result<Self, CreateDeviceError> {
+ let cmd_allocator = command::CommandAllocator::new(queue_group.family, &raw)
+ .or(Err(CreateDeviceError::OutOfMemory))?;
+
+ let mem_allocator = alloc::MemoryAllocator::new(mem_props, hal_limits);
+ let descriptors = descriptor::DescriptorAllocator::new();
+ #[cfg(not(feature = "trace"))]
+ match trace_path {
+ Some(_) => tracing::error!("Feature 'trace' is not enabled"),
+ None => (),
+ }
+
+ Ok(Self {
+ raw,
+ adapter_id,
+ cmd_allocator,
+ mem_allocator: Mutex::new(mem_allocator),
+ desc_allocator: Mutex::new(descriptors),
+ queue_group,
+ life_guard: LifeGuard::new("<device>"),
+ active_submission_index: 0,
+ trackers: Mutex::new(TrackerSet::new(B::VARIANT)),
+ render_passes: Mutex::new(FastHashMap::default()),
+ framebuffers: Mutex::new(FastHashMap::default()),
+ life_tracker: Mutex::new(life::LifetimeTracker::new()),
+ temp_suspected: life::SuspectedResources::default(),
+ #[cfg(feature = "trace")]
+ trace: trace_path.and_then(|path| match Trace::new(path) {
+ Ok(mut trace) => {
+ trace.add(Action::Init {
+ desc: desc.clone(),
+ backend: B::VARIANT,
+ });
+ Some(Mutex::new(trace))
+ }
+ Err(e) => {
+ tracing::error!("Unable to start a trace in '{:?}': {:?}", path, e);
+ None
+ }
+ }),
+ hal_limits,
+ private_features,
+ limits: desc.limits.clone(),
+ features: desc.features.clone(),
+ pending_writes: queue::PendingWrites::new(),
+ })
+ }
+
+ pub(crate) fn last_completed_submission_index(&self) -> SubmissionIndex {
+ self.life_guard.submission_index.load(Ordering::Acquire)
+ }
+
+ fn lock_life_internal<'this, 'token: 'this>(
+ tracker: &'this Mutex<life::LifetimeTracker<B>>,
+ _token: &mut Token<'token, Self>,
+ ) -> MutexGuard<'this, life::LifetimeTracker<B>> {
+ tracker.lock()
+ }
+
+ pub(crate) fn lock_life<'this, 'token: 'this>(
+ &'this self,
+ //TODO: fix this - the token has to be borrowed for the lock
+ token: &mut Token<'token, Self>,
+ ) -> MutexGuard<'this, life::LifetimeTracker<B>> {
+ Self::lock_life_internal(&self.life_tracker, token)
+ }
+
+ fn maintain<'this, 'token: 'this, G: GlobalIdentityHandlerFactory>(
+ &'this self,
+ hub: &Hub<B, G>,
+ force_wait: bool,
+ token: &mut Token<'token, Self>,
+ ) -> Result<Vec<BufferMapPendingCallback>, WaitIdleError> {
+ let mut life_tracker = self.lock_life(token);
+
+ life_tracker.triage_suspected(
+ hub,
+ &self.trackers,
+ #[cfg(feature = "trace")]
+ self.trace.as_ref(),
+ token,
+ );
+ life_tracker.triage_mapped(hub, token);
+ life_tracker.triage_framebuffers(hub, &mut *self.framebuffers.lock(), token);
+ let last_done = life_tracker.triage_submissions(&self.raw, force_wait)?;
+ let callbacks = life_tracker.handle_mapping(hub, &self.raw, &self.trackers, token);
+ life_tracker.cleanup(&self.raw, &self.mem_allocator, &self.desc_allocator);
+
+ self.life_guard
+ .submission_index
+ .store(last_done, Ordering::Release);
+ self.cmd_allocator.maintain(&self.raw, last_done);
+ Ok(callbacks)
+ }
+
+ fn untrack<'this, 'token: 'this, G: GlobalIdentityHandlerFactory>(
+ &'this mut self,
+ hub: &Hub<B, G>,
+ trackers: &TrackerSet,
+ mut token: &mut Token<'token, Self>,
+ ) {
+ self.temp_suspected.clear();
+ // As the tracker is cleared/dropped, we need to consider all the resources
+ // that it references for destruction in the next GC pass.
+ {
+ let (bind_group_guard, mut token) = hub.bind_groups.read(&mut token);
+ let (compute_pipe_guard, mut token) = hub.compute_pipelines.read(&mut token);
+ let (render_pipe_guard, mut token) = hub.render_pipelines.read(&mut token);
+ let (buffer_guard, mut token) = hub.buffers.read(&mut token);
+ let (texture_guard, mut token) = hub.textures.read(&mut token);
+ let (texture_view_guard, mut token) = hub.texture_views.read(&mut token);
+ let (sampler_guard, _) = hub.samplers.read(&mut token);
+
+ for id in trackers.buffers.used() {
+ if buffer_guard[id].life_guard.ref_count.is_none() {
+ self.temp_suspected.buffers.push(id);
+ }
+ }
+ for id in trackers.textures.used() {
+ if texture_guard[id].life_guard.ref_count.is_none() {
+ self.temp_suspected.textures.push(id);
+ }
+ }
+ for id in trackers.views.used() {
+ if texture_view_guard[id].life_guard.ref_count.is_none() {
+ self.temp_suspected.texture_views.push(id);
+ }
+ }
+ for id in trackers.bind_groups.used() {
+ if bind_group_guard[id].life_guard.ref_count.is_none() {
+ self.temp_suspected.bind_groups.push(id);
+ }
+ }
+ for id in trackers.samplers.used() {
+ if sampler_guard[id].life_guard.ref_count.is_none() {
+ self.temp_suspected.samplers.push(id);
+ }
+ }
+ for id in trackers.compute_pipes.used() {
+ if compute_pipe_guard[id].life_guard.ref_count.is_none() {
+ self.temp_suspected.compute_pipelines.push(id);
+ }
+ }
+ for id in trackers.render_pipes.used() {
+ if render_pipe_guard[id].life_guard.ref_count.is_none() {
+ self.temp_suspected.render_pipelines.push(id);
+ }
+ }
+ }
+
+ self.lock_life(&mut token)
+ .suspected_resources
+ .extend(&self.temp_suspected);
+ }
+
+ fn create_buffer(
+ &self,
+ self_id: id::DeviceId,
+ desc: &resource::BufferDescriptor,
+ transient: bool,
+ ) -> Result<resource::Buffer<B>, resource::CreateBufferError> {
+ debug_assert_eq!(self_id.backend(), B::VARIANT);
+ let (mut usage, _memory_properties) = conv::map_buffer_usage(desc.usage);
+ if desc.mapped_at_creation {
+ if desc.size % wgt::COPY_BUFFER_ALIGNMENT != 0 {
+ return Err(resource::CreateBufferError::UnalignedSize);
+ }
+ if !desc.usage.contains(wgt::BufferUsage::MAP_WRITE) {
+ // we are going to be copying into it, internally
+ usage |= hal::buffer::Usage::TRANSFER_DST;
+ }
+ }
+
+ let mem_usage = {
+ use gpu_alloc::UsageFlags as Uf;
+ use wgt::BufferUsage as Bu;
+
+ let mut flags = Uf::empty();
+ let map_flags = desc.usage & (Bu::MAP_READ | Bu::MAP_WRITE);
+ if !(desc.usage - map_flags).is_empty() {
+ flags |= Uf::FAST_DEVICE_ACCESS;
+ }
+ if transient {
+ flags |= Uf::TRANSIENT;
+ }
+
+ if !map_flags.is_empty() {
+ let upload_usage = Bu::MAP_WRITE | Bu::COPY_SRC;
+ let download_usage = Bu::MAP_READ | Bu::COPY_DST;
+
+ flags |= Uf::HOST_ACCESS;
+ if desc.usage.contains(upload_usage) {
+ flags |= Uf::UPLOAD;
+ }
+ if desc.usage.contains(download_usage) {
+ flags |= Uf::DOWNLOAD;
+ }
+
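+ // Without MAPPABLE_PRIMARY_BUFFERS, a mappable buffer's usage must be a
+ // subset of either MAP_WRITE | COPY_SRC or MAP_READ | COPY_DST.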
+ let is_native_only = self
+ .features
+ .contains(wgt::Features::MAPPABLE_PRIMARY_BUFFERS);
+ if !is_native_only
+ && !upload_usage.contains(desc.usage)
+ && !download_usage.contains(desc.usage)
+ {
+ return Err(resource::CreateBufferError::UsageMismatch(desc.usage));
+ }
+ }
+
+ flags
+ };
+
+ let mut buffer = unsafe { self.raw.create_buffer(desc.size.max(1), usage) }.map_err(
+ |err| match err {
+ hal::buffer::CreationError::OutOfMemory(_) => DeviceError::OutOfMemory,
+ _ => panic!("failed to create buffer: {}", err),
+ },
+ )?;
+ if let Some(ref label) = desc.label {
+ unsafe { self.raw.set_buffer_name(&mut buffer, label) };
+ }
+
+ let requirements = unsafe { self.raw.get_buffer_requirements(&buffer) };
+ let block = self
+ .mem_allocator
+ .lock()
+ .allocate(&self.raw, requirements, mem_usage)?;
+ block.bind_buffer(&self.raw, &mut buffer)?;
+
+ Ok(resource::Buffer {
+ raw: Some((buffer, block)),
+ device_id: Stored {
+ value: id::Valid(self_id),
+ ref_count: self.life_guard.add_ref(),
+ },
+ usage: desc.usage,
+ size: desc.size,
+ full_range: (),
+ sync_mapped_writes: None,
+ map_state: resource::BufferMapState::Idle,
+ life_guard: LifeGuard::new(desc.label.borrow_or_default()),
+ })
+ }
+
+ fn create_texture(
+ &self,
+ self_id: id::DeviceId,
+ desc: &resource::TextureDescriptor,
+ ) -> Result<resource::Texture<B>, resource::CreateTextureError> {
+ debug_assert_eq!(self_id.backend(), B::VARIANT);
+
+ let features = conv::texture_features(desc.format);
+ if !self.features.contains(features) {
+ return Err(resource::CreateTextureError::MissingFeature(
+ features,
+ desc.format,
+ ));
+ }
+
+ // Ensure `D24Plus` textures cannot be copied
+ match desc.format {
+ TextureFormat::Depth24Plus | TextureFormat::Depth24PlusStencil8 => {
+ if desc
+ .usage
+ .intersects(wgt::TextureUsage::COPY_SRC | wgt::TextureUsage::COPY_DST)
+ {
+ return Err(resource::CreateTextureError::CannotCopyD24Plus);
+ }
+ }
+ _ => {}
+ }
+
+ let kind = conv::map_texture_dimension_size(desc.dimension, desc.size, desc.sample_count)?;
+ let format = conv::map_texture_format(desc.format, self.private_features);
+ let aspects = format.surface_desc().aspects;
+ let usage = conv::map_texture_usage(desc.usage, aspects);
+
+ let mip_level_count = desc.mip_level_count;
+ if mip_level_count == 0
+ || mip_level_count > MAX_MIP_LEVELS
+ || mip_level_count > kind.compute_num_levels() as u32
+ {
+ return Err(resource::CreateTextureError::InvalidMipLevelCount(
+ mip_level_count,
+ ));
+ }
+ let mut view_capabilities = hal::image::ViewCapabilities::empty();
+
+ // 2D textures with array layer counts that are multiples of 6 could be cubemaps.
+ // Following gpuweb/gpuweb#68, always add the hint in that case.
+ if desc.dimension == TextureDimension::D2 && desc.size.depth % 6 == 0 {
+ view_capabilities |= hal::image::ViewCapabilities::KIND_CUBE;
+ };
+
+ // TODO: 2D arrays, cubemap arrays
+
+ let mut image = unsafe {
+ let mut image = self
+ .raw
+ .create_image(
+ kind,
+ desc.mip_level_count as hal::image::Level,
+ format,
+ hal::image::Tiling::Optimal,
+ usage,
+ view_capabilities,
+ )
+ .map_err(|err| match err {
+ hal::image::CreationError::OutOfMemory(_) => DeviceError::OutOfMemory,
+ _ => panic!("failed to create texture: {}", err),
+ })?;
+ if let Some(ref label) = desc.label {
+ self.raw.set_image_name(&mut image, label);
+ }
+ image
+ };
+
+ let requirements = unsafe { self.raw.get_image_requirements(&image) };
+ let block = self.mem_allocator.lock().allocate(
+ &self.raw,
+ requirements,
+ gpu_alloc::UsageFlags::FAST_DEVICE_ACCESS,
+ )?;
+ block.bind_image(&self.raw, &mut image)?;
+
+ Ok(resource::Texture {
+ raw: Some((image, block)),
+ device_id: Stored {
+ value: id::Valid(self_id),
+ ref_count: self.life_guard.add_ref(),
+ },
+ usage: desc.usage,
+ aspects,
+ dimension: desc.dimension,
+ kind,
+ format: desc.format,
+ full_range: TextureSelector {
+ levels: 0..desc.mip_level_count as hal::image::Level,
+ layers: 0..kind.num_layers(),
+ },
+ life_guard: LifeGuard::new(desc.label.borrow_or_default()),
+ })
+ }
+
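+    /// Create a view of a texture, resolving the view dimension from the
+    /// descriptor (or from the texture's kind when unspecified) and validating
+    /// the mip/layer ranges and aspect against the source texture.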
+ fn create_texture_view(
+ &self,
+ texture: &resource::Texture<B>,
+ texture_id: id::TextureId,
+ desc: &resource::TextureViewDescriptor,
+ ) -> Result<resource::TextureView<B>, resource::CreateTextureViewError> {
+ let &(ref texture_raw, _) = texture
+ .raw
+ .as_ref()
+ .ok_or(resource::CreateTextureViewError::InvalidTexture)?;
+
+ let view_dim =
+ match desc.dimension {
+ Some(dim) => {
+ use hal::image::Kind;
+
+ let required_tex_dim = dim.compatible_texture_dimension();
+
+ if required_tex_dim != texture.dimension {
+ return Err(
+ resource::CreateTextureViewError::InvalidTextureViewDimension {
+ view: dim,
+ image: texture.dimension,
+ },
+ );
+ }
+
+ if let Kind::D2(_, _, depth, _) = texture.kind {
+ match dim {
+ TextureViewDimension::Cube if depth != 6 => {
+ return Err(
+ resource::CreateTextureViewError::InvalidCubemapTextureDepth {
+ depth,
+ },
+ )
+ }
+ TextureViewDimension::CubeArray if depth % 6 != 0 => return Err(
+ resource::CreateTextureViewError::InvalidCubemapArrayTextureDepth {
+ depth,
+ },
+ ),
+ _ => {}
+ }
+ }
+
+ dim
+ }
+ None => match texture.kind {
+ hal::image::Kind::D1(..) => wgt::TextureViewDimension::D1,
+ hal::image::Kind::D2(_, _, depth, _)
+ if depth > 1 && desc.array_layer_count.is_none() =>
+ {
+ wgt::TextureViewDimension::D2Array
+ }
+ hal::image::Kind::D2(..) => wgt::TextureViewDimension::D2,
+ hal::image::Kind::D3(..) => wgt::TextureViewDimension::D3,
+ },
+ };
+
+ let required_level_count =
+ desc.base_mip_level + desc.level_count.map_or(1, |count| count.get());
+ let required_layer_count =
+ desc.base_array_layer + desc.array_layer_count.map_or(1, |count| count.get());
+ let level_end = texture.full_range.levels.end;
+ let layer_end = texture.full_range.layers.end;
+ if required_level_count > level_end as u32 {
+ return Err(resource::CreateTextureViewError::TooManyMipLevels {
+ requested: required_level_count,
+ total: level_end,
+ });
+ }
+ if required_layer_count > layer_end as u32 {
+ return Err(resource::CreateTextureViewError::TooManyArrayLayers {
+ requested: required_layer_count,
+ total: layer_end,
+ });
+ };
+
+ let aspects = match desc.aspect {
+ wgt::TextureAspect::All => texture.aspects,
+ wgt::TextureAspect::DepthOnly => hal::format::Aspects::DEPTH,
+ wgt::TextureAspect::StencilOnly => hal::format::Aspects::STENCIL,
+ };
+ if !texture.aspects.contains(aspects) {
+ return Err(resource::CreateTextureViewError::InvalidAspect {
+ requested: aspects,
+ total: texture.aspects,
+ });
+ }
+
+ let end_level = desc
+ .level_count
+ .map_or(level_end, |_| required_level_count as u8);
+ let end_layer = desc
+ .array_layer_count
+ .map_or(layer_end, |_| required_layer_count as u16);
+ let selector = TextureSelector {
+ levels: desc.base_mip_level as u8..end_level,
+ layers: desc.base_array_layer as u16..end_layer,
+ };
+
+ let view_layer_count = (selector.layers.end - selector.layers.start) as u32;
+ let layer_check_ok = match view_dim {
+ wgt::TextureViewDimension::D1
+ | wgt::TextureViewDimension::D2
+ | wgt::TextureViewDimension::D3 => view_layer_count == 1,
+ wgt::TextureViewDimension::D2Array => true,
+ wgt::TextureViewDimension::Cube => view_layer_count == 6,
+ wgt::TextureViewDimension::CubeArray => view_layer_count % 6 == 0,
+ };
+ if !layer_check_ok {
+ return Err(resource::CreateTextureViewError::InvalidArrayLayerCount {
+ requested: view_layer_count,
+ dim: view_dim,
+ });
+ }
+
+ let format = desc.format.unwrap_or(texture.format);
+ let range = hal::image::SubresourceRange {
+ aspects,
+ level_start: desc.base_mip_level as _,
+ level_count: desc.level_count.map(|v| v.get() as _),
+ layer_start: desc.base_array_layer as _,
+ layer_count: desc.array_layer_count.map(|v| v.get() as _),
+ };
+
+ let raw = unsafe {
+ self.raw
+ .create_image_view(
+ texture_raw,
+ conv::map_texture_view_dimension(view_dim),
+ conv::map_texture_format(format, self.private_features),
+ hal::format::Swizzle::NO,
+ range.clone(),
+ )
+ .or(Err(resource::CreateTextureViewError::OutOfMemory))?
+ };
+
+ Ok(resource::TextureView {
+ inner: resource::TextureViewInner::Native {
+ raw,
+ source_id: Stored {
+ value: id::Valid(texture_id),
+ ref_count: texture.life_guard.add_ref(),
+ },
+ },
+ aspects,
+ format: texture.format,
+ extent: texture.kind.extent().at_level(desc.base_mip_level as _),
+ samples: texture.kind.num_samples(),
+ selector,
+ life_guard: LifeGuard::new(desc.label.borrow_or_default()),
+ })
+ }
+
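+    /// Create a sampler on this device, validating the `ClampToBorder` address
+    /// mode against device features and checking that the anisotropy clamp, if
+    /// any, is a power of two within limits.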
+ fn create_sampler(
+ &self,
+ self_id: id::DeviceId,
+ desc: &resource::SamplerDescriptor,
+ ) -> Result<resource::Sampler<B>, resource::CreateSamplerError> {
+ let clamp_to_border_enabled = self
+ .features
+ .contains(wgt::Features::ADDRESS_MODE_CLAMP_TO_BORDER);
+ let clamp_to_border_found = desc
+ .address_modes
+ .iter()
+ .any(|am| am == &wgt::AddressMode::ClampToBorder);
+ if clamp_to_border_found && !clamp_to_border_enabled {
+ return Err(resource::CreateSamplerError::MissingFeature(
+ wgt::Features::ADDRESS_MODE_CLAMP_TO_BORDER,
+ ));
+ }
+
+ let actual_clamp = if let Some(clamp) = desc.anisotropy_clamp {
+ let clamp = clamp.get();
+ let valid_clamp = clamp <= MAX_ANISOTROPY && conv::is_power_of_two(clamp as u32);
+ if !valid_clamp {
+ return Err(resource::CreateSamplerError::InvalidClamp(clamp));
+ }
+ if self.private_features.anisotropic_filtering {
+ Some(clamp)
+ } else {
+ None
+ }
+ } else {
+ None
+ };
+
+ let border = match desc.border_color {
+ None | Some(wgt::SamplerBorderColor::TransparentBlack) => {
+ hal::image::BorderColor::TransparentBlack
+ }
+ Some(wgt::SamplerBorderColor::OpaqueBlack) => hal::image::BorderColor::OpaqueBlack,
+ Some(wgt::SamplerBorderColor::OpaqueWhite) => hal::image::BorderColor::OpaqueWhite,
+ };
+
+ let info = hal::image::SamplerDesc {
+ min_filter: conv::map_filter(desc.min_filter),
+ mag_filter: conv::map_filter(desc.mag_filter),
+ mip_filter: conv::map_filter(desc.mipmap_filter),
+ wrap_mode: (
+ conv::map_wrap(desc.address_modes[0]),
+ conv::map_wrap(desc.address_modes[1]),
+ conv::map_wrap(desc.address_modes[2]),
+ ),
+ lod_bias: hal::image::Lod(0.0),
+ lod_range: hal::image::Lod(desc.lod_min_clamp)..hal::image::Lod(desc.lod_max_clamp),
+ comparison: desc.compare.map(conv::map_compare_function),
+ border,
+ normalized: true,
+ anisotropy_clamp: actual_clamp,
+ };
+
+ let raw = unsafe {
+ self.raw.create_sampler(&info).map_err(|err| match err {
+ hal::device::AllocationError::OutOfMemory(_) => {
+ resource::CreateSamplerError::Device(DeviceError::OutOfMemory)
+ }
+ hal::device::AllocationError::TooManyObjects => {
+ resource::CreateSamplerError::TooManyObjects
+ }
+ })?
+ };
+ Ok(resource::Sampler {
+ raw,
+ device_id: Stored {
+ value: id::Valid(self_id),
+ ref_count: self.life_guard.add_ref(),
+ },
+ life_guard: LifeGuard::new(desc.label.borrow_or_default()),
+ comparison: info.comparison.is_some(),
+ })
+ }
+
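+    /// Create a shader module from SPIR-V or WGSL source.
+    ///
+    /// When shader validation is enabled, the source is also parsed into a
+    /// Naga module and validated, so that later pipeline creation can
+    /// introspect the shader interface. Returns the module together with the
+    /// SPIR-V it was created from.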
+ fn create_shader_module<'a>(
+ &self,
+ self_id: id::DeviceId,
+ desc: &'a pipeline::ShaderModuleDescriptor<'a>,
+ ) -> Result<(pipeline::ShaderModule<B>, Cow<'a, [u32]>), pipeline::CreateShaderModuleError>
+ {
+ let spv_flags = if cfg!(debug_assertions) {
+ naga::back::spv::WriterFlags::DEBUG
+ } else {
+ naga::back::spv::WriterFlags::empty()
+ };
+
+ let (spv, naga) = match desc.source {
+ pipeline::ShaderModuleSource::SpirV(ref spv) => {
+ let module = if self.private_features.shader_validation {
+ // Parse the given shader code and store its representation.
+ let spv_iter = spv.iter().cloned();
+ naga::front::spv::Parser::new(spv_iter, &Default::default())
+ .parse()
+ .map_err(|err| {
+ // TODO: eventually, when Naga gets support for all features,
+                            // we want to convert these to a hard error.
+ tracing::warn!("Failed to parse shader SPIR-V code: {:?}", err);
+ tracing::warn!("Shader module will not be validated");
+ })
+ .ok()
+ } else {
+ None
+ };
+ (Cow::Borrowed(&**spv), module)
+ }
+ pipeline::ShaderModuleSource::Wgsl(ref code) => {
+ // TODO: refactor the corresponding Naga error to be owned, and then
+ // display it instead of unwrapping
+ let module = naga::front::wgsl::parse_str(code).unwrap();
+ let spv = naga::back::spv::Writer::new(&module.header, spv_flags).write(&module);
+ (
+ Cow::Owned(spv),
+ if self.private_features.shader_validation {
+ Some(module)
+ } else {
+ None
+ },
+ )
+ } /*
+ pipeline::ShaderModuleSource::Naga(module) => {
+ let spv = naga::back::spv::Writer::new(&module.header, spv_flags).write(&module);
+ (
+ Cow::Owned(spv),
+ if device.private_features.shader_validation {
+ Some(module)
+ } else {
+ None
+ },
+ )
+ }*/
+ };
+
+ if let Some(ref module) = naga {
+ naga::proc::Validator::new().validate(module)?;
+ }
+
+ let raw = unsafe {
+ self.raw
+ .create_shader_module(&spv)
+ .map_err(|err| match err {
+ hal::device::ShaderError::OutOfMemory(_) => DeviceError::OutOfMemory,
+ _ => panic!("failed to create shader module: {}", err),
+ })?
+ };
+ let shader = pipeline::ShaderModule {
+ raw,
+ device_id: Stored {
+ value: id::Valid(self_id),
+ ref_count: self.life_guard.add_ref(),
+ },
+ module: naga,
+ #[cfg(debug_assertions)]
+ label: desc.label.to_string_or_default(),
+ };
+ Ok((shader, spv))
+ }
+
+ /// Create a compatible render pass with a given key.
+ ///
+    /// This function doesn't consider the following aspects for compatibility:
+ /// - image layouts
+ /// - resolve attachments
+ fn create_compatible_render_pass(
+ &self,
+ key: &RenderPassKey,
+ ) -> Result<B::RenderPass, hal::device::OutOfMemory> {
+ let mut color_ids = [(0, hal::image::Layout::ColorAttachmentOptimal); MAX_COLOR_TARGETS];
+ for i in 0..key.colors.len() {
+ color_ids[i].0 = i;
+ }
+ let depth_id = key.depth_stencil.as_ref().map(|_| {
+ (
+ key.colors.len(),
+ hal::image::Layout::DepthStencilAttachmentOptimal,
+ )
+ });
+
+ let subpass = hal::pass::SubpassDesc {
+ colors: &color_ids[..key.colors.len()],
+ depth_stencil: depth_id.as_ref(),
+ inputs: &[],
+ resolves: &[],
+ preserves: &[],
+ };
+ let all = key
+ .all()
+ .map(|(at, _)| at)
+ .collect::<AttachmentDataVec<_>>();
+
+ unsafe { self.raw.create_render_pass(all, iter::once(subpass), &[]) }
+ }
+
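+    /// Look for an existing bind group layout on this device whose entries
+    /// match `entry_map`; if one is found, bump its multi-ref count and return
+    /// its ID instead of creating a duplicate.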
+ fn deduplicate_bind_group_layout(
+ self_id: id::DeviceId,
+ entry_map: &binding_model::BindEntryMap,
+ guard: &Storage<binding_model::BindGroupLayout<B>, id::BindGroupLayoutId>,
+ ) -> Option<id::BindGroupLayoutId> {
+ guard
+ .iter(self_id.backend())
+ .find(|(_, bgl)| bgl.device_id.value.0 == self_id && bgl.entries == *entry_map)
+ .map(|(id, value)| {
+ value.multi_ref_count.inc();
+ id
+ })
+ }
+
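+    /// Collect the entry maps of a pipeline layout's bind group layouts, for
+    /// use in shader interface validation.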
+ fn get_introspection_bind_group_layouts<'a>(
+ pipeline_layout: &binding_model::PipelineLayout<B>,
+ bgl_guard: &'a Storage<binding_model::BindGroupLayout<B>, id::BindGroupLayoutId>,
+ ) -> validation::IntrospectionBindGroupLayouts<'a> {
+ validation::IntrospectionBindGroupLayouts::Given(
+ pipeline_layout
+ .bind_group_layout_ids
+ .iter()
+ .map(|&id| &bgl_guard[id].entries)
+ .collect(),
+ )
+ }
+
+ fn create_bind_group_layout(
+ &self,
+ self_id: id::DeviceId,
+ label: Option<&str>,
+ entry_map: binding_model::BindEntryMap,
+ ) -> Result<binding_model::BindGroupLayout<B>, binding_model::CreateBindGroupLayoutError> {
+ let mut desc_count = descriptor::DescriptorTotalCount::default();
+ for binding in entry_map.values() {
+ use wgt::BindingType as Bt;
+ let (counter, array_feature) = match binding.ty {
+ Bt::Buffer {
+ ty: wgt::BufferBindingType::Uniform,
+ has_dynamic_offset: false,
+ min_binding_size: _,
+ } => (&mut desc_count.uniform_buffer, None),
+ Bt::Buffer {
+ ty: wgt::BufferBindingType::Uniform,
+ has_dynamic_offset: true,
+ min_binding_size: _,
+ } => (&mut desc_count.uniform_buffer_dynamic, None),
+ Bt::Buffer {
+ ty: wgt::BufferBindingType::Storage { .. },
+ has_dynamic_offset: false,
+ min_binding_size: _,
+ } => (&mut desc_count.storage_buffer, None),
+ Bt::Buffer {
+ ty: wgt::BufferBindingType::Storage { .. },
+ has_dynamic_offset: true,
+ min_binding_size: _,
+ } => (&mut desc_count.storage_buffer_dynamic, None),
+ Bt::Sampler { .. } => (&mut desc_count.sampler, None),
+ Bt::Texture { .. } => (
+ &mut desc_count.sampled_image,
+ Some(wgt::Features::SAMPLED_TEXTURE_BINDING_ARRAY),
+ ),
+ Bt::StorageTexture { .. } => (&mut desc_count.storage_image, None),
+ };
+ *counter += match binding.count {
+ // Validate the count parameter
+ Some(count) => {
+ let feature = array_feature
+ .ok_or(binding_model::CreateBindGroupLayoutError::ArrayUnsupported)?;
+ if !self.features.contains(feature) {
+ return Err(binding_model::CreateBindGroupLayoutError::MissingFeature(
+ feature,
+ ));
+ }
+ count.get()
+ }
+ None => 1,
+ };
+ }
+
+ let raw_bindings = entry_map
+ .values()
+ .map(|entry| hal::pso::DescriptorSetLayoutBinding {
+ binding: entry.binding,
+ ty: conv::map_binding_type(entry),
+ count: entry
+ .count
+ .map_or(1, |v| v.get() as hal::pso::DescriptorArrayIndex), //TODO: consolidate
+ stage_flags: conv::map_shader_stage_flags(entry.visibility),
+ immutable_samplers: false, // TODO
+ });
+ let raw = unsafe {
+ let mut raw_layout = self
+ .raw
+ .create_descriptor_set_layout(raw_bindings, &[])
+ .or(Err(DeviceError::OutOfMemory))?;
+ if let Some(label) = label {
+ self.raw
+ .set_descriptor_set_layout_name(&mut raw_layout, label);
+ }
+ raw_layout
+ };
+
+ let mut count_validator = binding_model::BindingTypeMaxCountValidator::default();
+ for entry in entry_map.values() {
+ count_validator.add_binding(entry);
+ }
+ // If a single bind group layout violates limits, the pipeline layout is definitely
+        // going to violate limits too, so let's catch it now.
+ count_validator
+ .validate(&self.limits)
+ .map_err(binding_model::CreateBindGroupLayoutError::TooManyBindings)?;
+
+ Ok(binding_model::BindGroupLayout {
+ raw,
+ device_id: Stored {
+ value: id::Valid(self_id),
+ ref_count: self.life_guard.add_ref(),
+ },
+ multi_ref_count: MultiRefCount::new(),
+ desc_count,
+ dynamic_count: entry_map
+ .values()
+ .filter(|b| b.ty.has_dynamic_offset())
+ .count(),
+ count_validator,
+ entries: entry_map,
+ #[cfg(debug_assertions)]
+ label: label.unwrap_or("").to_string(),
+ })
+ }
+
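+    /// Create a bind group against the given layout, validating every entry
+    /// against its declaration and recording resource usage in a fresh
+    /// `TrackerSet` for later synchronization.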
+ fn create_bind_group<G: GlobalIdentityHandlerFactory>(
+ &self,
+ self_id: id::DeviceId,
+ layout: &binding_model::BindGroupLayout<B>,
+ desc: &binding_model::BindGroupDescriptor,
+ hub: &Hub<B, G>,
+ token: &mut Token<binding_model::BindGroupLayout<B>>,
+ ) -> Result<binding_model::BindGroup<B>, binding_model::CreateBindGroupError> {
+ use crate::binding_model::{BindingResource as Br, CreateBindGroupError as Error};
+ {
+ // Check that the number of entries in the descriptor matches
+ // the number of entries in the layout.
+ let actual = desc.entries.len();
+ let expected = layout.entries.len();
+ if actual != expected {
+ return Err(Error::BindingsNumMismatch { expected, actual });
+ }
+ }
+
+ // TODO: arrayvec/smallvec
+ // Record binding info for dynamic offset validation
+ let mut dynamic_binding_info = Vec::new();
+ // fill out the descriptors
+ let mut used = TrackerSet::new(B::VARIANT);
+
+ let (buffer_guard, mut token) = hub.buffers.read(token);
+ let (texture_guard, mut token) = hub.textures.read(&mut token); //skip token
+ let (texture_view_guard, mut token) = hub.texture_views.read(&mut token);
+ let (sampler_guard, _) = hub.samplers.read(&mut token);
+
+ // `BTreeMap` has ordered bindings as keys, which allows us to coalesce
+ // the descriptor writes into a single transaction.
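+        // For example, ordered bindings 0, 1, and 2 end up flattened into one
+        // `DescriptorSetWrite` starting at the first binding (see below).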
+ let mut write_map = BTreeMap::new();
+ for entry in desc.entries.iter() {
+ let binding = entry.binding;
+ // Find the corresponding declaration in the layout
+ let decl = layout
+ .entries
+ .get(&binding)
+ .ok_or(Error::MissingBindingDeclaration(binding))?;
+ let descriptors: SmallVec<[_; 1]> = match entry.resource {
+ Br::Buffer(ref bb) => {
+ let (binding_ty, dynamic, min_size) = match decl.ty {
+ wgt::BindingType::Buffer {
+ ty,
+ has_dynamic_offset,
+ min_binding_size,
+ } => (ty, has_dynamic_offset, min_binding_size),
+ _ => {
+ return Err(Error::WrongBindingType {
+ binding,
+ actual: decl.ty.clone(),
+ expected: "UniformBuffer, StorageBuffer or ReadonlyStorageBuffer",
+ })
+ }
+ };
+ let (pub_usage, internal_use) = match binding_ty {
+ wgt::BufferBindingType::Uniform => {
+ (wgt::BufferUsage::UNIFORM, resource::BufferUse::UNIFORM)
+ }
+ wgt::BufferBindingType::Storage { read_only } => (
+ wgt::BufferUsage::STORAGE,
+ if read_only {
+ resource::BufferUse::STORAGE_LOAD
+ } else {
+ resource::BufferUse::STORAGE_STORE
+ },
+ ),
+ };
+
+ if bb.offset % wgt::BIND_BUFFER_ALIGNMENT != 0 {
+ return Err(Error::UnalignedBufferOffset(bb.offset));
+ }
+
+ let buffer = used
+ .buffers
+ .use_extend(&*buffer_guard, bb.buffer_id, (), internal_use)
+ .map_err(|_| Error::InvalidBuffer(bb.buffer_id))?;
+ check_buffer_usage(buffer.usage, pub_usage)?;
+ let &(ref buffer_raw, _) = buffer
+ .raw
+ .as_ref()
+ .ok_or(Error::InvalidBuffer(bb.buffer_id))?;
+
+ let (bind_size, bind_end) = match bb.size {
+ Some(size) => {
+ let end = bb.offset + size.get();
+ if end > buffer.size {
+ return Err(Error::BindingRangeTooLarge {
+ range: bb.offset..end,
+ size: buffer.size,
+ });
+ }
+ (size.get(), end)
+ }
+ None => (buffer.size - bb.offset, buffer.size),
+ };
+
+ if binding_ty == wgt::BufferBindingType::Uniform
+ && (self.limits.max_uniform_buffer_binding_size as u64) < bind_size
+ {
+ return Err(Error::UniformBufferRangeTooLarge);
+ }
+
+ // Record binding info for validating dynamic offsets
+ if dynamic {
+ dynamic_binding_info.push(binding_model::BindGroupDynamicBindingData {
+ maximum_dynamic_offset: buffer.size - bind_end,
+ });
+ }
+
+ if let Some(non_zero) = min_size {
+ let min_size = non_zero.get();
+ if min_size > bind_size {
+ return Err(Error::BindingSizeTooSmall {
+ actual: bind_size,
+ min: min_size,
+ });
+ }
+ }
+
+ let sub_range = hal::buffer::SubRange {
+ offset: bb.offset,
+ size: Some(bind_size),
+ };
+ SmallVec::from([hal::pso::Descriptor::Buffer(buffer_raw, sub_range)])
+ }
+ Br::Sampler(id) => {
+ match decl.ty {
+ wgt::BindingType::Sampler {
+ filtering: _,
+ comparison,
+ } => {
+ let sampler = used
+ .samplers
+ .use_extend(&*sampler_guard, id, (), ())
+ .map_err(|_| Error::InvalidSampler(id))?;
+
+                            // Check that the sampler is (or is not) a comparison sampler, as the layout declares
+ if sampler.comparison != comparison {
+ return Err(Error::WrongSamplerComparison);
+ }
+
+ SmallVec::from([hal::pso::Descriptor::Sampler(&sampler.raw)])
+ }
+ _ => {
+ return Err(Error::WrongBindingType {
+ binding,
+ actual: decl.ty.clone(),
+ expected: "Sampler",
+ })
+ }
+ }
+ }
+ Br::TextureView(id) => {
+ let view = used
+ .views
+ .use_extend(&*texture_view_guard, id, (), ())
+ .map_err(|_| Error::InvalidTextureView(id))?;
+ let (pub_usage, internal_use) = match decl.ty {
+ wgt::BindingType::Texture { .. } => {
+ (wgt::TextureUsage::SAMPLED, resource::TextureUse::SAMPLED)
+ }
+ wgt::BindingType::StorageTexture { access, .. } => (
+ wgt::TextureUsage::STORAGE,
+ match access {
+ wgt::StorageTextureAccess::ReadOnly => {
+ resource::TextureUse::STORAGE_LOAD
+ }
+ wgt::StorageTextureAccess::WriteOnly => {
+ resource::TextureUse::STORAGE_STORE
+ }
+ },
+ ),
+ _ => return Err(Error::WrongBindingType {
+ binding,
+ actual: decl.ty.clone(),
+ expected:
+ "SampledTexture, ReadonlyStorageTexture or WriteonlyStorageTexture",
+ }),
+ };
+ if view
+ .aspects
+ .contains(hal::format::Aspects::DEPTH | hal::format::Aspects::STENCIL)
+ {
+ return Err(Error::DepthStencilAspect);
+ }
+ match view.inner {
+ resource::TextureViewInner::Native {
+ ref raw,
+ ref source_id,
+ } => {
+ // Careful here: the texture may no longer have its own ref count,
+ // if it was deleted by the user.
+ let texture = &texture_guard[source_id.value];
+ used.textures
+ .change_extend(
+ source_id.value,
+ &source_id.ref_count,
+ view.selector.clone(),
+ internal_use,
+ )
+ .unwrap();
+ check_texture_usage(texture.usage, pub_usage)?;
+ let image_layout =
+ conv::map_texture_state(internal_use, view.aspects).1;
+ SmallVec::from([hal::pso::Descriptor::Image(raw, image_layout)])
+ }
+ resource::TextureViewInner::SwapChain { .. } => {
+ return Err(Error::SwapChainImage);
+ }
+ }
+ }
+ Br::TextureViewArray(ref bindings_array) => {
+ let required_feats = wgt::Features::SAMPLED_TEXTURE_BINDING_ARRAY;
+ if !self.features.contains(required_feats) {
+ return Err(Error::MissingFeatures(required_feats));
+ }
+
+ if let Some(count) = decl.count {
+ let count = count.get() as usize;
+ let num_bindings = bindings_array.len();
+ if count != num_bindings {
+ return Err(Error::BindingArrayLengthMismatch {
+ actual: num_bindings,
+ expected: count,
+ });
+ }
+ } else {
+ return Err(Error::SingleBindingExpected);
+ }
+
+ let (pub_usage, internal_use) = match decl.ty {
+ wgt::BindingType::Texture { .. } => {
+ (wgt::TextureUsage::SAMPLED, resource::TextureUse::SAMPLED)
+ }
+ _ => {
+ return Err(Error::WrongBindingType {
+ binding,
+ actual: decl.ty.clone(),
+ expected: "SampledTextureArray",
+ })
+ }
+ };
+ bindings_array
+ .iter()
+ .map(|&id| {
+ let view = used
+ .views
+ .use_extend(&*texture_view_guard, id, (), ())
+ .map_err(|_| Error::InvalidTextureView(id))?;
+ match view.inner {
+ resource::TextureViewInner::Native {
+ ref raw,
+ ref source_id,
+ } => {
+ // Careful here: the texture may no longer have its own ref count,
+ // if it was deleted by the user.
+ let texture = &texture_guard[source_id.value];
+ used.textures
+ .change_extend(
+ source_id.value,
+ &source_id.ref_count,
+ view.selector.clone(),
+ internal_use,
+ )
+ .unwrap();
+ check_texture_usage(texture.usage, pub_usage)?;
+ let image_layout =
+ conv::map_texture_state(internal_use, view.aspects).1;
+ Ok(hal::pso::Descriptor::Image(raw, image_layout))
+ }
+ resource::TextureViewInner::SwapChain { .. } => {
+ Err(Error::SwapChainImage)
+ }
+ }
+ })
+ .collect::<Result<_, _>>()?
+ }
+ };
+ if write_map.insert(binding, descriptors).is_some() {
+ return Err(Error::DuplicateBinding(binding));
+ }
+ }
+
+ let mut desc_sets =
+ self.desc_allocator
+ .lock()
+ .allocate(&self.raw, &layout.raw, &layout.desc_count, 1)?;
+ let mut desc_set = desc_sets.pop().unwrap();
+
+ // Set the descriptor set's label for easier debugging.
+ if let Some(label) = desc.label.as_ref() {
+ unsafe {
+ self.raw.set_descriptor_set_name(desc_set.raw_mut(), &label);
+ }
+ }
+
+ if let Some(start_binding) = write_map.keys().next().cloned() {
+ let descriptors = write_map
+ .into_iter()
+ .flat_map(|(_, list)| list)
+ .collect::<Vec<_>>();
+ let write = hal::pso::DescriptorSetWrite {
+ set: desc_set.raw(),
+ binding: start_binding,
+ array_offset: 0,
+ descriptors,
+ };
+ unsafe {
+ self.raw.write_descriptor_sets(iter::once(write));
+ }
+ }
+
+ Ok(binding_model::BindGroup {
+ raw: desc_set,
+ device_id: Stored {
+ value: id::Valid(self_id),
+ ref_count: self.life_guard.add_ref(),
+ },
+ layout_id: id::Valid(desc.layout),
+ life_guard: LifeGuard::new(desc.label.borrow_or_default()),
+ used,
+ dynamic_binding_info,
+ })
+ }
+
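+    /// Create a pipeline layout, checking the bind group count against
+    /// `max_bind_groups`, validating push constant ranges (feature, size,
+    /// alignment, at most one range per stage), and merging the per-layout
+    /// binding counts against device limits.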
+ fn create_pipeline_layout(
+ &self,
+ self_id: id::DeviceId,
+ desc: &binding_model::PipelineLayoutDescriptor,
+ bgl_guard: &Storage<binding_model::BindGroupLayout<B>, id::BindGroupLayoutId>,
+ ) -> Result<binding_model::PipelineLayout<B>, binding_model::CreatePipelineLayoutError> {
+ use crate::binding_model::CreatePipelineLayoutError as Error;
+
+ let bind_group_layouts_count = desc.bind_group_layouts.len();
+ let device_max_bind_groups = self.limits.max_bind_groups as usize;
+ if bind_group_layouts_count > device_max_bind_groups {
+ return Err(Error::TooManyGroups {
+ actual: bind_group_layouts_count,
+ max: device_max_bind_groups,
+ });
+ }
+
+ if !desc.push_constant_ranges.is_empty()
+ && !self.features.contains(wgt::Features::PUSH_CONSTANTS)
+ {
+ return Err(Error::MissingFeature(wgt::Features::PUSH_CONSTANTS));
+ }
+ let mut used_stages = wgt::ShaderStage::empty();
+ for (index, pc) in desc.push_constant_ranges.iter().enumerate() {
+ if pc.stages.intersects(used_stages) {
+ return Err(Error::MoreThanOnePushConstantRangePerStage {
+ index,
+ provided: pc.stages,
+ intersected: pc.stages & used_stages,
+ });
+ }
+ used_stages |= pc.stages;
+
+ let device_max_pc_size = self.limits.max_push_constant_size;
+ if device_max_pc_size < pc.range.end {
+ return Err(Error::PushConstantRangeTooLarge {
+ index,
+ range: pc.range.clone(),
+ max: device_max_pc_size,
+ });
+ }
+
+ if pc.range.start % wgt::PUSH_CONSTANT_ALIGNMENT != 0 {
+ return Err(Error::MisalignedPushConstantRange {
+ index,
+ bound: pc.range.start,
+ });
+ }
+ if pc.range.end % wgt::PUSH_CONSTANT_ALIGNMENT != 0 {
+ return Err(Error::MisalignedPushConstantRange {
+ index,
+ bound: pc.range.end,
+ });
+ }
+ }
+
+ let mut count_validator = binding_model::BindingTypeMaxCountValidator::default();
+
+ // validate total resource counts
+ for &id in desc.bind_group_layouts.iter() {
+ let bind_group_layout = bgl_guard
+ .get(id)
+ .map_err(|_| Error::InvalidBindGroupLayout(id))?;
+ count_validator.merge(&bind_group_layout.count_validator);
+ }
+ count_validator
+ .validate(&self.limits)
+ .map_err(Error::TooManyBindings)?;
+
+ let descriptor_set_layouts = desc
+ .bind_group_layouts
+ .iter()
+ .map(|&id| &bgl_guard.get(id).unwrap().raw);
+ let push_constants = desc
+ .push_constant_ranges
+ .iter()
+ .map(|pc| (conv::map_shader_stage_flags(pc.stages), pc.range.clone()));
+
+ let raw = unsafe {
+ let raw_layout = self
+ .raw
+ .create_pipeline_layout(descriptor_set_layouts, push_constants)
+ .or(Err(DeviceError::OutOfMemory))?;
+            if desc.label.is_some() {
+ //TODO-0.6: needs gfx changes published
+ //self.raw.set_pipeline_layout_name(&mut raw_layout, label);
+ }
+ raw_layout
+ };
+
+ Ok(binding_model::PipelineLayout {
+ raw,
+ device_id: Stored {
+ value: id::Valid(self_id),
+ ref_count: self.life_guard.add_ref(),
+ },
+ life_guard: LifeGuard::new(desc.label.borrow_or_default()),
+ bind_group_layout_ids: desc
+ .bind_group_layouts
+ .iter()
+ .map(|&id| {
+ bgl_guard.get(id).unwrap().multi_ref_count.inc();
+ id::Valid(id)
+ })
+ .collect(),
+ push_constant_ranges: desc.push_constant_ranges.iter().cloned().collect(),
+ })
+ }
+
+ //TODO: refactor this. It's the only method of `Device` that registers new objects
+ // (the pipeline layout).
+ fn derive_pipeline_layout<G: GlobalIdentityHandlerFactory>(
+ &self,
+ self_id: id::DeviceId,
+ implicit_pipeline_ids: Option<ImplicitPipelineIds<G>>,
+ mut derived_group_layouts: ArrayVec<[binding_model::BindEntryMap; MAX_BIND_GROUPS]>,
+ bgl_guard: &mut Storage<binding_model::BindGroupLayout<B>, id::BindGroupLayoutId>,
+ pipeline_layout_guard: &mut Storage<binding_model::PipelineLayout<B>, id::PipelineLayoutId>,
+ hub: &Hub<B, G>,
+ ) -> Result<
+ (id::PipelineLayoutId, pipeline::ImplicitBindGroupCount),
+ pipeline::ImplicitLayoutError,
+ > {
+ let derived_bind_group_count =
+ derived_group_layouts.len() as pipeline::ImplicitBindGroupCount;
+
+ while derived_group_layouts
+ .last()
+ .map_or(false, |map| map.is_empty())
+ {
+ derived_group_layouts.pop();
+ }
+ let ids = implicit_pipeline_ids
+ .as_ref()
+ .ok_or(pipeline::ImplicitLayoutError::MissingIds(0))?;
+ if ids.group_ids.len() < derived_group_layouts.len() {
+ tracing::error!(
+ "Not enough bind group IDs ({}) specified for the implicit layout ({})",
+ ids.group_ids.len(),
+ derived_group_layouts.len()
+ );
+ return Err(pipeline::ImplicitLayoutError::MissingIds(
+ derived_bind_group_count,
+ ));
+ }
+
+ let mut derived_group_layout_ids =
+ ArrayVec::<[id::BindGroupLayoutId; MAX_BIND_GROUPS]>::new();
+ for (bgl_id, map) in ids.group_ids.iter().zip(derived_group_layouts) {
+ let processed_id = match Device::deduplicate_bind_group_layout(self_id, &map, bgl_guard)
+ {
+ Some(dedup_id) => dedup_id,
+ None => {
+ #[cfg(feature = "trace")]
+ let bgl_desc = binding_model::BindGroupLayoutDescriptor {
+ label: None,
+ entries: if self.trace.is_some() {
+ Cow::Owned(map.values().cloned().collect())
+ } else {
+ Cow::Borrowed(&[])
+ },
+ };
+ let bgl = self.create_bind_group_layout(self_id, None, map)?;
+ let out_id = hub.bind_group_layouts.register_identity_locked(
+ bgl_id.clone(),
+ bgl,
+ bgl_guard,
+ );
+ #[cfg(feature = "trace")]
+ if let Some(ref trace) = self.trace {
+ trace
+ .lock()
+ .add(trace::Action::CreateBindGroupLayout(out_id.0, bgl_desc));
+ }
+ out_id.0
+ }
+ };
+ derived_group_layout_ids.push(processed_id);
+ }
+
+ let layout_desc = binding_model::PipelineLayoutDescriptor {
+ label: None,
+ bind_group_layouts: Cow::Borrowed(&derived_group_layout_ids),
+ push_constant_ranges: Cow::Borrowed(&[]), //TODO?
+ };
+ let layout = self.create_pipeline_layout(self_id, &layout_desc, bgl_guard)?;
+ let layout_id = hub.pipeline_layouts.register_identity_locked(
+ ids.root_id.clone(),
+ layout,
+ pipeline_layout_guard,
+ );
+ #[cfg(feature = "trace")]
+ if let Some(ref trace) = self.trace {
+ trace.lock().add(trace::Action::CreatePipelineLayout(
+ layout_id.0,
+ layout_desc,
+ ));
+ }
+ Ok((layout_id.0, derived_bind_group_count))
+ }
+
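+    /// Create a compute pipeline, deriving an implicit pipeline layout from
+    /// shader introspection when `desc.layout` is `None`.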
+ fn create_compute_pipeline<G: GlobalIdentityHandlerFactory>(
+ &self,
+ self_id: id::DeviceId,
+ desc: &pipeline::ComputePipelineDescriptor,
+ implicit_pipeline_ids: Option<ImplicitPipelineIds<G>>,
+ hub: &Hub<B, G>,
+ token: &mut Token<Self>,
+ ) -> Result<
+ (
+ pipeline::ComputePipeline<B>,
+ pipeline::ImplicitBindGroupCount,
+ id::PipelineLayoutId,
+ ),
+ pipeline::CreateComputePipelineError,
+ > {
+ //TODO: only lock mutable if the layout is derived
+ let (mut pipeline_layout_guard, mut token) = hub.pipeline_layouts.write(token);
+ let (mut bgl_guard, mut token) = hub.bind_group_layouts.write(&mut token);
+
+ let mut derived_group_layouts =
+ ArrayVec::<[binding_model::BindEntryMap; MAX_BIND_GROUPS]>::new();
+
+ let interface = validation::StageInterface::default();
+ let pipeline_stage = &desc.compute_stage;
+ let (shader_module_guard, _) = hub.shader_modules.read(&mut token);
+
+ let entry_point_name = &pipeline_stage.entry_point;
+ let shader_module = shader_module_guard
+ .get(pipeline_stage.module)
+ .map_err(|_| {
+ pipeline::CreateComputePipelineError::Stage(validation::StageError::InvalidModule)
+ })?;
+
+ let flag = wgt::ShaderStage::COMPUTE;
+ if let Some(ref module) = shader_module.module {
+ let group_layouts = match desc.layout {
+ Some(pipeline_layout_id) => Device::get_introspection_bind_group_layouts(
+ pipeline_layout_guard
+ .get(pipeline_layout_id)
+ .map_err(|_| pipeline::CreateComputePipelineError::InvalidLayout)?,
+ &*bgl_guard,
+ ),
+ None => {
+ for _ in 0..self.limits.max_bind_groups {
+ derived_group_layouts.push(binding_model::BindEntryMap::default());
+ }
+ validation::IntrospectionBindGroupLayouts::Derived(&mut derived_group_layouts)
+ }
+ };
+ let _ =
+ validation::check_stage(module, group_layouts, &entry_point_name, flag, interface)
+ .map_err(pipeline::CreateComputePipelineError::Stage)?;
+ } else if desc.layout.is_none() {
+ return Err(pipeline::ImplicitLayoutError::ReflectionError(flag).into());
+ }
+
+ let shader = hal::pso::EntryPoint::<B> {
+ entry: &entry_point_name, // TODO
+ module: &shader_module.raw,
+ specialization: hal::pso::Specialization::EMPTY,
+ };
+
+ // TODO
+ let flags = hal::pso::PipelineCreationFlags::empty();
+ // TODO
+ let parent = hal::pso::BasePipeline::None;
+
+ let (pipeline_layout_id, derived_bind_group_count) = match desc.layout {
+ Some(id) => (id, 0),
+ None => self.derive_pipeline_layout(
+ self_id,
+ implicit_pipeline_ids,
+ derived_group_layouts,
+ &mut *bgl_guard,
+ &mut *pipeline_layout_guard,
+ &hub,
+ )?,
+ };
+ let layout = pipeline_layout_guard
+ .get(pipeline_layout_id)
+ .map_err(|_| pipeline::CreateComputePipelineError::InvalidLayout)?;
+
+ let pipeline_desc = hal::pso::ComputePipelineDesc {
+ shader,
+ layout: &layout.raw,
+ flags,
+ parent,
+ };
+
+ let raw = match unsafe { self.raw.create_compute_pipeline(&pipeline_desc, None) } {
+ Ok(pipeline) => pipeline,
+ Err(hal::pso::CreationError::OutOfMemory(_)) => {
+ return Err(pipeline::CreateComputePipelineError::Device(
+ DeviceError::OutOfMemory,
+ ))
+ }
+ other => panic!("Compute pipeline creation error: {:?}", other),
+ };
+        if desc.label.is_some() {
+ //TODO-0.6: self.raw.set_compute_pipeline_name(&mut raw, label);
+ }
+
+ let pipeline = pipeline::ComputePipeline {
+ raw,
+ layout_id: Stored {
+ value: id::Valid(pipeline_layout_id),
+ ref_count: layout.life_guard.add_ref(),
+ },
+ device_id: Stored {
+ value: id::Valid(self_id),
+ ref_count: self.life_guard.add_ref(),
+ },
+ life_guard: LifeGuard::new(desc.label.borrow_or_default()),
+ };
+ Ok((pipeline, derived_bind_group_count, pipeline_layout_id))
+ }
+
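+    /// Create a render pipeline, validating vertex buffers, shader stages, and
+    /// color/depth-stencil state, and reusing a compatible render pass from
+    /// the cache (creating one on a miss) for the pipeline's subpass.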
+ fn create_render_pipeline<G: GlobalIdentityHandlerFactory>(
+ &self,
+ self_id: id::DeviceId,
+ desc: &pipeline::RenderPipelineDescriptor,
+ implicit_pipeline_ids: Option<ImplicitPipelineIds<G>>,
+ hub: &Hub<B, G>,
+ token: &mut Token<Self>,
+ ) -> Result<
+ (
+ pipeline::RenderPipeline<B>,
+ pipeline::ImplicitBindGroupCount,
+ id::PipelineLayoutId,
+ ),
+ pipeline::CreateRenderPipelineError,
+ > {
+ //TODO: only lock mutable if the layout is derived
+ let (mut pipeline_layout_guard, mut token) = hub.pipeline_layouts.write(token);
+ let (mut bgl_guard, mut token) = hub.bind_group_layouts.write(&mut token);
+
+ let mut derived_group_layouts =
+ ArrayVec::<[binding_model::BindEntryMap; MAX_BIND_GROUPS]>::new();
+
+ let samples = {
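+            // Valid sample counts are the powers of two from 1 through 32.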
+ let sc = desc.sample_count;
+ if sc == 0 || sc > 32 || !conv::is_power_of_two(sc) {
+ return Err(pipeline::CreateRenderPipelineError::InvalidSampleCount(sc));
+ }
+ sc as u8
+ };
+
+ let color_states = &desc.color_states;
+ let depth_stencil_state = desc.depth_stencil_state.as_ref();
+
+ let rasterization_state = desc
+ .rasterization_state
+ .as_ref()
+ .cloned()
+ .unwrap_or_default();
+ let rasterizer = conv::map_rasterization_state_descriptor(&rasterization_state);
+
+ let mut interface = validation::StageInterface::default();
+ let mut validated_stages = wgt::ShaderStage::empty();
+
+ let desc_vbs = &desc.vertex_state.vertex_buffers;
+ let mut vertex_strides = Vec::with_capacity(desc_vbs.len());
+ let mut vertex_buffers = Vec::with_capacity(desc_vbs.len());
+ let mut attributes = Vec::new();
+ for (i, vb_state) in desc_vbs.iter().enumerate() {
+ vertex_strides
+ .alloc()
+ .init((vb_state.stride, vb_state.step_mode));
+ if vb_state.attributes.is_empty() {
+ continue;
+ }
+ if vb_state.stride % wgt::VERTEX_STRIDE_ALIGNMENT != 0 {
+ return Err(pipeline::CreateRenderPipelineError::UnalignedVertexStride {
+ index: i as u32,
+ stride: vb_state.stride,
+ });
+ }
+ vertex_buffers.alloc().init(hal::pso::VertexBufferDesc {
+ binding: i as u32,
+ stride: vb_state.stride as u32,
+ rate: match vb_state.step_mode {
+ InputStepMode::Vertex => hal::pso::VertexInputRate::Vertex,
+ InputStepMode::Instance => hal::pso::VertexInputRate::Instance(1),
+ },
+ });
+ let desc_atts = &vb_state.attributes;
+ for attribute in desc_atts.iter() {
+ if attribute.offset >= 0x10000000 {
+ return Err(
+ pipeline::CreateRenderPipelineError::InvalidVertexAttributeOffset {
+ location: attribute.shader_location,
+ offset: attribute.offset,
+ },
+ );
+ }
+ attributes.alloc().init(hal::pso::AttributeDesc {
+ location: attribute.shader_location,
+ binding: i as u32,
+ element: hal::pso::Element {
+ format: conv::map_vertex_format(attribute.format),
+ offset: attribute.offset as u32,
+ },
+ });
+ interface.insert(
+ attribute.shader_location,
+ validation::MaybeOwned::Owned(validation::map_vertex_format(attribute.format)),
+ );
+ }
+ }
+
+ let input_assembler = hal::pso::InputAssemblerDesc {
+ primitive: conv::map_primitive_topology(desc.primitive_topology),
+ with_adjacency: false,
+ restart_index: None, //TODO
+ };
+
+ let blender = hal::pso::BlendDesc {
+ logic_op: None, // TODO
+ targets: color_states
+ .iter()
+ .map(conv::map_color_state_descriptor)
+ .collect(),
+ };
+ let depth_stencil = depth_stencil_state
+ .map(conv::map_depth_stencil_state_descriptor)
+ .unwrap_or_default();
+
+ let multisampling: Option<hal::pso::Multisampling> = if samples == 1 {
+ None
+ } else {
+ Some(hal::pso::Multisampling {
+ rasterization_samples: samples,
+ sample_shading: None,
+ sample_mask: desc.sample_mask as u64,
+ alpha_coverage: desc.alpha_to_coverage_enabled,
+ alpha_to_one: false,
+ })
+ };
+
+ // TODO
+ let baked_states = hal::pso::BakedStates {
+ viewport: None,
+ scissor: None,
+ blend_color: None,
+ depth_bounds: None,
+ };
+
+ if rasterization_state.clamp_depth && !self.features.contains(wgt::Features::DEPTH_CLAMPING)
+ {
+ return Err(pipeline::CreateRenderPipelineError::MissingFeature(
+ wgt::Features::DEPTH_CLAMPING,
+ ));
+ }
+ if rasterization_state.polygon_mode != wgt::PolygonMode::Fill
+ && !self.features.contains(wgt::Features::NON_FILL_POLYGON_MODE)
+ {
+ return Err(pipeline::CreateRenderPipelineError::MissingFeature(
+ wgt::Features::NON_FILL_POLYGON_MODE,
+ ));
+ }
+
+ if desc.layout.is_none() {
+ for _ in 0..self.limits.max_bind_groups {
+ derived_group_layouts.push(binding_model::BindEntryMap::default());
+ }
+ }
+
+ let (shader_module_guard, _) = hub.shader_modules.read(&mut token);
+
+ let rp_key = RenderPassKey {
+ colors: color_states
+ .iter()
+ .map(|state| {
+ let at = hal::pass::Attachment {
+ format: Some(conv::map_texture_format(
+ state.format,
+ self.private_features,
+ )),
+ samples,
+ ops: hal::pass::AttachmentOps::PRESERVE,
+ stencil_ops: hal::pass::AttachmentOps::DONT_CARE,
+ layouts: hal::image::Layout::General..hal::image::Layout::General,
+ };
+ (at, hal::image::Layout::ColorAttachmentOptimal)
+ })
+ .collect(),
+            // We can ignore the resolves, as the Vulkan spec says:
+ // As an additional special case, if two render passes have a single subpass,
+ // they are compatible even if they have different resolve attachment references
+ // or depth/stencil resolve modes but satisfy the other compatibility conditions.
+ resolves: ArrayVec::new(),
+ depth_stencil: depth_stencil_state.map(|state| {
+ let at = hal::pass::Attachment {
+ format: Some(conv::map_texture_format(
+ state.format,
+ self.private_features,
+ )),
+ samples,
+ ops: hal::pass::AttachmentOps::PRESERVE,
+ stencil_ops: hal::pass::AttachmentOps::PRESERVE,
+ layouts: hal::image::Layout::General..hal::image::Layout::General,
+ };
+ (at, hal::image::Layout::DepthStencilAttachmentOptimal)
+ }),
+ };
+
+ let vertex = {
+ let entry_point_name = &desc.vertex_stage.entry_point;
+ let flag = wgt::ShaderStage::VERTEX;
+
+ let shader_module =
+ shader_module_guard
+ .get(desc.vertex_stage.module)
+ .map_err(|_| pipeline::CreateRenderPipelineError::Stage {
+ flag,
+ error: validation::StageError::InvalidModule,
+ })?;
+
+ if let Some(ref module) = shader_module.module {
+ let group_layouts = match desc.layout {
+ Some(pipeline_layout_id) => Device::get_introspection_bind_group_layouts(
+ pipeline_layout_guard
+ .get(pipeline_layout_id)
+ .map_err(|_| pipeline::CreateRenderPipelineError::InvalidLayout)?,
+ &*bgl_guard,
+ ),
+ None => validation::IntrospectionBindGroupLayouts::Derived(
+ &mut derived_group_layouts,
+ ),
+ };
+
+ interface = validation::check_stage(
+ module,
+ group_layouts,
+ &entry_point_name,
+ flag,
+ interface,
+ )
+ .map_err(|error| pipeline::CreateRenderPipelineError::Stage { flag, error })?;
+ validated_stages |= flag;
+ }
+
+ hal::pso::EntryPoint::<B> {
+ entry: &entry_point_name, // TODO
+ module: &shader_module.raw,
+ specialization: hal::pso::Specialization::EMPTY,
+ }
+ };
+
+ let fragment = match &desc.fragment_stage {
+ Some(stage) => {
+ let entry_point_name = &stage.entry_point;
+ let flag = wgt::ShaderStage::FRAGMENT;
+
+ let shader_module = shader_module_guard.get(stage.module).map_err(|_| {
+ pipeline::CreateRenderPipelineError::Stage {
+ flag,
+ error: validation::StageError::InvalidModule,
+ }
+ })?;
+
+ let group_layouts = match desc.layout {
+ Some(pipeline_layout_id) => Device::get_introspection_bind_group_layouts(
+ pipeline_layout_guard
+ .get(pipeline_layout_id)
+ .map_err(|_| pipeline::CreateRenderPipelineError::InvalidLayout)?,
+ &*bgl_guard,
+ ),
+ None => validation::IntrospectionBindGroupLayouts::Derived(
+ &mut derived_group_layouts,
+ ),
+ };
+
+ if validated_stages == wgt::ShaderStage::VERTEX {
+ if let Some(ref module) = shader_module.module {
+ interface = validation::check_stage(
+ module,
+ group_layouts,
+ &entry_point_name,
+ flag,
+ interface,
+ )
+ .map_err(|error| {
+ pipeline::CreateRenderPipelineError::Stage { flag, error }
+ })?;
+ validated_stages |= flag;
+ }
+ }
+
+ Some(hal::pso::EntryPoint::<B> {
+ entry: &entry_point_name,
+ module: &shader_module.raw,
+ specialization: hal::pso::Specialization::EMPTY,
+ })
+ }
+ None => None,
+ };
+
+ if validated_stages.contains(wgt::ShaderStage::FRAGMENT) {
+ for (i, state) in color_states.iter().enumerate() {
+ match interface.get(&(i as wgt::ShaderLocation)) {
+ Some(output) if validation::check_texture_format(state.format, output) => {}
+ Some(output) => {
+ tracing::warn!(
+ "Incompatible fragment output[{}] from shader: {:?}, expected {:?}",
+ i,
+ &**output,
+ state.format,
+ );
+ return Err(
+ pipeline::CreateRenderPipelineError::IncompatibleOutputFormat {
+ index: i as u8,
+ },
+ );
+ }
+ None if state.write_mask.is_empty() => {}
+ None => {
+ tracing::warn!("Missing fragment output[{}], expected {:?}", i, state,);
+ return Err(pipeline::CreateRenderPipelineError::MissingOutput {
+ index: i as u8,
+ });
+ }
+ }
+ }
+ }
+ let last_stage = match desc.fragment_stage {
+ Some(_) => wgt::ShaderStage::FRAGMENT,
+ None => wgt::ShaderStage::VERTEX,
+ };
+ if desc.layout.is_none() && !validated_stages.contains(last_stage) {
+ return Err(pipeline::ImplicitLayoutError::ReflectionError(last_stage).into());
+ }
+
+ let primitive_assembler = hal::pso::PrimitiveAssemblerDesc::Vertex {
+ buffers: &vertex_buffers,
+ attributes: &attributes,
+ input_assembler,
+ vertex,
+ tessellation: None,
+ geometry: None,
+ };
+
+ // TODO
+ let flags = hal::pso::PipelineCreationFlags::empty();
+ // TODO
+ let parent = hal::pso::BasePipeline::None;
+
+ let (pipeline_layout_id, derived_bind_group_count) = match desc.layout {
+ Some(id) => (id, 0),
+ None => self.derive_pipeline_layout(
+ self_id,
+ implicit_pipeline_ids,
+ derived_group_layouts,
+ &mut *bgl_guard,
+ &mut *pipeline_layout_guard,
+ &hub,
+ )?,
+ };
+ let layout = pipeline_layout_guard
+ .get(pipeline_layout_id)
+ .map_err(|_| pipeline::CreateRenderPipelineError::InvalidLayout)?;
+
+ let mut render_pass_cache = self.render_passes.lock();
+ let pipeline_desc = hal::pso::GraphicsPipelineDesc {
+ primitive_assembler,
+ rasterizer,
+ fragment,
+ blender,
+ depth_stencil,
+ multisampling,
+ baked_states,
+ layout: &layout.raw,
+ subpass: hal::pass::Subpass {
+ index: 0,
+ main_pass: match render_pass_cache.entry(rp_key) {
+ Entry::Occupied(e) => e.into_mut(),
+ Entry::Vacant(e) => {
+ let pass = self
+ .create_compatible_render_pass(e.key())
+ .or(Err(DeviceError::OutOfMemory))?;
+ e.insert(pass)
+ }
+ },
+ },
+ flags,
+ parent,
+ };
+ // TODO: cache
+ let raw = unsafe {
+ self.raw
+ .create_graphics_pipeline(&pipeline_desc, None)
+ .map_err(|err| match err {
+ hal::pso::CreationError::OutOfMemory(_) => DeviceError::OutOfMemory,
+ _ => panic!("failed to create graphics pipeline: {}", err),
+ })?
+ };
+        if desc.label.is_some() {
+ //TODO-0.6: self.set_graphics_pipeline_name(&mut raw, label)
+ }
+
+ let pass_context = RenderPassContext {
+ attachments: AttachmentData {
+ colors: color_states.iter().map(|state| state.format).collect(),
+ resolves: ArrayVec::new(),
+ depth_stencil: depth_stencil_state
+ .as_ref()
+ .map(|state| state.format.clone()),
+ },
+ sample_count: samples,
+ };
+
+ let mut flags = pipeline::PipelineFlags::empty();
+ for state in color_states.iter() {
+ if state.color_blend.uses_color() | state.alpha_blend.uses_color() {
+ flags |= pipeline::PipelineFlags::BLEND_COLOR;
+ }
+ }
+ if let Some(ds) = depth_stencil_state.as_ref() {
+ if ds.stencil.is_enabled() && ds.stencil.needs_ref_value() {
+ flags |= pipeline::PipelineFlags::STENCIL_REFERENCE;
+ }
+ if !ds.is_read_only() {
+ flags |= pipeline::PipelineFlags::WRITES_DEPTH_STENCIL;
+ }
+ }
+
+ let pipeline = pipeline::RenderPipeline {
+ raw,
+ layout_id: Stored {
+ value: id::Valid(pipeline_layout_id),
+ ref_count: layout.life_guard.add_ref(),
+ },
+ device_id: Stored {
+ value: id::Valid(self_id),
+ ref_count: self.life_guard.add_ref(),
+ },
+ pass_context,
+ flags,
+ index_format: desc.vertex_state.index_format,
+ vertex_strides,
+ life_guard: LifeGuard::new(desc.label.borrow_or_default()),
+ };
+ Ok((pipeline, derived_bind_group_count, pipeline_layout_id))
+ }
+
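+    /// Block until the given submission index has been processed by the GPU,
+    /// triaging completed submissions along the way.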
+ fn wait_for_submit(
+ &self,
+ submission_index: SubmissionIndex,
+ token: &mut Token<Self>,
+ ) -> Result<(), WaitIdleError> {
+ if self.last_completed_submission_index() <= submission_index {
+ tracing::info!("Waiting for submission {:?}", submission_index);
+ self.lock_life(token)
+ .triage_submissions(&self.raw, true)
+ .map(|_| ())
+ } else {
+ Ok(())
+ }
+ }
+}
+
+impl<B: hal::Backend> Device<B> {
+ pub(crate) fn destroy_bind_group(&self, bind_group: binding_model::BindGroup<B>) {
+ self.desc_allocator
+ .lock()
+ .free(&self.raw, iter::once(bind_group.raw));
+ }
+
+ pub(crate) fn destroy_buffer(&self, buffer: resource::Buffer<B>) {
+ if let Some((raw, memory)) = buffer.raw {
+ unsafe {
+ self.mem_allocator.lock().free(&self.raw, memory);
+ self.raw.destroy_buffer(raw);
+ }
+ }
+ }
+
+ pub(crate) fn destroy_texture(&self, texture: resource::Texture<B>) {
+ if let Some((raw, memory)) = texture.raw {
+ unsafe {
+ self.mem_allocator.lock().free(&self.raw, memory);
+ self.raw.destroy_image(raw);
+ }
+ }
+ }
+
+ /// Wait for idle and remove resources that we can, before we die.
+ pub(crate) fn prepare_to_die(&mut self) {
+ let mut life_tracker = self.life_tracker.lock();
+ if let Err(error) = life_tracker.triage_submissions(&self.raw, true) {
+ tracing::error!("failed to triage submissions: {}", error);
+ }
+ life_tracker.cleanup(&self.raw, &self.mem_allocator, &self.desc_allocator);
+ }
+
+ pub(crate) fn dispose(self) {
+ let mut desc_alloc = self.desc_allocator.into_inner();
+ let mut mem_alloc = self.mem_allocator.into_inner();
+ self.pending_writes
+ .dispose(&self.raw, &self.cmd_allocator, &mut mem_alloc);
+ self.cmd_allocator.destroy(&self.raw);
+ unsafe {
+ desc_alloc.cleanup(&self.raw);
+ mem_alloc.clear(&self.raw);
+ for (_, rp) in self.render_passes.lock().drain() {
+ self.raw.destroy_render_pass(rp);
+ }
+ for (_, fbo) in self.framebuffers.lock().drain() {
+ self.raw.destroy_framebuffer(fbo);
+ }
+ }
+ }
+}
+
+impl<B: hal::Backend> crate::hub::Resource for Device<B> {
+ const TYPE: &'static str = "Device";
+
+ fn life_guard(&self) -> &LifeGuard {
+ &self.life_guard
+ }
+}
+
+#[error("device is invalid")]
+#[derive(Clone, Debug, Error)]
+pub struct InvalidDevice;
+
+#[derive(Clone, Debug, Error)]
+pub enum DeviceError {
+ #[error("parent device is invalid")]
+ Invalid,
+ #[error("parent device is lost")]
+ Lost,
+ #[error("not enough memory left")]
+ OutOfMemory,
+}
+
+impl From<hal::device::OomOrDeviceLost> for DeviceError {
+ fn from(err: hal::device::OomOrDeviceLost) -> Self {
+ match err {
+ hal::device::OomOrDeviceLost::OutOfMemory(_) => Self::OutOfMemory,
+ hal::device::OomOrDeviceLost::DeviceLost(_) => Self::Lost,
+ }
+ }
+}
+
+impl From<gpu_alloc::MapError> for DeviceError {
+ fn from(err: gpu_alloc::MapError) -> Self {
+ match err {
+ gpu_alloc::MapError::OutOfDeviceMemory | gpu_alloc::MapError::OutOfHostMemory => {
+ DeviceError::OutOfMemory
+ }
+ _ => panic!("failed to map buffer: {}", err),
+ }
+ }
+}
+
+impl DeviceError {
+ fn from_bind(err: hal::device::BindError) -> Self {
+ match err {
+ hal::device::BindError::OutOfMemory(_) => Self::OutOfMemory,
+ _ => panic!("failed to bind memory: {}", err),
+ }
+ }
+}
+
+pub struct ImplicitPipelineIds<'a, G: GlobalIdentityHandlerFactory> {
+ pub root_id: Input<G, id::PipelineLayoutId>,
+ pub group_ids: &'a [Input<G, id::BindGroupLayoutId>],
+}
+
+impl<G: GlobalIdentityHandlerFactory> Global<G> {
+ pub fn device_features<B: GfxBackend>(
+ &self,
+ device_id: id::DeviceId,
+ ) -> Result<wgt::Features, InvalidDevice> {
+ span!(_guard, INFO, "Device::features");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+ let (device_guard, _) = hub.devices.read(&mut token);
+ let device = device_guard.get(device_id).map_err(|_| InvalidDevice)?;
+
+ Ok(device.features)
+ }
+
+ pub fn device_limits<B: GfxBackend>(
+ &self,
+ device_id: id::DeviceId,
+ ) -> Result<wgt::Limits, InvalidDevice> {
+ span!(_guard, INFO, "Device::limits");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+ let (device_guard, _) = hub.devices.read(&mut token);
+ let device = device_guard.get(device_id).map_err(|_| InvalidDevice)?;
+
+ Ok(device.limits.clone())
+ }
+
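+    /// Create a buffer on the given device and register it under `id_in`.
+    ///
+    /// On error, an error placeholder is registered instead, so the caller
+    /// always receives a usable `BufferId` along with the optional error.
+    /// A minimal usage sketch (hypothetical device, backend, and input IDs,
+    /// not taken from this crate's tests):
+    ///
+    /// ```ignore
+    /// let desc = wgt::BufferDescriptor {
+    ///     label: Some(std::borrow::Cow::Borrowed("staging")),
+    ///     size: 1024,
+    ///     usage: wgt::BufferUsage::MAP_WRITE | wgt::BufferUsage::COPY_SRC,
+    ///     mapped_at_creation: false,
+    /// };
+    /// let (buffer_id, error) = global.device_create_buffer::<Backend>(device_id, &desc, id_in);
+    /// assert!(error.is_none());
+    /// ```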
+ pub fn device_create_buffer<B: GfxBackend>(
+ &self,
+ device_id: id::DeviceId,
+ desc: &resource::BufferDescriptor,
+ id_in: Input<G, id::BufferId>,
+ ) -> (id::BufferId, Option<resource::CreateBufferError>) {
+ span!(_guard, INFO, "Device::create_buffer");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+
+ tracing::info!("Create buffer {:?} with ID {:?}", desc, id_in);
+
+ let (device_guard, mut token) = hub.devices.read(&mut token);
+ let error = loop {
+ let device = match device_guard.get(device_id) {
+ Ok(device) => device,
+ Err(_) => break DeviceError::Invalid.into(),
+ };
+ let mut buffer = match device.create_buffer(device_id, desc, false) {
+ Ok(buffer) => buffer,
+ Err(e) => break e,
+ };
+ let ref_count = buffer.life_guard.add_ref();
+
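+            // Three initialization paths: not mapped at creation, mapped
+            // directly for writing (when the usage includes MAP_WRITE), or
+            // initialized through a temporary staging buffer otherwise.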
+ let buffer_use = if !desc.mapped_at_creation {
+ resource::BufferUse::EMPTY
+ } else if desc.usage.contains(wgt::BufferUsage::MAP_WRITE) {
+                // buffer is mappable, so we map it right away at creation
+ let map_size = buffer.size;
+ let ptr = match map_buffer(&device.raw, &mut buffer, 0, map_size, HostMap::Write) {
+ Ok(ptr) => ptr,
+ Err(e) => {
+ let (raw, memory) = buffer.raw.unwrap();
+ device.lock_life(&mut token).schedule_resource_destruction(
+ queue::TempResource::Buffer(raw),
+ memory,
+ !0,
+ );
+ break e.into();
+ }
+ };
+ buffer.map_state = resource::BufferMapState::Active {
+ ptr,
+ sub_range: hal::buffer::SubRange::WHOLE,
+ host: HostMap::Write,
+ };
+ resource::BufferUse::MAP_WRITE
+ } else {
+ // buffer needs staging area for initialization only
+ let stage_desc = wgt::BufferDescriptor {
+ label: Some(Cow::Borrowed("<init_buffer>")),
+ size: desc.size,
+ usage: wgt::BufferUsage::MAP_WRITE | wgt::BufferUsage::COPY_SRC,
+ mapped_at_creation: false,
+ };
+ let stage = match device.create_buffer(device_id, &stage_desc, true) {
+ Ok(stage) => stage,
+ Err(e) => {
+ let (raw, memory) = buffer.raw.unwrap();
+ device.lock_life(&mut token).schedule_resource_destruction(
+ queue::TempResource::Buffer(raw),
+ memory,
+ !0,
+ );
+ break e;
+ }
+ };
+ let (stage_buffer, mut stage_memory) = stage.raw.unwrap();
+ let ptr = match stage_memory.map(&device.raw, 0, stage.size) {
+ Ok(ptr) => ptr,
+ Err(e) => {
+ let (raw, memory) = buffer.raw.unwrap();
+ let mut life_lock = device.lock_life(&mut token);
+ life_lock.schedule_resource_destruction(
+ queue::TempResource::Buffer(raw),
+ memory,
+ !0,
+ );
+ life_lock.schedule_resource_destruction(
+ queue::TempResource::Buffer(stage_buffer),
+ stage_memory,
+ !0,
+ );
+ break e.into();
+ }
+ };
+ buffer.map_state = resource::BufferMapState::Init {
+ ptr,
+ needs_flush: !stage_memory.is_coherent(),
+ stage_buffer,
+ stage_memory,
+ };
+ resource::BufferUse::COPY_DST
+ };
+
+ let id = hub.buffers.register_identity(id_in, buffer, &mut token);
+ tracing::info!("Created buffer {:?} with {:?}", id, desc);
+ #[cfg(feature = "trace")]
+ if let Some(ref trace) = device.trace {
+ let mut desc = desc.clone();
+ let mapped_at_creation = mem::replace(&mut desc.mapped_at_creation, false);
+ if mapped_at_creation && !desc.usage.contains(wgt::BufferUsage::MAP_WRITE) {
+ desc.usage |= wgt::BufferUsage::COPY_DST;
+ }
+ trace.lock().add(trace::Action::CreateBuffer(id.0, desc));
+ }
+
+ device
+ .trackers
+ .lock()
+ .buffers
+ .init(id, ref_count, BufferState::with_usage(buffer_use))
+ .unwrap();
+ return (id.0, None);
+ };
+
+ let id = hub
+ .buffers
+ .register_error(id_in, desc.label.borrow_or_default(), &mut token);
+ (id, Some(error))
+ }
+
+ #[cfg(feature = "replay")]
+ pub fn device_wait_for_buffer<B: GfxBackend>(
+ &self,
+ device_id: id::DeviceId,
+ buffer_id: id::BufferId,
+ ) -> Result<(), WaitIdleError> {
+ let hub = B::hub(self);
+ let mut token = Token::root();
+ let (device_guard, mut token) = hub.devices.read(&mut token);
+ let last_submission = {
+ let (buffer_guard, _) = hub.buffers.write(&mut token);
+ match buffer_guard.get(buffer_id) {
+ Ok(buffer) => buffer.life_guard.submission_index.load(Ordering::Acquire),
+ Err(_) => return Ok(()),
+ }
+ };
+
+ device_guard
+ .get(device_id)
+ .map_err(|_| DeviceError::Invalid)?
+ .wait_for_submit(last_submission, &mut token)
+ }
+
+ pub fn device_set_buffer_sub_data<B: GfxBackend>(
+ &self,
+ device_id: id::DeviceId,
+ buffer_id: id::BufferId,
+ offset: BufferAddress,
+ data: &[u8],
+ ) -> Result<(), resource::BufferAccessError> {
+ span!(_guard, INFO, "Device::set_buffer_sub_data");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+
+ let (device_guard, mut token) = hub.devices.read(&mut token);
+ let (mut buffer_guard, _) = hub.buffers.write(&mut token);
+ let device = device_guard
+ .get(device_id)
+ .map_err(|_| DeviceError::Invalid)?;
+ let buffer = buffer_guard
+ .get_mut(buffer_id)
+ .map_err(|_| resource::BufferAccessError::Invalid)?;
+ check_buffer_usage(buffer.usage, wgt::BufferUsage::MAP_WRITE)?;
+        //TODO: assert that the buffer isn't currently used by the GPU
+
+ #[cfg(feature = "trace")]
+ if let Some(ref trace) = device.trace {
+ let mut trace = trace.lock();
+ let data_path = trace.make_binary("bin", data);
+ trace.add(trace::Action::WriteBuffer {
+ id: buffer_id,
+ data: data_path,
+ range: offset..offset + data.len() as BufferAddress,
+ queued: false,
+ });
+ }
+
+ let (_, block) = buffer.raw.as_mut().unwrap();
+ block.write_bytes(&device.raw, offset, data)?;
+
+ Ok(())
+ }
+
+ pub fn device_get_buffer_sub_data<B: GfxBackend>(
+ &self,
+ device_id: id::DeviceId,
+ buffer_id: id::BufferId,
+ offset: BufferAddress,
+ data: &mut [u8],
+ ) -> Result<(), resource::BufferAccessError> {
+ span!(_guard, INFO, "Device::get_buffer_sub_data");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+
+ let (device_guard, mut token) = hub.devices.read(&mut token);
+ let (mut buffer_guard, _) = hub.buffers.write(&mut token);
+ let device = device_guard
+ .get(device_id)
+ .map_err(|_| DeviceError::Invalid)?;
+ let buffer = buffer_guard
+ .get_mut(buffer_id)
+ .map_err(|_| resource::BufferAccessError::Invalid)?;
+ check_buffer_usage(buffer.usage, wgt::BufferUsage::MAP_READ)?;
+        //TODO: assert that the buffer isn't currently used by the GPU
+
+ let (_, block) = buffer.raw.as_mut().unwrap();
+ block.read_bytes(&device.raw, offset, data)?;
+
+ Ok(())
+ }
+
+ pub fn buffer_label<B: GfxBackend>(&self, id: id::BufferId) -> String {
+ B::hub(self).buffers.label_for_resource(id)
+ }
+
+ pub fn buffer_destroy<B: GfxBackend>(
+ &self,
+ buffer_id: id::BufferId,
+ ) -> Result<(), resource::DestroyError> {
+ span!(_guard, INFO, "Buffer::destroy");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+
+ //TODO: lock pending writes separately, keep the device read-only
+ let (mut device_guard, mut token) = hub.devices.write(&mut token);
+
+ tracing::info!("Buffer {:?} is destroyed", buffer_id);
+ let (mut buffer_guard, _) = hub.buffers.write(&mut token);
+ let buffer = buffer_guard
+ .get_mut(buffer_id)
+ .map_err(|_| resource::DestroyError::Invalid)?;
+
+ let device = &mut device_guard[buffer.device_id.value];
+
+ #[cfg(feature = "trace")]
+ if let Some(ref trace) = device.trace {
+ trace.lock().add(trace::Action::FreeBuffer(buffer_id));
+ }
+
+ let (raw, memory) = buffer
+ .raw
+ .take()
+ .ok_or(resource::DestroyError::AlreadyDestroyed)?;
+ let temp = queue::TempResource::Buffer(raw);
+
+ if device.pending_writes.dst_buffers.contains(&buffer_id) {
+ device.pending_writes.temp_resources.push((temp, memory));
+ } else {
+ let last_submit_index = buffer.life_guard.submission_index.load(Ordering::Acquire);
+ drop(buffer_guard);
+ device.lock_life(&mut token).schedule_resource_destruction(
+ temp,
+ memory,
+ last_submit_index,
+ );
+ }
+
+ Ok(())
+ }
+
+ pub fn buffer_drop<B: GfxBackend>(&self, buffer_id: id::BufferId, wait: bool) {
+ span!(_guard, INFO, "Buffer::drop");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+
+ tracing::info!("Buffer {:?} is dropped", buffer_id);
+ let (ref_count, last_submit_index, device_id) = {
+ let (mut buffer_guard, _) = hub.buffers.write(&mut token);
+ match buffer_guard.get_mut(buffer_id) {
+ Ok(buffer) => {
+ let ref_count = buffer.life_guard.ref_count.take().unwrap();
+ let last_submit_index =
+ buffer.life_guard.submission_index.load(Ordering::Acquire);
+ (ref_count, last_submit_index, buffer.device_id.value)
+ }
+ Err(InvalidId) => {
+ hub.buffers.unregister_locked(buffer_id, &mut *buffer_guard);
+ return;
+ }
+ }
+ };
+
+ let (device_guard, mut token) = hub.devices.read(&mut token);
+ let device = &device_guard[device_id];
+ {
+ let mut life_lock = device.lock_life(&mut token);
+ if device.pending_writes.dst_buffers.contains(&buffer_id) {
+ life_lock.future_suspected_buffers.push(Stored {
+ value: id::Valid(buffer_id),
+ ref_count,
+ });
+ } else {
+ drop(ref_count);
+ life_lock
+ .suspected_resources
+ .buffers
+ .push(id::Valid(buffer_id));
+ }
+ }
+
+ if wait {
+ match device.wait_for_submit(last_submit_index, &mut token) {
+ Ok(()) => (),
+ Err(e) => tracing::error!("Failed to wait for buffer {:?}: {:?}", buffer_id, e),
+ }
+ }
+ }
+
+ pub fn device_create_texture<B: GfxBackend>(
+ &self,
+ device_id: id::DeviceId,
+ desc: &resource::TextureDescriptor,
+ id_in: Input<G, id::TextureId>,
+ ) -> (id::TextureId, Option<resource::CreateTextureError>) {
+ span!(_guard, INFO, "Device::create_texture");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+
+ let (device_guard, mut token) = hub.devices.read(&mut token);
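+        // Note: here and in the other creation functions below, the `loop` never
+        // iterates; it is a block we can `break` out of early with an error,
+        // which is then registered against the requested ID.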
+ let error = loop {
+ let device = match device_guard.get(device_id) {
+ Ok(device) => device,
+ Err(_) => break DeviceError::Invalid.into(),
+ };
+ let texture = match device.create_texture(device_id, desc) {
+ Ok(texture) => texture,
+ Err(error) => break error,
+ };
+ let num_levels = texture.full_range.levels.end;
+ let num_layers = texture.full_range.layers.end;
+ let ref_count = texture.life_guard.add_ref();
+
+ let id = hub.textures.register_identity(id_in, texture, &mut token);
+ tracing::info!("Created texture {:?} with {:?}", id, desc);
+ #[cfg(feature = "trace")]
+ if let Some(ref trace) = device.trace {
+ trace
+ .lock()
+ .add(trace::Action::CreateTexture(id.0, desc.clone()));
+ }
+
+ device
+ .trackers
+ .lock()
+ .textures
+ .init(id, ref_count, TextureState::new(num_levels, num_layers))
+ .unwrap();
+ return (id.0, None);
+ };
+
+ let id = hub
+ .textures
+ .register_error(id_in, desc.label.borrow_or_default(), &mut token);
+ (id, Some(error))
+ }
+
+ pub fn texture_label<B: GfxBackend>(&self, id: id::TextureId) -> String {
+ B::hub(self).textures.label_for_resource(id)
+ }
+
+ pub fn texture_destroy<B: GfxBackend>(
+ &self,
+ texture_id: id::TextureId,
+ ) -> Result<(), resource::DestroyError> {
+ span!(_guard, INFO, "Texture::destroy");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+
+ //TODO: lock pending writes separately, keep the device read-only
+ let (mut device_guard, mut token) = hub.devices.write(&mut token);
+
+ tracing::info!("Buffer {:?} is destroyed", texture_id);
+ let (mut texture_guard, _) = hub.textures.write(&mut token);
+ let texture = texture_guard
+ .get_mut(texture_id)
+ .map_err(|_| resource::DestroyError::Invalid)?;
+
+ let device = &mut device_guard[texture.device_id.value];
+
+ #[cfg(feature = "trace")]
+ if let Some(ref trace) = device.trace {
+ trace.lock().add(trace::Action::FreeTexture(texture_id));
+ }
+
+ let (raw, memory) = texture
+ .raw
+ .take()
+ .ok_or(resource::DestroyError::AlreadyDestroyed)?;
+ let temp = queue::TempResource::Image(raw);
+
+ if device.pending_writes.dst_textures.contains(&texture_id) {
+ device.pending_writes.temp_resources.push((temp, memory));
+ } else {
+ let last_submit_index = texture.life_guard.submission_index.load(Ordering::Acquire);
+ drop(texture_guard);
+ device.lock_life(&mut token).schedule_resource_destruction(
+ temp,
+ memory,
+ last_submit_index,
+ );
+ }
+
+ Ok(())
+ }
+
+ pub fn texture_drop<B: GfxBackend>(&self, texture_id: id::TextureId, wait: bool) {
+ span!(_guard, INFO, "Texture::drop");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+
+ let (ref_count, last_submit_index, device_id) = {
+ let (mut texture_guard, _) = hub.textures.write(&mut token);
+ match texture_guard.get_mut(texture_id) {
+ Ok(texture) => {
+ let ref_count = texture.life_guard.ref_count.take().unwrap();
+ let last_submit_index =
+ texture.life_guard.submission_index.load(Ordering::Acquire);
+ (ref_count, last_submit_index, texture.device_id.value)
+ }
+ Err(InvalidId) => {
+ hub.textures
+ .unregister_locked(texture_id, &mut *texture_guard);
+ return;
+ }
+ }
+ };
+
+ let (device_guard, mut token) = hub.devices.read(&mut token);
+ let device = &device_guard[device_id];
+ {
+ let mut life_lock = device.lock_life(&mut token);
+ if device.pending_writes.dst_textures.contains(&texture_id) {
+ life_lock.future_suspected_textures.push(Stored {
+ value: id::Valid(texture_id),
+ ref_count,
+ });
+ } else {
+ drop(ref_count);
+ life_lock
+ .suspected_resources
+ .textures
+ .push(id::Valid(texture_id));
+ }
+ }
+
+ if wait {
+ match device.wait_for_submit(last_submit_index, &mut token) {
+ Ok(()) => (),
+ Err(e) => tracing::error!("Failed to wait for texture {:?}: {:?}", texture_id, e),
+ }
+ }
+ }
+
+ pub fn texture_create_view<B: GfxBackend>(
+ &self,
+ texture_id: id::TextureId,
+ desc: &resource::TextureViewDescriptor,
+ id_in: Input<G, id::TextureViewId>,
+ ) -> (id::TextureViewId, Option<resource::CreateTextureViewError>) {
+ span!(_guard, INFO, "Texture::create_view");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+
+ let (device_guard, mut token) = hub.devices.read(&mut token);
+ let (texture_guard, mut token) = hub.textures.read(&mut token);
+ let error = loop {
+ let texture = match texture_guard.get(texture_id) {
+ Ok(texture) => texture,
+ Err(_) => break resource::CreateTextureViewError::InvalidTexture,
+ };
+ let device = &device_guard[texture.device_id.value];
+
+ let view = match device.create_texture_view(texture, texture_id, desc) {
+ Ok(view) => view,
+ Err(e) => break e,
+ };
+ let ref_count = view.life_guard.add_ref();
+
+ let id = hub.texture_views.register_identity(id_in, view, &mut token);
+ #[cfg(feature = "trace")]
+ if let Some(ref trace) = device.trace {
+ trace.lock().add(trace::Action::CreateTextureView {
+ id: id.0,
+ parent_id: texture_id,
+ desc: desc.clone(),
+ });
+ }
+
+ device
+ .trackers
+ .lock()
+ .views
+ .init(id, ref_count, PhantomData)
+ .unwrap();
+ return (id.0, None);
+ };
+
+ let id =
+ hub.texture_views
+ .register_error(id_in, desc.label.borrow_or_default(), &mut token);
+ (id, Some(error))
+ }
+
+ pub fn texture_view_label<B: GfxBackend>(&self, id: id::TextureViewId) -> String {
+ B::hub(self).texture_views.label_for_resource(id)
+ }
+
+ pub fn texture_view_drop<B: GfxBackend>(
+ &self,
+ texture_view_id: id::TextureViewId,
+ ) -> Result<(), resource::TextureViewDestroyError> {
+ span!(_guard, INFO, "TextureView::drop");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+
+ let device_id = {
+ let (texture_guard, mut token) = hub.textures.read(&mut token);
+ let (mut texture_view_guard, _) = hub.texture_views.write(&mut token);
+
+ match texture_view_guard.get_mut(texture_view_id) {
+ Ok(view) => {
+ view.life_guard.ref_count.take();
+ match view.inner {
+ resource::TextureViewInner::Native { ref source_id, .. } => {
+ texture_guard[source_id.value].device_id.value
+ }
+ resource::TextureViewInner::SwapChain { .. } => {
+ return Err(resource::TextureViewDestroyError::SwapChainImage)
+ }
+ }
+ }
+ Err(InvalidId) => {
+ hub.texture_views
+ .unregister_locked(texture_view_id, &mut *texture_view_guard);
+ return Ok(());
+ }
+ }
+ };
+
+ let (device_guard, mut token) = hub.devices.read(&mut token);
+ device_guard[device_id]
+ .lock_life(&mut token)
+ .suspected_resources
+ .texture_views
+ .push(id::Valid(texture_view_id));
+ Ok(())
+ }
+
+ pub fn device_create_sampler<B: GfxBackend>(
+ &self,
+ device_id: id::DeviceId,
+ desc: &resource::SamplerDescriptor,
+ id_in: Input<G, id::SamplerId>,
+ ) -> (id::SamplerId, Option<resource::CreateSamplerError>) {
+ span!(_guard, INFO, "Device::create_sampler");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+ let (device_guard, mut token) = hub.devices.read(&mut token);
+
+ let error = loop {
+ let device = match device_guard.get(device_id) {
+ Ok(device) => device,
+ Err(_) => break DeviceError::Invalid.into(),
+ };
+
+ let sampler = match device.create_sampler(device_id, desc) {
+ Ok(sampler) => sampler,
+ Err(e) => break e,
+ };
+ let ref_count = sampler.life_guard.add_ref();
+
+ let id = hub.samplers.register_identity(id_in, sampler, &mut token);
+ #[cfg(feature = "trace")]
+ if let Some(ref trace) = device.trace {
+ trace
+ .lock()
+ .add(trace::Action::CreateSampler(id.0, desc.clone()));
+ }
+
+ device
+ .trackers
+ .lock()
+ .samplers
+ .init(id, ref_count, PhantomData)
+ .unwrap();
+ return (id.0, None);
+ };
+
+ let id = hub
+ .samplers
+ .register_error(id_in, desc.label.borrow_or_default(), &mut token);
+ (id, Some(error))
+ }
+
+ pub fn sampler_label<B: GfxBackend>(&self, id: id::SamplerId) -> String {
+ B::hub(self).samplers.label_for_resource(id)
+ }
+
+ pub fn sampler_drop<B: GfxBackend>(&self, sampler_id: id::SamplerId) {
+ span!(_guard, INFO, "Sampler::drop");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+
+ let device_id = {
+ let (mut sampler_guard, _) = hub.samplers.write(&mut token);
+ match sampler_guard.get_mut(sampler_id) {
+ Ok(sampler) => {
+ sampler.life_guard.ref_count.take();
+ sampler.device_id.value
+ }
+ Err(InvalidId) => {
+ hub.samplers
+ .unregister_locked(sampler_id, &mut *sampler_guard);
+ return;
+ }
+ }
+ };
+
+ let (device_guard, mut token) = hub.devices.read(&mut token);
+ device_guard[device_id]
+ .lock_life(&mut token)
+ .suspected_resources
+ .samplers
+ .push(id::Valid(sampler_id));
+ }
+
+ pub fn device_create_bind_group_layout<B: GfxBackend>(
+ &self,
+ device_id: id::DeviceId,
+ desc: &binding_model::BindGroupLayoutDescriptor,
+ id_in: Input<G, id::BindGroupLayoutId>,
+ ) -> (
+ id::BindGroupLayoutId,
+ Option<binding_model::CreateBindGroupLayoutError>,
+ ) {
+ span!(_guard, INFO, "Device::create_bind_group_layout");
+
+ let mut token = Token::root();
+ let hub = B::hub(self);
+
+ let error = 'outer: loop {
+ let mut entry_map = FastHashMap::default();
+ for entry in desc.entries.iter() {
+ if entry_map.insert(entry.binding, entry.clone()).is_some() {
+ break 'outer binding_model::CreateBindGroupLayoutError::ConflictBinding(
+ entry.binding,
+ );
+ }
+ }
+
+ let (device_guard, mut token) = hub.devices.read(&mut token);
+ let device = match device_guard.get(device_id) {
+ Ok(device) => device,
+ Err(_) => break DeviceError::Invalid.into(),
+ };
+
+ // If there is an equivalent BGL, just bump the refcount and return it.
+ // This is only applicable for identity filters that are generating new IDs,
+ // so their inputs are `PhantomData` of size 0.
+ if mem::size_of::<Input<G, id::BindGroupLayoutId>>() == 0 {
+ let (bgl_guard, _) = hub.bind_group_layouts.read(&mut token);
+ if let Some(id) =
+ Device::deduplicate_bind_group_layout(device_id, &entry_map, &*bgl_guard)
+ {
+ return (id, None);
+ }
+ }
+
+ let layout = match device.create_bind_group_layout(
+ device_id,
+ desc.label.as_ref().map(|cow| cow.as_ref()),
+ entry_map,
+ ) {
+ Ok(layout) => layout,
+ Err(e) => break e,
+ };
+
+ let id = hub
+ .bind_group_layouts
+ .register_identity(id_in, layout, &mut token);
+ #[cfg(feature = "trace")]
+ if let Some(ref trace) = device.trace {
+ trace
+ .lock()
+ .add(trace::Action::CreateBindGroupLayout(id.0, desc.clone()));
+ }
+ return (id.0, None);
+ };
+
+ let id = hub.bind_group_layouts.register_error(
+ id_in,
+ desc.label.borrow_or_default(),
+ &mut token,
+ );
+ (id, Some(error))
+ }
+
+ pub fn bind_group_layout_label<B: GfxBackend>(&self, id: id::BindGroupLayoutId) -> String {
+ B::hub(self).bind_group_layouts.label_for_resource(id)
+ }
+
+ pub fn bind_group_layout_drop<B: GfxBackend>(
+ &self,
+ bind_group_layout_id: id::BindGroupLayoutId,
+ ) {
+ span!(_guard, INFO, "BindGroupLayout::drop");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+ let device_id = {
+ let (mut bind_group_layout_guard, _) = hub.bind_group_layouts.write(&mut token);
+ match bind_group_layout_guard.get_mut(bind_group_layout_id) {
+ Ok(layout) => layout.device_id.value,
+ Err(InvalidId) => {
+ hub.bind_group_layouts
+ .unregister_locked(bind_group_layout_id, &mut *bind_group_layout_guard);
+ return;
+ }
+ }
+ };
+
+ let (device_guard, mut token) = hub.devices.read(&mut token);
+ device_guard[device_id]
+ .lock_life(&mut token)
+ .suspected_resources
+ .bind_group_layouts
+ .push(id::Valid(bind_group_layout_id));
+ }
+
+ pub fn device_create_pipeline_layout<B: GfxBackend>(
+ &self,
+ device_id: id::DeviceId,
+ desc: &binding_model::PipelineLayoutDescriptor,
+ id_in: Input<G, id::PipelineLayoutId>,
+ ) -> (
+ id::PipelineLayoutId,
+ Option<binding_model::CreatePipelineLayoutError>,
+ ) {
+ span!(_guard, INFO, "Device::create_pipeline_layout");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+
+ let (device_guard, mut token) = hub.devices.read(&mut token);
+ let error = loop {
+ let device = match device_guard.get(device_id) {
+ Ok(device) => device,
+ Err(_) => break DeviceError::Invalid.into(),
+ };
+
+ let layout = {
+ let (bgl_guard, _) = hub.bind_group_layouts.read(&mut token);
+ match device.create_pipeline_layout(device_id, desc, &*bgl_guard) {
+ Ok(layout) => layout,
+ Err(e) => break e,
+ }
+ };
+
+ let id = hub
+ .pipeline_layouts
+ .register_identity(id_in, layout, &mut token);
+ #[cfg(feature = "trace")]
+ if let Some(ref trace) = device.trace {
+ trace
+ .lock()
+ .add(trace::Action::CreatePipelineLayout(id.0, desc.clone()));
+ }
+ return (id.0, None);
+ };
+
+ let id =
+ hub.pipeline_layouts
+ .register_error(id_in, desc.label.borrow_or_default(), &mut token);
+ (id, Some(error))
+ }
+
+ pub fn pipeline_layout_label<B: GfxBackend>(&self, id: id::PipelineLayoutId) -> String {
+ B::hub(self).pipeline_layouts.label_for_resource(id)
+ }
+
+ pub fn pipeline_layout_drop<B: GfxBackend>(&self, pipeline_layout_id: id::PipelineLayoutId) {
+ span!(_guard, INFO, "PipelineLayout::drop");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+ let (device_id, ref_count) = {
+ let (mut pipeline_layout_guard, _) = hub.pipeline_layouts.write(&mut token);
+ match pipeline_layout_guard.get_mut(pipeline_layout_id) {
+ Ok(layout) => (
+ layout.device_id.value,
+ layout.life_guard.ref_count.take().unwrap(),
+ ),
+ Err(InvalidId) => {
+ hub.pipeline_layouts
+ .unregister_locked(pipeline_layout_id, &mut *pipeline_layout_guard);
+ return;
+ }
+ }
+ };
+
+ let (device_guard, mut token) = hub.devices.read(&mut token);
+ device_guard[device_id]
+ .lock_life(&mut token)
+ .suspected_resources
+ .pipeline_layouts
+ .push(Stored {
+ value: id::Valid(pipeline_layout_id),
+ ref_count,
+ });
+ }
+
+ pub fn device_create_bind_group<B: GfxBackend>(
+ &self,
+ device_id: id::DeviceId,
+ desc: &binding_model::BindGroupDescriptor,
+ id_in: Input<G, id::BindGroupId>,
+ ) -> (id::BindGroupId, Option<binding_model::CreateBindGroupError>) {
+ span!(_guard, INFO, "Device::create_bind_group");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+
+ let (device_guard, mut token) = hub.devices.read(&mut token);
+ let (bind_group_layout_guard, mut token) = hub.bind_group_layouts.read(&mut token);
+
+ let error = loop {
+ let device = match device_guard.get(device_id) {
+ Ok(device) => device,
+ Err(_) => break DeviceError::Invalid.into(),
+ };
+ let bind_group_layout = match bind_group_layout_guard.get(desc.layout) {
+ Ok(layout) => layout,
+ Err(_) => break binding_model::CreateBindGroupError::InvalidLayout,
+ };
+
+ let bind_group = match device.create_bind_group(
+ device_id,
+ bind_group_layout,
+ desc,
+ &hub,
+ &mut token,
+ ) {
+ Ok(bind_group) => bind_group,
+ Err(e) => break e,
+ };
+ let ref_count = bind_group.life_guard.add_ref();
+
+ let id = hub
+ .bind_groups
+ .register_identity(id_in, bind_group, &mut token);
+ tracing::debug!(
+ "Bind group {:?} {:#?}",
+ id,
+ hub.bind_groups.read(&mut token).0[id].used
+ );
+ #[cfg(feature = "trace")]
+ if let Some(ref trace) = device.trace {
+ trace
+ .lock()
+ .add(trace::Action::CreateBindGroup(id.0, desc.clone()));
+ }
+
+ device
+ .trackers
+ .lock()
+ .bind_groups
+ .init(id, ref_count, PhantomData)
+ .unwrap();
+ return (id.0, None);
+ };
+
+ let id = hub
+ .bind_groups
+ .register_error(id_in, desc.label.borrow_or_default(), &mut token);
+ (id, Some(error))
+ }
+
+ pub fn bind_group_label<B: GfxBackend>(&self, id: id::BindGroupId) -> String {
+ B::hub(self).bind_groups.label_for_resource(id)
+ }
+
+ pub fn bind_group_drop<B: GfxBackend>(&self, bind_group_id: id::BindGroupId) {
+ span!(_guard, INFO, "BindGroup::drop");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+
+ let device_id = {
+ let (mut bind_group_guard, _) = hub.bind_groups.write(&mut token);
+ match bind_group_guard.get_mut(bind_group_id) {
+ Ok(bind_group) => {
+ bind_group.life_guard.ref_count.take();
+ bind_group.device_id.value
+ }
+ Err(InvalidId) => {
+ hub.bind_groups
+ .unregister_locked(bind_group_id, &mut *bind_group_guard);
+ return;
+ }
+ }
+ };
+
+ let (device_guard, mut token) = hub.devices.read(&mut token);
+ device_guard[device_id]
+ .lock_life(&mut token)
+ .suspected_resources
+ .bind_groups
+ .push(id::Valid(bind_group_id));
+ }
+
+ pub fn device_create_shader_module<B: GfxBackend>(
+ &self,
+ device_id: id::DeviceId,
+ desc: &pipeline::ShaderModuleDescriptor,
+ id_in: Input<G, id::ShaderModuleId>,
+ ) -> (
+ id::ShaderModuleId,
+ Option<pipeline::CreateShaderModuleError>,
+ ) {
+ span!(_guard, INFO, "Device::create_shader_module");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+
+ let (device_guard, mut token) = hub.devices.read(&mut token);
+ let error = loop {
+ let device = match device_guard.get(device_id) {
+ Ok(device) => device,
+ Err(_) => break DeviceError::Invalid.into(),
+ };
+ let (shader, spv) = match device.create_shader_module(device_id, desc) {
+ Ok(pair) => pair,
+ Err(e) => break e,
+ };
+
+ let id = hub
+ .shader_modules
+ .register_identity(id_in, shader, &mut token);
+ #[cfg(feature = "trace")]
+ if let Some(ref trace) = device.trace {
+ let mut trace = trace.lock();
+ let data = trace.make_binary("spv", unsafe {
+ std::slice::from_raw_parts(spv.as_ptr() as *const u8, spv.len() * 4)
+ });
+ let label = desc.label.clone();
+ trace.add(trace::Action::CreateShaderModule {
+ id: id.0,
+ data,
+ label,
+ });
+ }
+
+ let _ = spv;
+ return (id.0, None);
+ };
+
+ let id =
+ hub.shader_modules
+ .register_error(id_in, desc.label.borrow_or_default(), &mut token);
+ (id, Some(error))
+ }
+
+ pub fn shader_module_label<B: GfxBackend>(&self, id: id::ShaderModuleId) -> String {
+ B::hub(self).shader_modules.label_for_resource(id)
+ }
+
+ pub fn shader_module_drop<B: GfxBackend>(&self, shader_module_id: id::ShaderModuleId) {
+ span!(_guard, INFO, "ShaderModule::drop");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+ let (device_guard, mut token) = hub.devices.read(&mut token);
+ let (module, _) = hub.shader_modules.unregister(shader_module_id, &mut token);
+ if let Some(module) = module {
+ let device = &device_guard[module.device_id.value];
+ #[cfg(feature = "trace")]
+ if let Some(ref trace) = device.trace {
+ trace
+ .lock()
+ .add(trace::Action::DestroyShaderModule(shader_module_id));
+ }
+ unsafe {
+ device.raw.destroy_shader_module(module.raw);
+ }
+ }
+ }
+
+ pub fn device_create_command_encoder<B: GfxBackend>(
+ &self,
+ device_id: id::DeviceId,
+ desc: &wgt::CommandEncoderDescriptor<Label>,
+ id_in: Input<G, id::CommandEncoderId>,
+ ) -> (id::CommandEncoderId, Option<command::CommandAllocatorError>) {
+ span!(_guard, INFO, "Device::create_command_encoder");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+
+ let (device_guard, mut token) = hub.devices.read(&mut token);
+ let error = loop {
+ let device = match device_guard.get(device_id) {
+ Ok(device) => device,
+ Err(_) => break DeviceError::Invalid.into(),
+ };
+
+ let dev_stored = Stored {
+ value: id::Valid(device_id),
+ ref_count: device.life_guard.add_ref(),
+ };
+
+ let mut command_buffer = match device.cmd_allocator.allocate(
+ dev_stored,
+ &device.raw,
+ device.limits.clone(),
+ device.private_features,
+ &desc.label,
+ #[cfg(feature = "trace")]
+ device.trace.is_some(),
+ ) {
+ Ok(cmd_buf) => cmd_buf,
+ Err(e) => break e,
+ };
+
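+            // Name the raw command buffer for debugging tools when a label was
+            // provided, then begin recording in one-time-submit mode.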
+ unsafe {
+ let raw_command_buffer = command_buffer.raw.last_mut().unwrap();
+ if let Some(ref label) = desc.label {
+ device
+ .raw
+ .set_command_buffer_name(raw_command_buffer, label);
+ }
+ raw_command_buffer.begin_primary(hal::command::CommandBufferFlags::ONE_TIME_SUBMIT);
+ }
+
+ let id = hub
+ .command_buffers
+ .register_identity(id_in, command_buffer, &mut token);
+
+ return (id.0, None);
+ };
+
+ let id = B::hub(self).command_buffers.register_error(
+ id_in,
+ desc.label.borrow_or_default(),
+ &mut token,
+ );
+ (id, Some(error))
+ }
+
+ pub fn command_buffer_label<B: GfxBackend>(&self, id: id::CommandBufferId) -> String {
+ B::hub(self).command_buffers.label_for_resource(id)
+ }
+
+ pub fn command_encoder_drop<B: GfxBackend>(&self, command_encoder_id: id::CommandEncoderId) {
+ span!(_guard, INFO, "CommandEncoder::drop");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+
+ let (mut device_guard, mut token) = hub.devices.write(&mut token);
+ let (cmdbuf, _) = hub
+ .command_buffers
+ .unregister(command_encoder_id, &mut token);
+ if let Some(cmdbuf) = cmdbuf {
+ let device = &mut device_guard[cmdbuf.device_id.value];
+ device.untrack::<G>(&hub, &cmdbuf.trackers, &mut token);
+ device.cmd_allocator.discard(cmdbuf);
+ }
+ }
+
+ pub fn command_buffer_drop<B: GfxBackend>(&self, command_buffer_id: id::CommandBufferId) {
+ span!(_guard, INFO, "CommandBuffer::drop");
+ self.command_encoder_drop::<B>(command_buffer_id)
+ }
+
+ pub fn device_create_render_bundle_encoder(
+ &self,
+ device_id: id::DeviceId,
+ desc: &command::RenderBundleEncoderDescriptor,
+ ) -> (
+ id::RenderBundleEncoderId,
+ Option<command::CreateRenderBundleError>,
+ ) {
+ span!(_guard, INFO, "Device::create_render_bundle_encoder");
+ let (encoder, error) = match command::RenderBundleEncoder::new(desc, device_id, None) {
+ Ok(encoder) => (encoder, None),
+ Err(e) => (command::RenderBundleEncoder::dummy(device_id), Some(e)),
+ };
+ (Box::into_raw(Box::new(encoder)), error)
+ }
+
+ pub fn render_bundle_encoder_finish<B: GfxBackend>(
+ &self,
+ bundle_encoder: command::RenderBundleEncoder,
+ desc: &command::RenderBundleDescriptor,
+ id_in: Input<G, id::RenderBundleId>,
+ ) -> (id::RenderBundleId, Option<command::RenderBundleError>) {
+ span!(_guard, INFO, "RenderBundleEncoder::finish");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+ let (device_guard, mut token) = hub.devices.read(&mut token);
+
+ let error = loop {
+ let device = match device_guard.get(bundle_encoder.parent()) {
+ Ok(device) => device,
+ Err(_) => break command::RenderBundleError::INVALID_DEVICE,
+ };
+
+ let render_bundle = match bundle_encoder.finish(desc, device, &hub, &mut token) {
+ Ok(bundle) => bundle,
+ Err(e) => break e,
+ };
+
+ tracing::debug!("Render bundle {:?} = {:#?}", id_in, render_bundle.used);
+
+ let ref_count = render_bundle.life_guard.add_ref();
+ let id = hub
+ .render_bundles
+ .register_identity(id_in, render_bundle, &mut token);
+
+ #[cfg(feature = "trace")]
+ if let Some(ref trace) = device.trace {
+ let (bundle_guard, _) = hub.render_bundles.read(&mut token);
+ let bundle = &bundle_guard[id];
+ let label = desc.label.as_ref().map(|l| l.as_ref());
+ trace.lock().add(trace::Action::CreateRenderBundle {
+ id: id.0,
+ desc: trace::new_render_bundle_encoder_descriptor(label, &bundle.context),
+ base: bundle.to_base_pass(),
+ });
+ }
+
+ device
+ .trackers
+ .lock()
+ .bundles
+ .init(id, ref_count, PhantomData)
+ .unwrap();
+ return (id.0, None);
+ };
+
+ let id = B::hub(self).render_bundles.register_error(
+ id_in,
+ desc.label.borrow_or_default(),
+ &mut token,
+ );
+ (id, Some(error))
+ }
+
+ pub fn render_bundle_label<B: GfxBackend>(&self, id: id::RenderBundleId) -> String {
+ B::hub(self).render_bundles.label_for_resource(id)
+ }
+
+ pub fn render_bundle_drop<B: GfxBackend>(&self, render_bundle_id: id::RenderBundleId) {
+ span!(_guard, INFO, "RenderBundle::drop");
+ let hub = B::hub(self);
+ let mut token = Token::root();
+
+ let (device_guard, mut token) = hub.devices.read(&mut token);
+ let device_id = {
+ let (mut bundle_guard, _) = hub.render_bundles.write(&mut token);
+ match bundle_guard.get_mut(render_bundle_id) {
+ Ok(bundle) => {
+ bundle.life_guard.ref_count.take();
+ bundle.device_id.value
+ }
+ Err(InvalidId) => {
+ hub.render_bundles
+ .unregister_locked(render_bundle_id, &mut *bundle_guard);
+ return;
+ }
+ }
+ };
+
+ device_guard[device_id]
+ .lock_life(&mut token)
+ .suspected_resources
+ .render_bundles
+ .push(id::Valid(render_bundle_id));
+ }
+
+ pub fn device_create_render_pipeline<B: GfxBackend>(
+ &self,
+ device_id: id::DeviceId,
+ desc: &pipeline::RenderPipelineDescriptor,
+ id_in: Input<G, id::RenderPipelineId>,
+ implicit_pipeline_ids: Option<ImplicitPipelineIds<G>>,
+ ) -> (
+ id::RenderPipelineId,
+ pipeline::ImplicitBindGroupCount,
+ Option<pipeline::CreateRenderPipelineError>,
+ ) {
+ span!(_guard, INFO, "Device::create_render_pipeline");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+
+ let (device_guard, mut token) = hub.devices.read(&mut token);
+ let error = loop {
+ let device = match device_guard.get(device_id) {
+ Ok(device) => device,
+ Err(_) => break DeviceError::Invalid.into(),
+ };
+ let (pipeline, derived_bind_group_count, layout_id) = match device
+ .create_render_pipeline(device_id, desc, implicit_pipeline_ids, &hub, &mut token)
+ {
+ Ok(pair) => pair,
+ Err(e) => break e,
+ };
+
+ let id = hub
+ .render_pipelines
+ .register_identity(id_in, pipeline, &mut token);
+
+ #[cfg(feature = "trace")]
+ if let Some(ref trace) = device.trace {
+ trace.lock().add(trace::Action::CreateRenderPipeline(
+ id.0,
+ pipeline::RenderPipelineDescriptor {
+ layout: Some(layout_id),
+ ..desc.clone()
+ },
+ ));
+ }
+ let _ = layout_id;
+ return (id.0, derived_bind_group_count, None);
+ };
+
+ let id =
+ hub.render_pipelines
+ .register_error(id_in, desc.label.borrow_or_default(), &mut token);
+ (id, 0, Some(error))
+ }
+
+ /// Get an ID of one of the bind group layouts. The ID adds a refcount,
+ /// which needs to be released by calling `bind_group_layout_drop`.
+ pub fn render_pipeline_get_bind_group_layout<B: GfxBackend>(
+ &self,
+ pipeline_id: id::RenderPipelineId,
+ index: u32,
+ id_in: Input<G, id::BindGroupLayoutId>,
+ ) -> (
+ id::BindGroupLayoutId,
+ Option<binding_model::GetBindGroupLayoutError>,
+ ) {
+ let hub = B::hub(self);
+ let mut token = Token::root();
+ let (pipeline_layout_guard, mut token) = hub.pipeline_layouts.read(&mut token);
+
+ let error = loop {
+ let (bgl_guard, mut token) = hub.bind_group_layouts.read(&mut token);
+ let (_, mut token) = hub.bind_groups.read(&mut token);
+ let (pipeline_guard, _) = hub.render_pipelines.read(&mut token);
+
+ let pipeline = match pipeline_guard.get(pipeline_id) {
+ Ok(pipeline) => pipeline,
+ Err(_) => break binding_model::GetBindGroupLayoutError::InvalidPipeline,
+ };
+ let id = match pipeline_layout_guard[pipeline.layout_id.value]
+ .bind_group_layout_ids
+ .get(index as usize)
+ {
+ Some(id) => id,
+ None => break binding_model::GetBindGroupLayoutError::InvalidGroupIndex(index),
+ };
+
+ bgl_guard[*id].multi_ref_count.inc();
+ return (id.0, None);
+ };
+
+ let id = hub
+ .bind_group_layouts
+ .register_error(id_in, "<derived>", &mut token);
+ (id, Some(error))
+ }
+
+ pub fn render_pipeline_label<B: GfxBackend>(&self, id: id::RenderPipelineId) -> String {
+ B::hub(self).render_pipelines.label_for_resource(id)
+ }
+
+ pub fn render_pipeline_drop<B: GfxBackend>(&self, render_pipeline_id: id::RenderPipelineId) {
+ span!(_guard, INFO, "RenderPipeline::drop");
+ let hub = B::hub(self);
+ let mut token = Token::root();
+ let (device_guard, mut token) = hub.devices.read(&mut token);
+
+ let (device_id, layout_id) = {
+ let (mut pipeline_guard, _) = hub.render_pipelines.write(&mut token);
+ match pipeline_guard.get_mut(render_pipeline_id) {
+ Ok(pipeline) => {
+ pipeline.life_guard.ref_count.take();
+ (pipeline.device_id.value, pipeline.layout_id.clone())
+ }
+ Err(InvalidId) => {
+ hub.render_pipelines
+ .unregister_locked(render_pipeline_id, &mut *pipeline_guard);
+ return;
+ }
+ }
+ };
+
+ let mut life_lock = device_guard[device_id].lock_life(&mut token);
+ life_lock
+ .suspected_resources
+ .render_pipelines
+ .push(id::Valid(render_pipeline_id));
+ life_lock
+ .suspected_resources
+ .pipeline_layouts
+ .push(layout_id);
+ }
+
+ pub fn device_create_compute_pipeline<B: GfxBackend>(
+ &self,
+ device_id: id::DeviceId,
+ desc: &pipeline::ComputePipelineDescriptor,
+ id_in: Input<G, id::ComputePipelineId>,
+ implicit_pipeline_ids: Option<ImplicitPipelineIds<G>>,
+ ) -> (
+ id::ComputePipelineId,
+ pipeline::ImplicitBindGroupCount,
+ Option<pipeline::CreateComputePipelineError>,
+ ) {
+ span!(_guard, INFO, "Device::create_compute_pipeline");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+
+ let (device_guard, mut token) = hub.devices.read(&mut token);
+ let error = loop {
+ let device = match device_guard.get(device_id) {
+ Ok(device) => device,
+ Err(_) => break DeviceError::Invalid.into(),
+ };
+ let (pipeline, derived_bind_group_count, layout_id) = match device
+ .create_compute_pipeline(device_id, desc, implicit_pipeline_ids, &hub, &mut token)
+ {
+ Ok(pair) => pair,
+ Err(e) => break e,
+ };
+
+ let id = hub
+ .compute_pipelines
+ .register_identity(id_in, pipeline, &mut token);
+
+ #[cfg(feature = "trace")]
+ if let Some(ref trace) = device.trace {
+ trace.lock().add(trace::Action::CreateComputePipeline(
+ id.0,
+ pipeline::ComputePipelineDescriptor {
+ layout: Some(layout_id),
+ ..desc.clone()
+ },
+ ));
+ }
+ let _ = layout_id;
+ return (id.0, derived_bind_group_count, None);
+ };
+
+ let id =
+ hub.compute_pipelines
+ .register_error(id_in, desc.label.borrow_or_default(), &mut token);
+ (id, 0, Some(error))
+ }
+
+ /// Get an ID of one of the bind group layouts. The ID adds a refcount,
+ /// which needs to be released by calling `bind_group_layout_drop`.
+ pub fn compute_pipeline_get_bind_group_layout<B: GfxBackend>(
+ &self,
+ pipeline_id: id::ComputePipelineId,
+ index: u32,
+ id_in: Input<G, id::BindGroupLayoutId>,
+ ) -> (
+ id::BindGroupLayoutId,
+ Option<binding_model::GetBindGroupLayoutError>,
+ ) {
+ let hub = B::hub(self);
+ let mut token = Token::root();
+ let (pipeline_layout_guard, mut token) = hub.pipeline_layouts.read(&mut token);
+
+ let error = loop {
+ let (bgl_guard, mut token) = hub.bind_group_layouts.read(&mut token);
+ let (_, mut token) = hub.bind_groups.read(&mut token);
+ let (pipeline_guard, _) = hub.compute_pipelines.read(&mut token);
+
+ let pipeline = match pipeline_guard.get(pipeline_id) {
+ Ok(pipeline) => pipeline,
+ Err(_) => break binding_model::GetBindGroupLayoutError::InvalidPipeline,
+ };
+ let id = match pipeline_layout_guard[pipeline.layout_id.value]
+ .bind_group_layout_ids
+ .get(index as usize)
+ {
+ Some(id) => id,
+ None => break binding_model::GetBindGroupLayoutError::InvalidGroupIndex(index),
+ };
+
+ bgl_guard[*id].multi_ref_count.inc();
+ return (id.0, None);
+ };
+
+ let id = hub
+ .bind_group_layouts
+ .register_error(id_in, "<derived>", &mut token);
+ (id, Some(error))
+ }
+
+ pub fn compute_pipeline_label<B: GfxBackend>(&self, id: id::ComputePipelineId) -> String {
+ B::hub(self).compute_pipelines.label_for_resource(id)
+ }
+
+ pub fn compute_pipeline_drop<B: GfxBackend>(&self, compute_pipeline_id: id::ComputePipelineId) {
+ span!(_guard, INFO, "ComputePipeline::drop");
+ let hub = B::hub(self);
+ let mut token = Token::root();
+ let (device_guard, mut token) = hub.devices.read(&mut token);
+
+ let (device_id, layout_id) = {
+ let (mut pipeline_guard, _) = hub.compute_pipelines.write(&mut token);
+ match pipeline_guard.get_mut(compute_pipeline_id) {
+ Ok(pipeline) => {
+ pipeline.life_guard.ref_count.take();
+ (pipeline.device_id.value, pipeline.layout_id.clone())
+ }
+ Err(InvalidId) => {
+ hub.compute_pipelines
+ .unregister_locked(compute_pipeline_id, &mut *pipeline_guard);
+ return;
+ }
+ }
+ };
+
+ let mut life_lock = device_guard[device_id].lock_life(&mut token);
+ life_lock
+ .suspected_resources
+ .compute_pipelines
+ .push(id::Valid(compute_pipeline_id));
+ life_lock
+ .suspected_resources
+ .pipeline_layouts
+ .push(layout_id);
+ }
+
+ pub fn device_get_swap_chain_preferred_format<B: GfxBackend>(
+ &self,
+ _device_id: id::DeviceId,
+ ) -> Result<TextureFormat, InvalidDevice> {
+ span!(_guard, INFO, "Device::get_swap_chain_preferred_format");
+        //TODO: we can query the formats as done in `device_create_swap_chain`,
+        // but it's not clear which format in the list to return.
+        // For now, return `Bgra8UnormSrgb`, which we know is supported everywhere.
+ Ok(TextureFormat::Bgra8UnormSrgb)
+ }
+
+ pub fn device_create_swap_chain<B: GfxBackend>(
+ &self,
+ device_id: id::DeviceId,
+ surface_id: id::SurfaceId,
+ desc: &wgt::SwapChainDescriptor,
+ ) -> Result<id::SwapChainId, swap_chain::CreateSwapChainError> {
+ span!(_guard, INFO, "Device::create_swap_chain");
+
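+        // Warns (without failing) when the requested extent is outside the
+        // supported range, and falls back to FIFO when the requested present
+        // mode is unsupported.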
+ fn validate_swap_chain_descriptor(
+ config: &mut hal::window::SwapchainConfig,
+ caps: &hal::window::SurfaceCapabilities,
+ ) {
+ let width = config.extent.width;
+ let height = config.extent.height;
+ if width < caps.extents.start().width
+ || width > caps.extents.end().width
+ || height < caps.extents.start().height
+ || height > caps.extents.end().height
+ {
+ tracing::warn!(
+ "Requested size {}x{} is outside of the supported range: {:?}",
+ width,
+ height,
+ caps.extents
+ );
+ }
+ if !caps.present_modes.contains(config.present_mode) {
+ tracing::warn!(
+ "Surface does not support present mode: {:?}, falling back to {:?}",
+ config.present_mode,
+ hal::window::PresentMode::FIFO
+ );
+ config.present_mode = hal::window::PresentMode::FIFO;
+ }
+ }
+
+ tracing::info!("creating swap chain {:?}", desc);
+ let hub = B::hub(self);
+ let mut token = Token::root();
+
+ let (mut surface_guard, mut token) = self.surfaces.write(&mut token);
+ let (adapter_guard, mut token) = hub.adapters.read(&mut token);
+ let (device_guard, mut token) = hub.devices.read(&mut token);
+ let (mut swap_chain_guard, _) = hub.swap_chains.write(&mut token);
+ let device = device_guard
+ .get(device_id)
+ .map_err(|_| DeviceError::Invalid)?;
+ let surface = surface_guard
+ .get_mut(surface_id)
+ .map_err(|_| swap_chain::CreateSwapChainError::InvalidSurface)?;
+
+ let (caps, formats) = {
+ let surface = B::get_surface_mut(surface);
+ let adapter = &adapter_guard[device.adapter_id.value];
+ let queue_family = &adapter.raw.queue_families[0];
+ if !surface.supports_queue_family(queue_family) {
+ return Err(swap_chain::CreateSwapChainError::UnsupportedQueueFamily);
+ }
+ let formats = surface.supported_formats(&adapter.raw.physical_device);
+ let caps = surface.capabilities(&adapter.raw.physical_device);
+ (caps, formats)
+ };
+ let num_frames = swap_chain::DESIRED_NUM_FRAMES
+ .max(*caps.image_count.start())
+ .min(*caps.image_count.end());
+ let mut config =
+ swap_chain::swap_chain_descriptor_to_hal(&desc, num_frames, device.private_features);
+ if let Some(formats) = formats {
+ if !formats.contains(&config.format) {
+ return Err(swap_chain::CreateSwapChainError::UnsupportedFormat {
+ requested: config.format,
+ available: formats,
+ });
+ }
+ }
+ validate_swap_chain_descriptor(&mut config, &caps);
+
+ unsafe {
+ B::get_surface_mut(surface)
+ .configure_swapchain(&device.raw, config)
+ .map_err(|err| match err {
+ hal::window::CreationError::OutOfMemory(_) => DeviceError::OutOfMemory,
+ hal::window::CreationError::DeviceLost(_) => DeviceError::Lost,
+ _ => panic!("failed to configure swap chain on creation: {}", err),
+ })?;
+ }
+
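+        // Replace any existing swap chain on this surface: it must not have an
+        // acquired frame outstanding, and its semaphore is destroyed before a
+        // new one is created below.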
+ let sc_id = surface_id.to_swap_chain_id(B::VARIANT);
+ if let Some(sc) = swap_chain_guard.try_remove(sc_id) {
+            if sc.acquired_view_id.is_some() {
+ return Err(swap_chain::CreateSwapChainError::SwapChainOutputExists);
+ }
+ unsafe {
+ device.raw.destroy_semaphore(sc.semaphore);
+ }
+ }
+ #[cfg(feature = "trace")]
+ if let Some(ref trace) = device.trace {
+ trace
+ .lock()
+ .add(Action::CreateSwapChain(sc_id, desc.clone()));
+ }
+
+ let swap_chain = swap_chain::SwapChain {
+ life_guard: LifeGuard::new("<SwapChain>"),
+ device_id: Stored {
+ value: id::Valid(device_id),
+ ref_count: device.life_guard.add_ref(),
+ },
+ desc: desc.clone(),
+ num_frames,
+ semaphore: device
+ .raw
+ .create_semaphore()
+ .or(Err(DeviceError::OutOfMemory))?,
+ acquired_view_id: None,
+ acquired_framebuffers: Vec::new(),
+ active_submission_index: 0,
+ };
+ swap_chain_guard.insert(sc_id, swap_chain);
+ Ok(sc_id)
+ }
+
+ #[cfg(feature = "replay")]
+    /// Only triage suspected resource IDs. This helps us avoid ID collisions
+    /// when creating new resources while replaying a trace.
+ pub fn device_maintain_ids<B: GfxBackend>(
+ &self,
+ device_id: id::DeviceId,
+ ) -> Result<(), InvalidDevice> {
+ let hub = B::hub(self);
+ let mut token = Token::root();
+ let (device_guard, mut token) = hub.devices.read(&mut token);
+ let device = device_guard.get(device_id).map_err(|_| InvalidDevice)?;
+ device.lock_life(&mut token).triage_suspected(
+ &hub,
+ &device.trackers,
+ #[cfg(feature = "trace")]
+ None,
+ &mut token,
+ );
+ Ok(())
+ }
+
+ pub fn device_poll<B: GfxBackend>(
+ &self,
+ device_id: id::DeviceId,
+ force_wait: bool,
+ ) -> Result<(), WaitIdleError> {
+ span!(_guard, INFO, "Device::poll");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+ let callbacks = {
+ let (device_guard, mut token) = hub.devices.read(&mut token);
+ device_guard
+ .get(device_id)
+ .map_err(|_| DeviceError::Invalid)?
+ .maintain(&hub, force_wait, &mut token)?
+ };
+ fire_map_callbacks(callbacks);
+ Ok(())
+ }
+
+ fn poll_devices<B: GfxBackend>(
+ &self,
+ force_wait: bool,
+ callbacks: &mut Vec<BufferMapPendingCallback>,
+ ) -> Result<(), WaitIdleError> {
+ span!(_guard, INFO, "Device::poll_devices");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+ let (device_guard, mut token) = hub.devices.read(&mut token);
+ for (_, device) in device_guard.iter(B::VARIANT) {
+ let cbs = device.maintain(&hub, force_wait, &mut token)?;
+ callbacks.extend(cbs);
+ }
+ Ok(())
+ }
+
+ pub fn poll_all_devices(&self, force_wait: bool) -> Result<(), WaitIdleError> {
+ use crate::backend;
+ let mut callbacks = Vec::new();
+
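+        // Poll every backend compiled into this build; the collected map
+        // callbacks are fired only after all devices have been maintained.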
+ #[cfg(vulkan)]
+ {
+ self.poll_devices::<backend::Vulkan>(force_wait, &mut callbacks)?;
+ }
+ #[cfg(metal)]
+ {
+ self.poll_devices::<backend::Metal>(force_wait, &mut callbacks)?;
+ }
+ #[cfg(dx12)]
+ {
+ self.poll_devices::<backend::Dx12>(force_wait, &mut callbacks)?;
+ }
+ #[cfg(dx11)]
+ {
+ self.poll_devices::<backend::Dx11>(force_wait, &mut callbacks)?;
+ }
+
+ fire_map_callbacks(callbacks);
+
+ Ok(())
+ }
+
+ pub fn device_label<B: GfxBackend>(&self, id: id::DeviceId) -> String {
+ B::hub(self).devices.label_for_resource(id)
+ }
+
+ pub fn device_drop<B: GfxBackend>(&self, device_id: id::DeviceId) {
+ span!(_guard, INFO, "Device::drop");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+ let (device, _) = hub.devices.unregister(device_id, &mut token);
+ if let Some(mut device) = device {
+ device.prepare_to_die();
+
+ // Adapter is only referenced by the device and itself.
+ // This isn't a robust way to destroy them, we should find a better one.
+ if device.adapter_id.ref_count.load() == 1 {
+ let (_adapter, _) = hub
+ .adapters
+ .unregister(device.adapter_id.value.0, &mut token);
+ }
+
+ device.dispose();
+ }
+ }
+
+ pub fn buffer_map_async<B: GfxBackend>(
+ &self,
+ buffer_id: id::BufferId,
+ range: Range<BufferAddress>,
+ op: resource::BufferMapOperation,
+ ) -> Result<(), resource::BufferAccessError> {
+ span!(_guard, INFO, "Device::buffer_map_async");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+ let (device_guard, mut token) = hub.devices.read(&mut token);
+ let (pub_usage, internal_use) = match op.host {
+ HostMap::Read => (wgt::BufferUsage::MAP_READ, resource::BufferUse::MAP_READ),
+ HostMap::Write => (wgt::BufferUsage::MAP_WRITE, resource::BufferUse::MAP_WRITE),
+ };
+
+ if range.start % wgt::COPY_BUFFER_ALIGNMENT != 0
+ || range.end % wgt::COPY_BUFFER_ALIGNMENT != 0
+ {
+ return Err(resource::BufferAccessError::UnalignedRange);
+ }
+
+ let (device_id, ref_count) = {
+ let (mut buffer_guard, _) = hub.buffers.write(&mut token);
+ let buffer = buffer_guard
+ .get_mut(buffer_id)
+ .map_err(|_| resource::BufferAccessError::Invalid)?;
+
+ check_buffer_usage(buffer.usage, pub_usage)?;
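+            // Map-state machine: Idle -> Waiting (serviced by the next maintain);
+            // an Init or Active buffer is already mapped, and a second request
+            // while Waiting is reported through the operation's error callback.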
+ buffer.map_state = match buffer.map_state {
+ resource::BufferMapState::Init { .. } | resource::BufferMapState::Active { .. } => {
+ return Err(resource::BufferAccessError::AlreadyMapped);
+ }
+ resource::BufferMapState::Waiting(_) => {
+ op.call_error();
+ return Ok(());
+ }
+ resource::BufferMapState::Idle => {
+ resource::BufferMapState::Waiting(resource::BufferPendingMapping {
+ range,
+ op,
+ parent_ref_count: buffer.life_guard.add_ref(),
+ })
+ }
+ };
+ tracing::debug!("Buffer {:?} map state -> Waiting", buffer_id);
+
+ (buffer.device_id.value, buffer.life_guard.add_ref())
+ };
+
+ let device = &device_guard[device_id];
+ device.trackers.lock().buffers.change_replace(
+ id::Valid(buffer_id),
+ &ref_count,
+ (),
+ internal_use,
+ );
+
+ device
+ .lock_life(&mut token)
+ .map(id::Valid(buffer_id), ref_count);
+
+ Ok(())
+ }
+
+ pub fn buffer_get_mapped_range<B: GfxBackend>(
+ &self,
+ buffer_id: id::BufferId,
+ offset: BufferAddress,
+ _size: Option<BufferSize>,
+ ) -> Result<*mut u8, resource::BufferAccessError> {
+ span!(_guard, INFO, "Device::buffer_get_mapped_range");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+ let (buffer_guard, _) = hub.buffers.read(&mut token);
+ let buffer = buffer_guard
+ .get(buffer_id)
+ .map_err(|_| resource::BufferAccessError::Invalid)?;
+
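+        // Note: `_size` is currently not validated here; the returned pointer is
+        // simply the mapped base offset by `offset`.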
+ match buffer.map_state {
+ resource::BufferMapState::Init { ptr, .. }
+ | resource::BufferMapState::Active { ptr, .. } => unsafe {
+ Ok(ptr.as_ptr().offset(offset as isize))
+ },
+ resource::BufferMapState::Idle | resource::BufferMapState::Waiting(_) => {
+ Err(resource::BufferAccessError::NotMapped)
+ }
+ }
+ }
+
+ pub fn buffer_unmap<B: GfxBackend>(
+ &self,
+ buffer_id: id::BufferId,
+ ) -> Result<(), resource::BufferAccessError> {
+ span!(_guard, INFO, "Device::buffer_unmap");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+
+ let (mut device_guard, mut token) = hub.devices.write(&mut token);
+ let (mut buffer_guard, _) = hub.buffers.write(&mut token);
+ let buffer = buffer_guard
+ .get_mut(buffer_id)
+ .map_err(|_| resource::BufferAccessError::Invalid)?;
+ let device = &mut device_guard[buffer.device_id.value];
+
+ tracing::debug!("Buffer {:?} map state -> Idle", buffer_id);
+ match mem::replace(&mut buffer.map_state, resource::BufferMapState::Idle) {
+ resource::BufferMapState::Init {
+ ptr,
+ stage_buffer,
+ stage_memory,
+ needs_flush,
+ } => {
+ #[cfg(feature = "trace")]
+ if let Some(ref trace) = device.trace {
+ let mut trace = trace.lock();
+ let data = trace.make_binary("bin", unsafe {
+ std::slice::from_raw_parts(ptr.as_ptr(), buffer.size as usize)
+ });
+ trace.add(trace::Action::WriteBuffer {
+ id: buffer_id,
+ data,
+ range: 0..buffer.size,
+ queued: true,
+ });
+ }
+ let _ = ptr;
+
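+                // Writes to non-coherent host memory must be flushed explicitly
+                // before the GPU can see them.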
+ if needs_flush {
+ stage_memory.flush_range(&device.raw, 0, None)?;
+ }
+
+ let &(ref buf_raw, _) = buffer
+ .raw
+ .as_ref()
+ .ok_or(resource::BufferAccessError::Destroyed)?;
+
+ buffer.life_guard.use_at(device.active_submission_index + 1);
+ let region = hal::command::BufferCopy {
+ src: 0,
+ dst: 0,
+ size: buffer.size,
+ };
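+                // Transition the staging buffer from host writes to transfer reads,
+                // and the destination buffer into transfer-write state, before
+                // recording the copy.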
+ let transition_src = hal::memory::Barrier::Buffer {
+ states: hal::buffer::Access::HOST_WRITE..hal::buffer::Access::TRANSFER_READ,
+ target: &stage_buffer,
+ range: hal::buffer::SubRange::WHOLE,
+ families: None,
+ };
+ let transition_dst = hal::memory::Barrier::Buffer {
+ states: hal::buffer::Access::empty()..hal::buffer::Access::TRANSFER_WRITE,
+ target: buf_raw,
+ range: hal::buffer::SubRange::WHOLE,
+ families: None,
+ };
+ unsafe {
+ let cmdbuf = device.borrow_pending_writes();
+ cmdbuf.pipeline_barrier(
+ hal::pso::PipelineStage::HOST..hal::pso::PipelineStage::TRANSFER,
+ hal::memory::Dependencies::empty(),
+ iter::once(transition_src).chain(iter::once(transition_dst)),
+ );
+ if buffer.size > 0 {
+ cmdbuf.copy_buffer(&stage_buffer, buf_raw, iter::once(region));
+ }
+ }
+ device
+ .pending_writes
+ .consume_temp(queue::TempResource::Buffer(stage_buffer), stage_memory);
+ device.pending_writes.dst_buffers.insert(buffer_id);
+ }
+ resource::BufferMapState::Idle => {
+ return Err(resource::BufferAccessError::NotMapped);
+ }
+ resource::BufferMapState::Waiting(_) => {}
+ resource::BufferMapState::Active {
+ ptr,
+ sub_range,
+ host,
+ } => {
+ if host == HostMap::Write {
+ #[cfg(feature = "trace")]
+ if let Some(ref trace) = device.trace {
+ let mut trace = trace.lock();
+ let size = sub_range.size_to(buffer.size);
+ let data = trace.make_binary("bin", unsafe {
+ std::slice::from_raw_parts(ptr.as_ptr(), size as usize)
+ });
+ trace.add(trace::Action::WriteBuffer {
+ id: buffer_id,
+ data,
+ range: sub_range.offset..sub_range.offset + size,
+ queued: false,
+ });
+ }
+ let _ = (ptr, sub_range);
+ }
+ unmap_buffer(&device.raw, buffer)?;
+ }
+ }
+ Ok(())
+ }
+}
diff --git a/gfx/wgpu/wgpu-core/src/device/queue.rs b/gfx/wgpu/wgpu-core/src/device/queue.rs
new file mode 100644
index 0000000000..ccd55b185e
--- /dev/null
+++ b/gfx/wgpu/wgpu-core/src/device/queue.rs
@@ -0,0 +1,696 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#[cfg(feature = "trace")]
+use crate::device::trace::Action;
+use crate::{
+ command::{
+ texture_copy_view_to_hal, validate_linear_texture_data, validate_texture_copy_range,
+ CommandAllocator, CommandBuffer, CopySide, TextureCopyView, TransferError, BITS_PER_BYTE,
+ },
+ conv,
+ device::{alloc, DeviceError, WaitIdleError},
+ hub::{GfxBackend, Global, GlobalIdentityHandlerFactory, Token},
+ id,
+ resource::{BufferAccessError, BufferMapState, BufferUse, TextureUse},
+ span, FastHashSet,
+};
+
+use hal::{command::CommandBuffer as _, device::Device as _, queue::CommandQueue as _};
+use smallvec::SmallVec;
+use std::{iter, ptr};
+use thiserror::Error;
+
+struct StagingData<B: hal::Backend> {
+ buffer: B::Buffer,
+ memory: alloc::MemoryBlock<B>,
+ cmdbuf: B::CommandBuffer,
+}
+
+#[derive(Debug)]
+pub enum TempResource<B: hal::Backend> {
+ Buffer(B::Buffer),
+ Image(B::Image),
+}
+
+#[derive(Debug)]
+pub(crate) struct PendingWrites<B: hal::Backend> {
+ pub command_buffer: Option<B::CommandBuffer>,
+ pub temp_resources: Vec<(TempResource<B>, alloc::MemoryBlock<B>)>,
+ pub dst_buffers: FastHashSet<id::BufferId>,
+ pub dst_textures: FastHashSet<id::TextureId>,
+}
+
+impl<B: hal::Backend> PendingWrites<B> {
+ pub fn new() -> Self {
+ Self {
+ command_buffer: None,
+ temp_resources: Vec::new(),
+ dst_buffers: FastHashSet::default(),
+ dst_textures: FastHashSet::default(),
+ }
+ }
+
+ pub fn dispose(
+ self,
+ device: &B::Device,
+ cmd_allocator: &CommandAllocator<B>,
+ mem_allocator: &mut alloc::MemoryAllocator<B>,
+ ) {
+ if let Some(raw) = self.command_buffer {
+ cmd_allocator.discard_internal(raw);
+ }
+ for (resource, memory) in self.temp_resources {
+ mem_allocator.free(device, memory);
+ match resource {
+ TempResource::Buffer(buffer) => unsafe {
+ device.destroy_buffer(buffer);
+ },
+ TempResource::Image(image) => unsafe {
+ device.destroy_image(image);
+ },
+ }
+ }
+ }
+
+ pub fn consume_temp(&mut self, resource: TempResource<B>, memory: alloc::MemoryBlock<B>) {
+ self.temp_resources.push((resource, memory));
+ }
+
+ fn consume(&mut self, stage: StagingData<B>) {
+ self.temp_resources
+ .push((TempResource::Buffer(stage.buffer), stage.memory));
+ self.command_buffer = Some(stage.cmdbuf);
+ }
+
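+    /// Closes the recording command buffer, if any, and resets the destination
+    /// sets for the next batch of pending writes.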
+ #[must_use]
+ fn finish(&mut self) -> Option<B::CommandBuffer> {
+ self.dst_buffers.clear();
+ self.dst_textures.clear();
+ self.command_buffer.take().map(|mut cmd_buf| unsafe {
+ cmd_buf.finish();
+ cmd_buf
+ })
+ }
+}
+
+impl<B: hal::Backend> super::Device<B> {
+ pub fn borrow_pending_writes(&mut self) -> &mut B::CommandBuffer {
+ if self.pending_writes.command_buffer.is_none() {
+ let mut cmdbuf = self.cmd_allocator.allocate_internal();
+ unsafe {
+ cmdbuf.begin_primary(hal::command::CommandBufferFlags::ONE_TIME_SUBMIT);
+ }
+ self.pending_writes.command_buffer = Some(cmdbuf);
+ }
+ self.pending_writes.command_buffer.as_mut().unwrap()
+ }
+
+ fn prepare_stage(&mut self, size: wgt::BufferAddress) -> Result<StagingData<B>, DeviceError> {
+ let mut buffer = unsafe {
+ self.raw
+ .create_buffer(size, hal::buffer::Usage::TRANSFER_SRC)
+ .map_err(|err| match err {
+ hal::buffer::CreationError::OutOfMemory(_) => DeviceError::OutOfMemory,
+ _ => panic!("failed to create staging buffer: {}", err),
+ })?
+ };
+ //TODO: do we need to transition into HOST_WRITE access first?
+ let requirements = unsafe {
+ self.raw.set_buffer_name(&mut buffer, "<write_buffer_temp>");
+ self.raw.get_buffer_requirements(&buffer)
+ };
+
+ let block = self.mem_allocator.lock().allocate(
+ &self.raw,
+ requirements,
+ gpu_alloc::UsageFlags::UPLOAD | gpu_alloc::UsageFlags::TRANSIENT,
+ )?;
+ block.bind_buffer(&self.raw, &mut buffer)?;
+
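+        // Reuse the command buffer already recording pending writes, if any;
+        // otherwise begin a fresh internal one-time-submit command buffer.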
+ let cmdbuf = match self.pending_writes.command_buffer.take() {
+ Some(cmdbuf) => cmdbuf,
+ None => {
+ let mut cmdbuf = self.cmd_allocator.allocate_internal();
+ unsafe {
+ cmdbuf.begin_primary(hal::command::CommandBufferFlags::ONE_TIME_SUBMIT);
+ }
+ cmdbuf
+ }
+ };
+ Ok(StagingData {
+ buffer,
+ memory: block,
+ cmdbuf,
+ })
+ }
+}
+
+#[derive(Clone, Debug, Error)]
+pub enum QueueWriteError {
+ #[error(transparent)]
+ Queue(#[from] DeviceError),
+ #[error(transparent)]
+ Transfer(#[from] TransferError),
+}
+
+#[derive(Clone, Debug, Error)]
+pub enum QueueSubmitError {
+ #[error(transparent)]
+ Queue(#[from] DeviceError),
+ #[error("command buffer {0:?} is invalid")]
+ InvalidCommandBuffer(id::CommandBufferId),
+ #[error("buffer {0:?} is destroyed")]
+ DestroyedBuffer(id::BufferId),
+ #[error("texture {0:?} is destroyed")]
+ DestroyedTexture(id::TextureId),
+ #[error(transparent)]
+ Unmap(#[from] BufferAccessError),
+ #[error("swap chain output was dropped before the command buffer got submitted")]
+ SwapChainOutputDropped,
+ #[error("GPU got stuck :(")]
+ StuckGpu,
+}
+
+//TODO: move out common parts of write_xxx.
+
+impl<G: GlobalIdentityHandlerFactory> Global<G> {
+ pub fn queue_write_buffer<B: GfxBackend>(
+ &self,
+ queue_id: id::QueueId,
+ buffer_id: id::BufferId,
+ buffer_offset: wgt::BufferAddress,
+ data: &[u8],
+ ) -> Result<(), QueueWriteError> {
+ span!(_guard, INFO, "Queue::write_buffer");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+ let (mut device_guard, mut token) = hub.devices.write(&mut token);
+ let device = device_guard
+ .get_mut(queue_id)
+ .map_err(|_| DeviceError::Invalid)?;
+ let (buffer_guard, _) = hub.buffers.read(&mut token);
+
+ #[cfg(feature = "trace")]
+ if let Some(ref trace) = device.trace {
+ let mut trace = trace.lock();
+ let data_path = trace.make_binary("bin", data);
+ trace.add(Action::WriteBuffer {
+ id: buffer_id,
+ data: data_path,
+ range: buffer_offset..buffer_offset + data.len() as wgt::BufferAddress,
+ queued: true,
+ });
+ }
+
+ let data_size = data.len() as wgt::BufferAddress;
+ if data_size == 0 {
+ tracing::trace!("Ignoring write_buffer of size 0");
+ return Ok(());
+ }
+
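+        // Stage the data in a host-visible buffer and record a GPU-side copy into
+        // the destination; the copy is submitted with the next batch of pending
+        // writes ahead of user command buffers.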
+ let mut stage = device.prepare_stage(data_size)?;
+ stage.memory.write_bytes(&device.raw, 0, data)?;
+
+ let mut trackers = device.trackers.lock();
+ let (dst, transition) = trackers
+ .buffers
+ .use_replace(&*buffer_guard, buffer_id, (), BufferUse::COPY_DST)
+ .map_err(TransferError::InvalidBuffer)?;
+ let &(ref dst_raw, _) = dst
+ .raw
+ .as_ref()
+ .ok_or(TransferError::InvalidBuffer(buffer_id))?;
+ if !dst.usage.contains(wgt::BufferUsage::COPY_DST) {
+ Err(TransferError::MissingCopyDstUsageFlag(
+ Some(buffer_id),
+ None,
+ ))?;
+ }
+ dst.life_guard.use_at(device.active_submission_index + 1);
+
+ if data_size % wgt::COPY_BUFFER_ALIGNMENT != 0 {
+ Err(TransferError::UnalignedCopySize(data_size))?
+ }
+ if buffer_offset % wgt::COPY_BUFFER_ALIGNMENT != 0 {
+ Err(TransferError::UnalignedBufferOffset(buffer_offset))?
+ }
+ if buffer_offset + data_size > dst.size {
+ Err(TransferError::BufferOverrun {
+ start_offset: buffer_offset,
+ end_offset: buffer_offset + data_size,
+ buffer_size: dst.size,
+ side: CopySide::Destination,
+ })?
+ }
+
+ let region = hal::command::BufferCopy {
+ src: 0,
+ dst: buffer_offset,
+ size: data.len() as _,
+ };
+ unsafe {
+ stage.cmdbuf.pipeline_barrier(
+ super::all_buffer_stages()..hal::pso::PipelineStage::TRANSFER,
+ hal::memory::Dependencies::empty(),
+ iter::once(hal::memory::Barrier::Buffer {
+ states: hal::buffer::Access::HOST_WRITE..hal::buffer::Access::TRANSFER_READ,
+ target: &stage.buffer,
+ range: hal::buffer::SubRange::WHOLE,
+ families: None,
+ })
+ .chain(transition.map(|pending| pending.into_hal(dst))),
+ );
+ stage
+ .cmdbuf
+ .copy_buffer(&stage.buffer, dst_raw, iter::once(region));
+ }
+
+ device.pending_writes.consume(stage);
+ device.pending_writes.dst_buffers.insert(buffer_id);
+
+ Ok(())
+ }
+
+ pub fn queue_write_texture<B: GfxBackend>(
+ &self,
+ queue_id: id::QueueId,
+ destination: &TextureCopyView,
+ data: &[u8],
+ data_layout: &wgt::TextureDataLayout,
+ size: &wgt::Extent3d,
+ ) -> Result<(), QueueWriteError> {
+ span!(_guard, INFO, "Queue::write_texture");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+ let (mut device_guard, mut token) = hub.devices.write(&mut token);
+ let device = device_guard
+ .get_mut(queue_id)
+ .map_err(|_| DeviceError::Invalid)?;
+ let (texture_guard, _) = hub.textures.read(&mut token);
+ let (image_layers, image_range, image_offset) =
+ texture_copy_view_to_hal(destination, size, &*texture_guard)?;
+
+ #[cfg(feature = "trace")]
+ if let Some(ref trace) = device.trace {
+ let mut trace = trace.lock();
+ let data_path = trace.make_binary("bin", data);
+ trace.add(Action::WriteTexture {
+ to: destination.clone(),
+ data: data_path,
+ layout: data_layout.clone(),
+ size: *size,
+ });
+ }
+
+ if size.width == 0 || size.height == 0 || size.depth == 0 {
+ tracing::trace!("Ignoring write_texture of size 0");
+ return Ok(());
+ }
+
+ let texture_format = texture_guard.get(destination.texture).unwrap().format;
+ let bytes_per_block = conv::map_texture_format(texture_format, device.private_features)
+ .surface_desc()
+ .bits as u32
+ / BITS_PER_BYTE;
+ validate_linear_texture_data(
+ data_layout,
+ texture_format,
+ data.len() as wgt::BufferAddress,
+ CopySide::Source,
+ bytes_per_block as wgt::BufferAddress,
+ size,
+ )?;
+ let (block_width, block_height) = conv::texture_block_size(texture_format);
+ if !conv::is_valid_copy_dst_texture_format(texture_format) {
+ Err(TransferError::CopyToForbiddenTextureFormat(texture_format))?
+ }
+ let width_blocks = size.width / block_width;
+        let height_blocks = size.height / block_height;
+
+ let texel_rows_per_image = data_layout.rows_per_image;
+ let block_rows_per_image = data_layout.rows_per_image / block_height;
+
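+        // Rows in the staging buffer are padded out to the device's optimal
+        // buffer-copy pitch alignment, so compute the padded bytes-per-row first.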
+ let bytes_per_row_alignment = get_lowest_common_denom(
+ device.hal_limits.optimal_buffer_copy_pitch_alignment as u32,
+ bytes_per_block,
+ );
+ let stage_bytes_per_row = align_to(bytes_per_block * width_blocks, bytes_per_row_alignment);
+
+ let block_rows_in_copy = (size.depth - 1) * block_rows_per_image + height_blocks;
+ let stage_size = stage_bytes_per_row as u64 * block_rows_in_copy as u64;
+ let mut stage = device.prepare_stage(stage_size)?;
+
+ let mut trackers = device.trackers.lock();
+ let (dst, transition) = trackers
+ .textures
+ .use_replace(
+ &*texture_guard,
+ destination.texture,
+ image_range,
+ TextureUse::COPY_DST,
+ )
+ .unwrap();
+ let &(ref dst_raw, _) = dst
+ .raw
+ .as_ref()
+ .ok_or(TransferError::InvalidTexture(destination.texture))?;
+
+ if !dst.usage.contains(wgt::TextureUsage::COPY_DST) {
+ Err(TransferError::MissingCopyDstUsageFlag(
+ None,
+ Some(destination.texture),
+ ))?
+ }
+ validate_texture_copy_range(
+ destination,
+ dst.format,
+ dst.kind,
+ CopySide::Destination,
+ size,
+ )?;
+ dst.life_guard.use_at(device.active_submission_index + 1);
+
+ let ptr = stage.memory.map(&device.raw, 0, stage_size)?;
+ unsafe {
+ //TODO: https://github.com/zakarumych/gpu-alloc/issues/13
+ if stage_bytes_per_row == data_layout.bytes_per_row {
+                // Fast path if the data is already optimally aligned.
+ ptr::copy_nonoverlapping(data.as_ptr(), ptr.as_ptr(), stage_size as usize);
+ } else {
+ // Copy row by row into the optimal alignment.
+ let copy_bytes_per_row =
+ stage_bytes_per_row.min(data_layout.bytes_per_row) as usize;
+ for layer in 0..size.depth {
+ let rows_offset = layer * block_rows_per_image;
+ for row in 0..height_blocks {
+ ptr::copy_nonoverlapping(
+ data.as_ptr().offset(
+ (rows_offset + row) as isize * data_layout.bytes_per_row as isize,
+ ),
+ ptr.as_ptr().offset(
+ (rows_offset + row) as isize * stage_bytes_per_row as isize,
+ ),
+ copy_bytes_per_row,
+ );
+ }
+ }
+ }
+ }
+ stage.memory.unmap(&device.raw);
+ if !stage.memory.is_coherent() {
+ stage.memory.flush_range(&device.raw, 0, None)?;
+ }
+
+ let region = hal::command::BufferImageCopy {
+ buffer_offset: 0,
+ buffer_width: (stage_bytes_per_row / bytes_per_block) * block_width,
+ buffer_height: texel_rows_per_image,
+ image_layers,
+ image_offset,
+ image_extent: conv::map_extent(size, dst.dimension),
+ };
+ unsafe {
+ stage.cmdbuf.pipeline_barrier(
+ super::all_image_stages() | hal::pso::PipelineStage::HOST
+ ..hal::pso::PipelineStage::TRANSFER,
+ hal::memory::Dependencies::empty(),
+ iter::once(hal::memory::Barrier::Buffer {
+ states: hal::buffer::Access::HOST_WRITE..hal::buffer::Access::TRANSFER_READ,
+ target: &stage.buffer,
+ range: hal::buffer::SubRange::WHOLE,
+ families: None,
+ })
+ .chain(transition.map(|pending| pending.into_hal(dst))),
+ );
+ stage.cmdbuf.copy_buffer_to_image(
+ &stage.buffer,
+ dst_raw,
+ hal::image::Layout::TransferDstOptimal,
+ iter::once(region),
+ );
+ }
+
+ device.pending_writes.consume(stage);
+ device
+ .pending_writes
+ .dst_textures
+ .insert(destination.texture);
+
+ Ok(())
+ }
+
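+    /// Submits the given command buffers. Any pending queue writes are
+    /// flushed first, and each command buffer is prefixed with a transient
+    /// "transit" command buffer that performs the resource state transitions
+    /// its trackers require.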
+ pub fn queue_submit<B: GfxBackend>(
+ &self,
+ queue_id: id::QueueId,
+ command_buffer_ids: &[id::CommandBufferId],
+ ) -> Result<(), QueueSubmitError> {
+ span!(_guard, INFO, "Queue::submit");
+
+ let hub = B::hub(self);
+
+ let callbacks = {
+ let mut token = Token::root();
+ let (mut device_guard, mut token) = hub.devices.write(&mut token);
+ let device = device_guard
+ .get_mut(queue_id)
+ .map_err(|_| DeviceError::Invalid)?;
+ let pending_write_command_buffer = device.pending_writes.finish();
+ device.temp_suspected.clear();
+ device.active_submission_index += 1;
+ let submit_index = device.active_submission_index;
+
+ let fence = {
+ let mut signal_swapchain_semaphores = SmallVec::<[_; 1]>::new();
+ let (mut swap_chain_guard, mut token) = hub.swap_chains.write(&mut token);
+ let (mut command_buffer_guard, mut token) = hub.command_buffers.write(&mut token);
+
+ {
+ let (bind_group_guard, mut token) = hub.bind_groups.read(&mut token);
+ let (compute_pipe_guard, mut token) = hub.compute_pipelines.read(&mut token);
+ let (render_pipe_guard, mut token) = hub.render_pipelines.read(&mut token);
+ let (mut buffer_guard, mut token) = hub.buffers.write(&mut token);
+ let (texture_guard, mut token) = hub.textures.read(&mut token);
+ let (texture_view_guard, mut token) = hub.texture_views.read(&mut token);
+ let (sampler_guard, _) = hub.samplers.read(&mut token);
+
+                    // Note: the trackers must be locked after the storages.
+ let mut trackers = device.trackers.lock();
+
+ //TODO: if multiple command buffers are submitted, we can re-use the last
+ // native command buffer of the previous chain instead of always creating
+ // a temporary one, since the chains are not finished.
+
+ // finish all the command buffers first
+ for &cmb_id in command_buffer_ids {
+ let cmdbuf = command_buffer_guard
+ .get_mut(cmb_id)
+ .map_err(|_| QueueSubmitError::InvalidCommandBuffer(cmb_id))?;
+ #[cfg(feature = "trace")]
+ if let Some(ref trace) = device.trace {
+ trace.lock().add(Action::Submit(
+ submit_index,
+ cmdbuf.commands.take().unwrap(),
+ ));
+ }
+
+ if let Some((sc_id, fbo)) = cmdbuf.used_swap_chain.take() {
+ let sc = &mut swap_chain_guard[sc_id.value];
+ sc.active_submission_index = submit_index;
+ if sc.acquired_view_id.is_none() {
+ return Err(QueueSubmitError::SwapChainOutputDropped);
+ }
+ // For each swapchain, we only want to have at most 1 signaled semaphore.
+ if sc.acquired_framebuffers.is_empty() {
+ // Only add a signal if this is the first time for this swapchain
+ // to be used in the submission.
+ signal_swapchain_semaphores.push(sc_id.value);
+ }
+ sc.acquired_framebuffers.push(fbo);
+ }
+
+ // optimize the tracked states
+ cmdbuf.trackers.optimize();
+
+ // update submission IDs
+ for id in cmdbuf.trackers.buffers.used() {
+ let buffer = &mut buffer_guard[id];
+ if buffer.raw.is_none() {
+                                return Err(QueueSubmitError::DestroyedBuffer(id.0));
+ }
+ if !buffer.life_guard.use_at(submit_index) {
+ if let BufferMapState::Active { .. } = buffer.map_state {
+ tracing::warn!("Dropped buffer has a pending mapping.");
+ super::unmap_buffer(&device.raw, buffer)?;
+ }
+ device.temp_suspected.buffers.push(id);
+ } else {
+ match buffer.map_state {
+ BufferMapState::Idle => (),
+ _ => panic!("Buffer {:?} is still mapped", id),
+ }
+ }
+ }
+ for id in cmdbuf.trackers.textures.used() {
+ let texture = &texture_guard[id];
+ if texture.raw.is_none() {
+                                return Err(QueueSubmitError::DestroyedTexture(id.0));
+ }
+ if !texture.life_guard.use_at(submit_index) {
+ device.temp_suspected.textures.push(id);
+ }
+ }
+ for id in cmdbuf.trackers.views.used() {
+ if !texture_view_guard[id].life_guard.use_at(submit_index) {
+ device.temp_suspected.texture_views.push(id);
+ }
+ }
+ for id in cmdbuf.trackers.bind_groups.used() {
+ if !bind_group_guard[id].life_guard.use_at(submit_index) {
+ device.temp_suspected.bind_groups.push(id);
+ }
+ }
+ for id in cmdbuf.trackers.samplers.used() {
+ if !sampler_guard[id].life_guard.use_at(submit_index) {
+ device.temp_suspected.samplers.push(id);
+ }
+ }
+ for id in cmdbuf.trackers.compute_pipes.used() {
+ if !compute_pipe_guard[id].life_guard.use_at(submit_index) {
+ device.temp_suspected.compute_pipelines.push(id);
+ }
+ }
+ for id in cmdbuf.trackers.render_pipes.used() {
+ if !render_pipe_guard[id].life_guard.use_at(submit_index) {
+ device.temp_suspected.render_pipelines.push(id);
+ }
+ }
+
+ // execute resource transitions
+ let mut transit = device.cmd_allocator.extend(cmdbuf);
+ unsafe {
+                            // the last native command buffer is still open; finish it now
+ cmdbuf.raw.last_mut().unwrap().finish();
+ transit
+ .begin_primary(hal::command::CommandBufferFlags::ONE_TIME_SUBMIT);
+ }
+ tracing::trace!("Stitching command buffer {:?} before submission", cmb_id);
+ CommandBuffer::insert_barriers(
+ &mut transit,
+ &mut *trackers,
+ &cmdbuf.trackers,
+ &*buffer_guard,
+ &*texture_guard,
+ );
+ unsafe {
+ transit.finish();
+ }
+ cmdbuf.raw.insert(0, transit);
+ }
+
+ tracing::trace!("Device after submission {}: {:#?}", submit_index, trackers);
+ }
+
+ // now prepare the GPU submission
+ let fence = device
+ .raw
+ .create_fence(false)
+ .or(Err(DeviceError::OutOfMemory))?;
+ let submission = hal::queue::Submission {
+ command_buffers: pending_write_command_buffer.as_ref().into_iter().chain(
+ command_buffer_ids
+ .iter()
+ .flat_map(|&cmb_id| &command_buffer_guard.get(cmb_id).unwrap().raw),
+ ),
+ wait_semaphores: Vec::new(),
+ signal_semaphores: signal_swapchain_semaphores
+ .into_iter()
+ .map(|sc_id| &swap_chain_guard[sc_id].semaphore),
+ };
+
+ unsafe {
+ device.queue_group.queues[0].submit(submission, Some(&fence));
+ }
+ fence
+ };
+
+ if let Some(comb_raw) = pending_write_command_buffer {
+ device
+ .cmd_allocator
+ .after_submit_internal(comb_raw, submit_index);
+ }
+
+ let callbacks = match device.maintain(&hub, false, &mut token) {
+ Ok(callbacks) => callbacks,
+ Err(WaitIdleError::Device(err)) => return Err(QueueSubmitError::Queue(err)),
+ Err(WaitIdleError::StuckGpu) => return Err(QueueSubmitError::StuckGpu),
+ };
+ super::Device::lock_life_internal(&device.life_tracker, &mut token).track_submission(
+ submit_index,
+ fence,
+ &device.temp_suspected,
+ device.pending_writes.temp_resources.drain(..),
+ );
+
+ // finally, return the command buffers to the allocator
+ for &cmb_id in command_buffer_ids {
+ if let (Some(cmd_buf), _) = hub.command_buffers.unregister(cmb_id, &mut token) {
+ device.cmd_allocator.after_submit(cmd_buf, submit_index);
+ }
+ }
+
+ callbacks
+ };
+
+ super::fire_map_callbacks(callbacks);
+
+ Ok(())
+ }
+}
+
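+/// Despite the name, this computes the least common multiple of `a` and `b`
+/// (both assumed non-zero), used to reconcile the copy pitch alignment with
+/// the texel block size.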
+fn get_lowest_common_denom(a: u32, b: u32) -> u32 {
+ let gcd = if a >= b {
+ get_greatest_common_divisor(a, b)
+ } else {
+ get_greatest_common_divisor(b, a)
+ };
+ a * b / gcd
+}
+
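+/// Euclid's algorithm; requires `a >= b` and `b > 0`.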
+fn get_greatest_common_divisor(mut a: u32, mut b: u32) -> u32 {
+ assert!(a >= b);
+ loop {
+ let c = a % b;
+ if c == 0 {
+ return b;
+ } else {
+ a = b;
+ b = c;
+ }
+ }
+}
+
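+/// Rounds `value` up to the nearest multiple of `alignment`.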
+fn align_to(value: u32, alignment: u32) -> u32 {
+ match value % alignment {
+ 0 => value,
+ other => value - other + alignment,
+ }
+}
+
+#[test]
+fn test_lcd() {
+ assert_eq!(get_lowest_common_denom(2, 2), 2);
+ assert_eq!(get_lowest_common_denom(2, 3), 6);
+ assert_eq!(get_lowest_common_denom(6, 4), 12);
+}
+
+#[test]
+fn test_gcd() {
+ assert_eq!(get_greatest_common_divisor(5, 1), 1);
+ assert_eq!(get_greatest_common_divisor(4, 2), 2);
+ assert_eq!(get_greatest_common_divisor(6, 4), 2);
+ assert_eq!(get_greatest_common_divisor(7, 7), 7);
+}
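+
+// A small added sanity check for `align_to`, mirroring the `test_lcd` and
+// `test_gcd` checks above; the expected values follow directly from rounding
+// up to the next multiple of the alignment.
+#[test]
+fn test_align_to() {
+    assert_eq!(align_to(0, 16), 0);
+    assert_eq!(align_to(1, 16), 16);
+    assert_eq!(align_to(400, 256), 512);
+}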
diff --git a/gfx/wgpu/wgpu-core/src/device/trace.rs b/gfx/wgpu/wgpu-core/src/device/trace.rs
new file mode 100644
index 0000000000..8fbd08526d
--- /dev/null
+++ b/gfx/wgpu/wgpu-core/src/device/trace.rs
@@ -0,0 +1,192 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+use crate::id;
+use std::ops::Range;
+#[cfg(feature = "trace")]
+use std::{borrow::Cow, io::Write as _};
+
+//TODO: consider a readable Id that doesn't include the backend
+
+type FileName = String;
+
+pub const FILE_NAME: &str = "trace.ron";
+
+#[cfg(feature = "trace")]
+pub(crate) fn new_render_bundle_encoder_descriptor<'a>(
+ label: Option<&'a str>,
+ context: &'a super::RenderPassContext,
+) -> crate::command::RenderBundleEncoderDescriptor<'a> {
+ crate::command::RenderBundleEncoderDescriptor {
+ label: label.map(Cow::Borrowed),
+ color_formats: Cow::Borrowed(&context.attachments.colors),
+ depth_stencil_format: context.attachments.depth_stencil,
+ sample_count: context.sample_count as u32,
+ }
+}
+
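+/// One recorded API action; serialized to RON when the `trace` feature is
+/// enabled and deserialized again under the `replay` feature.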
+#[derive(Debug)]
+#[cfg_attr(feature = "trace", derive(serde::Serialize))]
+#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
+pub enum Action<'a> {
+ Init {
+ desc: crate::device::DeviceDescriptor<'a>,
+ backend: wgt::Backend,
+ },
+ CreateBuffer(id::BufferId, crate::resource::BufferDescriptor<'a>),
+ FreeBuffer(id::BufferId),
+ DestroyBuffer(id::BufferId),
+ CreateTexture(id::TextureId, crate::resource::TextureDescriptor<'a>),
+ FreeTexture(id::TextureId),
+ DestroyTexture(id::TextureId),
+ CreateTextureView {
+ id: id::TextureViewId,
+ parent_id: id::TextureId,
+ desc: crate::resource::TextureViewDescriptor<'a>,
+ },
+ DestroyTextureView(id::TextureViewId),
+ CreateSampler(id::SamplerId, crate::resource::SamplerDescriptor<'a>),
+ DestroySampler(id::SamplerId),
+ CreateSwapChain(id::SwapChainId, wgt::SwapChainDescriptor),
+ GetSwapChainTexture {
+ id: Option<id::TextureViewId>,
+ parent_id: id::SwapChainId,
+ },
+ PresentSwapChain(id::SwapChainId),
+ CreateBindGroupLayout(
+ id::BindGroupLayoutId,
+ crate::binding_model::BindGroupLayoutDescriptor<'a>,
+ ),
+ DestroyBindGroupLayout(id::BindGroupLayoutId),
+ CreatePipelineLayout(
+ id::PipelineLayoutId,
+ crate::binding_model::PipelineLayoutDescriptor<'a>,
+ ),
+ DestroyPipelineLayout(id::PipelineLayoutId),
+ CreateBindGroup(
+ id::BindGroupId,
+ crate::binding_model::BindGroupDescriptor<'a>,
+ ),
+ DestroyBindGroup(id::BindGroupId),
+ CreateShaderModule {
+ id: id::ShaderModuleId,
+ label: crate::Label<'a>,
+ data: FileName,
+ },
+ DestroyShaderModule(id::ShaderModuleId),
+ CreateComputePipeline(
+ id::ComputePipelineId,
+ crate::pipeline::ComputePipelineDescriptor<'a>,
+ ),
+ DestroyComputePipeline(id::ComputePipelineId),
+ CreateRenderPipeline(
+ id::RenderPipelineId,
+ crate::pipeline::RenderPipelineDescriptor<'a>,
+ ),
+ DestroyRenderPipeline(id::RenderPipelineId),
+ CreateRenderBundle {
+ id: id::RenderBundleId,
+ desc: crate::command::RenderBundleEncoderDescriptor<'a>,
+ base: crate::command::BasePass<crate::command::RenderCommand>,
+ },
+ DestroyRenderBundle(id::RenderBundleId),
+ WriteBuffer {
+ id: id::BufferId,
+ data: FileName,
+ range: Range<wgt::BufferAddress>,
+ queued: bool,
+ },
+ WriteTexture {
+ to: crate::command::TextureCopyView,
+ data: FileName,
+ layout: wgt::TextureDataLayout,
+ size: wgt::Extent3d,
+ },
+ Submit(crate::SubmissionIndex, Vec<Command>),
+}
+
+#[derive(Debug)]
+#[cfg_attr(feature = "trace", derive(serde::Serialize))]
+#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
+pub enum Command {
+ CopyBufferToBuffer {
+ src: id::BufferId,
+ src_offset: wgt::BufferAddress,
+ dst: id::BufferId,
+ dst_offset: wgt::BufferAddress,
+ size: wgt::BufferAddress,
+ },
+ CopyBufferToTexture {
+ src: crate::command::BufferCopyView,
+ dst: crate::command::TextureCopyView,
+ size: wgt::Extent3d,
+ },
+ CopyTextureToBuffer {
+ src: crate::command::TextureCopyView,
+ dst: crate::command::BufferCopyView,
+ size: wgt::Extent3d,
+ },
+ CopyTextureToTexture {
+ src: crate::command::TextureCopyView,
+ dst: crate::command::TextureCopyView,
+ size: wgt::Extent3d,
+ },
+ RunComputePass {
+ base: crate::command::BasePass<crate::command::ComputeCommand>,
+ },
+ RunRenderPass {
+ base: crate::command::BasePass<crate::command::RenderCommand>,
+ target_colors: Vec<crate::command::ColorAttachmentDescriptor>,
+ target_depth_stencil: Option<crate::command::DepthStencilAttachmentDescriptor>,
+ },
+}
+
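+/// Sink for recorded actions: streams a RON list into `trace.ron` under the
+/// given directory, writing binary payloads alongside as `data<N>.<kind>`.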
+#[cfg(feature = "trace")]
+#[derive(Debug)]
+pub struct Trace {
+ path: std::path::PathBuf,
+ file: std::fs::File,
+ config: ron::ser::PrettyConfig,
+ binary_id: usize,
+}
+
+#[cfg(feature = "trace")]
+impl Trace {
+ pub fn new(path: &std::path::Path) -> Result<Self, std::io::Error> {
+ tracing::info!("Tracing into '{:?}'", path);
+ let mut file = std::fs::File::create(path.join(FILE_NAME))?;
+ file.write_all(b"[\n")?;
+ Ok(Self {
+ path: path.to_path_buf(),
+ file,
+ config: ron::ser::PrettyConfig::default(),
+ binary_id: 0,
+ })
+ }
+
+ pub fn make_binary(&mut self, kind: &str, data: &[u8]) -> String {
+ self.binary_id += 1;
+ let name = format!("data{}.{}", self.binary_id, kind);
+ let _ = std::fs::write(self.path.join(&name), data);
+ name
+ }
+
+ pub(crate) fn add(&mut self, action: Action) {
+ match ron::ser::to_string_pretty(&action, self.config.clone()) {
+ Ok(string) => {
+ let _ = writeln!(self.file, "{},", string);
+ }
+ Err(e) => {
+ tracing::warn!("RON serialization failure: {:?}", e);
+ }
+ }
+ }
+}
+
+#[cfg(feature = "trace")]
+impl Drop for Trace {
+ fn drop(&mut self) {
+ let _ = self.file.write_all(b"]");
+ }
+}