summaryrefslogtreecommitdiffstats
path: root/third_party/rust/wgpu-core/src/track
diff options
context:
space:
mode:
Diffstat (limited to 'third_party/rust/wgpu-core/src/track')
-rw-r--r--third_party/rust/wgpu-core/src/track/buffer.rs838
-rw-r--r--third_party/rust/wgpu-core/src/track/metadata.rs243
-rw-r--r--third_party/rust/wgpu-core/src/track/mod.rs617
-rw-r--r--third_party/rust/wgpu-core/src/track/range.rs206
-rw-r--r--third_party/rust/wgpu-core/src/track/stateless.rs238
-rw-r--r--third_party/rust/wgpu-core/src/track/texture.rs1492
6 files changed, 3634 insertions, 0 deletions
diff --git a/third_party/rust/wgpu-core/src/track/buffer.rs b/third_party/rust/wgpu-core/src/track/buffer.rs
new file mode 100644
index 0000000000..323d2dab9d
--- /dev/null
+++ b/third_party/rust/wgpu-core/src/track/buffer.rs
@@ -0,0 +1,838 @@
+/*! Buffer Trackers
+ *
+ * Buffers are represented by a single state for the whole resource,
+ * a 16 bit bitflag of buffer usages. Because there is only ever
+ * one subresource, they have no selector.
+!*/
+
+use std::{borrow::Cow, marker::PhantomData, sync::Arc};
+
+use super::{PendingTransition, ResourceTracker};
+use crate::{
+ hal_api::HalApi,
+ id::BufferId,
+ resource::{Buffer, Resource},
+ snatch::SnatchGuard,
+ storage::Storage,
+ track::{
+ invalid_resource_state, skip_barrier, ResourceMetadata, ResourceMetadataProvider,
+ ResourceUses, UsageConflict,
+ },
+};
+use hal::{BufferBarrier, BufferUses};
+use parking_lot::Mutex;
+use wgt::{strict_assert, strict_assert_eq};
+
+impl ResourceUses for BufferUses {
+ const EXCLUSIVE: Self = Self::EXCLUSIVE;
+
+ type Selector = ();
+
+ fn bits(self) -> u16 {
+ Self::bits(&self)
+ }
+
+ fn all_ordered(self) -> bool {
+ Self::ORDERED.contains(self)
+ }
+
+ fn any_exclusive(self) -> bool {
+ self.intersects(Self::EXCLUSIVE)
+ }
+}
+
+/// Stores all the buffers that a bind group stores.
+#[derive(Debug)]
+pub(crate) struct BufferBindGroupState<A: HalApi> {
+ buffers: Mutex<Vec<(Arc<Buffer<A>>, BufferUses)>>,
+
+ _phantom: PhantomData<A>,
+}
+impl<A: HalApi> BufferBindGroupState<A> {
+ pub fn new() -> Self {
+ Self {
+ buffers: Mutex::new(Vec::new()),
+
+ _phantom: PhantomData,
+ }
+ }
+
+ /// Optimize the buffer bind group state by sorting it by ID.
+ ///
+ /// When this list of states is merged into a tracker, the memory
+ /// accesses will be in a constant ascending order.
+ #[allow(clippy::pattern_type_mismatch)]
+ pub(crate) fn optimize(&self) {
+ let mut buffers = self.buffers.lock();
+ buffers.sort_unstable_by_key(|(b, _)| b.as_info().id().unzip().0);
+ }
+
+ /// Returns a list of all buffers tracked. May contain duplicates.
+ #[allow(clippy::pattern_type_mismatch)]
+ pub fn used_ids(&self) -> impl Iterator<Item = BufferId> + '_ {
+ let buffers = self.buffers.lock();
+ buffers
+ .iter()
+ .map(|(ref b, _)| b.as_info().id())
+ .collect::<Vec<_>>()
+ .into_iter()
+ }
+
+ /// Drains and returns all buffers tracked. May contain duplicates.
+ pub fn drain_resources(&self) -> impl Iterator<Item = Arc<Buffer<A>>> + '_ {
+ let mut buffers = self.buffers.lock();
+ buffers
+ .drain(..)
+ .map(|(buffer, _u)| buffer)
+ .collect::<Vec<_>>()
+ .into_iter()
+ }
+
+ /// Adds the given resource with the given state.
+ pub fn add_single<'a>(
+ &self,
+ storage: &'a Storage<Buffer<A>>,
+ id: BufferId,
+ state: BufferUses,
+ ) -> Option<&'a Arc<Buffer<A>>> {
+ let buffer = storage.get(id).ok()?;
+
+ let mut buffers = self.buffers.lock();
+ buffers.push((buffer.clone(), state));
+
+ Some(buffer)
+ }
+}
+
+/// Stores all buffer state within a single usage scope.
+#[derive(Debug)]
+pub(crate) struct BufferUsageScope<A: HalApi> {
+ state: Vec<BufferUses>,
+
+ metadata: ResourceMetadata<Buffer<A>>,
+}
+
+impl<A: HalApi> BufferUsageScope<A> {
+ pub fn new() -> Self {
+ Self {
+ state: Vec::new(),
+
+ metadata: ResourceMetadata::new(),
+ }
+ }
+
+ fn tracker_assert_in_bounds(&self, index: usize) {
+ strict_assert!(index < self.state.len());
+ self.metadata.tracker_assert_in_bounds(index);
+ }
+
+ /// Sets the size of all the vectors inside the tracker.
+ ///
+ /// Must be called with the highest possible Buffer ID before
+ /// all unsafe functions are called.
+ pub fn set_size(&mut self, size: usize) {
+ self.state.resize(size, BufferUses::empty());
+ self.metadata.set_size(size);
+ }
+
+ /// Extend the vectors to let the given index be valid.
+ fn allow_index(&mut self, index: usize) {
+ if index >= self.state.len() {
+ self.set_size(index + 1);
+ }
+ }
+
+ /// Drains all buffers tracked.
+ pub fn drain_resources(&mut self) -> impl Iterator<Item = Arc<Buffer<A>>> + '_ {
+ let resources = self.metadata.drain_resources();
+ self.state.clear();
+ resources.into_iter()
+ }
+
+ pub fn get(&self, id: BufferId) -> Option<&Arc<Buffer<A>>> {
+ let index = id.unzip().0 as usize;
+ if index > self.metadata.size() {
+ return None;
+ }
+ self.tracker_assert_in_bounds(index);
+ unsafe {
+ if self.metadata.contains_unchecked(index) {
+ return Some(self.metadata.get_resource_unchecked(index));
+ }
+ }
+ None
+ }
+
+ /// Merge the list of buffer states in the given bind group into this usage scope.
+ ///
+ /// If any of the resulting states is invalid, stops the merge and returns a usage
+ /// conflict with the details of the invalid state.
+ ///
+ /// Because bind groups do not check if the union of all their states is valid,
+ /// this method is allowed to return Err on the first bind group bound.
+ ///
+ /// # Safety
+ ///
+ /// [`Self::set_size`] must be called with the maximum possible Buffer ID before this
+ /// method is called.
+ pub unsafe fn merge_bind_group(
+ &mut self,
+ bind_group: &BufferBindGroupState<A>,
+ ) -> Result<(), UsageConflict> {
+ let buffers = bind_group.buffers.lock();
+ for &(ref resource, state) in &*buffers {
+ let index = resource.as_info().id().unzip().0 as usize;
+
+ unsafe {
+ insert_or_merge(
+ None,
+ &mut self.state,
+ &mut self.metadata,
+ index as _,
+ index,
+ BufferStateProvider::Direct { state },
+ ResourceMetadataProvider::Direct {
+ resource: Cow::Borrowed(resource),
+ },
+ )?
+ };
+ }
+
+ Ok(())
+ }
+
+ /// Merge the list of buffer states in the given usage scope into this UsageScope.
+ ///
+ /// If any of the resulting states is invalid, stops the merge and returns a usage
+ /// conflict with the details of the invalid state.
+ ///
+ /// If the given tracker uses IDs higher than the length of internal vectors,
+ /// the vectors will be extended. A call to set_size is not needed.
+ pub fn merge_usage_scope(&mut self, scope: &Self) -> Result<(), UsageConflict> {
+ let incoming_size = scope.state.len();
+ if incoming_size > self.state.len() {
+ self.set_size(incoming_size);
+ }
+
+ for index in scope.metadata.owned_indices() {
+ self.tracker_assert_in_bounds(index);
+ scope.tracker_assert_in_bounds(index);
+
+ unsafe {
+ insert_or_merge(
+ None,
+ &mut self.state,
+ &mut self.metadata,
+ index as u32,
+ index,
+ BufferStateProvider::Indirect {
+ state: &scope.state,
+ },
+ ResourceMetadataProvider::Indirect {
+ metadata: &scope.metadata,
+ },
+ )?;
+ };
+ }
+
+ Ok(())
+ }
+
+ /// Merge a single state into the UsageScope.
+ ///
+ /// If the resulting state is invalid, returns a usage
+ /// conflict with the details of the invalid state.
+ ///
+ /// If the ID is higher than the length of internal vectors,
+ /// the vectors will be extended. A call to set_size is not needed.
+ pub fn merge_single<'a>(
+ &mut self,
+ storage: &'a Storage<Buffer<A>>,
+ id: BufferId,
+ new_state: BufferUses,
+ ) -> Result<&'a Arc<Buffer<A>>, UsageConflict> {
+ let buffer = storage
+ .get(id)
+ .map_err(|_| UsageConflict::BufferInvalid { id })?;
+
+ let index = id.unzip().0 as usize;
+
+ self.allow_index(index);
+
+ self.tracker_assert_in_bounds(index);
+
+ unsafe {
+ insert_or_merge(
+ None,
+ &mut self.state,
+ &mut self.metadata,
+ index as _,
+ index,
+ BufferStateProvider::Direct { state: new_state },
+ ResourceMetadataProvider::Direct {
+ resource: Cow::Owned(buffer.clone()),
+ },
+ )?;
+ }
+
+ Ok(buffer)
+ }
+}
+
+pub(crate) type SetSingleResult<A> =
+ Option<(Arc<Buffer<A>>, Option<PendingTransition<BufferUses>>)>;
+
+/// Stores all buffer state within a command buffer or device.
+pub(crate) struct BufferTracker<A: HalApi> {
+ start: Vec<BufferUses>,
+ end: Vec<BufferUses>,
+
+ metadata: ResourceMetadata<Buffer<A>>,
+
+ temp: Vec<PendingTransition<BufferUses>>,
+}
+
+impl<A: HalApi> ResourceTracker<Buffer<A>> for BufferTracker<A> {
+ /// Try to remove the buffer `id` from this tracker if it is otherwise unused.
+ ///
+ /// A buffer is 'otherwise unused' when the only references to it are:
+ ///
+ /// 1) the `Arc` that our caller, `LifetimeTracker::triage_suspected`, has just
+ /// drained from `LifetimeTracker::suspected_resources`,
+ ///
+ /// 2) its `Arc` in [`self.metadata`] (owned by [`Device::trackers`]), and
+ ///
+ /// 3) its `Arc` in the [`Hub::buffers`] registry.
+ ///
+ /// If the buffer is indeed unused, this function removes 2), and
+ /// `triage_suspected` will remove 3), leaving 1) as the sole
+ /// remaining reference.
+ ///
+ /// Returns true if the resource was removed or if not existing in metadata.
+ ///
+ /// [`Device::trackers`]: crate::device::Device
+ /// [`self.metadata`]: BufferTracker::metadata
+ /// [`Hub::buffers`]: crate::hub::Hub::buffers
+ fn remove_abandoned(&mut self, id: BufferId) -> bool {
+ let index = id.unzip().0 as usize;
+
+ if index > self.metadata.size() {
+ return false;
+ }
+
+ self.tracker_assert_in_bounds(index);
+
+ unsafe {
+ if self.metadata.contains_unchecked(index) {
+ let existing_ref_count = self.metadata.get_ref_count_unchecked(index);
+ //RefCount 2 means that resource is held just by DeviceTracker and this suspected resource itself
+ //so it's already been released from user and so it's not inside Registry/Storage
+ if existing_ref_count <= 2 {
+ self.metadata.remove(index);
+ log::trace!("Buffer {:?} is not tracked anymore", id,);
+ return true;
+ } else {
+ log::trace!(
+ "Buffer {:?} is still referenced from {}",
+ id,
+ existing_ref_count
+ );
+ return false;
+ }
+ }
+ }
+ true
+ }
+}
+
+impl<A: HalApi> BufferTracker<A> {
+ pub fn new() -> Self {
+ Self {
+ start: Vec::new(),
+ end: Vec::new(),
+
+ metadata: ResourceMetadata::new(),
+
+ temp: Vec::new(),
+ }
+ }
+
+ fn tracker_assert_in_bounds(&self, index: usize) {
+ strict_assert!(index < self.start.len());
+ strict_assert!(index < self.end.len());
+ self.metadata.tracker_assert_in_bounds(index);
+ }
+
+ /// Sets the size of all the vectors inside the tracker.
+ ///
+ /// Must be called with the highest possible Buffer ID before
+ /// all unsafe functions are called.
+ pub fn set_size(&mut self, size: usize) {
+ self.start.resize(size, BufferUses::empty());
+ self.end.resize(size, BufferUses::empty());
+
+ self.metadata.set_size(size);
+ }
+
+ /// Extend the vectors to let the given index be valid.
+ fn allow_index(&mut self, index: usize) {
+ if index >= self.start.len() {
+ self.set_size(index + 1);
+ }
+ }
+
+ /// Returns a list of all buffers tracked.
+ pub fn used_resources(&self) -> impl Iterator<Item = Arc<Buffer<A>>> + '_ {
+ self.metadata.owned_resources()
+ }
+
+ /// Drains all currently pending transitions.
+ pub fn drain_transitions<'a, 'b: 'a>(
+ &'b mut self,
+ snatch_guard: &'a SnatchGuard<'a>,
+ ) -> impl Iterator<Item = BufferBarrier<'a, A>> {
+ let buffer_barriers = self.temp.drain(..).map(|pending| {
+ let buf = unsafe { self.metadata.get_resource_unchecked(pending.id as _) };
+ pending.into_hal(buf, snatch_guard)
+ });
+ buffer_barriers
+ }
+
+ /// Inserts a single buffer and its state into the resource tracker.
+ ///
+ /// If the resource already exists in the tracker, this will panic.
+ ///
+ /// If the ID is higher than the length of internal vectors,
+ /// the vectors will be extended. A call to set_size is not needed.
+ pub fn insert_single(&mut self, id: BufferId, resource: Arc<Buffer<A>>, state: BufferUses) {
+ let index = id.unzip().0 as usize;
+
+ self.allow_index(index);
+
+ self.tracker_assert_in_bounds(index);
+
+ unsafe {
+ let currently_owned = self.metadata.contains_unchecked(index);
+
+ if currently_owned {
+ panic!("Tried to insert buffer already tracked");
+ }
+
+ insert(
+ Some(&mut self.start),
+ &mut self.end,
+ &mut self.metadata,
+ index,
+ BufferStateProvider::Direct { state },
+ None,
+ ResourceMetadataProvider::Direct {
+ resource: Cow::Owned(resource),
+ },
+ )
+ }
+ }
+
+ /// Sets the state of a single buffer.
+ ///
+ /// If a transition is needed to get the buffer into the given state, that transition
+ /// is returned. No more than one transition is needed.
+ ///
+ /// If the ID is higher than the length of internal vectors,
+ /// the vectors will be extended. A call to set_size is not needed.
+ pub fn set_single(&mut self, buffer: &Arc<Buffer<A>>, state: BufferUses) -> SetSingleResult<A> {
+ let index: usize = buffer.as_info().id().unzip().0 as usize;
+
+ self.allow_index(index);
+
+ self.tracker_assert_in_bounds(index);
+
+ unsafe {
+ insert_or_barrier_update(
+ Some(&mut self.start),
+ &mut self.end,
+ &mut self.metadata,
+ index,
+ BufferStateProvider::Direct { state },
+ None,
+ ResourceMetadataProvider::Direct {
+ resource: Cow::Owned(buffer.clone()),
+ },
+ &mut self.temp,
+ )
+ };
+
+ strict_assert!(self.temp.len() <= 1);
+
+ Some((buffer.clone(), self.temp.pop()))
+ }
+
+ /// Sets the given state for all buffers in the given tracker.
+ ///
+ /// If a transition is needed to get the buffers into the needed state,
+ /// those transitions are stored within the tracker. A subsequent
+ /// call to [`Self::drain_transitions`] is needed to get those transitions.
+ ///
+ /// If the ID is higher than the length of internal vectors,
+ /// the vectors will be extended. A call to set_size is not needed.
+ pub fn set_from_tracker(&mut self, tracker: &Self) {
+ let incoming_size = tracker.start.len();
+ if incoming_size > self.start.len() {
+ self.set_size(incoming_size);
+ }
+
+ for index in tracker.metadata.owned_indices() {
+ self.tracker_assert_in_bounds(index);
+ tracker.tracker_assert_in_bounds(index);
+ unsafe {
+ insert_or_barrier_update(
+ Some(&mut self.start),
+ &mut self.end,
+ &mut self.metadata,
+ index,
+ BufferStateProvider::Indirect {
+ state: &tracker.start,
+ },
+ Some(BufferStateProvider::Indirect {
+ state: &tracker.end,
+ }),
+ ResourceMetadataProvider::Indirect {
+ metadata: &tracker.metadata,
+ },
+ &mut self.temp,
+ )
+ }
+ }
+ }
+
+ /// Sets the given state for all buffers in the given UsageScope.
+ ///
+ /// If a transition is needed to get the buffers into the needed state,
+ /// those transitions are stored within the tracker. A subsequent
+ /// call to [`Self::drain_transitions`] is needed to get those transitions.
+ ///
+ /// If the ID is higher than the length of internal vectors,
+ /// the vectors will be extended. A call to set_size is not needed.
+ pub fn set_from_usage_scope(&mut self, scope: &BufferUsageScope<A>) {
+ let incoming_size = scope.state.len();
+ if incoming_size > self.start.len() {
+ self.set_size(incoming_size);
+ }
+
+ for index in scope.metadata.owned_indices() {
+ self.tracker_assert_in_bounds(index);
+ scope.tracker_assert_in_bounds(index);
+ unsafe {
+ insert_or_barrier_update(
+ Some(&mut self.start),
+ &mut self.end,
+ &mut self.metadata,
+ index,
+ BufferStateProvider::Indirect {
+ state: &scope.state,
+ },
+ None,
+ ResourceMetadataProvider::Indirect {
+ metadata: &scope.metadata,
+ },
+ &mut self.temp,
+ )
+ }
+ }
+ }
+
+ /// Iterates through all buffers in the given bind group and adopts
+ /// the state given for those buffers in the UsageScope. It also
+ /// removes all touched buffers from the usage scope.
+ ///
+ /// If a transition is needed to get the buffers into the needed state,
+ /// those transitions are stored within the tracker. A subsequent
+ /// call to [`Self::drain_transitions`] is needed to get those transitions.
+ ///
+ /// This is a really funky method used by Compute Passes to generate
+ /// barriers after a call to dispatch without needing to iterate
+ /// over all elements in the usage scope. We use a given
+ /// iterator of ids as the source of which IDs to look at.
+ /// All the IDs must have first been added to the usage scope.
+ ///
+ /// # Safety
+ ///
+ /// [`Self::set_size`] must be called with the maximum possible Buffer ID before this
+ /// method is called.
+ pub unsafe fn set_and_remove_from_usage_scope_sparse(
+ &mut self,
+ scope: &mut BufferUsageScope<A>,
+ id_source: impl IntoIterator<Item = BufferId>,
+ ) {
+ let incoming_size = scope.state.len();
+ if incoming_size > self.start.len() {
+ self.set_size(incoming_size);
+ }
+
+ for id in id_source {
+ let (index32, _, _) = id.unzip();
+ let index = index32 as usize;
+
+ scope.tracker_assert_in_bounds(index);
+
+ if unsafe { !scope.metadata.contains_unchecked(index) } {
+ continue;
+ }
+ unsafe {
+ insert_or_barrier_update(
+ Some(&mut self.start),
+ &mut self.end,
+ &mut self.metadata,
+ index,
+ BufferStateProvider::Indirect {
+ state: &scope.state,
+ },
+ None,
+ ResourceMetadataProvider::Indirect {
+ metadata: &scope.metadata,
+ },
+ &mut self.temp,
+ )
+ };
+
+ unsafe { scope.metadata.remove(index) };
+ }
+ }
+
+ #[allow(dead_code)]
+ pub fn get(&self, id: BufferId) -> Option<&Arc<Buffer<A>>> {
+ let index = id.unzip().0 as usize;
+ if index > self.metadata.size() {
+ return None;
+ }
+ self.tracker_assert_in_bounds(index);
+ unsafe {
+ if self.metadata.contains_unchecked(index) {
+ return Some(self.metadata.get_resource_unchecked(index));
+ }
+ }
+ None
+ }
+}
+
+/// Source of Buffer State.
+#[derive(Debug, Clone)]
+enum BufferStateProvider<'a> {
+ /// Get a state that was provided directly.
+ Direct { state: BufferUses },
+ /// Get a state from an array of states.
+ Indirect { state: &'a [BufferUses] },
+}
+impl BufferStateProvider<'_> {
+ /// Gets the state from the provider, given a resource ID index.
+ ///
+ /// # Safety
+ ///
+ /// Index must be in bounds for the indirect source iff this is in the indirect state.
+ #[inline(always)]
+ unsafe fn get_state(&self, index: usize) -> BufferUses {
+ match *self {
+ BufferStateProvider::Direct { state } => state,
+ BufferStateProvider::Indirect { state } => {
+ strict_assert!(index < state.len());
+ *unsafe { state.get_unchecked(index) }
+ }
+ }
+ }
+}
+
+/// Does an insertion operation if the index isn't tracked
+/// in the current metadata, otherwise merges the given state
+/// with the current state. If the merging would cause
+/// a conflict, returns that usage conflict.
+///
+/// # Safety
+///
+/// Indexes must be valid indexes into all arrays passed in
+/// to this function, either directly or via metadata or provider structs.
+#[inline(always)]
+unsafe fn insert_or_merge<A: HalApi>(
+ start_states: Option<&mut [BufferUses]>,
+ current_states: &mut [BufferUses],
+ resource_metadata: &mut ResourceMetadata<Buffer<A>>,
+ index32: u32,
+ index: usize,
+ state_provider: BufferStateProvider<'_>,
+ metadata_provider: ResourceMetadataProvider<'_, Buffer<A>>,
+) -> Result<(), UsageConflict> {
+ let currently_owned = unsafe { resource_metadata.contains_unchecked(index) };
+
+ if !currently_owned {
+ unsafe {
+ insert(
+ start_states,
+ current_states,
+ resource_metadata,
+ index,
+ state_provider,
+ None,
+ metadata_provider,
+ )
+ };
+ return Ok(());
+ }
+
+ unsafe {
+ merge(
+ current_states,
+ index32,
+ index,
+ state_provider,
+ metadata_provider,
+ )
+ }
+}
+
+/// If the resource isn't tracked
+/// - Inserts the given resource.
+/// - Uses the `start_state_provider` to populate `start_states`
+/// - Uses either `end_state_provider` or `start_state_provider`
+/// to populate `current_states`.
+/// If the resource is tracked
+/// - Inserts barriers from the state in `current_states`
+/// to the state provided by `start_state_provider`.
+/// - Updates the `current_states` with either the state from
+/// `end_state_provider` or `start_state_provider`.
+///
+/// Any barriers are added to the barrier vector.
+///
+/// # Safety
+///
+/// Indexes must be valid indexes into all arrays passed in
+/// to this function, either directly or via metadata or provider structs.
+#[inline(always)]
+unsafe fn insert_or_barrier_update<A: HalApi>(
+ start_states: Option<&mut [BufferUses]>,
+ current_states: &mut [BufferUses],
+ resource_metadata: &mut ResourceMetadata<Buffer<A>>,
+ index: usize,
+ start_state_provider: BufferStateProvider<'_>,
+ end_state_provider: Option<BufferStateProvider<'_>>,
+ metadata_provider: ResourceMetadataProvider<'_, Buffer<A>>,
+ barriers: &mut Vec<PendingTransition<BufferUses>>,
+) {
+ let currently_owned = unsafe { resource_metadata.contains_unchecked(index) };
+
+ if !currently_owned {
+ unsafe {
+ insert(
+ start_states,
+ current_states,
+ resource_metadata,
+ index,
+ start_state_provider,
+ end_state_provider,
+ metadata_provider,
+ )
+ };
+ return;
+ }
+
+ let update_state_provider = end_state_provider.unwrap_or_else(|| start_state_provider.clone());
+ unsafe { barrier(current_states, index, start_state_provider, barriers) };
+
+ unsafe { update(current_states, index, update_state_provider) };
+}
+
+#[inline(always)]
+unsafe fn insert<A: HalApi>(
+ start_states: Option<&mut [BufferUses]>,
+ current_states: &mut [BufferUses],
+ resource_metadata: &mut ResourceMetadata<Buffer<A>>,
+ index: usize,
+ start_state_provider: BufferStateProvider<'_>,
+ end_state_provider: Option<BufferStateProvider<'_>>,
+ metadata_provider: ResourceMetadataProvider<'_, Buffer<A>>,
+) {
+ let new_start_state = unsafe { start_state_provider.get_state(index) };
+ let new_end_state =
+ end_state_provider.map_or(new_start_state, |p| unsafe { p.get_state(index) });
+
+ // This should only ever happen with a wgpu bug, but let's just double
+ // check that resource states don't have any conflicts.
+ strict_assert_eq!(invalid_resource_state(new_start_state), false);
+ strict_assert_eq!(invalid_resource_state(new_end_state), false);
+
+ log::trace!("\tbuf {index}: insert {new_start_state:?}..{new_end_state:?}");
+
+ unsafe {
+ if let Some(&mut ref mut start_state) = start_states {
+ *start_state.get_unchecked_mut(index) = new_start_state;
+ }
+ *current_states.get_unchecked_mut(index) = new_end_state;
+
+ let resource = metadata_provider.get_own(index);
+ resource_metadata.insert(index, resource);
+ }
+}
+
+#[inline(always)]
+unsafe fn merge<A: HalApi>(
+ current_states: &mut [BufferUses],
+ index32: u32,
+ index: usize,
+ state_provider: BufferStateProvider<'_>,
+ metadata_provider: ResourceMetadataProvider<'_, Buffer<A>>,
+) -> Result<(), UsageConflict> {
+ let current_state = unsafe { current_states.get_unchecked_mut(index) };
+ let new_state = unsafe { state_provider.get_state(index) };
+
+ let merged_state = *current_state | new_state;
+
+ if invalid_resource_state(merged_state) {
+ return Err(UsageConflict::from_buffer(
+ BufferId::zip(
+ index32,
+ unsafe { metadata_provider.get_epoch(index) },
+ A::VARIANT,
+ ),
+ *current_state,
+ new_state,
+ ));
+ }
+
+ log::trace!("\tbuf {index32}: merge {current_state:?} + {new_state:?}");
+
+ *current_state = merged_state;
+
+ Ok(())
+}
+
+#[inline(always)]
+unsafe fn barrier(
+ current_states: &mut [BufferUses],
+ index: usize,
+ state_provider: BufferStateProvider<'_>,
+ barriers: &mut Vec<PendingTransition<BufferUses>>,
+) {
+ let current_state = unsafe { *current_states.get_unchecked(index) };
+ let new_state = unsafe { state_provider.get_state(index) };
+
+ if skip_barrier(current_state, new_state) {
+ return;
+ }
+
+ barriers.push(PendingTransition {
+ id: index as _,
+ selector: (),
+ usage: current_state..new_state,
+ });
+
+ log::trace!("\tbuf {index}: transition {current_state:?} -> {new_state:?}");
+}
+
+#[inline(always)]
+unsafe fn update(
+ current_states: &mut [BufferUses],
+ index: usize,
+ state_provider: BufferStateProvider<'_>,
+) {
+ let current_state = unsafe { current_states.get_unchecked_mut(index) };
+ let new_state = unsafe { state_provider.get_state(index) };
+
+ *current_state = new_state;
+}
diff --git a/third_party/rust/wgpu-core/src/track/metadata.rs b/third_party/rust/wgpu-core/src/track/metadata.rs
new file mode 100644
index 0000000000..e5f4d5e969
--- /dev/null
+++ b/third_party/rust/wgpu-core/src/track/metadata.rs
@@ -0,0 +1,243 @@
+//! The `ResourceMetadata` type.
+
+use crate::{resource::Resource, Epoch};
+use bit_vec::BitVec;
+use std::{borrow::Cow, mem, sync::Arc};
+use wgt::strict_assert;
+
+/// A set of resources, holding an `Arc<T>` and epoch for each member.
+///
+/// Testing for membership is fast, and iterating over members is
+/// reasonably fast in practice. Storage consumption is proportional
+/// to the largest id index of any member, not to the number of
+/// members, but a bit vector tracks occupancy, so iteration touches
+/// only occupied elements.
+#[derive(Debug)]
+pub(super) struct ResourceMetadata<T: Resource> {
+ /// If the resource with index `i` is a member, `owned[i]` is `true`.
+ owned: BitVec<usize>,
+
+ /// A vector holding clones of members' `T`s.
+ resources: Vec<Option<Arc<T>>>,
+}
+
+impl<T: Resource> ResourceMetadata<T> {
+ pub(super) fn new() -> Self {
+ Self {
+ owned: BitVec::default(),
+ resources: Vec::new(),
+ }
+ }
+
+ /// Returns the number of indices we can accommodate.
+ pub(super) fn size(&self) -> usize {
+ self.owned.len()
+ }
+
+ pub(super) fn set_size(&mut self, size: usize) {
+ self.resources.resize(size, None);
+ resize_bitvec(&mut self.owned, size);
+ }
+
+ /// Ensures a given index is in bounds for all arrays and does
+ /// sanity checks of the presence of a refcount.
+ ///
+ /// In release mode this function is completely empty and is removed.
+ #[cfg_attr(not(feature = "strict_asserts"), allow(unused_variables))]
+ pub(super) fn tracker_assert_in_bounds(&self, index: usize) {
+ strict_assert!(index < self.owned.len());
+ strict_assert!(index < self.resources.len());
+ strict_assert!(if self.contains(index) {
+ self.resources[index].is_some()
+ } else {
+ true
+ });
+ }
+
+ /// Returns true if the tracker owns no resources.
+ ///
+ /// This is a O(n) operation.
+ pub(super) fn is_empty(&self) -> bool {
+ !self.owned.any()
+ }
+
+ /// Returns true if the set contains the resource with the given index.
+ pub(super) fn contains(&self, index: usize) -> bool {
+ self.owned[index]
+ }
+
+ /// Returns true if the set contains the resource with the given index.
+ ///
+ /// # Safety
+ ///
+ /// The given `index` must be in bounds for this `ResourceMetadata`'s
+ /// existing tables. See `tracker_assert_in_bounds`.
+ #[inline(always)]
+ pub(super) unsafe fn contains_unchecked(&self, index: usize) -> bool {
+ unsafe { self.owned.get(index).unwrap_unchecked() }
+ }
+
+ /// Insert a resource into the set.
+ ///
+ /// Add the resource with the given index, epoch, and reference count to the
+ /// set.
+ ///
+ /// # Safety
+ ///
+ /// The given `index` must be in bounds for this `ResourceMetadata`'s
+ /// existing tables. See `tracker_assert_in_bounds`.
+ #[inline(always)]
+ pub(super) unsafe fn insert(&mut self, index: usize, resource: Arc<T>) {
+ self.owned.set(index, true);
+ unsafe {
+ *self.resources.get_unchecked_mut(index) = Some(resource);
+ }
+ }
+
+ /// Get the resource with the given index.
+ ///
+ /// # Safety
+ ///
+ /// The given `index` must be in bounds for this `ResourceMetadata`'s
+ /// existing tables. See `tracker_assert_in_bounds`.
+ #[inline(always)]
+ pub(super) unsafe fn get_resource_unchecked(&self, index: usize) -> &Arc<T> {
+ unsafe {
+ self.resources
+ .get_unchecked(index)
+ .as_ref()
+ .unwrap_unchecked()
+ }
+ }
+
+ /// Get the reference count of the resource with the given index.
+ ///
+ /// # Safety
+ ///
+ /// The given `index` must be in bounds for this `ResourceMetadata`'s
+ /// existing tables. See `tracker_assert_in_bounds`.
+ #[inline(always)]
+ pub(super) unsafe fn get_ref_count_unchecked(&self, index: usize) -> usize {
+ unsafe { Arc::strong_count(self.get_resource_unchecked(index)) }
+ }
+
+ /// Returns an iterator over the resources owned by `self`.
+ pub(super) fn owned_resources(&self) -> impl Iterator<Item = Arc<T>> + '_ {
+ if !self.owned.is_empty() {
+ self.tracker_assert_in_bounds(self.owned.len() - 1)
+ };
+ iterate_bitvec_indices(&self.owned).map(move |index| {
+ let resource = unsafe { self.resources.get_unchecked(index) };
+ resource.as_ref().unwrap().clone()
+ })
+ }
+
+ /// Removes and returns all resources owned by `self` as a vector.
+ pub(super) fn drain_resources(&mut self) -> Vec<Arc<T>> {
+ if !self.owned.is_empty() {
+ self.tracker_assert_in_bounds(self.owned.len() - 1)
+ };
+ let mut resources = Vec::new();
+ iterate_bitvec_indices(&self.owned).for_each(|index| {
+ let resource = unsafe { self.resources.get_unchecked(index) };
+ resources.push(resource.as_ref().unwrap().clone());
+ });
+ self.owned.clear();
+ self.resources.clear();
+ resources
+ }
+
+ /// Returns an iterator over the indices of all resources owned by `self`.
+ pub(super) fn owned_indices(&self) -> impl Iterator<Item = usize> + '_ {
+ if !self.owned.is_empty() {
+ self.tracker_assert_in_bounds(self.owned.len() - 1)
+ };
+ iterate_bitvec_indices(&self.owned)
+ }
+
+ /// Remove the resource with the given index from the set.
+ pub(super) unsafe fn remove(&mut self, index: usize) {
+ unsafe {
+ *self.resources.get_unchecked_mut(index) = None;
+ }
+ self.owned.set(index, false);
+ }
+}
+
+/// A source of resource metadata.
+///
+/// This is used to abstract over the various places
+/// trackers can get new resource metadata from.
+pub(super) enum ResourceMetadataProvider<'a, T: Resource> {
+ /// Comes directly from explicit values.
+ Direct { resource: Cow<'a, Arc<T>> },
+ /// Comes from another metadata tracker.
+ Indirect { metadata: &'a ResourceMetadata<T> },
+}
+impl<T: Resource> ResourceMetadataProvider<'_, T> {
+ /// Get the epoch and an owned refcount from this.
+ ///
+ /// # Safety
+ ///
+ /// - The index must be in bounds of the metadata tracker if this uses an indirect source.
+ /// - info must be Some if this uses a Resource source.
+ #[inline(always)]
+ pub(super) unsafe fn get_own(self, index: usize) -> Arc<T> {
+ match self {
+ ResourceMetadataProvider::Direct { resource } => resource.into_owned(),
+ ResourceMetadataProvider::Indirect { metadata } => {
+ metadata.tracker_assert_in_bounds(index);
+ {
+ let resource = unsafe { metadata.resources.get_unchecked(index) };
+ unsafe { resource.clone().unwrap_unchecked() }
+ }
+ }
+ }
+ }
+ /// Get the epoch from this.
+ ///
+ /// # Safety
+ ///
+ /// - The index must be in bounds of the metadata tracker if this uses an indirect source.
+ #[inline(always)]
+ pub(super) unsafe fn get_epoch(self, index: usize) -> Epoch {
+ unsafe { self.get_own(index).as_info().id().unzip().1 }
+ }
+}
+
+/// Resizes the given bitvec to the given size. I'm not sure why this is hard to do but it is.
+fn resize_bitvec<B: bit_vec::BitBlock>(vec: &mut BitVec<B>, size: usize) {
+ let owned_size_to_grow = size.checked_sub(vec.len());
+ if let Some(delta) = owned_size_to_grow {
+ if delta != 0 {
+ vec.grow(delta, false);
+ }
+ } else {
+ vec.truncate(size);
+ }
+}
+
+/// Produces an iterator that yields the indexes of all bits that are set in the bitvec.
+///
+/// Will skip entire usize's worth of bits if they are all false.
+fn iterate_bitvec_indices(ownership: &BitVec<usize>) -> impl Iterator<Item = usize> + '_ {
+ const BITS_PER_BLOCK: usize = mem::size_of::<usize>() * 8;
+
+ let size = ownership.len();
+
+ ownership
+ .blocks()
+ .enumerate()
+ .filter(|&(_, word)| word != 0)
+ .flat_map(move |(word_index, mut word)| {
+ let bit_start = word_index * BITS_PER_BLOCK;
+ let bit_end = (bit_start + BITS_PER_BLOCK).min(size);
+
+ (bit_start..bit_end).filter(move |_| {
+ let active = word & 0b1 != 0;
+ word >>= 1;
+
+ active
+ })
+ })
+}
diff --git a/third_party/rust/wgpu-core/src/track/mod.rs b/third_party/rust/wgpu-core/src/track/mod.rs
new file mode 100644
index 0000000000..a36280d03b
--- /dev/null
+++ b/third_party/rust/wgpu-core/src/track/mod.rs
@@ -0,0 +1,617 @@
+/*! Resource State and Lifetime Trackers
+
+These structures are responsible for keeping track of resource state,
+generating barriers where needed, and making sure resources are kept
+alive until the trackers die.
+
+## General Architecture
+
+Tracking is some of the hottest code in the entire codebase, so the trackers
+are designed to be as cache efficient as possible. They store resource state
+in flat vectors, storing metadata SOA style, one vector per type of metadata.
+
+A lot of the tracker code is deeply unsafe, using unchecked accesses all over
+to make performance as good as possible. However, for all unsafe accesses, there
+is a corresponding debug assertion that checks whether that access is valid. This
+helps catch bugs quickly, while still letting users avoid paying for the bounds
+checks.
+
+In wgpu, each resource ID includes a bitfield holding an index.
+Indices are allocated and re-used, so they will always be as low as
+reasonably possible. This allows us to use IDs to index into an array
+of tracking information.
+
+## Statefulness
+
+There are two main types of trackers, stateful and stateless.
+
+Stateful trackers are for buffers and textures. They both have
+resource state attached to them which needs to be used to generate
+automatic synchronization. Because of the different requirements of
+buffers and textures, they have two separate tracking structures.
+
+Stateless trackers only store metadata and own the given resource.
+
+## Use Case
+
+Within each type of tracker, the trackers are further split into 3 different
+use cases, Bind Group, Usage Scope, and a full Tracker.
+
+Bind Group trackers are just a list of different resources, their refcount,
+and how they are used. Textures are used via a selector and a usage type.
+Buffers by just a usage type. Stateless resources don't have a usage type.
+
+Usage Scope trackers are only for stateful resources. These trackers represent
+a single [`UsageScope`] in the spec. When a use is added to a usage scope,
+it is merged with all other uses of that resource in that scope. If there
+is a usage conflict, merging will fail and an error will be reported.
+
+Full trackers represent a before and after state of a resource. These
+are used for tracking on the device and on command buffers. The before
+state represents the state the resource is first used as in the command buffer,
+the after state is the state the command buffer leaves the resource in.
+These double ended buffers can then be used to generate the needed transitions
+between command buffers.
+
+## Dense Datastructure with Sparse Data
+
+This tracking system is based on having completely dense data, but trackers do
+not always contain every resource. Some resources (or even most resources) go
+unused in any given command buffer. So to help speed up the process of iterating
+through possibly thousands of resources, we use a bit vector to represent if
+a resource is in the buffer or not. This allows us extremely efficient memory
+utilization, as well as being able to bail out of whole blocks of 32-64 resources
+with a single usize comparison with zero. In practice this means that merging
+partially resident buffers is extremely quick.
+
+The main advantage of this dense datastructure is that we can do merging
+of trackers in an extremely efficient fashion that results in us doing linear
+scans down a couple of buffers. CPUs and their caches absolutely eat this up.
+
+## Stateful Resource Operations
+
+All operations on stateful trackers boil down to one of four operations:
+- `insert(tracker, new_state)` adds a resource with a given state to the tracker
+ for the first time.
+- `merge(tracker, new_state)` merges this new state with the previous state, checking
+ for usage conflicts.
+- `barrier(tracker, new_state)` compares the given state to the existing state and
+ generates the needed barriers.
+- `update(tracker, new_state)` takes the given new state and overrides the old state.
+
+This allows us to compose the operations to form the various kinds of tracker merges
+that need to happen in the codebase. For each resource in the given merger, the following
+operation applies:
+
+```text
+UsageScope <- Resource = insert(scope, usage) OR merge(scope, usage)
+UsageScope <- UsageScope = insert(scope, scope) OR merge(scope, scope)
+CommandBuffer <- UsageScope = insert(buffer.start, buffer.end, scope)
+ OR barrier(buffer.end, scope) + update(buffer.end, scope)
+Device <- CommandBuffer = insert(device.start, device.end, buffer.start, buffer.end)
+ OR barrier(device.end, buffer.start) + update(device.end, buffer.end)
+```
+
+[`UsageScope`]: https://gpuweb.github.io/gpuweb/#programming-model-synchronization
+*/
+
+mod buffer;
+mod metadata;
+mod range;
+mod stateless;
+mod texture;
+
+use crate::{
+ binding_model, command, conv,
+ hal_api::HalApi,
+ id::{self, Id},
+ pipeline, resource,
+ snatch::SnatchGuard,
+ storage::Storage,
+};
+
+use parking_lot::RwLock;
+use std::{fmt, ops};
+use thiserror::Error;
+
+pub(crate) use buffer::{BufferBindGroupState, BufferTracker, BufferUsageScope};
+use metadata::{ResourceMetadata, ResourceMetadataProvider};
+pub(crate) use stateless::{StatelessBindGroupSate, StatelessTracker};
+pub(crate) use texture::{
+ TextureBindGroupState, TextureSelector, TextureTracker, TextureUsageScope,
+};
+use wgt::strict_assert_ne;
+
/// A structure containing all the information about a particular resource
/// transition. User code should be able to generate a pipeline barrier
/// based on the contents.
#[derive(Debug, PartialEq)]
pub(crate) struct PendingTransition<S: ResourceUses> {
    /// Index portion of the resource id this transition applies to.
    pub id: u32,
    /// Which subresources are affected (`()` for buffers; mips/layers for textures).
    pub selector: S::Selector,
    /// The usage transition: `start` is the old state, `end` is the new state.
    pub usage: ops::Range<S>,
}
+
+pub(crate) type PendingTransitionList = Vec<PendingTransition<hal::TextureUses>>;
+
impl PendingTransition<hal::BufferUses> {
    /// Produce the hal barrier corresponding to the transition.
    pub fn into_hal<'a, A: HalApi>(
        self,
        buf: &'a resource::Buffer<A>,
        snatch_guard: &'a SnatchGuard<'a>,
    ) -> hal::BufferBarrier<'a, A> {
        // The raw hal buffer lives in a snatchable slot; it is only absent if
        // the buffer was destroyed, which would be a bug at this point.
        let buffer = buf.raw.get(snatch_guard).expect("Buffer is destroyed");
        hal::BufferBarrier {
            buffer,
            usage: self.usage,
        }
    }
}
+
impl PendingTransition<hal::TextureUses> {
    /// Produce the hal barrier corresponding to the transition.
    pub fn into_hal<'a, A: HalApi>(self, texture: &'a A::Texture) -> hal::TextureBarrier<'a, A> {
        // These showing up in a barrier is always a bug
        strict_assert_ne!(self.usage.start, hal::TextureUses::UNKNOWN);
        strict_assert_ne!(self.usage.end, hal::TextureUses::UNKNOWN);

        // An empty mip or layer range would make this barrier a no-op, which
        // is also a bug.
        let mip_count = self.selector.mips.end - self.selector.mips.start;
        strict_assert_ne!(mip_count, 0);
        let layer_count = self.selector.layers.end - self.selector.layers.start;
        strict_assert_ne!(layer_count, 0);

        hal::TextureBarrier {
            texture,
            range: wgt::ImageSubresourceRange {
                aspect: wgt::TextureAspect::All,
                base_mip_level: self.selector.mips.start,
                mip_level_count: Some(mip_count),
                base_array_layer: self.selector.layers.start,
                array_layer_count: Some(layer_count),
            },
            usage: self.usage,
        }
    }
}
+
/// The uses that a resource or subresource can be in.
pub(crate) trait ResourceUses:
    fmt::Debug + ops::BitAnd<Output = Self> + ops::BitOr<Output = Self> + PartialEq + Sized + Copy
{
    /// All flags that are exclusive.
    const EXCLUSIVE: Self;

    /// The selector used by this resource.
    type Selector: fmt::Debug;

    /// Turn the resource into a pile of bits.
    fn bits(self) -> u16;
    /// Returns true if all the uses are ordered.
    fn all_ordered(self) -> bool;
    /// Returns true if any of the uses are exclusive.
    fn any_exclusive(self) -> bool;
}
+
+/// Returns true if the given states violates the usage scope rule
+/// of any(inclusive) XOR one(exclusive)
+fn invalid_resource_state<T: ResourceUses>(state: T) -> bool {
+ // Is power of two also means "is one bit set". We check for this as if
+ // we're in any exclusive state, we must only be in a single state.
+ state.any_exclusive() && !conv::is_power_of_two_u16(state.bits())
+}
+
+/// Returns true if the transition from one state to another does not require
+/// a barrier.
+fn skip_barrier<T: ResourceUses>(old_state: T, new_state: T) -> bool {
+ // If the state didn't change and all the usages are ordered, the hardware
+ // will guarantee the order of accesses, so we do not need to issue a barrier at all
+ old_state == new_state && old_state.all_ordered()
+}
+
#[derive(Clone, Debug, Error, Eq, PartialEq)]
pub enum UsageConflict {
    /// The referenced buffer was invalid.
    #[error("Attempted to use invalid buffer")]
    BufferInvalid { id: id::BufferId },
    /// The referenced texture was invalid.
    #[error("Attempted to use invalid texture")]
    TextureInvalid { id: id::TextureId },
    /// Two incompatible usages of the same buffer within one usage scope.
    #[error("Attempted to use buffer with {invalid_use}.")]
    Buffer {
        id: id::BufferId,
        invalid_use: InvalidUse<hal::BufferUses>,
    },
    /// Two incompatible usages of overlapping texture subresources within one
    /// usage scope.
    #[error("Attempted to use a texture (mips {mip_levels:?} layers {array_layers:?}) with {invalid_use}.")]
    Texture {
        id: id::TextureId,
        mip_levels: ops::Range<u32>,
        array_layers: ops::Range<u32>,
        invalid_use: InvalidUse<hal::TextureUses>,
    },
}
+
impl UsageConflict {
    /// Build the buffer conflict variant from the current and attempted usages.
    fn from_buffer(
        id: id::BufferId,
        current_state: hal::BufferUses,
        new_state: hal::BufferUses,
    ) -> Self {
        Self::Buffer {
            id,
            invalid_use: InvalidUse {
                current_state,
                new_state,
            },
        }
    }

    /// Build the texture conflict variant, recording which subresources
    /// (mips/layers) the conflicting use covered.
    fn from_texture(
        id: id::TextureId,
        selector: TextureSelector,
        current_state: hal::TextureUses,
        new_state: hal::TextureUses,
    ) -> Self {
        Self::Texture {
            id,
            mip_levels: selector.mips,
            array_layers: selector.layers,
            invalid_use: InvalidUse {
                current_state,
                new_state,
            },
        }
    }
}
+
+impl crate::error::PrettyError for UsageConflict {
+ fn fmt_pretty(&self, fmt: &mut crate::error::ErrorFormatter) {
+ fmt.error(self);
+ match *self {
+ Self::BufferInvalid { id } => {
+ fmt.buffer_label(&id);
+ }
+ Self::TextureInvalid { id } => {
+ fmt.texture_label(&id);
+ }
+ Self::Buffer { id, .. } => {
+ fmt.buffer_label(&id);
+ }
+ Self::Texture { id, .. } => {
+ fmt.texture_label(&id);
+ }
+ }
+ }
+}
+
/// Pretty print helper that shows helpful descriptions of a conflicting usage.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct InvalidUse<T> {
    /// The usage the resource already has within the scope.
    current_state: T,
    /// The usage that was attempted to be added.
    new_state: T,
}
+
impl<T: ResourceUses> fmt::Display for InvalidUse<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let current = self.current_state;
        let new = self.new_state;

        // Isolate the exclusive bits on both sides so the message can name
        // exactly which exclusive usage caused the conflict.
        let current_exclusive = current & T::EXCLUSIVE;
        let new_exclusive = new & T::EXCLUSIVE;

        let exclusive = current_exclusive | new_exclusive;

        // The text starts with "tried to use X resource with {self}"
        write!(
            f,
            "conflicting usages. Current usage {current:?} and new usage {new:?}. \
            {exclusive:?} is an exclusive usage and cannot be used with any other \
            usages within the usage scope (renderpass or compute dispatch)"
        )
    }
}
+
/// All the usages that a bind group contains. The uses are not deduplicated in any way
/// and may include conflicting uses. This is fully compliant by the WebGPU spec.
///
/// All bind group states are sorted by their ID so that when adding to a tracker,
/// they are added in the most efficient order possible (ascending order).
#[derive(Debug)]
pub(crate) struct BindGroupStates<A: HalApi> {
    /// Stateful buffer uses (usage flags, no selector).
    pub buffers: BufferBindGroupState<A>,
    /// Stateful texture uses (selector + usage flags).
    pub textures: TextureBindGroupState<A>,
    /// Stateless: texture views are only kept alive, they carry no use state.
    pub views: StatelessBindGroupSate<resource::TextureView<A>>,
    /// Stateless: samplers are only kept alive, they carry no use state.
    pub samplers: StatelessBindGroupSate<resource::Sampler<A>>,
}
+
impl<A: HalApi> BindGroupStates<A> {
    /// Creates empty per-kind bind group states.
    pub fn new() -> Self {
        Self {
            buffers: BufferBindGroupState::new(),
            textures: TextureBindGroupState::new(),
            views: StatelessBindGroupSate::new(),
            samplers: StatelessBindGroupSate::new(),
        }
    }

    /// Optimize the bind group states by sorting them by ID.
    ///
    /// When this list of states is merged into a tracker, the memory
    /// accesses will be in a constant ascending order.
    pub fn optimize(&mut self) {
        self.buffers.optimize();
        self.textures.optimize();
        self.views.optimize();
        self.samplers.optimize();
    }
}
+
/// This is a render bundle specific usage scope. It includes stateless resources
/// that are not normally included in a usage scope, but are used by render bundles
/// and need to be owned by the render bundles.
#[derive(Debug)]
pub(crate) struct RenderBundleScope<A: HalApi> {
    /// Stateful buffer uses accumulated by the bundle.
    pub buffers: RwLock<BufferUsageScope<A>>,
    /// Stateful texture uses accumulated by the bundle.
    pub textures: RwLock<TextureUsageScope<A>>,
    // Don't need to track views and samplers, they are never used directly, only by bind groups.
    pub bind_groups: RwLock<StatelessTracker<binding_model::BindGroup<A>>>,
    pub render_pipelines: RwLock<StatelessTracker<pipeline::RenderPipeline<A>>>,
    pub query_sets: RwLock<StatelessTracker<resource::QuerySet<A>>>,
}
+
impl<A: HalApi> RenderBundleScope<A> {
    /// Create the render bundle scope and pull the maximum IDs from the hubs.
    pub fn new(
        buffers: &Storage<resource::Buffer<A>>,
        textures: &Storage<resource::Texture<A>>,
        bind_groups: &Storage<binding_model::BindGroup<A>>,
        render_pipelines: &Storage<pipeline::RenderPipeline<A>>,
        query_sets: &Storage<resource::QuerySet<A>>,
    ) -> Self {
        let value = Self {
            buffers: RwLock::new(BufferUsageScope::new()),
            textures: RwLock::new(TextureUsageScope::new()),
            bind_groups: RwLock::new(StatelessTracker::new()),
            render_pipelines: RwLock::new(StatelessTracker::new()),
            query_sets: RwLock::new(StatelessTracker::new()),
        };

        // Size every tracker up-front so the unsafe merge paths below may
        // assume all valid indices are in bounds.
        value.buffers.write().set_size(buffers.len());
        value.textures.write().set_size(textures.len());
        value.bind_groups.write().set_size(bind_groups.len());
        value
            .render_pipelines
            .write()
            .set_size(render_pipelines.len());
        value.query_sets.write().set_size(query_sets.len());

        value
    }

    /// Merge the inner contents of a bind group into the render bundle tracker.
    ///
    /// Only stateful things are merged in here, all other resources are owned
    /// indirectly by the bind group.
    ///
    /// # Safety
    ///
    /// The maximum ID given by each bind group resource must be less than the
    /// length of the storage given at the call to `new`.
    pub unsafe fn merge_bind_group(
        &mut self,
        bind_group: &BindGroupStates<A>,
    ) -> Result<(), UsageConflict> {
        // SAFETY: forwarded; the caller upholds the ID bound stated above.
        unsafe { self.buffers.write().merge_bind_group(&bind_group.buffers)? };
        // SAFETY: forwarded; the caller upholds the ID bound stated above.
        unsafe {
            self.textures
                .write()
                .merge_bind_group(&bind_group.textures)?
        };

        Ok(())
    }
}
+
/// A usage scope tracker. Only needs to store stateful resources as stateless
/// resources cannot possibly have a usage conflict.
#[derive(Debug)]
pub(crate) struct UsageScope<A: HalApi> {
    /// Per-buffer usage within this scope.
    pub buffers: BufferUsageScope<A>,
    /// Per-texture (per-subresource) usage within this scope.
    pub textures: TextureUsageScope<A>,
}
+
impl<A: HalApi> UsageScope<A> {
    /// Create the usage scope and pull the maximum IDs from the hubs.
    pub fn new(
        buffers: &Storage<resource::Buffer<A>>,
        textures: &Storage<resource::Texture<A>>,
    ) -> Self {
        let mut value = Self {
            buffers: BufferUsageScope::new(),
            textures: TextureUsageScope::new(),
        };

        // Size both trackers up-front so the unsafe merges below may assume
        // all valid indices are in bounds.
        value.buffers.set_size(buffers.len());
        value.textures.set_size(textures.len());

        value
    }

    /// Merge the inner contents of a bind group into the usage scope.
    ///
    /// Only stateful things are merged in here, all other resources are owned
    /// indirectly by the bind group.
    ///
    /// # Safety
    ///
    /// The maximum ID given by each bind group resource must be less than the
    /// length of the storage given at the call to `new`.
    pub unsafe fn merge_bind_group(
        &mut self,
        bind_group: &BindGroupStates<A>,
    ) -> Result<(), UsageConflict> {
        // SAFETY: forwarded; the caller upholds the ID bound stated above.
        unsafe {
            self.buffers.merge_bind_group(&bind_group.buffers)?;
            self.textures.merge_bind_group(&bind_group.textures)?;
        }

        Ok(())
    }

    /// Merge the inner contents of a render bundle scope into the usage scope.
    ///
    /// Only stateful things are merged in here, all other resources are owned
    /// indirectly by a bind group or are merged directly into the command buffer tracker.
    ///
    /// # Safety
    ///
    /// The maximum ID given by each bind group resource must be less than the
    /// length of the storage given at the call to `new`.
    pub unsafe fn merge_render_bundle(
        &mut self,
        render_bundle: &RenderBundleScope<A>,
    ) -> Result<(), UsageConflict> {
        self.buffers
            .merge_usage_scope(&*render_bundle.buffers.read())?;
        self.textures
            .merge_usage_scope(&*render_bundle.textures.read())?;

        Ok(())
    }
}
+
/// Common interface for trackers that can release a resource once the tracker
/// itself is (close to) the only thing keeping it alive.
pub(crate) trait ResourceTracker<R>
where
    R: resource::Resource,
{
    /// Try to remove the resource with the given id from the tracker; returns
    /// true when the tracker no longer holds it. See implementors for the
    /// exact refcount semantics.
    fn remove_abandoned(&mut self, id: Id<R::Marker>) -> bool;
}
+
/// A full double sided tracker used by CommandBuffers and the Device.
pub(crate) struct Tracker<A: HalApi> {
    // Stateful trackers: carry usage state and can generate barriers.
    pub buffers: BufferTracker<A>,
    pub textures: TextureTracker<A>,
    // Stateless trackers: only keep the resources alive.
    pub views: StatelessTracker<resource::TextureView<A>>,
    pub samplers: StatelessTracker<resource::Sampler<A>>,
    pub bind_groups: StatelessTracker<binding_model::BindGroup<A>>,
    pub compute_pipelines: StatelessTracker<pipeline::ComputePipeline<A>>,
    pub render_pipelines: StatelessTracker<pipeline::RenderPipeline<A>>,
    pub bundles: StatelessTracker<command::RenderBundle<A>>,
    pub query_sets: StatelessTracker<resource::QuerySet<A>>,
}
+
impl<A: HalApi> Tracker<A> {
    /// Creates a tracker with every sub-tracker empty.
    pub fn new() -> Self {
        Self {
            buffers: BufferTracker::new(),
            textures: TextureTracker::new(),
            views: StatelessTracker::new(),
            samplers: StatelessTracker::new(),
            bind_groups: StatelessTracker::new(),
            compute_pipelines: StatelessTracker::new(),
            render_pipelines: StatelessTracker::new(),
            bundles: StatelessTracker::new(),
            query_sets: StatelessTracker::new(),
        }
    }

    /// Pull the maximum IDs from the hubs.
    ///
    /// Every argument is optional; only the sub-trackers whose storage is
    /// supplied get resized.
    pub fn set_size(
        &mut self,
        buffers: Option<&Storage<resource::Buffer<A>>>,
        textures: Option<&Storage<resource::Texture<A>>>,
        views: Option<&Storage<resource::TextureView<A>>>,
        samplers: Option<&Storage<resource::Sampler<A>>>,
        bind_groups: Option<&Storage<binding_model::BindGroup<A>>>,
        compute_pipelines: Option<&Storage<pipeline::ComputePipeline<A>>>,
        render_pipelines: Option<&Storage<pipeline::RenderPipeline<A>>>,
        bundles: Option<&Storage<command::RenderBundle<A>>>,
        query_sets: Option<&Storage<resource::QuerySet<A>>>,
    ) {
        if let Some(buffers) = buffers {
            self.buffers.set_size(buffers.len());
        };
        if let Some(textures) = textures {
            self.textures.set_size(textures.len());
        };
        if let Some(views) = views {
            self.views.set_size(views.len());
        };
        if let Some(samplers) = samplers {
            self.samplers.set_size(samplers.len());
        };
        if let Some(bind_groups) = bind_groups {
            self.bind_groups.set_size(bind_groups.len());
        };
        if let Some(compute_pipelines) = compute_pipelines {
            self.compute_pipelines.set_size(compute_pipelines.len());
        }
        if let Some(render_pipelines) = render_pipelines {
            self.render_pipelines.set_size(render_pipelines.len());
        };
        if let Some(bundles) = bundles {
            self.bundles.set_size(bundles.len());
        };
        if let Some(query_sets) = query_sets {
            self.query_sets.set_size(query_sets.len());
        };
    }

    /// Iterates through all resources in the given bind group and adopts
    /// the state given for those resources in the UsageScope. It also
    /// removes all touched resources from the usage scope.
    ///
    /// If a transition is needed to get the resources into the needed
    /// state, those transitions are stored within the tracker. A
    /// subsequent call to [`BufferTracker::drain`] or
    /// [`TextureTracker::drain`] is needed to get those transitions.
    ///
    /// This is a really funky method used by Compute Passes to generate
    /// barriers after a call to dispatch without needing to iterate
    /// over all elements in the usage scope. We use the bind group as a
    /// source of which IDs to look at. The bind groups
    /// must have first been added to the usage scope.
    ///
    /// Only stateful things are merged in here, all other resources are owned
    /// indirectly by the bind group.
    ///
    /// # Safety
    ///
    /// The maximum ID given by each bind group resource must be less than the
    /// value given to `set_size`
    pub unsafe fn set_and_remove_from_usage_scope_sparse(
        &mut self,
        scope: &mut UsageScope<A>,
        bind_group: &BindGroupStates<A>,
    ) {
        // SAFETY: forwarded; the caller upholds the ID bound stated above.
        unsafe {
            self.buffers.set_and_remove_from_usage_scope_sparse(
                &mut scope.buffers,
                bind_group.buffers.used_ids(),
            )
        };
        // SAFETY: forwarded; the caller upholds the ID bound stated above.
        unsafe {
            self.textures
                .set_and_remove_from_usage_scope_sparse(&mut scope.textures, &bind_group.textures)
        };
    }

    /// Tracks the stateless resources from the given renderbundle. It is expected
    /// that the stateful resources will get merged into a usage scope first.
    ///
    /// # Safety
    ///
    /// The maximum ID given by each bind group resource must be less than the
    /// value given to `set_size`
    pub unsafe fn add_from_render_bundle(
        &mut self,
        render_bundle: &RenderBundleScope<A>,
    ) -> Result<(), UsageConflict> {
        self.bind_groups
            .add_from_tracker(&*render_bundle.bind_groups.read());
        self.render_pipelines
            .add_from_tracker(&*render_bundle.render_pipelines.read());
        self.query_sets
            .add_from_tracker(&*render_bundle.query_sets.read());

        // Stateless tracking itself cannot conflict; the `Result` is kept for
        // signature symmetry with the stateful merge paths.
        Ok(())
    }
}
diff --git a/third_party/rust/wgpu-core/src/track/range.rs b/third_party/rust/wgpu-core/src/track/range.rs
new file mode 100644
index 0000000000..3961220c2c
--- /dev/null
+++ b/third_party/rust/wgpu-core/src/track/range.rs
@@ -0,0 +1,206 @@
+//Note: this could be the only place where we need `SmallVec`.
+//TODO: consider getting rid of it.
+use smallvec::SmallVec;
+
+use std::{fmt::Debug, iter, ops::Range};
+
/// Structure that keeps track of a I -> T mapping,
/// optimized for a case where keys of the same values
/// are often grouped together linearly.
#[derive(Clone, Debug, PartialEq)]
pub(crate) struct RangedStates<I, T> {
    /// List of ranges, each associated with a single value.
    /// Ranges of keys have to be non-intersecting and ordered.
    ranges: SmallVec<[(Range<I>, T); 1]>,
}
+
impl<I: Copy + Ord, T: Copy + PartialEq> RangedStates<I, T> {
    /// Construct a new instance holding a single range/value pair.
    pub fn from_range(range: Range<I>, value: T) -> Self {
        Self {
            ranges: iter::once((range, value)).collect(),
        }
    }

    /// Construct a new instance from a slice of ranges.
    #[cfg(test)]
    pub fn from_slice(values: &[(Range<I>, T)]) -> Self {
        Self {
            ranges: values.iter().cloned().collect(),
        }
    }

    /// Iterate over the stored (range, value) pairs in order.
    pub fn iter(&self) -> impl Iterator<Item = &(Range<I>, T)> + Clone {
        self.ranges.iter()
    }

    /// Mutably iterate over the stored (range, value) pairs in order.
    pub fn iter_mut(&mut self) -> impl Iterator<Item = &mut (Range<I>, T)> {
        self.ranges.iter_mut()
    }

    /// Check that all the ranges are non-intersecting and ordered.
    /// Panics otherwise.
    #[cfg(test)]
    fn check_sanity(&self) {
        // Every stored range must be non-empty...
        for a in self.ranges.iter() {
            assert!(a.0.start < a.0.end);
        }
        // ...and each range must end no later than the next one starts.
        for (a, b) in self.ranges.iter().zip(self.ranges[1..].iter()) {
            assert!(a.0.end <= b.0.start);
        }
    }

    /// Merge the neighboring ranges together, where possible.
    pub fn coalesce(&mut self) {
        let mut num_removed = 0;
        let mut iter = self.ranges.iter_mut();
        let mut cur = match iter.next() {
            Some(elem) => elem,
            None => return,
        };
        for next in iter {
            if cur.0.end == next.0.start && cur.1 == next.1 {
                // Adjacent ranges with equal values: extend `cur` over `next`
                // and mark `next` empty so it can be dropped afterwards.
                num_removed += 1;
                cur.0.end = next.0.end;
                next.0.end = next.0.start;
            } else {
                cur = next;
            }
        }
        if num_removed != 0 {
            // Drop the ranges that were emptied in the pass above.
            self.ranges.retain(|pair| pair.0.start != pair.0.end);
        }
    }

    /// Iterate over the stored ranges that intersect `range`, clamping each
    /// yielded range to the intersection with `range`.
    pub fn iter_filter<'a>(
        &'a self,
        range: &'a Range<I>,
    ) -> impl Iterator<Item = (Range<I>, &T)> + 'a {
        self.ranges
            .iter()
            .filter(move |&(inner, ..)| inner.end > range.start && inner.start < range.end)
            .map(move |(inner, v)| {
                // Clamp the stored range to the queried window.
                let new_range = inner.start.max(range.start)..inner.end.min(range.end);

                (new_range, v)
            })
    }

    /// Split the storage ranges in such a way that there is a linear subset of
    /// them occupying exactly `index` range, which is returned mutably.
    ///
    /// Gaps in the ranges are filled with `default` value.
    pub fn isolate(&mut self, index: &Range<I>, default: T) -> &mut [(Range<I>, T)] {
        //TODO: implement this in 2 passes:
        // 1. scan the ranges to figure out how many extra ones need to be inserted
        // 2. go through the ranges by moving them them to the right and inserting the missing ones

        // Find the first stored range ending after `index` starts. If no
        // range does, `index` is appended whole as a single new entry.
        let mut start_pos = match self.ranges.iter().position(|pair| pair.0.end > index.start) {
            Some(pos) => pos,
            None => {
                let pos = self.ranges.len();
                self.ranges.push((index.clone(), default));
                return &mut self.ranges[pos..];
            }
        };

        {
            // If the first overlapping range starts before `index`, split off
            // its non-overlapping head so the returned slice begins exactly
            // at `index.start`.
            let (range, value) = self.ranges[start_pos].clone();
            if range.start < index.start {
                self.ranges[start_pos].0.start = index.start;
                self.ranges
                    .insert(start_pos, (range.start..index.start, value));
                start_pos += 1;
            }
        }
        let mut pos = start_pos;
        let mut range_pos = index.start;
        loop {
            let (range, value) = self.ranges[pos].clone();
            if range.start >= index.end {
                // The current range lies entirely past `index`: fill the
                // remaining tail gap with `default` and stop.
                self.ranges.insert(pos, (range_pos..index.end, default));
                pos += 1;
                break;
            }
            if range.start > range_pos {
                // Gap before the current range: fill it with `default`.
                self.ranges.insert(pos, (range_pos..range.start, default));
                pos += 1;
                range_pos = range.start;
            }
            if range.end >= index.end {
                // The current range reaches the end of `index`; split off the
                // overhang if it extends past `index.end`.
                if range.end != index.end {
                    self.ranges[pos].0.start = index.end;
                    self.ranges.insert(pos, (range_pos..index.end, value));
                }
                pos += 1;
                break;
            }
            pos += 1;
            range_pos = range.end;
            if pos == self.ranges.len() {
                // Ran off the end of storage: fill the remainder with `default`.
                self.ranges.push((range_pos..index.end, default));
                pos += 1;
                break;
            }
        }

        &mut self.ranges[start_pos..pos]
    }

    /// Helper method for isolation that checks the sanity of the results.
    #[cfg(test)]
    pub fn sanely_isolated(&self, index: Range<I>, default: T) -> Vec<(Range<I>, T)> {
        let mut clone = self.clone();
        let result = clone.isolate(&index, default).to_vec();
        clone.check_sanity();
        result
    }
}
+
#[cfg(test)]
mod test {
    //TODO: randomized/fuzzy testing
    use super::RangedStates;

    /// Well-formed input: non-empty, ordered, non-overlapping ranges pass.
    #[test]
    fn sane_good() {
        let rs = RangedStates::from_slice(&[(1..4, 9u8), (4..5, 9)]);
        rs.check_sanity();
    }

    /// An empty range (5..5) must be rejected by the sanity check.
    #[test]
    #[should_panic]
    fn sane_empty() {
        let rs = RangedStates::from_slice(&[(1..4, 9u8), (5..5, 9)]);
        rs.check_sanity();
    }

    /// Overlapping ranges (1..4 and 3..5) must be rejected.
    #[test]
    #[should_panic]
    fn sane_intersect() {
        let rs = RangedStates::from_slice(&[(1..4, 9u8), (3..5, 9)]);
        rs.check_sanity();
    }

    /// Adjacent equal-valued ranges merge; differing values stay separate.
    #[test]
    fn coalesce() {
        let mut rs = RangedStates::from_slice(&[(1..4, 9u8), (4..5, 9), (5..7, 1), (8..9, 1)]);
        rs.coalesce();
        rs.check_sanity();
        assert_eq!(rs.ranges.as_slice(), &[(1..5, 9), (5..7, 1), (8..9, 1),]);
    }

    /// `isolate` clamps, splits, and gap-fills with the default value.
    #[test]
    fn isolate() {
        let rs = RangedStates::from_slice(&[(1..4, 9u8), (4..5, 9), (5..7, 1), (8..9, 1)]);
        assert_eq!(&rs.sanely_isolated(4..5, 0), &[(4..5, 9u8),]);
        assert_eq!(
            &rs.sanely_isolated(0..6, 0),
            &[(0..1, 0), (1..4, 9u8), (4..5, 9), (5..6, 1),]
        );
        assert_eq!(&rs.sanely_isolated(8..10, 1), &[(8..9, 1), (9..10, 1),]);
        assert_eq!(
            &rs.sanely_isolated(6..9, 0),
            &[(6..7, 1), (7..8, 0), (8..9, 1),]
        );
    }
}
diff --git a/third_party/rust/wgpu-core/src/track/stateless.rs b/third_party/rust/wgpu-core/src/track/stateless.rs
new file mode 100644
index 0000000000..4111a90f79
--- /dev/null
+++ b/third_party/rust/wgpu-core/src/track/stateless.rs
@@ -0,0 +1,238 @@
+/*! Stateless Trackers
+ *
+ * Stateless trackers don't have any state, so make no
+ * distinction between a usage scope and a full tracker.
+!*/
+
+use std::sync::Arc;
+
+use parking_lot::Mutex;
+
+use crate::{id::Id, resource::Resource, resource_log, storage::Storage, track::ResourceMetadata};
+
+use super::ResourceTracker;
+
/// An id paired with a strong reference to the resource it names.
/// Alias exists to satisfy clippy (presumably its type-complexity lint).
type Pair<T> = (Id<<T as Resource>::Marker>, Arc<T>);
+
/// Stores all the resources that a bind group stores.
///
/// NOTE(review): "Sate" is a long-standing typo for "State"; the name is kept
/// because renaming would touch every caller (e.g. `BindGroupStates` in mod.rs).
#[derive(Debug)]
pub(crate) struct StatelessBindGroupSate<T: Resource> {
    /// (id, strong reference) pairs; may contain duplicates.
    resources: Mutex<Vec<Pair<T>>>,
}
+
impl<T: Resource> StatelessBindGroupSate<T> {
    /// Creates an empty bind group state.
    pub fn new() -> Self {
        Self {
            resources: Mutex::new(Vec::new()),
        }
    }

    /// Optimize the buffer bind group state by sorting it by ID.
    ///
    /// When this list of states is merged into a tracker, the memory
    /// accesses will be in a constant ascending order.
    pub(crate) fn optimize(&self) {
        let mut resources = self.resources.lock();
        resources.sort_unstable_by_key(|&(id, _)| id.unzip().0);
    }

    /// Returns a list of all resources tracked. May contain duplicates.
    pub fn used_resources(&self) -> impl Iterator<Item = Arc<T>> + '_ {
        let resources = self.resources.lock();
        // Collect while the lock is held so the returned iterator does not
        // borrow the mutex guard.
        resources
            .iter()
            .map(|(_, resource)| resource.clone())
            .collect::<Vec<_>>()
            .into_iter()
    }

    /// Returns a list of all resources tracked, clearing the list.
    /// May contain duplicates.
    pub fn drain_resources(&self) -> impl Iterator<Item = Arc<T>> + '_ {
        let mut resources = self.resources.lock();
        // Same pattern as `used_resources`: detach from the lock guard.
        resources
            .drain(..)
            .map(|(_, r)| r)
            .collect::<Vec<_>>()
            .into_iter()
    }

    /// Adds the given resource; returns `None` if `id` is not in `storage`.
    pub fn add_single<'a>(&self, storage: &'a Storage<T>, id: Id<T::Marker>) -> Option<&'a T> {
        let resource = storage.get(id).ok()?;

        let mut resources = self.resources.lock();
        resources.push((id, resource.clone()));

        Some(resource)
    }
}
+
/// Stores all resource state within a command buffer or device.
#[derive(Debug)]
pub(crate) struct StatelessTracker<T: Resource> {
    /// Ownership metadata (strong references), indexed by resource id index.
    metadata: ResourceMetadata<T>,
}
+
impl<T: Resource> ResourceTracker<T> for StatelessTracker<T> {
    /// Try to remove the given resource from the tracker iff we have the last reference to the
    /// resource and the epoch matches.
    ///
    /// Returns true if the resource was removed or if not existing in metadata.
    ///
    /// If the ID is higher than the length of internal vectors,
    /// false will be returned.
    fn remove_abandoned(&mut self, id: Id<T::Marker>) -> bool {
        let index = id.unzip().0 as usize;

        if index >= self.metadata.size() {
            return false;
        }

        resource_log!("StatelessTracker::remove_abandoned {id:?}");

        self.tracker_assert_in_bounds(index);

        // SAFETY: `index` was bounds-checked against the metadata size above.
        unsafe {
            if self.metadata.contains_unchecked(index) {
                let existing_ref_count = self.metadata.get_ref_count_unchecked(index);
                //RefCount 2 means that resource is hold just by DeviceTracker and this suspected resource itself
                //so it's already been released from user and so it's not inside Registry\Storage
                if existing_ref_count <= 2 {
                    self.metadata.remove(index);
                    log::trace!("{} {:?} is not tracked anymore", T::TYPE, id,);
                    return true;
                } else {
                    log::trace!(
                        "{} {:?} is still referenced from {}",
                        T::TYPE,
                        id,
                        existing_ref_count
                    );
                    return false;
                }
            }
        }
        // Not present in the metadata at all: report it as already removed.
        true
    }
}
+
+impl<T: Resource> StatelessTracker<T> {
+ pub fn new() -> Self {
+ Self {
+ metadata: ResourceMetadata::new(),
+ }
+ }
+
+ fn tracker_assert_in_bounds(&self, index: usize) {
+ self.metadata.tracker_assert_in_bounds(index);
+ }
+
+ /// Sets the size of all the vectors inside the tracker.
+ ///
+ /// Must be called with the highest possible Resource ID of this type
+ /// before all unsafe functions are called.
+ pub fn set_size(&mut self, size: usize) {
+ self.metadata.set_size(size);
+ }
+
+ /// Extend the vectors to let the given index be valid.
+ fn allow_index(&mut self, index: usize) {
+ if index >= self.metadata.size() {
+ self.set_size(index + 1);
+ }
+ }
+
+ /// Returns a list of all resources tracked.
+ pub fn used_resources(&self) -> impl Iterator<Item = Arc<T>> + '_ {
+ self.metadata.owned_resources()
+ }
+
+ /// Returns a list of all resources tracked.
+ pub fn drain_resources(&mut self) -> impl Iterator<Item = Arc<T>> + '_ {
+ let resources = self.metadata.drain_resources();
+ resources.into_iter()
+ }
+
+ /// Inserts a single resource into the resource tracker.
+ ///
+ /// If the resource already exists in the tracker, it will be overwritten.
+ ///
+ /// If the ID is higher than the length of internal vectors,
+ /// the vectors will be extended. A call to set_size is not needed.
+ pub fn insert_single(&mut self, id: Id<T::Marker>, resource: Arc<T>) {
+ let (index32, _epoch, _) = id.unzip();
+ let index = index32 as usize;
+
+ self.allow_index(index);
+
+ self.tracker_assert_in_bounds(index);
+
+ unsafe {
+ self.metadata.insert(index, resource);
+ }
+ }
+
+ /// Adds the given resource to the tracker.
+ ///
+ /// If the ID is higher than the length of internal vectors,
+ /// the vectors will be extended. A call to set_size is not needed.
+ pub fn add_single<'a>(
+ &mut self,
+ storage: &'a Storage<T>,
+ id: Id<T::Marker>,
+ ) -> Option<&'a Arc<T>> {
+ let resource = storage.get(id).ok()?;
+
+ let (index32, _epoch, _) = id.unzip();
+ let index = index32 as usize;
+
+ self.allow_index(index);
+
+ self.tracker_assert_in_bounds(index);
+
+ unsafe {
+ self.metadata.insert(index, resource.clone());
+ }
+
+ Some(resource)
+ }
+
+ /// Adds the given resources from the given tracker.
+ ///
+ /// If the ID is higher than the length of internal vectors,
+ /// the vectors will be extended. A call to set_size is not needed.
+ pub fn add_from_tracker(&mut self, other: &Self) {
+ let incoming_size = other.metadata.size();
+ if incoming_size > self.metadata.size() {
+ self.set_size(incoming_size);
+ }
+
+ for index in other.metadata.owned_indices() {
+ self.tracker_assert_in_bounds(index);
+ other.tracker_assert_in_bounds(index);
+ unsafe {
+ let previously_owned = self.metadata.contains_unchecked(index);
+
+ if !previously_owned {
+ let other_resource = other.metadata.get_resource_unchecked(index);
+ self.metadata.insert(index, other_resource.clone());
+ }
+ }
+ }
+ }
+
+ pub fn get(&self, id: Id<T::Marker>) -> Option<&Arc<T>> {
+ let index = id.unzip().0 as usize;
+ if index > self.metadata.size() {
+ return None;
+ }
+ self.tracker_assert_in_bounds(index);
+ unsafe {
+ if self.metadata.contains_unchecked(index) {
+ return Some(self.metadata.get_resource_unchecked(index));
+ }
+ }
+ None
+ }
+}
diff --git a/third_party/rust/wgpu-core/src/track/texture.rs b/third_party/rust/wgpu-core/src/track/texture.rs
new file mode 100644
index 0000000000..601df11e1b
--- /dev/null
+++ b/third_party/rust/wgpu-core/src/track/texture.rs
@@ -0,0 +1,1492 @@
+/*! Texture Trackers
+ *
+ * Texture trackers are significantly more complicated than
+ * the buffer trackers because textures can be in a "complex"
+ * state where each individual subresource can potentially be
+ * in a different state from every other subresource. These
+ * complex states are stored separately from the simple states
+ * because they are significantly more difficult to track and
+ * most resources spend the vast majority of their lives in
+ * simple states.
+ *
+ * There are two special texture usages: `UNKNOWN` and `UNINITIALIZED`.
+ * - `UNKNOWN` is only used in complex states and is used to signify
+ * that the complex state does not know anything about those subresources.
+ * It cannot leak into transitions, it is invalid to transition into UNKNOWN
+ * state.
+ * - `UNINITIALIZED` is used in both simple and complex states to mean the texture
+ * is known to be in some undefined state. Any transition away from UNINITIALIZED
+ * will treat the contents as junk.
+!*/
+
+use super::{range::RangedStates, PendingTransition, PendingTransitionList, ResourceTracker};
+use crate::{
+ hal_api::HalApi,
+ id::TextureId,
+ resource::{Resource, Texture, TextureInner},
+ snatch::SnatchGuard,
+ track::{
+ invalid_resource_state, skip_barrier, ResourceMetadata, ResourceMetadataProvider,
+ ResourceUses, UsageConflict,
+ },
+};
+use hal::TextureUses;
+
+use arrayvec::ArrayVec;
+use naga::FastHashMap;
+
+use parking_lot::Mutex;
+use wgt::{strict_assert, strict_assert_eq};
+
+use std::{borrow::Cow, iter, marker::PhantomData, ops::Range, sync::Arc, vec::Drain};
+
+/// Specifies a particular set of subresources in a texture.
+///
+/// Both ranges are half-open: `mips` selects mip levels and
+/// `layers` selects array layers.
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub struct TextureSelector {
+ pub mips: Range<u32>,
+ pub layers: Range<u32>,
+}
+
+impl ResourceUses for TextureUses {
+ /// Usages that may not be combined with any other usage.
+ const EXCLUSIVE: Self = Self::EXCLUSIVE;
+
+ type Selector = TextureSelector;
+
+ fn bits(self) -> u16 {
+ // Delegates to the inherent bitflags `bits`, not this trait method.
+ Self::bits(&self)
+ }
+
+ fn all_ordered(self) -> bool {
+ // True when every set usage is in the ORDERED set, i.e. transitions
+ // between them need no barrier.
+ Self::ORDERED.contains(self)
+ }
+
+ fn any_exclusive(self) -> bool {
+ self.intersects(Self::EXCLUSIVE)
+ }
+}
+
+/// Represents the complex state of textures where every subresource is potentially
+/// in a different state.
+#[derive(Clone, Debug, Default, PartialEq)]
+struct ComplexTextureState {
+ // One entry per mip level; each entry maps layer ranges to their usage.
+ mips: ArrayVec<RangedStates<u32, TextureUses>, { hal::MAX_MIP_LEVELS as usize }>,
+}
+
+impl ComplexTextureState {
+ /// Creates complex texture state for the given sizes.
+ ///
+ /// This state will be initialized with the UNKNOWN state, a special state
+ /// which means the tracker knows nothing about the state.
+ fn new(mip_level_count: u32, array_layer_count: u32) -> Self {
+ Self {
+ mips: iter::repeat_with(|| {
+ RangedStates::from_range(0..array_layer_count, TextureUses::UNKNOWN)
+ })
+ .take(mip_level_count as usize)
+ .collect(),
+ }
+ }
+
+ /// Initialize a complex state from a selector representing the full size of the texture
+ /// and an iterator of a selector and a texture use, specifying a usage for a specific
+ /// set of subresources.
+ ///
+ /// [`Self::to_selector_state_iter`] can be used to create such an iterator.
+ ///
+ /// # Safety
+ ///
+ /// All selectors in the iterator must be inside of the full_range selector.
+ ///
+ /// The full range selector must have mips and layers start at 0.
+ unsafe fn from_selector_state_iter(
+ full_range: TextureSelector,
+ state_iter: impl Iterator<Item = (TextureSelector, TextureUses)>,
+ ) -> Self {
+ strict_assert_eq!(full_range.layers.start, 0);
+ strict_assert_eq!(full_range.mips.start, 0);
+
+ // Start fully UNKNOWN, then overwrite the ranges named by the iterator.
+ let mut complex =
+ ComplexTextureState::new(full_range.mips.len() as u32, full_range.layers.len() as u32);
+ for (selector, desired_state) in state_iter {
+ strict_assert!(selector.layers.end <= full_range.layers.end);
+ strict_assert!(selector.mips.end <= full_range.mips.end);
+
+ // This should only ever happen with a wgpu bug, but let's just double
+ // check that resource states don't have any conflicts.
+ strict_assert_eq!(invalid_resource_state(desired_state), false);
+
+ let mips = selector.mips.start as usize..selector.mips.end as usize;
+ // SAFETY: caller guarantees all selectors lie within `full_range`.
+ for mip in unsafe { complex.mips.get_unchecked_mut(mips) } {
+ // `isolate` splits the layer ranges so the selected span can be
+ // written without disturbing neighboring layer states.
+ for &mut (_, ref mut state) in mip.isolate(&selector.layers, TextureUses::UNKNOWN) {
+ *state = desired_state;
+ }
+ }
+ }
+ complex
+ }
+
+ /// Convert a complex state into an iterator over all states stored.
+ ///
+ /// [`Self::from_selector_state_iter`] can be used to consume such an iterator.
+ fn to_selector_state_iter(
+ &self,
+ ) -> impl Iterator<Item = (TextureSelector, TextureUses)> + Clone + '_ {
+ self.mips.iter().enumerate().flat_map(|(mip, inner)| {
+ let mip = mip as u32;
+ {
+ // Each yielded selector covers exactly one mip level and one
+ // contiguous layer range.
+ inner.iter().map(move |&(ref layers, inner)| {
+ (
+ TextureSelector {
+ mips: mip..mip + 1,
+ layers: layers.clone(),
+ },
+ inner,
+ )
+ })
+ }
+ })
+ }
+}
+
+/// A single texture binding recorded by a bind group.
+#[derive(Debug)]
+struct TextureBindGroupStateData<A: HalApi> {
+ // `None` means the binding covers the texture's full subresource range.
+ selector: Option<TextureSelector>,
+ texture: Arc<Texture<A>>,
+ usage: TextureUses,
+}
+
+/// Stores all the textures that a bind group stores.
+#[derive(Debug)]
+pub(crate) struct TextureBindGroupState<A: HalApi> {
+ // Interior mutability so bindings can be recorded through `&self`.
+ textures: Mutex<Vec<TextureBindGroupStateData<A>>>,
+}
+impl<A: HalApi> TextureBindGroupState<A> {
+ /// Creates an empty bind group state.
+ pub fn new() -> Self {
+ Self {
+ textures: Mutex::new(Vec::new()),
+ }
+ }
+
+ /// Optimize the texture bind group state by sorting it by ID.
+ ///
+ /// When this list of states is merged into a tracker, the memory
+ /// accesses will be in a constant ascending order.
+ pub(crate) fn optimize(&self) {
+ let mut textures = self.textures.lock();
+ textures.sort_unstable_by_key(|v| v.texture.as_info().id().unzip().0);
+ }
+
+ /// Returns a list of all textures tracked. May contain duplicates.
+ //
+ // Collects into a Vec so the lock guard can be released before the
+ // iterator is handed back to the caller.
+ pub fn drain_resources(&self) -> impl Iterator<Item = Arc<Texture<A>>> + '_ {
+ let mut textures = self.textures.lock();
+ textures
+ .drain(..)
+ .map(|v| v.texture)
+ .collect::<Vec<_>>()
+ .into_iter()
+ }
+
+ /// Adds the given resource with the given state.
+ ///
+ /// Always succeeds; the `Option` return mirrors the other trackers'
+ /// `add_single` signatures.
+ pub fn add_single<'a>(
+ &self,
+ texture: &'a Arc<Texture<A>>,
+ selector: Option<TextureSelector>,
+ state: TextureUses,
+ ) -> Option<&'a Arc<Texture<A>>> {
+ let mut textures = self.textures.lock();
+ textures.push(TextureBindGroupStateData {
+ selector,
+ texture: texture.clone(),
+ usage: state,
+ });
+ Some(texture)
+ }
+}
+
+/// Container for corresponding simple and complex texture states.
+#[derive(Debug)]
+pub(crate) struct TextureStateSet {
+ // One usage per tracker index. The sentinel value `TextureUses::COMPLEX`
+ // means the real state lives in `complex` under the same index.
+ simple: Vec<TextureUses>,
+ complex: FastHashMap<usize, ComplexTextureState>,
+}
+impl TextureStateSet {
+ /// Creates an empty state set.
+ fn new() -> Self {
+ Self {
+ simple: Vec::new(),
+ complex: FastHashMap::default(),
+ }
+ }
+
+ /// Removes every tracked state, keeping allocations for reuse.
+ fn clear(&mut self) {
+ self.complex.clear();
+ self.simple.clear();
+ }
+
+ /// Resizes the simple-state vector to `new_size` entries, filling any
+ /// newly created entries with `UNINITIALIZED`.
+ fn set_size(&mut self, new_size: usize) {
+ self.simple.resize(new_size, TextureUses::UNINITIALIZED);
+ }
+}
+
+/// Stores all texture state within a single usage scope.
+#[derive(Debug)]
+pub(crate) struct TextureUsageScope<A: HalApi> {
+ // Merged usage per tracker index.
+ set: TextureStateSet,
+ // Ownership/epoch information for each tracked index.
+ metadata: ResourceMetadata<Texture<A>>,
+}
+
+impl<A: HalApi> TextureUsageScope<A> {
+ /// Creates an empty usage scope.
+ pub fn new() -> Self {
+ Self {
+ set: TextureStateSet::new(),
+
+ metadata: ResourceMetadata::new(),
+ }
+ }
+
+ // Validates (in strict builds only) that `index` is in bounds and that a
+ // COMPLEX simple state always has a matching entry in the complex map.
+ fn tracker_assert_in_bounds(&self, index: usize) {
+ self.metadata.tracker_assert_in_bounds(index);
+
+ strict_assert!(index < self.set.simple.len());
+
+ strict_assert!(if self.metadata.contains(index)
+ && self.set.simple[index] == TextureUses::COMPLEX
+ {
+ self.set.complex.contains_key(&index)
+ } else {
+ true
+ });
+ }
+
+ /// Sets the size of all the vectors inside the tracker.
+ ///
+ /// Must be called with the highest possible Texture ID before
+ /// all unsafe functions are called.
+ pub fn set_size(&mut self, size: usize) {
+ self.set.set_size(size);
+ self.metadata.set_size(size);
+ }
+
+ /// Drains all textures tracked.
+ pub(crate) fn drain_resources(&mut self) -> impl Iterator<Item = Arc<Texture<A>>> + '_ {
+ let resources = self.metadata.drain_resources();
+ self.set.clear();
+ resources.into_iter()
+ }
+
+ /// Returns true if the tracker owns no resources.
+ ///
+ /// This is a O(n) operation.
+ pub(crate) fn is_empty(&self) -> bool {
+ self.metadata.is_empty()
+ }
+
+ /// Merge the list of texture states in the given usage scope into this UsageScope.
+ ///
+ /// If any of the resulting states is invalid, stops the merge and returns a usage
+ /// conflict with the details of the invalid state.
+ ///
+ /// If the given tracker uses IDs higher than the length of internal vectors,
+ /// the vectors will be extended. A call to set_size is not needed.
+ pub fn merge_usage_scope(&mut self, scope: &Self) -> Result<(), UsageConflict> {
+ let incoming_size = scope.set.simple.len();
+ if incoming_size > self.set.simple.len() {
+ self.set_size(incoming_size);
+ }
+
+ for index in scope.metadata.owned_indices() {
+ self.tracker_assert_in_bounds(index);
+ scope.tracker_assert_in_bounds(index);
+
+ // SAFETY: `index` comes from `owned_indices` and was asserted in
+ // bounds for both scopes above.
+ let texture_selector =
+ unsafe { &scope.metadata.get_resource_unchecked(index).full_range };
+ unsafe {
+ insert_or_merge(
+ texture_selector,
+ &mut self.set,
+ &mut self.metadata,
+ index,
+ TextureStateProvider::TextureSet { set: &scope.set },
+ ResourceMetadataProvider::Indirect {
+ metadata: &scope.metadata,
+ },
+ )?
+ };
+ }
+
+ Ok(())
+ }
+
+ /// Merge the list of texture states in the given bind group into this usage scope.
+ ///
+ /// If any of the resulting states is invalid, stops the merge and returns a usage
+ /// conflict with the details of the invalid state.
+ ///
+ /// Because bind groups do not check if the union of all their states is valid,
+ /// this method is allowed to return Err on the first bind group bound.
+ ///
+ /// # Safety
+ ///
+ /// [`Self::set_size`] must be called with the maximum possible Texture ID before this
+ /// method is called.
+ pub unsafe fn merge_bind_group(
+ &mut self,
+ bind_group: &TextureBindGroupState<A>,
+ ) -> Result<(), UsageConflict> {
+ let textures = bind_group.textures.lock();
+ for t in &*textures {
+ unsafe { self.merge_single(&t.texture, t.selector.clone(), t.usage)? };
+ }
+
+ Ok(())
+ }
+
+ /// Merge a single state into the UsageScope.
+ ///
+ /// If the resulting state is invalid, returns a usage
+ /// conflict with the details of the invalid state.
+ ///
+ /// # Safety
+ ///
+ /// Unlike other trackers whose merge_single is safe, this method is only
+ /// called where there is already other unsafe tracking functions active,
+ /// so we can prove this unsafe "for free".
+ ///
+ /// [`Self::set_size`] must be called with the maximum possible Texture ID before this
+ /// method is called.
+ pub unsafe fn merge_single(
+ &mut self,
+ texture: &Arc<Texture<A>>,
+ selector: Option<TextureSelector>,
+ new_state: TextureUses,
+ ) -> Result<(), UsageConflict> {
+ let index = texture.as_info().id().unzip().0 as usize;
+
+ self.tracker_assert_in_bounds(index);
+
+ let texture_selector = &texture.full_range;
+ // SAFETY: caller guarantees `set_size` covered this index.
+ unsafe {
+ insert_or_merge(
+ texture_selector,
+ &mut self.set,
+ &mut self.metadata,
+ index,
+ TextureStateProvider::from_option(selector, new_state),
+ ResourceMetadataProvider::Direct {
+ resource: Cow::Borrowed(texture),
+ },
+ )?
+ };
+
+ Ok(())
+ }
+}
+
+/// Stores all texture state within a command buffer or device.
+pub(crate) struct TextureTracker<A: HalApi> {
+ // State of each texture when the tracked work begins.
+ start_set: TextureStateSet,
+ // State of each texture after the tracked work completes.
+ end_set: TextureStateSet,
+
+ metadata: ResourceMetadata<Texture<A>>,
+
+ // Scratch buffer of pending transitions, drained via `drain_transitions`.
+ temp: Vec<PendingTransition<TextureUses>>,
+
+ // NOTE(review): looks redundant — `metadata` already carries `A`; confirm
+ // before removing, since dropping the field changes the struct layout.
+ _phantom: PhantomData<A>,
+}
+
+impl<A: HalApi> ResourceTracker<Texture<A>> for TextureTracker<A> {
+ /// Try to remove the given resource from the tracker iff we have the last reference to the
+ /// resource and the epoch matches.
+ ///
+ /// Returns true if the resource was removed or if not existing in metadata.
+ ///
+ /// If the ID is higher than the length of internal vectors,
+ /// false will be returned.
+ fn remove_abandoned(&mut self, id: TextureId) -> bool {
+ let index = id.unzip().0 as usize;
+
+ // `size()` is the vector length, so `index == size` is already out of
+ // bounds; the previous `>` comparison let it reach `contains_unchecked`.
+ if index >= self.metadata.size() {
+ return false;
+ }
+
+ self.tracker_assert_in_bounds(index);
+
+ // SAFETY: `index` was bounds-checked above.
+ unsafe {
+ if self.metadata.contains_unchecked(index) {
+ let existing_ref_count = self.metadata.get_ref_count_unchecked(index);
+ //RefCount 2 means that resource is hold just by DeviceTracker and this suspected resource itself
+ //so it's already been released from user and so it's not inside Registry\Storage
+ if existing_ref_count <= 2 {
+ self.start_set.complex.remove(&index);
+ self.end_set.complex.remove(&index);
+ self.metadata.remove(index);
+ log::trace!("Texture {:?} is not tracked anymore", id,);
+ return true;
+ } else {
+ log::trace!(
+ "Texture {:?} is still referenced from {}",
+ id,
+ existing_ref_count
+ );
+ return false;
+ }
+ }
+ }
+ true
+ }
+}
+
+impl<A: HalApi> TextureTracker<A> {
+ /// Creates an empty tracker.
+ pub fn new() -> Self {
+ Self {
+ start_set: TextureStateSet::new(),
+ end_set: TextureStateSet::new(),
+
+ metadata: ResourceMetadata::new(),
+
+ temp: Vec::new(),
+
+ _phantom: PhantomData,
+ }
+ }
+
+ // Validates (in strict builds only) that `index` is in bounds and that a
+ // COMPLEX simple state always has a matching entry in the complex map.
+ fn tracker_assert_in_bounds(&self, index: usize) {
+ self.metadata.tracker_assert_in_bounds(index);
+
+ strict_assert!(index < self.start_set.simple.len());
+ strict_assert!(index < self.end_set.simple.len());
+
+ strict_assert!(if self.metadata.contains(index)
+ && self.start_set.simple[index] == TextureUses::COMPLEX
+ {
+ self.start_set.complex.contains_key(&index)
+ } else {
+ true
+ });
+ strict_assert!(if self.metadata.contains(index)
+ && self.end_set.simple[index] == TextureUses::COMPLEX
+ {
+ self.end_set.complex.contains_key(&index)
+ } else {
+ true
+ });
+ }
+
+ /// Sets the size of all the vectors inside the tracker.
+ ///
+ /// Must be called with the highest possible Texture ID before
+ /// all unsafe functions are called.
+ pub fn set_size(&mut self, size: usize) {
+ self.start_set.set_size(size);
+ self.end_set.set_size(size);
+
+ self.metadata.set_size(size);
+ }
+
+ /// Extend the vectors to let the given index be valid.
+ fn allow_index(&mut self, index: usize) {
+ if index >= self.start_set.simple.len() {
+ self.set_size(index + 1);
+ }
+ }
+
+ /// Returns a list of all textures tracked.
+ pub fn used_resources(&self) -> impl Iterator<Item = Arc<Texture<A>>> + '_ {
+ self.metadata.owned_resources()
+ }
+
+ /// Drain all currently pending transitions.
+ ///
+ /// Returns the transitions together with each texture's hal inner,
+ /// resolved through the snatch guard, in matching order.
+ pub fn drain_transitions<'a>(
+ &'a mut self,
+ snatch_guard: &'a SnatchGuard<'a>,
+ ) -> (PendingTransitionList, Vec<Option<&'a TextureInner<A>>>) {
+ let mut textures = Vec::new();
+ let transitions = self
+ .temp
+ .drain(..)
+ .map(|pending| {
+ // `pending.id` holds the tracker index of the texture.
+ let tex = unsafe { self.metadata.get_resource_unchecked(pending.id as _) };
+ textures.push(tex.inner.get(snatch_guard));
+ pending
+ })
+ .collect();
+ (transitions, textures)
+ }
+
+ /// Inserts a single texture and a state into the resource tracker.
+ ///
+ /// If the resource already exists in the tracker, this will panic.
+ ///
+ /// If the ID is higher than the length of internal vectors,
+ /// the vectors will be extended. A call to set_size is not needed.
+ pub fn insert_single(&mut self, id: TextureId, resource: Arc<Texture<A>>, usage: TextureUses) {
+ let index = id.unzip().0 as usize;
+
+ self.allow_index(index);
+
+ self.tracker_assert_in_bounds(index);
+
+ // SAFETY: `allow_index` guaranteed `index` is in bounds.
+ unsafe {
+ let currently_owned = self.metadata.contains_unchecked(index);
+
+ if currently_owned {
+ panic!("Tried to insert texture already tracked");
+ }
+
+ insert(
+ None,
+ Some(&mut self.start_set),
+ &mut self.end_set,
+ &mut self.metadata,
+ index,
+ TextureStateProvider::KnownSingle { state: usage },
+ None,
+ ResourceMetadataProvider::Direct {
+ resource: Cow::Owned(resource),
+ },
+ )
+ };
+ }
+
+ /// Sets the state of a single texture.
+ ///
+ /// If a transition is needed to get the texture into the given state, that transition
+ /// is returned.
+ ///
+ /// If the ID is higher than the length of internal vectors,
+ /// the vectors will be extended. A call to set_size is not needed.
+ pub fn set_single(
+ &mut self,
+ texture: &Arc<Texture<A>>,
+ selector: TextureSelector,
+ new_state: TextureUses,
+ ) -> Option<Drain<'_, PendingTransition<TextureUses>>> {
+ let index = texture.as_info().id().unzip().0 as usize;
+
+ self.allow_index(index);
+
+ self.tracker_assert_in_bounds(index);
+
+ // SAFETY: `allow_index` guaranteed `index` is in bounds.
+ unsafe {
+ insert_or_barrier_update(
+ &texture.full_range,
+ Some(&mut self.start_set),
+ &mut self.end_set,
+ &mut self.metadata,
+ index,
+ TextureStateProvider::Selector {
+ selector,
+ state: new_state,
+ },
+ None,
+ ResourceMetadataProvider::Direct {
+ resource: Cow::Owned(texture.clone()),
+ },
+ &mut self.temp,
+ )
+ }
+
+ Some(self.temp.drain(..))
+ }
+
+ /// Sets the given state for all texture in the given tracker.
+ ///
+ /// If a transition is needed to get the texture into the needed state,
+ /// those transitions are stored within the tracker. A subsequent
+ /// call to [`Self::drain_transitions`] is needed to get those transitions.
+ ///
+ /// If the ID is higher than the length of internal vectors,
+ /// the vectors will be extended. A call to set_size is not needed.
+ pub fn set_from_tracker(&mut self, tracker: &Self) {
+ let incoming_size = tracker.start_set.simple.len();
+ if incoming_size > self.start_set.simple.len() {
+ self.set_size(incoming_size);
+ }
+
+ for index in tracker.metadata.owned_indices() {
+ self.tracker_assert_in_bounds(index);
+ tracker.tracker_assert_in_bounds(index);
+ // SAFETY: both trackers were just asserted in bounds for `index`.
+ unsafe {
+ let texture_selector = &tracker.metadata.get_resource_unchecked(index).full_range;
+ insert_or_barrier_update(
+ texture_selector,
+ Some(&mut self.start_set),
+ &mut self.end_set,
+ &mut self.metadata,
+ index,
+ TextureStateProvider::TextureSet {
+ set: &tracker.start_set,
+ },
+ Some(TextureStateProvider::TextureSet {
+ set: &tracker.end_set,
+ }),
+ ResourceMetadataProvider::Indirect {
+ metadata: &tracker.metadata,
+ },
+ &mut self.temp,
+ );
+ }
+ }
+ }
+
+ /// Sets the given state for all textures in the given UsageScope.
+ ///
+ /// If a transition is needed to get the textures into the needed state,
+ /// those transitions are stored within the tracker. A subsequent
+ /// call to [`Self::drain_transitions`] is needed to get those transitions.
+ ///
+ /// If the ID is higher than the length of internal vectors,
+ /// the vectors will be extended. A call to set_size is not needed.
+ pub fn set_from_usage_scope(&mut self, scope: &TextureUsageScope<A>) {
+ let incoming_size = scope.set.simple.len();
+ if incoming_size > self.start_set.simple.len() {
+ self.set_size(incoming_size);
+ }
+
+ for index in scope.metadata.owned_indices() {
+ self.tracker_assert_in_bounds(index);
+ scope.tracker_assert_in_bounds(index);
+ // SAFETY: both containers were just asserted in bounds for `index`.
+ unsafe {
+ let texture_selector = &scope.metadata.get_resource_unchecked(index).full_range;
+ insert_or_barrier_update(
+ texture_selector,
+ Some(&mut self.start_set),
+ &mut self.end_set,
+ &mut self.metadata,
+ index,
+ TextureStateProvider::TextureSet { set: &scope.set },
+ None,
+ ResourceMetadataProvider::Indirect {
+ metadata: &scope.metadata,
+ },
+ &mut self.temp,
+ );
+ }
+ }
+ }
+
+ /// Iterates through all textures in the given bind group and adopts
+ /// the state given for those textures in the UsageScope. It also
+ /// removes all touched textures from the usage scope.
+ ///
+ /// If a transition is needed to get the textures into the needed state,
+ /// those transitions are stored within the tracker. A subsequent
+ /// call to [`Self::drain_transitions`] is needed to get those transitions.
+ ///
+ /// This is a really funky method used by Compute Passes to generate
+ /// barriers after a call to dispatch without needing to iterate
+ /// over all elements in the usage scope. We use each the
+ /// bind group as a source of which IDs to look at. The bind groups
+ /// must have first been added to the usage scope.
+ ///
+ /// # Safety
+ ///
+ /// [`Self::set_size`] must be called with the maximum possible Texture ID before this
+ /// method is called.
+ pub unsafe fn set_and_remove_from_usage_scope_sparse(
+ &mut self,
+ scope: &mut TextureUsageScope<A>,
+ bind_group_state: &TextureBindGroupState<A>,
+ ) {
+ let incoming_size = scope.set.simple.len();
+ if incoming_size > self.start_set.simple.len() {
+ self.set_size(incoming_size);
+ }
+
+ let textures = bind_group_state.textures.lock();
+ for t in textures.iter() {
+ let index = t.texture.as_info().id().unzip().0 as usize;
+ scope.tracker_assert_in_bounds(index);
+
+ // Skip textures the scope no longer owns (e.g. already consumed by
+ // an earlier bind group in this pass).
+ if unsafe { !scope.metadata.contains_unchecked(index) } {
+ continue;
+ }
+ let texture_selector = &t.texture.full_range;
+ unsafe {
+ insert_or_barrier_update(
+ texture_selector,
+ Some(&mut self.start_set),
+ &mut self.end_set,
+ &mut self.metadata,
+ index,
+ TextureStateProvider::TextureSet { set: &scope.set },
+ None,
+ ResourceMetadataProvider::Indirect {
+ metadata: &scope.metadata,
+ },
+ &mut self.temp,
+ )
+ };
+
+ unsafe { scope.metadata.remove(index) };
+ }
+ }
+
+ /// Unconditionally removes the given resource from the tracker.
+ ///
+ /// Returns true if the resource was removed.
+ ///
+ /// If the ID is higher than the length of internal vectors,
+ /// false will be returned.
+ pub fn remove(&mut self, id: TextureId) -> bool {
+ let index = id.unzip().0 as usize;
+
+ // `size()` is the vector length, so `index == size` is already out of
+ // bounds; the previous `>` comparison let it reach `contains_unchecked`.
+ if index >= self.metadata.size() {
+ return false;
+ }
+
+ self.tracker_assert_in_bounds(index);
+
+ // SAFETY: `index` was bounds-checked above.
+ unsafe {
+ if self.metadata.contains_unchecked(index) {
+ self.start_set.complex.remove(&index);
+ self.end_set.complex.remove(&index);
+ self.metadata.remove(index);
+ return true;
+ }
+ }
+
+ false
+ }
+}
+
+/// An iterator adapter that can store two different iterator types.
+///
+/// Lets a single return type carry whichever of two concrete iterators
+/// a branch produced.
+#[derive(Clone)]
+enum EitherIter<L, R> {
+ Left(L),
+ Right(R),
+}
+
+impl<L, R, D> Iterator for EitherIter<L, R>
+where
+ L: Iterator<Item = D>,
+ R: Iterator<Item = D>,
+{
+ type Item = D;
+
+ /// Delegates to whichever inner iterator this value holds.
+ fn next(&mut self) -> Option<Self::Item> {
+ match self {
+ Self::Left(inner) => inner.next(),
+ Self::Right(inner) => inner.next(),
+ }
+ }
+}
+
+/// Container that signifies storing both different things
+/// if there is a single state or many different states
+/// involved in the operation.
+#[derive(Debug, Clone)]
+enum SingleOrManyStates<S, M> {
+ Single(S),
+ Many(M),
+}
+
+/// A source of texture state.
+#[derive(Clone)]
+enum TextureStateProvider<'a> {
+ /// Comes directly from a single state.
+ KnownSingle { state: TextureUses },
+ /// Comes from a selector and a single state.
+ Selector {
+ selector: TextureSelector,
+ state: TextureUses,
+ },
+ /// Comes from another texture set.
+ TextureSet { set: &'a TextureStateSet },
+}
+impl<'a> TextureStateProvider<'a> {
+ /// Convenience function turning `Option<Selector>` into this enum.
+ fn from_option(selector: Option<TextureSelector>, state: TextureUses) -> Self {
+ match selector {
+ Some(selector) => Self::Selector { selector, state },
+ None => Self::KnownSingle { state },
+ }
+ }
+
+ /// Get the state provided by this.
+ ///
+ /// # Panics
+ ///
+ /// Panics if texture_selector is None and this uses a Selector source.
+ ///
+ /// # Safety
+ ///
+ /// - The index must be in bounds of the state set if this uses a TextureSet source.
+ #[inline(always)]
+ unsafe fn get_state(
+ self,
+ texture_selector: Option<&TextureSelector>,
+ index: usize,
+ ) -> SingleOrManyStates<
+ TextureUses,
+ impl Iterator<Item = (TextureSelector, TextureUses)> + Clone + 'a,
+ > {
+ match self {
+ TextureStateProvider::KnownSingle { state } => SingleOrManyStates::Single(state),
+ TextureStateProvider::Selector { selector, state } => {
+ // We check if the selector given is actually for the full resource,
+ // and if it is we promote to a simple state. This allows upstream
+ // code to specify selectors willy nilly, and all that are really
+ // single states are promoted here.
+ if *texture_selector.unwrap() == selector {
+ SingleOrManyStates::Single(state)
+ } else {
+ SingleOrManyStates::Many(EitherIter::Left(iter::once((selector, state))))
+ }
+ }
+ TextureStateProvider::TextureSet { set } => {
+ // SAFETY: caller guarantees `index` is in bounds of the set.
+ let new_state = *unsafe { set.simple.get_unchecked(index) };
+
+ if new_state == TextureUses::COMPLEX {
+ // SAFETY: the tracker invariant says a COMPLEX simple state
+ // always has a matching complex-map entry.
+ let new_complex = unsafe { set.complex.get(&index).unwrap_unchecked() };
+
+ SingleOrManyStates::Many(EitherIter::Right(
+ new_complex.to_selector_state_iter(),
+ ))
+ } else {
+ SingleOrManyStates::Single(new_state)
+ }
+ }
+ }
+ }
+}
+
+/// Does an insertion operation if the index isn't tracked
+/// in the current metadata, otherwise merges the given state
+/// with the current state. If the merging would cause
+/// a conflict, returns that usage conflict.
+///
+/// # Safety
+///
+/// Indexes must be valid indexes into all arrays passed in
+/// to this function, either directly or via metadata or provider structs.
+#[inline(always)]
+unsafe fn insert_or_merge<A: HalApi>(
+ texture_selector: &TextureSelector,
+ current_state_set: &mut TextureStateSet,
+ resource_metadata: &mut ResourceMetadata<Texture<A>>,
+ index: usize,
+ state_provider: TextureStateProvider<'_>,
+ metadata_provider: ResourceMetadataProvider<'_, Texture<A>>,
+) -> Result<(), UsageConflict> {
+ // SAFETY: caller guarantees `index` is in bounds of the metadata.
+ let currently_owned = unsafe { resource_metadata.contains_unchecked(index) };
+
+ if !currently_owned {
+ // First sighting of this texture: record the state, no conflict possible.
+ unsafe {
+ insert(
+ Some(texture_selector),
+ None,
+ current_state_set,
+ resource_metadata,
+ index,
+ state_provider,
+ None,
+ metadata_provider,
+ )
+ };
+ return Ok(());
+ }
+
+ // Already tracked: union the incoming state with the existing one.
+ unsafe {
+ merge(
+ texture_selector,
+ current_state_set,
+ index,
+ state_provider,
+ metadata_provider,
+ )
+ }
+}
+
+/// If the resource isn't tracked
+/// - Inserts the given resource.
+/// - Uses the `start_state_provider` to populate `start_states`
+/// - Uses either `end_state_provider` or `start_state_provider`
+/// to populate `current_states`.
+/// If the resource is tracked
+/// - Inserts barriers from the state in `current_states`
+/// to the state provided by `start_state_provider`.
+/// - Updates the `current_states` with either the state from
+/// `end_state_provider` or `start_state_provider`.
+///
+/// Any barriers are added to the barrier vector.
+///
+/// # Safety
+///
+/// Indexes must be valid indexes into all arrays passed in
+/// to this function, either directly or via metadata or provider structs.
+#[inline(always)]
+unsafe fn insert_or_barrier_update<A: HalApi>(
+ texture_selector: &TextureSelector,
+ start_state: Option<&mut TextureStateSet>,
+ current_state_set: &mut TextureStateSet,
+ resource_metadata: &mut ResourceMetadata<Texture<A>>,
+ index: usize,
+ start_state_provider: TextureStateProvider<'_>,
+ end_state_provider: Option<TextureStateProvider<'_>>,
+ metadata_provider: ResourceMetadataProvider<'_, Texture<A>>,
+ barriers: &mut Vec<PendingTransition<TextureUses>>,
+) {
+ // SAFETY: caller guarantees `index` is in bounds of the metadata.
+ let currently_owned = unsafe { resource_metadata.contains_unchecked(index) };
+
+ if !currently_owned {
+ unsafe {
+ insert(
+ Some(texture_selector),
+ start_state,
+ current_state_set,
+ resource_metadata,
+ index,
+ start_state_provider,
+ end_state_provider,
+ metadata_provider,
+ )
+ };
+ return;
+ }
+
+ let update_state_provider = end_state_provider.unwrap_or_else(|| start_state_provider.clone());
+ // Emit barriers from the tracked current state to the incoming start state.
+ unsafe {
+ barrier(
+ texture_selector,
+ current_state_set,
+ index,
+ start_state_provider,
+ barriers,
+ )
+ };
+
+ // NOTE(review): `start_state` must be `Some` on the already-tracked path;
+ // all callers in this file pass `Some` here — confirm for new call sites.
+ let start_state_set = start_state.unwrap();
+ unsafe {
+ update(
+ texture_selector,
+ start_state_set,
+ current_state_set,
+ index,
+ update_state_provider,
+ )
+ };
+}
+
+/// Records a brand-new texture at `index`: writes the start state (if a start
+/// set is given), the end state (from `end_state_provider` when present,
+/// otherwise the start state), and the ownership metadata.
+///
+/// # Safety
+///
+/// `index` must be in bounds of every set and the metadata; `texture_selector`
+/// must be `Some` whenever a provider yields a many-state iterator.
+#[inline(always)]
+unsafe fn insert<A: HalApi>(
+ texture_selector: Option<&TextureSelector>,
+ start_state: Option<&mut TextureStateSet>,
+ end_state: &mut TextureStateSet,
+ resource_metadata: &mut ResourceMetadata<Texture<A>>,
+ index: usize,
+ start_state_provider: TextureStateProvider<'_>,
+ end_state_provider: Option<TextureStateProvider<'_>>,
+ metadata_provider: ResourceMetadataProvider<'_, Texture<A>>,
+) {
+ let start_layers = unsafe { start_state_provider.get_state(texture_selector, index) };
+ match start_layers {
+ SingleOrManyStates::Single(state) => {
+ // This should only ever happen with a wgpu bug, but let's just double
+ // check that resource states don't have any conflicts.
+ strict_assert_eq!(invalid_resource_state(state), false);
+
+ log::trace!("\ttex {index}: insert start {state:?}");
+
+ if let Some(start_state) = start_state {
+ unsafe { *start_state.simple.get_unchecked_mut(index) = state };
+ }
+
+ // We only need to insert ourselves the end state if there is no end state provider.
+ if end_state_provider.is_none() {
+ unsafe { *end_state.simple.get_unchecked_mut(index) = state };
+ }
+ }
+ SingleOrManyStates::Many(state_iter) => {
+ let full_range = texture_selector.unwrap().clone();
+
+ let complex =
+ unsafe { ComplexTextureState::from_selector_state_iter(full_range, state_iter) };
+
+ log::trace!("\ttex {index}: insert start {complex:?}");
+
+ if let Some(start_state) = start_state {
+ // COMPLEX sentinel in the simple vector redirects to the map entry.
+ unsafe { *start_state.simple.get_unchecked_mut(index) = TextureUses::COMPLEX };
+ start_state.complex.insert(index, complex.clone());
+ }
+
+ // We only need to insert ourselves the end state if there is no end state provider.
+ if end_state_provider.is_none() {
+ unsafe { *end_state.simple.get_unchecked_mut(index) = TextureUses::COMPLEX };
+ end_state.complex.insert(index, complex);
+ }
+ }
+ }
+
+ if let Some(end_state_provider) = end_state_provider {
+ match unsafe { end_state_provider.get_state(texture_selector, index) } {
+ SingleOrManyStates::Single(state) => {
+ // This should only ever happen with a wgpu bug, but let's just double
+ // check that resource states don't have any conflicts.
+ strict_assert_eq!(invalid_resource_state(state), false);
+
+ log::trace!("\ttex {index}: insert end {state:?}");
+
+ // We only need to insert into the end, as there is guaranteed to be
+ // a start state provider.
+ unsafe { *end_state.simple.get_unchecked_mut(index) = state };
+ }
+ SingleOrManyStates::Many(state_iter) => {
+ let full_range = texture_selector.unwrap().clone();
+
+ let complex = unsafe {
+ ComplexTextureState::from_selector_state_iter(full_range, state_iter)
+ };
+
+ log::trace!("\ttex {index}: insert end {complex:?}");
+
+ // We only need to insert into the end, as there is guaranteed to be
+ // a start state provider.
+ unsafe { *end_state.simple.get_unchecked_mut(index) = TextureUses::COMPLEX };
+ end_state.complex.insert(index, complex);
+ }
+ }
+ }
+
+ // Finally claim ownership of the index in the metadata.
+ unsafe {
+ let resource = metadata_provider.get_own(index);
+ resource_metadata.insert(index, resource);
+ }
+}
+
+/// Merge the usage state supplied by `state_provider` into
+/// `current_state_set` for the texture at `index`.
+///
+/// Old and new usages for each affected subresource range are OR-ed
+/// together. If the union trips `invalid_resource_state` (an exclusive
+/// usage combined with anything else), the merge fails with a
+/// [`UsageConflict`] identifying the offending range and states.
+///
+/// # Safety
+///
+/// - `index` must be in bounds of `current_state_set.simple`.
+/// - If that slot holds the `TextureUses::COMPLEX` sentinel, an entry
+///   for `index` must exist in `current_state_set.complex` (it is read
+///   via `unwrap_unchecked`).
+/// - `metadata_provider` must be able to produce an epoch for `index`
+///   (required by `get_epoch` on the error path).
+#[inline(always)]
+unsafe fn merge<A: HalApi>(
+    texture_selector: &TextureSelector,
+    current_state_set: &mut TextureStateSet,
+    index: usize,
+    state_provider: TextureStateProvider<'_>,
+    metadata_provider: ResourceMetadataProvider<'_, Texture<A>>,
+) -> Result<(), UsageConflict> {
+    // `COMPLEX` in the simple array is a sentinel value: the real
+    // per-subresource state lives in the `complex` side table.
+    let current_simple = unsafe { current_state_set.simple.get_unchecked_mut(index) };
+    let current_state = if *current_simple == TextureUses::COMPLEX {
+        SingleOrManyStates::Many(unsafe {
+            current_state_set.complex.get_mut(&index).unwrap_unchecked()
+        })
+    } else {
+        SingleOrManyStates::Single(current_simple)
+    };
+
+    let new_state = unsafe { state_provider.get_state(Some(texture_selector), index) };
+
+    match (current_state, new_state) {
+        // Whole-texture state on both sides: a single union suffices.
+        (SingleOrManyStates::Single(current_simple), SingleOrManyStates::Single(new_simple)) => {
+            let merged_state = *current_simple | new_simple;
+
+            log::trace!("\ttex {index}: merge simple {current_simple:?} + {new_simple:?}");
+
+            if invalid_resource_state(merged_state) {
+                return Err(UsageConflict::from_texture(
+                    TextureId::zip(
+                        index as _,
+                        unsafe { metadata_provider.get_epoch(index) },
+                        A::VARIANT,
+                    ),
+                    texture_selector.clone(),
+                    *current_simple,
+                    new_simple,
+                ));
+            }
+
+            *current_simple = merged_state;
+        }
+        (SingleOrManyStates::Single(current_simple), SingleOrManyStates::Many(new_many)) => {
+            // Because we are now demoting this simple state to a complex state,
+            // we actually need to make a whole new complex state for us to use
+            // as there wasn't one before.
+            let mut new_complex = unsafe {
+                ComplexTextureState::from_selector_state_iter(
+                    texture_selector.clone(),
+                    iter::once((texture_selector.clone(), *current_simple)),
+                )
+            };
+
+            // Union each incoming (selector, state) pair into the freshly
+            // built complex state, which starts out uniformly equal to the
+            // old simple state.
+            for (selector, new_state) in new_many {
+                let merged_state = *current_simple | new_state;
+
+                log::trace!("\ttex {index}: merge {selector:?} {current_simple:?} + {new_state:?}");
+
+                if invalid_resource_state(merged_state) {
+                    return Err(UsageConflict::from_texture(
+                        TextureId::zip(
+                            index as _,
+                            unsafe { metadata_provider.get_epoch(index) },
+                            A::VARIANT,
+                        ),
+                        selector,
+                        *current_simple,
+                        new_state,
+                    ));
+                }
+
+                for mip in
+                    &mut new_complex.mips[selector.mips.start as usize..selector.mips.end as usize]
+                {
+                    for &mut (_, ref mut current_layer_state) in
+                        mip.isolate(&selector.layers, TextureUses::UNKNOWN)
+                    {
+                        *current_layer_state = merged_state;
+                    }
+
+                    mip.coalesce();
+                }
+            }
+
+            // Mark the simple slot with the sentinel and publish the new
+            // complex entry.
+            *current_simple = TextureUses::COMPLEX;
+            current_state_set.complex.insert(index, new_complex);
+        }
+        // New state covers the whole texture: union it into every
+        // tracked (mip, layer-range) run of the existing complex state.
+        (SingleOrManyStates::Many(current_complex), SingleOrManyStates::Single(new_simple)) => {
+            for (mip_id, mip) in current_complex.mips.iter_mut().enumerate() {
+                let mip_id = mip_id as u32;
+
+                for &mut (ref layers, ref mut current_layer_state) in mip.iter_mut() {
+                    let merged_state = *current_layer_state | new_simple;
+
+                    // Once we remove unknown, this will never be empty, as
+                    // simple states are never unknown.
+                    let merged_state = merged_state - TextureUses::UNKNOWN;
+
+                    log::trace!(
+                        "\ttex {index}: merge mip {mip_id} layers {layers:?} \
+                         {current_layer_state:?} + {new_simple:?}"
+                    );
+
+                    if invalid_resource_state(merged_state) {
+                        return Err(UsageConflict::from_texture(
+                            TextureId::zip(
+                                index as _,
+                                unsafe { metadata_provider.get_epoch(index) },
+                                A::VARIANT,
+                            ),
+                            TextureSelector {
+                                mips: mip_id..mip_id + 1,
+                                layers: layers.clone(),
+                            },
+                            *current_layer_state,
+                            new_simple,
+                        ));
+                    }
+
+                    *current_layer_state = merged_state;
+                }
+
+                mip.coalesce();
+            }
+        }
+        // Both sides are per-subresource: merge selector-by-selector,
+        // only over the mip/layer ranges each incoming selector names.
+        (SingleOrManyStates::Many(current_complex), SingleOrManyStates::Many(new_many)) => {
+            for (selector, new_state) in new_many {
+                for mip_id in selector.mips {
+                    strict_assert!((mip_id as usize) < current_complex.mips.len());
+
+                    // SAFETY: bounds just checked by the strict_assert above
+                    // (checked in strict builds; relied upon otherwise).
+                    let mip = unsafe { current_complex.mips.get_unchecked_mut(mip_id as usize) };
+
+                    for &mut (ref layers, ref mut current_layer_state) in
+                        mip.isolate(&selector.layers, TextureUses::UNKNOWN)
+                    {
+                        let merged_state = *current_layer_state | new_state;
+                        let merged_state = merged_state - TextureUses::UNKNOWN;
+
+                        if merged_state.is_empty() {
+                            // We know nothing about this state, lets just move on.
+                            continue;
+                        }
+
+                        log::trace!(
+                            "\ttex {index}: merge mip {mip_id} layers {layers:?} \
+                             {current_layer_state:?} + {new_state:?}"
+                        );
+
+                        if invalid_resource_state(merged_state) {
+                            return Err(UsageConflict::from_texture(
+                                TextureId::zip(
+                                    index as _,
+                                    unsafe { metadata_provider.get_epoch(index) },
+                                    A::VARIANT,
+                                ),
+                                TextureSelector {
+                                    mips: mip_id..mip_id + 1,
+                                    layers: layers.clone(),
+                                },
+                                *current_layer_state,
+                                new_state,
+                            ));
+                        }
+                        *current_layer_state = merged_state;
+                    }
+
+                    mip.coalesce();
+                }
+            }
+        }
+    }
+    Ok(())
+}
+
+/// Emit the transitions needed to move the texture at `index` from the
+/// usage recorded in `current_state_set` to the usage supplied by
+/// `state_provider`, appending one [`PendingTransition`] per subresource
+/// range that actually changes.
+///
+/// Ranges whose old or new state is `TextureUses::UNKNOWN` are skipped
+/// (there is no concrete state to transition from/to), and `skip_barrier`
+/// (defined elsewhere in this module) suppresses transitions that are
+/// not required.
+///
+/// Note: this only reads `current_state_set`; applying the new state is
+/// done separately (see `update`).
+///
+/// # Safety
+///
+/// - `index` must be in bounds of `current_state_set.simple`.
+/// - If that slot holds the `TextureUses::COMPLEX` sentinel, an entry
+///   for `index` must exist in `current_state_set.complex` (it is read
+///   via `unwrap_unchecked`).
+#[inline(always)]
+unsafe fn barrier(
+    texture_selector: &TextureSelector,
+    current_state_set: &TextureStateSet,
+    index: usize,
+    state_provider: TextureStateProvider<'_>,
+    barriers: &mut Vec<PendingTransition<TextureUses>>,
+) {
+    // `COMPLEX` in the simple array is a sentinel value: the real
+    // per-subresource state lives in the `complex` side table.
+    let current_simple = unsafe { *current_state_set.simple.get_unchecked(index) };
+    let current_state = if current_simple == TextureUses::COMPLEX {
+        SingleOrManyStates::Many(unsafe {
+            current_state_set.complex.get(&index).unwrap_unchecked()
+        })
+    } else {
+        SingleOrManyStates::Single(current_simple)
+    };
+
+    let new_state = unsafe { state_provider.get_state(Some(texture_selector), index) };
+
+    match (current_state, new_state) {
+        // Whole-texture to whole-texture: at most one transition.
+        (SingleOrManyStates::Single(current_simple), SingleOrManyStates::Single(new_simple)) => {
+            if skip_barrier(current_simple, new_simple) {
+                return;
+            }
+
+            log::trace!("\ttex {index}: transition simple {current_simple:?} -> {new_simple:?}");
+
+            barriers.push(PendingTransition {
+                id: index as _,
+                selector: texture_selector.clone(),
+                usage: current_simple..new_simple,
+            });
+        }
+        // Whole-texture current state, per-range new state: one
+        // transition per incoming selector with a known target state.
+        (SingleOrManyStates::Single(current_simple), SingleOrManyStates::Many(new_many)) => {
+            for (selector, new_state) in new_many {
+                if new_state == TextureUses::UNKNOWN {
+                    continue;
+                }
+
+                if skip_barrier(current_simple, new_state) {
+                    continue;
+                }
+
+                log::trace!(
+                    "\ttex {index}: transition {selector:?} {current_simple:?} -> {new_state:?}"
+                );
+
+                barriers.push(PendingTransition {
+                    id: index as _,
+                    selector,
+                    usage: current_simple..new_state,
+                });
+            }
+        }
+        // Per-range current state, whole-texture new state: one
+        // transition per tracked (mip, layer-range) run with a known
+        // source state.
+        (SingleOrManyStates::Many(current_complex), SingleOrManyStates::Single(new_simple)) => {
+            for (mip_id, mip) in current_complex.mips.iter().enumerate() {
+                let mip_id = mip_id as u32;
+
+                for &(ref layers, current_layer_state) in mip.iter() {
+                    if current_layer_state == TextureUses::UNKNOWN {
+                        continue;
+                    }
+
+                    if skip_barrier(current_layer_state, new_simple) {
+                        continue;
+                    }
+
+                    log::trace!(
+                        "\ttex {index}: transition mip {mip_id} layers {layers:?} \
+                         {current_layer_state:?} -> {new_simple:?}"
+                    );
+
+                    barriers.push(PendingTransition {
+                        id: index as _,
+                        selector: TextureSelector {
+                            mips: mip_id..mip_id + 1,
+                            layers: layers.clone(),
+                        },
+                        usage: current_layer_state..new_simple,
+                    });
+                }
+            }
+        }
+        // Per-range on both sides: walk only the mips/layers each
+        // incoming selector names, intersecting with the tracked runs.
+        (SingleOrManyStates::Many(current_complex), SingleOrManyStates::Many(new_many)) => {
+            for (selector, new_state) in new_many {
+                for mip_id in selector.mips {
+                    strict_assert!((mip_id as usize) < current_complex.mips.len());
+
+                    // SAFETY: bounds just checked by the strict_assert above
+                    // (checked in strict builds; relied upon otherwise).
+                    let mip = unsafe { current_complex.mips.get_unchecked(mip_id as usize) };
+
+                    for (layers, current_layer_state) in mip.iter_filter(&selector.layers) {
+                        if *current_layer_state == TextureUses::UNKNOWN
+                            || new_state == TextureUses::UNKNOWN
+                        {
+                            continue;
+                        }
+
+                        if skip_barrier(*current_layer_state, new_state) {
+                            continue;
+                        }
+
+                        log::trace!(
+                            "\ttex {index}: transition mip {mip_id} layers {layers:?} \
+                             {current_layer_state:?} -> {new_state:?}"
+                        );
+
+                        barriers.push(PendingTransition {
+                            id: index as _,
+                            selector: TextureSelector {
+                                mips: mip_id..mip_id + 1,
+                                layers,
+                            },
+                            usage: *current_layer_state..new_state,
+                        });
+                    }
+                }
+            }
+        }
+    }
+}
+
+/// Overwrite the tracked usage of the texture at `index` in
+/// `current_state_set` with the state from `state_provider`.
+///
+/// In addition, whenever a subresource whose current state was
+/// `TextureUses::UNKNOWN` acquires a known state, the corresponding
+/// entry in `start_state_set` is back-filled with that state — the
+/// start state only needs patching when it is complex; a simple start
+/// state was fully known when first inserted (see the comments below).
+///
+/// # Safety
+///
+/// - `index` must be in bounds of both state sets' `simple` arrays.
+/// - Any `simple` slot holding the `TextureUses::COMPLEX` sentinel must
+///   have a matching entry in that set's `complex` map (entries are
+///   read/removed via `unwrap_unchecked`).
+#[allow(clippy::needless_option_as_deref)] // we use this for reborrowing Option<&mut T>
+#[inline(always)]
+unsafe fn update(
+    texture_selector: &TextureSelector,
+    start_state_set: &mut TextureStateSet,
+    current_state_set: &mut TextureStateSet,
+    index: usize,
+    state_provider: TextureStateProvider<'_>,
+) {
+    let start_simple = unsafe { *start_state_set.simple.get_unchecked(index) };
+
+    // We only ever need to update the start state here if the state is complex.
+    //
+    // If the state is simple, the first insert to the tracker would cover it.
+    let mut start_complex = None;
+    if start_simple == TextureUses::COMPLEX {
+        start_complex = Some(unsafe { start_state_set.complex.get_mut(&index).unwrap_unchecked() });
+    }
+
+    // `COMPLEX` in the simple array is a sentinel value: the real
+    // per-subresource state lives in the `complex` side table.
+    let current_simple = unsafe { current_state_set.simple.get_unchecked_mut(index) };
+    let current_state = if *current_simple == TextureUses::COMPLEX {
+        SingleOrManyStates::Many(unsafe {
+            current_state_set.complex.get_mut(&index).unwrap_unchecked()
+        })
+    } else {
+        SingleOrManyStates::Single(current_simple)
+    };
+
+    let new_state = unsafe { state_provider.get_state(Some(texture_selector), index) };
+
+    match (current_state, new_state) {
+        // Simple to simple: a plain overwrite.
+        (SingleOrManyStates::Single(current_simple), SingleOrManyStates::Single(new_simple)) => {
+            *current_simple = new_simple;
+        }
+        (SingleOrManyStates::Single(current_simple), SingleOrManyStates::Many(new_many)) => {
+            // Because we are now demoting this simple state to a complex state,
+            // we actually need to make a whole new complex state for us to use
+            // as there wasn't one before.
+            let mut new_complex = unsafe {
+                ComplexTextureState::from_selector_state_iter(
+                    texture_selector.clone(),
+                    iter::once((texture_selector.clone(), *current_simple)),
+                )
+            };
+
+            for (selector, mut new_state) in new_many {
+                // An UNKNOWN incoming state carries no information, so the
+                // subresource keeps the old simple state.
+                if new_state == TextureUses::UNKNOWN {
+                    new_state = *current_simple;
+                }
+                for mip in
+                    &mut new_complex.mips[selector.mips.start as usize..selector.mips.end as usize]
+                {
+                    for &mut (_, ref mut current_layer_state) in
+                        mip.isolate(&selector.layers, TextureUses::UNKNOWN)
+                    {
+                        *current_layer_state = new_state;
+                    }
+
+                    mip.coalesce();
+                }
+            }
+
+            // Mark the simple slot with the sentinel and publish the new
+            // complex entry.
+            *current_simple = TextureUses::COMPLEX;
+            current_state_set.complex.insert(index, new_complex);
+        }
+        // Complex to simple: back-fill the start state for any runs
+        // that were UNKNOWN, then collapse the current state back to a
+        // simple slot and drop the complex entry.
+        (SingleOrManyStates::Many(current_complex), SingleOrManyStates::Single(new_single)) => {
+            for (mip_id, mip) in current_complex.mips.iter().enumerate() {
+                for &(ref layers, current_layer_state) in mip.iter() {
+                    // If this state is unknown, that means that the start is _also_ unknown.
+                    if current_layer_state == TextureUses::UNKNOWN {
+                        if let Some(&mut ref mut start_complex) = start_complex {
+                            strict_assert!(mip_id < start_complex.mips.len());
+
+                            // SAFETY: bounds just checked by the strict_assert
+                            // above (checked in strict builds).
+                            let start_mip = unsafe { start_complex.mips.get_unchecked_mut(mip_id) };
+
+                            for &mut (_, ref mut current_start_state) in
+                                start_mip.isolate(layers, TextureUses::UNKNOWN)
+                            {
+                                strict_assert_eq!(*current_start_state, TextureUses::UNKNOWN);
+                                *current_start_state = new_single;
+                            }
+
+                            start_mip.coalesce();
+                        }
+                    }
+                }
+            }
+
+            unsafe { *current_state_set.simple.get_unchecked_mut(index) = new_single };
+            unsafe { current_state_set.complex.remove(&index).unwrap_unchecked() };
+        }
+        // Complex to complex: overwrite only the ranges each incoming
+        // selector names, back-filling the start state where a
+        // previously-UNKNOWN run becomes known.
+        (SingleOrManyStates::Many(current_complex), SingleOrManyStates::Many(new_many)) => {
+            for (selector, new_state) in new_many {
+                if new_state == TextureUses::UNKNOWN {
+                    // We know nothing new
+                    continue;
+                }
+
+                for mip_id in selector.mips {
+                    let mip_id = mip_id as usize;
+                    strict_assert!(mip_id < current_complex.mips.len());
+
+                    // SAFETY: bounds just checked by the strict_assert above
+                    // (checked in strict builds).
+                    let mip = unsafe { current_complex.mips.get_unchecked_mut(mip_id) };
+
+                    for &mut (ref layers, ref mut current_layer_state) in
+                        mip.isolate(&selector.layers, TextureUses::UNKNOWN)
+                    {
+                        if *current_layer_state == TextureUses::UNKNOWN
+                            && new_state != TextureUses::UNKNOWN
+                        {
+                            // We now know something about this subresource that
+                            // we didn't before so we should go back and update
+                            // the start state.
+                            //
+                            // We know we must have starter state be complex,
+                            // otherwise we would know about this state.
+                            strict_assert!(start_complex.is_some());
+
+                            // SAFETY: presence just checked by the
+                            // strict_assert above (checked in strict builds).
+                            let start_complex =
+                                unsafe { start_complex.as_deref_mut().unwrap_unchecked() };
+
+                            strict_assert!(mip_id < start_complex.mips.len());
+
+                            let start_mip = unsafe { start_complex.mips.get_unchecked_mut(mip_id) };
+
+                            for &mut (_, ref mut current_start_state) in
+                                start_mip.isolate(layers, TextureUses::UNKNOWN)
+                            {
+                                strict_assert_eq!(*current_start_state, TextureUses::UNKNOWN);
+                                *current_start_state = new_state;
+                            }
+
+                            start_mip.coalesce();
+                        }
+
+                        *current_layer_state = new_state;
+                    }
+
+                    mip.coalesce();
+                }
+            }
+        }
+    }
+}