summaryrefslogtreecommitdiffstats
path: root/third_party/rust/wgpu-core/src/device/queue.rs
diff options
context:
space:
mode:
Diffstat (limited to 'third_party/rust/wgpu-core/src/device/queue.rs')
-rw-r--r-- third_party/rust/wgpu-core/src/device/queue.rs | 169
1 file changed, 56 insertions, 113 deletions
diff --git a/third_party/rust/wgpu-core/src/device/queue.rs b/third_party/rust/wgpu-core/src/device/queue.rs
index 3cb5f695a7..f0db961ffc 100644
--- a/third_party/rust/wgpu-core/src/device/queue.rs
+++ b/third_party/rust/wgpu-core/src/device/queue.rs
@@ -4,16 +4,17 @@ use crate::{
api_log,
command::{
extract_texture_selector, validate_linear_texture_data, validate_texture_copy_range,
- ClearError, CommandBuffer, CopySide, ImageCopyTexture, TransferError,
+ ClearError, CommandAllocator, CommandBuffer, CopySide, ImageCopyTexture, TransferError,
},
conv,
- device::{life::ResourceMaps, DeviceError, WaitIdleError},
+ device::{DeviceError, WaitIdleError},
get_lowest_common_denom,
global::Global,
hal_api::HalApi,
hal_label,
id::{self, DeviceId, QueueId},
init_tracker::{has_copy_partial_init_tracker_coverage, TextureInitRange},
+ lock::{rank, Mutex, RwLockWriteGuard},
resource::{
Buffer, BufferAccessError, BufferMapState, DestroyedBuffer, DestroyedTexture, Resource,
ResourceInfo, ResourceType, StagingBuffer, Texture, TextureInner,
@@ -22,7 +23,6 @@ use crate::{
};
use hal::{CommandEncoder as _, Device as _, Queue as _};
-use parking_lot::Mutex;
use smallvec::SmallVec;
use std::{
@@ -34,9 +34,9 @@ use thiserror::Error;
use super::Device;
pub struct Queue<A: HalApi> {
- pub device: Option<Arc<Device<A>>>,
- pub raw: Option<A::Queue>,
- pub info: ResourceInfo<Queue<A>>,
+ pub(crate) device: Option<Arc<Device<A>>>,
+ pub(crate) raw: Option<A::Queue>,
+ pub(crate) info: ResourceInfo<Queue<A>>,
}
impl<A: HalApi> Resource for Queue<A> {
@@ -152,13 +152,21 @@ pub enum TempResource<A: HalApi> {
Texture(Arc<Texture<A>>),
}
-/// A queue execution for a particular command encoder.
+/// A series of raw [`CommandBuffer`]s that have been submitted to a
+/// queue, and the [`wgpu_hal::CommandEncoder`] that built them.
+///
+/// [`CommandBuffer`]: hal::Api::CommandBuffer
+/// [`wgpu_hal::CommandEncoder`]: hal::CommandEncoder
pub(crate) struct EncoderInFlight<A: HalApi> {
raw: A::CommandEncoder,
cmd_buffers: Vec<A::CommandBuffer>,
}
impl<A: HalApi> EncoderInFlight<A> {
+ /// Free all of our command buffers.
+ ///
+ /// Return the command encoder, fully reset and ready to be
+ /// reused.
pub(crate) unsafe fn land(mut self) -> A::CommandEncoder {
unsafe { self.raw.reset_all(self.cmd_buffers.into_iter()) };
self.raw
@@ -192,6 +200,8 @@ pub(crate) struct PendingWrites<A: HalApi> {
/// True if `command_encoder` is in the "recording" state, as
/// described in the docs for the [`wgpu_hal::CommandEncoder`]
/// trait.
+ ///
+ /// [`wgpu_hal::CommandEncoder`]: hal::CommandEncoder
pub is_recording: bool,
pub temp_resources: Vec<TempResource<A>>,
@@ -253,7 +263,7 @@ impl<A: HalApi> PendingWrites<A> {
#[must_use]
fn post_submit(
&mut self,
- command_allocator: &mut super::CommandAllocator<A>,
+ command_allocator: &CommandAllocator<A>,
device: &A::Device,
queue: &A::Queue,
) -> Option<EncoderInFlight<A>> {
@@ -307,7 +317,7 @@ fn prepare_staging_buffer<A: HalApi>(
let mapping = unsafe { device.raw().map_buffer(&buffer, 0..size) }?;
let staging_buffer = StagingBuffer {
- raw: Mutex::new(Some(buffer)),
+ raw: Mutex::new(rank::STAGING_BUFFER_RAW, Some(buffer)),
device: device.clone(),
size,
info: ResourceInfo::new(
@@ -490,7 +500,7 @@ impl Global {
prepare_staging_buffer(device, buffer_size.get(), device.instance_flags)?;
let fid = hub.staging_buffers.prepare(id_in);
- let (id, _) = fid.assign(staging_buffer);
+ let (id, _) = fid.assign(Arc::new(staging_buffer));
resource_log!("Queue::create_staging_buffer {id:?}");
Ok((id, staging_buffer_ptr))
@@ -707,7 +717,7 @@ impl Global {
.get(destination.texture)
.map_err(|_| TransferError::InvalidTexture(destination.texture))?;
- if dst.device.as_info().id() != queue_id.transmute() {
+ if dst.device.as_info().id().into_queue_id() != queue_id {
return Err(DeviceError::WrongDevice.into());
}
@@ -1152,8 +1162,8 @@ impl Global {
let snatch_guard = device.snatchable_lock.read();
// Fence lock must be acquired after the snatch lock everywhere to avoid deadlocks.
- let mut fence = device.fence.write();
- let fence = fence.as_mut().unwrap();
+ let mut fence_guard = device.fence.write();
+ let fence = fence_guard.as_mut().unwrap();
let submit_index = device
.active_submission_index
.fetch_add(1, Ordering::Relaxed)
@@ -1173,11 +1183,6 @@ impl Global {
//TODO: if multiple command buffers are submitted, we can re-use the last
// native command buffer of the previous chain instead of always creating
// a temporary one, since the chains are not finished.
- let mut temp_suspected = device.temp_suspected.lock();
- {
- let mut suspected = temp_suspected.replace(ResourceMaps::new()).unwrap();
- suspected.clear();
- }
// finish all the command buffers first
for &cmb_id in command_buffer_ids {
@@ -1191,7 +1196,7 @@ impl Global {
Err(_) => continue,
};
- if cmdbuf.device.as_info().id() != queue_id.transmute() {
+ if cmdbuf.device.as_info().id().into_queue_id() != queue_id {
return Err(DeviceError::WrongDevice.into());
}
@@ -1210,13 +1215,10 @@ impl Global {
));
}
if !cmdbuf.is_finished() {
- if let Some(cmdbuf) = Arc::into_inner(cmdbuf) {
- device.destroy_command_buffer(cmdbuf);
- } else {
- panic!(
- "Command buffer cannot be destroyed because is still in use"
- );
- }
+ let cmdbuf = Arc::into_inner(cmdbuf).expect(
+ "Command buffer cannot be destroyed because is still in use",
+ );
+ device.destroy_command_buffer(cmdbuf);
continue;
}
@@ -1228,41 +1230,23 @@ impl Global {
// update submission IDs
for buffer in cmd_buf_trackers.buffers.used_resources() {
- let tracker_index = buffer.info.tracker_index();
- let raw_buf = match buffer.raw.get(&snatch_guard) {
- Some(raw) => raw,
- None => {
- return Err(QueueSubmitError::DestroyedBuffer(
- buffer.info.id(),
- ));
- }
- };
+ if buffer.raw.get(&snatch_guard).is_none() {
+ return Err(QueueSubmitError::DestroyedBuffer(
+ buffer.info.id(),
+ ));
+ }
buffer.info.use_at(submit_index);
- if buffer.is_unique() {
- if let BufferMapState::Active { .. } = *buffer.map_state.lock()
- {
- log::warn!("Dropped buffer has a pending mapping.");
- unsafe { device.raw().unmap_buffer(raw_buf) }
- .map_err(DeviceError::from)?;
- }
- temp_suspected
- .as_mut()
- .unwrap()
- .buffers
- .insert(tracker_index, buffer.clone());
- } else {
- match *buffer.map_state.lock() {
- BufferMapState::Idle => (),
- _ => {
- return Err(QueueSubmitError::BufferStillMapped(
- buffer.info.id(),
- ))
- }
+
+ match *buffer.map_state.lock() {
+ BufferMapState::Idle => (),
+ _ => {
+ return Err(QueueSubmitError::BufferStillMapped(
+ buffer.info.id(),
+ ))
}
}
}
for texture in cmd_buf_trackers.textures.used_resources() {
- let tracker_index = texture.info.tracker_index();
let should_extend = match texture.inner.get(&snatch_guard) {
None => {
return Err(QueueSubmitError::DestroyedTexture(
@@ -1279,13 +1263,6 @@ impl Global {
}
};
texture.info.use_at(submit_index);
- if texture.is_unique() {
- temp_suspected
- .as_mut()
- .unwrap()
- .textures
- .insert(tracker_index, texture.clone());
- }
if should_extend {
unsafe {
used_surface_textures
@@ -1296,12 +1273,6 @@ impl Global {
}
for texture_view in cmd_buf_trackers.views.used_resources() {
texture_view.info.use_at(submit_index);
- if texture_view.is_unique() {
- temp_suspected.as_mut().unwrap().texture_views.insert(
- texture_view.as_info().tracker_index(),
- texture_view.clone(),
- );
- }
}
{
for bg in cmd_buf_trackers.bind_groups.used_resources() {
@@ -1315,13 +1286,6 @@ impl Global {
for sampler in bg.used.samplers.used_resources() {
sampler.info.use_at(submit_index);
}
- if bg.is_unique() {
- temp_suspected
- .as_mut()
- .unwrap()
- .bind_groups
- .insert(bg.as_info().tracker_index(), bg.clone());
- }
}
}
// assert!(cmd_buf_trackers.samplers.is_empty());
@@ -1329,32 +1293,14 @@ impl Global {
cmd_buf_trackers.compute_pipelines.used_resources()
{
compute_pipeline.info.use_at(submit_index);
- if compute_pipeline.is_unique() {
- temp_suspected.as_mut().unwrap().compute_pipelines.insert(
- compute_pipeline.as_info().tracker_index(),
- compute_pipeline.clone(),
- );
- }
}
for render_pipeline in
cmd_buf_trackers.render_pipelines.used_resources()
{
render_pipeline.info.use_at(submit_index);
- if render_pipeline.is_unique() {
- temp_suspected.as_mut().unwrap().render_pipelines.insert(
- render_pipeline.as_info().tracker_index(),
- render_pipeline.clone(),
- );
- }
}
for query_set in cmd_buf_trackers.query_sets.used_resources() {
query_set.info.use_at(submit_index);
- if query_set.is_unique() {
- temp_suspected.as_mut().unwrap().query_sets.insert(
- query_set.as_info().tracker_index(),
- query_set.clone(),
- );
- }
}
for bundle in cmd_buf_trackers.bundles.used_resources() {
bundle.info.use_at(submit_index);
@@ -1369,13 +1315,6 @@ impl Global {
for query_set in bundle.used.query_sets.read().used_resources() {
query_set.info.use_at(submit_index);
}
- if bundle.is_unique() {
- temp_suspected
- .as_mut()
- .unwrap()
- .render_bundles
- .insert(bundle.as_info().tracker_index(), bundle.clone());
- }
}
}
let mut baked = cmdbuf.from_arc_into_baked();
@@ -1452,8 +1391,8 @@ impl Global {
}
}
- let mut pending_writes = device.pending_writes.lock();
- let pending_writes = pending_writes.as_mut().unwrap();
+ let mut pending_writes_guard = device.pending_writes.lock();
+ let pending_writes = pending_writes_guard.as_mut().unwrap();
{
used_surface_textures.set_size(hub.textures.read().len());
@@ -1528,7 +1467,7 @@ impl Global {
profiling::scope!("cleanup");
if let Some(pending_execution) = pending_writes.post_submit(
- device.command_allocator.lock().as_mut().unwrap(),
+ &device.command_allocator,
device.raw(),
queue.raw.as_ref().unwrap(),
) {
@@ -1543,18 +1482,22 @@ impl Global {
active_executions,
);
- // This will schedule destruction of all resources that are no longer needed
- // by the user but used in the command stream, among other things.
- let (closures, _) = match device.maintain(fence, wgt::Maintain::Poll, snatch_guard) {
- Ok(closures) => closures,
- Err(WaitIdleError::Device(err)) => return Err(QueueSubmitError::Queue(err)),
- Err(WaitIdleError::StuckGpu) => return Err(QueueSubmitError::StuckGpu),
- Err(WaitIdleError::WrongSubmissionIndex(..)) => unreachable!(),
- };
-
// pending_write_resources has been drained, so it's empty, but we
// want to retain its heap allocation.
pending_writes.temp_resources = pending_write_resources;
+ drop(pending_writes_guard);
+
+ // This will schedule destruction of all resources that are no longer needed
+ // by the user but used in the command stream, among other things.
+ let fence_guard = RwLockWriteGuard::downgrade(fence_guard);
+ let (closures, _) =
+ match device.maintain(fence_guard, wgt::Maintain::Poll, snatch_guard) {
+ Ok(closures) => closures,
+ Err(WaitIdleError::Device(err)) => return Err(QueueSubmitError::Queue(err)),
+ Err(WaitIdleError::StuckGpu) => return Err(QueueSubmitError::StuckGpu),
+ Err(WaitIdleError::WrongSubmissionIndex(..)) => unreachable!(),
+ };
+
device.lock_life().post_submit();
(submit_index, closures)