summaryrefslogtreecommitdiffstats
path: root/third_party/rust/wgpu-core
diff options
context:
space:
mode:
Diffstat (limited to 'third_party/rust/wgpu-core')
-rw-r--r--third_party/rust/wgpu-core/.cargo-checksum.json2
-rw-r--r--third_party/rust/wgpu-core/Cargo.toml1
-rw-r--r--third_party/rust/wgpu-core/src/binding_model.rs26
-rw-r--r--third_party/rust/wgpu-core/src/command/bundle.rs294
-rw-r--r--third_party/rust/wgpu-core/src/command/clear.rs45
-rw-r--r--third_party/rust/wgpu-core/src/command/compute.rs35
-rw-r--r--third_party/rust/wgpu-core/src/command/draw.rs120
-rw-r--r--third_party/rust/wgpu-core/src/command/mod.rs5
-rw-r--r--third_party/rust/wgpu-core/src/command/query.rs9
-rw-r--r--third_party/rust/wgpu-core/src/command/render.rs54
-rw-r--r--third_party/rust/wgpu-core/src/device/global.rs245
-rw-r--r--third_party/rust/wgpu-core/src/device/life.rs129
-rw-r--r--third_party/rust/wgpu-core/src/device/queue.rs125
-rw-r--r--third_party/rust/wgpu-core/src/device/resource.rs204
-rw-r--r--third_party/rust/wgpu-core/src/id.rs2
-rw-r--r--third_party/rust/wgpu-core/src/identity.rs63
-rw-r--r--third_party/rust/wgpu-core/src/instance.rs22
-rw-r--r--third_party/rust/wgpu-core/src/lib.rs30
-rw-r--r--third_party/rust/wgpu-core/src/pipeline.rs23
-rw-r--r--third_party/rust/wgpu-core/src/present.rs43
-rw-r--r--third_party/rust/wgpu-core/src/registry.rs9
-rw-r--r--third_party/rust/wgpu-core/src/resource.rs38
-rw-r--r--third_party/rust/wgpu-core/src/track/buffer.rs65
-rw-r--r--third_party/rust/wgpu-core/src/track/metadata.rs11
-rw-r--r--third_party/rust/wgpu-core/src/track/mod.rs218
-rw-r--r--third_party/rust/wgpu-core/src/track/stateless.rs43
-rw-r--r--third_party/rust/wgpu-core/src/track/texture.rs65
-rw-r--r--third_party/rust/wgpu-core/src/validation.rs115
28 files changed, 1235 insertions, 806 deletions
diff --git a/third_party/rust/wgpu-core/.cargo-checksum.json b/third_party/rust/wgpu-core/.cargo-checksum.json
index bf0a48e81a..d22e0914d7 100644
--- a/third_party/rust/wgpu-core/.cargo-checksum.json
+++ b/third_party/rust/wgpu-core/.cargo-checksum.json
@@ -1 +1 @@
-{"files":{"Cargo.toml":"92c0bcfb5bf68fb55acb6e7b826ec07c1cfdd6d53b057c16a5c698e044ea228e","LICENSE.APACHE":"a6cba85bc92e0cff7a450b1d873c0eaa2e9fc96bf472df0247a26bec77bf3ff9","LICENSE.MIT":"c7fea58d1cfe49634cd92e54fc10a9d871f4b275321a4cd8c09e449122caaeb4","build.rs":"a99478d7f63fb41429e3834f4d0e5cd333f94ba1834c68295f929170e16987de","src/any_surface.rs":"1c032bc1894a222a47f0116b976f1543c1140c0534678502ee1172d4f77fc515","src/binding_model.rs":"2bd4e4a36742ccf0cab0afa039411a791e2a6e9ea3909d0b85cc9a84cc151c6b","src/command/bind.rs":"a37f042484b65d9fdea4cdab3667381623ee9a8943a6d32683d410b92736d306","src/command/bundle.rs":"91513a3be0adf46a9f3454b6a3d00ff6686729eb91fe9dd6d732cbfa1ff6d1d8","src/command/clear.rs":"b20e93c4b8cb47062b38e472f78d28d9ec00fd1169b17a87094be7f9d1c995e1","src/command/compute.rs":"eb60f0e2842dd20b366905225af24f4ca2a1b0c67914b86009c5b870b26f747f","src/command/draw.rs":"e8a664fc248e273e8c0e4aaeb645010b3f4ec61d29d858137f31f6f653c86542","src/command/memory_init.rs":"6ec93b9e2eb21edaa534e60770b4ba95735e9de61e74d827bc492df8e3639449","src/command/mod.rs":"d6a66a5796bd824be72af2c8d3ece59a507090c61cb50e9856eb4c70a28945e2","src/command/query.rs":"dffc843746b10ba9a50b8a2b92a59b407b56a845619a96d72a5883588fcb50f0","src/command/render.rs":"c3783b4f19b4eafb33f94554aea69408d42e40b5e98da22aa804a0931430ea6f","src/command/transfer.rs":"bf1077d1a99a258bad46087ae7234703627e7f4d30b38e6142d016c02deaad3a","src/conv.rs":"7e3ffe33b47a6fd3617aabf9f11cc68f1ccbee2c7343b8dbbcd0e8f3447e1ad8","src/device/any_device.rs":"65f47b58939b60f88f47861e65d5d45209492df8e73e7c1b60b3b459f510c09e","src/device/bgl.rs":"ec8bdd6e9b4cd50c25bed317275863d0c16bb6619f62ed85bf0464948010dfc1","src/device/global.rs":"7d70a45bd39e251c6945fc475883c4e69632f92a7abe263adab6e47a248de5a4","src/device/life.rs":"cd12343d5a14d82b18b787991811b36f420719776336f8a65b45c32fd47a77d4","src/device/mod.rs":"fff41f92e1a9f6660e18dc30452d9911ca827701bb8303af2ae06f1c1e1a795f","src/device/queue.rs":"2ffc477d1bebb35a1fc8e46f4ca2c
5ef50a4eb6034968f076062461b2e678699","src/device/resource.rs":"4f22cf27da8d829b624877d7d3bb10971a0e8fb7c4f95d85d5011049a010684a","src/device/trace.rs":"9deb1b083165e07253b4928ac2f564aba06f9089c3aca1c0a1d438d87d981542","src/error.rs":"e3b6b7a69877437f4e46af7f0e8ca1db1822beae7c8448db41c2bae0f64b2bb4","src/global.rs":"0966475959706650fd036a18d51441a8e14c3ef10107db617f597614ca47e50a","src/hal_api.rs":"1cd9c3fe1c9d8c3a24e3e7f963a2ef26e056a2b26d529b840dbc969090aaf201","src/hash_utils.rs":"e8d484027c7ce81978e4679a5e20af9416ab7d2fa595f1ca95992b29d625b0ca","src/hub.rs":"352a1b75d4535f24b06d16134421db98f910e6e719f50f863a204df6768e3369","src/id.rs":"c736c0b3d35cf620e2c01322d57c4938b42828b39948ecad82d39fc39c1093c1","src/identity.rs":"c6a719389d71bb11c9ceaeadb0496f8b4c6ad24e35597e12b40980ad7ad72f10","src/init_tracker/buffer.rs":"61eb9cfaa312135b7a937ff6a3117f531b5b7323fae6553a41d6de9bc106d7e0","src/init_tracker/mod.rs":"a0f64730cc025113b656b4690f9dcb0ec18b8770bc7ef24c7b4ad8bebae03d24","src/init_tracker/texture.rs":"030fd594bf9948fad391390d85c5e1fec7eaf67b6e812c60f2dd59bc4fda8fd5","src/instance.rs":"c9b5b53a0aeac8e117d49a3a007fab001cd5737e29dd75388cdbfc24f3d8df08","src/lib.rs":"49174591f8116c3b8fadb185f89ce69ae931ee6e9f639d2558848db82ea1651f","src/pipeline.rs":"300f58afc16c454ce52aabff6debd7a7db85ed627b111a8801bcb201827f110c","src/pool.rs":"778ea1c23fcfaaa5001606e686f712f606826039d60dd5a3cd26e7de91ac057a","src/present.rs":"86b1e8bd7314f77f083be6d89a2f734e92f2ed11c86eb4c912c754fcdaa2e597","src/registry.rs":"dbc9310a24a843cf6b94a4bab78b0bb5f325e18c1f3c19c94d4f12b4f29e8598","src/resource.rs":"cd568c9d1abd4bf740cb86efae7862b5478518f3b1cdaf792ae05b3c0920c8e0","src/snatch.rs":"29a1135ee09c06883eac4df6f45b7220c2ba8f89f34232ea1d270d6e7b05c7a8","src/storage.rs":"f0c41461b8f9cdc862dbd3de04c8e720ee416c7c57310696f6f4fd22183fcc85","src/track/buffer.rs":"65c27dfabe7a1c3e4ddbde7189e53b2e95f3f3663aa82b121801a2fd0dcbd304","src/track/metadata.rs":"ac82a9c69b0a141b5c3ca69b203c5aa2a17578b598cab3ae15
6b917cef734b97","src/track/mod.rs":"8f03955447544f3ebcb48547440a48d321ad1ff0e0c601a62623b5457763b8de","src/track/range.rs":"2a15794e79b0470d5ba6b3267173a42f34312878e1cb288f198d2854a7888e53","src/track/stateless.rs":"2da10160c46d07ad15986ba6f1356b7933806fc5c3fa5a9d8deea44d9a3c93a7","src/track/texture.rs":"15892e639f2ecbb13c8d34c29e3fd6ad719cb71e2d40c64910b552b8985ddab0","src/validation.rs":"613c58c3601f36d6aa5986cea01f30497c6bd4ceb990824904d101b2327941a9"},"package":null} \ No newline at end of file
+{"files":{"Cargo.toml":"4880d66b004519ca6e424fc9e2e6ac065536d36334a2e327b90422e97f2a2a35","LICENSE.APACHE":"a6cba85bc92e0cff7a450b1d873c0eaa2e9fc96bf472df0247a26bec77bf3ff9","LICENSE.MIT":"c7fea58d1cfe49634cd92e54fc10a9d871f4b275321a4cd8c09e449122caaeb4","build.rs":"a99478d7f63fb41429e3834f4d0e5cd333f94ba1834c68295f929170e16987de","src/any_surface.rs":"1c032bc1894a222a47f0116b976f1543c1140c0534678502ee1172d4f77fc515","src/binding_model.rs":"bb4aefad17957e770a5f70f00bf5853dc13da1d9f836493c9aa9adbbe7bb8147","src/command/bind.rs":"a37f042484b65d9fdea4cdab3667381623ee9a8943a6d32683d410b92736d306","src/command/bundle.rs":"fea00382acdf204bcb58522953335dd8f0092565693fa65d0c008e2698e39445","src/command/clear.rs":"03cfc0d4c689d56010391440ab279e615ef1d3235eb1f9f9df0323682d275109","src/command/compute.rs":"2b6beed328ed351ad6fe7088cfa1824c1bf4be50deaeab971cdcb09914d791de","src/command/draw.rs":"15f9ad857504d8098279f9c789317feba321c9b6b8f0de20b8ba98f358c99d89","src/command/memory_init.rs":"6ec93b9e2eb21edaa534e60770b4ba95735e9de61e74d827bc492df8e3639449","src/command/mod.rs":"1d347e1746194f7a07d1f75bd3a9d3cbe121fbaa479c25ba6b8c16e9d699e06b","src/command/query.rs":"43b78a163eb0eb5f1427b7a57b6d39a2748c25f880ba024c91e2f71e2a6a817d","src/command/render.rs":"808dc8106811b32877637851e63baeba7c7438748dec67cbb17ea93c58dc61bd","src/command/transfer.rs":"bf1077d1a99a258bad46087ae7234703627e7f4d30b38e6142d016c02deaad3a","src/conv.rs":"7e3ffe33b47a6fd3617aabf9f11cc68f1ccbee2c7343b8dbbcd0e8f3447e1ad8","src/device/any_device.rs":"65f47b58939b60f88f47861e65d5d45209492df8e73e7c1b60b3b459f510c09e","src/device/bgl.rs":"ec8bdd6e9b4cd50c25bed317275863d0c16bb6619f62ed85bf0464948010dfc1","src/device/global.rs":"ff90a9e3b261bedbec37ab1aed0bf23f1e50c5418da72184e2b175057ed18fce","src/device/life.rs":"3cacaaa74df04bb1285a36d70395b35cfa17059f8d6289b41e665ecbc64cb66a","src/device/mod.rs":"fff41f92e1a9f6660e18dc30452d9911ca827701bb8303af2ae06f1c1e1a795f","src/device/queue.rs":"da0aeebfd1d1c6e155dc89cebf75d
fdb6ec18062f9512044ed7e0fef0bda2f74","src/device/resource.rs":"74d3180c12602133bee46925d3788ac510d2ad5ea141a2b46f6904f38549053b","src/device/trace.rs":"9deb1b083165e07253b4928ac2f564aba06f9089c3aca1c0a1d438d87d981542","src/error.rs":"e3b6b7a69877437f4e46af7f0e8ca1db1822beae7c8448db41c2bae0f64b2bb4","src/global.rs":"0966475959706650fd036a18d51441a8e14c3ef10107db617f597614ca47e50a","src/hal_api.rs":"1cd9c3fe1c9d8c3a24e3e7f963a2ef26e056a2b26d529b840dbc969090aaf201","src/hash_utils.rs":"e8d484027c7ce81978e4679a5e20af9416ab7d2fa595f1ca95992b29d625b0ca","src/hub.rs":"352a1b75d4535f24b06d16134421db98f910e6e719f50f863a204df6768e3369","src/id.rs":"9f67dbef5d7a416eb740281ecf8a94673f624da16f21ec33c425c11d9ed01e90","src/identity.rs":"12b820eb4b8bd7b226e15eec97d0f100a695f6b9be7acd79ad2421f2d0fe1985","src/init_tracker/buffer.rs":"61eb9cfaa312135b7a937ff6a3117f531b5b7323fae6553a41d6de9bc106d7e0","src/init_tracker/mod.rs":"a0f64730cc025113b656b4690f9dcb0ec18b8770bc7ef24c7b4ad8bebae03d24","src/init_tracker/texture.rs":"030fd594bf9948fad391390d85c5e1fec7eaf67b6e812c60f2dd59bc4fda8fd5","src/instance.rs":"b6de2a371ef3b43d3217102fe87e423dd1eb12da86b65f54b902d9eaa38b6b9f","src/lib.rs":"4ad9979442cf88557fb3b9f8d3b26c7b929a710c60cabcd1f51788917c95aecb","src/pipeline.rs":"89d88de4b8b8e1dd2bc834d101a1bdf34816ebcaa616dc795f604e9183a21cd0","src/pool.rs":"778ea1c23fcfaaa5001606e686f712f606826039d60dd5a3cd26e7de91ac057a","src/present.rs":"f69580ee0baf181162f9dd82b159596c738558d8abb60db93047effbe1436b2f","src/registry.rs":"913e651dc585ff12fe7659443c38d635a2904881e56cb7159c5ca72d45ae5800","src/resource.rs":"59731bc9a207d87b07b6db9c897e20d64be27c144bb8eb8ab2505807163acfc4","src/snatch.rs":"29a1135ee09c06883eac4df6f45b7220c2ba8f89f34232ea1d270d6e7b05c7a8","src/storage.rs":"f0c41461b8f9cdc862dbd3de04c8e720ee416c7c57310696f6f4fd22183fcc85","src/track/buffer.rs":"83a0cbb8026dbd651d32ea5a47f332f691afed1c5e6f14e78a4fe8aa25e2ad12","src/track/metadata.rs":"655985fdfdd1c7fe8220af98abadf33de7e8920b485e3dd27c
28688c3dd2e47d","src/track/mod.rs":"52470a48de6b5dce55385e23ba7a3cbf512cc10cdf431a35aa42190e2fc4306d","src/track/range.rs":"2a15794e79b0470d5ba6b3267173a42f34312878e1cb288f198d2854a7888e53","src/track/stateless.rs":"305e0a493fb1cd0a325274c0757e99c19f9d14deaa8ca11ada41c1399a4ae5c4","src/track/texture.rs":"ba3e3814b341b5242548b55d77bef1d1d9e7d52d63784be98c51e342da7fefff","src/validation.rs":"026168ac4f23bc6a58a90c78fd3eb73485b3c1aad630ef43755604d1babade79"},"package":null} \ No newline at end of file
diff --git a/third_party/rust/wgpu-core/Cargo.toml b/third_party/rust/wgpu-core/Cargo.toml
index f9692cf607..3d3b4dc80c 100644
--- a/third_party/rust/wgpu-core/Cargo.toml
+++ b/third_party/rust/wgpu-core/Cargo.toml
@@ -41,6 +41,7 @@ arrayvec = "0.7"
bit-vec = "0.6"
bitflags = "2"
codespan-reporting = "0.11"
+document-features = "0.2.8"
indexmap = "2"
log = "0.4"
once_cell = "1"
diff --git a/third_party/rust/wgpu-core/src/binding_model.rs b/third_party/rust/wgpu-core/src/binding_model.rs
index d7b54ad5a5..8689af2ac1 100644
--- a/third_party/rust/wgpu-core/src/binding_model.rs
+++ b/third_party/rust/wgpu-core/src/binding_model.rs
@@ -38,6 +38,8 @@ pub enum BindGroupLayoutEntryError {
ArrayUnsupported,
#[error("Multisampled binding with sample type `TextureSampleType::Float` must have filterable set to false.")]
SampleTypeFloatFilterableBindingMultisampled,
+ #[error("Multisampled texture binding view dimension must be 2d, got {0:?}")]
+ Non2DMultisampled(wgt::TextureViewDimension),
#[error(transparent)]
MissingFeatures(#[from] MissingFeatures),
#[error(transparent)]
@@ -219,7 +221,7 @@ pub enum BindingZone {
}
#[derive(Clone, Debug, Error)]
-#[error("Too many bindings of type {kind:?} in {zone}, limit is {limit}, count was {count}")]
+#[error("Too many bindings of type {kind:?} in {zone}, limit is {limit}, count was {count}. Check the limit `{}` passed to `Adapter::request_device`", .kind.to_config_str())]
pub struct BindingTypeMaxCountError {
pub kind: BindingTypeMaxCountErrorKind,
pub zone: BindingZone,
@@ -238,6 +240,28 @@ pub enum BindingTypeMaxCountErrorKind {
UniformBuffers,
}
+impl BindingTypeMaxCountErrorKind {
+ fn to_config_str(&self) -> &'static str {
+ match self {
+ BindingTypeMaxCountErrorKind::DynamicUniformBuffers => {
+ "max_dynamic_uniform_buffers_per_pipeline_layout"
+ }
+ BindingTypeMaxCountErrorKind::DynamicStorageBuffers => {
+ "max_dynamic_storage_buffers_per_pipeline_layout"
+ }
+ BindingTypeMaxCountErrorKind::SampledTextures => {
+ "max_sampled_textures_per_shader_stage"
+ }
+ BindingTypeMaxCountErrorKind::Samplers => "max_samplers_per_shader_stage",
+ BindingTypeMaxCountErrorKind::StorageBuffers => "max_storage_buffers_per_shader_stage",
+ BindingTypeMaxCountErrorKind::StorageTextures => {
+ "max_storage_textures_per_shader_stage"
+ }
+ BindingTypeMaxCountErrorKind::UniformBuffers => "max_uniform_buffers_per_shader_stage",
+ }
+ }
+}
+
#[derive(Debug, Default)]
pub(crate) struct PerStageBindingTypeCounter {
vertex: u32,
diff --git a/third_party/rust/wgpu-core/src/command/bundle.rs b/third_party/rust/wgpu-core/src/command/bundle.rs
index 9d80c62f85..ab2d18bc59 100644
--- a/third_party/rust/wgpu-core/src/command/bundle.rs
+++ b/third_party/rust/wgpu-core/src/command/bundle.rs
@@ -97,7 +97,7 @@ use crate::{
id,
init_tracker::{BufferInitTrackerAction, MemoryInitKind, TextureInitTrackerAction},
pipeline::{PipelineFlags, RenderPipeline, VertexStep},
- resource::{Resource, ResourceInfo, ResourceType},
+ resource::{Buffer, Resource, ResourceInfo, ResourceType},
resource_log,
track::RenderBundleScope,
validation::check_buffer_usage,
@@ -110,9 +110,11 @@ use thiserror::Error;
use hal::CommandEncoder as _;
+use super::ArcRenderCommand;
+
/// https://gpuweb.github.io/gpuweb/#dom-gpurendercommandsmixin-draw
-fn validate_draw(
- vertex: &[Option<VertexState>],
+fn validate_draw<A: HalApi>(
+ vertex: &[Option<VertexState<A>>],
step: &[VertexStep],
first_vertex: u32,
vertex_count: u32,
@@ -152,10 +154,10 @@ fn validate_draw(
}
// See https://gpuweb.github.io/gpuweb/#dom-gpurendercommandsmixin-drawindexed
-fn validate_indexed_draw(
- vertex: &[Option<VertexState>],
+fn validate_indexed_draw<A: HalApi>(
+ vertex: &[Option<VertexState<A>>],
step: &[VertexStep],
- index_state: &IndexState,
+ index_state: &IndexState<A>,
first_index: u32,
index_count: u32,
first_instance: u32,
@@ -260,6 +262,9 @@ impl RenderBundleEncoder {
None => (true, true),
};
+ // TODO: should be device.limits.max_color_attachments
+ let max_color_attachments = hal::MAX_COLOR_ATTACHMENTS;
+
//TODO: validate that attachment formats are renderable,
// have expected aspects, support multisampling.
Ok(Self {
@@ -267,11 +272,11 @@ impl RenderBundleEncoder {
parent_id,
context: RenderPassContext {
attachments: AttachmentData {
- colors: if desc.color_formats.len() > hal::MAX_COLOR_ATTACHMENTS {
+ colors: if desc.color_formats.len() > max_color_attachments {
return Err(CreateRenderBundleError::ColorAttachment(
ColorAttachmentError::TooMany {
given: desc.color_formats.len(),
- limit: hal::MAX_COLOR_ATTACHMENTS,
+ limit: max_color_attachments,
},
));
} else {
@@ -345,24 +350,44 @@ impl RenderBundleEncoder {
) -> Result<RenderBundle<A>, RenderBundleError> {
let bind_group_guard = hub.bind_groups.read();
let pipeline_guard = hub.render_pipelines.read();
- let query_set_guard = hub.query_sets.read();
let buffer_guard = hub.buffers.read();
- let texture_guard = hub.textures.read();
let mut state = State {
- trackers: RenderBundleScope::new(
- &*buffer_guard,
- &*texture_guard,
- &*bind_group_guard,
- &*pipeline_guard,
- &*query_set_guard,
- ),
+ trackers: RenderBundleScope::new(),
pipeline: None,
bind: (0..hal::MAX_BIND_GROUPS).map(|_| None).collect(),
vertex: (0..hal::MAX_VERTEX_BUFFERS).map(|_| None).collect(),
index: None,
flat_dynamic_offsets: Vec::new(),
};
+
+ let indices = &device.tracker_indices;
+ state
+ .trackers
+ .buffers
+ .write()
+ .set_size(indices.buffers.size());
+ state
+ .trackers
+ .textures
+ .write()
+ .set_size(indices.textures.size());
+ state
+ .trackers
+ .bind_groups
+ .write()
+ .set_size(indices.bind_groups.size());
+ state
+ .trackers
+ .render_pipelines
+ .write()
+ .set_size(indices.render_pipelines.size());
+ state
+ .trackers
+ .query_sets
+ .write()
+ .set_size(indices.query_sets.size());
+
let mut commands = Vec::new();
let mut buffer_memory_init_actions = Vec::new();
let mut texture_memory_init_actions = Vec::new();
@@ -399,7 +424,6 @@ impl RenderBundleEncoder {
}
// Identify the next `num_dynamic_offsets` entries from `base.dynamic_offsets`.
- let num_dynamic_offsets = num_dynamic_offsets;
let offsets_range =
next_dynamic_offset..next_dynamic_offset + num_dynamic_offsets;
next_dynamic_offset = offsets_range.end;
@@ -471,7 +495,7 @@ impl RenderBundleEncoder {
let pipeline_state = PipelineState::new(pipeline);
- commands.push(command);
+ commands.push(ArcRenderCommand::SetPipeline(pipeline.clone()));
// If this pipeline uses push constants, zero out their values.
if let Some(iter) = pipeline_state.zero_push_constants() {
@@ -496,7 +520,7 @@ impl RenderBundleEncoder {
.map_pass_err(scope)?;
self.check_valid_to_use(buffer.device.info.id())
.map_pass_err(scope)?;
- check_buffer_usage(buffer.usage, wgt::BufferUsages::INDEX)
+ check_buffer_usage(buffer_id, buffer.usage, wgt::BufferUsages::INDEX)
.map_pass_err(scope)?;
let end = match size {
@@ -508,7 +532,7 @@ impl RenderBundleEncoder {
offset..end,
MemoryInitKind::NeedsInitializedMemory,
));
- state.set_index_buffer(buffer_id, index_format, offset..end);
+ state.set_index_buffer(buffer.clone(), index_format, offset..end);
}
RenderCommand::SetVertexBuffer {
slot,
@@ -535,7 +559,7 @@ impl RenderBundleEncoder {
.map_pass_err(scope)?;
self.check_valid_to_use(buffer.device.info.id())
.map_pass_err(scope)?;
- check_buffer_usage(buffer.usage, wgt::BufferUsages::VERTEX)
+ check_buffer_usage(buffer_id, buffer.usage, wgt::BufferUsages::VERTEX)
.map_pass_err(scope)?;
let end = match size {
@@ -547,13 +571,13 @@ impl RenderBundleEncoder {
offset..end,
MemoryInitKind::NeedsInitializedMemory,
));
- state.vertex[slot as usize] = Some(VertexState::new(buffer_id, offset..end));
+ state.vertex[slot as usize] = Some(VertexState::new(buffer.clone(), offset..end));
}
RenderCommand::SetPushConstant {
stages,
offset,
size_bytes,
- values_offset: _,
+ values_offset,
} => {
let scope = PassErrorScope::SetPushConstant;
let end_offset = offset + size_bytes;
@@ -564,7 +588,7 @@ impl RenderBundleEncoder {
.validate_push_constant_ranges(stages, offset, end_offset)
.map_pass_err(scope)?;
- commands.push(command);
+ commands.push(ArcRenderCommand::SetPushConstant { stages, offset, size_bytes, values_offset });
}
RenderCommand::Draw {
vertex_count,
@@ -592,14 +616,19 @@ impl RenderBundleEncoder {
if instance_count > 0 && vertex_count > 0 {
commands.extend(state.flush_vertices());
commands.extend(state.flush_binds(used_bind_groups, base.dynamic_offsets));
- commands.push(command);
+ commands.push(ArcRenderCommand::Draw {
+ vertex_count,
+ instance_count,
+ first_vertex,
+ first_instance,
+ });
}
}
RenderCommand::DrawIndexed {
index_count,
instance_count,
first_index,
- base_vertex: _,
+ base_vertex,
first_instance,
} => {
let scope = PassErrorScope::Draw {
@@ -628,7 +657,7 @@ impl RenderBundleEncoder {
commands.extend(state.flush_index());
commands.extend(state.flush_vertices());
commands.extend(state.flush_binds(used_bind_groups, base.dynamic_offsets));
- commands.push(command);
+ commands.push(ArcRenderCommand::DrawIndexed { index_count, instance_count, first_index, base_vertex, first_instance });
}
}
RenderCommand::MultiDrawIndirect {
@@ -657,7 +686,7 @@ impl RenderBundleEncoder {
.map_pass_err(scope)?;
self.check_valid_to_use(buffer.device.info.id())
.map_pass_err(scope)?;
- check_buffer_usage(buffer.usage, wgt::BufferUsages::INDIRECT)
+ check_buffer_usage(buffer_id, buffer.usage, wgt::BufferUsages::INDIRECT)
.map_pass_err(scope)?;
buffer_memory_init_actions.extend(buffer.initialization_status.read().create_action(
@@ -668,7 +697,7 @@ impl RenderBundleEncoder {
commands.extend(state.flush_vertices());
commands.extend(state.flush_binds(used_bind_groups, base.dynamic_offsets));
- commands.push(command);
+ commands.push(ArcRenderCommand::MultiDrawIndirect { buffer: buffer.clone(), offset, count: None, indexed: false });
}
RenderCommand::MultiDrawIndirect {
buffer_id,
@@ -696,7 +725,7 @@ impl RenderBundleEncoder {
.map_pass_err(scope)?;
self.check_valid_to_use(buffer.device.info.id())
.map_pass_err(scope)?;
- check_buffer_usage(buffer.usage, wgt::BufferUsages::INDIRECT)
+ check_buffer_usage(buffer_id, buffer.usage, wgt::BufferUsages::INDIRECT)
.map_pass_err(scope)?;
buffer_memory_init_actions.extend(buffer.initialization_status.read().create_action(
@@ -713,7 +742,7 @@ impl RenderBundleEncoder {
commands.extend(index.flush());
commands.extend(state.flush_vertices());
commands.extend(state.flush_binds(used_bind_groups, base.dynamic_offsets));
- commands.push(command);
+ commands.push(ArcRenderCommand::MultiDrawIndirect { buffer: buffer.clone(), offset, count: None, indexed: true });
}
RenderCommand::MultiDrawIndirect { .. }
| RenderCommand::MultiDrawIndirectCount { .. } => unimplemented!(),
@@ -748,7 +777,10 @@ impl RenderBundleEncoder {
buffer_memory_init_actions,
texture_memory_init_actions,
context: self.context,
- info: ResourceInfo::new(desc.label.borrow_or_default()),
+ info: ResourceInfo::new(
+ desc.label.borrow_or_default(),
+ Some(device.tracker_indices.bundles.clone()),
+ ),
discard_hal_labels: device
.instance_flags
.contains(wgt::InstanceFlags::DISCARD_HAL_LABELS),
@@ -824,7 +856,7 @@ pub type RenderBundleDescriptor<'a> = wgt::RenderBundleDescriptor<Label<'a>>;
pub struct RenderBundle<A: HalApi> {
// Normalized command stream. It can be executed verbatim,
// without re-binding anything on the pipeline change.
- base: BasePass<RenderCommand>,
+ base: BasePass<ArcRenderCommand<A>>,
pub(super) is_depth_read_only: bool,
pub(super) is_stencil_read_only: bool,
pub(crate) device: Arc<Device<A>>,
@@ -863,7 +895,6 @@ impl<A: HalApi> RenderBundle<A> {
/// All the validation has already been done by this point.
/// The only failure condition is if some of the used buffers are destroyed.
pub(super) unsafe fn execute(&self, raw: &mut A::CommandEncoder) -> Result<(), ExecutionError> {
- let trackers = &self.used;
let mut offsets = self.base.dynamic_offsets.as_slice();
let mut pipeline_layout = None::<Arc<PipelineLayout<A>>>;
if !self.discard_hal_labels {
@@ -874,74 +905,65 @@ impl<A: HalApi> RenderBundle<A> {
let snatch_guard = self.device.snatchable_lock.read();
+ use ArcRenderCommand as Cmd;
for command in self.base.commands.iter() {
- match *command {
- RenderCommand::SetBindGroup {
+ match command {
+ Cmd::SetBindGroup {
index,
num_dynamic_offsets,
- bind_group_id,
+ bind_group,
} => {
- let bind_groups = trackers.bind_groups.read();
- let bind_group = bind_groups.get(bind_group_id).unwrap();
let raw_bg = bind_group
.raw(&snatch_guard)
- .ok_or(ExecutionError::InvalidBindGroup(bind_group_id))?;
+ .ok_or(ExecutionError::InvalidBindGroup(bind_group.info.id()))?;
unsafe {
raw.set_bind_group(
pipeline_layout.as_ref().unwrap().raw(),
- index,
+ *index,
raw_bg,
- &offsets[..num_dynamic_offsets],
+ &offsets[..*num_dynamic_offsets],
)
};
- offsets = &offsets[num_dynamic_offsets..];
+ offsets = &offsets[*num_dynamic_offsets..];
}
- RenderCommand::SetPipeline(pipeline_id) => {
- let render_pipelines = trackers.render_pipelines.read();
- let pipeline = render_pipelines.get(pipeline_id).unwrap();
+ Cmd::SetPipeline(pipeline) => {
unsafe { raw.set_render_pipeline(pipeline.raw()) };
pipeline_layout = Some(pipeline.layout.clone());
}
- RenderCommand::SetIndexBuffer {
- buffer_id,
+ Cmd::SetIndexBuffer {
+ buffer,
index_format,
offset,
size,
} => {
- let buffers = trackers.buffers.read();
- let buffer: &A::Buffer = buffers
- .get(buffer_id)
- .ok_or(ExecutionError::DestroyedBuffer(buffer_id))?
+ let buffer: &A::Buffer = buffer
.raw(&snatch_guard)
- .ok_or(ExecutionError::DestroyedBuffer(buffer_id))?;
+ .ok_or(ExecutionError::DestroyedBuffer(buffer.info.id()))?;
let bb = hal::BufferBinding {
buffer,
- offset,
- size,
+ offset: *offset,
+ size: *size,
};
- unsafe { raw.set_index_buffer(bb, index_format) };
+ unsafe { raw.set_index_buffer(bb, *index_format) };
}
- RenderCommand::SetVertexBuffer {
+ Cmd::SetVertexBuffer {
slot,
- buffer_id,
+ buffer,
offset,
size,
} => {
- let buffers = trackers.buffers.read();
- let buffer = buffers
- .get(buffer_id)
- .ok_or(ExecutionError::DestroyedBuffer(buffer_id))?
+ let buffer = buffer
.raw(&snatch_guard)
- .ok_or(ExecutionError::DestroyedBuffer(buffer_id))?;
+ .ok_or(ExecutionError::DestroyedBuffer(buffer.info.id()))?;
let bb = hal::BufferBinding {
buffer,
- offset,
- size,
+ offset: *offset,
+ size: *size,
};
- unsafe { raw.set_vertex_buffer(slot, bb) };
+ unsafe { raw.set_vertex_buffer(*slot, bb) };
}
- RenderCommand::SetPushConstant {
+ Cmd::SetPushConstant {
stages,
offset,
size_bytes,
@@ -949,7 +971,7 @@ impl<A: HalApi> RenderBundle<A> {
} => {
let pipeline_layout = pipeline_layout.as_ref().unwrap();
- if let Some(values_offset) = values_offset {
+ if let Some(values_offset) = *values_offset {
let values_end_offset =
(values_offset + size_bytes / wgt::PUSH_CONSTANT_ALIGNMENT) as usize;
let data_slice = &self.base.push_constant_data
@@ -958,20 +980,20 @@ impl<A: HalApi> RenderBundle<A> {
unsafe {
raw.set_push_constants(
pipeline_layout.raw(),
- stages,
- offset,
+ *stages,
+ *offset,
data_slice,
)
}
} else {
super::push_constant_clear(
- offset,
- size_bytes,
+ *offset,
+ *size_bytes,
|clear_offset, clear_data| {
unsafe {
raw.set_push_constants(
pipeline_layout.raw(),
- stages,
+ *stages,
clear_offset,
clear_data,
)
@@ -980,15 +1002,22 @@ impl<A: HalApi> RenderBundle<A> {
);
}
}
- RenderCommand::Draw {
+ Cmd::Draw {
vertex_count,
instance_count,
first_vertex,
first_instance,
} => {
- unsafe { raw.draw(first_vertex, vertex_count, first_instance, instance_count) };
+ unsafe {
+ raw.draw(
+ *first_vertex,
+ *vertex_count,
+ *first_instance,
+ *instance_count,
+ )
+ };
}
- RenderCommand::DrawIndexed {
+ Cmd::DrawIndexed {
index_count,
instance_count,
first_index,
@@ -997,63 +1026,54 @@ impl<A: HalApi> RenderBundle<A> {
} => {
unsafe {
raw.draw_indexed(
- first_index,
- index_count,
- base_vertex,
- first_instance,
- instance_count,
+ *first_index,
+ *index_count,
+ *base_vertex,
+ *first_instance,
+ *instance_count,
)
};
}
- RenderCommand::MultiDrawIndirect {
- buffer_id,
+ Cmd::MultiDrawIndirect {
+ buffer,
offset,
count: None,
indexed: false,
} => {
- let buffers = trackers.buffers.read();
- let buffer = buffers
- .get(buffer_id)
- .ok_or(ExecutionError::DestroyedBuffer(buffer_id))?
+ let buffer = buffer
.raw(&snatch_guard)
- .ok_or(ExecutionError::DestroyedBuffer(buffer_id))?;
- unsafe { raw.draw_indirect(buffer, offset, 1) };
+ .ok_or(ExecutionError::DestroyedBuffer(buffer.info.id()))?;
+ unsafe { raw.draw_indirect(buffer, *offset, 1) };
}
- RenderCommand::MultiDrawIndirect {
- buffer_id,
+ Cmd::MultiDrawIndirect {
+ buffer,
offset,
count: None,
indexed: true,
} => {
- let buffers = trackers.buffers.read();
- let buffer = buffers
- .get(buffer_id)
- .ok_or(ExecutionError::DestroyedBuffer(buffer_id))?
+ let buffer = buffer
.raw(&snatch_guard)
- .ok_or(ExecutionError::DestroyedBuffer(buffer_id))?;
- unsafe { raw.draw_indexed_indirect(buffer, offset, 1) };
+ .ok_or(ExecutionError::DestroyedBuffer(buffer.info.id()))?;
+ unsafe { raw.draw_indexed_indirect(buffer, *offset, 1) };
}
- RenderCommand::MultiDrawIndirect { .. }
- | RenderCommand::MultiDrawIndirectCount { .. } => {
+ Cmd::MultiDrawIndirect { .. } | Cmd::MultiDrawIndirectCount { .. } => {
return Err(ExecutionError::Unimplemented("multi-draw-indirect"))
}
- RenderCommand::PushDebugGroup { .. }
- | RenderCommand::InsertDebugMarker { .. }
- | RenderCommand::PopDebugGroup => {
+ Cmd::PushDebugGroup { .. } | Cmd::InsertDebugMarker { .. } | Cmd::PopDebugGroup => {
return Err(ExecutionError::Unimplemented("debug-markers"))
}
- RenderCommand::WriteTimestamp { .. }
- | RenderCommand::BeginOcclusionQuery { .. }
- | RenderCommand::EndOcclusionQuery
- | RenderCommand::BeginPipelineStatisticsQuery { .. }
- | RenderCommand::EndPipelineStatisticsQuery => {
+ Cmd::WriteTimestamp { .. }
+ | Cmd::BeginOcclusionQuery { .. }
+ | Cmd::EndOcclusionQuery
+ | Cmd::BeginPipelineStatisticsQuery { .. }
+ | Cmd::EndPipelineStatisticsQuery => {
return Err(ExecutionError::Unimplemented("queries"))
}
- RenderCommand::ExecuteBundle(_)
- | RenderCommand::SetBlendConstant(_)
- | RenderCommand::SetStencilReference(_)
- | RenderCommand::SetViewport { .. }
- | RenderCommand::SetScissor(_) => unreachable!(),
+ Cmd::ExecuteBundle(_)
+ | Cmd::SetBlendConstant(_)
+ | Cmd::SetStencilReference(_)
+ | Cmd::SetViewport { .. }
+ | Cmd::SetScissor(_) => unreachable!(),
}
}
@@ -1087,14 +1107,14 @@ impl<A: HalApi> Resource for RenderBundle<A> {
/// and calls [`State::flush_index`] before any indexed draw command to produce
/// a `SetIndexBuffer` command if one is necessary.
#[derive(Debug)]
-struct IndexState {
- buffer: id::BufferId,
+struct IndexState<A: HalApi> {
+ buffer: Arc<Buffer<A>>,
format: wgt::IndexFormat,
range: Range<wgt::BufferAddress>,
is_dirty: bool,
}
-impl IndexState {
+impl<A: HalApi> IndexState<A> {
/// Return the number of entries in the current index buffer.
///
/// Panic if no index buffer has been set.
@@ -1109,11 +1129,11 @@ impl IndexState {
/// Generate a `SetIndexBuffer` command to prepare for an indexed draw
/// command, if needed.
- fn flush(&mut self) -> Option<RenderCommand> {
+ fn flush(&mut self) -> Option<ArcRenderCommand<A>> {
if self.is_dirty {
self.is_dirty = false;
- Some(RenderCommand::SetIndexBuffer {
- buffer_id: self.buffer,
+ Some(ArcRenderCommand::SetIndexBuffer {
+ buffer: self.buffer.clone(),
index_format: self.format,
offset: self.range.start,
size: wgt::BufferSize::new(self.range.end - self.range.start),
@@ -1134,14 +1154,14 @@ impl IndexState {
///
/// [`flush`]: IndexState::flush
#[derive(Debug)]
-struct VertexState {
- buffer: id::BufferId,
+struct VertexState<A: HalApi> {
+ buffer: Arc<Buffer<A>>,
range: Range<wgt::BufferAddress>,
is_dirty: bool,
}
-impl VertexState {
- fn new(buffer: id::BufferId, range: Range<wgt::BufferAddress>) -> Self {
+impl<A: HalApi> VertexState<A> {
+ fn new(buffer: Arc<Buffer<A>>, range: Range<wgt::BufferAddress>) -> Self {
Self {
buffer,
range,
@@ -1152,12 +1172,12 @@ impl VertexState {
/// Generate a `SetVertexBuffer` command for this slot, if necessary.
///
/// `slot` is the index of the vertex buffer slot that `self` tracks.
- fn flush(&mut self, slot: u32) -> Option<RenderCommand> {
+ fn flush(&mut self, slot: u32) -> Option<ArcRenderCommand<A>> {
if self.is_dirty {
self.is_dirty = false;
- Some(RenderCommand::SetVertexBuffer {
+ Some(ArcRenderCommand::SetVertexBuffer {
slot,
- buffer_id: self.buffer,
+ buffer: self.buffer.clone(),
offset: self.range.start,
size: wgt::BufferSize::new(self.range.end - self.range.start),
})
@@ -1219,7 +1239,7 @@ impl<A: HalApi> PipelineState<A> {
/// Return a sequence of commands to zero the push constant ranges this
/// pipeline uses. If no initialization is necessary, return `None`.
- fn zero_push_constants(&self) -> Option<impl Iterator<Item = RenderCommand>> {
+ fn zero_push_constants(&self) -> Option<impl Iterator<Item = ArcRenderCommand<A>>> {
if !self.push_constant_ranges.is_empty() {
let nonoverlapping_ranges =
super::bind::compute_nonoverlapping_ranges(&self.push_constant_ranges);
@@ -1227,7 +1247,7 @@ impl<A: HalApi> PipelineState<A> {
Some(
nonoverlapping_ranges
.into_iter()
- .map(|range| RenderCommand::SetPushConstant {
+ .map(|range| ArcRenderCommand::SetPushConstant {
stages: range.stages,
offset: range.range.start,
size_bytes: range.range.end - range.range.start,
@@ -1261,11 +1281,11 @@ struct State<A: HalApi> {
bind: ArrayVec<Option<BindState<A>>, { hal::MAX_BIND_GROUPS }>,
/// The state of each vertex buffer slot.
- vertex: ArrayVec<Option<VertexState>, { hal::MAX_VERTEX_BUFFERS }>,
+ vertex: ArrayVec<Option<VertexState<A>>, { hal::MAX_VERTEX_BUFFERS }>,
/// The current index buffer, if one has been set. We flush this state
/// before indexed draw commands.
- index: Option<IndexState>,
+ index: Option<IndexState<A>>,
/// Dynamic offset values used by the cleaned-up command sequence.
///
@@ -1375,13 +1395,13 @@ impl<A: HalApi> State<A> {
/// Set the bundle's current index buffer and its associated parameters.
fn set_index_buffer(
&mut self,
- buffer: id::BufferId,
+ buffer: Arc<Buffer<A>>,
format: wgt::IndexFormat,
range: Range<wgt::BufferAddress>,
) {
match self.index {
Some(ref current)
- if current.buffer == buffer
+ if Arc::ptr_eq(&current.buffer, &buffer)
&& current.format == format
&& current.range == range =>
{
@@ -1400,11 +1420,11 @@ impl<A: HalApi> State<A> {
/// Generate a `SetIndexBuffer` command to prepare for an indexed draw
/// command, if needed.
- fn flush_index(&mut self) -> Option<RenderCommand> {
+ fn flush_index(&mut self) -> Option<ArcRenderCommand<A>> {
self.index.as_mut().and_then(|index| index.flush())
}
- fn flush_vertices(&mut self) -> impl Iterator<Item = RenderCommand> + '_ {
+ fn flush_vertices(&mut self) -> impl Iterator<Item = ArcRenderCommand<A>> + '_ {
self.vertex
.iter_mut()
.enumerate()
@@ -1416,7 +1436,7 @@ impl<A: HalApi> State<A> {
&mut self,
used_bind_groups: usize,
dynamic_offsets: &[wgt::DynamicOffset],
- ) -> impl Iterator<Item = RenderCommand> + '_ {
+ ) -> impl Iterator<Item = ArcRenderCommand<A>> + '_ {
// Append each dirty bind group's dynamic offsets to `flat_dynamic_offsets`.
for contents in self.bind[..used_bind_groups].iter().flatten() {
if contents.is_dirty {
@@ -1435,9 +1455,9 @@ impl<A: HalApi> State<A> {
if contents.is_dirty {
contents.is_dirty = false;
let offsets = &contents.dynamic_offsets;
- return Some(RenderCommand::SetBindGroup {
+ return Some(ArcRenderCommand::SetBindGroup {
index: i.try_into().unwrap(),
- bind_group_id: contents.bind_group.as_info().id(),
+ bind_group: contents.bind_group.clone(),
num_dynamic_offsets: offsets.end - offsets.start,
});
}
diff --git a/third_party/rust/wgpu-core/src/command/clear.rs b/third_party/rust/wgpu-core/src/command/clear.rs
index 2569fea1a4..e404fabb14 100644
--- a/third_party/rust/wgpu-core/src/command/clear.rs
+++ b/third_party/rust/wgpu-core/src/command/clear.rs
@@ -39,6 +39,11 @@ pub enum ClearError {
UnalignedFillSize(BufferAddress),
#[error("Buffer offset {0:?} is not a multiple of `COPY_BUFFER_ALIGNMENT`")]
UnalignedBufferOffset(BufferAddress),
+ #[error("Clear starts at offset {start_offset} with size of {requested_size}, but these added together exceed `u64::MAX`")]
+ OffsetPlusSizeExceeds64BitBounds {
+ start_offset: BufferAddress,
+ requested_size: BufferAddress,
+ },
#[error("Clear of {start_offset}..{end_offset} would end up overrunning the bounds of the buffer of size {buffer_size}")]
BufferOverrun {
start_offset: BufferAddress,
@@ -117,25 +122,27 @@ impl Global {
if offset % wgt::COPY_BUFFER_ALIGNMENT != 0 {
return Err(ClearError::UnalignedBufferOffset(offset));
}
- if let Some(size) = size {
- if size % wgt::COPY_BUFFER_ALIGNMENT != 0 {
- return Err(ClearError::UnalignedFillSize(size));
- }
- let destination_end_offset = offset + size;
- if destination_end_offset > dst_buffer.size {
- return Err(ClearError::BufferOverrun {
+
+ let size = size.unwrap_or(dst_buffer.size.saturating_sub(offset));
+ if size % wgt::COPY_BUFFER_ALIGNMENT != 0 {
+ return Err(ClearError::UnalignedFillSize(size));
+ }
+ let end_offset =
+ offset
+ .checked_add(size)
+ .ok_or(ClearError::OffsetPlusSizeExceeds64BitBounds {
start_offset: offset,
- end_offset: destination_end_offset,
- buffer_size: dst_buffer.size,
- });
- }
+ requested_size: size,
+ })?;
+ if end_offset > dst_buffer.size {
+ return Err(ClearError::BufferOverrun {
+ start_offset: offset,
+ end_offset,
+ buffer_size: dst_buffer.size,
+ });
}
- let end = match size {
- Some(size) => offset + size,
- None => dst_buffer.size,
- };
- if offset == end {
+ if offset == end_offset {
log::trace!("Ignoring fill_buffer of size 0");
return Ok(());
}
@@ -144,7 +151,7 @@ impl Global {
cmd_buf_data.buffer_memory_init_actions.extend(
dst_buffer.initialization_status.read().create_action(
&dst_buffer,
- offset..end,
+ offset..end_offset,
MemoryInitKind::ImplicitlyInitialized,
),
);
@@ -154,7 +161,7 @@ impl Global {
let cmd_buf_raw = cmd_buf_data.encoder.open()?;
unsafe {
cmd_buf_raw.transition_buffers(dst_barrier.into_iter());
- cmd_buf_raw.clear_buffer(dst_raw, offset..end);
+ cmd_buf_raw.clear_buffer(dst_raw, offset..end_offset);
}
Ok(())
}
@@ -366,7 +373,7 @@ fn clear_texture_via_buffer_copies<A: HalApi>(
assert!(
max_rows_per_copy > 0,
"Zero buffer size is too small to fill a single row \
- of a texture with format {:?} and desc {:?}",
+ of a texture with format {:?} and desc {:?}",
texture_desc.format,
texture_desc.size
);
diff --git a/third_party/rust/wgpu-core/src/command/compute.rs b/third_party/rust/wgpu-core/src/command/compute.rs
index 804186a01e..c2fd3ab397 100644
--- a/third_party/rust/wgpu-core/src/command/compute.rs
+++ b/third_party/rust/wgpu-core/src/command/compute.rs
@@ -1,6 +1,7 @@
use crate::device::DeviceError;
use crate::resource::Resource;
use crate::snatch::SnatchGuard;
+use crate::track::TrackerIndex;
use crate::{
binding_model::{
BindError, BindGroup, LateMinBufferBindingSizeMismatch, PushConstantUploadError,
@@ -305,7 +306,7 @@ impl<A: HalApi> State<A> {
raw_encoder: &mut A::CommandEncoder,
base_trackers: &mut Tracker<A>,
bind_group_guard: &Storage<BindGroup<A>>,
- indirect_buffer: Option<id::BufferId>,
+ indirect_buffer: Option<TrackerIndex>,
snatch_guard: &SnatchGuard,
) -> Result<(), UsageConflict> {
for id in self.binder.list_active() {
@@ -402,12 +403,11 @@ impl Global {
let pipeline_guard = hub.compute_pipelines.read();
let query_set_guard = hub.query_sets.read();
let buffer_guard = hub.buffers.read();
- let texture_guard = hub.textures.read();
let mut state = State {
binder: Binder::new(),
pipeline: None,
- scope: UsageScope::new(&*buffer_guard, &*texture_guard),
+ scope: UsageScope::new(&device.tracker_indices),
debug_scope_depth: 0,
};
let mut temp_offsets = Vec::new();
@@ -452,17 +452,14 @@ impl Global {
let snatch_guard = device.snatchable_lock.read();
- tracker.set_size(
- Some(&*buffer_guard),
- Some(&*texture_guard),
- None,
- None,
- Some(&*bind_group_guard),
- Some(&*pipeline_guard),
- None,
- None,
- Some(&*query_set_guard),
- );
+ let indices = &device.tracker_indices;
+ tracker.buffers.set_size(indices.buffers.size());
+ tracker.textures.set_size(indices.textures.size());
+ tracker.bind_groups.set_size(indices.bind_groups.size());
+ tracker
+ .compute_pipelines
+ .set_size(indices.compute_pipelines.size());
+ tracker.query_sets.set_size(indices.query_sets.size());
let discard_hal_labels = self
.instance
@@ -719,8 +716,12 @@ impl Global {
.buffers
.merge_single(&*buffer_guard, buffer_id, hal::BufferUses::INDIRECT)
.map_pass_err(scope)?;
- check_buffer_usage(indirect_buffer.usage, wgt::BufferUsages::INDIRECT)
- .map_pass_err(scope)?;
+ check_buffer_usage(
+ buffer_id,
+ indirect_buffer.usage,
+ wgt::BufferUsages::INDIRECT,
+ )
+ .map_pass_err(scope)?;
let end_offset = offset + mem::size_of::<wgt::DispatchIndirectArgs>() as u64;
if end_offset > indirect_buffer.size {
@@ -753,7 +754,7 @@ impl Global {
raw,
&mut intermediate_trackers,
&*bind_group_guard,
- Some(buffer_id),
+ Some(indirect_buffer.as_info().tracker_index()),
&snatch_guard,
)
.map_pass_err(scope)?;
diff --git a/third_party/rust/wgpu-core/src/command/draw.rs b/third_party/rust/wgpu-core/src/command/draw.rs
index e03a78ee93..98aa689b78 100644
--- a/third_party/rust/wgpu-core/src/command/draw.rs
+++ b/third_party/rust/wgpu-core/src/command/draw.rs
@@ -2,17 +2,22 @@
!*/
use crate::{
- binding_model::{LateMinBufferBindingSizeMismatch, PushConstantUploadError},
+ binding_model::{BindGroup, LateMinBufferBindingSizeMismatch, PushConstantUploadError},
error::ErrorFormatter,
+ hal_api::HalApi,
id,
+ pipeline::RenderPipeline,
+ resource::{Buffer, QuerySet},
track::UsageConflict,
validation::{MissingBufferUsageError, MissingTextureUsageError},
};
use wgt::{BufferAddress, BufferSize, Color, VertexStepMode};
-use std::num::NonZeroU32;
+use std::{num::NonZeroU32, sync::Arc};
use thiserror::Error;
+use super::RenderBundle;
+
/// Error validating a draw call.
#[derive(Clone, Debug, Error, Eq, PartialEq)]
#[non_exhaustive]
@@ -245,3 +250,114 @@ pub enum RenderCommand {
EndPipelineStatisticsQuery,
ExecuteBundle(id::RenderBundleId),
}
+
+/// Equivalent to `RenderCommand` with the Ids resolved into resource Arcs.
+#[doc(hidden)]
+#[derive(Clone, Debug)]
+pub enum ArcRenderCommand<A: HalApi> {
+ SetBindGroup {
+ index: u32,
+ num_dynamic_offsets: usize,
+ bind_group: Arc<BindGroup<A>>,
+ },
+ SetPipeline(Arc<RenderPipeline<A>>),
+ SetIndexBuffer {
+ buffer: Arc<Buffer<A>>,
+ index_format: wgt::IndexFormat,
+ offset: BufferAddress,
+ size: Option<BufferSize>,
+ },
+ SetVertexBuffer {
+ slot: u32,
+ buffer: Arc<Buffer<A>>,
+ offset: BufferAddress,
+ size: Option<BufferSize>,
+ },
+ SetBlendConstant(Color),
+ SetStencilReference(u32),
+ SetViewport {
+ rect: Rect<f32>,
+ depth_min: f32,
+ depth_max: f32,
+ },
+ SetScissor(Rect<u32>),
+
+ /// Set a range of push constants to values stored in [`BasePass::push_constant_data`].
+ ///
+ /// See [`wgpu::RenderPass::set_push_constants`] for a detailed explanation
+ /// of the restrictions these commands must satisfy.
+ SetPushConstant {
+ /// Which stages we are setting push constant values for.
+ stages: wgt::ShaderStages,
+
+ /// The byte offset within the push constant storage to write to. This
+ /// must be a multiple of four.
+ offset: u32,
+
+ /// The number of bytes to write. This must be a multiple of four.
+ size_bytes: u32,
+
+ /// Index in [`BasePass::push_constant_data`] of the start of the data
+ /// to be written.
+ ///
+ /// Note: this is not a byte offset like `offset`. Rather, it is the
+ /// index of the first `u32` element in `push_constant_data` to read.
+ ///
+ /// `None` means zeros should be written to the destination range, and
+ /// there is no corresponding data in `push_constant_data`. This is used
+ /// by render bundles, which explicitly clear out any state that
+ /// post-bundle code might see.
+ values_offset: Option<u32>,
+ },
+ Draw {
+ vertex_count: u32,
+ instance_count: u32,
+ first_vertex: u32,
+ first_instance: u32,
+ },
+ DrawIndexed {
+ index_count: u32,
+ instance_count: u32,
+ first_index: u32,
+ base_vertex: i32,
+ first_instance: u32,
+ },
+ MultiDrawIndirect {
+ buffer: Arc<Buffer<A>>,
+ offset: BufferAddress,
+ /// Count of `None` represents a non-multi call.
+ count: Option<NonZeroU32>,
+ indexed: bool,
+ },
+ MultiDrawIndirectCount {
+ buffer: Arc<Buffer<A>>,
+ offset: BufferAddress,
+ count_buffer: Arc<Buffer<A>>,
+ count_buffer_offset: BufferAddress,
+ max_count: u32,
+ indexed: bool,
+ },
+ PushDebugGroup {
+ color: u32,
+ len: usize,
+ },
+ PopDebugGroup,
+ InsertDebugMarker {
+ color: u32,
+ len: usize,
+ },
+ WriteTimestamp {
+ query_set: Arc<QuerySet<A>>,
+ query_index: u32,
+ },
+ BeginOcclusionQuery {
+ query_index: u32,
+ },
+ EndOcclusionQuery,
+ BeginPipelineStatisticsQuery {
+ query_set: Arc<QuerySet<A>>,
+ query_index: u32,
+ },
+ EndPipelineStatisticsQuery,
+ ExecuteBundle(Arc<RenderBundle<A>>),
+}
diff --git a/third_party/rust/wgpu-core/src/command/mod.rs b/third_party/rust/wgpu-core/src/command/mod.rs
index 2d5fca200a..febed4fc97 100644
--- a/third_party/rust/wgpu-core/src/command/mod.rs
+++ b/third_party/rust/wgpu-core/src/command/mod.rs
@@ -75,7 +75,7 @@ impl<A: HalApi> CommandEncoder<A> {
Ok(())
}
- fn discard(&mut self) {
+ pub(crate) fn discard(&mut self) {
if self.is_open {
self.is_open = false;
unsafe { self.raw.discard_encoding() };
@@ -112,7 +112,7 @@ pub(crate) struct DestroyedBufferError(pub id::BufferId);
pub(crate) struct DestroyedTextureError(pub id::TextureId);
pub struct CommandBufferMutable<A: HalApi> {
- encoder: CommandEncoder<A>,
+ pub(crate) encoder: CommandEncoder<A>,
status: CommandEncoderStatus,
pub(crate) trackers: Tracker<A>,
buffer_memory_init_actions: Vec<BufferInitTrackerAction<A>>,
@@ -174,6 +174,7 @@ impl<A: HalApi> CommandBuffer<A> {
.as_ref()
.unwrap_or(&String::from("<CommandBuffer>"))
.as_str(),
+ None,
),
data: Mutex::new(Some(CommandBufferMutable {
encoder: CommandEncoder {
diff --git a/third_party/rust/wgpu-core/src/command/query.rs b/third_party/rust/wgpu-core/src/command/query.rs
index 39d7a9cc93..89cba6fbf3 100644
--- a/third_party/rust/wgpu-core/src/command/query.rs
+++ b/third_party/rust/wgpu-core/src/command/query.rs
@@ -4,7 +4,7 @@ use hal::CommandEncoder as _;
use crate::device::trace::Command as TraceCommand;
use crate::{
command::{CommandBuffer, CommandEncoderError},
- device::DeviceError,
+ device::{DeviceError, MissingFeatures},
global::Global,
hal_api::HalApi,
id::{self, Id},
@@ -108,6 +108,8 @@ pub enum QueryError {
Device(#[from] DeviceError),
#[error(transparent)]
Encoder(#[from] CommandEncoderError),
+ #[error(transparent)]
+ MissingFeature(#[from] MissingFeatures),
#[error("Error encountered while trying to use queries")]
Use(#[from] QueryUseError),
#[error("Error encountered while trying to resolve a query")]
@@ -355,6 +357,11 @@ impl Global {
let hub = A::hub(self);
let cmd_buf = CommandBuffer::get_encoder(hub, command_encoder_id)?;
+
+ cmd_buf
+ .device
+ .require_features(wgt::Features::TIMESTAMP_QUERY_INSIDE_ENCODERS)?;
+
let mut cmd_buf_data = cmd_buf.data.lock();
let cmd_buf_data = cmd_buf_data.as_mut().unwrap();
diff --git a/third_party/rust/wgpu-core/src/command/render.rs b/third_party/rust/wgpu-core/src/command/render.rs
index d3de3e26e1..9141ddb021 100644
--- a/third_party/rust/wgpu-core/src/command/render.rs
+++ b/third_party/rust/wgpu-core/src/command/render.rs
@@ -22,7 +22,7 @@ use crate::{
hal_label, id,
init_tracker::{MemoryInitKind, TextureInitRange, TextureInitTrackerAction},
pipeline::{self, PipelineFlags},
- resource::{Buffer, QuerySet, Texture, TextureView, TextureViewNotRenderableReason},
+ resource::{QuerySet, Texture, TextureView, TextureViewNotRenderableReason},
storage::Storage,
track::{TextureSelector, Tracker, UsageConflict, UsageScope},
validation::{
@@ -531,6 +531,8 @@ pub enum ColorAttachmentError {
InvalidFormat(wgt::TextureFormat),
#[error("The number of color attachments {given} exceeds the limit {limit}")]
TooMany { given: usize, limit: usize },
+ #[error("The total number of bytes per sample in color attachments {total} exceeds the limit {limit}")]
+ TooManyBytesPerSample { total: u32, limit: u32 },
}
/// Error encountered when performing a render pass.
@@ -799,8 +801,6 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> {
texture_memory_actions: &mut CommandBufferTextureMemoryActions<A>,
pending_query_resets: &mut QueryResetMap<A>,
view_guard: &'a Storage<TextureView<A>>,
- buffer_guard: &'a Storage<Buffer<A>>,
- texture_guard: &'a Storage<Texture<A>>,
query_set_guard: &'a Storage<QuerySet<A>>,
snatch_guard: &SnatchGuard<'a>,
) -> Result<Self, RenderPassErrorInner> {
@@ -1214,7 +1214,7 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> {
Ok(Self {
context,
- usage_scope: UsageScope::new(buffer_guard, texture_guard),
+ usage_scope: UsageScope::new(&device.tracker_indices),
render_attachments,
is_depth_read_only,
is_stencil_read_only,
@@ -1386,7 +1386,6 @@ impl Global {
let render_pipeline_guard = hub.render_pipelines.read();
let query_set_guard = hub.query_sets.read();
let buffer_guard = hub.buffers.read();
- let texture_guard = hub.textures.read();
let view_guard = hub.texture_views.read();
log::trace!(
@@ -1406,24 +1405,21 @@ impl Global {
texture_memory_actions,
pending_query_resets,
&*view_guard,
- &*buffer_guard,
- &*texture_guard,
&*query_set_guard,
&snatch_guard,
)
.map_pass_err(pass_scope)?;
- tracker.set_size(
- Some(&*buffer_guard),
- Some(&*texture_guard),
- Some(&*view_guard),
- None,
- Some(&*bind_group_guard),
- None,
- Some(&*render_pipeline_guard),
- Some(&*bundle_guard),
- Some(&*query_set_guard),
- );
+ let indices = &device.tracker_indices;
+ tracker.buffers.set_size(indices.buffers.size());
+ tracker.textures.set_size(indices.textures.size());
+ tracker.views.set_size(indices.texture_views.size());
+ tracker.bind_groups.set_size(indices.bind_groups.size());
+ tracker
+ .render_pipelines
+ .set_size(indices.render_pipelines.size());
+ tracker.bundles.set_size(indices.bundles.size());
+ tracker.query_sets.set_size(indices.query_sets.size());
let raw = &mut encoder.raw;
@@ -1675,7 +1671,7 @@ impl Global {
return Err(DeviceError::WrongDevice).map_pass_err(scope);
}
- check_buffer_usage(buffer.usage, BufferUsages::INDEX)
+ check_buffer_usage(buffer_id, buffer.usage, BufferUsages::INDEX)
.map_pass_err(scope)?;
let buf_raw = buffer
.raw
@@ -1737,7 +1733,7 @@ impl Global {
.map_pass_err(scope);
}
- check_buffer_usage(buffer.usage, BufferUsages::VERTEX)
+ check_buffer_usage(buffer_id, buffer.usage, BufferUsages::VERTEX)
.map_pass_err(scope)?;
let buf_raw = buffer
.raw
@@ -2034,8 +2030,12 @@ impl Global {
.buffers
.merge_single(&*buffer_guard, buffer_id, hal::BufferUses::INDIRECT)
.map_pass_err(scope)?;
- check_buffer_usage(indirect_buffer.usage, BufferUsages::INDIRECT)
- .map_pass_err(scope)?;
+ check_buffer_usage(
+ buffer_id,
+ indirect_buffer.usage,
+ BufferUsages::INDIRECT,
+ )
+ .map_pass_err(scope)?;
let indirect_raw = indirect_buffer
.raw
.get(&snatch_guard)
@@ -2106,8 +2106,12 @@ impl Global {
.buffers
.merge_single(&*buffer_guard, buffer_id, hal::BufferUses::INDIRECT)
.map_pass_err(scope)?;
- check_buffer_usage(indirect_buffer.usage, BufferUsages::INDIRECT)
- .map_pass_err(scope)?;
+ check_buffer_usage(
+ buffer_id,
+ indirect_buffer.usage,
+ BufferUsages::INDIRECT,
+ )
+ .map_pass_err(scope)?;
let indirect_raw = indirect_buffer
.raw
.get(&snatch_guard)
@@ -2123,7 +2127,7 @@ impl Global {
hal::BufferUses::INDIRECT,
)
.map_pass_err(scope)?;
- check_buffer_usage(count_buffer.usage, BufferUsages::INDIRECT)
+ check_buffer_usage(buffer_id, count_buffer.usage, BufferUsages::INDIRECT)
.map_pass_err(scope)?;
let count_raw = count_buffer
.raw
diff --git a/third_party/rust/wgpu-core/src/device/global.rs b/third_party/rust/wgpu-core/src/device/global.rs
index 64fd6d4de7..539b92e0f3 100644
--- a/third_party/rust/wgpu-core/src/device/global.rs
+++ b/third_party/rust/wgpu-core/src/device/global.rs
@@ -26,9 +26,7 @@ use wgt::{BufferAddress, TextureFormat};
use std::{
borrow::Cow,
- iter,
- ops::Range,
- ptr,
+ iter, ptr,
sync::{atomic::Ordering, Arc},
};
@@ -219,7 +217,7 @@ impl Global {
mapped_at_creation: false,
};
let stage = match device.create_buffer(&stage_desc, true) {
- Ok(stage) => stage,
+ Ok(stage) => Arc::new(stage),
Err(e) => {
to_destroy.push(buffer);
break e;
@@ -232,14 +230,10 @@ impl Global {
Ok(mapping) => mapping,
Err(e) => {
to_destroy.push(buffer);
- to_destroy.push(stage);
break CreateBufferError::Device(e.into());
}
};
- let stage_fid = hub.buffers.request();
- let stage = stage_fid.init(stage);
-
assert_eq!(buffer.size % wgt::COPY_BUFFER_ALIGNMENT, 0);
// Zero initialize memory and then mark both staging and buffer as initialized
// (it's guaranteed that this is the case by the time the buffer is usable)
@@ -262,7 +256,7 @@ impl Global {
.trackers
.lock()
.buffers
- .insert_single(id, resource, buffer_use);
+ .insert_single(resource, buffer_use);
return (id, None);
};
@@ -383,7 +377,7 @@ impl Global {
.buffers
.get(buffer_id)
.map_err(|_| BufferAccessError::Invalid)?;
- check_buffer_usage(buffer.usage, wgt::BufferUsages::MAP_WRITE)?;
+ check_buffer_usage(buffer_id, buffer.usage, wgt::BufferUsages::MAP_WRITE)?;
//assert!(buffer isn't used by the GPU);
#[cfg(feature = "trace")]
@@ -446,7 +440,7 @@ impl Global {
.buffers
.get(buffer_id)
.map_err(|_| BufferAccessError::Invalid)?;
- check_buffer_usage(buffer.usage, wgt::BufferUsages::MAP_READ)?;
+ check_buffer_usage(buffer_id, buffer.usage, wgt::BufferUsages::MAP_READ)?;
//assert!(buffer isn't used by the GPU);
let raw_buf = buffer
@@ -529,7 +523,7 @@ impl Global {
.lock_life()
.suspected_resources
.buffers
- .insert(buffer_id, buffer);
+ .insert(buffer.info.tracker_index(), buffer);
}
if wait {
@@ -573,11 +567,11 @@ impl Global {
let (id, resource) = fid.assign(texture);
api_log!("Device::create_texture({desc:?}) -> {id:?}");
- device.trackers.lock().textures.insert_single(
- id,
- resource,
- hal::TextureUses::UNINITIALIZED,
- );
+ device
+ .trackers
+ .lock()
+ .textures
+ .insert_single(resource, hal::TextureUses::UNINITIALIZED);
return (id, None);
};
@@ -647,11 +641,11 @@ impl Global {
let (id, resource) = fid.assign(texture);
api_log!("Device::create_texture({desc:?}) -> {id:?}");
- device.trackers.lock().textures.insert_single(
- id,
- resource,
- hal::TextureUses::UNINITIALIZED,
- );
+ device
+ .trackers
+ .lock()
+ .textures
+ .insert_single(resource, hal::TextureUses::UNINITIALIZED);
return (id, None);
};
@@ -704,7 +698,7 @@ impl Global {
.trackers
.lock()
.buffers
- .insert_single(id, buffer, hal::BufferUses::empty());
+ .insert_single(buffer, hal::BufferUses::empty());
return (id, None);
};
@@ -764,7 +758,7 @@ impl Global {
.lock_life()
.suspected_resources
.textures
- .insert(texture_id, texture.clone());
+ .insert(texture.info.tracker_index(), texture.clone());
}
}
@@ -824,7 +818,7 @@ impl Global {
}
api_log!("Texture::create_view({texture_id:?}) -> {id:?}");
- device.trackers.lock().views.insert_single(id, resource);
+ device.trackers.lock().views.insert_single(resource);
return (id, None);
};
@@ -854,7 +848,7 @@ impl Global {
.lock_life()
.suspected_resources
.texture_views
- .insert(texture_view_id, view.clone());
+ .insert(view.info.tracker_index(), view.clone());
if wait {
match view.device.wait_for_submit(last_submit_index) {
@@ -900,7 +894,7 @@ impl Global {
let (id, resource) = fid.assign(sampler);
api_log!("Device::create_sampler -> {id:?}");
- device.trackers.lock().samplers.insert_single(id, resource);
+ device.trackers.lock().samplers.insert_single(resource);
return (id, None);
};
@@ -925,7 +919,7 @@ impl Global {
.lock_life()
.suspected_resources
.samplers
- .insert(sampler_id, sampler.clone());
+ .insert(sampler.info.tracker_index(), sampler.clone());
}
}
@@ -1024,7 +1018,7 @@ impl Global {
.lock_life()
.suspected_resources
.bind_group_layouts
- .insert(bind_group_layout_id, layout.clone());
+ .insert(layout.info.tracker_index(), layout.clone());
}
}
@@ -1085,7 +1079,7 @@ impl Global {
.lock_life()
.suspected_resources
.pipeline_layouts
- .insert(pipeline_layout_id, layout.clone());
+ .insert(layout.info.tracker_index(), layout.clone());
}
}
@@ -1140,11 +1134,7 @@ impl Global {
api_log!("Device::create_bind_group -> {id:?}");
- device
- .trackers
- .lock()
- .bind_groups
- .insert_single(id, resource);
+ device.trackers.lock().bind_groups.insert_single(resource);
return (id, None);
};
@@ -1168,7 +1158,7 @@ impl Global {
.lock_life()
.suspected_resources
.bind_groups
- .insert(bind_group_id, bind_group.clone());
+ .insert(bind_group.info.tracker_index(), bind_group.clone());
}
}
@@ -1332,9 +1322,8 @@ impl Global {
if !device.is_valid() {
break DeviceError::Lost;
}
- let queue = match hub.queues.get(device.queue_id.read().unwrap()) {
- Ok(queue) => queue,
- Err(_) => break DeviceError::InvalidQueueId,
+ let Some(queue) = device.get_queue() else {
+ break DeviceError::InvalidQueueId;
};
let encoder = match device
.command_allocator
@@ -1379,6 +1368,7 @@ impl Global {
.command_buffers
.unregister(command_encoder_id.transmute())
{
+ cmd_buf.data.lock().as_mut().unwrap().encoder.discard();
cmd_buf
.device
.untrack(&cmd_buf.data.lock().as_ref().unwrap().trackers);
@@ -1450,7 +1440,7 @@ impl Global {
let (id, resource) = fid.assign(render_bundle);
api_log!("RenderBundleEncoder::finish -> {id:?}");
- device.trackers.lock().bundles.insert_single(id, resource);
+ device.trackers.lock().bundles.insert_single(resource);
return (id, None);
};
@@ -1474,7 +1464,7 @@ impl Global {
.lock_life()
.suspected_resources
.render_bundles
- .insert(render_bundle_id, bundle.clone());
+ .insert(bundle.info.tracker_index(), bundle.clone());
}
}
@@ -1513,11 +1503,7 @@ impl Global {
let (id, resource) = fid.assign(query_set);
api_log!("Device::create_query_set -> {id:?}");
- device
- .trackers
- .lock()
- .query_sets
- .insert_single(id, resource);
+ device.trackers.lock().query_sets.insert_single(resource);
return (id, None);
};
@@ -1544,7 +1530,7 @@ impl Global {
.lock_life()
.suspected_resources
.query_sets
- .insert(query_set_id, query_set.clone());
+ .insert(query_set.info.tracker_index(), query_set.clone());
}
}
@@ -1600,7 +1586,7 @@ impl Global {
.trackers
.lock()
.render_pipelines
- .insert_single(id, resource);
+ .insert_single(resource);
return (id, None);
};
@@ -1672,18 +1658,17 @@ impl Global {
let hub = A::hub(self);
if let Some(pipeline) = hub.render_pipelines.unregister(render_pipeline_id) {
- let layout_id = pipeline.layout.as_info().id();
let device = &pipeline.device;
let mut life_lock = device.lock_life();
life_lock
.suspected_resources
.render_pipelines
- .insert(render_pipeline_id, pipeline.clone());
+ .insert(pipeline.info.tracker_index(), pipeline.clone());
- life_lock
- .suspected_resources
- .pipeline_layouts
- .insert(layout_id, pipeline.layout.clone());
+ life_lock.suspected_resources.pipeline_layouts.insert(
+ pipeline.layout.info.tracker_index(),
+ pipeline.layout.clone(),
+ );
}
}
@@ -1734,7 +1719,7 @@ impl Global {
.trackers
.lock()
.compute_pipelines
- .insert_single(id, resource);
+ .insert_single(resource);
return (id, None);
};
@@ -1804,17 +1789,16 @@ impl Global {
let hub = A::hub(self);
if let Some(pipeline) = hub.compute_pipelines.unregister(compute_pipeline_id) {
- let layout_id = pipeline.layout.as_info().id();
let device = &pipeline.device;
let mut life_lock = device.lock_life();
life_lock
.suspected_resources
.compute_pipelines
- .insert(compute_pipeline_id, pipeline.clone());
- life_lock
- .suspected_resources
- .pipeline_layouts
- .insert(layout_id, pipeline.layout.clone());
+ .insert(pipeline.info.tracker_index(), pipeline.clone());
+ life_lock.suspected_resources.pipeline_layouts.insert(
+ pipeline.layout.info.tracker_index(),
+ pipeline.layout.clone(),
+ );
}
}
@@ -2113,28 +2097,41 @@ impl Global {
.get(device_id)
.map_err(|_| DeviceError::Invalid)?;
- let (closures, queue_empty) = {
- if let wgt::Maintain::WaitForSubmissionIndex(submission_index) = maintain {
- if submission_index.queue_id != device_id.transmute() {
- return Err(WaitIdleError::WrongSubmissionIndex(
- submission_index.queue_id,
- device_id,
- ));
- }
+ if let wgt::Maintain::WaitForSubmissionIndex(submission_index) = maintain {
+ if submission_index.queue_id != device_id.transmute() {
+ return Err(WaitIdleError::WrongSubmissionIndex(
+ submission_index.queue_id,
+ device_id,
+ ));
}
+ }
- let fence = device.fence.read();
- let fence = fence.as_ref().unwrap();
- device.maintain(fence, maintain)?
- };
+ let DevicePoll {
+ closures,
+ queue_empty,
+ } = Self::poll_single_device(&device, maintain)?;
+
+ closures.fire();
+
+ Ok(queue_empty)
+ }
+
+ fn poll_single_device<A: HalApi>(
+ device: &crate::device::Device<A>,
+ maintain: wgt::Maintain<queue::WrappedSubmissionIndex>,
+ ) -> Result<DevicePoll, WaitIdleError> {
+ let fence = device.fence.read();
+ let fence = fence.as_ref().unwrap();
+ let (closures, queue_empty) = device.maintain(fence, maintain)?;
// Some deferred destroys are scheduled in maintain so run this right after
// to avoid holding on to them until the next device poll.
device.deferred_resource_destruction();
- closures.fire();
-
- Ok(queue_empty)
+ Ok(DevicePoll {
+ closures,
+ queue_empty,
+ })
}
/// Poll all devices belonging to the backend `A`.
@@ -2143,7 +2140,7 @@ impl Global {
///
/// Return `all_queue_empty` indicating whether there are more queue
/// submissions still in flight.
- fn poll_device<A: HalApi>(
+ fn poll_all_devices_of_api<A: HalApi>(
&self,
force_wait: bool,
closures: &mut UserClosures,
@@ -2161,10 +2158,13 @@ impl Global {
} else {
wgt::Maintain::Poll
};
- let fence = device.fence.read();
- let fence = fence.as_ref().unwrap();
- let (cbs, queue_empty) = device.maintain(fence, maintain)?;
- all_queue_empty = all_queue_empty && queue_empty;
+
+ let DevicePoll {
+ closures: cbs,
+ queue_empty,
+ } = Self::poll_single_device(device, maintain)?;
+
+ all_queue_empty &= queue_empty;
closures.extend(cbs);
}
@@ -2186,23 +2186,23 @@ impl Global {
#[cfg(vulkan)]
{
- all_queue_empty =
- self.poll_device::<hal::api::Vulkan>(force_wait, &mut closures)? && all_queue_empty;
+ all_queue_empty &=
+ self.poll_all_devices_of_api::<hal::api::Vulkan>(force_wait, &mut closures)?;
}
#[cfg(metal)]
{
- all_queue_empty =
- self.poll_device::<hal::api::Metal>(force_wait, &mut closures)? && all_queue_empty;
+ all_queue_empty &=
+ self.poll_all_devices_of_api::<hal::api::Metal>(force_wait, &mut closures)?;
}
#[cfg(dx12)]
{
- all_queue_empty =
- self.poll_device::<hal::api::Dx12>(force_wait, &mut closures)? && all_queue_empty;
+ all_queue_empty &=
+ self.poll_all_devices_of_api::<hal::api::Dx12>(force_wait, &mut closures)?;
}
#[cfg(gles)]
{
- all_queue_empty =
- self.poll_device::<hal::api::Gles>(force_wait, &mut closures)? && all_queue_empty;
+ all_queue_empty &=
+ self.poll_all_devices_of_api::<hal::api::Gles>(force_wait, &mut closures)?;
}
closures.fire();
@@ -2336,15 +2336,18 @@ impl Global {
pub fn buffer_map_async<A: HalApi>(
&self,
buffer_id: id::BufferId,
- range: Range<BufferAddress>,
+ offset: BufferAddress,
+ size: Option<BufferAddress>,
op: BufferMapOperation,
) -> BufferAccessResult {
- api_log!("Buffer::map_async {buffer_id:?} range {range:?} op: {op:?}");
+ api_log!("Buffer::map_async {buffer_id:?} offset {offset:?} size {size:?} op: {op:?}");
// User callbacks must not be called while holding buffer_map_async_inner's locks, so we
// defer the error callback if it needs to be called immediately (typically when running
// into errors).
- if let Err((mut operation, err)) = self.buffer_map_async_inner::<A>(buffer_id, range, op) {
+ if let Err((mut operation, err)) =
+ self.buffer_map_async_inner::<A>(buffer_id, offset, size, op)
+ {
if let Some(callback) = operation.callback.take() {
callback.call(Err(err.clone()));
}
@@ -2360,7 +2363,8 @@ impl Global {
fn buffer_map_async_inner<A: HalApi>(
&self,
buffer_id: id::BufferId,
- range: Range<BufferAddress>,
+ offset: BufferAddress,
+ size: Option<BufferAddress>,
op: BufferMapOperation,
) -> Result<(), (BufferMapOperation, BufferAccessError)> {
profiling::scope!("Buffer::map_async");
@@ -2372,29 +2376,50 @@ impl Global {
HostMap::Write => (wgt::BufferUsages::MAP_WRITE, hal::BufferUses::MAP_WRITE),
};
- if range.start % wgt::MAP_ALIGNMENT != 0 || range.end % wgt::COPY_BUFFER_ALIGNMENT != 0 {
- return Err((op, BufferAccessError::UnalignedRange));
- }
-
let buffer = {
- let buffer = hub
- .buffers
- .get(buffer_id)
- .map_err(|_| BufferAccessError::Invalid);
+ let buffer = hub.buffers.get(buffer_id);
let buffer = match buffer {
Ok(b) => b,
- Err(e) => {
- return Err((op, e));
+ Err(_) => {
+ return Err((op, BufferAccessError::Invalid));
}
};
+ {
+ let snatch_guard = buffer.device.snatchable_lock.read();
+ if buffer.is_destroyed(&snatch_guard) {
+ return Err((op, BufferAccessError::Destroyed));
+ }
+ }
+
+ let range_size = if let Some(size) = size {
+ size
+ } else if offset > buffer.size {
+ 0
+ } else {
+ buffer.size - offset
+ };
+
+ if offset % wgt::MAP_ALIGNMENT != 0 {
+ return Err((op, BufferAccessError::UnalignedOffset { offset }));
+ }
+ if range_size % wgt::COPY_BUFFER_ALIGNMENT != 0 {
+ return Err((op, BufferAccessError::UnalignedRangeSize { range_size }));
+ }
+
+ let range = offset..(offset + range_size);
+
+ if range.start % wgt::MAP_ALIGNMENT != 0 || range.end % wgt::COPY_BUFFER_ALIGNMENT != 0
+ {
+ return Err((op, BufferAccessError::UnalignedRange));
+ }
let device = &buffer.device;
if !device.is_valid() {
return Err((op, DeviceError::Lost.into()));
}
- if let Err(e) = check_buffer_usage(buffer.usage, pub_usage) {
+ if let Err(e) = check_buffer_usage(buffer.info.id(), buffer.usage, pub_usage) {
return Err((op, e.into()));
}
@@ -2417,11 +2442,6 @@ impl Global {
));
}
- let snatch_guard = device.snatchable_lock.read();
- if buffer.is_destroyed(&snatch_guard) {
- return Err((op, BufferAccessError::Destroyed));
- }
-
{
let map_state = &mut *buffer.map_state.lock();
*map_state = match *map_state {
@@ -2442,6 +2462,8 @@ impl Global {
};
}
+ let snatch_guard = buffer.device.snatchable_lock.read();
+
{
let mut trackers = buffer.device.as_ref().trackers.lock();
trackers.buffers.set_single(&buffer, internal_use);
@@ -2557,3 +2579,8 @@ impl Global {
buffer.unmap()
}
}
+
+struct DevicePoll {
+ closures: UserClosures,
+ queue_empty: bool,
+}
diff --git a/third_party/rust/wgpu-core/src/device/life.rs b/third_party/rust/wgpu-core/src/device/life.rs
index 86c5d027c7..7b06a4a30b 100644
--- a/third_party/rust/wgpu-core/src/device/life.rs
+++ b/third_party/rust/wgpu-core/src/device/life.rs
@@ -6,17 +6,13 @@ use crate::{
DeviceError, DeviceLostClosure,
},
hal_api::HalApi,
- id::{
- self, BindGroupId, BindGroupLayoutId, BufferId, ComputePipelineId, Id, PipelineLayoutId,
- QuerySetId, RenderBundleId, RenderPipelineId, SamplerId, StagingBufferId, TextureId,
- TextureViewId,
- },
+ id,
pipeline::{ComputePipeline, RenderPipeline},
resource::{
self, Buffer, DestroyedBuffer, DestroyedTexture, QuerySet, Resource, Sampler,
StagingBuffer, Texture, TextureView,
},
- track::{ResourceTracker, Tracker},
+ track::{ResourceTracker, Tracker, TrackerIndex},
FastHashMap, SubmissionIndex,
};
use smallvec::SmallVec;
@@ -28,20 +24,20 @@ use thiserror::Error;
/// A struct that keeps lists of resources that are no longer needed by the user.
#[derive(Default)]
pub(crate) struct ResourceMaps<A: HalApi> {
- pub buffers: FastHashMap<BufferId, Arc<Buffer<A>>>,
- pub staging_buffers: FastHashMap<StagingBufferId, Arc<StagingBuffer<A>>>,
- pub textures: FastHashMap<TextureId, Arc<Texture<A>>>,
- pub texture_views: FastHashMap<TextureViewId, Arc<TextureView<A>>>,
- pub samplers: FastHashMap<SamplerId, Arc<Sampler<A>>>,
- pub bind_groups: FastHashMap<BindGroupId, Arc<BindGroup<A>>>,
- pub bind_group_layouts: FastHashMap<BindGroupLayoutId, Arc<BindGroupLayout<A>>>,
- pub render_pipelines: FastHashMap<RenderPipelineId, Arc<RenderPipeline<A>>>,
- pub compute_pipelines: FastHashMap<ComputePipelineId, Arc<ComputePipeline<A>>>,
- pub pipeline_layouts: FastHashMap<PipelineLayoutId, Arc<PipelineLayout<A>>>,
- pub render_bundles: FastHashMap<RenderBundleId, Arc<RenderBundle<A>>>,
- pub query_sets: FastHashMap<QuerySetId, Arc<QuerySet<A>>>,
- pub destroyed_buffers: FastHashMap<BufferId, Arc<DestroyedBuffer<A>>>,
- pub destroyed_textures: FastHashMap<TextureId, Arc<DestroyedTexture<A>>>,
+ pub buffers: FastHashMap<TrackerIndex, Arc<Buffer<A>>>,
+ pub staging_buffers: FastHashMap<TrackerIndex, Arc<StagingBuffer<A>>>,
+ pub textures: FastHashMap<TrackerIndex, Arc<Texture<A>>>,
+ pub texture_views: FastHashMap<TrackerIndex, Arc<TextureView<A>>>,
+ pub samplers: FastHashMap<TrackerIndex, Arc<Sampler<A>>>,
+ pub bind_groups: FastHashMap<TrackerIndex, Arc<BindGroup<A>>>,
+ pub bind_group_layouts: FastHashMap<TrackerIndex, Arc<BindGroupLayout<A>>>,
+ pub render_pipelines: FastHashMap<TrackerIndex, Arc<RenderPipeline<A>>>,
+ pub compute_pipelines: FastHashMap<TrackerIndex, Arc<ComputePipeline<A>>>,
+ pub pipeline_layouts: FastHashMap<TrackerIndex, Arc<PipelineLayout<A>>>,
+ pub render_bundles: FastHashMap<TrackerIndex, Arc<RenderBundle<A>>>,
+ pub query_sets: FastHashMap<TrackerIndex, Arc<QuerySet<A>>>,
+ pub destroyed_buffers: FastHashMap<TrackerIndex, Arc<DestroyedBuffer<A>>>,
+ pub destroyed_textures: FastHashMap<TrackerIndex, Arc<DestroyedTexture<A>>>,
}
impl<A: HalApi> ResourceMaps<A> {
@@ -276,25 +272,29 @@ impl<A: HalApi> LifetimeTracker<A> {
for res in temp_resources {
match res {
TempResource::Buffer(raw) => {
- last_resources.buffers.insert(raw.as_info().id(), raw);
+ last_resources
+ .buffers
+ .insert(raw.as_info().tracker_index(), raw);
}
TempResource::StagingBuffer(raw) => {
last_resources
.staging_buffers
- .insert(raw.as_info().id(), raw);
+ .insert(raw.as_info().tracker_index(), raw);
}
TempResource::DestroyedBuffer(destroyed) => {
last_resources
.destroyed_buffers
- .insert(destroyed.id, destroyed);
+ .insert(destroyed.tracker_index, destroyed);
}
TempResource::Texture(raw) => {
- last_resources.textures.insert(raw.as_info().id(), raw);
+ last_resources
+ .textures
+ .insert(raw.as_info().tracker_index(), raw);
}
TempResource::DestroyedTexture(destroyed) => {
last_resources
.destroyed_textures
- .insert(destroyed.id, destroyed);
+ .insert(destroyed.tracker_index, destroyed);
}
}
}
@@ -310,12 +310,14 @@ impl<A: HalApi> LifetimeTracker<A> {
pub fn post_submit(&mut self) {
for v in self.future_suspected_buffers.drain(..).take(1) {
- self.suspected_resources.buffers.insert(v.as_info().id(), v);
+ self.suspected_resources
+ .buffers
+ .insert(v.as_info().tracker_index(), v);
}
for v in self.future_suspected_textures.drain(..).take(1) {
self.suspected_resources
.textures
- .insert(v.as_info().id(), v);
+ .insert(v.as_info().tracker_index(), v);
}
}
@@ -386,19 +388,27 @@ impl<A: HalApi> LifetimeTracker<A> {
if let Some(resources) = resources {
match temp_resource {
TempResource::Buffer(raw) => {
- resources.buffers.insert(raw.as_info().id(), raw);
+ resources.buffers.insert(raw.as_info().tracker_index(), raw);
}
TempResource::StagingBuffer(raw) => {
- resources.staging_buffers.insert(raw.as_info().id(), raw);
+ resources
+ .staging_buffers
+ .insert(raw.as_info().tracker_index(), raw);
}
TempResource::DestroyedBuffer(destroyed) => {
- resources.destroyed_buffers.insert(destroyed.id, destroyed);
+ resources
+ .destroyed_buffers
+ .insert(destroyed.tracker_index, destroyed);
}
TempResource::Texture(raw) => {
- resources.textures.insert(raw.as_info().id(), raw);
+ resources
+ .textures
+ .insert(raw.as_info().tracker_index(), raw);
}
TempResource::DestroyedTexture(destroyed) => {
- resources.destroyed_textures.insert(destroyed.id, destroyed);
+ resources
+ .destroyed_textures
+ .insert(destroyed.tracker_index, destroyed);
}
}
}
@@ -420,27 +430,27 @@ impl<A: HalApi> LifetimeTracker<A> {
impl<A: HalApi> LifetimeTracker<A> {
fn triage_resources<R>(
- resources_map: &mut FastHashMap<Id<R::Marker>, Arc<R>>,
+ resources_map: &mut FastHashMap<TrackerIndex, Arc<R>>,
active: &mut [ActiveSubmission<A>],
- trackers: &mut impl ResourceTracker<R>,
- get_resource_map: impl Fn(&mut ResourceMaps<A>) -> &mut FastHashMap<Id<R::Marker>, Arc<R>>,
+ trackers: &mut impl ResourceTracker,
+ get_resource_map: impl Fn(&mut ResourceMaps<A>) -> &mut FastHashMap<TrackerIndex, Arc<R>>,
) -> Vec<Arc<R>>
where
R: Resource,
{
let mut removed_resources = Vec::new();
- resources_map.retain(|&id, resource| {
+ resources_map.retain(|&index, resource| {
let submit_index = resource.as_info().submission_index();
let non_referenced_resources = active
.iter_mut()
.find(|a| a.index == submit_index)
.map(|a| &mut a.last_resources);
- let is_removed = trackers.remove_abandoned(id);
+ let is_removed = trackers.remove_abandoned(index);
if is_removed {
removed_resources.push(resource.clone());
if let Some(resources) = non_referenced_resources {
- get_resource_map(resources).insert(id, resource.clone());
+ get_resource_map(resources).insert(index, resource.clone());
}
}
!is_removed
@@ -459,27 +469,29 @@ impl<A: HalApi> LifetimeTracker<A> {
);
removed_resources.drain(..).for_each(|bundle| {
for v in bundle.used.buffers.write().drain_resources() {
- self.suspected_resources.buffers.insert(v.as_info().id(), v);
+ self.suspected_resources
+ .buffers
+ .insert(v.as_info().tracker_index(), v);
}
for v in bundle.used.textures.write().drain_resources() {
self.suspected_resources
.textures
- .insert(v.as_info().id(), v);
+ .insert(v.as_info().tracker_index(), v);
}
for v in bundle.used.bind_groups.write().drain_resources() {
self.suspected_resources
.bind_groups
- .insert(v.as_info().id(), v);
+ .insert(v.as_info().tracker_index(), v);
}
for v in bundle.used.render_pipelines.write().drain_resources() {
self.suspected_resources
.render_pipelines
- .insert(v.as_info().id(), v);
+ .insert(v.as_info().tracker_index(), v);
}
for v in bundle.used.query_sets.write().drain_resources() {
self.suspected_resources
.query_sets
- .insert(v.as_info().id(), v);
+ .insert(v.as_info().tracker_index(), v);
}
});
self
@@ -496,27 +508,30 @@ impl<A: HalApi> LifetimeTracker<A> {
);
removed_resource.drain(..).for_each(|bind_group| {
for v in bind_group.used.buffers.drain_resources() {
- self.suspected_resources.buffers.insert(v.as_info().id(), v);
+ self.suspected_resources
+ .buffers
+ .insert(v.as_info().tracker_index(), v);
}
for v in bind_group.used.textures.drain_resources() {
self.suspected_resources
.textures
- .insert(v.as_info().id(), v);
+ .insert(v.as_info().tracker_index(), v);
}
for v in bind_group.used.views.drain_resources() {
self.suspected_resources
.texture_views
- .insert(v.as_info().id(), v);
+ .insert(v.as_info().tracker_index(), v);
}
for v in bind_group.used.samplers.drain_resources() {
self.suspected_resources
.samplers
- .insert(v.as_info().id(), v);
+ .insert(v.as_info().tracker_index(), v);
}
- self.suspected_resources
- .bind_group_layouts
- .insert(bind_group.layout.as_info().id(), bind_group.layout.clone());
+ self.suspected_resources.bind_group_layouts.insert(
+ bind_group.layout.as_info().tracker_index(),
+ bind_group.layout.clone(),
+ );
});
self
}
@@ -605,7 +620,7 @@ impl<A: HalApi> LifetimeTracker<A> {
);
removed_resources.drain(..).for_each(|compute_pipeline| {
self.suspected_resources.pipeline_layouts.insert(
- compute_pipeline.layout.as_info().id(),
+ compute_pipeline.layout.as_info().tracker_index(),
compute_pipeline.layout.clone(),
);
});
@@ -623,7 +638,7 @@ impl<A: HalApi> LifetimeTracker<A> {
);
removed_resources.drain(..).for_each(|render_pipeline| {
self.suspected_resources.pipeline_layouts.insert(
- render_pipeline.layout.as_info().id(),
+ render_pipeline.layout.as_info().tracker_index(),
render_pipeline.layout.clone(),
);
});
@@ -642,7 +657,7 @@ impl<A: HalApi> LifetimeTracker<A> {
for bgl in &pipeline_layout.bind_group_layouts {
self.suspected_resources
.bind_group_layouts
- .insert(bgl.as_info().id(), bgl.clone());
+ .insert(bgl.as_info().tracker_index(), bgl.clone());
}
});
self
@@ -773,14 +788,14 @@ impl<A: HalApi> LifetimeTracker<A> {
Vec::with_capacity(self.ready_to_map.len());
for buffer in self.ready_to_map.drain(..) {
- let buffer_id = buffer.info.id();
+ let tracker_index = buffer.info.tracker_index();
let is_removed = {
let mut trackers = trackers.lock();
- trackers.buffers.remove_abandoned(buffer_id)
+ trackers.buffers.remove_abandoned(tracker_index)
};
if is_removed {
*buffer.map_state.lock() = resource::BufferMapState::Idle;
- log::trace!("Buffer ready to map {:?} is not tracked anymore", buffer_id);
+ log::trace!("Buffer ready to map {tracker_index:?} is not tracked anymore");
} else {
let mapping = match std::mem::replace(
&mut *buffer.map_state.lock(),
@@ -798,7 +813,7 @@ impl<A: HalApi> LifetimeTracker<A> {
_ => panic!("No pending mapping."),
};
let status = if mapping.range.start != mapping.range.end {
- log::debug!("Buffer {:?} map state -> Active", buffer_id);
+ log::debug!("Buffer {tracker_index:?} map state -> Active");
let host = mapping.op.host;
let size = mapping.range.end - mapping.range.start;
match super::map_buffer(raw, &buffer, mapping.range.start, size, host) {
diff --git a/third_party/rust/wgpu-core/src/device/queue.rs b/third_party/rust/wgpu-core/src/device/queue.rs
index 08c5b767b6..6ebb9eb09b 100644
--- a/third_party/rust/wgpu-core/src/device/queue.rs
+++ b/third_party/rust/wgpu-core/src/device/queue.rs
@@ -12,7 +12,7 @@ use crate::{
global::Global,
hal_api::HalApi,
hal_label,
- id::{self, QueueId},
+ id::{self, DeviceId, QueueId},
init_tracker::{has_copy_partial_init_tracker_coverage, TextureInitRange},
resource::{
Buffer, BufferAccessError, BufferMapState, DestroyedBuffer, DestroyedTexture, Resource,
@@ -188,10 +188,17 @@ impl<A: HalApi> EncoderInFlight<A> {
#[derive(Debug)]
pub(crate) struct PendingWrites<A: HalApi> {
pub command_encoder: A::CommandEncoder,
- pub is_active: bool,
+
+ /// True if `command_encoder` is in the "recording" state, as
+ /// described in the docs for the [`wgpu_hal::CommandEncoder`]
+ /// trait.
+ pub is_recording: bool,
+
pub temp_resources: Vec<TempResource<A>>,
pub dst_buffers: FastHashMap<id::BufferId, Arc<Buffer<A>>>,
pub dst_textures: FastHashMap<id::TextureId, Arc<Texture<A>>>,
+
+ /// All command buffers allocated from `command_encoder`.
pub executing_command_buffers: Vec<A::CommandBuffer>,
}
@@ -199,7 +206,7 @@ impl<A: HalApi> PendingWrites<A> {
pub fn new(command_encoder: A::CommandEncoder) -> Self {
Self {
command_encoder,
- is_active: false,
+ is_recording: false,
temp_resources: Vec::new(),
dst_buffers: FastHashMap::default(),
dst_textures: FastHashMap::default(),
@@ -209,7 +216,7 @@ impl<A: HalApi> PendingWrites<A> {
pub fn dispose(mut self, device: &A::Device) {
unsafe {
- if self.is_active {
+ if self.is_recording {
self.command_encoder.discard_encoding();
}
self.command_encoder
@@ -232,9 +239,9 @@ impl<A: HalApi> PendingWrites<A> {
fn pre_submit(&mut self) -> Result<Option<&A::CommandBuffer>, DeviceError> {
self.dst_buffers.clear();
self.dst_textures.clear();
- if self.is_active {
+ if self.is_recording {
let cmd_buf = unsafe { self.command_encoder.end_encoding()? };
- self.is_active = false;
+ self.is_recording = false;
self.executing_command_buffers.push(cmd_buf);
return Ok(self.executing_command_buffers.last());
@@ -262,23 +269,23 @@ impl<A: HalApi> PendingWrites<A> {
}
pub fn activate(&mut self) -> &mut A::CommandEncoder {
- if !self.is_active {
+ if !self.is_recording {
unsafe {
self.command_encoder
.begin_encoding(Some("(wgpu internal) PendingWrites"))
.unwrap();
}
- self.is_active = true;
+ self.is_recording = true;
}
&mut self.command_encoder
}
pub fn deactivate(&mut self) {
- if self.is_active {
+ if self.is_recording {
unsafe {
self.command_encoder.discard_encoding();
}
- self.is_active = false;
+ self.is_recording = false;
}
}
}
@@ -303,7 +310,10 @@ fn prepare_staging_buffer<A: HalApi>(
raw: Mutex::new(Some(buffer)),
device: device.clone(),
size,
- info: ResourceInfo::new("<StagingBuffer>"),
+ info: ResourceInfo::new(
+ "<StagingBuffer>",
+ Some(device.tracker_indices.staging_buffers.clone()),
+ ),
is_coherent: mapping.is_coherent,
};
@@ -332,6 +342,15 @@ pub struct InvalidQueue;
#[derive(Clone, Debug, Error)]
#[non_exhaustive]
pub enum QueueWriteError {
+ #[error(
+ "Device of queue ({:?}) does not match device of write recipient ({:?})",
+ queue_device_id,
+ target_device_id
+ )]
+ DeviceMismatch {
+ queue_device_id: DeviceId,
+ target_device_id: DeviceId,
+ },
#[error(transparent)]
Queue(#[from] DeviceError),
#[error(transparent)]
@@ -376,6 +395,14 @@ impl Global {
let hub = A::hub(self);
+ let buffer_device_id = hub
+ .buffers
+ .get(buffer_id)
+ .map_err(|_| TransferError::InvalidBuffer(buffer_id))?
+ .device
+ .as_info()
+ .id();
+
let queue = hub
.queues
.get(queue_id)
@@ -383,6 +410,16 @@ impl Global {
let device = queue.device.as_ref().unwrap();
+ {
+ let queue_device_id = device.as_info().id();
+ if buffer_device_id != queue_device_id {
+ return Err(QueueWriteError::DeviceMismatch {
+ queue_device_id,
+ target_device_id: buffer_device_id,
+ });
+ }
+ }
+
let data_size = data.len() as wgt::BufferAddress;
#[cfg(feature = "trace")]
@@ -1143,7 +1180,7 @@ impl Global {
for &cmb_id in command_buffer_ids {
// we reset the used surface textures every time we use
// it, so make sure to set_size on it.
- used_surface_textures.set_size(hub.textures.read().len());
+ used_surface_textures.set_size(device.tracker_indices.textures.size());
#[allow(unused_mut)]
let mut cmdbuf = match command_buffer_guard.replace_with_error(cmb_id) {
@@ -1188,11 +1225,13 @@ impl Global {
// update submission IDs
for buffer in cmd_buf_trackers.buffers.used_resources() {
- let id = buffer.info.id();
+ let tracker_index = buffer.info.tracker_index();
let raw_buf = match buffer.raw.get(&snatch_guard) {
Some(raw) => raw,
None => {
- return Err(QueueSubmitError::DestroyedBuffer(id));
+ return Err(QueueSubmitError::DestroyedBuffer(
+ buffer.info.id(),
+ ));
}
};
buffer.info.use_at(submit_index);
@@ -1207,28 +1246,28 @@ impl Global {
.as_mut()
.unwrap()
.buffers
- .insert(id, buffer.clone());
+ .insert(tracker_index, buffer.clone());
} else {
match *buffer.map_state.lock() {
BufferMapState::Idle => (),
- _ => return Err(QueueSubmitError::BufferStillMapped(id)),
+ _ => {
+ return Err(QueueSubmitError::BufferStillMapped(
+ buffer.info.id(),
+ ))
+ }
}
}
}
for texture in cmd_buf_trackers.textures.used_resources() {
- let id = texture.info.id();
+ let tracker_index = texture.info.tracker_index();
let should_extend = match texture.inner.get(&snatch_guard) {
None => {
- return Err(QueueSubmitError::DestroyedTexture(id));
+ return Err(QueueSubmitError::DestroyedTexture(
+ texture.info.id(),
+ ));
}
Some(TextureInner::Native { .. }) => false,
- Some(TextureInner::Surface {
- ref has_work,
- ref raw,
- ..
- }) => {
- has_work.store(true, Ordering::Relaxed);
-
+ Some(TextureInner::Surface { ref raw, .. }) => {
if raw.is_some() {
submit_surface_textures_owned.push(texture.clone());
}
@@ -1242,7 +1281,7 @@ impl Global {
.as_mut()
.unwrap()
.textures
- .insert(id, texture.clone());
+ .insert(tracker_index, texture.clone());
}
if should_extend {
unsafe {
@@ -1255,11 +1294,10 @@ impl Global {
for texture_view in cmd_buf_trackers.views.used_resources() {
texture_view.info.use_at(submit_index);
if texture_view.is_unique() {
- temp_suspected
- .as_mut()
- .unwrap()
- .texture_views
- .insert(texture_view.as_info().id(), texture_view.clone());
+ temp_suspected.as_mut().unwrap().texture_views.insert(
+ texture_view.as_info().tracker_index(),
+ texture_view.clone(),
+ );
}
}
{
@@ -1279,7 +1317,7 @@ impl Global {
.as_mut()
.unwrap()
.bind_groups
- .insert(bg.as_info().id(), bg.clone());
+ .insert(bg.as_info().tracker_index(), bg.clone());
}
}
}
@@ -1290,7 +1328,7 @@ impl Global {
compute_pipeline.info.use_at(submit_index);
if compute_pipeline.is_unique() {
temp_suspected.as_mut().unwrap().compute_pipelines.insert(
- compute_pipeline.as_info().id(),
+ compute_pipeline.as_info().tracker_index(),
compute_pipeline.clone(),
);
}
@@ -1301,7 +1339,7 @@ impl Global {
render_pipeline.info.use_at(submit_index);
if render_pipeline.is_unique() {
temp_suspected.as_mut().unwrap().render_pipelines.insert(
- render_pipeline.as_info().id(),
+ render_pipeline.as_info().tracker_index(),
render_pipeline.clone(),
);
}
@@ -1309,11 +1347,10 @@ impl Global {
for query_set in cmd_buf_trackers.query_sets.used_resources() {
query_set.info.use_at(submit_index);
if query_set.is_unique() {
- temp_suspected
- .as_mut()
- .unwrap()
- .query_sets
- .insert(query_set.as_info().id(), query_set.clone());
+ temp_suspected.as_mut().unwrap().query_sets.insert(
+ query_set.as_info().tracker_index(),
+ query_set.clone(),
+ );
}
}
for bundle in cmd_buf_trackers.bundles.used_resources() {
@@ -1334,7 +1371,7 @@ impl Global {
.as_mut()
.unwrap()
.render_bundles
- .insert(bundle.as_info().id(), bundle.clone());
+ .insert(bundle.as_info().tracker_index(), bundle.clone());
}
}
}
@@ -1423,13 +1460,7 @@ impl Global {
return Err(QueueSubmitError::DestroyedTexture(id));
}
Some(TextureInner::Native { .. }) => {}
- Some(TextureInner::Surface {
- ref has_work,
- ref raw,
- ..
- }) => {
- has_work.store(true, Ordering::Relaxed);
-
+ Some(TextureInner::Surface { ref raw, .. }) => {
if raw.is_some() {
submit_surface_textures_owned.push(texture.clone());
}
diff --git a/third_party/rust/wgpu-core/src/device/resource.rs b/third_party/rust/wgpu-core/src/device/resource.rs
index b2c85a056a..28ba0eafb1 100644
--- a/third_party/rust/wgpu-core/src/device/resource.rs
+++ b/third_party/rust/wgpu-core/src/device/resource.rs
@@ -13,7 +13,6 @@ use crate::{
hal_api::HalApi,
hal_label,
hub::Hub,
- id::QueueId,
init_tracker::{
BufferInitTracker, BufferInitTrackerAction, MemoryInitKind, TextureInitRange,
TextureInitTracker, TextureInitTrackerAction,
@@ -29,13 +28,16 @@ use crate::{
resource_log,
snatch::{SnatchGuard, SnatchLock, Snatchable},
storage::Storage,
- track::{BindGroupStates, TextureSelector, Tracker},
- validation::{self, check_buffer_usage, check_texture_usage},
+ track::{BindGroupStates, TextureSelector, Tracker, TrackerIndexAllocators},
+ validation::{
+ self, check_buffer_usage, check_texture_usage, validate_color_attachment_bytes_per_sample,
+ },
FastHashMap, LabelHelpers as _, SubmissionIndex,
};
use arrayvec::ArrayVec;
use hal::{CommandEncoder as _, Device as _};
+use once_cell::sync::OnceCell;
use parking_lot::{Mutex, MutexGuard, RwLock};
use smallvec::SmallVec;
@@ -54,7 +56,7 @@ use std::{
use super::{
life::{self, ResourceMaps},
- queue::{self},
+ queue::{self, Queue},
DeviceDescriptor, DeviceError, ImplicitPipelineContext, UserClosures, ENTRYPOINT_FAILURE_ERROR,
IMPLICIT_BIND_GROUP_LAYOUT_ERROR_LABEL, ZERO_BUFFER_SIZE,
};
@@ -87,8 +89,8 @@ use super::{
pub struct Device<A: HalApi> {
raw: Option<A::Device>,
pub(crate) adapter: Arc<Adapter<A>>,
- pub(crate) queue_id: RwLock<Option<QueueId>>,
- queue_to_drop: RwLock<Option<A::Queue>>,
+ pub(crate) queue: OnceCell<Weak<Queue<A>>>,
+ queue_to_drop: OnceCell<A::Queue>,
pub(crate) zero_buffer: Option<A::Buffer>,
pub(crate) info: ResourceInfo<Device<A>>,
@@ -116,6 +118,7 @@ pub struct Device<A: HalApi> {
/// Has to be locked temporarily only (locked last)
/// and never before pending_writes
pub(crate) trackers: Mutex<Tracker<A>>,
+ pub(crate) tracker_indices: TrackerIndexAllocators,
// Life tracker should be locked right after the device and before anything else.
life_tracker: Mutex<LifetimeTracker<A>>,
/// Temporary storage for resource management functions. Cleared at the end
@@ -160,7 +163,7 @@ impl<A: HalApi> Drop for Device<A> {
unsafe {
raw.destroy_buffer(self.zero_buffer.take().unwrap());
raw.destroy_fence(self.fence.write().take().unwrap());
- let queue = self.queue_to_drop.write().take().unwrap();
+ let queue = self.queue_to_drop.take().unwrap();
raw.exit(queue);
}
}
@@ -258,16 +261,17 @@ impl<A: HalApi> Device<A> {
Ok(Self {
raw: Some(raw_device),
adapter: adapter.clone(),
- queue_id: RwLock::new(None),
- queue_to_drop: RwLock::new(None),
+ queue: OnceCell::new(),
+ queue_to_drop: OnceCell::new(),
zero_buffer: Some(zero_buffer),
- info: ResourceInfo::new("<device>"),
+ info: ResourceInfo::new("<device>", None),
command_allocator: Mutex::new(Some(com_alloc)),
active_submission_index: AtomicU64::new(0),
fence: RwLock::new(Some(fence)),
snatchable_lock: unsafe { SnatchLock::new() },
valid: AtomicBool::new(true),
trackers: Mutex::new(Tracker::new()),
+ tracker_indices: TrackerIndexAllocators::new(),
life_tracker: Mutex::new(life::LifetimeTracker::new()),
temp_suspected: Mutex::new(Some(life::ResourceMaps::new())),
bgl_pool: ResourcePool::new(),
@@ -300,7 +304,7 @@ impl<A: HalApi> Device<A> {
}
pub(crate) fn release_queue(&self, queue: A::Queue) {
- self.queue_to_drop.write().replace(queue);
+ assert!(self.queue_to_drop.set(queue).is_ok());
}
pub(crate) fn lock_life<'a>(&'a self) -> MutexGuard<'a, LifetimeTracker<A>> {
@@ -339,7 +343,8 @@ impl<A: HalApi> Device<A> {
let Some(bind_group) = bind_group.upgrade() else {
continue;
};
- let Some(raw_bind_group) = bind_group.raw.snatch(self.snatchable_lock.write()) else {
+ let Some(raw_bind_group) = bind_group.raw.snatch(self.snatchable_lock.write())
+ else {
continue;
};
@@ -357,6 +362,14 @@ impl<A: HalApi> Device<A> {
}
}
+ pub fn get_queue(&self) -> Option<Arc<Queue<A>>> {
+ self.queue.get().as_ref()?.upgrade()
+ }
+
+ pub fn set_queue(&self, queue: Arc<Queue<A>>) {
+ assert!(self.queue.set(Arc::downgrade(&queue)).is_ok());
+ }
+
/// Check this device for completed commands.
///
/// The `maintain` argument tells how the maintence function should behave, either
@@ -483,56 +496,56 @@ impl<A: HalApi> Device<A> {
if resource.is_unique() {
temp_suspected
.buffers
- .insert(resource.as_info().id(), resource.clone());
+ .insert(resource.as_info().tracker_index(), resource.clone());
}
}
for resource in trackers.textures.used_resources() {
if resource.is_unique() {
temp_suspected
.textures
- .insert(resource.as_info().id(), resource.clone());
+ .insert(resource.as_info().tracker_index(), resource.clone());
}
}
for resource in trackers.views.used_resources() {
if resource.is_unique() {
temp_suspected
.texture_views
- .insert(resource.as_info().id(), resource.clone());
+ .insert(resource.as_info().tracker_index(), resource.clone());
}
}
for resource in trackers.bind_groups.used_resources() {
if resource.is_unique() {
temp_suspected
.bind_groups
- .insert(resource.as_info().id(), resource.clone());
+ .insert(resource.as_info().tracker_index(), resource.clone());
}
}
for resource in trackers.samplers.used_resources() {
if resource.is_unique() {
temp_suspected
.samplers
- .insert(resource.as_info().id(), resource.clone());
+ .insert(resource.as_info().tracker_index(), resource.clone());
}
}
for resource in trackers.compute_pipelines.used_resources() {
if resource.is_unique() {
temp_suspected
.compute_pipelines
- .insert(resource.as_info().id(), resource.clone());
+ .insert(resource.as_info().tracker_index(), resource.clone());
}
}
for resource in trackers.render_pipelines.used_resources() {
if resource.is_unique() {
temp_suspected
.render_pipelines
- .insert(resource.as_info().id(), resource.clone());
+ .insert(resource.as_info().tracker_index(), resource.clone());
}
}
for resource in trackers.query_sets.used_resources() {
if resource.is_unique() {
temp_suspected
.query_sets
- .insert(resource.as_info().id(), resource.clone());
+ .insert(resource.as_info().tracker_index(), resource.clone());
}
}
}
@@ -633,7 +646,10 @@ impl<A: HalApi> Device<A> {
initialization_status: RwLock::new(BufferInitTracker::new(aligned_size)),
sync_mapped_writes: Mutex::new(None),
map_state: Mutex::new(resource::BufferMapState::Idle),
- info: ResourceInfo::new(desc.label.borrow_or_default()),
+ info: ResourceInfo::new(
+ desc.label.borrow_or_default(),
+ Some(self.tracker_indices.buffers.clone()),
+ ),
bind_groups: Mutex::new(Vec::new()),
})
}
@@ -662,7 +678,10 @@ impl<A: HalApi> Device<A> {
mips: 0..desc.mip_level_count,
layers: 0..desc.array_layer_count(),
},
- info: ResourceInfo::new(desc.label.borrow_or_default()),
+ info: ResourceInfo::new(
+ desc.label.borrow_or_default(),
+ Some(self.tracker_indices.textures.clone()),
+ ),
clear_mode: RwLock::new(clear_mode),
views: Mutex::new(Vec::new()),
bind_groups: Mutex::new(Vec::new()),
@@ -684,7 +703,10 @@ impl<A: HalApi> Device<A> {
initialization_status: RwLock::new(BufferInitTracker::new(0)),
sync_mapped_writes: Mutex::new(None),
map_state: Mutex::new(resource::BufferMapState::Idle),
- info: ResourceInfo::new(desc.label.borrow_or_default()),
+ info: ResourceInfo::new(
+ desc.label.borrow_or_default(),
+ Some(self.tracker_indices.buffers.clone()),
+ ),
bind_groups: Mutex::new(Vec::new()),
}
}
@@ -1262,7 +1284,10 @@ impl<A: HalApi> Device<A> {
render_extent,
samples: texture.desc.sample_count,
selector,
- info: ResourceInfo::new(desc.label.borrow_or_default()),
+ info: ResourceInfo::new(
+ desc.label.borrow_or_default(),
+ Some(self.tracker_indices.texture_views.clone()),
+ ),
})
}
@@ -1366,7 +1391,10 @@ impl<A: HalApi> Device<A> {
Ok(Sampler {
raw: Some(raw),
device: self.clone(),
- info: ResourceInfo::new(desc.label.borrow_or_default()),
+ info: ResourceInfo::new(
+ desc.label.borrow_or_default(),
+ Some(self.tracker_indices.samplers.clone()),
+ ),
comparison: desc.compare.is_some(),
filtering: desc.min_filter == wgt::FilterMode::Linear
|| desc.mag_filter == wgt::FilterMode::Linear,
@@ -1484,6 +1512,10 @@ impl<A: HalApi> Device<A> {
.contains(wgt::Features::SHADER_EARLY_DEPTH_TEST),
);
caps.set(
+ Caps::SHADER_INT64,
+ self.features.contains(wgt::Features::SHADER_INT64),
+ );
+ caps.set(
Caps::MULTISAMPLED_SHADING,
self.downlevel
.flags
@@ -1559,7 +1591,7 @@ impl<A: HalApi> Device<A> {
raw: Some(raw),
device: self.clone(),
interface: Some(interface),
- info: ResourceInfo::new(desc.label.borrow_or_default()),
+ info: ResourceInfo::new(desc.label.borrow_or_default(), None),
label: desc.label.borrow_or_default().to_string(),
})
}
@@ -1600,7 +1632,7 @@ impl<A: HalApi> Device<A> {
raw: Some(raw),
device: self.clone(),
interface: None,
- info: ResourceInfo::new(desc.label.borrow_or_default()),
+ info: ResourceInfo::new(desc.label.borrow_or_default(), None),
label: desc.label.borrow_or_default().to_string(),
})
}
@@ -1704,10 +1736,23 @@ impl<A: HalApi> Device<A> {
BindGroupLayoutEntryError::SampleTypeFloatFilterableBindingMultisampled,
});
}
- Bt::Texture { .. } => (
- Some(wgt::Features::TEXTURE_BINDING_ARRAY),
- WritableStorage::No,
- ),
+ Bt::Texture {
+ multisampled,
+ view_dimension,
+ ..
+ } => {
+ if multisampled && view_dimension != TextureViewDimension::D2 {
+ return Err(binding_model::CreateBindGroupLayoutError::Entry {
+ binding: entry.binding,
+ error: BindGroupLayoutEntryError::Non2DMultisampled(view_dimension),
+ });
+ }
+
+ (
+ Some(wgt::Features::TEXTURE_BINDING_ARRAY),
+ WritableStorage::No,
+ )
+ }
Bt::StorageTexture {
access,
view_dimension,
@@ -1840,7 +1885,10 @@ impl<A: HalApi> Device<A> {
entries: entry_map,
origin,
binding_count_validator: count_validator,
- info: ResourceInfo::new(label.unwrap_or("<BindGroupLayout>")),
+ info: ResourceInfo::new(
+ label.unwrap_or("<BindGroupLayout>"),
+ Some(self.tracker_indices.bind_group_layouts.clone()),
+ ),
label: label.unwrap_or_default().to_string(),
})
}
@@ -1905,7 +1953,7 @@ impl<A: HalApi> Device<A> {
.add_single(storage, bb.buffer_id, internal_use)
.ok_or(Error::InvalidBuffer(bb.buffer_id))?;
- check_buffer_usage(buffer.usage, pub_usage)?;
+ check_buffer_usage(bb.buffer_id, buffer.usage, pub_usage)?;
let raw_buffer = buffer
.raw
.get(snatch_guard)
@@ -2273,7 +2321,10 @@ impl<A: HalApi> Device<A> {
raw: Snatchable::new(raw),
device: self.clone(),
layout: layout.clone(),
- info: ResourceInfo::new(desc.label.borrow_or_default()),
+ info: ResourceInfo::new(
+ desc.label.borrow_or_default(),
+ Some(self.tracker_indices.bind_groups.clone()),
+ ),
used,
used_buffer_ranges,
used_texture_ranges,
@@ -2555,7 +2606,10 @@ impl<A: HalApi> Device<A> {
Ok(binding_model::PipelineLayout {
raw: Some(raw),
device: self.clone(),
- info: ResourceInfo::new(desc.label.borrow_or_default()),
+ info: ResourceInfo::new(
+ desc.label.borrow_or_default(),
+ Some(self.tracker_indices.pipeline_layouts.clone()),
+ ),
bind_group_layouts,
push_constant_ranges: desc.push_constant_ranges.iter().cloned().collect(),
})
@@ -2656,14 +2710,21 @@ impl<A: HalApi> Device<A> {
let mut shader_binding_sizes = FastHashMap::default();
let io = validation::StageIo::default();
+ let final_entry_point_name;
+
{
let stage = wgt::ShaderStages::COMPUTE;
+ final_entry_point_name = shader_module.finalize_entry_point_name(
+ stage,
+ desc.stage.entry_point.as_ref().map(|ep| ep.as_ref()),
+ )?;
+
if let Some(ref interface) = shader_module.interface {
let _ = interface.check_stage(
&mut binding_layout_source,
&mut shader_binding_sizes,
- &desc.stage.entry_point,
+ &final_entry_point_name,
stage,
io,
None,
@@ -2691,7 +2752,7 @@ impl<A: HalApi> Device<A> {
label: desc.label.to_hal(self.instance_flags),
layout: pipeline_layout.raw(),
stage: hal::ProgrammableStage {
- entry_point: desc.stage.entry_point.as_ref(),
+ entry_point: final_entry_point_name.as_ref(),
module: shader_module.raw(),
},
};
@@ -2720,7 +2781,10 @@ impl<A: HalApi> Device<A> {
device: self.clone(),
_shader_module: shader_module,
late_sized_buffer_groups,
- info: ResourceInfo::new(desc.label.borrow_or_default()),
+ info: ResourceInfo::new(
+ desc.label.borrow_or_default(),
+ Some(self.tracker_indices.compute_pipelines.clone()),
+ ),
};
Ok(pipeline)
}
@@ -2749,11 +2813,12 @@ impl<A: HalApi> Device<A> {
let mut shader_binding_sizes = FastHashMap::default();
let num_attachments = desc.fragment.as_ref().map(|f| f.targets.len()).unwrap_or(0);
- if num_attachments > hal::MAX_COLOR_ATTACHMENTS {
+ let max_attachments = self.limits.max_color_attachments as usize;
+ if num_attachments > max_attachments {
return Err(pipeline::CreateRenderPipelineError::ColorAttachment(
command::ColorAttachmentError::TooMany {
given: num_attachments,
- limit: hal::MAX_COLOR_ATTACHMENTS,
+ limit: max_attachments,
},
));
}
@@ -2959,6 +3024,7 @@ impl<A: HalApi> Device<A> {
}
}
}
+
break None;
};
if let Some(e) = error {
@@ -2967,6 +3033,16 @@ impl<A: HalApi> Device<A> {
}
}
+ let limit = self.limits.max_color_attachment_bytes_per_sample;
+ let formats = color_targets
+ .iter()
+ .map(|cs| cs.as_ref().map(|cs| cs.format));
+ if let Err(total) = validate_color_attachment_bytes_per_sample(formats, limit) {
+ return Err(pipeline::CreateRenderPipelineError::ColorAttachment(
+ command::ColorAttachmentError::TooManyBytesPerSample { total, limit },
+ ));
+ }
+
if let Some(ds) = depth_stencil_state {
let error = loop {
let format_features = self.describe_format_features(adapter, ds.format)?;
@@ -3051,6 +3127,7 @@ impl<A: HalApi> Device<A> {
};
let vertex_shader_module;
+ let vertex_entry_point_name;
let vertex_stage = {
let stage_desc = &desc.vertex.stage;
let stage = wgt::ShaderStages::VERTEX;
@@ -3065,27 +3142,37 @@ impl<A: HalApi> Device<A> {
return Err(DeviceError::WrongDevice.into());
}
+ let stage_err = |error| pipeline::CreateRenderPipelineError::Stage { stage, error };
+
+ vertex_entry_point_name = vertex_shader_module
+ .finalize_entry_point_name(
+ stage,
+ stage_desc.entry_point.as_ref().map(|ep| ep.as_ref()),
+ )
+ .map_err(stage_err)?;
+
if let Some(ref interface) = vertex_shader_module.interface {
io = interface
.check_stage(
&mut binding_layout_source,
&mut shader_binding_sizes,
- &stage_desc.entry_point,
+ &vertex_entry_point_name,
stage,
io,
desc.depth_stencil.as_ref().map(|d| d.depth_compare),
)
- .map_err(|error| pipeline::CreateRenderPipelineError::Stage { stage, error })?;
+ .map_err(stage_err)?;
validated_stages |= stage;
}
hal::ProgrammableStage {
module: vertex_shader_module.raw(),
- entry_point: stage_desc.entry_point.as_ref(),
+ entry_point: &vertex_entry_point_name,
}
};
let mut fragment_shader_module = None;
+ let fragment_entry_point_name;
let fragment_stage = match desc.fragment {
Some(ref fragment_state) => {
let stage = wgt::ShaderStages::FRAGMENT;
@@ -3099,28 +3186,38 @@ impl<A: HalApi> Device<A> {
})?,
);
+ let stage_err = |error| pipeline::CreateRenderPipelineError::Stage { stage, error };
+
+ fragment_entry_point_name = shader_module
+ .finalize_entry_point_name(
+ stage,
+ fragment_state
+ .stage
+ .entry_point
+ .as_ref()
+ .map(|ep| ep.as_ref()),
+ )
+ .map_err(stage_err)?;
+
if validated_stages == wgt::ShaderStages::VERTEX {
if let Some(ref interface) = shader_module.interface {
io = interface
.check_stage(
&mut binding_layout_source,
&mut shader_binding_sizes,
- &fragment_state.stage.entry_point,
+ &fragment_entry_point_name,
stage,
io,
desc.depth_stencil.as_ref().map(|d| d.depth_compare),
)
- .map_err(|error| pipeline::CreateRenderPipelineError::Stage {
- stage,
- error,
- })?;
+ .map_err(stage_err)?;
validated_stages |= stage;
}
}
if let Some(ref interface) = shader_module.interface {
shader_expects_dual_source_blending = interface
- .fragment_uses_dual_source_blending(&fragment_state.stage.entry_point)
+ .fragment_uses_dual_source_blending(&fragment_entry_point_name)
.map_err(|error| pipeline::CreateRenderPipelineError::Stage {
stage,
error,
@@ -3129,7 +3226,7 @@ impl<A: HalApi> Device<A> {
Some(hal::ProgrammableStage {
module: shader_module.raw(),
- entry_point: fragment_state.stage.entry_point.as_ref(),
+ entry_point: &fragment_entry_point_name,
})
}
None => None,
@@ -3302,7 +3399,10 @@ impl<A: HalApi> Device<A> {
strip_index_format: desc.primitive.strip_index_format,
vertex_steps,
late_sized_buffer_groups,
- info: ResourceInfo::new(desc.label.borrow_or_default()),
+ info: ResourceInfo::new(
+ desc.label.borrow_or_default(),
+ Some(self.tracker_indices.render_pipelines.clone()),
+ ),
};
Ok(pipeline)
}
@@ -3415,7 +3515,7 @@ impl<A: HalApi> Device<A> {
Ok(QuerySet {
raw: Some(unsafe { self.raw().create_query_set(&hal_desc).unwrap() }),
device: self.clone(),
- info: ResourceInfo::new(""),
+ info: ResourceInfo::new("", Some(self.tracker_indices.query_sets.clone())),
desc: desc.map_label(|_| ()),
})
}
diff --git a/third_party/rust/wgpu-core/src/id.rs b/third_party/rust/wgpu-core/src/id.rs
index 1dbb491e60..72b74218d0 100644
--- a/third_party/rust/wgpu-core/src/id.rs
+++ b/third_party/rust/wgpu-core/src/id.rs
@@ -275,7 +275,7 @@ where
{
#[inline]
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
- self.0.partial_cmp(&other.0)
+ Some(self.cmp(other))
}
}
diff --git a/third_party/rust/wgpu-core/src/identity.rs b/third_party/rust/wgpu-core/src/identity.rs
index 0e34055c74..d76d29341a 100644
--- a/third_party/rust/wgpu-core/src/identity.rs
+++ b/third_party/rust/wgpu-core/src/identity.rs
@@ -3,10 +3,17 @@ use wgt::Backend;
use crate::{
id::{Id, Marker},
- Epoch, FastHashMap, Index,
+ Epoch, Index,
};
use std::{fmt::Debug, marker::PhantomData};
+#[derive(Copy, Clone, Debug, PartialEq)]
+enum IdSource {
+ External,
+ Allocated,
+ None,
+}
+
/// A simple structure to allocate [`Id`] identifiers.
///
/// Calling [`alloc`] returns a fresh, never-before-seen id. Calling [`free`]
@@ -34,12 +41,15 @@ use std::{fmt::Debug, marker::PhantomData};
/// [`Backend`]: wgt::Backend;
/// [`alloc`]: IdentityManager::alloc
/// [`free`]: IdentityManager::free
-#[derive(Debug, Default)]
+#[derive(Debug)]
pub(super) struct IdentityValues {
free: Vec<(Index, Epoch)>,
- //sorted by Index
- used: FastHashMap<Epoch, Vec<Index>>,
+ next_index: Index,
count: usize,
+ // Sanity check: The allocation logic works under the assumption that we don't
+ // do a mix of allocating ids from here and providing ids manually for the same
+ // storage container.
+ id_source: IdSource,
}
impl IdentityValues {
@@ -48,35 +58,41 @@ impl IdentityValues {
/// The backend is incorporated into the id, so that ids allocated with
/// different `backend` values are always distinct.
pub fn alloc<T: Marker>(&mut self, backend: Backend) -> Id<T> {
+ assert!(
+ self.id_source != IdSource::External,
+ "Mix of internally allocated and externally provided IDs"
+ );
+ self.id_source = IdSource::Allocated;
+
self.count += 1;
match self.free.pop() {
Some((index, epoch)) => Id::zip(index, epoch + 1, backend),
None => {
+ let index = self.next_index;
+ self.next_index += 1;
let epoch = 1;
- let used = self.used.entry(epoch).or_insert_with(Default::default);
- let index = if let Some(i) = used.iter().max_by_key(|v| *v) {
- i + 1
- } else {
- 0
- };
- used.push(index);
Id::zip(index, epoch, backend)
}
}
}
pub fn mark_as_used<T: Marker>(&mut self, id: Id<T>) -> Id<T> {
+ assert!(
+ self.id_source != IdSource::Allocated,
+ "Mix of internally allocated and externally provided IDs"
+ );
+ self.id_source = IdSource::External;
+
self.count += 1;
- let (index, epoch, _backend) = id.unzip();
- let used = self.used.entry(epoch).or_insert_with(Default::default);
- used.push(index);
id
}
/// Free `id`. It will never be returned from `alloc` again.
pub fn release<T: Marker>(&mut self, id: Id<T>) {
- let (index, epoch, _backend) = id.unzip();
- self.free.push((index, epoch));
+ if let IdSource::Allocated = self.id_source {
+ let (index, epoch, _backend) = id.unzip();
+ self.free.push((index, epoch));
+ }
self.count -= 1;
}
@@ -106,7 +122,12 @@ impl<T: Marker> IdentityManager<T> {
impl<T: Marker> IdentityManager<T> {
pub fn new() -> Self {
Self {
- values: Mutex::new(IdentityValues::default()),
+ values: Mutex::new(IdentityValues {
+ free: Vec::new(),
+ next_index: 0,
+ count: 0,
+ id_source: IdSource::None,
+ }),
_phantom: PhantomData,
}
}
@@ -115,15 +136,11 @@ impl<T: Marker> IdentityManager<T> {
#[test]
fn test_epoch_end_of_life() {
use crate::id;
-
let man = IdentityManager::<id::markers::Buffer>::new();
- let forced_id = man.mark_as_used(id::BufferId::zip(0, 1, Backend::Empty));
- assert_eq!(forced_id.unzip().0, 0);
let id1 = man.process(Backend::Empty);
- assert_eq!(id1.unzip().0, 1);
+ assert_eq!(id1.unzip(), (0, 1, Backend::Empty));
man.free(id1);
let id2 = man.process(Backend::Empty);
// confirm that the epoch 1 is no longer re-used
- assert_eq!(id2.unzip().0, 1);
- assert_eq!(id2.unzip().1, 2);
+ assert_eq!(id2.unzip(), (0, 2, Backend::Empty));
}
diff --git a/third_party/rust/wgpu-core/src/instance.rs b/third_party/rust/wgpu-core/src/instance.rs
index 582571c2b8..b909245fac 100644
--- a/third_party/rust/wgpu-core/src/instance.rs
+++ b/third_party/rust/wgpu-core/src/instance.rs
@@ -198,7 +198,7 @@ impl<A: HalApi> Adapter<A> {
Self {
raw,
- info: ResourceInfo::new("<Adapter>"),
+ info: ResourceInfo::new("<Adapter>", None),
}
}
@@ -303,7 +303,7 @@ impl<A: HalApi> Adapter<A> {
let queue = Queue {
device: None,
raw: Some(hal_device.queue),
- info: ResourceInfo::new("<Queue>"),
+ info: ResourceInfo::new("<Queue>", None),
};
return Ok((device, queue));
}
@@ -521,7 +521,7 @@ impl Global {
let surface = Surface {
presentation: Mutex::new(None),
- info: ResourceInfo::new("<Surface>"),
+ info: ResourceInfo::new("<Surface>", None),
raw: hal_surface,
};
@@ -542,7 +542,7 @@ impl Global {
let surface = Surface {
presentation: Mutex::new(None),
- info: ResourceInfo::new("<Surface>"),
+ info: ResourceInfo::new("<Surface>", None),
raw: {
let hal_surface = self
.instance
@@ -575,7 +575,7 @@ impl Global {
let surface = Surface {
presentation: Mutex::new(None),
- info: ResourceInfo::new("<Surface>"),
+ info: ResourceInfo::new("<Surface>", None),
raw: {
let hal_surface = self
.instance
@@ -604,7 +604,7 @@ impl Global {
let surface = Surface {
presentation: Mutex::new(None),
- info: ResourceInfo::new("<Surface>"),
+ info: ResourceInfo::new("<Surface>", None),
raw: {
let hal_surface = self
.instance
@@ -633,7 +633,7 @@ impl Global {
let surface = Surface {
presentation: Mutex::new(None),
- info: ResourceInfo::new("<Surface>"),
+ info: ResourceInfo::new("<Surface>", None),
raw: {
let hal_surface = self
.instance
@@ -1072,10 +1072,10 @@ impl Global {
let device = hub.devices.get(device_id).unwrap();
queue.device = Some(device.clone());
- let (queue_id, _) = queue_fid.assign(queue);
+ let (queue_id, queue) = queue_fid.assign(queue);
resource_log!("Created Queue {:?}", queue_id);
- device.queue_id.write().replace(queue_id);
+ device.set_queue(queue);
return (device_id, queue_id, None);
};
@@ -1124,10 +1124,10 @@ impl Global {
let device = hub.devices.get(device_id).unwrap();
queue.device = Some(device.clone());
- let (queue_id, _) = queues_fid.assign(queue);
+ let (queue_id, queue) = queues_fid.assign(queue);
resource_log!("Created Queue {:?}", queue_id);
- device.queue_id.write().replace(queue_id);
+ device.set_queue(queue);
return (device_id, queue_id, None);
};
diff --git a/third_party/rust/wgpu-core/src/lib.rs b/third_party/rust/wgpu-core/src/lib.rs
index 9f6526fc11..5454f0d682 100644
--- a/third_party/rust/wgpu-core/src/lib.rs
+++ b/third_party/rust/wgpu-core/src/lib.rs
@@ -3,36 +3,8 @@
//! into other language-specific user-friendly libraries.
//!
//! ## Feature flags
-// NOTE: feature docs. below should be kept in sync. with `Cargo.toml`!
+#![doc = document_features::document_features!()]
//!
-//! - **`api_log_info`** --- Log all API entry points at info instead of trace level.
-//! - **`resource_log_info`** --- Log resource lifecycle management at info instead of trace level.
-//! - **`link`** _(enabled by default)_ --- Use static linking for libraries. Disable to manually
-//! link. Enabled by default.
-//! - **`renderdoc`** --- Support the Renderdoc graphics debugger:
-//! [https://renderdoc.org/](https://renderdoc.org/)
-//! - **`strict_asserts`** --- Apply run-time checks, even in release builds. These are in addition
-//! to the validation carried out at public APIs in all builds.
-//! - **`serde`** --- Enables serialization via `serde` on common wgpu types.
-//! - **`trace`** --- Enable API tracing.
-//! - **`replay`** --- Enable API replaying
-//! - **`wgsl`** --- Enable `ShaderModuleSource::Wgsl`
-//! - **`fragile-send-sync-non-atomic-wasm`** --- Implement `Send` and `Sync` on Wasm, but only if
-//! atomics are not enabled.
-//!
-//! WebGL/WebGPU objects can not be shared between threads. However, it can be useful to
-//! artificially mark them as `Send` and `Sync` anyways to make it easier to write cross-platform
-//! code. This is technically _very_ unsafe in a multithreaded environment, but on a wasm binary
-//! compiled without atomics we know we are definitely not in a multithreaded environment.
-//!
-//! ### Backends, passed through to wgpu-hal
-//!
-//! - **`metal`** --- Enable the `metal` backend.
-//! - **`vulkan`** --- Enable the `vulkan` backend.
-//! - **`gles`** --- Enable the `GLES` backend.
-//!
-//! This is used for all of GLES, OpenGL, and WebGL.
-//! - **`dx12`** --- Enable the `dx12` backend.
// When we have no backends, we end up with a lot of dead or otherwise unreachable code.
#![cfg_attr(
diff --git a/third_party/rust/wgpu-core/src/pipeline.rs b/third_party/rust/wgpu-core/src/pipeline.rs
index acc1b24b0c..4a7651b327 100644
--- a/third_party/rust/wgpu-core/src/pipeline.rs
+++ b/third_party/rust/wgpu-core/src/pipeline.rs
@@ -92,6 +92,19 @@ impl<A: HalApi> ShaderModule<A> {
pub(crate) fn raw(&self) -> &A::ShaderModule {
self.raw.as_ref().unwrap()
}
+
+ pub(crate) fn finalize_entry_point_name(
+ &self,
+ stage_bit: wgt::ShaderStages,
+ entry_point: Option<&str>,
+ ) -> Result<String, validation::StageError> {
+ match &self.interface {
+ Some(interface) => interface.finalize_entry_point_name(stage_bit, entry_point),
+ None => entry_point
+ .map(|ep| ep.to_string())
+ .ok_or(validation::StageError::NoEntryPointFound),
+ }
+ }
}
#[derive(Clone, Debug)]
@@ -213,9 +226,13 @@ impl CreateShaderModuleError {
pub struct ProgrammableStageDescriptor<'a> {
/// The compiled shader module for this stage.
pub module: ShaderModuleId,
- /// The name of the entry point in the compiled shader. There must be a function with this name
- /// in the shader.
- pub entry_point: Cow<'a, str>,
+ /// The name of the entry point in the compiled shader. The name is selected using the
+ /// following logic:
+ ///
+ /// * If `Some(name)` is specified, there must be a function with this name in the shader.
+ /// * If a single entry point associated with this stage must be in the shader, then proceed as
+ /// if `Some(…)` was specified with that entry point's name.
+ pub entry_point: Option<Cow<'a, str>>,
}
/// Number of implicit bind groups derived at pipeline creation.
diff --git a/third_party/rust/wgpu-core/src/present.rs b/third_party/rust/wgpu-core/src/present.rs
index 4d8e1df73e..cb4e17798f 100644
--- a/third_party/rust/wgpu-core/src/present.rs
+++ b/third_party/rust/wgpu-core/src/present.rs
@@ -9,10 +9,7 @@ When this texture is presented, we remove it from the device tracker as well as
extract it from the hub.
!*/
-use std::{
- borrow::Borrow,
- sync::atomic::{AtomicBool, Ordering},
-};
+use std::borrow::Borrow;
#[cfg(feature = "trace")]
use crate::device::trace::Action;
@@ -73,7 +70,7 @@ pub enum ConfigureSurfaceError {
PreviousOutputExists,
#[error("Both `Surface` width and height must be non-zero. Wait to recreate the `Surface` until the window has non-zero area.")]
ZeroArea,
- #[error("`Surface` width and height must be within the maximum supported texture size. Requested was ({width}, {height}), maximum extent is {max_texture_dimension_2d}.")]
+ #[error("`Surface` width and height must be within the maximum supported texture size. Requested was ({width}, {height}), maximum extent for either dimension is {max_texture_dimension_2d}.")]
TooLarge {
width: u32,
height: u32,
@@ -213,7 +210,6 @@ impl Global {
inner: Snatchable::new(resource::TextureInner::Surface {
raw: Some(ast.texture),
parent_id: surface_id,
- has_work: AtomicBool::new(false),
}),
device: device.clone(),
desc: texture_desc,
@@ -224,7 +220,10 @@ impl Global {
layers: 0..1,
mips: 0..1,
},
- info: ResourceInfo::new("<Surface>"),
+ info: ResourceInfo::new(
+ "<Surface Texture>",
+ Some(device.tracker_indices.textures.clone()),
+ ),
clear_mode: RwLock::new(resource::TextureClearMode::Surface {
clear_view: Some(clear_view),
}),
@@ -240,7 +239,7 @@ impl Global {
let mut trackers = device.trackers.lock();
trackers
.textures
- .insert_single(id, resource, hal::TextureUses::UNINITIALIZED);
+ .insert_single(resource, hal::TextureUses::UNINITIALIZED);
}
if present.acquired_texture.is_some() {
@@ -298,8 +297,7 @@ impl Global {
if !device.is_valid() {
return Err(DeviceError::Lost.into());
}
- let queue_id = device.queue_id.read().unwrap();
- let queue = hub.queues.get(queue_id).unwrap();
+ let queue = device.get_queue().unwrap();
#[cfg(feature = "trace")]
if let Some(ref mut trace) = *device.trace.lock() {
@@ -318,10 +316,13 @@ impl Global {
"Removing swapchain texture {:?} from the device tracker",
texture_id
);
- device.trackers.lock().textures.remove(texture_id);
-
let texture = hub.textures.unregister(texture_id);
if let Some(texture) = texture {
+ device
+ .trackers
+ .lock()
+ .textures
+ .remove(texture.info.tracker_index());
let mut exclusive_snatch_guard = device.snatchable_lock.write();
let suf = A::get_surface(&surface);
let mut inner = texture.inner_mut(&mut exclusive_snatch_guard);
@@ -331,15 +332,10 @@ impl Global {
resource::TextureInner::Surface {
ref mut raw,
ref parent_id,
- ref has_work,
} => {
if surface_id != *parent_id {
log::error!("Presented frame is from a different surface");
Err(hal::SurfaceError::Lost)
- } else if !has_work.load(Ordering::Relaxed) {
- log::error!("No work has been submitted for this frame");
- unsafe { suf.unwrap().discard_texture(raw.take().unwrap()) };
- Err(hal::SurfaceError::Outdated)
} else {
unsafe {
queue
@@ -413,18 +409,19 @@ impl Global {
"Removing swapchain texture {:?} from the device tracker",
texture_id
);
- device.trackers.lock().textures.remove(texture_id);
let texture = hub.textures.unregister(texture_id);
+
if let Some(texture) = texture {
+ device
+ .trackers
+ .lock()
+ .textures
+ .remove(texture.info.tracker_index());
let suf = A::get_surface(&surface);
let exclusive_snatch_guard = device.snatchable_lock.write();
match texture.inner.snatch(exclusive_snatch_guard).unwrap() {
- resource::TextureInner::Surface {
- mut raw,
- parent_id,
- has_work: _,
- } => {
+ resource::TextureInner::Surface { mut raw, parent_id } => {
if surface_id == parent_id {
unsafe { suf.unwrap().discard_texture(raw.take().unwrap()) };
} else {
diff --git a/third_party/rust/wgpu-core/src/registry.rs b/third_party/rust/wgpu-core/src/registry.rs
index f55809770b..80394351af 100644
--- a/third_party/rust/wgpu-core/src/registry.rs
+++ b/third_party/rust/wgpu-core/src/registry.rs
@@ -60,7 +60,6 @@ impl<T: Resource> Registry<T> {
#[must_use]
pub(crate) struct FutureId<'a, T: Resource> {
id: Id<T::Marker>,
- identity: Arc<IdentityManager<T::Marker>>,
data: &'a RwLock<Storage<T>>,
}
@@ -75,7 +74,7 @@ impl<T: Resource> FutureId<'_, T> {
}
pub fn init(&self, mut value: T) -> Arc<T> {
- value.as_info_mut().set_id(self.id, &self.identity);
+ value.as_info_mut().set_id(self.id);
Arc::new(value)
}
@@ -117,7 +116,6 @@ impl<T: Resource> Registry<T> {
}
None => self.identity.process(self.backend),
},
- identity: self.identity.clone(),
data: &self.storage,
}
}
@@ -125,7 +123,6 @@ impl<T: Resource> Registry<T> {
pub(crate) fn request(&self) -> FutureId<T> {
FutureId {
id: self.identity.process(self.backend),
- identity: self.identity.clone(),
data: &self.storage,
}
}
@@ -142,11 +139,12 @@ impl<T: Resource> Registry<T> {
self.storage.write()
}
pub fn unregister_locked(&self, id: Id<T::Marker>, storage: &mut Storage<T>) -> Option<Arc<T>> {
+ self.identity.free(id);
storage.remove(id)
}
pub fn force_replace(&self, id: Id<T::Marker>, mut value: T) {
let mut storage = self.storage.write();
- value.as_info_mut().set_id(id, &self.identity);
+ value.as_info_mut().set_id(id);
storage.force_replace(id, value)
}
pub fn force_replace_with_error(&self, id: Id<T::Marker>, label: &str) {
@@ -155,6 +153,7 @@ impl<T: Resource> Registry<T> {
storage.insert_error(id, label);
}
pub(crate) fn unregister(&self, id: Id<T::Marker>) -> Option<Arc<T>> {
+ self.identity.free(id);
let value = self.storage.write().remove(id);
//Returning None is legal if it's an error ID
value
diff --git a/third_party/rust/wgpu-core/src/resource.rs b/third_party/rust/wgpu-core/src/resource.rs
index de5d1868a3..aca077caab 100644
--- a/third_party/rust/wgpu-core/src/resource.rs
+++ b/third_party/rust/wgpu-core/src/resource.rs
@@ -9,11 +9,10 @@ use crate::{
global::Global,
hal_api::HalApi,
id::{AdapterId, BufferId, DeviceId, Id, Marker, SurfaceId, TextureId},
- identity::IdentityManager,
init_tracker::{BufferInitTracker, TextureInitTracker},
resource, resource_log,
snatch::{ExclusiveSnatchGuard, SnatchGuard, Snatchable},
- track::TextureSelector,
+ track::{SharedTrackerIndexAllocator, TextureSelector, TrackerIndex},
validation::MissingBufferUsageError,
Label, SubmissionIndex,
};
@@ -31,7 +30,7 @@ use std::{
ops::Range,
ptr::NonNull,
sync::{
- atomic::{AtomicBool, AtomicUsize, Ordering},
+ atomic::{AtomicUsize, Ordering},
Arc, Weak,
},
};
@@ -58,7 +57,8 @@ use std::{
#[derive(Debug)]
pub struct ResourceInfo<T: Resource> {
id: Option<Id<T::Marker>>,
- identity: Option<Arc<IdentityManager<T::Marker>>>,
+ tracker_index: TrackerIndex,
+ tracker_indices: Option<Arc<SharedTrackerIndexAllocator>>,
/// The index of the last queue submission in which the resource
/// was used.
///
@@ -74,19 +74,26 @@ pub struct ResourceInfo<T: Resource> {
impl<T: Resource> Drop for ResourceInfo<T> {
fn drop(&mut self) {
- if let Some(identity) = self.identity.as_ref() {
- let id = self.id.as_ref().unwrap();
- identity.free(*id);
+ if let Some(indices) = &self.tracker_indices {
+ indices.free(self.tracker_index);
}
}
}
impl<T: Resource> ResourceInfo<T> {
#[allow(unused_variables)]
- pub(crate) fn new(label: &str) -> Self {
+ pub(crate) fn new(
+ label: &str,
+ tracker_indices: Option<Arc<SharedTrackerIndexAllocator>>,
+ ) -> Self {
+ let tracker_index = tracker_indices
+ .as_ref()
+ .map(|indices| indices.alloc())
+ .unwrap_or(TrackerIndex::INVALID);
Self {
id: None,
- identity: None,
+ tracker_index,
+ tracker_indices,
submission_index: AtomicUsize::new(0),
label: label.to_string(),
}
@@ -111,9 +118,13 @@ impl<T: Resource> ResourceInfo<T> {
self.id.unwrap()
}
- pub(crate) fn set_id(&mut self, id: Id<T::Marker>, identity: &Arc<IdentityManager<T::Marker>>) {
+ pub(crate) fn tracker_index(&self) -> TrackerIndex {
+ debug_assert!(self.tracker_index != TrackerIndex::INVALID);
+ self.tracker_index
+ }
+
+ pub(crate) fn set_id(&mut self, id: Id<T::Marker>) {
self.id = Some(id);
- self.identity = Some(identity.clone());
}
/// Record that this resource will be used by the queue submission with the
@@ -551,6 +562,7 @@ impl<A: HalApi> Buffer<A> {
device: Arc::clone(&self.device),
submission_index: self.info.submission_index(),
id: self.info.id.unwrap(),
+ tracker_index: self.info.tracker_index(),
label: self.info.label.clone(),
bind_groups,
}))
@@ -611,6 +623,7 @@ pub struct DestroyedBuffer<A: HalApi> {
device: Arc<Device<A>>,
label: String,
pub(crate) id: BufferId,
+ pub(crate) tracker_index: TrackerIndex,
pub(crate) submission_index: u64,
bind_groups: Vec<Weak<BindGroup<A>>>,
}
@@ -717,7 +730,6 @@ pub(crate) enum TextureInner<A: HalApi> {
Surface {
raw: Option<A::SurfaceTexture>,
parent_id: SurfaceId,
- has_work: AtomicBool,
},
}
@@ -886,6 +898,7 @@ impl<A: HalApi> Texture<A> {
views,
bind_groups,
device: Arc::clone(&self.device),
+ tracker_index: self.info.tracker_index(),
submission_index: self.info.submission_index(),
id: self.info.id.unwrap(),
label: self.info.label.clone(),
@@ -1003,6 +1016,7 @@ pub struct DestroyedTexture<A: HalApi> {
device: Arc<Device<A>>,
label: String,
pub(crate) id: TextureId,
+ pub(crate) tracker_index: TrackerIndex,
pub(crate) submission_index: u64,
}
diff --git a/third_party/rust/wgpu-core/src/track/buffer.rs b/third_party/rust/wgpu-core/src/track/buffer.rs
index 323d2dab9d..a30ac2a225 100644
--- a/third_party/rust/wgpu-core/src/track/buffer.rs
+++ b/third_party/rust/wgpu-core/src/track/buffer.rs
@@ -7,7 +7,7 @@
use std::{borrow::Cow, marker::PhantomData, sync::Arc};
-use super::{PendingTransition, ResourceTracker};
+use super::{PendingTransition, ResourceTracker, TrackerIndex};
use crate::{
hal_api::HalApi,
id::BufferId,
@@ -64,16 +64,16 @@ impl<A: HalApi> BufferBindGroupState<A> {
#[allow(clippy::pattern_type_mismatch)]
pub(crate) fn optimize(&self) {
let mut buffers = self.buffers.lock();
- buffers.sort_unstable_by_key(|(b, _)| b.as_info().id().unzip().0);
+ buffers.sort_unstable_by_key(|(b, _)| b.as_info().tracker_index());
}
/// Returns a list of all buffers tracked. May contain duplicates.
#[allow(clippy::pattern_type_mismatch)]
- pub fn used_ids(&self) -> impl Iterator<Item = BufferId> + '_ {
+ pub fn used_tracker_indices(&self) -> impl Iterator<Item = TrackerIndex> + '_ {
let buffers = self.buffers.lock();
buffers
.iter()
- .map(|(ref b, _)| b.as_info().id())
+ .map(|(ref b, _)| b.as_info().tracker_index())
.collect::<Vec<_>>()
.into_iter()
}
@@ -149,20 +149,6 @@ impl<A: HalApi> BufferUsageScope<A> {
resources.into_iter()
}
- pub fn get(&self, id: BufferId) -> Option<&Arc<Buffer<A>>> {
- let index = id.unzip().0 as usize;
- if index > self.metadata.size() {
- return None;
- }
- self.tracker_assert_in_bounds(index);
- unsafe {
- if self.metadata.contains_unchecked(index) {
- return Some(self.metadata.get_resource_unchecked(index));
- }
- }
- None
- }
-
/// Merge the list of buffer states in the given bind group into this usage scope.
///
/// If any of the resulting states is invalid, stops the merge and returns a usage
@@ -181,7 +167,7 @@ impl<A: HalApi> BufferUsageScope<A> {
) -> Result<(), UsageConflict> {
let buffers = bind_group.buffers.lock();
for &(ref resource, state) in &*buffers {
- let index = resource.as_info().id().unzip().0 as usize;
+ let index = resource.as_info().tracker_index().as_usize();
unsafe {
insert_or_merge(
@@ -255,7 +241,7 @@ impl<A: HalApi> BufferUsageScope<A> {
.get(id)
.map_err(|_| UsageConflict::BufferInvalid { id })?;
- let index = id.unzip().0 as usize;
+ let index = buffer.info.tracker_index().as_usize();
self.allow_index(index);
@@ -292,7 +278,7 @@ pub(crate) struct BufferTracker<A: HalApi> {
temp: Vec<PendingTransition<BufferUses>>,
}
-impl<A: HalApi> ResourceTracker<Buffer<A>> for BufferTracker<A> {
+impl<A: HalApi> ResourceTracker for BufferTracker<A> {
/// Try to remove the buffer `id` from this tracker if it is otherwise unused.
///
/// A buffer is 'otherwise unused' when the only references to it are:
@@ -313,8 +299,8 @@ impl<A: HalApi> ResourceTracker<Buffer<A>> for BufferTracker<A> {
/// [`Device::trackers`]: crate::device::Device
/// [`self.metadata`]: BufferTracker::metadata
/// [`Hub::buffers`]: crate::hub::Hub::buffers
- fn remove_abandoned(&mut self, id: BufferId) -> bool {
- let index = id.unzip().0 as usize;
+ fn remove_abandoned(&mut self, index: TrackerIndex) -> bool {
+ let index = index.as_usize();
if index > self.metadata.size() {
return false;
@@ -329,16 +315,10 @@ impl<A: HalApi> ResourceTracker<Buffer<A>> for BufferTracker<A> {
//so it's already been released from user and so it's not inside Registry\Storage
if existing_ref_count <= 2 {
self.metadata.remove(index);
- log::trace!("Buffer {:?} is not tracked anymore", id,);
return true;
- } else {
- log::trace!(
- "Buffer {:?} is still referenced from {}",
- id,
- existing_ref_count
- );
- return false;
}
+
+ return false;
}
}
true
@@ -404,8 +384,8 @@ impl<A: HalApi> BufferTracker<A> {
///
/// If the ID is higher than the length of internal vectors,
/// the vectors will be extended. A call to set_size is not needed.
- pub fn insert_single(&mut self, id: BufferId, resource: Arc<Buffer<A>>, state: BufferUses) {
- let index = id.unzip().0 as usize;
+ pub fn insert_single(&mut self, resource: Arc<Buffer<A>>, state: BufferUses) {
+ let index = resource.info.tracker_index().as_usize();
self.allow_index(index);
@@ -440,7 +420,7 @@ impl<A: HalApi> BufferTracker<A> {
/// If the ID is higher than the length of internal vectors,
/// the vectors will be extended. A call to set_size is not needed.
pub fn set_single(&mut self, buffer: &Arc<Buffer<A>>, state: BufferUses) -> SetSingleResult<A> {
- let index: usize = buffer.as_info().id().unzip().0 as usize;
+ let index: usize = buffer.as_info().tracker_index().as_usize();
self.allow_index(index);
@@ -561,16 +541,15 @@ impl<A: HalApi> BufferTracker<A> {
pub unsafe fn set_and_remove_from_usage_scope_sparse(
&mut self,
scope: &mut BufferUsageScope<A>,
- id_source: impl IntoIterator<Item = BufferId>,
+ index_source: impl IntoIterator<Item = TrackerIndex>,
) {
let incoming_size = scope.state.len();
if incoming_size > self.start.len() {
self.set_size(incoming_size);
}
- for id in id_source {
- let (index32, _, _) = id.unzip();
- let index = index32 as usize;
+ for index in index_source {
+ let index = index.as_usize();
scope.tracker_assert_in_bounds(index);
@@ -599,8 +578,8 @@ impl<A: HalApi> BufferTracker<A> {
}
#[allow(dead_code)]
- pub fn get(&self, id: BufferId) -> Option<&Arc<Buffer<A>>> {
- let index = id.unzip().0 as usize;
+ pub fn get(&self, index: TrackerIndex) -> Option<&Arc<Buffer<A>>> {
+ let index = index.as_usize();
if index > self.metadata.size() {
return None;
}
@@ -785,11 +764,7 @@ unsafe fn merge<A: HalApi>(
if invalid_resource_state(merged_state) {
return Err(UsageConflict::from_buffer(
- BufferId::zip(
- index32,
- unsafe { metadata_provider.get_epoch(index) },
- A::VARIANT,
- ),
+ unsafe { metadata_provider.get_own(index).info.id() },
*current_state,
new_state,
));
diff --git a/third_party/rust/wgpu-core/src/track/metadata.rs b/third_party/rust/wgpu-core/src/track/metadata.rs
index e5f4d5e969..744783a7fa 100644
--- a/third_party/rust/wgpu-core/src/track/metadata.rs
+++ b/third_party/rust/wgpu-core/src/track/metadata.rs
@@ -1,6 +1,6 @@
//! The `ResourceMetadata` type.
-use crate::{resource::Resource, Epoch};
+use crate::resource::Resource;
use bit_vec::BitVec;
use std::{borrow::Cow, mem, sync::Arc};
use wgt::strict_assert;
@@ -194,15 +194,6 @@ impl<T: Resource> ResourceMetadataProvider<'_, T> {
}
}
}
- /// Get the epoch from this.
- ///
- /// # Safety
- ///
- /// - The index must be in bounds of the metadata tracker if this uses an indirect source.
- #[inline(always)]
- pub(super) unsafe fn get_epoch(self, index: usize) -> Epoch {
- unsafe { self.get_own(index).as_info().id().unzip().1 }
- }
}
/// Resizes the given bitvec to the given size. I'm not sure why this is hard to do but it is.
diff --git a/third_party/rust/wgpu-core/src/track/mod.rs b/third_party/rust/wgpu-core/src/track/mod.rs
index a36280d03b..9ca37ebadc 100644
--- a/third_party/rust/wgpu-core/src/track/mod.rs
+++ b/third_party/rust/wgpu-core/src/track/mod.rs
@@ -102,16 +102,11 @@ mod stateless;
mod texture;
use crate::{
- binding_model, command, conv,
- hal_api::HalApi,
- id::{self, Id},
- pipeline, resource,
- snatch::SnatchGuard,
- storage::Storage,
+ binding_model, command, conv, hal_api::HalApi, id, pipeline, resource, snatch::SnatchGuard,
};
-use parking_lot::RwLock;
-use std::{fmt, ops};
+use parking_lot::{Mutex, RwLock};
+use std::{fmt, ops, sync::Arc};
use thiserror::Error;
pub(crate) use buffer::{BufferBindGroupState, BufferTracker, BufferUsageScope};
@@ -122,6 +117,130 @@ pub(crate) use texture::{
};
use wgt::strict_assert_ne;
+#[repr(transparent)]
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)]
+pub(crate) struct TrackerIndex(u32);
+
+impl TrackerIndex {
+ /// A dummy value to place in ResourceInfo for resources that are never tracked.
+ pub const INVALID: Self = TrackerIndex(u32::MAX);
+
+ pub fn as_usize(self) -> usize {
+ debug_assert!(self != Self::INVALID);
+ self.0 as usize
+ }
+}
+
+/// wgpu-core internally uses some array-like storage for tracking resources.
+/// To that end, there needs to be a uniquely assigned index for each live resource
+/// of a certain type. This index is separate from the resource ID for various reasons:
+/// - There can be multiple resource IDs pointing to the same resource.
+/// - IDs of dead handles can be recycled while resources are internally held alive (and tracked).
+/// - The plan is to remove IDs in the long run (https://github.com/gfx-rs/wgpu/issues/5121).
+/// In order to produce these tracker indices, there is a shared TrackerIndexAllocator
+/// per resource type. Indices have the same lifetime as the internal resource they
+/// are associated with (alloc happens when creating the resource and free is called when
+/// the resource is dropped).
+struct TrackerIndexAllocator {
+ unused: Vec<TrackerIndex>,
+ next_index: TrackerIndex,
+}
+
+impl TrackerIndexAllocator {
+ pub fn new() -> Self {
+ TrackerIndexAllocator {
+ unused: Vec::new(),
+ next_index: TrackerIndex(0),
+ }
+ }
+
+ pub fn alloc(&mut self) -> TrackerIndex {
+ if let Some(index) = self.unused.pop() {
+ return index;
+ }
+
+ let index = self.next_index;
+ self.next_index.0 += 1;
+
+ index
+ }
+
+ pub fn free(&mut self, index: TrackerIndex) {
+ self.unused.push(index);
+ }
+
+ // This is used to pre-allocate the tracker storage.
+ pub fn size(&self) -> usize {
+ self.next_index.0 as usize
+ }
+}
+
+impl std::fmt::Debug for TrackerIndexAllocator {
+ fn fmt(&self, _: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> {
+ Ok(())
+ }
+}
+
+/// See TrackerIndexAllocator.
+#[derive(Debug)]
+pub(crate) struct SharedTrackerIndexAllocator {
+ inner: Mutex<TrackerIndexAllocator>,
+}
+
+impl SharedTrackerIndexAllocator {
+ pub fn new() -> Self {
+ SharedTrackerIndexAllocator {
+ inner: Mutex::new(TrackerIndexAllocator::new()),
+ }
+ }
+
+ pub fn alloc(&self) -> TrackerIndex {
+ self.inner.lock().alloc()
+ }
+
+ pub fn free(&self, index: TrackerIndex) {
+ self.inner.lock().free(index);
+ }
+
+ pub fn size(&self) -> usize {
+ self.inner.lock().size()
+ }
+}
+
+pub(crate) struct TrackerIndexAllocators {
+ pub buffers: Arc<SharedTrackerIndexAllocator>,
+ pub staging_buffers: Arc<SharedTrackerIndexAllocator>,
+ pub textures: Arc<SharedTrackerIndexAllocator>,
+ pub texture_views: Arc<SharedTrackerIndexAllocator>,
+ pub samplers: Arc<SharedTrackerIndexAllocator>,
+ pub bind_groups: Arc<SharedTrackerIndexAllocator>,
+ pub bind_group_layouts: Arc<SharedTrackerIndexAllocator>,
+ pub compute_pipelines: Arc<SharedTrackerIndexAllocator>,
+ pub render_pipelines: Arc<SharedTrackerIndexAllocator>,
+ pub pipeline_layouts: Arc<SharedTrackerIndexAllocator>,
+ pub bundles: Arc<SharedTrackerIndexAllocator>,
+ pub query_sets: Arc<SharedTrackerIndexAllocator>,
+}
+
+impl TrackerIndexAllocators {
+ pub fn new() -> Self {
+ TrackerIndexAllocators {
+ buffers: Arc::new(SharedTrackerIndexAllocator::new()),
+ staging_buffers: Arc::new(SharedTrackerIndexAllocator::new()),
+ textures: Arc::new(SharedTrackerIndexAllocator::new()),
+ texture_views: Arc::new(SharedTrackerIndexAllocator::new()),
+ samplers: Arc::new(SharedTrackerIndexAllocator::new()),
+ bind_groups: Arc::new(SharedTrackerIndexAllocator::new()),
+ bind_group_layouts: Arc::new(SharedTrackerIndexAllocator::new()),
+ compute_pipelines: Arc::new(SharedTrackerIndexAllocator::new()),
+ render_pipelines: Arc::new(SharedTrackerIndexAllocator::new()),
+ pipeline_layouts: Arc::new(SharedTrackerIndexAllocator::new()),
+ bundles: Arc::new(SharedTrackerIndexAllocator::new()),
+ query_sets: Arc::new(SharedTrackerIndexAllocator::new()),
+ }
+ }
+}
+
/// A structure containing all the information about a particular resource
/// transition. User code should be able to generate a pipeline barrier
/// based on the contents.
@@ -359,31 +478,14 @@ pub(crate) struct RenderBundleScope<A: HalApi> {
impl<A: HalApi> RenderBundleScope<A> {
/// Create the render bundle scope and pull the maximum IDs from the hubs.
- pub fn new(
- buffers: &Storage<resource::Buffer<A>>,
- textures: &Storage<resource::Texture<A>>,
- bind_groups: &Storage<binding_model::BindGroup<A>>,
- render_pipelines: &Storage<pipeline::RenderPipeline<A>>,
- query_sets: &Storage<resource::QuerySet<A>>,
- ) -> Self {
- let value = Self {
+ pub fn new() -> Self {
+ Self {
buffers: RwLock::new(BufferUsageScope::new()),
textures: RwLock::new(TextureUsageScope::new()),
bind_groups: RwLock::new(StatelessTracker::new()),
render_pipelines: RwLock::new(StatelessTracker::new()),
query_sets: RwLock::new(StatelessTracker::new()),
- };
-
- value.buffers.write().set_size(buffers.len());
- value.textures.write().set_size(textures.len());
- value.bind_groups.write().set_size(bind_groups.len());
- value
- .render_pipelines
- .write()
- .set_size(render_pipelines.len());
- value.query_sets.write().set_size(query_sets.len());
-
- value
+ }
}
/// Merge the inner contents of a bind group into the render bundle tracker.
@@ -420,17 +522,14 @@ pub(crate) struct UsageScope<A: HalApi> {
impl<A: HalApi> UsageScope<A> {
/// Create the render bundle scope and pull the maximum IDs from the hubs.
- pub fn new(
- buffers: &Storage<resource::Buffer<A>>,
- textures: &Storage<resource::Texture<A>>,
- ) -> Self {
+ pub fn new(tracker_indices: &TrackerIndexAllocators) -> Self {
let mut value = Self {
buffers: BufferUsageScope::new(),
textures: TextureUsageScope::new(),
};
- value.buffers.set_size(buffers.len());
- value.textures.set_size(textures.len());
+ value.buffers.set_size(tracker_indices.buffers.size());
+ value.textures.set_size(tracker_indices.textures.size());
value
}
@@ -478,11 +577,8 @@ impl<A: HalApi> UsageScope<A> {
}
}
-pub(crate) trait ResourceTracker<R>
-where
- R: resource::Resource,
-{
- fn remove_abandoned(&mut self, id: Id<R::Marker>) -> bool;
+pub(crate) trait ResourceTracker {
+ fn remove_abandoned(&mut self, index: TrackerIndex) -> bool;
}
/// A full double sided tracker used by CommandBuffers and the Device.
@@ -513,48 +609,6 @@ impl<A: HalApi> Tracker<A> {
}
}
- /// Pull the maximum IDs from the hubs.
- pub fn set_size(
- &mut self,
- buffers: Option<&Storage<resource::Buffer<A>>>,
- textures: Option<&Storage<resource::Texture<A>>>,
- views: Option<&Storage<resource::TextureView<A>>>,
- samplers: Option<&Storage<resource::Sampler<A>>>,
- bind_groups: Option<&Storage<binding_model::BindGroup<A>>>,
- compute_pipelines: Option<&Storage<pipeline::ComputePipeline<A>>>,
- render_pipelines: Option<&Storage<pipeline::RenderPipeline<A>>>,
- bundles: Option<&Storage<command::RenderBundle<A>>>,
- query_sets: Option<&Storage<resource::QuerySet<A>>>,
- ) {
- if let Some(buffers) = buffers {
- self.buffers.set_size(buffers.len());
- };
- if let Some(textures) = textures {
- self.textures.set_size(textures.len());
- };
- if let Some(views) = views {
- self.views.set_size(views.len());
- };
- if let Some(samplers) = samplers {
- self.samplers.set_size(samplers.len());
- };
- if let Some(bind_groups) = bind_groups {
- self.bind_groups.set_size(bind_groups.len());
- };
- if let Some(compute_pipelines) = compute_pipelines {
- self.compute_pipelines.set_size(compute_pipelines.len());
- }
- if let Some(render_pipelines) = render_pipelines {
- self.render_pipelines.set_size(render_pipelines.len());
- };
- if let Some(bundles) = bundles {
- self.bundles.set_size(bundles.len());
- };
- if let Some(query_sets) = query_sets {
- self.query_sets.set_size(query_sets.len());
- };
- }
-
/// Iterates through all resources in the given bind group and adopts
/// the state given for those resources in the UsageScope. It also
/// removes all touched resources from the usage scope.
@@ -585,7 +639,7 @@ impl<A: HalApi> Tracker<A> {
unsafe {
self.buffers.set_and_remove_from_usage_scope_sparse(
&mut scope.buffers,
- bind_group.buffers.used_ids(),
+ bind_group.buffers.used_tracker_indices(),
)
};
unsafe {
diff --git a/third_party/rust/wgpu-core/src/track/stateless.rs b/third_party/rust/wgpu-core/src/track/stateless.rs
index 4111a90f79..00225f2305 100644
--- a/third_party/rust/wgpu-core/src/track/stateless.rs
+++ b/third_party/rust/wgpu-core/src/track/stateless.rs
@@ -10,7 +10,7 @@ use parking_lot::Mutex;
use crate::{id::Id, resource::Resource, resource_log, storage::Storage, track::ResourceMetadata};
-use super::ResourceTracker;
+use super::{ResourceTracker, TrackerIndex};
/// Satisfy clippy.
type Pair<T> = (Id<<T as Resource>::Marker>, Arc<T>);
@@ -74,7 +74,7 @@ pub(crate) struct StatelessTracker<T: Resource> {
metadata: ResourceMetadata<T>,
}
-impl<T: Resource> ResourceTracker<T> for StatelessTracker<T> {
+impl<T: Resource> ResourceTracker for StatelessTracker<T> {
/// Try to remove the given resource from the tracker iff we have the last reference to the
/// resource and the epoch matches.
///
@@ -82,14 +82,14 @@ impl<T: Resource> ResourceTracker<T> for StatelessTracker<T> {
///
/// If the ID is higher than the length of internal vectors,
/// false will be returned.
- fn remove_abandoned(&mut self, id: Id<T::Marker>) -> bool {
- let index = id.unzip().0 as usize;
+ fn remove_abandoned(&mut self, index: TrackerIndex) -> bool {
+ let index = index.as_usize();
if index >= self.metadata.size() {
return false;
}
- resource_log!("StatelessTracker::remove_abandoned {id:?}");
+ resource_log!("StatelessTracker::remove_abandoned {index:?}");
self.tracker_assert_in_bounds(index);
@@ -100,17 +100,10 @@ impl<T: Resource> ResourceTracker<T> for StatelessTracker<T> {
//so it's already been released from user and so it's not inside Registry\Storage
if existing_ref_count <= 2 {
self.metadata.remove(index);
- log::trace!("{} {:?} is not tracked anymore", T::TYPE, id,);
return true;
- } else {
- log::trace!(
- "{} {:?} is still referenced from {}",
- T::TYPE,
- id,
- existing_ref_count
- );
- return false;
}
+
+ return false;
}
}
true
@@ -160,9 +153,8 @@ impl<T: Resource> StatelessTracker<T> {
///
/// If the ID is higher than the length of internal vectors,
/// the vectors will be extended. A call to set_size is not needed.
- pub fn insert_single(&mut self, id: Id<T::Marker>, resource: Arc<T>) {
- let (index32, _epoch, _) = id.unzip();
- let index = index32 as usize;
+ pub fn insert_single(&mut self, resource: Arc<T>) {
+ let index = resource.as_info().tracker_index().as_usize();
self.allow_index(index);
@@ -184,8 +176,7 @@ impl<T: Resource> StatelessTracker<T> {
) -> Option<&'a Arc<T>> {
let resource = storage.get(id).ok()?;
- let (index32, _epoch, _) = id.unzip();
- let index = index32 as usize;
+ let index = resource.as_info().tracker_index().as_usize();
self.allow_index(index);
@@ -221,18 +212,4 @@ impl<T: Resource> StatelessTracker<T> {
}
}
}
-
- pub fn get(&self, id: Id<T::Marker>) -> Option<&Arc<T>> {
- let index = id.unzip().0 as usize;
- if index > self.metadata.size() {
- return None;
- }
- self.tracker_assert_in_bounds(index);
- unsafe {
- if self.metadata.contains_unchecked(index) {
- return Some(self.metadata.get_resource_unchecked(index));
- }
- }
- None
- }
}
diff --git a/third_party/rust/wgpu-core/src/track/texture.rs b/third_party/rust/wgpu-core/src/track/texture.rs
index 601df11e1b..e7c4707c93 100644
--- a/third_party/rust/wgpu-core/src/track/texture.rs
+++ b/third_party/rust/wgpu-core/src/track/texture.rs
@@ -19,10 +19,11 @@
* will treat the contents as junk.
!*/
-use super::{range::RangedStates, PendingTransition, PendingTransitionList, ResourceTracker};
+use super::{
+ range::RangedStates, PendingTransition, PendingTransitionList, ResourceTracker, TrackerIndex,
+};
use crate::{
hal_api::HalApi,
- id::TextureId,
resource::{Resource, Texture, TextureInner},
snatch::SnatchGuard,
track::{
@@ -173,7 +174,7 @@ impl<A: HalApi> TextureBindGroupState<A> {
/// accesses will be in a constant ascending order.
pub(crate) fn optimize(&self) {
let mut textures = self.textures.lock();
- textures.sort_unstable_by_key(|v| v.texture.as_info().id().unzip().0);
+ textures.sort_unstable_by_key(|v| v.texture.as_info().tracker_index());
}
/// Returns a list of all textures tracked. May contain duplicates.
@@ -359,7 +360,7 @@ impl<A: HalApi> TextureUsageScope<A> {
selector: Option<TextureSelector>,
new_state: TextureUses,
) -> Result<(), UsageConflict> {
- let index = texture.as_info().id().unzip().0 as usize;
+ let index = texture.as_info().tracker_index().as_usize();
self.tracker_assert_in_bounds(index);
@@ -393,7 +394,7 @@ pub(crate) struct TextureTracker<A: HalApi> {
_phantom: PhantomData<A>,
}
-impl<A: HalApi> ResourceTracker<Texture<A>> for TextureTracker<A> {
+impl<A: HalApi> ResourceTracker for TextureTracker<A> {
/// Try to remove the given resource from the tracker iff we have the last reference to the
/// resource and the epoch matches.
///
@@ -401,10 +402,10 @@ impl<A: HalApi> ResourceTracker<Texture<A>> for TextureTracker<A> {
///
/// If the ID is higher than the length of internal vectors,
/// false will be returned.
- fn remove_abandoned(&mut self, id: TextureId) -> bool {
- let index = id.unzip().0 as usize;
+ fn remove_abandoned(&mut self, index: TrackerIndex) -> bool {
+ let index = index.as_usize();
- if index > self.metadata.size() {
+ if index >= self.metadata.size() {
return false;
}
@@ -419,16 +420,10 @@ impl<A: HalApi> ResourceTracker<Texture<A>> for TextureTracker<A> {
self.start_set.complex.remove(&index);
self.end_set.complex.remove(&index);
self.metadata.remove(index);
- log::trace!("Texture {:?} is not tracked anymore", id,);
return true;
- } else {
- log::trace!(
- "Texture {:?} is still referenced from {}",
- id,
- existing_ref_count
- );
- return false;
}
+
+ return false;
}
}
true
@@ -518,8 +513,8 @@ impl<A: HalApi> TextureTracker<A> {
///
/// If the ID is higher than the length of internal vectors,
/// the vectors will be extended. A call to set_size is not needed.
- pub fn insert_single(&mut self, id: TextureId, resource: Arc<Texture<A>>, usage: TextureUses) {
- let index = id.unzip().0 as usize;
+ pub fn insert_single(&mut self, resource: Arc<Texture<A>>, usage: TextureUses) {
+ let index = resource.info.tracker_index().as_usize();
self.allow_index(index);
@@ -560,7 +555,7 @@ impl<A: HalApi> TextureTracker<A> {
selector: TextureSelector,
new_state: TextureUses,
) -> Option<Drain<'_, PendingTransition<TextureUses>>> {
- let index = texture.as_info().id().unzip().0 as usize;
+ let index = texture.as_info().tracker_index().as_usize();
self.allow_index(index);
@@ -694,7 +689,7 @@ impl<A: HalApi> TextureTracker<A> {
let textures = bind_group_state.textures.lock();
for t in textures.iter() {
- let index = t.texture.as_info().id().unzip().0 as usize;
+ let index = t.texture.as_info().tracker_index().as_usize();
scope.tracker_assert_in_bounds(index);
if unsafe { !scope.metadata.contains_unchecked(index) } {
@@ -727,10 +722,10 @@ impl<A: HalApi> TextureTracker<A> {
///
/// If the ID is higher than the length of internal vectors,
/// false will be returned.
- pub fn remove(&mut self, id: TextureId) -> bool {
- let index = id.unzip().0 as usize;
+ pub fn remove(&mut self, index: TrackerIndex) -> bool {
+ let index = index.as_usize();
- if index > self.metadata.size() {
+ if index >= self.metadata.size() {
return false;
}
@@ -1080,11 +1075,7 @@ unsafe fn merge<A: HalApi>(
if invalid_resource_state(merged_state) {
return Err(UsageConflict::from_texture(
- TextureId::zip(
- index as _,
- unsafe { metadata_provider.get_epoch(index) },
- A::VARIANT,
- ),
+ unsafe { metadata_provider.get_own(index).info.id() },
texture_selector.clone(),
*current_simple,
new_simple,
@@ -1111,11 +1102,7 @@ unsafe fn merge<A: HalApi>(
if invalid_resource_state(merged_state) {
return Err(UsageConflict::from_texture(
- TextureId::zip(
- index as _,
- unsafe { metadata_provider.get_epoch(index) },
- A::VARIANT,
- ),
+ unsafe { metadata_provider.get_own(index).info.id() },
selector,
*current_simple,
new_state,
@@ -1156,11 +1143,7 @@ unsafe fn merge<A: HalApi>(
if invalid_resource_state(merged_state) {
return Err(UsageConflict::from_texture(
- TextureId::zip(
- index as _,
- unsafe { metadata_provider.get_epoch(index) },
- A::VARIANT,
- ),
+ unsafe { metadata_provider.get_own(index).info.id() },
TextureSelector {
mips: mip_id..mip_id + 1,
layers: layers.clone(),
@@ -1201,11 +1184,7 @@ unsafe fn merge<A: HalApi>(
if invalid_resource_state(merged_state) {
return Err(UsageConflict::from_texture(
- TextureId::zip(
- index as _,
- unsafe { metadata_provider.get_epoch(index) },
- A::VARIANT,
- ),
+ unsafe { metadata_provider.get_own(index).info.id() },
TextureSelector {
mips: mip_id..mip_id + 1,
layers: layers.clone(),
diff --git a/third_party/rust/wgpu-core/src/validation.rs b/third_party/rust/wgpu-core/src/validation.rs
index a0947ae83f..e4846c4000 100644
--- a/third_party/rust/wgpu-core/src/validation.rs
+++ b/third_party/rust/wgpu-core/src/validation.rs
@@ -1,4 +1,8 @@
-use crate::{device::bgl, FastHashMap, FastHashSet};
+use crate::{
+ device::bgl,
+ id::{markers::Buffer, Id},
+ FastHashMap, FastHashSet,
+};
use arrayvec::ArrayVec;
use std::{collections::hash_map::Entry, fmt};
use thiserror::Error;
@@ -134,8 +138,11 @@ pub struct Interface {
}
#[derive(Clone, Debug, Error)]
-#[error("Buffer usage is {actual:?} which does not contain required usage {expected:?}")]
+#[error(
+ "Usage flags {actual:?} for buffer {id:?} do not contain required usage flags {expected:?}"
+)]
pub struct MissingBufferUsageError {
+ pub(crate) id: Id<Buffer>,
pub(crate) actual: wgt::BufferUsages,
pub(crate) expected: wgt::BufferUsages,
}
@@ -143,11 +150,16 @@ pub struct MissingBufferUsageError {
/// Checks that the given buffer usage contains the required buffer usage,
/// returns an error otherwise.
pub fn check_buffer_usage(
+ id: Id<Buffer>,
actual: wgt::BufferUsages,
expected: wgt::BufferUsages,
) -> Result<(), MissingBufferUsageError> {
if !actual.contains(expected) {
- Err(MissingBufferUsageError { actual, expected })
+ Err(MissingBufferUsageError {
+ id,
+ actual,
+ expected,
+ })
} else {
Ok(())
}
@@ -271,6 +283,16 @@ pub enum StageError {
},
#[error("Location[{location}] is provided by the previous stage output but is not consumed as input by this stage.")]
InputNotConsumed { location: wgt::ShaderLocation },
+ #[error(
+ "Unable to select an entry point: no entry point was found in the provided shader module"
+ )]
+ NoEntryPointFound,
+ #[error(
+ "Unable to select an entry point: \
+ multiple entry points were found in the provided shader module, \
+ but no entry point was specified"
+ )]
+ MultipleEntryPointsFound,
}
fn map_storage_format_to_naga(format: wgt::TextureFormat) -> Option<naga::StorageFormat> {
@@ -892,9 +914,15 @@ impl Interface {
class,
},
naga::TypeInner::Sampler { comparison } => ResourceType::Sampler { comparison },
- naga::TypeInner::Array { stride, .. } => ResourceType::Buffer {
- size: wgt::BufferSize::new(stride as u64).unwrap(),
- },
+ naga::TypeInner::Array { stride, size, .. } => {
+ let size = match size {
+ naga::ArraySize::Constant(size) => size.get() * stride,
+ naga::ArraySize::Dynamic => stride,
+ };
+ ResourceType::Buffer {
+ size: wgt::BufferSize::new(size as u64).unwrap(),
+ }
+ }
ref other => ResourceType::Buffer {
size: wgt::BufferSize::new(other.size(module.to_ctx()) as u64).unwrap(),
},
@@ -953,6 +981,37 @@ impl Interface {
}
}
+ pub fn finalize_entry_point_name(
+ &self,
+ stage_bit: wgt::ShaderStages,
+ entry_point_name: Option<&str>,
+ ) -> Result<String, StageError> {
+ let stage = Self::shader_stage_from_stage_bit(stage_bit);
+ entry_point_name
+ .map(|ep| ep.to_string())
+ .map(Ok)
+ .unwrap_or_else(|| {
+ let mut entry_points = self
+ .entry_points
+ .keys()
+ .filter_map(|(ep_stage, name)| (ep_stage == &stage).then_some(name));
+ let first = entry_points.next().ok_or(StageError::NoEntryPointFound)?;
+ if entry_points.next().is_some() {
+ return Err(StageError::MultipleEntryPointsFound);
+ }
+ Ok(first.clone())
+ })
+ }
+
+ pub(crate) fn shader_stage_from_stage_bit(stage_bit: wgt::ShaderStages) -> naga::ShaderStage {
+ match stage_bit {
+ wgt::ShaderStages::VERTEX => naga::ShaderStage::Vertex,
+ wgt::ShaderStages::FRAGMENT => naga::ShaderStage::Fragment,
+ wgt::ShaderStages::COMPUTE => naga::ShaderStage::Compute,
+ _ => unreachable!(),
+ }
+ }
+
pub fn check_stage(
&self,
layouts: &mut BindingLayoutSource<'_>,
@@ -964,17 +1023,13 @@ impl Interface {
) -> Result<StageIo, StageError> {
// Since a shader module can have multiple entry points with the same name,
// we need to look for one with the right execution model.
- let shader_stage = match stage_bit {
- wgt::ShaderStages::VERTEX => naga::ShaderStage::Vertex,
- wgt::ShaderStages::FRAGMENT => naga::ShaderStage::Fragment,
- wgt::ShaderStages::COMPUTE => naga::ShaderStage::Compute,
- _ => unreachable!(),
- };
+ let shader_stage = Self::shader_stage_from_stage_bit(stage_bit);
let pair = (shader_stage, entry_point_name.to_string());
- let entry_point = self
- .entry_points
- .get(&pair)
- .ok_or(StageError::MissingEntryPoint(pair.1))?;
+ let entry_point = match self.entry_points.get(&pair) {
+ Some(some) => some,
+ None => return Err(StageError::MissingEntryPoint(pair.1)),
+ };
+ let (_stage, entry_point_name) = pair;
// check resources visibility
for &handle in entry_point.resources.iter() {
@@ -1246,3 +1301,31 @@ impl Interface {
.map(|ep| ep.dual_source_blending)
}
}
+
+// https://gpuweb.github.io/gpuweb/#abstract-opdef-calculating-color-attachment-bytes-per-sample
+pub fn validate_color_attachment_bytes_per_sample(
+ attachment_formats: impl Iterator<Item = Option<wgt::TextureFormat>>,
+ limit: u32,
+) -> Result<(), u32> {
+ let mut total_bytes_per_sample = 0;
+ for format in attachment_formats {
+ let Some(format) = format else {
+ continue;
+ };
+
+ let byte_cost = format.target_pixel_byte_cost().unwrap();
+ let alignment = format.target_component_alignment().unwrap();
+
+ let rem = total_bytes_per_sample % alignment;
+ if rem != 0 {
+ total_bytes_per_sample += alignment - rem;
+ }
+ total_bytes_per_sample += byte_cost;
+ }
+
+ if total_bytes_per_sample > limit {
+ return Err(total_bytes_per_sample);
+ }
+
+ Ok(())
+}