author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-28 14:29:10 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-28 14:29:10 +0000
commit    2aa4a82499d4becd2284cdb482213d541b8804dd (patch)
tree      b80bf8bf13c3766139fbacc530efd0dd9d54394c /gfx/wgpu
parent    Initial commit. (diff)
Adding upstream version 86.0.1. (upstream/86.0.1, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'gfx/wgpu')
-rw-r--r--  gfx/wgpu/.github/ISSUE_TEMPLATE/bug_report.md | 28
-rw-r--r--  gfx/wgpu/.github/ISSUE_TEMPLATE/feature_request.md | 20
-rw-r--r--  gfx/wgpu/.github/ISSUE_TEMPLATE/other.md | 10
-rw-r--r--  gfx/wgpu/.github/pull_request_template.md | 17
-rw-r--r--  gfx/wgpu/.github/workflows/ci.yml | 115
-rw-r--r--  gfx/wgpu/.gitignore | 7
-rw-r--r--  gfx/wgpu/.monocodus | 7
-rw-r--r--  gfx/wgpu/CHANGELOG.md | 170
-rw-r--r--  gfx/wgpu/Cargo.lock | 1886
-rw-r--r--  gfx/wgpu/Cargo.toml | 7
-rw-r--r--  gfx/wgpu/LICENSE | 373
-rw-r--r--  gfx/wgpu/README.md | 32
-rw-r--r--  gfx/wgpu/bors.toml | 10
-rw-r--r--  gfx/wgpu/dummy/Cargo.toml | 16
-rw-r--r--  gfx/wgpu/dummy/README.md | 3
-rw-r--r--  gfx/wgpu/dummy/src/lib.rs | 3
-rw-r--r--  gfx/wgpu/etc/big-picture.xml | 8
-rw-r--r--  gfx/wgpu/logo.png | bin 0 -> 37753 bytes
-rw-r--r--  gfx/wgpu/player/Cargo.toml | 43
-rw-r--r--  gfx/wgpu/player/README.md | 13
-rw-r--r--  gfx/wgpu/player/src/bin/play.rs | 179
-rw-r--r--  gfx/wgpu/player/src/lib.rs | 313
-rw-r--r--  gfx/wgpu/player/tests/data/all.ron | 8
-rw-r--r--  gfx/wgpu/player/tests/data/bind-group.ron | 75
-rw-r--r--  gfx/wgpu/player/tests/data/buffer-copy.ron | 34
-rw-r--r--  gfx/wgpu/player/tests/data/data1.bin | bin 0 -> 16 bytes
-rw-r--r--  gfx/wgpu/player/tests/data/empty.comp | 5
-rw-r--r--  gfx/wgpu/player/tests/data/empty.comp.spv | bin 0 -> 280 bytes
-rw-r--r--  gfx/wgpu/player/tests/data/quad.bin | 1
-rw-r--r--  gfx/wgpu/player/tests/data/quad.frag | 7
-rw-r--r--  gfx/wgpu/player/tests/data/quad.frag.spv | bin 0 -> 408 bytes
-rw-r--r--  gfx/wgpu/player/tests/data/quad.ron | 153
-rw-r--r--  gfx/wgpu/player/tests/data/quad.vert | 10
-rw-r--r--  gfx/wgpu/player/tests/data/quad.vert.spv | bin 0 -> 964 bytes
-rw-r--r--  gfx/wgpu/player/tests/test.rs | 217
-rw-r--r--  gfx/wgpu/rustfmt.toml | 0
-rw-r--r--  gfx/wgpu/wgpu-core/Cargo.toml | 68
-rw-r--r--  gfx/wgpu/wgpu-core/build.rs | 20
-rw-r--r--  gfx/wgpu/wgpu-core/src/binding_model.rs | 632
-rw-r--r--  gfx/wgpu/wgpu-core/src/command/allocator.rs | 268
-rw-r--r--  gfx/wgpu/wgpu-core/src/command/bind.rs | 295
-rw-r--r--  gfx/wgpu/wgpu-core/src/command/bundle.rs | 1230
-rw-r--r--  gfx/wgpu/wgpu-core/src/command/compute.rs | 657
-rw-r--r--  gfx/wgpu/wgpu-core/src/command/draw.rs | 180
-rw-r--r--  gfx/wgpu/wgpu-core/src/command/mod.rs | 362
-rw-r--r--  gfx/wgpu/wgpu-core/src/command/render.rs | 2078
-rw-r--r--  gfx/wgpu/wgpu-core/src/command/transfer.rs | 789
-rw-r--r--  gfx/wgpu/wgpu-core/src/conv.rs | 833
-rw-r--r--  gfx/wgpu/wgpu-core/src/device/alloc.rs | 294
-rw-r--r--  gfx/wgpu/wgpu-core/src/device/descriptor.rs | 168
-rw-r--r--  gfx/wgpu/wgpu-core/src/device/life.rs | 760
-rw-r--r--  gfx/wgpu/wgpu-core/src/device/mod.rs | 4217
-rw-r--r--  gfx/wgpu/wgpu-core/src/device/queue.rs | 696
-rw-r--r--  gfx/wgpu/wgpu-core/src/device/trace.rs | 192
-rw-r--r--  gfx/wgpu/wgpu-core/src/hub.rs | 866
-rw-r--r--  gfx/wgpu/wgpu-core/src/id.rs | 196
-rw-r--r--  gfx/wgpu/wgpu-core/src/instance.rs | 840
-rw-r--r--  gfx/wgpu/wgpu-core/src/lib.rs | 271
-rw-r--r--  gfx/wgpu/wgpu-core/src/macros.rs | 226
-rw-r--r--  gfx/wgpu/wgpu-core/src/pipeline.rs | 254
-rw-r--r--  gfx/wgpu/wgpu-core/src/resource.rs | 447
-rw-r--r--  gfx/wgpu/wgpu-core/src/swap_chain.rs | 294
-rw-r--r--  gfx/wgpu/wgpu-core/src/track/buffer.rs | 241
-rw-r--r--  gfx/wgpu/wgpu-core/src/track/mod.rs | 593
-rw-r--r--  gfx/wgpu/wgpu-core/src/track/range.rs | 399
-rw-r--r--  gfx/wgpu/wgpu-core/src/track/texture.rs | 466
-rw-r--r--  gfx/wgpu/wgpu-core/src/validation.rs | 966
-rw-r--r--  gfx/wgpu/wgpu-types/Cargo.toml | 20
-rw-r--r--  gfx/wgpu/wgpu-types/src/lib.rs | 2000
69 files changed, 25588 insertions, 0 deletions
diff --git a/gfx/wgpu/.github/ISSUE_TEMPLATE/bug_report.md b/gfx/wgpu/.github/ISSUE_TEMPLATE/bug_report.md
new file mode 100644
index 0000000000..c059d23311
--- /dev/null
+++ b/gfx/wgpu/.github/ISSUE_TEMPLATE/bug_report.md
@@ -0,0 +1,28 @@
+---
+name: Bug report
+about: Create a report to help us improve
+title: ''
+labels: bug
+assignees: ''
+
+---
+
+<!-- Thank you for filing this! Please read the [debugging tips](https://github.com/gfx-rs/wgpu/wiki/Debbugging-wgpu-Applications).
+That may let you investigate on your own, or provide additional information that helps us assist you.-->
+
+**Description**
+A clear and concise description of what the bug is.
+
+**Repro steps**
+Ideally, a runnable example we can check out.
+
+**Expected vs observed behavior**
+Clearly describe what you observe, and how it differs from what you expected.
+
+**Extra materials**
+Screenshots to help explain your problem.
+Validation logs can be attached in case there are warnings and errors.
+Zip-compressed API traces and GPU captures can also land here.
+
+**Platform**
+Information about your OS, version of `wgpu`, your tech stack, etc.
diff --git a/gfx/wgpu/.github/ISSUE_TEMPLATE/feature_request.md b/gfx/wgpu/.github/ISSUE_TEMPLATE/feature_request.md
new file mode 100644
index 0000000000..11fc491ef1
--- /dev/null
+++ b/gfx/wgpu/.github/ISSUE_TEMPLATE/feature_request.md
@@ -0,0 +1,20 @@
+---
+name: Feature request
+about: Suggest an idea for this project
+title: ''
+labels: enhancement
+assignees: ''
+
+---
+
+**Is your feature request related to a problem? Please describe.**
+A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
+
+**Describe the solution you'd like**
+A clear and concise description of what you want to happen.
+
+**Describe alternatives you've considered**
+A clear and concise description of any alternative solutions or features you've considered.
+
+**Additional context**
+Add any other context or screenshots about the feature request here.
diff --git a/gfx/wgpu/.github/ISSUE_TEMPLATE/other.md b/gfx/wgpu/.github/ISSUE_TEMPLATE/other.md
new file mode 100644
index 0000000000..52ef2e8a47
--- /dev/null
+++ b/gfx/wgpu/.github/ISSUE_TEMPLATE/other.md
@@ -0,0 +1,10 @@
+---
+name: Other
+about: Strange things you want to tell us
+title: ''
+labels: question
+assignees: ''
+
+---
+
+
diff --git a/gfx/wgpu/.github/pull_request_template.md b/gfx/wgpu/.github/pull_request_template.md
new file mode 100644
index 0000000000..55be6c377a
--- /dev/null
+++ b/gfx/wgpu/.github/pull_request_template.md
@@ -0,0 +1,17 @@
+**Connections**
+_Link to the issues addressed by this PR, or dependent PRs in other repositories_
+
+**Description**
+_Describe what problem this is solving, and how it's solved._
+
+**Testing**
+_Explain how this change is tested._
+<!--
+Non-trivial functional changes would need to be tested through:
+ - [wgpu-rs](https://github.com/gfx-rs/wgpu-rs) - test the examples.
+ - [wgpu-native](https://github.com/gfx-rs/wgpu-native/) - check the generated C header for sanity.
+
+Ideally, a PR needs to link to the draft PRs in these projects with relevant modifications.
+See https://github.com/gfx-rs/wgpu/pull/666 for an example.
+If you can add a unit/integration test here in `wgpu`, that would be best.
+-->
diff --git a/gfx/wgpu/.github/workflows/ci.yml b/gfx/wgpu/.github/workflows/ci.yml
new file mode 100644
index 0000000000..662eb9990f
--- /dev/null
+++ b/gfx/wgpu/.github/workflows/ci.yml
@@ -0,0 +1,115 @@
+name: CI
+
+on:
+ push:
+ branches-ignore: [staging.tmp]
+ pull_request:
+ branches-ignore: [staging.tmp]
+
+jobs:
+ ios_build:
+ name: iOS Stable
+ runs-on: macos-10.15
+ env:
+ TARGET: aarch64-apple-ios
+ steps:
+ - uses: actions/checkout@v2
+ - run: rustup component add clippy
+ - run: rustup target add ${{ env.TARGET }}
+ - run: cargo clippy --target ${{ env.TARGET }}
+
+ android_build:
+ name: Android Stable
+ runs-on: ubuntu-18.04
+ env:
+ TARGET: aarch64-linux-android
+ PKG_CONFIG_ALLOW_CROSS: 1
+ steps:
+ - uses: actions/checkout@v2
+ - name: Prepare
+ run: |
+ sudo apt-get update -y -qq
+ sudo apt-get install -y -qq libegl1-mesa-dev
+ echo "$ANDROID_HOME/ndk-bundle/toolchains/llvm/prebuilt/linux-x86_64/bin" >> $GITHUB_PATH
+ - run: rustup component add clippy
+ - run: rustup target add ${{ env.TARGET }}
+ - run: cargo clippy --target ${{ env.TARGET }}
+ - name: Additional core features
+ run: cargo check --manifest-path wgpu-core/Cargo.toml --features trace --target ${{ env.TARGET }}
+
+ build:
+ name: ${{ matrix.name }}
+ runs-on: ${{ matrix.os }}
+ strategy:
+ fail-fast: false
+ matrix:
+ os: [macos-10.15, ubuntu-18.04, windows-2019]
+ channel: [stable, nightly]
+ include:
+ - name: MacOS Stable
+ channel: stable
+ os: macos-10.15
+ prepare_command:
+ additional_core_features: trace
+ additional_player_features: winit
+ - name: MacOS Nightly
+ os: macos-10.15
+ channel: nightly
+ prepare_command:
+ additional_core_features:
+ additional_player_features:
+ - name: Ubuntu Stable
+ os: ubuntu-18.04
+ channel: stable
+ prepare_command: |
+ sudo apt-get update -y -qq
+ sudo apt-get install -y -qq libegl1-mesa-dev
+ additional_core_features: trace,replay
+ additional_player_features:
+ - name: Ubuntu Nightly
+ os: ubuntu-18.04
+ channel: nightly
+ prepare_command: |
+ sudo apt-get update -y -qq
+ echo "Installing EGL"
+ sudo apt-get install -y -qq libegl1-mesa-dev
+ echo "Installing Vulkan"
+ sudo apt-get install -y -qq mesa-vulkan-drivers
+ additional_core_features: serial-pass
+ additional_player_features: winit
+ - name: Windows Stable
+ os: windows-2019
+ channel: stable
+ prepare_command: rustup default stable-msvc
+ additional_core_features: trace,serial-pass
+ additional_player_features: renderdoc
+ - name: Windows Nightly
+ os: windows-2019
+ channel: nightly
+ prepare_command: rustup default nightly-msvc
+ additional_core_features:
+ additional_player_features:
+ steps:
+ - uses: actions/checkout@v2
+ - if: matrix.channel == 'nightly'
+ name: Install latest nightly
+ uses: actions-rs/toolchain@v1
+ with:
+ toolchain: nightly
+ override: true
+ - if: matrix.channel == 'stable'
+ run: rustup component add clippy
+ # prepare
+ - if: matrix.prepare_command != ''
+ run: ${{ matrix.prepare_command }}
+ # build with no features first
+ - if: matrix.additional_core_features == ''
+ run: cargo check --manifest-path wgpu-core/Cargo.toml --no-default-features
+ - if: matrix.additional_core_features != ''
+ run: cargo check --manifest-path wgpu-core/Cargo.toml --features ${{ matrix.additional_core_features }}
+ - if: matrix.additional_player_features != ''
+ run: cargo check --manifest-path player/Cargo.toml --features ${{ matrix.additional_player_features }}
+ - if: matrix.channel == 'stable'
+ run: cargo clippy
+ - if: matrix.channel == 'nightly'
+ run: cargo test -- --nocapture
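The matrix above exercises wgpu-core under several optional Cargo features (`trace`, `replay`, `serial-pass`) and once with `--no-default-features`. As a minimal sketch of what such a feature gate typically looks like in Rust source (a hypothetical recorder for illustration, not an excerpt from wgpu-core):

```rust
// Hypothetical example: code gated on an optional Cargo feature named `trace`,
// the kind of path `cargo check --features trace` exercises in the CI matrix.
pub struct CommandRecorder {
    // This field exists only in builds with the `trace` feature enabled.
    #[cfg(feature = "trace")]
    trace_log: Vec<String>,
}

impl CommandRecorder {
    pub fn new() -> Self {
        Self {
            #[cfg(feature = "trace")]
            trace_log: Vec::new(),
        }
    }

    pub fn record(&mut self, name: &str) {
        // Compiled out entirely unless built with `--features trace`.
        #[cfg(feature = "trace")]
        self.trace_log.push(name.to_owned());

        let _ = name; // the normal, always-compiled recording work goes here
    }
}
```

The `--no-default-features` step then confirms the crate still builds with every such gate compiled out.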
diff --git a/gfx/wgpu/.gitignore b/gfx/wgpu/.gitignore
new file mode 100644
index 0000000000..0dd6fef443
--- /dev/null
+++ b/gfx/wgpu/.gitignore
@@ -0,0 +1,7 @@
+/target
+**/*.rs.bk
+#Cargo.lock
+.DS_Store
+.vscode
+.vs
+.idea
diff --git a/gfx/wgpu/.monocodus b/gfx/wgpu/.monocodus
new file mode 100644
index 0000000000..7900103af1
--- /dev/null
+++ b/gfx/wgpu/.monocodus
@@ -0,0 +1,7 @@
+version: 1.1.0
+
+rust:
+ formatter:
+ name: rustfmt
+ repo_checkers:
+ - name: rust-clippy
diff --git a/gfx/wgpu/CHANGELOG.md b/gfx/wgpu/CHANGELOG.md
new file mode 100644
index 0000000000..6655091d0b
--- /dev/null
+++ b/gfx/wgpu/CHANGELOG.md
@@ -0,0 +1,170 @@
+# Change Log
+
+## v0.6 (2020-08-17)
+ - Crates:
+ - C API is moved to [another repository](https://github.com/gfx-rs/wgpu-native)
+ - `player`: standalone API replayer and tester
+ - Features:
+ - Proper error handling with all functions returning `Result`
+ - Graceful handling of "error" objects
+ - API tracing [infrastructure](http://kvark.github.io/wgpu/debug/test/ron/2020/07/18/wgpu-api-tracing.html)
+ - uploading data with `write_buffer`/`write_texture` queue operations
+ - reusable render bundles
+ - read-only depth/stencil attachments
+ - bind group layout deduplication
+ - Cows, cows everywhere
+ - Web+Native features:
+ - Depth clamping (feature)
+ - BC texture compression
+ - Native-only features:
+ - mappable primary buffers
+ - texture array bindings
+ - push constants
+ - multi-draw indirect
+ - Validation:
+ - all transfer operations
+ - all resource creation
+ - bind group matching to the layout
+ - experimental shader interface matching with Naga
+
+## v0.5.6 (2020-07-09)
+ - add debug markers support
+
+## v0.5.5 (2020-05-20)
+ - fix destruction of adapters, swap chains, and bind group layouts
+ - fix command pool leak with temporary threads
+ - improve assertion messages
+ - implement `From<TextureFormat>` for `TextureComponentType`
+
+## v0.5.4 (2020-04-24)
+ - fix memory management of staging buffers
+
+## v0.5.3 (2020-04-18)
+ - fix reading access to storage textures
+ - another fix to layout transitions for swapchain images
+
+## v0.5.2 (2020-04-15)
+ - fix read-only storage flags
+ - fix pipeline layout life time
+ - improve various assert messages
+
+## v0.5.1 (2020-04-10)
+ - fix tracking of swapchain images that are used multiple times in a command buffer
+ - fix tracking of initial usage of a resource across a command buffer
+
+## v0.5 (2020-04-06)
+ - Crates:
+ - `wgpu-types`: common types between native and web targets
+ - `wgpu-core`: internal API for the native and remote wrappers
+ - Features:
+ - based on gfx-hal-0.5
+ - moved from Rendy to the new `gfx-memory` and `gfx-descriptor` crates
+ - passes are now recorded on the client side; the user is responsible for keeping all resources referenced in a pass alive until it finishes recording
+ - coordinate system is changed to have Y up in the rendering space
+ - revised GPU lifetime tracking of all resources
+ - revised usage tracking logic
+ - all IDs are now non-zero
+ - Mailbox present mode
+ - Validation:
+ - active pipeline
+ - Fixes:
+ - lots of small API changes to closely match upstream WebGPU
+ - true read-only storage bindings
+ - unmapping dropped buffers
+ - better error messages on misused swapchain frames
+
+## v0.4.3 (2020-01-20)
+ - improved swap chain error handling
+
+## v0.4.2 (2019-12-15)
+ - fixed render pass transitions
+
+## v0.4.1 (2019-11-28)
+ - fixed depth/stencil transitions
+ - fixed dynamic offset iteration
+
+## v0.4 (2019-11-03)
+ - Platforms: removed OpenGL/WebGL support temporarily
+ - Features:
+ - based on gfx-hal-0.4 with the new swapchain model
+ - exposing adapters from all available backends on a system
+ - tracking of samplers
+ - cube map support with an example
+ - Validation:
+ - buffer and texture usage
+
+## v0.3.3 (2019-08-22)
+ - fixed instance creation on Windows
+
+## v0.3.1 (2019-08-21)
+ - fixed pipeline barriers that aren't transitions
+
+## v0.3 (2019-08-21)
+ - Platforms: experimental OpenGL/WebGL
+ - Crates:
+ - Rust API is moved out to [another repository](https://github.com/gfx-rs/wgpu-rs)
+ - Features:
+ - based on gfx-hal-0.3 with help of `rendy-memory` and `rendy-descriptor`
+ - type-system-assisted deadlock prevention (for locking internal structures)
+ - texture sub-resource tracking
+ - `raw-window-handle` integration instead of `winit`
+ - multisampling with an example
+ - indirect draws and dispatches
+ - stencil masks and reference values
+ - native "compute" example
+ - everything implements `Debug`
+ - Validation:
+ - vertex/index/instance ranges at draw calls
+ - bind groups vs their expected layouts
+ - bind group buffer ranges
+ - required stencil reference, blend color
+
+## v0.2.6 (2019-04-04)
+ - fixed frame acquisition GPU waits
+
+## v0.2.5 (2019-03-31)
+ - fixed submission tracking
+ - added support for blend colors
+ - fixed bind group compatibility at the gfx-hal level
+ - validating the bind groups and blend colors
+
+## v0.2.3 (2019-03-20)
+ - fixed vertex format mapping
+ - fixed building with "empty" backend on Windows
+ - bumped the default descriptor pool size
+ - fixed host mapping alignments
+ - validating the uniform buffer offset
+
+## v0.2 (2019-03-06)
+ - Platforms: iOS/Metal, D3D11
+ - Crates:
+ - `wgpu-remote`: remoting layer for the cross-process boundary
+ - `gfx-examples`: selected gfx pre-ll examples ported over
+ - Features:
+ - native example for compute
+ - "gfx-cube" and "gfx-shadow" examples
+ - copies between buffers and textures
+ - separate object identity for the remote client
+ - texture view tracking
+ - native swapchain resize support
+ - buffer mapping
+ - object index epochs
+ - comprehensive list of vertex and texture formats
+ - validation of pipeline compatibility with the pass
+ - Fixes:
+ - fixed resource destruction
+
+## v0.1 (2019-01-24)
+ - Platforms: Linux/Vulkan, Windows/Vulkan, D3D12, macOS/Metal
+ - Crates:
+ - `wgpu-native`: C API implementation of WebGPU, based on gfx-hal
+ - `wgpu-bindings`: auto-generated C headers
+ - `wgpu`: idiomatic Rust wrapper
+ - `examples`: native C examples
+ - Features:
+ - native examples for triangle rendering
+ - basic native swapchain integration
+ - concept of the storage hub
+ - basic recording of passes and command buffers
+ - submission-based lifetime tracking and command buffer recycling
+ - automatic resource transitions
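The v0.6 entry above introduces `Result`-based error handling and the `write_buffer`/`write_texture` queue uploads. A minimal sketch of the upload path through the user-facing wgpu-rs API of the same era (descriptor fields per wgpu-rs 0.6; treat it as an approximation, not an excerpt from either repository):

```rust
// Create a small uniform buffer and upload to it via the queue, without an
// explicit staging buffer: the queue schedules the copy so that it completes
// before the next submitted command buffer executes.
fn upload_uniform(device: &wgpu::Device, queue: &wgpu::Queue) -> wgpu::Buffer {
    let buffer = device.create_buffer(&wgpu::BufferDescriptor {
        label: Some("uniform"),
        size: 4,
        usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
        mapped_at_creation: false,
    });
    queue.write_buffer(&buffer, 0, &42u32.to_le_bytes());
    buffer
}
```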
diff --git a/gfx/wgpu/Cargo.lock b/gfx/wgpu/Cargo.lock
new file mode 100644
index 0000000000..89dd50d22e
--- /dev/null
+++ b/gfx/wgpu/Cargo.lock
@@ -0,0 +1,1886 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+[[package]]
+name = "ahash"
+version = "0.4.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f6789e291be47ace86a60303502173d84af8327e3627ecf334356ee0f87a164c"
+
+[[package]]
+name = "aho-corasick"
+version = "0.7.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7404febffaa47dac81aa44dba71523c9d069b1bdc50a77db41195149e17f68e5"
+dependencies = [
+ "memchr",
+]
+
+[[package]]
+name = "andrew"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9b7f09f89872c2b6b29e319377b1fbe91c6f5947df19a25596e121cf19a7b35e"
+dependencies = [
+ "bitflags",
+ "line_drawing",
+ "rusttype 0.7.9",
+ "walkdir",
+ "xdg",
+ "xml-rs",
+]
+
+[[package]]
+name = "android_log-sys"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b8052e2d8aabbb8d556d6abbcce2a22b9590996c5f849b9c7ce4544a2e3b984e"
+
+[[package]]
+name = "ansi_term"
+version = "0.12.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2"
+dependencies = [
+ "winapi 0.3.9",
+]
+
+[[package]]
+name = "approx"
+version = "0.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f0e60b75072ecd4168020818c0107f2857bb6c4e64252d8d3983f6263b40a5c3"
+dependencies = [
+ "num-traits",
+]
+
+[[package]]
+name = "arrayvec"
+version = "0.5.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b"
+dependencies = [
+ "serde",
+]
+
+[[package]]
+name = "ash"
+version = "0.31.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c69a8137596e84c22d57f3da1b5de1d4230b1742a710091c85f4d7ce50f00f38"
+dependencies = [
+ "libloading",
+]
+
+[[package]]
+name = "atty"
+version = "0.2.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
+dependencies = [
+ "hermit-abi",
+ "libc",
+ "winapi 0.3.9",
+]
+
+[[package]]
+name = "autocfg"
+version = "1.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a"
+
+[[package]]
+name = "base64"
+version = "0.12.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3441f0f7b02788e948e47f457ca01f1d7e6d92c693bc132c22b087d3141c03ff"
+
+[[package]]
+name = "bit-set"
+version = "0.5.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6e11e16035ea35e4e5997b393eacbf6f63983188f7a2ad25bfb13465f5ad59de"
+dependencies = [
+ "bit-vec",
+]
+
+[[package]]
+name = "bit-vec"
+version = "0.6.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5f0dc55f2d8a1a85650ac47858bb001b4c0dd73d79e3c455a842925e68d29cd3"
+
+[[package]]
+name = "bitflags"
+version = "1.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693"
+
+[[package]]
+name = "block"
+version = "0.1.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0d8c1fef690941d3e7788d328517591fecc684c084084702d6ff1641e993699a"
+
+[[package]]
+name = "bumpalo"
+version = "3.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2e8c087f005730276d1096a652e92a8bacee2e2472bcc9715a74d2bec38b5820"
+
+[[package]]
+name = "byteorder"
+version = "1.3.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de"
+
+[[package]]
+name = "calloop"
+version = "0.4.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7aa2097be53a00de9e8fc349fea6d76221f398f5c4fa550d420669906962d160"
+dependencies = [
+ "mio",
+ "mio-extras",
+ "nix",
+]
+
+[[package]]
+name = "cc"
+version = "1.0.62"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f1770ced377336a88a67c473594ccc14eca6f4559217c34f64aac8f83d641b40"
+dependencies = [
+ "jobserver",
+]
+
+[[package]]
+name = "cfg-if"
+version = "0.1.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
+
+[[package]]
+name = "cfg-if"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
+
+[[package]]
+name = "cfg_aliases"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e"
+
+[[package]]
+name = "chrono"
+version = "0.4.19"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73"
+dependencies = [
+ "libc",
+ "num-integer",
+ "num-traits",
+ "time",
+ "winapi 0.3.9",
+]
+
+[[package]]
+name = "cloudabi"
+version = "0.0.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f"
+dependencies = [
+ "bitflags",
+]
+
+[[package]]
+name = "cloudabi"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4344512281c643ae7638bbabc3af17a11307803ec8f0fcad9fae512a8bf36467"
+dependencies = [
+ "bitflags",
+]
+
+[[package]]
+name = "cocoa"
+version = "0.20.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0c49e86fc36d5704151f5996b7b3795385f50ce09e3be0f47a0cfde869681cf8"
+dependencies = [
+ "bitflags",
+ "block",
+ "core-foundation 0.7.0",
+ "core-graphics",
+ "foreign-types",
+ "libc",
+ "objc",
+]
+
+[[package]]
+name = "cocoa-foundation"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7ade49b65d560ca58c403a479bb396592b155c0185eada742ee323d1d68d6318"
+dependencies = [
+ "bitflags",
+ "block",
+ "core-foundation 0.9.1",
+ "core-graphics-types",
+ "foreign-types",
+ "libc",
+ "objc",
+]
+
+[[package]]
+name = "copyless"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a2df960f5d869b2dd8532793fde43eb5427cceb126c929747a26823ab0eeb536"
+
+[[package]]
+name = "core-foundation"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "57d24c7a13c43e870e37c1556b74555437870a04514f7685f5b354e090567171"
+dependencies = [
+ "core-foundation-sys 0.7.0",
+ "libc",
+]
+
+[[package]]
+name = "core-foundation"
+version = "0.9.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0a89e2ae426ea83155dccf10c0fa6b1463ef6d5fcb44cee0b224a408fa640a62"
+dependencies = [
+ "core-foundation-sys 0.8.2",
+ "libc",
+]
+
+[[package]]
+name = "core-foundation-sys"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b3a71ab494c0b5b860bdc8407ae08978052417070c2ced38573a9157ad75b8ac"
+
+[[package]]
+name = "core-foundation-sys"
+version = "0.8.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ea221b5284a47e40033bf9b66f35f984ec0ea2931eb03505246cd27a963f981b"
+
+[[package]]
+name = "core-graphics"
+version = "0.19.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b3889374e6ea6ab25dba90bb5d96202f61108058361f6dc72e8b03e6f8bbe923"
+dependencies = [
+ "bitflags",
+ "core-foundation 0.7.0",
+ "foreign-types",
+ "libc",
+]
+
+[[package]]
+name = "core-graphics-types"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3a68b68b3446082644c91ac778bf50cd4104bfb002b5a6a7c44cca5a2c70788b"
+dependencies = [
+ "bitflags",
+ "core-foundation 0.9.1",
+ "foreign-types",
+ "libc",
+]
+
+[[package]]
+name = "core-video-sys"
+version = "0.1.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "34ecad23610ad9757664d644e369246edde1803fcb43ed72876565098a5d3828"
+dependencies = [
+ "cfg-if 0.1.10",
+ "core-foundation-sys 0.7.0",
+ "core-graphics",
+ "libc",
+ "objc",
+]
+
+[[package]]
+name = "d3d12"
+version = "0.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d0a60cceb22c7c53035f8980524fdc7f17cf49681a3c154e6757d30afbec6ec4"
+dependencies = [
+ "bitflags",
+ "libloading",
+ "winapi 0.3.9",
+]
+
+[[package]]
+name = "derivative"
+version = "2.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cb582b60359da160a9477ee80f15c8d784c477e69c217ef2cdd4169c24ea380f"
+dependencies = [
+ "proc-macro2 1.0.24",
+ "quote 1.0.7",
+ "syn",
+]
+
+[[package]]
+name = "dispatch"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bd0c93bb4b0c6d9b77f4435b0ae98c24d17f1c45b2ff844c6151a07256ca923b"
+
+[[package]]
+name = "dlib"
+version = "0.4.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b11f15d1e3268f140f68d390637d5e76d849782d971ae7063e0da69fe9709a76"
+dependencies = [
+ "libloading",
+]
+
+[[package]]
+name = "downcast-rs"
+version = "1.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9ea835d29036a4087793836fa931b08837ad5e957da9e23886b29586fb9b6650"
+
+[[package]]
+name = "dummy"
+version = "0.1.0"
+dependencies = [
+ "wgpu-core",
+]
+
+[[package]]
+name = "env_logger"
+version = "0.7.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "44533bbbb3bb3c1fa17d9f2e4e38bbbaf8396ba82193c4cb1b6445d711445d36"
+dependencies = [
+ "atty",
+ "humantime",
+ "log",
+ "regex",
+ "termcolor",
+]
+
+[[package]]
+name = "fixedbitset"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "37ab347416e802de484e4d03c7316c48f1ecb56574dfd4a46a80f173ce1de04d"
+
+[[package]]
+name = "float-cmp"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "499a1bfa03d254b047e7e5c1fc8dd23a8cf6b344a8eb7e622ae4bc76bfac8e68"
+dependencies = [
+ "num-traits",
+]
+
+[[package]]
+name = "foreign-types"
+version = "0.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1"
+dependencies = [
+ "foreign-types-shared",
+]
+
+[[package]]
+name = "foreign-types-shared"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b"
+
+[[package]]
+name = "fuchsia-zircon"
+version = "0.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2e9763c69ebaae630ba35f74888db465e49e259ba1bc0eda7d06f4a067615d82"
+dependencies = [
+ "bitflags",
+ "fuchsia-zircon-sys",
+]
+
+[[package]]
+name = "fuchsia-zircon-sys"
+version = "0.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7"
+
+[[package]]
+name = "fxhash"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c31b6d751ae2c7f11320402d34e41349dd1016f8d5d45e48c4312bc8625af50c"
+dependencies = [
+ "byteorder",
+]
+
+[[package]]
+name = "generator"
+version = "0.6.23"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8cdc09201b2e8ca1b19290cf7e65de2246b8e91fb6874279722189c4de7b94dc"
+dependencies = [
+ "cc",
+ "libc",
+ "log",
+ "rustc_version",
+ "winapi 0.3.9",
+]
+
+[[package]]
+name = "gfx-auxil"
+version = "0.5.0"
+source = "git+https://github.com/gfx-rs/gfx?rev=1d14789011cb892f4c1a205d3f8a87d479c2e354#1d14789011cb892f4c1a205d3f8a87d479c2e354"
+dependencies = [
+ "fxhash",
+ "gfx-hal",
+ "spirv_cross",
+]
+
+[[package]]
+name = "gfx-backend-dx11"
+version = "0.6.0"
+source = "git+https://github.com/gfx-rs/gfx?rev=1d14789011cb892f4c1a205d3f8a87d479c2e354#1d14789011cb892f4c1a205d3f8a87d479c2e354"
+dependencies = [
+ "arrayvec",
+ "bitflags",
+ "gfx-auxil",
+ "gfx-hal",
+ "libloading",
+ "log",
+ "parking_lot 0.11.0",
+ "range-alloc",
+ "raw-window-handle",
+ "smallvec",
+ "spirv_cross",
+ "thunderdome",
+ "winapi 0.3.9",
+ "wio",
+]
+
+[[package]]
+name = "gfx-backend-dx12"
+version = "0.6.2"
+source = "git+https://github.com/gfx-rs/gfx?rev=1d14789011cb892f4c1a205d3f8a87d479c2e354#1d14789011cb892f4c1a205d3f8a87d479c2e354"
+dependencies = [
+ "arrayvec",
+ "bit-set",
+ "bitflags",
+ "d3d12",
+ "gfx-auxil",
+ "gfx-hal",
+ "log",
+ "parking_lot 0.11.0",
+ "range-alloc",
+ "raw-window-handle",
+ "smallvec",
+ "spirv_cross",
+ "winapi 0.3.9",
+]
+
+[[package]]
+name = "gfx-backend-empty"
+version = "0.6.0"
+source = "git+https://github.com/gfx-rs/gfx?rev=1d14789011cb892f4c1a205d3f8a87d479c2e354#1d14789011cb892f4c1a205d3f8a87d479c2e354"
+dependencies = [
+ "gfx-hal",
+ "log",
+ "raw-window-handle",
+]
+
+[[package]]
+name = "gfx-backend-metal"
+version = "0.6.0"
+source = "git+https://github.com/gfx-rs/gfx?rev=1d14789011cb892f4c1a205d3f8a87d479c2e354#1d14789011cb892f4c1a205d3f8a87d479c2e354"
+dependencies = [
+ "arrayvec",
+ "bitflags",
+ "block",
+ "cocoa-foundation",
+ "copyless",
+ "foreign-types",
+ "gfx-auxil",
+ "gfx-hal",
+ "lazy_static",
+ "log",
+ "metal",
+ "objc",
+ "parking_lot 0.11.0",
+ "range-alloc",
+ "raw-window-handle",
+ "spirv_cross",
+ "storage-map",
+]
+
+[[package]]
+name = "gfx-backend-vulkan"
+version = "0.6.5"
+source = "git+https://github.com/gfx-rs/gfx?rev=1d14789011cb892f4c1a205d3f8a87d479c2e354#1d14789011cb892f4c1a205d3f8a87d479c2e354"
+dependencies = [
+ "arrayvec",
+ "ash",
+ "byteorder",
+ "core-graphics-types",
+ "gfx-hal",
+ "inplace_it",
+ "lazy_static",
+ "log",
+ "objc",
+ "raw-window-handle",
+ "smallvec",
+ "winapi 0.3.9",
+]
+
+[[package]]
+name = "gfx-hal"
+version = "0.6.0"
+source = "git+https://github.com/gfx-rs/gfx?rev=1d14789011cb892f4c1a205d3f8a87d479c2e354#1d14789011cb892f4c1a205d3f8a87d479c2e354"
+dependencies = [
+ "bitflags",
+ "raw-window-handle",
+]
+
+[[package]]
+name = "gpu-alloc"
+version = "0.2.1"
+source = "git+https://github.com/zakarumych/gpu-alloc?rev=d07be73f9439a37c89f5b72f2500cbf0eb4ff613#d07be73f9439a37c89f5b72f2500cbf0eb4ff613"
+dependencies = [
+ "bitflags",
+ "gpu-alloc-types",
+]
+
+[[package]]
+name = "gpu-alloc-types"
+version = "0.1.0"
+source = "git+https://github.com/zakarumych/gpu-alloc?rev=d07be73f9439a37c89f5b72f2500cbf0eb4ff613#d07be73f9439a37c89f5b72f2500cbf0eb4ff613"
+dependencies = [
+ "bitflags",
+]
+
+[[package]]
+name = "gpu-descriptor"
+version = "0.1.0"
+source = "git+https://github.com/zakarumych/gpu-descriptor?rev=831460c4b5120d9a74744d542f39a95b9816b5ab#831460c4b5120d9a74744d542f39a95b9816b5ab"
+dependencies = [
+ "bitflags",
+ "gpu-descriptor-types",
+ "hashbrown",
+]
+
+[[package]]
+name = "gpu-descriptor-types"
+version = "0.1.0"
+source = "git+https://github.com/zakarumych/gpu-descriptor?rev=831460c4b5120d9a74744d542f39a95b9816b5ab#831460c4b5120d9a74744d542f39a95b9816b5ab"
+dependencies = [
+ "bitflags",
+]
+
+[[package]]
+name = "hashbrown"
+version = "0.9.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04"
+dependencies = [
+ "ahash",
+]
+
+[[package]]
+name = "hermit-abi"
+version = "0.1.17"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5aca5565f760fb5b220e499d72710ed156fdb74e631659e99377d9ebfbd13ae8"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "humantime"
+version = "1.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "df004cfca50ef23c36850aaaa59ad52cc70d0e90243c3c7737a4dd32dc7a3c4f"
+dependencies = [
+ "quick-error",
+]
+
+[[package]]
+name = "indexmap"
+version = "1.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "55e2e4c765aa53a0424761bf9f41aa7a6ac1efa87238f59560640e27fca028f2"
+dependencies = [
+ "autocfg",
+ "hashbrown",
+]
+
+[[package]]
+name = "inplace_it"
+version = "0.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dd01a2a73f2f399df96b22dc88ea687ef4d76226284e7531ae3c7ee1dc5cb534"
+
+[[package]]
+name = "instant"
+version = "0.1.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cb1fc4429a33e1f80d41dc9fea4d108a88bec1de8053878898ae448a0b52f613"
+dependencies = [
+ "cfg-if 1.0.0",
+]
+
+[[package]]
+name = "iovec"
+version = "0.1.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b2b3ea6ff95e175473f8ffe6a7eb7c00d054240321b84c57051175fe3c1e075e"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "itoa"
+version = "0.4.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dc6f3ad7b9d11a0c00842ff8de1b60ee58661048eb8049ed33c73594f359d7e6"
+
+[[package]]
+name = "jni-sys"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8eaf4bc02d17cbdd7ff4c7438cafcdf7fb9a4613313ad11b4f8fefe7d3fa0130"
+
+[[package]]
+name = "jobserver"
+version = "0.1.21"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5c71313ebb9439f74b00d9d2dcec36440beaf57a6aa0623068441dd7cd81a7f2"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "js-sys"
+version = "0.3.45"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ca059e81d9486668f12d455a4ea6daa600bd408134cd17e3d3fb5a32d1f016f8"
+dependencies = [
+ "wasm-bindgen",
+]
+
+[[package]]
+name = "kernel32-sys"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d"
+dependencies = [
+ "winapi 0.2.8",
+ "winapi-build",
+]
+
+[[package]]
+name = "lazy_static"
+version = "1.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
+
+[[package]]
+name = "lazycell"
+version = "1.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55"
+
+[[package]]
+name = "libc"
+version = "0.2.80"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4d58d1b70b004888f764dfbf6a26a3b0342a1632d33968e4a179d8011c760614"
+
+[[package]]
+name = "libloading"
+version = "0.6.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1090080fe06ec2648d0da3881d9453d97e71a45f00eb179af7fdd7e3f686fdb0"
+dependencies = [
+ "cfg-if 1.0.0",
+ "winapi 0.3.9",
+]
+
+[[package]]
+name = "line_drawing"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5cc7ad3d82c845bdb5dde34ffdcc7a5fb4d2996e1e1ee0f19c33bc80e15196b9"
+dependencies = [
+ "num-traits",
+]
+
+[[package]]
+name = "lock_api"
+version = "0.3.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c4da24a77a3d8a6d4862d95f72e6fdb9c09a643ecdb402d754004a557f2bec75"
+dependencies = [
+ "scopeguard",
+]
+
+[[package]]
+name = "lock_api"
+version = "0.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "28247cc5a5be2f05fbcd76dd0cf2c7d3b5400cb978a28042abcd4fa0b3f8261c"
+dependencies = [
+ "scopeguard",
+]
+
+[[package]]
+name = "log"
+version = "0.4.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4fabed175da42fed1fa0746b0ea71f412aa9d35e76e95e59b192c64b9dc2bf8b"
+dependencies = [
+ "cfg-if 0.1.10",
+]
+
+[[package]]
+name = "loom"
+version = "0.3.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a0e8460f2f2121162705187214720353c517b97bdfb3494c0b1e33d83ebe4bed"
+dependencies = [
+ "cfg-if 0.1.10",
+ "generator",
+ "scoped-tls",
+ "serde",
+ "serde_json",
+]
+
+[[package]]
+name = "malloc_buf"
+version = "0.0.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "62bb907fe88d54d8d9ce32a3cceab4218ed2f6b7d35617cafe9adf84e43919cb"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "matchers"
+version = "0.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f099785f7595cc4b4553a174ce30dd7589ef93391ff414dbb67f62392b9e0ce1"
+dependencies = [
+ "regex-automata",
+]
+
+[[package]]
+name = "maybe-uninit"
+version = "2.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00"
+
+[[package]]
+name = "memchr"
+version = "2.3.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0ee1c47aaa256ecabcaea351eae4a9b01ef39ed810004e298d2511ed284b1525"
+
+[[package]]
+name = "memmap"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6585fd95e7bb50d6cc31e20d4cf9afb4e2ba16c5846fc76793f11218da9c475b"
+dependencies = [
+ "libc",
+ "winapi 0.3.9",
+]
+
+[[package]]
+name = "metal"
+version = "0.20.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5c4e8a431536529327e28c9ba6992f2cb0c15d4222f0602a16e6d7695ff3bccf"
+dependencies = [
+ "bitflags",
+ "block",
+ "cocoa-foundation",
+ "foreign-types",
+ "log",
+ "objc",
+]
+
+[[package]]
+name = "mio"
+version = "0.6.22"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fce347092656428bc8eaf6201042cb551b8d67855af7374542a92a0fbfcac430"
+dependencies = [
+ "cfg-if 0.1.10",
+ "fuchsia-zircon",
+ "fuchsia-zircon-sys",
+ "iovec",
+ "kernel32-sys",
+ "libc",
+ "log",
+ "miow",
+ "net2",
+ "slab",
+ "winapi 0.2.8",
+]
+
+[[package]]
+name = "mio-extras"
+version = "2.0.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "52403fe290012ce777c4626790c8951324a2b9e3316b3143779c72b029742f19"
+dependencies = [
+ "lazycell",
+ "log",
+ "mio",
+ "slab",
+]
+
+[[package]]
+name = "miow"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8c1f2f3b1cf331de6896aabf6e9d55dca90356cc9960cca7eaaf408a355ae919"
+dependencies = [
+ "kernel32-sys",
+ "net2",
+ "winapi 0.2.8",
+ "ws2_32-sys",
+]
+
+[[package]]
+name = "naga"
+version = "0.2.0"
+source = "git+https://github.com/gfx-rs/naga?rev=96c80738650822de35f77ab6a589f309460c8f39#96c80738650822de35f77ab6a589f309460c8f39"
+dependencies = [
+ "bitflags",
+ "fxhash",
+ "log",
+ "num-traits",
+ "petgraph",
+ "spirv_headers",
+ "thiserror",
+]
+
+[[package]]
+name = "ndk"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "95a356cafe20aee088789830bfea3a61336e84ded9e545e00d3869ce95dcb80c"
+dependencies = [
+ "jni-sys",
+ "ndk-sys",
+ "num_enum",
+]
+
+[[package]]
+name = "ndk-glue"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d1730ee2e3de41c3321160a6da815f008c4006d71b095880ea50e17cf52332b8"
+dependencies = [
+ "android_log-sys",
+ "lazy_static",
+ "libc",
+ "log",
+ "ndk",
+ "ndk-sys",
+]
+
+[[package]]
+name = "ndk-sys"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2b2820aca934aba5ed91c79acc72b6a44048ceacc5d36c035ed4e051f12d887d"
+
+[[package]]
+name = "net2"
+version = "0.2.35"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3ebc3ec692ed7c9a255596c67808dee269f64655d8baf7b4f0638e51ba1d6853"
+dependencies = [
+ "cfg-if 0.1.10",
+ "libc",
+ "winapi 0.3.9",
+]
+
+[[package]]
+name = "nix"
+version = "0.14.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6c722bee1037d430d0f8e687bbdbf222f27cc6e4e68d5caf630857bb2b6dbdce"
+dependencies = [
+ "bitflags",
+ "cc",
+ "cfg-if 0.1.10",
+ "libc",
+ "void",
+]
+
+[[package]]
+name = "num-integer"
+version = "0.1.44"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d2cc698a63b549a70bc047073d2949cce27cd1c7b0a4a862d08a8031bc2801db"
+dependencies = [
+ "autocfg",
+ "num-traits",
+]
+
+[[package]]
+name = "num-traits"
+version = "0.2.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290"
+dependencies = [
+ "autocfg",
+]
+
+[[package]]
+name = "num_enum"
+version = "0.4.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ca565a7df06f3d4b485494f25ba05da1435950f4dc263440eda7a6fa9b8e36e4"
+dependencies = [
+ "derivative",
+ "num_enum_derive",
+]
+
+[[package]]
+name = "num_enum_derive"
+version = "0.4.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ffa5a33ddddfee04c0283a7653987d634e880347e96b5b2ed64de07efb59db9d"
+dependencies = [
+ "proc-macro-crate",
+ "proc-macro2 1.0.24",
+ "quote 1.0.7",
+ "syn",
+]
+
+[[package]]
+name = "objc"
+version = "0.2.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "915b1b472bc21c53464d6c8461c9d3af805ba1ef837e1cac254428f4a77177b1"
+dependencies = [
+ "malloc_buf",
+ "objc_exception",
+]
+
+[[package]]
+name = "objc_exception"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ad970fb455818ad6cba4c122ad012fae53ae8b4795f86378bce65e4f6bab2ca4"
+dependencies = [
+ "cc",
+]
+
+[[package]]
+name = "once_cell"
+version = "1.5.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "13bd41f508810a131401606d54ac32a467c97172d74ba7662562ebba5ad07fa0"
+
+[[package]]
+name = "ordered-float"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3741934be594d77de1c8461ebcbbe866f585ea616a9753aa78f2bdc69f0e4579"
+dependencies = [
+ "num-traits",
+]
+
+[[package]]
+name = "parking_lot"
+version = "0.10.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d3a704eb390aafdc107b0e392f56a82b668e3a71366993b5340f5833fd62505e"
+dependencies = [
+ "lock_api 0.3.4",
+ "parking_lot_core 0.7.2",
+]
+
+[[package]]
+name = "parking_lot"
+version = "0.11.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a4893845fa2ca272e647da5d0e46660a314ead9c2fdd9a883aabc32e481a8733"
+dependencies = [
+ "instant",
+ "lock_api 0.4.1",
+ "parking_lot_core 0.8.0",
+]
+
+[[package]]
+name = "parking_lot_core"
+version = "0.7.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d58c7c768d4ba344e3e8d72518ac13e259d7c7ade24167003b8488e10b6740a3"
+dependencies = [
+ "cfg-if 0.1.10",
+ "cloudabi 0.0.3",
+ "libc",
+ "redox_syscall",
+ "smallvec",
+ "winapi 0.3.9",
+]
+
+[[package]]
+name = "parking_lot_core"
+version = "0.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c361aa727dd08437f2f1447be8b59a33b0edd15e0fcee698f935613d9efbca9b"
+dependencies = [
+ "cfg-if 0.1.10",
+ "cloudabi 0.1.0",
+ "instant",
+ "libc",
+ "redox_syscall",
+ "smallvec",
+ "winapi 0.3.9",
+]
+
+[[package]]
+name = "percent-encoding"
+version = "2.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e"
+
+[[package]]
+name = "petgraph"
+version = "0.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "467d164a6de56270bd7c4d070df81d07beace25012d5103ced4e9ff08d6afdb7"
+dependencies = [
+ "fixedbitset",
+ "indexmap",
+]
+
+[[package]]
+name = "pin-project-lite"
+version = "0.1.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c917123afa01924fc84bb20c4c03f004d9c38e5127e3c039bbf7f4b9c76a2f6b"
+
+[[package]]
+name = "pkg-config"
+version = "0.3.19"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c"
+
+[[package]]
+name = "player"
+version = "0.1.0"
+dependencies = [
+ "env_logger",
+ "log",
+ "raw-window-handle",
+ "renderdoc",
+ "ron",
+ "serde",
+ "wgpu-core",
+ "wgpu-subscriber",
+ "wgpu-types",
+ "winit",
+]
+
+[[package]]
+name = "proc-macro-crate"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1d6ea3c4595b96363c13943497db34af4460fb474a95c43f4446ad341b8c9785"
+dependencies = [
+ "toml",
+]
+
+[[package]]
+name = "proc-macro2"
+version = "0.4.30"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cf3d2011ab5c909338f7887f4fc896d35932e29146c12c8d01da6b22a80ba759"
+dependencies = [
+ "unicode-xid 0.1.0",
+]
+
+[[package]]
+name = "proc-macro2"
+version = "1.0.24"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1e0704ee1a7e00d7bb417d0770ea303c1bccbabf0ef1667dae92b5967f5f8a71"
+dependencies = [
+ "unicode-xid 0.2.1",
+]
+
+[[package]]
+name = "quick-error"
+version = "1.2.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0"
+
+[[package]]
+name = "quote"
+version = "0.6.13"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6ce23b6b870e8f94f81fb0a363d65d86675884b34a09043c81e5562f11c1f8e1"
+dependencies = [
+ "proc-macro2 0.4.30",
+]
+
+[[package]]
+name = "quote"
+version = "1.0.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "aa563d17ecb180e500da1cfd2b028310ac758de548efdd203e18f283af693f37"
+dependencies = [
+ "proc-macro2 1.0.24",
+]
+
+[[package]]
+name = "range-alloc"
+version = "0.1.1"
+source = "git+https://github.com/gfx-rs/gfx?rev=1d14789011cb892f4c1a205d3f8a87d479c2e354#1d14789011cb892f4c1a205d3f8a87d479c2e354"
+
+[[package]]
+name = "raw-window-handle"
+version = "0.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0a441a7a6c80ad6473bd4b74ec1c9a4c951794285bf941c2126f607c72e48211"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "redox_syscall"
+version = "0.1.57"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce"
+
+[[package]]
+name = "regex"
+version = "1.4.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "38cf2c13ed4745de91a5eb834e11c00bcc3709e773173b2ce4c56c9fbde04b9c"
+dependencies = [
+ "aho-corasick",
+ "memchr",
+ "regex-syntax",
+ "thread_local",
+]
+
+[[package]]
+name = "regex-automata"
+version = "0.1.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ae1ded71d66a4a97f5e961fd0cb25a5f366a42a41570d16a763a69c092c26ae4"
+dependencies = [
+ "byteorder",
+ "regex-syntax",
+]
+
+[[package]]
+name = "regex-syntax"
+version = "0.6.21"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3b181ba2dcf07aaccad5448e8ead58db5b742cf85dfe035e2227f137a539a189"
+
+[[package]]
+name = "renderdoc"
+version = "0.8.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9c9e8488c98756911664c8cc7b86284c320b6a6357d95908458136d7ebe9280c"
+dependencies = [
+ "bitflags",
+ "float-cmp",
+ "libloading",
+ "once_cell",
+ "renderdoc-sys",
+ "winapi 0.3.9",
+ "wio",
+]
+
+[[package]]
+name = "renderdoc-sys"
+version = "0.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "60d4a9058849c3e765fe2fa68b72c1416b1766f27eac3c52d7bac8712ea0d390"
+
+[[package]]
+name = "ron"
+version = "0.6.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f8a58080b7bb83b2ea28c3b7a9a994fd5e310330b7c8ca5258d99b98128ecfe4"
+dependencies = [
+ "base64",
+ "bitflags",
+ "serde",
+]
+
+[[package]]
+name = "rustc_version"
+version = "0.2.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a"
+dependencies = [
+ "semver",
+]
+
+[[package]]
+name = "rusttype"
+version = "0.7.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "310942406a39981bed7e12b09182a221a29e0990f3e7e0c971f131922ed135d5"
+dependencies = [
+ "rusttype 0.8.3",
+]
+
+[[package]]
+name = "rusttype"
+version = "0.8.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9f61411055101f7b60ecf1041d87fb74205fb20b0c7a723f07ef39174cf6b4c0"
+dependencies = [
+ "approx",
+ "ordered-float",
+ "stb_truetype",
+]
+
+[[package]]
+name = "ryu"
+version = "1.0.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e"
+
+[[package]]
+name = "same-file"
+version = "1.0.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502"
+dependencies = [
+ "winapi-util",
+]
+
+[[package]]
+name = "scoped-tls"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ea6a9290e3c9cf0f18145ef7ffa62d68ee0bf5fcd651017e586dc7fd5da448c2"
+
+[[package]]
+name = "scopeguard"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
+
+[[package]]
+name = "semver"
+version = "0.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403"
+dependencies = [
+ "semver-parser",
+]
+
+[[package]]
+name = "semver-parser"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3"
+
+[[package]]
+name = "serde"
+version = "1.0.117"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b88fa983de7720629c9387e9f517353ed404164b1e482c970a90c1a4aaf7dc1a"
+dependencies = [
+ "serde_derive",
+]
+
+[[package]]
+name = "serde_derive"
+version = "1.0.117"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cbd1ae72adb44aab48f325a02444a5fc079349a8d804c1fc922aed3f7454c74e"
+dependencies = [
+ "proc-macro2 1.0.24",
+ "quote 1.0.7",
+ "syn",
+]
+
+[[package]]
+name = "serde_json"
+version = "1.0.59"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dcac07dbffa1c65e7f816ab9eba78eb142c6d44410f4eeba1e26e4f5dfa56b95"
+dependencies = [
+ "itoa",
+ "ryu",
+ "serde",
+]
+
+[[package]]
+name = "sharded-slab"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7b4921be914e16899a80adefb821f8ddb7974e3f1250223575a44ed994882127"
+dependencies = [
+ "lazy_static",
+ "loom",
+]
+
+[[package]]
+name = "slab"
+version = "0.4.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8"
+
+[[package]]
+name = "smallvec"
+version = "1.4.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fbee7696b84bbf3d89a1c2eccff0850e3047ed46bfcd2e92c29a2d074d57e252"
+
+[[package]]
+name = "smithay-client-toolkit"
+version = "0.6.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "421c8dc7acf5cb205b88160f8b4cc2c5cfabe210e43b2f80f009f4c1ef910f1d"
+dependencies = [
+ "andrew",
+ "bitflags",
+ "dlib",
+ "lazy_static",
+ "memmap",
+ "nix",
+ "wayland-client",
+ "wayland-protocols",
+]
+
+[[package]]
+name = "spirv_cross"
+version = "0.22.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d8221f4aebf53a4447aebd4fe29ebff2c66dd2c2821e63675e09e85bd21c8633"
+dependencies = [
+ "cc",
+ "js-sys",
+ "wasm-bindgen",
+]
+
+[[package]]
+name = "spirv_headers"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1f5b132530b1ac069df335577e3581765995cba5a13995cdbbdbc8fb057c532c"
+dependencies = [
+ "bitflags",
+ "num-traits",
+]
+
+[[package]]
+name = "stb_truetype"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f77b6b07e862c66a9f3e62a07588fee67cd90a9135a2b942409f195507b4fb51"
+dependencies = [
+ "byteorder",
+]
+
+[[package]]
+name = "storage-map"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "418bb14643aa55a7841d5303f72cf512cfb323b8cc221d51580500a1ca75206c"
+dependencies = [
+ "lock_api 0.4.1",
+]
+
+[[package]]
+name = "syn"
+version = "1.0.48"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cc371affeffc477f42a221a1e4297aedcea33d47d19b61455588bd9d8f6b19ac"
+dependencies = [
+ "proc-macro2 1.0.24",
+ "quote 1.0.7",
+ "unicode-xid 0.2.1",
+]
+
+[[package]]
+name = "termcolor"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bb6bfa289a4d7c5766392812c0a1f4c1ba45afa1ad47803c11e1f407d846d75f"
+dependencies = [
+ "winapi-util",
+]
+
+[[package]]
+name = "thiserror"
+version = "1.0.22"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0e9ae34b84616eedaaf1e9dd6026dbe00dcafa92aa0c8077cb69df1fcfe5e53e"
+dependencies = [
+ "thiserror-impl",
+]
+
+[[package]]
+name = "thiserror-impl"
+version = "1.0.22"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9ba20f23e85b10754cd195504aebf6a27e2e6cbe28c17778a0c930724628dd56"
+dependencies = [
+ "proc-macro2 1.0.24",
+ "quote 1.0.7",
+ "syn",
+]
+
+[[package]]
+name = "thread-id"
+version = "3.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c7fbf4c9d56b320106cd64fd024dadfa0be7cb4706725fc44a7d7ce952d820c1"
+dependencies = [
+ "libc",
+ "redox_syscall",
+ "winapi 0.3.9",
+]
+
+[[package]]
+name = "thread_local"
+version = "1.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d40c6d1b69745a6ec6fb1ca717914848da4b44ae29d9b3080cbee91d72a69b14"
+dependencies = [
+ "lazy_static",
+]
+
+[[package]]
+name = "thunderdome"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7572415bd688d401c52f6e36f4c8e805b9ae1622619303b9fa835d531db0acae"
+
+[[package]]
+name = "time"
+version = "0.1.44"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255"
+dependencies = [
+ "libc",
+ "wasi",
+ "winapi 0.3.9",
+]
+
+[[package]]
+name = "toml"
+version = "0.5.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "75cf45bb0bef80604d001caaec0d09da99611b3c0fd39d3080468875cdb65645"
+dependencies = [
+ "serde",
+]
+
+[[package]]
+name = "tracing"
+version = "0.1.21"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b0987850db3733619253fe60e17cb59b82d37c7e6c0236bb81e4d6b87c879f27"
+dependencies = [
+ "cfg-if 0.1.10",
+ "pin-project-lite",
+ "tracing-attributes",
+ "tracing-core",
+]
+
+[[package]]
+name = "tracing-attributes"
+version = "0.1.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "80e0ccfc3378da0cce270c946b676a376943f5cd16aeba64568e7939806f4ada"
+dependencies = [
+ "proc-macro2 1.0.24",
+ "quote 1.0.7",
+ "syn",
+]
+
+[[package]]
+name = "tracing-core"
+version = "0.1.17"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f50de3927f93d202783f4513cda820ab47ef17f624b03c096e86ef00c67e6b5f"
+dependencies = [
+ "lazy_static",
+]
+
+[[package]]
+name = "tracing-log"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5e0f8c7178e13481ff6765bd169b33e8d554c5d2bbede5e32c356194be02b9b9"
+dependencies = [
+ "lazy_static",
+ "log",
+ "tracing-core",
+]
+
+[[package]]
+name = "tracing-serde"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fb65ea441fbb84f9f6748fd496cf7f63ec9af5bca94dd86456978d055e8eb28b"
+dependencies = [
+ "serde",
+ "tracing-core",
+]
+
+[[package]]
+name = "tracing-subscriber"
+version = "0.2.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a1fa8f0c8f4c594e4fc9debc1990deab13238077271ba84dd853d54902ee3401"
+dependencies = [
+ "ansi_term",
+ "chrono",
+ "lazy_static",
+ "matchers",
+ "regex",
+ "serde",
+ "serde_json",
+ "sharded-slab",
+ "smallvec",
+ "thread_local",
+ "tracing",
+ "tracing-core",
+ "tracing-log",
+ "tracing-serde",
+]
+
+[[package]]
+name = "unicode-xid"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fc72304796d0818e357ead4e000d19c9c174ab23dc11093ac919054d20a6a7fc"
+
+[[package]]
+name = "unicode-xid"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564"
+
+[[package]]
+name = "void"
+version = "1.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d"
+
+[[package]]
+name = "walkdir"
+version = "2.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "777182bc735b6424e1a57516d35ed72cb8019d85c8c9bf536dccb3445c1a2f7d"
+dependencies = [
+ "same-file",
+ "winapi 0.3.9",
+ "winapi-util",
+]
+
+[[package]]
+name = "wasi"
+version = "0.10.0+wasi-snapshot-preview1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f"
+
+[[package]]
+name = "wasm-bindgen"
+version = "0.2.68"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1ac64ead5ea5f05873d7c12b545865ca2b8d28adfc50a49b84770a3a97265d42"
+dependencies = [
+ "cfg-if 0.1.10",
+ "wasm-bindgen-macro",
+]
+
+[[package]]
+name = "wasm-bindgen-backend"
+version = "0.2.68"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f22b422e2a757c35a73774860af8e112bff612ce6cb604224e8e47641a9e4f68"
+dependencies = [
+ "bumpalo",
+ "lazy_static",
+ "log",
+ "proc-macro2 1.0.24",
+ "quote 1.0.7",
+ "syn",
+ "wasm-bindgen-shared",
+]
+
+[[package]]
+name = "wasm-bindgen-macro"
+version = "0.2.68"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6b13312a745c08c469f0b292dd2fcd6411dba5f7160f593da6ef69b64e407038"
+dependencies = [
+ "quote 1.0.7",
+ "wasm-bindgen-macro-support",
+]
+
+[[package]]
+name = "wasm-bindgen-macro-support"
+version = "0.2.68"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f249f06ef7ee334cc3b8ff031bfc11ec99d00f34d86da7498396dc1e3b1498fe"
+dependencies = [
+ "proc-macro2 1.0.24",
+ "quote 1.0.7",
+ "syn",
+ "wasm-bindgen-backend",
+ "wasm-bindgen-shared",
+]
+
+[[package]]
+name = "wasm-bindgen-shared"
+version = "0.2.68"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1d649a3145108d7d3fbcde896a468d1bd636791823c9921135218ad89be08307"
+
+[[package]]
+name = "wayland-client"
+version = "0.23.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "af1080ebe0efabcf12aef2132152f616038f2d7dcbbccf7b2d8c5270fe14bcda"
+dependencies = [
+ "bitflags",
+ "calloop",
+ "downcast-rs",
+ "libc",
+ "mio",
+ "nix",
+ "wayland-commons",
+ "wayland-scanner",
+ "wayland-sys",
+]
+
+[[package]]
+name = "wayland-commons"
+version = "0.23.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bb66b0d1a27c39bbce712b6372131c6e25149f03ffb0cd017cf8f7de8d66dbdb"
+dependencies = [
+ "nix",
+ "wayland-sys",
+]
+
+[[package]]
+name = "wayland-protocols"
+version = "0.23.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6cc286643656742777d55dc8e70d144fa4699e426ca8e9d4ef454f4bf15ffcf9"
+dependencies = [
+ "bitflags",
+ "wayland-client",
+ "wayland-commons",
+ "wayland-scanner",
+]
+
+[[package]]
+name = "wayland-scanner"
+version = "0.23.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "93b02247366f395b9258054f964fe293ddd019c3237afba9be2ccbe9e1651c3d"
+dependencies = [
+ "proc-macro2 0.4.30",
+ "quote 0.6.13",
+ "xml-rs",
+]
+
+[[package]]
+name = "wayland-sys"
+version = "0.23.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d94e89a86e6d6d7c7c9b19ebf48a03afaac4af6bc22ae570e9a24124b75358f4"
+dependencies = [
+ "dlib",
+ "lazy_static",
+]
+
+[[package]]
+name = "wgpu-core"
+version = "0.6.0"
+dependencies = [
+ "arrayvec",
+ "bitflags",
+ "cfg_aliases",
+ "copyless",
+ "fxhash",
+ "gfx-backend-dx11",
+ "gfx-backend-dx12",
+ "gfx-backend-empty",
+ "gfx-backend-metal",
+ "gfx-backend-vulkan",
+ "gfx-hal",
+ "gpu-alloc",
+ "gpu-descriptor",
+ "loom",
+ "naga",
+ "parking_lot 0.11.0",
+ "raw-window-handle",
+ "ron",
+ "serde",
+ "smallvec",
+ "thiserror",
+ "tracing",
+ "wgpu-types",
+]
+
+[[package]]
+name = "wgpu-subscriber"
+version = "0.1.0"
+source = "git+https://github.com/gfx-rs/subscriber.git?rev=cdc9feb53f152f9c41905ed9efeff2c1ed214361#cdc9feb53f152f9c41905ed9efeff2c1ed214361"
+dependencies = [
+ "parking_lot 0.11.0",
+ "thread-id",
+ "tracing",
+ "tracing-log",
+ "tracing-subscriber",
+]
+
+[[package]]
+name = "wgpu-types"
+version = "0.6.0"
+dependencies = [
+ "bitflags",
+ "serde",
+]
+
+[[package]]
+name = "winapi"
+version = "0.2.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a"
+
+[[package]]
+name = "winapi"
+version = "0.3.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
+dependencies = [
+ "winapi-i686-pc-windows-gnu",
+ "winapi-x86_64-pc-windows-gnu",
+]
+
+[[package]]
+name = "winapi-build"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc"
+
+[[package]]
+name = "winapi-i686-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
+
+[[package]]
+name = "winapi-util"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178"
+dependencies = [
+ "winapi 0.3.9",
+]
+
+[[package]]
+name = "winapi-x86_64-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
+
+[[package]]
+name = "winit"
+version = "0.22.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1e4ccbf7ddb6627828eace16cacde80fc6bf4dbb3469f88487262a02cf8e7862"
+dependencies = [
+ "bitflags",
+ "cocoa",
+ "core-foundation 0.7.0",
+ "core-graphics",
+ "core-video-sys",
+ "dispatch",
+ "instant",
+ "lazy_static",
+ "libc",
+ "log",
+ "mio",
+ "mio-extras",
+ "ndk",
+ "ndk-glue",
+ "ndk-sys",
+ "objc",
+ "parking_lot 0.10.2",
+ "percent-encoding",
+ "raw-window-handle",
+ "smithay-client-toolkit",
+ "wayland-client",
+ "winapi 0.3.9",
+ "x11-dl",
+]
+
+[[package]]
+name = "wio"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5d129932f4644ac2396cb456385cbf9e63b5b30c6e8dc4820bdca4eb082037a5"
+dependencies = [
+ "winapi 0.3.9",
+]
+
+[[package]]
+name = "ws2_32-sys"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e"
+dependencies = [
+ "winapi 0.2.8",
+ "winapi-build",
+]
+
+[[package]]
+name = "x11-dl"
+version = "2.18.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2bf981e3a5b3301209754218f962052d4d9ee97e478f4d26d4a6eced34c1fef8"
+dependencies = [
+ "lazy_static",
+ "libc",
+ "maybe-uninit",
+ "pkg-config",
+]
+
+[[package]]
+name = "xdg"
+version = "2.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d089681aa106a86fade1b0128fb5daf07d5867a509ab036d99988dec80429a57"
+
+[[package]]
+name = "xml-rs"
+version = "0.8.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b07db065a5cf61a7e4ba64f29e67db906fb1787316516c4e6e5ff0fea1efcd8a"
diff --git a/gfx/wgpu/Cargo.toml b/gfx/wgpu/Cargo.toml
new file mode 100644
index 0000000000..0c0dfcc41f
--- /dev/null
+++ b/gfx/wgpu/Cargo.toml
@@ -0,0 +1,7 @@
+[workspace]
+members = [
+ "dummy",
+ "player",
+ "wgpu-core",
+ "wgpu-types",
+]
diff --git a/gfx/wgpu/LICENSE b/gfx/wgpu/LICENSE
new file mode 100644
index 0000000000..a612ad9813
--- /dev/null
+++ b/gfx/wgpu/LICENSE
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/gfx/wgpu/README.md b/gfx/wgpu/README.md
new file mode 100644
index 0000000000..15f05f9a28
--- /dev/null
+++ b/gfx/wgpu/README.md
@@ -0,0 +1,32 @@
+<img align="right" width="25%" src="logo.png">
+
+This is an active GitHub mirror of the WebGPU implementation in Rust, which now lives in "gfx/wgpu" of [Mozilla-central](https://hg.mozilla.org/mozilla-central/file/tip/gfx/wgpu). Issues and pull requests are accepted, but some bidirectional synchronization may be involved.
+
+# WebGPU
+
+[![Matrix](https://img.shields.io/badge/Dev_Matrix-%23wgpu%3Amatrix.org-blueviolet.svg)](https://matrix.to/#/#wgpu:matrix.org) [![Matrix](https://img.shields.io/badge/User_Matrix-%23wgpu--users%3Amatrix.org-blueviolet.svg)](https://matrix.to/#/#wgpu-users:matrix.org)
+[![Build Status](https://github.com/gfx-rs/wgpu/workflows/CI/badge.svg)](https://github.com/gfx-rs/wgpu/actions)
+
+This is the core logic of an experimental [WebGPU](https://www.w3.org/community/gpu/) implementation. It's written in Rust and is based on [gfx-hal](https://github.com/gfx-rs/gfx) with the help of [gpu-alloc](https://github.com/zakarumych/gpu-alloc) and [gpu-descriptor](https://github.com/zakarumych/gpu-descriptor). See the upstream [WebGPU specification](https://gpuweb.github.io/gpuweb/) (work in progress).
+
+The implementation consists of the following parts:
+
+ - [![Crates.io](https://img.shields.io/crates/v/wgpu-core.svg?label=wgpu-core)](https://crates.io/crates/wgpu-core) [![docs.rs](https://docs.rs/wgpu-core/badge.svg)](https://docs.rs/wgpu-core/) - internal Rust API for WebGPU implementations to use
+ - [![Crates.io](https://img.shields.io/crates/v/wgpu-types.svg?label=wgpu-types)](https://crates.io/crates/wgpu-types) [![docs.rs](https://docs.rs/wgpu-types/badge.svg)](https://docs.rs/wgpu-types/) - Rust types shared between `wgpu-core`, `wgpu-native`, and `wgpu-rs`
+ - `player` - standalone application for replaying the API traces, uses `winit`
+
+This repository contains the core of `wgpu`, and is not usable directly by applications.
+If you are looking for the user-facing Rust API, you need [wgpu-rs](https://github.com/gfx-rs/wgpu-rs).
+If you are looking for the native implementation or bindings to the API in other languages, you need [wgpu-native](https://github.com/gfx-rs/wgpu-native).
+
+## Supported Platforms
+
+ API | Windows 7/10 | Linux & Android | macOS & iOS |
+ ----- | ------------------ | ------------------ | ------------------ |
+ DX11 | :white_check_mark: | | |
+ DX12 | :heavy_check_mark: | | |
+ Vulkan | :heavy_check_mark: | :heavy_check_mark: | |
+ Metal | | | :heavy_check_mark: |
+ OpenGL | | :construction: | :construction: |
+
+:heavy_check_mark: = Primary support — :white_check_mark: = Secondary support — :construction: = Unsupported, but support in progress
diff --git a/gfx/wgpu/bors.toml b/gfx/wgpu/bors.toml
new file mode 100644
index 0000000000..2cbeb4c7a7
--- /dev/null
+++ b/gfx/wgpu/bors.toml
@@ -0,0 +1,10 @@
+status = [
+ "iOS Stable",
+ "MacOS Stable",
+ "MacOS Nightly",
+ "Android Stable",
+ "Ubuntu Stable",
+ "Ubuntu Nightly",
+ "Windows Stable",
+ "Windows Nightly",
+]
diff --git a/gfx/wgpu/dummy/Cargo.toml b/gfx/wgpu/dummy/Cargo.toml
new file mode 100644
index 0000000000..3d253f05ed
--- /dev/null
+++ b/gfx/wgpu/dummy/Cargo.toml
@@ -0,0 +1,16 @@
+[package]
+name = "dummy"
+version = "0.1.0"
+authors = [
+ "Dzmitry Malyshau <kvark@mozilla.com>",
+]
+edition = "2018"
+publish = false
+
+[features]
+
+[dependencies.wgc]
+path = "../wgpu-core"
+package = "wgpu-core"
+version = "0.6"
+features = ["serial-pass", "trace"]
diff --git a/gfx/wgpu/dummy/README.md b/gfx/wgpu/dummy/README.md
new file mode 100644
index 0000000000..5a95a787a5
--- /dev/null
+++ b/gfx/wgpu/dummy/README.md
@@ -0,0 +1,3 @@
+# wgpu dummy
+
+This is a dummy build target that makes `cargo check` and `cargo test` in the workspace cover all of the API.
diff --git a/gfx/wgpu/dummy/src/lib.rs b/gfx/wgpu/dummy/src/lib.rs
new file mode 100644
index 0000000000..e0032240a4
--- /dev/null
+++ b/gfx/wgpu/dummy/src/lib.rs
@@ -0,0 +1,3 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
diff --git a/gfx/wgpu/etc/big-picture.xml b/gfx/wgpu/etc/big-picture.xml
new file mode 100644
index 0000000000..7c8d164d5f
--- /dev/null
+++ b/gfx/wgpu/etc/big-picture.xml
@@ -0,0 +1,8 @@
+<!-- This Source Code Form is subject to the terms of the Mozilla Public
+ - License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ - You can obtain one at http://mozilla.org/MPL/2.0/. -->
+<mxfile host="app.diagrams.net" modified="2020-11-16T21:27:20.203Z" agent="5.0 (Macintosh)" etag="9OfcpEHAtX6aquDOhW6o" version="13.9.9" type="device">
+ <diagram id="D9TiAgX86u4XTTQGlvGa" name="Page-1">
+ 7V1tk5s2EP41nmk/+AYk3vzxXtI0aZrenKe9Jl8yspExPQwuYJ+vv77CIBskgTEGgdNeZjJICDB69tldrVZiBO9Xu/chWi9/DWzsjYBi70bwYQTARDPI/0nFW1qhQz2tcELXTqvUY8XU/QdnlUpWu3FtHBUaxkHgxe66WDkPfB/P40IdCsPgtdhsEXjFp66Rg7mK6Rx5fO2za8fLtNbSlWP9z9h1lvTJqpKdmaH5ixMGGz97nh/4OD2zQvQ2WdNoiezgNfc8+G4E78MgiNOj1e4ee0mv0h5Lr/up5OzhJ4fYj+tc4H18dh6/or9/Htvbr78+xar2dTU2QXqbLfI2WV98nI59FLtb8h7KM569f/w96eQ1QSJ9jfiN9trr0o3xdI3mSfmVCMYI3i3jlUdKKjm0UbTEdlZYuJ53H3hBuL8ULhYLMJ+T+i0OY5cAceu5jk/OxUFyF5SV5uTdcJhcHvjxNHtycrsoDoMXnLuhbcwM3SBn+G7Jeip5Et6x/U8kGgcrHIdvpMmOSm8GWSbNwMjKr0fZUGndMicXtA5l4ugcbn3EhRxk0JwBk2FxMH3uHCOs2jo2W8RoYpgQtYWRUcRItQQY6QKMzBYwQr9vP8cPzz+9C55U7+nbt1/86YexanCdj22iZbJiEMbLwAl85L071t4daz8FSa/uO+4vHMdvmZJEmzgoQoZ3bvwnOVay4y/J8Y2elR52uVMPb1mhAHP6M5PfVqpJsqoo2IRzXCWXhhikEHupeBYUtaDHs0sfA5c8+gCuxmCr68U7xCh0cJxdxMB2+BUXsM0oZ9sfG+8F+f81th3sHUUECNhmdsQ2MUYKhxFRhAO0VC30PmR0HYR87wNL0PtWV72vT7jeHwHDi5O+dLfk0EkOnzZRfEAkPTsL6UlaQ56fu6RV5Kw5rkCuA6DApAiUwCZBEU6dsQRYw7VJpJvDtz/pDZJC7qqkeLxsX6LXpYqOuumgCrmT9ouONfb2pKqdJjSaFxi+i2CFpmRYb4CeQ1ath2sOyi8FJOvgWsnIk7jCuri2Dev+0tswRG+5BuvEUYkq3B1Gv3MjObY90Kvak4P0F7TqFEFZqqRELYBTeqGJDmpTlxhmr0LHSYnOGCOgMV50+kadedFwIllglBuzoHDUugJT1G6KNIkBtc3PINSUrond0FI1petV7btRU7oqSeqaCE9jUZXg4YBBeTg6H5cUDDCe308/lY0juMbRZr0mQDceiSQ98wnNsFcUBG4AyI42Vq5t76UsxJH7D5rt75eAmvGN3Fy/G+kPQpgrZZwbmBwC5NlTRplnUDpgUW5UMMl826bmhjYJFosId2JJ1FJZSMZwBZSMvzcBPTGO9uS9JQ1Ip+6OJynEr856kwM/vVkJ+tESrZNDAg7yPOwFTohWCYg4dMn7JcgXzz0eT9ydGMQu3B2mkyKCcaytY8vWRCEeC8yg0VKIBxqMU6kJQjxAZtBb12Sr8u4HORc4CzUVORyEr6BZbAj3hK8wqWzfja9Qy8bc4e1bTQvRo444xLo4HTGzdE2vFL36OkIDDEpqTR0BW9AR4kkXXa6OUEf1B5dHHXHmSLar2Zq6UZF0JNtbcItjZWKnxyEf1f/ubbLGTEQLJzm74pvYJkvmW0ObbEoyytpVGWWDncUzqo2yzqp7Q4JR1jj639zcDIn6NUxtcgWlRCbbLWgDNv4i1fqKtQG8Cm0AJGkD/aq0ATurfEobsLMUUrSBzmmDz8j3g82VKYQW2A9Z5d07+4H0WchGkVYgKdJq1GT/MIL57BziqWA+nPQQzOfzsKY43AaDIj9ND+o0I5UNzh3KvZHf5KF5/PD0x/g+DKKz86/4vjMnMyWho+OhKMqoeUigLnGxohccz5dlMGFjD1MLYADGLYa9j8ro5E0ODR85aFg8yQAox7oNaNgwotk3NEB6ENsctJVU6w6ZBzYhCXlPVBAsdrCPQxQnicLJyhDs2/WnJ4l2w2HzRMnhTE9Skb98enJM5BIqVoHSdPQ03OlKICvxReK8lSAk3Sn7zWGRn3dE/yd/tfC3Qn6V/BXt+dC5r8lKP2qYPF1K3dP22GibkuKBJoBa0btWGNesJG3x3AHwwTVknlM2AObaZ1OP3QbD+XV/9o56RoPkv1bmszfITYKTCbgy+ssOiJ/v6relMFr09Fs39o0UAqCrKmgCFJRBcD6Osn0ZMr3p7HwL9FYUq9jj4yHxW5xuIla9ktZTmd0sqBKyrz3OCztS64Ph6oQ1+ZNqk8+219vVCGIR4/NfiMkXu5VSdEJtqhdFpoI/FTpBNY3B6wA+Q2EVe33qBXCVekHcUL9QMVwWR+ehdRa7oaWeIWwthJF0Y27h2aKlSDozySFayyyMpGstRNKFvJMdVStQrJphbTLlUgKUTN8yaJpyd9RQ+eg5uSXy/pPUYtK4hLsLSU2ivo6czgZJXO0wUpDY0YlPexGGA97ooXUsutaONLQnTTvy0ws2tFXwv3bMhX/7W2KicCB8l9S6dNFHPccDSqYWH19D0XJIrFroyT8hq/Z/o3Qxa64+/evGFxHt6yU1X0aT7Yyc74oMMIJuCRWB9Ag6mxinVU+pAeXC9hMZEXreODviaM9AIvR0qNXGBLyiXl2IXnZSek/W2pJhrQ9J37KsNT/d7XjB65DMdT9OcO8ZxodXOCLzHs9f+FT8k73bQ/a8rjLdWTfi0kYsU7ywQbSlCau2fPs22dqclPa7igsy5uk2VHv3MZ8dTwniuf5LVR+edBhyPSTaTJnWXZrzU8RHU5l+L0n5OXWfA6ay9g0GAlS5XL/fprXTAu2Q9OkxLfBk+/0+4FeWMGiU0blBRoFpQVh0V8CA3BXx60sPvhrN9vLUVPOcAOy+lDO7/AqeVvderD1San0j12YjJUU87i5VbGZl+45W3/GzoaKs58VunOyrhmau58alW+Vw1/1AfpRy+/jhx8ZJz9TILfEOETIw/l9WexBBcL7rx+/t3YcvqDIrNQ9fNejNGTT4oNp+ixb62ZDvEmspG77oJrMJW++LvE3Z+2meMZ4ui8iZp0JyFXbosvUtVE3LXwR+Gcj8xxQesB+UszXP7Y/Tqye3pGGozhhxmaN6YaxswF+sEfLjJCm7yzU9GSrr7hs15S9VNMGuj9ZuKbmucWpLMCbkBKB2DK3ufqYN2EaKx++4pQgfP5MH3/0L
+ </diagram>
+</mxfile>
diff --git a/gfx/wgpu/logo.png b/gfx/wgpu/logo.png
new file mode 100644
index 0000000000..17f35634db
--- /dev/null
+++ b/gfx/wgpu/logo.png
Binary files differ
diff --git a/gfx/wgpu/player/Cargo.toml b/gfx/wgpu/player/Cargo.toml
new file mode 100644
index 0000000000..7e4a766be7
--- /dev/null
+++ b/gfx/wgpu/player/Cargo.toml
@@ -0,0 +1,43 @@
+[package]
+name = "player"
+version = "0.1.0"
+authors = [
+ "Dzmitry Malyshau <kvark@mozilla.com>",
+]
+edition = "2018"
+description = "WebGPU trace player"
+homepage = "https://github.com/gfx-rs/wgpu"
+repository = "https://github.com/gfx-rs/wgpu"
+keywords = ["graphics"]
+license = "MPL-2.0"
+publish = false
+
+[features]
+
+[dependencies]
+env_logger = "0.7"
+log = "0.4"
+raw-window-handle = "0.3"
+renderdoc = { version = "0.8", optional = true, default_features = false }
+ron = "0.6"
+winit = { version = "0.22", optional = true }
+
+[dependencies.wgt]
+path = "../wgpu-types"
+package = "wgpu-types"
+version = "0.6"
+features = ["replay"]
+
+[dependencies.wgc]
+path = "../wgpu-core"
+package = "wgpu-core"
+version = "0.6"
+features = ["replay", "raw-window-handle"]
+
+[dependencies.wgpu-subscriber]
+git = "https://github.com/gfx-rs/subscriber.git"
+rev = "cdc9feb53f152f9c41905ed9efeff2c1ed214361"
+version = "0.1"
+
+[dev-dependencies]
+serde = "1"
diff --git a/gfx/wgpu/player/README.md b/gfx/wgpu/player/README.md
new file mode 100644
index 0000000000..859e079c7e
--- /dev/null
+++ b/gfx/wgpu/player/README.md
@@ -0,0 +1,13 @@
+# wgpu player
+
+This is an application that allows replaying `wgpu` workloads recorded elsewhere. The player must be built from
+the same revision that the recording application was linked against; otherwise the data may fail to load.
+
+Launch as:
+```sh
+play <trace-dir>
+```
+
+When built with the "winit" feature, the player can replay workloads that operate on a swapchain. It renders each frame sequentially, then waits for the user to close the window. When built without "winit", it launches in console mode and can replay any trace that doesn't use swapchains.
+
+Note: replaying is currently restricted to the same backend as the one used for recording a trace. It is straightforward, however, to replace the backend in the RON file, since it is serialized as plain text; see the sketch below. Valid values are: Vulkan, Metal, Dx12, and Dx11.
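+
+For example, the head of a recorded trace might look like this (a minimal, hypothetical sketch; the `Init` action carries the device descriptor and the backend, as read by the player, but the descriptor fields vary with the `wgpu-core` revision that recorded the trace):
+
+```ron
+[
+    Init(
+        desc: (
+            // device descriptor fields as recorded ...
+        ),
+        backend: Vulkan, // edit this value to Metal, Dx12, or Dx11 to retarget the replay
+    ),
+    // ... the remaining recorded actions ...
+]
+```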
diff --git a/gfx/wgpu/player/src/bin/play.rs b/gfx/wgpu/player/src/bin/play.rs
new file mode 100644
index 0000000000..522d7e64b7
--- /dev/null
+++ b/gfx/wgpu/player/src/bin/play.rs
@@ -0,0 +1,179 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*! This is a player for WebGPU traces.
+!*/
+
+use player::{GlobalPlay as _, IdentityPassThroughFactory};
+use wgc::{device::trace, gfx_select};
+
+use std::{
+ fs,
+ path::{Path, PathBuf},
+};
+
+fn main() {
+ #[cfg(feature = "winit")]
+ use winit::{event_loop::EventLoop, window::WindowBuilder};
+
+ wgpu_subscriber::initialize_default_subscriber(
+ std::env::var("WGPU_CHROME_TRACE")
+ .as_ref()
+ .map(Path::new)
+ .ok(),
+ );
+
+ #[cfg(feature = "renderdoc")]
+ #[cfg_attr(feature = "winit", allow(unused))]
+ let mut rd = renderdoc::RenderDoc::<renderdoc::V110>::new()
+ .expect("Failed to connect to RenderDoc: are you running without it?");
+
+ //TODO: setting for the backend bits
+ //TODO: setting for the target frame, or controls
+
+ let dir = match std::env::args().nth(1) {
+ Some(arg) if Path::new(&arg).is_dir() => PathBuf::from(arg),
+ _ => panic!("Provide the dir path as the parameter"),
+ };
+
+ log::info!("Loading trace '{:?}'", dir);
+ let file = fs::File::open(dir.join(trace::FILE_NAME)).unwrap();
+ let mut actions: Vec<trace::Action> = ron::de::from_reader(file).unwrap();
+ actions.reverse(); // allows us to pop from the top
+ log::info!("Found {} actions", actions.len());
+
+ #[cfg(feature = "winit")]
+ let event_loop = {
+ log::info!("Creating a window");
+ EventLoop::new()
+ };
+ #[cfg(feature = "winit")]
+ let window = WindowBuilder::new()
+ .with_title("wgpu player")
+ .with_resizable(false)
+ .build(&event_loop)
+ .unwrap();
+
+ let global = wgc::hub::Global::new(
+ "player",
+ IdentityPassThroughFactory,
+ wgt::BackendBit::PRIMARY,
+ );
+ let mut command_buffer_id_manager = wgc::hub::IdentityManager::default();
+
+ #[cfg(feature = "winit")]
+ let surface =
+ global.instance_create_surface(&window, wgc::id::TypedId::zip(0, 1, wgt::Backend::Empty));
+
+ let device = match actions.pop() {
+ Some(trace::Action::Init { desc, backend }) => {
+ log::info!("Initializing the device for backend: {:?}", backend);
+ let adapter = global
+ .request_adapter(
+ &wgc::instance::RequestAdapterOptions {
+ power_preference: wgt::PowerPreference::LowPower,
+ #[cfg(feature = "winit")]
+ compatible_surface: Some(surface),
+ #[cfg(not(feature = "winit"))]
+ compatible_surface: None,
+ },
+ wgc::instance::AdapterInputs::IdSet(
+ &[wgc::id::TypedId::zip(0, 0, backend)],
+ |id| id.backend(),
+ ),
+ )
+ .expect("Unable to find an adapter for selected backend");
+
+ let info = gfx_select!(adapter => global.adapter_get_info(adapter)).unwrap();
+ log::info!("Picked '{}'", info.name);
+ let id = wgc::id::TypedId::zip(1, 0, backend);
+ let (_, error) = gfx_select!(adapter => global.adapter_request_device(
+ adapter,
+ &desc,
+ None,
+ id
+ ));
+ if let Some(e) = error {
+ panic!("{:?}", e);
+ }
+ id
+ }
+ _ => panic!("Expected Action::Init"),
+ };
+
+ log::info!("Executing actions");
+ #[cfg(not(feature = "winit"))]
+ {
+ #[cfg(feature = "renderdoc")]
+ rd.start_frame_capture(std::ptr::null(), std::ptr::null());
+
+ while let Some(action) = actions.pop() {
+ gfx_select!(device => global.process(device, action, &dir, &mut command_buffer_id_manager));
+ }
+
+ #[cfg(feature = "renderdoc")]
+ rd.end_frame_capture(std::ptr::null(), std::ptr::null());
+ gfx_select!(device => global.device_poll(device, true)).unwrap();
+ }
+ #[cfg(feature = "winit")]
+ {
+ use winit::{
+ event::{ElementState, Event, KeyboardInput, VirtualKeyCode, WindowEvent},
+ event_loop::ControlFlow,
+ };
+
+ let mut frame_count = 0;
+ event_loop.run(move |event, _, control_flow| {
+ *control_flow = ControlFlow::Poll;
+ match event {
+ Event::MainEventsCleared => {
+ window.request_redraw();
+ }
+ Event::RedrawRequested(_) => loop {
+ match actions.pop() {
+ Some(trace::Action::CreateSwapChain(id, desc)) => {
+ log::info!("Initializing the swapchain");
+ assert_eq!(id.to_surface_id(), surface);
+ window.set_inner_size(winit::dpi::PhysicalSize::new(
+ desc.width,
+ desc.height,
+ ));
+ gfx_select!(device => global.device_create_swap_chain(device, surface, &desc));
+ }
+ Some(trace::Action::PresentSwapChain(id)) => {
+ frame_count += 1;
+ log::debug!("Presenting frame {}", frame_count);
+ gfx_select!(device => global.swap_chain_present(id));
+ break;
+ }
+ Some(action) => {
+ gfx_select!(device => global.process(device, action, &dir, &mut command_buffer_id_manager));
+ }
+ None => break,
+ }
+ },
+ Event::WindowEvent { event, .. } => match event {
+ WindowEvent::KeyboardInput {
+ input:
+ KeyboardInput {
+ virtual_keycode: Some(VirtualKeyCode::Escape),
+ state: ElementState::Pressed,
+ ..
+ },
+ ..
+ }
+ | WindowEvent::CloseRequested => {
+ *control_flow = ControlFlow::Exit;
+ }
+ _ => {}
+ },
+ Event::LoopDestroyed => {
+ log::info!("Closing");
+ gfx_select!(device => global.device_poll(device, true));
+ }
+ _ => {}
+ }
+ });
+ }
+}
diff --git a/gfx/wgpu/player/src/lib.rs b/gfx/wgpu/player/src/lib.rs
new file mode 100644
index 0000000000..140ff70503
--- /dev/null
+++ b/gfx/wgpu/player/src/lib.rs
@@ -0,0 +1,313 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*! This is a player library for WebGPU traces.
+ *
+ * # Notes
+ * - we call device_maintain_ids() before creating any refcounted resource,
+ * which is basically everything except for BGL and shader modules,
+ * so that we don't accidentally try to use the same ID.
+!*/
+
+use wgc::device::trace;
+
+use std::{borrow::Cow, fmt::Debug, fs, marker::PhantomData, path::Path};
+
+#[derive(Debug)]
+pub struct IdentityPassThrough<I>(PhantomData<I>);
+
+impl<I: Clone + Debug + wgc::id::TypedId> wgc::hub::IdentityHandler<I> for IdentityPassThrough<I> {
+ type Input = I;
+ fn process(&self, id: I, backend: wgt::Backend) -> I {
+ let (index, epoch, _backend) = id.unzip();
+ I::zip(index, epoch, backend)
+ }
+ fn free(&self, _id: I) {}
+}
+
+pub struct IdentityPassThroughFactory;
+
+impl<I: Clone + Debug + wgc::id::TypedId> wgc::hub::IdentityHandlerFactory<I>
+ for IdentityPassThroughFactory
+{
+ type Filter = IdentityPassThrough<I>;
+ fn spawn(&self, _min_index: u32) -> Self::Filter {
+ IdentityPassThrough(PhantomData)
+ }
+}
+impl wgc::hub::GlobalIdentityHandlerFactory for IdentityPassThroughFactory {}
+
+pub trait GlobalPlay {
+ fn encode_commands<B: wgc::hub::GfxBackend>(
+ &self,
+ encoder: wgc::id::CommandEncoderId,
+ commands: Vec<trace::Command>,
+ ) -> wgc::id::CommandBufferId;
+ fn process<B: wgc::hub::GfxBackend>(
+ &self,
+ device: wgc::id::DeviceId,
+ action: trace::Action,
+ dir: &Path,
+ comb_manager: &mut wgc::hub::IdentityManager,
+ );
+}
+
+impl GlobalPlay for wgc::hub::Global<IdentityPassThroughFactory> {
+ fn encode_commands<B: wgc::hub::GfxBackend>(
+ &self,
+ encoder: wgc::id::CommandEncoderId,
+ commands: Vec<trace::Command>,
+ ) -> wgc::id::CommandBufferId {
+ for command in commands {
+ match command {
+ trace::Command::CopyBufferToBuffer {
+ src,
+ src_offset,
+ dst,
+ dst_offset,
+ size,
+ } => self
+ .command_encoder_copy_buffer_to_buffer::<B>(
+ encoder, src, src_offset, dst, dst_offset, size,
+ )
+ .unwrap(),
+ trace::Command::CopyBufferToTexture { src, dst, size } => self
+ .command_encoder_copy_buffer_to_texture::<B>(encoder, &src, &dst, &size)
+ .unwrap(),
+ trace::Command::CopyTextureToBuffer { src, dst, size } => self
+ .command_encoder_copy_texture_to_buffer::<B>(encoder, &src, &dst, &size)
+ .unwrap(),
+ trace::Command::CopyTextureToTexture { src, dst, size } => self
+ .command_encoder_copy_texture_to_texture::<B>(encoder, &src, &dst, &size)
+ .unwrap(),
+ trace::Command::RunComputePass { base } => {
+ self.command_encoder_run_compute_pass_impl::<B>(encoder, base.as_ref())
+ .unwrap();
+ }
+ trace::Command::RunRenderPass {
+ base,
+ target_colors,
+ target_depth_stencil,
+ } => {
+ self.command_encoder_run_render_pass_impl::<B>(
+ encoder,
+ base.as_ref(),
+ &target_colors,
+ target_depth_stencil.as_ref(),
+ )
+ .unwrap();
+ }
+ }
+ }
+ let (cmd_buf, error) = self
+ .command_encoder_finish::<B>(encoder, &wgt::CommandBufferDescriptor { label: None });
+ if let Some(e) = error {
+ panic!("{:?}", e);
+ }
+ cmd_buf
+ }
+
+ fn process<B: wgc::hub::GfxBackend>(
+ &self,
+ device: wgc::id::DeviceId,
+ action: trace::Action,
+ dir: &Path,
+ comb_manager: &mut wgc::hub::IdentityManager,
+ ) {
+ use wgc::device::trace::Action as A;
+ log::info!("action {:?}", action);
+ match action {
+ A::Init { .. } => panic!("Unexpected Action::Init: has to be the first action only"),
+ A::CreateSwapChain { .. } | A::PresentSwapChain(_) => {
+ panic!("Unexpected SwapChain action: winit feature is not enabled")
+ }
+ A::CreateBuffer(id, desc) => {
+ self.device_maintain_ids::<B>(device).unwrap();
+ let (_, error) = self.device_create_buffer::<B>(device, &desc, id);
+ if let Some(e) = error {
+ panic!("{:?}", e);
+ }
+ }
+ A::FreeBuffer(id) => {
+ self.buffer_destroy::<B>(id).unwrap();
+ }
+ A::DestroyBuffer(id) => {
+ self.buffer_drop::<B>(id, true);
+ }
+ A::CreateTexture(id, desc) => {
+ self.device_maintain_ids::<B>(device).unwrap();
+ let (_, error) = self.device_create_texture::<B>(device, &desc, id);
+ if let Some(e) = error {
+ panic!("{:?}", e);
+ }
+ }
+ A::FreeTexture(id) => {
+ self.texture_destroy::<B>(id).unwrap();
+ }
+ A::DestroyTexture(id) => {
+ self.texture_drop::<B>(id, true);
+ }
+ A::CreateTextureView {
+ id,
+ parent_id,
+ desc,
+ } => {
+ self.device_maintain_ids::<B>(device).unwrap();
+ let (_, error) = self.texture_create_view::<B>(parent_id, &desc, id);
+ if let Some(e) = error {
+ panic!("{:?}", e);
+ }
+ }
+ A::DestroyTextureView(id) => {
+ self.texture_view_drop::<B>(id).unwrap();
+ }
+ A::CreateSampler(id, desc) => {
+ self.device_maintain_ids::<B>(device).unwrap();
+ let (_, error) = self.device_create_sampler::<B>(device, &desc, id);
+ if let Some(e) = error {
+ panic!("{:?}", e);
+ }
+ }
+ A::DestroySampler(id) => {
+ self.sampler_drop::<B>(id);
+ }
+ A::GetSwapChainTexture { id, parent_id } => {
+ if let Some(id) = id {
+ self.swap_chain_get_current_texture_view::<B>(parent_id, id)
+ .unwrap()
+ .view_id
+ .unwrap();
+ }
+ }
+ A::CreateBindGroupLayout(id, desc) => {
+ let (_, error) = self.device_create_bind_group_layout::<B>(device, &desc, id);
+ if let Some(e) = error {
+ panic!("{:?}", e);
+ }
+ }
+ A::DestroyBindGroupLayout(id) => {
+ self.bind_group_layout_drop::<B>(id);
+ }
+ A::CreatePipelineLayout(id, desc) => {
+ self.device_maintain_ids::<B>(device).unwrap();
+ let (_, error) = self.device_create_pipeline_layout::<B>(device, &desc, id);
+ if let Some(e) = error {
+ panic!("{:?}", e);
+ }
+ }
+ A::DestroyPipelineLayout(id) => {
+ self.pipeline_layout_drop::<B>(id);
+ }
+ A::CreateBindGroup(id, desc) => {
+ self.device_maintain_ids::<B>(device).unwrap();
+ let (_, error) = self.device_create_bind_group::<B>(device, &desc, id);
+ if let Some(e) = error {
+ panic!("{:?}", e);
+ }
+ }
+ A::DestroyBindGroup(id) => {
+ self.bind_group_drop::<B>(id);
+ }
+ A::CreateShaderModule { id, data, label } => {
+ let desc = wgc::pipeline::ShaderModuleDescriptor {
+ source: if data.ends_with(".wgsl") {
+ let code = fs::read_to_string(dir.join(data)).unwrap();
+ wgc::pipeline::ShaderModuleSource::Wgsl(Cow::Owned(code))
+ } else {
+ let byte_vec = fs::read(dir.join(data)).unwrap();
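+ // group the raw bytes into 4-byte little-endian SPIR-V words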
+ let spv = byte_vec
+ .chunks(4)
+ .map(|c| u32::from_le_bytes([c[0], c[1], c[2], c[3]]))
+ .collect::<Vec<_>>();
+ wgc::pipeline::ShaderModuleSource::SpirV(Cow::Owned(spv))
+ },
+ label,
+ };
+ let (_, error) = self.device_create_shader_module::<B>(device, &desc, id);
+ if let Some(e) = error {
+ panic!("{:?}", e);
+ }
+ }
+ A::DestroyShaderModule(id) => {
+ self.shader_module_drop::<B>(id);
+ }
+ A::CreateComputePipeline(id, desc) => {
+ self.device_maintain_ids::<B>(device).unwrap();
+ let (_, _, error) =
+ self.device_create_compute_pipeline::<B>(device, &desc, id, None);
+ if let Some(e) = error {
+ panic!("{:?}", e);
+ }
+ }
+ A::DestroyComputePipeline(id) => {
+ self.compute_pipeline_drop::<B>(id);
+ }
+ A::CreateRenderPipeline(id, desc) => {
+ self.device_maintain_ids::<B>(device).unwrap();
+ let (_, _, error) =
+ self.device_create_render_pipeline::<B>(device, &desc, id, None);
+ if let Some(e) = error {
+ panic!("{:?}", e);
+ }
+ }
+ A::DestroyRenderPipeline(id) => {
+ self.render_pipeline_drop::<B>(id);
+ }
+ A::CreateRenderBundle { id, desc, base } => {
+ let bundle =
+ wgc::command::RenderBundleEncoder::new(&desc, device, Some(base)).unwrap();
+ let (_, error) = self.render_bundle_encoder_finish::<B>(
+ bundle,
+ &wgt::RenderBundleDescriptor { label: desc.label },
+ id,
+ );
+ if let Some(e) = error {
+ panic!("{:?}", e);
+ }
+ }
+ A::DestroyRenderBundle(id) => {
+ self.render_bundle_drop::<B>(id);
+ }
+ A::WriteBuffer {
+ id,
+ data,
+ range,
+ queued,
+ } => {
+ let bin = std::fs::read(dir.join(data)).unwrap();
+ let size = (range.end - range.start) as usize;
+ if queued {
+ self.queue_write_buffer::<B>(device, id, range.start, &bin)
+ .unwrap();
+ } else {
+ self.device_wait_for_buffer::<B>(device, id).unwrap();
+ self.device_set_buffer_sub_data::<B>(device, id, range.start, &bin[..size])
+ .unwrap();
+ }
+ }
+ A::WriteTexture {
+ to,
+ data,
+ layout,
+ size,
+ } => {
+ let bin = std::fs::read(dir.join(data)).unwrap();
+ self.queue_write_texture::<B>(device, &to, &bin, &layout, &size)
+ .unwrap();
+ }
+ A::Submit(_index, commands) => {
+ let (encoder, error) = self.device_create_command_encoder::<B>(
+ device,
+ &wgt::CommandEncoderDescriptor { label: None },
+ comb_manager.alloc(device.backend()),
+ );
+ if let Some(e) = error {
+ panic!("{:?}", e);
+ }
+ let cmdbuf = self.encode_commands::<B>(encoder, commands);
+ self.queue_submit::<B>(device, &[cmdbuf]).unwrap();
+ }
+ }
+ }
+}
diff --git a/gfx/wgpu/player/tests/data/all.ron b/gfx/wgpu/player/tests/data/all.ron
new file mode 100644
index 0000000000..ef76e7dac8
--- /dev/null
+++ b/gfx/wgpu/player/tests/data/all.ron
@@ -0,0 +1,8 @@
+(
+ backends: (bits: 0xF),
+ tests: [
+ "buffer-copy.ron",
+ "bind-group.ron",
+ "quad.ron",
+ ],
+)
\ No newline at end of file
diff --git a/gfx/wgpu/player/tests/data/bind-group.ron b/gfx/wgpu/player/tests/data/bind-group.ron
new file mode 100644
index 0000000000..8a4ecb3c5f
--- /dev/null
+++ b/gfx/wgpu/player/tests/data/bind-group.ron
@@ -0,0 +1,75 @@
+(
+ features: (bits: 0x0),
+ expectations: [], //not crash!
+ actions: [
+ CreatePipelineLayout(Id(0, 1, Empty), (
+ label: Some("empty"),
+ bind_group_layouts: [],
+ push_constant_ranges: [],
+ )),
+ CreateShaderModule(
+ id: Id(0, 1, Empty),
+ label: None,
+ data: "empty.comp.spv",
+ ),
+ CreateComputePipeline(Id(0, 1, Empty), (
+ label: None,
+ layout: Some(Id(0, 1, Empty)),
+ compute_stage: (
+ module: Id(0, 1, Empty),
+ entry_point: "main",
+ ),
+ )),
+ CreateBuffer(Id(0, 1, Empty), (
+ label: None,
+ size: 16,
+ usage: (
+ bits: 64,
+ ),
+ mapped_at_creation: false,
+ )),
+ CreateBindGroupLayout(Id(0, 1, Empty), (
+ label: None,
+ entries: [
+ (
+ binding: 0,
+ visibility: (bits: 0x3),
+ ty: Buffer(
+ ty: Uniform,
+ ),
+ ),
+ ],
+ )),
+ CreateBindGroup(Id(0, 1, Empty), (
+ label: None,
+ layout: Id(0, 1, Empty),
+ entries: [
+ (
+ binding: 0,
+ resource: Buffer((
+ buffer_id: Id(0, 1, Empty),
+ offset: 0,
+ size: None,
+ )),
+ )
+ ],
+ )),
+ Submit(1, [
+ RunComputePass(
+ base: (
+ commands: [
+ SetPipeline(Id(0, 1, Empty)),
+ SetBindGroup(
+ index: 0,
+ num_dynamic_offsets: 0,
+ bind_group_id: Id(0, 1, Empty),
+ ),
+ ],
+ dynamic_offsets: [],
+ string_data: [],
+ push_constant_data: [],
+ ),
+ ),
+ ]),
+ ],
+)
\ No newline at end of file
diff --git a/gfx/wgpu/player/tests/data/buffer-copy.ron b/gfx/wgpu/player/tests/data/buffer-copy.ron
new file mode 100644
index 0000000000..b9cb8d4546
--- /dev/null
+++ b/gfx/wgpu/player/tests/data/buffer-copy.ron
@@ -0,0 +1,34 @@
+(
+ features: (bits: 0x0),
+ expectations: [
+ (
+ name: "basic",
+ buffer: (index: 0, epoch: 1),
+ offset: 0,
+ data: Raw([0x00, 0x00, 0x80, 0xBF]),
+ )
+ ],
+ actions: [
+ CreateBuffer(
+ Id(0, 1, Empty),
+ (
+ label: Some("dummy"),
+ size: 16,
+ usage: (
+ bits: 41,
+ ),
+ mapped_at_creation: false,
+ ),
+ ),
+ WriteBuffer(
+ id: Id(0, 1, Empty),
+ data: "data1.bin",
+ range: (
+ start: 0,
+ end: 16,
+ ),
+ queued: true,
+ ),
+ Submit(1, []),
+ ],
+)
\ No newline at end of file
diff --git a/gfx/wgpu/player/tests/data/data1.bin b/gfx/wgpu/player/tests/data/data1.bin
new file mode 100644
index 0000000000..1e54fc7a5e
--- /dev/null
+++ b/gfx/wgpu/player/tests/data/data1.bin
Binary files differ
diff --git a/gfx/wgpu/player/tests/data/empty.comp b/gfx/wgpu/player/tests/data/empty.comp
new file mode 100644
index 0000000000..e1ed54725b
--- /dev/null
+++ b/gfx/wgpu/player/tests/data/empty.comp
@@ -0,0 +1,5 @@
+#version 450
+layout(local_size_x = 1) in;
+
+void main() {
+}
diff --git a/gfx/wgpu/player/tests/data/empty.comp.spv b/gfx/wgpu/player/tests/data/empty.comp.spv
new file mode 100644
index 0000000000..484a9bca4f
--- /dev/null
+++ b/gfx/wgpu/player/tests/data/empty.comp.spv
Binary files differ
diff --git a/gfx/wgpu/player/tests/data/quad.bin b/gfx/wgpu/player/tests/data/quad.bin
new file mode 100644
index 0000000000..171fa2bbce
--- /dev/null
+++ b/gfx/wgpu/player/tests/data/quad.bin
Binary files differ
ÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿ
ÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿ
ÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿ \ No newline at end of file
diff --git a/gfx/wgpu/player/tests/data/quad.frag b/gfx/wgpu/player/tests/data/quad.frag
new file mode 100644
index 0000000000..d3b9253bda
--- /dev/null
+++ b/gfx/wgpu/player/tests/data/quad.frag
@@ -0,0 +1,7 @@
+#version 450
+
+layout(location = 0) out vec4 outColor;
+
+void main() {
+ outColor = vec4(1.0, 1.0, 1.0, 1.0);
+} \ No newline at end of file
diff --git a/gfx/wgpu/player/tests/data/quad.frag.spv b/gfx/wgpu/player/tests/data/quad.frag.spv
new file mode 100644
index 0000000000..2abc8a88e8
--- /dev/null
+++ b/gfx/wgpu/player/tests/data/quad.frag.spv
Binary files differ
diff --git a/gfx/wgpu/player/tests/data/quad.ron b/gfx/wgpu/player/tests/data/quad.ron
new file mode 100644
index 0000000000..9bba47dc7f
--- /dev/null
+++ b/gfx/wgpu/player/tests/data/quad.ron
@@ -0,0 +1,153 @@
+(
+ features: (bits: 0x0),
+ expectations: [
+ (
+ name: "Quad",
+ buffer: (index: 0, epoch: 1),
+ offset: 0,
+ data: File("quad.bin", 16384),
+ )
+ ],
+ actions: [
+ CreateShaderModule(
+ id: Id(0, 1, Empty),
+ data: "quad.vert.spv",
+ ),
+ CreateShaderModule(
+ id: Id(1, 1, Empty),
+ data: "quad.frag.spv",
+ ),
+ CreateTexture(Id(0, 1, Empty), (
+ label: Some("Output Texture"),
+ size: (
+ width: 64,
+ height: 64,
+ depth: 1,
+ ),
+ mip_level_count: 1,
+ sample_count: 1,
+ dimension: D2,
+ format: Rgba8Unorm,
+ usage: (
+ bits: 27,
+ ),
+ )),
+ CreateTextureView(
+ id: Id(0, 1, Empty),
+ parent_id: Id(0, 1, Empty),
+ desc: (),
+ ),
+ CreateBuffer(
+ Id(0, 1, Empty),
+ (
+ label: Some("Output Buffer"),
+ size: 16384,
+ usage: (
+ bits: 9,
+ ),
+ mapped_at_creation: false,
+ ),
+ ),
+ CreatePipelineLayout(Id(0, 1, Empty), (
+ label: None,
+ bind_group_layouts: [],
+ push_constant_ranges: [],
+ )),
+ CreateRenderPipeline(Id(0, 1, Empty), (
+ label: None,
+ layout: Some(Id(0, 1, Empty)),
+ vertex_stage: (
+ module: Id(0, 1, Empty),
+ entry_point: "main",
+ ),
+ fragment_stage: Some((
+ module: Id(1, 1, Empty),
+ entry_point: "main",
+ )),
+ rasterization_state: None,
+ primitive_topology: TriangleList,
+ color_states: [
+ (
+ format: Rgba8Unorm,
+ alpha_blend: (
+ src_factor: One,
+ dst_factor: Zero,
+ operation: Add,
+ ),
+ color_blend: (
+ src_factor: One,
+ dst_factor: Zero,
+ operation: Add,
+ ),
+ write_mask: (
+ bits: 15,
+ ),
+ ),
+ ],
+ depth_stencil_state: None,
+ vertex_state: (
+ index_format: Uint16,
+ vertex_buffers: [],
+ ),
+ sample_count: 1,
+ sample_mask: 4294967295,
+ alpha_to_coverage_enabled: false,
+ )),
+ Submit(1, [
+ RunRenderPass(
+ base: (
+ commands: [
+ SetPipeline(Id(0, 1, Empty)),
+ Draw(
+ vertex_count: 3,
+ instance_count: 1,
+ first_vertex: 0,
+ first_instance: 0,
+ ),
+ ],
+ dynamic_offsets: [],
+ string_data: [],
+ push_constant_data: [],
+ ),
+ target_colors: [
+ (
+ attachment: Id(0, 1, Empty),
+ resolve_target: None,
+ channel: (
+ load_op: Clear,
+ store_op: Store,
+ clear_value: (
+ r: 0,
+ g: 0,
+ b: 0,
+ a: 1,
+ ),
+ read_only: false,
+ ),
+ ),
+ ],
+ target_depth_stencil: None,
+ ),
+ CopyTextureToBuffer(
+ src: (
+ texture: Id(0, 1, Empty),
+ mip_level: 0,
+ array_layer: 0,
+ ),
+ dst: (
+ buffer: Id(0, 1, Empty),
+ layout: (
+ offset: 0,
+ bytes_per_row: 256,
+ rows_per_image: 64,
+ ),
+ ),
+ size: (
+ width: 64,
+ height: 64,
+ depth: 1,
+ ),
+ ),
+ ]),
+ ],
+) \ No newline at end of file
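The sizes in the trace above are mutually constrained: a 64x64 Rgba8Unorm target at 4 bytes per pixel gives 256-byte rows (which also satisfies wgpu's 256-byte row alignment for texture-to-buffer copies), and 64 such rows fill exactly the 16384-byte readback buffer that quad.bin is compared against. A standalone Rust sketch of that arithmetic, illustrative only and not part of the tree:

// Why the numbers in quad.ron agree (sketch only).
const WIDTH: u64 = 64;
const HEIGHT: u64 = 64;
const BYTES_PER_PIXEL: u64 = 4; // Rgba8Unorm
const BYTES_PER_ROW: u64 = WIDTH * BYTES_PER_PIXEL; // 256, matches `bytes_per_row`

fn main() {
    assert_eq!(BYTES_PER_ROW % 256, 0); // texture->buffer copies need a 256-byte row pitch
    assert_eq!(BYTES_PER_ROW * HEIGHT, 16384); // matches the buffer size and File("quad.bin", 16384)
}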
diff --git a/gfx/wgpu/player/tests/data/quad.vert b/gfx/wgpu/player/tests/data/quad.vert
new file mode 100644
index 0000000000..e8dbb80ba6
--- /dev/null
+++ b/gfx/wgpu/player/tests/data/quad.vert
@@ -0,0 +1,10 @@
+#version 450
+
+out gl_PerVertex {
+ vec4 gl_Position;
+};
+
+void main() {
+ vec2 pos = vec2(gl_VertexIndex == 2 ? 3.0 : -1.0, gl_VertexIndex == 1 ? 3.0 : -1.0);
+ gl_Position = vec4(pos, 0.0, 1.0);
+} \ No newline at end of file
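This vertex shader uses the usual single oversized-triangle trick: for gl_VertexIndex 0, 1, 2 it emits (-1,-1), (-1,3), and (3,-1), a triangle that covers all of clip space, so the test needs no vertex buffer and Draw with vertex_count: 3 fills the whole 64x64 target. A standalone Rust sketch, not part of the tree, of the positions it produces:

// Mirrors the ternaries in quad.vert (sketch only).
fn vertex(i: u32) -> (f32, f32) {
    (
        if i == 2 { 3.0 } else { -1.0 }, // x
        if i == 1 { 3.0 } else { -1.0 }, // y
    )
}

fn main() {
    assert_eq!(vertex(0), (-1.0, -1.0));
    assert_eq!(vertex(1), (-1.0, 3.0));
    assert_eq!(vertex(2), (3.0, -1.0)); // the clip-space box [-1,1]^2 lies inside this triangle
}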
diff --git a/gfx/wgpu/player/tests/data/quad.vert.spv b/gfx/wgpu/player/tests/data/quad.vert.spv
new file mode 100644
index 0000000000..5ed23e2e16
--- /dev/null
+++ b/gfx/wgpu/player/tests/data/quad.vert.spv
Binary files differ
diff --git a/gfx/wgpu/player/tests/test.rs b/gfx/wgpu/player/tests/test.rs
new file mode 100644
index 0000000000..1b33fff58f
--- /dev/null
+++ b/gfx/wgpu/player/tests/test.rs
@@ -0,0 +1,217 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*! Tester for WebGPU
+ * It enumerates the available backends on the system,
+ * and runs the tests through them.
+ *
+ * Test requirements:
+ * - all IDs have the backend `Empty`
+ * - all expected buffers have `MAP_READ` usage
+ * - last action is `Submit`
+ * - no swapchain use
+!*/
+
+use player::{GlobalPlay, IdentityPassThroughFactory};
+use std::{
+ fs::{read_to_string, File},
+ io::{Read, Seek, SeekFrom},
+ path::{Path, PathBuf},
+ ptr, slice,
+};
+
+#[derive(serde::Deserialize)]
+struct RawId {
+ index: u32,
+ epoch: u32,
+}
+
+#[derive(serde::Deserialize)]
+enum ExpectedData {
+ Raw(Vec<u8>),
+ File(String, usize),
+}
+
+impl ExpectedData {
+ fn len(&self) -> usize {
+ match self {
+ ExpectedData::Raw(vec) => vec.len(),
+ ExpectedData::File(_, size) => *size,
+ }
+ }
+}
+
+#[derive(serde::Deserialize)]
+struct Expectation {
+ name: String,
+ buffer: RawId,
+ offset: wgt::BufferAddress,
+ data: ExpectedData,
+}
+
+#[derive(serde::Deserialize)]
+struct Test<'a> {
+ features: wgt::Features,
+ expectations: Vec<Expectation>,
+ actions: Vec<wgc::device::trace::Action<'a>>,
+}
+
+extern "C" fn map_callback(status: wgc::resource::BufferMapAsyncStatus, _user_data: *mut u8) {
+ match status {
+ wgc::resource::BufferMapAsyncStatus::Success => (),
+ _ => panic!("Unable to map"),
+ }
+}
+
+impl Test<'_> {
+ fn load(path: PathBuf, backend: wgt::Backend) -> Self {
+ let backend_name = match backend {
+ wgt::Backend::Vulkan => "Vulkan",
+ wgt::Backend::Metal => "Metal",
+ wgt::Backend::Dx12 => "Dx12",
+ wgt::Backend::Dx11 => "Dx11",
+ wgt::Backend::Gl => "Gl",
+ _ => unreachable!(),
+ };
+ let string = read_to_string(path).unwrap().replace("Empty", backend_name);
+ ron::de::from_str(&string).unwrap()
+ }
+
+ fn run(
+ self,
+ dir: &Path,
+ global: &wgc::hub::Global<IdentityPassThroughFactory>,
+ adapter: wgc::id::AdapterId,
+ test_num: u32,
+ ) {
+ let backend = adapter.backend();
+ let device = wgc::id::TypedId::zip(test_num, 0, backend);
+ let (_, error) = wgc::gfx_select!(adapter => global.adapter_request_device(
+ adapter,
+ &wgt::DeviceDescriptor {
+ label: None,
+ features: self.features | wgt::Features::MAPPABLE_PRIMARY_BUFFERS,
+ limits: wgt::Limits::default(),
+ shader_validation: true,
+ },
+ None,
+ device
+ ));
+ if let Some(e) = error {
+ panic!("{:?}", e);
+ }
+
+ let mut command_buffer_id_manager = wgc::hub::IdentityManager::default();
+ println!("\t\t\tRunning...");
+ for action in self.actions {
+ wgc::gfx_select!(device => global.process(device, action, dir, &mut command_buffer_id_manager));
+ }
+ println!("\t\t\tMapping...");
+ for expect in &self.expectations {
+ let buffer = wgc::id::TypedId::zip(expect.buffer.index, expect.buffer.epoch, backend);
+ wgc::gfx_select!(device => global.buffer_map_async(
+ buffer,
+ expect.offset .. expect.offset+expect.data.len() as wgt::BufferAddress,
+ wgc::resource::BufferMapOperation {
+ host: wgc::device::HostMap::Read,
+ callback: map_callback,
+ user_data: ptr::null_mut(),
+ }
+ ))
+ .unwrap();
+ }
+
+ println!("\t\t\tWaiting...");
+ wgc::gfx_select!(device => global.device_poll(device, true)).unwrap();
+
+ for expect in self.expectations {
+ println!("\t\t\tChecking {}", expect.name);
+ let buffer = wgc::id::TypedId::zip(expect.buffer.index, expect.buffer.epoch, backend);
+ let ptr =
+ wgc::gfx_select!(device => global.buffer_get_mapped_range(buffer, expect.offset, None))
+ .unwrap();
+ let contents = unsafe { slice::from_raw_parts(ptr, expect.data.len()) };
+ let expected_data = match expect.data {
+ ExpectedData::Raw(vec) => vec,
+ ExpectedData::File(name, size) => {
+ let mut bin = vec![0; size];
+ let mut file = File::open(dir.join(name)).unwrap();
+ file.seek(SeekFrom::Start(expect.offset)).unwrap();
+ file.read_exact(&mut bin[..]).unwrap();
+
+ bin
+ }
+ };
+
+ assert_eq!(&expected_data[..], contents);
+ }
+
+ wgc::gfx_select!(device => global.clear_backend(()));
+ }
+}
+
+#[derive(serde::Deserialize)]
+struct Corpus {
+ backends: wgt::BackendBit,
+ tests: Vec<String>,
+}
+
+const BACKENDS: &[wgt::Backend] = &[
+ wgt::Backend::Vulkan,
+ wgt::Backend::Metal,
+ wgt::Backend::Dx12,
+ wgt::Backend::Dx11,
+ wgt::Backend::Gl,
+];
+
+impl Corpus {
+ fn run_from(path: PathBuf) {
+ println!("Corpus {:?}", path);
+ let dir = path.parent().unwrap();
+ let corpus: Corpus = ron::de::from_reader(File::open(&path).unwrap()).unwrap();
+
+ let global = wgc::hub::Global::new("test", IdentityPassThroughFactory, corpus.backends);
+ for &backend in BACKENDS {
+ if !corpus.backends.contains(backend.into()) {
+ continue;
+ }
+ let adapter = match global.request_adapter(
+ &wgc::instance::RequestAdapterOptions {
+ power_preference: wgt::PowerPreference::LowPower,
+ compatible_surface: None,
+ },
+ wgc::instance::AdapterInputs::IdSet(
+ &[wgc::id::TypedId::zip(0, 0, backend)],
+ |id| id.backend(),
+ ),
+ ) {
+ Ok(adapter) => adapter,
+ Err(_) => continue,
+ };
+
+ println!("\tBackend {:?}", backend);
+ let supported_features =
+ wgc::gfx_select!(adapter => global.adapter_features(adapter)).unwrap();
+ let mut test_num = 0;
+ for test_path in &corpus.tests {
+ println!("\t\tTest '{:?}'", test_path);
+ let test = Test::load(dir.join(test_path), adapter.backend());
+ if !supported_features.contains(test.features) {
+ println!(
+ "\t\tSkipped due to missing features {:?}",
+ test.features - supported_features
+ );
+ continue;
+ }
+ test.run(dir, &global, adapter, test_num);
+ test_num += 1;
+ }
+ }
+ }
+}
+
+#[test]
+fn test_api() {
+ Corpus::run_from(PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("tests/data/all.ron"))
+}
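One detail worth calling out in Test::load above: traces are authored against the Empty backend, and the loader retargets them by rewriting the RON text before deserializing, after which raw (index, epoch) pairs are zipped into typed IDs for the selected backend. A simplified standalone sketch of the text-rewriting half, not the real API:

// Simplified sketch of the retargeting done in Test::load.
fn retarget(trace: &str, backend_name: &str) -> String {
    // Every Id(..., Empty) in the trace becomes Id(..., <backend>).
    trace.replace("Empty", backend_name)
}

fn main() {
    let src = "CreateBuffer(Id(0, 1, Empty), ...)";
    assert_eq!(retarget(src, "Vulkan"), "CreateBuffer(Id(0, 1, Vulkan), ...)");
}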
diff --git a/gfx/wgpu/rustfmt.toml b/gfx/wgpu/rustfmt.toml
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/gfx/wgpu/rustfmt.toml
diff --git a/gfx/wgpu/wgpu-core/Cargo.toml b/gfx/wgpu/wgpu-core/Cargo.toml
new file mode 100644
index 0000000000..2868fc9343
--- /dev/null
+++ b/gfx/wgpu/wgpu-core/Cargo.toml
@@ -0,0 +1,68 @@
+[package]
+name = "wgpu-core"
+version = "0.6.0"
+authors = ["wgpu developers"]
+edition = "2018"
+description = "WebGPU core logic on gfx-hal"
+homepage = "https://github.com/gfx-rs/wgpu"
+repository = "https://github.com/gfx-rs/wgpu"
+keywords = ["graphics"]
+license = "MPL-2.0"
+
+[lib]
+
+[features]
+default = []
+# Enable API tracing
+trace = ["ron", "serde", "wgt/trace"]
+# Enable API replaying
+replay = ["serde", "wgt/replay"]
+# Enable serializable compute/render passes, and bundle encoders.
+serial-pass = ["serde", "wgt/serde", "arrayvec/serde"]
+
+[dependencies]
+arrayvec = "0.5"
+bitflags = "1.0"
+copyless = "0.1"
+fxhash = "0.2"
+hal = { package = "gfx-hal", git = "https://github.com/gfx-rs/gfx", rev = "1d14789011cb892f4c1a205d3f8a87d479c2e354" }
+gfx-backend-empty = { git = "https://github.com/gfx-rs/gfx", rev = "1d14789011cb892f4c1a205d3f8a87d479c2e354" }
+parking_lot = "0.11"
+raw-window-handle = { version = "0.3", optional = true }
+ron = { version = "0.6", optional = true }
+serde = { version = "1.0", features = ["serde_derive"], optional = true }
+smallvec = "1"
+tracing = { version = "0.1", default-features = false, features = ["std"] }
+thiserror = "1"
+gpu-alloc = { git = "https://github.com/zakarumych/gpu-alloc", rev = "d07be73f9439a37c89f5b72f2500cbf0eb4ff613" }
+gpu-descriptor = { git = "https://github.com/zakarumych/gpu-descriptor", rev = "831460c4b5120d9a74744d542f39a95b9816b5ab"}
+
+[dependencies.naga]
+version = "0.2"
+git = "https://github.com/gfx-rs/naga"
+rev = "96c80738650822de35f77ab6a589f309460c8f39"
+features = ["spv-in", "spv-out", "wgsl-in"]
+
+[dependencies.wgt]
+path = "../wgpu-types"
+package = "wgpu-types"
+version = "0.6"
+
+[target.'cfg(all(unix, not(target_os = "ios"), not(target_os = "macos")))'.dependencies]
+gfx-backend-vulkan = { git = "https://github.com/gfx-rs/gfx", rev = "1d14789011cb892f4c1a205d3f8a87d479c2e354" }
+#gfx-backend-gl = { git = "https://github.com/gfx-rs/gfx", rev = "1d14789011cb892f4c1a205d3f8a87d479c2e354" }
+
+[target.'cfg(any(target_os = "ios", target_os = "macos"))'.dependencies]
+gfx-backend-metal = { git = "https://github.com/gfx-rs/gfx", rev = "1d14789011cb892f4c1a205d3f8a87d479c2e354" }
+gfx-backend-vulkan = { git = "https://github.com/gfx-rs/gfx", rev = "1d14789011cb892f4c1a205d3f8a87d479c2e354", optional = true }
+
+[target.'cfg(windows)'.dependencies]
+gfx-backend-dx12 = { git = "https://github.com/gfx-rs/gfx", rev = "1d14789011cb892f4c1a205d3f8a87d479c2e354" }
+gfx-backend-dx11 = { git = "https://github.com/gfx-rs/gfx", rev = "1d14789011cb892f4c1a205d3f8a87d479c2e354" }
+gfx-backend-vulkan = { git = "https://github.com/gfx-rs/gfx", rev = "1d14789011cb892f4c1a205d3f8a87d479c2e354" }
+
+[dev-dependencies]
+loom = "0.3"
+
+[build-dependencies]
+cfg_aliases = "0.1"
diff --git a/gfx/wgpu/wgpu-core/build.rs b/gfx/wgpu/wgpu-core/build.rs
new file mode 100644
index 0000000000..382e47db90
--- /dev/null
+++ b/gfx/wgpu/wgpu-core/build.rs
@@ -0,0 +1,20 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+fn main() {
+ // Setup cfg aliases
+ cfg_aliases::cfg_aliases! {
+ // Vendors/systems
+ ios: { target_os = "ios" },
+ macos: { target_os = "macos" },
+ apple: { any(ios, macos) },
+
+ // Backends
+ vulkan: { any(windows, all(unix, not(apple)), feature = "gfx-backend-vulkan") },
+ metal: { apple },
+ dx12: { windows },
+ dx11: { windows },
+ gl: { all(not(unix), not(apple), not(windows)) },
+ }
+}
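cfg_aliases emits `cargo:rustc-cfg` lines at build time, so the rest of the crate can gate code on the short alias names instead of repeating the full target expressions. A hypothetical usage sketch (the alias names come from the block above; the function itself is illustrative, not from the tree):

// Hypothetical consumer of the aliases defined in build.rs.
#[cfg(vulkan)]
fn compiled_backend() -> &'static str { "Vulkan" }
#[cfg(all(metal, not(vulkan)))]
fn compiled_backend() -> &'static str { "Metal" }
#[cfg(not(any(vulkan, metal)))]
fn compiled_backend() -> &'static str { "Empty" }

fn main() {
    println!("compiled-in backend: {}", compiled_backend());
}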
diff --git a/gfx/wgpu/wgpu-core/src/binding_model.rs b/gfx/wgpu/wgpu-core/src/binding_model.rs
new file mode 100644
index 0000000000..10126a9b97
--- /dev/null
+++ b/gfx/wgpu/wgpu-core/src/binding_model.rs
@@ -0,0 +1,632 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+use crate::{
+ device::{
+ descriptor::{DescriptorSet, DescriptorTotalCount},
+ DeviceError, SHADER_STAGE_COUNT,
+ },
+ hub::Resource,
+ id::{BindGroupLayoutId, BufferId, DeviceId, SamplerId, TextureViewId, Valid},
+ track::{TrackerSet, DUMMY_SELECTOR},
+ validation::{MissingBufferUsageError, MissingTextureUsageError},
+ FastHashMap, Label, LifeGuard, MultiRefCount, Stored, MAX_BIND_GROUPS,
+};
+
+use arrayvec::ArrayVec;
+
+#[cfg(feature = "replay")]
+use serde::Deserialize;
+#[cfg(feature = "trace")]
+use serde::Serialize;
+
+use std::{
+ borrow::{Borrow, Cow},
+ ops::Range,
+};
+
+use thiserror::Error;
+
+#[derive(Clone, Debug, Error)]
+pub enum CreateBindGroupLayoutError {
+ #[error(transparent)]
+ Device(#[from] DeviceError),
+ #[error("arrays of bindings unsupported for this type of binding")]
+ ArrayUnsupported,
+ #[error("conflicting binding at index {0}")]
+ ConflictBinding(u32),
+ #[error("required device feature is missing: {0:?}")]
+ MissingFeature(wgt::Features),
+ #[error(transparent)]
+ TooManyBindings(BindingTypeMaxCountError),
+}
+
+#[derive(Clone, Debug, Error)]
+pub enum CreateBindGroupError {
+ #[error(transparent)]
+ Device(#[from] DeviceError),
+ #[error("bind group layout is invalid")]
+ InvalidLayout,
+ #[error("buffer {0:?} is invalid or destroyed")]
+ InvalidBuffer(BufferId),
+ #[error("texture view {0:?} is invalid")]
+ InvalidTextureView(TextureViewId),
+ #[error("sampler {0:?} is invalid")]
+ InvalidSampler(SamplerId),
+ #[error("binding count declared with {expected} items, but {actual} items were provided")]
+ BindingArrayLengthMismatch { actual: usize, expected: usize },
+ #[error("bound buffer range {range:?} does not fit in buffer of size {size}")]
+ BindingRangeTooLarge {
+ range: Range<wgt::BufferAddress>,
+ size: u64,
+ },
+ #[error("buffer binding size {actual} is less than minimum {min}")]
+ BindingSizeTooSmall { actual: u64, min: u64 },
+ #[error("number of bindings in bind group descriptor ({actual}) does not match the number of bindings defined in the bind group layout ({expected})")]
+ BindingsNumMismatch { actual: usize, expected: usize },
+ #[error("binding {0} is used at least twice in the descriptor")]
+ DuplicateBinding(u32),
+ #[error("unable to find a corresponding declaration for the given binding {0}")]
+ MissingBindingDeclaration(u32),
+ #[error(transparent)]
+ MissingBufferUsage(#[from] MissingBufferUsageError),
+ #[error(transparent)]
+ MissingTextureUsage(#[from] MissingTextureUsageError),
+ #[error("required device features not enabled: {0:?}")]
+ MissingFeatures(wgt::Features),
+ #[error("binding declared as a single item, but bind group is using it as an array")]
+ SingleBindingExpected,
+ #[error("unable to create a bind group with a swap chain image")]
+ SwapChainImage,
+ #[error("buffer offset {0} does not respect `BIND_BUFFER_ALIGNMENT`")]
+ UnalignedBufferOffset(wgt::BufferAddress),
+ #[error("uniform buffer binding range exceeds `max_uniform_buffer_binding_size` limit")]
+ UniformBufferRangeTooLarge,
+ #[error("binding {binding} has a different type ({actual:?}) than the one in the layout ({expected:?})")]
+ WrongBindingType {
+ // Index of the binding
+ binding: u32,
+ // The type given to the function
+ actual: wgt::BindingType,
+ // Human-readable description of expected types
+ expected: &'static str,
+ },
+ #[error("the given sampler is/is not a comparison sampler, while the layout type indicates otherwise")]
+ WrongSamplerComparison,
+ #[error("bound texture views can not have both depth and stencil aspects enabled")]
+ DepthStencilAspect,
+}
+
+#[derive(Clone, Debug, Error)]
+pub enum BindingZone {
+ #[error("stage {0:?}")]
+ Stage(wgt::ShaderStage),
+ #[error("whole pipeline")]
+ Pipeline,
+}
+
+#[derive(Clone, Debug, Error)]
+#[error("too many bindings of type {kind:?} in {zone}, limit is {count}")]
+pub struct BindingTypeMaxCountError {
+ pub kind: BindingTypeMaxCountErrorKind,
+ pub zone: BindingZone,
+ pub count: u32,
+}
+
+#[derive(Clone, Debug)]
+pub enum BindingTypeMaxCountErrorKind {
+ DynamicUniformBuffers,
+ DynamicStorageBuffers,
+ SampledTextures,
+ Samplers,
+ StorageBuffers,
+ StorageTextures,
+ UniformBuffers,
+}
+
+#[derive(Debug, Default)]
+pub(crate) struct PerStageBindingTypeCounter {
+ vertex: u32,
+ fragment: u32,
+ compute: u32,
+}
+
+impl PerStageBindingTypeCounter {
+ pub(crate) fn add(&mut self, stage: wgt::ShaderStage, count: u32) {
+ if stage.contains(wgt::ShaderStage::VERTEX) {
+ self.vertex += count;
+ }
+ if stage.contains(wgt::ShaderStage::FRAGMENT) {
+ self.fragment += count;
+ }
+ if stage.contains(wgt::ShaderStage::COMPUTE) {
+ self.compute += count;
+ }
+ }
+
+ pub(crate) fn max(&self) -> (BindingZone, u32) {
+ let max_value = self.vertex.max(self.fragment.max(self.compute));
+ let mut stage = wgt::ShaderStage::NONE;
+ if max_value == self.vertex {
+ stage |= wgt::ShaderStage::VERTEX
+ }
+ if max_value == self.fragment {
+ stage |= wgt::ShaderStage::FRAGMENT
+ }
+ if max_value == self.compute {
+ stage |= wgt::ShaderStage::COMPUTE
+ }
+ (BindingZone::Stage(stage), max_value)
+ }
+
+ pub(crate) fn merge(&mut self, other: &Self) {
+ self.vertex = self.vertex.max(other.vertex);
+ self.fragment = self.fragment.max(other.fragment);
+ self.compute = self.compute.max(other.compute);
+ }
+
+ pub(crate) fn validate(
+ &self,
+ limit: u32,
+ kind: BindingTypeMaxCountErrorKind,
+ ) -> Result<(), BindingTypeMaxCountError> {
+ let (zone, count) = self.max();
+ if limit < count {
+ Err(BindingTypeMaxCountError { kind, zone, count })
+ } else {
+ Ok(())
+ }
+ }
+}
+
+#[derive(Debug, Default)]
+pub(crate) struct BindingTypeMaxCountValidator {
+ dynamic_uniform_buffers: u32,
+ dynamic_storage_buffers: u32,
+ sampled_textures: PerStageBindingTypeCounter,
+ samplers: PerStageBindingTypeCounter,
+ storage_buffers: PerStageBindingTypeCounter,
+ storage_textures: PerStageBindingTypeCounter,
+ uniform_buffers: PerStageBindingTypeCounter,
+}
+
+impl BindingTypeMaxCountValidator {
+ pub(crate) fn add_binding(&mut self, binding: &wgt::BindGroupLayoutEntry) {
+ let count = binding.count.map_or(1, |count| count.get());
+ match binding.ty {
+ wgt::BindingType::Buffer {
+ ty: wgt::BufferBindingType::Uniform,
+ has_dynamic_offset,
+ ..
+ } => {
+ self.uniform_buffers.add(binding.visibility, count);
+ if has_dynamic_offset {
+ self.dynamic_uniform_buffers += count;
+ }
+ }
+ wgt::BindingType::Buffer {
+ ty: wgt::BufferBindingType::Storage { .. },
+ has_dynamic_offset,
+ ..
+ } => {
+ self.storage_buffers.add(binding.visibility, count);
+ if has_dynamic_offset {
+ self.dynamic_storage_buffers += count;
+ }
+ }
+ wgt::BindingType::Sampler { .. } => {
+ self.samplers.add(binding.visibility, count);
+ }
+ wgt::BindingType::Texture { .. } => {
+ self.sampled_textures.add(binding.visibility, count);
+ }
+ wgt::BindingType::StorageTexture { .. } => {
+ self.storage_textures.add(binding.visibility, count);
+ }
+ }
+ }
+
+ pub(crate) fn merge(&mut self, other: &Self) {
+ self.dynamic_uniform_buffers += other.dynamic_uniform_buffers;
+ self.dynamic_storage_buffers += other.dynamic_storage_buffers;
+ self.sampled_textures.merge(&other.sampled_textures);
+ self.samplers.merge(&other.samplers);
+ self.storage_buffers.merge(&other.storage_buffers);
+ self.storage_textures.merge(&other.storage_textures);
+ self.uniform_buffers.merge(&other.uniform_buffers);
+ }
+
+ pub(crate) fn validate(&self, limits: &wgt::Limits) -> Result<(), BindingTypeMaxCountError> {
+ if limits.max_dynamic_uniform_buffers_per_pipeline_layout < self.dynamic_uniform_buffers {
+ return Err(BindingTypeMaxCountError {
+ kind: BindingTypeMaxCountErrorKind::DynamicUniformBuffers,
+ zone: BindingZone::Pipeline,
+ count: self.dynamic_uniform_buffers,
+ });
+ }
+ if limits.max_dynamic_storage_buffers_per_pipeline_layout < self.dynamic_storage_buffers {
+ return Err(BindingTypeMaxCountError {
+ kind: BindingTypeMaxCountErrorKind::DynamicStorageBuffers,
+ zone: BindingZone::Pipeline,
+ count: self.dynamic_storage_buffers,
+ });
+ }
+ self.sampled_textures.validate(
+ limits.max_sampled_textures_per_shader_stage,
+ BindingTypeMaxCountErrorKind::SampledTextures,
+ )?;
+ self.storage_buffers.validate(
+ limits.max_storage_buffers_per_shader_stage,
+ BindingTypeMaxCountErrorKind::StorageBuffers,
+ )?;
+ self.samplers.validate(
+ limits.max_samplers_per_shader_stage,
+ BindingTypeMaxCountErrorKind::Samplers,
+ )?;
+ self.storage_textures.validate(
+ limits.max_storage_textures_per_shader_stage,
+ BindingTypeMaxCountErrorKind::StorageTextures,
+ )?;
+ self.uniform_buffers.validate(
+ limits.max_uniform_buffers_per_shader_stage,
+ BindingTypeMaxCountErrorKind::UniformBuffers,
+ )?;
+ Ok(())
+ }
+}
+
+/// Bindable resource and the slot to bind it to.
+#[derive(Clone, Debug)]
+#[cfg_attr(feature = "trace", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub struct BindGroupEntry<'a> {
+ /// Slot for which binding provides resource. Corresponds to an entry of the same
+ /// binding index in the [`BindGroupLayoutDescriptor`].
+ pub binding: u32,
+ /// Resource to attach to the binding
+ pub resource: BindingResource<'a>,
+}
+
+/// Describes a group of bindings and the resources to be bound.
+#[derive(Clone, Debug)]
+#[cfg_attr(feature = "trace", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub struct BindGroupDescriptor<'a> {
+ /// Debug label of the bind group. This will show up in graphics debuggers for easy identification.
+ pub label: Label<'a>,
+ /// The [`BindGroupLayout`] that corresponds to this bind group.
+ pub layout: BindGroupLayoutId,
+ /// The resources to bind to this bind group.
+ pub entries: Cow<'a, [BindGroupEntry<'a>]>,
+}
+
+/// Describes a [`BindGroupLayout`].
+#[derive(Clone, Debug)]
+#[cfg_attr(feature = "trace", derive(serde::Serialize))]
+#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
+pub struct BindGroupLayoutDescriptor<'a> {
+ /// Debug label of the bind group layout. This will show up in graphics debuggers for easy identification.
+ pub label: Label<'a>,
+ /// Array of entries in this BindGroupLayout
+ pub entries: Cow<'a, [wgt::BindGroupLayoutEntry]>,
+}
+
+pub(crate) type BindEntryMap = FastHashMap<u32, wgt::BindGroupLayoutEntry>;
+
+#[derive(Debug)]
+pub struct BindGroupLayout<B: hal::Backend> {
+ pub(crate) raw: B::DescriptorSetLayout,
+ pub(crate) device_id: Stored<DeviceId>,
+ pub(crate) multi_ref_count: MultiRefCount,
+ pub(crate) entries: BindEntryMap,
+ pub(crate) desc_count: DescriptorTotalCount,
+ pub(crate) dynamic_count: usize,
+ pub(crate) count_validator: BindingTypeMaxCountValidator,
+ #[cfg(debug_assertions)]
+ pub(crate) label: String,
+}
+
+impl<B: hal::Backend> Resource for BindGroupLayout<B> {
+ const TYPE: &'static str = "BindGroupLayout";
+
+ fn life_guard(&self) -> &LifeGuard {
+ unreachable!()
+ }
+
+ fn label(&self) -> &str {
+ #[cfg(debug_assertions)]
+ return &self.label;
+ #[cfg(not(debug_assertions))]
+ return "";
+ }
+}
+
+#[derive(Clone, Debug, Error)]
+pub enum CreatePipelineLayoutError {
+ #[error(transparent)]
+ Device(#[from] DeviceError),
+ #[error("bind group layout {0:?} is invalid")]
+ InvalidBindGroupLayout(BindGroupLayoutId),
+ #[error(
+ "push constant at index {index} has range bound {bound} not aligned to {}",
+ wgt::PUSH_CONSTANT_ALIGNMENT
+ )]
+ MisalignedPushConstantRange { index: usize, bound: u32 },
+ #[error("device does not have required feature: {0:?}")]
+ MissingFeature(wgt::Features),
+ #[error("push constant range (index {index}) provides for stage(s) {provided:?} but there exists another range that provides stage(s) {intersected:?}. Each stage may only be provided by one range")]
+ MoreThanOnePushConstantRangePerStage {
+ index: usize,
+ provided: wgt::ShaderStage,
+ intersected: wgt::ShaderStage,
+ },
+ #[error("push constant at index {index} has range {}..{} which exceeds device push constant size limit 0..{max}", range.start, range.end)]
+ PushConstantRangeTooLarge {
+ index: usize,
+ range: Range<u32>,
+ max: u32,
+ },
+ #[error(transparent)]
+ TooManyBindings(BindingTypeMaxCountError),
+ #[error("bind group layout count {actual} exceeds device bind group limit {max}")]
+ TooManyGroups { actual: usize, max: usize },
+}
+
+#[derive(Clone, Debug, Error)]
+pub enum PushConstantUploadError {
+ #[error("provided push constant with indices {offset}..{end_offset} overruns matching push constant range at index {idx}, with stage(s) {:?} and indices {:?}", range.stages, range.range)]
+ TooLarge {
+ offset: u32,
+ end_offset: u32,
+ idx: usize,
+ range: wgt::PushConstantRange,
+ },
+ #[error("provided push constant is for stage(s) {actual:?}, stage with a partial match found at index {idx} with stage(s) {matched:?}, however push constants must be complete matches")]
+ PartialRangeMatch {
+ actual: wgt::ShaderStage,
+ idx: usize,
+ matched: wgt::ShaderStage,
+ },
+ #[error("provided push constant is for stage(s) {actual:?}, but intersects a push constant range (at index {idx}) with stage(s) {missing:?}. Push constants must provide the stages for all ranges they intersect")]
+ MissingStages {
+ actual: wgt::ShaderStage,
+ idx: usize,
+ missing: wgt::ShaderStage,
+ },
+ #[error("provided push constant is for stage(s) {actual:?}, however the pipeline layout has no push constant range for the stage(s) {unmatched:?}")]
+ UnmatchedStages {
+ actual: wgt::ShaderStage,
+ unmatched: wgt::ShaderStage,
+ },
+ #[error("provided push constant offset {0} does not respect `PUSH_CONSTANT_ALIGNMENT`")]
+ Unaligned(u32),
+}
+
+/// Describes a pipeline layout.
+///
+/// A `PipelineLayoutDescriptor` can be used to create a pipeline layout.
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+#[cfg_attr(feature = "trace", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub struct PipelineLayoutDescriptor<'a> {
+ /// Debug label of the pipeline layout. This will show up in graphics debuggers for easy identification.
+ pub label: Label<'a>,
+ /// Bind groups that this pipeline uses. The first entry will provide all the bindings for
+ /// "set = 0", second entry will provide all the bindings for "set = 1" etc.
+ pub bind_group_layouts: Cow<'a, [BindGroupLayoutId]>,
+ /// Set of push constant ranges this pipeline uses. Each shader stage that uses push constants
+ /// must define the range in push constant memory that corresponds to its single `layout(push_constant)`
+ /// uniform block.
+ ///
+ /// If this array is non-empty, the [`Features::PUSH_CONSTANTS`] must be enabled.
+ pub push_constant_ranges: Cow<'a, [wgt::PushConstantRange]>,
+}
+
+#[derive(Debug)]
+pub struct PipelineLayout<B: hal::Backend> {
+ pub(crate) raw: B::PipelineLayout,
+ pub(crate) device_id: Stored<DeviceId>,
+ pub(crate) life_guard: LifeGuard,
+ pub(crate) bind_group_layout_ids: ArrayVec<[Valid<BindGroupLayoutId>; MAX_BIND_GROUPS]>,
+ pub(crate) push_constant_ranges: ArrayVec<[wgt::PushConstantRange; SHADER_STAGE_COUNT]>,
+}
+
+impl<B: hal::Backend> PipelineLayout<B> {
+ /// Validate push constants match up with expected ranges.
+ pub(crate) fn validate_push_constant_ranges(
+ &self,
+ stages: wgt::ShaderStage,
+ offset: u32,
+ end_offset: u32,
+ ) -> Result<(), PushConstantUploadError> {
+ // Don't need to validate size against the push constant size limit here,
+ // as push constant ranges are already validated to be within bounds,
+ // and we validate that they are within the ranges.
+
+ if offset % wgt::PUSH_CONSTANT_ALIGNMENT != 0 {
+ return Err(PushConstantUploadError::Unaligned(offset));
+ }
+
+ // Push constant validation looks very complicated on the surface, but
+ // the problem can be range-reduced pretty well.
+ //
+ // Push constants require (summarized from the vulkan spec):
+ // 1. For each byte in the range and for each shader stage in stageFlags,
+ // there must be a push constant range in the layout that includes that
+ // byte and that stage.
+ // 2. For each byte in the range and for each push constant range that overlaps that byte,
+ // `stage` must include all stages in that push constant range’s `stage`.
+ //
+ // However there are some additional constraints that help us:
+ // 3. Each push constant range is the only range that can access its stages.
+ // i.e. if one range has VERTEX, no other range has VERTEX
+ //
+ // Therefore we can simplify the checks in the following ways:
+ // - Because 3 guarantees that the push constant range has a unique stage,
+ // when we check for 1, we can simply check that our entire updated range
+ // is within a push constant range. i.e. our range for a specific stage cannot
+ // intersect more than one push constant range.
+ let mut used_stages = wgt::ShaderStage::NONE;
+ for (idx, range) in self.push_constant_ranges.iter().enumerate() {
+ // contains not intersects due to 2
+ if stages.contains(range.stages) {
+ if !(range.range.start <= offset && end_offset <= range.range.end) {
+ return Err(PushConstantUploadError::TooLarge {
+ offset,
+ end_offset,
+ idx,
+ range: range.clone(),
+ });
+ }
+ used_stages |= range.stages;
+ } else if stages.intersects(range.stages) {
+ // Will be caught by used stages check below, but we can do this because of 1
+ // and is more helpful to the user.
+ return Err(PushConstantUploadError::PartialRangeMatch {
+ actual: stages,
+ idx,
+ matched: range.stages,
+ });
+ }
+
+ // The push constant range intersects range we are uploading
+ if offset < range.range.end && range.range.start < end_offset {
+ // But requires stages we don't provide
+ if !stages.contains(range.stages) {
+ return Err(PushConstantUploadError::MissingStages {
+ actual: stages,
+ idx,
+ missing: range.stages,
+ });
+ }
+ }
+ }
+ if used_stages != stages {
+ return Err(PushConstantUploadError::UnmatchedStages {
+ actual: stages,
+ unmatched: stages - used_stages,
+ });
+ }
+ Ok(())
+ }
+}
+
+impl<B: hal::Backend> Resource for PipelineLayout<B> {
+ const TYPE: &'static str = "PipelineLayout";
+
+ fn life_guard(&self) -> &LifeGuard {
+ &self.life_guard
+ }
+}
+
+#[repr(C)]
+#[derive(Clone, Debug, Hash, PartialEq)]
+#[cfg_attr(feature = "trace", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub struct BufferBinding {
+ pub buffer_id: BufferId,
+ pub offset: wgt::BufferAddress,
+ pub size: Option<wgt::BufferSize>,
+}
+
+// Note: Duplicated in `wgpu-rs` as `BindingResource`
+// They're different enough that it doesn't make sense to share a common type
+#[derive(Debug, Clone)]
+#[cfg_attr(feature = "trace", derive(serde::Serialize))]
+#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
+pub enum BindingResource<'a> {
+ Buffer(BufferBinding),
+ Sampler(SamplerId),
+ TextureView(TextureViewId),
+ TextureViewArray(Cow<'a, [TextureViewId]>),
+}
+
+#[derive(Clone, Debug, Error)]
+pub enum BindError {
+ #[error("number of dynamic offsets ({actual}) doesn't match the number of dynamic bindings in the bind group layout ({expected})")]
+ MismatchedDynamicOffsetCount { actual: usize, expected: usize },
+ #[error(
+ "dynamic binding at index {idx}: offset {offset} does not respect `BIND_BUFFER_ALIGNMENT`"
+ )]
+ UnalignedDynamicBinding { idx: usize, offset: u32 },
+ #[error("dynamic binding at index {idx} with offset {offset} would overrun the buffer (limit: {max})")]
+ DynamicBindingOutOfBounds { idx: usize, offset: u32, max: u64 },
+}
+
+#[derive(Debug)]
+pub struct BindGroupDynamicBindingData {
+ /// The maximum value the dynamic offset can have before running off the end of the buffer.
+ pub(crate) maximum_dynamic_offset: wgt::BufferAddress,
+}
+
+#[derive(Debug)]
+pub struct BindGroup<B: hal::Backend> {
+ pub(crate) raw: DescriptorSet<B>,
+ pub(crate) device_id: Stored<DeviceId>,
+ pub(crate) layout_id: Valid<BindGroupLayoutId>,
+ pub(crate) life_guard: LifeGuard,
+ pub(crate) used: TrackerSet,
+ pub(crate) dynamic_binding_info: Vec<BindGroupDynamicBindingData>,
+}
+
+impl<B: hal::Backend> BindGroup<B> {
+ pub(crate) fn validate_dynamic_bindings(
+ &self,
+ offsets: &[wgt::DynamicOffset],
+ ) -> Result<(), BindError> {
+ if self.dynamic_binding_info.len() != offsets.len() {
+ return Err(BindError::MismatchedDynamicOffsetCount {
+ expected: self.dynamic_binding_info.len(),
+ actual: offsets.len(),
+ });
+ }
+
+ for (idx, (info, &offset)) in self
+ .dynamic_binding_info
+ .iter()
+ .zip(offsets.iter())
+ .enumerate()
+ {
+ if offset as wgt::BufferAddress % wgt::BIND_BUFFER_ALIGNMENT != 0 {
+ return Err(BindError::UnalignedDynamicBinding { idx, offset });
+ }
+
+ if offset as wgt::BufferAddress > info.maximum_dynamic_offset {
+ return Err(BindError::DynamicBindingOutOfBounds {
+ idx,
+ offset,
+ max: info.maximum_dynamic_offset,
+ });
+ }
+ }
+
+ Ok(())
+ }
+}
+
+impl<B: hal::Backend> Borrow<()> for BindGroup<B> {
+ fn borrow(&self) -> &() {
+ &DUMMY_SELECTOR
+ }
+}
+
+impl<B: hal::Backend> Resource for BindGroup<B> {
+ const TYPE: &'static str = "BindGroup";
+
+ fn life_guard(&self) -> &LifeGuard {
+ &self.life_guard
+ }
+}
+
+#[derive(Clone, Debug, Error)]
+pub enum GetBindGroupLayoutError {
+ #[error("pipeline is invalid")]
+ InvalidPipeline,
+ #[error("invalid group index {0}")]
+ InvalidGroupIndex(u32),
+}
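validate_dynamic_bindings above reduces to two checks per dynamic binding: the offset must respect BIND_BUFFER_ALIGNMENT, and it must not push the binding past the end of the buffer (the precomputed maximum_dynamic_offset). A standalone sketch of those rules, assuming wgt::BIND_BUFFER_ALIGNMENT is 256 as in wgpu-types:

// Standalone restatement of the dynamic-offset rules (sketch, not the real API).
const BIND_BUFFER_ALIGNMENT: u64 = 256; // assumed value of wgt::BIND_BUFFER_ALIGNMENT

fn check_offset(offset: u64, maximum_dynamic_offset: u64) -> Result<(), &'static str> {
    if offset % BIND_BUFFER_ALIGNMENT != 0 {
        return Err("offset does not respect BIND_BUFFER_ALIGNMENT");
    }
    if offset > maximum_dynamic_offset {
        return Err("offset would overrun the buffer");
    }
    Ok(())
}

fn main() {
    assert!(check_offset(512, 1024).is_ok());
    assert!(check_offset(100, 1024).is_err()); // unaligned
    assert!(check_offset(2048, 1024).is_err()); // out of bounds
}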
diff --git a/gfx/wgpu/wgpu-core/src/command/allocator.rs b/gfx/wgpu/wgpu-core/src/command/allocator.rs
new file mode 100644
index 0000000000..cfaa6258c2
--- /dev/null
+++ b/gfx/wgpu/wgpu-core/src/command/allocator.rs
@@ -0,0 +1,268 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+use super::CommandBuffer;
+use crate::{
+ device::DeviceError, hub::GfxBackend, id::DeviceId, track::TrackerSet, FastHashMap,
+ PrivateFeatures, Stored, SubmissionIndex,
+};
+
+#[cfg(debug_assertions)]
+use crate::LabelHelpers;
+
+use hal::{command::CommandBuffer as _, device::Device as _, pool::CommandPool as _};
+use parking_lot::Mutex;
+use thiserror::Error;
+
+use std::thread;
+
+const GROW_AMOUNT: usize = 20;
+
+#[derive(Debug)]
+struct CommandPool<B: hal::Backend> {
+ raw: B::CommandPool,
+ total: usize,
+ available: Vec<B::CommandBuffer>,
+ pending: Vec<(B::CommandBuffer, SubmissionIndex)>,
+}
+
+impl<B: hal::Backend> CommandPool<B> {
+ fn maintain(&mut self, last_done_index: SubmissionIndex) {
+ for i in (0..self.pending.len()).rev() {
+ if self.pending[i].1 <= last_done_index {
+ let (cmd_buf, index) = self.pending.swap_remove(i);
+ tracing::trace!(
+ "recycling cmdbuf submitted in {} when {} is last done",
+ index,
+ last_done_index,
+ );
+ self.recycle(cmd_buf);
+ }
+ }
+ }
+
+ fn recycle(&mut self, mut raw: B::CommandBuffer) {
+ unsafe {
+ raw.reset(false);
+ }
+ self.available.push(raw);
+ }
+
+ fn allocate(&mut self) -> B::CommandBuffer {
+ if self.available.is_empty() {
+ self.total += GROW_AMOUNT;
+ unsafe {
+ self.raw.allocate(
+ GROW_AMOUNT,
+ hal::command::Level::Primary,
+ &mut self.available,
+ )
+ };
+ }
+ self.available.pop().unwrap()
+ }
+}
+
+#[derive(Debug)]
+struct Inner<B: hal::Backend> {
+ pools: FastHashMap<thread::ThreadId, CommandPool<B>>,
+}
+
+#[derive(Debug)]
+pub struct CommandAllocator<B: hal::Backend> {
+ queue_family: hal::queue::QueueFamilyId,
+ internal_thread_id: thread::ThreadId,
+ inner: Mutex<Inner<B>>,
+}
+
+impl<B: GfxBackend> CommandAllocator<B> {
+ pub(crate) fn allocate(
+ &self,
+ device_id: Stored<DeviceId>,
+ device: &B::Device,
+ limits: wgt::Limits,
+ private_features: PrivateFeatures,
+ label: &crate::Label,
+ #[cfg(feature = "trace")] enable_tracing: bool,
+ ) -> Result<CommandBuffer<B>, CommandAllocatorError> {
+ //debug_assert_eq!(device_id.backend(), B::VARIANT);
+ let _ = label; // silence warning on release
+ let thread_id = thread::current().id();
+ let mut inner = self.inner.lock();
+
+ use std::collections::hash_map::Entry;
+ let pool = match inner.pools.entry(thread_id) {
+ Entry::Occupied(e) => e.into_mut(),
+ Entry::Vacant(e) => {
+ tracing::info!("Starting on thread {:?}", thread_id);
+ let raw = unsafe {
+ device
+ .create_command_pool(
+ self.queue_family,
+ hal::pool::CommandPoolCreateFlags::RESET_INDIVIDUAL,
+ )
+ .or(Err(DeviceError::OutOfMemory))?
+ };
+ let pool = CommandPool {
+ raw,
+ total: 0,
+ available: Vec::new(),
+ pending: Vec::new(),
+ };
+ e.insert(pool)
+ }
+ };
+
+ let init = pool.allocate();
+
+ Ok(CommandBuffer {
+ raw: vec![init],
+ is_recording: true,
+ recorded_thread_id: thread_id,
+ device_id,
+ trackers: TrackerSet::new(B::VARIANT),
+ used_swap_chain: None,
+ limits,
+ private_features,
+ #[cfg(feature = "trace")]
+ commands: if enable_tracing {
+ Some(Vec::new())
+ } else {
+ None
+ },
+ #[cfg(debug_assertions)]
+ label: label.to_string_or_default(),
+ })
+ }
+}
+
+impl<B: hal::Backend> CommandAllocator<B> {
+ pub fn new(
+ queue_family: hal::queue::QueueFamilyId,
+ device: &B::Device,
+ ) -> Result<Self, CommandAllocatorError> {
+ let internal_thread_id = thread::current().id();
+ tracing::info!("Starting on (internal) thread {:?}", internal_thread_id);
+ let mut pools = FastHashMap::default();
+ pools.insert(
+ internal_thread_id,
+ CommandPool {
+ raw: unsafe {
+ device
+ .create_command_pool(
+ queue_family,
+ hal::pool::CommandPoolCreateFlags::RESET_INDIVIDUAL,
+ )
+ .or(Err(DeviceError::OutOfMemory))?
+ },
+ total: 0,
+ available: Vec::new(),
+ pending: Vec::new(),
+ },
+ );
+ Ok(Self {
+ queue_family,
+ internal_thread_id,
+ inner: Mutex::new(Inner { pools }),
+ })
+ }
+
+ fn allocate_for_thread_id(&self, thread_id: thread::ThreadId) -> B::CommandBuffer {
+ let mut inner = self.inner.lock();
+ inner.pools.get_mut(&thread_id).unwrap().allocate()
+ }
+
+ pub fn allocate_internal(&self) -> B::CommandBuffer {
+ self.allocate_for_thread_id(self.internal_thread_id)
+ }
+
+ pub fn extend(&self, cmd_buf: &CommandBuffer<B>) -> B::CommandBuffer {
+ self.allocate_for_thread_id(cmd_buf.recorded_thread_id)
+ }
+
+ pub fn discard_internal(&self, raw: B::CommandBuffer) {
+ let mut inner = self.inner.lock();
+ inner
+ .pools
+ .get_mut(&self.internal_thread_id)
+ .unwrap()
+ .recycle(raw);
+ }
+
+ pub fn discard(&self, mut cmd_buf: CommandBuffer<B>) {
+ cmd_buf.trackers.clear();
+ let mut inner = self.inner.lock();
+ let pool = inner.pools.get_mut(&cmd_buf.recorded_thread_id).unwrap();
+ for raw in cmd_buf.raw {
+ pool.recycle(raw);
+ }
+ }
+
+ pub fn after_submit_internal(&self, raw: B::CommandBuffer, submit_index: SubmissionIndex) {
+ let mut inner = self.inner.lock();
+ inner
+ .pools
+ .get_mut(&self.internal_thread_id)
+ .unwrap()
+ .pending
+ .push((raw, submit_index));
+ }
+
+ pub fn after_submit(&self, cmd_buf: CommandBuffer<B>, submit_index: SubmissionIndex) {
+ // Record this command buffer as pending
+ let mut inner = self.inner.lock();
+ inner
+ .pools
+ .get_mut(&cmd_buf.recorded_thread_id)
+ .unwrap()
+ .pending
+ .extend(cmd_buf.raw.into_iter().map(|raw| (raw, submit_index)));
+ }
+
+ pub fn maintain(&self, device: &B::Device, last_done_index: SubmissionIndex) {
+ let mut inner = self.inner.lock();
+ let mut remove_threads = Vec::new();
+ for (&thread_id, pool) in inner.pools.iter_mut() {
+ pool.maintain(last_done_index);
+ if pool.total == pool.available.len() && thread_id != self.internal_thread_id {
+ assert!(pool.pending.is_empty());
+ remove_threads.push(thread_id);
+ }
+ }
+ for thread_id in remove_threads {
+ tracing::info!("Removing from thread {:?}", thread_id);
+ let mut pool = inner.pools.remove(&thread_id).unwrap();
+ unsafe {
+ pool.raw.free(pool.available);
+ device.destroy_command_pool(pool.raw);
+ }
+ }
+ }
+
+ pub fn destroy(self, device: &B::Device) {
+ let mut inner = self.inner.lock();
+ for (_, mut pool) in inner.pools.drain() {
+ while let Some((raw, _)) = pool.pending.pop() {
+ pool.recycle(raw);
+ }
+ if pool.total != pool.available.len() {
+ tracing::error!(
+ "Some command buffers are still recorded, only tracking {} / {}",
+ pool.available.len(),
+ pool.total
+ );
+ }
+ unsafe {
+ pool.raw.free(pool.available);
+ device.destroy_command_pool(pool.raw);
+ }
+ }
+ }
+}
+
+#[derive(Clone, Debug, Error)]
+pub enum CommandAllocatorError {
+ #[error(transparent)]
+ Device(#[from] DeviceError),
+}
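The allocator above keeps one command pool per recording thread and recycles by submission index: each submitted buffer is parked in `pending` with its index, and maintain() resets any buffer whose submission the device has since completed (and tears down pools for threads that have gone fully idle). A standalone sketch of the recycling predicate, not the real API:

// Which pending command buffers may be reset, given the last completed submission.
type SubmissionIndex = usize;

fn recyclable(pending: &[(u32, SubmissionIndex)], last_done: SubmissionIndex) -> Vec<u32> {
    pending
        .iter()
        .filter(|&&(_, submitted)| submitted <= last_done)
        .map(|&(id, _)| id)
        .collect()
}

fn main() {
    let pending = [(1, 3), (2, 5), (3, 4)];
    assert_eq!(recyclable(&pending, 4), vec![1, 3]); // buffers from submissions 3 and 4
}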
diff --git a/gfx/wgpu/wgpu-core/src/command/bind.rs b/gfx/wgpu/wgpu-core/src/command/bind.rs
new file mode 100644
index 0000000000..a62b38d5b7
--- /dev/null
+++ b/gfx/wgpu/wgpu-core/src/command/bind.rs
@@ -0,0 +1,295 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+use crate::{
+ binding_model::{BindGroup, PipelineLayout},
+ device::SHADER_STAGE_COUNT,
+ hub::{GfxBackend, Storage},
+ id::{BindGroupId, BindGroupLayoutId, PipelineLayoutId, Valid},
+ Stored, MAX_BIND_GROUPS,
+};
+
+use arrayvec::ArrayVec;
+use std::slice;
+use wgt::DynamicOffset;
+
+type BindGroupMask = u8;
+
+#[derive(Clone, Debug)]
+pub(super) struct BindGroupPair {
+ layout_id: Valid<BindGroupLayoutId>,
+ group_id: Stored<BindGroupId>,
+}
+
+#[derive(Debug)]
+pub(super) enum LayoutChange<'a> {
+ Unchanged,
+ Match(Valid<BindGroupId>, &'a [DynamicOffset]),
+ Mismatch,
+}
+
+#[derive(Debug)]
+pub enum Provision {
+ Unchanged,
+ Changed { was_compatible: bool },
+}
+
+#[derive(Clone)]
+pub(super) struct FollowUpIter<'a> {
+ iter: slice::Iter<'a, BindGroupEntry>,
+}
+impl<'a> Iterator for FollowUpIter<'a> {
+ type Item = (Valid<BindGroupId>, &'a [DynamicOffset]);
+ fn next(&mut self) -> Option<Self::Item> {
+ self.iter
+ .next()
+ .and_then(|entry| Some((entry.actual_value()?, entry.dynamic_offsets.as_slice())))
+ }
+}
+
+#[derive(Clone, Default, Debug)]
+pub(super) struct BindGroupEntry {
+ expected_layout_id: Option<Valid<BindGroupLayoutId>>,
+ provided: Option<BindGroupPair>,
+ dynamic_offsets: Vec<DynamicOffset>,
+}
+
+impl BindGroupEntry {
+ fn provide<B: GfxBackend>(
+ &mut self,
+ bind_group_id: Valid<BindGroupId>,
+ bind_group: &BindGroup<B>,
+ offsets: &[DynamicOffset],
+ ) -> Provision {
+ debug_assert_eq!(B::VARIANT, bind_group_id.0.backend());
+
+ let was_compatible = match self.provided {
+ Some(BindGroupPair {
+ layout_id,
+ ref group_id,
+ }) => {
+ if group_id.value == bind_group_id && offsets == self.dynamic_offsets.as_slice() {
+ assert_eq!(layout_id, bind_group.layout_id);
+ return Provision::Unchanged;
+ }
+ self.expected_layout_id == Some(layout_id)
+ }
+ None => false,
+ };
+
+ self.provided = Some(BindGroupPair {
+ layout_id: bind_group.layout_id,
+ group_id: Stored {
+ value: bind_group_id,
+ ref_count: bind_group.life_guard.add_ref(),
+ },
+ });
+ self.dynamic_offsets.clear();
+ self.dynamic_offsets.extend_from_slice(offsets);
+
+ Provision::Changed { was_compatible }
+ }
+
+ pub fn expect_layout(
+ &mut self,
+ bind_group_layout_id: Valid<BindGroupLayoutId>,
+ ) -> LayoutChange {
+ let some = Some(bind_group_layout_id);
+ if self.expected_layout_id != some {
+ self.expected_layout_id = some;
+ match self.provided {
+ Some(BindGroupPair {
+ layout_id,
+ ref group_id,
+ }) if layout_id == bind_group_layout_id => {
+ LayoutChange::Match(group_id.value, &self.dynamic_offsets)
+ }
+ Some(_) | None => LayoutChange::Mismatch,
+ }
+ } else {
+ LayoutChange::Unchanged
+ }
+ }
+
+ fn is_valid(&self) -> Option<bool> {
+ match (self.expected_layout_id, self.provided.as_ref()) {
+ (None, None) => Some(true),
+ (None, Some(_)) => None,
+ (Some(_), None) => Some(false),
+ (Some(layout), Some(pair)) => Some(layout == pair.layout_id),
+ }
+ }
+
+ fn actual_value(&self) -> Option<Valid<BindGroupId>> {
+ self.expected_layout_id.and_then(|layout_id| {
+ self.provided.as_ref().and_then(|pair| {
+ if pair.layout_id == layout_id {
+ Some(pair.group_id.value)
+ } else {
+ None
+ }
+ })
+ })
+ }
+}
+
+#[derive(Debug)]
+pub struct Binder {
+ pub(super) pipeline_layout_id: Option<Valid<PipelineLayoutId>>, //TODO: strongly `Stored`
+ pub(super) entries: ArrayVec<[BindGroupEntry; MAX_BIND_GROUPS]>,
+}
+
+impl Binder {
+ pub(super) fn new(max_bind_groups: u32) -> Self {
+ Self {
+ pipeline_layout_id: None,
+ entries: (0..max_bind_groups)
+ .map(|_| BindGroupEntry::default())
+ .collect(),
+ }
+ }
+
+ pub(super) fn reset(&mut self) {
+ self.pipeline_layout_id = None;
+ self.entries.clear();
+ }
+
+ pub(super) fn change_pipeline_layout<B: GfxBackend>(
+ &mut self,
+ guard: &Storage<PipelineLayout<B>, PipelineLayoutId>,
+ new_id: Valid<PipelineLayoutId>,
+ ) {
+ let old_id_opt = self.pipeline_layout_id.replace(new_id);
+ let new = &guard[new_id];
+
+ let length = if let Some(old_id) = old_id_opt {
+ let old = &guard[old_id];
+ if old.push_constant_ranges == new.push_constant_ranges {
+ new.bind_group_layout_ids.len()
+ } else {
+ 0
+ }
+ } else {
+ 0
+ };
+
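+ // Everything from `length` onward must be re-validated against the new
+ // layout: when the push-constant ranges differ, `length` is 0 and every
+ // bind group expectation is reset.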
+ for entry in self.entries[length..].iter_mut() {
+ entry.expected_layout_id = None;
+ }
+ }
+
+ /// Attempt to set the value of the specified bind group index.
+ /// Returns Some() when the new bind group is ready to be actually bound
+ /// (i.e. compatible with current expectations). Also returns an iterator
+ /// of bind group IDs to be bound with it: those are compatible bind groups
+ /// that were previously blocked because the current one was incompatible.
+ pub(super) fn provide_entry<'a, B: GfxBackend>(
+ &'a mut self,
+ index: usize,
+ bind_group_id: Valid<BindGroupId>,
+ bind_group: &BindGroup<B>,
+ offsets: &[DynamicOffset],
+ ) -> Option<(Valid<PipelineLayoutId>, FollowUpIter<'a>)> {
+ tracing::trace!("\tBinding [{}] = group {:?}", index, bind_group_id);
+ debug_assert_eq!(B::VARIANT, bind_group_id.0.backend());
+
+ match self.entries[index].provide(bind_group_id, bind_group, offsets) {
+ Provision::Unchanged => None,
+ Provision::Changed { was_compatible, .. } => {
+ let compatible_count = self.compatible_count();
+ if index < compatible_count {
+ let end = compatible_count.min(if was_compatible {
+ index + 1
+ } else {
+ self.entries.len()
+ });
+ tracing::trace!("\t\tbinding up to {}", end);
+ Some((
+ self.pipeline_layout_id?,
+ FollowUpIter {
+ iter: self.entries[index + 1..end].iter(),
+ },
+ ))
+ } else {
+ tracing::trace!("\t\tskipping above compatible {}", compatible_count);
+ None
+ }
+ }
+ }
+ }
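+
+ // For example, if slot 1 previously held an incompatible group (blocking
+ // every slot after it), providing a compatible group for slot 1 returns
+ // the pipeline layout together with a follow-up iterator that re-binds
+ // the already-compatible groups in slots 2 and beyond in the same call.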
+
+ pub(super) fn list_active(&self) -> impl Iterator<Item = Valid<BindGroupId>> + '_ {
+ self.entries.iter().filter_map(|e| match e.provided {
+ Some(ref pair) if e.expected_layout_id.is_some() => Some(pair.group_id.value),
+ _ => None,
+ })
+ }
+
+ pub(super) fn invalid_mask(&self) -> BindGroupMask {
+ self.entries.iter().enumerate().fold(0, |mask, (i, entry)| {
+ if entry.is_valid().unwrap_or(true) {
+ mask
+ } else {
+ mask | 1u8 << i
+ }
+ })
+ }
+
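+ // Example: if entries 0 and 2 match their expected layouts but entry 1
+ // does not, `invalid_mask` above returns 0b010 and `compatible_count`
+ // below returns 1, so only group 0 counts as properly bound.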
+ fn compatible_count(&self) -> usize {
+ self.entries
+ .iter()
+ .position(|entry| !entry.is_valid().unwrap_or(false))
+ .unwrap_or_else(|| self.entries.len())
+ }
+}
+
+struct PushConstantChange {
+ stages: wgt::ShaderStage,
+ offset: u32,
+ enable: bool,
+}
+
+/// Break up possibly overlapping push constant ranges into a set of non-overlapping ranges,
+/// each carrying the union of the stage flags of the original ranges that cover it. This allows
+/// us to zero out (or write any value to) every byte covered by the original ranges.
+pub fn compute_nonoverlapping_ranges(
+ ranges: &[wgt::PushConstantRange],
+) -> ArrayVec<[wgt::PushConstantRange; SHADER_STAGE_COUNT * 2]> {
+ if ranges.is_empty() {
+ return ArrayVec::new();
+ }
+ debug_assert!(ranges.len() <= SHADER_STAGE_COUNT);
+
+ let mut breaks: ArrayVec<[PushConstantChange; SHADER_STAGE_COUNT * 2]> = ArrayVec::new();
+ for range in ranges {
+ breaks.push(PushConstantChange {
+ stages: range.stages,
+ offset: range.range.start,
+ enable: true,
+ });
+ breaks.push(PushConstantChange {
+ stages: range.stages,
+ offset: range.range.end,
+ enable: false,
+ });
+ }
+ breaks.sort_unstable_by_key(|change| change.offset);
+
+ let mut output_ranges = ArrayVec::new();
+ let mut position = 0_u32;
+ let mut stages = wgt::ShaderStage::NONE;
+
+ for bk in breaks {
+ if bk.offset - position > 0 && !stages.is_empty() {
+ output_ranges.push(wgt::PushConstantRange {
+ stages,
+ range: position..bk.offset,
+ })
+ }
+ position = bk.offset;
+ stages.set(bk.stages, bk.enable);
+ }
+
+ output_ranges
+}
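+
+// A minimal test sketch of `compute_nonoverlapping_ranges`: two overlapping
+// ranges split into three non-overlapping ones whose stage flags are the
+// union over each sub-range.
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn nonoverlapping_ranges_split_overlap() {
+ // VERTEX covers 0..8, FRAGMENT covers 4..12; the overlap 4..8 must
+ // carry both stage flags.
+ let ranges = [
+ wgt::PushConstantRange {
+ stages: wgt::ShaderStage::VERTEX,
+ range: 0..8,
+ },
+ wgt::PushConstantRange {
+ stages: wgt::ShaderStage::FRAGMENT,
+ range: 4..12,
+ },
+ ];
+ let split = compute_nonoverlapping_ranges(&ranges);
+ assert_eq!(split.len(), 3);
+ assert_eq!(split[0].range, 0..4);
+ assert_eq!(split[0].stages, wgt::ShaderStage::VERTEX);
+ assert_eq!(split[1].range, 4..8);
+ assert_eq!(
+ split[1].stages,
+ wgt::ShaderStage::VERTEX | wgt::ShaderStage::FRAGMENT
+ );
+ assert_eq!(split[2].range, 8..12);
+ assert_eq!(split[2].stages, wgt::ShaderStage::FRAGMENT);
+ }
+}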
diff --git a/gfx/wgpu/wgpu-core/src/command/bundle.rs b/gfx/wgpu/wgpu-core/src/command/bundle.rs
new file mode 100644
index 0000000000..19c11c7136
--- /dev/null
+++ b/gfx/wgpu/wgpu-core/src/command/bundle.rs
@@ -0,0 +1,1230 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*! Render Bundles
+
+ ## Software implementation
+
+ The path from nothing to using a render bundle consists of three phases.
+
+ ### Initial command encoding
+
+ The user creates a `RenderBundleEncoder` and populates it by issuing commands
+ from the `bundle_ffi` module, just like with `RenderPass`, except that the
+ set of available commands is reduced. Everything is written into a `RawPass`.
+
+ ### Bundle baking
+
+ Once the commands are encoded, the user calls `render_bundle_encoder_finish`.
+ This is perhaps the most complex part of the logic. It consumes the
+ commands stored in `RawPass`, while validating everything, tracking the state,
+ and re-recording the commands into a separate `Vec<RenderCommand>`. It
+ doesn't actually execute any commands.
+
+ More importantly, the produced vector of commands is "normalized", which
+ means it can be executed verbatim without any state tracking. More formally,
+ a "normalized" command stream guarantees that any state required by a draw
+ call is set explicitly by one of the commands between that draw call and
+ the most recent change of the pipeline.
+
+ ### Execution
+
+ When the bundle is used in an actual render pass, `RenderBundle::execute` is
+ called. It goes through the commands and issues them into the native command
+ buffer. Thanks to the "normalized" property, it doesn't track any bind group
+ invalidations or index format changes.
+!*/
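+
+// For example, normalization collapses consecutive `SetPipeline` commands
+// that name the same pipeline into one, and a `SetVertexBuffer` appears in
+// the output only right before a draw, and only if that binding changed
+// since the previous draw.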
+#![allow(clippy::reversed_empty_ranges)]
+
+use crate::{
+ command::{
+ BasePass, DrawError, MapPassErr, PassErrorScope, RenderCommand, RenderCommandError,
+ StateChange,
+ },
+ conv,
+ device::{
+ AttachmentData, Device, DeviceError, RenderPassContext, MAX_VERTEX_BUFFERS,
+ SHADER_STAGE_COUNT,
+ },
+ hub::{GfxBackend, GlobalIdentityHandlerFactory, Hub, Resource, Storage, Token},
+ id,
+ resource::BufferUse,
+ span,
+ track::{TrackerSet, UsageConflict},
+ validation::check_buffer_usage,
+ Label, LabelHelpers, LifeGuard, Stored, MAX_BIND_GROUPS,
+};
+use arrayvec::ArrayVec;
+use std::{borrow::Cow, iter, ops::Range};
+use thiserror::Error;
+
+/// Describes a [`RenderBundleEncoder`].
+#[derive(Clone, Debug, Default, PartialEq, Eq, Hash)]
+#[cfg_attr(feature = "trace", derive(serde::Serialize))]
+#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
+pub struct RenderBundleEncoderDescriptor<'a> {
+ /// Debug label of the render bundle encoder. This will show up in graphics debuggers for easy identification.
+ pub label: Label<'a>,
+ /// The formats of the color attachments that this render bundle is capable of rendering to. This
+ /// must match the formats of the color attachments in the render pass this render bundle is executed in.
+ pub color_formats: Cow<'a, [wgt::TextureFormat]>,
+ /// The format of the depth attachment that this render bundle is capable of rendering to. This
+ /// must match the format of the depth attachment in the render pass this render bundle is executed in.
+ pub depth_stencil_format: Option<wgt::TextureFormat>,
+ /// The sample count this render bundle is capable of rendering to. This must match the sample
+ /// count of the pipelines and the render passes it is used in.
+ pub sample_count: u32,
+}
+
+#[derive(Debug)]
+#[cfg_attr(feature = "serial-pass", derive(serde::Deserialize, serde::Serialize))]
+pub struct RenderBundleEncoder {
+ base: BasePass<RenderCommand>,
+ parent_id: id::DeviceId,
+ pub(crate) context: RenderPassContext,
+}
+
+impl RenderBundleEncoder {
+ pub fn new(
+ desc: &RenderBundleEncoderDescriptor,
+ parent_id: id::DeviceId,
+ base: Option<BasePass<RenderCommand>>,
+ ) -> Result<Self, CreateRenderBundleError> {
+ span!(_guard, INFO, "RenderBundleEncoder::new");
+ Ok(Self {
+ base: base.unwrap_or_else(BasePass::new),
+ parent_id,
+ context: RenderPassContext {
+ attachments: AttachmentData {
+ colors: desc.color_formats.iter().cloned().collect(),
+ resolves: ArrayVec::new(),
+ depth_stencil: desc.depth_stencil_format,
+ },
+ sample_count: {
+ let sc = desc.sample_count;
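+ // Power-of-two counts up to 32 are accepted,
+ // i.e. 1, 2, 4, 8, 16, or 32 samples.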
+ if sc == 0 || sc > 32 || !conv::is_power_of_two(sc) {
+ return Err(CreateRenderBundleError::InvalidSampleCount(sc));
+ }
+ sc as u8
+ },
+ },
+ })
+ }
+
+ pub fn dummy(parent_id: id::DeviceId) -> Self {
+ Self {
+ base: BasePass::new(),
+ parent_id,
+ context: RenderPassContext {
+ attachments: AttachmentData {
+ colors: ArrayVec::new(),
+ resolves: ArrayVec::new(),
+ depth_stencil: None,
+ },
+ sample_count: 0,
+ },
+ }
+ }
+
+ pub fn parent(&self) -> id::DeviceId {
+ self.parent_id
+ }
+
+ pub(crate) fn finish<B: hal::Backend, G: GlobalIdentityHandlerFactory>(
+ self,
+ desc: &RenderBundleDescriptor,
+ device: &Device<B>,
+ hub: &Hub<B, G>,
+ token: &mut Token<Device<B>>,
+ ) -> Result<RenderBundle, RenderBundleError> {
+ let (pipeline_layout_guard, mut token) = hub.pipeline_layouts.read(token);
+ let (bind_group_guard, mut token) = hub.bind_groups.read(&mut token);
+ let (pipeline_guard, mut token) = hub.render_pipelines.read(&mut token);
+ let (buffer_guard, _) = hub.buffers.read(&mut token);
+
+ let mut state = State {
+ trackers: TrackerSet::new(self.parent_id.backend()),
+ index: IndexState::new(),
+ vertex: (0..MAX_VERTEX_BUFFERS)
+ .map(|_| VertexState::new())
+ .collect(),
+ bind: (0..MAX_BIND_GROUPS).map(|_| BindState::new()).collect(),
+ push_constant_ranges: PushConstantState::new(),
+ raw_dynamic_offsets: Vec::new(),
+ flat_dynamic_offsets: Vec::new(),
+ used_bind_groups: 0,
+ pipeline: StateChange::new(),
+ };
+ let mut commands = Vec::new();
+ let mut base = self.base.as_ref();
+ let mut pipeline_layout_id = None::<id::Valid<id::PipelineLayoutId>>;
+
+ for &command in base.commands {
+ match command {
+ RenderCommand::SetBindGroup {
+ index,
+ num_dynamic_offsets,
+ bind_group_id,
+ } => {
+ let scope = PassErrorScope::SetBindGroup(bind_group_id);
+
+ let max_bind_groups = device.limits.max_bind_groups;
+ if (index as u32) >= max_bind_groups {
+ return Err(RenderCommandError::BindGroupIndexOutOfRange {
+ index,
+ max: max_bind_groups,
+ })
+ .map_pass_err(scope);
+ }
+
+ let offsets = &base.dynamic_offsets[..num_dynamic_offsets as usize];
+ base.dynamic_offsets = &base.dynamic_offsets[num_dynamic_offsets as usize..];
+ // Check for misaligned offsets.
+ if let Some(offset) = offsets
+ .iter()
+ .map(|offset| *offset as wgt::BufferAddress)
+ .find(|offset| offset % wgt::BIND_BUFFER_ALIGNMENT != 0)
+ {
+ return Err(RenderCommandError::UnalignedBufferOffset(offset))
+ .map_pass_err(scope);
+ }
+
+ let bind_group = state
+ .trackers
+ .bind_groups
+ .use_extend(&*bind_group_guard, bind_group_id, (), ())
+ .map_err(|_| RenderCommandError::InvalidBindGroup(bind_group_id))
+ .map_pass_err(scope)?;
+ if bind_group.dynamic_binding_info.len() != offsets.len() {
+ return Err(RenderCommandError::InvalidDynamicOffsetCount {
+ actual: offsets.len(),
+ expected: bind_group.dynamic_binding_info.len(),
+ })
+ .map_pass_err(scope);
+ }
+
+ state.set_bind_group(index, bind_group_id, bind_group.layout_id, offsets);
+ state
+ .trackers
+ .merge_extend(&bind_group.used)
+ .map_pass_err(scope)?;
+ }
+ RenderCommand::SetPipeline(pipeline_id) => {
+ let scope = PassErrorScope::SetPipelineRender(pipeline_id);
+ if state.pipeline.set_and_check_redundant(pipeline_id) {
+ continue;
+ }
+
+ let pipeline = state
+ .trackers
+ .render_pipes
+ .use_extend(&*pipeline_guard, pipeline_id, (), ())
+ .unwrap();
+
+ self.context
+ .check_compatible(&pipeline.pass_context)
+ .map_err(RenderCommandError::IncompatiblePipeline)
+ .map_pass_err(scope)?;
+
+ //TODO: check read-only depth
+
+ let layout = &pipeline_layout_guard[pipeline.layout_id.value];
+ pipeline_layout_id = Some(pipeline.layout_id.value);
+
+ state.set_pipeline(
+ pipeline.index_format,
+ &pipeline.vertex_strides,
+ &layout.bind_group_layout_ids,
+ &layout.push_constant_ranges,
+ );
+ commands.push(command);
+ if let Some(iter) = state.flush_push_constants() {
+ commands.extend(iter)
+ }
+ }
+ RenderCommand::SetIndexBuffer {
+ buffer_id,
+ offset,
+ size,
+ } => {
+ let scope = PassErrorScope::SetIndexBuffer(buffer_id);
+ let buffer = state
+ .trackers
+ .buffers
+ .use_extend(&*buffer_guard, buffer_id, (), BufferUse::INDEX)
+ .unwrap();
+ check_buffer_usage(buffer.usage, wgt::BufferUsage::INDEX)
+ .map_pass_err(scope)?;
+
+ let end = match size {
+ Some(s) => offset + s.get(),
+ None => buffer.size,
+ };
+ state.index.set_buffer(buffer_id, offset..end);
+ }
+ RenderCommand::SetVertexBuffer {
+ slot,
+ buffer_id,
+ offset,
+ size,
+ } => {
+ let scope = PassErrorScope::SetVertexBuffer(buffer_id);
+ let buffer = state
+ .trackers
+ .buffers
+ .use_extend(&*buffer_guard, buffer_id, (), BufferUse::VERTEX)
+ .unwrap();
+ check_buffer_usage(buffer.usage, wgt::BufferUsage::VERTEX)
+ .map_pass_err(scope)?;
+
+ let end = match size {
+ Some(s) => offset + s.get(),
+ None => buffer.size,
+ };
+ state.vertex[slot as usize].set_buffer(buffer_id, offset..end);
+ }
+ RenderCommand::SetPushConstant {
+ stages,
+ offset,
+ size_bytes,
+ values_offset: _,
+ } => {
+ let scope = PassErrorScope::SetPushConstant;
+ let end_offset = offset + size_bytes;
+
+ let pipeline_layout_id = pipeline_layout_id
+ .ok_or(DrawError::MissingPipeline)
+ .map_pass_err(scope)?;
+ let pipeline_layout = &pipeline_layout_guard[pipeline_layout_id];
+
+ pipeline_layout
+ .validate_push_constant_ranges(stages, offset, end_offset)
+ .map_pass_err(scope)?;
+
+ commands.push(command);
+ }
+ RenderCommand::Draw {
+ vertex_count,
+ instance_count,
+ first_vertex,
+ first_instance,
+ } => {
+ let scope = PassErrorScope::Draw;
+ let (vertex_limit, instance_limit) = state.vertex_limits();
+ let last_vertex = first_vertex + vertex_count;
+ if last_vertex > vertex_limit {
+ return Err(DrawError::VertexBeyondLimit {
+ last_vertex,
+ vertex_limit,
+ })
+ .map_pass_err(scope);
+ }
+ let last_instance = first_instance + instance_count;
+ if last_instance > instance_limit {
+ return Err(DrawError::InstanceBeyondLimit {
+ last_instance,
+ instance_limit,
+ })
+ .map_pass_err(scope);
+ }
+ commands.extend(state.flush_vertices());
+ commands.extend(state.flush_binds());
+ commands.push(command);
+ }
+ RenderCommand::DrawIndexed {
+ index_count,
+ instance_count,
+ first_index,
+ base_vertex: _,
+ first_instance,
+ } => {
+ let scope = PassErrorScope::DrawIndexed;
+ //TODO: validate that base_vertex + max_index() is within the provided range
+ let (_, instance_limit) = state.vertex_limits();
+ let index_limit = state.index.limit();
+ let last_index = first_index + index_count;
+ if last_index > index_limit {
+ return Err(DrawError::IndexBeyondLimit {
+ last_index,
+ index_limit,
+ })
+ .map_pass_err(scope);
+ }
+ let last_instance = first_instance + instance_count;
+ if last_instance > instance_limit {
+ return Err(DrawError::InstanceBeyondLimit {
+ last_instance,
+ instance_limit,
+ })
+ .map_pass_err(scope);
+ }
+ commands.extend(state.index.flush());
+ commands.extend(state.flush_vertices());
+ commands.extend(state.flush_binds());
+ commands.push(command);
+ }
+ RenderCommand::MultiDrawIndirect {
+ buffer_id,
+ offset: _,
+ count: None,
+ indexed: false,
+ } => {
+ let scope = PassErrorScope::DrawIndirect;
+ let buffer = state
+ .trackers
+ .buffers
+ .use_extend(&*buffer_guard, buffer_id, (), BufferUse::INDIRECT)
+ .unwrap();
+ check_buffer_usage(buffer.usage, wgt::BufferUsage::INDIRECT)
+ .map_pass_err(scope)?;
+
+ commands.extend(state.flush_vertices());
+ commands.extend(state.flush_binds());
+ commands.push(command);
+ }
+ RenderCommand::MultiDrawIndirect {
+ buffer_id,
+ offset: _,
+ count: None,
+ indexed: true,
+ } => {
+ let scope = PassErrorScope::DrawIndexedIndirect;
+ let buffer = state
+ .trackers
+ .buffers
+ .use_extend(&*buffer_guard, buffer_id, (), BufferUse::INDIRECT)
+ .map_err(|err| RenderCommandError::Buffer(buffer_id, err))
+ .map_pass_err(scope)?;
+ check_buffer_usage(buffer.usage, wgt::BufferUsage::INDIRECT)
+ .map_pass_err(scope)?;
+
+ commands.extend(state.index.flush());
+ commands.extend(state.flush_vertices());
+ commands.extend(state.flush_binds());
+ commands.push(command);
+ }
+ RenderCommand::MultiDrawIndirect { .. }
+ | RenderCommand::MultiDrawIndirectCount { .. } => unimplemented!(),
+ RenderCommand::PushDebugGroup { color: _, len: _ } => unimplemented!(),
+ RenderCommand::InsertDebugMarker { color: _, len: _ } => unimplemented!(),
+ RenderCommand::PopDebugGroup => unimplemented!(),
+ RenderCommand::ExecuteBundle(_)
+ | RenderCommand::SetBlendColor(_)
+ | RenderCommand::SetStencilReference(_)
+ | RenderCommand::SetViewport { .. }
+ | RenderCommand::SetScissor(_) => unreachable!("not supported by a render bundle"),
+ }
+ }
+
+ let _ = desc.label; //TODO: actually use
+ Ok(RenderBundle {
+ base: BasePass {
+ commands,
+ dynamic_offsets: state.flat_dynamic_offsets,
+ string_data: Vec::new(),
+ push_constant_data: Vec::new(),
+ },
+ device_id: Stored {
+ value: id::Valid(self.parent_id),
+ ref_count: device.life_guard.add_ref(),
+ },
+ used: state.trackers,
+ context: self.context,
+ life_guard: LifeGuard::new(desc.label.borrow_or_default()),
+ })
+ }
+}
+
+/// Error type returned from `RenderBundleEncoder::new` if the sample count is invalid.
+#[derive(Clone, Debug, Error)]
+pub enum CreateRenderBundleError {
+ #[error("invalid number of samples {0}")]
+ InvalidSampleCount(u32),
+}
+
+/// Error type returned from `RenderBundle::execute` when one of the used buffers has been destroyed.
+#[derive(Clone, Debug, Error)]
+pub enum ExecutionError {
+ #[error("buffer {0:?} is destroyed")]
+ DestroyedBuffer(id::BufferId),
+}
+
+pub type RenderBundleDescriptor<'a> = wgt::RenderBundleDescriptor<Label<'a>>;
+
+//Note: here, `RenderBundle` is just wrapping a raw stream of render commands.
+// The plan is to back it by an actual Vulkan secondary buffer, D3D12 Bundle,
+// or Metal indirect command buffer.
+#[derive(Debug)]
+pub struct RenderBundle {
+ // Normalized command stream. It can be executed verbatim,
+ // without re-binding anything on the pipeline change.
+ base: BasePass<RenderCommand>,
+ pub(crate) device_id: Stored<id::DeviceId>,
+ pub(crate) used: TrackerSet,
+ pub(crate) context: RenderPassContext,
+ pub(crate) life_guard: LifeGuard,
+}
+
+unsafe impl Send for RenderBundle {}
+unsafe impl Sync for RenderBundle {}
+
+impl RenderBundle {
+ #[cfg(feature = "trace")]
+ pub(crate) fn to_base_pass(&self) -> BasePass<RenderCommand> {
+ BasePass::from_ref(self.base.as_ref())
+ }
+
+ /// Actually encode the contents into a native command buffer.
+ ///
+ /// This partially duplicates the logic of `command_encoder_run_render_pass`.
+ /// However, the point of this function is to be lighter, since we already had
+ /// a chance to go through the commands in `render_bundle_encoder_finish`.
+ ///
+ /// Note that the function isn't generally expected to fail: all the
+ /// validation has already been done by this point. The only failure
+ /// condition is when some of the used buffers have been destroyed.
+ pub(crate) unsafe fn execute<B: GfxBackend>(
+ &self,
+ cmd_buf: &mut B::CommandBuffer,
+ pipeline_layout_guard: &Storage<
+ crate::binding_model::PipelineLayout<B>,
+ id::PipelineLayoutId,
+ >,
+ bind_group_guard: &Storage<crate::binding_model::BindGroup<B>, id::BindGroupId>,
+ pipeline_guard: &Storage<crate::pipeline::RenderPipeline<B>, id::RenderPipelineId>,
+ buffer_guard: &Storage<crate::resource::Buffer<B>, id::BufferId>,
+ ) -> Result<(), ExecutionError> {
+ use hal::command::CommandBuffer as _;
+
+ let mut offsets = self.base.dynamic_offsets.as_slice();
+ let mut index_type = hal::IndexType::U16;
+ let mut pipeline_layout_id = None::<id::Valid<id::PipelineLayoutId>>;
+
+ for command in self.base.commands.iter() {
+ match *command {
+ RenderCommand::SetBindGroup {
+ index,
+ num_dynamic_offsets,
+ bind_group_id,
+ } => {
+ let bind_group = bind_group_guard.get(bind_group_id).unwrap();
+ cmd_buf.bind_graphics_descriptor_sets(
+ &pipeline_layout_guard[pipeline_layout_id.unwrap()].raw,
+ index as usize,
+ iter::once(bind_group.raw.raw()),
+ &offsets[..num_dynamic_offsets as usize],
+ );
+ offsets = &offsets[num_dynamic_offsets as usize..];
+ }
+ RenderCommand::SetPipeline(pipeline_id) => {
+ let pipeline = pipeline_guard.get(pipeline_id).unwrap();
+ cmd_buf.bind_graphics_pipeline(&pipeline.raw);
+ index_type = conv::map_index_format(pipeline.index_format);
+ pipeline_layout_id = Some(pipeline.layout_id.value);
+ }
+ RenderCommand::SetIndexBuffer {
+ buffer_id,
+ offset,
+ size,
+ } => {
+ let &(ref buffer, _) = buffer_guard
+ .get(buffer_id)
+ .unwrap()
+ .raw
+ .as_ref()
+ .ok_or(ExecutionError::DestroyedBuffer(buffer_id))?;
+ let range = hal::buffer::SubRange {
+ offset,
+ size: size.map(|s| s.get()),
+ };
+ cmd_buf.bind_index_buffer(buffer, range, index_type);
+ }
+ RenderCommand::SetVertexBuffer {
+ slot,
+ buffer_id,
+ offset,
+ size,
+ } => {
+ let &(ref buffer, _) = buffer_guard
+ .get(buffer_id)
+ .unwrap()
+ .raw
+ .as_ref()
+ .ok_or(ExecutionError::DestroyedBuffer(buffer_id))?;
+ let range = hal::buffer::SubRange {
+ offset,
+ size: size.map(|s| s.get()),
+ };
+ cmd_buf.bind_vertex_buffers(slot, iter::once((buffer, range)));
+ }
+ RenderCommand::SetPushConstant {
+ stages,
+ offset,
+ size_bytes,
+ values_offset,
+ } => {
+ let pipeline_layout_id = pipeline_layout_id.unwrap();
+ let pipeline_layout = &pipeline_layout_guard[pipeline_layout_id];
+
+ if let Some(values_offset) = values_offset {
+ let values_end_offset =
+ (values_offset + size_bytes / wgt::PUSH_CONSTANT_ALIGNMENT) as usize;
+ let data_slice = &self.base.push_constant_data
+ [(values_offset as usize)..values_end_offset];
+
+ cmd_buf.push_graphics_constants(
+ &pipeline_layout.raw,
+ conv::map_shader_stage_flags(stages),
+ offset,
+ &data_slice,
+ )
+ } else {
+ super::push_constant_clear(
+ offset,
+ size_bytes,
+ |clear_offset, clear_data| {
+ cmd_buf.push_graphics_constants(
+ &pipeline_layout.raw,
+ conv::map_shader_stage_flags(stages),
+ clear_offset,
+ clear_data,
+ );
+ },
+ );
+ }
+ }
+ RenderCommand::Draw {
+ vertex_count,
+ instance_count,
+ first_vertex,
+ first_instance,
+ } => {
+ cmd_buf.draw(
+ first_vertex..first_vertex + vertex_count,
+ first_instance..first_instance + instance_count,
+ );
+ }
+ RenderCommand::DrawIndexed {
+ index_count,
+ instance_count,
+ first_index,
+ base_vertex,
+ first_instance,
+ } => {
+ cmd_buf.draw_indexed(
+ first_index..first_index + index_count,
+ base_vertex,
+ first_instance..first_instance + instance_count,
+ );
+ }
+ RenderCommand::MultiDrawIndirect {
+ buffer_id,
+ offset,
+ count: None,
+ indexed: false,
+ } => {
+ let &(ref buffer, _) = buffer_guard
+ .get(buffer_id)
+ .unwrap()
+ .raw
+ .as_ref()
+ .ok_or(ExecutionError::DestroyedBuffer(buffer_id))?;
+ cmd_buf.draw_indirect(buffer, offset, 1, 0);
+ }
+ RenderCommand::MultiDrawIndirect {
+ buffer_id,
+ offset,
+ count: None,
+ indexed: true,
+ } => {
+ let &(ref buffer, _) = buffer_guard
+ .get(buffer_id)
+ .unwrap()
+ .raw
+ .as_ref()
+ .ok_or(ExecutionError::DestroyedBuffer(buffer_id))?;
+ cmd_buf.draw_indexed_indirect(buffer, offset, 1, 0);
+ }
+ RenderCommand::MultiDrawIndirect { .. }
+ | RenderCommand::MultiDrawIndirectCount { .. } => unimplemented!(),
+ RenderCommand::PushDebugGroup { color: _, len: _ } => unimplemented!(),
+ RenderCommand::InsertDebugMarker { color: _, len: _ } => unimplemented!(),
+ RenderCommand::PopDebugGroup => unimplemented!(),
+ RenderCommand::ExecuteBundle(_)
+ | RenderCommand::SetBlendColor(_)
+ | RenderCommand::SetStencilReference(_)
+ | RenderCommand::SetViewport { .. }
+ | RenderCommand::SetScissor(_) => unreachable!(),
+ }
+ }
+
+ Ok(())
+ }
+}
+
+impl Resource for RenderBundle {
+ const TYPE: &'static str = "RenderBundle";
+
+ fn life_guard(&self) -> &LifeGuard {
+ &self.life_guard
+ }
+}
+
+#[derive(Debug)]
+struct IndexState {
+ buffer: Option<id::BufferId>,
+ format: wgt::IndexFormat,
+ range: Range<wgt::BufferAddress>,
+ is_dirty: bool,
+}
+
+impl IndexState {
+ fn new() -> Self {
+ Self {
+ buffer: None,
+ format: wgt::IndexFormat::default(),
+ range: 0..0,
+ is_dirty: false,
+ }
+ }
+
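+ // Worked example: a 24-byte range of `Uint16` indices (2 bytes each)
+ // yields a limit of 12 indices; the same range of `Uint32` indices
+ // yields 6.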
+ fn limit(&self) -> u32 {
+ assert!(self.buffer.is_some());
+ let bytes_per_index = match self.format {
+ wgt::IndexFormat::Uint16 => 2,
+ wgt::IndexFormat::Uint32 => 4,
+ };
+ ((self.range.end - self.range.start) / bytes_per_index) as u32
+ }
+
+ fn flush(&mut self) -> Option<RenderCommand> {
+ if self.is_dirty {
+ self.is_dirty = false;
+ Some(RenderCommand::SetIndexBuffer {
+ buffer_id: self.buffer.unwrap(),
+ offset: self.range.start,
+ size: wgt::BufferSize::new(self.range.end - self.range.start),
+ })
+ } else {
+ None
+ }
+ }
+
+ fn set_format(&mut self, format: wgt::IndexFormat) {
+ if self.format != format {
+ self.format = format;
+ self.is_dirty = true;
+ }
+ }
+
+ fn set_buffer(&mut self, id: id::BufferId, range: Range<wgt::BufferAddress>) {
+ self.buffer = Some(id);
+ self.range = range;
+ self.is_dirty = true;
+ }
+}
+
+#[derive(Debug)]
+struct VertexState {
+ buffer: Option<id::BufferId>,
+ range: Range<wgt::BufferAddress>,
+ stride: wgt::BufferAddress,
+ rate: wgt::InputStepMode,
+ is_dirty: bool,
+}
+
+impl VertexState {
+ fn new() -> Self {
+ Self {
+ buffer: None,
+ range: 0..0,
+ stride: 0,
+ rate: wgt::InputStepMode::Vertex,
+ is_dirty: false,
+ }
+ }
+
+ fn set_buffer(&mut self, buffer_id: id::BufferId, range: Range<wgt::BufferAddress>) {
+ self.buffer = Some(buffer_id);
+ self.range = range;
+ self.is_dirty = true;
+ }
+
+ fn flush(&mut self, slot: u32) -> Option<RenderCommand> {
+ if self.is_dirty {
+ self.is_dirty = false;
+ Some(RenderCommand::SetVertexBuffer {
+ slot,
+ buffer_id: self.buffer.unwrap(),
+ offset: self.range.start,
+ size: wgt::BufferSize::new(self.range.end - self.range.start),
+ })
+ } else {
+ None
+ }
+ }
+}
+
+#[derive(Debug)]
+struct BindState {
+ bind_group: Option<(id::BindGroupId, id::BindGroupLayoutId)>,
+ dynamic_offsets: Range<usize>,
+ is_dirty: bool,
+}
+
+impl BindState {
+ fn new() -> Self {
+ Self {
+ bind_group: None,
+ dynamic_offsets: 0..0,
+ is_dirty: false,
+ }
+ }
+
+ fn set_group(
+ &mut self,
+ bind_group_id: id::BindGroupId,
+ layout_id: id::BindGroupLayoutId,
+ dyn_offset: usize,
+ dyn_count: usize,
+ ) -> bool {
+ match self.bind_group {
+ Some((bg_id, _)) if bg_id == bind_group_id && dyn_count == 0 => false,
+ _ => {
+ self.bind_group = Some((bind_group_id, layout_id));
+ self.dynamic_offsets = dyn_offset..dyn_offset + dyn_count;
+ self.is_dirty = true;
+ true
+ }
+ }
+ }
+}
+
+#[derive(Debug)]
+struct PushConstantState {
+ ranges: ArrayVec<[wgt::PushConstantRange; SHADER_STAGE_COUNT]>,
+ is_dirty: bool,
+}
+impl PushConstantState {
+ fn new() -> Self {
+ Self {
+ ranges: ArrayVec::new(),
+ is_dirty: false,
+ }
+ }
+
+ fn set_push_constants(&mut self, new_ranges: &[wgt::PushConstantRange]) -> bool {
+ if &*self.ranges != new_ranges {
+ self.ranges = new_ranges.iter().cloned().collect();
+ self.is_dirty = true;
+ true
+ } else {
+ false
+ }
+ }
+}
+
+#[derive(Debug)]
+struct State {
+ trackers: TrackerSet,
+ index: IndexState,
+ vertex: ArrayVec<[VertexState; MAX_VERTEX_BUFFERS]>,
+ bind: ArrayVec<[BindState; MAX_BIND_GROUPS]>,
+ push_constant_ranges: PushConstantState,
+ raw_dynamic_offsets: Vec<wgt::DynamicOffset>,
+ flat_dynamic_offsets: Vec<wgt::DynamicOffset>,
+ used_bind_groups: usize,
+ pipeline: StateChange<id::RenderPipelineId>,
+}
+
+impl State {
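+ // Worked example: a vertex buffer bound with a 64-byte range and a
+ // 16-byte stride at `InputStepMode::Vertex` caps `vertex_limit` at 4;
+ // instance-rate buffers constrain `instance_limit` the same way. The
+ // initial `!0` (all bits set) means "unbounded".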
+ fn vertex_limits(&self) -> (u32, u32) {
+ let mut vertex_limit = !0;
+ let mut instance_limit = !0;
+ for vbs in &self.vertex {
+ if vbs.stride == 0 {
+ continue;
+ }
+ let limit = ((vbs.range.end - vbs.range.start) / vbs.stride) as u32;
+ match vbs.rate {
+ wgt::InputStepMode::Vertex => vertex_limit = vertex_limit.min(limit),
+ wgt::InputStepMode::Instance => instance_limit = instance_limit.min(limit),
+ }
+ }
+ (vertex_limit, instance_limit)
+ }
+
+ fn invalidate_group_from(&mut self, slot: usize) {
+ for bind in self.bind[slot..].iter_mut() {
+ if bind.bind_group.is_some() {
+ bind.is_dirty = true;
+ }
+ }
+ }
+
+ fn set_bind_group(
+ &mut self,
+ slot: u8,
+ bind_group_id: id::BindGroupId,
+ layout_id: id::Valid<id::BindGroupLayoutId>,
+ offsets: &[wgt::DynamicOffset],
+ ) {
+ if self.bind[slot as usize].set_group(
+ bind_group_id,
+ layout_id.0,
+ self.raw_dynamic_offsets.len(),
+ offsets.len(),
+ ) {
+ self.invalidate_group_from(slot as usize + 1);
+ }
+ self.raw_dynamic_offsets.extend(offsets);
+ }
+
+ fn set_pipeline(
+ &mut self,
+ index_format: wgt::IndexFormat,
+ vertex_strides: &[(wgt::BufferAddress, wgt::InputStepMode)],
+ layout_ids: &[id::Valid<id::BindGroupLayoutId>],
+ push_constant_layouts: &[wgt::PushConstantRange],
+ ) {
+ self.index.set_format(index_format);
+ for (vs, &(stride, step_mode)) in self.vertex.iter_mut().zip(vertex_strides) {
+ if vs.stride != stride || vs.rate != step_mode {
+ vs.stride = stride;
+ vs.rate = step_mode;
+ vs.is_dirty = true;
+ }
+ }
+
+ let push_constants_changed = self
+ .push_constant_ranges
+ .set_push_constants(push_constant_layouts);
+
+ self.used_bind_groups = layout_ids.len();
+ let invalid_from = if push_constants_changed {
+ Some(0)
+ } else {
+ self.bind
+ .iter()
+ .zip(layout_ids)
+ .position(|(bs, layout_id)| match bs.bind_group {
+ Some((_, bgl_id)) => bgl_id != layout_id.0,
+ None => false,
+ })
+ };
+ if let Some(slot) = invalid_from {
+ self.invalidate_group_from(slot);
+ }
+ }
+
+ fn flush_push_constants(&mut self) -> Option<impl Iterator<Item = RenderCommand>> {
+ let is_dirty = self.push_constant_ranges.is_dirty;
+
+ if is_dirty {
+ let nonoverlapping_ranges =
+ super::bind::compute_nonoverlapping_ranges(&self.push_constant_ranges.ranges);
+
+ Some(
+ nonoverlapping_ranges
+ .into_iter()
+ .map(|range| RenderCommand::SetPushConstant {
+ stages: range.stages,
+ offset: range.range.start,
+ size_bytes: range.range.end - range.range.start,
+ values_offset: None,
+ }),
+ )
+ } else {
+ None
+ }
+ }
+
+ fn flush_vertices(&mut self) -> impl Iterator<Item = RenderCommand> + '_ {
+ self.vertex
+ .iter_mut()
+ .enumerate()
+ .flat_map(|(i, vs)| vs.flush(i as u32))
+ }
+
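+ // Note: `raw_dynamic_offsets` holds the offsets in recording order, while
+ // `flat_dynamic_offsets` receives only the slices that belong to bind
+ // groups actually re-emitted below, in the same order as the emitted
+ // `SetBindGroup` commands.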
+ fn flush_binds(&mut self) -> impl Iterator<Item = RenderCommand> + '_ {
+ for bs in self.bind[..self.used_bind_groups].iter() {
+ if bs.is_dirty {
+ self.flat_dynamic_offsets
+ .extend_from_slice(&self.raw_dynamic_offsets[bs.dynamic_offsets.clone()]);
+ }
+ }
+ self.bind
+ .iter_mut()
+ .take(self.used_bind_groups)
+ .enumerate()
+ .flat_map(|(i, bs)| {
+ if bs.is_dirty {
+ bs.is_dirty = false;
+ Some(RenderCommand::SetBindGroup {
+ index: i as u8,
+ bind_group_id: bs.bind_group.unwrap().0,
+ num_dynamic_offsets: (bs.dynamic_offsets.end - bs.dynamic_offsets.start)
+ as u8,
+ })
+ } else {
+ None
+ }
+ })
+ }
+}
+
+/// Error encountered when finishing recording a render bundle.
+#[derive(Clone, Debug, Error)]
+pub(super) enum RenderBundleErrorInner {
+ #[error(transparent)]
+ Device(#[from] DeviceError),
+ #[error(transparent)]
+ RenderCommand(RenderCommandError),
+ #[error(transparent)]
+ ResourceUsageConflict(#[from] UsageConflict),
+ #[error(transparent)]
+ Draw(#[from] DrawError),
+}
+
+impl<T> From<T> for RenderBundleErrorInner
+where
+ T: Into<RenderCommandError>,
+{
+ fn from(t: T) -> Self {
+ Self::RenderCommand(t.into())
+ }
+}
+
+/// Error encountered when finishing recording a render bundle.
+#[derive(Clone, Debug, Error)]
+#[error("{scope}")]
+pub struct RenderBundleError {
+ pub scope: PassErrorScope,
+ #[source]
+ inner: RenderBundleErrorInner,
+}
+
+impl RenderBundleError {
+ pub(crate) const INVALID_DEVICE: Self = RenderBundleError {
+ scope: PassErrorScope::Bundle,
+ inner: RenderBundleErrorInner::Device(DeviceError::Invalid),
+ };
+}
+
+impl<T, E> MapPassErr<T, RenderBundleError> for Result<T, E>
+where
+ E: Into<RenderBundleErrorInner>,
+{
+ fn map_pass_err(self, scope: PassErrorScope) -> Result<T, RenderBundleError> {
+ self.map_err(|inner| RenderBundleError {
+ scope,
+ inner: inner.into(),
+ })
+ }
+}
+
+pub mod bundle_ffi {
+ use super::{RenderBundleEncoder, RenderCommand};
+ use crate::{id, span, RawString};
+ use std::{convert::TryInto, slice};
+ use wgt::{BufferAddress, BufferSize, DynamicOffset};
+
+ /// # Safety
+ ///
+ /// This function is unsafe as there is no guarantee that the given pointer is
+ /// valid for `offset_length` elements.
+ // TODO: There might be other safety issues, such as using the unsafe
+ // `RawPass::encode` and `RawPass::encode_slice`.
+ #[no_mangle]
+ pub unsafe extern "C" fn wgpu_render_bundle_set_bind_group(
+ bundle: &mut RenderBundleEncoder,
+ index: u32,
+ bind_group_id: id::BindGroupId,
+ offsets: *const DynamicOffset,
+ offset_length: usize,
+ ) {
+ span!(_guard, DEBUG, "RenderBundle::set_bind_group");
+ bundle.base.commands.push(RenderCommand::SetBindGroup {
+ index: index.try_into().unwrap(),
+ num_dynamic_offsets: offset_length.try_into().unwrap(),
+ bind_group_id,
+ });
+ bundle
+ .base
+ .dynamic_offsets
+ .extend_from_slice(slice::from_raw_parts(offsets, offset_length));
+ }
+
+ #[no_mangle]
+ pub extern "C" fn wgpu_render_bundle_set_pipeline(
+ bundle: &mut RenderBundleEncoder,
+ pipeline_id: id::RenderPipelineId,
+ ) {
+ span!(_guard, DEBUG, "RenderBundle::set_pipeline");
+ bundle
+ .base
+ .commands
+ .push(RenderCommand::SetPipeline(pipeline_id));
+ }
+
+ #[no_mangle]
+ pub extern "C" fn wgpu_render_bundle_set_index_buffer(
+ bundle: &mut RenderBundleEncoder,
+ buffer_id: id::BufferId,
+ offset: BufferAddress,
+ size: Option<BufferSize>,
+ ) {
+ span!(_guard, DEBUG, "RenderBundle::set_index_buffer");
+ bundle.base.commands.push(RenderCommand::SetIndexBuffer {
+ buffer_id,
+ offset,
+ size,
+ });
+ }
+
+ #[no_mangle]
+ pub extern "C" fn wgpu_render_bundle_set_vertex_buffer(
+ bundle: &mut RenderBundleEncoder,
+ slot: u32,
+ buffer_id: id::BufferId,
+ offset: BufferAddress,
+ size: Option<BufferSize>,
+ ) {
+ span!(_guard, DEBUG, "RenderBundle::set_vertex_buffer");
+ bundle.base.commands.push(RenderCommand::SetVertexBuffer {
+ slot,
+ buffer_id,
+ offset,
+ size,
+ });
+ }
+
+ #[no_mangle]
+ pub unsafe extern "C" fn wgpu_render_bundle_set_push_constants(
+ pass: &mut RenderBundleEncoder,
+ stages: wgt::ShaderStage,
+ offset: u32,
+ size_bytes: u32,
+ data: *const u8,
+ ) {
+ span!(_guard, DEBUG, "RenderBundle::set_push_constants");
+ assert_eq!(
+ offset & (wgt::PUSH_CONSTANT_ALIGNMENT - 1),
+ 0,
+ "Push constant offset must be aligned to 4 bytes."
+ );
+ assert_eq!(
+ size_bytes & (wgt::PUSH_CONSTANT_ALIGNMENT - 1),
+ 0,
+ "Push constant size must be aligned to 4 bytes."
+ );
+ let data_slice = slice::from_raw_parts(data, size_bytes as usize);
+ let value_offset = pass.base.push_constant_data.len().try_into().expect(
+ "Ran out of push constant space. Don't set 4gb of push constants per RenderBundle.",
+ );
+
+ pass.base.push_constant_data.extend(
+ data_slice
+ .chunks_exact(wgt::PUSH_CONSTANT_ALIGNMENT as usize)
+ .map(|arr| u32::from_ne_bytes([arr[0], arr[1], arr[2], arr[3]])),
+ );
+
+ pass.base.commands.push(RenderCommand::SetPushConstant {
+ stages,
+ offset,
+ size_bytes,
+ values_offset: Some(value_offset),
+ });
+ }
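+
+ // A minimal sketch of the byte-to-word packing performed above, with
+ // hypothetical input data:
+ //
+ // let data = [1u8, 0, 0, 0, 2, 0, 0, 0];
+ // let words: Vec<u32> = data
+ // .chunks_exact(4)
+ // .map(|a| u32::from_ne_bytes([a[0], a[1], a[2], a[3]]))
+ // .collect();
+ // assert_eq!(words, vec![1, 2]); // on a little-endian target
+ //
+ // The `offset & (PUSH_CONSTANT_ALIGNMENT - 1)` checks above rely on the
+ // alignment (4) being a power of two.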
+
+ #[no_mangle]
+ pub extern "C" fn wgpu_render_bundle_draw(
+ bundle: &mut RenderBundleEncoder,
+ vertex_count: u32,
+ instance_count: u32,
+ first_vertex: u32,
+ first_instance: u32,
+ ) {
+ span!(_guard, DEBUG, "RenderBundle::draw");
+ bundle.base.commands.push(RenderCommand::Draw {
+ vertex_count,
+ instance_count,
+ first_vertex,
+ first_instance,
+ });
+ }
+
+ #[no_mangle]
+ pub extern "C" fn wgpu_render_bundle_draw_indexed(
+ bundle: &mut RenderBundleEncoder,
+ index_count: u32,
+ instance_count: u32,
+ first_index: u32,
+ base_vertex: i32,
+ first_instance: u32,
+ ) {
+ span!(_guard, DEBUG, "RenderBundle::draw_indexed");
+ bundle.base.commands.push(RenderCommand::DrawIndexed {
+ index_count,
+ instance_count,
+ first_index,
+ base_vertex,
+ first_instance,
+ });
+ }
+
+ #[no_mangle]
+ pub extern "C" fn wgpu_render_bundle_draw_indirect(
+ bundle: &mut RenderBundleEncoder,
+ buffer_id: id::BufferId,
+ offset: BufferAddress,
+ ) {
+ span!(_guard, DEBUG, "RenderBundle::draw_indirect");
+ bundle.base.commands.push(RenderCommand::MultiDrawIndirect {
+ buffer_id,
+ offset,
+ count: None,
+ indexed: false,
+ });
+ }
+
+ #[no_mangle]
+ pub extern "C" fn wgpu_render_pass_bundle_indexed_indirect(
+ bundle: &mut RenderBundleEncoder,
+ buffer_id: id::BufferId,
+ offset: BufferAddress,
+ ) {
+ span!(_guard, DEBUG, "RenderBundle::draw_indexed_indirect");
+ bundle.base.commands.push(RenderCommand::MultiDrawIndirect {
+ buffer_id,
+ offset,
+ count: None,
+ indexed: true,
+ });
+ }
+
+ #[no_mangle]
+ pub unsafe extern "C" fn wgpu_render_bundle_push_debug_group(
+ _bundle: &mut RenderBundleEncoder,
+ _label: RawString,
+ ) {
+ span!(_guard, DEBUG, "RenderBundle::push_debug_group");
+ //TODO
+ }
+
+ #[no_mangle]
+ pub unsafe extern "C" fn wgpu_render_bundle_pop_debug_group(_bundle: &mut RenderBundleEncoder) {
+ span!(_guard, DEBUG, "RenderBundle::pop_debug_group");
+ //TODO
+ }
+
+ #[no_mangle]
+ pub unsafe extern "C" fn wgpu_render_bundle_insert_debug_marker(
+ _bundle: &mut RenderBundleEncoder,
+ _label: RawString,
+ ) {
+ span!(_guard, DEBUG, "RenderBundle::insert_debug_marker");
+ //TODO
+ }
+}
diff --git a/gfx/wgpu/wgpu-core/src/command/compute.rs b/gfx/wgpu/wgpu-core/src/command/compute.rs
new file mode 100644
index 0000000000..1ab9df2516
--- /dev/null
+++ b/gfx/wgpu/wgpu-core/src/command/compute.rs
@@ -0,0 +1,657 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+use crate::{
+ binding_model::{BindError, BindGroup, PushConstantUploadError},
+ command::{
+ bind::{Binder, LayoutChange},
+ BasePass, BasePassRef, CommandBuffer, CommandEncoderError, MapPassErr, PassErrorScope,
+ StateChange,
+ },
+ hub::{GfxBackend, Global, GlobalIdentityHandlerFactory, Storage, Token},
+ id,
+ resource::{Buffer, BufferUse, Texture},
+ span,
+ track::{TrackerSet, UsageConflict},
+ validation::{check_buffer_usage, MissingBufferUsageError},
+ MAX_BIND_GROUPS,
+};
+
+use arrayvec::ArrayVec;
+use hal::command::CommandBuffer as _;
+use thiserror::Error;
+use wgt::{BufferAddress, BufferUsage, ShaderStage};
+
+use std::{fmt, iter, str};
+
+#[doc(hidden)]
+#[derive(Clone, Copy, Debug)]
+#[cfg_attr(
+ any(feature = "serial-pass", feature = "trace"),
+ derive(serde::Serialize)
+)]
+#[cfg_attr(
+ any(feature = "serial-pass", feature = "replay"),
+ derive(serde::Deserialize)
+)]
+pub enum ComputeCommand {
+ SetBindGroup {
+ index: u8,
+ num_dynamic_offsets: u8,
+ bind_group_id: id::BindGroupId,
+ },
+ SetPipeline(id::ComputePipelineId),
+ SetPushConstant {
+ offset: u32,
+ size_bytes: u32,
+ values_offset: u32,
+ },
+ Dispatch([u32; 3]),
+ DispatchIndirect {
+ buffer_id: id::BufferId,
+ offset: BufferAddress,
+ },
+ PushDebugGroup {
+ color: u32,
+ len: usize,
+ },
+ PopDebugGroup,
+ InsertDebugMarker {
+ color: u32,
+ len: usize,
+ },
+}
+
+#[cfg_attr(feature = "serial-pass", derive(serde::Deserialize, serde::Serialize))]
+pub struct ComputePass {
+ base: BasePass<ComputeCommand>,
+ parent_id: id::CommandEncoderId,
+}
+
+impl ComputePass {
+ pub fn new(parent_id: id::CommandEncoderId) -> Self {
+ Self {
+ base: BasePass::new(),
+ parent_id,
+ }
+ }
+
+ pub fn parent_id(&self) -> id::CommandEncoderId {
+ self.parent_id
+ }
+
+ #[cfg(feature = "trace")]
+ pub fn into_command(self) -> crate::device::trace::Command {
+ crate::device::trace::Command::RunComputePass { base: self.base }
+ }
+}
+
+impl fmt::Debug for ComputePass {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(
+ f,
+ "ComputePass {{ encoder_id: {:?}, data: {:?} commands and {:?} dynamic offsets }}",
+ self.parent_id,
+ self.base.commands.len(),
+ self.base.dynamic_offsets.len()
+ )
+ }
+}
+
+#[repr(C)]
+#[derive(Clone, Debug, Default)]
+pub struct ComputePassDescriptor {
+ pub todo: u32,
+}
+
+#[derive(Clone, Debug, Error, PartialEq)]
+pub enum DispatchError {
+ #[error("compute pipeline must be set")]
+ MissingPipeline,
+ #[error("current compute pipeline has a layout which is incompatible with a currently set bind group, first differing at entry index {index}")]
+ IncompatibleBindGroup {
+ index: u32,
+ //expected: BindGroupLayoutId,
+ //provided: Option<(BindGroupLayoutId, BindGroupId)>,
+ },
+}
+
+/// Error encountered when performing a compute pass.
+#[derive(Clone, Debug, Error)]
+pub enum ComputePassErrorInner {
+ #[error(transparent)]
+ Encoder(#[from] CommandEncoderError),
+ #[error("bind group {0:?} is invalid")]
+ InvalidBindGroup(id::BindGroupId),
+ #[error("bind group index {index} is greater than the device's requested `max_bind_group` limit {max}")]
+ BindGroupIndexOutOfRange { index: u8, max: u32 },
+ #[error("compute pipeline {0:?} is invalid")]
+ InvalidPipeline(id::ComputePipelineId),
+ #[error("indirect buffer {0:?} is invalid or destroyed")]
+ InvalidIndirectBuffer(id::BufferId),
+ #[error(transparent)]
+ ResourceUsageConflict(#[from] UsageConflict),
+ #[error(transparent)]
+ MissingBufferUsage(#[from] MissingBufferUsageError),
+ #[error("cannot pop debug group, because number of pushed debug groups is zero")]
+ InvalidPopDebugGroup,
+ #[error(transparent)]
+ Dispatch(#[from] DispatchError),
+ #[error(transparent)]
+ Bind(#[from] BindError),
+ #[error(transparent)]
+ PushConstants(#[from] PushConstantUploadError),
+}
+
+/// Error encountered when performing a compute pass.
+#[derive(Clone, Debug, Error)]
+#[error("{scope}")]
+pub struct ComputePassError {
+ pub scope: PassErrorScope,
+ #[source]
+ inner: ComputePassErrorInner,
+}
+
+impl<T, E> MapPassErr<T, ComputePassError> for Result<T, E>
+where
+ E: Into<ComputePassErrorInner>,
+{
+ fn map_pass_err(self, scope: PassErrorScope) -> Result<T, ComputePassError> {
+ self.map_err(|inner| ComputePassError {
+ scope,
+ inner: inner.into(),
+ })
+ }
+}
+
+#[derive(Debug)]
+struct State {
+ binder: Binder,
+ pipeline: StateChange<id::ComputePipelineId>,
+ trackers: TrackerSet,
+ debug_scope_depth: u32,
+}
+
+impl State {
+ fn is_ready(&self) -> Result<(), DispatchError> {
+ //TODO: vertex buffers
+ let bind_mask = self.binder.invalid_mask();
+ if bind_mask != 0 {
+ //let (expected, provided) = self.binder.entries[index as usize].info();
+ return Err(DispatchError::IncompatibleBindGroup {
+ index: bind_mask.trailing_zeros(),
+ });
+ }
+ if self.pipeline.is_unset() {
+ return Err(DispatchError::MissingPipeline);
+ }
+ Ok(())
+ }
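+
+ // Note: `invalid_mask` is a bitmask of incompatible bind group slots, so
+ // `trailing_zeros` above reports the first offending index; e.g. a mask
+ // of 0b0100 yields index 2.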
+
+ fn flush_states<B: GfxBackend>(
+ &mut self,
+ raw_cmd_buf: &mut B::CommandBuffer,
+ base_trackers: &mut TrackerSet,
+ bind_group_guard: &Storage<BindGroup<B>, id::BindGroupId>,
+ buffer_guard: &Storage<Buffer<B>, id::BufferId>,
+ texture_guard: &Storage<Texture<B>, id::TextureId>,
+ ) -> Result<(), UsageConflict> {
+ for id in self.binder.list_active() {
+ self.trackers.merge_extend(&bind_group_guard[id].used)?;
+ }
+
+ tracing::trace!("Encoding dispatch barriers");
+
+ CommandBuffer::insert_barriers(
+ raw_cmd_buf,
+ base_trackers,
+ &self.trackers,
+ buffer_guard,
+ texture_guard,
+ );
+
+ self.trackers.clear();
+ Ok(())
+ }
+}
+
+// Common routines between render/compute
+
+impl<G: GlobalIdentityHandlerFactory> Global<G> {
+ pub fn command_encoder_run_compute_pass<B: GfxBackend>(
+ &self,
+ encoder_id: id::CommandEncoderId,
+ pass: &ComputePass,
+ ) -> Result<(), ComputePassError> {
+ self.command_encoder_run_compute_pass_impl::<B>(encoder_id, pass.base.as_ref())
+ }
+
+ #[doc(hidden)]
+ pub fn command_encoder_run_compute_pass_impl<B: GfxBackend>(
+ &self,
+ encoder_id: id::CommandEncoderId,
+ mut base: BasePassRef<ComputeCommand>,
+ ) -> Result<(), ComputePassError> {
+ span!(_guard, INFO, "CommandEncoder::run_compute_pass");
+ let scope = PassErrorScope::Pass(encoder_id);
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+
+ let (mut cmd_buf_guard, mut token) = hub.command_buffers.write(&mut token);
+ let cmd_buf =
+ CommandBuffer::get_encoder(&mut *cmd_buf_guard, encoder_id).map_pass_err(scope)?;
+ let raw = cmd_buf.raw.last_mut().unwrap();
+
+ #[cfg(feature = "trace")]
+ if let Some(ref mut list) = cmd_buf.commands {
+ list.push(crate::device::trace::Command::RunComputePass {
+ base: BasePass::from_ref(base),
+ });
+ }
+
+ let (_, mut token) = hub.render_bundles.read(&mut token);
+ let (pipeline_layout_guard, mut token) = hub.pipeline_layouts.read(&mut token);
+ let (bind_group_guard, mut token) = hub.bind_groups.read(&mut token);
+ let (pipeline_guard, mut token) = hub.compute_pipelines.read(&mut token);
+ let (buffer_guard, mut token) = hub.buffers.read(&mut token);
+ let (texture_guard, _) = hub.textures.read(&mut token);
+
+ let mut state = State {
+ binder: Binder::new(cmd_buf.limits.max_bind_groups),
+ pipeline: StateChange::new(),
+ trackers: TrackerSet::new(B::VARIANT),
+ debug_scope_depth: 0,
+ };
+ let mut temp_offsets = Vec::new();
+
+ for command in base.commands {
+ match *command {
+ ComputeCommand::SetBindGroup {
+ index,
+ num_dynamic_offsets,
+ bind_group_id,
+ } => {
+ let scope = PassErrorScope::SetBindGroup(bind_group_id);
+
+ let max_bind_groups = cmd_buf.limits.max_bind_groups;
+ if (index as u32) >= max_bind_groups {
+ return Err(ComputePassErrorInner::BindGroupIndexOutOfRange {
+ index,
+ max: max_bind_groups,
+ })
+ .map_pass_err(scope);
+ }
+
+ temp_offsets.clear();
+ temp_offsets
+ .extend_from_slice(&base.dynamic_offsets[..num_dynamic_offsets as usize]);
+ base.dynamic_offsets = &base.dynamic_offsets[num_dynamic_offsets as usize..];
+
+ let bind_group = cmd_buf
+ .trackers
+ .bind_groups
+ .use_extend(&*bind_group_guard, bind_group_id, (), ())
+ .map_err(|_| ComputePassErrorInner::InvalidBindGroup(bind_group_id))
+ .map_pass_err(scope)?;
+ bind_group
+ .validate_dynamic_bindings(&temp_offsets)
+ .map_pass_err(scope)?;
+
+ if let Some((pipeline_layout_id, follow_ups)) = state.binder.provide_entry(
+ index as usize,
+ id::Valid(bind_group_id),
+ bind_group,
+ &temp_offsets,
+ ) {
+ let bind_groups = iter::once(bind_group.raw.raw())
+ .chain(
+ follow_ups
+ .clone()
+ .map(|(bg_id, _)| bind_group_guard[bg_id].raw.raw()),
+ )
+ .collect::<ArrayVec<[_; MAX_BIND_GROUPS]>>();
+ temp_offsets.extend(follow_ups.flat_map(|(_, offsets)| offsets));
+ unsafe {
+ raw.bind_compute_descriptor_sets(
+ &pipeline_layout_guard[pipeline_layout_id].raw,
+ index as usize,
+ bind_groups,
+ &temp_offsets,
+ );
+ }
+ }
+ }
+ ComputeCommand::SetPipeline(pipeline_id) => {
+ let scope = PassErrorScope::SetPipelineCompute(pipeline_id);
+
+ if state.pipeline.set_and_check_redundant(pipeline_id) {
+ continue;
+ }
+
+ let pipeline = cmd_buf
+ .trackers
+ .compute_pipes
+ .use_extend(&*pipeline_guard, pipeline_id, (), ())
+ .map_err(|_| ComputePassErrorInner::InvalidPipeline(pipeline_id))
+ .map_pass_err(scope)?;
+
+ unsafe {
+ raw.bind_compute_pipeline(&pipeline.raw);
+ }
+
+ // Rebind resources
+ if state.binder.pipeline_layout_id != Some(pipeline.layout_id.value) {
+ let pipeline_layout = &pipeline_layout_guard[pipeline.layout_id.value];
+
+ state.binder.change_pipeline_layout(
+ &*pipeline_layout_guard,
+ pipeline.layout_id.value,
+ );
+
+ let mut is_compatible = true;
+
+ for (index, (entry, &bgl_id)) in state
+ .binder
+ .entries
+ .iter_mut()
+ .zip(&pipeline_layout.bind_group_layout_ids)
+ .enumerate()
+ {
+ match entry.expect_layout(bgl_id) {
+ LayoutChange::Match(bg_id, offsets) if is_compatible => {
+ let desc_set = bind_group_guard[bg_id].raw.raw();
+ unsafe {
+ raw.bind_compute_descriptor_sets(
+ &pipeline_layout.raw,
+ index,
+ iter::once(desc_set),
+ offsets.iter().cloned(),
+ );
+ }
+ }
+ LayoutChange::Match(..) | LayoutChange::Unchanged => {}
+ LayoutChange::Mismatch => {
+ is_compatible = false;
+ }
+ }
+ }
+
+ // Clear push constant ranges
+ let non_overlapping = super::bind::compute_nonoverlapping_ranges(
+ &pipeline_layout.push_constant_ranges,
+ );
+ for range in non_overlapping {
+ let offset = range.range.start;
+ let size_bytes = range.range.end - offset;
+ super::push_constant_clear(
+ offset,
+ size_bytes,
+ |clear_offset, clear_data| unsafe {
+ raw.push_compute_constants(
+ &pipeline_layout.raw,
+ clear_offset,
+ clear_data,
+ );
+ },
+ );
+ }
+ }
+ }
+ ComputeCommand::SetPushConstant {
+ offset,
+ size_bytes,
+ values_offset,
+ } => {
+ let scope = PassErrorScope::SetPushConstant;
+
+ let end_offset_bytes = offset + size_bytes;
+ let values_end_offset =
+ (values_offset + size_bytes / wgt::PUSH_CONSTANT_ALIGNMENT) as usize;
+ let data_slice =
+ &base.push_constant_data[(values_offset as usize)..values_end_offset];
+
+ let pipeline_layout_id = state
+ .binder
+ .pipeline_layout_id
+ //TODO: don't error here, lazily update the push constants
+ .ok_or(ComputePassErrorInner::Dispatch(
+ DispatchError::MissingPipeline,
+ ))
+ .map_pass_err(scope)?;
+ let pipeline_layout = &pipeline_layout_guard[pipeline_layout_id];
+
+ pipeline_layout
+ .validate_push_constant_ranges(
+ ShaderStage::COMPUTE,
+ offset,
+ end_offset_bytes,
+ )
+ .map_pass_err(scope)?;
+
+ unsafe { raw.push_compute_constants(&pipeline_layout.raw, offset, data_slice) }
+ }
+ ComputeCommand::Dispatch(groups) => {
+ let scope = PassErrorScope::Dispatch;
+
+ state.is_ready().map_pass_err(scope)?;
+ state
+ .flush_states(
+ raw,
+ &mut cmd_buf.trackers,
+ &*bind_group_guard,
+ &*buffer_guard,
+ &*texture_guard,
+ )
+ .map_pass_err(scope)?;
+ unsafe {
+ raw.dispatch(groups);
+ }
+ }
+ ComputeCommand::DispatchIndirect { buffer_id, offset } => {
+ let scope = PassErrorScope::DispatchIndirect;
+
+ state.is_ready().map_pass_err(scope)?;
+
+ let indirect_buffer = state
+ .trackers
+ .buffers
+ .use_extend(&*buffer_guard, buffer_id, (), BufferUse::INDIRECT)
+ .map_err(|_| ComputePassErrorInner::InvalidIndirectBuffer(buffer_id))
+ .map_pass_err(scope)?;
+ check_buffer_usage(indirect_buffer.usage, BufferUsage::INDIRECT)
+ .map_pass_err(scope)?;
+ let &(ref buf_raw, _) = indirect_buffer
+ .raw
+ .as_ref()
+ .ok_or(ComputePassErrorInner::InvalidIndirectBuffer(buffer_id))
+ .map_pass_err(scope)?;
+
+ state
+ .flush_states(
+ raw,
+ &mut cmd_buf.trackers,
+ &*bind_group_guard,
+ &*buffer_guard,
+ &*texture_guard,
+ )
+ .map_pass_err(scope)?;
+ unsafe {
+ raw.dispatch_indirect(buf_raw, offset);
+ }
+ }
+ ComputeCommand::PushDebugGroup { color, len } => {
+ state.debug_scope_depth += 1;
+
+ let label = str::from_utf8(&base.string_data[..len]).unwrap();
+ unsafe {
+ raw.begin_debug_marker(label, color);
+ }
+ base.string_data = &base.string_data[len..];
+ }
+ ComputeCommand::PopDebugGroup => {
+ let scope = PassErrorScope::PopDebugGroup;
+
+ if state.debug_scope_depth == 0 {
+ return Err(ComputePassErrorInner::InvalidPopDebugGroup)
+ .map_pass_err(scope);
+ }
+ state.debug_scope_depth -= 1;
+ unsafe {
+ raw.end_debug_marker();
+ }
+ }
+ ComputeCommand::InsertDebugMarker { color, len } => {
+ let label = str::from_utf8(&base.string_data[..len]).unwrap();
+ unsafe { raw.insert_debug_marker(label, color) }
+ base.string_data = &base.string_data[len..];
+ }
+ }
+ }
+
+ Ok(())
+ }
+}
+
+pub mod compute_ffi {
+ use super::{ComputeCommand, ComputePass};
+ use crate::{id, span, RawString};
+ use std::{convert::TryInto, ffi, slice};
+ use wgt::{BufferAddress, DynamicOffset};
+
+ /// # Safety
+ ///
+ /// This function is unsafe as there is no guarantee that the given pointer is
+ /// valid for `offset_length` elements.
+ // TODO: There might be other safety issues, such as using the unsafe
+ // `RawPass::encode` and `RawPass::encode_slice`.
+ #[no_mangle]
+ pub unsafe extern "C" fn wgpu_compute_pass_set_bind_group(
+ pass: &mut ComputePass,
+ index: u32,
+ bind_group_id: id::BindGroupId,
+ offsets: *const DynamicOffset,
+ offset_length: usize,
+ ) {
+ span!(_guard, DEBUG, "ComputePass::set_bind_group");
+ pass.base.commands.push(ComputeCommand::SetBindGroup {
+ index: index.try_into().unwrap(),
+ num_dynamic_offsets: offset_length.try_into().unwrap(),
+ bind_group_id,
+ });
+ pass.base
+ .dynamic_offsets
+ .extend_from_slice(slice::from_raw_parts(offsets, offset_length));
+ }
+
+ #[no_mangle]
+ pub extern "C" fn wgpu_compute_pass_set_pipeline(
+ pass: &mut ComputePass,
+ pipeline_id: id::ComputePipelineId,
+ ) {
+ span!(_guard, DEBUG, "ComputePass::set_pipeline");
+ pass.base
+ .commands
+ .push(ComputeCommand::SetPipeline(pipeline_id));
+ }
+
+ #[no_mangle]
+ pub unsafe extern "C" fn wgpu_compute_pass_set_push_constant(
+ pass: &mut ComputePass,
+ offset: u32,
+ size_bytes: u32,
+ data: *const u8,
+ ) {
+ span!(_guard, DEBUG, "ComputePass::set_push_constant");
+ assert_eq!(
+ offset & (wgt::PUSH_CONSTANT_ALIGNMENT - 1),
+ 0,
+ "Push constant offset must be aligned to 4 bytes."
+ );
+ assert_eq!(
+ size_bytes & (wgt::PUSH_CONSTANT_ALIGNMENT - 1),
+ 0,
+ "Push constant size must be aligned to 4 bytes."
+ );
+ let data_slice = slice::from_raw_parts(data, size_bytes as usize);
+ let value_offset = pass.base.push_constant_data.len().try_into().expect(
+ "Ran out of push constant space. Don't set 4gb of push constants per ComputePass.",
+ );
+
+ pass.base.push_constant_data.extend(
+ data_slice
+ .chunks_exact(wgt::PUSH_CONSTANT_ALIGNMENT as usize)
+ .map(|arr| u32::from_ne_bytes([arr[0], arr[1], arr[2], arr[3]])),
+ );
+
+ pass.base.commands.push(ComputeCommand::SetPushConstant {
+ offset,
+ size_bytes,
+ values_offset: value_offset,
+ });
+ }
+
+ #[no_mangle]
+ pub extern "C" fn wgpu_compute_pass_dispatch(
+ pass: &mut ComputePass,
+ groups_x: u32,
+ groups_y: u32,
+ groups_z: u32,
+ ) {
+ span!(_guard, DEBUG, "ComputePass::dispatch");
+ pass.base
+ .commands
+ .push(ComputeCommand::Dispatch([groups_x, groups_y, groups_z]));
+ }
+
+ #[no_mangle]
+ pub extern "C" fn wgpu_compute_pass_dispatch_indirect(
+ pass: &mut ComputePass,
+ buffer_id: id::BufferId,
+ offset: BufferAddress,
+ ) {
+ span!(_guard, DEBUG, "ComputePass::dispatch_indirect");
+ pass.base
+ .commands
+ .push(ComputeCommand::DispatchIndirect { buffer_id, offset });
+ }
+
+ #[no_mangle]
+ pub unsafe extern "C" fn wgpu_compute_pass_push_debug_group(
+ pass: &mut ComputePass,
+ label: RawString,
+ color: u32,
+ ) {
+ span!(_guard, DEBUG, "ComputePass::push_debug_group");
+ let bytes = ffi::CStr::from_ptr(label).to_bytes();
+ pass.base.string_data.extend_from_slice(bytes);
+
+ pass.base.commands.push(ComputeCommand::PushDebugGroup {
+ color,
+ len: bytes.len(),
+ });
+ }
+
+ #[no_mangle]
+ pub extern "C" fn wgpu_compute_pass_pop_debug_group(pass: &mut ComputePass) {
+ span!(_guard, DEBUG, "ComputePass::pop_debug_group");
+ pass.base.commands.push(ComputeCommand::PopDebugGroup);
+ }
+
+ #[no_mangle]
+ pub unsafe extern "C" fn wgpu_compute_pass_insert_debug_marker(
+ pass: &mut ComputePass,
+ label: RawString,
+ color: u32,
+ ) {
+ span!(_guard, DEBUG, "ComputePass::insert_debug_marker");
+ let bytes = ffi::CStr::from_ptr(label).to_bytes();
+ pass.base.string_data.extend_from_slice(bytes);
+
+ pass.base.commands.push(ComputeCommand::InsertDebugMarker {
+ color,
+ len: bytes.len(),
+ });
+ }
+}
diff --git a/gfx/wgpu/wgpu-core/src/command/draw.rs b/gfx/wgpu/wgpu-core/src/command/draw.rs
new file mode 100644
index 0000000000..30c19fef7f
--- /dev/null
+++ b/gfx/wgpu/wgpu-core/src/command/draw.rs
@@ -0,0 +1,180 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*! Draw structures - shared between render passes and bundles.
+!*/
+
+use crate::{
+ binding_model::PushConstantUploadError,
+ id,
+ resource::BufferUse,
+ track::UseExtendError,
+ validation::{MissingBufferUsageError, MissingTextureUsageError},
+};
+use wgt::{BufferAddress, BufferSize, Color};
+
+use std::num::NonZeroU32;
+use thiserror::Error;
+
+pub type BufferError = UseExtendError<BufferUse>;
+
+/// Error validating a draw call.
+#[derive(Clone, Debug, Error, PartialEq)]
+pub enum DrawError {
+ #[error("blend color needs to be set")]
+ MissingBlendColor,
+ #[error("render pipeline must be set")]
+ MissingPipeline,
+ #[error("current render pipeline has a layout which is incompatible with a currently set bind group, first differing at entry index {index}")]
+ IncompatibleBindGroup {
+ index: u32,
+ //expected: BindGroupLayoutId,
+ //provided: Option<(BindGroupLayoutId, BindGroupId)>,
+ },
+ #[error("vertex {last_vertex} extends beyond limit {vertex_limit}")]
+ VertexBeyondLimit { last_vertex: u32, vertex_limit: u32 },
+ #[error("instance {last_instance} extends beyond limit {instance_limit}")]
+ InstanceBeyondLimit {
+ last_instance: u32,
+ instance_limit: u32,
+ },
+ #[error("index {last_index} extends beyond limit {index_limit}")]
+ IndexBeyondLimit { last_index: u32, index_limit: u32 },
+}
+
+/// Error encountered when encoding a render command.
+/// This is the shared error set between render bundles and passes.
+#[derive(Clone, Debug, Error)]
+pub enum RenderCommandError {
+ #[error("bind group {0:?} is invalid")]
+ InvalidBindGroup(id::BindGroupId),
+    #[error("bind group index {index} is greater than the device's requested `max_bind_groups` limit {max}")]
+ BindGroupIndexOutOfRange { index: u8, max: u32 },
+ #[error("dynamic buffer offset {0} does not respect `BIND_BUFFER_ALIGNMENT`")]
+ UnalignedBufferOffset(u64),
+ #[error("number of buffer offsets ({actual}) does not match the number of dynamic bindings ({expected})")]
+ InvalidDynamicOffsetCount { actual: usize, expected: usize },
+ #[error("render pipeline {0:?} is invalid")]
+ InvalidPipeline(id::RenderPipelineId),
+ #[error("Render pipeline is incompatible with render pass")]
+ IncompatiblePipeline(#[from] crate::device::RenderPassCompatibilityError),
+ #[error("pipeline is not compatible with the depth-stencil read-only render pass")]
+ IncompatibleReadOnlyDepthStencil,
+ #[error("buffer {0:?} is in error {1:?}")]
+ Buffer(id::BufferId, BufferError),
+ #[error("buffer {0:?} is destroyed")]
+ DestroyedBuffer(id::BufferId),
+ #[error(transparent)]
+ MissingBufferUsage(#[from] MissingBufferUsageError),
+ #[error(transparent)]
+ MissingTextureUsage(#[from] MissingTextureUsageError),
+ #[error(transparent)]
+ PushConstants(#[from] PushConstantUploadError),
+ #[error("Invalid Viewport parameters")]
+ InvalidViewport,
+ #[error("Invalid ScissorRect parameters")]
+ InvalidScissorRect,
+}
+
+#[derive(Clone, Copy, Debug, Default)]
+#[cfg_attr(
+ any(feature = "serial-pass", feature = "trace"),
+ derive(serde::Serialize)
+)]
+#[cfg_attr(
+ any(feature = "serial-pass", feature = "replay"),
+ derive(serde::Deserialize)
+)]
+pub struct Rect<T> {
+ pub x: T,
+ pub y: T,
+ pub w: T,
+ pub h: T,
+}
+
+#[doc(hidden)]
+#[derive(Clone, Copy, Debug)]
+#[cfg_attr(
+ any(feature = "serial-pass", feature = "trace"),
+ derive(serde::Serialize)
+)]
+#[cfg_attr(
+ any(feature = "serial-pass", feature = "replay"),
+ derive(serde::Deserialize)
+)]
+pub enum RenderCommand {
+ SetBindGroup {
+ index: u8,
+ num_dynamic_offsets: u8,
+ bind_group_id: id::BindGroupId,
+ },
+ SetPipeline(id::RenderPipelineId),
+ SetIndexBuffer {
+ buffer_id: id::BufferId,
+ offset: BufferAddress,
+ size: Option<BufferSize>,
+ },
+ SetVertexBuffer {
+ slot: u32,
+ buffer_id: id::BufferId,
+ offset: BufferAddress,
+ size: Option<BufferSize>,
+ },
+ SetBlendColor(Color),
+ SetStencilReference(u32),
+ SetViewport {
+ rect: Rect<f32>,
+ //TODO: use half-float to reduce the size?
+ depth_min: f32,
+ depth_max: f32,
+ },
+ SetScissor(Rect<u32>),
+ SetPushConstant {
+ stages: wgt::ShaderStage,
+ offset: u32,
+ size_bytes: u32,
+        /// `None` means there is no data, and the range should be treated as
+        /// an array of zeros.
+        ///
+        /// This facilitates clears in render bundles, which perform their
+        /// clears explicitly.
+ values_offset: Option<u32>,
+ },
+ Draw {
+ vertex_count: u32,
+ instance_count: u32,
+ first_vertex: u32,
+ first_instance: u32,
+ },
+ DrawIndexed {
+ index_count: u32,
+ instance_count: u32,
+ first_index: u32,
+ base_vertex: i32,
+ first_instance: u32,
+ },
+ MultiDrawIndirect {
+ buffer_id: id::BufferId,
+ offset: BufferAddress,
+        /// A count of `None` represents a plain, non-multi draw call.
+ count: Option<NonZeroU32>,
+ indexed: bool,
+ },
+ MultiDrawIndirectCount {
+ buffer_id: id::BufferId,
+ offset: BufferAddress,
+ count_buffer_id: id::BufferId,
+ count_buffer_offset: BufferAddress,
+ max_count: u32,
+ indexed: bool,
+ },
+ PushDebugGroup {
+ color: u32,
+ len: usize,
+ },
+ PopDebugGroup,
+ InsertDebugMarker {
+ color: u32,
+ len: usize,
+ },
+ ExecuteBundle(id::RenderBundleId),
+}
diff --git a/gfx/wgpu/wgpu-core/src/command/mod.rs b/gfx/wgpu/wgpu-core/src/command/mod.rs
new file mode 100644
index 0000000000..1093bde155
--- /dev/null
+++ b/gfx/wgpu/wgpu-core/src/command/mod.rs
@@ -0,0 +1,362 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+mod allocator;
+mod bind;
+mod bundle;
+mod compute;
+mod draw;
+mod render;
+mod transfer;
+
+pub(crate) use self::allocator::CommandAllocator;
+pub use self::allocator::CommandAllocatorError;
+pub use self::bundle::*;
+pub use self::compute::*;
+pub use self::draw::*;
+pub use self::render::*;
+pub use self::transfer::*;
+
+use crate::{
+ device::{all_buffer_stages, all_image_stages},
+ hub::{GfxBackend, Global, GlobalIdentityHandlerFactory, Storage, Token},
+ id,
+ resource::{Buffer, Texture},
+ span,
+ track::TrackerSet,
+ Label, PrivateFeatures, Stored,
+};
+
+use hal::command::CommandBuffer as _;
+use thiserror::Error;
+
+use std::thread::ThreadId;
+
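+// 64 zero words (256 bytes): the chunk used by `push_constant_clear` below to
+// zero out push constant ranges without allocating.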
+const PUSH_CONSTANT_CLEAR_ARRAY: &[u32] = &[0_u32; 64];
+
+#[derive(Debug)]
+pub struct CommandBuffer<B: hal::Backend> {
+ pub(crate) raw: Vec<B::CommandBuffer>,
+ is_recording: bool,
+ recorded_thread_id: ThreadId,
+ pub(crate) device_id: Stored<id::DeviceId>,
+ pub(crate) trackers: TrackerSet,
+ pub(crate) used_swap_chain: Option<(Stored<id::SwapChainId>, B::Framebuffer)>,
+ limits: wgt::Limits,
+ private_features: PrivateFeatures,
+ #[cfg(feature = "trace")]
+ pub(crate) commands: Option<Vec<crate::device::trace::Command>>,
+ #[cfg(debug_assertions)]
+ pub(crate) label: String,
+}
+
+impl<B: GfxBackend> CommandBuffer<B> {
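+    /// Look up an encoder and check that it is still recording; encoders that
+    /// have already finished are rejected with `NotRecording`.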
+ fn get_encoder(
+ storage: &mut Storage<Self, id::CommandEncoderId>,
+ id: id::CommandEncoderId,
+ ) -> Result<&mut Self, CommandEncoderError> {
+ match storage.get_mut(id) {
+ Ok(cmd_buf) if cmd_buf.is_recording => Ok(cmd_buf),
+ Ok(_) => Err(CommandEncoderError::NotRecording),
+ Err(_) => Err(CommandEncoderError::Invalid),
+ }
+ }
+
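+    /// Merge the resource states tracked in `head` into `base`, emitting the
+    /// pipeline barriers required for buffers and textures whose usage changes.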
+ pub(crate) fn insert_barriers(
+ raw: &mut B::CommandBuffer,
+ base: &mut TrackerSet,
+ head: &TrackerSet,
+ buffer_guard: &Storage<Buffer<B>, id::BufferId>,
+ texture_guard: &Storage<Texture<B>, id::TextureId>,
+ ) {
+ use hal::command::CommandBuffer as _;
+
+ debug_assert_eq!(B::VARIANT, base.backend());
+ debug_assert_eq!(B::VARIANT, head.backend());
+
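+        // Only buffers and textures need explicit transitions; the remaining
+        // trackers are merely unioned, so a merge failure there is a bug.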
+ let buffer_barriers = base.buffers.merge_replace(&head.buffers).map(|pending| {
+ let buf = &buffer_guard[pending.id];
+ pending.into_hal(buf)
+ });
+ let texture_barriers = base.textures.merge_replace(&head.textures).map(|pending| {
+ let tex = &texture_guard[pending.id];
+ pending.into_hal(tex)
+ });
+ base.views.merge_extend(&head.views).unwrap();
+ base.bind_groups.merge_extend(&head.bind_groups).unwrap();
+ base.samplers.merge_extend(&head.samplers).unwrap();
+ base.compute_pipes
+ .merge_extend(&head.compute_pipes)
+ .unwrap();
+ base.render_pipes.merge_extend(&head.render_pipes).unwrap();
+ base.bundles.merge_extend(&head.bundles).unwrap();
+
+ let stages = all_buffer_stages() | all_image_stages();
+ unsafe {
+ raw.pipeline_barrier(
+ stages..stages,
+ hal::memory::Dependencies::empty(),
+ buffer_barriers.chain(texture_barriers),
+ );
+ }
+ }
+}
+
+impl<B: hal::Backend> crate::hub::Resource for CommandBuffer<B> {
+ const TYPE: &'static str = "CommandBuffer";
+
+ fn life_guard(&self) -> &crate::LifeGuard {
+ unreachable!()
+ }
+
+ fn label(&self) -> &str {
+ #[cfg(debug_assertions)]
+ return &self.label;
+ #[cfg(not(debug_assertions))]
+ return "";
+ }
+}
+
+#[derive(Copy, Clone, Debug)]
+pub struct BasePassRef<'a, C> {
+ pub commands: &'a [C],
+ pub dynamic_offsets: &'a [wgt::DynamicOffset],
+ pub string_data: &'a [u8],
+ pub push_constant_data: &'a [u32],
+}
+
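+/// Flattened storage for a recorded pass: fixed-size commands plus side
+/// buffers for variable-length payloads (dynamic offsets, debug strings, and
+/// push constant words), which commands reference by length or offset.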
+#[doc(hidden)]
+#[derive(Debug)]
+#[cfg_attr(
+ any(feature = "serial-pass", feature = "trace"),
+ derive(serde::Serialize)
+)]
+#[cfg_attr(
+ any(feature = "serial-pass", feature = "replay"),
+ derive(serde::Deserialize)
+)]
+pub struct BasePass<C> {
+ pub commands: Vec<C>,
+ pub dynamic_offsets: Vec<wgt::DynamicOffset>,
+ pub string_data: Vec<u8>,
+ pub push_constant_data: Vec<u32>,
+}
+
+impl<C: Clone> BasePass<C> {
+ fn new() -> Self {
+ Self {
+ commands: Vec::new(),
+ dynamic_offsets: Vec::new(),
+ string_data: Vec::new(),
+ push_constant_data: Vec::new(),
+ }
+ }
+
+ #[cfg(feature = "trace")]
+ fn from_ref(base: BasePassRef<C>) -> Self {
+ Self {
+ commands: base.commands.to_vec(),
+ dynamic_offsets: base.dynamic_offsets.to_vec(),
+ string_data: base.string_data.to_vec(),
+ push_constant_data: base.push_constant_data.to_vec(),
+ }
+ }
+
+ pub fn as_ref(&self) -> BasePassRef<C> {
+ BasePassRef {
+ commands: &self.commands,
+ dynamic_offsets: &self.dynamic_offsets,
+ string_data: &self.string_data,
+ push_constant_data: &self.push_constant_data,
+ }
+ }
+}
+
+#[derive(Clone, Debug, Error)]
+pub enum CommandEncoderError {
+ #[error("command encoder is invalid")]
+ Invalid,
+ #[error("command encoder must be active")]
+ NotRecording,
+}
+
+impl<G: GlobalIdentityHandlerFactory> Global<G> {
+ pub fn command_encoder_finish<B: GfxBackend>(
+ &self,
+ encoder_id: id::CommandEncoderId,
+ _desc: &wgt::CommandBufferDescriptor<Label>,
+ ) -> (id::CommandBufferId, Option<CommandEncoderError>) {
+ span!(_guard, INFO, "CommandEncoder::finish");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+ let (swap_chain_guard, mut token) = hub.swap_chains.read(&mut token);
+ //TODO: actually close the last recorded command buffer
+ let (mut cmd_buf_guard, _) = hub.command_buffers.write(&mut token);
+
+ let error = match CommandBuffer::get_encoder(&mut *cmd_buf_guard, encoder_id) {
+ Ok(cmd_buf) => {
+ cmd_buf.is_recording = false;
+ // stop tracking the swapchain image, if used
+ if let Some((ref sc_id, _)) = cmd_buf.used_swap_chain {
+ let view_id = swap_chain_guard[sc_id.value]
+ .acquired_view_id
+ .as_ref()
+                    .expect("Used swap chain frame has already been presented");
+ cmd_buf.trackers.views.remove(view_id.value);
+ }
+ tracing::trace!("Command buffer {:?} {:#?}", encoder_id, cmd_buf.trackers);
+ None
+ }
+ Err(e) => Some(e),
+ };
+
+ (encoder_id, error)
+ }
+
+ pub fn command_encoder_push_debug_group<B: GfxBackend>(
+ &self,
+ encoder_id: id::CommandEncoderId,
+ label: &str,
+ ) -> Result<(), CommandEncoderError> {
+ span!(_guard, DEBUG, "CommandEncoder::push_debug_group");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+
+ let (mut cmd_buf_guard, _) = hub.command_buffers.write(&mut token);
+ let cmd_buf = CommandBuffer::get_encoder(&mut *cmd_buf_guard, encoder_id)?;
+ let cmb_raw = cmd_buf.raw.last_mut().unwrap();
+
+ unsafe {
+ cmb_raw.begin_debug_marker(label, 0);
+ }
+ Ok(())
+ }
+
+ pub fn command_encoder_insert_debug_marker<B: GfxBackend>(
+ &self,
+ encoder_id: id::CommandEncoderId,
+ label: &str,
+ ) -> Result<(), CommandEncoderError> {
+ span!(_guard, DEBUG, "CommandEncoder::insert_debug_marker");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+
+ let (mut cmd_buf_guard, _) = hub.command_buffers.write(&mut token);
+ let cmd_buf = CommandBuffer::get_encoder(&mut *cmd_buf_guard, encoder_id)?;
+ let cmb_raw = cmd_buf.raw.last_mut().unwrap();
+
+ unsafe {
+ cmb_raw.insert_debug_marker(label, 0);
+ }
+ Ok(())
+ }
+
+ pub fn command_encoder_pop_debug_group<B: GfxBackend>(
+ &self,
+ encoder_id: id::CommandEncoderId,
+ ) -> Result<(), CommandEncoderError> {
+        span!(_guard, DEBUG, "CommandEncoder::pop_debug_group");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+
+ let (mut cmd_buf_guard, _) = hub.command_buffers.write(&mut token);
+ let cmd_buf = CommandBuffer::get_encoder(&mut *cmd_buf_guard, encoder_id)?;
+ let cmb_raw = cmd_buf.raw.last_mut().unwrap();
+
+ unsafe {
+ cmb_raw.end_debug_marker();
+ }
+ Ok(())
+ }
+}
+
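+/// Zero out the push constant range `offset..offset + size_bytes` by calling
+/// `push_fn` with chunks of `PUSH_CONSTANT_CLEAR_ARRAY`, at most 64 words at a time.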
+fn push_constant_clear<PushFn>(offset: u32, size_bytes: u32, mut push_fn: PushFn)
+where
+ PushFn: FnMut(u32, &[u32]),
+{
+ let mut count_words = 0_u32;
+ let size_words = size_bytes / wgt::PUSH_CONSTANT_ALIGNMENT;
+ while count_words < size_words {
+ let count_bytes = count_words * wgt::PUSH_CONSTANT_ALIGNMENT;
+ let size_to_write_words =
+ (size_words - count_words).min(PUSH_CONSTANT_CLEAR_ARRAY.len() as u32);
+
+ push_fn(
+ offset + count_bytes,
+ &PUSH_CONSTANT_CLEAR_ARRAY[0..size_to_write_words as usize],
+ );
+
+ count_words += size_to_write_words;
+ }
+}
+
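+/// Remembers the last value set for a piece of pass state, so redundant
+/// `set_*` calls (for example, re-setting the current pipeline) can be skipped.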
+#[derive(Debug)]
+struct StateChange<T> {
+ last_state: Option<T>,
+}
+
+impl<T: Copy + PartialEq> StateChange<T> {
+ fn new() -> Self {
+ Self { last_state: None }
+ }
+ fn set_and_check_redundant(&mut self, new_state: T) -> bool {
+ let already_set = self.last_state == Some(new_state);
+ self.last_state = Some(new_state);
+ already_set
+ }
+ fn is_unset(&self) -> bool {
+ self.last_state.is_none()
+ }
+ fn reset(&mut self) {
+ self.last_state = None;
+ }
+}
+
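+/// Helper for attaching a `PassErrorScope` to an error, wrapping it into the
+/// pass-level error type.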
+trait MapPassErr<T, O> {
+ fn map_pass_err(self, scope: PassErrorScope) -> Result<T, O>;
+}
+
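+/// The command or parameter in which a pass error was encountered; used to
+/// prefix error messages with their origin.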
+#[derive(Clone, Copy, Debug, Error)]
+pub enum PassErrorScope {
+ #[error("In a bundle parameter")]
+ Bundle,
+ #[error("In a pass parameter")]
+ Pass(id::CommandEncoderId),
+ #[error("In a set_bind_group command")]
+ SetBindGroup(id::BindGroupId),
+ #[error("In a set_pipeline command")]
+ SetPipelineRender(id::RenderPipelineId),
+ #[error("In a set_pipeline command")]
+ SetPipelineCompute(id::ComputePipelineId),
+ #[error("In a set_push_constant command")]
+ SetPushConstant,
+ #[error("In a set_vertex_buffer command")]
+ SetVertexBuffer(id::BufferId),
+ #[error("In a set_index_buffer command")]
+ SetIndexBuffer(id::BufferId),
+ #[error("In a set_viewport command")]
+ SetViewport,
+ #[error("In a set_scissor_rect command")]
+ SetScissorRect,
+ #[error("In a draw command")]
+ Draw,
+ #[error("In a draw_indexed command")]
+ DrawIndexed,
+ #[error("In a draw_indirect command")]
+ DrawIndirect,
+ #[error("In a draw_indexed_indirect command")]
+ DrawIndexedIndirect,
+    #[error("In an execute_bundle command")]
+ ExecuteBundle,
+ #[error("In a dispatch command")]
+ Dispatch,
+ #[error("In a dispatch_indirect command")]
+ DispatchIndirect,
+ #[error("In a pop_debug_group command")]
+ PopDebugGroup,
+}
diff --git a/gfx/wgpu/wgpu-core/src/command/render.rs b/gfx/wgpu/wgpu-core/src/command/render.rs
new file mode 100644
index 0000000000..531107956a
--- /dev/null
+++ b/gfx/wgpu/wgpu-core/src/command/render.rs
@@ -0,0 +1,2078 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+use crate::{
+ binding_model::BindError,
+ command::{
+ bind::{Binder, LayoutChange},
+ BasePass, BasePassRef, CommandBuffer, CommandEncoderError, DrawError, ExecutionError,
+ MapPassErr, PassErrorScope, RenderCommand, RenderCommandError, StateChange,
+ },
+ conv,
+ device::{
+ AttachmentData, AttachmentDataVec, FramebufferKey, RenderPassCompatibilityError,
+ RenderPassContext, RenderPassKey, MAX_COLOR_TARGETS, MAX_VERTEX_BUFFERS,
+ },
+ hub::{GfxBackend, Global, GlobalIdentityHandlerFactory, Token},
+ id,
+ pipeline::PipelineFlags,
+ resource::{BufferUse, TextureUse, TextureView, TextureViewInner},
+ span,
+ track::{TextureSelector, TrackerSet, UsageConflict},
+ validation::{
+ check_buffer_usage, check_texture_usage, MissingBufferUsageError, MissingTextureUsageError,
+ },
+ Stored, MAX_BIND_GROUPS,
+};
+
+use arrayvec::ArrayVec;
+use hal::command::CommandBuffer as _;
+use thiserror::Error;
+use wgt::{BufferAddress, BufferUsage, Color, IndexFormat, InputStepMode, TextureUsage};
+
+#[cfg(any(feature = "serial-pass", feature = "replay"))]
+use serde::Deserialize;
+#[cfg(any(feature = "serial-pass", feature = "trace"))]
+use serde::Serialize;
+
+use std::{
+ borrow::{Borrow, Cow},
+ collections::hash_map::Entry,
+ fmt, iter,
+ num::NonZeroU32,
+ ops::Range,
+ str,
+};
+
+/// Operation to perform on the output attachment at the start of a renderpass.
+#[repr(C)]
+#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
+#[cfg_attr(any(feature = "serial-pass", feature = "trace"), derive(Serialize))]
+#[cfg_attr(any(feature = "serial-pass", feature = "replay"), derive(Deserialize))]
+pub enum LoadOp {
+ /// Clear the output attachment with the clear color. Clearing is faster than loading.
+ Clear = 0,
+    /// Do not clear the output attachment.
+ Load = 1,
+}
+
+/// Operation to perform on the output attachment at the end of a renderpass.
+#[repr(C)]
+#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
+#[cfg_attr(any(feature = "serial-pass", feature = "trace"), derive(Serialize))]
+#[cfg_attr(any(feature = "serial-pass", feature = "replay"), derive(Deserialize))]
+pub enum StoreOp {
+    /// Discard the contents of the render target. If you don't care about the contents of the target, this can be faster.
+ Clear = 0,
+ /// Store the result of the renderpass.
+ Store = 1,
+}
+
+/// Describes an individual channel within a render pass, such as color, depth, or stencil.
+#[repr(C)]
+#[derive(Clone, Debug, PartialEq)]
+#[cfg_attr(any(feature = "serial-pass", feature = "trace"), derive(Serialize))]
+#[cfg_attr(any(feature = "serial-pass", feature = "replay"), derive(Deserialize))]
+pub struct PassChannel<V> {
+    /// Operation to perform on the output attachment at the start of a
+    /// renderpass. This must be `LoadOp::Clear` in the first renderpass that
+    /// renders to a swap chain image.
+ pub load_op: LoadOp,
+    /// Operation to perform on the output attachment at the end of a renderpass.
+ pub store_op: StoreOp,
+    /// If load_op is [`LoadOp::Clear`], the attachment will be cleared to this value.
+ pub clear_value: V,
+ /// If true, the relevant channel is not changed by a renderpass, and the corresponding attachment
+ /// can be used inside the pass by other read-only usages.
+ pub read_only: bool,
+}
+
+/// Describes a color attachment to a render pass.
+#[repr(C)]
+#[derive(Clone, Debug, PartialEq)]
+#[cfg_attr(any(feature = "serial-pass", feature = "trace"), derive(Serialize))]
+#[cfg_attr(any(feature = "serial-pass", feature = "replay"), derive(Deserialize))]
+pub struct ColorAttachmentDescriptor {
+ /// The view to use as an attachment.
+ pub attachment: id::TextureViewId,
+ /// The view that will receive the resolved output if multisampling is used.
+ pub resolve_target: Option<id::TextureViewId>,
+ /// What operations will be performed on this color attachment.
+ pub channel: PassChannel<Color>,
+}
+
+/// Describes a depth/stencil attachment to a render pass.
+#[repr(C)]
+#[derive(Clone, Debug, PartialEq)]
+#[cfg_attr(any(feature = "serial-pass", feature = "trace"), derive(Serialize))]
+#[cfg_attr(any(feature = "serial-pass", feature = "replay"), derive(Deserialize))]
+pub struct DepthStencilAttachmentDescriptor {
+ /// The view to use as an attachment.
+ pub attachment: id::TextureViewId,
+ /// What operations will be performed on the depth part of the attachment.
+ pub depth: PassChannel<f32>,
+ /// What operations will be performed on the stencil part of the attachment.
+ pub stencil: PassChannel<u32>,
+}
+
+impl DepthStencilAttachmentDescriptor {
+ fn is_read_only(&self, aspects: hal::format::Aspects) -> Result<bool, RenderPassErrorInner> {
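+        // An aspect is read-only only if it is flagged as such *and* its ops
+        // are Load/Store; any other ops on a read-only aspect are an error.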
+ if aspects.contains(hal::format::Aspects::DEPTH) && !self.depth.read_only {
+ return Ok(false);
+ }
+ if (self.depth.load_op, self.depth.store_op) != (LoadOp::Load, StoreOp::Store) {
+ return Err(RenderPassErrorInner::InvalidDepthOps);
+ }
+ if aspects.contains(hal::format::Aspects::STENCIL) && !self.stencil.read_only {
+ return Ok(false);
+ }
+ if (self.stencil.load_op, self.stencil.store_op) != (LoadOp::Load, StoreOp::Store) {
+ return Err(RenderPassErrorInner::InvalidStencilOps);
+ }
+ Ok(true)
+ }
+}
+
+/// Describes the attachments of a render pass.
+#[derive(Clone, Debug, Default, PartialEq)]
+pub struct RenderPassDescriptor<'a> {
+ /// The color attachments of the render pass.
+ pub color_attachments: Cow<'a, [ColorAttachmentDescriptor]>,
+ /// The depth and stencil attachment of the render pass, if any.
+ pub depth_stencil_attachment: Option<&'a DepthStencilAttachmentDescriptor>,
+}
+
+#[cfg_attr(feature = "serial-pass", derive(Deserialize, Serialize))]
+pub struct RenderPass {
+ base: BasePass<RenderCommand>,
+ parent_id: id::CommandEncoderId,
+ color_targets: ArrayVec<[ColorAttachmentDescriptor; MAX_COLOR_TARGETS]>,
+ depth_stencil_target: Option<DepthStencilAttachmentDescriptor>,
+}
+
+impl RenderPass {
+ pub fn new(parent_id: id::CommandEncoderId, desc: RenderPassDescriptor) -> Self {
+ Self {
+ base: BasePass::new(),
+ parent_id,
+ color_targets: desc.color_attachments.iter().cloned().collect(),
+ depth_stencil_target: desc.depth_stencil_attachment.cloned(),
+ }
+ }
+
+ pub fn parent_id(&self) -> id::CommandEncoderId {
+ self.parent_id
+ }
+
+ #[cfg(feature = "trace")]
+ pub fn into_command(self) -> crate::device::trace::Command {
+ crate::device::trace::Command::RunRenderPass {
+ base: self.base,
+ target_colors: self.color_targets.into_iter().collect(),
+ target_depth_stencil: self.depth_stencil_target,
+ }
+ }
+}
+
+impl fmt::Debug for RenderPass {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(
+ f,
+ "RenderPass {{ encoder_id: {:?}, color_targets: {:?}, depth_stencil_target: {:?}, data: {:?} commands, {:?} dynamic offsets, and {:?} push constant u32s }}",
+ self.parent_id,
+ self.color_targets,
+ self.depth_stencil_target,
+ self.base.commands.len(),
+ self.base.dynamic_offsets.len(),
+ self.base.push_constant_data.len(),
+ )
+ }
+}
+
+#[derive(Debug, PartialEq)]
+enum OptionalState {
+ Unused,
+ Required,
+ Set,
+}
+
+impl OptionalState {
+ fn require(&mut self, require: bool) {
+ if require && *self == OptionalState::Unused {
+ *self = OptionalState::Required;
+ }
+ }
+}
+
+#[derive(Debug, Default)]
+struct IndexState {
+ bound_buffer_view: Option<(id::Valid<id::BufferId>, Range<BufferAddress>)>,
+ format: IndexFormat,
+ limit: u32,
+}
+
+impl IndexState {
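+    /// Recompute the maximum valid index from the bound buffer range:
+    /// indices are 2 bytes for `Uint16` and 4 bytes for `Uint32`.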
+ fn update_limit(&mut self) {
+ self.limit = match self.bound_buffer_view {
+ Some((_, ref range)) => {
+ let shift = match self.format {
+ IndexFormat::Uint16 => 1,
+ IndexFormat::Uint32 => 2,
+ };
+ ((range.end - range.start) >> shift) as u32
+ }
+ None => 0,
+ }
+ }
+
+ fn reset(&mut self) {
+ self.bound_buffer_view = None;
+ self.limit = 0;
+ }
+}
+
+#[derive(Clone, Copy, Debug)]
+struct VertexBufferState {
+ total_size: BufferAddress,
+ stride: BufferAddress,
+ rate: InputStepMode,
+}
+
+impl VertexBufferState {
+ const EMPTY: Self = VertexBufferState {
+ total_size: 0,
+ stride: 0,
+ rate: InputStepMode::Vertex,
+ };
+}
+
+#[derive(Debug, Default)]
+struct VertexState {
+ inputs: ArrayVec<[VertexBufferState; MAX_VERTEX_BUFFERS]>,
+ vertex_limit: u32,
+ instance_limit: u32,
+}
+
+impl VertexState {
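+    /// Recompute the draw limits: each bound buffer holds `total_size / stride`
+    /// complete elements, and the smallest count per step mode bounds how many
+    /// vertices or instances a draw may use.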
+ fn update_limits(&mut self) {
+ self.vertex_limit = !0;
+ self.instance_limit = !0;
+ for vbs in &self.inputs {
+ if vbs.stride == 0 {
+ continue;
+ }
+ let limit = (vbs.total_size / vbs.stride) as u32;
+ match vbs.rate {
+ InputStepMode::Vertex => self.vertex_limit = self.vertex_limit.min(limit),
+ InputStepMode::Instance => self.instance_limit = self.instance_limit.min(limit),
+ }
+ }
+ }
+
+ fn reset(&mut self) {
+ self.inputs.clear();
+ self.vertex_limit = 0;
+ self.instance_limit = 0;
+ }
+}
+
+#[derive(Debug)]
+struct State {
+ pipeline_flags: PipelineFlags,
+ binder: Binder,
+ blend_color: OptionalState,
+ stencil_reference: u32,
+ pipeline: StateChange<id::RenderPipelineId>,
+ index: IndexState,
+ vertex: VertexState,
+ debug_scope_depth: u32,
+}
+
+impl State {
+ fn is_ready(&self) -> Result<(), DrawError> {
+ //TODO: vertex buffers
+ let bind_mask = self.binder.invalid_mask();
+ if bind_mask != 0 {
+ //let (expected, provided) = self.binder.entries[index as usize].info();
+ return Err(DrawError::IncompatibleBindGroup {
+ index: bind_mask.trailing_zeros(),
+ });
+ }
+ if self.pipeline.is_unset() {
+ return Err(DrawError::MissingPipeline);
+ }
+ if self.blend_color == OptionalState::Required {
+ return Err(DrawError::MissingBlendColor);
+ }
+ Ok(())
+ }
+
+ /// Reset the `RenderBundle`-related states.
+ fn reset_bundle(&mut self) {
+ self.binder.reset();
+ self.pipeline.reset();
+ self.index.reset();
+ self.vertex.reset();
+ }
+}
+
+/// Error encountered when performing a render pass.
+#[derive(Clone, Debug, Error)]
+pub enum RenderPassErrorInner {
+ #[error(transparent)]
+ Encoder(#[from] CommandEncoderError),
+ #[error("attachment texture view {0:?} is invalid")]
+ InvalidAttachment(id::TextureViewId),
+ #[error("attachments have different sizes")]
+ MismatchAttachments,
+ #[error("attachment's sample count {0} is invalid")]
+ InvalidSampleCount(u8),
+ #[error("attachment with resolve target must be multi-sampled")]
+ InvalidResolveSourceSampleCount,
+ #[error("resolve target must have a sample count of 1")]
+ InvalidResolveTargetSampleCount,
+ #[error("not enough memory left")]
+ OutOfMemory,
+ #[error("extent state {state_extent:?} must match extent from view {view_extent:?}")]
+ ExtentStateMismatch {
+ state_extent: hal::image::Extent,
+ view_extent: hal::image::Extent,
+ },
+ #[error("attempted to use a swap chain image as a depth/stencil attachment")]
+ SwapChainImageAsDepthStencil,
+ #[error("unable to clear non-present/read-only depth")]
+ InvalidDepthOps,
+ #[error("unable to clear non-present/read-only stencil")]
+ InvalidStencilOps,
+ #[error("all attachments must have the same sample count, found {actual} != {expected}")]
+ SampleCountMismatch { actual: u8, expected: u8 },
+ #[error("texture view's swap chain must match swap chain in use")]
+ SwapChainMismatch,
+ #[error("setting `values_offset` to be `None` is only for internal use in render bundles")]
+ InvalidValuesOffset,
+ #[error("required device features not enabled: {0:?}")]
+ MissingDeviceFeatures(wgt::Features),
+ #[error("indirect draw with offset {offset}{} uses bytes {begin_offset}..{end_offset} which overruns indirect buffer of size {buffer_size}", count.map_or_else(String::new, |v| format!(" and count {}", v)))]
+ IndirectBufferOverrun {
+ offset: u64,
+ count: Option<NonZeroU32>,
+ begin_offset: u64,
+ end_offset: u64,
+ buffer_size: u64,
+ },
+ #[error("indirect draw uses bytes {begin_count_offset}..{end_count_offset} which overruns indirect buffer of size {count_buffer_size}")]
+ IndirectCountBufferOverrun {
+ begin_count_offset: u64,
+ end_count_offset: u64,
+ count_buffer_size: u64,
+ },
+    #[error("cannot pop debug group: no debug group is currently open")]
+ InvalidPopDebugGroup,
+ #[error(transparent)]
+ ResourceUsageConflict(#[from] UsageConflict),
+ #[error("render bundle is incompatible, {0}")]
+ IncompatibleRenderBundle(#[from] RenderPassCompatibilityError),
+ #[error(transparent)]
+ RenderCommand(#[from] RenderCommandError),
+ #[error(transparent)]
+ Draw(#[from] DrawError),
+ #[error(transparent)]
+ Bind(#[from] BindError),
+}
+
+impl From<MissingBufferUsageError> for RenderPassErrorInner {
+ fn from(error: MissingBufferUsageError) -> Self {
+ Self::RenderCommand(error.into())
+ }
+}
+
+impl From<MissingTextureUsageError> for RenderPassErrorInner {
+ fn from(error: MissingTextureUsageError) -> Self {
+ Self::RenderCommand(error.into())
+ }
+}
+
+/// Error encountered when performing a render pass.
+#[derive(Clone, Debug, Error)]
+#[error("Render pass error {scope}: {inner}")]
+pub struct RenderPassError {
+ pub scope: PassErrorScope,
+ #[source]
+ inner: RenderPassErrorInner,
+}
+
+impl<T, E> MapPassErr<T, RenderPassError> for Result<T, E>
+where
+ E: Into<RenderPassErrorInner>,
+{
+ fn map_pass_err(self, scope: PassErrorScope) -> Result<T, RenderPassError> {
+ self.map_err(|inner| RenderPassError {
+ scope,
+ inner: inner.into(),
+ })
+ }
+}
+
+fn check_device_features(
+ actual: wgt::Features,
+ expected: wgt::Features,
+) -> Result<(), RenderPassErrorInner> {
+ if !actual.contains(expected) {
+ Err(RenderPassErrorInner::MissingDeviceFeatures(expected))
+ } else {
+ Ok(())
+ }
+}
+
+// Common routines between render/compute
+
+impl<G: GlobalIdentityHandlerFactory> Global<G> {
+ pub fn command_encoder_run_render_pass<B: GfxBackend>(
+ &self,
+ encoder_id: id::CommandEncoderId,
+ pass: &RenderPass,
+ ) -> Result<(), RenderPassError> {
+ self.command_encoder_run_render_pass_impl::<B>(
+ encoder_id,
+ pass.base.as_ref(),
+ &pass.color_targets,
+ pass.depth_stencil_target.as_ref(),
+ )
+ }
+
+ #[doc(hidden)]
+ pub fn command_encoder_run_render_pass_impl<B: GfxBackend>(
+ &self,
+ encoder_id: id::CommandEncoderId,
+ mut base: BasePassRef<RenderCommand>,
+ color_attachments: &[ColorAttachmentDescriptor],
+ depth_stencil_attachment: Option<&DepthStencilAttachmentDescriptor>,
+ ) -> Result<(), RenderPassError> {
+ span!(_guard, INFO, "CommandEncoder::run_render_pass");
+ let scope = PassErrorScope::Pass(encoder_id);
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+
+ let (device_guard, mut token) = hub.devices.read(&mut token);
+ let (mut cmb_guard, mut token) = hub.command_buffers.write(&mut token);
+
+ let mut trackers = TrackerSet::new(B::VARIANT);
+ let cmd_buf =
+ CommandBuffer::get_encoder(&mut *cmb_guard, encoder_id).map_pass_err(scope)?;
+ let device = &device_guard[cmd_buf.device_id.value];
+ let mut raw = device.cmd_allocator.extend(cmd_buf);
+
+ #[cfg(feature = "trace")]
+ if let Some(ref mut list) = cmd_buf.commands {
+ list.push(crate::device::trace::Command::RunRenderPass {
+ base: BasePass::from_ref(base),
+ target_colors: color_attachments.iter().cloned().collect(),
+ target_depth_stencil: depth_stencil_attachment.cloned(),
+ });
+ }
+
+ unsafe {
+ raw.begin_primary(hal::command::CommandBufferFlags::ONE_TIME_SUBMIT);
+ }
+
+ let (bundle_guard, mut token) = hub.render_bundles.read(&mut token);
+ let (pipeline_layout_guard, mut token) = hub.pipeline_layouts.read(&mut token);
+ let (bind_group_guard, mut token) = hub.bind_groups.read(&mut token);
+ let (pipeline_guard, mut token) = hub.render_pipelines.read(&mut token);
+ let (buffer_guard, mut token) = hub.buffers.read(&mut token);
+ let (texture_guard, mut token) = hub.textures.read(&mut token);
+ let (view_guard, _) = hub.texture_views.read(&mut token);
+
+ // We default to false intentionally, even if depth-stencil isn't used at all.
+ // This allows us to use the primary raw pipeline in `RenderPipeline`,
+ // instead of the special read-only one, which would be `None`.
+ let mut is_ds_read_only = false;
+
+ struct RenderAttachment<'a> {
+ texture_id: &'a Stored<id::TextureId>,
+ selector: &'a TextureSelector,
+ previous_use: Option<TextureUse>,
+ new_use: TextureUse,
+ }
+ let mut render_attachments = AttachmentDataVec::<RenderAttachment>::new();
+
+ let mut attachment_width = None;
+ let mut attachment_height = None;
+ let mut valid_attachment = true;
+
+ let context = {
+ use hal::device::Device as _;
+
+ let sample_count_limit = device.hal_limits.framebuffer_color_sample_counts;
+ let base_trackers = &cmd_buf.trackers;
+
+ let mut extent = None;
+ let mut sample_count = 0;
+ let mut depth_stencil_aspects = hal::format::Aspects::empty();
+ let mut used_swap_chain = None::<Stored<id::SwapChainId>>;
+
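+            // Every attachment must agree on extent and sample count; the
+            // first view seeds the expected values and the rest are validated
+            // against them.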
+ let mut add_view = |view: &TextureView<B>| {
+ if let Some(ex) = extent {
+ if ex != view.extent {
+ return Err(RenderPassErrorInner::ExtentStateMismatch {
+ state_extent: ex,
+ view_extent: view.extent,
+ });
+ }
+ } else {
+ extent = Some(view.extent);
+ }
+ if sample_count == 0 {
+ sample_count = view.samples;
+ } else if sample_count != view.samples {
+ return Err(RenderPassErrorInner::SampleCountMismatch {
+ actual: view.samples,
+ expected: sample_count,
+ });
+ }
+ Ok(())
+ };
+
+ tracing::trace!(
+ "Encoding render pass begin in command buffer {:?}",
+ encoder_id
+ );
+ let rp_key = {
+ let depth_stencil = match depth_stencil_attachment {
+ Some(at) => {
+ let view = trackers
+ .views
+ .use_extend(&*view_guard, at.attachment, (), ())
+ .map_err(|_| RenderPassErrorInner::InvalidAttachment(at.attachment))
+ .map_pass_err(scope)?;
+ add_view(view).map_pass_err(scope)?;
+ depth_stencil_aspects = view.aspects;
+
+ let source_id = match view.inner {
+ TextureViewInner::Native { ref source_id, .. } => source_id,
+ TextureViewInner::SwapChain { .. } => {
+ return Err(RenderPassErrorInner::SwapChainImageAsDepthStencil)
+ .map_pass_err(scope)
+ }
+ };
+
+                    // Use the render pass itself to perform the layout transition.
+ let previous_use = base_trackers
+ .textures
+ .query(source_id.value, view.selector.clone());
+ let new_use = if at.is_read_only(view.aspects).map_pass_err(scope)? {
+ is_ds_read_only = true;
+ TextureUse::ATTACHMENT_READ
+ } else {
+ TextureUse::ATTACHMENT_WRITE
+ };
+ render_attachments.push(RenderAttachment {
+ texture_id: source_id,
+ selector: &view.selector,
+ previous_use,
+ new_use,
+ });
+
+ let new_layout = conv::map_texture_state(new_use, view.aspects).1;
+ let old_layout = match previous_use {
+ Some(usage) => conv::map_texture_state(usage, view.aspects).1,
+ None => new_layout,
+ };
+
+ let ds_at = hal::pass::Attachment {
+ format: Some(conv::map_texture_format(
+ view.format,
+ device.private_features,
+ )),
+ samples: view.samples,
+ ops: conv::map_load_store_ops(&at.depth),
+ stencil_ops: conv::map_load_store_ops(&at.stencil),
+ layouts: old_layout..new_layout,
+ };
+ Some((ds_at, new_layout))
+ }
+ None => None,
+ };
+
+ let mut colors = ArrayVec::new();
+ let mut resolves = ArrayVec::new();
+
+ for at in color_attachments {
+ let view = trackers
+ .views
+ .use_extend(&*view_guard, at.attachment, (), ())
+ .map_err(|_| RenderPassErrorInner::InvalidAttachment(at.attachment))
+ .map_pass_err(scope)?;
+ add_view(view).map_pass_err(scope)?;
+
+ valid_attachment &= *attachment_width.get_or_insert(view.extent.width)
+ == view.extent.width
+ && *attachment_height.get_or_insert(view.extent.height)
+ == view.extent.height;
+
+ let layouts = match view.inner {
+ TextureViewInner::Native { ref source_id, .. } => {
+ let previous_use = base_trackers
+ .textures
+ .query(source_id.value, view.selector.clone());
+ let new_use = TextureUse::ATTACHMENT_WRITE;
+ render_attachments.push(RenderAttachment {
+ texture_id: source_id,
+ selector: &view.selector,
+ previous_use,
+ new_use,
+ });
+
+ let new_layout =
+ conv::map_texture_state(new_use, hal::format::Aspects::COLOR).1;
+ let old_layout = match previous_use {
+ Some(usage) => {
+ conv::map_texture_state(usage, hal::format::Aspects::COLOR).1
+ }
+ None => new_layout,
+ };
+ old_layout..new_layout
+ }
+ TextureViewInner::SwapChain { ref source_id, .. } => {
+ if let Some((ref sc_id, _)) = cmd_buf.used_swap_chain {
+ if source_id.value != sc_id.value {
+ return Err(RenderPassErrorInner::SwapChainMismatch)
+ .map_pass_err(scope);
+ }
+ } else {
+ assert!(used_swap_chain.is_none());
+ used_swap_chain = Some(source_id.clone());
+ }
+
+ let end = hal::image::Layout::Present;
+ let start = match at.channel.load_op {
+ LoadOp::Clear => hal::image::Layout::Undefined,
+ LoadOp::Load => end,
+ };
+ start..end
+ }
+ };
+
+ let color_at = hal::pass::Attachment {
+ format: Some(conv::map_texture_format(
+ view.format,
+ device.private_features,
+ )),
+ samples: view.samples,
+ ops: conv::map_load_store_ops(&at.channel),
+ stencil_ops: hal::pass::AttachmentOps::DONT_CARE,
+ layouts,
+ };
+ colors.push((color_at, hal::image::Layout::ColorAttachmentOptimal));
+ }
+
+ if !valid_attachment {
+ return Err(RenderPassErrorInner::MismatchAttachments).map_pass_err(scope);
+ }
+
+ for resolve_target in color_attachments.iter().flat_map(|at| at.resolve_target) {
+ let view = trackers
+ .views
+ .use_extend(&*view_guard, resolve_target, (), ())
+ .map_err(|_| RenderPassErrorInner::InvalidAttachment(resolve_target))
+ .map_pass_err(scope)?;
+ if extent != Some(view.extent) {
+ return Err(RenderPassErrorInner::ExtentStateMismatch {
+ state_extent: extent.unwrap_or_default(),
+ view_extent: view.extent,
+ })
+ .map_pass_err(scope);
+ }
+ if view.samples != 1 {
+ return Err(RenderPassErrorInner::InvalidResolveTargetSampleCount)
+ .map_pass_err(scope);
+ }
+ if sample_count == 1 {
+ return Err(RenderPassErrorInner::InvalidResolveSourceSampleCount)
+ .map_pass_err(scope);
+ }
+
+ let layouts = match view.inner {
+ TextureViewInner::Native { ref source_id, .. } => {
+ let previous_use = base_trackers
+ .textures
+ .query(source_id.value, view.selector.clone());
+ let new_use = TextureUse::ATTACHMENT_WRITE;
+ render_attachments.push(RenderAttachment {
+ texture_id: source_id,
+ selector: &view.selector,
+ previous_use,
+ new_use,
+ });
+
+ let new_layout =
+ conv::map_texture_state(new_use, hal::format::Aspects::COLOR).1;
+ let old_layout = match previous_use {
+ Some(usage) => {
+ conv::map_texture_state(usage, hal::format::Aspects::COLOR).1
+ }
+ None => new_layout,
+ };
+ old_layout..new_layout
+ }
+ TextureViewInner::SwapChain { ref source_id, .. } => {
+ if let Some((ref sc_id, _)) = cmd_buf.used_swap_chain {
+ if source_id.value != sc_id.value {
+ return Err(RenderPassErrorInner::SwapChainMismatch)
+ .map_pass_err(scope);
+ }
+ } else {
+ assert!(used_swap_chain.is_none());
+ used_swap_chain = Some(source_id.clone());
+ }
+ hal::image::Layout::Undefined..hal::image::Layout::Present
+ }
+ };
+
+ let resolve_at = hal::pass::Attachment {
+ format: Some(conv::map_texture_format(
+ view.format,
+ device.private_features,
+ )),
+ samples: view.samples,
+ ops: hal::pass::AttachmentOps::new(
+ hal::pass::AttachmentLoadOp::DontCare,
+ hal::pass::AttachmentStoreOp::Store,
+ ),
+ stencil_ops: hal::pass::AttachmentOps::DONT_CARE,
+ layouts,
+ };
+ resolves.push((resolve_at, hal::image::Layout::ColorAttachmentOptimal));
+ }
+
+ RenderPassKey {
+ colors,
+ resolves,
+ depth_stencil,
+ }
+ };
+
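+            // `framebuffer_color_sample_counts` is a bitmask of the sample
+            // counts the device supports; the requested count must have its bit set.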
+ if sample_count & sample_count_limit == 0 {
+ return Err(RenderPassErrorInner::InvalidSampleCount(sample_count))
+ .map_pass_err(scope);
+ }
+
+ let mut render_pass_cache = device.render_passes.lock();
+ let render_pass = match render_pass_cache.entry(rp_key.clone()) {
+ Entry::Occupied(e) => e.into_mut(),
+ Entry::Vacant(entry) => {
+ let color_ids: [hal::pass::AttachmentRef; MAX_COLOR_TARGETS] = [
+ (0, hal::image::Layout::ColorAttachmentOptimal),
+ (1, hal::image::Layout::ColorAttachmentOptimal),
+ (2, hal::image::Layout::ColorAttachmentOptimal),
+ (3, hal::image::Layout::ColorAttachmentOptimal),
+ ];
+
+ let mut resolve_ids = ArrayVec::<[_; MAX_COLOR_TARGETS]>::new();
+ let mut attachment_index = color_attachments.len();
+ if color_attachments
+ .iter()
+ .any(|at| at.resolve_target.is_some())
+ {
+ for ((i, at), &(_, layout)) in color_attachments
+ .iter()
+ .enumerate()
+ .zip(entry.key().resolves.iter())
+ {
+ let real_attachment_index = match at.resolve_target {
+ Some(_) => attachment_index + i,
+ None => hal::pass::ATTACHMENT_UNUSED,
+ };
+ resolve_ids.push((real_attachment_index, layout));
+ }
+ attachment_index += color_attachments.len();
+ }
+
+ let depth_id = depth_stencil_attachment.map(|_| {
+ let usage = if is_ds_read_only {
+ TextureUse::ATTACHMENT_READ
+ } else {
+ TextureUse::ATTACHMENT_WRITE
+ };
+ (
+ attachment_index,
+ conv::map_texture_state(usage, depth_stencil_aspects).1,
+ )
+ });
+
+ let subpass = hal::pass::SubpassDesc {
+ colors: &color_ids[..color_attachments.len()],
+ resolves: &resolve_ids,
+ depth_stencil: depth_id.as_ref(),
+ inputs: &[],
+ preserves: &[],
+ };
+ let all = entry
+ .key()
+ .all()
+ .map(|(at, _)| at)
+ .collect::<AttachmentDataVec<_>>();
+
+ let pass =
+ unsafe { device.raw.create_render_pass(all, iter::once(subpass), &[]) }
+ .unwrap();
+ entry.insert(pass)
+ }
+ };
+
+ let mut framebuffer_cache;
+ let fb_key = FramebufferKey {
+ colors: color_attachments
+ .iter()
+ .map(|at| id::Valid(at.attachment))
+ .collect(),
+ resolves: color_attachments
+ .iter()
+ .filter_map(|at| at.resolve_target)
+ .map(id::Valid)
+ .collect(),
+ depth_stencil: depth_stencil_attachment.map(|at| id::Valid(at.attachment)),
+ };
+ let context = RenderPassContext {
+ attachments: AttachmentData {
+ colors: fb_key
+ .colors
+ .iter()
+ .map(|&at| view_guard[at].format)
+ .collect(),
+ resolves: fb_key
+ .resolves
+ .iter()
+ .map(|&at| view_guard[at].format)
+ .collect(),
+ depth_stencil: fb_key.depth_stencil.map(|at| view_guard[at].format),
+ },
+ sample_count,
+ };
+
+ let framebuffer = match used_swap_chain.take() {
+ Some(sc_id) => {
+ assert!(cmd_buf.used_swap_chain.is_none());
+ // Always create a new framebuffer and delete it after presentation.
+ let attachments = fb_key
+ .all()
+ .map(|&id| match view_guard[id].inner {
+ TextureViewInner::Native { ref raw, .. } => raw,
+ TextureViewInner::SwapChain { ref image, .. } => Borrow::borrow(image),
+ })
+ .collect::<AttachmentDataVec<_>>();
+ let framebuffer = unsafe {
+ device
+ .raw
+ .create_framebuffer(&render_pass, attachments, extent.unwrap())
+ .or(Err(RenderPassErrorInner::OutOfMemory))
+ .map_pass_err(scope)?
+ };
+ cmd_buf.used_swap_chain = Some((sc_id, framebuffer));
+ &mut cmd_buf.used_swap_chain.as_mut().unwrap().1
+ }
+ None => {
+ // Cache framebuffers by the device.
+ framebuffer_cache = device.framebuffers.lock();
+ match framebuffer_cache.entry(fb_key) {
+ Entry::Occupied(e) => e.into_mut(),
+ Entry::Vacant(e) => {
+ let fb = {
+ let attachments = e
+ .key()
+ .all()
+ .map(|&id| match view_guard[id].inner {
+ TextureViewInner::Native { ref raw, .. } => raw,
+ TextureViewInner::SwapChain { ref image, .. } => {
+ Borrow::borrow(image)
+ }
+ })
+ .collect::<AttachmentDataVec<_>>();
+ unsafe {
+ device
+ .raw
+ .create_framebuffer(
+ &render_pass,
+ attachments,
+ extent.unwrap(),
+ )
+ .or(Err(RenderPassErrorInner::OutOfMemory))
+ .map_pass_err(scope)?
+ }
+ };
+ e.insert(fb)
+ }
+ }
+ }
+ };
+
+ let rect = {
+ let ex = extent.unwrap();
+ hal::pso::Rect {
+ x: 0,
+ y: 0,
+ w: ex.width as _,
+ h: ex.height as _,
+ }
+ };
+
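+        // Gather clear values, in attachment order, for every attachment whose
+        // load op actually requires one.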
+ let clear_values = color_attachments
+ .iter()
+ .zip(&rp_key.colors)
+ .flat_map(|(at, (rat, _layout))| {
+ match at.channel.load_op {
+ LoadOp::Load => None,
+ LoadOp::Clear => {
+ use hal::format::ChannelType;
+ //TODO: validate sign/unsign and normalized ranges of the color values
+ let value = match rat.format.unwrap().base_format().1 {
+ ChannelType::Unorm
+ | ChannelType::Snorm
+ | ChannelType::Ufloat
+ | ChannelType::Sfloat
+ | ChannelType::Uscaled
+ | ChannelType::Sscaled
+ | ChannelType::Srgb => hal::command::ClearColor {
+ float32: conv::map_color_f32(&at.channel.clear_value),
+ },
+ ChannelType::Sint => hal::command::ClearColor {
+ sint32: conv::map_color_i32(&at.channel.clear_value),
+ },
+ ChannelType::Uint => hal::command::ClearColor {
+ uint32: conv::map_color_u32(&at.channel.clear_value),
+ },
+ };
+ Some(hal::command::ClearValue { color: value })
+ }
+ }
+ })
+ .chain(depth_stencil_attachment.and_then(|at| {
+ match (at.depth.load_op, at.stencil.load_op) {
+ (LoadOp::Load, LoadOp::Load) => None,
+ (LoadOp::Clear, _) | (_, LoadOp::Clear) => {
+ let value = hal::command::ClearDepthStencil {
+ depth: at.depth.clear_value,
+ stencil: at.stencil.clear_value,
+ };
+ Some(hal::command::ClearValue {
+ depth_stencil: value,
+ })
+ }
+ }
+ }))
+ .collect::<ArrayVec<[_; MAX_COLOR_TARGETS + 1]>>();
+
+ unsafe {
+ raw.begin_render_pass(
+ render_pass,
+ framebuffer,
+ rect,
+ clear_values,
+ hal::command::SubpassContents::Inline,
+ );
+ raw.set_scissors(0, iter::once(&rect));
+ raw.set_viewports(
+ 0,
+ iter::once(hal::pso::Viewport {
+ rect,
+ depth: 0.0..1.0,
+ }),
+ );
+ }
+
+ context
+ };
+
+ let mut state = State {
+ pipeline_flags: PipelineFlags::empty(),
+ binder: Binder::new(cmd_buf.limits.max_bind_groups),
+ blend_color: OptionalState::Unused,
+ stencil_reference: 0,
+ pipeline: StateChange::new(),
+ index: IndexState::default(),
+ vertex: VertexState::default(),
+ debug_scope_depth: 0,
+ };
+ let mut temp_offsets = Vec::new();
+
+ for command in base.commands {
+ match *command {
+ RenderCommand::SetBindGroup {
+ index,
+ num_dynamic_offsets,
+ bind_group_id,
+ } => {
+ let scope = PassErrorScope::SetBindGroup(bind_group_id);
+ let max_bind_groups = device.limits.max_bind_groups;
+ if (index as u32) >= max_bind_groups {
+ return Err(RenderCommandError::BindGroupIndexOutOfRange {
+ index,
+ max: max_bind_groups,
+ })
+ .map_pass_err(scope);
+ }
+
+ temp_offsets.clear();
+ temp_offsets
+ .extend_from_slice(&base.dynamic_offsets[..num_dynamic_offsets as usize]);
+ base.dynamic_offsets = &base.dynamic_offsets[num_dynamic_offsets as usize..];
+
+ let bind_group = trackers
+ .bind_groups
+ .use_extend(&*bind_group_guard, bind_group_id, (), ())
+ .unwrap();
+ bind_group
+ .validate_dynamic_bindings(&temp_offsets)
+ .map_pass_err(scope)?;
+
+ trackers
+ .merge_extend(&bind_group.used)
+ .map_pass_err(scope)?;
+
+ if let Some((pipeline_layout_id, follow_ups)) = state.binder.provide_entry(
+ index as usize,
+ id::Valid(bind_group_id),
+ bind_group,
+ &temp_offsets,
+ ) {
+ let bind_groups = iter::once(bind_group.raw.raw())
+ .chain(
+ follow_ups
+ .clone()
+ .map(|(bg_id, _)| bind_group_guard[bg_id].raw.raw()),
+ )
+ .collect::<ArrayVec<[_; MAX_BIND_GROUPS]>>();
+ temp_offsets.extend(follow_ups.flat_map(|(_, offsets)| offsets));
+ unsafe {
+ raw.bind_graphics_descriptor_sets(
+ &pipeline_layout_guard[pipeline_layout_id].raw,
+ index as usize,
+ bind_groups,
+ &temp_offsets,
+ );
+ }
+ };
+ }
+ RenderCommand::SetPipeline(pipeline_id) => {
+ let scope = PassErrorScope::SetPipelineRender(pipeline_id);
+ if state.pipeline.set_and_check_redundant(pipeline_id) {
+ continue;
+ }
+
+ let pipeline = trackers
+ .render_pipes
+ .use_extend(&*pipeline_guard, pipeline_id, (), ())
+ .map_err(|_| RenderCommandError::InvalidPipeline(pipeline_id))
+ .map_pass_err(scope)?;
+
+ context
+ .check_compatible(&pipeline.pass_context)
+ .map_err(RenderCommandError::IncompatiblePipeline)
+ .map_pass_err(scope)?;
+
+ state.pipeline_flags = pipeline.flags;
+
+ if pipeline.flags.contains(PipelineFlags::WRITES_DEPTH_STENCIL)
+ && is_ds_read_only
+ {
+ return Err(RenderCommandError::IncompatibleReadOnlyDepthStencil)
+ .map_pass_err(scope);
+ }
+
+ state
+ .blend_color
+ .require(pipeline.flags.contains(PipelineFlags::BLEND_COLOR));
+
+ unsafe {
+ raw.bind_graphics_pipeline(&pipeline.raw);
+ }
+
+ if pipeline.flags.contains(PipelineFlags::STENCIL_REFERENCE) {
+ unsafe {
+ raw.set_stencil_reference(
+ hal::pso::Face::all(),
+ state.stencil_reference,
+ );
+ }
+ }
+
+                    // Rebind resources if the pipeline layout changed.
+ if state.binder.pipeline_layout_id != Some(pipeline.layout_id.value) {
+ let pipeline_layout = &pipeline_layout_guard[pipeline.layout_id.value];
+
+ state.binder.change_pipeline_layout(
+ &*pipeline_layout_guard,
+ pipeline.layout_id.value,
+ );
+
+ let mut is_compatible = true;
+
+ for (index, (entry, &bgl_id)) in state
+ .binder
+ .entries
+ .iter_mut()
+ .zip(&pipeline_layout.bind_group_layout_ids)
+ .enumerate()
+ {
+ match entry.expect_layout(bgl_id) {
+ LayoutChange::Match(bg_id, offsets) if is_compatible => {
+ let desc_set = bind_group_guard[bg_id].raw.raw();
+ unsafe {
+ raw.bind_graphics_descriptor_sets(
+ &pipeline_layout.raw,
+ index,
+ iter::once(desc_set),
+ offsets.iter().cloned(),
+ );
+ }
+ }
+ LayoutChange::Match(..) | LayoutChange::Unchanged => {}
+ LayoutChange::Mismatch => {
+ is_compatible = false;
+ }
+ }
+ }
+
+ // Clear push constant ranges
+ let non_overlapping = super::bind::compute_nonoverlapping_ranges(
+ &pipeline_layout.push_constant_ranges,
+ );
+ for range in non_overlapping {
+ let offset = range.range.start;
+ let size_bytes = range.range.end - offset;
+ super::push_constant_clear(
+ offset,
+ size_bytes,
+ |clear_offset, clear_data| unsafe {
+ raw.push_graphics_constants(
+ &pipeline_layout.raw,
+ conv::map_shader_stage_flags(range.stages),
+ clear_offset,
+ clear_data,
+ );
+ },
+ );
+ }
+ }
+
+ // Rebind index buffer if the index format has changed with the pipeline switch
+ if state.index.format != pipeline.index_format {
+ state.index.format = pipeline.index_format;
+ state.index.update_limit();
+
+ if let Some((buffer_id, ref range)) = state.index.bound_buffer_view {
+ let &(ref buffer, _) = buffer_guard[buffer_id].raw.as_ref().unwrap();
+
+ let range = hal::buffer::SubRange {
+ offset: range.start,
+ size: Some(range.end - range.start),
+ };
+ let index_type = conv::map_index_format(state.index.format);
+ unsafe {
+ raw.bind_index_buffer(buffer, range, index_type);
+ }
+ }
+ }
+ // Update vertex buffer limits
+ for (vbs, &(stride, rate)) in
+ state.vertex.inputs.iter_mut().zip(&pipeline.vertex_strides)
+ {
+ vbs.stride = stride;
+ vbs.rate = rate;
+ }
+ let vertex_strides_len = pipeline.vertex_strides.len();
+ for vbs in state.vertex.inputs.iter_mut().skip(vertex_strides_len) {
+ vbs.stride = 0;
+ vbs.rate = InputStepMode::Vertex;
+ }
+ state.vertex.update_limits();
+ }
+ RenderCommand::SetIndexBuffer {
+ buffer_id,
+ offset,
+ size,
+ } => {
+ let scope = PassErrorScope::SetIndexBuffer(buffer_id);
+ let buffer = trackers
+ .buffers
+ .use_extend(&*buffer_guard, buffer_id, (), BufferUse::INDEX)
+ .map_err(|e| RenderCommandError::Buffer(buffer_id, e))
+ .map_pass_err(scope)?;
+ check_buffer_usage(buffer.usage, BufferUsage::INDEX).map_pass_err(scope)?;
+ let &(ref buf_raw, _) = buffer
+ .raw
+ .as_ref()
+ .ok_or(RenderCommandError::DestroyedBuffer(buffer_id))
+ .map_pass_err(scope)?;
+
+ let end = match size {
+ Some(s) => offset + s.get(),
+ None => buffer.size,
+ };
+ state.index.bound_buffer_view = Some((id::Valid(buffer_id), offset..end));
+ state.index.update_limit();
+
+ let range = hal::buffer::SubRange {
+ offset,
+ size: Some(end - offset),
+ };
+ let index_type = conv::map_index_format(state.index.format);
+ unsafe {
+ raw.bind_index_buffer(buf_raw, range, index_type);
+ }
+ }
+ RenderCommand::SetVertexBuffer {
+ slot,
+ buffer_id,
+ offset,
+ size,
+ } => {
+ let scope = PassErrorScope::SetVertexBuffer(buffer_id);
+ let buffer = trackers
+ .buffers
+ .use_extend(&*buffer_guard, buffer_id, (), BufferUse::VERTEX)
+ .map_err(|e| RenderCommandError::Buffer(buffer_id, e))
+ .map_pass_err(scope)?;
+ check_buffer_usage(buffer.usage, BufferUsage::VERTEX).map_pass_err(scope)?;
+ let &(ref buf_raw, _) = buffer
+ .raw
+ .as_ref()
+ .ok_or(RenderCommandError::DestroyedBuffer(buffer_id))
+ .map_pass_err(scope)?;
+
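+                    // Pad the tracked vertex-buffer slots with empty entries up
+                    // to `slot`, so the limit calculation sees every slot.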
+ let empty_slots = (1 + slot as usize).saturating_sub(state.vertex.inputs.len());
+ state
+ .vertex
+ .inputs
+ .extend(iter::repeat(VertexBufferState::EMPTY).take(empty_slots));
+ state.vertex.inputs[slot as usize].total_size = match size {
+ Some(s) => s.get(),
+ None => buffer.size - offset,
+ };
+
+ let range = hal::buffer::SubRange {
+ offset,
+ size: size.map(|s| s.get()),
+ };
+ unsafe {
+ raw.bind_vertex_buffers(slot, iter::once((buf_raw, range)));
+ }
+ state.vertex.update_limits();
+ }
+ RenderCommand::SetBlendColor(ref color) => {
+ state.blend_color = OptionalState::Set;
+ unsafe {
+ raw.set_blend_constants(conv::map_color_f32(color));
+ }
+ }
+ RenderCommand::SetStencilReference(value) => {
+ state.stencil_reference = value;
+ if state
+ .pipeline_flags
+ .contains(PipelineFlags::STENCIL_REFERENCE)
+ {
+ unsafe {
+ raw.set_stencil_reference(hal::pso::Face::all(), value);
+ }
+ }
+ }
+ RenderCommand::SetViewport {
+ ref rect,
+ depth_min,
+ depth_max,
+ } => {
+ let scope = PassErrorScope::SetViewport;
+ use std::{convert::TryFrom, i16};
+ if rect.w <= 0.0
+ || rect.h <= 0.0
+ || depth_min < 0.0
+ || depth_min > 1.0
+ || depth_max < 0.0
+ || depth_max > 1.0
+ {
+ return Err(RenderCommandError::InvalidViewport).map_pass_err(scope);
+ }
+ let r = hal::pso::Rect {
+ x: i16::try_from(rect.x.round() as i64).unwrap_or(0),
+ y: i16::try_from(rect.y.round() as i64).unwrap_or(0),
+ w: i16::try_from(rect.w.round() as i64).unwrap_or(i16::MAX),
+ h: i16::try_from(rect.h.round() as i64).unwrap_or(i16::MAX),
+ };
+ unsafe {
+ raw.set_viewports(
+ 0,
+ iter::once(hal::pso::Viewport {
+ rect: r,
+ depth: depth_min..depth_max,
+ }),
+ );
+ }
+ }
+ RenderCommand::SetPushConstant {
+ stages,
+ offset,
+ size_bytes,
+ values_offset,
+ } => {
+ let scope = PassErrorScope::SetPushConstant;
+ let values_offset = values_offset
+ .ok_or(RenderPassErrorInner::InvalidValuesOffset)
+ .map_pass_err(scope)?;
+
+ let end_offset_bytes = offset + size_bytes;
+ let values_end_offset =
+ (values_offset + size_bytes / wgt::PUSH_CONSTANT_ALIGNMENT) as usize;
+ let data_slice =
+ &base.push_constant_data[(values_offset as usize)..values_end_offset];
+
+ let pipeline_layout_id = state
+ .binder
+ .pipeline_layout_id
+ .ok_or(DrawError::MissingPipeline)
+ .map_pass_err(scope)?;
+ let pipeline_layout = &pipeline_layout_guard[pipeline_layout_id];
+
+ pipeline_layout
+ .validate_push_constant_ranges(stages, offset, end_offset_bytes)
+ .map_err(RenderCommandError::from)
+ .map_pass_err(scope)?;
+
+ unsafe {
+ raw.push_graphics_constants(
+ &pipeline_layout.raw,
+ conv::map_shader_stage_flags(stages),
+ offset,
+ data_slice,
+ )
+ }
+ }
+ RenderCommand::SetScissor(ref rect) => {
+ let scope = PassErrorScope::SetScissorRect;
+ use std::{convert::TryFrom, i16};
+ if rect.w == 0
+ || rect.h == 0
+ || rect.x + rect.w > attachment_width.unwrap()
+ || rect.y + rect.h > attachment_height.unwrap()
+ {
+ return Err(RenderCommandError::InvalidScissorRect).map_pass_err(scope);
+ }
+ let r = hal::pso::Rect {
+ x: i16::try_from(rect.x).unwrap_or(0),
+ y: i16::try_from(rect.y).unwrap_or(0),
+ w: i16::try_from(rect.w).unwrap_or(i16::MAX),
+ h: i16::try_from(rect.h).unwrap_or(i16::MAX),
+ };
+ unsafe {
+ raw.set_scissors(0, iter::once(r));
+ }
+ }
+ RenderCommand::Draw {
+ vertex_count,
+ instance_count,
+ first_vertex,
+ first_instance,
+ } => {
+ let scope = PassErrorScope::Draw;
+ state.is_ready().map_pass_err(scope)?;
+ let last_vertex = first_vertex + vertex_count;
+ let vertex_limit = state.vertex.vertex_limit;
+ if last_vertex > vertex_limit {
+ return Err(DrawError::VertexBeyondLimit {
+ last_vertex,
+ vertex_limit,
+ })
+ .map_pass_err(scope);
+ }
+ let last_instance = first_instance + instance_count;
+ let instance_limit = state.vertex.instance_limit;
+ if last_instance > instance_limit {
+ return Err(DrawError::InstanceBeyondLimit {
+ last_instance,
+ instance_limit,
+ })
+ .map_pass_err(scope);
+ }
+
+ unsafe {
+ raw.draw(
+ first_vertex..first_vertex + vertex_count,
+ first_instance..first_instance + instance_count,
+ );
+ }
+ }
+ RenderCommand::DrawIndexed {
+ index_count,
+ instance_count,
+ first_index,
+ base_vertex,
+ first_instance,
+ } => {
+ let scope = PassErrorScope::DrawIndexed;
+ state.is_ready().map_pass_err(scope)?;
+
+ //TODO: validate that base_vertex + max_index() is within the provided range
+ let last_index = first_index + index_count;
+ let index_limit = state.index.limit;
+ if last_index > index_limit {
+ return Err(DrawError::IndexBeyondLimit {
+ last_index,
+ index_limit,
+ })
+ .map_pass_err(scope);
+ }
+ let last_instance = first_instance + instance_count;
+ let instance_limit = state.vertex.instance_limit;
+ if last_instance > instance_limit {
+ return Err(DrawError::InstanceBeyondLimit {
+ last_instance,
+ instance_limit,
+ })
+ .map_pass_err(scope);
+ }
+
+ unsafe {
+ raw.draw_indexed(
+ first_index..first_index + index_count,
+ base_vertex,
+ first_instance..first_instance + instance_count,
+ );
+ }
+ }
+ RenderCommand::MultiDrawIndirect {
+ buffer_id,
+ offset,
+ count,
+ indexed,
+ } => {
+ let scope = if indexed {
+ PassErrorScope::DrawIndexedIndirect
+ } else {
+ PassErrorScope::DrawIndirect
+ };
+ state.is_ready().map_pass_err(scope)?;
+
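+                    // Indirect draw arguments are tightly packed u32s: four
+                    // fields (16 bytes) for plain draws, five fields
+                    // (20 bytes) for indexed draws.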
+ let stride = match indexed {
+ false => 16,
+ true => 20,
+ };
+
+ if count.is_some() {
+ check_device_features(device.features, wgt::Features::MULTI_DRAW_INDIRECT)
+ .map_pass_err(scope)?;
+ }
+
+ let indirect_buffer = trackers
+ .buffers
+ .use_extend(&*buffer_guard, buffer_id, (), BufferUse::INDIRECT)
+ .map_err(|e| RenderCommandError::Buffer(buffer_id, e))
+ .map_pass_err(scope)?;
+ check_buffer_usage(indirect_buffer.usage, BufferUsage::INDIRECT)
+ .map_pass_err(scope)?;
+ let &(ref indirect_raw, _) = indirect_buffer
+ .raw
+ .as_ref()
+ .ok_or(RenderCommandError::DestroyedBuffer(buffer_id))
+ .map_pass_err(scope)?;
+
+ let actual_count = count.map_or(1, |c| c.get());
+
+ let begin_offset = offset;
+ let end_offset = offset + stride * actual_count as u64;
+ if end_offset > indirect_buffer.size {
+ return Err(RenderPassErrorInner::IndirectBufferOverrun {
+ offset,
+ count,
+ begin_offset,
+ end_offset,
+ buffer_size: indirect_buffer.size,
+ })
+ .map_pass_err(scope);
+ }
+
+ match indexed {
+ false => unsafe {
+ raw.draw_indirect(indirect_raw, offset, actual_count, stride as u32);
+ },
+ true => unsafe {
+ raw.draw_indexed_indirect(
+ indirect_raw,
+ offset,
+ actual_count,
+ stride as u32,
+ );
+ },
+ }
+ }
+ RenderCommand::MultiDrawIndirectCount {
+ buffer_id,
+ offset,
+ count_buffer_id,
+ count_buffer_offset,
+ max_count,
+ indexed,
+ } => {
+ let scope = if indexed {
+ PassErrorScope::DrawIndexedIndirect
+ } else {
+ PassErrorScope::DrawIndirect
+ };
+ state.is_ready().map_pass_err(scope)?;
+
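+                    // Same packed argument strides as `MultiDrawIndirect`
+                    // above: 16 bytes for plain draws, 20 for indexed ones.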
+ let stride = match indexed {
+ false => 16,
+ true => 20,
+ };
+
+ check_device_features(
+ device.features,
+ wgt::Features::MULTI_DRAW_INDIRECT_COUNT,
+ )
+ .map_pass_err(scope)?;
+
+ let indirect_buffer = trackers
+ .buffers
+ .use_extend(&*buffer_guard, buffer_id, (), BufferUse::INDIRECT)
+ .map_err(|e| RenderCommandError::Buffer(buffer_id, e))
+ .map_pass_err(scope)?;
+ check_buffer_usage(indirect_buffer.usage, BufferUsage::INDIRECT)
+ .map_pass_err(scope)?;
+ let &(ref indirect_raw, _) = indirect_buffer
+ .raw
+ .as_ref()
+ .ok_or(RenderCommandError::DestroyedBuffer(buffer_id))
+ .map_pass_err(scope)?;
+
+ let count_buffer = trackers
+ .buffers
+ .use_extend(&*buffer_guard, count_buffer_id, (), BufferUse::INDIRECT)
+ .map_err(|e| RenderCommandError::Buffer(count_buffer_id, e))
+ .map_pass_err(scope)?;
+ check_buffer_usage(count_buffer.usage, BufferUsage::INDIRECT)
+ .map_pass_err(scope)?;
+ let &(ref count_raw, _) = count_buffer
+ .raw
+ .as_ref()
+ .ok_or(RenderCommandError::DestroyedBuffer(count_buffer_id))
+ .map_pass_err(scope)?;
+
+ let begin_offset = offset;
+ let end_offset = offset + stride * max_count as u64;
+ if end_offset > indirect_buffer.size {
+ return Err(RenderPassErrorInner::IndirectBufferOverrun {
+ offset,
+ count: None,
+ begin_offset,
+ end_offset,
+ buffer_size: indirect_buffer.size,
+ })
+ .map_pass_err(scope);
+ }
+
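+                    // The draw count is a single u32, so the read ends
+                    // 4 bytes past its offset.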
+ let begin_count_offset = count_buffer_offset;
+ let end_count_offset = count_buffer_offset + 4;
+ if end_count_offset > count_buffer.size {
+ return Err(RenderPassErrorInner::IndirectCountBufferOverrun {
+ begin_count_offset,
+ end_count_offset,
+ count_buffer_size: count_buffer.size,
+ })
+ .map_pass_err(scope);
+ }
+
+ match indexed {
+ false => unsafe {
+ raw.draw_indirect_count(
+ indirect_raw,
+ offset,
+ count_raw,
+ count_buffer_offset,
+ max_count,
+ stride as u32,
+ );
+ },
+ true => unsafe {
+ raw.draw_indexed_indirect_count(
+ indirect_raw,
+ offset,
+ count_raw,
+ count_buffer_offset,
+ max_count,
+ stride as u32,
+ );
+ },
+ }
+ }
+ RenderCommand::PushDebugGroup { color, len } => {
+ state.debug_scope_depth += 1;
+ let label = str::from_utf8(&base.string_data[..len]).unwrap();
+ unsafe {
+ raw.begin_debug_marker(label, color);
+ }
+ base.string_data = &base.string_data[len..];
+ }
+ RenderCommand::PopDebugGroup => {
+ let scope = PassErrorScope::PopDebugGroup;
+ if state.debug_scope_depth == 0 {
+ return Err(RenderPassErrorInner::InvalidPopDebugGroup).map_pass_err(scope);
+ }
+ state.debug_scope_depth -= 1;
+ unsafe {
+ raw.end_debug_marker();
+ }
+ }
+ RenderCommand::InsertDebugMarker { color, len } => {
+ let label = str::from_utf8(&base.string_data[..len]).unwrap();
+ unsafe {
+ raw.insert_debug_marker(label, color);
+ }
+ base.string_data = &base.string_data[len..];
+ }
+ RenderCommand::ExecuteBundle(bundle_id) => {
+ let scope = PassErrorScope::ExecuteBundle;
+ let bundle = trackers
+ .bundles
+ .use_extend(&*bundle_guard, bundle_id, (), ())
+ .unwrap();
+
+ context
+ .check_compatible(&bundle.context)
+ .map_err(RenderPassErrorInner::IncompatibleRenderBundle)
+ .map_pass_err(scope)?;
+
+ unsafe {
+ bundle.execute(
+ &mut raw,
+ &*pipeline_layout_guard,
+ &*bind_group_guard,
+ &*pipeline_guard,
+ &*buffer_guard,
+ )
+ }
+ .map_err(|e| match e {
+ ExecutionError::DestroyedBuffer(id) => {
+ RenderCommandError::DestroyedBuffer(id)
+ }
+ })
+ .map_pass_err(scope)?;
+
+ trackers.merge_extend(&bundle.used).map_pass_err(scope)?;
+ state.reset_bundle();
+ }
+ }
+ }
+
+ tracing::trace!("Merging {:?} with the render pass", encoder_id);
+ unsafe {
+ raw.end_render_pass();
+ }
+
+ for ra in render_attachments {
+ let texture = &texture_guard[ra.texture_id.value];
+ check_texture_usage(texture.usage, TextureUsage::RENDER_ATTACHMENT)
+ .map_pass_err(scope)?;
+
+ // the tracker set of the pass is always in "extend" mode
+ trackers
+ .textures
+ .change_extend(
+ ra.texture_id.value,
+ &ra.texture_id.ref_count,
+ ra.selector.clone(),
+ ra.new_use,
+ )
+ .unwrap();
+
+ if let Some(usage) = ra.previous_use {
+                // Make the attachment tracker aware of the internal
+                // transition performed by the render pass by registering
+                // the previous usage as the initial state.
+ trackers
+ .textures
+ .prepend(
+ ra.texture_id.value,
+ &ra.texture_id.ref_count,
+ ra.selector.clone(),
+ usage,
+ )
+ .unwrap();
+ }
+ }
+
+ super::CommandBuffer::insert_barriers(
+ cmd_buf.raw.last_mut().unwrap(),
+ &mut cmd_buf.trackers,
+ &trackers,
+ &*buffer_guard,
+ &*texture_guard,
+ );
+ unsafe {
+ cmd_buf.raw.last_mut().unwrap().finish();
+ }
+ cmd_buf.raw.push(raw);
+
+ Ok(())
+ }
+}
+
+pub mod render_ffi {
+ use super::{
+ super::{Rect, RenderCommand},
+ RenderPass,
+ };
+ use crate::{id, span, RawString};
+ use std::{convert::TryInto, ffi, num::NonZeroU32, slice};
+ use wgt::{BufferAddress, BufferSize, Color, DynamicOffset};
+
+ /// # Safety
+ ///
+ /// This function is unsafe as there is no guarantee that the given pointer is
+ /// valid for `offset_length` elements.
+ // TODO: There might be other safety issues, such as using the unsafe
+ // `RawPass::encode` and `RawPass::encode_slice`.
+ #[no_mangle]
+ pub unsafe extern "C" fn wgpu_render_pass_set_bind_group(
+ pass: &mut RenderPass,
+ index: u32,
+ bind_group_id: id::BindGroupId,
+ offsets: *const DynamicOffset,
+ offset_length: usize,
+ ) {
+ span!(_guard, DEBUG, "RenderPass::set_bind_group");
+ pass.base.commands.push(RenderCommand::SetBindGroup {
+ index: index.try_into().unwrap(),
+ num_dynamic_offsets: offset_length.try_into().unwrap(),
+ bind_group_id,
+ });
+ pass.base
+ .dynamic_offsets
+ .extend_from_slice(slice::from_raw_parts(offsets, offset_length));
+ }
+
+ #[no_mangle]
+ pub extern "C" fn wgpu_render_pass_set_pipeline(
+ pass: &mut RenderPass,
+ pipeline_id: id::RenderPipelineId,
+ ) {
+ span!(_guard, DEBUG, "RenderPass::set_pipeline");
+ pass.base
+ .commands
+ .push(RenderCommand::SetPipeline(pipeline_id));
+ }
+
+ #[no_mangle]
+ pub extern "C" fn wgpu_render_pass_set_index_buffer(
+ pass: &mut RenderPass,
+ buffer_id: id::BufferId,
+ offset: BufferAddress,
+ size: Option<BufferSize>,
+ ) {
+ span!(_guard, DEBUG, "RenderPass::set_index_buffer");
+ pass.base.commands.push(RenderCommand::SetIndexBuffer {
+ buffer_id,
+ offset,
+ size,
+ });
+ }
+
+ #[no_mangle]
+ pub extern "C" fn wgpu_render_pass_set_vertex_buffer(
+ pass: &mut RenderPass,
+ slot: u32,
+ buffer_id: id::BufferId,
+ offset: BufferAddress,
+ size: Option<BufferSize>,
+ ) {
+ span!(_guard, DEBUG, "RenderPass::set_vertex_buffer");
+ pass.base.commands.push(RenderCommand::SetVertexBuffer {
+ slot,
+ buffer_id,
+ offset,
+ size,
+ });
+ }
+
+ #[no_mangle]
+ pub extern "C" fn wgpu_render_pass_set_blend_color(pass: &mut RenderPass, color: &Color) {
+ span!(_guard, DEBUG, "RenderPass::set_blend_color");
+ pass.base
+ .commands
+ .push(RenderCommand::SetBlendColor(*color));
+ }
+
+ #[no_mangle]
+ pub extern "C" fn wgpu_render_pass_set_stencil_reference(pass: &mut RenderPass, value: u32) {
+ span!(_guard, DEBUG, "RenderPass::set_stencil_buffer");
+ pass.base
+ .commands
+ .push(RenderCommand::SetStencilReference(value));
+ }
+
+ #[no_mangle]
+ pub extern "C" fn wgpu_render_pass_set_viewport(
+ pass: &mut RenderPass,
+ x: f32,
+ y: f32,
+ w: f32,
+ h: f32,
+ depth_min: f32,
+ depth_max: f32,
+ ) {
+ span!(_guard, DEBUG, "RenderPass::set_viewport");
+ pass.base.commands.push(RenderCommand::SetViewport {
+ rect: Rect { x, y, w, h },
+ depth_min,
+ depth_max,
+ });
+ }
+
+ #[no_mangle]
+ pub extern "C" fn wgpu_render_pass_set_scissor_rect(
+ pass: &mut RenderPass,
+ x: u32,
+ y: u32,
+ w: u32,
+ h: u32,
+ ) {
+ span!(_guard, DEBUG, "RenderPass::set_scissor_rect");
+ pass.base
+ .commands
+ .push(RenderCommand::SetScissor(Rect { x, y, w, h }));
+ }
+
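+    /// Encodes a `SetPushConstant` command, packing the given bytes into
+    /// 4-byte words.
+    ///
+    /// A minimal usage sketch (assumes a live `pass`; `offset` and
+    /// `size_bytes` must both be multiples of `wgt::PUSH_CONSTANT_ALIGNMENT`,
+    /// i.e. 4 bytes):
+    ///
+    /// ```ignore
+    /// let values: [f32; 2] = [0.5, 2.0];
+    /// let bytes: Vec<u8> = values.iter().flat_map(|v| v.to_ne_bytes().to_vec()).collect();
+    /// unsafe {
+    ///     wgpu_render_pass_set_push_constants(
+    ///         &mut pass,
+    ///         wgt::ShaderStage::VERTEX,
+    ///         0,                  // offset, 4-byte aligned
+    ///         bytes.len() as u32, // size in bytes, 4-byte aligned
+    ///         bytes.as_ptr(),
+    ///     );
+    /// }
+    /// ```
+    ///
+    /// # Safety
+    ///
+    /// This function is unsafe as there is no guarantee that the given `data`
+    /// pointer is valid for `size_bytes` bytes.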
+ #[no_mangle]
+ pub unsafe extern "C" fn wgpu_render_pass_set_push_constants(
+ pass: &mut RenderPass,
+ stages: wgt::ShaderStage,
+ offset: u32,
+ size_bytes: u32,
+ data: *const u8,
+ ) {
+ span!(_guard, DEBUG, "RenderPass::set_push_constants");
+ assert_eq!(
+ offset & (wgt::PUSH_CONSTANT_ALIGNMENT - 1),
+ 0,
+ "Push constant offset must be aligned to 4 bytes."
+ );
+ assert_eq!(
+ size_bytes & (wgt::PUSH_CONSTANT_ALIGNMENT - 1),
+ 0,
+ "Push constant size must be aligned to 4 bytes."
+ );
+ let data_slice = slice::from_raw_parts(data, size_bytes as usize);
+ let value_offset = pass.base.push_constant_data.len().try_into().expect(
+ "Ran out of push constant space. Don't set 4gb of push constants per RenderPass.",
+ );
+
+ pass.base.push_constant_data.extend(
+ data_slice
+ .chunks_exact(wgt::PUSH_CONSTANT_ALIGNMENT as usize)
+ .map(|arr| u32::from_ne_bytes([arr[0], arr[1], arr[2], arr[3]])),
+ );
+
+ pass.base.commands.push(RenderCommand::SetPushConstant {
+ stages,
+ offset,
+ size_bytes,
+ values_offset: Some(value_offset),
+ });
+ }
+
+ #[no_mangle]
+ pub extern "C" fn wgpu_render_pass_draw(
+ pass: &mut RenderPass,
+ vertex_count: u32,
+ instance_count: u32,
+ first_vertex: u32,
+ first_instance: u32,
+ ) {
+ span!(_guard, DEBUG, "RenderPass::draw");
+ pass.base.commands.push(RenderCommand::Draw {
+ vertex_count,
+ instance_count,
+ first_vertex,
+ first_instance,
+ });
+ }
+
+ #[no_mangle]
+ pub extern "C" fn wgpu_render_pass_draw_indexed(
+ pass: &mut RenderPass,
+ index_count: u32,
+ instance_count: u32,
+ first_index: u32,
+ base_vertex: i32,
+ first_instance: u32,
+ ) {
+ span!(_guard, DEBUG, "RenderPass::draw_indexed");
+ pass.base.commands.push(RenderCommand::DrawIndexed {
+ index_count,
+ instance_count,
+ first_index,
+ base_vertex,
+ first_instance,
+ });
+ }
+
+ #[no_mangle]
+ pub extern "C" fn wgpu_render_pass_draw_indirect(
+ pass: &mut RenderPass,
+ buffer_id: id::BufferId,
+ offset: BufferAddress,
+ ) {
+ span!(_guard, DEBUG, "RenderPass::draw_indirect");
+ pass.base.commands.push(RenderCommand::MultiDrawIndirect {
+ buffer_id,
+ offset,
+ count: None,
+ indexed: false,
+ });
+ }
+
+ #[no_mangle]
+ pub extern "C" fn wgpu_render_pass_draw_indexed_indirect(
+ pass: &mut RenderPass,
+ buffer_id: id::BufferId,
+ offset: BufferAddress,
+ ) {
+ span!(_guard, DEBUG, "RenderPass::draw_indexed_indirect");
+ pass.base.commands.push(RenderCommand::MultiDrawIndirect {
+ buffer_id,
+ offset,
+ count: None,
+ indexed: true,
+ });
+ }
+
+ #[no_mangle]
+ pub extern "C" fn wgpu_render_pass_multi_draw_indirect(
+ pass: &mut RenderPass,
+ buffer_id: id::BufferId,
+ offset: BufferAddress,
+ count: u32,
+ ) {
+ span!(_guard, DEBUG, "RenderPass::multi_draw_indirect");
+ pass.base.commands.push(RenderCommand::MultiDrawIndirect {
+ buffer_id,
+ offset,
+ count: NonZeroU32::new(count),
+ indexed: false,
+ });
+ }
+
+ #[no_mangle]
+ pub extern "C" fn wgpu_render_pass_multi_draw_indexed_indirect(
+ pass: &mut RenderPass,
+ buffer_id: id::BufferId,
+ offset: BufferAddress,
+ count: u32,
+ ) {
+ span!(_guard, DEBUG, "RenderPass::multi_draw_indexed_indirect");
+ pass.base.commands.push(RenderCommand::MultiDrawIndirect {
+ buffer_id,
+ offset,
+ count: NonZeroU32::new(count),
+ indexed: true,
+ });
+ }
+
+ #[no_mangle]
+ pub extern "C" fn wgpu_render_pass_multi_draw_indirect_count(
+ pass: &mut RenderPass,
+ buffer_id: id::BufferId,
+ offset: BufferAddress,
+ count_buffer_id: id::BufferId,
+ count_buffer_offset: BufferAddress,
+ max_count: u32,
+ ) {
+ span!(_guard, DEBUG, "RenderPass::multi_draw_indirect_count");
+ pass.base
+ .commands
+ .push(RenderCommand::MultiDrawIndirectCount {
+ buffer_id,
+ offset,
+ count_buffer_id,
+ count_buffer_offset,
+ max_count,
+ indexed: false,
+ });
+ }
+
+ #[no_mangle]
+ pub extern "C" fn wgpu_render_pass_multi_draw_indexed_indirect_count(
+ pass: &mut RenderPass,
+ buffer_id: id::BufferId,
+ offset: BufferAddress,
+ count_buffer_id: id::BufferId,
+ count_buffer_offset: BufferAddress,
+ max_count: u32,
+ ) {
+ span!(
+ _guard,
+ DEBUG,
+ "RenderPass::multi_draw_indexed_indirect_count"
+ );
+ pass.base
+ .commands
+ .push(RenderCommand::MultiDrawIndirectCount {
+ buffer_id,
+ offset,
+ count_buffer_id,
+ count_buffer_offset,
+ max_count,
+ indexed: true,
+ });
+ }
+
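+    /// # Safety
+    ///
+    /// This function is unsafe as there is no guarantee that the given `label`
+    /// pointer points to a valid NUL-terminated string.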
+ #[no_mangle]
+ pub unsafe extern "C" fn wgpu_render_pass_push_debug_group(
+ pass: &mut RenderPass,
+ label: RawString,
+ color: u32,
+ ) {
+ span!(_guard, DEBUG, "RenderPass::push_debug_group");
+ let bytes = ffi::CStr::from_ptr(label).to_bytes();
+ pass.base.string_data.extend_from_slice(bytes);
+
+ pass.base.commands.push(RenderCommand::PushDebugGroup {
+ color,
+ len: bytes.len(),
+ });
+ }
+
+ #[no_mangle]
+ pub extern "C" fn wgpu_render_pass_pop_debug_group(pass: &mut RenderPass) {
+ span!(_guard, DEBUG, "RenderPass::pop_debug_group");
+ pass.base.commands.push(RenderCommand::PopDebugGroup);
+ }
+
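+    /// # Safety
+    ///
+    /// This function is unsafe as there is no guarantee that the given `label`
+    /// pointer points to a valid NUL-terminated string.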
+ #[no_mangle]
+ pub unsafe extern "C" fn wgpu_render_pass_insert_debug_marker(
+ pass: &mut RenderPass,
+ label: RawString,
+ color: u32,
+ ) {
+ span!(_guard, DEBUG, "RenderPass::insert_debug_marker");
+ let bytes = ffi::CStr::from_ptr(label).to_bytes();
+ pass.base.string_data.extend_from_slice(bytes);
+
+ pass.base.commands.push(RenderCommand::InsertDebugMarker {
+ color,
+ len: bytes.len(),
+ });
+ }
+
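+    /// # Safety
+    ///
+    /// This function is unsafe as there is no guarantee that the given pointer
+    /// is valid for `render_bundle_ids_length` elements.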
+ #[no_mangle]
+    pub unsafe extern "C" fn wgpu_render_pass_execute_bundles(
+ pass: &mut RenderPass,
+ render_bundle_ids: *const id::RenderBundleId,
+ render_bundle_ids_length: usize,
+ ) {
+ span!(_guard, DEBUG, "RenderPass::execute_bundles");
+ for &bundle_id in slice::from_raw_parts(render_bundle_ids, render_bundle_ids_length) {
+ pass.base
+ .commands
+ .push(RenderCommand::ExecuteBundle(bundle_id));
+ }
+ }
+}
diff --git a/gfx/wgpu/wgpu-core/src/command/transfer.rs b/gfx/wgpu/wgpu-core/src/command/transfer.rs
new file mode 100644
index 0000000000..98ab294cfc
--- /dev/null
+++ b/gfx/wgpu/wgpu-core/src/command/transfer.rs
@@ -0,0 +1,789 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#[cfg(feature = "trace")]
+use crate::device::trace::Command as TraceCommand;
+use crate::{
+ command::{CommandBuffer, CommandEncoderError},
+ conv,
+ device::{all_buffer_stages, all_image_stages},
+ hub::{GfxBackend, Global, GlobalIdentityHandlerFactory, Storage, Token},
+ id::{BufferId, CommandEncoderId, TextureId},
+ resource::{BufferUse, Texture, TextureErrorDimension, TextureUse},
+ span,
+ track::TextureSelector,
+};
+
+use hal::command::CommandBuffer as _;
+use thiserror::Error;
+use wgt::{BufferAddress, BufferUsage, Extent3d, TextureUsage};
+
+use std::iter;
+
+pub(crate) const BITS_PER_BYTE: u32 = 8;
+
+pub type BufferCopyView = wgt::BufferCopyView<BufferId>;
+pub type TextureCopyView = wgt::TextureCopyView<TextureId>;
+
+#[derive(Clone, Debug)]
+pub enum CopySide {
+ Source,
+ Destination,
+}
+
+/// Error encountered while attempting a data transfer.
+#[derive(Clone, Debug, Error)]
+pub enum TransferError {
+ #[error("buffer {0:?} is invalid or destroyed")]
+ InvalidBuffer(BufferId),
+ #[error("texture {0:?} is invalid or destroyed")]
+ InvalidTexture(TextureId),
+ #[error("Source and destination cannot be the same buffer")]
+ SameSourceDestinationBuffer,
+ #[error("source buffer/texture is missing the `COPY_SRC` usage flag")]
+ MissingCopySrcUsageFlag,
+ #[error("destination buffer/texture is missing the `COPY_DST` usage flag")]
+ MissingCopyDstUsageFlag(Option<BufferId>, Option<TextureId>),
+ #[error("copy of {start_offset}..{end_offset} would end up overruning the bounds of the {side:?} buffer of size {buffer_size}")]
+ BufferOverrun {
+ start_offset: BufferAddress,
+ end_offset: BufferAddress,
+ buffer_size: BufferAddress,
+ side: CopySide,
+ },
+ #[error("copy of {dimension:?} {start_offset}..{end_offset} would end up overruning the bounds of the {side:?} texture of {dimension:?} size {texture_size}")]
+ TextureOverrun {
+ start_offset: u32,
+ end_offset: u32,
+ texture_size: u32,
+ dimension: TextureErrorDimension,
+ side: CopySide,
+ },
+ #[error("buffer offset {0} is not aligned to block size or `COPY_BUFFER_ALIGNMENT`")]
+ UnalignedBufferOffset(BufferAddress),
+ #[error("copy size {0} does not respect `COPY_BUFFER_ALIGNMENT`")]
+ UnalignedCopySize(BufferAddress),
+ #[error("copy width is not a multiple of block width")]
+ UnalignedCopyWidth,
+ #[error("copy height is not a multiple of block height")]
+ UnalignedCopyHeight,
+ #[error("copy origin's x component is not a multiple of block width")]
+ UnalignedCopyOriginX,
+ #[error("copy origin's y component is not a multiple of block height")]
+ UnalignedCopyOriginY,
+ #[error("bytes per row does not respect `COPY_BYTES_PER_ROW_ALIGNMENT`")]
+ UnalignedBytesPerRow,
+ #[error("number of rows per image is not a multiple of block height")]
+ UnalignedRowsPerImage,
+ #[error("number of bytes per row is less than the number of bytes in a complete row")]
+ InvalidBytesPerRow,
+ #[error("image is 1D and the copy height and depth are not both set to 1")]
+ InvalidCopySize,
+ #[error("number of rows per image is invalid")]
+ InvalidRowsPerImage,
+ #[error("source and destination layers have different aspects")]
+ MismatchedAspects,
+ #[error("copying from textures with format {0:?} is forbidden")]
+ CopyFromForbiddenTextureFormat(wgt::TextureFormat),
+ #[error("copying to textures with format {0:?} is forbidden")]
+ CopyToForbiddenTextureFormat(wgt::TextureFormat),
+}
+
+/// Error encountered while attempting a copy on a command encoder.
+#[derive(Clone, Debug, Error)]
+pub enum CopyError {
+ #[error(transparent)]
+ Encoder(#[from] CommandEncoderError),
+ #[error("Copy error")]
+ Transfer(#[from] TransferError),
+}
+
+//TODO: we currently access each texture twice for a transfer,
+// once only to get the aspect flags, which is unfortunate.
+pub(crate) fn texture_copy_view_to_hal<B: hal::Backend>(
+ view: &TextureCopyView,
+ size: &Extent3d,
+ texture_guard: &Storage<Texture<B>, TextureId>,
+) -> Result<
+ (
+ hal::image::SubresourceLayers,
+ TextureSelector,
+ hal::image::Offset,
+ ),
+ TransferError,
+> {
+ let texture = texture_guard
+ .get(view.texture)
+ .map_err(|_| TransferError::InvalidTexture(view.texture))?;
+
+ let level = view.mip_level as hal::image::Level;
+ let (layer, layer_count, z) = match texture.dimension {
+ wgt::TextureDimension::D1 | wgt::TextureDimension::D2 => (
+ view.origin.z as hal::image::Layer,
+ size.depth as hal::image::Layer,
+ 0,
+ ),
+ wgt::TextureDimension::D3 => (0, 1, view.origin.z as i32),
+ };
+
+ // TODO: Can't satisfy clippy here unless we modify
+ // `TextureSelector` to use `std::ops::RangeBounds`.
+ #[allow(clippy::range_plus_one)]
+ Ok((
+ hal::image::SubresourceLayers {
+ aspects: texture.aspects,
+ level,
+ layers: layer..layer + layer_count,
+ },
+ TextureSelector {
+ levels: level..level + 1,
+ layers: layer..layer + layer_count,
+ },
+ hal::image::Offset {
+ x: view.origin.x as i32,
+ y: view.origin.y as i32,
+ z,
+ },
+ ))
+}
+
+/// Function copied with minor modifications from the WebGPU standard:
+/// <https://gpuweb.github.io/gpuweb/#valid-texture-copy-range>
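+///
+/// A worked example under assumed values: copying one 8x8 slice of an
+/// `Rgba8Unorm` texture (1x1 blocks, 4 bytes per block) with
+/// `bytes_per_row = 256`, `rows_per_image = 0` and `offset = 0` requires
+/// `256 * (8 - 1) + 8 * 4 = 1824` bytes, so any buffer at least that large
+/// passes the overrun check.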
+pub(crate) fn validate_linear_texture_data(
+ layout: &wgt::TextureDataLayout,
+ format: wgt::TextureFormat,
+ buffer_size: BufferAddress,
+ buffer_side: CopySide,
+ bytes_per_block: BufferAddress,
+ copy_size: &Extent3d,
+) -> Result<(), TransferError> {
+ // Convert all inputs to BufferAddress (u64) to prevent overflow issues
+ let copy_width = copy_size.width as BufferAddress;
+ let copy_height = copy_size.height as BufferAddress;
+ let copy_depth = copy_size.depth as BufferAddress;
+
+ let offset = layout.offset;
+ let rows_per_image = layout.rows_per_image as BufferAddress;
+ let bytes_per_row = layout.bytes_per_row as BufferAddress;
+
+ let (block_width, block_height) = conv::texture_block_size(format);
+ let block_width = block_width as BufferAddress;
+ let block_height = block_height as BufferAddress;
+ let block_size = bytes_per_block;
+
+ if copy_width % block_width != 0 {
+ return Err(TransferError::UnalignedCopyWidth);
+ }
+ if copy_height % block_height != 0 {
+ return Err(TransferError::UnalignedCopyHeight);
+ }
+ if rows_per_image % block_height != 0 {
+ return Err(TransferError::UnalignedRowsPerImage);
+ }
+
+ let bytes_in_a_complete_row = block_size * copy_width / block_width;
+ let required_bytes_in_copy = if copy_width == 0 || copy_height == 0 || copy_depth == 0 {
+ 0
+ } else {
+ let actual_rows_per_image = if rows_per_image == 0 {
+ copy_height
+ } else {
+ rows_per_image
+ };
+ let texel_block_rows_per_image = actual_rows_per_image / block_height;
+ let bytes_per_image = bytes_per_row * texel_block_rows_per_image;
+ let bytes_in_last_slice =
+ bytes_per_row * (copy_height / block_height - 1) + bytes_in_a_complete_row;
+ bytes_per_image * (copy_depth - 1) + bytes_in_last_slice
+ };
+
+ if rows_per_image != 0 && rows_per_image < copy_height {
+ return Err(TransferError::InvalidRowsPerImage);
+ }
+ if offset + required_bytes_in_copy > buffer_size {
+ return Err(TransferError::BufferOverrun {
+ start_offset: offset,
+ end_offset: offset + required_bytes_in_copy,
+ buffer_size,
+ side: buffer_side,
+ });
+ }
+ if offset % block_size != 0 {
+ return Err(TransferError::UnalignedBufferOffset(offset));
+ }
+ if copy_height > 1 && bytes_per_row < bytes_in_a_complete_row {
+ return Err(TransferError::InvalidBytesPerRow);
+ }
+ if copy_depth > 1 && rows_per_image == 0 {
+ return Err(TransferError::InvalidRowsPerImage);
+ }
+ Ok(())
+}
+
+/// Function copied with minor modifications from the WebGPU standard:
+/// <https://gpuweb.github.io/gpuweb/#valid-texture-copy-range>
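+///
+/// For example (hypothetical values): with `origin.x = 4` and
+/// `copy_size.width = 8`, a mip level 16 texels wide passes the check
+/// (`4 + 8 <= 16`), while `copy_size.width = 16` would overrun it.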
+pub(crate) fn validate_texture_copy_range(
+ texture_copy_view: &TextureCopyView,
+ texture_format: wgt::TextureFormat,
+ texture_dimension: hal::image::Kind,
+ texture_side: CopySide,
+ copy_size: &Extent3d,
+) -> Result<(), TransferError> {
+ let (block_width, block_height) = conv::texture_block_size(texture_format);
+
+ let mut extent = texture_dimension.level_extent(texture_copy_view.mip_level as u8);
+ match texture_dimension {
+ hal::image::Kind::D1(..) => {
+ if (copy_size.height, copy_size.depth) != (1, 1) {
+ return Err(TransferError::InvalidCopySize);
+ }
+ }
+ hal::image::Kind::D2(_, _, array_layers, _) => {
+ extent.depth = array_layers as u32;
+ }
+ hal::image::Kind::D3(..) => {}
+ };
+
+ let x_copy_max = texture_copy_view.origin.x + copy_size.width;
+ if x_copy_max > extent.width {
+ return Err(TransferError::TextureOverrun {
+ start_offset: texture_copy_view.origin.x,
+ end_offset: x_copy_max,
+ texture_size: extent.width,
+ dimension: TextureErrorDimension::X,
+ side: texture_side,
+ });
+ }
+ let y_copy_max = texture_copy_view.origin.y + copy_size.height;
+ if y_copy_max > extent.height {
+ return Err(TransferError::TextureOverrun {
+ start_offset: texture_copy_view.origin.y,
+ end_offset: y_copy_max,
+ texture_size: extent.height,
+ dimension: TextureErrorDimension::Y,
+ side: texture_side,
+ });
+ }
+ let z_copy_max = texture_copy_view.origin.z + copy_size.depth;
+ if z_copy_max > extent.depth {
+ return Err(TransferError::TextureOverrun {
+ start_offset: texture_copy_view.origin.z,
+ end_offset: z_copy_max,
+ texture_size: extent.depth,
+ dimension: TextureErrorDimension::Z,
+ side: texture_side,
+ });
+ }
+
+ if texture_copy_view.origin.x % block_width != 0 {
+ return Err(TransferError::UnalignedCopyOriginX);
+ }
+ if texture_copy_view.origin.y % block_height != 0 {
+ return Err(TransferError::UnalignedCopyOriginY);
+ }
+ if copy_size.width % block_width != 0 {
+ return Err(TransferError::UnalignedCopyWidth);
+ }
+ if copy_size.height % block_height != 0 {
+ return Err(TransferError::UnalignedCopyHeight);
+ }
+ Ok(())
+}
+
+impl<G: GlobalIdentityHandlerFactory> Global<G> {
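+    /// Records a buffer-to-buffer copy on the given command encoder.
+    ///
+    /// A minimal, hypothetical call (the IDs are assumed to come from earlier
+    /// resource-creation calls; both offsets and the size must be multiples
+    /// of `wgt::COPY_BUFFER_ALIGNMENT`):
+    ///
+    /// ```ignore
+    /// global.command_encoder_copy_buffer_to_buffer::<B>(
+    ///     encoder_id, // CommandEncoderId
+    ///     src_id, 0,   // source buffer and offset
+    ///     dst_id, 256, // destination buffer and offset
+    ///     1024,        // size in bytes
+    /// )?;
+    /// ```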
+ pub fn command_encoder_copy_buffer_to_buffer<B: GfxBackend>(
+ &self,
+ command_encoder_id: CommandEncoderId,
+ source: BufferId,
+ source_offset: BufferAddress,
+ destination: BufferId,
+ destination_offset: BufferAddress,
+ size: BufferAddress,
+ ) -> Result<(), CopyError> {
+ span!(_guard, INFO, "CommandEncoder::copy_buffer_to_buffer");
+
+ if source == destination {
+ Err(TransferError::SameSourceDestinationBuffer)?
+ }
+ let hub = B::hub(self);
+ let mut token = Token::root();
+
+ let (mut cmd_buf_guard, mut token) = hub.command_buffers.write(&mut token);
+ let cmd_buf = CommandBuffer::get_encoder(&mut *cmd_buf_guard, command_encoder_id)?;
+ let (buffer_guard, _) = hub.buffers.read(&mut token);
+ // we can't hold both src_pending and dst_pending in scope because they
+ // borrow the buffer tracker mutably...
+ let mut barriers = Vec::new();
+
+ #[cfg(feature = "trace")]
+ if let Some(ref mut list) = cmd_buf.commands {
+ list.push(TraceCommand::CopyBufferToBuffer {
+ src: source,
+ src_offset: source_offset,
+ dst: destination,
+ dst_offset: destination_offset,
+ size,
+ });
+ }
+
+ let (src_buffer, src_pending) = cmd_buf
+ .trackers
+ .buffers
+ .use_replace(&*buffer_guard, source, (), BufferUse::COPY_SRC)
+ .map_err(TransferError::InvalidBuffer)?;
+ let &(ref src_raw, _) = src_buffer
+ .raw
+ .as_ref()
+ .ok_or(TransferError::InvalidBuffer(source))?;
+ if !src_buffer.usage.contains(BufferUsage::COPY_SRC) {
+ Err(TransferError::MissingCopySrcUsageFlag)?
+ }
+ barriers.extend(src_pending.map(|pending| pending.into_hal(src_buffer)));
+
+ let (dst_buffer, dst_pending) = cmd_buf
+ .trackers
+ .buffers
+ .use_replace(&*buffer_guard, destination, (), BufferUse::COPY_DST)
+ .map_err(TransferError::InvalidBuffer)?;
+ let &(ref dst_raw, _) = dst_buffer
+ .raw
+ .as_ref()
+ .ok_or(TransferError::InvalidBuffer(destination))?;
+ if !dst_buffer.usage.contains(BufferUsage::COPY_DST) {
+ Err(TransferError::MissingCopyDstUsageFlag(
+ Some(destination),
+ None,
+ ))?
+ }
+ barriers.extend(dst_pending.map(|pending| pending.into_hal(dst_buffer)));
+
+ if size % wgt::COPY_BUFFER_ALIGNMENT != 0 {
+ Err(TransferError::UnalignedCopySize(size))?
+ }
+ if source_offset % wgt::COPY_BUFFER_ALIGNMENT != 0 {
+ Err(TransferError::UnalignedBufferOffset(source_offset))?
+ }
+ if destination_offset % wgt::COPY_BUFFER_ALIGNMENT != 0 {
+ Err(TransferError::UnalignedBufferOffset(destination_offset))?
+ }
+
+ let source_end_offset = source_offset + size;
+ let destination_end_offset = destination_offset + size;
+ if source_end_offset > src_buffer.size {
+ Err(TransferError::BufferOverrun {
+ start_offset: source_offset,
+ end_offset: source_end_offset,
+ buffer_size: src_buffer.size,
+ side: CopySide::Source,
+ })?
+ }
+ if destination_end_offset > dst_buffer.size {
+ Err(TransferError::BufferOverrun {
+ start_offset: destination_offset,
+ end_offset: destination_end_offset,
+ buffer_size: dst_buffer.size,
+ side: CopySide::Destination,
+ })?
+ }
+
+ if size == 0 {
+ tracing::trace!("Ignoring copy_buffer_to_buffer of size 0");
+ return Ok(());
+ }
+
+ let region = hal::command::BufferCopy {
+ src: source_offset,
+ dst: destination_offset,
+ size,
+ };
+ let cmb_raw = cmd_buf.raw.last_mut().unwrap();
+ unsafe {
+ cmb_raw.pipeline_barrier(
+ all_buffer_stages()..hal::pso::PipelineStage::TRANSFER,
+ hal::memory::Dependencies::empty(),
+ barriers,
+ );
+ cmb_raw.copy_buffer(src_raw, dst_raw, iter::once(region));
+ }
+ Ok(())
+ }
+
+ pub fn command_encoder_copy_buffer_to_texture<B: GfxBackend>(
+ &self,
+ command_encoder_id: CommandEncoderId,
+ source: &BufferCopyView,
+ destination: &TextureCopyView,
+ copy_size: &Extent3d,
+ ) -> Result<(), CopyError> {
+ span!(_guard, INFO, "CommandEncoder::copy_buffer_to_texture");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+ let (mut cmd_buf_guard, mut token) = hub.command_buffers.write(&mut token);
+ let cmd_buf = CommandBuffer::get_encoder(&mut *cmd_buf_guard, command_encoder_id)?;
+ let (buffer_guard, mut token) = hub.buffers.read(&mut token);
+ let (texture_guard, _) = hub.textures.read(&mut token);
+ let (dst_layers, dst_selector, dst_offset) =
+ texture_copy_view_to_hal(destination, copy_size, &*texture_guard)?;
+
+ #[cfg(feature = "trace")]
+ if let Some(ref mut list) = cmd_buf.commands {
+ list.push(TraceCommand::CopyBufferToTexture {
+ src: source.clone(),
+ dst: destination.clone(),
+ size: *copy_size,
+ });
+ }
+
+        if copy_size.width == 0 || copy_size.height == 0 || copy_size.depth == 0 {
+ tracing::trace!("Ignoring copy_buffer_to_texture of size 0");
+ return Ok(());
+ }
+
+ let (src_buffer, src_pending) = cmd_buf
+ .trackers
+ .buffers
+ .use_replace(&*buffer_guard, source.buffer, (), BufferUse::COPY_SRC)
+ .map_err(TransferError::InvalidBuffer)?;
+ let &(ref src_raw, _) = src_buffer
+ .raw
+ .as_ref()
+ .ok_or(TransferError::InvalidBuffer(source.buffer))?;
+ if !src_buffer.usage.contains(BufferUsage::COPY_SRC) {
+ Err(TransferError::MissingCopySrcUsageFlag)?
+ }
+ let src_barriers = src_pending.map(|pending| pending.into_hal(src_buffer));
+
+ let (dst_texture, dst_pending) = cmd_buf
+ .trackers
+ .textures
+ .use_replace(
+ &*texture_guard,
+ destination.texture,
+ dst_selector,
+ TextureUse::COPY_DST,
+ )
+ .unwrap();
+ let &(ref dst_raw, _) = dst_texture
+ .raw
+ .as_ref()
+ .ok_or(TransferError::InvalidTexture(destination.texture))?;
+ if !dst_texture.usage.contains(TextureUsage::COPY_DST) {
+ Err(TransferError::MissingCopyDstUsageFlag(
+ None,
+ Some(destination.texture),
+ ))?
+ }
+ let dst_barriers = dst_pending.map(|pending| pending.into_hal(dst_texture));
+
+ let bytes_per_row_alignment = wgt::COPY_BYTES_PER_ROW_ALIGNMENT;
+ let bytes_per_block = conv::map_texture_format(dst_texture.format, cmd_buf.private_features)
+ .surface_desc()
+ .bits as u32
+ / BITS_PER_BYTE;
+ let src_bytes_per_row = source.layout.bytes_per_row;
+ if bytes_per_row_alignment % bytes_per_block != 0 {
+ Err(TransferError::UnalignedBytesPerRow)?
+ }
+ if src_bytes_per_row % bytes_per_row_alignment != 0 {
+ Err(TransferError::UnalignedBytesPerRow)?
+ }
+ validate_texture_copy_range(
+ destination,
+ dst_texture.format,
+ dst_texture.kind,
+ CopySide::Destination,
+ copy_size,
+ )?;
+ validate_linear_texture_data(
+ &source.layout,
+ dst_texture.format,
+ src_buffer.size,
+ CopySide::Source,
+ bytes_per_block as BufferAddress,
+ copy_size,
+ )?;
+
+ let (block_width, _) = conv::texture_block_size(dst_texture.format);
+ if !conv::is_valid_copy_dst_texture_format(dst_texture.format) {
+ Err(TransferError::CopyToForbiddenTextureFormat(
+ dst_texture.format,
+ ))?
+ }
+
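+        // hal expresses the buffer row pitch as a width in texels rather
+        // than bytes, hence the conversion below.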
+ let buffer_width = (source.layout.bytes_per_row / bytes_per_block) * block_width;
+ let region = hal::command::BufferImageCopy {
+ buffer_offset: source.layout.offset,
+ buffer_width,
+ buffer_height: source.layout.rows_per_image,
+ image_layers: dst_layers,
+ image_offset: dst_offset,
+ image_extent: conv::map_extent(copy_size, dst_texture.dimension),
+ };
+ let cmb_raw = cmd_buf.raw.last_mut().unwrap();
+ unsafe {
+ cmb_raw.pipeline_barrier(
+ all_buffer_stages() | all_image_stages()..hal::pso::PipelineStage::TRANSFER,
+ hal::memory::Dependencies::empty(),
+ src_barriers.chain(dst_barriers),
+ );
+ cmb_raw.copy_buffer_to_image(
+ src_raw,
+ dst_raw,
+ hal::image::Layout::TransferDstOptimal,
+ iter::once(region),
+ );
+ }
+ Ok(())
+ }
+
+ pub fn command_encoder_copy_texture_to_buffer<B: GfxBackend>(
+ &self,
+ command_encoder_id: CommandEncoderId,
+ source: &TextureCopyView,
+ destination: &BufferCopyView,
+ copy_size: &Extent3d,
+ ) -> Result<(), CopyError> {
+ span!(_guard, INFO, "CommandEncoder::copy_texture_to_buffer");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+ let (mut cmd_buf_guard, mut token) = hub.command_buffers.write(&mut token);
+ let cmd_buf = CommandBuffer::get_encoder(&mut *cmd_buf_guard, command_encoder_id)?;
+ let (buffer_guard, mut token) = hub.buffers.read(&mut token);
+ let (texture_guard, _) = hub.textures.read(&mut token);
+ let (src_layers, src_selector, src_offset) =
+ texture_copy_view_to_hal(source, copy_size, &*texture_guard)?;
+
+ #[cfg(feature = "trace")]
+ if let Some(ref mut list) = cmd_buf.commands {
+ list.push(TraceCommand::CopyTextureToBuffer {
+ src: source.clone(),
+ dst: destination.clone(),
+ size: *copy_size,
+ });
+ }
+
+        if copy_size.width == 0 || copy_size.height == 0 || copy_size.depth == 0 {
+ tracing::trace!("Ignoring copy_texture_to_buffer of size 0");
+ return Ok(());
+ }
+
+ let (src_texture, src_pending) = cmd_buf
+ .trackers
+ .textures
+ .use_replace(
+ &*texture_guard,
+ source.texture,
+ src_selector,
+ TextureUse::COPY_SRC,
+ )
+ .unwrap();
+ let &(ref src_raw, _) = src_texture
+ .raw
+ .as_ref()
+ .ok_or(TransferError::InvalidTexture(source.texture))?;
+ if !src_texture.usage.contains(TextureUsage::COPY_SRC) {
+ Err(TransferError::MissingCopySrcUsageFlag)?
+ }
+ let src_barriers = src_pending.map(|pending| pending.into_hal(src_texture));
+
+        let (dst_buffer, dst_pending) = cmd_buf
+ .trackers
+ .buffers
+ .use_replace(&*buffer_guard, destination.buffer, (), BufferUse::COPY_DST)
+ .map_err(TransferError::InvalidBuffer)?;
+ let &(ref dst_raw, _) = dst_buffer
+ .raw
+ .as_ref()
+ .ok_or(TransferError::InvalidBuffer(destination.buffer))?;
+ if !dst_buffer.usage.contains(BufferUsage::COPY_DST) {
+ Err(TransferError::MissingCopyDstUsageFlag(
+ Some(destination.buffer),
+ None,
+ ))?
+ }
+        let dst_barrier = dst_pending.map(|pending| pending.into_hal(dst_buffer));
+
+ let bytes_per_row_alignment = wgt::COPY_BYTES_PER_ROW_ALIGNMENT;
+ let bytes_per_block = conv::map_texture_format(src_texture.format, cmd_buf.private_features)
+ .surface_desc()
+ .bits as u32
+ / BITS_PER_BYTE;
+ let dst_bytes_per_row = destination.layout.bytes_per_row;
+ if bytes_per_row_alignment % bytes_per_block != 0 {
+ Err(TransferError::UnalignedBytesPerRow)?
+ }
+ if dst_bytes_per_row % bytes_per_row_alignment != 0 {
+ Err(TransferError::UnalignedBytesPerRow)?
+ }
+ validate_texture_copy_range(
+ source,
+ src_texture.format,
+ src_texture.kind,
+ CopySide::Source,
+ copy_size,
+ )?;
+ validate_linear_texture_data(
+ &destination.layout,
+ src_texture.format,
+ dst_buffer.size,
+ CopySide::Destination,
+ bytes_per_block as BufferAddress,
+ copy_size,
+ )?;
+
+ let (block_width, _) = conv::texture_block_size(src_texture.format);
+ if !conv::is_valid_copy_src_texture_format(src_texture.format) {
+ Err(TransferError::CopyFromForbiddenTextureFormat(
+ src_texture.format,
+ ))?
+ }
+
+ let buffer_width = (destination.layout.bytes_per_row / bytes_per_block) * block_width;
+ let region = hal::command::BufferImageCopy {
+ buffer_offset: destination.layout.offset,
+ buffer_width,
+ buffer_height: destination.layout.rows_per_image,
+ image_layers: src_layers,
+ image_offset: src_offset,
+ image_extent: conv::map_extent(copy_size, src_texture.dimension),
+ };
+ let cmb_raw = cmd_buf.raw.last_mut().unwrap();
+ unsafe {
+ cmb_raw.pipeline_barrier(
+ all_buffer_stages() | all_image_stages()..hal::pso::PipelineStage::TRANSFER,
+ hal::memory::Dependencies::empty(),
+ src_barriers.chain(dst_barrier),
+ );
+ cmb_raw.copy_image_to_buffer(
+ src_raw,
+ hal::image::Layout::TransferSrcOptimal,
+ dst_raw,
+ iter::once(region),
+ );
+ }
+ Ok(())
+ }
+
+ pub fn command_encoder_copy_texture_to_texture<B: GfxBackend>(
+ &self,
+ command_encoder_id: CommandEncoderId,
+ source: &TextureCopyView,
+ destination: &TextureCopyView,
+ copy_size: &Extent3d,
+ ) -> Result<(), CopyError> {
+ span!(_guard, INFO, "CommandEncoder::copy_texture_to_texture");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+
+ let (mut cmd_buf_guard, mut token) = hub.command_buffers.write(&mut token);
+ let cmd_buf = CommandBuffer::get_encoder(&mut *cmd_buf_guard, command_encoder_id)?;
+ let (_, mut token) = hub.buffers.read(&mut token); // skip token
+ let (texture_guard, _) = hub.textures.read(&mut token);
+ // we can't hold both src_pending and dst_pending in scope because they
+ // borrow the buffer tracker mutably...
+ let mut barriers = Vec::new();
+ let (src_layers, src_selector, src_offset) =
+ texture_copy_view_to_hal(source, copy_size, &*texture_guard)?;
+ let (dst_layers, dst_selector, dst_offset) =
+ texture_copy_view_to_hal(destination, copy_size, &*texture_guard)?;
+ if src_layers.aspects != dst_layers.aspects {
+ Err(TransferError::MismatchedAspects)?
+ }
+
+ #[cfg(feature = "trace")]
+ if let Some(ref mut list) = cmd_buf.commands {
+ list.push(TraceCommand::CopyTextureToTexture {
+ src: source.clone(),
+ dst: destination.clone(),
+ size: *copy_size,
+ });
+ }
+
+        if copy_size.width == 0 || copy_size.height == 0 || copy_size.depth == 0 {
+ tracing::trace!("Ignoring copy_texture_to_texture of size 0");
+ return Ok(());
+ }
+
+ let (src_texture, src_pending) = cmd_buf
+ .trackers
+ .textures
+ .use_replace(
+ &*texture_guard,
+ source.texture,
+ src_selector,
+ TextureUse::COPY_SRC,
+ )
+ .unwrap();
+ let &(ref src_raw, _) = src_texture
+ .raw
+ .as_ref()
+ .ok_or(TransferError::InvalidTexture(source.texture))?;
+ if !src_texture.usage.contains(TextureUsage::COPY_SRC) {
+ Err(TransferError::MissingCopySrcUsageFlag)?
+ }
+ barriers.extend(src_pending.map(|pending| pending.into_hal(src_texture)));
+
+ let (dst_texture, dst_pending) = cmd_buf
+ .trackers
+ .textures
+ .use_replace(
+ &*texture_guard,
+ destination.texture,
+ dst_selector,
+ TextureUse::COPY_DST,
+ )
+ .unwrap();
+ let &(ref dst_raw, _) = dst_texture
+ .raw
+ .as_ref()
+ .ok_or(TransferError::InvalidTexture(destination.texture))?;
+ if !dst_texture.usage.contains(TextureUsage::COPY_DST) {
+ Err(TransferError::MissingCopyDstUsageFlag(
+ None,
+ Some(destination.texture),
+ ))?
+ }
+ barriers.extend(dst_pending.map(|pending| pending.into_hal(dst_texture)));
+
+ validate_texture_copy_range(
+ source,
+ src_texture.format,
+ src_texture.kind,
+ CopySide::Source,
+ copy_size,
+ )?;
+ validate_texture_copy_range(
+ destination,
+ dst_texture.format,
+ dst_texture.kind,
+ CopySide::Destination,
+ copy_size,
+ )?;
+
+ let region = hal::command::ImageCopy {
+ src_subresource: src_layers,
+ src_offset,
+ dst_subresource: dst_layers,
+ dst_offset,
+ extent: conv::map_extent(copy_size, src_texture.dimension),
+ };
+ let cmb_raw = cmd_buf.raw.last_mut().unwrap();
+ unsafe {
+ cmb_raw.pipeline_barrier(
+ all_image_stages()..hal::pso::PipelineStage::TRANSFER,
+ hal::memory::Dependencies::empty(),
+ barriers,
+ );
+ cmb_raw.copy_image(
+ src_raw,
+ hal::image::Layout::TransferSrcOptimal,
+ dst_raw,
+ hal::image::Layout::TransferDstOptimal,
+ iter::once(region),
+ );
+ }
+ Ok(())
+ }
+}
diff --git a/gfx/wgpu/wgpu-core/src/conv.rs b/gfx/wgpu/wgpu-core/src/conv.rs
new file mode 100644
index 0000000000..58093b37e6
--- /dev/null
+++ b/gfx/wgpu/wgpu-core/src/conv.rs
@@ -0,0 +1,833 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+use crate::{
+ command::{LoadOp, PassChannel, StoreOp},
+ resource, PrivateFeatures,
+};
+
+use std::convert::TryInto;
+
+pub fn map_buffer_usage(usage: wgt::BufferUsage) -> (hal::buffer::Usage, hal::memory::Properties) {
+ use hal::buffer::Usage as U;
+ use hal::memory::Properties as P;
+ use wgt::BufferUsage as W;
+
+ let mut hal_memory = P::empty();
+ if usage.contains(W::MAP_READ) {
+ hal_memory |= P::CPU_VISIBLE | P::CPU_CACHED;
+ }
+ if usage.contains(W::MAP_WRITE) {
+ hal_memory |= P::CPU_VISIBLE;
+ }
+
+ let mut hal_usage = U::empty();
+ if usage.contains(W::COPY_SRC) {
+ hal_usage |= U::TRANSFER_SRC;
+ }
+ if usage.contains(W::COPY_DST) {
+ hal_usage |= U::TRANSFER_DST;
+ }
+ if usage.contains(W::INDEX) {
+ hal_usage |= U::INDEX;
+ }
+ if usage.contains(W::VERTEX) {
+ hal_usage |= U::VERTEX;
+ }
+ if usage.contains(W::UNIFORM) {
+ hal_usage |= U::UNIFORM;
+ }
+ if usage.contains(W::STORAGE) {
+ hal_usage |= U::STORAGE;
+ }
+ if usage.contains(W::INDIRECT) {
+ hal_usage |= U::INDIRECT;
+ }
+
+ (hal_usage, hal_memory)
+}
+
+pub fn map_texture_usage(
+ usage: wgt::TextureUsage,
+ aspects: hal::format::Aspects,
+) -> hal::image::Usage {
+ use hal::image::Usage as U;
+ use wgt::TextureUsage as W;
+
+ let mut value = U::empty();
+ if usage.contains(W::COPY_SRC) {
+ value |= U::TRANSFER_SRC;
+ }
+ if usage.contains(W::COPY_DST) {
+ value |= U::TRANSFER_DST;
+ }
+ if usage.contains(W::SAMPLED) {
+ value |= U::SAMPLED;
+ }
+ if usage.contains(W::STORAGE) {
+ value |= U::STORAGE;
+ }
+ if usage.contains(W::RENDER_ATTACHMENT) {
+ if aspects.intersects(hal::format::Aspects::DEPTH | hal::format::Aspects::STENCIL) {
+ value |= U::DEPTH_STENCIL_ATTACHMENT;
+ } else {
+ value |= U::COLOR_ATTACHMENT;
+ }
+ }
+ // Note: TextureUsage::Present does not need to be handled explicitly
+ // TODO: HAL Transient Attachment, HAL Input Attachment
+ value
+}
+
+pub fn map_binding_type(binding: &wgt::BindGroupLayoutEntry) -> hal::pso::DescriptorType {
+ use hal::pso;
+ use wgt::BindingType as Bt;
+ match binding.ty {
+ Bt::Buffer {
+ ty,
+ has_dynamic_offset,
+ min_binding_size: _,
+ } => pso::DescriptorType::Buffer {
+ ty: match ty {
+ wgt::BufferBindingType::Uniform => pso::BufferDescriptorType::Uniform,
+ wgt::BufferBindingType::Storage { read_only } => {
+ pso::BufferDescriptorType::Storage { read_only }
+ }
+ },
+ format: pso::BufferDescriptorFormat::Structured {
+ dynamic_offset: has_dynamic_offset,
+ },
+ },
+ Bt::Sampler { .. } => pso::DescriptorType::Sampler,
+ Bt::Texture { .. } => pso::DescriptorType::Image {
+ ty: pso::ImageDescriptorType::Sampled {
+ with_sampler: false,
+ },
+ },
+ Bt::StorageTexture { access, .. } => pso::DescriptorType::Image {
+ ty: pso::ImageDescriptorType::Storage {
+ read_only: match access {
+ wgt::StorageTextureAccess::ReadOnly => true,
+ _ => false,
+ },
+ },
+ },
+ }
+}
+
+pub fn map_shader_stage_flags(shader_stage_flags: wgt::ShaderStage) -> hal::pso::ShaderStageFlags {
+ use hal::pso::ShaderStageFlags as H;
+ use wgt::ShaderStage as Ss;
+
+ let mut value = H::empty();
+ if shader_stage_flags.contains(Ss::VERTEX) {
+ value |= H::VERTEX;
+ }
+ if shader_stage_flags.contains(Ss::FRAGMENT) {
+ value |= H::FRAGMENT;
+ }
+ if shader_stage_flags.contains(Ss::COMPUTE) {
+ value |= H::COMPUTE;
+ }
+ value
+}
+
+pub fn map_extent(extent: &wgt::Extent3d, dim: wgt::TextureDimension) -> hal::image::Extent {
+ hal::image::Extent {
+ width: extent.width,
+ height: extent.height,
+ depth: match dim {
+ wgt::TextureDimension::D1 | wgt::TextureDimension::D2 => 1,
+ wgt::TextureDimension::D3 => extent.depth,
+ },
+ }
+}
+
+pub fn map_primitive_topology(primitive_topology: wgt::PrimitiveTopology) -> hal::pso::Primitive {
+ use hal::pso::Primitive as H;
+ use wgt::PrimitiveTopology as Pt;
+ match primitive_topology {
+ Pt::PointList => H::PointList,
+ Pt::LineList => H::LineList,
+ Pt::LineStrip => H::LineStrip,
+ Pt::TriangleList => H::TriangleList,
+ Pt::TriangleStrip => H::TriangleStrip,
+ }
+}
+
+pub fn map_color_state_descriptor(desc: &wgt::ColorStateDescriptor) -> hal::pso::ColorBlendDesc {
+ let color_mask = desc.write_mask;
+ let blend_state = if desc.color_blend != wgt::BlendDescriptor::REPLACE
+ || desc.alpha_blend != wgt::BlendDescriptor::REPLACE
+ {
+ Some(hal::pso::BlendState {
+ color: map_blend_descriptor(&desc.color_blend),
+ alpha: map_blend_descriptor(&desc.alpha_blend),
+ })
+ } else {
+ None
+ };
+ hal::pso::ColorBlendDesc {
+ mask: map_color_write_flags(color_mask),
+ blend: blend_state,
+ }
+}
+
+fn map_color_write_flags(flags: wgt::ColorWrite) -> hal::pso::ColorMask {
+ use hal::pso::ColorMask as H;
+ use wgt::ColorWrite as Cw;
+
+ let mut value = H::empty();
+ if flags.contains(Cw::RED) {
+ value |= H::RED;
+ }
+ if flags.contains(Cw::GREEN) {
+ value |= H::GREEN;
+ }
+ if flags.contains(Cw::BLUE) {
+ value |= H::BLUE;
+ }
+ if flags.contains(Cw::ALPHA) {
+ value |= H::ALPHA;
+ }
+ value
+}
+
+fn map_blend_descriptor(blend_desc: &wgt::BlendDescriptor) -> hal::pso::BlendOp {
+ use hal::pso::BlendOp as H;
+ use wgt::BlendOperation as Bo;
+ match blend_desc.operation {
+ Bo::Add => H::Add {
+ src: map_blend_factor(blend_desc.src_factor),
+ dst: map_blend_factor(blend_desc.dst_factor),
+ },
+ Bo::Subtract => H::Sub {
+ src: map_blend_factor(blend_desc.src_factor),
+ dst: map_blend_factor(blend_desc.dst_factor),
+ },
+ Bo::ReverseSubtract => H::RevSub {
+ src: map_blend_factor(blend_desc.src_factor),
+ dst: map_blend_factor(blend_desc.dst_factor),
+ },
+ Bo::Min => H::Min,
+ Bo::Max => H::Max,
+ }
+}
+
+fn map_blend_factor(blend_factor: wgt::BlendFactor) -> hal::pso::Factor {
+ use hal::pso::Factor as H;
+ use wgt::BlendFactor as Bf;
+ match blend_factor {
+ Bf::Zero => H::Zero,
+ Bf::One => H::One,
+ Bf::SrcColor => H::SrcColor,
+ Bf::OneMinusSrcColor => H::OneMinusSrcColor,
+ Bf::SrcAlpha => H::SrcAlpha,
+ Bf::OneMinusSrcAlpha => H::OneMinusSrcAlpha,
+ Bf::DstColor => H::DstColor,
+ Bf::OneMinusDstColor => H::OneMinusDstColor,
+ Bf::DstAlpha => H::DstAlpha,
+ Bf::OneMinusDstAlpha => H::OneMinusDstAlpha,
+ Bf::SrcAlphaSaturated => H::SrcAlphaSaturate,
+ Bf::BlendColor => H::ConstColor,
+ Bf::OneMinusBlendColor => H::OneMinusConstColor,
+ }
+}
+
+pub fn map_depth_stencil_state_descriptor(
+ desc: &wgt::DepthStencilStateDescriptor,
+) -> hal::pso::DepthStencilDesc {
+ hal::pso::DepthStencilDesc {
+ depth: if desc.is_depth_enabled() {
+ Some(hal::pso::DepthTest {
+ fun: map_compare_function(desc.depth_compare),
+ write: desc.depth_write_enabled,
+ })
+ } else {
+ None
+ },
+ depth_bounds: false, // TODO
+ stencil: if desc.stencil.is_enabled() {
+ let s = &desc.stencil;
+ Some(hal::pso::StencilTest {
+ faces: hal::pso::Sided {
+ front: map_stencil_face(&s.front),
+ back: map_stencil_face(&s.back),
+ },
+ read_masks: hal::pso::State::Static(hal::pso::Sided::new(s.read_mask)),
+ write_masks: hal::pso::State::Static(hal::pso::Sided::new(s.write_mask)),
+ reference_values: if s.needs_ref_value() {
+ hal::pso::State::Dynamic
+ } else {
+ hal::pso::State::Static(hal::pso::Sided::new(0))
+ },
+ })
+ } else {
+ None
+ },
+ }
+}
+
+fn map_stencil_face(
+ stencil_state_face_desc: &wgt::StencilStateFaceDescriptor,
+) -> hal::pso::StencilFace {
+ hal::pso::StencilFace {
+ fun: map_compare_function(stencil_state_face_desc.compare),
+ op_fail: map_stencil_operation(stencil_state_face_desc.fail_op),
+ op_depth_fail: map_stencil_operation(stencil_state_face_desc.depth_fail_op),
+ op_pass: map_stencil_operation(stencil_state_face_desc.pass_op),
+ }
+}
+
+pub fn map_compare_function(compare_function: wgt::CompareFunction) -> hal::pso::Comparison {
+ use hal::pso::Comparison as H;
+ use wgt::CompareFunction as Cf;
+ match compare_function {
+ Cf::Never => H::Never,
+ Cf::Less => H::Less,
+ Cf::Equal => H::Equal,
+ Cf::LessEqual => H::LessEqual,
+ Cf::Greater => H::Greater,
+ Cf::NotEqual => H::NotEqual,
+ Cf::GreaterEqual => H::GreaterEqual,
+ Cf::Always => H::Always,
+ }
+}
+
+fn map_stencil_operation(stencil_operation: wgt::StencilOperation) -> hal::pso::StencilOp {
+ use hal::pso::StencilOp as H;
+ use wgt::StencilOperation as So;
+ match stencil_operation {
+ So::Keep => H::Keep,
+ So::Zero => H::Zero,
+ So::Replace => H::Replace,
+ So::Invert => H::Invert,
+ So::IncrementClamp => H::IncrementClamp,
+ So::DecrementClamp => H::DecrementClamp,
+ So::IncrementWrap => H::IncrementWrap,
+ So::DecrementWrap => H::DecrementWrap,
+ }
+}
+
+pub(crate) fn map_texture_format(
+ texture_format: wgt::TextureFormat,
+ private_features: PrivateFeatures,
+) -> hal::format::Format {
+ use hal::format::Format as H;
+ use wgt::TextureFormat as Tf;
+ match texture_format {
+ // Normal 8 bit formats
+ Tf::R8Unorm => H::R8Unorm,
+ Tf::R8Snorm => H::R8Snorm,
+ Tf::R8Uint => H::R8Uint,
+ Tf::R8Sint => H::R8Sint,
+
+ // Normal 16 bit formats
+ Tf::R16Uint => H::R16Uint,
+ Tf::R16Sint => H::R16Sint,
+ Tf::R16Float => H::R16Sfloat,
+ Tf::Rg8Unorm => H::Rg8Unorm,
+ Tf::Rg8Snorm => H::Rg8Snorm,
+ Tf::Rg8Uint => H::Rg8Uint,
+ Tf::Rg8Sint => H::Rg8Sint,
+
+ // Normal 32 bit formats
+ Tf::R32Uint => H::R32Uint,
+ Tf::R32Sint => H::R32Sint,
+ Tf::R32Float => H::R32Sfloat,
+ Tf::Rg16Uint => H::Rg16Uint,
+ Tf::Rg16Sint => H::Rg16Sint,
+ Tf::Rg16Float => H::Rg16Sfloat,
+ Tf::Rgba8Unorm => H::Rgba8Unorm,
+ Tf::Rgba8UnormSrgb => H::Rgba8Srgb,
+ Tf::Rgba8Snorm => H::Rgba8Snorm,
+ Tf::Rgba8Uint => H::Rgba8Uint,
+ Tf::Rgba8Sint => H::Rgba8Sint,
+ Tf::Bgra8Unorm => H::Bgra8Unorm,
+ Tf::Bgra8UnormSrgb => H::Bgra8Srgb,
+
+ // Packed 32 bit formats
+ Tf::Rgb10a2Unorm => H::A2r10g10b10Unorm,
+ Tf::Rg11b10Float => H::B10g11r11Ufloat,
+
+ // Normal 64 bit formats
+ Tf::Rg32Uint => H::Rg32Uint,
+ Tf::Rg32Sint => H::Rg32Sint,
+ Tf::Rg32Float => H::Rg32Sfloat,
+ Tf::Rgba16Uint => H::Rgba16Uint,
+ Tf::Rgba16Sint => H::Rgba16Sint,
+ Tf::Rgba16Float => H::Rgba16Sfloat,
+
+ // Normal 128 bit formats
+ Tf::Rgba32Uint => H::Rgba32Uint,
+ Tf::Rgba32Sint => H::Rgba32Sint,
+ Tf::Rgba32Float => H::Rgba32Sfloat,
+
+ // Depth and stencil formats
+ Tf::Depth32Float => H::D32Sfloat,
+ Tf::Depth24Plus => {
+ if private_features.texture_d24 {
+ H::X8D24Unorm
+ } else {
+ H::D32Sfloat
+ }
+ }
+ Tf::Depth24PlusStencil8 => {
+ if private_features.texture_d24_s8 {
+ H::D24UnormS8Uint
+ } else {
+ H::D32SfloatS8Uint
+ }
+ }
+
+ // BCn compressed formats
+ Tf::Bc1RgbaUnorm => H::Bc1RgbaUnorm,
+ Tf::Bc1RgbaUnormSrgb => H::Bc1RgbaSrgb,
+ Tf::Bc2RgbaUnorm => H::Bc2Unorm,
+ Tf::Bc2RgbaUnormSrgb => H::Bc2Srgb,
+ Tf::Bc3RgbaUnorm => H::Bc3Unorm,
+ Tf::Bc3RgbaUnormSrgb => H::Bc3Srgb,
+ Tf::Bc4RUnorm => H::Bc4Unorm,
+ Tf::Bc4RSnorm => H::Bc4Snorm,
+ Tf::Bc5RgUnorm => H::Bc5Unorm,
+ Tf::Bc5RgSnorm => H::Bc5Snorm,
+ Tf::Bc6hRgbSfloat => H::Bc6hSfloat,
+ Tf::Bc6hRgbUfloat => H::Bc6hUfloat,
+ Tf::Bc7RgbaUnorm => H::Bc7Unorm,
+ Tf::Bc7RgbaUnormSrgb => H::Bc7Srgb,
+ }
+}
+
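+/// Returns the texel dimensions of one compression block for `format`.
+/// Uncompressed formats report `(1, 1)`; the BCn formats compress 4x4 texel
+/// blocks, so e.g. `texture_block_size(wgt::TextureFormat::Bc1RgbaUnorm)`
+/// yields `(4, 4)`.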
+pub fn texture_block_size(format: wgt::TextureFormat) -> (u32, u32) {
+ use wgt::TextureFormat as Tf;
+ match format {
+ Tf::R8Unorm
+ | Tf::R8Snorm
+ | Tf::R8Uint
+ | Tf::R8Sint
+ | Tf::R16Uint
+ | Tf::R16Sint
+ | Tf::R16Float
+ | Tf::Rg8Unorm
+ | Tf::Rg8Snorm
+ | Tf::Rg8Uint
+ | Tf::Rg8Sint
+ | Tf::R32Uint
+ | Tf::R32Sint
+ | Tf::R32Float
+ | Tf::Rg16Uint
+ | Tf::Rg16Sint
+ | Tf::Rg16Float
+ | Tf::Rgba8Unorm
+ | Tf::Rgba8UnormSrgb
+ | Tf::Rgba8Snorm
+ | Tf::Rgba8Uint
+ | Tf::Rgba8Sint
+ | Tf::Bgra8Unorm
+ | Tf::Bgra8UnormSrgb
+ | Tf::Rgb10a2Unorm
+ | Tf::Rg11b10Float
+ | Tf::Rg32Uint
+ | Tf::Rg32Sint
+ | Tf::Rg32Float
+ | Tf::Rgba16Uint
+ | Tf::Rgba16Sint
+ | Tf::Rgba16Float
+ | Tf::Rgba32Uint
+ | Tf::Rgba32Sint
+ | Tf::Rgba32Float
+ | Tf::Depth32Float
+ | Tf::Depth24Plus
+ | Tf::Depth24PlusStencil8 => (1, 1),
+
+ Tf::Bc1RgbaUnorm
+ | Tf::Bc1RgbaUnormSrgb
+ | Tf::Bc2RgbaUnorm
+ | Tf::Bc2RgbaUnormSrgb
+ | Tf::Bc3RgbaUnorm
+ | Tf::Bc3RgbaUnormSrgb
+ | Tf::Bc4RUnorm
+ | Tf::Bc4RSnorm
+ | Tf::Bc5RgUnorm
+ | Tf::Bc5RgSnorm
+ | Tf::Bc6hRgbUfloat
+ | Tf::Bc6hRgbSfloat
+ | Tf::Bc7RgbaUnorm
+ | Tf::Bc7RgbaUnormSrgb => (4, 4),
+ }
+}
+
+pub fn texture_features(format: wgt::TextureFormat) -> wgt::Features {
+ use wgt::TextureFormat as Tf;
+ match format {
+ Tf::R8Unorm
+ | Tf::R8Snorm
+ | Tf::R8Uint
+ | Tf::R8Sint
+ | Tf::R16Uint
+ | Tf::R16Sint
+ | Tf::R16Float
+ | Tf::Rg8Unorm
+ | Tf::Rg8Snorm
+ | Tf::Rg8Uint
+ | Tf::Rg8Sint
+ | Tf::R32Uint
+ | Tf::R32Sint
+ | Tf::R32Float
+ | Tf::Rg16Uint
+ | Tf::Rg16Sint
+ | Tf::Rg16Float
+ | Tf::Rgba8Unorm
+ | Tf::Rgba8UnormSrgb
+ | Tf::Rgba8Snorm
+ | Tf::Rgba8Uint
+ | Tf::Rgba8Sint
+ | Tf::Bgra8Unorm
+ | Tf::Bgra8UnormSrgb
+ | Tf::Rgb10a2Unorm
+ | Tf::Rg11b10Float
+ | Tf::Rg32Uint
+ | Tf::Rg32Sint
+ | Tf::Rg32Float
+ | Tf::Rgba16Uint
+ | Tf::Rgba16Sint
+ | Tf::Rgba16Float
+ | Tf::Rgba32Uint
+ | Tf::Rgba32Sint
+ | Tf::Rgba32Float
+ | Tf::Depth32Float
+ | Tf::Depth24Plus
+ | Tf::Depth24PlusStencil8 => wgt::Features::empty(),
+
+ Tf::Bc1RgbaUnorm
+ | Tf::Bc1RgbaUnormSrgb
+ | Tf::Bc2RgbaUnorm
+ | Tf::Bc2RgbaUnormSrgb
+ | Tf::Bc3RgbaUnorm
+ | Tf::Bc3RgbaUnormSrgb
+ | Tf::Bc4RUnorm
+ | Tf::Bc4RSnorm
+ | Tf::Bc5RgUnorm
+ | Tf::Bc5RgSnorm
+ | Tf::Bc6hRgbUfloat
+ | Tf::Bc6hRgbSfloat
+ | Tf::Bc7RgbaUnorm
+ | Tf::Bc7RgbaUnormSrgb => wgt::Features::TEXTURE_COMPRESSION_BC,
+ }
+}
+
+pub fn map_vertex_format(vertex_format: wgt::VertexFormat) -> hal::format::Format {
+ use hal::format::Format as H;
+ use wgt::VertexFormat as Vf;
+ match vertex_format {
+ Vf::Uchar2 => H::Rg8Uint,
+ Vf::Uchar4 => H::Rgba8Uint,
+ Vf::Char2 => H::Rg8Sint,
+ Vf::Char4 => H::Rgba8Sint,
+ Vf::Uchar2Norm => H::Rg8Unorm,
+ Vf::Uchar4Norm => H::Rgba8Unorm,
+ Vf::Char2Norm => H::Rg8Snorm,
+ Vf::Char4Norm => H::Rgba8Snorm,
+ Vf::Ushort2 => H::Rg16Uint,
+ Vf::Ushort4 => H::Rgba16Uint,
+ Vf::Short2 => H::Rg16Sint,
+ Vf::Short4 => H::Rgba16Sint,
+ Vf::Ushort2Norm => H::Rg16Unorm,
+ Vf::Ushort4Norm => H::Rgba16Unorm,
+ Vf::Short2Norm => H::Rg16Snorm,
+ Vf::Short4Norm => H::Rgba16Snorm,
+ Vf::Half2 => H::Rg16Sfloat,
+ Vf::Half4 => H::Rgba16Sfloat,
+ Vf::Float => H::R32Sfloat,
+ Vf::Float2 => H::Rg32Sfloat,
+ Vf::Float3 => H::Rgb32Sfloat,
+ Vf::Float4 => H::Rgba32Sfloat,
+ Vf::Uint => H::R32Uint,
+ Vf::Uint2 => H::Rg32Uint,
+ Vf::Uint3 => H::Rgb32Uint,
+ Vf::Uint4 => H::Rgba32Uint,
+ Vf::Int => H::R32Sint,
+ Vf::Int2 => H::Rg32Sint,
+ Vf::Int3 => H::Rgb32Sint,
+ Vf::Int4 => H::Rgba32Sint,
+ }
+}
+
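+/// Returns `true` iff `val` is a non-zero power of two, e.g.
+/// `is_power_of_two(8)` is `true` while `is_power_of_two(0)` and
+/// `is_power_of_two(12)` are `false`.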
+pub fn is_power_of_two(val: u32) -> bool {
+ val != 0 && (val & (val - 1)) == 0
+}
+
+pub fn is_valid_copy_src_texture_format(format: wgt::TextureFormat) -> bool {
+ use wgt::TextureFormat as Tf;
+ match format {
+ Tf::Depth24Plus | Tf::Depth24PlusStencil8 => false,
+ _ => true,
+ }
+}
+
+pub fn is_valid_copy_dst_texture_format(format: wgt::TextureFormat) -> bool {
+ use wgt::TextureFormat as Tf;
+ match format {
+ Tf::Depth32Float | Tf::Depth24Plus | Tf::Depth24PlusStencil8 => false,
+ _ => true,
+ }
+}
+
+pub fn map_texture_dimension_size(
+ dimension: wgt::TextureDimension,
+ wgt::Extent3d {
+ width,
+ height,
+ depth,
+ }: wgt::Extent3d,
+ sample_size: u32,
+) -> Result<hal::image::Kind, resource::TextureDimensionError> {
+ use hal::image::Kind as H;
+ use resource::TextureDimensionError as Tde;
+ use wgt::TextureDimension::*;
+
+ let zero_dim = if width == 0 {
+ Some(resource::TextureErrorDimension::X)
+ } else if height == 0 {
+ Some(resource::TextureErrorDimension::Y)
+ } else if depth == 0 {
+ Some(resource::TextureErrorDimension::Z)
+ } else {
+ None
+ };
+ if let Some(dim) = zero_dim {
+ return Err(resource::TextureDimensionError::Zero(dim));
+ }
+
+ Ok(match dimension {
+ D1 => {
+ if height != 1 {
+ return Err(Tde::InvalidHeight);
+ }
+ if sample_size != 1 {
+ return Err(Tde::InvalidSampleCount(sample_size));
+ }
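+            // `!0` is all ones: saturate the layer count to its maximum
+            // value if `depth` does not fit in the hal layer type.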
+ let layers = depth.try_into().unwrap_or(!0);
+ H::D1(width, layers)
+ }
+ D2 => {
+ if sample_size > 32 || !is_power_of_two(sample_size) {
+ return Err(Tde::InvalidSampleCount(sample_size));
+ }
+ let layers = depth.try_into().unwrap_or(!0);
+ H::D2(width, height, layers, sample_size as u8)
+ }
+ D3 => {
+ if sample_size != 1 {
+ return Err(Tde::InvalidSampleCount(sample_size));
+ }
+ H::D3(width, height, depth)
+ }
+ })
+}
+
+pub fn map_texture_view_dimension(dimension: wgt::TextureViewDimension) -> hal::image::ViewKind {
+ use hal::image::ViewKind as H;
+ use wgt::TextureViewDimension::*;
+ match dimension {
+ D1 => H::D1,
+ D2 => H::D2,
+ D2Array => H::D2Array,
+ Cube => H::Cube,
+ CubeArray => H::CubeArray,
+ D3 => H::D3,
+ }
+}
+
+pub(crate) fn map_buffer_state(usage: resource::BufferUse) -> hal::buffer::State {
+ use crate::resource::BufferUse as W;
+ use hal::buffer::Access as A;
+
+ let mut access = A::empty();
+ if usage.contains(W::MAP_READ) {
+ access |= A::HOST_READ;
+ }
+ if usage.contains(W::MAP_WRITE) {
+ access |= A::HOST_WRITE;
+ }
+ if usage.contains(W::COPY_SRC) {
+ access |= A::TRANSFER_READ;
+ }
+ if usage.contains(W::COPY_DST) {
+ access |= A::TRANSFER_WRITE;
+ }
+ if usage.contains(W::INDEX) {
+ access |= A::INDEX_BUFFER_READ;
+ }
+ if usage.contains(W::VERTEX) {
+ access |= A::VERTEX_BUFFER_READ;
+ }
+ if usage.contains(W::UNIFORM) {
+ access |= A::UNIFORM_READ | A::SHADER_READ;
+ }
+ if usage.contains(W::STORAGE_LOAD) {
+ access |= A::SHADER_READ;
+ }
+ if usage.contains(W::STORAGE_STORE) {
+ access |= A::SHADER_WRITE;
+ }
+ if usage.contains(W::INDIRECT) {
+ access |= A::INDIRECT_COMMAND_READ;
+ }
+
+ access
+}
+
+pub(crate) fn map_texture_state(
+ usage: resource::TextureUse,
+ aspects: hal::format::Aspects,
+) -> hal::image::State {
+ use crate::resource::TextureUse as W;
+ use hal::image::{Access as A, Layout as L};
+
+ let is_color = aspects.contains(hal::format::Aspects::COLOR);
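+    // Note: the arms below match exact usage combinations rather than individual bits;
+    // any other color usage combination falls back to the General layout.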
+ let layout = match usage {
+ W::UNINITIALIZED => return (A::empty(), L::Undefined),
+ W::COPY_SRC => L::TransferSrcOptimal,
+ W::COPY_DST => L::TransferDstOptimal,
+ W::SAMPLED if is_color => L::ShaderReadOnlyOptimal,
+ W::ATTACHMENT_READ | W::ATTACHMENT_WRITE if is_color => L::ColorAttachmentOptimal,
+ _ if is_color => L::General,
+ W::ATTACHMENT_WRITE => L::DepthStencilAttachmentOptimal,
+ _ => L::DepthStencilReadOnlyOptimal,
+ };
+
+ let mut access = A::empty();
+ if usage.contains(W::COPY_SRC) {
+ access |= A::TRANSFER_READ;
+ }
+ if usage.contains(W::COPY_DST) {
+ access |= A::TRANSFER_WRITE;
+ }
+ if usage.contains(W::SAMPLED) {
+ access |= A::SHADER_READ;
+ }
+ if usage.contains(W::ATTACHMENT_READ) {
+ access |= if is_color {
+ A::COLOR_ATTACHMENT_READ
+ } else {
+ A::DEPTH_STENCIL_ATTACHMENT_READ
+ };
+ }
+ if usage.contains(W::ATTACHMENT_WRITE) {
+ access |= if is_color {
+ A::COLOR_ATTACHMENT_WRITE
+ } else {
+ A::DEPTH_STENCIL_ATTACHMENT_WRITE
+ };
+ }
+ if usage.contains(W::STORAGE_LOAD) {
+ access |= A::SHADER_READ;
+ }
+ if usage.contains(W::STORAGE_STORE) {
+ access |= A::SHADER_WRITE;
+ }
+
+ (access, layout)
+}
+
+pub fn map_load_store_ops<V>(channel: &PassChannel<V>) -> hal::pass::AttachmentOps {
+ hal::pass::AttachmentOps {
+ load: match channel.load_op {
+ LoadOp::Clear => hal::pass::AttachmentLoadOp::Clear,
+ LoadOp::Load => hal::pass::AttachmentLoadOp::Load,
+ },
+ store: match channel.store_op {
+ StoreOp::Clear => hal::pass::AttachmentStoreOp::DontCare, //TODO!
+ StoreOp::Store => hal::pass::AttachmentStoreOp::Store,
+ },
+ }
+}
+
+pub fn map_color_f32(color: &wgt::Color) -> hal::pso::ColorValue {
+ [
+ color.r as f32,
+ color.g as f32,
+ color.b as f32,
+ color.a as f32,
+ ]
+}
+pub fn map_color_i32(color: &wgt::Color) -> [i32; 4] {
+ [
+ color.r as i32,
+ color.g as i32,
+ color.b as i32,
+ color.a as i32,
+ ]
+}
+pub fn map_color_u32(color: &wgt::Color) -> [u32; 4] {
+ [
+ color.r as u32,
+ color.g as u32,
+ color.b as u32,
+ color.a as u32,
+ ]
+}
+
+pub fn map_filter(filter: wgt::FilterMode) -> hal::image::Filter {
+ match filter {
+ wgt::FilterMode::Nearest => hal::image::Filter::Nearest,
+ wgt::FilterMode::Linear => hal::image::Filter::Linear,
+ }
+}
+
+pub fn map_wrap(address: wgt::AddressMode) -> hal::image::WrapMode {
+ use hal::image::WrapMode as W;
+ use wgt::AddressMode as Am;
+ match address {
+ Am::ClampToEdge => W::Clamp,
+ Am::Repeat => W::Tile,
+ Am::MirrorRepeat => W::Mirror,
+ Am::ClampToBorder => W::Border,
+ }
+}
+
+pub fn map_rasterization_state_descriptor(
+ desc: &wgt::RasterizationStateDescriptor,
+) -> hal::pso::Rasterizer {
+ use hal::pso;
+ pso::Rasterizer {
+ depth_clamping: desc.clamp_depth,
+ polygon_mode: match desc.polygon_mode {
+ wgt::PolygonMode::Fill => pso::PolygonMode::Fill,
+ wgt::PolygonMode::Line => pso::PolygonMode::Line,
+ wgt::PolygonMode::Point => pso::PolygonMode::Point,
+ },
+ cull_face: match desc.cull_mode {
+ wgt::CullMode::None => pso::Face::empty(),
+ wgt::CullMode::Front => pso::Face::FRONT,
+ wgt::CullMode::Back => pso::Face::BACK,
+ },
+ front_face: match desc.front_face {
+ wgt::FrontFace::Ccw => pso::FrontFace::CounterClockwise,
+ wgt::FrontFace::Cw => pso::FrontFace::Clockwise,
+ },
+ depth_bias: if desc.depth_bias != 0
+ || desc.depth_bias_slope_scale != 0.0
+ || desc.depth_bias_clamp != 0.0
+ {
+ Some(pso::State::Static(pso::DepthBias {
+ const_factor: desc.depth_bias as f32,
+ slope_factor: desc.depth_bias_slope_scale,
+ clamp: desc.depth_bias_clamp,
+ }))
+ } else {
+ None
+ },
+ conservative: false,
+ line_width: pso::State::Static(1.0),
+ }
+}
+
+pub fn map_index_format(index_format: wgt::IndexFormat) -> hal::IndexType {
+ match index_format {
+ wgt::IndexFormat::Uint16 => hal::IndexType::U16,
+ wgt::IndexFormat::Uint32 => hal::IndexType::U32,
+ }
+}
diff --git a/gfx/wgpu/wgpu-core/src/device/alloc.rs b/gfx/wgpu/wgpu-core/src/device/alloc.rs
new file mode 100644
index 0000000000..893830ace6
--- /dev/null
+++ b/gfx/wgpu/wgpu-core/src/device/alloc.rs
@@ -0,0 +1,294 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+use super::DeviceError;
+use hal::device::Device;
+use std::{borrow::Cow, fmt, iter, ptr::NonNull, sync::Arc};
+
+pub struct MemoryAllocator<B: hal::Backend>(gpu_alloc::GpuAllocator<Arc<B::Memory>>);
+#[derive(Debug)]
+pub struct MemoryBlock<B: hal::Backend>(gpu_alloc::MemoryBlock<Arc<B::Memory>>);
+struct MemoryDevice<'a, B: hal::Backend>(&'a B::Device);
+
+//TODO: https://github.com/zakarumych/gpu-alloc/issues/9
+impl<B: hal::Backend> fmt::Debug for MemoryAllocator<B> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ write!(f, "MemoryAllocator")
+ }
+}
+
+impl<B: hal::Backend> MemoryAllocator<B> {
+ pub fn new(mem_props: hal::adapter::MemoryProperties, limits: hal::Limits) -> Self {
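+        // Allocation strategy thresholds, in bytes (`32 << 20` is 32 MiB). Note that
+        // the `treshold` spelling matches the gpu-alloc API of this version.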
+ let mem_config = gpu_alloc::Config {
+ dedicated_treshold: 32 << 20,
+ preferred_dedicated_treshold: 8 << 20,
+ transient_dedicated_treshold: 128 << 20,
+ linear_chunk: 128 << 20,
+ minimal_buddy_size: 1 << 10,
+ initial_buddy_dedicated_size: 8 << 20,
+ };
+ let properties = gpu_alloc::DeviceProperties {
+ memory_types: Cow::Owned(
+ mem_props
+ .memory_types
+ .iter()
+ .map(|mt| gpu_alloc::MemoryType {
+ heap: mt.heap_index as u32,
+ props: gpu_alloc::MemoryPropertyFlags::from_bits_truncate(
+ mt.properties.bits() as u8,
+ ),
+ })
+ .collect::<Vec<_>>(),
+ ),
+ memory_heaps: Cow::Owned(
+ mem_props
+ .memory_heaps
+ .iter()
+ .map(|mh| gpu_alloc::MemoryHeap { size: mh.size })
+ .collect::<Vec<_>>(),
+ ),
+ max_memory_allocation_count: if limits.max_memory_allocation_count == 0 {
+ tracing::warn!("max_memory_allocation_count is not set by gfx-rs backend");
+ !0
+ } else {
+ limits.max_memory_allocation_count.min(!0u32 as usize) as u32
+ },
+ max_memory_allocation_size: !0,
+ non_coherent_atom_size: limits.non_coherent_atom_size as u64,
+ buffer_device_address: false,
+ };
+ MemoryAllocator(gpu_alloc::GpuAllocator::new(mem_config, properties))
+ }
+
+ pub fn allocate(
+ &mut self,
+ device: &B::Device,
+ requirements: hal::memory::Requirements,
+ usage: gpu_alloc::UsageFlags,
+ ) -> Result<MemoryBlock<B>, DeviceError> {
+ assert!(requirements.alignment.is_power_of_two());
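+        // gpu-alloc expects an alignment mask rather than the alignment itself, hence `alignment - 1`.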
+ let request = gpu_alloc::Request {
+ size: requirements.size,
+ align_mask: requirements.alignment - 1,
+ memory_types: requirements.type_mask,
+ usage,
+ };
+
+ unsafe { self.0.alloc(&MemoryDevice::<B>(device), request) }
+ .map(MemoryBlock)
+ .map_err(|err| match err {
+ gpu_alloc::AllocationError::OutOfHostMemory
+ | gpu_alloc::AllocationError::OutOfDeviceMemory => DeviceError::OutOfMemory,
+ _ => panic!("Unable to allocate memory: {:?}", err),
+ })
+ }
+
+ pub fn free(&mut self, device: &B::Device, block: MemoryBlock<B>) {
+ unsafe { self.0.dealloc(&MemoryDevice::<B>(device), block.0) }
+ }
+
+ pub fn clear(&mut self, device: &B::Device) {
+ unsafe { self.0.cleanup(&MemoryDevice::<B>(device)) }
+ }
+}
+
+impl<B: hal::Backend> MemoryBlock<B> {
+ pub fn bind_buffer(
+ &self,
+ device: &B::Device,
+ buffer: &mut B::Buffer,
+ ) -> Result<(), DeviceError> {
+ unsafe {
+ device
+ .bind_buffer_memory(self.0.memory(), self.0.offset(), buffer)
+ .map_err(DeviceError::from_bind)
+ }
+ }
+
+ pub fn bind_image(&self, device: &B::Device, image: &mut B::Image) -> Result<(), DeviceError> {
+ unsafe {
+ device
+ .bind_image_memory(self.0.memory(), self.0.offset(), image)
+ .map_err(DeviceError::from_bind)
+ }
+ }
+
+ pub fn is_coherent(&self) -> bool {
+ self.0
+ .props()
+ .contains(gpu_alloc::MemoryPropertyFlags::HOST_COHERENT)
+ }
+
+ pub fn map(
+ &mut self,
+ device: &B::Device,
+ inner_offset: wgt::BufferAddress,
+ size: wgt::BufferAddress,
+ ) -> Result<NonNull<u8>, DeviceError> {
+ let offset = inner_offset;
+ unsafe {
+ self.0
+ .map(&MemoryDevice::<B>(device), offset, size as usize)
+ .map_err(DeviceError::from)
+ }
+ }
+
+ pub fn unmap(&mut self, device: &B::Device) {
+ unsafe { self.0.unmap(&MemoryDevice::<B>(device)) };
+ }
+
+ pub fn write_bytes(
+ &mut self,
+ device: &B::Device,
+ inner_offset: wgt::BufferAddress,
+ data: &[u8],
+ ) -> Result<(), DeviceError> {
+ let offset = inner_offset;
+ unsafe {
+ self.0
+ .write_bytes(&MemoryDevice::<B>(device), offset, data)
+ .map_err(DeviceError::from)
+ }
+ }
+
+ pub fn read_bytes(
+ &mut self,
+ device: &B::Device,
+ inner_offset: wgt::BufferAddress,
+ data: &mut [u8],
+ ) -> Result<(), DeviceError> {
+ let offset = inner_offset;
+ unsafe {
+ self.0
+ .read_bytes(&MemoryDevice::<B>(device), offset, data)
+ .map_err(DeviceError::from)
+ }
+ }
+
+ fn segment(
+ &self,
+ inner_offset: wgt::BufferAddress,
+ size: Option<wgt::BufferAddress>,
+ ) -> hal::memory::Segment {
+ hal::memory::Segment {
+ offset: self.0.offset() + inner_offset,
+ size: size.or(Some(self.0.size())),
+ }
+ }
+
+ pub fn flush_range(
+ &self,
+ device: &B::Device,
+ inner_offset: wgt::BufferAddress,
+ size: Option<wgt::BufferAddress>,
+ ) -> Result<(), DeviceError> {
+ let segment = self.segment(inner_offset, size);
+ unsafe {
+ device
+ .flush_mapped_memory_ranges(iter::once((&**self.0.memory(), segment)))
+ .or(Err(DeviceError::OutOfMemory))
+ }
+ }
+
+ pub fn invalidate_range(
+ &self,
+ device: &B::Device,
+ inner_offset: wgt::BufferAddress,
+ size: Option<wgt::BufferAddress>,
+ ) -> Result<(), DeviceError> {
+ let segment = self.segment(inner_offset, size);
+ unsafe {
+ device
+ .invalidate_mapped_memory_ranges(iter::once((&**self.0.memory(), segment)))
+ .or(Err(DeviceError::OutOfMemory))
+ }
+ }
+}
+
+impl<B: hal::Backend> gpu_alloc::MemoryDevice<Arc<B::Memory>> for MemoryDevice<'_, B> {
+ #[cfg_attr(feature = "tracing", tracing::instrument(skip(self)))]
+ unsafe fn allocate_memory(
+ &self,
+ size: u64,
+ memory_type: u32,
+ flags: gpu_alloc::AllocationFlags,
+ ) -> Result<Arc<B::Memory>, gpu_alloc::OutOfMemory> {
+ assert!(flags.is_empty());
+
+ self.0
+ .allocate_memory(hal::MemoryTypeId(memory_type as _), size)
+ .map(Arc::new)
+ .map_err(|_| gpu_alloc::OutOfMemory::OutOfDeviceMemory)
+ }
+
+ #[cfg_attr(feature = "tracing", tracing::instrument(skip(self)))]
+ unsafe fn deallocate_memory(&self, memory: Arc<B::Memory>) {
+ let memory = Arc::try_unwrap(memory).expect("Memory must not be used anywhere");
+ self.0.free_memory(memory);
+ }
+
+ #[cfg_attr(feature = "tracing", tracing::instrument(skip(self)))]
+ unsafe fn map_memory(
+ &self,
+ memory: &Arc<B::Memory>,
+ offset: u64,
+ size: u64,
+ ) -> Result<NonNull<u8>, gpu_alloc::DeviceMapError> {
+ match self.0.map_memory(
+ memory,
+ hal::memory::Segment {
+ offset,
+ size: Some(size),
+ },
+ ) {
+ Ok(ptr) => Ok(NonNull::new(ptr).expect("Pointer to memory mapping must not be null")),
+ Err(hal::device::MapError::OutOfMemory(_)) => {
+ Err(gpu_alloc::DeviceMapError::OutOfDeviceMemory)
+ }
+ Err(hal::device::MapError::MappingFailed) => Err(gpu_alloc::DeviceMapError::MapFailed),
+ Err(other) => panic!("Unexpected map error: {:?}", other),
+ }
+ }
+
+ #[cfg_attr(feature = "tracing", tracing::instrument(skip(self)))]
+ unsafe fn unmap_memory(&self, memory: &Arc<B::Memory>) {
+ self.0.unmap_memory(memory);
+ }
+
+ #[cfg_attr(feature = "tracing", tracing::instrument(skip(self)))]
+ unsafe fn invalidate_memory_ranges(
+ &self,
+ ranges: &[gpu_alloc::MappedMemoryRange<'_, Arc<B::Memory>>],
+ ) -> Result<(), gpu_alloc::OutOfMemory> {
+ self.0
+ .invalidate_mapped_memory_ranges(ranges.iter().map(|range| {
+ (
+ &**range.memory,
+ hal::memory::Segment {
+ offset: range.offset,
+ size: Some(range.size),
+ },
+ )
+ }))
+ .map_err(|_| gpu_alloc::OutOfMemory::OutOfHostMemory)
+ }
+
+ #[cfg_attr(feature = "tracing", tracing::instrument(skip(self)))]
+ unsafe fn flush_memory_ranges(
+ &self,
+ ranges: &[gpu_alloc::MappedMemoryRange<'_, Arc<B::Memory>>],
+ ) -> Result<(), gpu_alloc::OutOfMemory> {
+ self.0
+ .flush_mapped_memory_ranges(ranges.iter().map(|range| {
+ (
+ &**range.memory,
+ hal::memory::Segment {
+ offset: range.offset,
+ size: Some(range.size),
+ },
+ )
+ }))
+ .map_err(|_| gpu_alloc::OutOfMemory::OutOfHostMemory)
+ }
+}
diff --git a/gfx/wgpu/wgpu-core/src/device/descriptor.rs b/gfx/wgpu/wgpu-core/src/device/descriptor.rs
new file mode 100644
index 0000000000..92b896bd31
--- /dev/null
+++ b/gfx/wgpu/wgpu-core/src/device/descriptor.rs
@@ -0,0 +1,168 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+use super::DeviceError;
+use arrayvec::ArrayVec;
+
+pub use gpu_descriptor::DescriptorTotalCount;
+
+pub type DescriptorSet<B> = gpu_descriptor::DescriptorSet<<B as hal::Backend>::DescriptorSet>;
+
+#[derive(Debug)]
+pub struct DescriptorAllocator<B: hal::Backend>(
+ gpu_descriptor::DescriptorAllocator<B::DescriptorPool, B::DescriptorSet>,
+);
+struct DescriptorDevice<'a, B: hal::Backend>(&'a B::Device);
+
+impl<B: hal::Backend> DescriptorAllocator<B> {
+ pub fn new() -> Self {
+ DescriptorAllocator(unsafe { gpu_descriptor::DescriptorAllocator::new(0) })
+ }
+
+ pub fn allocate(
+ &mut self,
+ device: &B::Device,
+ layout: &B::DescriptorSetLayout,
+ layout_descriptor_count: &DescriptorTotalCount,
+ count: u32,
+ ) -> Result<Vec<DescriptorSet<B>>, DeviceError> {
+ self.0
+ .allocate(
+ &DescriptorDevice::<B>(device),
+ layout,
+ gpu_descriptor::DescriptorSetLayoutCreateFlags::empty(),
+ layout_descriptor_count,
+ count,
+ )
+ .map_err(|err| {
+ tracing::warn!("Descriptor set allocation failed: {}", err);
+ DeviceError::OutOfMemory
+ })
+ }
+
+ pub fn free(&mut self, device: &B::Device, sets: impl IntoIterator<Item = DescriptorSet<B>>) {
+ unsafe { self.0.free(&DescriptorDevice::<B>(device), sets) }
+ }
+
+ pub fn cleanup(&mut self, device: &B::Device) {
+ self.0.cleanup(&DescriptorDevice::<B>(device))
+ }
+}
+
+impl<B: hal::Backend>
+ gpu_descriptor::DescriptorDevice<B::DescriptorSetLayout, B::DescriptorPool, B::DescriptorSet>
+ for DescriptorDevice<'_, B>
+{
+ unsafe fn create_descriptor_pool(
+ &self,
+ descriptor_count: &DescriptorTotalCount,
+ max_sets: u32,
+ flags: gpu_descriptor::DescriptorPoolCreateFlags,
+ ) -> Result<B::DescriptorPool, gpu_descriptor::CreatePoolError> {
+ let mut ranges = ArrayVec::<[_; 7]>::new();
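+        // One range per descriptor type this allocator hands out; zero-count ranges are
+        // stripped below, since empty pool ranges are useless and may be rejected by backends.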
+ ranges.push(hal::pso::DescriptorRangeDesc {
+ ty: hal::pso::DescriptorType::Sampler,
+ count: descriptor_count.sampler as _,
+ });
+ ranges.push(hal::pso::DescriptorRangeDesc {
+ ty: hal::pso::DescriptorType::Image {
+ ty: hal::pso::ImageDescriptorType::Sampled {
+ with_sampler: false,
+ },
+ },
+ count: descriptor_count.sampled_image as _,
+ });
+ ranges.push(hal::pso::DescriptorRangeDesc {
+ ty: hal::pso::DescriptorType::Image {
+ ty: hal::pso::ImageDescriptorType::Storage { read_only: false },
+ },
+ count: descriptor_count.storage_image as _,
+ });
+ ranges.push(hal::pso::DescriptorRangeDesc {
+ ty: hal::pso::DescriptorType::Buffer {
+ ty: hal::pso::BufferDescriptorType::Uniform,
+ format: hal::pso::BufferDescriptorFormat::Structured {
+ dynamic_offset: false,
+ },
+ },
+ count: descriptor_count.uniform_buffer as _,
+ });
+ ranges.push(hal::pso::DescriptorRangeDesc {
+ ty: hal::pso::DescriptorType::Buffer {
+ ty: hal::pso::BufferDescriptorType::Storage { read_only: false },
+ format: hal::pso::BufferDescriptorFormat::Structured {
+ dynamic_offset: false,
+ },
+ },
+ count: descriptor_count.storage_buffer as _,
+ });
+ ranges.push(hal::pso::DescriptorRangeDesc {
+ ty: hal::pso::DescriptorType::Buffer {
+ ty: hal::pso::BufferDescriptorType::Uniform,
+ format: hal::pso::BufferDescriptorFormat::Structured {
+ dynamic_offset: true,
+ },
+ },
+ count: descriptor_count.uniform_buffer_dynamic as _,
+ });
+ ranges.push(hal::pso::DescriptorRangeDesc {
+ ty: hal::pso::DescriptorType::Buffer {
+ ty: hal::pso::BufferDescriptorType::Storage { read_only: false },
+ format: hal::pso::BufferDescriptorFormat::Structured {
+ dynamic_offset: true,
+ },
+ },
+ count: descriptor_count.storage_buffer_dynamic as _,
+ });
+ ranges.retain(|rd| rd.count != 0);
+
+ match hal::device::Device::create_descriptor_pool(
+ self.0,
+ max_sets as usize,
+ ranges,
+ hal::pso::DescriptorPoolCreateFlags::from_bits_truncate(flags.bits() as u32),
+ ) {
+ Ok(pool) => Ok(pool),
+ Err(hal::device::OutOfMemory::Host) => {
+ Err(gpu_descriptor::CreatePoolError::OutOfHostMemory)
+ }
+ Err(hal::device::OutOfMemory::Device) => {
+ Err(gpu_descriptor::CreatePoolError::OutOfDeviceMemory)
+ }
+ }
+ }
+
+ unsafe fn destroy_descriptor_pool(&self, pool: B::DescriptorPool) {
+ hal::device::Device::destroy_descriptor_pool(self.0, pool);
+ }
+
+ unsafe fn alloc_descriptor_sets<'a>(
+ &self,
+ pool: &mut B::DescriptorPool,
+ layouts: impl Iterator<Item = &'a B::DescriptorSetLayout>,
+ sets: &mut impl Extend<B::DescriptorSet>,
+ ) -> Result<(), gpu_descriptor::DeviceAllocationError> {
+ use gpu_descriptor::DeviceAllocationError as Dae;
+ match hal::pso::DescriptorPool::allocate(pool, layouts, sets) {
+ Ok(()) => Ok(()),
+ Err(hal::pso::AllocationError::OutOfMemory(oom)) => Err(match oom {
+ hal::device::OutOfMemory::Host => Dae::OutOfHostMemory,
+ hal::device::OutOfMemory::Device => Dae::OutOfDeviceMemory,
+ }),
+ Err(hal::pso::AllocationError::OutOfPoolMemory) => Err(Dae::OutOfPoolMemory),
+ Err(hal::pso::AllocationError::FragmentedPool) => Err(Dae::FragmentedPool),
+ Err(hal::pso::AllocationError::IncompatibleLayout) => {
+ panic!("Incompatible descriptor set layout")
+ }
+ }
+ }
+
+ unsafe fn dealloc_descriptor_sets<'a>(
+ &self,
+ pool: &mut B::DescriptorPool,
+ sets: impl Iterator<Item = B::DescriptorSet>,
+ ) {
+ hal::pso::DescriptorPool::free(pool, sets)
+ }
+}
diff --git a/gfx/wgpu/wgpu-core/src/device/life.rs b/gfx/wgpu/wgpu-core/src/device/life.rs
new file mode 100644
index 0000000000..3c46b72362
--- /dev/null
+++ b/gfx/wgpu/wgpu-core/src/device/life.rs
@@ -0,0 +1,760 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#[cfg(feature = "trace")]
+use crate::device::trace;
+use crate::{
+ device::{
+ alloc,
+ descriptor::{DescriptorAllocator, DescriptorSet},
+ queue::TempResource,
+ DeviceError,
+ },
+ hub::{GfxBackend, GlobalIdentityHandlerFactory, Hub, Token},
+ id, resource,
+ track::TrackerSet,
+ FastHashMap, RefCount, Stored, SubmissionIndex,
+};
+
+use copyless::VecHelper as _;
+use hal::device::Device as _;
+use parking_lot::Mutex;
+use thiserror::Error;
+
+use std::sync::atomic::Ordering;
+
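+/// How long `wait_idle` waits for in-flight GPU work before declaring the GPU stuck, in milliseconds.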
+const CLEANUP_WAIT_MS: u64 = 5000;
+
+/// A struct that keeps lists of resources that are no longer needed by the user.
+#[derive(Debug, Default)]
+pub struct SuspectedResources {
+ pub(crate) buffers: Vec<id::Valid<id::BufferId>>,
+ pub(crate) textures: Vec<id::Valid<id::TextureId>>,
+ pub(crate) texture_views: Vec<id::Valid<id::TextureViewId>>,
+ pub(crate) samplers: Vec<id::Valid<id::SamplerId>>,
+ pub(crate) bind_groups: Vec<id::Valid<id::BindGroupId>>,
+ pub(crate) compute_pipelines: Vec<id::Valid<id::ComputePipelineId>>,
+ pub(crate) render_pipelines: Vec<id::Valid<id::RenderPipelineId>>,
+ pub(crate) bind_group_layouts: Vec<id::Valid<id::BindGroupLayoutId>>,
+ pub(crate) pipeline_layouts: Vec<Stored<id::PipelineLayoutId>>,
+ pub(crate) render_bundles: Vec<id::Valid<id::RenderBundleId>>,
+}
+
+impl SuspectedResources {
+ pub(crate) fn clear(&mut self) {
+ self.buffers.clear();
+ self.textures.clear();
+ self.texture_views.clear();
+ self.samplers.clear();
+ self.bind_groups.clear();
+ self.compute_pipelines.clear();
+ self.render_pipelines.clear();
+ self.bind_group_layouts.clear();
+ self.pipeline_layouts.clear();
+ self.render_bundles.clear();
+ }
+
+ pub(crate) fn extend(&mut self, other: &Self) {
+ self.buffers.extend_from_slice(&other.buffers);
+ self.textures.extend_from_slice(&other.textures);
+ self.texture_views.extend_from_slice(&other.texture_views);
+ self.samplers.extend_from_slice(&other.samplers);
+ self.bind_groups.extend_from_slice(&other.bind_groups);
+ self.compute_pipelines
+ .extend_from_slice(&other.compute_pipelines);
+ self.render_pipelines
+ .extend_from_slice(&other.render_pipelines);
+ self.bind_group_layouts
+ .extend_from_slice(&other.bind_group_layouts);
+ self.pipeline_layouts
+ .extend_from_slice(&other.pipeline_layouts);
+ self.render_bundles.extend_from_slice(&other.render_bundles);
+ }
+
+ pub(crate) fn add_trackers(&mut self, trackers: &TrackerSet) {
+ self.buffers.extend(trackers.buffers.used());
+ self.textures.extend(trackers.textures.used());
+ self.texture_views.extend(trackers.views.used());
+ self.samplers.extend(trackers.samplers.used());
+ self.bind_groups.extend(trackers.bind_groups.used());
+ self.compute_pipelines.extend(trackers.compute_pipes.used());
+ self.render_pipelines.extend(trackers.render_pipes.used());
+ self.render_bundles.extend(trackers.bundles.used());
+ }
+}
+
+/// A struct that keeps lists of raw backend resources that are no longer
+/// referenced by the user and are pending destruction once it is safe.
+#[derive(Debug)]
+struct NonReferencedResources<B: hal::Backend> {
+ buffers: Vec<(B::Buffer, alloc::MemoryBlock<B>)>,
+ images: Vec<(B::Image, alloc::MemoryBlock<B>)>,
+ // Note: we keep the associated ID here in order to be able to check
+ // at any point what resources are used in a submission.
+ image_views: Vec<(id::Valid<id::TextureViewId>, B::ImageView)>,
+ samplers: Vec<B::Sampler>,
+ framebuffers: Vec<B::Framebuffer>,
+ desc_sets: Vec<DescriptorSet<B>>,
+ compute_pipes: Vec<B::ComputePipeline>,
+ graphics_pipes: Vec<B::GraphicsPipeline>,
+ descriptor_set_layouts: Vec<B::DescriptorSetLayout>,
+ pipeline_layouts: Vec<B::PipelineLayout>,
+}
+
+impl<B: hal::Backend> NonReferencedResources<B> {
+ fn new() -> Self {
+ Self {
+ buffers: Vec::new(),
+ images: Vec::new(),
+ image_views: Vec::new(),
+ samplers: Vec::new(),
+ framebuffers: Vec::new(),
+ desc_sets: Vec::new(),
+ compute_pipes: Vec::new(),
+ graphics_pipes: Vec::new(),
+ descriptor_set_layouts: Vec::new(),
+ pipeline_layouts: Vec::new(),
+ }
+ }
+
+ fn extend(&mut self, other: Self) {
+ self.buffers.extend(other.buffers);
+ self.images.extend(other.images);
+ self.image_views.extend(other.image_views);
+ self.samplers.extend(other.samplers);
+ self.framebuffers.extend(other.framebuffers);
+ self.desc_sets.extend(other.desc_sets);
+ self.compute_pipes.extend(other.compute_pipes);
+ self.graphics_pipes.extend(other.graphics_pipes);
+ assert!(other.descriptor_set_layouts.is_empty());
+ assert!(other.pipeline_layouts.is_empty());
+ }
+
+ unsafe fn clean(
+ &mut self,
+ device: &B::Device,
+ memory_allocator_mutex: &Mutex<alloc::MemoryAllocator<B>>,
+ descriptor_allocator_mutex: &Mutex<DescriptorAllocator<B>>,
+ ) {
+ if !self.buffers.is_empty() || !self.images.is_empty() {
+ let mut allocator = memory_allocator_mutex.lock();
+ for (raw, memory) in self.buffers.drain(..) {
+ tracing::trace!("Buffer {:?} is destroyed with memory {:?}", raw, memory);
+ device.destroy_buffer(raw);
+ allocator.free(device, memory);
+ }
+ for (raw, memory) in self.images.drain(..) {
+ tracing::trace!("Image {:?} is destroyed with memory {:?}", raw, memory);
+ device.destroy_image(raw);
+ allocator.free(device, memory);
+ }
+ }
+
+ for (_, raw) in self.image_views.drain(..) {
+ device.destroy_image_view(raw);
+ }
+ for raw in self.samplers.drain(..) {
+ device.destroy_sampler(raw);
+ }
+ for raw in self.framebuffers.drain(..) {
+ device.destroy_framebuffer(raw);
+ }
+
+ if !self.desc_sets.is_empty() {
+ descriptor_allocator_mutex
+ .lock()
+ .free(device, self.desc_sets.drain(..));
+ }
+
+ for raw in self.compute_pipes.drain(..) {
+ device.destroy_compute_pipeline(raw);
+ }
+ for raw in self.graphics_pipes.drain(..) {
+ device.destroy_graphics_pipeline(raw);
+ }
+ for raw in self.descriptor_set_layouts.drain(..) {
+ device.destroy_descriptor_set_layout(raw);
+ }
+ for raw in self.pipeline_layouts.drain(..) {
+ device.destroy_pipeline_layout(raw);
+ }
+ }
+}
+
+#[derive(Debug)]
+struct ActiveSubmission<B: hal::Backend> {
+ index: SubmissionIndex,
+ fence: B::Fence,
+ last_resources: NonReferencedResources<B>,
+ mapped: Vec<id::Valid<id::BufferId>>,
+}
+
+#[derive(Clone, Debug, Error)]
+pub enum WaitIdleError {
+ #[error(transparent)]
+ Device(#[from] DeviceError),
+ #[error("GPU got stuck :(")]
+ StuckGpu,
+}
+
+/// A struct responsible for tracking resource lifetimes.
+///
+/// Here is how host mapping is handled:
+/// 1. When a mapping is requested, we add the buffer to the life tracker's list of `mapped` buffers.
+/// 2. When `triage_suspected` is called, it checks the last submission index associated with each of the mapped buffers,
+///    and registers the buffer either with a submission in flight or straight into the `ready_to_map` vector.
+/// 3. When an `ActiveSubmission` is retired, the mapped buffers associated with it are moved to the `ready_to_map` vector.
+/// 4. Finally, `handle_mapping` issues all the callbacks.
+#[derive(Debug)]
+pub(crate) struct LifetimeTracker<B: hal::Backend> {
+ /// Resources that the user has requested be mapped, but are still in use.
+ mapped: Vec<Stored<id::BufferId>>,
+    /// Buffers can be used in a submission that is yet to be made, by
+    /// means of `write_buffer()`, so we have a special place for them.
+ pub future_suspected_buffers: Vec<Stored<id::BufferId>>,
+ /// Textures can be used in the upcoming submission by `write_texture`.
+ pub future_suspected_textures: Vec<Stored<id::TextureId>>,
+ /// Resources that are suspected for destruction.
+ pub suspected_resources: SuspectedResources,
+    /// Resources that are not referenced anymore but are still used by the GPU.
+    /// Grouped by submissions associated with a fence and a submission index.
+    /// The active submissions have to be stored in FIFO order: the oldest come first.
+ active: Vec<ActiveSubmission<B>>,
+    /// Resources that are neither referenced nor used, just awaiting
+    /// actual deletion.
+ free_resources: NonReferencedResources<B>,
+ ready_to_map: Vec<id::Valid<id::BufferId>>,
+}
+
+impl<B: hal::Backend> LifetimeTracker<B> {
+ pub fn new() -> Self {
+ Self {
+ mapped: Vec::new(),
+ future_suspected_buffers: Vec::new(),
+ future_suspected_textures: Vec::new(),
+ suspected_resources: SuspectedResources::default(),
+ active: Vec::new(),
+ free_resources: NonReferencedResources::new(),
+ ready_to_map: Vec::new(),
+ }
+ }
+
+ pub fn track_submission(
+ &mut self,
+ index: SubmissionIndex,
+ fence: B::Fence,
+ new_suspects: &SuspectedResources,
+ temp_resources: impl Iterator<Item = (TempResource<B>, alloc::MemoryBlock<B>)>,
+ ) {
+ let mut last_resources = NonReferencedResources::new();
+ for (res, memory) in temp_resources {
+ match res {
+ TempResource::Buffer(raw) => last_resources.buffers.push((raw, memory)),
+ TempResource::Image(raw) => last_resources.images.push((raw, memory)),
+ }
+ }
+
+ self.suspected_resources.buffers.extend(
+ self.future_suspected_buffers
+ .drain(..)
+ .map(|stored| stored.value),
+ );
+ self.suspected_resources.textures.extend(
+ self.future_suspected_textures
+ .drain(..)
+ .map(|stored| stored.value),
+ );
+ self.suspected_resources.extend(new_suspects);
+
+ self.active.alloc().init(ActiveSubmission {
+ index,
+ fence,
+ last_resources,
+ mapped: Vec::new(),
+ });
+ }
+
+ pub(crate) fn map(&mut self, value: id::Valid<id::BufferId>, ref_count: RefCount) {
+ self.mapped.push(Stored { value, ref_count });
+ }
+
+ fn wait_idle(&self, device: &B::Device) -> Result<(), WaitIdleError> {
+ if !self.active.is_empty() {
+ tracing::debug!("Waiting for IDLE...");
+ let status = unsafe {
+ device
+ .wait_for_fences(
+ self.active.iter().map(|a| &a.fence),
+ hal::device::WaitFor::All,
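+                        // gfx-hal takes this timeout in nanoseconds.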
+ CLEANUP_WAIT_MS * 1_000_000,
+ )
+ .map_err(DeviceError::from)?
+ };
+ tracing::debug!("...Done");
+
+            if !status {
+ // We timed out while waiting for the fences
+ return Err(WaitIdleError::StuckGpu);
+ }
+ }
+ Ok(())
+ }
+
+ /// Returns the last submission index that is done.
+ pub fn triage_submissions(
+ &mut self,
+ device: &B::Device,
+ force_wait: bool,
+ ) -> Result<SubmissionIndex, WaitIdleError> {
+ if force_wait {
+ self.wait_idle(device)?;
+ }
+ //TODO: enable when `is_sorted_by_key` is stable
+ //debug_assert!(self.active.is_sorted_by_key(|a| a.index));
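+        // Submissions complete in order, so everything before the first unsignaled fence is done.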
+ let done_count = self
+ .active
+ .iter()
+ .position(|a| unsafe { !device.get_fence_status(&a.fence).unwrap_or(false) })
+ .unwrap_or_else(|| self.active.len());
+ let last_done = if done_count != 0 {
+ self.active[done_count - 1].index
+ } else {
+ return Ok(0);
+ };
+
+ for a in self.active.drain(..done_count) {
+ tracing::trace!("Active submission {} is done", a.index);
+ self.free_resources.extend(a.last_resources);
+ self.ready_to_map.extend(a.mapped);
+ unsafe {
+ device.destroy_fence(a.fence);
+ }
+ }
+
+ Ok(last_done)
+ }
+
+ pub fn cleanup(
+ &mut self,
+ device: &B::Device,
+ memory_allocator_mutex: &Mutex<alloc::MemoryAllocator<B>>,
+ descriptor_allocator_mutex: &Mutex<DescriptorAllocator<B>>,
+ ) {
+ unsafe {
+ self.free_resources
+ .clean(device, memory_allocator_mutex, descriptor_allocator_mutex);
+ descriptor_allocator_mutex.lock().cleanup(device);
+ }
+ }
+
+ pub fn schedule_resource_destruction(
+ &mut self,
+ temp_resource: TempResource<B>,
+ memory: alloc::MemoryBlock<B>,
+ last_submit_index: SubmissionIndex,
+ ) {
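+        // Attach the resource to the submission that last used it, or free it
+        // immediately if that submission has already retired.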
+ let resources = self
+ .active
+ .iter_mut()
+ .find(|a| a.index == last_submit_index)
+ .map_or(&mut self.free_resources, |a| &mut a.last_resources);
+ match temp_resource {
+ TempResource::Buffer(raw) => resources.buffers.push((raw, memory)),
+ TempResource::Image(raw) => resources.images.push((raw, memory)),
+ }
+ }
+}
+
+impl<B: GfxBackend> LifetimeTracker<B> {
+ pub(crate) fn triage_suspected<G: GlobalIdentityHandlerFactory>(
+ &mut self,
+ hub: &Hub<B, G>,
+ trackers: &Mutex<TrackerSet>,
+ #[cfg(feature = "trace")] trace: Option<&Mutex<trace::Trace>>,
+ token: &mut Token<super::Device<B>>,
+ ) {
+ if !self.suspected_resources.render_bundles.is_empty() {
+ let mut trackers = trackers.lock();
+ let (mut guard, _) = hub.render_bundles.write(token);
+
+ while let Some(id) = self.suspected_resources.render_bundles.pop() {
+ if trackers.bundles.remove_abandoned(id) {
+ #[cfg(feature = "trace")]
+ trace.map(|t| t.lock().add(trace::Action::DestroyRenderBundle(id.0)));
+
+ if let Some(res) = hub.render_bundles.unregister_locked(id.0, &mut *guard) {
+ self.suspected_resources.add_trackers(&res.used);
+ }
+ }
+ }
+ }
+
+ if !self.suspected_resources.bind_groups.is_empty() {
+ let mut trackers = trackers.lock();
+ let (mut guard, _) = hub.bind_groups.write(token);
+
+ while let Some(id) = self.suspected_resources.bind_groups.pop() {
+ if trackers.bind_groups.remove_abandoned(id) {
+ #[cfg(feature = "trace")]
+ trace.map(|t| t.lock().add(trace::Action::DestroyBindGroup(id.0)));
+
+ if let Some(res) = hub.bind_groups.unregister_locked(id.0, &mut *guard) {
+ self.suspected_resources.add_trackers(&res.used);
+
+ let submit_index = res.life_guard.submission_index.load(Ordering::Acquire);
+ self.active
+ .iter_mut()
+ .find(|a| a.index == submit_index)
+ .map_or(&mut self.free_resources, |a| &mut a.last_resources)
+ .desc_sets
+ .push(res.raw);
+ }
+ }
+ }
+ }
+
+ if !self.suspected_resources.texture_views.is_empty() {
+ let mut trackers = trackers.lock();
+ let (mut guard, _) = hub.texture_views.write(token);
+
+ for id in self.suspected_resources.texture_views.drain(..) {
+ if trackers.views.remove_abandoned(id) {
+ #[cfg(feature = "trace")]
+ trace.map(|t| t.lock().add(trace::Action::DestroyTextureView(id.0)));
+
+ if let Some(res) = hub.texture_views.unregister_locked(id.0, &mut *guard) {
+ let raw = match res.inner {
+ resource::TextureViewInner::Native { raw, source_id } => {
+ self.suspected_resources.textures.push(source_id.value);
+ raw
+ }
+ resource::TextureViewInner::SwapChain { .. } => unreachable!(),
+ };
+
+ let submit_index = res.life_guard.submission_index.load(Ordering::Acquire);
+ self.active
+ .iter_mut()
+ .find(|a| a.index == submit_index)
+ .map_or(&mut self.free_resources, |a| &mut a.last_resources)
+ .image_views
+ .push((id, raw));
+ }
+ }
+ }
+ }
+
+ if !self.suspected_resources.textures.is_empty() {
+ let mut trackers = trackers.lock();
+ let (mut guard, _) = hub.textures.write(token);
+
+ for id in self.suspected_resources.textures.drain(..) {
+ if trackers.textures.remove_abandoned(id) {
+ #[cfg(feature = "trace")]
+ trace.map(|t| t.lock().add(trace::Action::DestroyTexture(id.0)));
+
+ if let Some(res) = hub.textures.unregister_locked(id.0, &mut *guard) {
+ let submit_index = res.life_guard.submission_index.load(Ordering::Acquire);
+ self.active
+ .iter_mut()
+ .find(|a| a.index == submit_index)
+ .map_or(&mut self.free_resources, |a| &mut a.last_resources)
+ .images
+ .extend(res.raw);
+ }
+ }
+ }
+ }
+
+ if !self.suspected_resources.samplers.is_empty() {
+ let mut trackers = trackers.lock();
+ let (mut guard, _) = hub.samplers.write(token);
+
+ for id in self.suspected_resources.samplers.drain(..) {
+ if trackers.samplers.remove_abandoned(id) {
+ #[cfg(feature = "trace")]
+ trace.map(|t| t.lock().add(trace::Action::DestroySampler(id.0)));
+
+ if let Some(res) = hub.samplers.unregister_locked(id.0, &mut *guard) {
+ let submit_index = res.life_guard.submission_index.load(Ordering::Acquire);
+ self.active
+ .iter_mut()
+ .find(|a| a.index == submit_index)
+ .map_or(&mut self.free_resources, |a| &mut a.last_resources)
+ .samplers
+ .push(res.raw);
+ }
+ }
+ }
+ }
+
+ if !self.suspected_resources.buffers.is_empty() {
+ let mut trackers = trackers.lock();
+ let (mut guard, _) = hub.buffers.write(token);
+
+ for id in self.suspected_resources.buffers.drain(..) {
+ if trackers.buffers.remove_abandoned(id) {
+ #[cfg(feature = "trace")]
+ trace.map(|t| t.lock().add(trace::Action::DestroyBuffer(id.0)));
+ tracing::debug!("Buffer {:?} is detached", id);
+
+ if let Some(res) = hub.buffers.unregister_locked(id.0, &mut *guard) {
+ let submit_index = res.life_guard.submission_index.load(Ordering::Acquire);
+ self.active
+ .iter_mut()
+ .find(|a| a.index == submit_index)
+ .map_or(&mut self.free_resources, |a| &mut a.last_resources)
+ .buffers
+ .extend(res.raw);
+ }
+ }
+ }
+ }
+
+ if !self.suspected_resources.compute_pipelines.is_empty() {
+ let mut trackers = trackers.lock();
+ let (mut guard, _) = hub.compute_pipelines.write(token);
+
+ for id in self.suspected_resources.compute_pipelines.drain(..) {
+ if trackers.compute_pipes.remove_abandoned(id) {
+ #[cfg(feature = "trace")]
+ trace.map(|t| t.lock().add(trace::Action::DestroyComputePipeline(id.0)));
+
+ if let Some(res) = hub.compute_pipelines.unregister_locked(id.0, &mut *guard) {
+ let submit_index = res.life_guard.submission_index.load(Ordering::Acquire);
+ self.active
+ .iter_mut()
+ .find(|a| a.index == submit_index)
+ .map_or(&mut self.free_resources, |a| &mut a.last_resources)
+ .compute_pipes
+ .push(res.raw);
+ }
+ }
+ }
+ }
+
+ if !self.suspected_resources.render_pipelines.is_empty() {
+ let mut trackers = trackers.lock();
+ let (mut guard, _) = hub.render_pipelines.write(token);
+
+ for id in self.suspected_resources.render_pipelines.drain(..) {
+ if trackers.render_pipes.remove_abandoned(id) {
+ #[cfg(feature = "trace")]
+ trace.map(|t| t.lock().add(trace::Action::DestroyRenderPipeline(id.0)));
+
+ if let Some(res) = hub.render_pipelines.unregister_locked(id.0, &mut *guard) {
+ let submit_index = res.life_guard.submission_index.load(Ordering::Acquire);
+ self.active
+ .iter_mut()
+ .find(|a| a.index == submit_index)
+ .map_or(&mut self.free_resources, |a| &mut a.last_resources)
+ .graphics_pipes
+ .push(res.raw);
+ }
+ }
+ }
+ }
+
+ if !self.suspected_resources.pipeline_layouts.is_empty() {
+ let (mut guard, _) = hub.pipeline_layouts.write(token);
+
+ for Stored {
+ value: id,
+ ref_count,
+ } in self.suspected_resources.pipeline_layouts.drain(..)
+ {
+ //Note: this has to happen after all the suspected pipelines are destroyed
+ if ref_count.load() == 1 {
+ #[cfg(feature = "trace")]
+ trace.map(|t| t.lock().add(trace::Action::DestroyPipelineLayout(id.0)));
+
+ if let Some(lay) = hub.pipeline_layouts.unregister_locked(id.0, &mut *guard) {
+ self.suspected_resources
+ .bind_group_layouts
+ .extend_from_slice(&lay.bind_group_layout_ids);
+ self.free_resources.pipeline_layouts.push(lay.raw);
+ }
+ }
+ }
+ }
+
+ if !self.suspected_resources.bind_group_layouts.is_empty() {
+ let (mut guard, _) = hub.bind_group_layouts.write(token);
+
+ for id in self.suspected_resources.bind_group_layouts.drain(..) {
+ //Note: this has to happen after all the suspected pipelines are destroyed
+ //Note: nothing else can bump the refcount since the guard is locked exclusively
+ //Note: same BGL can appear multiple times in the list, but only the last
+ // encounter could drop the refcount to 0.
+ if guard[id].multi_ref_count.dec_and_check_empty() {
+ #[cfg(feature = "trace")]
+ trace.map(|t| t.lock().add(trace::Action::DestroyBindGroupLayout(id.0)));
+ if let Some(lay) = hub.bind_group_layouts.unregister_locked(id.0, &mut *guard) {
+ self.free_resources.descriptor_set_layouts.push(lay.raw);
+ }
+ }
+ }
+ }
+ }
+
+ pub(crate) fn triage_mapped<G: GlobalIdentityHandlerFactory>(
+ &mut self,
+ hub: &Hub<B, G>,
+ token: &mut Token<super::Device<B>>,
+ ) {
+ if self.mapped.is_empty() {
+ return;
+ }
+ let (buffer_guard, _) = hub.buffers.read(token);
+
+ for stored in self.mapped.drain(..) {
+ let resource_id = stored.value;
+ let buf = &buffer_guard[resource_id];
+
+ let submit_index = buf.life_guard.submission_index.load(Ordering::Acquire);
+ tracing::trace!(
+ "Mapping of {:?} at submission {:?} gets assigned to active {:?}",
+ resource_id,
+ submit_index,
+ self.active.iter().position(|a| a.index == submit_index)
+ );
+
+ self.active
+ .iter_mut()
+ .find(|a| a.index == submit_index)
+ .map_or(&mut self.ready_to_map, |a| &mut a.mapped)
+ .push(resource_id);
+ }
+ }
+
+ pub(crate) fn triage_framebuffers<G: GlobalIdentityHandlerFactory>(
+ &mut self,
+ hub: &Hub<B, G>,
+ framebuffers: &mut FastHashMap<super::FramebufferKey, B::Framebuffer>,
+ token: &mut Token<super::Device<B>>,
+ ) {
+ let (texture_view_guard, _) = hub.texture_views.read(token);
+ let remove_list = framebuffers
+ .keys()
+ .filter_map(|key| {
+ let mut last_submit = None;
+ let mut needs_cleanup = false;
+
+                // A framebuffer needs to be scheduled for cleanup if at least one
+                // of its attachments is no longer valid.
+
+ for &at in key.all() {
+ // If this attachment is still registered, it's still valid
+ if texture_view_guard.contains(at.0) {
+ continue;
+ }
+
+                // This attachment is no longer registered, so this framebuffer needs cleanup
+ needs_cleanup = true;
+
+                // Check whether any active submissions are still referring to this
+                // attachment; if so, we need the greatest submission index, as
+                // that's the last time this attachment is still in use
+ let mut attachment_last_submit = None;
+ for a in &self.active {
+ if a.last_resources.image_views.iter().any(|&(id, _)| id == at) {
+ let max = attachment_last_submit.unwrap_or(0).max(a.index);
+ attachment_last_submit = Some(max);
+ }
+ }
+
+                // Across all attachments, we need the smallest index, because that's the last
+                // time this framebuffer is still valid
+ if let Some(attachment_last_submit) = attachment_last_submit {
+ let min = last_submit
+ .unwrap_or(std::usize::MAX)
+ .min(attachment_last_submit);
+ last_submit = Some(min);
+ }
+ }
+
+ if needs_cleanup {
+ Some((key.clone(), last_submit.unwrap_or(0)))
+ } else {
+ None
+ }
+ })
+ .collect::<FastHashMap<_, _>>();
+
+ if !remove_list.is_empty() {
+ tracing::debug!("Free framebuffers {:?}", remove_list);
+ for (ref key, submit_index) in remove_list {
+ let framebuffer = framebuffers.remove(key).unwrap();
+ self.active
+ .iter_mut()
+ .find(|a| a.index == submit_index)
+ .map_or(&mut self.free_resources, |a| &mut a.last_resources)
+ .framebuffers
+ .push(framebuffer);
+ }
+ }
+ }
+
+ pub(crate) fn handle_mapping<G: GlobalIdentityHandlerFactory>(
+ &mut self,
+ hub: &Hub<B, G>,
+ raw: &B::Device,
+ trackers: &Mutex<TrackerSet>,
+ token: &mut Token<super::Device<B>>,
+ ) -> Vec<super::BufferMapPendingCallback> {
+ if self.ready_to_map.is_empty() {
+ return Vec::new();
+ }
+ let (mut buffer_guard, _) = hub.buffers.write(token);
+ let mut pending_callbacks: Vec<super::BufferMapPendingCallback> =
+ Vec::with_capacity(self.ready_to_map.len());
+ let mut trackers = trackers.lock();
+ for buffer_id in self.ready_to_map.drain(..) {
+ let buffer = &mut buffer_guard[buffer_id];
+ if buffer.life_guard.ref_count.is_none() && trackers.buffers.remove_abandoned(buffer_id)
+ {
+ buffer.map_state = resource::BufferMapState::Idle;
+ tracing::debug!("Mapping request is dropped because the buffer is destroyed.");
+ if let Some(buf) = hub
+ .buffers
+ .unregister_locked(buffer_id.0, &mut *buffer_guard)
+ {
+ self.free_resources.buffers.extend(buf.raw);
+ }
+ } else {
+ let mapping = match std::mem::replace(
+ &mut buffer.map_state,
+ resource::BufferMapState::Idle,
+ ) {
+ resource::BufferMapState::Waiting(pending_mapping) => pending_mapping,
+ _ => panic!("No pending mapping."),
+ };
+ let status = if mapping.range.start != mapping.range.end {
+ tracing::debug!("Buffer {:?} map state -> Active", buffer_id);
+ let host = mapping.op.host;
+ let size = mapping.range.end - mapping.range.start;
+ match super::map_buffer(raw, buffer, mapping.range.start, size, host) {
+ Ok(ptr) => {
+ buffer.map_state = resource::BufferMapState::Active {
+ ptr,
+ sub_range: hal::buffer::SubRange {
+ offset: mapping.range.start,
+ size: Some(size),
+ },
+ host,
+ };
+ resource::BufferMapAsyncStatus::Success
+ }
+ Err(e) => {
+ tracing::error!("Mapping failed {:?}", e);
+ resource::BufferMapAsyncStatus::Error
+ }
+ }
+ } else {
+ resource::BufferMapAsyncStatus::Success
+ };
+ pending_callbacks.push((mapping.op, status));
+ }
+ }
+ pending_callbacks
+ }
+}
diff --git a/gfx/wgpu/wgpu-core/src/device/mod.rs b/gfx/wgpu/wgpu-core/src/device/mod.rs
new file mode 100644
index 0000000000..b1b7105fe3
--- /dev/null
+++ b/gfx/wgpu/wgpu-core/src/device/mod.rs
@@ -0,0 +1,4217 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+use crate::{
+ binding_model, command, conv,
+ device::life::WaitIdleError,
+ hub::{
+ GfxBackend, Global, GlobalIdentityHandlerFactory, Hub, Input, InvalidId, Storage, Token,
+ },
+ id, pipeline, resource, span, swap_chain,
+ track::{BufferState, TextureSelector, TextureState, TrackerSet},
+ validation::{self, check_buffer_usage, check_texture_usage},
+ FastHashMap, Label, LabelHelpers, LifeGuard, MultiRefCount, PrivateFeatures, Stored,
+ SubmissionIndex, MAX_BIND_GROUPS,
+};
+
+use arrayvec::ArrayVec;
+use copyless::VecHelper as _;
+use hal::{
+ command::CommandBuffer as _,
+ device::Device as _,
+ window::{PresentationSurface as _, Surface as _},
+};
+use parking_lot::{Mutex, MutexGuard};
+use thiserror::Error;
+use wgt::{
+ BufferAddress, BufferSize, InputStepMode, TextureDimension, TextureFormat, TextureViewDimension,
+};
+
+use std::{
+ borrow::Cow,
+ collections::{hash_map::Entry, BTreeMap},
+ iter,
+ marker::PhantomData,
+ mem,
+ ops::Range,
+ ptr,
+ sync::atomic::Ordering,
+};
+
+pub mod alloc;
+pub mod descriptor;
+mod life;
+mod queue;
+#[cfg(any(feature = "trace", feature = "replay"))]
+pub mod trace;
+
+use smallvec::SmallVec;
+#[cfg(feature = "trace")]
+use trace::{Action, Trace};
+
+pub const MAX_COLOR_TARGETS: usize = 4;
+pub const MAX_MIP_LEVELS: u32 = 16;
+pub const MAX_VERTEX_BUFFERS: usize = 16;
+pub const MAX_ANISOTROPY: u8 = 16;
+pub const SHADER_STAGE_COUNT: usize = 3;
+
+pub type DeviceDescriptor<'a> = wgt::DeviceDescriptor<Label<'a>>;
+
+pub fn all_buffer_stages() -> hal::pso::PipelineStage {
+ use hal::pso::PipelineStage as Ps;
+ Ps::DRAW_INDIRECT
+ | Ps::VERTEX_INPUT
+ | Ps::VERTEX_SHADER
+ | Ps::FRAGMENT_SHADER
+ | Ps::COMPUTE_SHADER
+ | Ps::TRANSFER
+ | Ps::HOST
+}
+pub fn all_image_stages() -> hal::pso::PipelineStage {
+ use hal::pso::PipelineStage as Ps;
+ Ps::EARLY_FRAGMENT_TESTS
+ | Ps::LATE_FRAGMENT_TESTS
+ | Ps::COLOR_ATTACHMENT_OUTPUT
+ | Ps::VERTEX_SHADER
+ | Ps::FRAGMENT_SHADER
+ | Ps::COMPUTE_SHADER
+ | Ps::TRANSFER
+}
+
+#[repr(C)]
+#[derive(Clone, Copy, Debug, PartialEq)]
+#[cfg_attr(feature = "trace", derive(serde::Serialize))]
+#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
+pub enum HostMap {
+ Read,
+ Write,
+}
+
+#[derive(Clone, Debug, Hash, PartialEq)]
+#[cfg_attr(feature = "serial-pass", derive(serde::Deserialize, serde::Serialize))]
+pub(crate) struct AttachmentData<T> {
+ pub colors: ArrayVec<[T; MAX_COLOR_TARGETS]>,
+ pub resolves: ArrayVec<[T; MAX_COLOR_TARGETS]>,
+ pub depth_stencil: Option<T>,
+}
+impl<T: PartialEq> Eq for AttachmentData<T> {}
+impl<T> AttachmentData<T> {
+ pub(crate) fn all(&self) -> impl Iterator<Item = &T> {
+ self.colors
+ .iter()
+ .chain(&self.resolves)
+ .chain(&self.depth_stencil)
+ }
+}
+
+pub(crate) type AttachmentDataVec<T> = ArrayVec<[T; MAX_COLOR_TARGETS + MAX_COLOR_TARGETS + 1]>;
+
+pub(crate) type RenderPassKey = AttachmentData<(hal::pass::Attachment, hal::image::Layout)>;
+pub(crate) type FramebufferKey = AttachmentData<id::Valid<id::TextureViewId>>;
+
+#[derive(Clone, Debug, Hash, PartialEq)]
+#[cfg_attr(feature = "serial-pass", derive(serde::Deserialize, serde::Serialize))]
+pub(crate) struct RenderPassContext {
+ pub attachments: AttachmentData<TextureFormat>,
+ pub sample_count: u8,
+}
+#[derive(Clone, Debug, Error)]
+pub enum RenderPassCompatibilityError {
+ #[error("Incompatible color attachment: {0:?} != {1:?}")]
+ IncompatibleColorAttachment(
+ ArrayVec<[TextureFormat; MAX_COLOR_TARGETS]>,
+ ArrayVec<[TextureFormat; MAX_COLOR_TARGETS]>,
+ ),
+ #[error("Incompatible depth-stencil attachment: {0:?} != {1:?}")]
+ IncompatibleDepthStencilAttachment(Option<TextureFormat>, Option<TextureFormat>),
+ #[error("Incompatible sample count: {0:?} != {1:?}")]
+ IncompatibleSampleCount(u8, u8),
+}
+
+impl RenderPassContext {
+    // Assumes the render pass only contains one subpass
+ pub(crate) fn check_compatible(
+ &self,
+ other: &RenderPassContext,
+ ) -> Result<(), RenderPassCompatibilityError> {
+ if self.attachments.colors != other.attachments.colors {
+ return Err(RenderPassCompatibilityError::IncompatibleColorAttachment(
+ self.attachments.colors.clone(),
+ other.attachments.colors.clone(),
+ ));
+ }
+ if self.attachments.depth_stencil != other.attachments.depth_stencil {
+ return Err(
+ RenderPassCompatibilityError::IncompatibleDepthStencilAttachment(
+ self.attachments.depth_stencil.clone(),
+ other.attachments.depth_stencil.clone(),
+ ),
+ );
+ }
+ if self.sample_count != other.sample_count {
+ return Err(RenderPassCompatibilityError::IncompatibleSampleCount(
+ self.sample_count,
+ other.sample_count,
+ ));
+ }
+ Ok(())
+ }
+}
+
+type BufferMapPendingCallback = (resource::BufferMapOperation, resource::BufferMapAsyncStatus);
+
+fn map_buffer<B: hal::Backend>(
+ raw: &B::Device,
+ buffer: &mut resource::Buffer<B>,
+ offset: hal::buffer::Offset,
+ size: BufferAddress,
+ kind: HostMap,
+) -> Result<ptr::NonNull<u8>, resource::BufferAccessError> {
+ let &mut (_, ref mut block) = buffer
+ .raw
+ .as_mut()
+ .ok_or(resource::BufferAccessError::Destroyed)?;
+ let ptr = block.map(raw, offset, size).map_err(DeviceError::from)?;
+
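+    // For non-coherent memory, reads must invalidate the CPU caches now, while writes
+    // are flushed later in `unmap_buffer`; we only record the segment to flush here.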
+ buffer.sync_mapped_writes = match kind {
+ HostMap::Read if !block.is_coherent() => {
+ block.invalidate_range(raw, offset, Some(size))?;
+ None
+ }
+ HostMap::Write if !block.is_coherent() => Some(hal::memory::Segment {
+ offset,
+ size: Some(size),
+ }),
+ _ => None,
+ };
+ Ok(ptr)
+}
+
+fn unmap_buffer<B: hal::Backend>(
+ raw: &B::Device,
+ buffer: &mut resource::Buffer<B>,
+) -> Result<(), resource::BufferAccessError> {
+ let &mut (_, ref mut block) = buffer
+ .raw
+ .as_mut()
+ .ok_or(resource::BufferAccessError::Destroyed)?;
+ if let Some(segment) = buffer.sync_mapped_writes.take() {
+ block.flush_range(raw, segment.offset, segment.size)?;
+ }
+ block.unmap(raw);
+ Ok(())
+}
+
+//Note: this logic is specifically moved out of `handle_mapping()` in order to
+// have nothing locked by the time we execute the user's callback code.
+fn fire_map_callbacks<I: IntoIterator<Item = BufferMapPendingCallback>>(callbacks: I) {
+ for (operation, status) in callbacks {
+ unsafe { (operation.callback)(status, operation.user_data) }
+ }
+}
+
+#[derive(Debug)]
+pub struct Device<B: hal::Backend> {
+ pub(crate) raw: B::Device,
+ pub(crate) adapter_id: Stored<id::AdapterId>,
+ pub(crate) queue_group: hal::queue::QueueGroup<B>,
+ pub(crate) cmd_allocator: command::CommandAllocator<B>,
+ mem_allocator: Mutex<alloc::MemoryAllocator<B>>,
+ desc_allocator: Mutex<descriptor::DescriptorAllocator<B>>,
+ //Note: The submission index here corresponds to the last submission that is done.
+ pub(crate) life_guard: LifeGuard,
+ pub(crate) active_submission_index: SubmissionIndex,
+ pub(crate) trackers: Mutex<TrackerSet>,
+ pub(crate) render_passes: Mutex<FastHashMap<RenderPassKey, B::RenderPass>>,
+ pub(crate) framebuffers: Mutex<FastHashMap<FramebufferKey, B::Framebuffer>>,
+ // Life tracker should be locked right after the device and before anything else.
+ life_tracker: Mutex<life::LifetimeTracker<B>>,
+ temp_suspected: life::SuspectedResources,
+ pub(crate) hal_limits: hal::Limits,
+ pub(crate) private_features: PrivateFeatures,
+ pub(crate) limits: wgt::Limits,
+ pub(crate) features: wgt::Features,
+ //TODO: move this behind another mutex. This would allow several methods to switch
+ // to borrow Device immutably, such as `write_buffer`, `write_texture`, and `buffer_unmap`.
+ pending_writes: queue::PendingWrites<B>,
+ #[cfg(feature = "trace")]
+ pub(crate) trace: Option<Mutex<Trace>>,
+}
+
+#[derive(Clone, Debug, Error)]
+pub enum CreateDeviceError {
+ #[error("not enough memory left")]
+ OutOfMemory,
+}
+
+impl<B: GfxBackend> Device<B> {
+ pub(crate) fn new(
+ raw: B::Device,
+ adapter_id: Stored<id::AdapterId>,
+ queue_group: hal::queue::QueueGroup<B>,
+ mem_props: hal::adapter::MemoryProperties,
+ hal_limits: hal::Limits,
+ private_features: PrivateFeatures,
+ desc: &DeviceDescriptor,
+ trace_path: Option<&std::path::Path>,
+ ) -> Result<Self, CreateDeviceError> {
+ let cmd_allocator = command::CommandAllocator::new(queue_group.family, &raw)
+ .or(Err(CreateDeviceError::OutOfMemory))?;
+
+ let mem_allocator = alloc::MemoryAllocator::new(mem_props, hal_limits);
+ let descriptors = descriptor::DescriptorAllocator::new();
+ #[cfg(not(feature = "trace"))]
+ match trace_path {
+ Some(_) => tracing::error!("Feature 'trace' is not enabled"),
+ None => (),
+ }
+
+ Ok(Self {
+ raw,
+ adapter_id,
+ cmd_allocator,
+ mem_allocator: Mutex::new(mem_allocator),
+ desc_allocator: Mutex::new(descriptors),
+ queue_group,
+ life_guard: LifeGuard::new("<device>"),
+ active_submission_index: 0,
+ trackers: Mutex::new(TrackerSet::new(B::VARIANT)),
+ render_passes: Mutex::new(FastHashMap::default()),
+ framebuffers: Mutex::new(FastHashMap::default()),
+ life_tracker: Mutex::new(life::LifetimeTracker::new()),
+ temp_suspected: life::SuspectedResources::default(),
+ #[cfg(feature = "trace")]
+ trace: trace_path.and_then(|path| match Trace::new(path) {
+ Ok(mut trace) => {
+ trace.add(Action::Init {
+ desc: desc.clone(),
+ backend: B::VARIANT,
+ });
+ Some(Mutex::new(trace))
+ }
+ Err(e) => {
+ tracing::error!("Unable to start a trace in '{:?}': {:?}", path, e);
+ None
+ }
+ }),
+ hal_limits,
+ private_features,
+ limits: desc.limits.clone(),
+ features: desc.features.clone(),
+ pending_writes: queue::PendingWrites::new(),
+ })
+ }
+
+ pub(crate) fn last_completed_submission_index(&self) -> SubmissionIndex {
+ self.life_guard.submission_index.load(Ordering::Acquire)
+ }
+
+ fn lock_life_internal<'this, 'token: 'this>(
+ tracker: &'this Mutex<life::LifetimeTracker<B>>,
+ _token: &mut Token<'token, Self>,
+ ) -> MutexGuard<'this, life::LifetimeTracker<B>> {
+ tracker.lock()
+ }
+
+ pub(crate) fn lock_life<'this, 'token: 'this>(
+ &'this self,
+ //TODO: fix this - the token has to be borrowed for the lock
+ token: &mut Token<'token, Self>,
+ ) -> MutexGuard<'this, life::LifetimeTracker<B>> {
+ Self::lock_life_internal(&self.life_tracker, token)
+ }
+
+ fn maintain<'this, 'token: 'this, G: GlobalIdentityHandlerFactory>(
+ &'this self,
+ hub: &Hub<B, G>,
+ force_wait: bool,
+ token: &mut Token<'token, Self>,
+ ) -> Result<Vec<BufferMapPendingCallback>, WaitIdleError> {
+ let mut life_tracker = self.lock_life(token);
+
+ life_tracker.triage_suspected(
+ hub,
+ &self.trackers,
+ #[cfg(feature = "trace")]
+ self.trace.as_ref(),
+ token,
+ );
+ life_tracker.triage_mapped(hub, token);
+ life_tracker.triage_framebuffers(hub, &mut *self.framebuffers.lock(), token);
+ let last_done = life_tracker.triage_submissions(&self.raw, force_wait)?;
+ let callbacks = life_tracker.handle_mapping(hub, &self.raw, &self.trackers, token);
+ life_tracker.cleanup(&self.raw, &self.mem_allocator, &self.desc_allocator);
+
+ self.life_guard
+ .submission_index
+ .store(last_done, Ordering::Release);
+ self.cmd_allocator.maintain(&self.raw, last_done);
+ Ok(callbacks)
+ }
+
+ fn untrack<'this, 'token: 'this, G: GlobalIdentityHandlerFactory>(
+ &'this mut self,
+ hub: &Hub<B, G>,
+ trackers: &TrackerSet,
+ mut token: &mut Token<'token, Self>,
+ ) {
+ self.temp_suspected.clear();
+ // As the tracker is cleared/dropped, we need to consider all the resources
+ // that it references for destruction in the next GC pass.
+ {
+ let (bind_group_guard, mut token) = hub.bind_groups.read(&mut token);
+ let (compute_pipe_guard, mut token) = hub.compute_pipelines.read(&mut token);
+ let (render_pipe_guard, mut token) = hub.render_pipelines.read(&mut token);
+ let (buffer_guard, mut token) = hub.buffers.read(&mut token);
+ let (texture_guard, mut token) = hub.textures.read(&mut token);
+ let (texture_view_guard, mut token) = hub.texture_views.read(&mut token);
+ let (sampler_guard, _) = hub.samplers.read(&mut token);
+
+ for id in trackers.buffers.used() {
+ if buffer_guard[id].life_guard.ref_count.is_none() {
+ self.temp_suspected.buffers.push(id);
+ }
+ }
+ for id in trackers.textures.used() {
+ if texture_guard[id].life_guard.ref_count.is_none() {
+ self.temp_suspected.textures.push(id);
+ }
+ }
+ for id in trackers.views.used() {
+ if texture_view_guard[id].life_guard.ref_count.is_none() {
+ self.temp_suspected.texture_views.push(id);
+ }
+ }
+ for id in trackers.bind_groups.used() {
+ if bind_group_guard[id].life_guard.ref_count.is_none() {
+ self.temp_suspected.bind_groups.push(id);
+ }
+ }
+ for id in trackers.samplers.used() {
+ if sampler_guard[id].life_guard.ref_count.is_none() {
+ self.temp_suspected.samplers.push(id);
+ }
+ }
+ for id in trackers.compute_pipes.used() {
+ if compute_pipe_guard[id].life_guard.ref_count.is_none() {
+ self.temp_suspected.compute_pipelines.push(id);
+ }
+ }
+ for id in trackers.render_pipes.used() {
+ if render_pipe_guard[id].life_guard.ref_count.is_none() {
+ self.temp_suspected.render_pipelines.push(id);
+ }
+ }
+ }
+
+ self.lock_life(&mut token)
+ .suspected_resources
+ .extend(&self.temp_suspected);
+ }
+
+ fn create_buffer(
+ &self,
+ self_id: id::DeviceId,
+ desc: &resource::BufferDescriptor,
+ transient: bool,
+ ) -> Result<resource::Buffer<B>, resource::CreateBufferError> {
+ debug_assert_eq!(self_id.backend(), B::VARIANT);
+ let (mut usage, _memory_properties) = conv::map_buffer_usage(desc.usage);
+ if desc.mapped_at_creation {
+ if desc.size % wgt::COPY_BUFFER_ALIGNMENT != 0 {
+ return Err(resource::CreateBufferError::UnalignedSize);
+ }
+ if !desc.usage.contains(wgt::BufferUsage::MAP_WRITE) {
+ // we will be copying into this buffer internally, so it needs TRANSFER_DST
+ usage |= hal::buffer::Usage::TRANSFER_DST;
+ }
+ }
+
+ let mem_usage = {
+ use gpu_alloc::UsageFlags as Uf;
+ use wgt::BufferUsage as Bu;
+
+ let mut flags = Uf::empty();
+ let map_flags = desc.usage & (Bu::MAP_READ | Bu::MAP_WRITE);
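+ // Any usage besides mapping implies GPU access, so ask for fast device-local memory.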
+ if !(desc.usage - map_flags).is_empty() {
+ flags |= Uf::FAST_DEVICE_ACCESS;
+ }
+ if transient {
+ flags |= Uf::TRANSIENT;
+ }
+
+ if !map_flags.is_empty() {
+ let upload_usage = Bu::MAP_WRITE | Bu::COPY_SRC;
+ let download_usage = Bu::MAP_READ | Bu::COPY_DST;
+
+ flags |= Uf::HOST_ACCESS;
+ if desc.usage.contains(upload_usage) {
+ flags |= Uf::UPLOAD;
+ }
+ if desc.usage.contains(download_usage) {
+ flags |= Uf::DOWNLOAD;
+ }
+
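+ // Without MAPPABLE_PRIMARY_BUFFERS, a mappable buffer may only be an upload
+ // buffer (MAP_WRITE | COPY_SRC) or a download buffer (MAP_READ | COPY_DST).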
+ let is_native_only = self
+ .features
+ .contains(wgt::Features::MAPPABLE_PRIMARY_BUFFERS);
+ if !is_native_only
+ && !upload_usage.contains(desc.usage)
+ && !download_usage.contains(desc.usage)
+ {
+ return Err(resource::CreateBufferError::UsageMismatch(desc.usage));
+ }
+ }
+
+ flags
+ };
+
+ let mut buffer = unsafe { self.raw.create_buffer(desc.size.max(1), usage) }.map_err(
+ |err| match err {
+ hal::buffer::CreationError::OutOfMemory(_) => DeviceError::OutOfMemory,
+ _ => panic!("failed to create buffer: {}", err),
+ },
+ )?;
+ if let Some(ref label) = desc.label {
+ unsafe { self.raw.set_buffer_name(&mut buffer, label) };
+ }
+
+ let requirements = unsafe { self.raw.get_buffer_requirements(&buffer) };
+ let block = self
+ .mem_allocator
+ .lock()
+ .allocate(&self.raw, requirements, mem_usage)?;
+ block.bind_buffer(&self.raw, &mut buffer)?;
+
+ Ok(resource::Buffer {
+ raw: Some((buffer, block)),
+ device_id: Stored {
+ value: id::Valid(self_id),
+ ref_count: self.life_guard.add_ref(),
+ },
+ usage: desc.usage,
+ size: desc.size,
+ full_range: (),
+ sync_mapped_writes: None,
+ map_state: resource::BufferMapState::Idle,
+ life_guard: LifeGuard::new(desc.label.borrow_or_default()),
+ })
+ }
+
+ fn create_texture(
+ &self,
+ self_id: id::DeviceId,
+ desc: &resource::TextureDescriptor,
+ ) -> Result<resource::Texture<B>, resource::CreateTextureError> {
+ debug_assert_eq!(self_id.backend(), B::VARIANT);
+
+ let features = conv::texture_features(desc.format);
+ if !self.features.contains(features) {
+ return Err(resource::CreateTextureError::MissingFeature(
+ features,
+ desc.format,
+ ));
+ }
+
+ // Ensure `D24Plus` textures cannot be copied
+ match desc.format {
+ TextureFormat::Depth24Plus | TextureFormat::Depth24PlusStencil8 => {
+ if desc
+ .usage
+ .intersects(wgt::TextureUsage::COPY_SRC | wgt::TextureUsage::COPY_DST)
+ {
+ return Err(resource::CreateTextureError::CannotCopyD24Plus);
+ }
+ }
+ _ => {}
+ }
+
+ let kind = conv::map_texture_dimension_size(desc.dimension, desc.size, desc.sample_count)?;
+ let format = conv::map_texture_format(desc.format, self.private_features);
+ let aspects = format.surface_desc().aspects;
+ let usage = conv::map_texture_usage(desc.usage, aspects);
+
+ let mip_level_count = desc.mip_level_count;
+ if mip_level_count == 0
+ || mip_level_count > MAX_MIP_LEVELS
+ || mip_level_count > kind.compute_num_levels() as u32
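+ // Trim empty trailing bind group layouts so the derived pipeline layout stays minimal.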
+ {
+ return Err(resource::CreateTextureError::InvalidMipLevelCount(
+ mip_level_count,
+ ));
+ }
+ let mut view_capabilities = hal::image::ViewCapabilities::empty();
+
+ // 2D textures with array layer counts that are multiples of 6 could be cubemaps.
+ // Following gpuweb/gpuweb#68, always add the hint in that case.
+ if desc.dimension == TextureDimension::D2 && desc.size.depth % 6 == 0 {
+ view_capabilities |= hal::image::ViewCapabilities::KIND_CUBE;
+ };
+
+ // TODO: 2D arrays, cubemap arrays
+
+ let mut image = unsafe {
+ let mut image = self
+ .raw
+ .create_image(
+ kind,
+ desc.mip_level_count as hal::image::Level,
+ format,
+ hal::image::Tiling::Optimal,
+ usage,
+ view_capabilities,
+ )
+ .map_err(|err| match err {
+ hal::image::CreationError::OutOfMemory(_) => DeviceError::OutOfMemory,
+ _ => panic!("failed to create texture: {}", err),
+ })?;
+ if let Some(ref label) = desc.label {
+ self.raw.set_image_name(&mut image, label);
+ }
+ image
+ };
+
+ let requirements = unsafe { self.raw.get_image_requirements(&image) };
+ let block = self.mem_allocator.lock().allocate(
+ &self.raw,
+ requirements,
+ gpu_alloc::UsageFlags::FAST_DEVICE_ACCESS,
+ )?;
+ block.bind_image(&self.raw, &mut image)?;
+
+ Ok(resource::Texture {
+ raw: Some((image, block)),
+ device_id: Stored {
+ value: id::Valid(self_id),
+ ref_count: self.life_guard.add_ref(),
+ },
+ usage: desc.usage,
+ aspects,
+ dimension: desc.dimension,
+ kind,
+ format: desc.format,
+ full_range: TextureSelector {
+ levels: 0..desc.mip_level_count as hal::image::Level,
+ layers: 0..kind.num_layers(),
+ },
+ life_guard: LifeGuard::new(desc.label.borrow_or_default()),
+ })
+ }
+
+ fn create_texture_view(
+ &self,
+ texture: &resource::Texture<B>,
+ texture_id: id::TextureId,
+ desc: &resource::TextureViewDescriptor,
+ ) -> Result<resource::TextureView<B>, resource::CreateTextureViewError> {
+ let &(ref texture_raw, _) = texture
+ .raw
+ .as_ref()
+ .ok_or(resource::CreateTextureViewError::InvalidTexture)?;
+
+ let view_dim =
+ match desc.dimension {
+ Some(dim) => {
+ use hal::image::Kind;
+
+ let required_tex_dim = dim.compatible_texture_dimension();
+
+ if required_tex_dim != texture.dimension {
+ return Err(
+ resource::CreateTextureViewError::InvalidTextureViewDimension {
+ view: dim,
+ image: texture.dimension,
+ },
+ );
+ }
+
+ if let Kind::D2(_, _, depth, _) = texture.kind {
+ match dim {
+ TextureViewDimension::Cube if depth != 6 => {
+ return Err(
+ resource::CreateTextureViewError::InvalidCubemapTextureDepth {
+ depth,
+ },
+ )
+ }
+ TextureViewDimension::CubeArray if depth % 6 != 0 => return Err(
+ resource::CreateTextureViewError::InvalidCubemapArrayTextureDepth {
+ depth,
+ },
+ ),
+ _ => {}
+ }
+ }
+
+ dim
+ }
+ None => match texture.kind {
+ hal::image::Kind::D1(..) => wgt::TextureViewDimension::D1,
+ hal::image::Kind::D2(_, _, depth, _)
+ if depth > 1 && desc.array_layer_count.is_none() =>
+ {
+ wgt::TextureViewDimension::D2Array
+ }
+ hal::image::Kind::D2(..) => wgt::TextureViewDimension::D2,
+ hal::image::Kind::D3(..) => wgt::TextureViewDimension::D3,
+ },
+ };
+
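+ // A missing count means at least one level/layer for these bounds checks;
+ // the actual view extends to the end of the texture (see `end_level`/`end_layer` below).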
+ let required_level_count =
+ desc.base_mip_level + desc.level_count.map_or(1, |count| count.get());
+ let required_layer_count =
+ desc.base_array_layer + desc.array_layer_count.map_or(1, |count| count.get());
+ let level_end = texture.full_range.levels.end;
+ let layer_end = texture.full_range.layers.end;
+ if required_level_count > level_end as u32 {
+ return Err(resource::CreateTextureViewError::TooManyMipLevels {
+ requested: required_level_count,
+ total: level_end,
+ });
+ }
+ if required_layer_count > layer_end as u32 {
+ return Err(resource::CreateTextureViewError::TooManyArrayLayers {
+ requested: required_layer_count,
+ total: layer_end,
+ });
+ };
+
+ let aspects = match desc.aspect {
+ wgt::TextureAspect::All => texture.aspects,
+ wgt::TextureAspect::DepthOnly => hal::format::Aspects::DEPTH,
+ wgt::TextureAspect::StencilOnly => hal::format::Aspects::STENCIL,
+ };
+ if !texture.aspects.contains(aspects) {
+ return Err(resource::CreateTextureViewError::InvalidAspect {
+ requested: aspects,
+ total: texture.aspects,
+ });
+ }
+
+ let end_level = desc
+ .level_count
+ .map_or(level_end, |_| required_level_count as u8);
+ let end_layer = desc
+ .array_layer_count
+ .map_or(layer_end, |_| required_layer_count as u16);
+ let selector = TextureSelector {
+ levels: desc.base_mip_level as u8..end_level,
+ layers: desc.base_array_layer as u16..end_layer,
+ };
+
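+ // The layer count is constrained by the view dimension: exactly 1 for
+ // D1/D2/D3, exactly 6 for Cube, and a multiple of 6 for CubeArray.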
+ let view_layer_count = (selector.layers.end - selector.layers.start) as u32;
+ let layer_check_ok = match view_dim {
+ wgt::TextureViewDimension::D1
+ | wgt::TextureViewDimension::D2
+ | wgt::TextureViewDimension::D3 => view_layer_count == 1,
+ wgt::TextureViewDimension::D2Array => true,
+ wgt::TextureViewDimension::Cube => view_layer_count == 6,
+ wgt::TextureViewDimension::CubeArray => view_layer_count % 6 == 0,
+ };
+ if !layer_check_ok {
+ return Err(resource::CreateTextureViewError::InvalidArrayLayerCount {
+ requested: view_layer_count,
+ dim: view_dim,
+ });
+ }
+
+ let format = desc.format.unwrap_or(texture.format);
+ let range = hal::image::SubresourceRange {
+ aspects,
+ level_start: desc.base_mip_level as _,
+ level_count: desc.level_count.map(|v| v.get() as _),
+ layer_start: desc.base_array_layer as _,
+ layer_count: desc.array_layer_count.map(|v| v.get() as _),
+ };
+
+ let raw = unsafe {
+ self.raw
+ .create_image_view(
+ texture_raw,
+ conv::map_texture_view_dimension(view_dim),
+ conv::map_texture_format(format, self.private_features),
+ hal::format::Swizzle::NO,
+ range.clone(),
+ )
+ .or(Err(resource::CreateTextureViewError::OutOfMemory))?
+ };
+
+ Ok(resource::TextureView {
+ inner: resource::TextureViewInner::Native {
+ raw,
+ source_id: Stored {
+ value: id::Valid(texture_id),
+ ref_count: texture.life_guard.add_ref(),
+ },
+ },
+ aspects,
+ format: texture.format,
+ extent: texture.kind.extent().at_level(desc.base_mip_level as _),
+ samples: texture.kind.num_samples(),
+ selector,
+ life_guard: LifeGuard::new(desc.label.borrow_or_default()),
+ })
+ }
+
+ fn create_sampler(
+ &self,
+ self_id: id::DeviceId,
+ desc: &resource::SamplerDescriptor,
+ ) -> Result<resource::Sampler<B>, resource::CreateSamplerError> {
+ let clamp_to_border_enabled = self
+ .features
+ .contains(wgt::Features::ADDRESS_MODE_CLAMP_TO_BORDER);
+ let clamp_to_border_found = desc
+ .address_modes
+ .iter()
+ .any(|am| am == &wgt::AddressMode::ClampToBorder);
+ if clamp_to_border_found && !clamp_to_border_enabled {
+ return Err(resource::CreateSamplerError::MissingFeature(
+ wgt::Features::ADDRESS_MODE_CLAMP_TO_BORDER,
+ ));
+ }
+
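+ // The anisotropy clamp must be a power of two no larger than MAX_ANISOTROPY;
+ // drop it silently when the backend lacks anisotropic filtering.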
+ let actual_clamp = if let Some(clamp) = desc.anisotropy_clamp {
+ let clamp = clamp.get();
+ let valid_clamp = clamp <= MAX_ANISOTROPY && conv::is_power_of_two(clamp as u32);
+ if !valid_clamp {
+ return Err(resource::CreateSamplerError::InvalidClamp(clamp));
+ }
+ if self.private_features.anisotropic_filtering {
+ Some(clamp)
+ } else {
+ None
+ }
+ } else {
+ None
+ };
+
+ let border = match desc.border_color {
+ None | Some(wgt::SamplerBorderColor::TransparentBlack) => {
+ hal::image::BorderColor::TransparentBlack
+ }
+ Some(wgt::SamplerBorderColor::OpaqueBlack) => hal::image::BorderColor::OpaqueBlack,
+ Some(wgt::SamplerBorderColor::OpaqueWhite) => hal::image::BorderColor::OpaqueWhite,
+ };
+
+ let info = hal::image::SamplerDesc {
+ min_filter: conv::map_filter(desc.min_filter),
+ mag_filter: conv::map_filter(desc.mag_filter),
+ mip_filter: conv::map_filter(desc.mipmap_filter),
+ wrap_mode: (
+ conv::map_wrap(desc.address_modes[0]),
+ conv::map_wrap(desc.address_modes[1]),
+ conv::map_wrap(desc.address_modes[2]),
+ ),
+ lod_bias: hal::image::Lod(0.0),
+ lod_range: hal::image::Lod(desc.lod_min_clamp)..hal::image::Lod(desc.lod_max_clamp),
+ comparison: desc.compare.map(conv::map_compare_function),
+ border,
+ normalized: true,
+ anisotropy_clamp: actual_clamp,
+ };
+
+ let raw = unsafe {
+ self.raw.create_sampler(&info).map_err(|err| match err {
+ hal::device::AllocationError::OutOfMemory(_) => {
+ resource::CreateSamplerError::Device(DeviceError::OutOfMemory)
+ }
+ hal::device::AllocationError::TooManyObjects => {
+ resource::CreateSamplerError::TooManyObjects
+ }
+ })?
+ };
+ Ok(resource::Sampler {
+ raw,
+ device_id: Stored {
+ value: id::Valid(self_id),
+ ref_count: self.life_guard.add_ref(),
+ },
+ life_guard: LifeGuard::new(desc.label.borrow_or_default()),
+ comparison: info.comparison.is_some(),
+ })
+ }
+
+ fn create_shader_module<'a>(
+ &self,
+ self_id: id::DeviceId,
+ desc: &'a pipeline::ShaderModuleDescriptor<'a>,
+ ) -> Result<(pipeline::ShaderModule<B>, Cow<'a, [u32]>), pipeline::CreateShaderModuleError>
+ {
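+ // Emit SPIR-V debug information only in debug builds.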
+ let spv_flags = if cfg!(debug_assertions) {
+ naga::back::spv::WriterFlags::DEBUG
+ } else {
+ naga::back::spv::WriterFlags::empty()
+ };
+
+ let (spv, naga) = match desc.source {
+ pipeline::ShaderModuleSource::SpirV(ref spv) => {
+ let module = if self.private_features.shader_validation {
+ // Parse the given shader code and store its representation.
+ let spv_iter = spv.iter().cloned();
+ naga::front::spv::Parser::new(spv_iter, &Default::default())
+ .parse()
+ .map_err(|err| {
+ // TODO: eventually, when Naga gets support for all features,
+ // we want to convert these warnings into a hard error.
+ tracing::warn!("Failed to parse shader SPIR-V code: {:?}", err);
+ tracing::warn!("Shader module will not be validated");
+ })
+ .ok()
+ } else {
+ None
+ };
+ (Cow::Borrowed(&**spv), module)
+ }
+ pipeline::ShaderModuleSource::Wgsl(ref code) => {
+ // TODO: refactor the corresponding Naga error to be owned, and then
+ // display it instead of unwrapping
+ let module = naga::front::wgsl::parse_str(code).unwrap();
+ let spv = naga::back::spv::Writer::new(&module.header, spv_flags).write(&module);
+ (
+ Cow::Owned(spv),
+ if self.private_features.shader_validation {
+ Some(module)
+ } else {
+ None
+ },
+ )
+ } /*
+ pipeline::ShaderModuleSource::Naga(module) => {
+ let spv = naga::back::spv::Writer::new(&module.header, spv_flags).write(&module);
+ (
+ Cow::Owned(spv),
+ if device.private_features.shader_validation {
+ Some(module)
+ } else {
+ None
+ },
+ )
+ }*/
+ };
+
+ if let Some(ref module) = naga {
+ naga::proc::Validator::new().validate(module)?;
+ }
+
+ let raw = unsafe {
+ self.raw
+ .create_shader_module(&spv)
+ .map_err(|err| match err {
+ hal::device::ShaderError::OutOfMemory(_) => DeviceError::OutOfMemory,
+ _ => panic!("failed to create shader module: {}", err),
+ })?
+ };
+ let shader = pipeline::ShaderModule {
+ raw,
+ device_id: Stored {
+ value: id::Valid(self_id),
+ ref_count: self.life_guard.add_ref(),
+ },
+ module: naga,
+ #[cfg(debug_assertions)]
+ label: desc.label.to_string_or_default(),
+ };
+ Ok((shader, spv))
+ }
+
+ /// Create a compatible render pass with a given key.
+ ///
+ /// This function doesn't consider the following aspects for compatibility:
+ /// - image layouts
+ /// - resolve attachments
+ fn create_compatible_render_pass(
+ &self,
+ key: &RenderPassKey,
+ ) -> Result<B::RenderPass, hal::device::OutOfMemory> {
+ let mut color_ids = [(0, hal::image::Layout::ColorAttachmentOptimal); MAX_COLOR_TARGETS];
+ for i in 0..key.colors.len() {
+ color_ids[i].0 = i;
+ }
+ let depth_id = key.depth_stencil.as_ref().map(|_| {
+ (
+ key.colors.len(),
+ hal::image::Layout::DepthStencilAttachmentOptimal,
+ )
+ });
+
+ let subpass = hal::pass::SubpassDesc {
+ colors: &color_ids[..key.colors.len()],
+ depth_stencil: depth_id.as_ref(),
+ inputs: &[],
+ resolves: &[],
+ preserves: &[],
+ };
+ let all = key
+ .all()
+ .map(|(at, _)| at)
+ .collect::<AttachmentDataVec<_>>();
+
+ unsafe { self.raw.create_render_pass(all, iter::once(subpass), &[]) }
+ }
+
+ fn deduplicate_bind_group_layout(
+ self_id: id::DeviceId,
+ entry_map: &binding_model::BindEntryMap,
+ guard: &Storage<binding_model::BindGroupLayout<B>, id::BindGroupLayoutId>,
+ ) -> Option<id::BindGroupLayoutId> {
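+ // Reuse an existing bind group layout on the same device with an identical
+ // entry map, bumping its multi-ref count instead of creating a duplicate.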
+ guard
+ .iter(self_id.backend())
+ .find(|(_, bgl)| bgl.device_id.value.0 == self_id && bgl.entries == *entry_map)
+ .map(|(id, value)| {
+ value.multi_ref_count.inc();
+ id
+ })
+ }
+
+ fn get_introspection_bind_group_layouts<'a>(
+ pipeline_layout: &binding_model::PipelineLayout<B>,
+ bgl_guard: &'a Storage<binding_model::BindGroupLayout<B>, id::BindGroupLayoutId>,
+ ) -> validation::IntrospectionBindGroupLayouts<'a> {
+ validation::IntrospectionBindGroupLayouts::Given(
+ pipeline_layout
+ .bind_group_layout_ids
+ .iter()
+ .map(|&id| &bgl_guard[id].entries)
+ .collect(),
+ )
+ }
+
+ fn create_bind_group_layout(
+ &self,
+ self_id: id::DeviceId,
+ label: Option<&str>,
+ entry_map: binding_model::BindEntryMap,
+ ) -> Result<binding_model::BindGroupLayout<B>, binding_model::CreateBindGroupLayoutError> {
+ let mut desc_count = descriptor::DescriptorTotalCount::default();
+ for binding in entry_map.values() {
+ use wgt::BindingType as Bt;
+ let (counter, array_feature) = match binding.ty {
+ Bt::Buffer {
+ ty: wgt::BufferBindingType::Uniform,
+ has_dynamic_offset: false,
+ min_binding_size: _,
+ } => (&mut desc_count.uniform_buffer, None),
+ Bt::Buffer {
+ ty: wgt::BufferBindingType::Uniform,
+ has_dynamic_offset: true,
+ min_binding_size: _,
+ } => (&mut desc_count.uniform_buffer_dynamic, None),
+ Bt::Buffer {
+ ty: wgt::BufferBindingType::Storage { .. },
+ has_dynamic_offset: false,
+ min_binding_size: _,
+ } => (&mut desc_count.storage_buffer, None),
+ Bt::Buffer {
+ ty: wgt::BufferBindingType::Storage { .. },
+ has_dynamic_offset: true,
+ min_binding_size: _,
+ } => (&mut desc_count.storage_buffer_dynamic, None),
+ Bt::Sampler { .. } => (&mut desc_count.sampler, None),
+ Bt::Texture { .. } => (
+ &mut desc_count.sampled_image,
+ Some(wgt::Features::SAMPLED_TEXTURE_BINDING_ARRAY),
+ ),
+ Bt::StorageTexture { .. } => (&mut desc_count.storage_image, None),
+ };
+ *counter += match binding.count {
+ // Validate the count parameter
+ Some(count) => {
+ let feature = array_feature
+ .ok_or(binding_model::CreateBindGroupLayoutError::ArrayUnsupported)?;
+ if !self.features.contains(feature) {
+ return Err(binding_model::CreateBindGroupLayoutError::MissingFeature(
+ feature,
+ ));
+ }
+ count.get()
+ }
+ None => 1,
+ };
+ }
+
+ let raw_bindings = entry_map
+ .values()
+ .map(|entry| hal::pso::DescriptorSetLayoutBinding {
+ binding: entry.binding,
+ ty: conv::map_binding_type(entry),
+ count: entry
+ .count
+ .map_or(1, |v| v.get() as hal::pso::DescriptorArrayIndex), //TODO: consolidate
+ stage_flags: conv::map_shader_stage_flags(entry.visibility),
+ immutable_samplers: false, // TODO
+ });
+ let raw = unsafe {
+ let mut raw_layout = self
+ .raw
+ .create_descriptor_set_layout(raw_bindings, &[])
+ .or(Err(DeviceError::OutOfMemory))?;
+ if let Some(label) = label {
+ self.raw
+ .set_descriptor_set_layout_name(&mut raw_layout, label);
+ }
+ raw_layout
+ };
+
+ let mut count_validator = binding_model::BindingTypeMaxCountValidator::default();
+ for entry in entry_map.values() {
+ count_validator.add_binding(entry);
+ }
+ // If a single bind group layout violates limits, the pipeline layout is definitely
+ // going to violate limits too, so let's catch it now.
+ count_validator
+ .validate(&self.limits)
+ .map_err(binding_model::CreateBindGroupLayoutError::TooManyBindings)?;
+
+ Ok(binding_model::BindGroupLayout {
+ raw,
+ device_id: Stored {
+ value: id::Valid(self_id),
+ ref_count: self.life_guard.add_ref(),
+ },
+ multi_ref_count: MultiRefCount::new(),
+ desc_count,
+ dynamic_count: entry_map
+ .values()
+ .filter(|b| b.ty.has_dynamic_offset())
+ .count(),
+ count_validator,
+ entries: entry_map,
+ #[cfg(debug_assertions)]
+ label: label.unwrap_or("").to_string(),
+ })
+ }
+
+ fn create_bind_group<G: GlobalIdentityHandlerFactory>(
+ &self,
+ self_id: id::DeviceId,
+ layout: &binding_model::BindGroupLayout<B>,
+ desc: &binding_model::BindGroupDescriptor,
+ hub: &Hub<B, G>,
+ token: &mut Token<binding_model::BindGroupLayout<B>>,
+ ) -> Result<binding_model::BindGroup<B>, binding_model::CreateBindGroupError> {
+ use crate::binding_model::{BindingResource as Br, CreateBindGroupError as Error};
+ {
+ // Check that the number of entries in the descriptor matches
+ // the number of entries in the layout.
+ let actual = desc.entries.len();
+ let expected = layout.entries.len();
+ if actual != expected {
+ return Err(Error::BindingsNumMismatch { expected, actual });
+ }
+ }
+
+ // TODO: arrayvec/smallvec
+ // Record binding info for dynamic offset validation
+ let mut dynamic_binding_info = Vec::new();
+ // fill out the descriptors
+ let mut used = TrackerSet::new(B::VARIANT);
+
+ let (buffer_guard, mut token) = hub.buffers.read(token);
+ let (texture_guard, mut token) = hub.textures.read(&mut token); //skip token
+ let (texture_view_guard, mut token) = hub.texture_views.read(&mut token);
+ let (sampler_guard, _) = hub.samplers.read(&mut token);
+
+ // `BTreeMap` has ordered bindings as keys, which allows us to coalesce
+ // the descriptor writes into a single transaction.
+ let mut write_map = BTreeMap::new();
+ for entry in desc.entries.iter() {
+ let binding = entry.binding;
+ // Find the corresponding declaration in the layout
+ let decl = layout
+ .entries
+ .get(&binding)
+ .ok_or(Error::MissingBindingDeclaration(binding))?;
+ let descriptors: SmallVec<[_; 1]> = match entry.resource {
+ Br::Buffer(ref bb) => {
+ let (binding_ty, dynamic, min_size) = match decl.ty {
+ wgt::BindingType::Buffer {
+ ty,
+ has_dynamic_offset,
+ min_binding_size,
+ } => (ty, has_dynamic_offset, min_binding_size),
+ _ => {
+ return Err(Error::WrongBindingType {
+ binding,
+ actual: decl.ty.clone(),
+ expected: "UniformBuffer, StorageBuffer or ReadonlyStorageBuffer",
+ })
+ }
+ };
+ let (pub_usage, internal_use) = match binding_ty {
+ wgt::BufferBindingType::Uniform => {
+ (wgt::BufferUsage::UNIFORM, resource::BufferUse::UNIFORM)
+ }
+ wgt::BufferBindingType::Storage { read_only } => (
+ wgt::BufferUsage::STORAGE,
+ if read_only {
+ resource::BufferUse::STORAGE_LOAD
+ } else {
+ resource::BufferUse::STORAGE_STORE
+ },
+ ),
+ };
+
+ if bb.offset % wgt::BIND_BUFFER_ALIGNMENT != 0 {
+ return Err(Error::UnalignedBufferOffset(bb.offset));
+ }
+
+ let buffer = used
+ .buffers
+ .use_extend(&*buffer_guard, bb.buffer_id, (), internal_use)
+ .map_err(|_| Error::InvalidBuffer(bb.buffer_id))?;
+ check_buffer_usage(buffer.usage, pub_usage)?;
+ let &(ref buffer_raw, _) = buffer
+ .raw
+ .as_ref()
+ .ok_or(Error::InvalidBuffer(bb.buffer_id))?;
+
+ let (bind_size, bind_end) = match bb.size {
+ Some(size) => {
+ let end = bb.offset + size.get();
+ if end > buffer.size {
+ return Err(Error::BindingRangeTooLarge {
+ range: bb.offset..end,
+ size: buffer.size,
+ });
+ }
+ (size.get(), end)
+ }
+ None => (buffer.size - bb.offset, buffer.size),
+ };
+
+ if binding_ty == wgt::BufferBindingType::Uniform
+ && (self.limits.max_uniform_buffer_binding_size as u64) < bind_size
+ {
+ return Err(Error::UniformBufferRangeTooLarge);
+ }
+
+ // Record binding info for validating dynamic offsets
+ if dynamic {
+ dynamic_binding_info.push(binding_model::BindGroupDynamicBindingData {
+ maximum_dynamic_offset: buffer.size - bind_end,
+ });
+ }
+
+ if let Some(non_zero) = min_size {
+ let min_size = non_zero.get();
+ if min_size > bind_size {
+ return Err(Error::BindingSizeTooSmall {
+ actual: bind_size,
+ min: min_size,
+ });
+ }
+ }
+
+ let sub_range = hal::buffer::SubRange {
+ offset: bb.offset,
+ size: Some(bind_size),
+ };
+ SmallVec::from([hal::pso::Descriptor::Buffer(buffer_raw, sub_range)])
+ }
+ Br::Sampler(id) => {
+ match decl.ty {
+ wgt::BindingType::Sampler {
+ filtering: _,
+ comparison,
+ } => {
+ let sampler = used
+ .samplers
+ .use_extend(&*sampler_guard, id, (), ())
+ .map_err(|_| Error::InvalidSampler(id))?;
+
+ // Check that the sampler's comparison mode matches what the layout declares
+ if sampler.comparison != comparison {
+ return Err(Error::WrongSamplerComparison);
+ }
+
+ SmallVec::from([hal::pso::Descriptor::Sampler(&sampler.raw)])
+ }
+ _ => {
+ return Err(Error::WrongBindingType {
+ binding,
+ actual: decl.ty.clone(),
+ expected: "Sampler",
+ })
+ }
+ }
+ }
+ Br::TextureView(id) => {
+ let view = used
+ .views
+ .use_extend(&*texture_view_guard, id, (), ())
+ .map_err(|_| Error::InvalidTextureView(id))?;
+ let (pub_usage, internal_use) = match decl.ty {
+ wgt::BindingType::Texture { .. } => {
+ (wgt::TextureUsage::SAMPLED, resource::TextureUse::SAMPLED)
+ }
+ wgt::BindingType::StorageTexture { access, .. } => (
+ wgt::TextureUsage::STORAGE,
+ match access {
+ wgt::StorageTextureAccess::ReadOnly => {
+ resource::TextureUse::STORAGE_LOAD
+ }
+ wgt::StorageTextureAccess::WriteOnly => {
+ resource::TextureUse::STORAGE_STORE
+ }
+ },
+ ),
+ _ => return Err(Error::WrongBindingType {
+ binding,
+ actual: decl.ty.clone(),
+ expected:
+ "SampledTexture, ReadonlyStorageTexture or WriteonlyStorageTexture",
+ }),
+ };
+ if view
+ .aspects
+ .contains(hal::format::Aspects::DEPTH | hal::format::Aspects::STENCIL)
+ {
+ return Err(Error::DepthStencilAspect);
+ }
+ match view.inner {
+ resource::TextureViewInner::Native {
+ ref raw,
+ ref source_id,
+ } => {
+ // Careful here: the texture may no longer have its own ref count,
+ // if it was deleted by the user.
+ let texture = &texture_guard[source_id.value];
+ used.textures
+ .change_extend(
+ source_id.value,
+ &source_id.ref_count,
+ view.selector.clone(),
+ internal_use,
+ )
+ .unwrap();
+ check_texture_usage(texture.usage, pub_usage)?;
+ let image_layout =
+ conv::map_texture_state(internal_use, view.aspects).1;
+ SmallVec::from([hal::pso::Descriptor::Image(raw, image_layout)])
+ }
+ resource::TextureViewInner::SwapChain { .. } => {
+ return Err(Error::SwapChainImage);
+ }
+ }
+ }
+ Br::TextureViewArray(ref bindings_array) => {
+ let required_feats = wgt::Features::SAMPLED_TEXTURE_BINDING_ARRAY;
+ if !self.features.contains(required_feats) {
+ return Err(Error::MissingFeatures(required_feats));
+ }
+
+ if let Some(count) = decl.count {
+ let count = count.get() as usize;
+ let num_bindings = bindings_array.len();
+ if count != num_bindings {
+ return Err(Error::BindingArrayLengthMismatch {
+ actual: num_bindings,
+ expected: count,
+ });
+ }
+ } else {
+ return Err(Error::SingleBindingExpected);
+ }
+
+ let (pub_usage, internal_use) = match decl.ty {
+ wgt::BindingType::Texture { .. } => {
+ (wgt::TextureUsage::SAMPLED, resource::TextureUse::SAMPLED)
+ }
+ _ => {
+ return Err(Error::WrongBindingType {
+ binding,
+ actual: decl.ty.clone(),
+ expected: "SampledTextureArray",
+ })
+ }
+ };
+ bindings_array
+ .iter()
+ .map(|&id| {
+ let view = used
+ .views
+ .use_extend(&*texture_view_guard, id, (), ())
+ .map_err(|_| Error::InvalidTextureView(id))?;
+ match view.inner {
+ resource::TextureViewInner::Native {
+ ref raw,
+ ref source_id,
+ } => {
+ // Careful here: the texture may no longer have its own ref count,
+ // if it was deleted by the user.
+ let texture = &texture_guard[source_id.value];
+ used.textures
+ .change_extend(
+ source_id.value,
+ &source_id.ref_count,
+ view.selector.clone(),
+ internal_use,
+ )
+ .unwrap();
+ check_texture_usage(texture.usage, pub_usage)?;
+ let image_layout =
+ conv::map_texture_state(internal_use, view.aspects).1;
+ Ok(hal::pso::Descriptor::Image(raw, image_layout))
+ }
+ resource::TextureViewInner::SwapChain { .. } => {
+ Err(Error::SwapChainImage)
+ }
+ }
+ })
+ .collect::<Result<_, _>>()?
+ }
+ };
+ if write_map.insert(binding, descriptors).is_some() {
+ return Err(Error::DuplicateBinding(binding));
+ }
+ }
+
+ let mut desc_sets =
+ self.desc_allocator
+ .lock()
+ .allocate(&self.raw, &layout.raw, &layout.desc_count, 1)?;
+ let mut desc_set = desc_sets.pop().unwrap();
+
+ // Set the descriptor set's label for easier debugging.
+ if let Some(label) = desc.label.as_ref() {
+ unsafe {
+ self.raw.set_descriptor_set_name(desc_set.raw_mut(), &label);
+ }
+ }
+
+ if let Some(start_binding) = write_map.keys().next().cloned() {
+ let descriptors = write_map
+ .into_iter()
+ .flat_map(|(_, list)| list)
+ .collect::<Vec<_>>();
+ let write = hal::pso::DescriptorSetWrite {
+ set: desc_set.raw(),
+ binding: start_binding,
+ array_offset: 0,
+ descriptors,
+ };
+ unsafe {
+ self.raw.write_descriptor_sets(iter::once(write));
+ }
+ }
+
+ Ok(binding_model::BindGroup {
+ raw: desc_set,
+ device_id: Stored {
+ value: id::Valid(self_id),
+ ref_count: self.life_guard.add_ref(),
+ },
+ layout_id: id::Valid(desc.layout),
+ life_guard: LifeGuard::new(desc.label.borrow_or_default()),
+ used,
+ dynamic_binding_info,
+ })
+ }
+
+ fn create_pipeline_layout(
+ &self,
+ self_id: id::DeviceId,
+ desc: &binding_model::PipelineLayoutDescriptor,
+ bgl_guard: &Storage<binding_model::BindGroupLayout<B>, id::BindGroupLayoutId>,
+ ) -> Result<binding_model::PipelineLayout<B>, binding_model::CreatePipelineLayoutError> {
+ use crate::binding_model::CreatePipelineLayoutError as Error;
+
+ let bind_group_layouts_count = desc.bind_group_layouts.len();
+ let device_max_bind_groups = self.limits.max_bind_groups as usize;
+ if bind_group_layouts_count > device_max_bind_groups {
+ return Err(Error::TooManyGroups {
+ actual: bind_group_layouts_count,
+ max: device_max_bind_groups,
+ });
+ }
+
+ if !desc.push_constant_ranges.is_empty()
+ && !self.features.contains(wgt::Features::PUSH_CONSTANTS)
+ {
+ return Err(Error::MissingFeature(wgt::Features::PUSH_CONSTANTS));
+ }
+ let mut used_stages = wgt::ShaderStage::empty();
+ for (index, pc) in desc.push_constant_ranges.iter().enumerate() {
+ if pc.stages.intersects(used_stages) {
+ return Err(Error::MoreThanOnePushConstantRangePerStage {
+ index,
+ provided: pc.stages,
+ intersected: pc.stages & used_stages,
+ });
+ }
+ used_stages |= pc.stages;
+
+ let device_max_pc_size = self.limits.max_push_constant_size;
+ if device_max_pc_size < pc.range.end {
+ return Err(Error::PushConstantRangeTooLarge {
+ index,
+ range: pc.range.clone(),
+ max: device_max_pc_size,
+ });
+ }
+
+ if pc.range.start % wgt::PUSH_CONSTANT_ALIGNMENT != 0 {
+ return Err(Error::MisalignedPushConstantRange {
+ index,
+ bound: pc.range.start,
+ });
+ }
+ if pc.range.end % wgt::PUSH_CONSTANT_ALIGNMENT != 0 {
+ return Err(Error::MisalignedPushConstantRange {
+ index,
+ bound: pc.range.end,
+ });
+ }
+ }
+
+ let mut count_validator = binding_model::BindingTypeMaxCountValidator::default();
+
+ // validate total resource counts
+ for &id in desc.bind_group_layouts.iter() {
+ let bind_group_layout = bgl_guard
+ .get(id)
+ .map_err(|_| Error::InvalidBindGroupLayout(id))?;
+ count_validator.merge(&bind_group_layout.count_validator);
+ }
+ count_validator
+ .validate(&self.limits)
+ .map_err(Error::TooManyBindings)?;
+
+ let descriptor_set_layouts = desc
+ .bind_group_layouts
+ .iter()
+ .map(|&id| &bgl_guard.get(id).unwrap().raw);
+ let push_constants = desc
+ .push_constant_ranges
+ .iter()
+ .map(|pc| (conv::map_shader_stage_flags(pc.stages), pc.range.clone()));
+
+ let raw = unsafe {
+ let raw_layout = self
+ .raw
+ .create_pipeline_layout(descriptor_set_layouts, push_constants)
+ .or(Err(DeviceError::OutOfMemory))?;
+ if desc.label.is_some() {
+ //TODO-0.6: needs gfx changes published
+ //self.raw.set_pipeline_layout_name(&mut raw_layout, label);
+ }
+ raw_layout
+ };
+
+ Ok(binding_model::PipelineLayout {
+ raw,
+ device_id: Stored {
+ value: id::Valid(self_id),
+ ref_count: self.life_guard.add_ref(),
+ },
+ life_guard: LifeGuard::new(desc.label.borrow_or_default()),
+ bind_group_layout_ids: desc
+ .bind_group_layouts
+ .iter()
+ .map(|&id| {
+ bgl_guard.get(id).unwrap().multi_ref_count.inc();
+ id::Valid(id)
+ })
+ .collect(),
+ push_constant_ranges: desc.push_constant_ranges.iter().cloned().collect(),
+ })
+ }
+
+ //TODO: refactor this. It's the only method of `Device` that registers new objects
+ // (the pipeline layout).
+ fn derive_pipeline_layout<G: GlobalIdentityHandlerFactory>(
+ &self,
+ self_id: id::DeviceId,
+ implicit_pipeline_ids: Option<ImplicitPipelineIds<G>>,
+ mut derived_group_layouts: ArrayVec<[binding_model::BindEntryMap; MAX_BIND_GROUPS]>,
+ bgl_guard: &mut Storage<binding_model::BindGroupLayout<B>, id::BindGroupLayoutId>,
+ pipeline_layout_guard: &mut Storage<binding_model::PipelineLayout<B>, id::PipelineLayoutId>,
+ hub: &Hub<B, G>,
+ ) -> Result<
+ (id::PipelineLayoutId, pipeline::ImplicitBindGroupCount),
+ pipeline::ImplicitLayoutError,
+ > {
+ let derived_bind_group_count =
+ derived_group_layouts.len() as pipeline::ImplicitBindGroupCount;
+
+ while derived_group_layouts
+ .last()
+ .map_or(false, |map| map.is_empty())
+ {
+ derived_group_layouts.pop();
+ }
+ let ids = implicit_pipeline_ids
+ .as_ref()
+ .ok_or(pipeline::ImplicitLayoutError::MissingIds(0))?;
+ if ids.group_ids.len() < derived_group_layouts.len() {
+ tracing::error!(
+ "Not enough bind group IDs ({}) specified for the implicit layout ({})",
+ ids.group_ids.len(),
+ derived_group_layouts.len()
+ );
+ return Err(pipeline::ImplicitLayoutError::MissingIds(
+ derived_bind_group_count,
+ ));
+ }
+
+ let mut derived_group_layout_ids =
+ ArrayVec::<[id::BindGroupLayoutId; MAX_BIND_GROUPS]>::new();
+ for (bgl_id, map) in ids.group_ids.iter().zip(derived_group_layouts) {
+ let processed_id = match Device::deduplicate_bind_group_layout(self_id, &map, bgl_guard)
+ {
+ Some(dedup_id) => dedup_id,
+ None => {
+ #[cfg(feature = "trace")]
+ let bgl_desc = binding_model::BindGroupLayoutDescriptor {
+ label: None,
+ entries: if self.trace.is_some() {
+ Cow::Owned(map.values().cloned().collect())
+ } else {
+ Cow::Borrowed(&[])
+ },
+ };
+ let bgl = self.create_bind_group_layout(self_id, None, map)?;
+ let out_id = hub.bind_group_layouts.register_identity_locked(
+ bgl_id.clone(),
+ bgl,
+ bgl_guard,
+ );
+ #[cfg(feature = "trace")]
+ if let Some(ref trace) = self.trace {
+ trace
+ .lock()
+ .add(trace::Action::CreateBindGroupLayout(out_id.0, bgl_desc));
+ }
+ out_id.0
+ }
+ };
+ derived_group_layout_ids.push(processed_id);
+ }
+
+ let layout_desc = binding_model::PipelineLayoutDescriptor {
+ label: None,
+ bind_group_layouts: Cow::Borrowed(&derived_group_layout_ids),
+ push_constant_ranges: Cow::Borrowed(&[]), //TODO?
+ };
+ let layout = self.create_pipeline_layout(self_id, &layout_desc, bgl_guard)?;
+ let layout_id = hub.pipeline_layouts.register_identity_locked(
+ ids.root_id.clone(),
+ layout,
+ pipeline_layout_guard,
+ );
+ #[cfg(feature = "trace")]
+ if let Some(ref trace) = self.trace {
+ trace.lock().add(trace::Action::CreatePipelineLayout(
+ layout_id.0,
+ layout_desc,
+ ));
+ }
+ Ok((layout_id.0, derived_bind_group_count))
+ }
+
+ fn create_compute_pipeline<G: GlobalIdentityHandlerFactory>(
+ &self,
+ self_id: id::DeviceId,
+ desc: &pipeline::ComputePipelineDescriptor,
+ implicit_pipeline_ids: Option<ImplicitPipelineIds<G>>,
+ hub: &Hub<B, G>,
+ token: &mut Token<Self>,
+ ) -> Result<
+ (
+ pipeline::ComputePipeline<B>,
+ pipeline::ImplicitBindGroupCount,
+ id::PipelineLayoutId,
+ ),
+ pipeline::CreateComputePipelineError,
+ > {
+ //TODO: only lock mutable if the layout is derived
+ let (mut pipeline_layout_guard, mut token) = hub.pipeline_layouts.write(token);
+ let (mut bgl_guard, mut token) = hub.bind_group_layouts.write(&mut token);
+
+ let mut derived_group_layouts =
+ ArrayVec::<[binding_model::BindEntryMap; MAX_BIND_GROUPS]>::new();
+
+ let interface = validation::StageInterface::default();
+ let pipeline_stage = &desc.compute_stage;
+ let (shader_module_guard, _) = hub.shader_modules.read(&mut token);
+
+ let entry_point_name = &pipeline_stage.entry_point;
+ let shader_module = shader_module_guard
+ .get(pipeline_stage.module)
+ .map_err(|_| {
+ pipeline::CreateComputePipelineError::Stage(validation::StageError::InvalidModule)
+ })?;
+
+ let flag = wgt::ShaderStage::COMPUTE;
+ if let Some(ref module) = shader_module.module {
+ let group_layouts = match desc.layout {
+ Some(pipeline_layout_id) => Device::get_introspection_bind_group_layouts(
+ pipeline_layout_guard
+ .get(pipeline_layout_id)
+ .map_err(|_| pipeline::CreateComputePipelineError::InvalidLayout)?,
+ &*bgl_guard,
+ ),
+ None => {
+ for _ in 0..self.limits.max_bind_groups {
+ derived_group_layouts.push(binding_model::BindEntryMap::default());
+ }
+ validation::IntrospectionBindGroupLayouts::Derived(&mut derived_group_layouts)
+ }
+ };
+ let _ =
+ validation::check_stage(module, group_layouts, &entry_point_name, flag, interface)
+ .map_err(pipeline::CreateComputePipelineError::Stage)?;
+ } else if desc.layout.is_none() {
+ return Err(pipeline::ImplicitLayoutError::ReflectionError(flag).into());
+ }
+
+ let shader = hal::pso::EntryPoint::<B> {
+ entry: &entry_point_name, // TODO
+ module: &shader_module.raw,
+ specialization: hal::pso::Specialization::EMPTY,
+ };
+
+ // TODO
+ let flags = hal::pso::PipelineCreationFlags::empty();
+ // TODO
+ let parent = hal::pso::BasePipeline::None;
+
+ let (pipeline_layout_id, derived_bind_group_count) = match desc.layout {
+ Some(id) => (id, 0),
+ None => self.derive_pipeline_layout(
+ self_id,
+ implicit_pipeline_ids,
+ derived_group_layouts,
+ &mut *bgl_guard,
+ &mut *pipeline_layout_guard,
+ &hub,
+ )?,
+ };
+ let layout = pipeline_layout_guard
+ .get(pipeline_layout_id)
+ .map_err(|_| pipeline::CreateComputePipelineError::InvalidLayout)?;
+
+ let pipeline_desc = hal::pso::ComputePipelineDesc {
+ shader,
+ layout: &layout.raw,
+ flags,
+ parent,
+ };
+
+ let raw = match unsafe { self.raw.create_compute_pipeline(&pipeline_desc, None) } {
+ Ok(pipeline) => pipeline,
+ Err(hal::pso::CreationError::OutOfMemory(_)) => {
+ return Err(pipeline::CreateComputePipelineError::Device(
+ DeviceError::OutOfMemory,
+ ))
+ }
+ other => panic!("Compute pipeline creation error: {:?}", other),
+ };
+ if desc.label.is_some() {
+ //TODO-0.6: self.raw.set_compute_pipeline_name(&mut raw, label);
+ }
+
+ let pipeline = pipeline::ComputePipeline {
+ raw,
+ layout_id: Stored {
+ value: id::Valid(pipeline_layout_id),
+ ref_count: layout.life_guard.add_ref(),
+ },
+ device_id: Stored {
+ value: id::Valid(self_id),
+ ref_count: self.life_guard.add_ref(),
+ },
+ life_guard: LifeGuard::new(desc.label.borrow_or_default()),
+ };
+ Ok((pipeline, derived_bind_group_count, pipeline_layout_id))
+ }
+
+ fn create_render_pipeline<G: GlobalIdentityHandlerFactory>(
+ &self,
+ self_id: id::DeviceId,
+ desc: &pipeline::RenderPipelineDescriptor,
+ implicit_pipeline_ids: Option<ImplicitPipelineIds<G>>,
+ hub: &Hub<B, G>,
+ token: &mut Token<Self>,
+ ) -> Result<
+ (
+ pipeline::RenderPipeline<B>,
+ pipeline::ImplicitBindGroupCount,
+ id::PipelineLayoutId,
+ ),
+ pipeline::CreateRenderPipelineError,
+ > {
+ //TODO: only lock mutable if the layout is derived
+ let (mut pipeline_layout_guard, mut token) = hub.pipeline_layouts.write(token);
+ let (mut bgl_guard, mut token) = hub.bind_group_layouts.write(&mut token);
+
+ let mut derived_group_layouts =
+ ArrayVec::<[binding_model::BindEntryMap; MAX_BIND_GROUPS]>::new();
+
+ let samples = {
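+ // The sample count must be a power of two between 1 and 32.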
+ let sc = desc.sample_count;
+ if sc == 0 || sc > 32 || !conv::is_power_of_two(sc) {
+ return Err(pipeline::CreateRenderPipelineError::InvalidSampleCount(sc));
+ }
+ sc as u8
+ };
+
+ let color_states = &desc.color_states;
+ let depth_stencil_state = desc.depth_stencil_state.as_ref();
+
+ let rasterization_state = desc
+ .rasterization_state
+ .as_ref()
+ .cloned()
+ .unwrap_or_default();
+ let rasterizer = conv::map_rasterization_state_descriptor(&rasterization_state);
+
+ let mut interface = validation::StageInterface::default();
+ let mut validated_stages = wgt::ShaderStage::empty();
+
+ let desc_vbs = &desc.vertex_state.vertex_buffers;
+ let mut vertex_strides = Vec::with_capacity(desc_vbs.len());
+ let mut vertex_buffers = Vec::with_capacity(desc_vbs.len());
+ let mut attributes = Vec::new();
+ for (i, vb_state) in desc_vbs.iter().enumerate() {
+ vertex_strides
+ .alloc()
+ .init((vb_state.stride, vb_state.step_mode));
+ if vb_state.attributes.is_empty() {
+ continue;
+ }
+ if vb_state.stride % wgt::VERTEX_STRIDE_ALIGNMENT != 0 {
+ return Err(pipeline::CreateRenderPipelineError::UnalignedVertexStride {
+ index: i as u32,
+ stride: vb_state.stride,
+ });
+ }
+ vertex_buffers.alloc().init(hal::pso::VertexBufferDesc {
+ binding: i as u32,
+ stride: vb_state.stride as u32,
+ rate: match vb_state.step_mode {
+ InputStepMode::Vertex => hal::pso::VertexInputRate::Vertex,
+ InputStepMode::Instance => hal::pso::VertexInputRate::Instance(1),
+ },
+ });
+ let desc_atts = &vb_state.attributes;
+ for attribute in desc_atts.iter() {
+ if attribute.offset >= 0x10000000 {
+ return Err(
+ pipeline::CreateRenderPipelineError::InvalidVertexAttributeOffset {
+ location: attribute.shader_location,
+ offset: attribute.offset,
+ },
+ );
+ }
+ attributes.alloc().init(hal::pso::AttributeDesc {
+ location: attribute.shader_location,
+ binding: i as u32,
+ element: hal::pso::Element {
+ format: conv::map_vertex_format(attribute.format),
+ offset: attribute.offset as u32,
+ },
+ });
+ interface.insert(
+ attribute.shader_location,
+ validation::MaybeOwned::Owned(validation::map_vertex_format(attribute.format)),
+ );
+ }
+ }
+
+ let input_assembler = hal::pso::InputAssemblerDesc {
+ primitive: conv::map_primitive_topology(desc.primitive_topology),
+ with_adjacency: false,
+ restart_index: None, //TODO
+ };
+
+ let blender = hal::pso::BlendDesc {
+ logic_op: None, // TODO
+ targets: color_states
+ .iter()
+ .map(conv::map_color_state_descriptor)
+ .collect(),
+ };
+ let depth_stencil = depth_stencil_state
+ .map(conv::map_depth_stencil_state_descriptor)
+ .unwrap_or_default();
+
+ let multisampling: Option<hal::pso::Multisampling> = if samples == 1 {
+ None
+ } else {
+ Some(hal::pso::Multisampling {
+ rasterization_samples: samples,
+ sample_shading: None,
+ sample_mask: desc.sample_mask as u64,
+ alpha_coverage: desc.alpha_to_coverage_enabled,
+ alpha_to_one: false,
+ })
+ };
+
+ // TODO
+ let baked_states = hal::pso::BakedStates {
+ viewport: None,
+ scissor: None,
+ blend_color: None,
+ depth_bounds: None,
+ };
+
+ if rasterization_state.clamp_depth && !self.features.contains(wgt::Features::DEPTH_CLAMPING)
+ {
+ return Err(pipeline::CreateRenderPipelineError::MissingFeature(
+ wgt::Features::DEPTH_CLAMPING,
+ ));
+ }
+ if rasterization_state.polygon_mode != wgt::PolygonMode::Fill
+ && !self.features.contains(wgt::Features::NON_FILL_POLYGON_MODE)
+ {
+ return Err(pipeline::CreateRenderPipelineError::MissingFeature(
+ wgt::Features::NON_FILL_POLYGON_MODE,
+ ));
+ }
+
+ if desc.layout.is_none() {
+ for _ in 0..self.limits.max_bind_groups {
+ derived_group_layouts.push(binding_model::BindEntryMap::default());
+ }
+ }
+
+ let (shader_module_guard, _) = hub.shader_modules.read(&mut token);
+
+ let rp_key = RenderPassKey {
+ colors: color_states
+ .iter()
+ .map(|state| {
+ let at = hal::pass::Attachment {
+ format: Some(conv::map_texture_format(
+ state.format,
+ self.private_features,
+ )),
+ samples,
+ ops: hal::pass::AttachmentOps::PRESERVE,
+ stencil_ops: hal::pass::AttachmentOps::DONT_CARE,
+ layouts: hal::image::Layout::General..hal::image::Layout::General,
+ };
+ (at, hal::image::Layout::ColorAttachmentOptimal)
+ })
+ .collect(),
+ // We can ignore the resolves, as the Vulkan spec says:
+ // As an additional special case, if two render passes have a single subpass,
+ // they are compatible even if they have different resolve attachment references
+ // or depth/stencil resolve modes but satisfy the other compatibility conditions.
+ resolves: ArrayVec::new(),
+ depth_stencil: depth_stencil_state.map(|state| {
+ let at = hal::pass::Attachment {
+ format: Some(conv::map_texture_format(
+ state.format,
+ self.private_features,
+ )),
+ samples,
+ ops: hal::pass::AttachmentOps::PRESERVE,
+ stencil_ops: hal::pass::AttachmentOps::PRESERVE,
+ layouts: hal::image::Layout::General..hal::image::Layout::General,
+ };
+ (at, hal::image::Layout::DepthStencilAttachmentOptimal)
+ }),
+ };
+
+ let vertex = {
+ let entry_point_name = &desc.vertex_stage.entry_point;
+ let flag = wgt::ShaderStage::VERTEX;
+
+ let shader_module =
+ shader_module_guard
+ .get(desc.vertex_stage.module)
+ .map_err(|_| pipeline::CreateRenderPipelineError::Stage {
+ flag,
+ error: validation::StageError::InvalidModule,
+ })?;
+
+ if let Some(ref module) = shader_module.module {
+ let group_layouts = match desc.layout {
+ Some(pipeline_layout_id) => Device::get_introspection_bind_group_layouts(
+ pipeline_layout_guard
+ .get(pipeline_layout_id)
+ .map_err(|_| pipeline::CreateRenderPipelineError::InvalidLayout)?,
+ &*bgl_guard,
+ ),
+ None => validation::IntrospectionBindGroupLayouts::Derived(
+ &mut derived_group_layouts,
+ ),
+ };
+
+ interface = validation::check_stage(
+ module,
+ group_layouts,
+ &entry_point_name,
+ flag,
+ interface,
+ )
+ .map_err(|error| pipeline::CreateRenderPipelineError::Stage { flag, error })?;
+ validated_stages |= flag;
+ }
+
+ hal::pso::EntryPoint::<B> {
+ entry: &entry_point_name, // TODO
+ module: &shader_module.raw,
+ specialization: hal::pso::Specialization::EMPTY,
+ }
+ };
+
+ let fragment = match &desc.fragment_stage {
+ Some(stage) => {
+ let entry_point_name = &stage.entry_point;
+ let flag = wgt::ShaderStage::FRAGMENT;
+
+ let shader_module = shader_module_guard.get(stage.module).map_err(|_| {
+ pipeline::CreateRenderPipelineError::Stage {
+ flag,
+ error: validation::StageError::InvalidModule,
+ }
+ })?;
+
+ let group_layouts = match desc.layout {
+ Some(pipeline_layout_id) => Device::get_introspection_bind_group_layouts(
+ pipeline_layout_guard
+ .get(pipeline_layout_id)
+ .map_err(|_| pipeline::CreateRenderPipelineError::InvalidLayout)?,
+ &*bgl_guard,
+ ),
+ None => validation::IntrospectionBindGroupLayouts::Derived(
+ &mut derived_group_layouts,
+ ),
+ };
+
+ if validated_stages == wgt::ShaderStage::VERTEX {
+ if let Some(ref module) = shader_module.module {
+ interface = validation::check_stage(
+ module,
+ group_layouts,
+ &entry_point_name,
+ flag,
+ interface,
+ )
+ .map_err(|error| {
+ pipeline::CreateRenderPipelineError::Stage { flag, error }
+ })?;
+ validated_stages |= flag;
+ }
+ }
+
+ Some(hal::pso::EntryPoint::<B> {
+ entry: &entry_point_name,
+ module: &shader_module.raw,
+ specialization: hal::pso::Specialization::EMPTY,
+ })
+ }
+ None => None,
+ };
+
+ if validated_stages.contains(wgt::ShaderStage::FRAGMENT) {
+ for (i, state) in color_states.iter().enumerate() {
+ match interface.get(&(i as wgt::ShaderLocation)) {
+ Some(output) if validation::check_texture_format(state.format, output) => {}
+ Some(output) => {
+ tracing::warn!(
+ "Incompatible fragment output[{}] from shader: {:?}, expected {:?}",
+ i,
+ &**output,
+ state.format,
+ );
+ return Err(
+ pipeline::CreateRenderPipelineError::IncompatibleOutputFormat {
+ index: i as u8,
+ },
+ );
+ }
+ None if state.write_mask.is_empty() => {}
+ None => {
+ tracing::warn!("Missing fragment output[{}], expected {:?}", i, state,);
+ return Err(pipeline::CreateRenderPipelineError::MissingOutput {
+ index: i as u8,
+ });
+ }
+ }
+ }
+ }
+ let last_stage = match desc.fragment_stage {
+ Some(_) => wgt::ShaderStage::FRAGMENT,
+ None => wgt::ShaderStage::VERTEX,
+ };
+ if desc.layout.is_none() && !validated_stages.contains(last_stage) {
+ return Err(pipeline::ImplicitLayoutError::ReflectionError(last_stage).into());
+ }
+
+ let primitive_assembler = hal::pso::PrimitiveAssemblerDesc::Vertex {
+ buffers: &vertex_buffers,
+ attributes: &attributes,
+ input_assembler,
+ vertex,
+ tessellation: None,
+ geometry: None,
+ };
+
+ // TODO
+ let flags = hal::pso::PipelineCreationFlags::empty();
+ // TODO
+ let parent = hal::pso::BasePipeline::None;
+
+ let (pipeline_layout_id, derived_bind_group_count) = match desc.layout {
+ Some(id) => (id, 0),
+ None => self.derive_pipeline_layout(
+ self_id,
+ implicit_pipeline_ids,
+ derived_group_layouts,
+ &mut *bgl_guard,
+ &mut *pipeline_layout_guard,
+ &hub,
+ )?,
+ };
+ let layout = pipeline_layout_guard
+ .get(pipeline_layout_id)
+ .map_err(|_| pipeline::CreateRenderPipelineError::InvalidLayout)?;
+
+ let mut render_pass_cache = self.render_passes.lock();
+ let pipeline_desc = hal::pso::GraphicsPipelineDesc {
+ primitive_assembler,
+ rasterizer,
+ fragment,
+ blender,
+ depth_stencil,
+ multisampling,
+ baked_states,
+ layout: &layout.raw,
+ subpass: hal::pass::Subpass {
+ index: 0,
+ main_pass: match render_pass_cache.entry(rp_key) {
+ Entry::Occupied(e) => e.into_mut(),
+ Entry::Vacant(e) => {
+ let pass = self
+ .create_compatible_render_pass(e.key())
+ .or(Err(DeviceError::OutOfMemory))?;
+ e.insert(pass)
+ }
+ },
+ },
+ flags,
+ parent,
+ };
+ // TODO: cache
+ let raw = unsafe {
+ self.raw
+ .create_graphics_pipeline(&pipeline_desc, None)
+ .map_err(|err| match err {
+ hal::pso::CreationError::OutOfMemory(_) => DeviceError::OutOfMemory,
+ _ => panic!("failed to create graphics pipeline: {}", err),
+ })?
+ };
+ if desc.label.is_some() {
+ //TODO-0.6: self.set_graphics_pipeline_name(&mut raw, label)
+ }
+
+ let pass_context = RenderPassContext {
+ attachments: AttachmentData {
+ colors: color_states.iter().map(|state| state.format).collect(),
+ resolves: ArrayVec::new(),
+ depth_stencil: depth_stencil_state
+ .as_ref()
+ .map(|state| state.format.clone()),
+ },
+ sample_count: samples,
+ };
+
+ let mut flags = pipeline::PipelineFlags::empty();
+ for state in color_states.iter() {
+ if state.color_blend.uses_color() | state.alpha_blend.uses_color() {
+ flags |= pipeline::PipelineFlags::BLEND_COLOR;
+ }
+ }
+ if let Some(ds) = depth_stencil_state.as_ref() {
+ if ds.stencil.is_enabled() && ds.stencil.needs_ref_value() {
+ flags |= pipeline::PipelineFlags::STENCIL_REFERENCE;
+ }
+ if !ds.is_read_only() {
+ flags |= pipeline::PipelineFlags::WRITES_DEPTH_STENCIL;
+ }
+ }
+
+ let pipeline = pipeline::RenderPipeline {
+ raw,
+ layout_id: Stored {
+ value: id::Valid(pipeline_layout_id),
+ ref_count: layout.life_guard.add_ref(),
+ },
+ device_id: Stored {
+ value: id::Valid(self_id),
+ ref_count: self.life_guard.add_ref(),
+ },
+ pass_context,
+ flags,
+ index_format: desc.vertex_state.index_format,
+ vertex_strides,
+ life_guard: LifeGuard::new(desc.label.borrow_or_default()),
+ };
+ Ok((pipeline, derived_bind_group_count, pipeline_layout_id))
+ }
+
+ fn wait_for_submit(
+ &self,
+ submission_index: SubmissionIndex,
+ token: &mut Token<Self>,
+ ) -> Result<(), WaitIdleError> {
+ if self.last_completed_submission_index() <= submission_index {
+ tracing::info!("Waiting for submission {:?}", submission_index);
+ self.lock_life(token)
+ .triage_submissions(&self.raw, true)
+ .map(|_| ())
+ } else {
+ Ok(())
+ }
+ }
+}
+
+impl<B: hal::Backend> Device<B> {
+ pub(crate) fn destroy_bind_group(&self, bind_group: binding_model::BindGroup<B>) {
+ self.desc_allocator
+ .lock()
+ .free(&self.raw, iter::once(bind_group.raw));
+ }
+
+ pub(crate) fn destroy_buffer(&self, buffer: resource::Buffer<B>) {
+ if let Some((raw, memory)) = buffer.raw {
+ unsafe {
+ self.mem_allocator.lock().free(&self.raw, memory);
+ self.raw.destroy_buffer(raw);
+ }
+ }
+ }
+
+ pub(crate) fn destroy_texture(&self, texture: resource::Texture<B>) {
+ if let Some((raw, memory)) = texture.raw {
+ unsafe {
+ self.mem_allocator.lock().free(&self.raw, memory);
+ self.raw.destroy_image(raw);
+ }
+ }
+ }
+
+ /// Wait for idle and remove resources that we can, before we die.
+ pub(crate) fn prepare_to_die(&mut self) {
+ let mut life_tracker = self.life_tracker.lock();
+ if let Err(error) = life_tracker.triage_submissions(&self.raw, true) {
+ tracing::error!("failed to triage submissions: {}", error);
+ }
+ life_tracker.cleanup(&self.raw, &self.mem_allocator, &self.desc_allocator);
+ }
+
+ pub(crate) fn dispose(self) {
+ let mut desc_alloc = self.desc_allocator.into_inner();
+ let mut mem_alloc = self.mem_allocator.into_inner();
+ self.pending_writes
+ .dispose(&self.raw, &self.cmd_allocator, &mut mem_alloc);
+ self.cmd_allocator.destroy(&self.raw);
+ unsafe {
+ desc_alloc.cleanup(&self.raw);
+ mem_alloc.clear(&self.raw);
+ for (_, rp) in self.render_passes.lock().drain() {
+ self.raw.destroy_render_pass(rp);
+ }
+ for (_, fbo) in self.framebuffers.lock().drain() {
+ self.raw.destroy_framebuffer(fbo);
+ }
+ }
+ }
+}
+
+impl<B: hal::Backend> crate::hub::Resource for Device<B> {
+ const TYPE: &'static str = "Device";
+
+ fn life_guard(&self) -> &LifeGuard {
+ &self.life_guard
+ }
+}
+
+#[error("device is invalid")]
+#[derive(Clone, Debug, Error)]
+pub struct InvalidDevice;
+
+#[derive(Clone, Debug, Error)]
+pub enum DeviceError {
+ #[error("parent device is invalid")]
+ Invalid,
+ #[error("parent device is lost")]
+ Lost,
+ #[error("not enough memory left")]
+ OutOfMemory,
+}
+
+impl From<hal::device::OomOrDeviceLost> for DeviceError {
+ fn from(err: hal::device::OomOrDeviceLost) -> Self {
+ match err {
+ hal::device::OomOrDeviceLost::OutOfMemory(_) => Self::OutOfMemory,
+ hal::device::OomOrDeviceLost::DeviceLost(_) => Self::Lost,
+ }
+ }
+}
+
+impl From<gpu_alloc::MapError> for DeviceError {
+ fn from(err: gpu_alloc::MapError) -> Self {
+ match err {
+ gpu_alloc::MapError::OutOfDeviceMemory | gpu_alloc::MapError::OutOfHostMemory => {
+ DeviceError::OutOfMemory
+ }
+ _ => panic!("failed to map buffer: {}", err),
+ }
+ }
+}
+
+impl DeviceError {
+ fn from_bind(err: hal::device::BindError) -> Self {
+ match err {
+ hal::device::BindError::OutOfMemory(_) => Self::OutOfMemory,
+ _ => panic!("failed to bind memory: {}", err),
+ }
+ }
+}
+
+pub struct ImplicitPipelineIds<'a, G: GlobalIdentityHandlerFactory> {
+ pub root_id: Input<G, id::PipelineLayoutId>,
+ pub group_ids: &'a [Input<G, id::BindGroupLayoutId>],
+}
+
+impl<G: GlobalIdentityHandlerFactory> Global<G> {
+ pub fn device_features<B: GfxBackend>(
+ &self,
+ device_id: id::DeviceId,
+ ) -> Result<wgt::Features, InvalidDevice> {
+ span!(_guard, INFO, "Device::features");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+ let (device_guard, _) = hub.devices.read(&mut token);
+ let device = device_guard.get(device_id).map_err(|_| InvalidDevice)?;
+
+ Ok(device.features)
+ }
+
+ pub fn device_limits<B: GfxBackend>(
+ &self,
+ device_id: id::DeviceId,
+ ) -> Result<wgt::Limits, InvalidDevice> {
+ span!(_guard, INFO, "Device::limits");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+ let (device_guard, _) = hub.devices.read(&mut token);
+ let device = device_guard.get(device_id).map_err(|_| InvalidDevice)?;
+
+ Ok(device.limits.clone())
+ }
+
+ pub fn device_create_buffer<B: GfxBackend>(
+ &self,
+ device_id: id::DeviceId,
+ desc: &resource::BufferDescriptor,
+ id_in: Input<G, id::BufferId>,
+ ) -> (id::BufferId, Option<resource::CreateBufferError>) {
+ span!(_guard, INFO, "Device::create_buffer");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+
+ tracing::info!("Create buffer {:?} with ID {:?}", desc, id_in);
+
+ let (device_guard, mut token) = hub.devices.read(&mut token);
+ let error = loop {
+ let device = match device_guard.get(device_id) {
+ Ok(device) => device,
+ Err(_) => break DeviceError::Invalid.into(),
+ };
+ let mut buffer = match device.create_buffer(device_id, desc, false) {
+ Ok(buffer) => buffer,
+ Err(e) => break e,
+ };
+ let ref_count = buffer.life_guard.add_ref();
+
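+ // Pick the buffer's initial state: idle, directly mapped when MAP_WRITE is
+ // allowed, or staged through a temporary upload buffer otherwise.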
+ let buffer_use = if !desc.mapped_at_creation {
+ resource::BufferUse::EMPTY
+ } else if desc.usage.contains(wgt::BufferUsage::MAP_WRITE) {
+ // the buffer is mappable, so map it right away at creation
+ let map_size = buffer.size;
+ let ptr = match map_buffer(&device.raw, &mut buffer, 0, map_size, HostMap::Write) {
+ Ok(ptr) => ptr,
+ Err(e) => {
+ let (raw, memory) = buffer.raw.unwrap();
+ device.lock_life(&mut token).schedule_resource_destruction(
+ queue::TempResource::Buffer(raw),
+ memory,
+ !0,
+ );
+ break e.into();
+ }
+ };
+ buffer.map_state = resource::BufferMapState::Active {
+ ptr,
+ sub_range: hal::buffer::SubRange::WHOLE,
+ host: HostMap::Write,
+ };
+ resource::BufferUse::MAP_WRITE
+ } else {
+ // buffer needs staging area for initialization only
+ let stage_desc = wgt::BufferDescriptor {
+ label: Some(Cow::Borrowed("<init_buffer>")),
+ size: desc.size,
+ usage: wgt::BufferUsage::MAP_WRITE | wgt::BufferUsage::COPY_SRC,
+ mapped_at_creation: false,
+ };
+ let stage = match device.create_buffer(device_id, &stage_desc, true) {
+ Ok(stage) => stage,
+ Err(e) => {
+ let (raw, memory) = buffer.raw.unwrap();
+ device.lock_life(&mut token).schedule_resource_destruction(
+ queue::TempResource::Buffer(raw),
+ memory,
+ !0,
+ );
+ break e;
+ }
+ };
+ let (stage_buffer, mut stage_memory) = stage.raw.unwrap();
+ let ptr = match stage_memory.map(&device.raw, 0, stage.size) {
+ Ok(ptr) => ptr,
+ Err(e) => {
+ let (raw, memory) = buffer.raw.unwrap();
+ let mut life_lock = device.lock_life(&mut token);
+ life_lock.schedule_resource_destruction(
+ queue::TempResource::Buffer(raw),
+ memory,
+ !0,
+ );
+ life_lock.schedule_resource_destruction(
+ queue::TempResource::Buffer(stage_buffer),
+ stage_memory,
+ !0,
+ );
+ break e.into();
+ }
+ };
+ buffer.map_state = resource::BufferMapState::Init {
+ ptr,
+ needs_flush: !stage_memory.is_coherent(),
+ stage_buffer,
+ stage_memory,
+ };
+ resource::BufferUse::COPY_DST
+ };
+
+ let id = hub.buffers.register_identity(id_in, buffer, &mut token);
+ tracing::info!("Created buffer {:?} with {:?}", id, desc);
+ #[cfg(feature = "trace")]
+ if let Some(ref trace) = device.trace {
+ let mut desc = desc.clone();
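+ // The trace records the buffer as not mapped at creation; the initial
+ // contents are replayed as an explicit write later, which may need COPY_DST.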
+ let mapped_at_creation = mem::replace(&mut desc.mapped_at_creation, false);
+ if mapped_at_creation && !desc.usage.contains(wgt::BufferUsage::MAP_WRITE) {
+ desc.usage |= wgt::BufferUsage::COPY_DST;
+ }
+ trace.lock().add(trace::Action::CreateBuffer(id.0, desc));
+ }
+
+ device
+ .trackers
+ .lock()
+ .buffers
+ .init(id, ref_count, BufferState::with_usage(buffer_use))
+ .unwrap();
+ return (id.0, None);
+ };
+
+ let id = hub
+ .buffers
+ .register_error(id_in, desc.label.borrow_or_default(), &mut token);
+ (id, Some(error))
+ }
+
+ #[cfg(feature = "replay")]
+ pub fn device_wait_for_buffer<B: GfxBackend>(
+ &self,
+ device_id: id::DeviceId,
+ buffer_id: id::BufferId,
+ ) -> Result<(), WaitIdleError> {
+ let hub = B::hub(self);
+ let mut token = Token::root();
+ let (device_guard, mut token) = hub.devices.read(&mut token);
+ let last_submission = {
+ let (buffer_guard, _) = hub.buffers.write(&mut token);
+ match buffer_guard.get(buffer_id) {
+ Ok(buffer) => buffer.life_guard.submission_index.load(Ordering::Acquire),
+ Err(_) => return Ok(()),
+ }
+ };
+
+ device_guard
+ .get(device_id)
+ .map_err(|_| DeviceError::Invalid)?
+ .wait_for_submit(last_submission, &mut token)
+ }
+
+ pub fn device_set_buffer_sub_data<B: GfxBackend>(
+ &self,
+ device_id: id::DeviceId,
+ buffer_id: id::BufferId,
+ offset: BufferAddress,
+ data: &[u8],
+ ) -> Result<(), resource::BufferAccessError> {
+ span!(_guard, INFO, "Device::set_buffer_sub_data");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+
+ let (device_guard, mut token) = hub.devices.read(&mut token);
+ let (mut buffer_guard, _) = hub.buffers.write(&mut token);
+ let device = device_guard
+ .get(device_id)
+ .map_err(|_| DeviceError::Invalid)?;
+ let buffer = buffer_guard
+ .get_mut(buffer_id)
+ .map_err(|_| resource::BufferAccessError::Invalid)?;
+ check_buffer_usage(buffer.usage, wgt::BufferUsage::MAP_WRITE)?;
+        //TODO: assert that the buffer isn't currently in use by the GPU
+
+ #[cfg(feature = "trace")]
+ if let Some(ref trace) = device.trace {
+ let mut trace = trace.lock();
+ let data_path = trace.make_binary("bin", data);
+ trace.add(trace::Action::WriteBuffer {
+ id: buffer_id,
+ data: data_path,
+ range: offset..offset + data.len() as BufferAddress,
+ queued: false,
+ });
+ }
+
+ let (_, block) = buffer.raw.as_mut().unwrap();
+ block.write_bytes(&device.raw, offset, data)?;
+
+ Ok(())
+ }
+
+ pub fn device_get_buffer_sub_data<B: GfxBackend>(
+ &self,
+ device_id: id::DeviceId,
+ buffer_id: id::BufferId,
+ offset: BufferAddress,
+ data: &mut [u8],
+ ) -> Result<(), resource::BufferAccessError> {
+ span!(_guard, INFO, "Device::get_buffer_sub_data");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+
+ let (device_guard, mut token) = hub.devices.read(&mut token);
+ let (mut buffer_guard, _) = hub.buffers.write(&mut token);
+ let device = device_guard
+ .get(device_id)
+ .map_err(|_| DeviceError::Invalid)?;
+ let buffer = buffer_guard
+ .get_mut(buffer_id)
+ .map_err(|_| resource::BufferAccessError::Invalid)?;
+ check_buffer_usage(buffer.usage, wgt::BufferUsage::MAP_READ)?;
+        //TODO: assert that the buffer isn't currently in use by the GPU
+
+ let (_, block) = buffer.raw.as_mut().unwrap();
+ block.read_bytes(&device.raw, offset, data)?;
+
+ Ok(())
+ }
+
+ pub fn buffer_label<B: GfxBackend>(&self, id: id::BufferId) -> String {
+ B::hub(self).buffers.label_for_resource(id)
+ }
+
+ pub fn buffer_destroy<B: GfxBackend>(
+ &self,
+ buffer_id: id::BufferId,
+ ) -> Result<(), resource::DestroyError> {
+ span!(_guard, INFO, "Buffer::destroy");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+
+ //TODO: lock pending writes separately, keep the device read-only
+ let (mut device_guard, mut token) = hub.devices.write(&mut token);
+
+ tracing::info!("Buffer {:?} is destroyed", buffer_id);
+ let (mut buffer_guard, _) = hub.buffers.write(&mut token);
+ let buffer = buffer_guard
+ .get_mut(buffer_id)
+ .map_err(|_| resource::DestroyError::Invalid)?;
+
+ let device = &mut device_guard[buffer.device_id.value];
+
+ #[cfg(feature = "trace")]
+ if let Some(ref trace) = device.trace {
+ trace.lock().add(trace::Action::FreeBuffer(buffer_id));
+ }
+
+ let (raw, memory) = buffer
+ .raw
+ .take()
+ .ok_or(resource::DestroyError::AlreadyDestroyed)?;
+ let temp = queue::TempResource::Buffer(raw);
+
+ if device.pending_writes.dst_buffers.contains(&buffer_id) {
+ device.pending_writes.temp_resources.push((temp, memory));
+ } else {
+ let last_submit_index = buffer.life_guard.submission_index.load(Ordering::Acquire);
+ drop(buffer_guard);
+ device.lock_life(&mut token).schedule_resource_destruction(
+ temp,
+ memory,
+ last_submit_index,
+ );
+ }
+
+ Ok(())
+ }
+
+ pub fn buffer_drop<B: GfxBackend>(&self, buffer_id: id::BufferId, wait: bool) {
+ span!(_guard, INFO, "Buffer::drop");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+
+ tracing::info!("Buffer {:?} is dropped", buffer_id);
+ let (ref_count, last_submit_index, device_id) = {
+ let (mut buffer_guard, _) = hub.buffers.write(&mut token);
+ match buffer_guard.get_mut(buffer_id) {
+ Ok(buffer) => {
+ let ref_count = buffer.life_guard.ref_count.take().unwrap();
+ let last_submit_index =
+ buffer.life_guard.submission_index.load(Ordering::Acquire);
+ (ref_count, last_submit_index, buffer.device_id.value)
+ }
+ Err(InvalidId) => {
+ hub.buffers.unregister_locked(buffer_id, &mut *buffer_guard);
+ return;
+ }
+ }
+ };
+
+ let (device_guard, mut token) = hub.devices.read(&mut token);
+ let device = &device_guard[device_id];
+ {
+ let mut life_lock = device.lock_life(&mut token);
+ if device.pending_writes.dst_buffers.contains(&buffer_id) {
+ life_lock.future_suspected_buffers.push(Stored {
+ value: id::Valid(buffer_id),
+ ref_count,
+ });
+ } else {
+ drop(ref_count);
+ life_lock
+ .suspected_resources
+ .buffers
+ .push(id::Valid(buffer_id));
+ }
+ }
+
+ if wait {
+ match device.wait_for_submit(last_submit_index, &mut token) {
+ Ok(()) => (),
+ Err(e) => tracing::error!("Failed to wait for buffer {:?}: {:?}", buffer_id, e),
+ }
+ }
+ }
+
+ pub fn device_create_texture<B: GfxBackend>(
+ &self,
+ device_id: id::DeviceId,
+ desc: &resource::TextureDescriptor,
+ id_in: Input<G, id::TextureId>,
+ ) -> (id::TextureId, Option<resource::CreateTextureError>) {
+ span!(_guard, INFO, "Device::create_texture");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+
+ let (device_guard, mut token) = hub.devices.read(&mut token);
+ let error = loop {
+ let device = match device_guard.get(device_id) {
+ Ok(device) => device,
+ Err(_) => break DeviceError::Invalid.into(),
+ };
+ let texture = match device.create_texture(device_id, desc) {
+ Ok(texture) => texture,
+ Err(error) => break error,
+ };
+ let num_levels = texture.full_range.levels.end;
+ let num_layers = texture.full_range.layers.end;
+ let ref_count = texture.life_guard.add_ref();
+
+ let id = hub.textures.register_identity(id_in, texture, &mut token);
+ tracing::info!("Created texture {:?} with {:?}", id, desc);
+ #[cfg(feature = "trace")]
+ if let Some(ref trace) = device.trace {
+ trace
+ .lock()
+ .add(trace::Action::CreateTexture(id.0, desc.clone()));
+ }
+
+ device
+ .trackers
+ .lock()
+ .textures
+ .init(id, ref_count, TextureState::new(num_levels, num_layers))
+ .unwrap();
+ return (id.0, None);
+ };
+
+ let id = hub
+ .textures
+ .register_error(id_in, desc.label.borrow_or_default(), &mut token);
+ (id, Some(error))
+ }
+
+ pub fn texture_label<B: GfxBackend>(&self, id: id::TextureId) -> String {
+ B::hub(self).textures.label_for_resource(id)
+ }
+
+ pub fn texture_destroy<B: GfxBackend>(
+ &self,
+ texture_id: id::TextureId,
+ ) -> Result<(), resource::DestroyError> {
+ span!(_guard, INFO, "Texture::destroy");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+
+ //TODO: lock pending writes separately, keep the device read-only
+ let (mut device_guard, mut token) = hub.devices.write(&mut token);
+
+ tracing::info!("Buffer {:?} is destroyed", texture_id);
+ let (mut texture_guard, _) = hub.textures.write(&mut token);
+ let texture = texture_guard
+ .get_mut(texture_id)
+ .map_err(|_| resource::DestroyError::Invalid)?;
+
+ let device = &mut device_guard[texture.device_id.value];
+
+ #[cfg(feature = "trace")]
+ if let Some(ref trace) = device.trace {
+ trace.lock().add(trace::Action::FreeTexture(texture_id));
+ }
+
+ let (raw, memory) = texture
+ .raw
+ .take()
+ .ok_or(resource::DestroyError::AlreadyDestroyed)?;
+ let temp = queue::TempResource::Image(raw);
+
+ if device.pending_writes.dst_textures.contains(&texture_id) {
+ device.pending_writes.temp_resources.push((temp, memory));
+ } else {
+ let last_submit_index = texture.life_guard.submission_index.load(Ordering::Acquire);
+ drop(texture_guard);
+ device.lock_life(&mut token).schedule_resource_destruction(
+ temp,
+ memory,
+ last_submit_index,
+ );
+ }
+
+ Ok(())
+ }
+
+ pub fn texture_drop<B: GfxBackend>(&self, texture_id: id::TextureId, wait: bool) {
+ span!(_guard, INFO, "Texture::drop");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+
+ let (ref_count, last_submit_index, device_id) = {
+ let (mut texture_guard, _) = hub.textures.write(&mut token);
+ match texture_guard.get_mut(texture_id) {
+ Ok(texture) => {
+ let ref_count = texture.life_guard.ref_count.take().unwrap();
+ let last_submit_index =
+ texture.life_guard.submission_index.load(Ordering::Acquire);
+ (ref_count, last_submit_index, texture.device_id.value)
+ }
+ Err(InvalidId) => {
+ hub.textures
+ .unregister_locked(texture_id, &mut *texture_guard);
+ return;
+ }
+ }
+ };
+
+ let (device_guard, mut token) = hub.devices.read(&mut token);
+ let device = &device_guard[device_id];
+ {
+ let mut life_lock = device.lock_life(&mut token);
+ if device.pending_writes.dst_textures.contains(&texture_id) {
+ life_lock.future_suspected_textures.push(Stored {
+ value: id::Valid(texture_id),
+ ref_count,
+ });
+ } else {
+ drop(ref_count);
+ life_lock
+ .suspected_resources
+ .textures
+ .push(id::Valid(texture_id));
+ }
+ }
+
+ if wait {
+ match device.wait_for_submit(last_submit_index, &mut token) {
+ Ok(()) => (),
+ Err(e) => tracing::error!("Failed to wait for texture {:?}: {:?}", texture_id, e),
+ }
+ }
+ }
+
+ pub fn texture_create_view<B: GfxBackend>(
+ &self,
+ texture_id: id::TextureId,
+ desc: &resource::TextureViewDescriptor,
+ id_in: Input<G, id::TextureViewId>,
+ ) -> (id::TextureViewId, Option<resource::CreateTextureViewError>) {
+ span!(_guard, INFO, "Texture::create_view");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+
+ let (device_guard, mut token) = hub.devices.read(&mut token);
+ let (texture_guard, mut token) = hub.textures.read(&mut token);
+ let error = loop {
+ let texture = match texture_guard.get(texture_id) {
+ Ok(texture) => texture,
+ Err(_) => break resource::CreateTextureViewError::InvalidTexture,
+ };
+ let device = &device_guard[texture.device_id.value];
+
+ let view = match device.create_texture_view(texture, texture_id, desc) {
+ Ok(view) => view,
+ Err(e) => break e,
+ };
+ let ref_count = view.life_guard.add_ref();
+
+ let id = hub.texture_views.register_identity(id_in, view, &mut token);
+ #[cfg(feature = "trace")]
+ if let Some(ref trace) = device.trace {
+ trace.lock().add(trace::Action::CreateTextureView {
+ id: id.0,
+ parent_id: texture_id,
+ desc: desc.clone(),
+ });
+ }
+
+ device
+ .trackers
+ .lock()
+ .views
+ .init(id, ref_count, PhantomData)
+ .unwrap();
+ return (id.0, None);
+ };
+
+ let id =
+ hub.texture_views
+ .register_error(id_in, desc.label.borrow_or_default(), &mut token);
+ (id, Some(error))
+ }
+
+ pub fn texture_view_label<B: GfxBackend>(&self, id: id::TextureViewId) -> String {
+ B::hub(self).texture_views.label_for_resource(id)
+ }
+
+ pub fn texture_view_drop<B: GfxBackend>(
+ &self,
+ texture_view_id: id::TextureViewId,
+ ) -> Result<(), resource::TextureViewDestroyError> {
+ span!(_guard, INFO, "TextureView::drop");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+
+ let device_id = {
+ let (texture_guard, mut token) = hub.textures.read(&mut token);
+ let (mut texture_view_guard, _) = hub.texture_views.write(&mut token);
+
+ match texture_view_guard.get_mut(texture_view_id) {
+ Ok(view) => {
+ view.life_guard.ref_count.take();
+ match view.inner {
+ resource::TextureViewInner::Native { ref source_id, .. } => {
+ texture_guard[source_id.value].device_id.value
+ }
+ resource::TextureViewInner::SwapChain { .. } => {
+ return Err(resource::TextureViewDestroyError::SwapChainImage)
+ }
+ }
+ }
+ Err(InvalidId) => {
+ hub.texture_views
+ .unregister_locked(texture_view_id, &mut *texture_view_guard);
+ return Ok(());
+ }
+ }
+ };
+
+ let (device_guard, mut token) = hub.devices.read(&mut token);
+ device_guard[device_id]
+ .lock_life(&mut token)
+ .suspected_resources
+ .texture_views
+ .push(id::Valid(texture_view_id));
+ Ok(())
+ }
+
+ pub fn device_create_sampler<B: GfxBackend>(
+ &self,
+ device_id: id::DeviceId,
+ desc: &resource::SamplerDescriptor,
+ id_in: Input<G, id::SamplerId>,
+ ) -> (id::SamplerId, Option<resource::CreateSamplerError>) {
+ span!(_guard, INFO, "Device::create_sampler");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+ let (device_guard, mut token) = hub.devices.read(&mut token);
+
+ let error = loop {
+ let device = match device_guard.get(device_id) {
+ Ok(device) => device,
+ Err(_) => break DeviceError::Invalid.into(),
+ };
+
+ let sampler = match device.create_sampler(device_id, desc) {
+ Ok(sampler) => sampler,
+ Err(e) => break e,
+ };
+ let ref_count = sampler.life_guard.add_ref();
+
+ let id = hub.samplers.register_identity(id_in, sampler, &mut token);
+ #[cfg(feature = "trace")]
+ if let Some(ref trace) = device.trace {
+ trace
+ .lock()
+ .add(trace::Action::CreateSampler(id.0, desc.clone()));
+ }
+
+ device
+ .trackers
+ .lock()
+ .samplers
+ .init(id, ref_count, PhantomData)
+ .unwrap();
+ return (id.0, None);
+ };
+
+ let id = hub
+ .samplers
+ .register_error(id_in, desc.label.borrow_or_default(), &mut token);
+ (id, Some(error))
+ }
+
+ pub fn sampler_label<B: GfxBackend>(&self, id: id::SamplerId) -> String {
+ B::hub(self).samplers.label_for_resource(id)
+ }
+
+ pub fn sampler_drop<B: GfxBackend>(&self, sampler_id: id::SamplerId) {
+ span!(_guard, INFO, "Sampler::drop");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+
+ let device_id = {
+ let (mut sampler_guard, _) = hub.samplers.write(&mut token);
+ match sampler_guard.get_mut(sampler_id) {
+ Ok(sampler) => {
+ sampler.life_guard.ref_count.take();
+ sampler.device_id.value
+ }
+ Err(InvalidId) => {
+ hub.samplers
+ .unregister_locked(sampler_id, &mut *sampler_guard);
+ return;
+ }
+ }
+ };
+
+ let (device_guard, mut token) = hub.devices.read(&mut token);
+ device_guard[device_id]
+ .lock_life(&mut token)
+ .suspected_resources
+ .samplers
+ .push(id::Valid(sampler_id));
+ }
+
+ pub fn device_create_bind_group_layout<B: GfxBackend>(
+ &self,
+ device_id: id::DeviceId,
+ desc: &binding_model::BindGroupLayoutDescriptor,
+ id_in: Input<G, id::BindGroupLayoutId>,
+ ) -> (
+ id::BindGroupLayoutId,
+ Option<binding_model::CreateBindGroupLayoutError>,
+ ) {
+ span!(_guard, INFO, "Device::create_bind_group_layout");
+
+ let mut token = Token::root();
+ let hub = B::hub(self);
+
+ let error = 'outer: loop {
+ let mut entry_map = FastHashMap::default();
+ for entry in desc.entries.iter() {
+ if entry_map.insert(entry.binding, entry.clone()).is_some() {
+ break 'outer binding_model::CreateBindGroupLayoutError::ConflictBinding(
+ entry.binding,
+ );
+ }
+ }
+
+ let (device_guard, mut token) = hub.devices.read(&mut token);
+ let device = match device_guard.get(device_id) {
+ Ok(device) => device,
+ Err(_) => break DeviceError::Invalid.into(),
+ };
+
+            // If there is an equivalent BGL, just bump the refcount and return it.
+            // This only applies to identity filters that generate new IDs
+            // themselves, whose inputs are zero-sized `PhantomData`.
+ if mem::size_of::<Input<G, id::BindGroupLayoutId>>() == 0 {
+ let (bgl_guard, _) = hub.bind_group_layouts.read(&mut token);
+ if let Some(id) =
+ Device::deduplicate_bind_group_layout(device_id, &entry_map, &*bgl_guard)
+ {
+ return (id, None);
+ }
+ }
+
+ let layout = match device.create_bind_group_layout(
+ device_id,
+ desc.label.as_ref().map(|cow| cow.as_ref()),
+ entry_map,
+ ) {
+ Ok(layout) => layout,
+ Err(e) => break e,
+ };
+
+ let id = hub
+ .bind_group_layouts
+ .register_identity(id_in, layout, &mut token);
+ #[cfg(feature = "trace")]
+ if let Some(ref trace) = device.trace {
+ trace
+ .lock()
+ .add(trace::Action::CreateBindGroupLayout(id.0, desc.clone()));
+ }
+ return (id.0, None);
+ };
+
+ let id = hub.bind_group_layouts.register_error(
+ id_in,
+ desc.label.borrow_or_default(),
+ &mut token,
+ );
+ (id, Some(error))
+ }
+
+ pub fn bind_group_layout_label<B: GfxBackend>(&self, id: id::BindGroupLayoutId) -> String {
+ B::hub(self).bind_group_layouts.label_for_resource(id)
+ }
+
+ pub fn bind_group_layout_drop<B: GfxBackend>(
+ &self,
+ bind_group_layout_id: id::BindGroupLayoutId,
+ ) {
+ span!(_guard, INFO, "BindGroupLayout::drop");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+ let device_id = {
+ let (mut bind_group_layout_guard, _) = hub.bind_group_layouts.write(&mut token);
+ match bind_group_layout_guard.get_mut(bind_group_layout_id) {
+ Ok(layout) => layout.device_id.value,
+ Err(InvalidId) => {
+ hub.bind_group_layouts
+ .unregister_locked(bind_group_layout_id, &mut *bind_group_layout_guard);
+ return;
+ }
+ }
+ };
+
+ let (device_guard, mut token) = hub.devices.read(&mut token);
+ device_guard[device_id]
+ .lock_life(&mut token)
+ .suspected_resources
+ .bind_group_layouts
+ .push(id::Valid(bind_group_layout_id));
+ }
+
+ pub fn device_create_pipeline_layout<B: GfxBackend>(
+ &self,
+ device_id: id::DeviceId,
+ desc: &binding_model::PipelineLayoutDescriptor,
+ id_in: Input<G, id::PipelineLayoutId>,
+ ) -> (
+ id::PipelineLayoutId,
+ Option<binding_model::CreatePipelineLayoutError>,
+ ) {
+ span!(_guard, INFO, "Device::create_pipeline_layout");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+
+ let (device_guard, mut token) = hub.devices.read(&mut token);
+ let error = loop {
+ let device = match device_guard.get(device_id) {
+ Ok(device) => device,
+ Err(_) => break DeviceError::Invalid.into(),
+ };
+
+ let layout = {
+ let (bgl_guard, _) = hub.bind_group_layouts.read(&mut token);
+ match device.create_pipeline_layout(device_id, desc, &*bgl_guard) {
+ Ok(layout) => layout,
+ Err(e) => break e,
+ }
+ };
+
+ let id = hub
+ .pipeline_layouts
+ .register_identity(id_in, layout, &mut token);
+ #[cfg(feature = "trace")]
+ if let Some(ref trace) = device.trace {
+ trace
+ .lock()
+ .add(trace::Action::CreatePipelineLayout(id.0, desc.clone()));
+ }
+ return (id.0, None);
+ };
+
+ let id =
+ hub.pipeline_layouts
+ .register_error(id_in, desc.label.borrow_or_default(), &mut token);
+ (id, Some(error))
+ }
+
+ pub fn pipeline_layout_label<B: GfxBackend>(&self, id: id::PipelineLayoutId) -> String {
+ B::hub(self).pipeline_layouts.label_for_resource(id)
+ }
+
+ pub fn pipeline_layout_drop<B: GfxBackend>(&self, pipeline_layout_id: id::PipelineLayoutId) {
+ span!(_guard, INFO, "PipelineLayout::drop");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+ let (device_id, ref_count) = {
+ let (mut pipeline_layout_guard, _) = hub.pipeline_layouts.write(&mut token);
+ match pipeline_layout_guard.get_mut(pipeline_layout_id) {
+ Ok(layout) => (
+ layout.device_id.value,
+ layout.life_guard.ref_count.take().unwrap(),
+ ),
+ Err(InvalidId) => {
+ hub.pipeline_layouts
+ .unregister_locked(pipeline_layout_id, &mut *pipeline_layout_guard);
+ return;
+ }
+ }
+ };
+
+ let (device_guard, mut token) = hub.devices.read(&mut token);
+ device_guard[device_id]
+ .lock_life(&mut token)
+ .suspected_resources
+ .pipeline_layouts
+ .push(Stored {
+ value: id::Valid(pipeline_layout_id),
+ ref_count,
+ });
+ }
+
+ pub fn device_create_bind_group<B: GfxBackend>(
+ &self,
+ device_id: id::DeviceId,
+ desc: &binding_model::BindGroupDescriptor,
+ id_in: Input<G, id::BindGroupId>,
+ ) -> (id::BindGroupId, Option<binding_model::CreateBindGroupError>) {
+ span!(_guard, INFO, "Device::create_bind_group");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+
+ let (device_guard, mut token) = hub.devices.read(&mut token);
+ let (bind_group_layout_guard, mut token) = hub.bind_group_layouts.read(&mut token);
+
+ let error = loop {
+ let device = match device_guard.get(device_id) {
+ Ok(device) => device,
+ Err(_) => break DeviceError::Invalid.into(),
+ };
+ let bind_group_layout = match bind_group_layout_guard.get(desc.layout) {
+ Ok(layout) => layout,
+ Err(_) => break binding_model::CreateBindGroupError::InvalidLayout,
+ };
+
+ let bind_group = match device.create_bind_group(
+ device_id,
+ bind_group_layout,
+ desc,
+ &hub,
+ &mut token,
+ ) {
+ Ok(bind_group) => bind_group,
+ Err(e) => break e,
+ };
+ let ref_count = bind_group.life_guard.add_ref();
+
+ let id = hub
+ .bind_groups
+ .register_identity(id_in, bind_group, &mut token);
+ tracing::debug!(
+ "Bind group {:?} {:#?}",
+ id,
+ hub.bind_groups.read(&mut token).0[id].used
+ );
+ #[cfg(feature = "trace")]
+ if let Some(ref trace) = device.trace {
+ trace
+ .lock()
+ .add(trace::Action::CreateBindGroup(id.0, desc.clone()));
+ }
+
+ device
+ .trackers
+ .lock()
+ .bind_groups
+ .init(id, ref_count, PhantomData)
+ .unwrap();
+ return (id.0, None);
+ };
+
+ let id = hub
+ .bind_groups
+ .register_error(id_in, desc.label.borrow_or_default(), &mut token);
+ (id, Some(error))
+ }
+
+ pub fn bind_group_label<B: GfxBackend>(&self, id: id::BindGroupId) -> String {
+ B::hub(self).bind_groups.label_for_resource(id)
+ }
+
+ pub fn bind_group_drop<B: GfxBackend>(&self, bind_group_id: id::BindGroupId) {
+ span!(_guard, INFO, "BindGroup::drop");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+
+ let device_id = {
+ let (mut bind_group_guard, _) = hub.bind_groups.write(&mut token);
+ match bind_group_guard.get_mut(bind_group_id) {
+ Ok(bind_group) => {
+ bind_group.life_guard.ref_count.take();
+ bind_group.device_id.value
+ }
+ Err(InvalidId) => {
+ hub.bind_groups
+ .unregister_locked(bind_group_id, &mut *bind_group_guard);
+ return;
+ }
+ }
+ };
+
+ let (device_guard, mut token) = hub.devices.read(&mut token);
+ device_guard[device_id]
+ .lock_life(&mut token)
+ .suspected_resources
+ .bind_groups
+ .push(id::Valid(bind_group_id));
+ }
+
+ pub fn device_create_shader_module<B: GfxBackend>(
+ &self,
+ device_id: id::DeviceId,
+ desc: &pipeline::ShaderModuleDescriptor,
+ id_in: Input<G, id::ShaderModuleId>,
+ ) -> (
+ id::ShaderModuleId,
+ Option<pipeline::CreateShaderModuleError>,
+ ) {
+ span!(_guard, INFO, "Device::create_shader_module");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+
+ let (device_guard, mut token) = hub.devices.read(&mut token);
+ let error = loop {
+ let device = match device_guard.get(device_id) {
+ Ok(device) => device,
+ Err(_) => break DeviceError::Invalid.into(),
+ };
+ let (shader, spv) = match device.create_shader_module(device_id, desc) {
+ Ok(pair) => pair,
+ Err(e) => break e,
+ };
+
+ let id = hub
+ .shader_modules
+ .register_identity(id_in, shader, &mut token);
+ #[cfg(feature = "trace")]
+ if let Some(ref trace) = device.trace {
+ let mut trace = trace.lock();
+ let data = trace.make_binary("spv", unsafe {
+ std::slice::from_raw_parts(spv.as_ptr() as *const u8, spv.len() * 4)
+ });
+ let label = desc.label.clone();
+ trace.add(trace::Action::CreateShaderModule {
+ id: id.0,
+ data,
+ label,
+ });
+ }
+
+ let _ = spv;
+ return (id.0, None);
+ };
+
+ let id =
+ hub.shader_modules
+ .register_error(id_in, desc.label.borrow_or_default(), &mut token);
+ (id, Some(error))
+ }
+
+ pub fn shader_module_label<B: GfxBackend>(&self, id: id::ShaderModuleId) -> String {
+ B::hub(self).shader_modules.label_for_resource(id)
+ }
+
+ pub fn shader_module_drop<B: GfxBackend>(&self, shader_module_id: id::ShaderModuleId) {
+ span!(_guard, INFO, "ShaderModule::drop");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+ let (device_guard, mut token) = hub.devices.read(&mut token);
+ let (module, _) = hub.shader_modules.unregister(shader_module_id, &mut token);
+ if let Some(module) = module {
+ let device = &device_guard[module.device_id.value];
+ #[cfg(feature = "trace")]
+ if let Some(ref trace) = device.trace {
+ trace
+ .lock()
+ .add(trace::Action::DestroyShaderModule(shader_module_id));
+ }
+ unsafe {
+ device.raw.destroy_shader_module(module.raw);
+ }
+ }
+ }
+
+ pub fn device_create_command_encoder<B: GfxBackend>(
+ &self,
+ device_id: id::DeviceId,
+ desc: &wgt::CommandEncoderDescriptor<Label>,
+ id_in: Input<G, id::CommandEncoderId>,
+ ) -> (id::CommandEncoderId, Option<command::CommandAllocatorError>) {
+ span!(_guard, INFO, "Device::create_command_encoder");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+
+ let (device_guard, mut token) = hub.devices.read(&mut token);
+ let error = loop {
+ let device = match device_guard.get(device_id) {
+ Ok(device) => device,
+ Err(_) => break DeviceError::Invalid.into(),
+ };
+
+ let dev_stored = Stored {
+ value: id::Valid(device_id),
+ ref_count: device.life_guard.add_ref(),
+ };
+
+ let mut command_buffer = match device.cmd_allocator.allocate(
+ dev_stored,
+ &device.raw,
+ device.limits.clone(),
+ device.private_features,
+ &desc.label,
+ #[cfg(feature = "trace")]
+ device.trace.is_some(),
+ ) {
+ Ok(cmd_buf) => cmd_buf,
+ Err(e) => break e,
+ };
+
+ unsafe {
+ let raw_command_buffer = command_buffer.raw.last_mut().unwrap();
+ if let Some(ref label) = desc.label {
+ device
+ .raw
+ .set_command_buffer_name(raw_command_buffer, label);
+ }
+ raw_command_buffer.begin_primary(hal::command::CommandBufferFlags::ONE_TIME_SUBMIT);
+ }
+
+ let id = hub
+ .command_buffers
+ .register_identity(id_in, command_buffer, &mut token);
+
+ return (id.0, None);
+ };
+
+ let id = B::hub(self).command_buffers.register_error(
+ id_in,
+ desc.label.borrow_or_default(),
+ &mut token,
+ );
+ (id, Some(error))
+ }
+
+ pub fn command_buffer_label<B: GfxBackend>(&self, id: id::CommandBufferId) -> String {
+ B::hub(self).command_buffers.label_for_resource(id)
+ }
+
+ pub fn command_encoder_drop<B: GfxBackend>(&self, command_encoder_id: id::CommandEncoderId) {
+ span!(_guard, INFO, "CommandEncoder::drop");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+
+ let (mut device_guard, mut token) = hub.devices.write(&mut token);
+ let (cmdbuf, _) = hub
+ .command_buffers
+ .unregister(command_encoder_id, &mut token);
+ if let Some(cmdbuf) = cmdbuf {
+ let device = &mut device_guard[cmdbuf.device_id.value];
+ device.untrack::<G>(&hub, &cmdbuf.trackers, &mut token);
+ device.cmd_allocator.discard(cmdbuf);
+ }
+ }
+
+ pub fn command_buffer_drop<B: GfxBackend>(&self, command_buffer_id: id::CommandBufferId) {
+ span!(_guard, INFO, "CommandBuffer::drop");
+ self.command_encoder_drop::<B>(command_buffer_id)
+ }
+
+ pub fn device_create_render_bundle_encoder(
+ &self,
+ device_id: id::DeviceId,
+ desc: &command::RenderBundleEncoderDescriptor,
+ ) -> (
+ id::RenderBundleEncoderId,
+ Option<command::CreateRenderBundleError>,
+ ) {
+ span!(_guard, INFO, "Device::create_render_bundle_encoder");
+ let (encoder, error) = match command::RenderBundleEncoder::new(desc, device_id, None) {
+ Ok(encoder) => (encoder, None),
+ Err(e) => (command::RenderBundleEncoder::dummy(device_id), Some(e)),
+ };
+ (Box::into_raw(Box::new(encoder)), error)
+ }
+
+ pub fn render_bundle_encoder_finish<B: GfxBackend>(
+ &self,
+ bundle_encoder: command::RenderBundleEncoder,
+ desc: &command::RenderBundleDescriptor,
+ id_in: Input<G, id::RenderBundleId>,
+ ) -> (id::RenderBundleId, Option<command::RenderBundleError>) {
+ span!(_guard, INFO, "RenderBundleEncoder::finish");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+ let (device_guard, mut token) = hub.devices.read(&mut token);
+
+ let error = loop {
+ let device = match device_guard.get(bundle_encoder.parent()) {
+ Ok(device) => device,
+ Err(_) => break command::RenderBundleError::INVALID_DEVICE,
+ };
+
+ let render_bundle = match bundle_encoder.finish(desc, device, &hub, &mut token) {
+ Ok(bundle) => bundle,
+ Err(e) => break e,
+ };
+
+ tracing::debug!("Render bundle {:?} = {:#?}", id_in, render_bundle.used);
+
+ let ref_count = render_bundle.life_guard.add_ref();
+ let id = hub
+ .render_bundles
+ .register_identity(id_in, render_bundle, &mut token);
+
+ #[cfg(feature = "trace")]
+ if let Some(ref trace) = device.trace {
+ let (bundle_guard, _) = hub.render_bundles.read(&mut token);
+ let bundle = &bundle_guard[id];
+ let label = desc.label.as_ref().map(|l| l.as_ref());
+ trace.lock().add(trace::Action::CreateRenderBundle {
+ id: id.0,
+ desc: trace::new_render_bundle_encoder_descriptor(label, &bundle.context),
+ base: bundle.to_base_pass(),
+ });
+ }
+
+ device
+ .trackers
+ .lock()
+ .bundles
+ .init(id, ref_count, PhantomData)
+ .unwrap();
+ return (id.0, None);
+ };
+
+ let id = B::hub(self).render_bundles.register_error(
+ id_in,
+ desc.label.borrow_or_default(),
+ &mut token,
+ );
+ (id, Some(error))
+ }
+
+ pub fn render_bundle_label<B: GfxBackend>(&self, id: id::RenderBundleId) -> String {
+ B::hub(self).render_bundles.label_for_resource(id)
+ }
+
+ pub fn render_bundle_drop<B: GfxBackend>(&self, render_bundle_id: id::RenderBundleId) {
+ span!(_guard, INFO, "RenderBundle::drop");
+ let hub = B::hub(self);
+ let mut token = Token::root();
+
+ let (device_guard, mut token) = hub.devices.read(&mut token);
+ let device_id = {
+ let (mut bundle_guard, _) = hub.render_bundles.write(&mut token);
+ match bundle_guard.get_mut(render_bundle_id) {
+ Ok(bundle) => {
+ bundle.life_guard.ref_count.take();
+ bundle.device_id.value
+ }
+ Err(InvalidId) => {
+ hub.render_bundles
+ .unregister_locked(render_bundle_id, &mut *bundle_guard);
+ return;
+ }
+ }
+ };
+
+ device_guard[device_id]
+ .lock_life(&mut token)
+ .suspected_resources
+ .render_bundles
+ .push(id::Valid(render_bundle_id));
+ }
+
+ pub fn device_create_render_pipeline<B: GfxBackend>(
+ &self,
+ device_id: id::DeviceId,
+ desc: &pipeline::RenderPipelineDescriptor,
+ id_in: Input<G, id::RenderPipelineId>,
+ implicit_pipeline_ids: Option<ImplicitPipelineIds<G>>,
+ ) -> (
+ id::RenderPipelineId,
+ pipeline::ImplicitBindGroupCount,
+ Option<pipeline::CreateRenderPipelineError>,
+ ) {
+ span!(_guard, INFO, "Device::create_render_pipeline");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+
+ let (device_guard, mut token) = hub.devices.read(&mut token);
+ let error = loop {
+ let device = match device_guard.get(device_id) {
+ Ok(device) => device,
+ Err(_) => break DeviceError::Invalid.into(),
+ };
+ let (pipeline, derived_bind_group_count, layout_id) = match device
+ .create_render_pipeline(device_id, desc, implicit_pipeline_ids, &hub, &mut token)
+ {
+ Ok(pair) => pair,
+ Err(e) => break e,
+ };
+
+ let id = hub
+ .render_pipelines
+ .register_identity(id_in, pipeline, &mut token);
+
+ #[cfg(feature = "trace")]
+ if let Some(ref trace) = device.trace {
+ trace.lock().add(trace::Action::CreateRenderPipeline(
+ id.0,
+ pipeline::RenderPipelineDescriptor {
+ layout: Some(layout_id),
+ ..desc.clone()
+ },
+ ));
+ }
+ let _ = layout_id;
+ return (id.0, derived_bind_group_count, None);
+ };
+
+ let id =
+ hub.render_pipelines
+ .register_error(id_in, desc.label.borrow_or_default(), &mut token);
+ (id, 0, Some(error))
+ }
+
+    /// Get the ID of one of the pipeline's bind group layouts. The returned ID
+    /// holds a refcount, which must be released by calling `bind_group_layout_drop`.
+ pub fn render_pipeline_get_bind_group_layout<B: GfxBackend>(
+ &self,
+ pipeline_id: id::RenderPipelineId,
+ index: u32,
+ id_in: Input<G, id::BindGroupLayoutId>,
+ ) -> (
+ id::BindGroupLayoutId,
+ Option<binding_model::GetBindGroupLayoutError>,
+ ) {
+ let hub = B::hub(self);
+ let mut token = Token::root();
+ let (pipeline_layout_guard, mut token) = hub.pipeline_layouts.read(&mut token);
+
+ let error = loop {
+ let (bgl_guard, mut token) = hub.bind_group_layouts.read(&mut token);
+ let (_, mut token) = hub.bind_groups.read(&mut token);
+ let (pipeline_guard, _) = hub.render_pipelines.read(&mut token);
+
+ let pipeline = match pipeline_guard.get(pipeline_id) {
+ Ok(pipeline) => pipeline,
+ Err(_) => break binding_model::GetBindGroupLayoutError::InvalidPipeline,
+ };
+ let id = match pipeline_layout_guard[pipeline.layout_id.value]
+ .bind_group_layout_ids
+ .get(index as usize)
+ {
+ Some(id) => id,
+ None => break binding_model::GetBindGroupLayoutError::InvalidGroupIndex(index),
+ };
+
+ bgl_guard[*id].multi_ref_count.inc();
+ return (id.0, None);
+ };
+
+ let id = hub
+ .bind_group_layouts
+ .register_error(id_in, "<derived>", &mut token);
+ (id, Some(error))
+ }
+
+ pub fn render_pipeline_label<B: GfxBackend>(&self, id: id::RenderPipelineId) -> String {
+ B::hub(self).render_pipelines.label_for_resource(id)
+ }
+
+ pub fn render_pipeline_drop<B: GfxBackend>(&self, render_pipeline_id: id::RenderPipelineId) {
+ span!(_guard, INFO, "RenderPipeline::drop");
+ let hub = B::hub(self);
+ let mut token = Token::root();
+ let (device_guard, mut token) = hub.devices.read(&mut token);
+
+ let (device_id, layout_id) = {
+ let (mut pipeline_guard, _) = hub.render_pipelines.write(&mut token);
+ match pipeline_guard.get_mut(render_pipeline_id) {
+ Ok(pipeline) => {
+ pipeline.life_guard.ref_count.take();
+ (pipeline.device_id.value, pipeline.layout_id.clone())
+ }
+ Err(InvalidId) => {
+ hub.render_pipelines
+ .unregister_locked(render_pipeline_id, &mut *pipeline_guard);
+ return;
+ }
+ }
+ };
+
+ let mut life_lock = device_guard[device_id].lock_life(&mut token);
+ life_lock
+ .suspected_resources
+ .render_pipelines
+ .push(id::Valid(render_pipeline_id));
+ life_lock
+ .suspected_resources
+ .pipeline_layouts
+ .push(layout_id);
+ }
+
+ pub fn device_create_compute_pipeline<B: GfxBackend>(
+ &self,
+ device_id: id::DeviceId,
+ desc: &pipeline::ComputePipelineDescriptor,
+ id_in: Input<G, id::ComputePipelineId>,
+ implicit_pipeline_ids: Option<ImplicitPipelineIds<G>>,
+ ) -> (
+ id::ComputePipelineId,
+ pipeline::ImplicitBindGroupCount,
+ Option<pipeline::CreateComputePipelineError>,
+ ) {
+ span!(_guard, INFO, "Device::create_compute_pipeline");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+
+ let (device_guard, mut token) = hub.devices.read(&mut token);
+ let error = loop {
+ let device = match device_guard.get(device_id) {
+ Ok(device) => device,
+ Err(_) => break DeviceError::Invalid.into(),
+ };
+ let (pipeline, derived_bind_group_count, layout_id) = match device
+ .create_compute_pipeline(device_id, desc, implicit_pipeline_ids, &hub, &mut token)
+ {
+ Ok(pair) => pair,
+ Err(e) => break e,
+ };
+
+ let id = hub
+ .compute_pipelines
+ .register_identity(id_in, pipeline, &mut token);
+
+ #[cfg(feature = "trace")]
+ if let Some(ref trace) = device.trace {
+ trace.lock().add(trace::Action::CreateComputePipeline(
+ id.0,
+ pipeline::ComputePipelineDescriptor {
+ layout: Some(layout_id),
+ ..desc.clone()
+ },
+ ));
+ }
+ let _ = layout_id;
+ return (id.0, derived_bind_group_count, None);
+ };
+
+ let id =
+ hub.compute_pipelines
+ .register_error(id_in, desc.label.borrow_or_default(), &mut token);
+ (id, 0, Some(error))
+ }
+
+    /// Get the ID of one of the pipeline's bind group layouts. The returned ID
+    /// holds a refcount, which must be released by calling `bind_group_layout_drop`.
+ pub fn compute_pipeline_get_bind_group_layout<B: GfxBackend>(
+ &self,
+ pipeline_id: id::ComputePipelineId,
+ index: u32,
+ id_in: Input<G, id::BindGroupLayoutId>,
+ ) -> (
+ id::BindGroupLayoutId,
+ Option<binding_model::GetBindGroupLayoutError>,
+ ) {
+ let hub = B::hub(self);
+ let mut token = Token::root();
+ let (pipeline_layout_guard, mut token) = hub.pipeline_layouts.read(&mut token);
+
+ let error = loop {
+ let (bgl_guard, mut token) = hub.bind_group_layouts.read(&mut token);
+ let (_, mut token) = hub.bind_groups.read(&mut token);
+ let (pipeline_guard, _) = hub.compute_pipelines.read(&mut token);
+
+ let pipeline = match pipeline_guard.get(pipeline_id) {
+ Ok(pipeline) => pipeline,
+ Err(_) => break binding_model::GetBindGroupLayoutError::InvalidPipeline,
+ };
+ let id = match pipeline_layout_guard[pipeline.layout_id.value]
+ .bind_group_layout_ids
+ .get(index as usize)
+ {
+ Some(id) => id,
+ None => break binding_model::GetBindGroupLayoutError::InvalidGroupIndex(index),
+ };
+
+ bgl_guard[*id].multi_ref_count.inc();
+ return (id.0, None);
+ };
+
+ let id = hub
+ .bind_group_layouts
+ .register_error(id_in, "<derived>", &mut token);
+ (id, Some(error))
+ }
+
+ pub fn compute_pipeline_label<B: GfxBackend>(&self, id: id::ComputePipelineId) -> String {
+ B::hub(self).compute_pipelines.label_for_resource(id)
+ }
+
+ pub fn compute_pipeline_drop<B: GfxBackend>(&self, compute_pipeline_id: id::ComputePipelineId) {
+ span!(_guard, INFO, "ComputePipeline::drop");
+ let hub = B::hub(self);
+ let mut token = Token::root();
+ let (device_guard, mut token) = hub.devices.read(&mut token);
+
+ let (device_id, layout_id) = {
+ let (mut pipeline_guard, _) = hub.compute_pipelines.write(&mut token);
+ match pipeline_guard.get_mut(compute_pipeline_id) {
+ Ok(pipeline) => {
+ pipeline.life_guard.ref_count.take();
+ (pipeline.device_id.value, pipeline.layout_id.clone())
+ }
+ Err(InvalidId) => {
+ hub.compute_pipelines
+ .unregister_locked(compute_pipeline_id, &mut *pipeline_guard);
+ return;
+ }
+ }
+ };
+
+ let mut life_lock = device_guard[device_id].lock_life(&mut token);
+ life_lock
+ .suspected_resources
+ .compute_pipelines
+ .push(id::Valid(compute_pipeline_id));
+ life_lock
+ .suspected_resources
+ .pipeline_layouts
+ .push(layout_id);
+ }
+
+ pub fn device_get_swap_chain_preferred_format<B: GfxBackend>(
+ &self,
+ _device_id: id::DeviceId,
+ ) -> Result<TextureFormat, InvalidDevice> {
+ span!(_guard, INFO, "Device::get_swap_chain_preferred_format");
+        //TODO: we can query the formats as done in `device_create_swapchain`,
+        // but it's not clear which format in the list to return.
+        // For now, return `Bgra8UnormSrgb`, which we know is supported everywhere.
+ Ok(TextureFormat::Bgra8UnormSrgb)
+ }
+
+ pub fn device_create_swap_chain<B: GfxBackend>(
+ &self,
+ device_id: id::DeviceId,
+ surface_id: id::SurfaceId,
+ desc: &wgt::SwapChainDescriptor,
+ ) -> Result<id::SwapChainId, swap_chain::CreateSwapChainError> {
+ span!(_guard, INFO, "Device::create_swap_chain");
+
+ fn validate_swap_chain_descriptor(
+ config: &mut hal::window::SwapchainConfig,
+ caps: &hal::window::SurfaceCapabilities,
+ ) {
+ let width = config.extent.width;
+ let height = config.extent.height;
+ if width < caps.extents.start().width
+ || width > caps.extents.end().width
+ || height < caps.extents.start().height
+ || height > caps.extents.end().height
+ {
+ tracing::warn!(
+ "Requested size {}x{} is outside of the supported range: {:?}",
+ width,
+ height,
+ caps.extents
+ );
+ }
+ if !caps.present_modes.contains(config.present_mode) {
+ tracing::warn!(
+ "Surface does not support present mode: {:?}, falling back to {:?}",
+ config.present_mode,
+ hal::window::PresentMode::FIFO
+ );
+ config.present_mode = hal::window::PresentMode::FIFO;
+ }
+ }
+
+ tracing::info!("creating swap chain {:?}", desc);
+ let hub = B::hub(self);
+ let mut token = Token::root();
+
+ let (mut surface_guard, mut token) = self.surfaces.write(&mut token);
+ let (adapter_guard, mut token) = hub.adapters.read(&mut token);
+ let (device_guard, mut token) = hub.devices.read(&mut token);
+ let (mut swap_chain_guard, _) = hub.swap_chains.write(&mut token);
+ let device = device_guard
+ .get(device_id)
+ .map_err(|_| DeviceError::Invalid)?;
+ let surface = surface_guard
+ .get_mut(surface_id)
+ .map_err(|_| swap_chain::CreateSwapChainError::InvalidSurface)?;
+
+ let (caps, formats) = {
+ let surface = B::get_surface_mut(surface);
+ let adapter = &adapter_guard[device.adapter_id.value];
+ let queue_family = &adapter.raw.queue_families[0];
+ if !surface.supports_queue_family(queue_family) {
+ return Err(swap_chain::CreateSwapChainError::UnsupportedQueueFamily);
+ }
+ let formats = surface.supported_formats(&adapter.raw.physical_device);
+ let caps = surface.capabilities(&adapter.raw.physical_device);
+ (caps, formats)
+ };
+ let num_frames = swap_chain::DESIRED_NUM_FRAMES
+ .max(*caps.image_count.start())
+ .min(*caps.image_count.end());
+ let mut config =
+ swap_chain::swap_chain_descriptor_to_hal(&desc, num_frames, device.private_features);
+ if let Some(formats) = formats {
+ if !formats.contains(&config.format) {
+ return Err(swap_chain::CreateSwapChainError::UnsupportedFormat {
+ requested: config.format,
+ available: formats,
+ });
+ }
+ }
+ validate_swap_chain_descriptor(&mut config, &caps);
+
+ unsafe {
+ B::get_surface_mut(surface)
+ .configure_swapchain(&device.raw, config)
+ .map_err(|err| match err {
+ hal::window::CreationError::OutOfMemory(_) => DeviceError::OutOfMemory,
+ hal::window::CreationError::DeviceLost(_) => DeviceError::Lost,
+ _ => panic!("failed to configure swap chain on creation: {}", err),
+ })?;
+ }
+
+ let sc_id = surface_id.to_swap_chain_id(B::VARIANT);
+ if let Some(sc) = swap_chain_guard.try_remove(sc_id) {
+ if !sc.acquired_view_id.is_none() {
+ return Err(swap_chain::CreateSwapChainError::SwapChainOutputExists);
+ }
+ unsafe {
+ device.raw.destroy_semaphore(sc.semaphore);
+ }
+ }
+ #[cfg(feature = "trace")]
+ if let Some(ref trace) = device.trace {
+ trace
+ .lock()
+ .add(Action::CreateSwapChain(sc_id, desc.clone()));
+ }
+
+ let swap_chain = swap_chain::SwapChain {
+ life_guard: LifeGuard::new("<SwapChain>"),
+ device_id: Stored {
+ value: id::Valid(device_id),
+ ref_count: device.life_guard.add_ref(),
+ },
+ desc: desc.clone(),
+ num_frames,
+ semaphore: device
+ .raw
+ .create_semaphore()
+ .or(Err(DeviceError::OutOfMemory))?,
+ acquired_view_id: None,
+ acquired_framebuffers: Vec::new(),
+ active_submission_index: 0,
+ };
+ swap_chain_guard.insert(sc_id, swap_chain);
+ Ok(sc_id)
+ }
+
+ #[cfg(feature = "replay")]
+    /// Only triage suspected resource IDs. This helps us avoid ID collisions
+    /// when creating new resources while re-playing a trace.
+ pub fn device_maintain_ids<B: GfxBackend>(
+ &self,
+ device_id: id::DeviceId,
+ ) -> Result<(), InvalidDevice> {
+ let hub = B::hub(self);
+ let mut token = Token::root();
+ let (device_guard, mut token) = hub.devices.read(&mut token);
+ let device = device_guard.get(device_id).map_err(|_| InvalidDevice)?;
+ device.lock_life(&mut token).triage_suspected(
+ &hub,
+ &device.trackers,
+ #[cfg(feature = "trace")]
+ None,
+ &mut token,
+ );
+ Ok(())
+ }
+
+ pub fn device_poll<B: GfxBackend>(
+ &self,
+ device_id: id::DeviceId,
+ force_wait: bool,
+ ) -> Result<(), WaitIdleError> {
+ span!(_guard, INFO, "Device::poll");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+ let callbacks = {
+ let (device_guard, mut token) = hub.devices.read(&mut token);
+ device_guard
+ .get(device_id)
+ .map_err(|_| DeviceError::Invalid)?
+ .maintain(&hub, force_wait, &mut token)?
+ };
+ fire_map_callbacks(callbacks);
+ Ok(())
+ }
+
+ fn poll_devices<B: GfxBackend>(
+ &self,
+ force_wait: bool,
+ callbacks: &mut Vec<BufferMapPendingCallback>,
+ ) -> Result<(), WaitIdleError> {
+ span!(_guard, INFO, "Device::poll_devices");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+ let (device_guard, mut token) = hub.devices.read(&mut token);
+ for (_, device) in device_guard.iter(B::VARIANT) {
+ let cbs = device.maintain(&hub, force_wait, &mut token)?;
+ callbacks.extend(cbs);
+ }
+ Ok(())
+ }
+
+ pub fn poll_all_devices(&self, force_wait: bool) -> Result<(), WaitIdleError> {
+ use crate::backend;
+ let mut callbacks = Vec::new();
+
+ #[cfg(vulkan)]
+ {
+ self.poll_devices::<backend::Vulkan>(force_wait, &mut callbacks)?;
+ }
+ #[cfg(metal)]
+ {
+ self.poll_devices::<backend::Metal>(force_wait, &mut callbacks)?;
+ }
+ #[cfg(dx12)]
+ {
+ self.poll_devices::<backend::Dx12>(force_wait, &mut callbacks)?;
+ }
+ #[cfg(dx11)]
+ {
+ self.poll_devices::<backend::Dx11>(force_wait, &mut callbacks)?;
+ }
+
+ fire_map_callbacks(callbacks);
+
+ Ok(())
+ }
+
+ pub fn device_label<B: GfxBackend>(&self, id: id::DeviceId) -> String {
+ B::hub(self).devices.label_for_resource(id)
+ }
+
+ pub fn device_drop<B: GfxBackend>(&self, device_id: id::DeviceId) {
+ span!(_guard, INFO, "Device::drop");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+ let (device, _) = hub.devices.unregister(device_id, &mut token);
+ if let Some(mut device) = device {
+ device.prepare_to_die();
+
+            // The adapter is only referenced by the device and itself.
+            // This isn't a robust way to destroy them; we should find a better one.
+ if device.adapter_id.ref_count.load() == 1 {
+ let (_adapter, _) = hub
+ .adapters
+ .unregister(device.adapter_id.value.0, &mut token);
+ }
+
+ device.dispose();
+ }
+ }
+
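+    /// Start an asynchronous mapping of `range` for host access. The callback
+    /// carried by `op` fires once the GPU is done with the buffer, which is
+    /// driven by `device_poll`. An illustrative (not normative) caller
+    /// sequence, assuming a `global: Global<G>` handle, a prepared `read_op`,
+    /// and valid `device_id`/`buffer_id`/`size`:
+    ///
+    /// ```ignore
+    /// global.buffer_map_async::<B>(buffer_id, 0..size, read_op)?;
+    /// global.device_poll::<B>(device_id, /*force_wait=*/ true)?;
+    /// let ptr = global.buffer_get_mapped_range::<B>(buffer_id, 0, None)?;
+    /// // ... access `size` bytes through `ptr` ...
+    /// global.buffer_unmap::<B>(buffer_id)?;
+    /// ```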
+ pub fn buffer_map_async<B: GfxBackend>(
+ &self,
+ buffer_id: id::BufferId,
+ range: Range<BufferAddress>,
+ op: resource::BufferMapOperation,
+ ) -> Result<(), resource::BufferAccessError> {
+ span!(_guard, INFO, "Device::buffer_map_async");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+ let (device_guard, mut token) = hub.devices.read(&mut token);
+ let (pub_usage, internal_use) = match op.host {
+ HostMap::Read => (wgt::BufferUsage::MAP_READ, resource::BufferUse::MAP_READ),
+ HostMap::Write => (wgt::BufferUsage::MAP_WRITE, resource::BufferUse::MAP_WRITE),
+ };
+
+ if range.start % wgt::COPY_BUFFER_ALIGNMENT != 0
+ || range.end % wgt::COPY_BUFFER_ALIGNMENT != 0
+ {
+ return Err(resource::BufferAccessError::UnalignedRange);
+ }
+
+ let (device_id, ref_count) = {
+ let (mut buffer_guard, _) = hub.buffers.write(&mut token);
+ let buffer = buffer_guard
+ .get_mut(buffer_id)
+ .map_err(|_| resource::BufferAccessError::Invalid)?;
+
+ check_buffer_usage(buffer.usage, pub_usage)?;
+ buffer.map_state = match buffer.map_state {
+ resource::BufferMapState::Init { .. } | resource::BufferMapState::Active { .. } => {
+ return Err(resource::BufferAccessError::AlreadyMapped);
+ }
+ resource::BufferMapState::Waiting(_) => {
+ op.call_error();
+ return Ok(());
+ }
+ resource::BufferMapState::Idle => {
+ resource::BufferMapState::Waiting(resource::BufferPendingMapping {
+ range,
+ op,
+ parent_ref_count: buffer.life_guard.add_ref(),
+ })
+ }
+ };
+ tracing::debug!("Buffer {:?} map state -> Waiting", buffer_id);
+
+ (buffer.device_id.value, buffer.life_guard.add_ref())
+ };
+
+ let device = &device_guard[device_id];
+ device.trackers.lock().buffers.change_replace(
+ id::Valid(buffer_id),
+ &ref_count,
+ (),
+ internal_use,
+ );
+
+ device
+ .lock_life(&mut token)
+ .map(id::Valid(buffer_id), ref_count);
+
+ Ok(())
+ }
+
+ pub fn buffer_get_mapped_range<B: GfxBackend>(
+ &self,
+ buffer_id: id::BufferId,
+ offset: BufferAddress,
+ _size: Option<BufferSize>,
+ ) -> Result<*mut u8, resource::BufferAccessError> {
+ span!(_guard, INFO, "Device::buffer_get_mapped_range");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+ let (buffer_guard, _) = hub.buffers.read(&mut token);
+ let buffer = buffer_guard
+ .get(buffer_id)
+ .map_err(|_| resource::BufferAccessError::Invalid)?;
+
+ match buffer.map_state {
+ resource::BufferMapState::Init { ptr, .. }
+ | resource::BufferMapState::Active { ptr, .. } => unsafe {
+ Ok(ptr.as_ptr().offset(offset as isize))
+ },
+ resource::BufferMapState::Idle | resource::BufferMapState::Waiting(_) => {
+ Err(resource::BufferAccessError::NotMapped)
+ }
+ }
+ }
+
+ pub fn buffer_unmap<B: GfxBackend>(
+ &self,
+ buffer_id: id::BufferId,
+ ) -> Result<(), resource::BufferAccessError> {
+ span!(_guard, INFO, "Device::buffer_unmap");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+
+ let (mut device_guard, mut token) = hub.devices.write(&mut token);
+ let (mut buffer_guard, _) = hub.buffers.write(&mut token);
+ let buffer = buffer_guard
+ .get_mut(buffer_id)
+ .map_err(|_| resource::BufferAccessError::Invalid)?;
+ let device = &mut device_guard[buffer.device_id.value];
+
+ tracing::debug!("Buffer {:?} map state -> Idle", buffer_id);
+ match mem::replace(&mut buffer.map_state, resource::BufferMapState::Idle) {
+ resource::BufferMapState::Init {
+ ptr,
+ stage_buffer,
+ stage_memory,
+ needs_flush,
+ } => {
+ #[cfg(feature = "trace")]
+ if let Some(ref trace) = device.trace {
+ let mut trace = trace.lock();
+ let data = trace.make_binary("bin", unsafe {
+ std::slice::from_raw_parts(ptr.as_ptr(), buffer.size as usize)
+ });
+ trace.add(trace::Action::WriteBuffer {
+ id: buffer_id,
+ data,
+ range: 0..buffer.size,
+ queued: true,
+ });
+ }
+ let _ = ptr;
+
+ if needs_flush {
+ stage_memory.flush_range(&device.raw, 0, None)?;
+ }
+
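+                // The buffer was initialized through a staging buffer; record
+                // a staging->buffer copy on the device's pending-writes command
+                // buffer. The barriers publish the host writes on the staging
+                // side and acquire transfer-write access on the destination.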
+ let &(ref buf_raw, _) = buffer
+ .raw
+ .as_ref()
+ .ok_or(resource::BufferAccessError::Destroyed)?;
+
+ buffer.life_guard.use_at(device.active_submission_index + 1);
+ let region = hal::command::BufferCopy {
+ src: 0,
+ dst: 0,
+ size: buffer.size,
+ };
+ let transition_src = hal::memory::Barrier::Buffer {
+ states: hal::buffer::Access::HOST_WRITE..hal::buffer::Access::TRANSFER_READ,
+ target: &stage_buffer,
+ range: hal::buffer::SubRange::WHOLE,
+ families: None,
+ };
+ let transition_dst = hal::memory::Barrier::Buffer {
+ states: hal::buffer::Access::empty()..hal::buffer::Access::TRANSFER_WRITE,
+ target: buf_raw,
+ range: hal::buffer::SubRange::WHOLE,
+ families: None,
+ };
+ unsafe {
+ let cmdbuf = device.borrow_pending_writes();
+ cmdbuf.pipeline_barrier(
+ hal::pso::PipelineStage::HOST..hal::pso::PipelineStage::TRANSFER,
+ hal::memory::Dependencies::empty(),
+ iter::once(transition_src).chain(iter::once(transition_dst)),
+ );
+ if buffer.size > 0 {
+ cmdbuf.copy_buffer(&stage_buffer, buf_raw, iter::once(region));
+ }
+ }
+ device
+ .pending_writes
+ .consume_temp(queue::TempResource::Buffer(stage_buffer), stage_memory);
+ device.pending_writes.dst_buffers.insert(buffer_id);
+ }
+ resource::BufferMapState::Idle => {
+ return Err(resource::BufferAccessError::NotMapped);
+ }
+ resource::BufferMapState::Waiting(_) => {}
+ resource::BufferMapState::Active {
+ ptr,
+ sub_range,
+ host,
+ } => {
+ if host == HostMap::Write {
+ #[cfg(feature = "trace")]
+ if let Some(ref trace) = device.trace {
+ let mut trace = trace.lock();
+ let size = sub_range.size_to(buffer.size);
+ let data = trace.make_binary("bin", unsafe {
+ std::slice::from_raw_parts(ptr.as_ptr(), size as usize)
+ });
+ trace.add(trace::Action::WriteBuffer {
+ id: buffer_id,
+ data,
+ range: sub_range.offset..sub_range.offset + size,
+ queued: false,
+ });
+ }
+ let _ = (ptr, sub_range);
+ }
+ unmap_buffer(&device.raw, buffer)?;
+ }
+ }
+ Ok(())
+ }
+}
diff --git a/gfx/wgpu/wgpu-core/src/device/queue.rs b/gfx/wgpu/wgpu-core/src/device/queue.rs
new file mode 100644
index 0000000000..ccd55b185e
--- /dev/null
+++ b/gfx/wgpu/wgpu-core/src/device/queue.rs
@@ -0,0 +1,696 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#[cfg(feature = "trace")]
+use crate::device::trace::Action;
+use crate::{
+ command::{
+ texture_copy_view_to_hal, validate_linear_texture_data, validate_texture_copy_range,
+ CommandAllocator, CommandBuffer, CopySide, TextureCopyView, TransferError, BITS_PER_BYTE,
+ },
+ conv,
+ device::{alloc, DeviceError, WaitIdleError},
+ hub::{GfxBackend, Global, GlobalIdentityHandlerFactory, Token},
+ id,
+ resource::{BufferAccessError, BufferMapState, BufferUse, TextureUse},
+ span, FastHashSet,
+};
+
+use hal::{command::CommandBuffer as _, device::Device as _, queue::CommandQueue as _};
+use smallvec::SmallVec;
+use std::{iter, ptr};
+use thiserror::Error;
+
+struct StagingData<B: hal::Backend> {
+ buffer: B::Buffer,
+ memory: alloc::MemoryBlock<B>,
+ cmdbuf: B::CommandBuffer,
+}
+
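+/// A raw buffer or image whose destruction has been deferred until the GPU
+/// stops using it; carried alongside its backing memory block until then.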
+#[derive(Debug)]
+pub enum TempResource<B: hal::Backend> {
+ Buffer(B::Buffer),
+ Image(B::Image),
+}
+
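+/// Bookkeeping for `queue_write_buffer`/`queue_write_texture` work that has
+/// been recorded but not yet submitted. `dst_buffers` and `dst_textures`
+/// remember the destinations so that destroying one of them can be deferred
+/// until the pending command buffer is flushed.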
+#[derive(Debug)]
+pub(crate) struct PendingWrites<B: hal::Backend> {
+ pub command_buffer: Option<B::CommandBuffer>,
+ pub temp_resources: Vec<(TempResource<B>, alloc::MemoryBlock<B>)>,
+ pub dst_buffers: FastHashSet<id::BufferId>,
+ pub dst_textures: FastHashSet<id::TextureId>,
+}
+
+impl<B: hal::Backend> PendingWrites<B> {
+ pub fn new() -> Self {
+ Self {
+ command_buffer: None,
+ temp_resources: Vec::new(),
+ dst_buffers: FastHashSet::default(),
+ dst_textures: FastHashSet::default(),
+ }
+ }
+
+ pub fn dispose(
+ self,
+ device: &B::Device,
+ cmd_allocator: &CommandAllocator<B>,
+ mem_allocator: &mut alloc::MemoryAllocator<B>,
+ ) {
+ if let Some(raw) = self.command_buffer {
+ cmd_allocator.discard_internal(raw);
+ }
+ for (resource, memory) in self.temp_resources {
+ mem_allocator.free(device, memory);
+ match resource {
+ TempResource::Buffer(buffer) => unsafe {
+ device.destroy_buffer(buffer);
+ },
+ TempResource::Image(image) => unsafe {
+ device.destroy_image(image);
+ },
+ }
+ }
+ }
+
+ pub fn consume_temp(&mut self, resource: TempResource<B>, memory: alloc::MemoryBlock<B>) {
+ self.temp_resources.push((resource, memory));
+ }
+
+ fn consume(&mut self, stage: StagingData<B>) {
+ self.temp_resources
+ .push((TempResource::Buffer(stage.buffer), stage.memory));
+ self.command_buffer = Some(stage.cmdbuf);
+ }
+
+ #[must_use]
+ fn finish(&mut self) -> Option<B::CommandBuffer> {
+ self.dst_buffers.clear();
+ self.dst_textures.clear();
+ self.command_buffer.take().map(|mut cmd_buf| unsafe {
+ cmd_buf.finish();
+ cmd_buf
+ })
+ }
+}
+
+impl<B: hal::Backend> super::Device<B> {
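+    /// Return the pending-writes command buffer, lazily creating it in the
+    /// recording state (primary, one-time-submit) on first use.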
+ pub fn borrow_pending_writes(&mut self) -> &mut B::CommandBuffer {
+ if self.pending_writes.command_buffer.is_none() {
+ let mut cmdbuf = self.cmd_allocator.allocate_internal();
+ unsafe {
+ cmdbuf.begin_primary(hal::command::CommandBufferFlags::ONE_TIME_SUBMIT);
+ }
+ self.pending_writes.command_buffer = Some(cmdbuf);
+ }
+ self.pending_writes.command_buffer.as_mut().unwrap()
+ }
+
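+    /// Allocate a host-visible staging buffer of `size` bytes (UPLOAD |
+    /// TRANSIENT memory) together with the command buffer that will carry
+    /// the copy into the destination resource.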
+ fn prepare_stage(&mut self, size: wgt::BufferAddress) -> Result<StagingData<B>, DeviceError> {
+ let mut buffer = unsafe {
+ self.raw
+ .create_buffer(size, hal::buffer::Usage::TRANSFER_SRC)
+ .map_err(|err| match err {
+ hal::buffer::CreationError::OutOfMemory(_) => DeviceError::OutOfMemory,
+ _ => panic!("failed to create staging buffer: {}", err),
+ })?
+ };
+ //TODO: do we need to transition into HOST_WRITE access first?
+ let requirements = unsafe {
+ self.raw.set_buffer_name(&mut buffer, "<write_buffer_temp>");
+ self.raw.get_buffer_requirements(&buffer)
+ };
+
+ let block = self.mem_allocator.lock().allocate(
+ &self.raw,
+ requirements,
+ gpu_alloc::UsageFlags::UPLOAD | gpu_alloc::UsageFlags::TRANSIENT,
+ )?;
+ block.bind_buffer(&self.raw, &mut buffer)?;
+
+ let cmdbuf = match self.pending_writes.command_buffer.take() {
+ Some(cmdbuf) => cmdbuf,
+ None => {
+ let mut cmdbuf = self.cmd_allocator.allocate_internal();
+ unsafe {
+ cmdbuf.begin_primary(hal::command::CommandBufferFlags::ONE_TIME_SUBMIT);
+ }
+ cmdbuf
+ }
+ };
+ Ok(StagingData {
+ buffer,
+ memory: block,
+ cmdbuf,
+ })
+ }
+}
+
+#[derive(Clone, Debug, Error)]
+pub enum QueueWriteError {
+ #[error(transparent)]
+ Queue(#[from] DeviceError),
+ #[error(transparent)]
+ Transfer(#[from] TransferError),
+}
+
+#[derive(Clone, Debug, Error)]
+pub enum QueueSubmitError {
+ #[error(transparent)]
+ Queue(#[from] DeviceError),
+ #[error("command buffer {0:?} is invalid")]
+ InvalidCommandBuffer(id::CommandBufferId),
+ #[error("buffer {0:?} is destroyed")]
+ DestroyedBuffer(id::BufferId),
+ #[error("texture {0:?} is destroyed")]
+ DestroyedTexture(id::TextureId),
+ #[error(transparent)]
+ Unmap(#[from] BufferAccessError),
+ #[error("swap chain output was dropped before the command buffer got submitted")]
+ SwapChainOutputDropped,
+ #[error("GPU got stuck :(")]
+ StuckGpu,
+}
+
+//TODO: move out common parts of write_xxx.
+
+impl<G: GlobalIdentityHandlerFactory> Global<G> {
+ pub fn queue_write_buffer<B: GfxBackend>(
+ &self,
+ queue_id: id::QueueId,
+ buffer_id: id::BufferId,
+ buffer_offset: wgt::BufferAddress,
+ data: &[u8],
+ ) -> Result<(), QueueWriteError> {
+ span!(_guard, INFO, "Queue::write_buffer");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+ let (mut device_guard, mut token) = hub.devices.write(&mut token);
+ let device = device_guard
+ .get_mut(queue_id)
+ .map_err(|_| DeviceError::Invalid)?;
+ let (buffer_guard, _) = hub.buffers.read(&mut token);
+
+ #[cfg(feature = "trace")]
+ if let Some(ref trace) = device.trace {
+ let mut trace = trace.lock();
+ let data_path = trace.make_binary("bin", data);
+ trace.add(Action::WriteBuffer {
+ id: buffer_id,
+ data: data_path,
+ range: buffer_offset..buffer_offset + data.len() as wgt::BufferAddress,
+ queued: true,
+ });
+ }
+
+ let data_size = data.len() as wgt::BufferAddress;
+ if data_size == 0 {
+ tracing::trace!("Ignoring write_buffer of size 0");
+ return Ok(());
+ }
+
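+        // Write the data into a fresh staging buffer, then record a GPU-side
+        // staging->destination copy below; the copy is carried out with a
+        // later queue submission.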
+ let mut stage = device.prepare_stage(data_size)?;
+ stage.memory.write_bytes(&device.raw, 0, data)?;
+
+ let mut trackers = device.trackers.lock();
+ let (dst, transition) = trackers
+ .buffers
+ .use_replace(&*buffer_guard, buffer_id, (), BufferUse::COPY_DST)
+ .map_err(TransferError::InvalidBuffer)?;
+ let &(ref dst_raw, _) = dst
+ .raw
+ .as_ref()
+ .ok_or(TransferError::InvalidBuffer(buffer_id))?;
+ if !dst.usage.contains(wgt::BufferUsage::COPY_DST) {
+ Err(TransferError::MissingCopyDstUsageFlag(
+ Some(buffer_id),
+ None,
+ ))?;
+ }
+ dst.life_guard.use_at(device.active_submission_index + 1);
+
+ if data_size % wgt::COPY_BUFFER_ALIGNMENT != 0 {
+ Err(TransferError::UnalignedCopySize(data_size))?
+ }
+ if buffer_offset % wgt::COPY_BUFFER_ALIGNMENT != 0 {
+ Err(TransferError::UnalignedBufferOffset(buffer_offset))?
+ }
+ if buffer_offset + data_size > dst.size {
+ Err(TransferError::BufferOverrun {
+ start_offset: buffer_offset,
+ end_offset: buffer_offset + data_size,
+ buffer_size: dst.size,
+ side: CopySide::Destination,
+ })?
+ }
+
+ let region = hal::command::BufferCopy {
+ src: 0,
+ dst: buffer_offset,
+ size: data.len() as _,
+ };
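+ // Make the host writes to the staging buffer visible to the transfer
+ // stage, and transition the destination buffer to COPY_DST if needed.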
+ unsafe {
+ stage.cmdbuf.pipeline_barrier(
+ super::all_buffer_stages()..hal::pso::PipelineStage::TRANSFER,
+ hal::memory::Dependencies::empty(),
+ iter::once(hal::memory::Barrier::Buffer {
+ states: hal::buffer::Access::HOST_WRITE..hal::buffer::Access::TRANSFER_READ,
+ target: &stage.buffer,
+ range: hal::buffer::SubRange::WHOLE,
+ families: None,
+ })
+ .chain(transition.map(|pending| pending.into_hal(dst))),
+ );
+ stage
+ .cmdbuf
+ .copy_buffer(&stage.buffer, dst_raw, iter::once(region));
+ }
+
+ device.pending_writes.consume(stage);
+ device.pending_writes.dst_buffers.insert(buffer_id);
+
+ Ok(())
+ }
+
+ pub fn queue_write_texture<B: GfxBackend>(
+ &self,
+ queue_id: id::QueueId,
+ destination: &TextureCopyView,
+ data: &[u8],
+ data_layout: &wgt::TextureDataLayout,
+ size: &wgt::Extent3d,
+ ) -> Result<(), QueueWriteError> {
+ span!(_guard, INFO, "Queue::write_texture");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+ let (mut device_guard, mut token) = hub.devices.write(&mut token);
+ let device = device_guard
+ .get_mut(queue_id)
+ .map_err(|_| DeviceError::Invalid)?;
+ let (texture_guard, _) = hub.textures.read(&mut token);
+ let (image_layers, image_range, image_offset) =
+ texture_copy_view_to_hal(destination, size, &*texture_guard)?;
+
+ #[cfg(feature = "trace")]
+ if let Some(ref trace) = device.trace {
+ let mut trace = trace.lock();
+ let data_path = trace.make_binary("bin", data);
+ trace.add(Action::WriteTexture {
+ to: destination.clone(),
+ data: data_path,
+ layout: data_layout.clone(),
+ size: *size,
+ });
+ }
+
+ if size.width == 0 || size.height == 0 || size.depth == 0 {
+ tracing::trace!("Ignoring write_texture of size 0");
+ return Ok(());
+ }
+
+ let texture_format = texture_guard.get(destination.texture).unwrap().format;
+ let bytes_per_block = conv::map_texture_format(texture_format, device.private_features)
+ .surface_desc()
+ .bits as u32
+ / BITS_PER_BYTE;
+ validate_linear_texture_data(
+ data_layout,
+ texture_format,
+ data.len() as wgt::BufferAddress,
+ CopySide::Source,
+ bytes_per_block as wgt::BufferAddress,
+ size,
+ )?;
+ let (block_width, block_height) = conv::texture_block_size(texture_format);
+ if !conv::is_valid_copy_dst_texture_format(texture_format) {
+ Err(TransferError::CopyToForbiddenTextureFormat(texture_format))?
+ }
+ let width_blocks = size.width / block_width;
+ let height_blocks = size.height / block_height;
+
+ let texel_rows_per_image = data_layout.rows_per_image;
+ let block_rows_per_image = data_layout.rows_per_image / block_height;
+
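+ // The staging row pitch must be a multiple of both the device's optimal
+ // buffer-copy pitch alignment and the block size in bytes, i.e. a
+ // multiple of their least common multiple.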
+ let bytes_per_row_alignment = get_lowest_common_denom(
+ device.hal_limits.optimal_buffer_copy_pitch_alignment as u32,
+ bytes_per_block,
+ );
+ let stage_bytes_per_row = align_to(bytes_per_block * width_blocks, bytes_per_row_alignment);
+
+ let block_rows_in_copy = (size.depth - 1) * block_rows_per_image + height_blocks;
+ let stage_size = stage_bytes_per_row as u64 * block_rows_in_copy as u64;
+ let mut stage = device.prepare_stage(stage_size)?;
+
+ let mut trackers = device.trackers.lock();
+ let (dst, transition) = trackers
+ .textures
+ .use_replace(
+ &*texture_guard,
+ destination.texture,
+ image_range,
+ TextureUse::COPY_DST,
+ )
+ .unwrap();
+ let &(ref dst_raw, _) = dst
+ .raw
+ .as_ref()
+ .ok_or(TransferError::InvalidTexture(destination.texture))?;
+
+ if !dst.usage.contains(wgt::TextureUsage::COPY_DST) {
+ Err(TransferError::MissingCopyDstUsageFlag(
+ None,
+ Some(destination.texture),
+ ))?
+ }
+ validate_texture_copy_range(
+ destination,
+ dst.format,
+ dst.kind,
+ CopySide::Destination,
+ size,
+ )?;
+ dst.life_guard.use_at(device.active_submission_index + 1);
+
+ let ptr = stage.memory.map(&device.raw, 0, stage_size)?;
+ unsafe {
+ //TODO: https://github.com/zakarumych/gpu-alloc/issues/13
+ if stage_bytes_per_row == data_layout.bytes_per_row {
+ // Fast path if the data is already aligned optimally.
+ ptr::copy_nonoverlapping(data.as_ptr(), ptr.as_ptr(), stage_size as usize);
+ } else {
+ // Copy row by row into the optimal alignment.
+ let copy_bytes_per_row =
+ stage_bytes_per_row.min(data_layout.bytes_per_row) as usize;
+ for layer in 0..size.depth {
+ let rows_offset = layer * block_rows_per_image;
+ for row in 0..height_blocks {
+ ptr::copy_nonoverlapping(
+ data.as_ptr().offset(
+ (rows_offset + row) as isize * data_layout.bytes_per_row as isize,
+ ),
+ ptr.as_ptr().offset(
+ (rows_offset + row) as isize * stage_bytes_per_row as isize,
+ ),
+ copy_bytes_per_row,
+ );
+ }
+ }
+ }
+ }
+ stage.memory.unmap(&device.raw);
+ if !stage.memory.is_coherent() {
+ stage.memory.flush_range(&device.raw, 0, None)?;
+ }
+
+ let region = hal::command::BufferImageCopy {
+ buffer_offset: 0,
+ buffer_width: (stage_bytes_per_row / bytes_per_block) * block_width,
+ buffer_height: texel_rows_per_image,
+ image_layers,
+ image_offset,
+ image_extent: conv::map_extent(size, dst.dimension),
+ };
+ unsafe {
+ stage.cmdbuf.pipeline_barrier(
+ super::all_image_stages() | hal::pso::PipelineStage::HOST
+ ..hal::pso::PipelineStage::TRANSFER,
+ hal::memory::Dependencies::empty(),
+ iter::once(hal::memory::Barrier::Buffer {
+ states: hal::buffer::Access::HOST_WRITE..hal::buffer::Access::TRANSFER_READ,
+ target: &stage.buffer,
+ range: hal::buffer::SubRange::WHOLE,
+ families: None,
+ })
+ .chain(transition.map(|pending| pending.into_hal(dst))),
+ );
+ stage.cmdbuf.copy_buffer_to_image(
+ &stage.buffer,
+ dst_raw,
+ hal::image::Layout::TransferDstOptimal,
+ iter::once(region),
+ );
+ }
+
+ device.pending_writes.consume(stage);
+ device
+ .pending_writes
+ .dst_textures
+ .insert(destination.texture);
+
+ Ok(())
+ }
+
+ pub fn queue_submit<B: GfxBackend>(
+ &self,
+ queue_id: id::QueueId,
+ command_buffer_ids: &[id::CommandBufferId],
+ ) -> Result<(), QueueSubmitError> {
+ span!(_guard, INFO, "Queue::submit");
+
+ let hub = B::hub(self);
+
+ let callbacks = {
+ let mut token = Token::root();
+ let (mut device_guard, mut token) = hub.devices.write(&mut token);
+ let device = device_guard
+ .get_mut(queue_id)
+ .map_err(|_| DeviceError::Invalid)?;
+ let pending_write_command_buffer = device.pending_writes.finish();
+ device.temp_suspected.clear();
+ device.active_submission_index += 1;
+ let submit_index = device.active_submission_index;
+
+ let fence = {
+ let mut signal_swapchain_semaphores = SmallVec::<[_; 1]>::new();
+ let (mut swap_chain_guard, mut token) = hub.swap_chains.write(&mut token);
+ let (mut command_buffer_guard, mut token) = hub.command_buffers.write(&mut token);
+
+ {
+ let (bind_group_guard, mut token) = hub.bind_groups.read(&mut token);
+ let (compute_pipe_guard, mut token) = hub.compute_pipelines.read(&mut token);
+ let (render_pipe_guard, mut token) = hub.render_pipelines.read(&mut token);
+ let (mut buffer_guard, mut token) = hub.buffers.write(&mut token);
+ let (texture_guard, mut token) = hub.textures.read(&mut token);
+ let (texture_view_guard, mut token) = hub.texture_views.read(&mut token);
+ let (sampler_guard, _) = hub.samplers.read(&mut token);
+
+ //Note: locking the trackers has to be done after the storages
+ let mut trackers = device.trackers.lock();
+
+ //TODO: if multiple command buffers are submitted, we can re-use the last
+ // native command buffer of the previous chain instead of always creating
+ // a temporary one, since the chains are not finished.
+
+ // finish all the command buffers first
+ for &cmb_id in command_buffer_ids {
+ let cmdbuf = command_buffer_guard
+ .get_mut(cmb_id)
+ .map_err(|_| QueueSubmitError::InvalidCommandBuffer(cmb_id))?;
+ #[cfg(feature = "trace")]
+ if let Some(ref trace) = device.trace {
+ trace.lock().add(Action::Submit(
+ submit_index,
+ cmdbuf.commands.take().unwrap(),
+ ));
+ }
+
+ if let Some((sc_id, fbo)) = cmdbuf.used_swap_chain.take() {
+ let sc = &mut swap_chain_guard[sc_id.value];
+ sc.active_submission_index = submit_index;
+ if sc.acquired_view_id.is_none() {
+ return Err(QueueSubmitError::SwapChainOutputDropped);
+ }
+ // For each swapchain, we only want to have at most 1 signaled semaphore.
+ if sc.acquired_framebuffers.is_empty() {
+ // Only add a signal if this is the first time for this swapchain
+ // to be used in the submission.
+ signal_swapchain_semaphores.push(sc_id.value);
+ }
+ sc.acquired_framebuffers.push(fbo);
+ }
+
+ // optimize the tracked states
+ cmdbuf.trackers.optimize();
+
+ // update submission IDs
+ for id in cmdbuf.trackers.buffers.used() {
+ let buffer = &mut buffer_guard[id];
+ if buffer.raw.is_none() {
+ return Err(QueueSubmitError::DestroyedBuffer(id.0));
+ }
+ if !buffer.life_guard.use_at(submit_index) {
+ if let BufferMapState::Active { .. } = buffer.map_state {
+ tracing::warn!("Dropped buffer has a pending mapping.");
+ super::unmap_buffer(&device.raw, buffer)?;
+ }
+ device.temp_suspected.buffers.push(id);
+ } else {
+ match buffer.map_state {
+ BufferMapState::Idle => (),
+ _ => panic!("Buffer {:?} is still mapped", id),
+ }
+ }
+ }
+ for id in cmdbuf.trackers.textures.used() {
+ let texture = &texture_guard[id];
+ if texture.raw.is_none() {
+ return Err(QueueSubmitError::DestroyedTexture(id.0));
+ }
+ if !texture.life_guard.use_at(submit_index) {
+ device.temp_suspected.textures.push(id);
+ }
+ }
+ for id in cmdbuf.trackers.views.used() {
+ if !texture_view_guard[id].life_guard.use_at(submit_index) {
+ device.temp_suspected.texture_views.push(id);
+ }
+ }
+ for id in cmdbuf.trackers.bind_groups.used() {
+ if !bind_group_guard[id].life_guard.use_at(submit_index) {
+ device.temp_suspected.bind_groups.push(id);
+ }
+ }
+ for id in cmdbuf.trackers.samplers.used() {
+ if !sampler_guard[id].life_guard.use_at(submit_index) {
+ device.temp_suspected.samplers.push(id);
+ }
+ }
+ for id in cmdbuf.trackers.compute_pipes.used() {
+ if !compute_pipe_guard[id].life_guard.use_at(submit_index) {
+ device.temp_suspected.compute_pipelines.push(id);
+ }
+ }
+ for id in cmdbuf.trackers.render_pipes.used() {
+ if !render_pipe_guard[id].life_guard.use_at(submit_index) {
+ device.temp_suspected.render_pipelines.push(id);
+ }
+ }
+
+ // execute resource transitions
+ let mut transit = device.cmd_allocator.extend(cmdbuf);
+ unsafe {
+ // The last native command buffer is still open; finish it before stitching.
+ cmdbuf.raw.last_mut().unwrap().finish();
+ transit
+ .begin_primary(hal::command::CommandBufferFlags::ONE_TIME_SUBMIT);
+ }
+ tracing::trace!("Stitching command buffer {:?} before submission", cmb_id);
+ CommandBuffer::insert_barriers(
+ &mut transit,
+ &mut *trackers,
+ &cmdbuf.trackers,
+ &*buffer_guard,
+ &*texture_guard,
+ );
+ unsafe {
+ transit.finish();
+ }
+ cmdbuf.raw.insert(0, transit);
+ }
+
+ tracing::trace!("Device after submission {}: {:#?}", submit_index, trackers);
+ }
+
+ // now prepare the GPU submission
+ let fence = device
+ .raw
+ .create_fence(false)
+ .or(Err(DeviceError::OutOfMemory))?;
+ let submission = hal::queue::Submission {
+ command_buffers: pending_write_command_buffer.as_ref().into_iter().chain(
+ command_buffer_ids
+ .iter()
+ .flat_map(|&cmb_id| &command_buffer_guard.get(cmb_id).unwrap().raw),
+ ),
+ wait_semaphores: Vec::new(),
+ signal_semaphores: signal_swapchain_semaphores
+ .into_iter()
+ .map(|sc_id| &swap_chain_guard[sc_id].semaphore),
+ };
+
+ unsafe {
+ device.queue_group.queues[0].submit(submission, Some(&fence));
+ }
+ fence
+ };
+
+ if let Some(comb_raw) = pending_write_command_buffer {
+ device
+ .cmd_allocator
+ .after_submit_internal(comb_raw, submit_index);
+ }
+
+ let callbacks = match device.maintain(&hub, false, &mut token) {
+ Ok(callbacks) => callbacks,
+ Err(WaitIdleError::Device(err)) => return Err(QueueSubmitError::Queue(err)),
+ Err(WaitIdleError::StuckGpu) => return Err(QueueSubmitError::StuckGpu),
+ };
+ super::Device::lock_life_internal(&device.life_tracker, &mut token).track_submission(
+ submit_index,
+ fence,
+ &device.temp_suspected,
+ device.pending_writes.temp_resources.drain(..),
+ );
+
+ // finally, return the command buffers to the allocator
+ for &cmb_id in command_buffer_ids {
+ if let (Some(cmd_buf), _) = hub.command_buffers.unregister(cmb_id, &mut token) {
+ device.cmd_allocator.after_submit(cmd_buf, submit_index);
+ }
+ }
+
+ callbacks
+ };
+
+ super::fire_map_callbacks(callbacks);
+
+ Ok(())
+ }
+}
+
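+/// Returns the least common multiple of `a` and `b` (the "lowest common
+/// denominator" of the two alignments), computed as `a * b / gcd(a, b)`.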
+fn get_lowest_common_denom(a: u32, b: u32) -> u32 {
+ let gcd = if a >= b {
+ get_greatest_common_divisor(a, b)
+ } else {
+ get_greatest_common_divisor(b, a)
+ };
+ a * b / gcd
+}
+
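+/// Euclid's algorithm; callers must ensure `a >= b` and `b > 0`.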
+fn get_greatest_common_divisor(mut a: u32, mut b: u32) -> u32 {
+ assert!(a >= b);
+ loop {
+ let c = a % b;
+ if c == 0 {
+ return b;
+ } else {
+ a = b;
+ b = c;
+ }
+ }
+}
+
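+/// Rounds `value` up to the nearest multiple of `alignment` (must be non-zero).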
+fn align_to(value: u32, alignment: u32) -> u32 {
+ match value % alignment {
+ 0 => value,
+ other => value - other + alignment,
+ }
+}
+
+#[test]
+fn test_lcd() {
+ assert_eq!(get_lowest_common_denom(2, 2), 2);
+ assert_eq!(get_lowest_common_denom(2, 3), 6);
+ assert_eq!(get_lowest_common_denom(6, 4), 12);
+}
+
+#[test]
+fn test_gcd() {
+ assert_eq!(get_greatest_common_divisor(5, 1), 1);
+ assert_eq!(get_greatest_common_divisor(4, 2), 2);
+ assert_eq!(get_greatest_common_divisor(6, 4), 2);
+ assert_eq!(get_greatest_common_divisor(7, 7), 7);
+}
diff --git a/gfx/wgpu/wgpu-core/src/device/trace.rs b/gfx/wgpu/wgpu-core/src/device/trace.rs
new file mode 100644
index 0000000000..8fbd08526d
--- /dev/null
+++ b/gfx/wgpu/wgpu-core/src/device/trace.rs
@@ -0,0 +1,192 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+use crate::id;
+use std::ops::Range;
+#[cfg(feature = "trace")]
+use std::{borrow::Cow, io::Write as _};
+
+//TODO: consider a readable Id that doesn't include the backend
+
+type FileName = String;
+
+pub const FILE_NAME: &str = "trace.ron";
+
+#[cfg(feature = "trace")]
+pub(crate) fn new_render_bundle_encoder_descriptor<'a>(
+ label: Option<&'a str>,
+ context: &'a super::RenderPassContext,
+) -> crate::command::RenderBundleEncoderDescriptor<'a> {
+ crate::command::RenderBundleEncoderDescriptor {
+ label: label.map(Cow::Borrowed),
+ color_formats: Cow::Borrowed(&context.attachments.colors),
+ depth_stencil_format: context.attachments.depth_stencil,
+ sample_count: context.sample_count as u32,
+ }
+}
+
+#[derive(Debug)]
+#[cfg_attr(feature = "trace", derive(serde::Serialize))]
+#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
+pub enum Action<'a> {
+ Init {
+ desc: crate::device::DeviceDescriptor<'a>,
+ backend: wgt::Backend,
+ },
+ CreateBuffer(id::BufferId, crate::resource::BufferDescriptor<'a>),
+ FreeBuffer(id::BufferId),
+ DestroyBuffer(id::BufferId),
+ CreateTexture(id::TextureId, crate::resource::TextureDescriptor<'a>),
+ FreeTexture(id::TextureId),
+ DestroyTexture(id::TextureId),
+ CreateTextureView {
+ id: id::TextureViewId,
+ parent_id: id::TextureId,
+ desc: crate::resource::TextureViewDescriptor<'a>,
+ },
+ DestroyTextureView(id::TextureViewId),
+ CreateSampler(id::SamplerId, crate::resource::SamplerDescriptor<'a>),
+ DestroySampler(id::SamplerId),
+ CreateSwapChain(id::SwapChainId, wgt::SwapChainDescriptor),
+ GetSwapChainTexture {
+ id: Option<id::TextureViewId>,
+ parent_id: id::SwapChainId,
+ },
+ PresentSwapChain(id::SwapChainId),
+ CreateBindGroupLayout(
+ id::BindGroupLayoutId,
+ crate::binding_model::BindGroupLayoutDescriptor<'a>,
+ ),
+ DestroyBindGroupLayout(id::BindGroupLayoutId),
+ CreatePipelineLayout(
+ id::PipelineLayoutId,
+ crate::binding_model::PipelineLayoutDescriptor<'a>,
+ ),
+ DestroyPipelineLayout(id::PipelineLayoutId),
+ CreateBindGroup(
+ id::BindGroupId,
+ crate::binding_model::BindGroupDescriptor<'a>,
+ ),
+ DestroyBindGroup(id::BindGroupId),
+ CreateShaderModule {
+ id: id::ShaderModuleId,
+ label: crate::Label<'a>,
+ data: FileName,
+ },
+ DestroyShaderModule(id::ShaderModuleId),
+ CreateComputePipeline(
+ id::ComputePipelineId,
+ crate::pipeline::ComputePipelineDescriptor<'a>,
+ ),
+ DestroyComputePipeline(id::ComputePipelineId),
+ CreateRenderPipeline(
+ id::RenderPipelineId,
+ crate::pipeline::RenderPipelineDescriptor<'a>,
+ ),
+ DestroyRenderPipeline(id::RenderPipelineId),
+ CreateRenderBundle {
+ id: id::RenderBundleId,
+ desc: crate::command::RenderBundleEncoderDescriptor<'a>,
+ base: crate::command::BasePass<crate::command::RenderCommand>,
+ },
+ DestroyRenderBundle(id::RenderBundleId),
+ WriteBuffer {
+ id: id::BufferId,
+ data: FileName,
+ range: Range<wgt::BufferAddress>,
+ queued: bool,
+ },
+ WriteTexture {
+ to: crate::command::TextureCopyView,
+ data: FileName,
+ layout: wgt::TextureDataLayout,
+ size: wgt::Extent3d,
+ },
+ Submit(crate::SubmissionIndex, Vec<Command>),
+}
+
+#[derive(Debug)]
+#[cfg_attr(feature = "trace", derive(serde::Serialize))]
+#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
+pub enum Command {
+ CopyBufferToBuffer {
+ src: id::BufferId,
+ src_offset: wgt::BufferAddress,
+ dst: id::BufferId,
+ dst_offset: wgt::BufferAddress,
+ size: wgt::BufferAddress,
+ },
+ CopyBufferToTexture {
+ src: crate::command::BufferCopyView,
+ dst: crate::command::TextureCopyView,
+ size: wgt::Extent3d,
+ },
+ CopyTextureToBuffer {
+ src: crate::command::TextureCopyView,
+ dst: crate::command::BufferCopyView,
+ size: wgt::Extent3d,
+ },
+ CopyTextureToTexture {
+ src: crate::command::TextureCopyView,
+ dst: crate::command::TextureCopyView,
+ size: wgt::Extent3d,
+ },
+ RunComputePass {
+ base: crate::command::BasePass<crate::command::ComputeCommand>,
+ },
+ RunRenderPass {
+ base: crate::command::BasePass<crate::command::RenderCommand>,
+ target_colors: Vec<crate::command::ColorAttachmentDescriptor>,
+ target_depth_stencil: Option<crate::command::DepthStencilAttachmentDescriptor>,
+ },
+}
+
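+/// Recorder that serializes `Action`s as a RON list into `trace.ron`,
+/// writing large binary payloads to numbered side files (`data1.bin`, ...)
+/// via `make_binary`.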
+#[cfg(feature = "trace")]
+#[derive(Debug)]
+pub struct Trace {
+ path: std::path::PathBuf,
+ file: std::fs::File,
+ config: ron::ser::PrettyConfig,
+ binary_id: usize,
+}
+
+#[cfg(feature = "trace")]
+impl Trace {
+ pub fn new(path: &std::path::Path) -> Result<Self, std::io::Error> {
+ tracing::info!("Tracing into '{:?}'", path);
+ let mut file = std::fs::File::create(path.join(FILE_NAME))?;
+ file.write_all(b"[\n")?;
+ Ok(Self {
+ path: path.to_path_buf(),
+ file,
+ config: ron::ser::PrettyConfig::default(),
+ binary_id: 0,
+ })
+ }
+
+ pub fn make_binary(&mut self, kind: &str, data: &[u8]) -> String {
+ self.binary_id += 1;
+ let name = format!("data{}.{}", self.binary_id, kind);
+ let _ = std::fs::write(self.path.join(&name), data);
+ name
+ }
+
+ pub(crate) fn add(&mut self, action: Action) {
+ match ron::ser::to_string_pretty(&action, self.config.clone()) {
+ Ok(string) => {
+ let _ = writeln!(self.file, "{},", string);
+ }
+ Err(e) => {
+ tracing::warn!("RON serialization failure: {:?}", e);
+ }
+ }
+ }
+}
+
+#[cfg(feature = "trace")]
+impl Drop for Trace {
+ fn drop(&mut self) {
+ let _ = self.file.write_all(b"]");
+ }
+}
diff --git a/gfx/wgpu/wgpu-core/src/hub.rs b/gfx/wgpu/wgpu-core/src/hub.rs
new file mode 100644
index 0000000000..33fa6e0966
--- /dev/null
+++ b/gfx/wgpu/wgpu-core/src/hub.rs
@@ -0,0 +1,866 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+use crate::{
+ backend,
+ binding_model::{BindGroup, BindGroupLayout, PipelineLayout},
+ command::{CommandBuffer, RenderBundle},
+ device::Device,
+ id::{
+ AdapterId, BindGroupId, BindGroupLayoutId, BufferId, CommandBufferId, ComputePipelineId,
+ DeviceId, PipelineLayoutId, RenderBundleId, RenderPipelineId, SamplerId, ShaderModuleId,
+ SurfaceId, SwapChainId, TextureId, TextureViewId, TypedId, Valid,
+ },
+ instance::{Adapter, Instance, Surface},
+ pipeline::{ComputePipeline, RenderPipeline, ShaderModule},
+ resource::{Buffer, Sampler, Texture, TextureView},
+ span,
+ swap_chain::SwapChain,
+ Epoch, Index,
+};
+
+use parking_lot::{Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard};
+use wgt::Backend;
+
+#[cfg(debug_assertions)]
+use std::cell::Cell;
+use std::{fmt::Debug, marker::PhantomData, ops, thread};
+
+/// A simple structure to manage identities of objects.
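+///
+/// Freed indices are recycled through the `free` list, and every index has
+/// an associated `Epoch` that is bumped on each `free`, so a stale `Id`
+/// (same index, older epoch) is detected if it is ever used again.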
+#[derive(Debug)]
+pub struct IdentityManager {
+ free: Vec<Index>,
+ epochs: Vec<Epoch>,
+}
+
+impl Default for IdentityManager {
+ fn default() -> Self {
+ Self {
+ free: Default::default(),
+ epochs: Default::default(),
+ }
+ }
+}
+
+impl IdentityManager {
+ pub fn from_index(min_index: u32) -> Self {
+ Self {
+ free: (0..min_index).collect(),
+ epochs: vec![1; min_index as usize],
+ }
+ }
+
+ pub fn alloc<I: TypedId>(&mut self, backend: Backend) -> I {
+ match self.free.pop() {
+ Some(index) => I::zip(index, self.epochs[index as usize], backend),
+ None => {
+ let epoch = 1;
+ let id = I::zip(self.epochs.len() as Index, epoch, backend);
+ self.epochs.push(epoch);
+ id
+ }
+ }
+ }
+
+ pub fn free<I: TypedId + Debug>(&mut self, id: I) {
+ let (index, epoch, _backend) = id.unzip();
+ // avoid doing this check in release
+ if cfg!(debug_assertions) {
+ assert!(!self.free.contains(&index));
+ }
+ let pe = &mut self.epochs[index as usize];
+ assert_eq!(*pe, epoch);
+ *pe += 1;
+ self.free.push(index);
+ }
+}
+
+#[derive(Debug)]
+enum Element<T> {
+ Vacant,
+ Occupied(T, Epoch),
+ Error(Epoch, String),
+}
+
+#[derive(Clone, Debug)]
+pub(crate) struct InvalidId;
+
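+/// Sparse storage of `T` objects, addressed by the index half of an ID.
+/// Each occupied (or error) slot records the `Epoch` it was created with,
+/// so lookups can detect IDs that have outlived their object.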
+#[derive(Debug)]
+pub struct Storage<T, I: TypedId> {
+ map: Vec<Element<T>>,
+ kind: &'static str,
+ _phantom: PhantomData<I>,
+}
+
+impl<T, I: TypedId> ops::Index<Valid<I>> for Storage<T, I> {
+ type Output = T;
+ fn index(&self, id: Valid<I>) -> &T {
+ self.get(id.0).unwrap()
+ }
+}
+
+impl<T, I: TypedId> ops::IndexMut<Valid<I>> for Storage<T, I> {
+ fn index_mut(&mut self, id: Valid<I>) -> &mut T {
+ self.get_mut(id.0).unwrap()
+ }
+}
+
+impl<T, I: TypedId> Storage<T, I> {
+ pub(crate) fn contains(&self, id: I) -> bool {
+ let (index, epoch, _) = id.unzip();
+ match self.map[index as usize] {
+ Element::Vacant => false,
+ Element::Occupied(_, storage_epoch) | Element::Error(storage_epoch, ..) => {
+ epoch == storage_epoch
+ }
+ }
+ }
+
+ /// Get a reference to an item behind a potentially invalid ID.
+ /// Panics if there is an epoch mismatch, or the entry is empty.
+ pub(crate) fn get(&self, id: I) -> Result<&T, InvalidId> {
+ let (index, epoch, _) = id.unzip();
+ let (result, storage_epoch) = match self.map[index as usize] {
+ Element::Occupied(ref v, epoch) => (Ok(v), epoch),
+ Element::Vacant => panic!("{}[{}] does not exist", self.kind, index),
+ Element::Error(epoch, ..) => (Err(InvalidId), epoch),
+ };
+ assert_eq!(
+ epoch, storage_epoch,
+ "{}[{}] is no longer alive",
+ self.kind, index
+ );
+ result
+ }
+
+ /// Get a mutable reference to an item behind a potentially invalid ID.
+ /// Panics if there is an epoch mismatch, or the entry is empty.
+ pub(crate) fn get_mut(&mut self, id: I) -> Result<&mut T, InvalidId> {
+ let (index, epoch, _) = id.unzip();
+ let (result, storage_epoch) = match self.map[index as usize] {
+ Element::Occupied(ref mut v, epoch) => (Ok(v), epoch),
+ Element::Vacant => panic!("{}[{}] does not exist", self.kind, index),
+ Element::Error(epoch, ..) => (Err(InvalidId), epoch),
+ };
+ assert_eq!(
+ epoch, storage_epoch,
+ "{}[{}] is no longer alive",
+ self.kind, index
+ );
+ result
+ }
+
+ pub(crate) fn label_for_invalid_id(&self, id: I) -> &str {
+ let (index, _, _) = id.unzip();
+ match self.map[index as usize] {
+ Element::Error(_, ref label) => label,
+ _ => "",
+ }
+ }
+
+ fn insert_impl(&mut self, index: usize, element: Element<T>) {
+ if index >= self.map.len() {
+ self.map.resize_with(index + 1, || Element::Vacant);
+ }
+ match std::mem::replace(&mut self.map[index], element) {
+ Element::Vacant => {}
+ _ => panic!("Index {:?} is already occupied", index),
+ }
+ }
+
+ pub(crate) fn insert(&mut self, id: I, value: T) {
+ let (index, epoch, _) = id.unzip();
+ self.insert_impl(index as usize, Element::Occupied(value, epoch))
+ }
+
+ pub(crate) fn insert_error(&mut self, id: I, label: &str) {
+ let (index, epoch, _) = id.unzip();
+ self.insert_impl(index as usize, Element::Error(epoch, label.to_string()))
+ }
+
+ pub(crate) fn remove(&mut self, id: I) -> Option<T> {
+ let (index, epoch, _) = id.unzip();
+ match std::mem::replace(&mut self.map[index as usize], Element::Vacant) {
+ Element::Occupied(value, storage_epoch) => {
+ assert_eq!(epoch, storage_epoch);
+ Some(value)
+ }
+ Element::Error(..) => None,
+ Element::Vacant => panic!("Cannot remove a vacant resource"),
+ }
+ }
+
+ // Prevents panic on out of range access, allows Vacant elements.
+ pub(crate) fn try_remove(&mut self, id: I) -> Option<T> {
+ let (index, epoch, _) = id.unzip();
+ if index as usize >= self.map.len() {
+ None
+ } else if let Element::Occupied(value, storage_epoch) =
+ std::mem::replace(&mut self.map[index as usize], Element::Vacant)
+ {
+ assert_eq!(epoch, storage_epoch);
+ Some(value)
+ } else {
+ None
+ }
+ }
+
+ pub(crate) fn iter(&self, backend: Backend) -> impl Iterator<Item = (I, &T)> {
+ self.map
+ .iter()
+ .enumerate()
+ .filter_map(move |(index, x)| match *x {
+ Element::Occupied(ref value, storage_epoch) => {
+ Some((I::zip(index as Index, storage_epoch, backend), value))
+ }
+ _ => None,
+ })
+ }
+}
+
+/// Type system for enforcing the lock order on shared Hub structures.
+/// If type `A` implements `Access<B>`, we are allowed to proceed with
+/// locking resource `B` after having locked `A`.
+///
+/// The implementations describe the edges of a directed graph of lock
+/// transitions. As long as that graph contains no cycles, multiple threads
+/// can walk it concurrently without deadlock: at any time, some path always
+/// has a next resource that is not locked by another path.
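+///
+/// For example (an illustrative sketch of the usage pattern, not code from
+/// this module):
+///
+/// ```text
+/// let mut token = Token::root();
+/// // Root implements Access<Device<B>>:
+/// let (device_guard, mut token) = hub.devices.read(&mut token);
+/// // Device<B> implements Access<Buffer<B>>:
+/// let (buffer_guard, _) = hub.buffers.read(&mut token);
+/// ```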
+pub trait Access<B> {}
+
+pub enum Root {}
+//TODO: establish an order instead of declaring all the pairs.
+impl Access<Instance> for Root {}
+impl Access<Surface> for Root {}
+impl Access<Surface> for Instance {}
+impl<B: hal::Backend> Access<Adapter<B>> for Root {}
+impl<B: hal::Backend> Access<Adapter<B>> for Surface {}
+impl<B: hal::Backend> Access<Device<B>> for Root {}
+impl<B: hal::Backend> Access<Device<B>> for Surface {}
+impl<B: hal::Backend> Access<Device<B>> for Adapter<B> {}
+impl<B: hal::Backend> Access<SwapChain<B>> for Root {}
+impl<B: hal::Backend> Access<SwapChain<B>> for Device<B> {}
+impl<B: hal::Backend> Access<PipelineLayout<B>> for Root {}
+impl<B: hal::Backend> Access<PipelineLayout<B>> for Device<B> {}
+impl<B: hal::Backend> Access<PipelineLayout<B>> for RenderBundle {}
+impl<B: hal::Backend> Access<BindGroupLayout<B>> for Root {}
+impl<B: hal::Backend> Access<BindGroupLayout<B>> for Device<B> {}
+impl<B: hal::Backend> Access<BindGroupLayout<B>> for PipelineLayout<B> {}
+impl<B: hal::Backend> Access<BindGroup<B>> for Root {}
+impl<B: hal::Backend> Access<BindGroup<B>> for Device<B> {}
+impl<B: hal::Backend> Access<BindGroup<B>> for BindGroupLayout<B> {}
+impl<B: hal::Backend> Access<BindGroup<B>> for PipelineLayout<B> {}
+impl<B: hal::Backend> Access<BindGroup<B>> for CommandBuffer<B> {}
+impl<B: hal::Backend> Access<CommandBuffer<B>> for Root {}
+impl<B: hal::Backend> Access<CommandBuffer<B>> for Device<B> {}
+impl<B: hal::Backend> Access<CommandBuffer<B>> for SwapChain<B> {}
+impl<B: hal::Backend> Access<RenderBundle> for Device<B> {}
+impl<B: hal::Backend> Access<RenderBundle> for CommandBuffer<B> {}
+impl<B: hal::Backend> Access<ComputePipeline<B>> for Device<B> {}
+impl<B: hal::Backend> Access<ComputePipeline<B>> for BindGroup<B> {}
+impl<B: hal::Backend> Access<RenderPipeline<B>> for Device<B> {}
+impl<B: hal::Backend> Access<RenderPipeline<B>> for BindGroup<B> {}
+impl<B: hal::Backend> Access<RenderPipeline<B>> for ComputePipeline<B> {}
+impl<B: hal::Backend> Access<ShaderModule<B>> for Device<B> {}
+impl<B: hal::Backend> Access<ShaderModule<B>> for BindGroupLayout<B> {}
+impl<B: hal::Backend> Access<Buffer<B>> for Root {}
+impl<B: hal::Backend> Access<Buffer<B>> for Device<B> {}
+impl<B: hal::Backend> Access<Buffer<B>> for BindGroupLayout<B> {}
+impl<B: hal::Backend> Access<Buffer<B>> for BindGroup<B> {}
+impl<B: hal::Backend> Access<Buffer<B>> for CommandBuffer<B> {}
+impl<B: hal::Backend> Access<Buffer<B>> for ComputePipeline<B> {}
+impl<B: hal::Backend> Access<Buffer<B>> for RenderPipeline<B> {}
+impl<B: hal::Backend> Access<Texture<B>> for Root {}
+impl<B: hal::Backend> Access<Texture<B>> for Device<B> {}
+impl<B: hal::Backend> Access<Texture<B>> for Buffer<B> {}
+impl<B: hal::Backend> Access<TextureView<B>> for Root {}
+impl<B: hal::Backend> Access<TextureView<B>> for SwapChain<B> {}
+impl<B: hal::Backend> Access<TextureView<B>> for Device<B> {}
+impl<B: hal::Backend> Access<TextureView<B>> for Texture<B> {}
+impl<B: hal::Backend> Access<Sampler<B>> for Root {}
+impl<B: hal::Backend> Access<Sampler<B>> for Device<B> {}
+impl<B: hal::Backend> Access<Sampler<B>> for TextureView<B> {}
+
+#[cfg(debug_assertions)]
+thread_local! {
+ static ACTIVE_TOKEN: Cell<u8> = Cell::new(0);
+}
+
+/// A permission token to lock resource `T` or anything after it,
+/// as defined by the `Access` implementations.
+///
+/// Note: there can only be one non-borrowed `Token` alive on a thread
+/// at a time, which is enforced by `ACTIVE_TOKEN`.
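+///
+/// `Registry::read` and `Registry::write` borrow the current token mutably
+/// and return a narrower `Token<T>`, so the sequence of locks a thread
+/// holds always follows the `Access` edges.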
+pub struct Token<'a, T: 'a> {
+ level: PhantomData<&'a T>,
+}
+
+impl<'a, T> Token<'a, T> {
+ fn new() -> Self {
+ #[cfg(debug_assertions)]
+ ACTIVE_TOKEN.with(|active| {
+ let old = active.get();
+ assert_ne!(old, 0, "Root token was dropped");
+ active.set(old + 1);
+ });
+ Self { level: PhantomData }
+ }
+}
+
+impl Token<'static, Root> {
+ pub fn root() -> Self {
+ #[cfg(debug_assertions)]
+ ACTIVE_TOKEN.with(|active| {
+ assert_eq!(0, active.replace(1), "Root token is already active");
+ });
+
+ Self { level: PhantomData }
+ }
+}
+
+impl<'a, T> Drop for Token<'a, T> {
+ fn drop(&mut self) {
+ #[cfg(debug_assertions)]
+ ACTIVE_TOKEN.with(|active| {
+ let old = active.get();
+ active.set(old - 1);
+ });
+ }
+}
+
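+/// Converts raw identity inputs into concrete IDs and releases them again.
+/// With the `Mutex<IdentityManager>` filter below, the input is only a
+/// `PhantomData` marker and IDs are allocated internally.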
+pub trait IdentityHandler<I>: Debug {
+ type Input: Clone + Debug;
+ fn process(&self, id: Self::Input, backend: Backend) -> I;
+ fn free(&self, id: I);
+}
+
+impl<I: TypedId + Debug> IdentityHandler<I> for Mutex<IdentityManager> {
+ type Input = PhantomData<I>;
+ fn process(&self, _id: Self::Input, backend: Backend) -> I {
+ self.lock().alloc(backend)
+ }
+ fn free(&self, id: I) {
+ self.lock().free(id)
+ }
+}
+
+pub trait IdentityHandlerFactory<I> {
+ type Filter: IdentityHandler<I>;
+ fn spawn(&self, min_index: Index) -> Self::Filter;
+}
+
+#[derive(Debug)]
+pub struct IdentityManagerFactory;
+
+impl<I: TypedId + Debug> IdentityHandlerFactory<I> for IdentityManagerFactory {
+ type Filter = Mutex<IdentityManager>;
+ fn spawn(&self, min_index: Index) -> Self::Filter {
+ Mutex::new(IdentityManager::from_index(min_index))
+ }
+}
+
+pub trait GlobalIdentityHandlerFactory:
+ IdentityHandlerFactory<AdapterId>
+ + IdentityHandlerFactory<DeviceId>
+ + IdentityHandlerFactory<SwapChainId>
+ + IdentityHandlerFactory<PipelineLayoutId>
+ + IdentityHandlerFactory<ShaderModuleId>
+ + IdentityHandlerFactory<BindGroupLayoutId>
+ + IdentityHandlerFactory<BindGroupId>
+ + IdentityHandlerFactory<CommandBufferId>
+ + IdentityHandlerFactory<RenderBundleId>
+ + IdentityHandlerFactory<RenderPipelineId>
+ + IdentityHandlerFactory<ComputePipelineId>
+ + IdentityHandlerFactory<BufferId>
+ + IdentityHandlerFactory<TextureId>
+ + IdentityHandlerFactory<TextureViewId>
+ + IdentityHandlerFactory<SamplerId>
+ + IdentityHandlerFactory<SurfaceId>
+{
+}
+
+impl GlobalIdentityHandlerFactory for IdentityManagerFactory {}
+
+pub type Input<G, I> = <<G as IdentityHandlerFactory<I>>::Filter as IdentityHandler<I>>::Input;
+
+pub trait Resource {
+ const TYPE: &'static str;
+ fn life_guard(&self) -> &crate::LifeGuard;
+ fn label(&self) -> &str {
+ #[cfg(debug_assertions)]
+ return &self.life_guard().label;
+ #[cfg(not(debug_assertions))]
+ return "";
+ }
+}
+
+#[derive(Debug)]
+pub struct Registry<T: Resource, I: TypedId, F: IdentityHandlerFactory<I>> {
+ identity: F::Filter,
+ data: RwLock<Storage<T, I>>,
+ backend: Backend,
+}
+
+impl<T: Resource, I: TypedId, F: IdentityHandlerFactory<I>> Registry<T, I, F> {
+ fn new(backend: Backend, factory: &F) -> Self {
+ Self {
+ identity: factory.spawn(0),
+ data: RwLock::new(Storage {
+ map: Vec::new(),
+ kind: T::TYPE,
+ _phantom: PhantomData,
+ }),
+ backend,
+ }
+ }
+
+ fn without_backend(factory: &F, kind: &'static str) -> Self {
+ Self {
+ identity: factory.spawn(1),
+ data: RwLock::new(Storage {
+ map: Vec::new(),
+ kind,
+ _phantom: PhantomData,
+ }),
+ backend: Backend::Empty,
+ }
+ }
+}
+
+impl<T: Resource, I: TypedId + Copy, F: IdentityHandlerFactory<I>> Registry<T, I, F> {
+ pub fn register<A: Access<T>>(&self, id: I, value: T, _token: &mut Token<A>) {
+ debug_assert_eq!(id.unzip().2, self.backend);
+ self.data.write().insert(id, value);
+ }
+
+ pub fn read<'a, A: Access<T>>(
+ &'a self,
+ _token: &'a mut Token<A>,
+ ) -> (RwLockReadGuard<'a, Storage<T, I>>, Token<'a, T>) {
+ (self.data.read(), Token::new())
+ }
+
+ pub fn write<'a, A: Access<T>>(
+ &'a self,
+ _token: &'a mut Token<A>,
+ ) -> (RwLockWriteGuard<'a, Storage<T, I>>, Token<'a, T>) {
+ (self.data.write(), Token::new())
+ }
+
+ pub(crate) fn register_identity<A: Access<T>>(
+ &self,
+ id_in: <F::Filter as IdentityHandler<I>>::Input,
+ value: T,
+ token: &mut Token<A>,
+ ) -> Valid<I> {
+ let id = self.identity.process(id_in, self.backend);
+ self.register(id, value, token);
+ Valid(id)
+ }
+
+ pub(crate) fn register_identity_locked(
+ &self,
+ id_in: <F::Filter as IdentityHandler<I>>::Input,
+ value: T,
+ guard: &mut Storage<T, I>,
+ ) -> Valid<I> {
+ let id = self.identity.process(id_in, self.backend);
+ guard.insert(id, value);
+ Valid(id)
+ }
+
+ pub fn register_error<A: Access<T>>(
+ &self,
+ id_in: <F::Filter as IdentityHandler<I>>::Input,
+ label: &str,
+ _token: &mut Token<A>,
+ ) -> I {
+ let id = self.identity.process(id_in, self.backend);
+ debug_assert_eq!(id.unzip().2, self.backend);
+ self.data.write().insert_error(id, label);
+ id
+ }
+
+ pub fn unregister_locked(&self, id: I, guard: &mut Storage<T, I>) -> Option<T> {
+ let value = guard.remove(id);
+ //Note: careful about the order here!
+ self.identity.free(id);
+ //Returning None is legal if it's an error ID
+ value
+ }
+
+ pub fn unregister<'a, A: Access<T>>(
+ &self,
+ id: I,
+ _token: &'a mut Token<A>,
+ ) -> (Option<T>, Token<'a, T>) {
+ let value = self.data.write().remove(id);
+ //Note: careful about the order here!
+ self.identity.free(id);
+ //Returning None is legal if it's an error ID
+ (value, Token::new())
+ }
+
+ pub fn process_id(&self, id_in: <F::Filter as IdentityHandler<I>>::Input) -> I {
+ self.identity.process(id_in, self.backend)
+ }
+
+ pub fn free_id(&self, id: I) {
+ self.identity.free(id)
+ }
+
+ pub fn label_for_resource(&self, id: I) -> String {
+ let guard = self.data.read();
+
+ let type_name = guard.kind;
+ match guard.get(id) {
+ Ok(res) => {
+ let label = res.label();
+ if label.is_empty() {
+ format!("<{}-{:?}>", type_name, id.unzip())
+ } else {
+ label.to_string()
+ }
+ }
+ Err(_) => format!(
+ "<Invalid-{} label={}>",
+ type_name,
+ guard.label_for_invalid_id(id)
+ ),
+ }
+ }
+}
+
+#[derive(Debug)]
+pub struct Hub<B: hal::Backend, F: GlobalIdentityHandlerFactory> {
+ pub adapters: Registry<Adapter<B>, AdapterId, F>,
+ pub devices: Registry<Device<B>, DeviceId, F>,
+ pub swap_chains: Registry<SwapChain<B>, SwapChainId, F>,
+ pub pipeline_layouts: Registry<PipelineLayout<B>, PipelineLayoutId, F>,
+ pub shader_modules: Registry<ShaderModule<B>, ShaderModuleId, F>,
+ pub bind_group_layouts: Registry<BindGroupLayout<B>, BindGroupLayoutId, F>,
+ pub bind_groups: Registry<BindGroup<B>, BindGroupId, F>,
+ pub command_buffers: Registry<CommandBuffer<B>, CommandBufferId, F>,
+ pub render_bundles: Registry<RenderBundle, RenderBundleId, F>,
+ pub render_pipelines: Registry<RenderPipeline<B>, RenderPipelineId, F>,
+ pub compute_pipelines: Registry<ComputePipeline<B>, ComputePipelineId, F>,
+ pub buffers: Registry<Buffer<B>, BufferId, F>,
+ pub textures: Registry<Texture<B>, TextureId, F>,
+ pub texture_views: Registry<TextureView<B>, TextureViewId, F>,
+ pub samplers: Registry<Sampler<B>, SamplerId, F>,
+}
+
+impl<B: GfxBackend, F: GlobalIdentityHandlerFactory> Hub<B, F> {
+ fn new(factory: &F) -> Self {
+ Self {
+ adapters: Registry::new(B::VARIANT, factory),
+ devices: Registry::new(B::VARIANT, factory),
+ swap_chains: Registry::new(B::VARIANT, factory),
+ pipeline_layouts: Registry::new(B::VARIANT, factory),
+ shader_modules: Registry::new(B::VARIANT, factory),
+ bind_group_layouts: Registry::new(B::VARIANT, factory),
+ bind_groups: Registry::new(B::VARIANT, factory),
+ command_buffers: Registry::new(B::VARIANT, factory),
+ render_bundles: Registry::new(B::VARIANT, factory),
+ render_pipelines: Registry::new(B::VARIANT, factory),
+ compute_pipelines: Registry::new(B::VARIANT, factory),
+ buffers: Registry::new(B::VARIANT, factory),
+ textures: Registry::new(B::VARIANT, factory),
+ texture_views: Registry::new(B::VARIANT, factory),
+ samplers: Registry::new(B::VARIANT, factory),
+ }
+ }
+}
+
+impl<B: GfxBackend, F: GlobalIdentityHandlerFactory> Hub<B, F> {
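+ /// Destroys all resources remaining on this hub in dependency order:
+ /// samplers and texture views before textures, swap chains near the end,
+ /// and the devices themselves last.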
+ fn clear(&self, surface_guard: &mut Storage<Surface, SurfaceId>) {
+ use crate::resource::TextureViewInner;
+ use hal::{device::Device as _, window::PresentationSurface as _};
+
+ let mut devices = self.devices.data.write();
+ for element in devices.map.iter_mut() {
+ if let Element::Occupied(device, _) = element {
+ device.prepare_to_die();
+ }
+ }
+
+ for element in self.samplers.data.write().map.drain(..) {
+ if let Element::Occupied(sampler, _) = element {
+ unsafe {
+ devices[sampler.device_id.value]
+ .raw
+ .destroy_sampler(sampler.raw);
+ }
+ }
+ }
+ {
+ let textures = self.textures.data.read();
+ for element in self.texture_views.data.write().map.drain(..) {
+ if let Element::Occupied(texture_view, _) = element {
+ match texture_view.inner {
+ TextureViewInner::Native { raw, source_id } => {
+ let device = &devices[textures[source_id.value].device_id.value];
+ unsafe {
+ device.raw.destroy_image_view(raw);
+ }
+ }
+ TextureViewInner::SwapChain { .. } => {} //TODO
+ }
+ }
+ }
+ }
+
+ for element in self.textures.data.write().map.drain(..) {
+ if let Element::Occupied(texture, _) = element {
+ devices[texture.device_id.value].destroy_texture(texture);
+ }
+ }
+ for element in self.buffers.data.write().map.drain(..) {
+ if let Element::Occupied(buffer, _) = element {
+ //TODO: unmap if needed
+ devices[buffer.device_id.value].destroy_buffer(buffer);
+ }
+ }
+ for element in self.command_buffers.data.write().map.drain(..) {
+ if let Element::Occupied(command_buffer, _) = element {
+ devices[command_buffer.device_id.value]
+ .cmd_allocator
+ .after_submit(command_buffer, 0);
+ }
+ }
+ for element in self.bind_groups.data.write().map.drain(..) {
+ if let Element::Occupied(bind_group, _) = element {
+ let device = &devices[bind_group.device_id.value];
+ device.destroy_bind_group(bind_group);
+ }
+ }
+
+ for element in self.shader_modules.data.write().map.drain(..) {
+ if let Element::Occupied(module, _) = element {
+ let device = &devices[module.device_id.value];
+ unsafe {
+ device.raw.destroy_shader_module(module.raw);
+ }
+ }
+ }
+ for element in self.bind_group_layouts.data.write().map.drain(..) {
+ if let Element::Occupied(bgl, _) = element {
+ let device = &devices[bgl.device_id.value];
+ unsafe {
+ device.raw.destroy_descriptor_set_layout(bgl.raw);
+ }
+ }
+ }
+ for element in self.pipeline_layouts.data.write().map.drain(..) {
+ if let Element::Occupied(pipeline_layout, _) = element {
+ let device = &devices[pipeline_layout.device_id.value];
+ unsafe {
+ device.raw.destroy_pipeline_layout(pipeline_layout.raw);
+ }
+ }
+ }
+ for element in self.compute_pipelines.data.write().map.drain(..) {
+ if let Element::Occupied(pipeline, _) = element {
+ let device = &devices[pipeline.device_id.value];
+ unsafe {
+ device.raw.destroy_compute_pipeline(pipeline.raw);
+ }
+ }
+ }
+ for element in self.render_pipelines.data.write().map.drain(..) {
+ if let Element::Occupied(pipeline, _) = element {
+ let device = &devices[pipeline.device_id.value];
+ unsafe {
+ device.raw.destroy_graphics_pipeline(pipeline.raw);
+ }
+ }
+ }
+
+ for (index, element) in self.swap_chains.data.write().map.drain(..).enumerate() {
+ if let Element::Occupied(swap_chain, epoch) = element {
+ let device = &devices[swap_chain.device_id.value];
+ unsafe {
+ device.raw.destroy_semaphore(swap_chain.semaphore);
+ }
+ let suf_id = TypedId::zip(index as Index, epoch, B::VARIANT);
+ //TODO: hold the surface alive by the swapchain
+ if surface_guard.contains(suf_id) {
+ let surface = surface_guard.get_mut(suf_id).unwrap();
+ let suf = B::get_surface_mut(surface);
+ unsafe {
+ suf.unconfigure_swapchain(&device.raw);
+ }
+ }
+ }
+ }
+
+ for element in devices.map.drain(..) {
+ if let Element::Occupied(device, _) = element {
+ device.dispose();
+ }
+ }
+ }
+}
+
+#[derive(Debug)]
+pub struct Hubs<F: GlobalIdentityHandlerFactory> {
+ #[cfg(vulkan)]
+ vulkan: Hub<backend::Vulkan, F>,
+ #[cfg(metal)]
+ metal: Hub<backend::Metal, F>,
+ #[cfg(dx12)]
+ dx12: Hub<backend::Dx12, F>,
+ #[cfg(dx11)]
+ dx11: Hub<backend::Dx11, F>,
+ #[cfg(gl)]
+ gl: Hub<backend::Gl, F>,
+}
+
+impl<F: GlobalIdentityHandlerFactory> Hubs<F> {
+ fn new(factory: &F) -> Self {
+ Self {
+ #[cfg(vulkan)]
+ vulkan: Hub::new(factory),
+ #[cfg(metal)]
+ metal: Hub::new(factory),
+ #[cfg(dx12)]
+ dx12: Hub::new(factory),
+ #[cfg(dx11)]
+ dx11: Hub::new(factory),
+ #[cfg(gl)]
+ gl: Hub::new(factory),
+ }
+ }
+}
+
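+/// The root object of wgpu-core: owns the `Instance`, the registry of
+/// `Surface`s, and one `Hub` per compiled-in backend.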
+#[derive(Debug)]
+pub struct Global<G: GlobalIdentityHandlerFactory> {
+ pub instance: Instance,
+ pub surfaces: Registry<Surface, SurfaceId, G>,
+ hubs: Hubs<G>,
+}
+
+impl<G: GlobalIdentityHandlerFactory> Global<G> {
+ pub fn new(name: &str, factory: G, backends: wgt::BackendBit) -> Self {
+ span!(_guard, INFO, "Global::new");
+ Self {
+ instance: Instance::new(name, 1, backends),
+ surfaces: Registry::without_backend(&factory, "Surface"),
+ hubs: Hubs::new(&factory),
+ }
+ }
+
+ pub fn clear_backend<B: GfxBackend>(&self, _dummy: ()) {
+ let mut surface_guard = self.surfaces.data.write();
+ let hub = B::hub(self);
+ hub.clear(&mut *surface_guard);
+ }
+}
+
+impl<G: GlobalIdentityHandlerFactory> Drop for Global<G> {
+ fn drop(&mut self) {
+ if !thread::panicking() {
+ tracing::info!("Dropping Global");
+ let mut surface_guard = self.surfaces.data.write();
+
+ // destroy hubs
+ #[cfg(vulkan)]
+ {
+ self.hubs.vulkan.clear(&mut *surface_guard);
+ }
+ #[cfg(metal)]
+ {
+ self.hubs.metal.clear(&mut *surface_guard);
+ }
+ #[cfg(dx12)]
+ {
+ self.hubs.dx12.clear(&mut *surface_guard);
+ }
+ #[cfg(dx11)]
+ {
+ self.hubs.dx11.clear(&mut *surface_guard);
+ }
+ #[cfg(gl)]
+ {
+ self.hubs.gl.clear(&mut *surface_guard);
+ }
+
+ // destroy surfaces
+ for element in surface_guard.map.drain(..) {
+ if let Element::Occupied(surface, _) = element {
+ self.instance.destroy_surface(surface);
+ }
+ }
+ }
+ }
+}
+
+pub trait GfxBackend: hal::Backend {
+ const VARIANT: Backend;
+ fn hub<G: GlobalIdentityHandlerFactory>(global: &Global<G>) -> &Hub<Self, G>;
+ fn get_surface_mut(surface: &mut Surface) -> &mut Self::Surface;
+}
+
+#[cfg(vulkan)]
+impl GfxBackend for backend::Vulkan {
+ const VARIANT: Backend = Backend::Vulkan;
+ fn hub<G: GlobalIdentityHandlerFactory>(global: &Global<G>) -> &Hub<Self, G> {
+ &global.hubs.vulkan
+ }
+ fn get_surface_mut(surface: &mut Surface) -> &mut Self::Surface {
+ surface.vulkan.as_mut().unwrap()
+ }
+}
+
+#[cfg(metal)]
+impl GfxBackend for backend::Metal {
+ const VARIANT: Backend = Backend::Metal;
+ fn hub<G: GlobalIdentityHandlerFactory>(global: &Global<G>) -> &Hub<Self, G> {
+ &global.hubs.metal
+ }
+ fn get_surface_mut(surface: &mut Surface) -> &mut Self::Surface {
+ surface.metal.as_mut().unwrap()
+ }
+}
+
+#[cfg(dx12)]
+impl GfxBackend for backend::Dx12 {
+ const VARIANT: Backend = Backend::Dx12;
+ fn hub<G: GlobalIdentityHandlerFactory>(global: &Global<G>) -> &Hub<Self, G> {
+ &global.hubs.dx12
+ }
+ fn get_surface_mut(surface: &mut Surface) -> &mut Self::Surface {
+ surface.dx12.as_mut().unwrap()
+ }
+}
+
+#[cfg(dx11)]
+impl GfxBackend for backend::Dx11 {
+ const VARIANT: Backend = Backend::Dx11;
+ fn hub<G: GlobalIdentityHandlerFactory>(global: &Global<G>) -> &Hub<Self, G> {
+ &global.hubs.dx11
+ }
+ fn get_surface_mut(surface: &mut Surface) -> &mut Self::Surface {
+ surface.dx11.as_mut().unwrap()
+ }
+}
+
+#[cfg(gl)]
+impl GfxBackend for backend::Gl {
+ const VARIANT: Backend = Backend::Gl;
+ fn hub<G: GlobalIdentityHandlerFactory>(global: &Global<G>) -> &Hub<Self, G> {
+ &global.hubs.gl
+ }
+ fn get_surface_mut(surface: &mut Surface) -> &mut Self::Surface {
+ surface.gl.as_mut().unwrap()
+ }
+}
+
+#[cfg(test)]
+fn _test_send_sync(global: &Global<IdentityManagerFactory>) {
+ fn test_internal<T: Send + Sync>(_: T) {}
+ test_internal(global)
+}
diff --git a/gfx/wgpu/wgpu-core/src/id.rs b/gfx/wgpu/wgpu-core/src/id.rs
new file mode 100644
index 0000000000..7a1201be2e
--- /dev/null
+++ b/gfx/wgpu/wgpu-core/src/id.rs
@@ -0,0 +1,196 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+use crate::{Epoch, Index};
+use std::{cmp::Ordering, fmt, marker::PhantomData, num::NonZeroU64};
+use wgt::Backend;
+
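+// An `Id` packs three fields into one `NonZeroU64`: bits 0..32 hold the
+// `Index`, bits 32..61 the `Epoch`, and the top `BACKEND_BITS` bits the
+// `Backend` discriminant (see `TypedId::zip`/`unzip` below).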
+const BACKEND_BITS: usize = 3;
+const EPOCH_MASK: u32 = (1 << (32 - BACKEND_BITS)) - 1;
+type Dummy = crate::backend::Empty;
+
+#[repr(transparent)]
+#[cfg_attr(feature = "trace", derive(serde::Serialize), serde(into = "SerialId"))]
+#[cfg_attr(
+ feature = "replay",
+ derive(serde::Deserialize),
+ serde(from = "SerialId")
+)]
+#[cfg_attr(
+ all(feature = "serde", not(feature = "trace")),
+ derive(serde::Serialize)
+)]
+#[cfg_attr(
+ all(feature = "serde", not(feature = "replay")),
+ derive(serde::Deserialize)
+)]
+pub struct Id<T>(NonZeroU64, PhantomData<T>);
+
+// This type represents Id in a more readable (and editable) way.
+#[allow(dead_code)]
+#[cfg_attr(feature = "trace", derive(serde::Serialize))]
+#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
+enum SerialId {
+ // The only variant forces RON to not ignore "Id"
+ Id(Index, Epoch, Backend),
+}
+#[cfg(feature = "trace")]
+impl<T> From<Id<T>> for SerialId {
+ fn from(id: Id<T>) -> Self {
+ let (index, epoch, backend) = id.unzip();
+ Self::Id(index, epoch, backend)
+ }
+}
+#[cfg(feature = "replay")]
+impl<T> From<SerialId> for Id<T> {
+ fn from(id: SerialId) -> Self {
+ match id {
+ SerialId::Id(index, epoch, backend) => TypedId::zip(index, epoch, backend),
+ }
+ }
+}
+
+impl<T> Id<T> {
+ #[cfg(test)]
+ pub(crate) fn dummy() -> Valid<Self> {
+ Valid(Id(NonZeroU64::new(1).unwrap(), PhantomData))
+ }
+
+ pub fn backend(self) -> Backend {
+ match self.0.get() >> (64 - BACKEND_BITS) as u8 {
+ 0 => Backend::Empty,
+ 1 => Backend::Vulkan,
+ 2 => Backend::Metal,
+ 3 => Backend::Dx12,
+ 4 => Backend::Dx11,
+ 5 => Backend::Gl,
+ _ => unreachable!(),
+ }
+ }
+}
+
+impl<T> Copy for Id<T> {}
+
+impl<T> Clone for Id<T> {
+ fn clone(&self) -> Self {
+ Self(self.0, PhantomData)
+ }
+}
+
+impl<T> fmt::Debug for Id<T> {
+ fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+ self.unzip().fmt(formatter)
+ }
+}
+
+impl<T> std::hash::Hash for Id<T> {
+ fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
+ self.0.hash(state);
+ }
+}
+
+impl<T> PartialEq for Id<T> {
+ fn eq(&self, other: &Self) -> bool {
+ self.0 == other.0
+ }
+}
+
+impl<T> Eq for Id<T> {}
+
+impl<T> PartialOrd for Id<T> {
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ self.0.partial_cmp(&other.0)
+ }
+}
+
+impl<T> Ord for Id<T> {
+ fn cmp(&self, other: &Self) -> Ordering {
+ self.0.cmp(&other.0)
+ }
+}
+
+/// An internal ID that has been checked to point to
+/// a valid object in the storages.
+#[repr(transparent)]
+#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, PartialOrd, Ord)]
+#[cfg_attr(feature = "trace", derive(serde::Serialize))]
+#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
+pub(crate) struct Valid<I>(pub I);
+
+pub trait TypedId {
+ fn zip(index: Index, epoch: Epoch, backend: Backend) -> Self;
+ fn unzip(self) -> (Index, Epoch, Backend);
+}
+
+impl<T> TypedId for Id<T> {
+ fn zip(index: Index, epoch: Epoch, backend: Backend) -> Self {
+ assert_eq!(0, epoch >> (32 - BACKEND_BITS));
+ let v = index as u64 | ((epoch as u64) << 32) | ((backend as u64) << (64 - BACKEND_BITS));
+ Id(NonZeroU64::new(v).unwrap(), PhantomData)
+ }
+
+ fn unzip(self) -> (Index, Epoch, Backend) {
+ (
+ self.0.get() as u32,
+ (self.0.get() >> 32) as u32 & EPOCH_MASK,
+ self.backend(),
+ )
+ }
+}
+
+pub type AdapterId = Id<crate::instance::Adapter<Dummy>>;
+pub type SurfaceId = Id<crate::instance::Surface>;
+// Device
+pub type DeviceId = Id<crate::device::Device<Dummy>>;
+pub type QueueId = DeviceId;
+// Resource
+pub type BufferId = Id<crate::resource::Buffer<Dummy>>;
+pub type TextureViewId = Id<crate::resource::TextureView<Dummy>>;
+pub type TextureId = Id<crate::resource::Texture<Dummy>>;
+pub type SamplerId = Id<crate::resource::Sampler<Dummy>>;
+// Binding model
+pub type BindGroupLayoutId = Id<crate::binding_model::BindGroupLayout<Dummy>>;
+pub type PipelineLayoutId = Id<crate::binding_model::PipelineLayout<Dummy>>;
+pub type BindGroupId = Id<crate::binding_model::BindGroup<Dummy>>;
+// Pipeline
+pub type ShaderModuleId = Id<crate::pipeline::ShaderModule<Dummy>>;
+pub type RenderPipelineId = Id<crate::pipeline::RenderPipeline<Dummy>>;
+pub type ComputePipelineId = Id<crate::pipeline::ComputePipeline<Dummy>>;
+// Command
+pub type CommandEncoderId = CommandBufferId;
+pub type CommandBufferId = Id<crate::command::CommandBuffer<Dummy>>;
+pub type RenderPassEncoderId = *mut crate::command::RenderPass;
+pub type ComputePassEncoderId = *mut crate::command::ComputePass;
+pub type RenderBundleEncoderId = *mut crate::command::RenderBundleEncoder;
+pub type RenderBundleId = Id<crate::command::RenderBundle>;
+// Swap chain
+pub type SwapChainId = Id<crate::swap_chain::SwapChain<Dummy>>;
+
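+// A swap chain ID reuses the index and epoch of its parent surface ID;
+// only the backend field distinguishes the two.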
+impl SurfaceId {
+ pub(crate) fn to_swap_chain_id(self, backend: Backend) -> SwapChainId {
+ let (index, epoch, _) = self.unzip();
+ Id::zip(index, epoch, backend)
+ }
+}
+impl SwapChainId {
+ pub fn to_surface_id(self) -> SurfaceId {
+ let (index, epoch, _) = self.unzip();
+ Id::zip(index, epoch, Backend::Empty)
+ }
+}
+
+#[test]
+fn test_id_backend() {
+ for &b in &[
+ Backend::Empty,
+ Backend::Vulkan,
+ Backend::Metal,
+ Backend::Dx12,
+ Backend::Dx11,
+ Backend::Gl,
+ ] {
+ let id: Id<()> = Id::zip(1, 0, b);
+ assert_eq!(id.backend(), b);
+ }
+}
diff --git a/gfx/wgpu/wgpu-core/src/instance.rs b/gfx/wgpu/wgpu-core/src/instance.rs
new file mode 100644
index 0000000000..8f156c9d3f
--- /dev/null
+++ b/gfx/wgpu/wgpu-core/src/instance.rs
@@ -0,0 +1,840 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+use crate::{
+ backend,
+ device::{Device, DeviceDescriptor},
+ hub::{GfxBackend, Global, GlobalIdentityHandlerFactory, Input, Token},
+ id::{AdapterId, DeviceId, SurfaceId, Valid},
+ span, LabelHelpers, LifeGuard, PrivateFeatures, Stored, MAX_BIND_GROUPS,
+};
+
+use wgt::{Backend, BackendBit, PowerPreference, BIND_BUFFER_ALIGNMENT};
+
+use hal::{
+ adapter::{AdapterInfo as HalAdapterInfo, DeviceType as HalDeviceType, PhysicalDevice as _},
+ queue::QueueFamily as _,
+ window::Surface as _,
+ Instance as _,
+};
+use thiserror::Error;
+
+/// Size that is guaranteed to be available in push constants.
+///
+/// This is needed because non-vulkan backends might not
+/// provide a push-constant size limit.
+const MIN_PUSH_CONSTANT_SIZE: u32 = 128;
+
+pub type RequestAdapterOptions = wgt::RequestAdapterOptions<SurfaceId>;
+
+#[derive(Debug)]
+pub struct Instance {
+ #[cfg(vulkan)]
+ pub vulkan: Option<gfx_backend_vulkan::Instance>,
+ #[cfg(metal)]
+ pub metal: Option<gfx_backend_metal::Instance>,
+ #[cfg(dx12)]
+ pub dx12: Option<gfx_backend_dx12::Instance>,
+ #[cfg(dx11)]
+ pub dx11: Option<gfx_backend_dx11::Instance>,
+ #[cfg(gl)]
+ pub gl: Option<gfx_backend_gl::Instance>,
+}
+
+impl Instance {
+ pub fn new(name: &str, version: u32, backends: BackendBit) -> Self {
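+ // `backends_map!` instantiates the `map` closure once per compiled-in
+ // backend, so each field is only constructed when its cfg is enabled.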
+ backends_map! {
+ let map = |(backend, backend_create)| {
+ if backends.contains(backend.into()) {
+ backend_create(name, version).ok()
+ } else {
+ None
+ }
+ };
+ Self {
+ #[cfg(vulkan)]
+ vulkan: map((Backend::Vulkan, gfx_backend_vulkan::Instance::create)),
+ #[cfg(metal)]
+ metal: map((Backend::Metal, gfx_backend_metal::Instance::create)),
+ #[cfg(dx12)]
+ dx12: map((Backend::Dx12, gfx_backend_dx12::Instance::create)),
+ #[cfg(dx11)]
+ dx11: map((Backend::Dx11, gfx_backend_dx11::Instance::create)),
+ #[cfg(gl)]
+ gl: map((Backend::Gl, gfx_backend_gl::Instance::create)),
+ }
+ }
+ }
+
+ pub(crate) fn destroy_surface(&self, surface: Surface) {
+ backends_map! {
+ let map = |(surface_backend, self_backend)| {
+ unsafe {
+ if let Some(suf) = surface_backend {
+ self_backend.as_ref().unwrap().destroy_surface(suf);
+ }
+ }
+ };
+
+ #[cfg(vulkan)]
+ map((surface.vulkan, &self.vulkan)),
+ #[cfg(metal)]
+ map((surface.metal, &self.metal)),
+ #[cfg(dx12)]
+ map((surface.dx12, &self.dx12)),
+ #[cfg(dx11)]
+ map((surface.dx11, &self.dx11)),
+ #[cfg(gl)]
+ map((surface.gl, &self.gl)),
+ }
+ }
+}
+
+type GfxSurface<B> = <B as hal::Backend>::Surface;
+
+#[derive(Debug)]
+pub struct Surface {
+ #[cfg(vulkan)]
+ pub vulkan: Option<GfxSurface<backend::Vulkan>>,
+ #[cfg(metal)]
+ pub metal: Option<GfxSurface<backend::Metal>>,
+ #[cfg(dx12)]
+ pub dx12: Option<GfxSurface<backend::Dx12>>,
+ #[cfg(dx11)]
+ pub dx11: Option<GfxSurface<backend::Dx11>>,
+ #[cfg(gl)]
+ pub gl: Option<GfxSurface<backend::Gl>>,
+}
+
+impl crate::hub::Resource for Surface {
+ const TYPE: &'static str = "Surface";
+
+ fn life_guard(&self) -> &LifeGuard {
+ unreachable!()
+ }
+
+ fn label(&self) -> &str {
+ "<Surface>"
+ }
+}
+
+#[derive(Debug)]
+pub struct Adapter<B: hal::Backend> {
+ pub(crate) raw: hal::adapter::Adapter<B>,
+ features: wgt::Features,
+ limits: wgt::Limits,
+ life_guard: LifeGuard,
+}
+
+impl<B: GfxBackend> Adapter<B> {
+ fn new(raw: hal::adapter::Adapter<B>) -> Self {
+ span!(_guard, INFO, "Adapter::new");
+
+ let adapter_features = raw.physical_device.features();
+
+ let mut features = wgt::Features::default()
+ | wgt::Features::MAPPABLE_PRIMARY_BUFFERS
+ | wgt::Features::PUSH_CONSTANTS;
+ features.set(
+ wgt::Features::DEPTH_CLAMPING,
+ adapter_features.contains(hal::Features::DEPTH_CLAMP),
+ );
+ features.set(
+ wgt::Features::TEXTURE_COMPRESSION_BC,
+ adapter_features.contains(hal::Features::FORMAT_BC),
+ );
+ features.set(
+ wgt::Features::SAMPLED_TEXTURE_BINDING_ARRAY,
+ adapter_features.contains(hal::Features::TEXTURE_DESCRIPTOR_ARRAY),
+ );
+ features.set(
+ wgt::Features::SAMPLED_TEXTURE_ARRAY_DYNAMIC_INDEXING,
+ adapter_features.contains(hal::Features::SHADER_SAMPLED_IMAGE_ARRAY_DYNAMIC_INDEXING),
+ );
+ features.set(
+ wgt::Features::SAMPLED_TEXTURE_ARRAY_NON_UNIFORM_INDEXING,
+ adapter_features.contains(hal::Features::SAMPLED_TEXTURE_DESCRIPTOR_INDEXING),
+ );
+ features.set(
+ wgt::Features::UNSIZED_BINDING_ARRAY,
+ adapter_features.contains(hal::Features::UNSIZED_DESCRIPTOR_ARRAY),
+ );
+ features.set(
+ wgt::Features::MULTI_DRAW_INDIRECT,
+ adapter_features.contains(hal::Features::MULTI_DRAW_INDIRECT),
+ );
+ features.set(
+ wgt::Features::MULTI_DRAW_INDIRECT_COUNT,
+ adapter_features.contains(hal::Features::DRAW_INDIRECT_COUNT),
+ );
+ features.set(
+ wgt::Features::NON_FILL_POLYGON_MODE,
+ adapter_features.contains(hal::Features::NON_FILL_POLYGON_MODE),
+ );
+ #[cfg(not(target_os = "ios"))]
+ //TODO: https://github.com/gfx-rs/gfx/issues/3346
+ features.set(wgt::Features::ADDRESS_MODE_CLAMP_TO_BORDER, true);
+
+ let adapter_limits = raw.physical_device.limits();
+
+ let default_limits = wgt::Limits::default();
+
+ // All these casts to u32 are safe as the underlying vulkan types are u32s.
+ // If another backend provides larger limits than u32, we need to clamp them to u32::MAX.
+ // TODO: fix all gfx-hal backends to produce limits we care about, and remove .max
+ let limits = wgt::Limits {
+ max_bind_groups: (adapter_limits.max_bound_descriptor_sets as u32)
+ .min(MAX_BIND_GROUPS as u32)
+ .max(default_limits.max_bind_groups),
+ max_dynamic_uniform_buffers_per_pipeline_layout: (adapter_limits
+ .max_descriptor_set_uniform_buffers_dynamic
+ as u32)
+ .max(default_limits.max_dynamic_uniform_buffers_per_pipeline_layout),
+ max_dynamic_storage_buffers_per_pipeline_layout: (adapter_limits
+ .max_descriptor_set_storage_buffers_dynamic
+ as u32)
+ .max(default_limits.max_dynamic_storage_buffers_per_pipeline_layout),
+ max_sampled_textures_per_shader_stage: (adapter_limits
+ .max_per_stage_descriptor_sampled_images
+ as u32)
+ .max(default_limits.max_sampled_textures_per_shader_stage),
+ max_samplers_per_shader_stage: (adapter_limits.max_per_stage_descriptor_samplers
+ as u32)
+ .max(default_limits.max_samplers_per_shader_stage),
+ max_storage_buffers_per_shader_stage: (adapter_limits
+ .max_per_stage_descriptor_storage_buffers
+ as u32)
+ .max(default_limits.max_storage_buffers_per_shader_stage),
+ max_storage_textures_per_shader_stage: (adapter_limits
+ .max_per_stage_descriptor_storage_images
+ as u32)
+ .max(default_limits.max_storage_textures_per_shader_stage),
+ max_uniform_buffers_per_shader_stage: (adapter_limits
+ .max_per_stage_descriptor_uniform_buffers
+ as u32)
+ .max(default_limits.max_uniform_buffers_per_shader_stage),
+ max_uniform_buffer_binding_size: (adapter_limits.max_uniform_buffer_range as u32)
+ .max(default_limits.max_uniform_buffer_binding_size),
+ max_push_constant_size: (adapter_limits.max_push_constants_size as u32)
+ .max(MIN_PUSH_CONSTANT_SIZE), // As an extension, the default is always 0, so define a separate minimum.
+ };
+
+ Self {
+ raw,
+ features,
+ limits,
+ life_guard: LifeGuard::new("<Adapter>"),
+ }
+ }
+
+ fn create_device(
+ &self,
+ self_id: AdapterId,
+ desc: &DeviceDescriptor,
+ trace_path: Option<&std::path::Path>,
+ ) -> Result<Device<B>, RequestDeviceError> {
+ // Verify all features were exposed by the adapter
+ if !self.features.contains(desc.features) {
+ return Err(RequestDeviceError::UnsupportedFeature(
+ desc.features - self.features,
+ ));
+ }
+
+ // Verify feature preconditions
+ if desc
+ .features
+ .contains(wgt::Features::MAPPABLE_PRIMARY_BUFFERS)
+ && self.raw.info.device_type == hal::adapter::DeviceType::DiscreteGpu
+ {
+ tracing::warn!("Feature MAPPABLE_PRIMARY_BUFFERS enabled on a discrete gpu. This is a massive performance footgun and likely not what you wanted");
+ }
+
+ let phd = &self.raw.physical_device;
+ let available_features = phd.features();
+
+ // Check features that are always needed
+ let wishful_features = hal::Features::ROBUST_BUFFER_ACCESS
+ | hal::Features::VERTEX_STORES_AND_ATOMICS
+ | hal::Features::FRAGMENT_STORES_AND_ATOMICS
+ | hal::Features::NDC_Y_UP
+ | hal::Features::INDEPENDENT_BLENDING
+ | hal::Features::SAMPLER_ANISOTROPY
+ | hal::Features::IMAGE_CUBE_ARRAY;
+ let mut enabled_features = available_features & wishful_features;
+ if enabled_features != wishful_features {
+ tracing::warn!(
+ "Missing internal features: {:?}",
+ wishful_features - enabled_features
+ );
+ }
+
+ // Features
+ enabled_features.set(
+ hal::Features::TEXTURE_DESCRIPTOR_ARRAY,
+ desc.features
+ .contains(wgt::Features::SAMPLED_TEXTURE_BINDING_ARRAY),
+ );
+ enabled_features.set(
+ hal::Features::SHADER_SAMPLED_IMAGE_ARRAY_DYNAMIC_INDEXING,
+ desc.features
+ .contains(wgt::Features::SAMPLED_TEXTURE_ARRAY_DYNAMIC_INDEXING),
+ );
+ enabled_features.set(
+ hal::Features::SAMPLED_TEXTURE_DESCRIPTOR_INDEXING,
+ desc.features
+ .contains(wgt::Features::SAMPLED_TEXTURE_ARRAY_NON_UNIFORM_INDEXING),
+ );
+ enabled_features.set(
+ hal::Features::UNSIZED_DESCRIPTOR_ARRAY,
+ desc.features.contains(wgt::Features::UNSIZED_BINDING_ARRAY),
+ );
+ enabled_features.set(
+ hal::Features::MULTI_DRAW_INDIRECT,
+ desc.features.contains(wgt::Features::MULTI_DRAW_INDIRECT),
+ );
+ enabled_features.set(
+ hal::Features::DRAW_INDIRECT_COUNT,
+ desc.features
+ .contains(wgt::Features::MULTI_DRAW_INDIRECT_COUNT),
+ );
+ enabled_features.set(
+ hal::Features::NON_FILL_POLYGON_MODE,
+ desc.features.contains(wgt::Features::NON_FILL_POLYGON_MODE),
+ );
+
+ let family = self
+ .raw
+ .queue_families
+ .iter()
+ .find(|family| family.queue_type().supports_graphics())
+ .ok_or(RequestDeviceError::NoGraphicsQueue)?;
+ let mut gpu =
+ unsafe { phd.open(&[(family, &[1.0])], enabled_features) }.map_err(|err| {
+ use hal::device::CreationError::*;
+ match err {
+ DeviceLost => RequestDeviceError::DeviceLost,
+ InitializationFailed => RequestDeviceError::Internal,
+ OutOfMemory(_) => RequestDeviceError::OutOfMemory,
+ _ => panic!("failed to create `gfx-hal` device: {}", err),
+ }
+ })?;
+
+ if desc.label.is_some() {
+ //TODO
+ }
+
+ let limits = phd.limits();
+ assert_eq!(
+ 0,
+ BIND_BUFFER_ALIGNMENT % limits.min_storage_buffer_offset_alignment,
+ "Adapter storage buffer offset alignment not compatible with WGPU"
+ );
+ assert_eq!(
+ 0,
+ BIND_BUFFER_ALIGNMENT % limits.min_uniform_buffer_offset_alignment,
+ "Adapter uniform buffer offset alignment not compatible with WGPU"
+ );
+ if self.limits < desc.limits {
+ return Err(RequestDeviceError::LimitsExceeded);
+ }
+
+ let mem_props = phd.memory_properties();
+ if !desc.shader_validation {
+ tracing::warn!("Shader validation is disabled");
+ }
+ let private_features = PrivateFeatures {
+ shader_validation: desc.shader_validation,
+ anisotropic_filtering: enabled_features.contains(hal::Features::SAMPLER_ANISOTROPY),
+ texture_d24: phd
+ .format_properties(Some(hal::format::Format::X8D24Unorm))
+ .optimal_tiling
+ .contains(hal::format::ImageFeature::DEPTH_STENCIL_ATTACHMENT),
+ texture_d24_s8: phd
+ .format_properties(Some(hal::format::Format::D24UnormS8Uint))
+ .optimal_tiling
+ .contains(hal::format::ImageFeature::DEPTH_STENCIL_ATTACHMENT),
+ };
+
+ Device::new(
+ gpu.device,
+ Stored {
+ value: Valid(self_id),
+ ref_count: self.life_guard.add_ref(),
+ },
+ gpu.queue_groups.swap_remove(0),
+ mem_props,
+ limits,
+ private_features,
+ desc,
+ trace_path,
+ )
+ .or(Err(RequestDeviceError::OutOfMemory))
+ }
+}
+
+impl<B: hal::Backend> crate::hub::Resource for Adapter<B> {
+ const TYPE: &'static str = "Adapter";
+
+ fn life_guard(&self) -> &LifeGuard {
+ &self.life_guard
+ }
+}
+
+/// Metadata about a backend adapter.
+#[derive(Clone, Debug, PartialEq)]
+#[cfg_attr(feature = "trace", derive(serde::Serialize))]
+#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
+pub struct AdapterInfo {
+ /// Adapter name
+ pub name: String,
+ /// Vendor PCI id of the adapter
+ pub vendor: usize,
+ /// PCI id of the adapter
+ pub device: usize,
+ /// Type of device
+ pub device_type: DeviceType,
+ /// Backend used for device
+ pub backend: Backend,
+}
+
+impl AdapterInfo {
+ fn from_gfx(adapter_info: HalAdapterInfo, backend: Backend) -> Self {
+ let HalAdapterInfo {
+ name,
+ vendor,
+ device,
+ device_type,
+ } = adapter_info;
+
+ Self {
+ name,
+ vendor,
+ device,
+ device_type: device_type.into(),
+ backend,
+ }
+ }
+}
+
+/// Error when requesting a device from the adapter.
+#[derive(Clone, Debug, Error)]
+pub enum RequestDeviceError {
+ #[error("parent adapter is invalid")]
+ InvalidAdapter,
+ #[error("connection to device was lost during initialization")]
+ DeviceLost,
+ #[error("device initialization failed due to implementation specific errors")]
+ Internal,
+ #[error("some of the requested device limits are not supported")]
+ LimitsExceeded,
+ #[error("device has no queue supporting graphics")]
+ NoGraphicsQueue,
+ #[error("not enough memory left")]
+ OutOfMemory,
+ #[error("unsupported features were requested: {0:?}")]
+ UnsupportedFeature(wgt::Features),
+}
+
+/// Supported physical device types.
+#[repr(u8)]
+#[derive(Clone, Debug, PartialEq)]
+#[cfg_attr(feature = "trace", derive(serde::Serialize))]
+#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
+pub enum DeviceType {
+ /// Other.
+ Other,
+ /// Integrated GPU with shared CPU/GPU memory.
+ IntegratedGpu,
+ /// Discrete GPU with separate CPU/GPU memory.
+ DiscreteGpu,
+ /// Virtual / Hosted.
+ VirtualGpu,
+ /// Cpu / Software Rendering.
+ Cpu,
+}
+
+impl From<HalDeviceType> for DeviceType {
+ fn from(device_type: HalDeviceType) -> Self {
+ match device_type {
+ HalDeviceType::Other => Self::Other,
+ HalDeviceType::IntegratedGpu => Self::IntegratedGpu,
+ HalDeviceType::DiscreteGpu => Self::DiscreteGpu,
+ HalDeviceType::VirtualGpu => Self::VirtualGpu,
+ HalDeviceType::Cpu => Self::Cpu,
+ }
+ }
+}
+
+pub enum AdapterInputs<'a, I> {
+ IdSet(&'a [I], fn(&I) -> Backend),
+ Mask(BackendBit, fn(Backend) -> I),
+}
+
+impl<I: Clone> AdapterInputs<'_, I> {
+ fn find(&self, b: Backend) -> Option<I> {
+ match *self {
+ Self::IdSet(ids, ref fun) => ids.iter().find(|id| fun(id) == b).cloned(),
+ Self::Mask(bits, ref fun) => {
+ if bits.contains(b.into()) {
+ Some(fun(b))
+ } else {
+ None
+ }
+ }
+ }
+ }
+}
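+
+// Illustrative sketch (hypothetical `make_id` constructor): callers either
+// supply explicit per-backend IDs or derive them from a backend mask.
+//
+// let inputs = AdapterInputs::Mask(BackendBit::PRIMARY, make_id);
+// let id = inputs.find(Backend::Vulkan); // `Some` only if the mask has Vulkan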
+
+#[error("adapter is invalid")]
+#[derive(Clone, Debug, Error)]
+pub struct InvalidAdapter;
+
+#[derive(Clone, Debug, Error)]
+pub enum RequestAdapterError {
+ #[error("no suitable adapter found")]
+ NotFound,
+ #[error("surface {0:?} is invalid")]
+ InvalidSurface(SurfaceId),
+}
+
+impl<G: GlobalIdentityHandlerFactory> Global<G> {
+ #[cfg(feature = "raw-window-handle")]
+ pub fn instance_create_surface(
+ &self,
+ handle: &impl raw_window_handle::HasRawWindowHandle,
+ id_in: Input<G, SurfaceId>,
+ ) -> SurfaceId {
+ span!(_guard, INFO, "Instance::create_surface");
+
+ let surface = unsafe {
+ backends_map! {
+ let map = |inst| {
+ inst
+ .as_ref()
+ .and_then(|inst| inst.create_surface(handle).map_err(|e| {
+ tracing::warn!("Error: {:?}", e);
+ }).ok())
+ };
+
+ Surface {
+ #[cfg(vulkan)]
+ vulkan: map(&self.instance.vulkan),
+ #[cfg(metal)]
+ metal: map(&self.instance.metal),
+ #[cfg(dx12)]
+ dx12: map(&self.instance.dx12),
+ #[cfg(dx11)]
+ dx11: map(&self.instance.dx11),
+ #[cfg(gl)]
+ gl: map(&self.instance.gl),
+ }
+ }
+ };
+
+ let mut token = Token::root();
+ let id = self.surfaces.register_identity(id_in, surface, &mut token);
+ id.0
+ }
+
+ pub fn surface_drop(&self, id: SurfaceId) {
+ span!(_guard, INFO, "Surface::drop");
+ let mut token = Token::root();
+ let (surface, _) = self.surfaces.unregister(id, &mut token);
+ self.instance.destroy_surface(surface.unwrap());
+ }
+
+ pub fn enumerate_adapters(&self, inputs: AdapterInputs<Input<G, AdapterId>>) -> Vec<AdapterId> {
+ span!(_guard, INFO, "Instance::enumerate_adapters");
+
+ let instance = &self.instance;
+ let mut token = Token::root();
+ let mut adapters = Vec::new();
+
+ backends_map! {
+ let map = |(instance_field, backend, backend_info, backend_hub)| {
+ if let Some(inst) = instance_field {
+ let hub = backend_hub(self);
+ if let Some(id_backend) = inputs.find(backend) {
+ for raw in inst.enumerate_adapters() {
+ let adapter = Adapter::new(raw);
+ tracing::info!("Adapter {} {:?}", backend_info, adapter.raw.info);
+ let id = hub.adapters.register_identity(
+ id_backend.clone(),
+ adapter,
+ &mut token,
+ );
+ adapters.push(id.0);
+ }
+ }
+ }
+ };
+
+ #[cfg(vulkan)]
+ map((&instance.vulkan, Backend::Vulkan, "Vulkan", backend::Vulkan::hub)),
+ #[cfg(metal)]
+ map((&instance.metal, Backend::Metal, "Metal", backend::Metal::hub)),
+ #[cfg(dx12)]
+ map((&instance.dx12, Backend::Dx12, "Dx12", backend::Dx12::hub)),
+ #[cfg(dx11)]
+ map((&instance.dx11, Backend::Dx11, "Dx11", backend::Dx11::hub)),
+ #[cfg(gl)]
+ map((&instance.gl, Backend::Gl, "GL", backend::Gl::hub)),
+ }
+
+ adapters
+ }
+
+ pub fn request_adapter(
+ &self,
+ desc: &RequestAdapterOptions,
+ inputs: AdapterInputs<Input<G, AdapterId>>,
+ ) -> Result<AdapterId, RequestAdapterError> {
+ span!(_guard, INFO, "Instance::pick_adapter");
+
+ let instance = &self.instance;
+ let mut token = Token::root();
+ let (surface_guard, mut token) = self.surfaces.read(&mut token);
+ let compatible_surface = desc
+ .compatible_surface
+ .map(|id| {
+ surface_guard
+ .get(id)
+ .map_err(|_| RequestAdapterError::InvalidSurface(id))
+ })
+ .transpose()?;
+ let mut device_types = Vec::new();
+
+ let mut id_vulkan = inputs.find(Backend::Vulkan);
+ let mut id_metal = inputs.find(Backend::Metal);
+ let mut id_dx12 = inputs.find(Backend::Dx12);
+ let mut id_dx11 = inputs.find(Backend::Dx11);
+ let mut id_gl = inputs.find(Backend::Gl);
+
+ backends_map! {
+ let map = |(instance_backend, id_backend, surface_backend)| {
+ match instance_backend {
+ Some(ref inst) if id_backend.is_some() => {
+ let mut adapters = inst.enumerate_adapters();
+ if let Some(surface_backend) = compatible_surface.and_then(surface_backend) {
+ adapters.retain(|a| {
+ a.queue_families
+ .iter()
+ .find(|qf| qf.queue_type().supports_graphics())
+ .map_or(false, |qf| surface_backend.supports_queue_family(qf))
+ });
+ }
+ device_types.extend(adapters.iter().map(|ad| ad.info.device_type.clone()));
+ adapters
+ }
+ _ => Vec::new(),
+ }
+ };
+
+ // NB: The internal function definitions are a workaround for Rust
+ // being weird with lifetimes for closure literals...
+ #[cfg(vulkan)]
+ let adapters_vk = map((&instance.vulkan, &id_vulkan, {
+ fn surface_vulkan(surf: &Surface) -> Option<&GfxSurface<backend::Vulkan>> {
+ surf.vulkan.as_ref()
+ }
+ surface_vulkan
+ }));
+ #[cfg(metal)]
+ let adapters_mtl = map((&instance.metal, &id_metal, {
+ fn surface_metal(surf: &Surface) -> Option<&GfxSurface<backend::Metal>> {
+ surf.metal.as_ref()
+ }
+ surface_metal
+ }));
+ #[cfg(dx12)]
+ let adapters_dx12 = map((&instance.dx12, &id_dx12, {
+ fn surface_dx12(surf: &Surface) -> Option<&GfxSurface<backend::Dx12>> {
+ surf.dx12.as_ref()
+ }
+ surface_dx12
+ }));
+ #[cfg(dx11)]
+ let adapters_dx11 = map((&instance.dx11, &id_dx11, {
+ fn surface_dx11(surf: &Surface) -> Option<&GfxSurface<backend::Dx11>> {
+ surf.dx11.as_ref()
+ }
+ surface_dx11
+ }));
+ #[cfg(gl)]
+ let adapters_gl = map((&instance.gl, &id_gl, {
+ fn surface_gl(surf: &Surface) -> Option<&GfxSurface<backend::Gl>> {
+ surf.gl.as_ref()
+ }
+ surface_gl
+ }));
+ }
+
+ if device_types.is_empty() {
+ return Err(RequestAdapterError::NotFound);
+ }
+
+ let (mut integrated, mut discrete, mut virt, mut other) = (None, None, None, None);
+
+ for (i, ty) in device_types.into_iter().enumerate() {
+ match ty {
+ hal::adapter::DeviceType::IntegratedGpu => {
+ integrated = integrated.or(Some(i));
+ }
+ hal::adapter::DeviceType::DiscreteGpu => {
+ discrete = discrete.or(Some(i));
+ }
+ hal::adapter::DeviceType::VirtualGpu => {
+ virt = virt.or(Some(i));
+ }
+ _ => {
+ other = other.or(Some(i));
+ }
+ }
+ }
+
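+ // Pick an index into `device_types` by power preference, falling back
+ // through the remaining kinds when the preferred one is absent.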
+ let preferred_gpu = match desc.power_preference {
+ PowerPreference::LowPower => integrated.or(other).or(discrete).or(virt),
+ PowerPreference::HighPerformance => discrete.or(other).or(integrated).or(virt),
+ };
+
+ let mut selected = preferred_gpu.unwrap_or(0);
+
+ backends_map! {
+ let map = |(info_adapter, id_backend, mut adapters_backend, backend_hub)| {
+ if selected < adapters_backend.len() {
+ let adapter = Adapter::new(adapters_backend.swap_remove(selected));
+ tracing::info!("Adapter {} {:?}", info_adapter, adapter.raw.info);
+ let id = backend_hub(self).adapters.register_identity(
+ id_backend.take().unwrap(),
+ adapter,
+ &mut token,
+ );
+ return Ok(id.0);
+ }
+ selected -= adapters_backend.len();
+ };
+
+ #[cfg(vulkan)]
+ map(("Vulkan", &mut id_vulkan, adapters_vk, backend::Vulkan::hub)),
+ #[cfg(metal)]
+ map(("Metal", &mut id_metal, adapters_mtl, backend::Metal::hub)),
+ #[cfg(dx12)]
+ map(("Dx12", &mut id_dx12, adapters_dx12, backend::Dx12::hub)),
+ #[cfg(dx11)]
+ map(("Dx11", &mut id_dx11, adapters_dx11, backend::Dx11::hub)),
+ #[cfg(gl)]
+ map(("GL", &mut id_dx11, adapters_gl, backend::Gl::hub)),
+ }
+
+ let _ = (
+ selected,
+ id_vulkan.take(),
+ id_metal.take(),
+ id_dx12.take(),
+ id_dx11.take(),
+ id_gl.take(),
+ );
+ tracing::warn!("Some adapters are present, but enumerating them failed!");
+ Err(RequestAdapterError::NotFound)
+ }
+
+ pub fn adapter_get_info<B: GfxBackend>(
+ &self,
+ adapter_id: AdapterId,
+ ) -> Result<AdapterInfo, InvalidAdapter> {
+ span!(_guard, INFO, "Adapter::get_info");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+ let (adapter_guard, _) = hub.adapters.read(&mut token);
+ adapter_guard
+ .get(adapter_id)
+ .map(|adapter| AdapterInfo::from_gfx(adapter.raw.info.clone(), adapter_id.backend()))
+ .map_err(|_| InvalidAdapter)
+ }
+
+ pub fn adapter_features<B: GfxBackend>(
+ &self,
+ adapter_id: AdapterId,
+ ) -> Result<wgt::Features, InvalidAdapter> {
+ span!(_guard, INFO, "Adapter::features");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+ let (adapter_guard, _) = hub.adapters.read(&mut token);
+ adapter_guard
+ .get(adapter_id)
+ .map(|adapter| adapter.features)
+ .map_err(|_| InvalidAdapter)
+ }
+
+ pub fn adapter_limits<B: GfxBackend>(
+ &self,
+ adapter_id: AdapterId,
+ ) -> Result<wgt::Limits, InvalidAdapter> {
+ span!(_guard, INFO, "Adapter::limits");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+ let (adapter_guard, _) = hub.adapters.read(&mut token);
+ adapter_guard
+ .get(adapter_id)
+ .map(|adapter| adapter.limits.clone())
+ .map_err(|_| InvalidAdapter)
+ }
+
+ pub fn adapter_drop<B: GfxBackend>(&self, adapter_id: AdapterId) {
+ span!(_guard, INFO, "Adapter::drop");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+ let (mut adapter_guard, _) = hub.adapters.write(&mut token);
+
+ match adapter_guard.get_mut(adapter_id) {
+ Ok(adapter) => {
+ if adapter.life_guard.ref_count.take().unwrap().load() == 1 {
+ hub.adapters
+ .unregister_locked(adapter_id, &mut *adapter_guard);
+ }
+ }
+ Err(_) => {
+ hub.adapters.free_id(adapter_id);
+ }
+ }
+ }
+}
+
+impl<G: GlobalIdentityHandlerFactory> Global<G> {
+ pub fn adapter_request_device<B: GfxBackend>(
+ &self,
+ adapter_id: AdapterId,
+ desc: &DeviceDescriptor,
+ trace_path: Option<&std::path::Path>,
+ id_in: Input<G, DeviceId>,
+ ) -> (DeviceId, Option<RequestDeviceError>) {
+ span!(_guard, INFO, "Adapter::request_device");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+
+ let error = loop {
+ let (adapter_guard, mut token) = hub.adapters.read(&mut token);
+ let adapter = match adapter_guard.get(adapter_id) {
+ Ok(adapter) => adapter,
+ Err(_) => break RequestDeviceError::InvalidAdapter,
+ };
+ let device = match adapter.create_device(adapter_id, desc, trace_path) {
+ Ok(device) => device,
+ Err(e) => break e,
+ };
+ let id = hub.devices.register_identity(id_in, device, &mut token);
+ return (id.0, None);
+ };
+
+ let id = hub
+ .devices
+ .register_error(id_in, desc.label.borrow_or_default(), &mut token);
+ (id, Some(error))
+ }
+}
diff --git a/gfx/wgpu/wgpu-core/src/lib.rs b/gfx/wgpu/wgpu-core/src/lib.rs
new file mode 100644
index 0000000000..68bb1738ab
--- /dev/null
+++ b/gfx/wgpu/wgpu-core/src/lib.rs
@@ -0,0 +1,271 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#![warn(
+ trivial_casts,
+ trivial_numeric_casts,
+ unused_extern_crates,
+ unused_qualifications
+)]
+// We use loops to get an early-out from a scope without closures.
+#![allow(clippy::never_loop)]
+
+#[macro_use]
+mod macros;
+
+pub mod backend {
+ pub use gfx_backend_empty::Backend as Empty;
+
+ #[cfg(dx11)]
+ pub use gfx_backend_dx11::Backend as Dx11;
+ #[cfg(dx12)]
+ pub use gfx_backend_dx12::Backend as Dx12;
+ #[cfg(gl)]
+ pub use gfx_backend_gl::Backend as Gl;
+ #[cfg(metal)]
+ pub use gfx_backend_metal::Backend as Metal;
+ #[cfg(vulkan)]
+ pub use gfx_backend_vulkan::Backend as Vulkan;
+}
+
+pub mod binding_model;
+pub mod command;
+mod conv;
+pub mod device;
+pub mod hub;
+pub mod id;
+pub mod instance;
+pub mod pipeline;
+pub mod resource;
+pub mod swap_chain;
+mod track;
+mod validation;
+
+#[cfg(test)]
+use loom::sync::atomic;
+#[cfg(not(test))]
+use std::sync::atomic;
+
+use atomic::{AtomicUsize, Ordering};
+
+use std::{borrow::Cow, os::raw::c_char, ptr};
+
+pub const MAX_BIND_GROUPS: usize = 8;
+
+type SubmissionIndex = usize;
+type Index = u32;
+type Epoch = u32;
+
+pub type RawString = *const c_char;
+pub type Label<'a> = Option<Cow<'a, str>>;
+
+trait LabelHelpers<'a> {
+ fn to_string_or_default(&'a self) -> String;
+ fn borrow_or_default(&'a self) -> &'a str;
+}
+impl<'a> LabelHelpers<'a> for Label<'a> {
+ fn borrow_or_default(&'a self) -> &'a str {
+ self.as_ref().map(|cow| cow.as_ref()).unwrap_or("")
+ }
+ fn to_string_or_default(&'a self) -> String {
+ self.as_ref()
+ .map(|cow| cow.as_ref())
+ .unwrap_or("")
+ .to_string()
+ }
+}
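+
+// Illustrative sketch: labels are optional, and the helpers fall back to an
+// empty string.
+//
+// let label: Label = Some(std::borrow::Cow::Borrowed("my-buffer"));
+// assert_eq!(label.borrow_or_default(), "my-buffer");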
+
+/// Reference count object that is 1:1 with each reference.
+#[derive(Debug)]
+struct RefCount(ptr::NonNull<AtomicUsize>);
+
+unsafe impl Send for RefCount {}
+unsafe impl Sync for RefCount {}
+
+impl RefCount {
+ const MAX: usize = 1 << 24;
+
+ fn load(&self) -> usize {
+ unsafe { self.0.as_ref() }.load(Ordering::Acquire)
+ }
+
+ /// This works like `std::mem::drop`, except that it returns a boolean which is true if and only
+ /// if we deallocated the underlying memory, i.e. if this was the last clone of this `RefCount`
+ /// to be dropped. This is useful for loom testing because it allows us to verify that we
+ /// deallocated the underlying memory exactly once.
+ #[cfg(test)]
+ fn rich_drop_outer(self) -> bool {
+ unsafe { std::mem::ManuallyDrop::new(self).rich_drop_inner() }
+ }
+
+ /// This function exists to allow `Self::rich_drop_outer` and `Drop::drop` to share the same
+ /// logic. To use this safely from outside of `Drop::drop`, the calling function must move
+ /// `Self` into a `ManuallyDrop`.
+ unsafe fn rich_drop_inner(&mut self) -> bool {
+ if self.0.as_ref().fetch_sub(1, Ordering::AcqRel) == 1 {
+ let _ = Box::from_raw(self.0.as_ptr());
+ true
+ } else {
+ false
+ }
+ }
+}
+
+impl Clone for RefCount {
+ fn clone(&self) -> Self {
+ let old_size = unsafe { self.0.as_ref() }.fetch_add(1, Ordering::AcqRel);
+ assert!(old_size < Self::MAX);
+ Self(self.0)
+ }
+}
+
+impl Drop for RefCount {
+ fn drop(&mut self) {
+ unsafe {
+ self.rich_drop_inner();
+ }
+ }
+}
+
+#[cfg(test)]
+#[test]
+fn loom() {
+ loom::model(move || {
+ let bx = Box::new(AtomicUsize::new(1));
+ let ref_count_main = ptr::NonNull::new(Box::into_raw(bx)).map(RefCount).unwrap();
+ let ref_count_spawned = ref_count_main.clone();
+
+ let join_handle = loom::thread::spawn(move || {
+ let _ = ref_count_spawned.clone();
+ ref_count_spawned.rich_drop_outer()
+ });
+
+ let dropped_in_main = ref_count_main.rich_drop_outer();
+ let dropped_in_spawned = join_handle.join().unwrap();
+ assert_ne!(
+ dropped_in_main, dropped_in_spawned,
+ "must drop exactly once"
+ );
+ });
+}
+
+/// Reference count object that tracks multiple references.
+/// Unlike `RefCount`, the count is incremented and decremented manually via
+/// `inc()` and `dec_and_check_empty()`.
+#[derive(Debug)]
+struct MultiRefCount(ptr::NonNull<AtomicUsize>);
+
+unsafe impl Send for MultiRefCount {}
+unsafe impl Sync for MultiRefCount {}
+
+impl MultiRefCount {
+ fn new() -> Self {
+ let bx = Box::new(AtomicUsize::new(1));
+ let ptr = Box::into_raw(bx);
+ Self(unsafe { ptr::NonNull::new_unchecked(ptr) })
+ }
+
+ fn inc(&self) {
+ unsafe { self.0.as_ref() }.fetch_add(1, Ordering::AcqRel);
+ }
+
+ fn dec_and_check_empty(&self) -> bool {
+ unsafe { self.0.as_ref() }.fetch_sub(1, Ordering::AcqRel) == 1
+ }
+}
+
+impl Drop for MultiRefCount {
+ fn drop(&mut self) {
+ let _ = unsafe { Box::from_raw(self.0.as_ptr()) };
+ }
+}
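+
+// Illustrative sketch: the count starts at 1 and is adjusted manually.
+//
+// let rc = MultiRefCount::new();
+// rc.inc(); // count: 2
+// assert!(!rc.dec_and_check_empty()); // count: 1
+// assert!(rc.dec_and_check_empty()); // count: 0, last reference gone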
+
+#[derive(Debug)]
+pub struct LifeGuard {
+ ref_count: Option<RefCount>,
+ submission_index: AtomicUsize,
+ #[cfg(debug_assertions)]
+ pub(crate) label: String,
+}
+
+impl LifeGuard {
+ #[allow(unused_variables)]
+ fn new(label: &str) -> Self {
+ let bx = Box::new(AtomicUsize::new(1));
+ Self {
+ ref_count: ptr::NonNull::new(Box::into_raw(bx)).map(RefCount),
+ submission_index: AtomicUsize::new(0),
+ #[cfg(debug_assertions)]
+ label: label.to_string(),
+ }
+ }
+
+ fn add_ref(&self) -> RefCount {
+ self.ref_count.clone().unwrap()
+ }
+
+ /// Returns `true` if the resource is still needed by the user.
+ fn use_at(&self, submit_index: SubmissionIndex) -> bool {
+ self.submission_index.store(submit_index, Ordering::Release);
+ self.ref_count.is_some()
+ }
+}
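+
+// Illustrative sketch (hypothetical label): `use_at` records the latest queue
+// submission referencing the resource and reports whether the user still
+// holds a reference.
+//
+// let guard = LifeGuard::new("<Buffer>");
+// let user_ref = guard.add_ref();
+// assert!(guard.use_at(42));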
+
+#[derive(Clone, Debug)]
+struct Stored<T> {
+ value: id::Valid<T>,
+ ref_count: RefCount,
+}
+
+#[derive(Clone, Copy, Debug)]
+struct PrivateFeatures {
+ shader_validation: bool,
+ anisotropic_filtering: bool,
+ texture_d24: bool,
+ texture_d24_s8: bool,
+}
+
+#[macro_export]
+macro_rules! gfx_select {
+ ($id:expr => $global:ident.$method:ident( $($param:expr),* )) => {
+ // Note: For some reason the cfg aliases defined in build.rs don't successfully apply in this
+ // macro, so we must specify their equivalents manually.
+ match $id.backend() {
+ #[cfg(any(not(any(target_os = "ios", target_os = "macos")), feature = "gfx-backend-vulkan"))]
+ wgt::Backend::Vulkan => $global.$method::<$crate::backend::Vulkan>( $($param),* ),
+ #[cfg(any(target_os = "ios", target_os = "macos"))]
+ wgt::Backend::Metal => $global.$method::<$crate::backend::Metal>( $($param),* ),
+ #[cfg(windows)]
+ wgt::Backend::Dx12 => $global.$method::<$crate::backend::Dx12>( $($param),* ),
+ #[cfg(windows)]
+ wgt::Backend::Dx11 => $global.$method::<$crate::backend::Dx11>( $($param),* ),
+ //#[cfg(all(unix, not(any(target_os = "ios", target_os = "macos"))))]
+ //wgt::Backend::Gl => $global.$method::<$crate::backend::Gl>( $($param),+ ),
+ other => panic!("Unexpected backend {:?}", other),
+ }
+ };
+}
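+
+// Illustrative usage sketch (assumes a `Global` named `global` and an ID whose
+// backend was recorded at creation; `device_poll` stands in for any generic
+// method on `Global`):
+//
+// gfx_select!(device_id => global.device_poll(device_id, true));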
+
+#[macro_export]
+macro_rules! span {
+ ($guard_name:tt, $level:ident, $name:expr, $($fields:tt)*) => {
+ let span = tracing::span!(tracing::Level::$level, $name, $($fields)*);
+ let $guard_name = span.enter();
+ };
+ ($guard_name:tt, $level:ident, $name:expr) => {
+ let span = tracing::span!(tracing::Level::$level, $name);
+ let $guard_name = span.enter();
+ };
+}
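+
+// Illustrative usage, matching the call sites in this crate:
+//
+// span!(_guard, INFO, "Adapter::new");
+//
+// The guard keeps the tracing span open until it is dropped at the end of the
+// enclosing scope.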
+
+/// Fast hash map used internally.
+type FastHashMap<K, V> =
+ std::collections::HashMap<K, V, std::hash::BuildHasherDefault<fxhash::FxHasher>>;
+/// Fast hash set used internally.
+type FastHashSet<K> = std::collections::HashSet<K, std::hash::BuildHasherDefault<fxhash::FxHasher>>;
+
+#[test]
+fn test_default_limits() {
+ let limits = wgt::Limits::default();
+ assert!(limits.max_bind_groups <= MAX_BIND_GROUPS as u32);
+}
diff --git a/gfx/wgpu/wgpu-core/src/macros.rs b/gfx/wgpu/wgpu-core/src/macros.rs
new file mode 100644
index 0000000000..47485e6d42
--- /dev/null
+++ b/gfx/wgpu/wgpu-core/src/macros.rs
@@ -0,0 +1,226 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+macro_rules! backends_map {
+ // one let statement per backend with mapped data
+ (
+ let map = |$backend:pat| $map:block;
+ $(
+ #[cfg($backend_cfg:meta)] let $pat:pat = map($expr:expr);
+ )*
+ ) => {
+ $(
+ #[cfg($backend_cfg)]
+ let $pat = {
+ let $backend = $expr;
+ $map
+ };
+ )*
+ };
+
+ // one block statement per backend with mapped data
+ (
+ let map = |$backend:pat| $map:block;
+ $(
+ #[cfg($backend_cfg:meta)] map($expr:expr),
+ )*
+ ) => {
+ $(
+ #[cfg($backend_cfg)]
+ {
+ let $backend = $expr;
+ $map
+ }
+ )*
+ };
+
+ // a struct constructor with one field per backend with mapped data
+ (
+ let map = |$backend:pat| $map:block;
+ $Struct:ident {
+ $(
+ #[cfg($backend_cfg:meta)] $ident:ident : map($expr:expr),
+ )*
+ }
+ ) => {
+ $Struct {
+ $(
+ #[cfg($backend_cfg)]
+ $ident: {
+ let $backend = $expr;
+ $map
+ },
+ )*
+ }
+ };
+}
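+
+// The three arms above cover, in order: one `let` binding per backend, one
+// statement per backend, and struct construction with one field per backend.
+// The test below exercises all three forms.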
+
+#[test]
+fn test_backend_macro() {
+ struct Foo {
+ #[cfg(any(
+ windows,
+ all(unix, not(any(target_os = "ios", target_os = "macos"))),
+ feature = "gfx-backend-vulkan",
+ ))]
+ vulkan: u32,
+
+ #[cfg(any(target_os = "ios", target_os = "macos"))]
+ metal: u32,
+
+ #[cfg(windows)]
+ dx12: u32,
+
+ #[cfg(windows)]
+ dx11: u32,
+ }
+
+ // test struct construction
+ let test_foo: Foo = backends_map! {
+ let map = |init| { init - 100 };
+ Foo {
+ #[cfg(vulkan)] vulkan: map(101),
+ #[cfg(metal)] metal: map(102),
+ #[cfg(dx12)] dx12: map(103),
+ #[cfg(dx11)] dx11: map(104),
+ }
+ };
+
+ let mut vec = Vec::new();
+
+ // test basic statement-per-backend
+ backends_map! {
+ let map = |(id, chr)| {
+ vec.push((id, chr));
+ };
+
+ #[cfg(vulkan)]
+ map((test_foo.vulkan, 'a')),
+
+ #[cfg(metal)]
+ map((test_foo.metal, 'b')),
+
+ #[cfg(dx12)]
+ map((test_foo.dx12, 'c')),
+
+ #[cfg(dx11)]
+ map((test_foo.dx11, 'd')),
+ }
+
+ #[cfg(any(
+ windows,
+ all(unix, not(any(target_os = "ios", target_os = "macos"))),
+ feature = "gfx-backend-vulkan",
+ ))]
+ assert!(vec.contains(&(1, 'a')));
+
+ #[cfg(any(target_os = "ios", target_os = "macos"))]
+ assert!(vec.contains(&(2, 'b')));
+
+ #[cfg(windows)]
+ assert!(vec.contains(&(3, 'c')));
+
+ #[cfg(windows)]
+ assert!(vec.contains(&(4, 'd')));
+
+ // test complex statement-per-backend
+ backends_map! {
+ let map = |(id, pred, code)| {
+ if pred(id) {
+ code();
+ }
+ };
+
+ #[cfg(vulkan)]
+ map((test_foo.vulkan, |v| v == 1, || println!("vulkan"))),
+
+ #[cfg(metal)]
+ map((test_foo.metal, |v| v == 2, || println!("metal"))),
+
+ #[cfg(dx12)]
+ map((test_foo.dx12, |v| v == 3, || println!("dx12"))),
+
+ #[cfg(dx11)]
+ map((test_foo.dx11, |v| v == 4, || println!("dx11"))),
+ }
+
+ // test struct construction 2
+ let test_foo_2: Foo = Foo {
+ #[cfg(vulkan)]
+ vulkan: 1,
+
+ #[cfg(metal)]
+ metal: 2,
+
+ #[cfg(dx12)]
+ dx12: 3,
+
+ #[cfg(dx11)]
+ dx11: 4,
+ };
+
+ #[cfg(vulkan)]
+ let var_vulkan = test_foo_2.vulkan;
+
+ #[cfg(metal)]
+ let var_metal = test_foo_2.metal;
+
+ #[cfg(dx12)]
+ let var_dx12 = test_foo_2.dx12;
+
+ #[cfg(dx11)]
+ let var_dx11 = test_foo_2.dx11;
+
+ backends_map! {
+ let map = |(id, chr, var)| { (chr, id, var) };
+
+ #[cfg(vulkan)]
+ let var_vulkan = map((test_foo_2.vulkan, 'a', var_vulkan));
+
+ #[cfg(metal)]
+ let var_metal = map((test_foo_2.metal, 'b', var_metal));
+
+ #[cfg(dx12)]
+ let var_dx12 = map((test_foo_2.dx12, 'c', var_dx12));
+
+ #[cfg(dx11)]
+ let var_dx11 = map((test_foo_2.dx11, 'd', var_dx11));
+ }
+
+ #[cfg(vulkan)]
+ {
+ println!("backend int: {:?}", var_vulkan);
+ }
+
+ #[cfg(metal)]
+ {
+ println!("backend int: {:?}", var_metal);
+ }
+
+ #[cfg(dx12)]
+ {
+ println!("backend int: {:?}", var_dx12);
+ }
+
+ #[cfg(dx11)]
+ {
+ println!("backend int: {:?}", var_dx11);
+ }
+
+ #[cfg(any(
+ windows,
+ all(unix, not(any(target_os = "ios", target_os = "macos"))),
+ feature = "gfx-backend-vulkan",
+ ))]
+ let _ = var_vulkan;
+
+ #[cfg(any(target_os = "ios", target_os = "macos"))]
+ let _ = var_metal;
+
+ #[cfg(windows)]
+ let _ = var_dx12;
+
+ #[cfg(windows)]
+ let _ = var_dx11;
+}
diff --git a/gfx/wgpu/wgpu-core/src/pipeline.rs b/gfx/wgpu/wgpu-core/src/pipeline.rs
new file mode 100644
index 0000000000..c4ca0e1407
--- /dev/null
+++ b/gfx/wgpu/wgpu-core/src/pipeline.rs
@@ -0,0 +1,254 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+use crate::{
+ binding_model::{CreateBindGroupLayoutError, CreatePipelineLayoutError},
+ device::{DeviceError, RenderPassContext},
+ hub::Resource,
+ id::{DeviceId, PipelineLayoutId, ShaderModuleId},
+ validation::StageError,
+ Label, LifeGuard, Stored,
+};
+use std::borrow::Cow;
+use thiserror::Error;
+use wgt::{BufferAddress, IndexFormat, InputStepMode};
+
+#[derive(Debug)]
+#[cfg_attr(feature = "trace", derive(serde::Serialize))]
+#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
+pub enum ShaderModuleSource<'a> {
+ SpirV(Cow<'a, [u32]>),
+ Wgsl(Cow<'a, str>),
+ // Unable to serialize with `naga::Module` in here:
+ // requires naga serialization feature.
+ //Naga(naga::Module),
+}
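+
+// Illustrative sketch: wrapping borrowed WGSL text (assumes `wgsl_text: &str`):
+//
+// let source = ShaderModuleSource::Wgsl(Cow::Borrowed(wgsl_text));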
+
+#[derive(Debug)]
+#[cfg_attr(feature = "trace", derive(serde::Serialize))]
+#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
+pub struct ShaderModuleDescriptor<'a> {
+ pub label: Label<'a>,
+ pub source: ShaderModuleSource<'a>,
+}
+
+#[derive(Debug)]
+pub struct ShaderModule<B: hal::Backend> {
+ pub(crate) raw: B::ShaderModule,
+ pub(crate) device_id: Stored<DeviceId>,
+ pub(crate) module: Option<naga::Module>,
+ #[cfg(debug_assertions)]
+ pub(crate) label: String,
+}
+
+impl<B: hal::Backend> Resource for ShaderModule<B> {
+ const TYPE: &'static str = "ShaderModule";
+
+ fn life_guard(&self) -> &LifeGuard {
+ unreachable!()
+ }
+
+ fn label(&self) -> &str {
+ #[cfg(debug_assertions)]
+ return &self.label;
+ #[cfg(not(debug_assertions))]
+ return "";
+ }
+}
+
+#[derive(Clone, Debug, Error)]
+pub enum CreateShaderModuleError {
+ #[error(transparent)]
+ Device(#[from] DeviceError),
+ #[error(transparent)]
+ Validation(#[from] naga::proc::ValidationError),
+}
+
+/// Describes a programmable pipeline stage.
+#[derive(Clone, Debug)]
+#[cfg_attr(feature = "trace", derive(serde::Serialize))]
+#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
+pub struct ProgrammableStageDescriptor<'a> {
+ /// The compiled shader module for this stage.
+ pub module: ShaderModuleId,
+ /// The name of the entry point in the compiled shader. There must be a function that returns
+ /// void with this name in the shader.
+ pub entry_point: Cow<'a, str>,
+}
+
+/// Number of implicit bind groups derived at pipeline creation.
+pub type ImplicitBindGroupCount = u8;
+
+#[derive(Clone, Debug, Error)]
+pub enum ImplicitLayoutError {
+ #[error("missing IDs for deriving {0} bind groups")]
+ MissingIds(ImplicitBindGroupCount),
+ #[error("unable to reflect the shader {0:?} interface")]
+ ReflectionError(wgt::ShaderStage),
+ #[error(transparent)]
+ BindGroup(#[from] CreateBindGroupLayoutError),
+ #[error(transparent)]
+ Pipeline(#[from] CreatePipelineLayoutError),
+}
+
+/// Describes a compute pipeline.
+#[derive(Clone, Debug)]
+#[cfg_attr(feature = "trace", derive(serde::Serialize))]
+#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
+pub struct ComputePipelineDescriptor<'a> {
+ pub label: Label<'a>,
+ /// The layout of bind groups for this pipeline.
+ pub layout: Option<PipelineLayoutId>,
+ /// The compiled compute stage and its entry point.
+ pub compute_stage: ProgrammableStageDescriptor<'a>,
+}
+
+#[derive(Clone, Debug, Error)]
+pub enum CreateComputePipelineError {
+ #[error(transparent)]
+ Device(#[from] DeviceError),
+ #[error("pipeline layout is invalid")]
+ InvalidLayout,
+ #[error("unable to derive an implicit layout")]
+ Implicit(#[from] ImplicitLayoutError),
+ #[error(transparent)]
+ Stage(StageError),
+}
+
+#[derive(Debug)]
+pub struct ComputePipeline<B: hal::Backend> {
+ pub(crate) raw: B::ComputePipeline,
+ pub(crate) layout_id: Stored<PipelineLayoutId>,
+ pub(crate) device_id: Stored<DeviceId>,
+ pub(crate) life_guard: LifeGuard,
+}
+
+impl<B: hal::Backend> Resource for ComputePipeline<B> {
+ const TYPE: &'static str = "ComputePipeline";
+
+ fn life_guard(&self) -> &LifeGuard {
+ &self.life_guard
+ }
+}
+
+/// Describes how the vertex buffer is interpreted.
+#[derive(Clone, Debug)]
+#[cfg_attr(feature = "trace", derive(serde::Serialize))]
+#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
+pub struct VertexBufferDescriptor<'a> {
+ /// The stride, in bytes, between elements of this buffer.
+ pub stride: BufferAddress,
+ /// How often this vertex buffer is "stepped" forward.
+ pub step_mode: InputStepMode,
+ /// The list of attributes which comprise a single vertex.
+ pub attributes: Cow<'a, [wgt::VertexAttributeDescriptor]>,
+}
+
+/// Describes vertex input state for a render pipeline.
+#[derive(Clone, Debug)]
+#[cfg_attr(feature = "trace", derive(serde::Serialize))]
+#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
+pub struct VertexStateDescriptor<'a> {
+ /// The format of any index buffers used with this pipeline.
+ pub index_format: IndexFormat,
+ /// The format of any vertex buffers used with this pipeline.
+ pub vertex_buffers: Cow<'a, [VertexBufferDescriptor<'a>]>,
+}
+
+/// Describes a render (graphics) pipeline.
+#[derive(Clone, Debug)]
+#[cfg_attr(feature = "trace", derive(serde::Serialize))]
+#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
+pub struct RenderPipelineDescriptor<'a> {
+ pub label: Label<'a>,
+ /// The layout of bind groups for this pipeline.
+ pub layout: Option<PipelineLayoutId>,
+ /// The compiled vertex stage and its entry point.
+ pub vertex_stage: ProgrammableStageDescriptor<'a>,
+ /// The compiled fragment stage and its entry point, if any.
+ pub fragment_stage: Option<ProgrammableStageDescriptor<'a>>,
+ /// The rasterization process for this pipeline.
+ pub rasterization_state: Option<wgt::RasterizationStateDescriptor>,
+ /// The primitive topology used to interpret vertices.
+ pub primitive_topology: wgt::PrimitiveTopology,
+ /// The effect of draw calls on the color aspect of the output target.
+ pub color_states: Cow<'a, [wgt::ColorStateDescriptor]>,
+ /// The effect of draw calls on the depth and stencil aspects of the output target, if any.
+ pub depth_stencil_state: Option<wgt::DepthStencilStateDescriptor>,
+ /// The vertex input state for this pipeline.
+ pub vertex_state: VertexStateDescriptor<'a>,
+ /// The number of samples calculated per pixel (for MSAA). For non-multisampled textures,
+ /// this should be `1`.
+ pub sample_count: u32,
+ /// Bitmask that restricts the samples of a pixel modified by this pipeline. All samples
+ /// can be enabled using the value `!0`.
+ pub sample_mask: u32,
+ /// When enabled, produces another sample mask per pixel based on the alpha output value, that
+ /// is ANDed with the sample_mask and the primitive coverage to restrict the set of samples
+ /// affected by a primitive.
+ ///
+ /// The implicit mask produced for alpha of zero is guaranteed to be zero, and for alpha of
+ /// one is guaranteed to be all 1s.
+ pub alpha_to_coverage_enabled: bool,
+}
+
+#[derive(Clone, Debug, Error)]
+pub enum CreateRenderPipelineError {
+ #[error(transparent)]
+ Device(#[from] DeviceError),
+ #[error("pipelie layout is invalid")]
+ InvalidLayout,
+ #[error("unable to derive an implicit layout")]
+ Implicit(#[from] ImplicitLayoutError),
+ #[error("missing output at index {index}")]
+ MissingOutput { index: u8 },
+ #[error("incompatible output format at index {index}")]
+ IncompatibleOutputFormat { index: u8 },
+ #[error("invalid sample count {0}")]
+ InvalidSampleCount(u32),
+ #[error("vertex buffer {index} stride {stride} does not respect `VERTEX_STRIDE_ALIGNMENT`")]
+ UnalignedVertexStride { index: u32, stride: BufferAddress },
+ #[error("vertex attribute at location {location} has invalid offset {offset}")]
+ InvalidVertexAttributeOffset {
+ location: wgt::ShaderLocation,
+ offset: BufferAddress,
+ },
+ #[error("missing required device features {0:?}")]
+ MissingFeature(wgt::Features),
+ #[error("error in stage {flag:?}")]
+ Stage {
+ flag: wgt::ShaderStage,
+ #[source]
+ error: StageError,
+ },
+}
+
+bitflags::bitflags! {
+ #[repr(transparent)]
+ pub struct PipelineFlags: u32 {
+ const BLEND_COLOR = 1;
+ const STENCIL_REFERENCE = 2;
+ const WRITES_DEPTH_STENCIL = 4;
+ }
+}
+
+#[derive(Debug)]
+pub struct RenderPipeline<B: hal::Backend> {
+ pub(crate) raw: B::GraphicsPipeline,
+ pub(crate) layout_id: Stored<PipelineLayoutId>,
+ pub(crate) device_id: Stored<DeviceId>,
+ pub(crate) pass_context: RenderPassContext,
+ pub(crate) flags: PipelineFlags,
+ pub(crate) index_format: IndexFormat,
+ pub(crate) vertex_strides: Vec<(BufferAddress, InputStepMode)>,
+ pub(crate) life_guard: LifeGuard,
+}
+
+impl<B: hal::Backend> Resource for RenderPipeline<B> {
+ const TYPE: &'static str = "RenderPipeline";
+
+ fn life_guard(&self) -> &LifeGuard {
+ &self.life_guard
+ }
+}
diff --git a/gfx/wgpu/wgpu-core/src/resource.rs b/gfx/wgpu/wgpu-core/src/resource.rs
new file mode 100644
index 0000000000..16319dd27e
--- /dev/null
+++ b/gfx/wgpu/wgpu-core/src/resource.rs
@@ -0,0 +1,447 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+use crate::{
+ device::{alloc::MemoryBlock, DeviceError, HostMap},
+ hub::Resource,
+ id::{DeviceId, SwapChainId, TextureId},
+ track::{TextureSelector, DUMMY_SELECTOR},
+ validation::MissingBufferUsageError,
+ Label, LifeGuard, RefCount, Stored,
+};
+
+use thiserror::Error;
+
+use std::{
+ borrow::Borrow,
+ num::{NonZeroU32, NonZeroU8},
+ ops::Range,
+ ptr::NonNull,
+};
+
+bitflags::bitflags! {
+ /// The internal set of usage flags mirrored from `BufferUsage`. The values don't have to match!
+ pub struct BufferUse: u32 {
+ const EMPTY = 0;
+ const MAP_READ = 1;
+ const MAP_WRITE = 2;
+ const COPY_SRC = 4;
+ const COPY_DST = 8;
+ const INDEX = 16;
+ const VERTEX = 32;
+ const UNIFORM = 64;
+ const STORAGE_LOAD = 128;
+ const STORAGE_STORE = 256;
+ const INDIRECT = 512;
+ /// The combination of all read-only usages.
+ const READ_ALL = Self::MAP_READ.bits | Self::COPY_SRC.bits |
+ Self::INDEX.bits | Self::VERTEX.bits | Self::UNIFORM.bits |
+ Self::STORAGE_LOAD.bits | Self::INDIRECT.bits;
+ /// The combination of all write-only and read-write usages.
+ const WRITE_ALL = Self::MAP_WRITE.bits | Self::COPY_DST.bits | Self::STORAGE_STORE.bits;
+ /// The combination of all usages that are guaranteed to be ordered by the hardware.
+ /// If a usage is not ordered, then even if it doesn't change between draw calls,
+ /// pipeline barriers still need to be inserted for synchronization.
+ const ORDERED = Self::READ_ALL.bits | Self::MAP_WRITE.bits | Self::COPY_DST.bits;
+ }
+}
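+
+// Illustrative sketch: read-only usages are ordered, so a buffer staying
+// within `ORDERED` between uses needs no pipeline barrier.
+//
+// assert!(BufferUse::ORDERED.contains(BufferUse::VERTEX));
+// assert!(!BufferUse::ORDERED.contains(BufferUse::STORAGE_STORE));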
+
+bitflags::bitflags! {
+ /// The internal set of usage flags mirrored from `TextureUsage`. The values don't have to match!
+ pub struct TextureUse: u32 {
+ const EMPTY = 0;
+ const COPY_SRC = 1;
+ const COPY_DST = 2;
+ const SAMPLED = 4;
+ const ATTACHMENT_READ = 8;
+ const ATTACHMENT_WRITE = 16;
+ const STORAGE_LOAD = 32;
+ const STORAGE_STORE = 48;
+ /// The combination of all read-only usages.
+ const READ_ALL = Self::COPY_SRC.bits | Self::SAMPLED.bits | Self::ATTACHMENT_READ.bits | Self::STORAGE_LOAD.bits;
+ /// The combination of all write-only and read-write usages.
+ const WRITE_ALL = Self::COPY_DST.bits | Self::ATTACHMENT_WRITE.bits | Self::STORAGE_STORE.bits;
+ /// The combination of all usages that are guaranteed to be ordered by the hardware.
+ /// If a usage is not ordered, then even if it doesn't change between draw calls,
+ /// pipeline barriers still need to be inserted for synchronization.
+ const ORDERED = Self::READ_ALL.bits | Self::COPY_DST.bits | Self::ATTACHMENT_WRITE.bits;
+ const UNINITIALIZED = 0xFFFF;
+ }
+}
+
+#[repr(C)]
+#[derive(Debug)]
+pub enum BufferMapAsyncStatus {
+ Success,
+ Error,
+ Unknown,
+ ContextLost,
+}
+
+#[derive(Debug)]
+pub(crate) enum BufferMapState<B: hal::Backend> {
+ /// Mapped at creation.
+ Init {
+ ptr: NonNull<u8>,
+ stage_buffer: B::Buffer,
+ stage_memory: MemoryBlock<B>,
+ needs_flush: bool,
+ },
+ /// Waiting for GPU to be done before mapping
+ Waiting(BufferPendingMapping),
+ /// Mapped
+ Active {
+ ptr: NonNull<u8>,
+ sub_range: hal::buffer::SubRange,
+ host: HostMap,
+ },
+ /// Not mapped
+ Idle,
+}
+
+unsafe impl<B: hal::Backend> Send for BufferMapState<B> {}
+unsafe impl<B: hal::Backend> Sync for BufferMapState<B> {}
+
+pub type BufferMapCallback = unsafe extern "C" fn(status: BufferMapAsyncStatus, userdata: *mut u8);
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct BufferMapOperation {
+ pub host: HostMap,
+ pub callback: BufferMapCallback,
+ pub user_data: *mut u8,
+}
+
+//TODO: clarify if/why this is needed here
+unsafe impl Send for BufferMapOperation {}
+unsafe impl Sync for BufferMapOperation {}
+
+impl BufferMapOperation {
+ pub(crate) fn call_error(self) {
+ tracing::error!("wgpu_buffer_map_async failed: buffer mapping is pending");
+ unsafe {
+ (self.callback)(BufferMapAsyncStatus::Error, self.user_data);
+ }
+ }
+}
+
+#[derive(Clone, Debug, Error)]
+pub enum BufferAccessError {
+ #[error(transparent)]
+ Device(#[from] DeviceError),
+ #[error("buffer is invalid")]
+ Invalid,
+ #[error("buffer is destroyed")]
+ Destroyed,
+ #[error("buffer is already mapped")]
+ AlreadyMapped,
+ #[error(transparent)]
+ MissingBufferUsage(#[from] MissingBufferUsageError),
+ #[error("buffer is not mapped")]
+ NotMapped,
+ #[error("buffer map range does not respect `COPY_BUFFER_ALIGNMENT`")]
+ UnalignedRange,
+}
+
+#[derive(Debug)]
+pub(crate) struct BufferPendingMapping {
+ pub range: Range<wgt::BufferAddress>,
+ pub op: BufferMapOperation,
+ // hold the parent alive while the mapping is active
+ pub parent_ref_count: RefCount,
+}
+
+pub type BufferDescriptor<'a> = wgt::BufferDescriptor<Label<'a>>;
+
+#[derive(Debug)]
+pub struct Buffer<B: hal::Backend> {
+ pub(crate) raw: Option<(B::Buffer, MemoryBlock<B>)>,
+ pub(crate) device_id: Stored<DeviceId>,
+ pub(crate) usage: wgt::BufferUsage,
+ pub(crate) size: wgt::BufferAddress,
+ pub(crate) full_range: (),
+ pub(crate) sync_mapped_writes: Option<hal::memory::Segment>,
+ pub(crate) life_guard: LifeGuard,
+ pub(crate) map_state: BufferMapState<B>,
+}
+
+#[derive(Clone, Debug, Error)]
+pub enum CreateBufferError {
+ #[error(transparent)]
+ Device(#[from] DeviceError),
+ #[error("failed to map buffer while creating: {0}")]
+ AccessError(#[from] BufferAccessError),
+ #[error("buffers that are mapped at creation have to be aligned to `COPY_BUFFER_ALIGNMENT`")]
+ UnalignedSize,
+ #[error("`MAP` usage can only be combined with the opposite `COPY`, requested {0:?}")]
+ UsageMismatch(wgt::BufferUsage),
+}
+
+impl<B: hal::Backend> Resource for Buffer<B> {
+ const TYPE: &'static str = "Buffer";
+
+ fn life_guard(&self) -> &LifeGuard {
+ &self.life_guard
+ }
+}
+
+impl<B: hal::Backend> Borrow<()> for Buffer<B> {
+ fn borrow(&self) -> &() {
+ &DUMMY_SELECTOR
+ }
+}
+
+pub type TextureDescriptor<'a> = wgt::TextureDescriptor<Label<'a>>;
+
+#[derive(Debug)]
+pub struct Texture<B: hal::Backend> {
+ pub(crate) raw: Option<(B::Image, MemoryBlock<B>)>,
+ pub(crate) device_id: Stored<DeviceId>,
+ pub(crate) usage: wgt::TextureUsage,
+ pub(crate) aspects: hal::format::Aspects,
+ pub(crate) dimension: wgt::TextureDimension,
+ pub(crate) kind: hal::image::Kind,
+ pub(crate) format: wgt::TextureFormat,
+ pub(crate) full_range: TextureSelector,
+ pub(crate) life_guard: LifeGuard,
+}
+
+#[derive(Clone, Debug)]
+pub enum TextureErrorDimension {
+ X,
+ Y,
+ Z,
+}
+
+#[derive(Clone, Debug, Error)]
+pub enum TextureDimensionError {
+ #[error("Dimension {0:?} is zero")]
+ Zero(TextureErrorDimension),
+ #[error("1D textures must have height set to 1")]
+ InvalidHeight,
+ #[error("sample count {0} is invalid")]
+ InvalidSampleCount(u32),
+}
+
+#[derive(Clone, Debug, Error)]
+pub enum CreateTextureError {
+ #[error(transparent)]
+ Device(#[from] DeviceError),
+ #[error("D24Plus textures cannot be copied")]
+ CannotCopyD24Plus,
+ #[error(transparent)]
+ InvalidDimension(#[from] TextureDimensionError),
+ #[error("texture descriptor mip level count ({0}) is invalid")]
+ InvalidMipLevelCount(u32),
+ #[error("Feature {0:?} must be enabled to create a texture of type {1:?}")]
+ MissingFeature(wgt::Features, wgt::TextureFormat),
+}
+
+impl<B: hal::Backend> Resource for Texture<B> {
+ const TYPE: &'static str = "Texture";
+
+ fn life_guard(&self) -> &LifeGuard {
+ &self.life_guard
+ }
+}
+
+impl<B: hal::Backend> Borrow<TextureSelector> for Texture<B> {
+ fn borrow(&self) -> &TextureSelector {
+ &self.full_range
+ }
+}
+
+/// Describes a [`TextureView`].
+#[derive(Clone, Debug, Default, PartialEq)]
+#[cfg_attr(feature = "trace", derive(serde::Serialize))]
+#[cfg_attr(feature = "replay", derive(serde::Deserialize), serde(default))]
+pub struct TextureViewDescriptor<'a> {
+ /// Debug label of the texture view. This will show up in graphics debuggers for easy identification.
+ pub label: Label<'a>,
+ /// Format of the texture view, or `None` for the same format as the texture itself.
+ /// At this time, it must be the same as the underlying format of the texture.
+ pub format: Option<wgt::TextureFormat>,
+ /// The dimension of the texture view. For 1D textures, this must be `D1`. For 2D textures it
+ /// must be one of `D2`, `D2Array`, `Cube`, or `CubeArray`. For 3D textures it must be `D3`.
+ pub dimension: Option<wgt::TextureViewDimension>,
+ /// Aspect of the texture. Color textures must be [`TextureAspect::All`].
+ pub aspect: wgt::TextureAspect,
+ /// Base mip level.
+ pub base_mip_level: u32,
+ /// Mip level count.
+ /// If `Some(count)`, `base_mip_level + count` must be less than or equal to the underlying
+ /// texture mip count.
+ /// If `None`, considered to include the rest of the mipmap levels, but at least 1 in total.
+ pub level_count: Option<NonZeroU32>,
+ /// Base array layer.
+ pub base_array_layer: u32,
+ /// Layer count.
+ /// If `Some(count)`, `base_array_layer + count` must be less than or equal to the underlying
+ /// array count.
+ /// If `None`, considered to include the rest of the array layers, but at least 1 in total.
+ pub array_layer_count: Option<NonZeroU32>,
+}
+
+#[derive(Debug)]
+pub(crate) enum TextureViewInner<B: hal::Backend> {
+ Native {
+ raw: B::ImageView,
+ source_id: Stored<TextureId>,
+ },
+ SwapChain {
+ image: <B::Surface as hal::window::PresentationSurface<B>>::SwapchainImage,
+ source_id: Stored<SwapChainId>,
+ },
+}
+
+#[derive(Debug)]
+pub struct TextureView<B: hal::Backend> {
+ pub(crate) inner: TextureViewInner<B>,
+ //TODO: store device_id for quick access?
+ pub(crate) aspects: hal::format::Aspects,
+ pub(crate) format: wgt::TextureFormat,
+ pub(crate) extent: hal::image::Extent,
+ pub(crate) samples: hal::image::NumSamples,
+ pub(crate) selector: TextureSelector,
+ pub(crate) life_guard: LifeGuard,
+}
+
+#[derive(Clone, Debug, Error)]
+pub enum CreateTextureViewError {
+ #[error("parent texture is invalid or destroyed")]
+ InvalidTexture,
+ #[error("not enough memory left")]
+ OutOfMemory,
+ #[error("Invalid texture view dimension `{view:?}` with texture of dimension `{image:?}`")]
+ InvalidTextureViewDimension {
+ view: wgt::TextureViewDimension,
+ image: wgt::TextureDimension,
+ },
+ #[error("Invalid texture depth `{depth}` for texture view of dimension `Cubemap`. Cubemap views must use images of size 6.")]
+ InvalidCubemapTextureDepth { depth: u16 },
+ #[error("Invalid texture depth `{depth}` for texture view of dimension `CubemapArray`. Cubemap views must use images with sizes which are a multiple of 6.")]
+ InvalidCubemapArrayTextureDepth { depth: u16 },
+ #[error(
+ "TextureView mip level count + base mip level {requested} must be <= Texture mip level count {total}"
+ )]
+ TooManyMipLevels { requested: u32, total: u8 },
+ #[error("TextureView array layer count + base array layer {requested} must be <= Texture depth/array layer count {total}")]
+ TooManyArrayLayers { requested: u32, total: u16 },
+ #[error("Requested array layer count {requested} is not valid for the target view dimension {dim:?}")]
+ InvalidArrayLayerCount {
+ requested: u32,
+ dim: wgt::TextureViewDimension,
+ },
+ #[error("Aspect {requested:?} is not in the source texture ({total:?})")]
+ InvalidAspect {
+ requested: hal::format::Aspects,
+ total: hal::format::Aspects,
+ },
+}
+
+#[derive(Clone, Debug, Error)]
+pub enum TextureViewDestroyError {
+ #[error("cannot destroy swap chain image")]
+ SwapChainImage,
+}
+
+impl<B: hal::Backend> Resource for TextureView<B> {
+ const TYPE: &'static str = "TextureView";
+
+ fn life_guard(&self) -> &LifeGuard {
+ &self.life_guard
+ }
+}
+
+impl<B: hal::Backend> Borrow<()> for TextureView<B> {
+ fn borrow(&self) -> &() {
+ &DUMMY_SELECTOR
+ }
+}
+
+/// Describes a [`Sampler`]
+#[derive(Clone, Debug, PartialEq)]
+#[cfg_attr(feature = "trace", derive(serde::Serialize))]
+#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
+pub struct SamplerDescriptor<'a> {
+ /// Debug label of the sampler. This will show up in graphics debuggers for easy identification.
+ pub label: Label<'a>,
+ /// How to deal with out-of-bounds accesses in the u, v, and w (i.e. x, y, z) directions
+ pub address_modes: [wgt::AddressMode; 3],
+ /// How to filter the texture when it needs to be magnified (made larger)
+ pub mag_filter: wgt::FilterMode,
+ /// How to filter the texture when it needs to be minified (made smaller)
+ pub min_filter: wgt::FilterMode,
+ /// How to filter between mip map levels
+ pub mipmap_filter: wgt::FilterMode,
+ /// Minimum level of detail (i.e. mip level) to use
+ pub lod_min_clamp: f32,
+ /// Maximum level of detail (i.e. mip level) to use
+ pub lod_max_clamp: f32,
+ /// If this is enabled, this is a comparison sampler using the given comparison function.
+ pub compare: Option<wgt::CompareFunction>,
+ /// Valid values: 1, 2, 4, 8, and 16.
+ pub anisotropy_clamp: Option<NonZeroU8>,
+ /// Border color to use when address_mode is [`AddressMode::ClampToBorder`]
+ pub border_color: Option<wgt::SamplerBorderColor>,
+}
+
+impl Default for SamplerDescriptor<'_> {
+ fn default() -> Self {
+ Self {
+ label: None,
+ address_modes: Default::default(),
+ mag_filter: Default::default(),
+ min_filter: Default::default(),
+ mipmap_filter: Default::default(),
+ lod_min_clamp: 0.0,
+ lod_max_clamp: std::f32::MAX,
+ compare: None,
+ anisotropy_clamp: None,
+ border_color: None,
+ }
+ }
+}
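+
+// Illustrative sketch: override only the fields you need and take the rest
+// from `Default` (assuming the `wgt` defaults, e.g. `FilterMode::Nearest`):
+//
+// let desc = SamplerDescriptor {
+// label: Some(std::borrow::Cow::Borrowed("nearest-clamp")),
+// ..Default::default()
+// };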
+
+#[derive(Debug)]
+pub struct Sampler<B: hal::Backend> {
+ pub(crate) raw: B::Sampler,
+ pub(crate) device_id: Stored<DeviceId>,
+ pub(crate) life_guard: LifeGuard,
+ /// `true` if this is a comparison sampler
+ pub(crate) comparison: bool,
+}
+
+#[derive(Clone, Debug, Error)]
+pub enum CreateSamplerError {
+ #[error(transparent)]
+ Device(#[from] DeviceError),
+ #[error("invalid anisotropic clamp {0}, must be one of 1, 2, 4, 8 or 16")]
+ InvalidClamp(u8),
+ #[error("cannot create any more samplers")]
+ TooManyObjects,
+ /// AddressMode::ClampToBorder requires feature ADDRESS_MODE_CLAMP_TO_BORDER
+ #[error("Feature {0:?} must be enabled")]
+ MissingFeature(wgt::Features),
+}
+
+impl<B: hal::Backend> Resource for Sampler<B> {
+ const TYPE: &'static str = "Sampler";
+
+ fn life_guard(&self) -> &LifeGuard {
+ &self.life_guard
+ }
+}
+
+impl<B: hal::Backend> Borrow<()> for Sampler<B> {
+ fn borrow(&self) -> &() {
+ &DUMMY_SELECTOR
+ }
+}
+
+#[derive(Clone, Debug, Error)]
+pub enum DestroyError {
+ #[error("resource is invalid")]
+ Invalid,
+ #[error("resource is already destroyed")]
+ AlreadyDestroyed,
+}
diff --git a/gfx/wgpu/wgpu-core/src/swap_chain.rs b/gfx/wgpu/wgpu-core/src/swap_chain.rs
new file mode 100644
index 0000000000..22c65495bc
--- /dev/null
+++ b/gfx/wgpu/wgpu-core/src/swap_chain.rs
@@ -0,0 +1,294 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*! Swap chain management.
+
+ ## Lifecycle
+
+ At the low level, the swap chain uses the new simplified model of gfx-rs.
+
+ A swap chain is a separate object that is backend-dependent but shares the index with
+ the parent surface, which is backend-independent. This ensures a 1:1 correspondence
+ between them.
+
+ `get_next_image()` requests a new image from the surface. It becomes a part of
+ `TextureViewInner::SwapChain` of the resulting view. The view is registered in the HUB
+ but not in the device tracker.
+
+ The only operation allowed on the view is to be either a color or a resolve attachment.
+ It can only be used in one command buffer, which needs to be submitted before presenting.
+ Command buffer tracker knows about the view, but only for the duration of recording.
+ The view ID is erased from it at the end, so that it's not merged into the device tracker.
+
+ When a swapchain view is used in `begin_render_pass()`, we assume the start and end image
+ layouts purely based on whether or not this view was used in this command buffer before.
+ It always starts with `Uninitialized` and ends with `Present`, so that no barriers are
+ needed when we need to actually present it.
+
+ In `queue_submit()` we make sure to signal the semaphore whenever we render to a swap
+ chain view.
+
+ In `present()` we return the swap chain image back and wait on the semaphore.
+!*/
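+
+// Illustrative per-frame sketch (embedder side; identifiers are placeholders):
+//
+// let output = global.swap_chain_get_current_texture_view::<B>(sc_id, view_id_in)?;
+// // ... record and submit a render pass targeting `output.view_id` ...
+// global.swap_chain_present::<B>(sc_id)?;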
+
+#[cfg(feature = "trace")]
+use crate::device::trace::Action;
+use crate::{
+ conv,
+ device::DeviceError,
+ hub::{GfxBackend, Global, GlobalIdentityHandlerFactory, Input, Token},
+ id::{DeviceId, SwapChainId, TextureViewId, Valid},
+ resource, span,
+ track::TextureSelector,
+ LifeGuard, PrivateFeatures, Stored, SubmissionIndex,
+};
+
+use hal::{self, device::Device as _, queue::CommandQueue as _, window::PresentationSurface as _};
+use thiserror::Error;
+use wgt::{SwapChainDescriptor, SwapChainStatus};
+
+const FRAME_TIMEOUT_MS: u64 = 1000;
+pub const DESIRED_NUM_FRAMES: u32 = 3;
+
+#[derive(Debug)]
+pub struct SwapChain<B: hal::Backend> {
+ pub(crate) life_guard: LifeGuard,
+ pub(crate) device_id: Stored<DeviceId>,
+ pub(crate) desc: SwapChainDescriptor,
+ pub(crate) num_frames: hal::window::SwapImageIndex,
+ pub(crate) semaphore: B::Semaphore,
+ pub(crate) acquired_view_id: Option<Stored<TextureViewId>>,
+ pub(crate) acquired_framebuffers: Vec<B::Framebuffer>,
+ pub(crate) active_submission_index: SubmissionIndex,
+}
+
+impl<B: hal::Backend> crate::hub::Resource for SwapChain<B> {
+ const TYPE: &'static str = "SwapChain";
+
+ fn life_guard(&self) -> &LifeGuard {
+ &self.life_guard
+ }
+}
+
+#[derive(Clone, Debug, Error)]
+pub enum SwapChainError {
+ #[error("swap chain is invalid")]
+ Invalid,
+ #[error("parent surface is invalid")]
+ InvalidSurface,
+ #[error(transparent)]
+ Device(#[from] DeviceError),
+ #[error("swap chain image is already acquired")]
+ AlreadyAcquired,
+}
+
+#[derive(Clone, Debug, Error)]
+pub enum CreateSwapChainError {
+ #[error(transparent)]
+ Device(#[from] DeviceError),
+ #[error("invalid surface")]
+ InvalidSurface,
+ #[error("`SwapChainOutput` must be dropped before a new `SwapChain` is made")]
+ SwapChainOutputExists,
+ #[error("surface does not support the adapter's queue family")]
+ UnsupportedQueueFamily,
+ #[error("requested format {requested:?} is not in list of supported formats: {available:?}")]
+ UnsupportedFormat {
+ requested: hal::format::Format,
+ available: Vec<hal::format::Format>,
+ },
+}
+
+pub(crate) fn swap_chain_descriptor_to_hal(
+ desc: &SwapChainDescriptor,
+ num_frames: u32,
+ private_features: PrivateFeatures,
+) -> hal::window::SwapchainConfig {
+ let mut config = hal::window::SwapchainConfig::new(
+ desc.width,
+ desc.height,
+ conv::map_texture_format(desc.format, private_features),
+ num_frames,
+ );
+ //TODO: check for supported
+ config.image_usage = conv::map_texture_usage(desc.usage, hal::format::Aspects::COLOR);
+ config.composite_alpha_mode = hal::window::CompositeAlphaMode::OPAQUE;
+ config.present_mode = match desc.present_mode {
+ wgt::PresentMode::Immediate => hal::window::PresentMode::IMMEDIATE,
+ wgt::PresentMode::Mailbox => hal::window::PresentMode::MAILBOX,
+ wgt::PresentMode::Fifo => hal::window::PresentMode::FIFO,
+ };
+ config
+}
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct SwapChainOutput {
+ pub status: SwapChainStatus,
+ pub view_id: Option<TextureViewId>,
+}
+
+impl<G: GlobalIdentityHandlerFactory> Global<G> {
+ pub fn swap_chain_get_current_texture_view<B: GfxBackend>(
+ &self,
+ swap_chain_id: SwapChainId,
+ view_id_in: Input<G, TextureViewId>,
+ ) -> Result<SwapChainOutput, SwapChainError> {
+ span!(_guard, INFO, "SwapChain::get_next_texture");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+
+ let (mut surface_guard, mut token) = self.surfaces.write(&mut token);
+ let surface = surface_guard
+ .get_mut(swap_chain_id.to_surface_id())
+ .map_err(|_| SwapChainError::InvalidSurface)?;
+ let (device_guard, mut token) = hub.devices.read(&mut token);
+ let (mut swap_chain_guard, mut token) = hub.swap_chains.write(&mut token);
+ let sc = swap_chain_guard
+ .get_mut(swap_chain_id)
+ .map_err(|_| SwapChainError::Invalid)?;
+ #[cfg_attr(not(feature = "trace"), allow(unused_variables))]
+ let device = &device_guard[sc.device_id.value];
+
+ let suf = B::get_surface_mut(surface);
+ let (image, status) = match unsafe { suf.acquire_image(FRAME_TIMEOUT_MS * 1_000_000) } {
+ Ok((surface_image, None)) => (Some(surface_image), SwapChainStatus::Good),
+ Ok((surface_image, Some(_))) => (Some(surface_image), SwapChainStatus::Suboptimal),
+ Err(err) => (
+ None,
+ match err {
+ hal::window::AcquireError::OutOfMemory(_) => Err(DeviceError::OutOfMemory)?,
+ hal::window::AcquireError::NotReady => unreachable!(), // we always set a timeout
+ hal::window::AcquireError::Timeout => SwapChainStatus::Timeout,
+ hal::window::AcquireError::OutOfDate => SwapChainStatus::Outdated,
+ hal::window::AcquireError::SurfaceLost(_) => SwapChainStatus::Lost,
+ hal::window::AcquireError::DeviceLost(_) => Err(DeviceError::Lost)?,
+ },
+ ),
+ };
+
+ let view_id = match image {
+ Some(image) => {
+ let view = resource::TextureView {
+ inner: resource::TextureViewInner::SwapChain {
+ image,
+ source_id: Stored {
+ value: Valid(swap_chain_id),
+ ref_count: sc.life_guard.add_ref(),
+ },
+ },
+ aspects: hal::format::Aspects::COLOR,
+ format: sc.desc.format,
+ extent: hal::image::Extent {
+ width: sc.desc.width,
+ height: sc.desc.height,
+ depth: 1,
+ },
+ samples: 1,
+ selector: TextureSelector {
+ layers: 0..1,
+ levels: 0..1,
+ },
+ life_guard: LifeGuard::new("<SwapChain View>"),
+ };
+
+ let ref_count = view.life_guard.add_ref();
+ let id = hub
+ .texture_views
+ .register_identity(view_id_in, view, &mut token);
+
+ if sc.acquired_view_id.is_some() {
+ return Err(SwapChainError::AlreadyAcquired);
+ }
+
+ sc.acquired_view_id = Some(Stored {
+ value: id,
+ ref_count,
+ });
+
+ Some(id.0)
+ }
+ None => None,
+ };
+
+ #[cfg(feature = "trace")]
+ if let Some(ref trace) = device.trace {
+ trace.lock().add(Action::GetSwapChainTexture {
+ id: view_id,
+ parent_id: swap_chain_id,
+ });
+ }
+
+ Ok(SwapChainOutput { status, view_id })
+ }
+
+ pub fn swap_chain_present<B: GfxBackend>(
+ &self,
+ swap_chain_id: SwapChainId,
+ ) -> Result<SwapChainStatus, SwapChainError> {
+ span!(_guard, INFO, "SwapChain::present");
+
+ let hub = B::hub(self);
+ let mut token = Token::root();
+
+ let (mut surface_guard, mut token) = self.surfaces.write(&mut token);
+ let surface = surface_guard
+ .get_mut(swap_chain_id.to_surface_id())
+ .map_err(|_| SwapChainError::InvalidSurface)?;
+ let (mut device_guard, mut token) = hub.devices.write(&mut token);
+ let (mut swap_chain_guard, mut token) = hub.swap_chains.write(&mut token);
+ let sc = swap_chain_guard
+ .get_mut(swap_chain_id)
+ .map_err(|_| SwapChainError::Invalid)?;
+ let device = &mut device_guard[sc.device_id.value];
+
+ #[cfg(feature = "trace")]
+ if let Some(ref trace) = device.trace {
+ trace.lock().add(Action::PresentSwapChain(swap_chain_id));
+ }
+
+ let view_id = sc
+ .acquired_view_id
+ .take()
+ .ok_or(SwapChainError::AlreadyAcquired)?;
+ let (view_maybe, _) = hub.texture_views.unregister(view_id.value.0, &mut token);
+ let view = view_maybe.ok_or(SwapChainError::Invalid)?;
+ let image = match view.inner {
+ resource::TextureViewInner::Native { .. } => unreachable!(),
+ resource::TextureViewInner::SwapChain { image, .. } => image,
+ };
+
+ let sem = if sc.active_submission_index > device.last_completed_submission_index() {
+ Some(&sc.semaphore)
+ } else {
+ None
+ };
+ let queue = &mut device.queue_group.queues[0];
+ let result = unsafe { queue.present(B::get_surface_mut(surface), image, sem) };
+
+ tracing::debug!(trace = true, "Presented. End of Frame");
+
+ for fbo in sc.acquired_framebuffers.drain(..) {
+ unsafe {
+ device.raw.destroy_framebuffer(fbo);
+ }
+ }
+
+ match result {
+ Ok(None) => Ok(SwapChainStatus::Good),
+ Ok(Some(_)) => Ok(SwapChainStatus::Suboptimal),
+ Err(err) => match err {
+ hal::window::PresentError::OutOfMemory(_) => {
+ Err(SwapChainError::Device(DeviceError::OutOfMemory))
+ }
+ hal::window::PresentError::OutOfDate => Ok(SwapChainStatus::Outdated),
+ hal::window::PresentError::SurfaceLost(_) => Ok(SwapChainStatus::Lost),
+ hal::window::PresentError::DeviceLost(_) => {
+ Err(SwapChainError::Device(DeviceError::Lost))
+ }
+ },
+ }
+ }
+}
diff --git a/gfx/wgpu/wgpu-core/src/track/buffer.rs b/gfx/wgpu/wgpu-core/src/track/buffer.rs
new file mode 100644
index 0000000000..e4999a9ae4
--- /dev/null
+++ b/gfx/wgpu/wgpu-core/src/track/buffer.rs
@@ -0,0 +1,241 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+use super::{PendingTransition, ResourceState, Unit};
+use crate::{
+ id::{BufferId, Valid},
+ resource::BufferUse,
+};
+
+//TODO: store `hal::buffer::State` here to avoid extra conversions
+pub(crate) type BufferState = Unit<BufferUse>;
+
+impl PendingTransition<BufferState> {
+ fn collapse(self) -> Result<BufferUse, Self> {
+ if self.usage.start.is_empty()
+ || self.usage.start == self.usage.end
+ || !BufferUse::WRITE_ALL.intersects(self.usage.start | self.usage.end)
+ {
+ Ok(self.usage.start | self.usage.end)
+ } else {
+ Err(self)
+ }
+ }
+}
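The rule `collapse` implements is that two usages may merge into one combined state only when nothing actually changes or no write access is involved; otherwise a real barrier is required. A standalone sketch, with hypothetical bit values standing in for `BufferUse` flags:

```rust
fn main() {
    // Hypothetical flags: two read-only bits and one write bit.
    const VERTEX: u8 = 0b001;
    const INDEX: u8 = 0b010;
    const STORAGE_STORE: u8 = 0b100;
    const WRITE_ALL: u8 = 0b100;

    let collapse = |start: u8, end: u8| -> Option<u8> {
        if start == 0 || start == end || WRITE_ALL & (start | end) == 0 {
            Some(start | end) // safe to keep as one combined usage
        } else {
            None // a write is involved: a genuine transition is needed
        }
    };

    // Two read-only usages combine into one state: no barrier needed.
    assert_eq!(collapse(VERTEX, INDEX), Some(VERTEX | INDEX));
    // A read-to-write change cannot be collapsed.
    assert_eq!(collapse(INDEX, STORAGE_STORE), None);
}
```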
+
+impl Default for BufferState {
+ fn default() -> Self {
+ Self {
+ first: None,
+ last: BufferUse::empty(),
+ }
+ }
+}
+
+impl BufferState {
+ pub fn with_usage(usage: BufferUse) -> Self {
+ Unit::new(usage)
+ }
+}
+
+impl ResourceState for BufferState {
+ type Id = BufferId;
+ type Selector = ();
+ type Usage = BufferUse;
+
+ fn query(&self, _selector: Self::Selector) -> Option<Self::Usage> {
+ Some(self.last)
+ }
+
+ fn change(
+ &mut self,
+ id: Valid<Self::Id>,
+ _selector: Self::Selector,
+ usage: Self::Usage,
+ output: Option<&mut Vec<PendingTransition<Self>>>,
+ ) -> Result<(), PendingTransition<Self>> {
+ let old = self.last;
+ if old != usage || !BufferUse::ORDERED.contains(usage) {
+ let pending = PendingTransition {
+ id,
+ selector: (),
+ usage: old..usage,
+ };
+ *self = match output {
+ None => {
+ assert_eq!(
+ self.first, None,
+ "extending a state that is already a transition"
+ );
+ Unit::new(pending.collapse()?)
+ }
+ Some(transitions) => {
+ transitions.push(pending);
+ Unit {
+ first: self.first.or(Some(old)),
+ last: usage,
+ }
+ }
+ };
+ }
+ Ok(())
+ }
+
+ fn prepend(
+ &mut self,
+ id: Valid<Self::Id>,
+ _selector: Self::Selector,
+ usage: Self::Usage,
+ ) -> Result<(), PendingTransition<Self>> {
+ match self.first {
+ Some(old) if old != usage => Err(PendingTransition {
+ id,
+ selector: (),
+ usage: old..usage,
+ }),
+ _ => {
+ self.first = Some(usage);
+ Ok(())
+ }
+ }
+ }
+
+ fn merge(
+ &mut self,
+ id: Valid<Self::Id>,
+ other: &Self,
+ output: Option<&mut Vec<PendingTransition<Self>>>,
+ ) -> Result<(), PendingTransition<Self>> {
+ let old = self.last;
+ let new = other.port();
+ if old == new && BufferUse::ORDERED.contains(new) {
+ if output.is_some() && self.first.is_none() {
+ self.first = Some(old);
+ }
+ } else {
+ let pending = PendingTransition {
+ id,
+ selector: (),
+ usage: old..new,
+ };
+ *self = match output {
+ None => {
+ assert_eq!(
+ self.first, None,
+ "extending a state that is already a transition"
+ );
+ Unit::new(pending.collapse()?)
+ }
+ Some(transitions) => {
+ transitions.push(pending);
+ Unit {
+ first: self.first.or(Some(old)),
+ last: other.last,
+ }
+ }
+ };
+ }
+ Ok(())
+ }
+
+ fn optimize(&mut self) {}
+}
+
+#[cfg(test)]
+mod test {
+ use super::*;
+ use crate::id::Id;
+
+ #[test]
+ fn change_extend() {
+ let mut bs = Unit {
+ first: None,
+ last: BufferUse::INDEX,
+ };
+ let id = Id::dummy();
+ assert_eq!(
+ bs.change(id, (), BufferUse::STORAGE_STORE, None),
+ Err(PendingTransition {
+ id,
+ selector: (),
+ usage: BufferUse::INDEX..BufferUse::STORAGE_STORE,
+ }),
+ );
+ bs.change(id, (), BufferUse::VERTEX, None).unwrap();
+ bs.change(id, (), BufferUse::INDEX, None).unwrap();
+ assert_eq!(bs, Unit::new(BufferUse::VERTEX | BufferUse::INDEX));
+ }
+
+ #[test]
+ fn change_replace() {
+ let mut bs = Unit {
+ first: None,
+ last: BufferUse::STORAGE_STORE,
+ };
+ let id = Id::dummy();
+ let mut list = Vec::new();
+ bs.change(id, (), BufferUse::VERTEX, Some(&mut list))
+ .unwrap();
+ assert_eq!(
+ &list,
+ &[PendingTransition {
+ id,
+ selector: (),
+ usage: BufferUse::STORAGE_STORE..BufferUse::VERTEX,
+ }],
+ );
+ assert_eq!(
+ bs,
+ Unit {
+ first: Some(BufferUse::STORAGE_STORE),
+ last: BufferUse::VERTEX,
+ }
+ );
+
+ list.clear();
+ bs.change(id, (), BufferUse::STORAGE_STORE, Some(&mut list))
+ .unwrap();
+ assert_eq!(
+ &list,
+ &[PendingTransition {
+ id,
+ selector: (),
+ usage: BufferUse::VERTEX..BufferUse::STORAGE_STORE,
+ }],
+ );
+ assert_eq!(
+ bs,
+ Unit {
+ first: Some(BufferUse::STORAGE_STORE),
+ last: BufferUse::STORAGE_STORE,
+ }
+ );
+ }
+
+ #[test]
+ fn prepend() {
+ let mut bs = Unit {
+ first: None,
+ last: BufferUse::VERTEX,
+ };
+ let id = Id::dummy();
+ bs.prepend(id, (), BufferUse::INDEX).unwrap();
+ bs.prepend(id, (), BufferUse::INDEX).unwrap();
+ assert_eq!(
+ bs.prepend(id, (), BufferUse::STORAGE_LOAD),
+ Err(PendingTransition {
+ id,
+ selector: (),
+ usage: BufferUse::INDEX..BufferUse::STORAGE_LOAD,
+ })
+ );
+ assert_eq!(
+ bs,
+ Unit {
+ first: Some(BufferUse::INDEX),
+ last: BufferUse::VERTEX,
+ }
+ );
+ }
+}
diff --git a/gfx/wgpu/wgpu-core/src/track/mod.rs b/gfx/wgpu/wgpu-core/src/track/mod.rs
new file mode 100644
index 0000000000..6d7e908ef6
--- /dev/null
+++ b/gfx/wgpu/wgpu-core/src/track/mod.rs
@@ -0,0 +1,593 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+mod buffer;
+mod range;
+mod texture;
+
+use crate::{
+ conv, hub,
+ id::{self, TypedId, Valid},
+ resource, Epoch, FastHashMap, Index, RefCount,
+};
+
+use std::{collections::hash_map::Entry, fmt, marker::PhantomData, ops, vec::Drain};
+use thiserror::Error;
+
+pub(crate) use buffer::BufferState;
+pub(crate) use texture::{TextureSelector, TextureState};
+
+/// A single unit of state tracking. It keeps an initial
+/// usage as well as the last/current one, similar to `Range`.
+#[derive(Clone, Copy, Debug, PartialEq)]
+pub struct Unit<U> {
+ first: Option<U>,
+ last: U,
+}
+
+impl<U: Copy> Unit<U> {
+ /// Create a new unit from a given usage.
+ fn new(usage: U) -> Self {
+ Self {
+ first: None,
+ last: usage,
+ }
+ }
+
+ /// Return the usage to link against: the initial usage if one was recorded, otherwise the last.
+ fn port(&self) -> U {
+ self.first.unwrap_or(self.last)
+ }
+}
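To make the `first`/`last` bookkeeping concrete, here is a standalone sketch that re-declares a toy `Unit`, with string usages as a hypothetical stand-in for the real bitflag types:

```rust
#[derive(Clone, Copy, Debug, PartialEq)]
struct Unit<U> {
    first: Option<U>, // usage before the first recorded transition, if any
    last: U,          // current usage
}

impl<U: Copy> Unit<U> {
    fn new(usage: U) -> Self {
        Self { first: None, last: usage }
    }
    fn port(&self) -> U {
        self.first.unwrap_or(self.last)
    }
}

fn main() {
    // A unit that never transitioned links to its current usage.
    let fresh = Unit::new("VERTEX");
    assert_eq!(fresh.port(), "VERTEX");

    // A unit that recorded a transition links to the usage it started from,
    // so a merging tracker can connect to the state *before* the transition.
    let replaced = Unit { first: Some("COPY_DST"), last: "VERTEX" };
    assert_eq!(replaced.port(), "COPY_DST");
}
```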
+
+/// The main trait that abstracts away the tracking logic of
+/// a particular resource type, like a buffer or a texture.
+pub(crate) trait ResourceState: Clone + Default {
+ /// Corresponding `HUB` identifier.
+ type Id: Copy + fmt::Debug + TypedId;
+ /// A type specifying the sub-resources.
+ type Selector: fmt::Debug;
+ /// Usage type for a `Unit` of a sub-resource.
+ type Usage: fmt::Debug;
+
+ /// Check if all the selected sub-resources have the same
+ /// usage, and return it.
+ ///
+ /// Returns `None` if no sub-resources
+ /// are intersecting with the selector, or their usage
+ /// isn't consistent.
+ fn query(&self, selector: Self::Selector) -> Option<Self::Usage>;
+
+ /// Change the last usage of the selected sub-resources.
+ ///
+ /// If `output` is specified, it's filled with the
+ /// `PendingTransition` objects corresponding to smaller
+ /// sub-resource transitions. The old usage is replaced by
+ /// the new one.
+ ///
+ /// If `output` is `None`, the old usage is extended with
+ /// the new usage. If that is not possible, an error specifying
+ /// the conflicting transition is returned. Extension can only
+ /// be done for read-only usages.
+ fn change(
+ &mut self,
+ id: Valid<Self::Id>,
+ selector: Self::Selector,
+ usage: Self::Usage,
+ output: Option<&mut Vec<PendingTransition<Self>>>,
+ ) -> Result<(), PendingTransition<Self>>;
+
+ /// Sets up the first usage of the selected sub-resources.
+ fn prepend(
+ &mut self,
+ id: Valid<Self::Id>,
+ selector: Self::Selector,
+ usage: Self::Usage,
+ ) -> Result<(), PendingTransition<Self>>;
+
+ /// Merge the state of this resource tracked by a different instance
+ /// with the current one.
+ ///
+ /// The same rules for `output` apply as with `change()`: the last usage
+ /// state is either replaced (when `output` is provided) with a
+ /// `PendingTransition` pushed to that vector, or extended with the
+ /// other read-only usage. On a usage conflict, the conflicting
+ /// transition is returned as the error.
+ fn merge(
+ &mut self,
+ id: Valid<Self::Id>,
+ other: &Self,
+ output: Option<&mut Vec<PendingTransition<Self>>>,
+ ) -> Result<(), PendingTransition<Self>>;
+
+ /// Try to optimize the internal representation.
+ fn optimize(&mut self);
+}
+
+/// Structure wrapping the abstract tracking state with the relevant resource
+/// data, such as the reference count and the epoch.
+#[derive(Clone)]
+struct Resource<S> {
+ ref_count: RefCount,
+ state: S,
+ epoch: Epoch,
+}
+
+/// A structure containing all the information about a particular resource
+/// transition. User code should be able to generate a pipeline barrier
+/// based on the contents.
+#[derive(Debug, PartialEq)]
+pub(crate) struct PendingTransition<S: ResourceState> {
+ pub id: Valid<S::Id>,
+ pub selector: S::Selector,
+ pub usage: ops::Range<S::Usage>,
+}
+
+impl PendingTransition<BufferState> {
+ /// Produce the gfx-hal barrier corresponding to the transition.
+ pub fn into_hal<'a, B: hal::Backend>(
+ self,
+ buf: &'a resource::Buffer<B>,
+ ) -> hal::memory::Barrier<'a, B> {
+ tracing::trace!("\tbuffer -> {:?}", self);
+ let &(ref target, _) = buf.raw.as_ref().expect("Buffer is destroyed");
+ hal::memory::Barrier::Buffer {
+ states: conv::map_buffer_state(self.usage.start)
+ ..conv::map_buffer_state(self.usage.end),
+ target,
+ range: hal::buffer::SubRange::WHOLE,
+ families: None,
+ }
+ }
+}
+
+impl PendingTransition<TextureState> {
+ /// Produce the gfx-hal barrier corresponding to the transition.
+ pub fn into_hal<'a, B: hal::Backend>(
+ self,
+ tex: &'a resource::Texture<B>,
+ ) -> hal::memory::Barrier<'a, B> {
+ tracing::trace!("\ttexture -> {:?}", self);
+ let &(ref target, _) = tex.raw.as_ref().expect("Texture is destroyed");
+ let aspects = tex.aspects;
+ hal::memory::Barrier::Image {
+ states: conv::map_texture_state(self.usage.start, aspects)
+ ..conv::map_texture_state(self.usage.end, aspects),
+ target,
+ range: hal::image::SubresourceRange {
+ aspects,
+ level_start: self.selector.levels.start,
+ level_count: Some(self.selector.levels.end - self.selector.levels.start),
+ layer_start: self.selector.layers.start,
+ layer_count: Some(self.selector.layers.end - self.selector.layers.start),
+ },
+ families: None,
+ }
+ }
+}
+
+#[derive(Clone, Debug, Error)]
+pub enum UseExtendError<U: fmt::Debug> {
+ #[error("resource is invalid")]
+ InvalidResource,
+ #[error("total usage {0:?} is not valid")]
+ Conflict(U),
+}
+
+/// A tracker for all resources of a given type.
+pub(crate) struct ResourceTracker<S: ResourceState> {
+ /// An association of known resource indices with their tracked states.
+ map: FastHashMap<Index, Resource<S>>,
+ /// Temporary storage for collecting transitions.
+ temp: Vec<PendingTransition<S>>,
+ /// The backend variant for all the tracked resources.
+ backend: wgt::Backend,
+}
+
+impl<S: ResourceState + fmt::Debug> fmt::Debug for ResourceTracker<S> {
+ fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+ self.map
+ .iter()
+ .map(|(&index, res)| ((index, res.epoch), &res.state))
+ .collect::<FastHashMap<_, _>>()
+ .fmt(formatter)
+ }
+}
+
+impl<S: ResourceState> ResourceTracker<S> {
+ /// Create a new empty tracker.
+ pub fn new(backend: wgt::Backend) -> Self {
+ Self {
+ map: FastHashMap::default(),
+ temp: Vec::new(),
+ backend,
+ }
+ }
+
+ /// Remove an id from the tracked map.
+ pub(crate) fn remove(&mut self, id: Valid<S::Id>) -> bool {
+ let (index, epoch, backend) = id.0.unzip();
+ debug_assert_eq!(backend, self.backend);
+ match self.map.remove(&index) {
+ Some(resource) => {
+ assert_eq!(resource.epoch, epoch);
+ true
+ }
+ None => false,
+ }
+ }
+
+ /// Removes the resource from the tracker if we are holding the last reference.
+ pub(crate) fn remove_abandoned(&mut self, id: Valid<S::Id>) -> bool {
+ let (index, epoch, backend) = id.0.unzip();
+ debug_assert_eq!(backend, self.backend);
+ match self.map.entry(index) {
+ Entry::Occupied(e) => {
+ if e.get().ref_count.load() == 1 {
+ let res = e.remove();
+ assert_eq!(res.epoch, epoch);
+ true
+ } else {
+ false
+ }
+ }
+ _ => false,
+ }
+ }
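`remove_abandoned` hinges on the tracker's clone of the `RefCount` being the only one left alive. The same idea, sketched with `std::sync::Arc` strong counts as a hypothetical stand-in for the crate's `RefCount`:

```rust
use std::collections::HashMap;
use std::sync::Arc;

// Returns true only when the map held the last reference, like `remove_abandoned`.
fn remove_abandoned(map: &mut HashMap<u32, Arc<()>>, index: u32) -> bool {
    match map.get(&index) {
        Some(rc) if Arc::strong_count(rc) == 1 => {
            map.remove(&index);
            true
        }
        _ => false,
    }
}

fn main() {
    let mut map = HashMap::new();
    map.insert(7, Arc::new(()));
    let external = Arc::clone(map.get(&7).unwrap()); // someone else still holds it
    assert!(!remove_abandoned(&mut map, 7));
    drop(external); // the last external reference goes away
    assert!(remove_abandoned(&mut map, 7));
}
```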
+
+ /// Try to optimize the internal representation.
+ pub(crate) fn optimize(&mut self) {
+ for resource in self.map.values_mut() {
+ resource.state.optimize();
+ }
+ }
+
+ /// Return an iterator over the keys of the used resources.
+ pub fn used<'a>(&'a self) -> impl 'a + Iterator<Item = Valid<S::Id>> {
+ let backend = self.backend;
+ self.map
+ .iter()
+ .map(move |(&index, resource)| Valid(S::Id::zip(index, resource.epoch, backend)))
+ }
+
+ /// Clear the tracked contents.
+ fn clear(&mut self) {
+ self.map.clear();
+ }
+
+ /// Initialize a resource to be used.
+ ///
+ /// Returns the existing state as an error if the resource is already registered.
+ pub(crate) fn init(
+ &mut self,
+ id: Valid<S::Id>,
+ ref_count: RefCount,
+ state: S,
+ ) -> Result<(), &S> {
+ let (index, epoch, backend) = id.0.unzip();
+ debug_assert_eq!(backend, self.backend);
+ match self.map.entry(index) {
+ Entry::Vacant(e) => {
+ e.insert(Resource {
+ ref_count,
+ state,
+ epoch,
+ });
+ Ok(())
+ }
+ Entry::Occupied(e) => Err(&e.into_mut().state),
+ }
+ }
+
+ /// Query the usage of a resource selector.
+ ///
+ /// Returns `Some(Usage)` only if this usage is consistent
+ /// across the given selector.
+ pub fn query(&self, id: Valid<S::Id>, selector: S::Selector) -> Option<S::Usage> {
+ let (index, epoch, backend) = id.0.unzip();
+ debug_assert_eq!(backend, self.backend);
+ let res = self.map.get(&index)?;
+ assert_eq!(res.epoch, epoch);
+ res.state.query(selector)
+ }
+
+ /// Make sure that a resource is tracked, and return a mutable
+ /// reference to it.
+ fn get_or_insert<'a>(
+ self_backend: wgt::Backend,
+ map: &'a mut FastHashMap<Index, Resource<S>>,
+ id: Valid<S::Id>,
+ ref_count: &RefCount,
+ ) -> &'a mut Resource<S> {
+ let (index, epoch, backend) = id.0.unzip();
+ debug_assert_eq!(self_backend, backend);
+ match map.entry(index) {
+ Entry::Vacant(e) => e.insert(Resource {
+ ref_count: ref_count.clone(),
+ state: S::default(),
+ epoch,
+ }),
+ Entry::Occupied(e) => {
+ assert_eq!(e.get().epoch, epoch);
+ e.into_mut()
+ }
+ }
+ }
+
+ /// Extend the usage of a specified resource.
+ ///
+ /// Returns conflicting transition as an error.
+ pub(crate) fn change_extend(
+ &mut self,
+ id: Valid<S::Id>,
+ ref_count: &RefCount,
+ selector: S::Selector,
+ usage: S::Usage,
+ ) -> Result<(), PendingTransition<S>> {
+ Self::get_or_insert(self.backend, &mut self.map, id, ref_count)
+ .state
+ .change(id, selector, usage, None)
+ }
+
+ /// Replace the usage of a specified resource.
+ pub(crate) fn change_replace(
+ &mut self,
+ id: Valid<S::Id>,
+ ref_count: &RefCount,
+ selector: S::Selector,
+ usage: S::Usage,
+ ) -> Drain<PendingTransition<S>> {
+ let res = Self::get_or_insert(self.backend, &mut self.map, id, ref_count);
+ res.state
+ .change(id, selector, usage, Some(&mut self.temp))
+ .ok(); //TODO: unwrap?
+ self.temp.drain(..)
+ }
+
+ /// Turn the tracking from the "expand" mode into the "replace" one,
+ /// installing the selected usage as the "first".
+ /// This is a special operation only used by the render pass attachments.
+ pub(crate) fn prepend(
+ &mut self,
+ id: Valid<S::Id>,
+ ref_count: &RefCount,
+ selector: S::Selector,
+ usage: S::Usage,
+ ) -> Result<(), PendingTransition<S>> {
+ Self::get_or_insert(self.backend, &mut self.map, id, ref_count)
+ .state
+ .prepend(id, selector, usage)
+ }
+
+ /// Merge another tracker into `self` by extending the current states
+ /// without any transitions.
+ pub(crate) fn merge_extend(&mut self, other: &Self) -> Result<(), PendingTransition<S>> {
+ debug_assert_eq!(self.backend, other.backend);
+ for (&index, new) in other.map.iter() {
+ match self.map.entry(index) {
+ Entry::Vacant(e) => {
+ e.insert(new.clone());
+ }
+ Entry::Occupied(e) => {
+ assert_eq!(e.get().epoch, new.epoch);
+ let id = Valid(S::Id::zip(index, new.epoch, self.backend));
+ e.into_mut().state.merge(id, &new.state, None)?;
+ }
+ }
+ }
+ Ok(())
+ }
+
+ /// Merge another tracker, adding its transitions to `self`.
+ /// Transitions the current usage to the new one.
+ pub(crate) fn merge_replace<'a>(&'a mut self, other: &'a Self) -> Drain<PendingTransition<S>> {
+ for (&index, new) in other.map.iter() {
+ match self.map.entry(index) {
+ Entry::Vacant(e) => {
+ e.insert(new.clone());
+ }
+ Entry::Occupied(e) => {
+ assert_eq!(e.get().epoch, new.epoch);
+ let id = Valid(S::Id::zip(index, new.epoch, self.backend));
+ e.into_mut()
+ .state
+ .merge(id, &new.state, Some(&mut self.temp))
+ .ok(); //TODO: unwrap?
+ }
+ }
+ }
+ self.temp.drain(..)
+ }
+
+ /// Use a given resource provided by an `Id` with the specified usage.
+ /// Combines storage access by `Id` with the transition that extends
+ /// the last read-only usage, if possible.
+ ///
+ /// Returns the old usage as an error if there is a conflict.
+ pub(crate) fn use_extend<'a, T: 'a + hub::Resource>(
+ &mut self,
+ storage: &'a hub::Storage<T, S::Id>,
+ id: S::Id,
+ selector: S::Selector,
+ usage: S::Usage,
+ ) -> Result<&'a T, UseExtendError<S::Usage>> {
+ let item = storage
+ .get(id)
+ .map_err(|_| UseExtendError::InvalidResource)?;
+ self.change_extend(
+ Valid(id),
+ item.life_guard().ref_count.as_ref().unwrap(),
+ selector,
+ usage,
+ )
+ .map(|()| item)
+ .map_err(|pending| UseExtendError::Conflict(pending.usage.end))
+ }
+
+ /// Use a given resource provided by an `Id` with the specified usage.
+ /// Combines storage access by `Id` with the transition that replaces
+ /// the last usage with a new one, returning an iterator over these
+ /// transitions.
+ pub(crate) fn use_replace<'a, T: 'a + hub::Resource>(
+ &mut self,
+ storage: &'a hub::Storage<T, S::Id>,
+ id: S::Id,
+ selector: S::Selector,
+ usage: S::Usage,
+ ) -> Result<(&'a T, Drain<PendingTransition<S>>), S::Id> {
+ let item = storage.get(id).map_err(|_| id)?;
+ let drain = self.change_replace(
+ Valid(id),
+ item.life_guard().ref_count.as_ref().unwrap(),
+ selector,
+ usage,
+ );
+ Ok((item, drain))
+ }
+}
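The two flavors above, `change_extend`/`use_extend` versus `change_replace`/`use_replace`, can be summarized in a standalone toy. The bitflags are hypothetical (low bits read-only, bit 2 a write), and this sketches the contract rather than the real implementation:

```rust
use std::collections::HashMap;

type Usage = u8;
const WRITE_ALL: Usage = 0b100;

struct Tracker {
    map: HashMap<u32, Usage>,
    temp: Vec<(Usage, Usage)>, // pending (from, to) transitions
}

impl Tracker {
    // Extend mode: combine usages; refuse anything that needs a real barrier.
    fn change_extend(&mut self, id: u32, usage: Usage) -> Result<(), (Usage, Usage)> {
        let state = self.map.entry(id).or_insert(usage);
        let combined = *state | usage;
        if WRITE_ALL & combined != 0 && *state != usage {
            return Err((*state, usage)); // conflicting transition
        }
        *state = combined;
        Ok(())
    }

    // Replace mode: record a transition (a future barrier) and adopt the new usage.
    fn change_replace(&mut self, id: u32, usage: Usage) -> &[(Usage, Usage)] {
        self.temp.clear();
        let state = self.map.entry(id).or_insert(usage);
        if *state != usage {
            self.temp.push((*state, usage));
            *state = usage;
        }
        &self.temp
    }
}

fn main() {
    let mut t = Tracker { map: HashMap::new(), temp: Vec::new() };
    t.change_extend(1, 0b001).unwrap(); // a read-only usage
    t.change_extend(1, 0b010).unwrap(); // another read: extension is fine
    assert!(t.change_extend(1, 0b100).is_err()); // read -> write cannot extend
    assert_eq!(t.change_replace(1, 0b100), &[(0b011, 0b100)]); // records a barrier
}
```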
+
+impl<I: Copy + fmt::Debug + TypedId> ResourceState for PhantomData<I> {
+ type Id = I;
+ type Selector = ();
+ type Usage = ();
+
+ fn query(&self, _selector: Self::Selector) -> Option<Self::Usage> {
+ Some(())
+ }
+
+ fn change(
+ &mut self,
+ _id: Valid<Self::Id>,
+ _selector: Self::Selector,
+ _usage: Self::Usage,
+ _output: Option<&mut Vec<PendingTransition<Self>>>,
+ ) -> Result<(), PendingTransition<Self>> {
+ Ok(())
+ }
+
+ fn prepend(
+ &mut self,
+ _id: Valid<Self::Id>,
+ _selector: Self::Selector,
+ _usage: Self::Usage,
+ ) -> Result<(), PendingTransition<Self>> {
+ Ok(())
+ }
+
+ fn merge(
+ &mut self,
+ _id: Valid<Self::Id>,
+ _other: &Self,
+ _output: Option<&mut Vec<PendingTransition<Self>>>,
+ ) -> Result<(), PendingTransition<Self>> {
+ Ok(())
+ }
+
+ fn optimize(&mut self) {}
+}
+
+pub const DUMMY_SELECTOR: () = ();
+
+#[derive(Clone, Debug, Error)]
+pub enum UsageConflict {
+ #[error(
+ "Attempted to use buffer {id:?} as a combination of {combined_use:?} within a usage scope."
+ )]
+ Buffer {
+ id: id::BufferId,
+ combined_use: resource::BufferUse,
+ },
+ #[error("Attempted to use texture {id:?} mips {mip_levels:?} layers {array_layers:?} as a combination of {combined_use:?} within a usage scope.")]
+ Texture {
+ id: id::TextureId,
+ mip_levels: ops::Range<u32>,
+ array_layers: ops::Range<u32>,
+ combined_use: resource::TextureUse,
+ },
+}
+
+/// A set of trackers for all relevant resources.
+#[derive(Debug)]
+pub(crate) struct TrackerSet {
+ pub buffers: ResourceTracker<BufferState>,
+ pub textures: ResourceTracker<TextureState>,
+ pub views: ResourceTracker<PhantomData<id::TextureViewId>>,
+ pub bind_groups: ResourceTracker<PhantomData<id::BindGroupId>>,
+ pub samplers: ResourceTracker<PhantomData<id::SamplerId>>,
+ pub compute_pipes: ResourceTracker<PhantomData<id::ComputePipelineId>>,
+ pub render_pipes: ResourceTracker<PhantomData<id::RenderPipelineId>>,
+ pub bundles: ResourceTracker<PhantomData<id::RenderBundleId>>,
+}
+
+impl TrackerSet {
+ /// Create an empty set.
+ pub fn new(backend: wgt::Backend) -> Self {
+ Self {
+ buffers: ResourceTracker::new(backend),
+ textures: ResourceTracker::new(backend),
+ views: ResourceTracker::new(backend),
+ bind_groups: ResourceTracker::new(backend),
+ samplers: ResourceTracker::new(backend),
+ compute_pipes: ResourceTracker::new(backend),
+ render_pipes: ResourceTracker::new(backend),
+ bundles: ResourceTracker::new(backend),
+ }
+ }
+
+ /// Clear all the trackers.
+ pub fn clear(&mut self) {
+ self.buffers.clear();
+ self.textures.clear();
+ self.views.clear();
+ self.bind_groups.clear();
+ self.samplers.clear();
+ self.compute_pipes.clear();
+ self.render_pipes.clear();
+ self.bundles.clear();
+ }
+
+ /// Try to optimize the tracking representation.
+ pub fn optimize(&mut self) {
+ self.buffers.optimize();
+ self.textures.optimize();
+ self.views.optimize();
+ self.bind_groups.optimize();
+ self.samplers.optimize();
+ self.compute_pipes.optimize();
+ self.render_pipes.optimize();
+ self.bundles.optimize();
+ }
+
+ /// Merge all the trackers of another instance by extending
+ /// the usage. Returns an error on a buffer or texture usage conflict.
+ pub fn merge_extend(&mut self, other: &Self) -> Result<(), UsageConflict> {
+ self.buffers
+ .merge_extend(&other.buffers)
+ .map_err(|e| UsageConflict::Buffer {
+ id: e.id.0,
+ combined_use: e.usage.end,
+ })?;
+ self.textures
+ .merge_extend(&other.textures)
+ .map_err(|e| UsageConflict::Texture {
+ id: e.id.0,
+ mip_levels: e.selector.levels.start as u32..e.selector.levels.end as u32,
+ array_layers: e.selector.layers.start as u32..e.selector.layers.end as u32,
+ combined_use: e.usage.end,
+ })?;
+ self.views.merge_extend(&other.views).unwrap();
+ self.bind_groups.merge_extend(&other.bind_groups).unwrap();
+ self.samplers.merge_extend(&other.samplers).unwrap();
+ self.compute_pipes
+ .merge_extend(&other.compute_pipes)
+ .unwrap();
+ self.render_pipes.merge_extend(&other.render_pipes).unwrap();
+ self.bundles.merge_extend(&other.bundles).unwrap();
+ Ok(())
+ }
+
+ pub fn backend(&self) -> wgt::Backend {
+ self.buffers.backend
+ }
+}
diff --git a/gfx/wgpu/wgpu-core/src/track/range.rs b/gfx/wgpu/wgpu-core/src/track/range.rs
new file mode 100644
index 0000000000..458861e1a9
--- /dev/null
+++ b/gfx/wgpu/wgpu-core/src/track/range.rs
@@ -0,0 +1,399 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+use smallvec::SmallVec;
+
+use std::{cmp::Ordering, fmt::Debug, iter, ops::Range, slice::Iter};
+
+/// Structure that keeps track of an I -> T mapping,
+/// optimized for a case where keys of the same values
+/// are often grouped together linearly.
+#[derive(Clone, Debug, PartialEq)]
+pub struct RangedStates<I, T> {
+ /// List of ranges, each associated with a single value.
+ /// Ranges of keys have to be non-intersecting and ordered.
+ ranges: SmallVec<[(Range<I>, T); 1]>,
+}
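For intuition, each entry stores one maximal run of keys sharing a value. A standalone sketch of a lookup over such a list, with a plain `Vec` as a hypothetical stand-in for the `SmallVec` storage:

```rust
use std::ops::Range;

fn main() {
    // Keys 0..3 map to 'a' and keys 3..5 map to 'b'; each run is one entry.
    let ranges: Vec<(Range<u32>, char)> = vec![(0..3, 'a'), (3..5, 'b')];
    let lookup = |key: u32| {
        ranges
            .iter()
            .find(|(range, _)| range.contains(&key))
            .map(|&(_, value)| value)
    };
    assert_eq!(lookup(2), Some('a'));
    assert_eq!(lookup(3), Some('b'));
    assert_eq!(lookup(9), None); // gaps between runs are allowed
}
```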
+
+impl<I: Copy + PartialOrd, T: Copy + PartialEq> RangedStates<I, T> {
+ pub fn empty() -> Self {
+ Self {
+ ranges: SmallVec::new(),
+ }
+ }
+
+ pub fn from_range(range: Range<I>, value: T) -> Self {
+ Self {
+ ranges: iter::once((range, value)).collect(),
+ }
+ }
+
+ /// Construct a new instance from a slice of ranges.
+ #[cfg(test)]
+ pub fn from_slice(values: &[(Range<I>, T)]) -> Self {
+ Self {
+ ranges: values.iter().cloned().collect(),
+ }
+ }
+
+ /// Clear all the ranges.
+ pub fn clear(&mut self) {
+ self.ranges.clear();
+ }
+
+ /// Append a range.
+ ///
+ /// Assumes that the object is being constructed from a set of
+ /// ranges, and they are given in the ascending order of their keys.
+ pub fn append(&mut self, index: Range<I>, value: T) {
+ if let Some(last) = self.ranges.last() {
+ debug_assert!(last.0.end <= index.start);
+ }
+ self.ranges.push((index, value));
+ }
+
+ /// Check that all the ranges are non-intersecting and ordered.
+ /// Panics otherwise.
+ #[cfg(test)]
+ fn check_sanity(&self) {
+ for a in self.ranges.iter() {
+ assert!(a.0.start < a.0.end);
+ }
+ for (a, b) in self.ranges.iter().zip(self.ranges[1..].iter()) {
+ assert!(a.0.end <= b.0.start);
+ }
+ }
+
+ /// Merge the neighboring ranges together, where possible.
+ pub fn coalesce(&mut self) {
+ let mut num_removed = 0;
+ let mut iter = self.ranges.iter_mut();
+ let mut cur = match iter.next() {
+ Some(elem) => elem,
+ None => return,
+ };
+ for next in iter {
+ if cur.0.end == next.0.start && cur.1 == next.1 {
+ num_removed += 1;
+ cur.0.end = next.0.end;
+ next.0.end = next.0.start;
+ } else {
+ cur = next;
+ }
+ }
+ if num_removed != 0 {
+ self.ranges.retain(|pair| pair.0.start != pair.0.end);
+ }
+ }
+
+ /// Check if all intersecting ranges have the same value, which is returned.
+ ///
+ /// Returns `None` if no intersections are detected.
+ /// Returns `Some(Err)` if the intersected values are inconsistent.
+ pub fn query<U: PartialEq>(
+ &self,
+ index: &Range<I>,
+ fun: impl Fn(&T) -> U,
+ ) -> Option<Result<U, ()>> {
+ let mut result = None;
+ for &(ref range, ref value) in self.ranges.iter() {
+ if range.end > index.start && range.start < index.end {
+ let old = result.replace(fun(value));
+ if old.is_some() && old != result {
+ return Some(Err(()));
+ }
+ }
+ }
+ result.map(Ok)
+ }
+
+ /// Split the storage ranges in such a way that there is a linear subset of
+ /// them occupying exactly `index` range, which is returned mutably.
+ ///
+ /// Gaps in the ranges are filled with `default` value.
+ pub fn isolate(&mut self, index: &Range<I>, default: T) -> &mut [(Range<I>, T)] {
+ //TODO: implement this in 2 passes:
+ // 1. scan the ranges to figure out how many extra ones need to be inserted
+ // 2. go through the ranges, moving them to the right and inserting the missing ones
+
+ let mut start_pos = match self.ranges.iter().position(|pair| pair.0.end > index.start) {
+ Some(pos) => pos,
+ None => {
+ let pos = self.ranges.len();
+ self.ranges.push((index.clone(), default));
+ return &mut self.ranges[pos..];
+ }
+ };
+
+ {
+ let (range, value) = self.ranges[start_pos].clone();
+ if range.start < index.start {
+ self.ranges[start_pos].0.start = index.start;
+ self.ranges
+ .insert(start_pos, (range.start..index.start, value));
+ start_pos += 1;
+ }
+ }
+ let mut pos = start_pos;
+ let mut range_pos = index.start;
+ loop {
+ let (range, value) = self.ranges[pos].clone();
+ if range.start >= index.end {
+ self.ranges.insert(pos, (range_pos..index.end, default));
+ pos += 1;
+ break;
+ }
+ if range.start > range_pos {
+ self.ranges.insert(pos, (range_pos..range.start, default));
+ pos += 1;
+ range_pos = range.start;
+ }
+ if range.end >= index.end {
+ if range.end != index.end {
+ self.ranges[pos].0.start = index.end;
+ self.ranges.insert(pos, (range_pos..index.end, value));
+ }
+ pos += 1;
+ break;
+ }
+ pos += 1;
+ range_pos = range.end;
+ if pos == self.ranges.len() {
+ self.ranges.push((range_pos..index.end, default));
+ pos += 1;
+ break;
+ }
+ }
+
+ &mut self.ranges[start_pos..pos]
+ }
+
+ /// Helper method for isolation that checks the sanity of the results.
+ #[cfg(test)]
+ pub fn sanely_isolated(&self, index: Range<I>, default: T) -> Vec<(Range<I>, T)> {
+ let mut clone = self.clone();
+ let result = clone.isolate(&index, default).to_vec();
+ clone.check_sanity();
+ result
+ }
+
+ /// Produce an iterator that merges two instances together.
+ ///
+ /// Each range in the returned iterator is a subset of a range in either
+ /// `self` or `other`, with the associated values returned as a `Range`
+ /// going from the value in `self` to the value in `other`.
+ pub fn merge<'a>(&'a self, other: &'a Self, base: I) -> Merge<'a, I, T> {
+ Merge {
+ base,
+ sa: self.ranges.iter().peekable(),
+ sb: other.ranges.iter().peekable(),
+ }
+ }
+}
+
+/// A custom iterator that goes through two `RangedStates` and processes a merge.
+#[derive(Debug)]
+pub struct Merge<'a, I, T> {
+ base: I,
+ sa: iter::Peekable<Iter<'a, (Range<I>, T)>>,
+ sb: iter::Peekable<Iter<'a, (Range<I>, T)>>,
+}
+
+impl<'a, I: Copy + Debug + Ord, T: Copy + Debug> Iterator for Merge<'a, I, T> {
+ type Item = (Range<I>, Range<Option<T>>);
+ fn next(&mut self) -> Option<Self::Item> {
+ match (self.sa.peek(), self.sb.peek()) {
+ // we have both streams
+ (Some(&(ref ra, va)), Some(&(ref rb, vb))) => {
+ let (range, usage) = if ra.start < self.base {
+ // in the middle of the left stream
+ if self.base == rb.start {
+ // right stream is starting
+ debug_assert!(self.base < ra.end);
+ (self.base..ra.end.min(rb.end), Some(*va)..Some(*vb))
+ } else {
+ // right hasn't started yet
+ debug_assert!(self.base < rb.start);
+ (self.base..rb.start, Some(*va)..None)
+ }
+ } else if rb.start < self.base {
+ // in the middle of the right stream
+ if self.base == ra.start {
+ // left stream is starting
+ debug_assert!(self.base < rb.end);
+ (self.base..ra.end.min(rb.end), Some(*va)..Some(*vb))
+ } else {
+ // left hasn't started yet
+ debug_assert!(self.base < ra.start);
+ (self.base..ra.start, None..Some(*vb))
+ }
+ } else {
+ // no active streams
+ match ra.start.cmp(&rb.start) {
+ // both are starting
+ Ordering::Equal => (ra.start..ra.end.min(rb.end), Some(*va)..Some(*vb)),
+ // only left is starting
+ Ordering::Less => (ra.start..rb.start.min(ra.end), Some(*va)..None),
+ // only right is starting
+ Ordering::Greater => (rb.start..ra.start.min(rb.end), None..Some(*vb)),
+ }
+ };
+ self.base = range.end;
+ if ra.end == range.end {
+ let _ = self.sa.next();
+ }
+ if rb.end == range.end {
+ let _ = self.sb.next();
+ }
+ Some((range, usage))
+ }
+ // only right stream
+ (None, Some(&(ref rb, vb))) => {
+ let range = self.base.max(rb.start)..rb.end;
+ self.base = rb.end;
+ let _ = self.sb.next();
+ Some((range, None..Some(*vb)))
+ }
+ // only left stream
+ (Some(&(ref ra, va)), None) => {
+ let range = self.base.max(ra.start)..ra.end;
+ self.base = ra.end;
+ let _ = self.sa.next();
+ Some((range, Some(*va)..None))
+ }
+ // done
+ (None, None) => None,
+ }
+ }
+}
+
+#[cfg(test)]
+mod test {
+ //TODO: randomized/fuzzy testing
+ use super::RangedStates;
+ use std::{fmt::Debug, ops::Range};
+
+ fn easy_merge<T: PartialEq + Copy + Debug>(
+ ra: &[(Range<usize>, T)],
+ rb: &[(Range<usize>, T)],
+ ) -> Vec<(Range<usize>, Range<Option<T>>)> {
+ RangedStates::from_slice(ra)
+ .merge(&RangedStates::from_slice(rb), 0)
+ .collect()
+ }
+
+ #[test]
+ fn sane_good() {
+ let rs = RangedStates::from_slice(&[(1..4, 9u8), (4..5, 9)]);
+ rs.check_sanity();
+ }
+
+ #[test]
+ #[should_panic]
+ fn sane_empty() {
+ let rs = RangedStates::from_slice(&[(1..4, 9u8), (5..5, 9)]);
+ rs.check_sanity();
+ }
+
+ #[test]
+ #[should_panic]
+ fn sane_intersect() {
+ let rs = RangedStates::from_slice(&[(1..4, 9u8), (3..5, 9)]);
+ rs.check_sanity();
+ }
+
+ #[test]
+ fn coalesce() {
+ let mut rs = RangedStates::from_slice(&[(1..4, 9u8), (4..5, 9), (5..7, 1), (8..9, 1)]);
+ rs.coalesce();
+ rs.check_sanity();
+ assert_eq!(rs.ranges.as_slice(), &[(1..5, 9), (5..7, 1), (8..9, 1),]);
+ }
+
+ #[test]
+ fn query() {
+ let rs = RangedStates::from_slice(&[(1..4, 1u8), (5..7, 2)]);
+ assert_eq!(rs.query(&(0..1), |v| *v), None);
+ assert_eq!(rs.query(&(1..3), |v| *v), Some(Ok(1)));
+ assert_eq!(rs.query(&(1..6), |v| *v), Some(Err(())));
+ }
+
+ #[test]
+ fn isolate() {
+ let rs = RangedStates::from_slice(&[(1..4, 9u8), (4..5, 9), (5..7, 1), (8..9, 1)]);
+ assert_eq!(&rs.sanely_isolated(4..5, 0), &[(4..5, 9u8),]);
+ assert_eq!(
+ &rs.sanely_isolated(0..6, 0),
+ &[(0..1, 0), (1..4, 9u8), (4..5, 9), (5..6, 1),]
+ );
+ assert_eq!(&rs.sanely_isolated(8..10, 1), &[(8..9, 1), (9..10, 1),]);
+ assert_eq!(
+ &rs.sanely_isolated(6..9, 0),
+ &[(6..7, 1), (7..8, 0), (8..9, 1),]
+ );
+ }
+
+ #[test]
+ fn merge_same() {
+ assert_eq!(
+ &easy_merge(&[(1..4, 0u8),], &[(1..4, 2u8),],),
+ &[(1..4, Some(0)..Some(2)),]
+ );
+ }
+
+ #[test]
+ fn merge_empty() {
+ assert_eq!(
+ &easy_merge(&[(1..2, 0u8),], &[],),
+ &[(1..2, Some(0)..None),]
+ );
+ assert_eq!(
+ &easy_merge(&[], &[(3..4, 1u8),],),
+ &[(3..4, None..Some(1)),]
+ );
+ }
+
+ #[test]
+ fn merge_separate() {
+ assert_eq!(
+ &easy_merge(&[(1..2, 0u8), (5..6, 1u8),], &[(2..4, 2u8),],),
+ &[
+ (1..2, Some(0)..None),
+ (2..4, None..Some(2)),
+ (5..6, Some(1)..None),
+ ]
+ );
+ }
+
+ #[test]
+ fn merge_subset() {
+ assert_eq!(
+ &easy_merge(&[(1..6, 0u8),], &[(2..4, 2u8),],),
+ &[
+ (1..2, Some(0)..None),
+ (2..4, Some(0)..Some(2)),
+ (4..6, Some(0)..None),
+ ]
+ );
+ assert_eq!(
+ &easy_merge(&[(2..4, 0u8),], &[(1..4, 2u8),],),
+ &[(1..2, None..Some(2)), (2..4, Some(0)..Some(2)),]
+ );
+ }
+
+ #[test]
+ fn merge_all() {
+ assert_eq!(
+ &easy_merge(&[(1..4, 0u8), (5..8, 1u8),], &[(2..6, 2u8), (7..9, 3u8),],),
+ &[
+ (1..2, Some(0)..None),
+ (2..4, Some(0)..Some(2)),
+ (4..5, None..Some(2)),
+ (5..6, Some(1)..Some(2)),
+ (6..7, Some(1)..None),
+ (7..8, Some(1)..Some(3)),
+ (8..9, None..Some(3)),
+ ]
+ );
+ }
+}
diff --git a/gfx/wgpu/wgpu-core/src/track/texture.rs b/gfx/wgpu/wgpu-core/src/track/texture.rs
new file mode 100644
index 0000000000..6d1d4a5935
--- /dev/null
+++ b/gfx/wgpu/wgpu-core/src/track/texture.rs
@@ -0,0 +1,466 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+use super::{range::RangedStates, PendingTransition, ResourceState, Unit};
+use crate::{
+ device::MAX_MIP_LEVELS,
+ id::{TextureId, Valid},
+ resource::TextureUse,
+};
+
+use arrayvec::ArrayVec;
+
+use std::{iter, ops::Range};
+
+//TODO: store `hal::image::State` here to avoid extra conversions
+type PlaneStates = RangedStates<hal::image::Layer, Unit<TextureUse>>;
+
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub struct TextureSelector {
+ //pub aspects: hal::format::Aspects,
+ pub levels: Range<hal::image::Level>,
+ pub layers: Range<hal::image::Layer>,
+}
+
+#[derive(Clone, Debug, Default, PartialEq)]
+pub(crate) struct TextureState {
+ mips: ArrayVec<[PlaneStates; MAX_MIP_LEVELS as usize]>,
+ /// True if we have the information about all the subresources here
+ full: bool,
+}
+
+impl PendingTransition<TextureState> {
+ fn collapse(self) -> Result<TextureUse, Self> {
+ if self.usage.start.is_empty()
+ || self.usage.start == self.usage.end
+ || !TextureUse::WRITE_ALL.intersects(self.usage.start | self.usage.end)
+ {
+ Ok(self.usage.start | self.usage.end)
+ } else {
+ Err(self)
+ }
+ }
+}
+
+impl TextureState {
+ pub fn new(mip_level_count: hal::image::Level, array_layer_count: hal::image::Layer) -> Self {
+ Self {
+ mips: iter::repeat_with(|| {
+ PlaneStates::from_range(0..array_layer_count, Unit::new(TextureUse::UNINITIALIZED))
+ })
+ .take(mip_level_count as usize)
+ .collect(),
+ full: true,
+ }
+ }
+}
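A sketch of the shape `new` produces: one plane-state list per mip level, each initially a single run covering every array layer with `UNINITIALIZED` (plain vectors and a string as hypothetical stand-ins for `PlaneStates` and the usage flag):

```rust
use std::ops::Range;

fn main() {
    let mip_level_count = 3usize;
    let array_layer_count: u16 = 6;
    // One entry per mip; each starts as a single run over all layers.
    let mips: Vec<Vec<(Range<u16>, &str)>> = (0..mip_level_count)
        .map(|_| vec![(0..array_layer_count, "UNINITIALIZED")])
        .collect();
    assert_eq!(mips.len(), 3);
    assert!(mips.iter().all(|mip| *mip == vec![(0..6, "UNINITIALIZED")]));
}
```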
+
+impl ResourceState for TextureState {
+ type Id = TextureId;
+ type Selector = TextureSelector;
+ type Usage = TextureUse;
+
+ fn query(&self, selector: Self::Selector) -> Option<Self::Usage> {
+ let mut result = None;
+ // Note: we only consider the subresources tracked by `self`.
+ // If some are not known to `self`, we can assume their initial
+ // state to be whatever we need, and we can always make it match
+ // the query result for the known subresources.
+ let num_levels = self.mips.len();
+ if self.full {
+ assert!(num_levels >= selector.levels.end as usize);
+ }
+ let mip_start = num_levels.min(selector.levels.start as usize);
+ let mip_end = num_levels.min(selector.levels.end as usize);
+ for mip in self.mips[mip_start..mip_end].iter() {
+ match mip.query(&selector.layers, |unit| unit.last) {
+ None => {}
+ Some(Ok(usage)) if result == Some(usage) => {}
+ Some(Ok(usage)) if result.is_none() => {
+ result = Some(usage);
+ }
+ Some(Ok(_)) | Some(Err(())) => return None,
+ }
+ }
+ result
+ }
+
+ fn change(
+ &mut self,
+ id: Valid<Self::Id>,
+ selector: Self::Selector,
+ usage: Self::Usage,
+ mut output: Option<&mut Vec<PendingTransition<Self>>>,
+ ) -> Result<(), PendingTransition<Self>> {
+ if self.full {
+ assert!(self.mips.len() >= selector.levels.end as usize);
+ } else {
+ while self.mips.len() < selector.levels.end as usize {
+ self.mips.push(PlaneStates::empty());
+ }
+ }
+ for (mip_id, mip) in self.mips[selector.levels.start as usize..selector.levels.end as usize]
+ .iter_mut()
+ .enumerate()
+ {
+ let level = selector.levels.start + mip_id as hal::image::Level;
+ let layers = mip.isolate(&selector.layers, Unit::new(usage));
+ for &mut (ref range, ref mut unit) in layers {
+ if unit.last == usage && TextureUse::ORDERED.contains(usage) {
+ continue;
+ }
+ // TODO: Can't satisfy clippy here unless we modify
+ // `TextureSelector` to use `std::ops::RangeBounds`.
+ #[allow(clippy::range_plus_one)]
+ let pending = PendingTransition {
+ id,
+ selector: TextureSelector {
+ levels: level..level + 1,
+ layers: range.clone(),
+ },
+ usage: unit.last..usage,
+ };
+
+ *unit = match output {
+ None => {
+ assert_eq!(
+ unit.first, None,
+ "extending a state that is already a transition"
+ );
+ Unit::new(pending.collapse()?)
+ }
+ Some(ref mut out) => {
+ out.push(pending);
+ Unit {
+ first: unit.first.or(Some(unit.last)),
+ last: usage,
+ }
+ }
+ };
+ }
+ }
+ Ok(())
+ }
+
+ fn prepend(
+ &mut self,
+ id: Valid<Self::Id>,
+ selector: Self::Selector,
+ usage: Self::Usage,
+ ) -> Result<(), PendingTransition<Self>> {
+ assert!(self.mips.len() >= selector.levels.end as usize);
+ for (mip_id, mip) in self.mips[selector.levels.start as usize..selector.levels.end as usize]
+ .iter_mut()
+ .enumerate()
+ {
+ let level = selector.levels.start + mip_id as hal::image::Level;
+ let layers = mip.isolate(&selector.layers, Unit::new(usage));
+ for &mut (ref range, ref mut unit) in layers {
+ match unit.first {
+ Some(old) if old != usage => {
+ return Err(PendingTransition {
+ id,
+ selector: TextureSelector {
+ levels: level..level + 1,
+ layers: range.clone(),
+ },
+ usage: old..usage,
+ });
+ }
+ _ => {
+ unit.first = Some(usage);
+ }
+ }
+ }
+ }
+ Ok(())
+ }
+
+ fn merge(
+ &mut self,
+ id: Valid<Self::Id>,
+ other: &Self,
+ mut output: Option<&mut Vec<PendingTransition<Self>>>,
+ ) -> Result<(), PendingTransition<Self>> {
+ let mut temp = Vec::new();
+ if self.full {
+ assert!(self.mips.len() >= other.mips.len());
+ } else {
+ while self.mips.len() < other.mips.len() {
+ self.mips.push(PlaneStates::empty());
+ }
+ }
+
+ for (mip_id, (mip_self, mip_other)) in self.mips.iter_mut().zip(&other.mips).enumerate() {
+ let level = mip_id as hal::image::Level;
+ temp.extend(mip_self.merge(mip_other, 0));
+ mip_self.clear();
+
+ for (layers, states) in temp.drain(..) {
+ let unit = match states {
+ Range {
+ start: None,
+ end: None,
+ } => unreachable!(),
+ Range {
+ start: Some(start),
+ end: None,
+ } => start,
+ Range {
+ start: None,
+ end: Some(end),
+ } => end,
+ Range {
+ start: Some(start),
+ end: Some(end),
+ } => {
+ let to_usage = end.port();
+ if start.last == to_usage && TextureUse::ORDERED.contains(to_usage) {
+ Unit {
+ first: match output {
+ None => start.first,
+ Some(_) => start.first.or(Some(start.last)),
+ },
+ last: end.last,
+ }
+ } else {
+ // TODO: Can't satisfy clippy here unless we modify
+ // `TextureSelector` to use `std::ops::RangeBounds`.
+ #[allow(clippy::range_plus_one)]
+ let pending = PendingTransition {
+ id,
+ selector: TextureSelector {
+ levels: level..level + 1,
+ layers: layers.clone(),
+ },
+ usage: start.last..to_usage,
+ };
+
+ match output {
+ None => {
+ assert_eq!(
+ start.first, None,
+ "extending a state that is already a transition"
+ );
+ Unit::new(pending.collapse()?)
+ }
+ Some(ref mut out) => {
+ out.push(pending);
+ Unit {
+ // this has to leave a valid `first` state
+ first: start.first.or(Some(start.last)),
+ last: end.last,
+ }
+ }
+ }
+ }
+ }
+ };
+ mip_self.append(layers, unit);
+ }
+ }
+
+ Ok(())
+ }
+
+ fn optimize(&mut self) {
+ for mip in self.mips.iter_mut() {
+ mip.coalesce();
+ }
+ }
+}
+
+#[cfg(test)]
+mod test {
+ //TODO: change() tests
+ use super::*;
+ use crate::id::Id;
+
+ #[test]
+ fn query() {
+ let mut ts = TextureState::default();
+ ts.mips.push(PlaneStates::empty());
+ ts.mips.push(PlaneStates::from_slice(&[
+ (1..3, Unit::new(TextureUse::SAMPLED)),
+ (3..5, Unit::new(TextureUse::SAMPLED)),
+ (5..6, Unit::new(TextureUse::STORAGE_LOAD)),
+ ]));
+
+ assert_eq!(
+ ts.query(TextureSelector {
+ levels: 1..2,
+ layers: 2..5,
+ }),
+ // level 1 matches
+ Some(TextureUse::SAMPLED),
+ );
+ assert_eq!(
+ ts.query(TextureSelector {
+ levels: 0..2,
+ layers: 2..5,
+ }),
+ // level 0 is empty, level 1 matches
+ Some(TextureUse::SAMPLED),
+ );
+ assert_eq!(
+ ts.query(TextureSelector {
+ levels: 1..2,
+ layers: 1..5,
+ }),
+ // level 1 matches with gaps
+ Some(TextureUse::SAMPLED),
+ );
+ assert_eq!(
+ ts.query(TextureSelector {
+ levels: 1..2,
+ layers: 4..6,
+ }),
+ // level 1 doesn't match
+ None,
+ );
+ }
+
+ #[test]
+ fn merge() {
+ let id = Id::dummy();
+ let mut ts1 = TextureState::default();
+ ts1.mips.push(PlaneStates::from_slice(&[(
+ 1..3,
+ Unit::new(TextureUse::SAMPLED),
+ )]));
+ let mut ts2 = TextureState::default();
+ assert_eq!(
+ ts1.merge(id, &ts2, None),
+ Ok(()),
+ "failed to merge with an empty"
+ );
+
+ ts2.mips.push(PlaneStates::from_slice(&[(
+ 1..2,
+ Unit::new(TextureUse::COPY_SRC),
+ )]));
+ assert_eq!(
+ ts1.merge(Id::dummy(), &ts2, None),
+ Ok(()),
+ "failed to extend a compatible state"
+ );
+ assert_eq!(
+ ts1.mips[0].query(&(1..2), |&v| v),
+ Some(Ok(Unit {
+ first: None,
+ last: TextureUse::SAMPLED | TextureUse::COPY_SRC,
+ })),
+ "wrong extension result"
+ );
+
+ ts2.mips[0] = PlaneStates::from_slice(&[(1..2, Unit::new(TextureUse::COPY_DST))]);
+ assert_eq!(
+ ts1.clone().merge(Id::dummy(), &ts2, None),
+ Err(PendingTransition {
+ id,
+ selector: TextureSelector {
+ levels: 0..1,
+ layers: 1..2,
+ },
+ usage: TextureUse::SAMPLED | TextureUse::COPY_SRC..TextureUse::COPY_DST,
+ }),
+ "wrong error on extending with incompatible state"
+ );
+
+ let mut list = Vec::new();
+ ts2.mips[0] = PlaneStates::from_slice(&[
+ (1..2, Unit::new(TextureUse::COPY_DST)),
+ (
+ 2..3,
+ Unit {
+ first: Some(TextureUse::COPY_SRC),
+ last: TextureUse::ATTACHMENT_WRITE,
+ },
+ ),
+ ]);
+ ts1.merge(Id::dummy(), &ts2, Some(&mut list)).unwrap();
+ assert_eq!(
+ &list,
+ &[
+ PendingTransition {
+ id,
+ selector: TextureSelector {
+ levels: 0..1,
+ layers: 1..2,
+ },
+ usage: TextureUse::SAMPLED | TextureUse::COPY_SRC..TextureUse::COPY_DST,
+ },
+ PendingTransition {
+ id,
+ selector: TextureSelector {
+ levels: 0..1,
+ layers: 2..3,
+ },
+ // the transition links the end of the base range (..SAMPLED)
+ // with the start of the next range (COPY_SRC..)
+ usage: TextureUse::SAMPLED..TextureUse::COPY_SRC,
+ },
+ ],
+ "replacing produced wrong transitions"
+ );
+ assert_eq!(
+ ts1.mips[0].query(&(1..2), |&v| v),
+ Some(Ok(Unit {
+ first: Some(TextureUse::SAMPLED | TextureUse::COPY_SRC),
+ last: TextureUse::COPY_DST,
+ })),
+ "wrong final layer 1 state"
+ );
+ assert_eq!(
+ ts1.mips[0].query(&(2..3), |&v| v),
+ Some(Ok(Unit {
+ first: Some(TextureUse::SAMPLED),
+ last: TextureUse::ATTACHMENT_WRITE,
+ })),
+ "wrong final layer 2 state"
+ );
+
+ list.clear();
+ ts2.mips[0] = PlaneStates::from_slice(&[(
+ 2..3,
+ Unit {
+ first: Some(TextureUse::ATTACHMENT_WRITE),
+ last: TextureUse::COPY_SRC,
+ },
+ )]);
+ ts1.merge(Id::dummy(), &ts2, Some(&mut list)).unwrap();
+ assert_eq!(&list, &[], "unexpected replacing transition");
+
+ list.clear();
+ ts2.mips[0] = PlaneStates::from_slice(&[(
+ 2..3,
+ Unit {
+ first: Some(TextureUse::COPY_DST),
+ last: TextureUse::COPY_DST,
+ },
+ )]);
+ ts1.merge(Id::dummy(), &ts2, Some(&mut list)).unwrap();
+ assert_eq!(
+ &list,
+ &[PendingTransition {
+ id,
+ selector: TextureSelector {
+ levels: 0..1,
+ layers: 2..3,
+ },
+ usage: TextureUse::COPY_SRC..TextureUse::COPY_DST,
+ },],
+ "invalid replacing transition"
+ );
+ assert_eq!(
+ ts1.mips[0].query(&(2..3), |&v| v),
+ Some(Ok(Unit {
+ // the initial state here is never expected to change
+ first: Some(TextureUse::SAMPLED),
+ last: TextureUse::COPY_DST,
+ })),
+ "wrong final layer 2 state"
+ );
+ }
+}
diff --git a/gfx/wgpu/wgpu-core/src/validation.rs b/gfx/wgpu/wgpu-core/src/validation.rs
new file mode 100644
index 0000000000..6cfe12d53f
--- /dev/null
+++ b/gfx/wgpu/wgpu-core/src/validation.rs
@@ -0,0 +1,966 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+use crate::{binding_model::BindEntryMap, FastHashMap, MAX_BIND_GROUPS};
+use arrayvec::ArrayVec;
+use std::collections::hash_map::Entry;
+use thiserror::Error;
+use wgt::{BindGroupLayoutEntry, BindingType};
+
+#[derive(Clone, Debug, Error)]
+#[error("buffer usage is {actual:?} which does not contain required usage {expected:?}")]
+pub struct MissingBufferUsageError {
+ pub(crate) actual: wgt::BufferUsage,
+ pub(crate) expected: wgt::BufferUsage,
+}
+
+/// Checks that the given buffer usage contains the required buffer usage,
+/// returns an error otherwise.
+pub fn check_buffer_usage(
+ actual: wgt::BufferUsage,
+ expected: wgt::BufferUsage,
+) -> Result<(), MissingBufferUsageError> {
+ if !actual.contains(expected) {
+ Err(MissingBufferUsageError { actual, expected })
+ } else {
+ Ok(())
+ }
+}
+
+#[derive(Clone, Debug, Error)]
+#[error("texture usage is {actual:?} which does not contain required usage {expected:?}")]
+pub struct MissingTextureUsageError {
+ pub(crate) actual: wgt::TextureUsage,
+ pub(crate) expected: wgt::TextureUsage,
+}
+
+/// Checks that the given texture usage contains the required texture usage,
+/// returns an error otherwise.
+pub fn check_texture_usage(
+ actual: wgt::TextureUsage,
+ expected: wgt::TextureUsage,
+) -> Result<(), MissingTextureUsageError> {
+ if !actual.contains(expected) {
+ Err(MissingTextureUsageError { actual, expected })
+ } else {
+ Ok(())
+ }
+}
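Both checks are plain bitflag containment; a minimal sketch with hypothetical bits standing in for the `wgt` usage flags:

```rust
fn main() {
    // Hypothetical usage bits standing in for wgt::BufferUsage flags.
    const MAP_READ: u32 = 1 << 0;
    const COPY_DST: u32 = 1 << 1;
    const VERTEX: u32 = 1 << 2;

    let actual = MAP_READ | COPY_DST;

    // `contains(expected)` is `actual & expected == expected`.
    assert_eq!(actual & COPY_DST, COPY_DST); // required usage present: Ok(())
    assert_ne!(actual & VERTEX, VERTEX); // missing usage: the error case
}
```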
+
+#[derive(Clone, Debug, Error)]
+pub enum BindingError {
+ #[error("binding is missing from the pipeline layout")]
+ Missing,
+ #[error("visibility flags don't include the shader stage")]
+ Invisible,
+ #[error("load/store access flags {0:?} don't match the shader")]
+ WrongUsage(naga::GlobalUse),
+ #[error("type on the shader side does not match the pipeline binding")]
+ WrongType,
+ #[error("buffer structure size {0}, added to one element of an unbound array, if it's the last field, ended up greater than the given `min_binding_size`")]
+ WrongBufferSize(wgt::BufferAddress),
+ #[error("view dimension {dim:?} (is array: {is_array}) doesn't match the shader")]
+ WrongTextureViewDimension {
+ dim: naga::ImageDimension,
+ is_array: bool,
+ },
+ #[error("texture class {binding:?} doesn't match the shader {shader:?}")]
+ WrongTextureClass {
+ binding: naga::ImageClass,
+ shader: naga::ImageClass,
+ },
+ #[error("comparison flag doesn't match the shader")]
+ WrongSamplerComparison,
+ #[error("derived bind group layout type is not consistent between stages")]
+ InconsistentlyDerivedType,
+ #[error("texture format {0:?} is not supported for storage use")]
+ BadStorageFormat(wgt::TextureFormat),
+}
+
+#[derive(Clone, Debug, Error)]
+pub enum InputError {
+ #[error("input is not provided by the earlier stage in the pipeline")]
+ Missing,
+ #[error("input type is not compatible with the provided")]
+ WrongType,
+}
+
+/// Errors produced when validating a programmable stage of a pipeline.
+#[derive(Clone, Debug, Error)]
+pub enum StageError {
+ #[error("shader module is invalid")]
+ InvalidModule,
+ #[error("unable to find an entry point at {0:?} stage")]
+ MissingEntryPoint(wgt::ShaderStage),
+ #[error("error matching global binding at index {binding} in group {group} against the pipeline layout: {error}")]
+ Binding {
+ group: u32,
+ binding: u32,
+ error: BindingError,
+ },
+ #[error(
+ "error matching the stage input at {location} against the previous stage outputs: {error}"
+ )]
+ Input {
+ location: wgt::ShaderLocation,
+ error: InputError,
+ },
+}
+
+fn get_aligned_type_size(
+ module: &naga::Module,
+ handle: naga::Handle<naga::Type>,
+ allow_unbound: bool,
+) -> wgt::BufferAddress {
+ use naga::TypeInner as Ti;
+ //TODO: take alignment into account!
+ match module.types[handle].inner {
+ Ti::Scalar { kind: _, width } => width as wgt::BufferAddress,
+ Ti::Vector {
+ size,
+ kind: _,
+ width,
+ } => size as wgt::BufferAddress * width as wgt::BufferAddress,
+ Ti::Matrix {
+ rows,
+ columns,
+ width,
+ } => {
+ rows as wgt::BufferAddress * columns as wgt::BufferAddress * width as wgt::BufferAddress
+ }
+ Ti::Pointer { .. } => 4,
+ Ti::Array {
+ base,
+ size: naga::ArraySize::Constant(const_handle),
+ stride,
+ } => {
+ let base_size = match stride {
+ Some(stride) => stride.get() as wgt::BufferAddress,
+ None => get_aligned_type_size(module, base, false),
+ };
+ let count = match module.constants[const_handle].inner {
+ naga::ConstantInner::Uint(value) => value,
+ ref other => panic!("Invalid array size constant: {:?}", other),
+ };
+ base_size * count
+ }
+ Ti::Array {
+ base,
+ size: naga::ArraySize::Dynamic,
+ stride,
+ } if allow_unbound => match stride {
+ Some(stride) => stride.get() as wgt::BufferAddress,
+ None => get_aligned_type_size(module, base, false),
+ },
+ Ti::Struct { ref members } => members.last().map_or(0, |member| {
+ let offset = match member.origin {
+ naga::MemberOrigin::Empty => 0,
+ naga::MemberOrigin::BuiltIn(_) => {
+ tracing::error!("Missing offset on a struct member");
+ 0 // TODO: make it a proper error
+ }
+ naga::MemberOrigin::Offset(offset) => offset as wgt::BufferAddress,
+ };
+ offset + get_aligned_type_size(module, member.ty, false)
+ }),
+ _ => panic!("Unexpected struct field"),
+ }
+}
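+
+// A worked example of the struct rule above: for a struct whose last member
+// sits at offset 16 and is a `vec3` of 4-byte floats (3 * 4 = 12 bytes), the
+// computed size is 16 + 12 = 28. Only the last member matters because its
+// offset already accounts for everything before it.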
+
+fn map_storage_format_to_naga(format: wgt::TextureFormat) -> Option<naga::StorageFormat> {
+ use naga::StorageFormat as Sf;
+ use wgt::TextureFormat as Tf;
+ // Using the table in https://gpuweb.github.io/gpuweb/#plain-color-formats
+ Some(match format {
+ Tf::R32Uint => Sf::R32Uint,
+ Tf::R32Sint => Sf::R32Sint,
+ Tf::R32Float => Sf::R32Float,
+ Tf::Rgba8Unorm => Sf::Rgba8Unorm,
+ Tf::Rgba8Snorm => Sf::Rgba8Snorm,
+ Tf::Rgba8Uint => Sf::Rgba8Uint,
+ Tf::Rgba8Sint => Sf::Rgba8Sint,
+ Tf::Rg32Uint => Sf::Rg32Uint,
+ Tf::Rg32Sint => Sf::Rg32Sint,
+ Tf::Rg32Float => Sf::Rg32Float,
+ Tf::Rgba16Uint => Sf::Rgba16Uint,
+ Tf::Rgba16Sint => Sf::Rgba16Sint,
+ Tf::Rgba16Float => Sf::Rgba16Float,
+ Tf::Rgba32Uint => Sf::Rgba32Uint,
+ Tf::Rgba32Sint => Sf::Rgba32Sint,
+ Tf::Rgba32Float => Sf::Rgba32Float,
+ _ => return None,
+ })
+}
+
+fn map_storage_format_from_naga(format: naga::StorageFormat) -> wgt::TextureFormat {
+ use naga::StorageFormat as Sf;
+ use wgt::TextureFormat as Tf;
+ match format {
+ Sf::R8Unorm => Tf::R8Unorm,
+ Sf::R8Snorm => Tf::R8Snorm,
+ Sf::R8Uint => Tf::R8Uint,
+ Sf::R8Sint => Tf::R8Sint,
+ Sf::R16Uint => Tf::R16Uint,
+ Sf::R16Sint => Tf::R16Sint,
+ Sf::R16Float => Tf::R16Float,
+ Sf::Rg8Unorm => Tf::Rg8Unorm,
+ Sf::Rg8Snorm => Tf::Rg8Snorm,
+ Sf::Rg8Uint => Tf::Rg8Uint,
+ Sf::Rg8Sint => Tf::Rg8Sint,
+ Sf::R32Uint => Tf::R32Uint,
+ Sf::R32Sint => Tf::R32Sint,
+ Sf::R32Float => Tf::R32Float,
+ Sf::Rg16Uint => Tf::Rg16Uint,
+ Sf::Rg16Sint => Tf::Rg16Sint,
+ Sf::Rg16Float => Tf::Rg16Float,
+ Sf::Rgba8Unorm => Tf::Rgba8Unorm,
+ Sf::Rgba8Snorm => Tf::Rgba8Snorm,
+ Sf::Rgba8Uint => Tf::Rgba8Uint,
+ Sf::Rgba8Sint => Tf::Rgba8Sint,
+ Sf::Rgb10a2Unorm => Tf::Rgb10a2Unorm,
+ Sf::Rg11b10Float => Tf::Rg11b10Float,
+ Sf::Rg32Uint => Tf::Rg32Uint,
+ Sf::Rg32Sint => Tf::Rg32Sint,
+ Sf::Rg32Float => Tf::Rg32Float,
+ Sf::Rgba16Uint => Tf::Rgba16Uint,
+ Sf::Rgba16Sint => Tf::Rgba16Sint,
+ Sf::Rgba16Float => Tf::Rgba16Float,
+ Sf::Rgba32Uint => Tf::Rgba32Uint,
+ Sf::Rgba32Sint => Tf::Rgba32Sint,
+ Sf::Rgba32Float => Tf::Rgba32Float,
+ }
+}
+
+fn check_binding_use(
+ module: &naga::Module,
+ var: &naga::GlobalVariable,
+ entry: &BindGroupLayoutEntry,
+) -> Result<naga::GlobalUse, BindingError> {
+ match module.types[var.ty].inner {
+ naga::TypeInner::Struct { ref members } => {
+ let (allowed_usage, min_size) = match entry.ty {
+ BindingType::Buffer {
+ ty,
+ has_dynamic_offset: _,
+ min_binding_size,
+ } => {
+ let global_use = match ty {
+ wgt::BufferBindingType::Uniform
+ | wgt::BufferBindingType::Storage { read_only: true } => {
+ naga::GlobalUse::LOAD
+ }
+ wgt::BufferBindingType::Storage { read_only: _ } => naga::GlobalUse::all(),
+ };
+ (global_use, min_binding_size)
+ }
+ _ => return Err(BindingError::WrongType),
+ };
+ let mut actual_size = 0;
+ for (i, member) in members.iter().enumerate() {
+ actual_size += get_aligned_type_size(module, member.ty, i + 1 == members.len());
+ }
+ match min_size {
+ Some(non_zero) if non_zero.get() < actual_size => {
+ return Err(BindingError::WrongBufferSize(actual_size))
+ }
+ _ => (),
+ }
+ Ok(allowed_usage)
+ }
+ naga::TypeInner::Sampler { comparison } => match entry.ty {
+ BindingType::Sampler {
+ filtering: _,
+ comparison: cmp,
+ } => {
+ if cmp == comparison {
+ Ok(naga::GlobalUse::LOAD)
+ } else {
+ Err(BindingError::WrongSamplerComparison)
+ }
+ }
+ _ => Err(BindingError::WrongType),
+ },
+ naga::TypeInner::Image {
+ dim,
+ arrayed,
+ class,
+ } => {
+ let view_dimension = match entry.ty {
+ BindingType::Texture { view_dimension, .. }
+ | BindingType::StorageTexture { view_dimension, .. } => view_dimension,
+ _ => {
+ return Err(BindingError::WrongTextureViewDimension {
+ dim,
+ is_array: true,
+ })
+ }
+ };
+ if arrayed {
+ match (dim, view_dimension) {
+ (naga::ImageDimension::D2, wgt::TextureViewDimension::D2Array) => (),
+ (naga::ImageDimension::Cube, wgt::TextureViewDimension::CubeArray) => (),
+ _ => {
+ return Err(BindingError::WrongTextureViewDimension {
+ dim,
+ is_array: true,
+ })
+ }
+ }
+ } else {
+ match (dim, view_dimension) {
+ (naga::ImageDimension::D1, wgt::TextureViewDimension::D1) => (),
+ (naga::ImageDimension::D2, wgt::TextureViewDimension::D2) => (),
+ (naga::ImageDimension::D3, wgt::TextureViewDimension::D3) => (),
+ (naga::ImageDimension::Cube, wgt::TextureViewDimension::Cube) => (),
+ _ => {
+ return Err(BindingError::WrongTextureViewDimension {
+ dim,
+ is_array: false,
+ })
+ }
+ }
+ }
+ let (expected_class, usage) = match entry.ty {
+ BindingType::Texture {
+ sample_type,
+ view_dimension: _,
+ multisampled: multi,
+ } => {
+ let class = match sample_type {
+ wgt::TextureSampleType::Float { .. } => naga::ImageClass::Sampled {
+ kind: naga::ScalarKind::Float,
+ multi,
+ },
+ wgt::TextureSampleType::Sint => naga::ImageClass::Sampled {
+ kind: naga::ScalarKind::Sint,
+ multi,
+ },
+ wgt::TextureSampleType::Uint => naga::ImageClass::Sampled {
+ kind: naga::ScalarKind::Uint,
+ multi,
+ },
+ wgt::TextureSampleType::Depth => naga::ImageClass::Depth,
+ };
+ (class, naga::GlobalUse::LOAD)
+ }
+ BindingType::StorageTexture {
+ access,
+ format,
+ view_dimension: _,
+ } => {
+ let naga_format = map_storage_format_to_naga(format)
+ .ok_or(BindingError::BadStorageFormat(format))?;
+ let usage = match access {
+ wgt::StorageTextureAccess::ReadOnly => naga::GlobalUse::LOAD,
+ wgt::StorageTextureAccess::WriteOnly => naga::GlobalUse::STORE,
+ };
+ (naga::ImageClass::Storage(naga_format), usage)
+ }
+ _ => return Err(BindingError::WrongType),
+ };
+ if class != expected_class {
+ return Err(BindingError::WrongTextureClass {
+ binding: expected_class,
+ shader: class,
+ });
+ }
+ Ok(usage)
+ }
+ _ => Err(BindingError::WrongType),
+ }
+}
+
+fn is_sub_type(sub: &naga::TypeInner, provided: &naga::TypeInner) -> bool {
+ use naga::TypeInner as Ti;
+
+ match (sub, provided) {
+ (
+ &Ti::Scalar {
+ kind: k0,
+ width: w0,
+ },
+ &Ti::Scalar {
+ kind: k1,
+ width: w1,
+ },
+ ) => k0 == k1 && w0 <= w1,
+ (
+ &Ti::Scalar {
+ kind: k0,
+ width: w0,
+ },
+ &Ti::Vector {
+ size: _,
+ kind: k1,
+ width: w1,
+ },
+ ) => k0 == k1 && w0 <= w1,
+ (
+ &Ti::Vector {
+ size: s0,
+ kind: k0,
+ width: w0,
+ },
+ &Ti::Vector {
+ size: s1,
+ kind: k1,
+ width: w1,
+ },
+ ) => s0 as u8 <= s1 as u8 && k0 == k1 && w0 <= w1,
+ (
+ &Ti::Matrix {
+ columns: c0,
+ rows: r0,
+ width: w0,
+ },
+ &Ti::Matrix {
+ columns: c1,
+ rows: r1,
+ width: w1,
+ },
+ ) => c0 == c1 && r0 == r1 && w0 <= w1,
+ (&Ti::Struct { members: ref m0 }, &Ti::Struct { members: ref m1 }) => m0 == m1,
+ _ => false,
+ }
+}
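+
+// Some concrete consequences of these rules: a `float` scalar is accepted
+// where a `vec4` of floats is provided (a scalar may match a vector of the
+// same kind), `vec2<f32>` fits into `vec4<f32>` (sizes may grow, not shrink),
+// but `vec4<f32>` does not fit into `vec2<f32>`, and `int` never matches
+// `float`.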
+
+pub enum MaybeOwned<'a, T> {
+ Owned(T),
+ Borrowed(&'a T),
+}
+
+impl<'a, T> std::ops::Deref for MaybeOwned<'a, T> {
+ type Target = T;
+ fn deref(&self) -> &T {
+ match *self {
+ MaybeOwned::Owned(ref value) => value,
+ MaybeOwned::Borrowed(value) => value,
+ }
+ }
+}
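+
+// A small sketch of why `MaybeOwned` exists: stage interfaces can mix types
+// borrowed from a shader module with types computed on the fly (e.g. from a
+// vertex format), and both variants deref to `&naga::TypeInner`:
+//
+//     let computed = MaybeOwned::Owned(map_vertex_format(wgt::VertexFormat::Float4));
+//     let inner: &naga::TypeInner = &computed;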
+
+pub fn map_vertex_format(format: wgt::VertexFormat) -> naga::TypeInner {
+ use naga::TypeInner as Ti;
+ use wgt::VertexFormat as Vf;
+
+ //Note: Shader always sees data as int, uint, or float.
+ // It doesn't know if the original is normalized in a tighter form.
+ let width = 4;
+ match format {
+ Vf::Uchar2 => Ti::Vector {
+ size: naga::VectorSize::Bi,
+ kind: naga::ScalarKind::Uint,
+ width,
+ },
+ Vf::Uchar4 => Ti::Vector {
+ size: naga::VectorSize::Quad,
+ kind: naga::ScalarKind::Uint,
+ width,
+ },
+ Vf::Char2 => Ti::Vector {
+ size: naga::VectorSize::Bi,
+ kind: naga::ScalarKind::Sint,
+ width,
+ },
+ Vf::Char4 => Ti::Vector {
+ size: naga::VectorSize::Quad,
+ kind: naga::ScalarKind::Sint,
+ width,
+ },
+ Vf::Uchar2Norm => Ti::Vector {
+ size: naga::VectorSize::Bi,
+ kind: naga::ScalarKind::Float,
+ width,
+ },
+ Vf::Uchar4Norm => Ti::Vector {
+ size: naga::VectorSize::Quad,
+ kind: naga::ScalarKind::Float,
+ width,
+ },
+ Vf::Char2Norm => Ti::Vector {
+ size: naga::VectorSize::Bi,
+ kind: naga::ScalarKind::Float,
+ width,
+ },
+ Vf::Char4Norm => Ti::Vector {
+ size: naga::VectorSize::Quad,
+ kind: naga::ScalarKind::Float,
+ width,
+ },
+ Vf::Ushort2 => Ti::Vector {
+ size: naga::VectorSize::Bi,
+ kind: naga::ScalarKind::Uint,
+ width,
+ },
+ Vf::Ushort4 => Ti::Vector {
+ size: naga::VectorSize::Quad,
+ kind: naga::ScalarKind::Uint,
+ width,
+ },
+ Vf::Short2 => Ti::Vector {
+ size: naga::VectorSize::Bi,
+ kind: naga::ScalarKind::Sint,
+ width,
+ },
+ Vf::Short4 => Ti::Vector {
+ size: naga::VectorSize::Quad,
+ kind: naga::ScalarKind::Sint,
+ width,
+ },
+ Vf::Ushort2Norm | Vf::Short2Norm | Vf::Half2 => Ti::Vector {
+ size: naga::VectorSize::Bi,
+ kind: naga::ScalarKind::Float,
+ width,
+ },
+ Vf::Ushort4Norm | Vf::Short4Norm | Vf::Half4 => Ti::Vector {
+ size: naga::VectorSize::Quad,
+ kind: naga::ScalarKind::Float,
+ width,
+ },
+ Vf::Float => Ti::Scalar {
+ kind: naga::ScalarKind::Float,
+ width,
+ },
+ Vf::Float2 => Ti::Vector {
+ size: naga::VectorSize::Bi,
+ kind: naga::ScalarKind::Float,
+ width,
+ },
+ Vf::Float3 => Ti::Vector {
+ size: naga::VectorSize::Tri,
+ kind: naga::ScalarKind::Float,
+ width,
+ },
+ Vf::Float4 => Ti::Vector {
+ size: naga::VectorSize::Quad,
+ kind: naga::ScalarKind::Float,
+ width,
+ },
+ Vf::Uint => Ti::Scalar {
+ kind: naga::ScalarKind::Uint,
+ width,
+ },
+ Vf::Uint2 => Ti::Vector {
+ size: naga::VectorSize::Bi,
+ kind: naga::ScalarKind::Uint,
+ width,
+ },
+ Vf::Uint3 => Ti::Vector {
+ size: naga::VectorSize::Tri,
+ kind: naga::ScalarKind::Uint,
+ width,
+ },
+ Vf::Uint4 => Ti::Vector {
+ size: naga::VectorSize::Quad,
+ kind: naga::ScalarKind::Uint,
+ width,
+ },
+ Vf::Int => Ti::Scalar {
+ kind: naga::ScalarKind::Sint,
+ width,
+ },
+ Vf::Int2 => Ti::Vector {
+ size: naga::VectorSize::Bi,
+ kind: naga::ScalarKind::Sint,
+ width,
+ },
+ Vf::Int3 => Ti::Vector {
+ size: naga::VectorSize::Tri,
+ kind: naga::ScalarKind::Sint,
+ width,
+ },
+ Vf::Int4 => Ti::Vector {
+ size: naga::VectorSize::Quad,
+ kind: naga::ScalarKind::Sint,
+ width,
+ },
+ }
+}
+
+fn map_texture_format(format: wgt::TextureFormat) -> naga::TypeInner {
+ use naga::{ScalarKind as Sk, TypeInner as Ti, VectorSize as Vs};
+ use wgt::TextureFormat as Tf;
+
+ //Note: Shader always sees data as int, uint, or float.
+ // It doesn't know if the original is normalized in a tighter form.
+ let width = 4;
+ match format {
+ Tf::R8Unorm | Tf::R8Snorm => Ti::Scalar {
+ kind: Sk::Float,
+ width,
+ },
+ Tf::R8Uint => Ti::Scalar {
+ kind: Sk::Uint,
+ width,
+ },
+ Tf::R8Sint => Ti::Scalar {
+ kind: Sk::Sint,
+ width,
+ },
+ Tf::R16Uint => Ti::Scalar {
+ kind: Sk::Uint,
+ width,
+ },
+ Tf::R16Sint => Ti::Scalar {
+ kind: Sk::Sint,
+ width,
+ },
+ Tf::R16Float => Ti::Scalar {
+ kind: Sk::Float,
+ width,
+ },
+ Tf::Rg8Unorm | Tf::Rg8Snorm => Ti::Vector {
+ size: Vs::Bi,
+ kind: Sk::Float,
+ width,
+ },
+ Tf::Rg8Uint => Ti::Vector {
+ size: Vs::Bi,
+ kind: Sk::Uint,
+ width,
+ },
+ Tf::Rg8Sint => Ti::Vector {
+ size: Vs::Bi,
+ kind: Sk::Sint,
+ width,
+ },
+ Tf::R32Uint => Ti::Scalar {
+ kind: Sk::Uint,
+ width,
+ },
+ Tf::R32Sint => Ti::Scalar {
+ kind: Sk::Sint,
+ width,
+ },
+ Tf::R32Float => Ti::Scalar {
+ kind: Sk::Float,
+ width,
+ },
+ Tf::Rg16Uint => Ti::Vector {
+ size: Vs::Bi,
+ kind: Sk::Uint,
+ width,
+ },
+ Tf::Rg16Sint => Ti::Vector {
+ size: Vs::Bi,
+ kind: Sk::Sint,
+ width,
+ },
+ Tf::Rg16Float => Ti::Vector {
+ size: Vs::Bi,
+ kind: Sk::Float,
+ width,
+ },
+ Tf::Rgba8Unorm
+ | Tf::Rgba8UnormSrgb
+ | Tf::Rgba8Snorm
+ | Tf::Bgra8Unorm
+ | Tf::Bgra8UnormSrgb => Ti::Vector {
+ size: Vs::Quad,
+ kind: Sk::Float,
+ width,
+ },
+ Tf::Rgba8Uint => Ti::Vector {
+ size: Vs::Quad,
+ kind: Sk::Uint,
+ width,
+ },
+ Tf::Rgba8Sint => Ti::Vector {
+ size: Vs::Quad,
+ kind: Sk::Sint,
+ width,
+ },
+ Tf::Rgb10a2Unorm => Ti::Vector {
+ size: Vs::Quad,
+ kind: Sk::Float,
+ width,
+ },
+ Tf::Rg11b10Float => Ti::Vector {
+ size: Vs::Tri,
+ kind: Sk::Float,
+ width,
+ },
+ Tf::Rg32Uint => Ti::Vector {
+ size: Vs::Bi,
+ kind: Sk::Uint,
+ width,
+ },
+ Tf::Rg32Sint => Ti::Vector {
+ size: Vs::Bi,
+ kind: Sk::Sint,
+ width,
+ },
+ Tf::Rg32Float => Ti::Vector {
+ size: Vs::Bi,
+ kind: Sk::Float,
+ width,
+ },
+ Tf::Rgba16Uint => Ti::Vector {
+ size: Vs::Quad,
+ kind: Sk::Uint,
+ width,
+ },
+ Tf::Rgba16Sint => Ti::Vector {
+ size: Vs::Quad,
+ kind: Sk::Sint,
+ width,
+ },
+ Tf::Rgba16Float => Ti::Vector {
+ size: Vs::Quad,
+ kind: Sk::Float,
+ width,
+ },
+ Tf::Rgba32Uint => Ti::Vector {
+ size: Vs::Quad,
+ kind: Sk::Uint,
+ width,
+ },
+ Tf::Rgba32Sint => Ti::Vector {
+ size: Vs::Quad,
+ kind: Sk::Sint,
+ width,
+ },
+ Tf::Rgba32Float => Ti::Vector {
+ size: Vs::Quad,
+ kind: Sk::Float,
+ width,
+ },
+ Tf::Depth32Float | Tf::Depth24Plus | Tf::Depth24PlusStencil8 => {
+ panic!("Unexpected depth format")
+ }
+ Tf::Bc1RgbaUnorm
+ | Tf::Bc1RgbaUnormSrgb
+ | Tf::Bc2RgbaUnorm
+ | Tf::Bc2RgbaUnormSrgb
+ | Tf::Bc3RgbaUnorm
+ | Tf::Bc3RgbaUnormSrgb
+ | Tf::Bc7RgbaUnorm
+ | Tf::Bc7RgbaUnormSrgb => Ti::Vector {
+ size: Vs::Quad,
+ kind: Sk::Float,
+ width,
+ },
+ Tf::Bc4RUnorm | Tf::Bc4RSnorm => Ti::Scalar {
+ kind: Sk::Float,
+ width,
+ },
+ Tf::Bc5RgUnorm | Tf::Bc5RgSnorm => Ti::Vector {
+ size: Vs::Bi,
+ kind: Sk::Float,
+ width,
+ },
+ Tf::Bc6hRgbUfloat | Tf::Bc6hRgbSfloat => Ti::Vector {
+ size: Vs::Tri,
+ kind: Sk::Float,
+ width,
+ },
+ }
+}
+
+/// Return true if the fragment `format` is covered by the provided `output`.
+pub fn check_texture_format(format: wgt::TextureFormat, output: &naga::TypeInner) -> bool {
+ let required = map_texture_format(format);
+ is_sub_type(&required, output)
+}
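+
+// For example, a fragment output of `vec4` floats covers
+// `wgt::TextureFormat::Rgba8Unorm` (which the shader sees as a float vec4),
+// while a `vec2` float output would not.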
+
+pub type StageInterface<'a> = FastHashMap<wgt::ShaderLocation, MaybeOwned<'a, naga::TypeInner>>;
+
+pub enum IntrospectionBindGroupLayouts<'a> {
+ Given(ArrayVec<[&'a BindEntryMap; MAX_BIND_GROUPS]>),
+ Derived(&'a mut [BindEntryMap]),
+}
+
+fn derive_binding_type(
+ module: &naga::Module,
+ var: &naga::GlobalVariable,
+ usage: naga::GlobalUse,
+) -> Result<BindingType, BindingError> {
+ let ty = &module.types[var.ty];
+ Ok(match ty.inner {
+ naga::TypeInner::Struct { ref members } => {
+ let has_dynamic_offset = false;
+ let mut actual_size = 0;
+ for (i, member) in members.iter().enumerate() {
+ actual_size += get_aligned_type_size(module, member.ty, i + 1 == members.len());
+ }
+ match var.class {
+ naga::StorageClass::Uniform => BindingType::Buffer {
+ ty: wgt::BufferBindingType::Uniform,
+ has_dynamic_offset,
+ min_binding_size: wgt::BufferSize::new(actual_size),
+ },
+ naga::StorageClass::Storage => BindingType::Buffer {
+ ty: wgt::BufferBindingType::Storage {
+ read_only: !usage.contains(naga::GlobalUse::STORE),
+ },
+ has_dynamic_offset,
+ min_binding_size: wgt::BufferSize::new(actual_size),
+ },
+ _ => return Err(BindingError::WrongType),
+ }
+ }
+ naga::TypeInner::Sampler { comparison } => BindingType::Sampler {
+ filtering: true,
+ comparison,
+ },
+ naga::TypeInner::Image {
+ dim,
+ arrayed,
+ class,
+ } => {
+ let view_dimension = match dim {
+ naga::ImageDimension::D1 => wgt::TextureViewDimension::D1,
+ naga::ImageDimension::D2 if arrayed => wgt::TextureViewDimension::D2Array,
+ naga::ImageDimension::D2 => wgt::TextureViewDimension::D2,
+ naga::ImageDimension::D3 => wgt::TextureViewDimension::D3,
+ naga::ImageDimension::Cube if arrayed => wgt::TextureViewDimension::CubeArray,
+ naga::ImageDimension::Cube => wgt::TextureViewDimension::Cube,
+ };
+ match class {
+ naga::ImageClass::Sampled { multi, kind } => BindingType::Texture {
+ sample_type: match kind {
+ naga::ScalarKind::Float => {
+ wgt::TextureSampleType::Float { filterable: true }
+ }
+ naga::ScalarKind::Sint => wgt::TextureSampleType::Sint,
+ naga::ScalarKind::Uint => wgt::TextureSampleType::Uint,
+ naga::ScalarKind::Bool => unreachable!(),
+ },
+ view_dimension,
+ multisampled: multi,
+ },
+ naga::ImageClass::Depth => BindingType::Texture {
+ sample_type: wgt::TextureSampleType::Depth,
+ view_dimension,
+ multisampled: false,
+ },
+ naga::ImageClass::Storage(format) => BindingType::StorageTexture {
+ access: if usage.contains(naga::GlobalUse::STORE) {
+ wgt::StorageTextureAccess::WriteOnly
+ } else {
+ wgt::StorageTextureAccess::ReadOnly
+ },
+ view_dimension,
+ format: {
+ let f = map_storage_format_from_naga(format);
+ let original = map_storage_format_to_naga(f)
+ .ok_or(BindingError::BadStorageFormat(f))?;
+ debug_assert_eq!(format, original);
+ f
+ },
+ },
+ }
+ }
+ _ => return Err(BindingError::WrongType),
+ })
+}
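+
+// A sketch of the derivation in practice: a global in
+// `naga::StorageClass::Storage` that the entry point only ever loads from
+// (no `GlobalUse::STORE`) derives to a `BindingType::Buffer` with
+// `ty: Storage { read_only: true }` and `min_binding_size` set to the
+// computed struct size.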
+
+pub fn check_stage<'a>(
+ module: &'a naga::Module,
+ mut group_layouts: IntrospectionBindGroupLayouts,
+ entry_point_name: &str,
+ stage_bit: wgt::ShaderStage,
+ inputs: StageInterface<'a>,
+) -> Result<StageInterface<'a>, StageError> {
+ // Since a shader module can have multiple entry points with the same name,
+ // we need to look for one with the right execution model.
+ let shader_stage = match stage_bit {
+ wgt::ShaderStage::VERTEX => naga::ShaderStage::Vertex,
+ wgt::ShaderStage::FRAGMENT => naga::ShaderStage::Fragment,
+ wgt::ShaderStage::COMPUTE => naga::ShaderStage::Compute,
+ _ => unreachable!(),
+ };
+ let entry_point = module
+ .entry_points
+ .get(&(shader_stage, entry_point_name.to_string()))
+ .ok_or(StageError::MissingEntryPoint(stage_bit))?;
+
+ let mut outputs = StageInterface::default();
+ for ((_, var), &usage) in module
+ .global_variables
+ .iter()
+ .zip(&entry_point.function.global_usage)
+ {
+ if usage.is_empty() {
+ continue;
+ }
+ match var.binding {
+ Some(naga::Binding::Resource { group, binding }) => {
+ let result = match group_layouts {
+ IntrospectionBindGroupLayouts::Given(ref layouts) => layouts
+ .get(group as usize)
+ .and_then(|map| map.get(&binding))
+ .ok_or(BindingError::Missing)
+ .and_then(|entry| {
+ if entry.visibility.contains(stage_bit) {
+ Ok(entry)
+ } else {
+ Err(BindingError::Invisible)
+ }
+ })
+ .and_then(|entry| check_binding_use(module, var, entry))
+ .and_then(|allowed_usage| {
+ if allowed_usage.contains(usage) {
+ Ok(())
+ } else {
+ Err(BindingError::WrongUsage(usage))
+ }
+ }),
+ IntrospectionBindGroupLayouts::Derived(ref mut layouts) => layouts
+ .get_mut(group as usize)
+ .ok_or(BindingError::Missing)
+ .and_then(|set| {
+ let ty = derive_binding_type(module, var, usage)?;
+ Ok(match set.entry(binding) {
+ Entry::Occupied(e) if e.get().ty != ty => {
+ return Err(BindingError::InconsistentlyDerivedType)
+ }
+ Entry::Occupied(e) => {
+ e.into_mut().visibility |= stage_bit;
+ }
+ Entry::Vacant(e) => {
+ e.insert(BindGroupLayoutEntry {
+ binding,
+ ty,
+ visibility: stage_bit,
+ count: None,
+ });
+ }
+ })
+ }),
+ };
+ if let Err(error) = result {
+ return Err(StageError::Binding {
+ group,
+ binding,
+ error,
+ });
+ }
+ }
+ Some(naga::Binding::Location(location)) => {
+ let ty = &module.types[var.ty].inner;
+ if usage.contains(naga::GlobalUse::STORE) {
+ outputs.insert(location, MaybeOwned::Borrowed(ty));
+ } else {
+ let result =
+ inputs
+ .get(&location)
+ .ok_or(InputError::Missing)
+ .and_then(|provided| {
+ if is_sub_type(ty, provided) {
+ Ok(())
+ } else {
+ Err(InputError::WrongType)
+ }
+ });
+ if let Err(error) = result {
+ return Err(StageError::Input { location, error });
+ }
+ }
+ }
+ _ => {}
+ }
+ }
+ Ok(outputs)
+}
diff --git a/gfx/wgpu/wgpu-types/Cargo.toml b/gfx/wgpu/wgpu-types/Cargo.toml
new file mode 100644
index 0000000000..bc23adb403
--- /dev/null
+++ b/gfx/wgpu/wgpu-types/Cargo.toml
@@ -0,0 +1,20 @@
+[package]
+name = "wgpu-types"
+version = "0.6.0"
+authors = ["wgpu developers"]
+edition = "2018"
+description = "WebGPU types"
+homepage = "https://github.com/gfx-rs/wgpu"
+repository = "https://github.com/gfx-rs/wgpu"
+keywords = ["graphics"]
+license = "MPL-2.0"
+
+[lib]
+
+[features]
+trace = ["serde"]
+replay = ["serde"]
+
+[dependencies]
+bitflags = "1.0"
+serde = { version = "1.0", features = ["serde_derive"], optional = true }
diff --git a/gfx/wgpu/wgpu-types/src/lib.rs b/gfx/wgpu/wgpu-types/src/lib.rs
new file mode 100644
index 0000000000..a3a29c8c8c
--- /dev/null
+++ b/gfx/wgpu/wgpu-types/src/lib.rs
@@ -0,0 +1,2000 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// The intra-doc links to the wgpu crate in this crate actually successfully link to the types in the wgpu crate, when built from within the wgpu crate.
+// However, when building either the wgpu crate or this crate on its own, cargo doc will claim that all the links cannot be resolved,
+// despite the fact that they work fine when they need to.
+// So we just disable those warnings.
+#![allow(broken_intra_doc_links)]
+
+#[cfg(feature = "serde")]
+use serde::{Deserialize, Serialize};
+use std::{num::NonZeroU32, ops::Range};
+
+/// Integral type used for buffer offsets.
+pub type BufferAddress = u64;
+/// Integral type used for buffer slice sizes.
+pub type BufferSize = std::num::NonZeroU64;
+/// Integral type used for binding locations in shaders.
+pub type ShaderLocation = u32;
+/// Integral type used for dynamic bind group offsets.
+pub type DynamicOffset = u32;
+
+/// Buffer-Texture copies must have [`bytes_per_row`] aligned to this number.
+///
+/// This doesn't apply to [`Queue::write_texture`].
+///
+/// [`bytes_per_row`]: TextureDataLayout::bytes_per_row
+pub const COPY_BYTES_PER_ROW_ALIGNMENT: u32 = 256;
+/// Bound uniform/storage buffer offsets must be aligned to this number.
+pub const BIND_BUFFER_ALIGNMENT: BufferAddress = 256;
+/// Buffer to buffer copy offsets and sizes must be aligned to this number.
+pub const COPY_BUFFER_ALIGNMENT: BufferAddress = 4;
+/// Vertex buffer strides have to be aligned to this number.
+pub const VERTEX_STRIDE_ALIGNMENT: BufferAddress = 4;
+/// Required alignment, in bytes, for all push constants.
+pub const PUSH_CONSTANT_ALIGNMENT: u32 = 4;
+
+/// Backends supported by wgpu.
+#[repr(u8)]
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
+#[cfg_attr(feature = "trace", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub enum Backend {
+ Empty = 0,
+ Vulkan = 1,
+ Metal = 2,
+ Dx12 = 3,
+ Dx11 = 4,
+ Gl = 5,
+ BrowserWebGpu = 6,
+}
+
+/// Power Preference when choosing a physical adapter.
+#[repr(C)]
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
+#[cfg_attr(feature = "trace", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub enum PowerPreference {
+    /// Adapter that uses the least possible power. This is often an integrated GPU.
+ LowPower = 0,
+ /// Adapter that has the highest performance. This is often a discrete GPU.
+ HighPerformance = 1,
+}
+
+impl Default for PowerPreference {
+ fn default() -> Self {
+ Self::LowPower
+ }
+}
+
+bitflags::bitflags! {
+ /// Represents the backends that wgpu will use.
+ #[repr(transparent)]
+ #[cfg_attr(feature = "trace", derive(Serialize))]
+ #[cfg_attr(feature = "replay", derive(Deserialize))]
+ pub struct BackendBit: u32 {
+ /// Supported on Windows, Linux/Android, and macOS/iOS via Vulkan Portability (with the Vulkan feature enabled)
+ const VULKAN = 1 << Backend::Vulkan as u32;
+ /// Currently unsupported
+ const GL = 1 << Backend::Gl as u32;
+ /// Supported on macOS/iOS
+ const METAL = 1 << Backend::Metal as u32;
+ /// Supported on Windows 10
+ const DX12 = 1 << Backend::Dx12 as u32;
+ /// Supported on Windows 7+
+ const DX11 = 1 << Backend::Dx11 as u32;
+ /// Supported when targeting the web through webassembly
+ const BROWSER_WEBGPU = 1 << Backend::BrowserWebGpu as u32;
+        /// All the APIs that wgpu offers first-tier support for.
+ ///
+ /// Vulkan + Metal + DX12 + Browser WebGPU
+ const PRIMARY = Self::VULKAN.bits
+ | Self::METAL.bits
+ | Self::DX12.bits
+ | Self::BROWSER_WEBGPU.bits;
+        /// All the APIs that wgpu offers second-tier support for. These may
+ /// be unsupported/still experimental.
+ ///
+ /// OpenGL + DX11
+ const SECONDARY = Self::GL.bits | Self::DX11.bits;
+ }
+}
+
+impl From<Backend> for BackendBit {
+ fn from(backend: Backend) -> Self {
+ Self::from_bits(1 << backend as u32).unwrap()
+ }
+}
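+
+// A quick sketch of the conversion and the composite masks:
+//
+//     assert_eq!(BackendBit::from(Backend::Vulkan), BackendBit::VULKAN);
+//     assert!(BackendBit::PRIMARY.contains(BackendBit::METAL));
+//     assert!(BackendBit::SECONDARY.contains(BackendBit::GL));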
+
+/// Options for requesting adapter.
+#[repr(C)]
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+#[cfg_attr(feature = "trace", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub struct RequestAdapterOptions<S> {
+ /// Power preference for the adapter.
+ pub power_preference: PowerPreference,
+ /// Surface that is required to be presentable with the requested adapter. This does not
+ /// create the surface, only guarantees that the adapter can present to said surface.
+ pub compatible_surface: Option<S>,
+}
+
+impl<S> Default for RequestAdapterOptions<S> {
+ fn default() -> Self {
+ Self {
+ power_preference: PowerPreference::default(),
+ compatible_surface: None,
+ }
+ }
+}
+
+bitflags::bitflags! {
+ /// Features that are not guaranteed to be supported.
+ ///
+ /// These are either part of the webgpu standard, or are extension features supported by
+ /// wgpu when targeting native.
+ ///
+ /// If you want to use a feature, you need to first verify that the adapter supports
+ /// the feature. If the adapter does not support the feature, requesting a device with it enabled
+ /// will panic.
+ #[repr(transparent)]
+ #[derive(Default)]
+ #[cfg_attr(feature = "trace", derive(Serialize))]
+ #[cfg_attr(feature = "replay", derive(Deserialize))]
+ pub struct Features: u64 {
+ /// By default, polygon depth is clipped to 0-1 range. Anything outside of that range
+ /// is rejected, and respective fragments are not touched.
+ ///
+ /// With this extension, we can force clamping of the polygon depth to 0-1. That allows
+ /// shadow map occluders to be rendered into a tighter depth range.
+ ///
+ /// Supported platforms:
+ /// - desktops
+ /// - some mobile chips
+ ///
+ /// This is a web and native feature.
+ const DEPTH_CLAMPING = 0x0000_0000_0000_0001;
+ /// Enables BCn family of compressed textures. All BCn textures use 4x4 pixel blocks
+ /// with 8 or 16 bytes per block.
+ ///
+        /// Compressed textures sacrifice some quality in exchange for significantly reduced
+ /// bandwidth usage.
+ ///
+ /// Supported Platforms:
+ /// - desktops
+ ///
+ /// This is a web and native feature.
+ const TEXTURE_COMPRESSION_BC = 0x0000_0000_0000_0002;
+ /// Webgpu only allows the MAP_READ and MAP_WRITE buffer usage to be matched with
+ /// COPY_DST and COPY_SRC respectively. This removes this requirement.
+ ///
+ /// This is only beneficial on systems that share memory between CPU and GPU. If enabled
+ /// on a system that doesn't, this can severely hinder performance. Only use if you understand
+ /// the consequences.
+ ///
+ /// Supported platforms:
+ /// - All
+ ///
+ /// This is a native only feature.
+ const MAPPABLE_PRIMARY_BUFFERS = 0x0000_0000_0001_0000;
+ /// Allows the user to create uniform arrays of sampled textures in shaders:
+ ///
+ /// eg. `uniform texture2D textures[10]`.
+ ///
+ /// This capability allows them to exist and to be indexed by compile time constant
+ /// values.
+ ///
+ /// Supported platforms:
+ /// - DX12
+ /// - Metal (with MSL 2.0+ on macOS 10.13+)
+ /// - Vulkan
+ ///
+ /// This is a native only feature.
+ const SAMPLED_TEXTURE_BINDING_ARRAY = 0x0000_0000_0002_0000;
+ /// Allows shaders to index sampled texture arrays with dynamically uniform values:
+ ///
+ /// eg. `texture_array[uniform_value]`
+ ///
+ /// This capability means the hardware will also support SAMPLED_TEXTURE_BINDING_ARRAY.
+ ///
+ /// Supported platforms:
+ /// - DX12
+ /// - Metal (with MSL 2.0+ on macOS 10.13+)
+ /// - Vulkan's shaderSampledImageArrayDynamicIndexing feature
+ ///
+ /// This is a native only feature.
+ const SAMPLED_TEXTURE_ARRAY_DYNAMIC_INDEXING = 0x0000_0000_0004_0000;
+ /// Allows shaders to index sampled texture arrays with dynamically non-uniform values:
+ ///
+ /// eg. `texture_array[vertex_data]`
+ ///
+ /// In order to use this capability, the corresponding GLSL extension must be enabled like so:
+ ///
+ /// `#extension GL_EXT_nonuniform_qualifier : require`
+ ///
+ /// and then used either as `nonuniformEXT` qualifier in variable declaration:
+ ///
+ /// eg. `layout(location = 0) nonuniformEXT flat in int vertex_data;`
+ ///
+ /// or as `nonuniformEXT` constructor:
+ ///
+ /// eg. `texture_array[nonuniformEXT(vertex_data)]`
+ ///
+ /// HLSL does not need any extension.
+ ///
+ /// This capability means the hardware will also support SAMPLED_TEXTURE_ARRAY_DYNAMIC_INDEXING
+ /// and SAMPLED_TEXTURE_BINDING_ARRAY.
+ ///
+ /// Supported platforms:
+ /// - DX12
+ /// - Metal (with MSL 2.0+ on macOS 10.13+)
+ /// - Vulkan 1.2+ (or VK_EXT_descriptor_indexing)'s shaderSampledImageArrayNonUniformIndexing feature)
+ ///
+ /// This is a native only feature.
+ const SAMPLED_TEXTURE_ARRAY_NON_UNIFORM_INDEXING = 0x0000_0000_0008_0000;
+ /// Allows the user to create unsized uniform arrays of bindings:
+ ///
+ /// eg. `uniform texture2D textures[]`.
+ ///
+        /// If this capability is supported, SAMPLED_TEXTURE_ARRAY_NON_UNIFORM_INDEXING is very likely
+        /// to also be supported.
+ ///
+ /// Supported platforms:
+ /// - DX12
+ /// - Vulkan 1.2+ (or VK_EXT_descriptor_indexing)'s runtimeDescriptorArray feature
+ ///
+ /// This is a native only feature.
+ const UNSIZED_BINDING_ARRAY = 0x0000_0000_0010_0000;
+ /// Allows the user to call [`RenderPass::multi_draw_indirect`] and [`RenderPass::multi_draw_indexed_indirect`].
+ ///
+ /// Allows multiple indirect calls to be dispatched from a single buffer.
+ ///
+ /// Supported platforms:
+ /// - DX12
+ /// - Metal
+ /// - Vulkan
+ ///
+ /// This is a native only feature.
+ const MULTI_DRAW_INDIRECT = 0x0000_0000_0020_0000;
+ /// Allows the user to call [`RenderPass::multi_draw_indirect_count`] and [`RenderPass::multi_draw_indexed_indirect_count`].
+ ///
+ /// This allows the use of a buffer containing the actual number of draw calls.
+ ///
+ /// Supported platforms:
+ /// - DX12
+ /// - Vulkan 1.2+ (or VK_KHR_draw_indirect_count)
+ ///
+ /// This is a native only feature.
+ const MULTI_DRAW_INDIRECT_COUNT = 0x0000_0000_0040_0000;
+ /// Allows the use of push constants: small, fast bits of memory that can be updated
+ /// inside a [`RenderPass`].
+ ///
+ /// Allows the user to call [`RenderPass::set_push_constants`], provide a non-empty array
+ /// to [`PipelineLayoutDescriptor`], and provide a non-zero limit to [`Limits::max_push_constant_size`].
+ ///
+ /// A block of push constants can be declared with `layout(push_constant) uniform Name {..}` in shaders.
+ ///
+ /// Supported platforms:
+ /// - DX12
+ /// - Vulkan
+ /// - Metal
+ /// - DX11 (emulated with uniforms)
+ /// - OpenGL (emulated with uniforms)
+ ///
+ /// This is a native only feature.
+ const PUSH_CONSTANTS = 0x0000_0000_0080_0000;
+ /// Allows the use of [`AddressMode::ClampToBorder`].
+ ///
+ /// Supported platforms:
+ /// - DX12
+ /// - Vulkan
+ /// - Metal (macOS 10.12+ only)
+ /// - DX11
+ /// - OpenGL
+ ///
+ /// This is a web and native feature.
+ const ADDRESS_MODE_CLAMP_TO_BORDER = 0x0000_0000_0100_0000;
+ /// Allows the user to set a non-fill polygon mode in [`RasterizationStateDescriptor::polygon_mode`]
+ ///
+ /// This allows drawing polygons/triangles as lines (wireframe) or points instead of filled
+ ///
+ /// Supported platforms:
+ /// - DX12
+ /// - Vulkan
+ ///
+ /// This is a native only feature.
+ const NON_FILL_POLYGON_MODE = 0x0000_0000_0200_0000;
+ /// Features which are part of the upstream WebGPU standard.
+ const ALL_WEBGPU = 0x0000_0000_0000_FFFF;
+ /// Features that are only available when targeting native (not web).
+ const ALL_NATIVE = 0xFFFF_FFFF_FFFF_0000;
+ }
+}
+
+/// Represents the sets of limits an adapter/device supports.
+///
+/// Limits "better" than the default must be supported by the adapter and requested when requesting
+/// a device. If limits "better" than the adapter supports are requested, requesting a device will panic.
+/// Once a device is requested, you may only use resources up to the limits requested _even_ if the
+/// adapter supports "better" limits.
+///
+/// Requesting limits that are "better" than you need may cause performance to decrease because the
+/// implementation needs to support more than is needed. You should ideally only request exactly what
+/// you need.
+///
+/// See also: https://gpuweb.github.io/gpuweb/#dictdef-gpulimits
+#[repr(C)]
+#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
+#[cfg_attr(feature = "trace", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub struct Limits {
+ /// Amount of bind groups that can be attached to a pipeline at the same time. Defaults to 4. Higher is "better".
+ pub max_bind_groups: u32,
+ /// Amount of uniform buffer bindings that can be dynamic in a single pipeline. Defaults to 8. Higher is "better".
+ pub max_dynamic_uniform_buffers_per_pipeline_layout: u32,
+ /// Amount of storage buffer bindings that can be dynamic in a single pipeline. Defaults to 4. Higher is "better".
+ pub max_dynamic_storage_buffers_per_pipeline_layout: u32,
+ /// Amount of sampled textures visible in a single shader stage. Defaults to 16. Higher is "better".
+ pub max_sampled_textures_per_shader_stage: u32,
+ /// Amount of samplers visible in a single shader stage. Defaults to 16. Higher is "better".
+ pub max_samplers_per_shader_stage: u32,
+ /// Amount of storage buffers visible in a single shader stage. Defaults to 4. Higher is "better".
+ pub max_storage_buffers_per_shader_stage: u32,
+ /// Amount of storage textures visible in a single shader stage. Defaults to 4. Higher is "better".
+ pub max_storage_textures_per_shader_stage: u32,
+ /// Amount of uniform buffers visible in a single shader stage. Defaults to 12. Higher is "better".
+ pub max_uniform_buffers_per_shader_stage: u32,
+ /// Maximum size in bytes of a binding to a uniform buffer. Defaults to 16384. Higher is "better".
+ pub max_uniform_buffer_binding_size: u32,
+ /// Amount of storage available for push constants in bytes. Defaults to 0. Higher is "better".
+ /// Requesting more than 0 during device creation requires [`Features::PUSH_CONSTANTS`] to be enabled.
+ ///
+ /// Expect the size to be:
+ /// - Vulkan: 128-256 bytes
+ /// - DX12: 256 bytes
+ /// - Metal: 4096 bytes
+ /// - DX11 & OpenGL don't natively support push constants, and are emulated with uniforms,
+ /// so this number is less useful.
+ pub max_push_constant_size: u32,
+}
+
+impl Default for Limits {
+ fn default() -> Self {
+ Self {
+ max_bind_groups: 4,
+ max_dynamic_uniform_buffers_per_pipeline_layout: 8,
+ max_dynamic_storage_buffers_per_pipeline_layout: 4,
+ max_sampled_textures_per_shader_stage: 16,
+ max_samplers_per_shader_stage: 16,
+ max_storage_buffers_per_shader_stage: 4,
+ max_storage_textures_per_shader_stage: 4,
+ max_uniform_buffers_per_shader_stage: 12,
+ max_uniform_buffer_binding_size: 16384,
+ max_push_constant_size: 0,
+ }
+ }
+}
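+
+// A minimal sketch of requesting one limit above the defaults; the adapter
+// must actually support it, and a non-zero `max_push_constant_size` also
+// requires `Features::PUSH_CONSTANTS`:
+//
+//     let limits = Limits {
+//         max_push_constant_size: 128,
+//         ..Limits::default()
+//     };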
+
+/// Describes a [`Device`].
+#[repr(C)]
+#[derive(Clone, Debug, Default)]
+#[cfg_attr(feature = "trace", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub struct DeviceDescriptor<L> {
+ /// Debug label for the device.
+ pub label: L,
+ /// Features that the device should support. If any feature is not supported by
+ /// the adapter, creating a device will panic.
+ pub features: Features,
+ /// Limits that the device should support. If any limit is "better" than the limit exposed by
+ /// the adapter, creating a device will panic.
+ pub limits: Limits,
+ /// Switch shader validation on/off. This is a temporary field
+ /// that will be removed once our validation logic is complete.
+ pub shader_validation: bool,
+}
+
+impl<L> DeviceDescriptor<L> {
+ pub fn map_label<K>(&self, fun: impl FnOnce(&L) -> K) -> DeviceDescriptor<K> {
+ DeviceDescriptor {
+ label: fun(&self.label),
+ features: self.features,
+ limits: self.limits.clone(),
+ shader_validation: self.shader_validation,
+ }
+ }
+}
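+
+// A usage sketch: `map_label` converts only the label type, e.g. from an
+// owned `String` to a borrowed `&str`, leaving the other fields untouched:
+//
+//     let owned: DeviceDescriptor<String> = DeviceDescriptor::default();
+//     let borrowed: DeviceDescriptor<&str> = owned.map_label(|l| l.as_str());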
+
+bitflags::bitflags! {
+ /// Describes the shader stages that a binding will be visible from.
+ ///
+ /// These can be combined so something that is visible from both vertex and fragment shaders can be defined as:
+ ///
+ /// `ShaderStage::VERTEX | ShaderStage::FRAGMENT`
+ #[repr(transparent)]
+ #[cfg_attr(feature = "serde", derive(Deserialize, Serialize))]
+ pub struct ShaderStage: u32 {
+ /// Binding is not visible from any shader stage.
+ const NONE = 0;
+ /// Binding is visible from the vertex shader of a render pipeline.
+ const VERTEX = 1;
+ /// Binding is visible from the fragment shader of a render pipeline.
+ const FRAGMENT = 2;
+ /// Binding is visible from the compute shader of a compute pipeline.
+ const COMPUTE = 4;
+ }
+}
+
+/// Dimensions of a particular texture view.
+#[repr(C)]
+#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
+#[cfg_attr(feature = "trace", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub enum TextureViewDimension {
+ /// A one dimensional texture. `texture1D` in glsl shaders.
+ D1,
+ /// A two dimensional texture. `texture2D` in glsl shaders.
+ D2,
+ /// A two dimensional array texture. `texture2DArray` in glsl shaders.
+ D2Array,
+ /// A cubemap texture. `textureCube` in glsl shaders.
+ Cube,
+ /// A cubemap array texture. `textureCubeArray` in glsl shaders.
+ CubeArray,
+ /// A three dimensional texture. `texture3D` in glsl shaders.
+ D3,
+}
+
+impl Default for TextureViewDimension {
+ fn default() -> Self {
+ Self::D2
+ }
+}
+
+impl TextureViewDimension {
+    /// Get the texture dimension required for this texture view dimension.
+ pub fn compatible_texture_dimension(self) -> TextureDimension {
+ match self {
+ Self::D1 => TextureDimension::D1,
+ Self::D2 | Self::D2Array | Self::Cube | Self::CubeArray => TextureDimension::D2,
+ Self::D3 => TextureDimension::D3,
+ }
+ }
+}
+
+/// Alpha blend factor.
+///
+/// Alpha blending is very complicated: see the OpenGL or Vulkan spec for more information.
+#[repr(C)]
+#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
+#[cfg_attr(feature = "trace", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub enum BlendFactor {
+ Zero = 0,
+ One = 1,
+ SrcColor = 2,
+ OneMinusSrcColor = 3,
+ SrcAlpha = 4,
+ OneMinusSrcAlpha = 5,
+ DstColor = 6,
+ OneMinusDstColor = 7,
+ DstAlpha = 8,
+ OneMinusDstAlpha = 9,
+ SrcAlphaSaturated = 10,
+ BlendColor = 11,
+ OneMinusBlendColor = 12,
+}
+
+/// Alpha blend operation.
+///
+/// Alpha blending is very complicated: see the OpenGL or Vulkan spec for more information.
+#[repr(C)]
+#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
+#[cfg_attr(feature = "trace", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub enum BlendOperation {
+ Add = 0,
+ Subtract = 1,
+ ReverseSubtract = 2,
+ Min = 3,
+ Max = 4,
+}
+
+impl Default for BlendOperation {
+ fn default() -> Self {
+ Self::Add
+ }
+}
+
+/// Describes the blend state of a pipeline.
+///
+/// Alpha blending is very complicated: see the OpenGL or Vulkan spec for more information.
+#[repr(C)]
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+#[cfg_attr(feature = "trace", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub struct BlendDescriptor {
+ pub src_factor: BlendFactor,
+ pub dst_factor: BlendFactor,
+ pub operation: BlendOperation,
+}
+
+impl BlendDescriptor {
+ pub const REPLACE: Self = BlendDescriptor {
+ src_factor: BlendFactor::One,
+ dst_factor: BlendFactor::Zero,
+ operation: BlendOperation::Add,
+ };
+
+ pub fn uses_color(&self) -> bool {
+ match (self.src_factor, self.dst_factor) {
+ (BlendFactor::BlendColor, _)
+ | (BlendFactor::OneMinusBlendColor, _)
+ | (_, BlendFactor::BlendColor)
+ | (_, BlendFactor::OneMinusBlendColor) => true,
+ (_, _) => false,
+ }
+ }
+}
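+
+// A sketch of the classic "source-over" alpha blending descriptor; it reads
+// the source alpha but not the blend color, so `uses_color()` is false:
+//
+//     let src_over = BlendDescriptor {
+//         src_factor: BlendFactor::SrcAlpha,
+//         dst_factor: BlendFactor::OneMinusSrcAlpha,
+//         operation: BlendOperation::Add,
+//     };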
+
+impl Default for BlendDescriptor {
+ fn default() -> Self {
+ Self::REPLACE
+ }
+}
+
+/// Describes the color state of a render pipeline.
+#[repr(C)]
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+#[cfg_attr(feature = "trace", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub struct ColorStateDescriptor {
+    /// The [`TextureFormat`] of the image that this pipeline will render to. Must match the format
+ /// of the corresponding color attachment in [`CommandEncoder::begin_render_pass`].
+ pub format: TextureFormat,
+ /// The alpha blending that is used for this pipeline.
+ pub alpha_blend: BlendDescriptor,
+ /// The color blending that is used for this pipeline.
+ pub color_blend: BlendDescriptor,
+    /// Mask which enables/disables writes to different color/alpha channels.
+ pub write_mask: ColorWrite,
+}
+
+impl From<TextureFormat> for ColorStateDescriptor {
+ fn from(format: TextureFormat) -> Self {
+ Self {
+ format,
+ alpha_blend: BlendDescriptor::REPLACE,
+ color_blend: BlendDescriptor::REPLACE,
+ write_mask: ColorWrite::ALL,
+ }
+ }
+}
+
+/// Primitive type the input mesh is composed of.
+#[repr(C)]
+#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
+#[cfg_attr(feature = "trace", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub enum PrimitiveTopology {
+ /// Vertex data is a list of points. Each vertex is a new point.
+ PointList = 0,
+ /// Vertex data is a list of lines. Each pair of vertices composes a new line.
+ ///
+ /// Vertices `0 1 2 3` create two lines `0 1` and `2 3`
+ LineList = 1,
+ /// Vertex data is a strip of lines. Each set of two adjacent vertices form a line.
+ ///
+ /// Vertices `0 1 2 3` create three lines `0 1`, `1 2`, and `2 3`.
+ LineStrip = 2,
+ /// Vertex data is a list of triangles. Each set of 3 vertices composes a new triangle.
+ ///
+ /// Vertices `0 1 2 3 4 5` create two triangles `0 1 2` and `3 4 5`
+ TriangleList = 3,
+ /// Vertex data is a triangle strip. Each set of three adjacent vertices form a triangle.
+ ///
+ /// Vertices `0 1 2 3 4 5` creates four triangles `0 1 2`, `2 1 3`, `3 2 4`, and `4 3 5`
+ TriangleStrip = 4,
+}
+
+/// Winding order which classifies the "front" face.
+#[repr(C)]
+#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
+#[cfg_attr(feature = "trace", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub enum FrontFace {
+ /// Triangles with vertices in counter clockwise order are considered the front face.
+ ///
+ /// This is the default with right handed coordinate spaces.
+ Ccw = 0,
+ /// Triangles with vertices in clockwise order are considered the front face.
+ ///
+ /// This is the default with left handed coordinate spaces.
+ Cw = 1,
+}
+
+impl Default for FrontFace {
+ fn default() -> Self {
+ Self::Ccw
+ }
+}
+
+/// Type of faces to be culled.
+#[repr(C)]
+#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
+#[cfg_attr(feature = "trace", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub enum CullMode {
+ /// No faces should be culled
+ None = 0,
+ /// Front faces should be culled
+ Front = 1,
+ /// Back faces should be culled
+ Back = 2,
+}
+
+impl Default for CullMode {
+ fn default() -> Self {
+ Self::None
+ }
+}
+
+/// Type of drawing mode for polygons
+#[repr(C)]
+#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
+#[cfg_attr(feature = "trace", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub enum PolygonMode {
+ /// Polygons are filled
+ Fill = 0,
+    /// Polygons are drawn as line segments
+    Line = 1,
+    /// Polygons are drawn as points
+ Point = 2,
+}
+
+impl Default for PolygonMode {
+ fn default() -> Self {
+ Self::Fill
+ }
+}
+
+/// Describes the state of the rasterizer in a render pipeline.
+#[repr(C)]
+#[derive(Clone, Debug, Default, PartialEq)]
+#[cfg_attr(feature = "trace", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub struct RasterizationStateDescriptor {
+ pub front_face: FrontFace,
+ pub cull_mode: CullMode,
+    /// Controls the way each polygon is rasterized. Can be either `Fill` (default), `Line`, or `Point`.
+ ///
+ /// Setting this to something other than `Fill` requires `Features::NON_FILL_POLYGON_MODE` to be enabled.
+ pub polygon_mode: PolygonMode,
+    /// If enabled, polygon depth is clamped to the 0-1 range instead of being clipped.
+ ///
+ /// Requires `Features::DEPTH_CLAMPING` enabled.
+ pub clamp_depth: bool,
+ pub depth_bias: i32,
+ pub depth_bias_slope_scale: f32,
+ pub depth_bias_clamp: f32,
+}
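+
+// A sketch of a wireframe rasterizer state; all other fields keep their
+// defaults, and a non-`Fill` mode requires `Features::NON_FILL_POLYGON_MODE`:
+//
+//     let raster = RasterizationStateDescriptor {
+//         polygon_mode: PolygonMode::Line,
+//         ..RasterizationStateDescriptor::default()
+//     };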
+
+/// Underlying texture data format.
+///
+/// If there is a conversion in the format (such as srgb -> linear), the conversion listed is for
+/// loading from texture in a shader. When writing to the texture, the opposite conversion takes place.
+#[repr(C)]
+#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
+#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))]
+pub enum TextureFormat {
+ // Normal 8 bit formats
+ /// Red channel only. 8 bit integer per channel. [0, 255] converted to/from float [0, 1] in shader.
+ R8Unorm = 0,
+ /// Red channel only. 8 bit integer per channel. [-127, 127] converted to/from float [-1, 1] in shader.
+ R8Snorm = 1,
+ /// Red channel only. 8 bit integer per channel. Unsigned in shader.
+ R8Uint = 2,
+ /// Red channel only. 8 bit integer per channel. Signed in shader.
+ R8Sint = 3,
+
+ // Normal 16 bit formats
+ /// Red channel only. 16 bit integer per channel. Unsigned in shader.
+ R16Uint = 4,
+ /// Red channel only. 16 bit integer per channel. Signed in shader.
+ R16Sint = 5,
+ /// Red channel only. 16 bit float per channel. Float in shader.
+ R16Float = 6,
+ /// Red and green channels. 8 bit integer per channel. [0, 255] converted to/from float [0, 1] in shader.
+ Rg8Unorm = 7,
+ /// Red and green channels. 8 bit integer per channel. [-127, 127] converted to/from float [-1, 1] in shader.
+ Rg8Snorm = 8,
+ /// Red and green channels. 8 bit integer per channel. Unsigned in shader.
+ Rg8Uint = 9,
+    /// Red and green channels. 8 bit integer per channel. Signed in shader.
+ Rg8Sint = 10,
+
+ // Normal 32 bit formats
+ /// Red channel only. 32 bit integer per channel. Unsigned in shader.
+ R32Uint = 11,
+ /// Red channel only. 32 bit integer per channel. Signed in shader.
+ R32Sint = 12,
+ /// Red channel only. 32 bit float per channel. Float in shader.
+ R32Float = 13,
+ /// Red and green channels. 16 bit integer per channel. Unsigned in shader.
+ Rg16Uint = 14,
+ /// Red and green channels. 16 bit integer per channel. Signed in shader.
+ Rg16Sint = 15,
+ /// Red and green channels. 16 bit float per channel. Float in shader.
+ Rg16Float = 16,
+ /// Red, green, blue, and alpha channels. 8 bit integer per channel. [0, 255] converted to/from float [0, 1] in shader.
+ Rgba8Unorm = 17,
+ /// Red, green, blue, and alpha channels. 8 bit integer per channel. Srgb-color [0, 255] converted to/from linear-color float [0, 1] in shader.
+ Rgba8UnormSrgb = 18,
+ /// Red, green, blue, and alpha channels. 8 bit integer per channel. [-127, 127] converted to/from float [-1, 1] in shader.
+ Rgba8Snorm = 19,
+ /// Red, green, blue, and alpha channels. 8 bit integer per channel. Unsigned in shader.
+ Rgba8Uint = 20,
+ /// Red, green, blue, and alpha channels. 8 bit integer per channel. Signed in shader.
+ Rgba8Sint = 21,
+ /// Blue, green, red, and alpha channels. 8 bit integer per channel. [0, 255] converted to/from float [0, 1] in shader.
+ Bgra8Unorm = 22,
+ /// Blue, green, red, and alpha channels. 8 bit integer per channel. Srgb-color [0, 255] converted to/from linear-color float [0, 1] in shader.
+ Bgra8UnormSrgb = 23,
+
+ // Packed 32 bit formats
+ /// Red, green, blue, and alpha channels. 10 bit integer for RGB channels, 2 bit integer for alpha channel. [0, 1023] ([0, 3] for alpha) converted to/from float [0, 1] in shader.
+ Rgb10a2Unorm = 24,
+ /// Red, green, and blue channels. 11 bit float with no sign bit for RG channels. 10 bit float with no sign bit for blue channel. Float in shader.
+ Rg11b10Float = 25,
+
+ // Normal 64 bit formats
+ /// Red and green channels. 32 bit integer per channel. Unsigned in shader.
+ Rg32Uint = 26,
+ /// Red and green channels. 32 bit integer per channel. Signed in shader.
+ Rg32Sint = 27,
+ /// Red and green channels. 32 bit float per channel. Float in shader.
+ Rg32Float = 28,
+ /// Red, green, blue, and alpha channels. 16 bit integer per channel. Unsigned in shader.
+ Rgba16Uint = 29,
+ /// Red, green, blue, and alpha channels. 16 bit integer per channel. Signed in shader.
+ Rgba16Sint = 30,
+ /// Red, green, blue, and alpha channels. 16 bit float per channel. Float in shader.
+ Rgba16Float = 31,
+
+ // Normal 128 bit formats
+ /// Red, green, blue, and alpha channels. 32 bit integer per channel. Unsigned in shader.
+ Rgba32Uint = 32,
+ /// Red, green, blue, and alpha channels. 32 bit integer per channel. Signed in shader.
+ Rgba32Sint = 33,
+ /// Red, green, blue, and alpha channels. 32 bit float per channel. Float in shader.
+ Rgba32Float = 34,
+
+ // Depth and stencil formats
+ /// Special depth format with 32 bit floating point depth.
+ Depth32Float = 35,
+ /// Special depth format with at least 24 bit integer depth.
+ Depth24Plus = 36,
+ /// Special depth/stencil format with at least 24 bit integer depth and 8 bits integer stencil.
+ Depth24PlusStencil8 = 37,
+
+ // Compressed textures usable with `TEXTURE_COMPRESSION_BC` feature.
+    /// 4x4 block compressed texture. 8 bytes per block (4 bit/px). 4 color + alpha palette. 5 bit R + 6 bit G + 5 bit B + 1 bit alpha.
+ /// [0, 64] ([0, 1] for alpha) converted to/from float [0, 1] in shader.
+ ///
+ /// Also known as DXT1.
+ ///
+ /// [`Features::TEXTURE_COMPRESSION_BC`] must be enabled to use this texture format.
+ Bc1RgbaUnorm = 38,
+    /// 4x4 block compressed texture. 8 bytes per block (4 bit/px). 4 color + alpha palette. 5 bit R + 6 bit G + 5 bit B + 1 bit alpha.
+ /// Srgb-color [0, 64] ([0, 16] for alpha) converted to/from linear-color float [0, 1] in shader.
+ ///
+ /// Also known as DXT1.
+ ///
+ /// [`Features::TEXTURE_COMPRESSION_BC`] must be enabled to use this texture format.
+ Bc1RgbaUnormSrgb = 39,
+    /// 4x4 block compressed texture. 16 bytes per block (8 bit/px). 4 color palette. 5 bit R + 6 bit G + 5 bit B + 4 bit alpha.
+ /// [0, 64] ([0, 16] for alpha) converted to/from float [0, 1] in shader.
+ ///
+ /// Also known as DXT3.
+ ///
+ /// [`Features::TEXTURE_COMPRESSION_BC`] must be enabled to use this texture format.
+ Bc2RgbaUnorm = 40,
+    /// 4x4 block compressed texture. 16 bytes per block (8 bit/px). 4 color palette. 5 bit R + 6 bit G + 5 bit B + 4 bit alpha.
+ /// Srgb-color [0, 64] ([0, 256] for alpha) converted to/from linear-color float [0, 1] in shader.
+ ///
+ /// Also known as DXT3.
+ ///
+ /// [`Features::TEXTURE_COMPRESSION_BC`] must be enabled to use this texture format.
+ Bc2RgbaUnormSrgb = 41,
+    /// 4x4 block compressed texture. 16 bytes per block (8 bit/px). 4 color palette + 8 alpha palette. 5 bit R + 6 bit G + 5 bit B + 8 bit alpha.
+ /// [0, 64] ([0, 256] for alpha) converted to/from float [0, 1] in shader.
+ ///
+ /// Also known as DXT5.
+ ///
+ /// [`Features::TEXTURE_COMPRESSION_BC`] must be enabled to use this texture format.
+ Bc3RgbaUnorm = 42,
+    /// 4x4 block compressed texture. 16 bytes per block (8 bit/px). 4 color palette + 8 alpha palette. 5 bit R + 6 bit G + 5 bit B + 8 bit alpha.
+ /// Srgb-color [0, 64] ([0, 256] for alpha) converted to/from linear-color float [0, 1] in shader.
+ ///
+ /// Also known as DXT5.
+ ///
+ /// [`Features::TEXTURE_COMPRESSION_BC`] must be enabled to use this texture format.
+ Bc3RgbaUnormSrgb = 43,
+    /// 4x4 block compressed texture. 8 bytes per block (4 bit/px). 8 color palette. 8 bit R.
+ /// [0, 256] converted to/from float [0, 1] in shader.
+ ///
+ /// Also known as RGTC1.
+ ///
+ /// [`Features::TEXTURE_COMPRESSION_BC`] must be enabled to use this texture format.
+ Bc4RUnorm = 44,
+    /// 4x4 block compressed texture. 8 bytes per block (4 bit/px). 8 color palette. 8 bit R.
+ /// [-127, 127] converted to/from float [-1, 1] in shader.
+ ///
+ /// Also known as RGTC1.
+ ///
+ /// [`Features::TEXTURE_COMPRESSION_BC`] must be enabled to use this texture format.
+ Bc4RSnorm = 45,
+    /// 4x4 block compressed texture. 16 bytes per block (8 bit/px). 8 color red palette + 8 color green palette. 8 bit RG.
+ /// [0, 256] converted to/from float [0, 1] in shader.
+ ///
+ /// Also known as RGTC2.
+ ///
+ /// [`Features::TEXTURE_COMPRESSION_BC`] must be enabled to use this texture format.
+ Bc5RgUnorm = 46,
+    /// 4x4 block compressed texture. 16 bytes per block (8 bit/px). 8 color red palette + 8 color green palette. 8 bit RG.
+ /// [-127, 127] converted to/from float [-1, 1] in shader.
+ ///
+ /// Also known as RGTC2.
+ ///
+ /// [`Features::TEXTURE_COMPRESSION_BC`] must be enabled to use this texture format.
+ Bc5RgSnorm = 47,
+    /// 4x4 block compressed texture. 16 bytes per block (8 bit/px). Variable-sized palette. 16 bit unsigned float RGB. Float in shader.
+ ///
+ /// Also known as BPTC (float).
+ ///
+ /// [`Features::TEXTURE_COMPRESSION_BC`] must be enabled to use this texture format.
+ Bc6hRgbUfloat = 48,
+    /// 4x4 block compressed texture. 16 bytes per block (8 bit/px). Variable-sized palette. 16 bit signed float RGB. Float in shader.
+ ///
+ /// Also known as BPTC (float).
+ ///
+ /// [`Features::TEXTURE_COMPRESSION_BC`] must be enabled to use this texture format.
+ Bc6hRgbSfloat = 49,
+    /// 4x4 block compressed texture. 16 bytes per block (8 bit/px). Variable-sized palette. 8 bit integer RGBA.
+ /// [0, 256] converted to/from float [0, 1] in shader.
+ ///
+ /// Also known as BPTC (unorm).
+ ///
+ /// [`Features::TEXTURE_COMPRESSION_BC`] must be enabled to use this texture format.
+ Bc7RgbaUnorm = 50,
+    /// 4x4 block compressed texture. 16 bytes per block (8 bit/px). Variable-sized palette. 8 bit integer RGBA.
+ /// Srgb-color [0, 255] converted to/from linear-color float [0, 1] in shader.
+ ///
+ /// Also known as BPTC (unorm).
+ ///
+ /// [`Features::TEXTURE_COMPRESSION_BC`] must be enabled to use this texture format.
+ Bc7RgbaUnormSrgb = 51,
+}
+
+bitflags::bitflags! {
+ /// Color write mask. Disabled color channels will not be written to.
+ #[repr(transparent)]
+ #[cfg_attr(feature = "trace", derive(Serialize))]
+ #[cfg_attr(feature = "replay", derive(Deserialize))]
+ pub struct ColorWrite: u32 {
+ /// Enable red channel writes
+ const RED = 1;
+ /// Enable green channel writes
+ const GREEN = 2;
+ /// Enable blue channel writes
+ const BLUE = 4;
+ /// Enable alpha channel writes
+ const ALPHA = 8;
+ /// Enable red, green, and blue channel writes
+ const COLOR = 7;
+ /// Enable writes to all channels.
+ const ALL = 15;
+ }
+}
+
+impl Default for ColorWrite {
+ fn default() -> Self {
+ Self::ALL
+ }
+}
+
+#[repr(C)]
+#[derive(Clone, Debug, Default, PartialEq, Eq, Hash)]
+#[cfg_attr(feature = "trace", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub struct StencilStateDescriptor {
+ /// Front face mode.
+ pub front: StencilStateFaceDescriptor,
+ /// Back face mode.
+ pub back: StencilStateFaceDescriptor,
+ /// Stencil values are AND'd with this mask when reading and writing from the stencil buffer. Only low 8 bits are used.
+ pub read_mask: u32,
+ /// Stencil values are AND'd with this mask when writing to the stencil buffer. Only low 8 bits are used.
+ pub write_mask: u32,
+}
+
+impl StencilStateDescriptor {
+ pub fn is_enabled(&self) -> bool {
+ (self.front != StencilStateFaceDescriptor::IGNORE
+ || self.back != StencilStateFaceDescriptor::IGNORE)
+ && (self.read_mask != 0 || self.write_mask != 0)
+ }
+ pub fn is_read_only(&self) -> bool {
+ self.write_mask == 0
+ }
+ pub fn needs_ref_value(&self) -> bool {
+ self.front.compare.needs_ref_value() || self.back.compare.needs_ref_value()
+ }
+}
+
+/// Describes the depth/stencil state in a render pipeline.
+#[repr(C)]
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+#[cfg_attr(feature = "trace", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub struct DepthStencilStateDescriptor {
+    /// Format of the depth/stencil buffer, must be special depth format. Must match the format
+ /// of the depth/stencil attachment in [`CommandEncoder::begin_render_pass`].
+ pub format: TextureFormat,
+ /// If disabled, depth will not be written to.
+ pub depth_write_enabled: bool,
+ /// Comparison function used to compare depth values in the depth test.
+ pub depth_compare: CompareFunction,
+ /// Stencil state.
+ pub stencil: StencilStateDescriptor,
+}
+
+impl DepthStencilStateDescriptor {
+ /// Returns true if the depth test or depth writes can have any effect.
+ pub fn is_depth_enabled(&self) -> bool {
+ self.depth_compare != CompareFunction::Always || self.depth_write_enabled
+ }
+ /// Returns true if the state doesn't mutate either the depth or stencil buffer.
+ pub fn is_read_only(&self) -> bool {
+ !self.depth_write_enabled && self.stencil.is_read_only()
+ }
+}
+
+/// Format of indices used with pipeline.
+#[repr(C)]
+#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
+#[cfg_attr(feature = "trace", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub enum IndexFormat {
+ /// Indices are 16 bit unsigned integers.
+ Uint16 = 0,
+ /// Indices are 32 bit unsigned integers.
+ Uint32 = 1,
+}
+
+impl Default for IndexFormat {
+ fn default() -> Self {
+ Self::Uint32
+ }
+}
+
+/// Operation to perform on the stencil value.
+#[repr(C)]
+#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
+#[cfg_attr(feature = "trace", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub enum StencilOperation {
+ /// Keep stencil value unchanged.
+ Keep = 0,
+ /// Set stencil value to zero.
+ Zero = 1,
+ /// Replace stencil value with value provided in most recent call to [`RenderPass::set_stencil_reference`].
+ Replace = 2,
+ /// Bitwise inverts stencil value.
+ Invert = 3,
+ /// Increments stencil value by one, clamping on overflow.
+ IncrementClamp = 4,
+ /// Decrements stencil value by one, clamping on underflow.
+ DecrementClamp = 5,
+ /// Increments stencil value by one, wrapping on overflow.
+ IncrementWrap = 6,
+ /// Decrements stencil value by one, wrapping on underflow.
+ DecrementWrap = 7,
+}
+
+impl Default for StencilOperation {
+ fn default() -> Self {
+ Self::Keep
+ }
+}
+
+/// Describes stencil state in a render pipeline.
+///
+/// If you are not using stencil state, set this to [`StencilStateFaceDescriptor::IGNORE`].
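+///
+/// A hedged sketch of a face state that writes the reference value whenever both tests pass
+/// (values illustrative):
+///
+/// ```ignore
+/// let face = StencilStateFaceDescriptor {
+///     compare: CompareFunction::Always,
+///     fail_op: StencilOperation::Keep,
+///     depth_fail_op: StencilOperation::Keep,
+///     pass_op: StencilOperation::Replace,
+/// };
+/// ```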
+#[repr(C)]
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+#[cfg_attr(feature = "trace", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub struct StencilStateFaceDescriptor {
+ /// Comparison function that determines if the fail_op or pass_op is used on the stencil buffer.
+ pub compare: CompareFunction,
+ /// Operation that is performed when the stencil test fails.
+ pub fail_op: StencilOperation,
+ /// Operation that is performed when the depth test fails but the stencil test succeeds.
+ pub depth_fail_op: StencilOperation,
+ /// Operation that is performed when the stencil test succeeds.
+ pub pass_op: StencilOperation,
+}
+
+impl StencilStateFaceDescriptor {
+ /// Face state that ignores the stencil test: the comparison always passes and every
+ /// operation keeps the existing stencil value.
+ pub const IGNORE: Self = StencilStateFaceDescriptor {
+ compare: CompareFunction::Always,
+ fail_op: StencilOperation::Keep,
+ depth_fail_op: StencilOperation::Keep,
+ pass_op: StencilOperation::Keep,
+ };
+}
+
+impl Default for StencilStateFaceDescriptor {
+ fn default() -> Self {
+ Self::IGNORE
+ }
+}
+
+/// Comparison function used for depth and stencil operations.
+#[repr(C)]
+#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
+#[cfg_attr(feature = "trace", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub enum CompareFunction {
+ /// Function never passes
+ Never = 1,
+ /// Function passes if new value less than existing value
+ Less = 2,
+ /// Function passes if new value is equal to existing value
+ Equal = 3,
+ /// Function passes if new value is less than or equal to existing value
+ LessEqual = 4,
+ /// Function passes if new value is greater than existing value
+ Greater = 5,
+ /// Function passes if new value is not equal to existing value
+ NotEqual = 6,
+ /// Function passes if new value is greater than or equal to existing value
+ GreaterEqual = 7,
+ /// Function always passes
+ Always = 8,
+}
+
+impl CompareFunction {
+ /// Returns true if the comparison depends on the reference value set by
+ /// [`RenderPass::set_stencil_reference`].
+ pub fn needs_ref_value(self) -> bool {
+ match self {
+ Self::Never | Self::Always => false,
+ _ => true,
+ }
+ }
+}
+
+/// Rate that determines when vertex data is advanced.
+#[repr(C)]
+#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
+#[cfg_attr(feature = "trace", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub enum InputStepMode {
+ /// Input data is advanced every vertex. This is the standard value for vertex data.
+ Vertex = 0,
+ /// Input data is advanced every instance.
+ Instance = 1,
+}
+
+/// Vertex inputs (attributes) to shaders.
+///
+/// Arrays of these can be made with the [`vertex_attr_array`] macro. Vertex attributes are assumed to be tightly packed.
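+///
+/// A hand-written sketch of two tightly packed attributes (offsets derived from
+/// [`VertexFormat::size`]; locations must match the shader):
+///
+/// ```ignore
+/// let position = VertexAttributeDescriptor {
+///     offset: 0,
+///     format: VertexFormat::Float3,
+///     shader_location: 0,
+/// };
+/// let uv = VertexAttributeDescriptor {
+///     offset: VertexFormat::Float3.size(), // 12 bytes
+///     format: VertexFormat::Float2,
+///     shader_location: 1,
+/// };
+/// ```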
+#[repr(C)]
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+#[cfg_attr(feature = "trace", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub struct VertexAttributeDescriptor {
+ /// Byte offset of the start of the input
+ pub offset: BufferAddress,
+ /// Format of the input
+ pub format: VertexFormat,
+ /// Location for this input. Must match the location in the shader.
+ pub shader_location: ShaderLocation,
+}
+
+/// Vertex Format for a Vertex Attribute (input).
+#[repr(C)]
+#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
+#[cfg_attr(feature = "trace", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub enum VertexFormat {
+ /// Two unsigned bytes (u8). `uvec2` in shaders.
+ Uchar2 = 0,
+ /// Four unsigned bytes (u8). `uvec4` in shaders.
+ Uchar4 = 1,
+ /// Two signed bytes (i8). `ivec2` in shaders.
+ Char2 = 2,
+ /// Four signed bytes (i8). `ivec4` in shaders.
+ Char4 = 3,
+ /// Two unsigned bytes (u8). [0, 255] converted to float [0, 1]. `vec2` in shaders.
+ Uchar2Norm = 4,
+ /// Four unsigned bytes (u8). [0, 255] converted to float [0, 1]. `vec4` in shaders.
+ Uchar4Norm = 5,
+ /// Two signed bytes (i8). [-127, 127] converted to float [-1, 1]. `vec2` in shaders.
+ Char2Norm = 6,
+ /// Four signed bytes (i8). [-127, 127] converted to float [-1, 1]. `vec4` in shaders.
+ Char4Norm = 7,
+ /// Two unsigned shorts (u16). `uvec2` in shaders.
+ Ushort2 = 8,
+ /// Four unsigned shorts (u16). `uvec4` in shaders.
+ Ushort4 = 9,
+ /// Two signed shorts (i16). `ivec2` in shaders.
+ Short2 = 10,
+ /// Four signed shorts (i16). `ivec4` in shaders.
+ Short4 = 11,
+ /// Two unsigned shorts (u16). [0, 65535] converted to float [0, 1]. `vec2` in shaders.
+ Ushort2Norm = 12,
+ /// Four unsigned shorts (u16). [0, 65535] converted to float [0, 1]. `vec4` in shaders.
+ Ushort4Norm = 13,
+ /// Two signed shorts (i16). [-32767, 32767] converted to float [-1, 1]. `vec2` in shaders.
+ Short2Norm = 14,
+ /// Four signed shorts (i16). [-32767, 32767] converted to float [-1, 1]. `vec4` in shaders.
+ Short4Norm = 15,
+ /// Two half-precision floats (no direct Rust equivalent). `vec2` in shaders.
+ Half2 = 16,
+ /// Four half-precision floats (no direct Rust equivalent). `vec4` in shaders.
+ Half4 = 17,
+ /// One single-precision float (f32). `float` in shaders.
+ Float = 18,
+ /// Two single-precision floats (f32). `vec2` in shaders.
+ Float2 = 19,
+ /// Three single-precision floats (f32). `vec3` in shaders.
+ Float3 = 20,
+ /// Four single-precision floats (f32). `vec4` in shaders.
+ Float4 = 21,
+ /// One unsigned int (u32). `uint` in shaders.
+ Uint = 22,
+ /// Two unsigned ints (u32). `uvec2` in shaders.
+ Uint2 = 23,
+ /// Three unsigned ints (u32). `uvec3` in shaders.
+ Uint3 = 24,
+ /// Four unsigned ints (u32). `uvec4` in shaders.
+ Uint4 = 25,
+ /// One signed int (i32). `int` in shaders.
+ Int = 26,
+ /// Two signed ints (i32). `ivec2` in shaders.
+ Int2 = 27,
+ /// Three signed ints (i32). `ivec3` in shaders.
+ Int3 = 28,
+ /// Four signed ints (i32). `ivec4` in shaders.
+ Int4 = 29,
+}
+
+impl VertexFormat {
+ /// Returns the byte size of the format.
+ pub const fn size(&self) -> u64 {
+ match self {
+ Self::Uchar2 | Self::Char2 | Self::Uchar2Norm | Self::Char2Norm => 2,
+ Self::Uchar4
+ | Self::Char4
+ | Self::Uchar4Norm
+ | Self::Char4Norm
+ | Self::Ushort2
+ | Self::Short2
+ | Self::Ushort2Norm
+ | Self::Short2Norm
+ | Self::Half2
+ | Self::Float
+ | Self::Uint
+ | Self::Int => 4,
+ Self::Ushort4
+ | Self::Short4
+ | Self::Ushort4Norm
+ | Self::Short4Norm
+ | Self::Half4
+ | Self::Float2
+ | Self::Uint2
+ | Self::Int2 => 8,
+ Self::Float3 | Self::Uint3 | Self::Int3 => 12,
+ Self::Float4 | Self::Uint4 | Self::Int4 => 16,
+ }
+ }
+}
+
+bitflags::bitflags! {
+ /// Different ways that you can use a buffer.
+ ///
+ /// The usages determine what kind of memory the buffer is allocated from and what
+ /// actions the buffer can partake in.
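+ ///
+ /// A hedged sketch of a typical combination: a vertex buffer that is filled by a copy:
+ ///
+ /// ```ignore
+ /// let usage = BufferUsage::VERTEX | BufferUsage::COPY_DST;
+ /// assert!(usage.contains(BufferUsage::VERTEX));
+ /// ```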
+ #[repr(transparent)]
+ #[cfg_attr(feature = "trace", derive(Serialize))]
+ #[cfg_attr(feature = "replay", derive(Deserialize))]
+ pub struct BufferUsage: u32 {
+ /// Allow a buffer to be mapped for reading using [`Buffer::map_async`] + [`Buffer::get_mapped_range`].
+ /// This does not include creating a buffer with [`BufferDescriptor::mapped_at_creation`] set.
+ ///
+ /// If [`Features::MAPPABLE_PRIMARY_BUFFERS`] isn't enabled, the only other usage a buffer
+ /// may have is COPY_DST.
+ const MAP_READ = 1;
+ /// Allow a buffer to be mapped for writing using [`Buffer::map_async`] + [`Buffer::get_mapped_range_mut`].
+ /// This does not include creating a buffer with `mapped_at_creation` set.
+ ///
+ /// If [`Features::MAPPABLE_PRIMARY_BUFFERS`] feature isn't enabled, the only other usage a buffer
+ /// may have is COPY_SRC.
+ const MAP_WRITE = 2;
+ /// Allow a buffer to be the source buffer for a [`CommandEncoder::copy_buffer_to_buffer`] or [`CommandEncoder::copy_buffer_to_texture`]
+ /// operation.
+ const COPY_SRC = 4;
+ /// Allow a buffer to be the destination buffer for a [`CommandEncoder::copy_buffer_to_buffer`], [`CommandEncoder::copy_texture_to_buffer`],
+ /// or [`Queue::write_buffer`] operation.
+ const COPY_DST = 8;
+ /// Allow a buffer to be the index buffer in a draw operation.
+ const INDEX = 16;
+ /// Allow a buffer to be the vertex buffer in a draw operation.
+ const VERTEX = 32;
+ /// Allow a buffer to be a [`BufferBindingType::Uniform`] inside a bind group.
+ const UNIFORM = 64;
+ /// Allow a buffer to be a [`BufferBindingType::Storage`] inside a bind group.
+ const STORAGE = 128;
+ /// Allow a buffer to be the indirect buffer in an indirect draw call.
+ const INDIRECT = 256;
+ }
+}
+
+/// Describes a [`Buffer`].
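+///
+/// A minimal construction sketch (the label type `L` is chosen by the caller; here `Option<&str>`,
+/// values illustrative):
+///
+/// ```ignore
+/// let desc = BufferDescriptor {
+///     label: Some("staging buffer"),
+///     size: 1024,
+///     usage: BufferUsage::MAP_WRITE | BufferUsage::COPY_SRC,
+///     mapped_at_creation: false,
+/// };
+/// ```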
+#[repr(C)]
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+#[cfg_attr(feature = "trace", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub struct BufferDescriptor<L> {
+ /// Debug label of a buffer. This will show up in graphics debuggers for easy identification.
+ pub label: L,
+ /// Size of a buffer.
+ pub size: BufferAddress,
+ /// Usages of a buffer. If the buffer is used in any way that isn't specified here, the operation
+ /// will panic.
+ pub usage: BufferUsage,
+ /// Allows a buffer to be mapped immediately after it is created. The buffer does not need
+ /// [`BufferUsage::MAP_READ`] or [`BufferUsage::MAP_WRITE`]; all buffers may be mapped at creation.
+ pub mapped_at_creation: bool,
+}
+
+impl<L> BufferDescriptor<L> {
+ /// Takes a closure and maps the label of the buffer descriptor into another.
+ pub fn map_label<K>(&self, fun: impl FnOnce(&L) -> K) -> BufferDescriptor<K> {
+ BufferDescriptor {
+ label: fun(&self.label),
+ size: self.size,
+ usage: self.usage,
+ mapped_at_creation: self.mapped_at_creation,
+ }
+ }
+}
+
+/// Describes a [`CommandEncoder`].
+#[repr(C)]
+#[cfg_attr(feature = "trace", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+pub struct CommandEncoderDescriptor<L> {
+ /// Debug label for the command encoder. This will show up in graphics debuggers for easy identification.
+ pub label: L,
+}
+
+impl<L> CommandEncoderDescriptor<L> {
+ /// Takes a closure and maps the label of the command encoder descriptor into another.
+ pub fn map_label<K>(&self, fun: impl FnOnce(&L) -> K) -> CommandEncoderDescriptor<K> {
+ CommandEncoderDescriptor {
+ label: fun(&self.label),
+ }
+ }
+}
+
+impl<T> Default for CommandEncoderDescriptor<Option<T>> {
+ fn default() -> Self {
+ Self { label: None }
+ }
+}
+
+/// Behavior of the presentation engine based on frame rate.
+#[repr(C)]
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
+#[cfg_attr(feature = "trace", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub enum PresentMode {
+ /// The presentation engine does **not** wait for a vertical blanking period and
+ /// the request is presented immediately. This is a low-latency presentation mode,
+ /// but visible tearing may be observed. Will fall back to `Fifo` if unavailable on the
+ /// selected platform and backend. Not optimal for mobile.
+ Immediate = 0,
+ /// The presentation engine waits for the next vertical blanking period to update
+ /// the current image, but frames may be submitted without delay. This is a low-latency
+ /// presentation mode and visible tearing will **not** be observed. Will fall back to `Fifo`
+ /// if unavailable on the selected platform and backend. Not optimal for mobile.
+ Mailbox = 1,
+ /// The presentation engine waits for the next vertical blanking period to update
+ /// the current image. The framerate will be capped at the display refresh rate;
+ /// this corresponds to traditional `VSync`. Tearing cannot be observed. Optimal for mobile.
+ Fifo = 2,
+}
+
+bitflags::bitflags! {
+ /// Different ways that you can use a texture.
+ ///
+ /// The usages determine what kind of memory the texture is allocated from and what
+ /// actions the texture can partake in.
+ #[repr(transparent)]
+ #[cfg_attr(feature = "trace", derive(Serialize))]
+ #[cfg_attr(feature = "replay", derive(Deserialize))]
+ pub struct TextureUsage: u32 {
+ /// Allows a texture to be the source in a [`CommandEncoder::copy_texture_to_buffer`] or
+ /// [`CommandEncoder::copy_texture_to_texture`] operation.
+ const COPY_SRC = 1;
+ /// Allows a texture to be the destination in a [`CommandEncoder::copy_texture_to_buffer`],
+ /// [`CommandEncoder::copy_texture_to_texture`], or [`Queue::write_texture`] operation.
+ const COPY_DST = 2;
+ /// Allows a texture to be a [`BindingType::Texture`] in a bind group.
+ const SAMPLED = 4;
+ /// Allows a texture to be a [`BindingType::StorageTexture`] in a bind group.
+ const STORAGE = 8;
+ /// Allows a texture to be an output attachment of a renderpass.
+ const RENDER_ATTACHMENT = 16;
+ }
+}
+
+/// Describes a [`SwapChain`].
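+///
+/// A minimal sketch (dimensions illustrative; they must match the surface):
+///
+/// ```ignore
+/// let sc_desc = SwapChainDescriptor {
+///     usage: TextureUsage::RENDER_ATTACHMENT,
+///     format: TextureFormat::Bgra8UnormSrgb,
+///     width: 1280,
+///     height: 720,
+///     present_mode: PresentMode::Fifo,
+/// };
+/// ```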
+#[repr(C)]
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+#[cfg_attr(feature = "trace", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub struct SwapChainDescriptor {
+ /// The usage of the swap chain. The only supported usage is `RENDER_ATTACHMENT`.
+ pub usage: TextureUsage,
+ /// The texture format of the swap chain. The only formats that are guaranteed to be
+ /// supported are `Bgra8Unorm` and `Bgra8UnormSrgb`.
+ pub format: TextureFormat,
+ /// Width of the swap chain. Must be the same size as the surface.
+ pub width: u32,
+ /// Height of the swap chain. Must be the same size as the surface.
+ pub height: u32,
+ /// Presentation mode of the swap chain. `Fifo` is the only mode guaranteed to be supported;
+ /// other modes will automatically fall back to `Fifo`.
+ pub present_mode: PresentMode,
+}
+
+/// Status of the received swapchain image.
+#[repr(C)]
+#[derive(Debug)]
+pub enum SwapChainStatus {
+ /// No issues.
+ Good,
+ /// The swap chain is still operational, but it no longer optimally matches the surface.
+ Suboptimal,
+ /// Unable to get the next frame before timing out.
+ Timeout,
+ /// The surface under the swap chain has changed and the swap chain must be recreated.
+ Outdated,
+ /// The surface under the swap chain is lost.
+ Lost,
+}
+
+/// RGBA double precision color.
+///
+/// This is not to be used as a generic color type, only for specific wgpu interfaces.
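+///
+/// A sketch of a custom clear color alongside the provided constants (values illustrative):
+///
+/// ```ignore
+/// let dark_red = Color { r: 0.5, g: 0.0, b: 0.0, a: 1.0 };
+/// let opaque_black = Color::BLACK;
+/// ```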
+#[repr(C)]
+#[derive(Clone, Copy, Debug, Default, PartialEq)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub struct Color {
+ pub r: f64,
+ pub g: f64,
+ pub b: f64,
+ pub a: f64,
+}
+
+impl Color {
+ pub const TRANSPARENT: Self = Self {
+ r: 0.0,
+ g: 0.0,
+ b: 0.0,
+ a: 0.0,
+ };
+ pub const BLACK: Self = Self {
+ r: 0.0,
+ g: 0.0,
+ b: 0.0,
+ a: 1.0,
+ };
+ pub const WHITE: Self = Self {
+ r: 1.0,
+ g: 1.0,
+ b: 1.0,
+ a: 1.0,
+ };
+ pub const RED: Self = Self {
+ r: 1.0,
+ g: 0.0,
+ b: 0.0,
+ a: 1.0,
+ };
+ pub const GREEN: Self = Self {
+ r: 0.0,
+ g: 1.0,
+ b: 0.0,
+ a: 1.0,
+ };
+ pub const BLUE: Self = Self {
+ r: 0.0,
+ g: 0.0,
+ b: 1.0,
+ a: 1.0,
+ };
+}
+
+/// Dimensionality of a texture.
+#[repr(C)]
+#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
+#[cfg_attr(feature = "trace", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub enum TextureDimension {
+ /// 1D texture
+ D1,
+ /// 2D texture
+ D2,
+ /// 3D texture
+ D3,
+}
+
+/// Origin of a copy to/from a texture.
+#[repr(C)]
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
+#[cfg_attr(feature = "trace", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub struct Origin3d {
+ pub x: u32,
+ pub y: u32,
+ pub z: u32,
+}
+
+impl Origin3d {
+ /// Origin of the first texel: (0, 0, 0).
+ pub const ZERO: Self = Self { x: 0, y: 0, z: 0 };
+}
+
+impl Default for Origin3d {
+ fn default() -> Self {
+ Self::ZERO
+ }
+}
+
+/// Extent of a texture related operation.
+#[repr(C)]
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
+#[cfg_attr(feature = "trace", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub struct Extent3d {
+ pub width: u32,
+ pub height: u32,
+ pub depth: u32,
+}
+
+impl Default for Extent3d {
+ fn default() -> Self {
+ Self {
+ width: 1,
+ height: 1,
+ depth: 1,
+ }
+ }
+}
+
+/// Describes a [`Texture`].
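+///
+/// A minimal sketch of a single-mip, non-multisampled 2D texture (values illustrative):
+///
+/// ```ignore
+/// let desc = TextureDescriptor {
+///     label: Some("color target"),
+///     size: Extent3d { width: 256, height: 256, depth: 1 },
+///     mip_level_count: 1,
+///     sample_count: 1,
+///     dimension: TextureDimension::D2,
+///     format: TextureFormat::Rgba8UnormSrgb,
+///     usage: TextureUsage::SAMPLED | TextureUsage::COPY_DST,
+/// };
+/// ```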
+#[repr(C)]
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+#[cfg_attr(feature = "trace", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub struct TextureDescriptor<L> {
+ /// Debug label of the texture. This will show up in graphics debuggers for easy identification.
+ pub label: L,
+ /// Size of the texture. For a regular 1D/2D texture, the unused sizes will be 1. For 2DArray textures, Z is the
+ /// number of 2D textures in that array.
+ pub size: Extent3d,
+ /// Mip count of texture. For a texture with no extra mips, this must be 1.
+ pub mip_level_count: u32,
+ /// Sample count of texture. If this is not 1, texture must have [`BindingType::Texture::multisampled`] set to true.
+ pub sample_count: u32,
+ /// Dimensions of the texture.
+ pub dimension: TextureDimension,
+ /// Format of the texture.
+ pub format: TextureFormat,
+ /// Allowed usages of the texture. If used in other ways, the operation will panic.
+ pub usage: TextureUsage,
+}
+
+impl<L> TextureDescriptor<L> {
+ /// Takes a closure and maps the label of the texture descriptor into another.
+ pub fn map_label<K>(&self, fun: impl FnOnce(&L) -> K) -> TextureDescriptor<K> {
+ TextureDescriptor {
+ label: fun(&self.label),
+ size: self.size,
+ mip_level_count: self.mip_level_count,
+ sample_count: self.sample_count,
+ dimension: self.dimension,
+ format: self.format,
+ usage: self.usage,
+ }
+ }
+}
+
+/// Kind of data the texture holds.
+#[repr(C)]
+#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
+#[cfg_attr(feature = "trace", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub enum TextureAspect {
+ /// Depth, Stencil, and Color.
+ All,
+ /// Stencil.
+ StencilOnly,
+ /// Depth.
+ DepthOnly,
+}
+
+impl Default for TextureAspect {
+ fn default() -> Self {
+ Self::All
+ }
+}
+
+/// How edges should be handled in texture addressing.
+#[repr(C)]
+#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
+#[cfg_attr(feature = "trace", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub enum AddressMode {
+ /// Clamp the value to the edge of the texture
+ ///
+ /// -0.25 -> 0.0
+ /// 1.25 -> 1.0
+ ClampToEdge = 0,
+ /// Repeat the texture in a tiling fashion
+ ///
+ /// -0.25 -> 0.75
+ /// 1.25 -> 0.25
+ Repeat = 1,
+ /// Repeat the texture, mirroring it every repeat
+ ///
+ /// -0.25 -> 0.25
+ /// 1.25 -> 0.75
+ MirrorRepeat = 2,
+ /// Clamp the value to the border of the texture
+ /// Requires feature [`Features::ADDRESS_MODE_CLAMP_TO_BORDER`]
+ ///
+ /// -0.25 -> border
+ /// 1.25 -> border
+ ClampToBorder = 3,
+}
+
+impl Default for AddressMode {
+ fn default() -> Self {
+ Self::ClampToEdge
+ }
+}
+
+/// Texel mixing mode when sampling between texels.
+#[repr(C)]
+#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
+#[cfg_attr(feature = "trace", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub enum FilterMode {
+ /// Nearest neighbor sampling.
+ ///
+ /// This creates a pixelated effect when used as a mag filter.
+ Nearest = 0,
+ /// Linear Interpolation
+ ///
+ /// This makes textures smooth but blurry when used as a mag filter.
+ Linear = 1,
+}
+
+impl Default for FilterMode {
+ fn default() -> Self {
+ Self::Nearest
+ }
+}
+
+/// A range of push constant memory to pass to a shader stage.
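+///
+/// A sketch of a 16-byte range visible to the vertex stage (assuming the `ShaderStage`
+/// bitflags defined elsewhere in this crate; bounds are 4-byte aligned):
+///
+/// ```ignore
+/// let range = PushConstantRange {
+///     stages: ShaderStage::VERTEX,
+///     range: 0..16,
+/// };
+/// ```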
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+#[cfg_attr(feature = "trace", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub struct PushConstantRange {
+ /// Stages this push constant range is visible from. Each stage may be served by at most one range,
+ /// but a single range can serve multiple stages.
+ pub stages: ShaderStage,
+ /// Range in push constant memory to use for the stage. Must be less than [`Limits::max_push_constant_size`].
+ /// Start and end must be aligned to multiples of 4.
+ pub range: Range<u32>,
+}
+
+/// Describes a [`CommandBuffer`].
+#[repr(C)]
+#[derive(Clone, Debug, Default, PartialEq, Eq, Hash)]
+#[cfg_attr(feature = "trace", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub struct CommandBufferDescriptor<L> {
+ /// Debug label of the command buffer. This will show up in graphics debuggers for easy identification.
+ pub label: L,
+}
+
+impl<L> CommandBufferDescriptor<L> {
+ /// Takes a closure and maps the label of the command buffer descriptor into another.
+ pub fn map_label<K>(&self, fun: impl FnOnce(&L) -> K) -> CommandBufferDescriptor<K> {
+ CommandBufferDescriptor {
+ label: fun(&self.label),
+ }
+ }
+}
+
+/// Describes a [`RenderBundle`].
+#[repr(C)]
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+#[cfg_attr(feature = "trace", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub struct RenderBundleDescriptor<L> {
+ /// Debug label of the render bundle encoder. This will show up in graphics debuggers for easy identification.
+ pub label: L,
+}
+
+impl<L> RenderBundleDescriptor<L> {
+ /// Takes a closure and maps the label of the render bundle descriptor into another.
+ pub fn map_label<K>(&self, fun: impl FnOnce(&L) -> K) -> RenderBundleDescriptor<K> {
+ RenderBundleDescriptor {
+ label: fun(&self.label),
+ }
+ }
+}
+
+impl<T> Default for RenderBundleDescriptor<Option<T>> {
+ fn default() -> Self {
+ Self { label: None }
+ }
+}
+
+/// Layout of a texture in a buffer's memory.
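+///
+/// A hedged sketch for a tightly packed 256x256 RGBA8 image: 4 bytes per pixel gives
+/// 1024 bytes per row, which is already a multiple of 256:
+///
+/// ```ignore
+/// let layout = TextureDataLayout {
+///     offset: 0,
+///     bytes_per_row: 256 * 4,
+///     rows_per_image: 256,
+/// };
+/// ```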
+#[repr(C)]
+#[derive(Clone, Debug, Default)]
+#[cfg_attr(feature = "trace", derive(serde::Serialize))]
+#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
+pub struct TextureDataLayout {
+ /// Offset into the buffer at which the texture data starts. Must be a multiple of the texture
+ /// block size. For non-compressed textures, the block size is 1.
+ pub offset: BufferAddress,
+ /// Bytes per "row" of the image. This represents one row of pixels in the x direction. Compressed
+ /// textures include multiple rows of pixels in each "row". May be 0 for 1D texture copies.
+ ///
+ /// Must be a multiple of 256 for [`CommandEncoder::copy_buffer_to_texture`] and [`CommandEncoder::copy_texture_to_buffer`].
+ /// [`Queue::write_texture`] does not have this requirement.
+ ///
+ /// Must be a multiple of the texture block size. For non-compressed textures, the block size is 1.
+ pub bytes_per_row: u32,
+ /// Rows that make up a single "image". Each "image" is one layer in the z direction of a 3D image. May be larger
+ /// than `copy_size.y`.
+ ///
+ /// May be 0 for 2D texture copies.
+ pub rows_per_image: u32,
+}
+
+/// Specific type of a buffer binding.
+///
+/// WebGPU spec: https://gpuweb.github.io/gpuweb/#enumdef-gpubufferbindingtype
+#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)]
+#[cfg_attr(feature = "trace", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub enum BufferBindingType {
+ /// A buffer for uniform values.
+ ///
+ /// Example GLSL syntax:
+ /// ```cpp,ignore
+ /// layout(std140, binding = 0)
+ /// uniform Globals {
+ /// vec2 aUniform;
+ /// vec2 anotherUniform;
+ /// };
+ /// ```
+ Uniform,
+ /// A storage buffer.
+ ///
+ /// Example GLSL syntax:
+ /// ```cpp,ignore
+ /// layout (set=0, binding=0) buffer myStorageBuffer {
+ /// vec4 myElement[];
+ /// };
+ /// ```
+ Storage {
+ /// If `true`, the buffer can only be read in the shader,
+ /// and it must be annotated with `readonly`.
+ ///
+ /// Example GLSL syntax:
+ /// ```cpp,ignore
+ /// layout (set=0, binding=0) readonly buffer myStorageBuffer {
+ /// vec4 myElement[];
+ /// };
+ /// ```
+ read_only: bool,
+ },
+}
+
+impl Default for BufferBindingType {
+ fn default() -> Self {
+ Self::Uniform
+ }
+}
+
+/// Specific type of a sample in a texture binding.
+///
+/// WebGPU spec: https://gpuweb.github.io/gpuweb/#enumdef-gputexturesampletype
+#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)]
+#[cfg_attr(feature = "trace", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub enum TextureSampleType {
+ /// Sampling returns floats.
+ ///
+ /// If `filterable` is false, the texture can't be sampled with
+ /// a filtering sampler.
+ ///
+ /// Example GLSL syntax:
+ /// ```cpp,ignore
+ /// layout(binding = 0)
+ /// uniform texture2D t;
+ /// ```
+ Float { filterable: bool },
+ /// Sampling does the depth reference comparison.
+ ///
+ /// Example GLSL syntax:
+ /// ```cpp,ignore
+ /// layout(binding = 0)
+ /// uniform texture2DShadow t;
+ /// ```
+ Depth,
+ /// Sampling returns signed integers.
+ ///
+ /// Example GLSL syntax:
+ /// ```cpp,ignore
+ /// layout(binding = 0)
+ /// uniform itexture2D t;
+ /// ```
+ Sint,
+ /// Sampling returns unsigned integers.
+ ///
+ /// Example GLSL syntax:
+ /// ```cpp,ignore
+ /// layout(binding = 0)
+ /// uniform utexture2D t;
+ /// ```
+ Uint,
+}
+
+impl Default for TextureSampleType {
+ fn default() -> Self {
+ Self::Float { filterable: true }
+ }
+}
+
+impl From<TextureFormat> for TextureSampleType {
+ fn from(format: TextureFormat) -> Self {
+ match format {
+ TextureFormat::R8Uint
+ | TextureFormat::R16Uint
+ | TextureFormat::Rg8Uint
+ | TextureFormat::R32Uint
+ | TextureFormat::Rg16Uint
+ | TextureFormat::Rgba8Uint
+ | TextureFormat::Rg32Uint
+ | TextureFormat::Rgba16Uint
+ | TextureFormat::Rgba32Uint => Self::Uint,
+
+ TextureFormat::R8Sint
+ | TextureFormat::R16Sint
+ | TextureFormat::Rg8Sint
+ | TextureFormat::R32Sint
+ | TextureFormat::Rg16Sint
+ | TextureFormat::Rgba8Sint
+ | TextureFormat::Rg32Sint
+ | TextureFormat::Rgba16Sint
+ | TextureFormat::Rgba32Sint => Self::Sint,
+
+ TextureFormat::R32Float | TextureFormat::Rg32Float | TextureFormat::Rgba32Float => {
+ Self::Float { filterable: false }
+ }
+
+ TextureFormat::R8Unorm
+ | TextureFormat::R8Snorm
+ | TextureFormat::R16Float
+ | TextureFormat::Rg8Unorm
+ | TextureFormat::Rg8Snorm
+ | TextureFormat::Rg16Float
+ | TextureFormat::Rg11b10Float
+ | TextureFormat::Rgba8Snorm
+ | TextureFormat::Rgba16Float
+ | TextureFormat::Rgba8Unorm
+ | TextureFormat::Rgba8UnormSrgb
+ | TextureFormat::Bgra8Unorm
+ | TextureFormat::Bgra8UnormSrgb
+ | TextureFormat::Rgb10a2Unorm
+ | TextureFormat::Bc1RgbaUnorm
+ | TextureFormat::Bc1RgbaUnormSrgb
+ | TextureFormat::Bc2RgbaUnorm
+ | TextureFormat::Bc2RgbaUnormSrgb
+ | TextureFormat::Bc3RgbaUnorm
+ | TextureFormat::Bc3RgbaUnormSrgb
+ | TextureFormat::Bc4RUnorm
+ | TextureFormat::Bc4RSnorm
+ | TextureFormat::Bc5RgUnorm
+ | TextureFormat::Bc5RgSnorm
+ | TextureFormat::Bc6hRgbSfloat
+ | TextureFormat::Bc6hRgbUfloat
+ | TextureFormat::Bc7RgbaUnorm
+ | TextureFormat::Bc7RgbaUnormSrgb => Self::Float { filterable: true },
+
+ TextureFormat::Depth32Float
+ | TextureFormat::Depth24Plus
+ | TextureFormat::Depth24PlusStencil8 => Self::Depth,
+ }
+ }
+}
+
+/// Specific access mode of a storage texture binding.
+///
+/// WebGPU spec: https://gpuweb.github.io/gpuweb/#enumdef-gpustoragetextureaccess
+#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)]
+#[cfg_attr(feature = "trace", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub enum StorageTextureAccess {
+ /// The texture can only be read in the shader and it must be annotated with `readonly`.
+ ///
+ /// Example GLSL syntax:
+ /// ```cpp,ignore
+ /// layout(set=0, binding=0, r32f) readonly uniform image2D myStorageImage;
+ /// ```
+ ReadOnly,
+ /// The texture can only be written in the shader and it must be annotated with `writeonly`.
+ ///
+ /// Example GLSL syntax:
+ /// ```cpp,ignore
+ /// layout(set=0, binding=0, r32f) writeonly uniform image2D myStorageImage;
+ /// ```
+ WriteOnly,
+}
+
+/// Specific type of a binding.
+///
+/// WebGPU spec: the enum of
+/// - https://gpuweb.github.io/gpuweb/#dictdef-gpubufferbindinglayout
+/// - https://gpuweb.github.io/gpuweb/#dictdef-gpusamplerbindinglayout
+/// - https://gpuweb.github.io/gpuweb/#dictdef-gputexturebindinglayout
+/// - https://gpuweb.github.io/gpuweb/#dictdef-gpustoragetexturebindinglayout
+#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)]
+#[cfg_attr(feature = "trace", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub enum BindingType {
+ /// A buffer binding.
+ Buffer {
+ /// Sub-type of the buffer binding.
+ ty: BufferBindingType,
+ /// Indicates that the binding has a dynamic offset.
+ /// One offset must be passed to [`RenderPass::set_bind_group`] for each dynamic binding in increasing order of binding number.
+ #[cfg_attr(any(feature = "replay", feature = "trace"), serde(default))]
+ has_dynamic_offset: bool,
+ /// Minimum size of the corresponding `BufferBinding` required to match this entry.
+ /// When the pipeline is created, the size has to cover at least the corresponding structure in the shader
+ /// plus one element of the unbound array, which can only be the last member of the structure.
+ /// If `None`, the check is performed at draw-call time instead of at pipeline and bind-group creation.
+ #[cfg_attr(any(feature = "replay", feature = "trace"), serde(default))]
+ min_binding_size: Option<BufferSize>,
+ },
+ /// A sampler that can be used to sample a texture.
+ ///
+ /// Example GLSL syntax:
+ /// ```cpp,ignore
+ /// layout(binding = 0)
+ /// uniform sampler s;
+ /// ```
+ Sampler {
+ /// The sampling result is produced based on more than a single color sample from a texture,
+ /// e.g. when bilinear interpolation is enabled.
+ ///
+ /// A filtering sampler can only be used with a filterable texture.
+ filtering: bool,
+ /// Use as a comparison sampler instead of a normal sampler.
+ /// For more info take a look at the analogous functionality in OpenGL: https://www.khronos.org/opengl/wiki/Sampler_Object#Comparison_mode.
+ comparison: bool,
+ },
+ /// A texture binding.
+ ///
+ /// Example GLSL syntax:
+ /// ```cpp,ignore
+ /// layout(binding = 0)
+ /// uniform texture2D t;
+ /// ```
+ Texture {
+ /// Sample type of the texture binding.
+ sample_type: TextureSampleType,
+ /// Dimension of the texture view that is going to be sampled.
+ view_dimension: TextureViewDimension,
+ /// True if the texture has a sample count greater than 1. If this is true,
+ /// the texture must be read from shaders with a multisampled texture type
+ /// (e.g. `texture2DMS` in GLSL), matching the bound view's dimension.
+ multisampled: bool,
+ },
+ /// A storage texture.
+ ///
+ /// Example GLSL syntax:
+ /// ```cpp,ignore
+ /// layout(set=0, binding=0, r32f) uniform image2D myStorageImage;
+ /// ```
+ /// Note that the texture format must be specified in the shader as well.
+ /// A list of valid formats can be found in the specification here: https://www.khronos.org/registry/OpenGL/specs/gl/GLSLangSpec.4.60.html#layout-qualifiers
+ StorageTexture {
+ /// Allowed access to this texture.
+ access: StorageTextureAccess,
+ /// Format of the texture.
+ format: TextureFormat,
+ /// Dimension of the texture view that is going to be sampled.
+ view_dimension: TextureViewDimension,
+ },
+}
+
+impl BindingType {
+ /// Returns true for buffer bindings whose `has_dynamic_offset` is set.
+ pub fn has_dynamic_offset(&self) -> bool {
+ match *self {
+ Self::Buffer {
+ has_dynamic_offset, ..
+ } => has_dynamic_offset,
+ _ => false,
+ }
+ }
+}
+
+/// Describes a single binding inside a bind group.
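+///
+/// A minimal sketch of a uniform-buffer entry at binding 0, visible to the vertex and
+/// fragment stages (assuming the `ShaderStage` bitflags defined elsewhere in this crate):
+///
+/// ```ignore
+/// let entry = BindGroupLayoutEntry {
+///     binding: 0,
+///     visibility: ShaderStage::VERTEX | ShaderStage::FRAGMENT,
+///     ty: BindingType::Buffer {
+///         ty: BufferBindingType::Uniform,
+///         has_dynamic_offset: false,
+///         min_binding_size: None,
+///     },
+///     count: None,
+/// };
+/// ```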
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+#[cfg_attr(feature = "trace", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub struct BindGroupLayoutEntry {
+ /// Binding index. Must match the shader index and be unique inside a BindGroupLayout. A binding
+ /// with index 1 would be described as `layout(set = 0, binding = 1) uniform` in shaders.
+ pub binding: u32,
+ /// Which shader stages can see this binding.
+ pub visibility: ShaderStage,
+ /// The type of the binding
+ pub ty: BindingType,
+ /// If this value is Some, indicates this entry is an array. Array size must be 1 or greater.
+ ///
+ /// If this value is Some and `ty` is `BindingType::Texture`, [`Features::SAMPLED_TEXTURE_BINDING_ARRAY`] must be supported.
+ ///
+ /// If this value is Some and `ty` is any other variant, bind group creation will fail.
+ #[cfg_attr(any(feature = "replay", feature = "trace"), serde(default))]
+ pub count: Option<NonZeroU32>,
+}
+
+/// View of a buffer which can be used to copy to/from a texture.
+#[repr(C)]
+#[derive(Clone, Debug)]
+#[cfg_attr(feature = "trace", derive(serde::Serialize))]
+#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
+pub struct BufferCopyView<B> {
+ /// The buffer to be copied to/from.
+ pub buffer: B,
+ /// The layout of the texture data in this buffer.
+ pub layout: TextureDataLayout,
+}
+
+/// View of a texture which can be used to copy to/from a buffer/texture.
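+///
+/// A sketch targeting the base mip level of some texture handle `texture`
+/// (the concrete texture type `T` is generic; `texture` is a hypothetical handle):
+///
+/// ```ignore
+/// let view = TextureCopyView {
+///     texture: &texture,
+///     mip_level: 0,
+///     origin: Origin3d::ZERO,
+/// };
+/// ```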
+#[repr(C)]
+#[derive(Clone, Debug)]
+#[cfg_attr(feature = "trace", derive(serde::Serialize))]
+#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
+pub struct TextureCopyView<T> {
+ /// The texture to be copied to/from.
+ pub texture: T,
+ /// The target mip level of the texture.
+ pub mip_level: u32,
+ /// The base texel of the texture in the selected `mip_level`.
+ #[cfg_attr(any(feature = "replay", feature = "trace"), serde(default))]
+ pub origin: Origin3d,
+}
+
+/// Color variation to use when sampler addressing mode is [`AddressMode::ClampToBorder`]
+#[repr(C)]
+#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
+#[cfg_attr(feature = "trace", derive(serde::Serialize))]
+#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
+pub enum SamplerBorderColor {
+ TransparentBlack,
+ OpaqueBlack,
+ OpaqueWhite,
+}