summaryrefslogtreecommitdiffstats
path: root/third_party/rust/gpu-allocator
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-19 00:47:55 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-19 00:47:55 +0000
commit26a029d407be480d791972afb5975cf62c9360a6 (patch)
treef435a8308119effd964b339f76abb83a57c29483 /third_party/rust/gpu-allocator
parentInitial commit. (diff)
downloadfirefox-26a029d407be480d791972afb5975cf62c9360a6.tar.xz
firefox-26a029d407be480d791972afb5975cf62c9360a6.zip
Adding upstream version 124.0.1.upstream/124.0.1
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'third_party/rust/gpu-allocator')
-rw-r--r--third_party/rust/gpu-allocator/.cargo-checksum.json1
-rw-r--r--third_party/rust/gpu-allocator/Cargo.lock681
-rw-r--r--third_party/rust/gpu-allocator/Cargo.toml155
-rw-r--r--third_party/rust/gpu-allocator/LICENSE-APACHE201
-rw-r--r--third_party/rust/gpu-allocator/LICENSE-MIT7
-rw-r--r--third_party/rust/gpu-allocator/README.md155
-rw-r--r--third_party/rust/gpu-allocator/examples/d3d12-buffer-winrs.rs238
-rw-r--r--third_party/rust/gpu-allocator/examples/d3d12-buffer.rs284
-rw-r--r--third_party/rust/gpu-allocator/examples/vulkan-buffer.rs202
-rw-r--r--third_party/rust/gpu-allocator/src/allocator/dedicated_block_allocator/mod.rs136
-rw-r--r--third_party/rust/gpu-allocator/src/allocator/dedicated_block_allocator/visualizer.rs8
-rw-r--r--third_party/rust/gpu-allocator/src/allocator/free_list_allocator/mod.rs419
-rw-r--r--third_party/rust/gpu-allocator/src/allocator/free_list_allocator/visualizer.rs25
-rw-r--r--third_party/rust/gpu-allocator/src/allocator/mod.rs109
-rw-r--r--third_party/rust/gpu-allocator/src/d3d12/mod.rs1072
-rw-r--r--third_party/rust/gpu-allocator/src/d3d12/visualizer.rs252
-rw-r--r--third_party/rust/gpu-allocator/src/lib.rs277
-rw-r--r--third_party/rust/gpu-allocator/src/result.rs21
-rw-r--r--third_party/rust/gpu-allocator/src/visualizer/allocation_reports.rs138
-rw-r--r--third_party/rust/gpu-allocator/src/visualizer/memory_chunks.rs134
-rw-r--r--third_party/rust/gpu-allocator/src/visualizer/mod.rs56
-rw-r--r--third_party/rust/gpu-allocator/src/vulkan/mod.rs993
-rw-r--r--third_party/rust/gpu-allocator/src/vulkan/visualizer.rs226
23 files changed, 5790 insertions, 0 deletions
diff --git a/third_party/rust/gpu-allocator/.cargo-checksum.json b/third_party/rust/gpu-allocator/.cargo-checksum.json
new file mode 100644
index 0000000000..0294f6ef79
--- /dev/null
+++ b/third_party/rust/gpu-allocator/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"Cargo.lock":"22fb0e1bc7319a72fcade655a344b017ca726271634362f1853ceb9f4f62d676","Cargo.toml":"93af1b7d358dfe27714e78bd7eb19a9c6aec79e509d96af6c7716b6d5e1c2802","LICENSE-APACHE":"0178e21322b0e88aa3aeb3146f6a9611bc1f8df6d98bdfb34be28b9dd56a8107","LICENSE-MIT":"ad41be6cc6538b29b9346648f41432b5e460bad6be073b5eeaa41320ea2921dc","README.md":"130475ab08599e30a1d783453e4678e9140c5b93923a7181864c8dd74835237e","examples/d3d12-buffer-winrs.rs":"c2c66ace6e41d302f244b95dbe38f2272ac59357a1c717a767dac3df89ea4718","examples/d3d12-buffer.rs":"abd9acc233a45c44f421dcaa4109f5a408de5335979654b1acbb55c73197ed5b","examples/vulkan-buffer.rs":"8097b361074302cf294d09a309e0cb8bfa8fbc586096f77a1cca9f7ba1f735dc","src/allocator/dedicated_block_allocator/mod.rs":"184ab11fcb52d3438586d10909277b54f1dbec9aeafb29ea338fd4df0e025e76","src/allocator/dedicated_block_allocator/visualizer.rs":"5b9019dd73ebe7bb9e9d103c48368014b73cdd4ae7f36a706ae047919f56fac6","src/allocator/free_list_allocator/mod.rs":"cab1d7e451c08978e67163ad45006cf8fdf2bdbb40e205c04bfec17490ca605f","src/allocator/free_list_allocator/visualizer.rs":"46214d07285d72a0a29c8d7e76322243853eba7d25d87ebfbb17c75e7815d07f","src/allocator/mod.rs":"09a560d3f4b0ba174bceea6d71bc201afcbea2b8170430c21365cbea8ca315f9","src/d3d12/mod.rs":"ae42a4740e7b54f1fef21f1f3e23dc2d315fd5a6129855fb53a76b7f7d455cec","src/d3d12/visualizer.rs":"955c587a0d676e7e67fe02a3c3d4a59e8857e323c316ab7c352ef8d1a247193d","src/lib.rs":"2751d2365f8223d800fb8242f2c66ef949f1927c6a6770b368764d3322c6f878","src/result.rs":"ce944b6af3e0e83920f25b23ddcf61f95ee5a815f529443415b899d1d3d11b76","src/visualizer/allocation_reports.rs":"571528b72c2a7946dae4c5b849efe88fd068786626b7d5e5e2a2cd5703c99c66","src/visualizer/memory_chunks.rs":"7d07c01f1471d25ff5702b53f5ccda09d6135888f6a77d1eaf06c541b4cafd5e","src/visualizer/mod.rs":"7d56c956abba968400aa6794e399db4b7ec10135a948beef21ea13ba3bd1fd9e","src/vulkan/mod.rs":"245492ce89f82e2846638efd7a1a9e98a5a98b9d947474a7b74612462ecc4a48","src/vulkan/v
isualizer.rs":"7d6c113c70fa36f1a85a989d7020bd8e9814584f11c43bc151606be643a07c6c"},"package":"6f56f6318968d03c18e1bcf4857ff88c61157e9da8e47c5f29055d60e1228884"} \ No newline at end of file
diff --git a/third_party/rust/gpu-allocator/Cargo.lock b/third_party/rust/gpu-allocator/Cargo.lock
new file mode 100644
index 0000000000..2cac969ec6
--- /dev/null
+++ b/third_party/rust/gpu-allocator/Cargo.lock
@@ -0,0 +1,681 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3
+
+[[package]]
+name = "ab_glyph"
+version = "0.2.23"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "80179d7dd5d7e8c285d67c4a1e652972a92de7475beddfb92028c76463b13225"
+dependencies = [
+ "ab_glyph_rasterizer",
+ "owned_ttf_parser",
+]
+
+[[package]]
+name = "ab_glyph_rasterizer"
+version = "0.1.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c71b1793ee61086797f5c80b6efa2b8ffa6d5dd703f118545808a7f2e27f7046"
+
+[[package]]
+name = "accesskit"
+version = "0.12.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6cb10ed32c63247e4e39a8f42e8e30fb9442fbf7878c8e4a9849e7e381619bea"
+dependencies = [
+ "enumn",
+ "serde",
+]
+
+[[package]]
+name = "ahash"
+version = "0.8.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "77c3a9648d43b9cd48db467b3f87fdd6e146bcc88ab0180006cef2179fe11d01"
+dependencies = [
+ "cfg-if",
+ "once_cell",
+ "serde",
+ "version_check",
+ "zerocopy",
+]
+
+[[package]]
+name = "aho-corasick"
+version = "1.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b2969dcb958b36655471fc61f7e416fa76033bdd4bfed0678d8fee1e2d07a1f0"
+dependencies = [
+ "memchr",
+]
+
+[[package]]
+name = "ash"
+version = "0.37.3+1.3.251"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "39e9c3835d686b0a6084ab4234fcd1b07dbf6e4767dce60874b12356a25ecd4a"
+dependencies = [
+ "libloading",
+]
+
+[[package]]
+name = "autocfg"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
+
+[[package]]
+name = "bitflags"
+version = "1.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
+
+[[package]]
+name = "bitflags"
+version = "2.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07"
+
+[[package]]
+name = "cfg-if"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
+
+[[package]]
+name = "ecolor"
+version = "0.24.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4b7637fc2e74d17e52931bac90ff4fc061ac776ada9c7fa272f24cdca5991972"
+dependencies = [
+ "serde",
+]
+
+[[package]]
+name = "egui"
+version = "0.24.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c55bcb864b764eb889515a38b8924757657a250738ad15126637ee2df291ee6b"
+dependencies = [
+ "accesskit",
+ "ahash",
+ "epaint",
+ "nohash-hasher",
+ "serde",
+]
+
+[[package]]
+name = "egui_extras"
+version = "0.24.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "97624eaf17a16058265d3a3e712e167798655baf7c8f693de25be75cdd6c57b5"
+dependencies = [
+ "egui",
+ "enum-map",
+ "log",
+ "serde",
+]
+
+[[package]]
+name = "emath"
+version = "0.24.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a045c6c0b44b35e98513fc1e9d183ab42881ac27caccb9fa345465601f56cce4"
+dependencies = [
+ "serde",
+]
+
+[[package]]
+name = "enum-map"
+version = "2.7.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6866f3bfdf8207509a033af1a75a7b08abda06bbaaeae6669323fd5a097df2e9"
+dependencies = [
+ "enum-map-derive",
+ "serde",
+]
+
+[[package]]
+name = "enum-map-derive"
+version = "0.17.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f282cfdfe92516eb26c2af8589c274c7c17681f5ecc03c18255fe741c6aa64eb"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "enumn"
+version = "0.1.13"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6fd000fd6988e73bbe993ea3db9b1aa64906ab88766d654973924340c8cddb42"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "env_logger"
+version = "0.10.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "95b3f3e67048839cb0d0781f445682a35113da7121f7c949db0e2be96a4fbece"
+dependencies = [
+ "humantime",
+ "is-terminal",
+ "log",
+ "regex",
+ "termcolor",
+]
+
+[[package]]
+name = "epaint"
+version = "0.24.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7d1b9e000d21bab9b535ce78f9f7745be28b3f777f6c7223936561c5c7fefab8"
+dependencies = [
+ "ab_glyph",
+ "ahash",
+ "ecolor",
+ "emath",
+ "nohash-hasher",
+ "parking_lot",
+ "serde",
+]
+
+[[package]]
+name = "errno"
+version = "0.3.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245"
+dependencies = [
+ "libc",
+ "windows-sys",
+]
+
+[[package]]
+name = "gpu-allocator"
+version = "0.25.0"
+dependencies = [
+ "ash",
+ "egui",
+ "egui_extras",
+ "env_logger",
+ "log",
+ "presser",
+ "thiserror",
+ "winapi",
+ "windows",
+]
+
+[[package]]
+name = "hermit-abi"
+version = "0.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d77f7ec81a6d05a3abb01ab6eb7590f6083d08449fe5a1c8b1e620283546ccb7"
+
+[[package]]
+name = "humantime"
+version = "2.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4"
+
+[[package]]
+name = "is-terminal"
+version = "0.4.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0bad00257d07be169d870ab665980b06cdb366d792ad690bf2e76876dc503455"
+dependencies = [
+ "hermit-abi",
+ "rustix",
+ "windows-sys",
+]
+
+[[package]]
+name = "libc"
+version = "0.2.151"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "302d7ab3130588088d277783b1e2d2e10c9e9e4a16dd9050e6ec93fb3e7048f4"
+
+[[package]]
+name = "libloading"
+version = "0.7.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b67380fd3b2fbe7527a606e18729d21c6f3951633d0500574c4dc22d2d638b9f"
+dependencies = [
+ "cfg-if",
+ "winapi",
+]
+
+[[package]]
+name = "linux-raw-sys"
+version = "0.4.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c4cd1a83af159aa67994778be9070f0ae1bd732942279cabb14f86f986a21456"
+
+[[package]]
+name = "lock_api"
+version = "0.4.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3c168f8615b12bc01f9c17e2eb0cc07dcae1940121185446edc3744920e8ef45"
+dependencies = [
+ "autocfg",
+ "scopeguard",
+]
+
+[[package]]
+name = "log"
+version = "0.4.20"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f"
+
+[[package]]
+name = "memchr"
+version = "2.7.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "523dc4f511e55ab87b694dc30d0f820d60906ef06413f93d4d7a1385599cc149"
+
+[[package]]
+name = "nohash-hasher"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2bf50223579dc7cdcfb3bfcacf7069ff68243f8c363f62ffa99cf000a6b9c451"
+
+[[package]]
+name = "once_cell"
+version = "1.19.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92"
+
+[[package]]
+name = "owned_ttf_parser"
+version = "0.20.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d4586edfe4c648c71797a74c84bacb32b52b212eff5dfe2bb9f2c599844023e7"
+dependencies = [
+ "ttf-parser",
+]
+
+[[package]]
+name = "parking_lot"
+version = "0.12.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f"
+dependencies = [
+ "lock_api",
+ "parking_lot_core",
+]
+
+[[package]]
+name = "parking_lot_core"
+version = "0.9.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4c42a9226546d68acdd9c0a280d17ce19bfe27a46bf68784e4066115788d008e"
+dependencies = [
+ "cfg-if",
+ "libc",
+ "redox_syscall",
+ "smallvec",
+ "windows-targets 0.48.5",
+]
+
+[[package]]
+name = "presser"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e8cf8e6a8aa66ce33f63993ffc4ea4271eb5b0530a9002db8455ea6050c77bfa"
+
+[[package]]
+name = "proc-macro2"
+version = "1.0.75"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "907a61bd0f64c2f29cd1cf1dc34d05176426a3f504a78010f08416ddb7b13708"
+dependencies = [
+ "unicode-ident",
+]
+
+[[package]]
+name = "quote"
+version = "1.0.35"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef"
+dependencies = [
+ "proc-macro2",
+]
+
+[[package]]
+name = "redox_syscall"
+version = "0.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa"
+dependencies = [
+ "bitflags 1.3.2",
+]
+
+[[package]]
+name = "regex"
+version = "1.10.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "380b951a9c5e80ddfd6136919eef32310721aa4aacd4889a8d39124b026ab343"
+dependencies = [
+ "aho-corasick",
+ "memchr",
+ "regex-automata",
+ "regex-syntax",
+]
+
+[[package]]
+name = "regex-automata"
+version = "0.4.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5f804c7828047e88b2d32e2d7fe5a105da8ee3264f01902f796c8e067dc2483f"
+dependencies = [
+ "aho-corasick",
+ "memchr",
+ "regex-syntax",
+]
+
+[[package]]
+name = "regex-syntax"
+version = "0.8.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f"
+
+[[package]]
+name = "rustix"
+version = "0.38.28"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "72e572a5e8ca657d7366229cdde4bd14c4eb5499a9573d4d366fe1b599daa316"
+dependencies = [
+ "bitflags 2.4.1",
+ "errno",
+ "libc",
+ "linux-raw-sys",
+ "windows-sys",
+]
+
+[[package]]
+name = "scopeguard"
+version = "1.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
+
+[[package]]
+name = "serde"
+version = "1.0.194"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0b114498256798c94a0689e1a15fec6005dee8ac1f41de56404b67afc2a4b773"
+dependencies = [
+ "serde_derive",
+]
+
+[[package]]
+name = "serde_derive"
+version = "1.0.194"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a3385e45322e8f9931410f01b3031ec534c3947d0e94c18049af4d9f9907d4e0"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "smallvec"
+version = "1.11.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4dccd0940a2dcdf68d092b8cbab7dc0ad8fa938bf95787e1b916b0e3d0e8e970"
+
+[[package]]
+name = "syn"
+version = "2.0.47"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1726efe18f42ae774cc644f330953a5e7b3c3003d3edcecf18850fe9d4dd9afb"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-ident",
+]
+
+[[package]]
+name = "termcolor"
+version = "1.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ff1bc3d3f05aff0403e8ac0d92ced918ec05b666a43f83297ccef5bea8a3d449"
+dependencies = [
+ "winapi-util",
+]
+
+[[package]]
+name = "thiserror"
+version = "1.0.56"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d54378c645627613241d077a3a79db965db602882668f9136ac42af9ecb730ad"
+dependencies = [
+ "thiserror-impl",
+]
+
+[[package]]
+name = "thiserror-impl"
+version = "1.0.56"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fa0faa943b50f3db30a20aa7e265dbc66076993efed8463e8de414e5d06d3471"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "ttf-parser"
+version = "0.20.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "17f77d76d837a7830fe1d4f12b7b4ba4192c1888001c7164257e4bc6d21d96b4"
+
+[[package]]
+name = "unicode-ident"
+version = "1.0.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b"
+
+[[package]]
+name = "version_check"
+version = "0.9.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"
+
+[[package]]
+name = "winapi"
+version = "0.3.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
+dependencies = [
+ "winapi-i686-pc-windows-gnu",
+ "winapi-x86_64-pc-windows-gnu",
+]
+
+[[package]]
+name = "winapi-i686-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
+
+[[package]]
+name = "winapi-util"
+version = "0.1.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f29e6f9198ba0d26b4c9f07dbe6f9ed633e1f3d5b8b414090084349e46a52596"
+dependencies = [
+ "winapi",
+]
+
+[[package]]
+name = "winapi-x86_64-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
+
+[[package]]
+name = "windows"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e48a53791691ab099e5e2ad123536d0fff50652600abaf43bbf952894110d0be"
+dependencies = [
+ "windows-core",
+ "windows-targets 0.52.0",
+]
+
+[[package]]
+name = "windows-core"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9"
+dependencies = [
+ "windows-targets 0.52.0",
+]
+
+[[package]]
+name = "windows-sys"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d"
+dependencies = [
+ "windows-targets 0.52.0",
+]
+
+[[package]]
+name = "windows-targets"
+version = "0.48.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c"
+dependencies = [
+ "windows_aarch64_gnullvm 0.48.5",
+ "windows_aarch64_msvc 0.48.5",
+ "windows_i686_gnu 0.48.5",
+ "windows_i686_msvc 0.48.5",
+ "windows_x86_64_gnu 0.48.5",
+ "windows_x86_64_gnullvm 0.48.5",
+ "windows_x86_64_msvc 0.48.5",
+]
+
+[[package]]
+name = "windows-targets"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd"
+dependencies = [
+ "windows_aarch64_gnullvm 0.52.0",
+ "windows_aarch64_msvc 0.52.0",
+ "windows_i686_gnu 0.52.0",
+ "windows_i686_msvc 0.52.0",
+ "windows_x86_64_gnu 0.52.0",
+ "windows_x86_64_gnullvm 0.52.0",
+ "windows_x86_64_msvc 0.52.0",
+]
+
+[[package]]
+name = "windows_aarch64_gnullvm"
+version = "0.48.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8"
+
+[[package]]
+name = "windows_aarch64_gnullvm"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea"
+
+[[package]]
+name = "windows_aarch64_msvc"
+version = "0.48.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc"
+
+[[package]]
+name = "windows_aarch64_msvc"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef"
+
+[[package]]
+name = "windows_i686_gnu"
+version = "0.48.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e"
+
+[[package]]
+name = "windows_i686_gnu"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313"
+
+[[package]]
+name = "windows_i686_msvc"
+version = "0.48.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406"
+
+[[package]]
+name = "windows_i686_msvc"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a"
+
+[[package]]
+name = "windows_x86_64_gnu"
+version = "0.48.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e"
+
+[[package]]
+name = "windows_x86_64_gnu"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd"
+
+[[package]]
+name = "windows_x86_64_gnullvm"
+version = "0.48.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc"
+
+[[package]]
+name = "windows_x86_64_gnullvm"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e"
+
+[[package]]
+name = "windows_x86_64_msvc"
+version = "0.48.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538"
+
+[[package]]
+name = "windows_x86_64_msvc"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04"
+
+[[package]]
+name = "zerocopy"
+version = "0.7.32"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "74d4d3961e53fa4c9a25a8637fc2bfaf2595b3d3ae34875568a5cf64787716be"
+dependencies = [
+ "zerocopy-derive",
+]
+
+[[package]]
+name = "zerocopy-derive"
+version = "0.7.32"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
diff --git a/third_party/rust/gpu-allocator/Cargo.toml b/third_party/rust/gpu-allocator/Cargo.toml
new file mode 100644
index 0000000000..d1b47bd7fe
--- /dev/null
+++ b/third_party/rust/gpu-allocator/Cargo.toml
@@ -0,0 +1,155 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies.
+#
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
+
+[package]
+edition = "2021"
+rust-version = "1.65"
+name = "gpu-allocator"
+version = "0.25.0"
+authors = ["Traverse Research <opensource@traverseresearch.nl>"]
+include = [
+ "/README.md",
+ "/LICENSE-*",
+ "/src",
+ "/examples",
+]
+description = "Memory allocator for GPU memory in Vulkan and DirectX 12"
+homepage = "https://github.com/Traverse-Research/gpu-allocator"
+documentation = "https://docs.rs/gpu-allocator/"
+readme = "README.md"
+keywords = [
+ "vulkan",
+ "memory",
+ "allocator",
+]
+categories = [
+ "rendering",
+ "rendering::graphics-api",
+]
+license = "MIT OR Apache-2.0"
+repository = "https://github.com/Traverse-Research/gpu-allocator"
+
+[package.metadata.docs.rs]
+all-features = true
+
+[[example]]
+name = "vulkan-buffer"
+required-features = [
+ "vulkan",
+ "ash/loaded",
+]
+
+[[example]]
+name = "d3d12-buffer"
+required-features = [
+ "d3d12",
+ "public-winapi",
+]
+
+[[example]]
+name = "d3d12-buffer-winrs"
+required-features = ["d3d12"]
+
+[dependencies.ash]
+version = ">=0.34, <=0.37"
+features = ["debug"]
+optional = true
+default-features = false
+
+[dependencies.egui]
+version = "0.24"
+optional = true
+default-features = false
+
+[dependencies.egui_extras]
+version = "0.24"
+optional = true
+default-features = false
+
+[dependencies.log]
+version = "0.4"
+
+[dependencies.presser]
+version = "0.3"
+
+[dependencies.thiserror]
+version = "1.0"
+
+[dev-dependencies.ash]
+version = ">=0.34,<=0.37"
+features = [
+ "debug",
+ "loaded",
+]
+default-features = false
+
+[dev-dependencies.env_logger]
+version = "0.10"
+
+[features]
+d3d12 = ["windows"]
+default = [
+ "d3d12",
+ "vulkan",
+]
+public-winapi = ["dep:winapi"]
+visualizer = [
+ "egui",
+ "egui_extras",
+]
+vulkan = ["ash"]
+
+[target."cfg(windows)".dependencies.winapi]
+version = "0.3.9"
+features = [
+ "d3d12",
+ "winerror",
+ "impl-default",
+ "impl-debug",
+]
+optional = true
+
+[target."cfg(windows)".dependencies.windows]
+version = ">=0.51,<=0.52"
+features = [
+ "Win32_Foundation",
+ "Win32_Graphics",
+ "Win32_Graphics_Direct3D",
+ "Win32_Graphics_Direct3D12",
+ "Win32_Graphics_Dxgi",
+ "Win32_Graphics_Dxgi_Common",
+]
+optional = true
+
+[target."cfg(windows)".dev-dependencies.winapi]
+version = "0.3.9"
+features = [
+ "d3d12",
+ "d3d12sdklayers",
+ "dxgi1_6",
+ "winerror",
+ "impl-default",
+ "impl-debug",
+ "winuser",
+ "windowsx",
+ "libloaderapi",
+]
+
+[target."cfg(windows)".dev-dependencies.windows]
+version = ">=0.51,<=0.52"
+features = [
+ "Win32_Foundation",
+ "Win32_Graphics",
+ "Win32_Graphics_Direct3D",
+ "Win32_Graphics_Direct3D12",
+ "Win32_Graphics_Dxgi",
+ "Win32_Graphics_Dxgi_Common",
+]
diff --git a/third_party/rust/gpu-allocator/LICENSE-APACHE b/third_party/rust/gpu-allocator/LICENSE-APACHE
new file mode 100644
index 0000000000..6a67d1901c
--- /dev/null
+++ b/third_party/rust/gpu-allocator/LICENSE-APACHE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+Copyright 2021 Traverse Research B.V.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/third_party/rust/gpu-allocator/LICENSE-MIT b/third_party/rust/gpu-allocator/LICENSE-MIT
new file mode 100644
index 0000000000..01fdf8e523
--- /dev/null
+++ b/third_party/rust/gpu-allocator/LICENSE-MIT
@@ -0,0 +1,7 @@
+Copyright (c) 2021 Traverse Research B.V.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file
diff --git a/third_party/rust/gpu-allocator/README.md b/third_party/rust/gpu-allocator/README.md
new file mode 100644
index 0000000000..36182b1d09
--- /dev/null
+++ b/third_party/rust/gpu-allocator/README.md
@@ -0,0 +1,155 @@
+# 📒 gpu-allocator
+
+[![Actions Status](https://img.shields.io/github/actions/workflow/status/Traverse-Research/gpu-allocator/ci.yml?branch=main&logo=github)](https://github.com/Traverse-Research/gpu-allocator/actions)
+[![Latest version](https://img.shields.io/crates/v/gpu-allocator.svg?logo=rust)](https://crates.io/crates/gpu-allocator)
+[![Docs](https://img.shields.io/docsrs/gpu-allocator?logo=docs.rs)](https://docs.rs/gpu-allocator/)
+[![LICENSE](https://img.shields.io/badge/license-MIT-blue.svg)](LICENSE-MIT)
+[![LICENSE](https://img.shields.io/badge/license-apache-blue.svg?logo=apache)](LICENSE-APACHE)
+[![Contributor Covenant](https://img.shields.io/badge/contributor%20covenant-v1.4%20adopted-ff69b4.svg)](../main/CODE_OF_CONDUCT.md)
+[![MSRV](https://img.shields.io/badge/rustc-1.65.0+-ab6000.svg)](https://blog.rust-lang.org/2022/11/03/Rust-1.65.0.html)
+
+[![Banner](banner.png)](https://traverseresearch.nl)
+
+```toml
+[dependencies]
+gpu-allocator = "0.25.0"
+```
+
+![Visualizer](visualizer.png)
+
+This crate provides a memory allocator for Vulkan and DirectX 12, fully written in Rust.
+
+## [Windows-rs] and [winapi]
+
+`gpu-allocator` recently migrated from [winapi] to [windows-rs] but still provides convenient helpers to convert to and from [winapi] types, enabled when compiling with the `public-winapi` crate feature.
+
+[Windows-rs]: https://github.com/microsoft/windows-rs
+[winapi]: https://github.com/retep998/winapi-rs
+
+## Setting up the Vulkan memory allocator
+
+```rust
+use gpu_allocator::vulkan::*;
+
+let mut allocator = Allocator::new(&AllocatorCreateDesc {
+ instance,
+ device,
+ physical_device,
+ debug_settings: Default::default(),
+ buffer_device_address: true, // Ideally, check the BufferDeviceAddressFeatures struct.
+ allocation_sizes: Default::default(),
+});
+```
+
+## Simple Vulkan allocation example
+
+```rust
+use gpu_allocator::vulkan::*;
+use gpu_allocator::MemoryLocation;
+
+// Setup vulkan info
+let vk_info = vk::BufferCreateInfo::builder()
+ .size(512)
+ .usage(vk::BufferUsageFlags::STORAGE_BUFFER);
+
+let buffer = unsafe { device.create_buffer(&vk_info, None) }.unwrap();
+let requirements = unsafe { device.get_buffer_memory_requirements(buffer) };
+
+let allocation = allocator
+ .allocate(&AllocationCreateDesc {
+ name: "Example allocation",
+ requirements,
+ location: MemoryLocation::CpuToGpu,
+ linear: true, // Buffers are always linear
+ allocation_scheme: AllocationScheme::GpuAllocatorManaged,
+ }).unwrap();
+
+// Bind memory to the buffer
+unsafe { device.bind_buffer_memory(buffer, allocation.memory(), allocation.offset()).unwrap() };
+
+// Cleanup
+allocator.free(allocation).unwrap();
+unsafe { device.destroy_buffer(buffer, None) };
+```
+
+## Setting up the D3D12 memory allocator
+
+```rust
+use gpu_allocator::d3d12::*;
+
+let mut allocator = Allocator::new(&AllocatorCreateDesc {
+ device: ID3D12DeviceVersion::Device(device),
+ debug_settings: Default::default(),
+ allocation_sizes: Default::default(),
+});
+```
+
+## Simple d3d12 allocation example
+
+```rust
+use gpu_allocator::d3d12::*;
+use gpu_allocator::MemoryLocation;
+
+
+let buffer_desc = Direct3D12::D3D12_RESOURCE_DESC {
+ Dimension: Direct3D12::D3D12_RESOURCE_DIMENSION_BUFFER,
+ Alignment: 0,
+ Width: 512,
+ Height: 1,
+ DepthOrArraySize: 1,
+ MipLevels: 1,
+ Format: Dxgi::Common::DXGI_FORMAT_UNKNOWN,
+ SampleDesc: Dxgi::Common::DXGI_SAMPLE_DESC {
+ Count: 1,
+ Quality: 0,
+ },
+ Layout: Direct3D12::D3D12_TEXTURE_LAYOUT_ROW_MAJOR,
+ Flags: Direct3D12::D3D12_RESOURCE_FLAG_NONE,
+};
+let allocation_desc = AllocationCreateDesc::from_d3d12_resource_desc(
+ &allocator.device(),
+ &buffer_desc,
+ "Example allocation",
+ MemoryLocation::GpuOnly,
+);
+let allocation = allocator.allocate(&allocation_desc).unwrap();
+let mut resource: Option<Direct3D12::ID3D12Resource> = None;
+let hr = unsafe {
+ device.CreatePlacedResource(
+ allocation.heap(),
+ allocation.offset(),
+ &buffer_desc,
+ Direct3D12::D3D12_RESOURCE_STATE_COMMON,
+ None,
+ &mut resource,
+ )
+}?;
+
+// Cleanup
+drop(resource);
+allocator.free(allocation).unwrap();
+```
+
+## Minimum Supported Rust Version
+
+The MSRV for this crate and the `vulkan` and `d3d12` features is Rust 1.65. Any other features such as the `visualizer` (with all the `egui` dependencies) may have a higher requirement and are not tested in our CI.
+
+## License
+
+Licensed under either of
+
+- Apache License, Version 2.0, ([LICENSE-APACHE](../main/LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
+- MIT license ([LICENSE-MIT](../main/LICENSE-MIT) or http://opensource.org/licenses/MIT)
+
+at your option.
+
+## Alternative libraries
+
+- [vk-mem-rs](https://github.com/gwihlidal/vk-mem-rs)
+
+## Contribution
+
+Unless you explicitly state otherwise, any contribution intentionally
+submitted for inclusion in the work by you, as defined in the Apache-2.0
+license, shall be dual licensed as above, without any additional terms or
+conditions.
diff --git a/third_party/rust/gpu-allocator/examples/d3d12-buffer-winrs.rs b/third_party/rust/gpu-allocator/examples/d3d12-buffer-winrs.rs
new file mode 100644
index 0000000000..fedbea5f95
--- /dev/null
+++ b/third_party/rust/gpu-allocator/examples/d3d12-buffer-winrs.rs
@@ -0,0 +1,238 @@
+//! Example showcasing [`gpu-allocator`] with types and functions from the [`windows`] crate.
+use gpu_allocator::d3d12::{
+ AllocationCreateDesc, Allocator, AllocatorCreateDesc, ID3D12DeviceVersion, ResourceCategory,
+};
+use gpu_allocator::MemoryLocation;
+use log::*;
+use windows::core::{ComInterface, Result};
+use windows::Win32::{
+ Foundation::E_NOINTERFACE,
+ Graphics::{
+ Direct3D::{D3D_FEATURE_LEVEL_11_0, D3D_FEATURE_LEVEL_11_1, D3D_FEATURE_LEVEL_12_0},
+ Direct3D12::{
+ D3D12CreateDevice, ID3D12Device, ID3D12Resource,
+ D3D12_DEFAULT_RESOURCE_PLACEMENT_ALIGNMENT, D3D12_RESOURCE_DESC,
+ D3D12_RESOURCE_DIMENSION_BUFFER, D3D12_RESOURCE_FLAG_NONE, D3D12_RESOURCE_STATE_COMMON,
+ D3D12_TEXTURE_LAYOUT_ROW_MAJOR,
+ },
+ Dxgi::{
+ Common::{DXGI_FORMAT_UNKNOWN, DXGI_SAMPLE_DESC},
+ CreateDXGIFactory2, IDXGIAdapter4, IDXGIFactory6, DXGI_ADAPTER_FLAG3_SOFTWARE,
+ DXGI_ERROR_NOT_FOUND,
+ },
+ },
+};
+
+fn create_d3d12_device(dxgi_factory: &IDXGIFactory6) -> Option<ID3D12Device> {
+ for idx in 0.. {
+ // TODO: Might as well return Result<> from this function
+ let adapter1 = match unsafe { dxgi_factory.EnumAdapters1(idx) } {
+ Ok(a) => a,
+ Err(e) if e.code() == DXGI_ERROR_NOT_FOUND => break,
+ Err(e) => panic!("{:?}", e),
+ };
+ let adapter4: IDXGIAdapter4 = adapter1.cast().unwrap();
+
+ let mut desc = Default::default();
+ unsafe { adapter4.GetDesc3(&mut desc) }.unwrap();
+ // Skip software adapters
+ // Vote for https://github.com/microsoft/windows-rs/issues/793!
+ if (desc.Flags & DXGI_ADAPTER_FLAG3_SOFTWARE) == DXGI_ADAPTER_FLAG3_SOFTWARE {
+ continue;
+ }
+
+ let feature_levels = [
+ (D3D_FEATURE_LEVEL_11_0, "D3D_FEATURE_LEVEL_11_0"),
+ (D3D_FEATURE_LEVEL_11_1, "D3D_FEATURE_LEVEL_11_1"),
+ (D3D_FEATURE_LEVEL_12_0, "D3D_FEATURE_LEVEL_12_0"),
+ ];
+
+ let device =
+ feature_levels
+ .iter()
+ .rev()
+ .find_map(|&(feature_level, feature_level_name)| {
+ let mut device = None;
+ match unsafe { D3D12CreateDevice(&adapter4, feature_level, &mut device) } {
+ Ok(()) => {
+ info!("Using D3D12 feature level: {}", feature_level_name);
+ Some(device.unwrap())
+ }
+ Err(e) if e.code() == E_NOINTERFACE => {
+ error!("ID3D12Device interface not supported");
+ None
+ }
+ Err(e) => {
+ info!(
+ "D3D12 feature level {} not supported: {}",
+ feature_level_name, e
+ );
+ None
+ }
+ }
+ });
+ if device.is_some() {
+ return device;
+ }
+ }
+
+ None
+}
+
+fn main() -> Result<()> {
+ env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("trace")).init();
+
+ let dxgi_factory = unsafe { CreateDXGIFactory2(0) }?;
+
+ let device = create_d3d12_device(&dxgi_factory).expect("Failed to create D3D12 device.");
+
+ // Setting up the allocator
+ let mut allocator = Allocator::new(&AllocatorCreateDesc {
+ device: ID3D12DeviceVersion::Device(device.clone()),
+ debug_settings: Default::default(),
+ allocation_sizes: Default::default(),
+ })
+ .unwrap();
+
+ // Test allocating Gpu Only memory
+ {
+ let test_buffer_desc = D3D12_RESOURCE_DESC {
+ Dimension: D3D12_RESOURCE_DIMENSION_BUFFER,
+ Alignment: D3D12_DEFAULT_RESOURCE_PLACEMENT_ALIGNMENT as u64,
+ Width: 512,
+ Height: 1,
+ DepthOrArraySize: 1,
+ MipLevels: 1,
+ Format: DXGI_FORMAT_UNKNOWN,
+ SampleDesc: DXGI_SAMPLE_DESC {
+ Count: 1,
+ Quality: 0,
+ },
+ Layout: D3D12_TEXTURE_LAYOUT_ROW_MAJOR,
+ Flags: D3D12_RESOURCE_FLAG_NONE,
+ };
+
+ let allocation_desc = AllocationCreateDesc::from_d3d12_resource_desc(
+ allocator.device(),
+ &test_buffer_desc,
+ "Test allocation (Gpu only)",
+ MemoryLocation::GpuOnly,
+ );
+ let allocation = allocator.allocate(&allocation_desc).unwrap();
+
+ let mut resource: Option<ID3D12Resource> = None;
+ unsafe {
+ device.CreatePlacedResource(
+ allocation.heap(),
+ allocation.offset(),
+ &test_buffer_desc,
+ D3D12_RESOURCE_STATE_COMMON,
+ None,
+ &mut resource,
+ )
+ }?;
+
+ drop(resource);
+
+ allocator.free(allocation).unwrap();
+ info!("Allocation and deallocation of GpuOnly memory was successful.");
+ }
+
+ // Test allocating Cpu to Gpu memory
+ {
+ let test_buffer_desc = D3D12_RESOURCE_DESC {
+ Dimension: D3D12_RESOURCE_DIMENSION_BUFFER,
+ Alignment: D3D12_DEFAULT_RESOURCE_PLACEMENT_ALIGNMENT as u64,
+ Width: 512,
+ Height: 1,
+ DepthOrArraySize: 1,
+ MipLevels: 1,
+ Format: DXGI_FORMAT_UNKNOWN,
+ SampleDesc: DXGI_SAMPLE_DESC {
+ Count: 1,
+ Quality: 0,
+ },
+ Layout: D3D12_TEXTURE_LAYOUT_ROW_MAJOR,
+ Flags: D3D12_RESOURCE_FLAG_NONE,
+ };
+
+ let alloc_info = unsafe { device.GetResourceAllocationInfo(0, &[test_buffer_desc]) };
+
+ let allocation = allocator
+ .allocate(&AllocationCreateDesc {
+ name: "Test allocation (Cpu To Gpu)",
+ location: MemoryLocation::CpuToGpu,
+ size: alloc_info.SizeInBytes,
+ alignment: alloc_info.Alignment,
+ resource_category: ResourceCategory::Buffer,
+ })
+ .unwrap();
+
+ let mut resource: Option<ID3D12Resource> = None;
+ unsafe {
+ device.CreatePlacedResource(
+ allocation.heap(),
+ allocation.offset(),
+ &test_buffer_desc,
+ D3D12_RESOURCE_STATE_COMMON,
+ None,
+ &mut resource,
+ )
+ }?;
+
+ drop(resource);
+
+ allocator.free(allocation).unwrap();
+ info!("Allocation and deallocation of CpuToGpu memory was successful.");
+ }
+
+ // Test allocating Gpu to Cpu memory
+ {
+ let test_buffer_desc = D3D12_RESOURCE_DESC {
+ Dimension: D3D12_RESOURCE_DIMENSION_BUFFER,
+ Alignment: D3D12_DEFAULT_RESOURCE_PLACEMENT_ALIGNMENT as u64,
+ Width: 512,
+ Height: 1,
+ DepthOrArraySize: 1,
+ MipLevels: 1,
+ Format: DXGI_FORMAT_UNKNOWN,
+ SampleDesc: DXGI_SAMPLE_DESC {
+ Count: 1,
+ Quality: 0,
+ },
+ Layout: D3D12_TEXTURE_LAYOUT_ROW_MAJOR,
+ Flags: D3D12_RESOURCE_FLAG_NONE,
+ };
+
+ let alloc_info = unsafe { device.GetResourceAllocationInfo(0, &[test_buffer_desc]) };
+
+ let allocation = allocator
+ .allocate(&AllocationCreateDesc {
+ name: "Test allocation (Gpu to Cpu)",
+ location: MemoryLocation::GpuToCpu,
+ size: alloc_info.SizeInBytes,
+ alignment: alloc_info.Alignment,
+ resource_category: ResourceCategory::Buffer,
+ })
+ .unwrap();
+
+ let mut resource: Option<ID3D12Resource> = None;
+ unsafe {
+ device.CreatePlacedResource(
+ allocation.heap(),
+ allocation.offset(),
+ &test_buffer_desc,
+ D3D12_RESOURCE_STATE_COMMON,
+ None,
+ &mut resource,
+ )
+ }?;
+
+ drop(resource);
+
+ allocator.free(allocation).unwrap();
+        info!("Allocation and deallocation of GpuToCpu memory was successful.");
+ }
+
+ Ok(())
+}
diff --git a/third_party/rust/gpu-allocator/examples/d3d12-buffer.rs b/third_party/rust/gpu-allocator/examples/d3d12-buffer.rs
new file mode 100644
index 0000000000..73c5f8b619
--- /dev/null
+++ b/third_party/rust/gpu-allocator/examples/d3d12-buffer.rs
@@ -0,0 +1,284 @@
+//! Example showcasing [`winapi`] interop with [`gpu-allocator`] which is driven by the [`windows`] crate.
+use winapi::shared::{dxgiformat, winerror};
+use winapi::um::{d3d12, d3dcommon};
+use winapi::Interface;
+
+mod all_dxgi {
+ pub use winapi::shared::{dxgi1_3::*, dxgi1_6::*, dxgitype::*};
+}
+
+use log::*;
+
+use gpu_allocator::d3d12::{
+ AllocationCreateDesc, Allocator, AllocatorCreateDesc, ID3D12DeviceVersion, ResourceCategory,
+ ToWinapi, ToWindows,
+};
+use gpu_allocator::MemoryLocation;
+
+fn create_d3d12_device(
+ dxgi_factory: *mut all_dxgi::IDXGIFactory6,
+) -> Option<*mut d3d12::ID3D12Device> {
+ for idx in 0.. {
+ let mut adapter4: *mut all_dxgi::IDXGIAdapter4 = std::ptr::null_mut();
+ let hr = unsafe {
+ dxgi_factory.as_ref().unwrap().EnumAdapters1(
+ idx,
+ <*mut *mut all_dxgi::IDXGIAdapter4>::cast(&mut adapter4),
+ )
+ };
+
+ if hr == winerror::DXGI_ERROR_NOT_FOUND {
+ break;
+ }
+
+ assert_eq!(hr, winerror::S_OK);
+
+ let mut desc = all_dxgi::DXGI_ADAPTER_DESC3::default();
+ let hr = unsafe { adapter4.as_ref().unwrap().GetDesc3(&mut desc) };
+ if hr != winerror::S_OK {
+ error!("Failed to get adapter description for adapter");
+ continue;
+ }
+
+ // Skip software adapters
+ if (desc.Flags & all_dxgi::DXGI_ADAPTER_FLAG3_SOFTWARE)
+ == all_dxgi::DXGI_ADAPTER_FLAG3_SOFTWARE
+ {
+ continue;
+ }
+
+ let feature_levels = [
+ (d3dcommon::D3D_FEATURE_LEVEL_11_0, "D3D_FEATURE_LEVEL_11_0"),
+ (d3dcommon::D3D_FEATURE_LEVEL_11_1, "D3D_FEATURE_LEVEL_11_1"),
+ (d3dcommon::D3D_FEATURE_LEVEL_12_0, "D3D_FEATURE_LEVEL_12_0"),
+ ];
+
+ let device =
+ feature_levels
+ .iter()
+ .rev()
+ .find_map(|&(feature_level, feature_level_name)| {
+ let mut device: *mut d3d12::ID3D12Device = std::ptr::null_mut();
+ let hr = unsafe {
+ d3d12::D3D12CreateDevice(
+ adapter4.cast(),
+ feature_level,
+ &d3d12::ID3D12Device::uuidof(),
+ <*mut *mut d3d12::ID3D12Device>::cast(&mut device),
+ )
+ };
+ match hr {
+ winapi::shared::winerror::S_OK => {
+ info!("Using D3D12 feature level: {}.", feature_level_name);
+ Some(device)
+ }
+ winapi::shared::winerror::E_NOINTERFACE => {
+ error!("ID3D12Device interface not supported.");
+ None
+ }
+ _ => {
+ info!(
+ "D3D12 feature level: {} not supported: {:x}",
+ feature_level_name, hr
+ );
+ None
+ }
+ }
+ });
+ if device.is_some() {
+ return device;
+ }
+ }
+
+ None
+}
+
+fn main() {
+ env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("trace")).init();
+
+ let dxgi_factory = {
+ let mut dxgi_factory: *mut all_dxgi::IDXGIFactory6 = std::ptr::null_mut();
+ let hr = unsafe {
+ all_dxgi::CreateDXGIFactory2(
+ 0,
+ &all_dxgi::IID_IDXGIFactory6,
+ <*mut *mut all_dxgi::IDXGIFactory6>::cast(&mut dxgi_factory),
+ )
+ };
+
+ assert_eq!(
+ hr,
+ winapi::shared::winerror::S_OK,
+ "Failed to create DXGI factory",
+ );
+ dxgi_factory
+ };
+
+ let device = create_d3d12_device(dxgi_factory).expect("Failed to create D3D12 device.");
+
+ // Setting up the allocator
+ let mut allocator = Allocator::new(&AllocatorCreateDesc {
+ device: ID3D12DeviceVersion::Device(device.as_windows().clone()),
+ debug_settings: Default::default(),
+ allocation_sizes: Default::default(),
+ })
+ .unwrap();
+
+ let device = unsafe { device.as_ref() }.unwrap();
+
+ // Test allocating Gpu Only memory
+ {
+ let test_buffer_desc = d3d12::D3D12_RESOURCE_DESC {
+ Dimension: d3d12::D3D12_RESOURCE_DIMENSION_BUFFER,
+ Alignment: d3d12::D3D12_DEFAULT_RESOURCE_PLACEMENT_ALIGNMENT as u64,
+ Width: 512,
+ Height: 1,
+ DepthOrArraySize: 1,
+ MipLevels: 1,
+ Format: dxgiformat::DXGI_FORMAT_UNKNOWN,
+ SampleDesc: all_dxgi::DXGI_SAMPLE_DESC {
+ Count: 1,
+ Quality: 0,
+ },
+ Layout: d3d12::D3D12_TEXTURE_LAYOUT_ROW_MAJOR,
+ Flags: d3d12::D3D12_RESOURCE_FLAG_NONE,
+ };
+
+ let allocation_desc = AllocationCreateDesc::from_winapi_d3d12_resource_desc(
+ device,
+ &test_buffer_desc,
+ "Test allocation (Gpu Only)",
+ MemoryLocation::GpuOnly,
+ );
+ let allocation = allocator.allocate(&allocation_desc).unwrap();
+
+ let mut resource: *mut d3d12::ID3D12Resource = std::ptr::null_mut();
+ let hr = unsafe {
+ device.CreatePlacedResource(
+ allocation.heap().as_winapi() as *mut _,
+ allocation.offset(),
+ &test_buffer_desc,
+ d3d12::D3D12_RESOURCE_STATE_COMMON,
+ std::ptr::null(),
+ &d3d12::IID_ID3D12Resource,
+ <*mut *mut d3d12::ID3D12Resource>::cast(&mut resource),
+ )
+ };
+ if hr != winerror::S_OK {
+ panic!("Failed to create placed resource.");
+ }
+
+ unsafe { resource.as_ref().unwrap().Release() };
+
+ allocator.free(allocation).unwrap();
+ info!("Allocation and deallocation of GpuOnly memory was successful.");
+ }
+
+ // Test allocating Cpu to Gpu memory
+ {
+ let test_buffer_desc = d3d12::D3D12_RESOURCE_DESC {
+ Dimension: d3d12::D3D12_RESOURCE_DIMENSION_BUFFER,
+ Alignment: d3d12::D3D12_DEFAULT_RESOURCE_PLACEMENT_ALIGNMENT as u64,
+ Width: 512,
+ Height: 1,
+ DepthOrArraySize: 1,
+ MipLevels: 1,
+ Format: dxgiformat::DXGI_FORMAT_UNKNOWN,
+ SampleDesc: all_dxgi::DXGI_SAMPLE_DESC {
+ Count: 1,
+ Quality: 0,
+ },
+ Layout: d3d12::D3D12_TEXTURE_LAYOUT_ROW_MAJOR,
+ Flags: d3d12::D3D12_RESOURCE_FLAG_NONE,
+ };
+
+ let alloc_info = unsafe { device.GetResourceAllocationInfo(0, 1, &test_buffer_desc) };
+
+ let allocation = allocator
+ .allocate(&AllocationCreateDesc {
+ name: "Test allocation (Cpu to Gpu)",
+ location: MemoryLocation::CpuToGpu,
+ size: alloc_info.SizeInBytes,
+ alignment: alloc_info.Alignment,
+ resource_category: ResourceCategory::Buffer,
+ })
+ .unwrap();
+
+ let mut resource: *mut d3d12::ID3D12Resource = std::ptr::null_mut();
+ let hr = unsafe {
+ device.CreatePlacedResource(
+ allocation.heap().as_winapi() as *mut _,
+ allocation.offset(),
+ &test_buffer_desc,
+ d3d12::D3D12_RESOURCE_STATE_COMMON,
+ std::ptr::null(),
+ &d3d12::IID_ID3D12Resource,
+ <*mut *mut d3d12::ID3D12Resource>::cast(&mut resource),
+ )
+ };
+ if hr != winerror::S_OK {
+ panic!("Failed to create placed resource.");
+ }
+
+ unsafe { resource.as_ref().unwrap().Release() };
+
+ allocator.free(allocation).unwrap();
+ info!("Allocation and deallocation of CpuToGpu memory was successful.");
+ }
+
+ // Test allocating Gpu to Cpu memory
+ {
+ let test_buffer_desc = d3d12::D3D12_RESOURCE_DESC {
+ Dimension: d3d12::D3D12_RESOURCE_DIMENSION_BUFFER,
+ Alignment: d3d12::D3D12_DEFAULT_RESOURCE_PLACEMENT_ALIGNMENT as u64,
+ Width: 512,
+ Height: 1,
+ DepthOrArraySize: 1,
+ MipLevels: 1,
+ Format: dxgiformat::DXGI_FORMAT_UNKNOWN,
+ SampleDesc: all_dxgi::DXGI_SAMPLE_DESC {
+ Count: 1,
+ Quality: 0,
+ },
+ Layout: d3d12::D3D12_TEXTURE_LAYOUT_ROW_MAJOR,
+ Flags: d3d12::D3D12_RESOURCE_FLAG_NONE,
+ };
+
+ let alloc_info = unsafe { device.GetResourceAllocationInfo(0, 1, &test_buffer_desc) };
+
+ let allocation = allocator
+ .allocate(&AllocationCreateDesc {
+ name: "Test allocation (Gpu to Cpu)",
+ location: MemoryLocation::GpuToCpu,
+ size: alloc_info.SizeInBytes,
+ alignment: alloc_info.Alignment,
+ resource_category: ResourceCategory::Buffer,
+ })
+ .unwrap();
+
+ let mut resource: *mut d3d12::ID3D12Resource = std::ptr::null_mut();
+ let hr = unsafe {
+ device.CreatePlacedResource(
+ allocation.heap().as_winapi() as *mut _,
+ allocation.offset(),
+ &test_buffer_desc,
+ d3d12::D3D12_RESOURCE_STATE_COMMON,
+ std::ptr::null(),
+ &d3d12::IID_ID3D12Resource,
+ <*mut *mut d3d12::ID3D12Resource>::cast(&mut resource),
+ )
+ };
+ if hr != winerror::S_OK {
+ panic!("Failed to create placed resource.");
+ }
+
+ unsafe { resource.as_ref().unwrap().Release() };
+
+ allocator.free(allocation).unwrap();
+        info!("Allocation and deallocation of GpuToCpu memory was successful.");
+ }
+
+ drop(allocator); // Explicitly drop before destruction of device.
+ unsafe { device.Release() };
+ unsafe { dxgi_factory.as_ref().unwrap().Release() };
+}
diff --git a/third_party/rust/gpu-allocator/examples/vulkan-buffer.rs b/third_party/rust/gpu-allocator/examples/vulkan-buffer.rs
new file mode 100644
index 0000000000..6900717403
--- /dev/null
+++ b/third_party/rust/gpu-allocator/examples/vulkan-buffer.rs
@@ -0,0 +1,202 @@
+use std::default::Default;
+use std::ffi::CStr;
+
+use ash::vk;
+use log::info;
+
+use gpu_allocator::vulkan::{
+ AllocationCreateDesc, AllocationScheme, Allocator, AllocatorCreateDesc,
+};
+use gpu_allocator::MemoryLocation;
+
+fn main() {
+ env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("trace")).init();
+
+ let entry = unsafe { ash::Entry::load() }.unwrap();
+
+ // Create Vulkan instance
+ let instance = {
+ let app_name = CStr::from_bytes_with_nul(b"Vulkan gpu-allocator test\0").unwrap();
+
+ let appinfo = vk::ApplicationInfo::builder()
+ .application_name(app_name)
+ .application_version(0)
+ .engine_name(app_name)
+ .engine_version(0)
+ .api_version(vk::make_api_version(0, 1, 0, 0));
+
+ let layer_names_raw = [CStr::from_bytes_with_nul(b"VK_LAYER_KHRONOS_validation\0")
+ .unwrap()
+ .as_ptr()];
+
+ let create_info = vk::InstanceCreateInfo::builder()
+ .application_info(&appinfo)
+ .enabled_layer_names(&layer_names_raw);
+
+ unsafe {
+ entry
+ .create_instance(&create_info, None)
+ .expect("Instance creation error")
+ }
+ };
+
+ // Look for vulkan physical device
+ let (pdevice, queue_family_index) = {
+ let pdevices = unsafe {
+ instance
+ .enumerate_physical_devices()
+ .expect("Physical device error")
+ };
+ pdevices
+ .iter()
+ .find_map(|pdevice| {
+ unsafe { instance.get_physical_device_queue_family_properties(*pdevice) }
+ .iter()
+ .enumerate()
+ .find_map(|(index, &info)| {
+ let supports_graphics = info.queue_flags.contains(vk::QueueFlags::GRAPHICS);
+ if supports_graphics {
+ Some((*pdevice, index))
+ } else {
+ None
+ }
+ })
+ })
+ .expect("Couldn't find suitable device.")
+ };
+
+ // Create vulkan device
+ let device = {
+ let device_extension_names_raw = vec![];
+ let features = vk::PhysicalDeviceFeatures {
+ shader_clip_distance: 1,
+ ..Default::default()
+ };
+ let priorities = [1.0];
+
+ let queue_info = vk::DeviceQueueCreateInfo::builder()
+ .queue_family_index(queue_family_index as u32)
+ .queue_priorities(&priorities);
+
+ let create_info = vk::DeviceCreateInfo::builder()
+ .queue_create_infos(std::slice::from_ref(&queue_info))
+ .enabled_extension_names(&device_extension_names_raw)
+ .enabled_features(&features);
+
+ unsafe { instance.create_device(pdevice, &create_info, None).unwrap() }
+ };
+
+ // Setting up the allocator
+ let mut allocator = Allocator::new(&AllocatorCreateDesc {
+ instance: instance.clone(),
+ device: device.clone(),
+ physical_device: pdevice,
+ debug_settings: Default::default(),
+ buffer_device_address: false,
+ allocation_sizes: Default::default(),
+ })
+ .unwrap();
+
+ // Test allocating Gpu Only memory
+ {
+ let test_buffer_info = vk::BufferCreateInfo::builder()
+ .size(512)
+ .usage(vk::BufferUsageFlags::STORAGE_BUFFER)
+ .sharing_mode(vk::SharingMode::EXCLUSIVE);
+ let test_buffer = unsafe { device.create_buffer(&test_buffer_info, None) }.unwrap();
+ let requirements = unsafe { device.get_buffer_memory_requirements(test_buffer) };
+ let location = MemoryLocation::GpuOnly;
+
+ let allocation = allocator
+ .allocate(&AllocationCreateDesc {
+ requirements,
+ location,
+ linear: true,
+ allocation_scheme: AllocationScheme::GpuAllocatorManaged,
+ name: "Test allocation (Gpu Only)",
+ })
+ .unwrap();
+
+ unsafe {
+ device
+ .bind_buffer_memory(test_buffer, allocation.memory(), allocation.offset())
+ .unwrap()
+ };
+
+ allocator.free(allocation).unwrap();
+
+ unsafe { device.destroy_buffer(test_buffer, None) };
+
+ info!("Allocation and deallocation of GpuOnly memory was successful.");
+ }
+
+ // Test allocating Cpu to Gpu memory
+ {
+ let test_buffer_info = vk::BufferCreateInfo::builder()
+ .size(512)
+ .usage(vk::BufferUsageFlags::STORAGE_BUFFER)
+ .sharing_mode(vk::SharingMode::EXCLUSIVE);
+ let test_buffer = unsafe { device.create_buffer(&test_buffer_info, None) }.unwrap();
+ let requirements = unsafe { device.get_buffer_memory_requirements(test_buffer) };
+ let location = MemoryLocation::CpuToGpu;
+
+ let allocation = allocator
+ .allocate(&AllocationCreateDesc {
+ requirements,
+ location,
+ linear: true,
+ allocation_scheme: AllocationScheme::GpuAllocatorManaged,
+ name: "Test allocation (Cpu to Gpu)",
+ })
+ .unwrap();
+
+ unsafe {
+ device
+ .bind_buffer_memory(test_buffer, allocation.memory(), allocation.offset())
+ .unwrap()
+ };
+
+ allocator.free(allocation).unwrap();
+
+ unsafe { device.destroy_buffer(test_buffer, None) };
+
+ info!("Allocation and deallocation of CpuToGpu memory was successful.");
+ }
+
+ // Test allocating Gpu to Cpu memory
+ {
+ let test_buffer_info = vk::BufferCreateInfo::builder()
+ .size(512)
+ .usage(vk::BufferUsageFlags::STORAGE_BUFFER)
+ .sharing_mode(vk::SharingMode::EXCLUSIVE);
+ let test_buffer = unsafe { device.create_buffer(&test_buffer_info, None) }.unwrap();
+ let requirements = unsafe { device.get_buffer_memory_requirements(test_buffer) };
+ let location = MemoryLocation::GpuToCpu;
+
+ let allocation = allocator
+ .allocate(&AllocationCreateDesc {
+ requirements,
+ location,
+ linear: true,
+ allocation_scheme: AllocationScheme::GpuAllocatorManaged,
+ name: "Test allocation (Gpu to Cpu)",
+ })
+ .unwrap();
+
+ unsafe {
+ device
+ .bind_buffer_memory(test_buffer, allocation.memory(), allocation.offset())
+ .unwrap()
+ };
+
+ allocator.free(allocation).unwrap();
+
+ unsafe { device.destroy_buffer(test_buffer, None) };
+
+ info!("Allocation and deallocation of GpuToCpu memory was successful.");
+ }
+
+ drop(allocator); // Explicitly drop before destruction of device and instance.
+ unsafe { device.destroy_device(None) };
+ unsafe { instance.destroy_instance(None) };
+}
diff --git a/third_party/rust/gpu-allocator/src/allocator/dedicated_block_allocator/mod.rs b/third_party/rust/gpu-allocator/src/allocator/dedicated_block_allocator/mod.rs
new file mode 100644
index 0000000000..79ec8ef896
--- /dev/null
+++ b/third_party/rust/gpu-allocator/src/allocator/dedicated_block_allocator/mod.rs
@@ -0,0 +1,136 @@
+#![deny(unsafe_code, clippy::unwrap_used)]
+
+#[cfg(feature = "visualizer")]
+pub(crate) mod visualizer;
+
+use std::{backtrace::Backtrace, sync::Arc};
+
+use log::{log, Level};
+
+use super::{AllocationReport, AllocationType, SubAllocator, SubAllocatorBase};
+use crate::{AllocationError, Result};
+
+#[derive(Debug)]
+pub(crate) struct DedicatedBlockAllocator {
+ size: u64,
+ allocated: u64,
+ /// Only used if [`crate::AllocatorDebugSettings::store_stack_traces`] is [`true`]
+ name: Option<String>,
+ backtrace: Arc<Backtrace>,
+}
+
+impl DedicatedBlockAllocator {
+ pub(crate) fn new(size: u64) -> Self {
+ Self {
+ size,
+ allocated: 0,
+ name: None,
+ backtrace: Arc::new(Backtrace::disabled()),
+ }
+ }
+}
+
+impl SubAllocatorBase for DedicatedBlockAllocator {}
+impl SubAllocator for DedicatedBlockAllocator {
+ fn allocate(
+ &mut self,
+ size: u64,
+ _alignment: u64,
+ _allocation_type: AllocationType,
+ _granularity: u64,
+ name: &str,
+ backtrace: Arc<Backtrace>,
+ ) -> Result<(u64, std::num::NonZeroU64)> {
+ if self.allocated != 0 {
+ return Err(AllocationError::OutOfMemory);
+ }
+
+ if self.size != size {
+ return Err(AllocationError::Internal(
+ "DedicatedBlockAllocator size must match allocation size.".into(),
+ ));
+ }
+
+ self.allocated = size;
+ self.name = Some(name.to_string());
+ self.backtrace = backtrace;
+
+ #[allow(clippy::unwrap_used)]
+ let dummy_id = std::num::NonZeroU64::new(1).unwrap();
+ Ok((0, dummy_id))
+ }
+
+ fn free(&mut self, chunk_id: Option<std::num::NonZeroU64>) -> Result<()> {
+ if chunk_id != std::num::NonZeroU64::new(1) {
+ Err(AllocationError::Internal("Chunk ID must be 1.".into()))
+ } else {
+ self.allocated = 0;
+ Ok(())
+ }
+ }
+
+ fn rename_allocation(
+ &mut self,
+ chunk_id: Option<std::num::NonZeroU64>,
+ name: &str,
+ ) -> Result<()> {
+ if chunk_id != std::num::NonZeroU64::new(1) {
+ Err(AllocationError::Internal("Chunk ID must be 1.".into()))
+ } else {
+ self.name = Some(name.into());
+ Ok(())
+ }
+ }
+
+ fn report_memory_leaks(
+ &self,
+ log_level: Level,
+ memory_type_index: usize,
+ memory_block_index: usize,
+ ) {
+ let empty = "".to_string();
+ let name = self.name.as_ref().unwrap_or(&empty);
+
+ log!(
+ log_level,
+ r#"leak detected: {{
+ memory type: {}
+ memory block: {}
+ dedicated allocation: {{
+ size: 0x{:x},
+ name: {},
+ backtrace: {}
+ }}
+}}"#,
+ memory_type_index,
+ memory_block_index,
+ self.size,
+ name,
+ self.backtrace
+ )
+ }
+
+ fn report_allocations(&self) -> Vec<AllocationReport> {
+ vec![AllocationReport {
+ name: self
+ .name
+ .clone()
+ .unwrap_or_else(|| "<Unnamed Dedicated allocation>".to_owned()),
+ size: self.size,
+ #[cfg(feature = "visualizer")]
+ backtrace: self.backtrace.clone(),
+ }]
+ }
+
+ fn size(&self) -> u64 {
+ self.size
+ }
+
+ fn allocated(&self) -> u64 {
+ self.allocated
+ }
+
+ fn supports_general_allocations(&self) -> bool {
+ false
+ }
+}
diff --git a/third_party/rust/gpu-allocator/src/allocator/dedicated_block_allocator/visualizer.rs b/third_party/rust/gpu-allocator/src/allocator/dedicated_block_allocator/visualizer.rs
new file mode 100644
index 0000000000..a2cd4d2058
--- /dev/null
+++ b/third_party/rust/gpu-allocator/src/allocator/dedicated_block_allocator/visualizer.rs
@@ -0,0 +1,8 @@
+use super::DedicatedBlockAllocator;
+use crate::visualizer::SubAllocatorVisualizer;
+
+impl SubAllocatorVisualizer for DedicatedBlockAllocator {
+ fn draw_base_info(&self, ui: &mut egui::Ui) {
+ ui.label("Dedicated Block");
+ }
+}
diff --git a/third_party/rust/gpu-allocator/src/allocator/free_list_allocator/mod.rs b/third_party/rust/gpu-allocator/src/allocator/free_list_allocator/mod.rs
new file mode 100644
index 0000000000..33437c85ee
--- /dev/null
+++ b/third_party/rust/gpu-allocator/src/allocator/free_list_allocator/mod.rs
@@ -0,0 +1,419 @@
+#![deny(unsafe_code, clippy::unwrap_used)]
+
+#[cfg(feature = "visualizer")]
+pub(crate) mod visualizer;
+
+use std::{
+ backtrace::Backtrace,
+ collections::{HashMap, HashSet},
+ sync::Arc,
+};
+
+use log::{log, Level};
+
+use super::{AllocationReport, AllocationType, SubAllocator, SubAllocatorBase};
+use crate::{AllocationError, Result};
+
+const USE_BEST_FIT: bool = true;
+
+fn align_down(val: u64, alignment: u64) -> u64 {
+ val & !(alignment - 1u64)
+}
+
+fn align_up(val: u64, alignment: u64) -> u64 {
+ align_down(val + alignment - 1u64, alignment)
+}
+
+#[derive(Debug)]
+pub(crate) struct MemoryChunk {
+ pub(crate) chunk_id: std::num::NonZeroU64,
+ pub(crate) size: u64,
+ pub(crate) offset: u64,
+ pub(crate) allocation_type: AllocationType,
+ pub(crate) name: Option<String>,
+ /// Only used if [`crate::AllocatorDebugSettings::store_stack_traces`] is [`true`]
+ pub(crate) backtrace: Arc<Backtrace>,
+ next: Option<std::num::NonZeroU64>,
+ prev: Option<std::num::NonZeroU64>,
+}
+
+#[derive(Debug)]
+pub(crate) struct FreeListAllocator {
+ size: u64,
+ allocated: u64,
+ pub(crate) chunk_id_counter: u64,
+ pub(crate) chunks: HashMap<std::num::NonZeroU64, MemoryChunk>,
+ free_chunks: HashSet<std::num::NonZeroU64>,
+}
+
+/// Test if two suballocations will overlap the same page.
+fn is_on_same_page(offset_a: u64, size_a: u64, offset_b: u64, page_size: u64) -> bool {
+ let end_a = offset_a + size_a - 1;
+ let end_page_a = align_down(end_a, page_size);
+ let start_b = offset_b;
+ let start_page_b = align_down(start_b, page_size);
+
+ end_page_a == start_page_b
+}
+
+/// Test if two allocation types will be conflicting or not.
+fn has_granularity_conflict(type0: AllocationType, type1: AllocationType) -> bool {
+ if type0 == AllocationType::Free || type1 == AllocationType::Free {
+ return false;
+ }
+
+ type0 != type1
+}
+
+impl FreeListAllocator {
+ pub(crate) fn new(size: u64) -> Self {
+ #[allow(clippy::unwrap_used)]
+ let initial_chunk_id = std::num::NonZeroU64::new(1).unwrap();
+
+ let mut chunks = HashMap::default();
+ chunks.insert(
+ initial_chunk_id,
+ MemoryChunk {
+ chunk_id: initial_chunk_id,
+ size,
+ offset: 0,
+ allocation_type: AllocationType::Free,
+ name: None,
+ backtrace: Arc::new(Backtrace::disabled()),
+ prev: None,
+ next: None,
+ },
+ );
+
+ let mut free_chunks = HashSet::default();
+ free_chunks.insert(initial_chunk_id);
+
+ Self {
+ size,
+ allocated: 0,
+ // 0 is not allowed as a chunk ID, 1 is used by the initial chunk, next chunk is going to be 2.
+ // The system will take the counter as the ID, and then increment the counter.
+ chunk_id_counter: 2,
+ chunks,
+ free_chunks,
+ }
+ }
+
+ /// Generates a new unique chunk ID
+ fn get_new_chunk_id(&mut self) -> Result<std::num::NonZeroU64> {
+ if self.chunk_id_counter == u64::MAX {
+ // End of chunk id counter reached, no more allocations are possible.
+ return Err(AllocationError::OutOfMemory);
+ }
+
+ let id = self.chunk_id_counter;
+ self.chunk_id_counter += 1;
+ std::num::NonZeroU64::new(id).ok_or_else(|| {
+ AllocationError::Internal("New chunk id was 0, which is not allowed.".into())
+ })
+ }
+ /// Finds the specified `chunk_id` in the list of free chunks and removes it from the list
+ fn remove_id_from_free_list(&mut self, chunk_id: std::num::NonZeroU64) {
+ self.free_chunks.remove(&chunk_id);
+ }
+ /// Merges two adjacent chunks. Right chunk will be merged into the left chunk
+ fn merge_free_chunks(
+ &mut self,
+ chunk_left: std::num::NonZeroU64,
+ chunk_right: std::num::NonZeroU64,
+ ) -> Result<()> {
+ // Gather data from right chunk and remove it
+ let (right_size, right_next) = {
+ let chunk = self.chunks.remove(&chunk_right).ok_or_else(|| {
+ AllocationError::Internal("Chunk ID not present in chunk list.".into())
+ })?;
+ self.remove_id_from_free_list(chunk.chunk_id);
+
+ (chunk.size, chunk.next)
+ };
+
+ // Merge into left chunk
+ {
+ let chunk = self.chunks.get_mut(&chunk_left).ok_or_else(|| {
+ AllocationError::Internal("Chunk ID not present in chunk list.".into())
+ })?;
+ chunk.next = right_next;
+ chunk.size += right_size;
+ }
+
+ // Patch pointers
+ if let Some(right_next) = right_next {
+ let chunk = self.chunks.get_mut(&right_next).ok_or_else(|| {
+ AllocationError::Internal("Chunk ID not present in chunk list.".into())
+ })?;
+ chunk.prev = Some(chunk_left);
+ }
+
+ Ok(())
+ }
+}
+
+impl SubAllocatorBase for FreeListAllocator {}
+impl SubAllocator for FreeListAllocator {
+ fn allocate(
+ &mut self,
+ size: u64,
+ alignment: u64,
+ allocation_type: AllocationType,
+ granularity: u64,
+ name: &str,
+ backtrace: Arc<Backtrace>,
+ ) -> Result<(u64, std::num::NonZeroU64)> {
+ let free_size = self.size - self.allocated;
+ if size > free_size {
+ return Err(AllocationError::OutOfMemory);
+ }
+
+ let mut best_fit_id: Option<std::num::NonZeroU64> = None;
+ let mut best_offset = 0u64;
+ let mut best_aligned_size = 0u64;
+ let mut best_chunk_size = 0u64;
+
+ for current_chunk_id in self.free_chunks.iter() {
+ let current_chunk = self.chunks.get(current_chunk_id).ok_or_else(|| {
+ AllocationError::Internal(
+ "Chunk ID in free list is not present in chunk list.".into(),
+ )
+ })?;
+
+ if current_chunk.size < size {
+ continue;
+ }
+
+ let mut offset = align_up(current_chunk.offset, alignment);
+
+ if let Some(prev_idx) = current_chunk.prev {
+ let previous = self.chunks.get(&prev_idx).ok_or_else(|| {
+ AllocationError::Internal("Invalid previous chunk reference.".into())
+ })?;
+ if is_on_same_page(previous.offset, previous.size, offset, granularity)
+ && has_granularity_conflict(previous.allocation_type, allocation_type)
+ {
+ offset = align_up(offset, granularity);
+ }
+ }
+
+ let padding = offset - current_chunk.offset;
+ let aligned_size = padding + size;
+
+ if aligned_size > current_chunk.size {
+ continue;
+ }
+
+ if let Some(next_idx) = current_chunk.next {
+ let next = self.chunks.get(&next_idx).ok_or_else(|| {
+ AllocationError::Internal("Invalid next chunk reference.".into())
+ })?;
+ if is_on_same_page(offset, size, next.offset, granularity)
+ && has_granularity_conflict(allocation_type, next.allocation_type)
+ {
+ continue;
+ }
+ }
+
+ if USE_BEST_FIT {
+ if best_fit_id.is_none() || current_chunk.size < best_chunk_size {
+ best_fit_id = Some(*current_chunk_id);
+ best_aligned_size = aligned_size;
+ best_offset = offset;
+
+ best_chunk_size = current_chunk.size;
+ };
+ } else {
+ best_fit_id = Some(*current_chunk_id);
+ best_aligned_size = aligned_size;
+ best_offset = offset;
+
+ best_chunk_size = current_chunk.size;
+ break;
+ }
+ }
+
+ let first_fit_id = best_fit_id.ok_or(AllocationError::OutOfMemory)?;
+
+ let chunk_id = if best_chunk_size > best_aligned_size {
+ let new_chunk_id = self.get_new_chunk_id()?;
+
+ let new_chunk = {
+ let free_chunk = self.chunks.get_mut(&first_fit_id).ok_or_else(|| {
+ AllocationError::Internal("Chunk ID must be in chunk list.".into())
+ })?;
+ let new_chunk = MemoryChunk {
+ chunk_id: new_chunk_id,
+ size: best_aligned_size,
+ offset: free_chunk.offset,
+ allocation_type,
+ name: Some(name.to_string()),
+ backtrace,
+ prev: free_chunk.prev,
+ next: Some(first_fit_id),
+ };
+
+ free_chunk.prev = Some(new_chunk.chunk_id);
+ free_chunk.offset += best_aligned_size;
+ free_chunk.size -= best_aligned_size;
+ new_chunk
+ };
+
+ if let Some(prev_id) = new_chunk.prev {
+ let prev_chunk = self.chunks.get_mut(&prev_id).ok_or_else(|| {
+ AllocationError::Internal("Invalid previous chunk reference.".into())
+ })?;
+ prev_chunk.next = Some(new_chunk.chunk_id);
+ }
+
+ self.chunks.insert(new_chunk_id, new_chunk);
+
+ new_chunk_id
+ } else {
+ let chunk = self
+ .chunks
+ .get_mut(&first_fit_id)
+ .ok_or_else(|| AllocationError::Internal("Invalid chunk reference.".into()))?;
+
+ chunk.allocation_type = allocation_type;
+ chunk.name = Some(name.to_string());
+ chunk.backtrace = backtrace;
+
+ self.remove_id_from_free_list(first_fit_id);
+
+ first_fit_id
+ };
+
+ self.allocated += best_aligned_size;
+
+ Ok((best_offset, chunk_id))
+ }
+
+ fn free(&mut self, chunk_id: Option<std::num::NonZeroU64>) -> Result<()> {
+ let chunk_id = chunk_id
+ .ok_or_else(|| AllocationError::Internal("Chunk ID must be a valid value.".into()))?;
+
+ let (next_id, prev_id) = {
+ let chunk = self.chunks.get_mut(&chunk_id).ok_or_else(|| {
+ AllocationError::Internal(
+ "Attempting to free chunk that is not in chunk list.".into(),
+ )
+ })?;
+ chunk.allocation_type = AllocationType::Free;
+ chunk.name = None;
+ chunk.backtrace = Arc::new(Backtrace::disabled());
+
+ self.allocated -= chunk.size;
+
+ self.free_chunks.insert(chunk.chunk_id);
+
+ (chunk.next, chunk.prev)
+ };
+
+ if let Some(next_id) = next_id {
+ if self.chunks[&next_id].allocation_type == AllocationType::Free {
+ self.merge_free_chunks(chunk_id, next_id)?;
+ }
+ }
+
+ if let Some(prev_id) = prev_id {
+ if self.chunks[&prev_id].allocation_type == AllocationType::Free {
+ self.merge_free_chunks(prev_id, chunk_id)?;
+ }
+ }
+ Ok(())
+ }
+
+ fn rename_allocation(
+ &mut self,
+ chunk_id: Option<std::num::NonZeroU64>,
+ name: &str,
+ ) -> Result<()> {
+ let chunk_id = chunk_id
+ .ok_or_else(|| AllocationError::Internal("Chunk ID must be a valid value.".into()))?;
+
+ let chunk = self.chunks.get_mut(&chunk_id).ok_or_else(|| {
+ AllocationError::Internal(
+ "Attempting to rename chunk that is not in chunk list.".into(),
+ )
+ })?;
+
+ if chunk.allocation_type == AllocationType::Free {
+ return Err(AllocationError::Internal(
+ "Attempting to rename a freed allocation.".into(),
+ ));
+ }
+
+ chunk.name = Some(name.into());
+
+ Ok(())
+ }
+
+ fn report_memory_leaks(
+ &self,
+ log_level: Level,
+ memory_type_index: usize,
+ memory_block_index: usize,
+ ) {
+ for (chunk_id, chunk) in self.chunks.iter() {
+ if chunk.allocation_type == AllocationType::Free {
+ continue;
+ }
+ let empty = "".to_string();
+ let name = chunk.name.as_ref().unwrap_or(&empty);
+
+ log!(
+ log_level,
+ r#"leak detected: {{
+ memory type: {}
+ memory block: {}
+ chunk: {{
+ chunk_id: {},
+ size: 0x{:x},
+ offset: 0x{:x},
+ allocation_type: {:?},
+ name: {},
+ backtrace: {}
+ }}
+}}"#,
+ memory_type_index,
+ memory_block_index,
+ chunk_id,
+ chunk.size,
+ chunk.offset,
+ chunk.allocation_type,
+ name,
+ chunk.backtrace
+ );
+ }
+ }
+
+ fn report_allocations(&self) -> Vec<AllocationReport> {
+ self.chunks
+ .iter()
+ .filter(|(_key, chunk)| chunk.allocation_type != AllocationType::Free)
+ .map(|(_key, chunk)| AllocationReport {
+ name: chunk
+ .name
+ .clone()
+ .unwrap_or_else(|| "<Unnamed FreeList allocation>".to_owned()),
+ size: chunk.size,
+ #[cfg(feature = "visualizer")]
+ backtrace: chunk.backtrace.clone(),
+ })
+ .collect::<Vec<_>>()
+ }
+
+ fn size(&self) -> u64 {
+ self.size
+ }
+
+ fn allocated(&self) -> u64 {
+ self.allocated
+ }
+
+ fn supports_general_allocations(&self) -> bool {
+ true
+ }
+}
diff --git a/third_party/rust/gpu-allocator/src/allocator/free_list_allocator/visualizer.rs b/third_party/rust/gpu-allocator/src/allocator/free_list_allocator/visualizer.rs
new file mode 100644
index 0000000000..5c77831057
--- /dev/null
+++ b/third_party/rust/gpu-allocator/src/allocator/free_list_allocator/visualizer.rs
@@ -0,0 +1,25 @@
+use super::FreeListAllocator;
+use crate::visualizer::{
+ render_memory_chunks_ui, ColorScheme, MemoryChunksVisualizationSettings, SubAllocatorVisualizer,
+};
+
+impl SubAllocatorVisualizer for FreeListAllocator {
+ fn supports_visualization(&self) -> bool {
+ true
+ }
+
+ fn draw_base_info(&self, ui: &mut egui::Ui) {
+ ui.label("free list sub-allocator");
+ ui.label(format!("chunk count: {}", self.chunks.len()));
+ ui.label(format!("chunk id counter: {}", self.chunk_id_counter));
+ }
+
+ fn draw_visualization(
+ &self,
+ color_scheme: &ColorScheme,
+ ui: &mut egui::Ui,
+ settings: &MemoryChunksVisualizationSettings,
+ ) {
+ render_memory_chunks_ui(ui, color_scheme, settings, self.size, self.chunks.values());
+ }
+}
diff --git a/third_party/rust/gpu-allocator/src/allocator/mod.rs b/third_party/rust/gpu-allocator/src/allocator/mod.rs
new file mode 100644
index 0000000000..a84c047ba5
--- /dev/null
+++ b/third_party/rust/gpu-allocator/src/allocator/mod.rs
@@ -0,0 +1,109 @@
+use std::{backtrace::Backtrace, sync::Arc};
+
+use log::*;
+
+use crate::result::*;
+
+pub(crate) mod dedicated_block_allocator;
+pub(crate) use dedicated_block_allocator::DedicatedBlockAllocator;
+
+pub(crate) mod free_list_allocator;
+pub(crate) use free_list_allocator::FreeListAllocator;
+
+#[derive(PartialEq, Copy, Clone, Debug)]
+#[repr(u8)]
+pub(crate) enum AllocationType {
+ Free,
+ Linear,
+ NonLinear,
+}
+
+impl AllocationType {
+ #[cfg(feature = "visualizer")]
+ pub fn as_str(self) -> &'static str {
+ match self {
+ Self::Free => "Free",
+ Self::Linear => "Linear",
+ Self::NonLinear => "Non-Linear",
+ }
+ }
+}
+
+#[derive(Clone)]
+pub(crate) struct AllocationReport {
+ pub(crate) name: String,
+ pub(crate) size: u64,
+ #[cfg(feature = "visualizer")]
+ pub(crate) backtrace: Arc<Backtrace>,
+}
+
+#[cfg(feature = "visualizer")]
+pub(crate) trait SubAllocatorBase: crate::visualizer::SubAllocatorVisualizer {}
+#[cfg(not(feature = "visualizer"))]
+pub(crate) trait SubAllocatorBase {}
+
+pub(crate) trait SubAllocator: SubAllocatorBase + std::fmt::Debug + Sync + Send {
+ fn allocate(
+ &mut self,
+ size: u64,
+ alignment: u64,
+ allocation_type: AllocationType,
+ granularity: u64,
+ name: &str,
+ backtrace: Arc<Backtrace>,
+ ) -> Result<(u64, std::num::NonZeroU64)>;
+
+ fn free(&mut self, chunk_id: Option<std::num::NonZeroU64>) -> Result<()>;
+
+ fn rename_allocation(
+ &mut self,
+ chunk_id: Option<std::num::NonZeroU64>,
+ name: &str,
+ ) -> Result<()>;
+
+ fn report_memory_leaks(
+ &self,
+ log_level: Level,
+ memory_type_index: usize,
+ memory_block_index: usize,
+ );
+
+ fn report_allocations(&self) -> Vec<AllocationReport>;
+
+ #[must_use]
+ fn supports_general_allocations(&self) -> bool;
+ #[must_use]
+ fn size(&self) -> u64;
+ #[must_use]
+ fn allocated(&self) -> u64;
+
+ /// Helper function: reports how much memory is available in this suballocator
+ #[must_use]
+ fn available_memory(&self) -> u64 {
+ self.size() - self.allocated()
+ }
+
+ /// Helper function: reports if the suballocator is empty (meaning, having no allocations).
+ #[must_use]
+ fn is_empty(&self) -> bool {
+ self.allocated() == 0
+ }
+}
+
+pub(crate) const VISUALIZER_TABLE_MAX_ENTRY_NAME_LEN: usize = 40;
+
+pub(crate) fn fmt_bytes(mut amount: u64) -> String {
+ const SUFFIX: [&str; 5] = ["B", "KB", "MB", "GB", "TB"];
+
+ let mut idx = 0;
+ let mut print_amount = amount as f64;
+ loop {
+ if amount < 1024 {
+ return format!("{:.2} {}", print_amount, SUFFIX[idx]);
+ }
+
+ print_amount = amount as f64 / 1024.0;
+ amount /= 1024;
+ idx += 1;
+ }
+}
diff --git a/third_party/rust/gpu-allocator/src/d3d12/mod.rs b/third_party/rust/gpu-allocator/src/d3d12/mod.rs
new file mode 100644
index 0000000000..d923330af5
--- /dev/null
+++ b/third_party/rust/gpu-allocator/src/d3d12/mod.rs
@@ -0,0 +1,1072 @@
+#![deny(clippy::unimplemented, clippy::unwrap_used, clippy::ok_expect)]
+
+use std::{backtrace::Backtrace, fmt, sync::Arc};
+
+use log::{debug, warn, Level};
+
+use windows::Win32::{Foundation::E_OUTOFMEMORY, Graphics::Direct3D12::*};
+
+#[cfg(feature = "public-winapi")]
+mod public_winapi {
+ use super::*;
+ pub use winapi::um::d3d12 as winapi_d3d12;
+
+ /// Trait similar to [`AsRef`]/[`AsMut`], for borrowing raw [`winapi`] pointers from [`windows`] wrapper types.
+ pub trait ToWinapi<T> {
+ fn as_winapi(&self) -> *const T;
+ fn as_winapi_mut(&mut self) -> *mut T;
+ }
+
+ /// [`windows`] types hold their pointer internally and provide drop semantics. As such this trait
+ /// is usually implemented on the _pointer type_ (`*const`, `*mut`) of the [`winapi`] object so that
+ /// a **borrow of** that pointer becomes a borrow of the [`windows`] type.
+ pub trait ToWindows<T> {
+ fn as_windows(&self) -> &T;
+ }
+
+ impl ToWinapi<winapi_d3d12::ID3D12Resource> for ID3D12Resource {
+ fn as_winapi(&self) -> *const winapi_d3d12::ID3D12Resource {
+ unsafe { std::mem::transmute_copy(self) }
+ }
+
+ fn as_winapi_mut(&mut self) -> *mut winapi_d3d12::ID3D12Resource {
+ unsafe { std::mem::transmute_copy(self) }
+ }
+ }
+
+ impl ToWinapi<winapi_d3d12::ID3D12Device> for ID3D12Device {
+ fn as_winapi(&self) -> *const winapi_d3d12::ID3D12Device {
+ unsafe { std::mem::transmute_copy(self) }
+ }
+
+ fn as_winapi_mut(&mut self) -> *mut winapi_d3d12::ID3D12Device {
+ unsafe { std::mem::transmute_copy(self) }
+ }
+ }
+
+ impl ToWindows<ID3D12Device> for *const winapi_d3d12::ID3D12Device {
+ fn as_windows(&self) -> &ID3D12Device {
+ unsafe { std::mem::transmute(self) }
+ }
+ }
+
+ impl ToWindows<ID3D12Device> for *mut winapi_d3d12::ID3D12Device {
+ fn as_windows(&self) -> &ID3D12Device {
+ unsafe { std::mem::transmute(self) }
+ }
+ }
+
+ impl ToWindows<ID3D12Device> for &mut winapi_d3d12::ID3D12Device {
+ fn as_windows(&self) -> &ID3D12Device {
+ unsafe { std::mem::transmute(self) }
+ }
+ }
+
+ impl ToWinapi<winapi_d3d12::ID3D12Heap> for ID3D12Heap {
+ fn as_winapi(&self) -> *const winapi_d3d12::ID3D12Heap {
+ unsafe { std::mem::transmute_copy(self) }
+ }
+
+ fn as_winapi_mut(&mut self) -> *mut winapi_d3d12::ID3D12Heap {
+ unsafe { std::mem::transmute_copy(self) }
+ }
+ }
+}
+
+#[cfg(feature = "public-winapi")]
+pub use public_winapi::*;
+
+#[cfg(feature = "visualizer")]
+mod visualizer;
+#[cfg(feature = "visualizer")]
+pub use visualizer::AllocatorVisualizer;
+
+use super::allocator;
+use super::allocator::AllocationType;
+
+use crate::{
+ allocator::fmt_bytes, AllocationError, AllocationSizes, AllocatorDebugSettings, MemoryLocation,
+ Result,
+};
+
+/// [`ResourceCategory`] is used for supporting [`D3D12_RESOURCE_HEAP_TIER_1`].
+/// [`ResourceCategory`] will be ignored if device supports [`D3D12_RESOURCE_HEAP_TIER_2`].
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub enum ResourceCategory {
+ Buffer,
+ RtvDsvTexture,
+ OtherTexture,
+}
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub enum ResourceStateOrBarrierLayout {
+ ResourceState(D3D12_RESOURCE_STATES),
+ BarrierLayout(D3D12_BARRIER_LAYOUT),
+}
+
+#[derive(Clone, Copy)]
+pub struct ResourceCreateDesc<'a> {
+ pub name: &'a str,
+ pub memory_location: MemoryLocation,
+ pub resource_category: ResourceCategory,
+ pub resource_desc: &'a D3D12_RESOURCE_DESC,
+ pub clear_value: Option<&'a D3D12_CLEAR_VALUE>,
+ pub initial_state_or_layout: ResourceStateOrBarrierLayout,
+ pub resource_type: &'a ResourceType<'a>,
+}
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+enum HeapCategory {
+ All,
+ Buffer,
+ RtvDsvTexture,
+ OtherTexture,
+}
+
+impl From<ResourceCategory> for HeapCategory {
+ fn from(resource_category: ResourceCategory) -> Self {
+ match resource_category {
+ ResourceCategory::Buffer => Self::Buffer,
+ ResourceCategory::RtvDsvTexture => Self::RtvDsvTexture,
+ ResourceCategory::OtherTexture => Self::OtherTexture,
+ }
+ }
+}
+
+impl From<&D3D12_RESOURCE_DESC> for ResourceCategory {
+ fn from(desc: &D3D12_RESOURCE_DESC) -> Self {
+ if desc.Dimension == D3D12_RESOURCE_DIMENSION_BUFFER {
+ Self::Buffer
+ } else if (desc.Flags
+ & (D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET | D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL))
+ != D3D12_RESOURCE_FLAG_NONE
+ {
+ Self::RtvDsvTexture
+ } else {
+ Self::OtherTexture
+ }
+ }
+}
+
+#[cfg(feature = "public-winapi")]
+impl From<&winapi_d3d12::D3D12_RESOURCE_DESC> for ResourceCategory {
+ fn from(desc: &winapi_d3d12::D3D12_RESOURCE_DESC) -> Self {
+ if desc.Dimension == winapi_d3d12::D3D12_RESOURCE_DIMENSION_BUFFER {
+ Self::Buffer
+ } else if (desc.Flags
+ & (winapi_d3d12::D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET
+ | winapi_d3d12::D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL))
+ != 0
+ {
+ Self::RtvDsvTexture
+ } else {
+ Self::OtherTexture
+ }
+ }
+}
+
+#[derive(Clone, Debug)]
+pub struct AllocationCreateDesc<'a> {
+ /// Name of the allocation, for tracking and debugging purposes
+ pub name: &'a str,
+ /// Location where the memory allocation should be stored
+ pub location: MemoryLocation,
+
+ /// Size of allocation, should be queried using [`ID3D12Device::GetResourceAllocationInfo()`]
+ pub size: u64,
+ /// Alignment of allocation, should be queried using [`ID3D12Device::GetResourceAllocationInfo()`]
+ pub alignment: u64,
+ /// Resource category based on resource dimension and flags. Can be created from a [`D3D12_RESOURCE_DESC`]
+ /// using the helper into function. The resource category is ignored when Resource Heap Tier 2 or higher
+ /// is supported.
+ pub resource_category: ResourceCategory,
+}
+
+impl<'a> AllocationCreateDesc<'a> {
+ /// Helper conversion function utilizing [`winapi`] types.
+ ///
+ /// This function is also available for [`windows::Win32::Graphics::Direct3D12`]
+ /// types as [`from_d3d12_resource_desc()`][Self::from_d3d12_resource_desc()].
+ #[cfg(feature = "public-winapi")]
+ pub fn from_winapi_d3d12_resource_desc(
+ device: *const winapi_d3d12::ID3D12Device,
+ desc: &winapi_d3d12::D3D12_RESOURCE_DESC,
+ name: &'a str,
+ location: MemoryLocation,
+ ) -> AllocationCreateDesc<'a> {
+ let device = device.as_windows();
+ // Raw structs are binary-compatible
+ let desc = unsafe { std::mem::transmute(desc) };
+ let allocation_info =
+ unsafe { device.GetResourceAllocationInfo(0, std::slice::from_ref(desc)) };
+ let resource_category: ResourceCategory = desc.into();
+
+ AllocationCreateDesc {
+ name,
+ location,
+ size: allocation_info.SizeInBytes,
+ alignment: allocation_info.Alignment,
+ resource_category,
+ }
+ }
+
+ /// Helper conversion function utilizing [`windows::Win32::Graphics::Direct3D12`] types.
+ ///
+ /// This function is also available for `winapi` types as `from_winapi_d3d12_resource_desc()`
+ /// when the `public-winapi` feature is enabled.
+ pub fn from_d3d12_resource_desc(
+ device: &ID3D12Device,
+ desc: &D3D12_RESOURCE_DESC,
+ name: &'a str,
+ location: MemoryLocation,
+ ) -> AllocationCreateDesc<'a> {
+ let allocation_info =
+ unsafe { device.GetResourceAllocationInfo(0, std::slice::from_ref(desc)) };
+ let resource_category: ResourceCategory = desc.into();
+
+ AllocationCreateDesc {
+ name,
+ location,
+ size: allocation_info.SizeInBytes,
+ alignment: allocation_info.Alignment,
+ resource_category,
+ }
+ }
+}
+
+/// Selects which device interface (and therefore which feature set) the allocator uses.
+#[derive(Clone, Debug)]
+pub enum ID3D12DeviceVersion {
+    /// Basic device compatible with legacy barriers only, i.e. can only be used in conjunction
+    /// with [`ResourceStateOrBarrierLayout::ResourceState`].
+    Device(ID3D12Device),
+    /// Required for enhanced barrier support, i.e. when using
+    /// [`ResourceStateOrBarrierLayout::BarrierLayout`].
+    Device10(ID3D12Device10),
+}
+
+impl std::ops::Deref for ID3D12DeviceVersion {
+    type Target = ID3D12Device;
+
+    /// Borrows the base [`ID3D12Device`] interface from whichever variant is stored.
+    fn deref(&self) -> &Self::Target {
+        match self {
+            Self::Device(device) => device,
+            // Windows-rs hides CanInto, we know that Device10 is a subclass of Device but there's not even a Deref.
+            Self::Device10(device10) => windows::core::CanInto::can_into(device10),
+        }
+    }
+}
+
+/// Parameters for constructing an [`Allocator`] via [`Allocator::new()`].
+#[derive(Debug)]
+pub struct AllocatorCreateDesc {
+    /// Device all heaps and resources are created on; cloned (AddRef'ed) by `Allocator::new()`.
+    pub device: ID3D12DeviceVersion,
+    /// Logging and leak-reporting switches.
+    pub debug_settings: AllocatorDebugSettings,
+    /// Preferred sizes for newly created device/host memory blocks.
+    pub allocation_sizes: AllocationSizes,
+}
+
+/// How the memory backing a [`Resource`] should be obtained.
+pub enum ResourceType<'a> {
+    /// Allocation equivalent to Dx12's CommittedResource.
+    Committed {
+        heap_properties: &'a D3D12_HEAP_PROPERTIES,
+        heap_flags: D3D12_HEAP_FLAGS,
+    },
+    /// Allocation equivalent to Dx12's PlacedResource.
+    Placed,
+}
+
+/// A resource created by [`Allocator::create_resource()`], pairing the
+/// [`ID3D12Resource`] with the allocation that backs it (for placed resources).
+#[derive(Debug)]
+pub struct Resource {
+    // Kept so the leak warning emitted from `Drop` can identify the resource.
+    name: String,
+    /// Backing allocation; `None` for committed resources.
+    pub allocation: Option<Allocation>,
+    // Becomes `None` once released via `Allocator::free_resource()`.
+    resource: Option<ID3D12Resource>,
+    pub memory_location: MemoryLocation,
+    // Index into `Allocator::memory_types` used to update committed-allocation
+    // statistics on free; `None` for placed resources.
+    memory_type_index: Option<usize>,
+    /// Size in bytes as reported by `GetResourceAllocationInfo()`.
+    pub size: u64,
+}
+
+impl Resource {
+    /// Returns the wrapped [`ID3D12Resource`].
+    ///
+    /// # Panics
+    /// Panics if the resource has already been freed via `Allocator::free_resource()`.
+    pub fn resource(&self) -> &ID3D12Resource {
+        match self.resource.as_ref() {
+            Some(resource) => resource,
+            None => panic!("Resource was already freed."),
+        }
+    }
+}
+
+impl Drop for Resource {
+    fn drop(&mut self) {
+        // `free_resource()` takes the inner resource out before dropping, so
+        // anything still present here was leaked by the caller.
+        if self.resource.is_none() {
+            return;
+        }
+        warn!("Dropping resource `{}` that was not freed. Call `Allocator::free_resource(resource)` instead.", self.name);
+    }
+}
+
+/// Running totals for committed-resource allocations within one memory type.
+#[derive(Debug)]
+pub struct CommittedAllocationStatistics {
+    /// Number of live committed resources.
+    pub num_allocations: usize,
+    /// Sum of their sizes in bytes.
+    pub total_size: u64,
+}
+
+/// A sub-allocation out of an [`ID3D12Heap`], returned by [`Allocator::allocate()`].
+#[derive(Debug)]
+pub struct Allocation {
+    // Identifier of the chunk inside the block's sub-allocator;
+    // `None` marks a null allocation.
+    chunk_id: Option<std::num::NonZeroU64>,
+    // Byte offset of this allocation within `heap`.
+    offset: u64,
+    // Size of this allocation in bytes.
+    size: u64,
+    // Indices locating the owning block inside `Allocator::memory_types`.
+    memory_block_index: usize,
+    memory_type_index: usize,
+    // Backing heap; a cloned, refcounted COM pointer shared with the memory block.
+    heap: ID3D12Heap,
+
+    name: Option<Box<str>>,
+}
+
+impl Allocation {
+    /// Returns the sub-allocator chunk id, or `None` for a null allocation.
+    pub fn chunk_id(&self) -> Option<std::num::NonZeroU64> {
+        self.chunk_id
+    }
+
+    /// Returns the [`ID3D12Heap`] object that is backing this allocation.
+    /// This heap object can be shared with multiple other allocations and shouldn't be freed (or allocated from)
+    /// without this library, because that will lead to undefined behavior.
+    ///
+    /// # Safety
+    /// The result of this function can be safely passed into [`ID3D12Device::CreatePlacedResource()`].
+    /// It is exposed for this reason. Keep in mind to also pass [`Self::offset()`] along to it.
+    pub unsafe fn heap(&self) -> &ID3D12Heap {
+        &self.heap
+    }
+
+    /// Returns the offset of the allocation on the [`ID3D12Heap`].
+    /// When creating a placed resources, this offset needs to be supplied as well.
+    pub fn offset(&self) -> u64 {
+        self.offset
+    }
+
+    /// Returns the size of the allocation
+    pub fn size(&self) -> u64 {
+        self.size
+    }
+
+    /// Returns `true` when this allocation carries no memory (no chunk id).
+    pub fn is_null(&self) -> bool {
+        self.chunk_id.is_none()
+    }
+}
+
+/// One `ID3D12Heap` plus the sub-allocator that carves allocations out of it.
+#[derive(Debug)]
+struct MemoryBlock {
+    heap: ID3D12Heap,
+    // Heap size in bytes.
+    size: u64,
+    // `DedicatedBlockAllocator` for dedicated blocks, `FreeListAllocator` otherwise.
+    sub_allocator: Box<dyn allocator::SubAllocator>,
+}
+impl MemoryBlock {
+    /// Creates a new `ID3D12Heap` of `size` bytes and wraps it with a sub-allocator:
+    /// a `DedicatedBlockAllocator` when `dedicated` (the whole heap serves a single
+    /// allocation), otherwise a `FreeListAllocator`.
+    fn new(
+        device: &ID3D12Device,
+        size: u64,
+        heap_properties: &D3D12_HEAP_PROPERTIES,
+        heap_category: HeapCategory,
+        dedicated: bool,
+    ) -> Result<Self> {
+        let heap = {
+            let mut desc = D3D12_HEAP_DESC {
+                SizeInBytes: size,
+                Properties: *heap_properties,
+                // Use the MSAA placement alignment so the heap can hold any resource.
+                Alignment: D3D12_DEFAULT_MSAA_RESOURCE_PLACEMENT_ALIGNMENT as u64,
+                ..Default::default()
+            };
+            // On Resource Heap Tier 1 each heap may only hold one resource
+            // category; `HeapCategory::All` is only used on Tier 2+.
+            desc.Flags = match heap_category {
+                HeapCategory::All => D3D12_HEAP_FLAG_NONE,
+                HeapCategory::Buffer => D3D12_HEAP_FLAG_ALLOW_ONLY_BUFFERS,
+                HeapCategory::RtvDsvTexture => D3D12_HEAP_FLAG_ALLOW_ONLY_RT_DS_TEXTURES,
+                HeapCategory::OtherTexture => D3D12_HEAP_FLAG_ALLOW_ONLY_NON_RT_DS_TEXTURES,
+            };
+
+            let mut heap = None;
+            let hr = unsafe { device.CreateHeap(&desc, &mut heap) };
+            // Surface E_OUTOFMEMORY as the dedicated error variant so callers can react.
+            match hr {
+                Err(e) if e.code() == E_OUTOFMEMORY => Err(AllocationError::OutOfMemory),
+                Err(e) => Err(AllocationError::Internal(format!(
+                    "ID3D12Device::CreateHeap failed: {}",
+                    e
+                ))),
+                Ok(()) => heap.ok_or_else(|| {
+                    AllocationError::Internal(
+                        "ID3D12Heap pointer is null, but should not be.".into(),
+                    )
+                }),
+            }?
+        };
+
+        let sub_allocator: Box<dyn allocator::SubAllocator> = if dedicated {
+            Box::new(allocator::DedicatedBlockAllocator::new(size))
+        } else {
+            Box::new(allocator::FreeListAllocator::new(size))
+        };
+
+        Ok(Self {
+            heap,
+            size,
+            sub_allocator,
+        })
+    }
+}
+
+/// All memory blocks and statistics for one (heap category, memory location) pair.
+#[derive(Debug)]
+struct MemoryType {
+    // Slots become `None` when a block is freed so other block indices stay stable.
+    memory_blocks: Vec<Option<MemoryBlock>>,
+    committed_allocations: CommittedAllocationStatistics,
+    memory_location: MemoryLocation,
+    heap_category: HeapCategory,
+    heap_properties: D3D12_HEAP_PROPERTIES,
+    // This type's index within `Allocator::memory_types`.
+    memory_type_index: usize,
+    // Number of live non-dedicated blocks; `free()` keeps the last one as a cache.
+    active_general_blocks: usize,
+}
+
+impl MemoryType {
+    /// Sub-allocates `desc.size` bytes from this memory type.
+    ///
+    /// Requests larger than the configured block size get a dedicated block;
+    /// otherwise existing blocks are tried (newest first) and a new block is
+    /// created when none has room.
+    fn allocate(
+        &mut self,
+        device: &ID3D12DeviceVersion,
+        desc: &AllocationCreateDesc<'_>,
+        backtrace: Arc<Backtrace>,
+        allocation_sizes: &AllocationSizes,
+    ) -> Result<Allocation> {
+        let allocation_type = AllocationType::Linear;
+
+        // DEFAULT heaps use the device block size, CPU-visible heaps the host one.
+        let memblock_size = if self.heap_properties.Type == D3D12_HEAP_TYPE_DEFAULT {
+            allocation_sizes.device_memblock_size
+        } else {
+            allocation_sizes.host_memblock_size
+        };
+
+        let size = desc.size;
+        let alignment = desc.alignment;
+
+        // Create a dedicated block for large memory allocations
+        if size > memblock_size {
+            let mem_block = MemoryBlock::new(
+                device,
+                size,
+                &self.heap_properties,
+                self.heap_category,
+                true,
+            )?;
+
+            // Reuse an empty slot if one exists, otherwise append a new one.
+            let block_index = self.memory_blocks.iter().position(|block| block.is_none());
+            let block_index = match block_index {
+                Some(i) => {
+                    self.memory_blocks[i].replace(mem_block);
+                    i
+                }
+                None => {
+                    self.memory_blocks.push(Some(mem_block));
+                    self.memory_blocks.len() - 1
+                }
+            };
+
+            let mem_block = self.memory_blocks[block_index]
+                .as_mut()
+                .ok_or_else(|| AllocationError::Internal("Memory block must be Some".into()))?;
+
+            let (offset, chunk_id) = mem_block.sub_allocator.allocate(
+                size,
+                alignment,
+                allocation_type,
+                1,
+                desc.name,
+                backtrace,
+            )?;
+
+            return Ok(Allocation {
+                chunk_id: Some(chunk_id),
+                size,
+                offset,
+                memory_block_index: block_index,
+                memory_type_index: self.memory_type_index,
+                heap: mem_block.heap.clone(),
+                name: Some(desc.name.into()),
+            });
+        }
+
+        // Try existing blocks, newest first; remember an empty slot for reuse.
+        let mut empty_block_index = None;
+        for (mem_block_i, mem_block) in self.memory_blocks.iter_mut().enumerate().rev() {
+            if let Some(mem_block) = mem_block {
+                let allocation = mem_block.sub_allocator.allocate(
+                    size,
+                    alignment,
+                    allocation_type,
+                    1,
+                    desc.name,
+                    backtrace.clone(),
+                );
+
+                match allocation {
+                    Ok((offset, chunk_id)) => {
+                        return Ok(Allocation {
+                            chunk_id: Some(chunk_id),
+                            offset,
+                            size,
+                            memory_block_index: mem_block_i,
+                            memory_type_index: self.memory_type_index,
+                            heap: mem_block.heap.clone(),
+                            name: Some(desc.name.into()),
+                        });
+                    }
+                    Err(AllocationError::OutOfMemory) => {} // Block is full, continue search.
+                    Err(err) => return Err(err), // Unhandled error, return.
+                }
+            } else if empty_block_index.is_none() {
+                empty_block_index = Some(mem_block_i);
+            }
+        }
+
+        // No existing block had room; create a fresh general-purpose block.
+        let new_memory_block = MemoryBlock::new(
+            device,
+            memblock_size,
+            &self.heap_properties,
+            self.heap_category,
+            false,
+        )?;
+
+        let new_block_index = if let Some(block_index) = empty_block_index {
+            self.memory_blocks[block_index] = Some(new_memory_block);
+            block_index
+        } else {
+            self.memory_blocks.push(Some(new_memory_block));
+            self.memory_blocks.len() - 1
+        };
+
+        self.active_general_blocks += 1;
+
+        let mem_block = self.memory_blocks[new_block_index]
+            .as_mut()
+            .ok_or_else(|| AllocationError::Internal("Memory block must be Some".into()))?;
+        let allocation = mem_block.sub_allocator.allocate(
+            size,
+            alignment,
+            allocation_type,
+            1,
+            desc.name,
+            backtrace,
+        );
+        // A brand-new block is empty, so running out of memory here is a bug.
+        let (offset, chunk_id) = match allocation {
+            Err(AllocationError::OutOfMemory) => Err(AllocationError::Internal(
+                "Allocation that must succeed failed. This is a bug in the allocator.".into(),
+            )),
+            a => a,
+        }?;
+
+        Ok(Allocation {
+            chunk_id: Some(chunk_id),
+            offset,
+            size,
+            memory_block_index: new_block_index,
+            memory_type_index: self.memory_type_index,
+            heap: mem_block.heap.clone(),
+            name: Some(desc.name.into()),
+        })
+    }
+
+    /// Returns an allocation to its block. When the block becomes empty,
+    /// dedicated blocks are released immediately and general blocks are
+    /// released only while more than one is still active (one is kept cached).
+    #[allow(clippy::needless_pass_by_value)]
+    fn free(&mut self, allocation: Allocation) -> Result<()> {
+        let block_idx = allocation.memory_block_index;
+
+        let mem_block = self.memory_blocks[block_idx]
+            .as_mut()
+            .ok_or_else(|| AllocationError::Internal("Memory block must be Some.".into()))?;
+
+        mem_block.sub_allocator.free(allocation.chunk_id)?;
+
+        if mem_block.sub_allocator.is_empty() {
+            if mem_block.sub_allocator.supports_general_allocations() {
+                if self.active_general_blocks > 1 {
+                    let block = self.memory_blocks[block_idx].take();
+                    if block.is_none() {
+                        return Err(AllocationError::Internal(
+                            "Memory block must be Some.".into(),
+                        ));
+                    }
+                    // Note that `block` will be destroyed on `drop` here
+
+                    self.active_general_blocks -= 1;
+                }
+            } else {
+                let block = self.memory_blocks[block_idx].take();
+                if block.is_none() {
+                    return Err(AllocationError::Internal(
+                        "Memory block must be Some.".into(),
+                    ));
+                }
+                // Note that `block` will be destroyed on `drop` here
+            }
+        }
+
+        Ok(())
+    }
+}
+
+/// D3D12 memory allocator that sub-allocates out of larger `ID3D12Heap`s
+/// and also tracks statistics for committed resources.
+pub struct Allocator {
+    device: ID3D12DeviceVersion,
+    debug_settings: AllocatorDebugSettings,
+    // One entry per (heap category, memory location) combination; see `Allocator::new()`.
+    memory_types: Vec<MemoryType>,
+    allocation_sizes: AllocationSizes,
+}
+
+impl Allocator {
+    /// Returns the device this allocator was created with.
+    pub fn device(&self) -> &ID3D12DeviceVersion {
+        &self.device
+    }
+
+    /// Creates an allocator for `desc.device`.
+    ///
+    /// Queries `D3D12_FEATURE_D3D12_OPTIONS` for the resource heap tier: on
+    /// Tier 1 every (memory location, resource category) pair gets its own
+    /// memory type, on Tier 2+ heaps may mix categories (`HeapCategory::All`).
+    ///
+    /// # Errors
+    /// Returns [`AllocationError::Internal`] when the feature query fails.
+    pub fn new(desc: &AllocatorCreateDesc) -> Result<Self> {
+        // Perform AddRef on the device
+        let device = desc.device.clone();
+
+        // Query device for feature level
+        let mut options = Default::default();
+        unsafe {
+            device.CheckFeatureSupport(
+                D3D12_FEATURE_D3D12_OPTIONS,
+                <*mut D3D12_FEATURE_DATA_D3D12_OPTIONS>::cast(&mut options),
+                std::mem::size_of_val(&options) as u32,
+            )
+        }
+        .map_err(|e| {
+            AllocationError::Internal(format!("ID3D12Device::CheckFeatureSupport failed: {}", e))
+        })?;
+
+        let is_heap_tier1 = options.ResourceHeapTier == D3D12_RESOURCE_HEAP_TIER_1;
+
+        // Heap properties per memory location. CPU-visible locations use CUSTOM
+        // heaps in pool L0 with the matching CPU page property.
+        let heap_types = [
+            (
+                MemoryLocation::GpuOnly,
+                D3D12_HEAP_PROPERTIES {
+                    Type: D3D12_HEAP_TYPE_DEFAULT,
+                    ..Default::default()
+                },
+            ),
+            (
+                MemoryLocation::CpuToGpu,
+                D3D12_HEAP_PROPERTIES {
+                    Type: D3D12_HEAP_TYPE_CUSTOM,
+                    CPUPageProperty: D3D12_CPU_PAGE_PROPERTY_WRITE_COMBINE,
+                    MemoryPoolPreference: D3D12_MEMORY_POOL_L0,
+                    ..Default::default()
+                },
+            ),
+            (
+                MemoryLocation::GpuToCpu,
+                D3D12_HEAP_PROPERTIES {
+                    Type: D3D12_HEAP_TYPE_CUSTOM,
+                    CPUPageProperty: D3D12_CPU_PAGE_PROPERTY_WRITE_BACK,
+                    MemoryPoolPreference: D3D12_MEMORY_POOL_L0,
+                    ..Default::default()
+                },
+            ),
+        ];
+
+        // On Tier 1 hardware, split every location into one memory type per
+        // resource category; otherwise a single `All` category suffices.
+        let heap_types = if is_heap_tier1 {
+            heap_types
+                .iter()
+                .flat_map(|(memory_location, heap_properties)| {
+                    [
+                        (HeapCategory::Buffer, *memory_location, *heap_properties),
+                        (
+                            HeapCategory::RtvDsvTexture,
+                            *memory_location,
+                            *heap_properties,
+                        ),
+                        (
+                            HeapCategory::OtherTexture,
+                            *memory_location,
+                            *heap_properties,
+                        ),
+                    ]
+                    .to_vec()
+                })
+                .collect::<Vec<_>>()
+        } else {
+            heap_types
+                .iter()
+                .map(|(memory_location, heap_properties)| {
+                    (HeapCategory::All, *memory_location, *heap_properties)
+                })
+                .collect::<Vec<_>>()
+        };
+
+        let memory_types = heap_types
+            .iter()
+            .enumerate()
+            .map(
+                |(i, &(heap_category, memory_location, heap_properties))| MemoryType {
+                    memory_blocks: Vec::default(),
+                    memory_location,
+                    heap_category,
+                    heap_properties,
+                    memory_type_index: i,
+                    active_general_blocks: 0,
+                    committed_allocations: CommittedAllocationStatistics {
+                        num_allocations: 0,
+                        total_size: 0,
+                    },
+                },
+            )
+            .collect::<Vec<_>>();
+
+        Ok(Self {
+            memory_types,
+            device,
+            debug_settings: desc.debug_settings,
+            allocation_sizes: desc.allocation_sizes,
+        })
+    }
+
+    /// Sub-allocates memory matching `desc` from the first compatible memory type.
+    ///
+    /// # Errors
+    /// - [`AllocationError::InvalidAllocationCreateDesc`] for a zero size or a
+    ///   non-power-of-two alignment.
+    /// - [`AllocationError::NoCompatibleMemoryTypeFound`] when no memory type
+    ///   matches the requested location and resource category.
+    pub fn allocate(&mut self, desc: &AllocationCreateDesc<'_>) -> Result<Allocation> {
+        let size = desc.size;
+        let alignment = desc.alignment;
+
+        // Only capture (expensive) stack traces when explicitly enabled.
+        let backtrace = Arc::new(if self.debug_settings.store_stack_traces {
+            Backtrace::force_capture()
+        } else {
+            Backtrace::disabled()
+        });
+
+        if self.debug_settings.log_allocations {
+            debug!(
+                "Allocating `{}` of {} bytes with an alignment of {}.",
+                &desc.name, size, alignment
+            );
+            if self.debug_settings.log_stack_traces {
+                let backtrace = Backtrace::force_capture();
+                debug!("Allocation stack trace: {}", backtrace);
+            }
+        }
+
+        if size == 0 || !alignment.is_power_of_two() {
+            return Err(AllocationError::InvalidAllocationCreateDesc);
+        }
+
+        // Find memory type
+        let memory_type = self
+            .memory_types
+            .iter_mut()
+            .find(|memory_type| {
+                let is_location_compatible = desc.location == MemoryLocation::Unknown
+                    || desc.location == memory_type.memory_location;
+
+                let is_category_compatible = memory_type.heap_category == HeapCategory::All
+                    || memory_type.heap_category == desc.resource_category.into();
+
+                is_location_compatible && is_category_compatible
+            })
+            .ok_or(AllocationError::NoCompatibleMemoryTypeFound)?;
+
+        memory_type.allocate(&self.device, desc, backtrace, &self.allocation_sizes)
+    }
+
+    /// Frees an allocation previously returned by [`Self::allocate()`].
+    /// Null allocations are ignored.
+    pub fn free(&mut self, allocation: Allocation) -> Result<()> {
+        if self.debug_settings.log_frees {
+            let name = allocation.name.as_deref().unwrap_or("<null>");
+            debug!("Freeing `{}`.", name);
+            if self.debug_settings.log_stack_traces {
+                let backtrace = Backtrace::force_capture();
+                debug!("Free stack trace: {}", backtrace);
+            }
+        }
+
+        if allocation.is_null() {
+            return Ok(());
+        }
+
+        self.memory_types[allocation.memory_type_index].free(allocation)?;
+
+        Ok(())
+    }
+
+    /// Renames an allocation, both locally and in its sub-allocator's bookkeeping.
+    pub fn rename_allocation(&mut self, allocation: &mut Allocation, name: &str) -> Result<()> {
+        allocation.name = Some(name.into());
+
+        if allocation.is_null() {
+            return Ok(());
+        }
+
+        let mem_type = &mut self.memory_types[allocation.memory_type_index];
+        let mem_block = mem_type.memory_blocks[allocation.memory_block_index]
+            .as_mut()
+            .ok_or_else(|| AllocationError::Internal("Memory block must be Some.".into()))?;
+
+        mem_block
+            .sub_allocator
+            .rename_allocation(allocation.chunk_id, name)?;
+
+        Ok(())
+    }
+
+    /// Logs every live sub-allocation at `log_level`; used to report leaks.
+    pub fn report_memory_leaks(&self, log_level: Level) {
+        for (mem_type_i, mem_type) in self.memory_types.iter().enumerate() {
+            for (block_i, mem_block) in mem_type.memory_blocks.iter().enumerate() {
+                if let Some(mem_block) = mem_block {
+                    mem_block
+                        .sub_allocator
+                        .report_memory_leaks(log_level, mem_type_i, block_i);
+                }
+            }
+        }
+    }
+
+    /// Create a resource according to the provided parameters.
+    /// Created resources should be freed at the end of their lifetime by calling [`Self::free_resource()`].
+    ///
+    /// Committed resources carry no allocator-managed memory (only statistics
+    /// are tracked); placed resources are backed by an [`Allocation`] from this
+    /// allocator. Enhanced-barrier initial layouts require
+    /// [`ID3D12DeviceVersion::Device10`].
+    pub fn create_resource(&mut self, desc: &ResourceCreateDesc<'_>) -> Result<Resource> {
+        match desc.resource_type {
+            ResourceType::Committed {
+                heap_properties,
+                heap_flags,
+            } => {
+                let mut result: Option<ID3D12Resource> = None;
+
+                let clear_value: Option<*const D3D12_CLEAR_VALUE> =
+                    desc.clear_value.map(|v| -> *const _ { v });
+
+                if let Err(e) = unsafe {
+                    match (&self.device, desc.initial_state_or_layout) {
+                        (device, ResourceStateOrBarrierLayout::ResourceState(initial_state)) => {
+                            device.CreateCommittedResource(
+                                *heap_properties,
+                                *heap_flags,
+                                desc.resource_desc,
+                                initial_state,
+                                clear_value,
+                                &mut result,
+                            )
+                        }
+                        (
+                            ID3D12DeviceVersion::Device10(device),
+                            ResourceStateOrBarrierLayout::BarrierLayout(initial_layout),
+                        ) => {
+                            let resource_desc1 = D3D12_RESOURCE_DESC1 {
+                                Dimension: desc.resource_desc.Dimension,
+                                Alignment: desc.resource_desc.Alignment,
+                                Width: desc.resource_desc.Width,
+                                Height: desc.resource_desc.Height,
+                                DepthOrArraySize: desc.resource_desc.DepthOrArraySize,
+                                MipLevels: desc.resource_desc.MipLevels,
+                                Format: desc.resource_desc.Format,
+                                SampleDesc: desc.resource_desc.SampleDesc,
+                                Layout: desc.resource_desc.Layout,
+                                Flags: desc.resource_desc.Flags,
+                                // TODO: This is the only new field
+                                SamplerFeedbackMipRegion: D3D12_MIP_REGION::default(),
+                            };
+
+                            device.CreateCommittedResource3(
+                                *heap_properties,
+                                *heap_flags,
+                                &resource_desc1,
+                                initial_layout,
+                                clear_value,
+                                None, // TODO
+                                None, // TODO: https://github.com/microsoft/DirectX-Specs/blob/master/d3d/VulkanOn12.md#format-list-casting
+                                &mut result,
+                            )
+                        }
+                        _ => return Err(AllocationError::BarrierLayoutNeedsDevice10),
+                    }
+                } {
+                    return Err(AllocationError::Internal(format!(
+                        "ID3D12Device::CreateCommittedResource failed: {}",
+                        e
+                    )));
+                }
+
+                let resource = result.expect("Allocation succeeded but no resource was returned?");
+
+                let allocation_info = unsafe {
+                    self.device
+                        .GetResourceAllocationInfo(0, &[*desc.resource_desc])
+                };
+
+                // Locate the memory type so committed statistics can be updated.
+                let memory_type = self
+                    .memory_types
+                    .iter_mut()
+                    .find(|memory_type| {
+                        let is_location_compatible = desc.memory_location
+                            == MemoryLocation::Unknown
+                            || desc.memory_location == memory_type.memory_location;
+
+                        let is_category_compatible = memory_type.heap_category == HeapCategory::All
+                            || memory_type.heap_category == desc.resource_category.into();
+
+                        is_location_compatible && is_category_compatible
+                    })
+                    .ok_or(AllocationError::NoCompatibleMemoryTypeFound)?;
+
+                memory_type.committed_allocations.num_allocations += 1;
+                memory_type.committed_allocations.total_size += allocation_info.SizeInBytes;
+
+                Ok(Resource {
+                    name: desc.name.into(),
+                    allocation: None,
+                    resource: Some(resource),
+                    size: allocation_info.SizeInBytes,
+                    memory_location: desc.memory_location,
+                    memory_type_index: Some(memory_type.memory_type_index),
+                })
+            }
+            ResourceType::Placed => {
+                // First reserve memory from this allocator, then place the
+                // resource at the allocation's heap/offset.
+                let allocation_desc = {
+                    let allocation_info = unsafe {
+                        self.device
+                            .GetResourceAllocationInfo(0, &[*desc.resource_desc])
+                    };
+
+                    AllocationCreateDesc {
+                        name: desc.name,
+                        location: desc.memory_location,
+                        size: allocation_info.SizeInBytes,
+                        alignment: allocation_info.Alignment,
+                        resource_category: desc.resource_category,
+                    }
+                };
+
+                let allocation = self.allocate(&allocation_desc)?;
+
+                let mut result: Option<ID3D12Resource> = None;
+                if let Err(e) = unsafe {
+                    match (&self.device, desc.initial_state_or_layout) {
+                        (device, ResourceStateOrBarrierLayout::ResourceState(initial_state)) => {
+                            device.CreatePlacedResource(
+                                allocation.heap(),
+                                allocation.offset(),
+                                desc.resource_desc,
+                                initial_state,
+                                None,
+                                &mut result,
+                            )
+                        }
+                        (
+                            ID3D12DeviceVersion::Device10(device),
+                            ResourceStateOrBarrierLayout::BarrierLayout(initial_layout),
+                        ) => {
+                            let resource_desc1 = D3D12_RESOURCE_DESC1 {
+                                Dimension: desc.resource_desc.Dimension,
+                                Alignment: desc.resource_desc.Alignment,
+                                Width: desc.resource_desc.Width,
+                                Height: desc.resource_desc.Height,
+                                DepthOrArraySize: desc.resource_desc.DepthOrArraySize,
+                                MipLevels: desc.resource_desc.MipLevels,
+                                Format: desc.resource_desc.Format,
+                                SampleDesc: desc.resource_desc.SampleDesc,
+                                Layout: desc.resource_desc.Layout,
+                                Flags: desc.resource_desc.Flags,
+                                // TODO: This is the only new field
+                                SamplerFeedbackMipRegion: D3D12_MIP_REGION::default(),
+                            };
+                            device.CreatePlacedResource2(
+                                allocation.heap(),
+                                allocation.offset(),
+                                &resource_desc1,
+                                initial_layout,
+                                None,
+                                None, // TODO: https://github.com/microsoft/DirectX-Specs/blob/master/d3d/VulkanOn12.md#format-list-casting
+                                &mut result,
+                            )
+                        }
+                        _ => return Err(AllocationError::BarrierLayoutNeedsDevice10),
+                    }
+                } {
+                    return Err(AllocationError::Internal(format!(
+                        "ID3D12Device::CreatePlacedResource failed: {}",
+                        e
+                    )));
+                }
+
+                let resource = result.expect("Allocation succeeded but no resource was returned?");
+                let size = allocation.size();
+                Ok(Resource {
+                    name: desc.name.into(),
+                    allocation: Some(allocation),
+                    resource: Some(resource),
+                    size,
+                    memory_location: desc.memory_location,
+                    memory_type_index: None,
+                })
+            }
+        }
+    }
+
+    /// Free a resource and its memory.
+    ///
+    /// # Panics
+    /// Panics if the resource was already freed.
+    pub fn free_resource(&mut self, mut resource: Resource) -> Result<()> {
+        // Explicitly drop the resource (which is backed by a refcounted COM object)
+        // before freeing allocated memory. Windows-rs performs a Release() on drop().
+        let _ = resource
+            .resource
+            .take()
+            .expect("Resource was already freed.");
+
+        if let Some(allocation) = resource.allocation.take() {
+            self.free(allocation)
+        } else {
+            // Dx12 CommittedResources do not have an application managed allocation.
+            // We only have to update the tracked allocation count and memory usage.
+            if let Some(memory_type_index) = resource.memory_type_index {
+                let memory_type = &mut self.memory_types[memory_type_index];
+
+                memory_type.committed_allocations.num_allocations -= 1;
+                memory_type.committed_allocations.total_size -= resource.size;
+            }
+            Ok(())
+        }
+    }
+}
+
+impl fmt::Debug for Allocator {
+    /// Writes a breakdown of all live allocations, largest first. The formatter
+    /// precision (`{:.N?}`) caps how many entries are printed.
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        // Collect a report entry per live allocation across every block, while
+        // summing the total heap memory reserved by this allocator.
+        let mut reports = Vec::new();
+        let mut reserved_bytes = 0;
+
+        for memory_type in &self.memory_types {
+            for block in memory_type.memory_blocks.iter().flatten() {
+                reserved_bytes += block.size;
+                reports.extend(block.sub_allocator.report_allocations());
+            }
+        }
+
+        let used_bytes: u64 = reports.iter().map(|report| report.size).sum();
+
+        // Largest allocations first.
+        reports.sort_by_key(|alloc| std::cmp::Reverse(alloc.size));
+
+        writeln!(
+            f,
+            "================================================================",
+        )?;
+        writeln!(
+            f,
+            "ALLOCATION BREAKDOWN ({} / {})",
+            fmt_bytes(used_bytes),
+            fmt_bytes(reserved_bytes),
+        )?;
+
+        let entry_cap = f.precision().unwrap_or(usize::MAX);
+        for alloc in reports.iter().take(entry_cap) {
+            writeln!(
+                f,
+                "{:max_len$.max_len$}\t- {}",
+                alloc.name,
+                fmt_bytes(alloc.size),
+                max_len = allocator::VISUALIZER_TABLE_MAX_ENTRY_NAME_LEN,
+            )?;
+        }
+
+        Ok(())
+    }
+}
+
+impl Drop for Allocator {
+    fn drop(&mut self) {
+        if self.debug_settings.log_leaks_on_shutdown {
+            self.report_memory_leaks(Level::Warn);
+        }
+
+        // Rust drops fields in declaration order, which would release the
+        // ID3D12Device before the ID3D12Heaps nested in the memory blocks.
+        // Explicitly drop all remaining blocks first to avoid that.
+        for mem_type in &mut self.memory_types {
+            mem_type.memory_blocks.clear();
+        }
+    }
+}
diff --git a/third_party/rust/gpu-allocator/src/d3d12/visualizer.rs b/third_party/rust/gpu-allocator/src/d3d12/visualizer.rs
new file mode 100644
index 0000000000..1b3f71facb
--- /dev/null
+++ b/third_party/rust/gpu-allocator/src/d3d12/visualizer.rs
@@ -0,0 +1,252 @@
+#![allow(clippy::new_without_default)]
+
+use super::Allocator;
+use crate::visualizer::{
+ render_allocation_reports_ui, AllocationReportVisualizeSettings, ColorScheme,
+ MemoryChunksVisualizationSettings,
+};
+
+use windows::Win32::Graphics::Direct3D12::*;
+
+/// Per-window state for visualizing one memory block.
+struct AllocatorVisualizerBlockWindow {
+    // Indices locating the block inside `Allocator::memory_types`.
+    memory_type_index: usize,
+    block_index: usize,
+    settings: MemoryChunksVisualizationSettings,
+}
+
+impl AllocatorVisualizerBlockWindow {
+    /// Creates window state for the given block, starting from default
+    /// visualization settings.
+    fn new(memory_type_index: usize, block_index: usize) -> Self {
+        let settings = MemoryChunksVisualizationSettings::default();
+        Self {
+            memory_type_index,
+            block_index,
+            settings,
+        }
+    }
+}
+
+/// Interactive egui-based inspector for an [`Allocator`]'s internal state.
+pub struct AllocatorVisualizer {
+    // One entry per memory block the user chose to visualize in its own window.
+    selected_blocks: Vec<AllocatorVisualizerBlockWindow>,
+    color_scheme: ColorScheme,
+    breakdown_settings: AllocationReportVisualizeSettings,
+}
+
+/// Returns a human-readable name for a [`D3D12_HEAP_TYPE`] value.
+///
+/// Unknown or future values (e.g. `D3D12_HEAP_TYPE_GPU_UPLOAD` = 5 on newer
+/// runtimes) map to a placeholder instead of panicking on an out-of-bounds index.
+fn format_heap_type(heap_type: D3D12_HEAP_TYPE) -> &'static str {
+    let names = [
+        "D3D12_HEAP_TYPE_DEFAULT_INVALID",
+        "D3D12_HEAP_TYPE_DEFAULT",
+        "D3D12_HEAP_TYPE_UPLOAD",
+        "D3D12_HEAP_TYPE_READBACK",
+        "D3D12_HEAP_TYPE_CUSTOM",
+    ];
+
+    // `heap_type.0` is a raw i32 from the API; checked indexing keeps the
+    // visualizer from panicking on values outside the known range.
+    names
+        .get(heap_type.0 as usize)
+        .copied()
+        .unwrap_or("D3D12_HEAP_TYPE_UNKNOWN")
+}
+
+/// Returns a human-readable name for a [`D3D12_CPU_PAGE_PROPERTY`] value.
+///
+/// Values outside the known range map to a placeholder instead of panicking.
+fn format_cpu_page_property(prop: D3D12_CPU_PAGE_PROPERTY) -> &'static str {
+    let names = [
+        "D3D12_CPU_PAGE_PROPERTY_UNKNOWN",
+        "D3D12_CPU_PAGE_PROPERTY_NOT_AVAILABLE",
+        "D3D12_CPU_PAGE_PROPERTY_WRITE_COMBINE",
+        "D3D12_CPU_PAGE_PROPERTY_WRITE_BACK",
+    ];
+
+    // Checked indexing: a raw value outside the table must not panic the UI.
+    names
+        .get(prop.0 as usize)
+        .copied()
+        .unwrap_or("D3D12_CPU_PAGE_PROPERTY_INVALID")
+}
+
+/// Returns a human-readable name for a [`D3D12_MEMORY_POOL`] value.
+///
+/// Values outside the known range map to a placeholder instead of panicking.
+fn format_memory_pool(pool: D3D12_MEMORY_POOL) -> &'static str {
+    let names = [
+        "D3D12_MEMORY_POOL_UNKNOWN",
+        "D3D12_MEMORY_POOL_L0",
+        "D3D12_MEMORY_POOL_L1",
+    ];
+
+    // Checked indexing: a raw value outside the table must not panic the UI.
+    names
+        .get(pool.0 as usize)
+        .copied()
+        .unwrap_or("D3D12_MEMORY_POOL_INVALID")
+}
+
+impl AllocatorVisualizer {
+    /// Creates a visualizer with no block windows open and default settings.
+    pub fn new() -> Self {
+        Self {
+            selected_blocks: Vec::default(),
+            color_scheme: ColorScheme::default(),
+            breakdown_settings: Default::default(),
+        }
+    }
+
+    /// Overrides the color scheme used by the block visualizations.
+    pub fn set_color_scheme(&mut self, color_scheme: ColorScheme) {
+        self.color_scheme = color_scheme;
+    }
+
+    /// Renders a collapsible tree of all memory types and their blocks,
+    /// with a "visualize" button per block that opens a dedicated window.
+    pub fn render_memory_block_ui(&mut self, ui: &mut egui::Ui, alloc: &Allocator) {
+        ui.collapsing(
+            format!("Memory Types: ({} types)", alloc.memory_types.len()),
+            |ui| {
+                for (mem_type_idx, mem_type) in alloc.memory_types.iter().enumerate() {
+                    ui.collapsing(
+                        format!(
+                            "Type: {} ({} blocks)",
+                            mem_type_idx,
+                            mem_type.memory_blocks.len()
+                        ),
+                        |ui| {
+                            // Aggregate statistics over the live blocks of this type.
+                            let mut total_block_size = 0;
+                            let mut total_allocated = 0;
+
+                            for block in mem_type.memory_blocks.iter().flatten() {
+                                total_block_size += block.sub_allocator.size();
+                                total_allocated += block.sub_allocator.allocated();
+                            }
+
+                            let active_block_count = mem_type
+                                .memory_blocks
+                                .iter()
+                                .filter(|block| block.is_some())
+                                .count();
+
+                            ui.label(format!("heap category: {:?}", mem_type.heap_category));
+                            ui.label(format!(
+                                "Heap Type: {} ({})",
+                                format_heap_type(mem_type.heap_properties.Type),
+                                mem_type.heap_properties.Type.0
+                            ));
+                            ui.label(format!(
+                                "CpuPageProperty: {} ({})",
+                                format_cpu_page_property(mem_type.heap_properties.CPUPageProperty),
+                                mem_type.heap_properties.CPUPageProperty.0
+                            ));
+                            ui.label(format!(
+                                "MemoryPoolPreference: {} ({})",
+                                format_memory_pool(mem_type.heap_properties.MemoryPoolPreference),
+                                mem_type.heap_properties.MemoryPoolPreference.0
+                            ));
+                            ui.label(format!("total block size: {} KiB", total_block_size / 1024));
+                            ui.label(format!("total allocated: {} KiB", total_allocated / 1024));
+                            ui.label(format!(
+                                "committed resource allocations: {}",
+                                mem_type.committed_allocations.num_allocations
+                            ));
+                            // `total_size` is tracked in bytes; convert so the
+                            // value matches the KiB label.
+                            ui.label(format!(
+                                "total committed resource allocations: {} KiB",
+                                mem_type.committed_allocations.total_size / 1024
+                            ));
+                            ui.label(format!("block count: {}", active_block_count));
+
+                            for (block_idx, block) in mem_type.memory_blocks.iter().enumerate() {
+                                let Some(block) = block else { continue };
+
+                                ui.collapsing(format!("Block: {}", block_idx), |ui| {
+                                    ui.label(format!(
+                                        "size: {} KiB",
+                                        block.sub_allocator.size() / 1024
+                                    ));
+                                    ui.label(format!(
+                                        "allocated: {} KiB",
+                                        block.sub_allocator.allocated() / 1024
+                                    ));
+                                    ui.label(format!("D3D12 heap: {:?}", block.heap));
+                                    block.sub_allocator.draw_base_info(ui);
+
+                                    // Only open a new window when this block
+                                    // isn't already being visualized.
+                                    if block.sub_allocator.supports_visualization()
+                                        && ui.button("visualize").clicked()
+                                        && !self.selected_blocks.iter().any(|x| {
+                                            x.memory_type_index == mem_type_idx
+                                                && x.block_index == block_idx
+                                        })
+                                    {
+                                        self.selected_blocks.push(
+                                            AllocatorVisualizerBlockWindow::new(
+                                                mem_type_idx,
+                                                block_idx,
+                                            ),
+                                        );
+                                    }
+                                });
+                            }
+                        },
+                    );
+                }
+            },
+        );
+    }
+
+    /// Shows [`Self::render_memory_block_ui()`] inside a closable
+    /// "Allocator Memory Blocks" window.
+    pub fn render_memory_block_window(
+        &mut self,
+        ctx: &egui::Context,
+        allocator: &Allocator,
+        open: &mut bool,
+    ) {
+        egui::Window::new("Allocator Memory Blocks")
+            .open(open)
+            // Fixed: this window previously rendered the allocation breakdown
+            // (`render_breakdown_ui`) instead of the memory block overview it
+            // is named after.
+            .show(ctx, |ui| self.render_memory_block_ui(ui, allocator));
+    }
+
+    /// Draws one window per block selected via [`Self::render_memory_block_ui()`],
+    /// removing window state once the user closes a window.
+    pub fn render_memory_block_visualization_windows(
+        &mut self,
+        ctx: &egui::Context,
+        allocator: &Allocator,
+    ) {
+        // Draw each window.
+        let color_scheme = &self.color_scheme;
+
+        self.selected_blocks.retain_mut(|window| {
+            let mut open = true;
+
+            egui::Window::new(format!(
+                "Block Visualizer {}:{}",
+                window.memory_type_index, window.block_index
+            ))
+            .default_size([1920.0 * 0.5, 1080.0 * 0.5])
+            .open(&mut open)
+            .show(ctx, |ui| {
+                let memblock = &allocator.memory_types[window.memory_type_index].memory_blocks
+                    [window.block_index]
+                    .as_ref();
+                if let Some(memblock) = memblock {
+                    ui.label(format!(
+                        "Memory type {}, Memory block {}, Block size: {} KiB",
+                        window.memory_type_index,
+                        window.block_index,
+                        memblock.sub_allocator.size() / 1024
+                    ));
+
+                    window
+                        .settings
+                        .ui(ui, allocator.debug_settings.store_stack_traces);
+
+                    ui.separator();
+
+                    memblock
+                        .sub_allocator
+                        .draw_visualization(color_scheme, ui, &window.settings);
+                } else {
+                    // The block may have been freed since the window was opened.
+                    ui.label("Deallocated memory block");
+                }
+            });
+
+            // Keep the window only while the user hasn't closed it.
+            open
+        });
+    }
+
+    /// Renders the table of all live allocations across every memory block.
+    pub fn render_breakdown_ui(&mut self, ui: &mut egui::Ui, allocator: &Allocator) {
+        render_allocation_reports_ui(
+            ui,
+            &mut self.breakdown_settings,
+            allocator
+                .memory_types
+                .iter()
+                .flat_map(|memory_type| memory_type.memory_blocks.iter())
+                .flatten()
+                .flat_map(|memory_block| memory_block.sub_allocator.report_allocations()),
+        );
+    }
+
+    /// Shows [`Self::render_breakdown_ui()`] inside a closable
+    /// "Allocator Breakdown" window.
+    pub fn render_breakdown_window(
+        &mut self,
+        ctx: &egui::Context,
+        allocator: &Allocator,
+        open: &mut bool,
+    ) {
+        egui::Window::new("Allocator Breakdown")
+            .open(open)
+            .show(ctx, |ui| self.render_breakdown_ui(ui, allocator));
+    }
+}
diff --git a/third_party/rust/gpu-allocator/src/lib.rs b/third_party/rust/gpu-allocator/src/lib.rs
new file mode 100644
index 0000000000..636e239f59
--- /dev/null
+++ b/third_party/rust/gpu-allocator/src/lib.rs
@@ -0,0 +1,277 @@
+//! This crate provides a memory allocator for Vulkan and DirectX 12, written entirely in Rust.
+//!
+//! # [Windows-rs] and [winapi]
+//!
+//! `gpu-allocator` recently migrated from [winapi] to [windows-rs] but still provides convenient helpers to convert to and from [winapi] types, enabled when compiling with the `public-winapi` crate feature.
+//!
+//! [Windows-rs]: https://github.com/microsoft/windows-rs
+//! [winapi]: https://github.com/retep998/winapi-rs
+//!
+//! # Setting up the Vulkan memory allocator
+//!
+//! ```no_run
+//! # #[cfg(feature = "vulkan")]
+//! # fn main() {
+//! use gpu_allocator::vulkan::*;
+//! # use ash::vk;
+//! # let device = todo!();
+//! # let instance = todo!();
+//! # let physical_device = todo!();
+//!
+//! let mut allocator = Allocator::new(&AllocatorCreateDesc {
+//! instance,
+//! device,
+//! physical_device,
+//! debug_settings: Default::default(),
+//! buffer_device_address: true, // Ideally, check the BufferDeviceAddressFeatures struct.
+//! allocation_sizes: Default::default(),
+//! });
+//! # }
+//! # #[cfg(not(feature = "vulkan"))]
+//! # fn main() {}
+//! ```
+//!
+//! # Simple Vulkan allocation example
+//!
+//! ```no_run
+//! # #[cfg(feature = "vulkan")]
+//! # fn main() {
+//! use gpu_allocator::vulkan::*;
+//! use gpu_allocator::MemoryLocation;
+//! # use ash::vk;
+//! # let device = todo!();
+//! # let instance = todo!();
+//! # let physical_device = todo!();
+//! # let mut allocator = Allocator::new(&AllocatorCreateDesc {
+//! # instance,
+//! # device,
+//! # physical_device,
+//! # debug_settings: Default::default(),
+//! # buffer_device_address: true, // Ideally, check the BufferDeviceAddressFeatures struct.
+//! # allocation_sizes: Default::default(),
+//! # }).unwrap();
+//!
+//! // Setup vulkan info
+//! let vk_info = vk::BufferCreateInfo::builder()
+//! .size(512)
+//! .usage(vk::BufferUsageFlags::STORAGE_BUFFER);
+//!
+//! let buffer = unsafe { device.create_buffer(&vk_info, None) }.unwrap();
+//! let requirements = unsafe { device.get_buffer_memory_requirements(buffer) };
+//!
+//! let allocation = allocator
+//! .allocate(&AllocationCreateDesc {
+//! name: "Example allocation",
+//! requirements,
+//! location: MemoryLocation::CpuToGpu,
+//! linear: true, // Buffers are always linear
+//! allocation_scheme: AllocationScheme::GpuAllocatorManaged,
+//! }).unwrap();
+//!
+//! // Bind memory to the buffer
+//! unsafe { device.bind_buffer_memory(buffer, allocation.memory(), allocation.offset()).unwrap() };
+//!
+//! // Cleanup
+//! allocator.free(allocation).unwrap();
+//! unsafe { device.destroy_buffer(buffer, None) };
+//! # }
+//! # #[cfg(not(feature = "vulkan"))]
+//! # fn main() {}
+//! ```
+//!
+//! # Setting up the D3D12 memory allocator
+//!
+//! ```no_run
+//! # #[cfg(feature = "d3d12")]
+//! # fn main() {
+//! use gpu_allocator::d3d12::*;
+//! # let device = todo!();
+//!
+//! let mut allocator = Allocator::new(&AllocatorCreateDesc {
+//! device: ID3D12DeviceVersion::Device(device),
+//! debug_settings: Default::default(),
+//! allocation_sizes: Default::default(),
+//! });
+//! # }
+//! # #[cfg(not(feature = "d3d12"))]
+//! # fn main() {}
+//! ```
+//!
+//! # Simple d3d12 allocation example
+//!
+//! ```no_run
+//! # #[cfg(feature = "d3d12")]
+//! # fn main() -> windows::core::Result<()> {
+//! use gpu_allocator::d3d12::*;
+//! use gpu_allocator::MemoryLocation;
+//! # use windows::Win32::Graphics::{Dxgi, Direct3D12};
+//! # let device = todo!();
+//!
+//! # let mut allocator = Allocator::new(&AllocatorCreateDesc {
+//! # device: ID3D12DeviceVersion::Device(device),
+//! # debug_settings: Default::default(),
+//! # allocation_sizes: Default::default(),
+//! # }).unwrap();
+//!
+//! let buffer_desc = Direct3D12::D3D12_RESOURCE_DESC {
+//! Dimension: Direct3D12::D3D12_RESOURCE_DIMENSION_BUFFER,
+//! Alignment: 0,
+//! Width: 512,
+//! Height: 1,
+//! DepthOrArraySize: 1,
+//! MipLevels: 1,
+//! Format: Dxgi::Common::DXGI_FORMAT_UNKNOWN,
+//! SampleDesc: Dxgi::Common::DXGI_SAMPLE_DESC {
+//! Count: 1,
+//! Quality: 0,
+//! },
+//! Layout: Direct3D12::D3D12_TEXTURE_LAYOUT_ROW_MAJOR,
+//! Flags: Direct3D12::D3D12_RESOURCE_FLAG_NONE,
+//! };
+//! let allocation_desc = AllocationCreateDesc::from_d3d12_resource_desc(
+//! &allocator.device(),
+//! &buffer_desc,
+//! "Example allocation",
+//! MemoryLocation::GpuOnly,
+//! );
+//! let allocation = allocator.allocate(&allocation_desc).unwrap();
+//! let mut resource: Option<Direct3D12::ID3D12Resource> = None;
+//! let hr = unsafe {
+//! device.CreatePlacedResource(
+//! allocation.heap(),
+//! allocation.offset(),
+//! &buffer_desc,
+//! Direct3D12::D3D12_RESOURCE_STATE_COMMON,
+//! None,
+//! &mut resource,
+//! )
+//! }?;
+//!
+//! // Cleanup
+//! drop(resource);
+//! allocator.free(allocation).unwrap();
+//! # Ok(())
+//! # }
+//! # #[cfg(not(feature = "d3d12"))]
+//! # fn main() {}
+//! ```
+
+mod result;
+pub use result::*;
+
+pub(crate) mod allocator;
+
+#[cfg(feature = "visualizer")]
+pub mod visualizer;
+
+#[cfg(feature = "vulkan")]
+pub mod vulkan;
+
+#[cfg(all(windows, feature = "d3d12"))]
+pub mod d3d12;
+
/// Hint describing where an allocation's memory should live and how it will
/// be accessed by the CPU and GPU.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub enum MemoryLocation {
    /// The allocated resource is stored at an unknown memory location; let the driver decide what's the best location
    Unknown,
    /// Store the allocation in GPU only accessible memory - typically this is the faster GPU resource and this should be
    /// where most of the allocations live.
    GpuOnly,
    /// Memory useful for uploading data to the GPU and potentially for constant buffers
    CpuToGpu,
    /// Memory useful for CPU readback of data
    GpuToCpu,
}
+
/// Flags controlling how much diagnostic output the allocator produces.
#[derive(Copy, Clone, Debug)]
pub struct AllocatorDebugSettings {
    /// Logs out debugging information about the various heaps the current device has on startup
    pub log_memory_information: bool,
    /// Logs out all memory leaks on shutdown with log level Warn
    pub log_leaks_on_shutdown: bool,
    /// Stores a copy of the full backtrace for every allocation made, this makes it easier to debug leaks
    /// or other memory allocations, but storing stack traces has a RAM overhead so should be disabled
    /// in shipping applications.
    pub store_stack_traces: bool,
    /// Log out every allocation as it's being made with log level Debug, rather spammy so off by default
    pub log_allocations: bool,
    /// Log out every free that is being called with log level Debug, rather spammy so off by default
    pub log_frees: bool,
    /// Log out stack traces when either `log_allocations` or `log_frees` is enabled.
    pub log_stack_traces: bool,
}

impl Default for AllocatorDebugSettings {
    /// Leak reporting on shutdown is the only diagnostic enabled by default;
    /// every other option is opt-in because of its runtime or memory cost.
    fn default() -> Self {
        Self {
            log_leaks_on_shutdown: true,
            log_memory_information: false,
            store_stack_traces: false,
            log_allocations: false,
            log_frees: false,
            log_stack_traces: false,
        }
    }
}
+
/// The sizes of the memory blocks that the allocator will create.
///
/// Useful for tuning the allocator to your application's needs. For example most games will be fine with the default
/// values, but eg. an app might want to use smaller block sizes to reduce the amount of memory used.
///
/// Clamped between 4MB and 256MB, and rounds up to the nearest multiple of 4MB for alignment reasons.
///
/// Fields are private; construct via [`AllocationSizes::new`] (which performs
/// the clamping/rounding) or [`Default`].
#[derive(Clone, Copy, Debug)]
pub struct AllocationSizes {
    /// The size of the memory blocks that will be created for the GPU only memory type.
    ///
    /// Defaults to 256MB.
    device_memblock_size: u64,
    /// The size of the memory blocks that will be created for the CPU visible memory types.
    ///
    /// Defaults to 64MB.
    host_memblock_size: u64,
}
+
+impl AllocationSizes {
+ pub fn new(device_memblock_size: u64, host_memblock_size: u64) -> Self {
+ const FOUR_MB: u64 = 4 * 1024 * 1024;
+ const TWO_HUNDRED_AND_FIFTY_SIX_MB: u64 = 256 * 1024 * 1024;
+
+ let mut device_memblock_size =
+ device_memblock_size.clamp(FOUR_MB, TWO_HUNDRED_AND_FIFTY_SIX_MB);
+ let mut host_memblock_size =
+ host_memblock_size.clamp(FOUR_MB, TWO_HUNDRED_AND_FIFTY_SIX_MB);
+
+ if device_memblock_size % FOUR_MB != 0 {
+ let val = device_memblock_size / FOUR_MB + 1;
+ device_memblock_size = val * FOUR_MB;
+ log::warn!(
+ "Device memory block size must be a multiple of 4MB, clamping to {}MB",
+ device_memblock_size / 1024 / 1024
+ )
+ }
+
+ if host_memblock_size % FOUR_MB != 0 {
+ let val = host_memblock_size / FOUR_MB + 1;
+ host_memblock_size = val * FOUR_MB;
+ log::warn!(
+ "Host memory block size must be a multiple of 4MB, clamping to {}MB",
+ host_memblock_size / 1024 / 1024
+ )
+ }
+
+ Self {
+ device_memblock_size,
+ host_memblock_size,
+ }
+ }
+}
+
+impl Default for AllocationSizes {
+ fn default() -> Self {
+ Self {
+ device_memblock_size: 256 * 1024 * 1024,
+ host_memblock_size: 64 * 1024 * 1024,
+ }
+ }
+}
diff --git a/third_party/rust/gpu-allocator/src/result.rs b/third_party/rust/gpu-allocator/src/result.rs
new file mode 100644
index 0000000000..7d5336d582
--- /dev/null
+++ b/third_party/rust/gpu-allocator/src/result.rs
@@ -0,0 +1,21 @@
+use thiserror::Error;
+
+#[derive(Error, Debug)]
+pub enum AllocationError {
+ #[error("Out of memory")]
+ OutOfMemory,
+ #[error("Failed to map memory: {0}")]
+ FailedToMap(String),
+ #[error("No compatible memory type available")]
+ NoCompatibleMemoryTypeFound,
+ #[error("Invalid AllocationCreateDesc")]
+ InvalidAllocationCreateDesc,
+ #[error("Invalid AllocatorCreateDesc {0}")]
+ InvalidAllocatorCreateDesc(String),
+ #[error("Internal error: {0}")]
+ Internal(String),
+ #[error("Initial `BARRIER_LAYOUT` needs `Device10`")]
+ BarrierLayoutNeedsDevice10,
+}
+
+pub type Result<V, E = AllocationError> = ::std::result::Result<V, E>;
diff --git a/third_party/rust/gpu-allocator/src/visualizer/allocation_reports.rs b/third_party/rust/gpu-allocator/src/visualizer/allocation_reports.rs
new file mode 100644
index 0000000000..a57f212b8e
--- /dev/null
+++ b/third_party/rust/gpu-allocator/src/visualizer/allocation_reports.rs
@@ -0,0 +1,138 @@
+use std::backtrace::BacktraceStatus;
+
+use egui::{Label, Response, Sense, Ui, WidgetText};
+use egui_extras::{Column, TableBuilder};
+
+use crate::allocator::{fmt_bytes, AllocationReport};
+
/// Column by which the allocation-report table is currently sorted.
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)]
pub(crate) enum AllocationReportVisualizeSorting {
    /// Keep the incoming report order (default).
    #[default]
    None,
    /// Sort by the allocation's index in the report list.
    Idx,
    /// Sort by allocation name.
    Name,
    /// Sort by allocation size.
    Size,
}
+
/// UI state of the allocation-report table: filter text and sort order.
#[derive(Debug, Default)]
pub(crate) struct AllocationReportVisualizeSettings {
    // Case-insensitive substring filter applied to allocation names.
    pub filter: String,
    // Currently selected sort column.
    pub sorting: AllocationReportVisualizeSorting,
    // Sort direction of the selected column.
    pub ascending: bool,
}
+
+pub(crate) fn render_allocation_reports_ui(
+ ui: &mut Ui,
+ settings: &mut AllocationReportVisualizeSettings,
+ allocations: impl IntoIterator<Item = AllocationReport>,
+) {
+ ui.horizontal(|ui| {
+ ui.label("Filter");
+ ui.text_edit_singleline(&mut settings.filter);
+ });
+ let breakdown_filter = settings.filter.to_lowercase();
+
+ let mut allocations = allocations
+ .into_iter()
+ .enumerate()
+ .filter(|(_, report)| report.name.to_lowercase().contains(&breakdown_filter))
+ .collect::<Vec<_>>();
+
+ let row_height = ui.text_style_height(&egui::TextStyle::Body);
+
+ let table = TableBuilder::new(ui)
+ .striped(true)
+ .resizable(true)
+ .column(Column::exact(30.0))
+ .column(Column::initial(300.0).at_least(200.0).clip(true))
+ .column(Column::exact(70.0));
+
+ fn header_button(ui: &mut Ui, label: &str) -> Response {
+ let label = WidgetText::from(label).strong();
+ let label = Label::new(label.strong()).sense(Sense::click());
+ ui.add(label)
+ }
+
+ let table = table.header(row_height, |mut row| {
+ row.col(|ui| {
+ if header_button(ui, "Idx").clicked() {
+ if settings.sorting == AllocationReportVisualizeSorting::Idx {
+ settings.ascending = !settings.ascending;
+ } else {
+ settings.sorting = AllocationReportVisualizeSorting::Idx;
+ settings.ascending = false;
+ }
+ }
+ });
+ row.col(|ui| {
+ if header_button(ui, "Name").clicked() {
+ if settings.sorting == AllocationReportVisualizeSorting::Name {
+ settings.ascending = !settings.ascending;
+ } else {
+ settings.sorting = AllocationReportVisualizeSorting::Name;
+ settings.ascending = false;
+ }
+ }
+ });
+ row.col(|ui| {
+ if header_button(ui, "Size").clicked() {
+ if settings.sorting == AllocationReportVisualizeSorting::Size {
+ settings.ascending = !settings.ascending;
+ } else {
+ settings.sorting = AllocationReportVisualizeSorting::Size;
+ settings.ascending = false;
+ }
+ }
+ });
+ });
+
+ match (settings.sorting, settings.ascending) {
+ (AllocationReportVisualizeSorting::None, _) => {}
+ (AllocationReportVisualizeSorting::Idx, true) => allocations.sort_by_key(|(idx, _)| *idx),
+ (AllocationReportVisualizeSorting::Idx, false) => {
+ allocations.sort_by_key(|(idx, _)| std::cmp::Reverse(*idx))
+ }
+ (AllocationReportVisualizeSorting::Name, true) => {
+ allocations.sort_by(|(_, alloc1), (_, alloc2)| alloc1.name.cmp(&alloc2.name))
+ }
+ (AllocationReportVisualizeSorting::Name, false) => {
+ allocations.sort_by(|(_, alloc1), (_, alloc2)| alloc1.name.cmp(&alloc2.name).reverse())
+ }
+ (AllocationReportVisualizeSorting::Size, true) => {
+ allocations.sort_by_key(|(_, alloc)| alloc.size)
+ }
+ (AllocationReportVisualizeSorting::Size, false) => {
+ allocations.sort_by_key(|(_, alloc)| std::cmp::Reverse(alloc.size))
+ }
+ }
+
+ table.body(|mut body| {
+ for (idx, alloc) in allocations {
+ body.row(row_height, |mut row| {
+ let AllocationReport {
+ name,
+ size,
+ backtrace,
+ } = alloc;
+
+ row.col(|ui| {
+ ui.label(idx.to_string());
+ });
+
+ let resp = row.col(|ui| {
+ ui.label(name);
+ });
+
+ if backtrace.status() == BacktraceStatus::Captured {
+ resp.1.on_hover_ui(|ui| {
+ ui.label(backtrace.to_string());
+ });
+ }
+
+ row.col(|ui| {
+ ui.label(fmt_bytes(size));
+ });
+ });
+ }
+ });
+}
diff --git a/third_party/rust/gpu-allocator/src/visualizer/memory_chunks.rs b/third_party/rust/gpu-allocator/src/visualizer/memory_chunks.rs
new file mode 100644
index 0000000000..3ef3ff22a0
--- /dev/null
+++ b/third_party/rust/gpu-allocator/src/visualizer/memory_chunks.rs
@@ -0,0 +1,134 @@
+use std::backtrace::BacktraceStatus;
+
+use egui::{Color32, DragValue, Rect, ScrollArea, Sense, Ui, Vec2};
+
+use crate::allocator::free_list_allocator::MemoryChunk;
+
+use super::ColorScheme;
+
/// Per-window display settings for the memory-chunk visualizer.
pub(crate) struct MemoryChunksVisualizationSettings {
    /// How many bytes one drawn line of the visualization represents.
    pub width_in_bytes: u64,
    /// Whether captured allocation backtraces are shown in the hover tooltip.
    pub show_backtraces: bool,
}

impl Default for MemoryChunksVisualizationSettings {
    /// 1 KiB per line, backtraces hidden.
    fn default() -> Self {
        MemoryChunksVisualizationSettings {
            show_backtraces: false,
            width_in_bytes: 1024,
        }
    }
}
+
+impl MemoryChunksVisualizationSettings {
+ pub fn ui(&mut self, ui: &mut Ui, store_stack_traces: bool) {
+ if store_stack_traces {
+ ui.checkbox(&mut self.show_backtraces, "Show backtraces");
+ }
+
+ // Slider for changing the 'zoom' level of the visualizer.
+ const BYTES_PER_UNIT_MIN: i32 = 1;
+ const BYTES_PER_UNIT_MAX: i32 = 1024 * 1024;
+
+ ui.horizontal(|ui| {
+ ui.add(
+ DragValue::new(&mut self.width_in_bytes)
+ .clamp_range(BYTES_PER_UNIT_MIN..=BYTES_PER_UNIT_MAX)
+ .speed(10.0),
+ );
+ ui.label("Bytes per line");
+ });
+ }
+}
+
+pub(crate) fn render_memory_chunks_ui<'a>(
+ ui: &mut Ui,
+ color_scheme: &ColorScheme,
+ settings: &MemoryChunksVisualizationSettings,
+ total_size_in_bytes: u64,
+ data: impl IntoIterator<Item = &'a MemoryChunk>,
+) {
+ let line_height = ui.text_style_height(&egui::TextStyle::Body);
+ let number_of_rows =
+ (total_size_in_bytes as f32 / settings.width_in_bytes as f32).ceil() as usize;
+
+ ScrollArea::new([false, true]).show_rows(ui, line_height, number_of_rows, |ui, range| {
+ // Let range be in bytes
+ let start_in_bytes = range.start as u64 * settings.width_in_bytes;
+ let end_in_bytes = range.end as u64 * settings.width_in_bytes;
+
+ let mut data = data
+ .into_iter()
+ .filter(|chunk| {
+ (chunk.offset + chunk.size) > start_in_bytes && chunk.offset < end_in_bytes
+ })
+ .collect::<Vec<_>>();
+ data.sort_by_key(|chunk| chunk.offset);
+
+ let screen_width = ui.available_width();
+ let mut cursor_idx = 0;
+ let mut bytes_required = data[cursor_idx].offset + data[cursor_idx].size - start_in_bytes;
+
+ for _ in range {
+ ui.horizontal(|ui| {
+ let mut bytes_left = settings.width_in_bytes;
+ let mut cursor = ui.cursor().min;
+
+ while cursor_idx < data.len() && bytes_left > 0 {
+ // Block is depleted, so reset for more chunks
+ while bytes_required == 0 {
+ cursor_idx += 1;
+ if cursor_idx < data.len() {
+ bytes_required = data[cursor_idx].size;
+ }
+ continue;
+ }
+
+ let bytes_used = bytes_required.min(bytes_left);
+ let width_used =
+ bytes_used as f32 * screen_width / settings.width_in_bytes as f32;
+
+ // Draw the rectangle
+ let resp = ui.allocate_rect(
+ Rect::from_min_size(cursor, Vec2::new(width_used, line_height)),
+ Sense::click(),
+ );
+
+ if ui.is_rect_visible(resp.rect) {
+ ui.painter().rect(
+ resp.rect,
+ egui::Rounding::ZERO,
+ color_scheme
+ .get_allocation_type_color(data[cursor_idx].allocation_type),
+ egui::Stroke::new(1.0, Color32::BLACK),
+ );
+
+ resp.on_hover_ui_at_pointer(|ui| {
+ let chunk = &data[cursor_idx];
+ ui.label(format!("id: {}", chunk.chunk_id));
+ ui.label(format!("offset: 0x{:x}", chunk.offset));
+ ui.label(format!("size: 0x{:x}", chunk.size));
+ ui.label(format!(
+ "allocation_type: {}",
+ chunk.allocation_type.as_str()
+ ));
+ if let Some(name) = &chunk.name {
+ ui.label(format!("name: {}", name));
+ }
+ if settings.show_backtraces
+ && chunk.backtrace.status() == BacktraceStatus::Captured
+ {
+ ui.label(chunk.backtrace.to_string());
+ }
+ });
+ }
+
+ // Update our cursors
+ cursor.x += width_used;
+ bytes_left -= bytes_used;
+ bytes_required -= bytes_used;
+ }
+ });
+ }
+ });
+}
diff --git a/third_party/rust/gpu-allocator/src/visualizer/mod.rs b/third_party/rust/gpu-allocator/src/visualizer/mod.rs
new file mode 100644
index 0000000000..113c8454c2
--- /dev/null
+++ b/third_party/rust/gpu-allocator/src/visualizer/mod.rs
@@ -0,0 +1,56 @@
+use egui::{Color32, Ui};
+
+mod allocation_reports;
+mod memory_chunks;
+
+pub(crate) use allocation_reports::*;
+pub(crate) use memory_chunks::*;
+
+use crate::allocator::AllocationType;
+
/// Default color for free chunks.
pub const DEFAULT_COLOR_ALLOCATION_TYPE_FREE: Color32 = Color32::from_rgb(159, 159, 159); // gray
/// Default color for linear allocations.
pub const DEFAULT_COLOR_ALLOCATION_TYPE_LINEAR: Color32 = Color32::from_rgb(91, 206, 250); // blue
/// Default color for non-linear allocations.
pub const DEFAULT_COLOR_ALLOCATION_TYPE_NON_LINEAR: Color32 = Color32::from_rgb(250, 169, 184); // pink

/// Colors the visualizer uses to paint chunks of each [`AllocationType`].
#[derive(Clone)]
pub struct ColorScheme {
    /// Color for [`AllocationType::Free`] chunks.
    pub free_color: Color32,
    /// Color for [`AllocationType::Linear`] chunks.
    pub linear_color: Color32,
    /// Color for [`AllocationType::NonLinear`] chunks.
    pub non_linear_color: Color32,
}
+
+impl Default for ColorScheme {
+ fn default() -> Self {
+ Self {
+ free_color: DEFAULT_COLOR_ALLOCATION_TYPE_FREE,
+ linear_color: DEFAULT_COLOR_ALLOCATION_TYPE_LINEAR,
+ non_linear_color: DEFAULT_COLOR_ALLOCATION_TYPE_NON_LINEAR,
+ }
+ }
+}
+
+impl ColorScheme {
+ pub(crate) fn get_allocation_type_color(&self, allocation_type: AllocationType) -> Color32 {
+ match allocation_type {
+ AllocationType::Free => self.free_color,
+ AllocationType::Linear => self.linear_color,
+ AllocationType::NonLinear => self.non_linear_color,
+ }
+ }
+}
+
/// Hooks a sub-allocator implements so the visualizer can draw its contents.
/// Every method has a no-op/placeholder default for sub-allocators without
/// visualization support.
pub(crate) trait SubAllocatorVisualizer {
    /// Whether this sub-allocator can draw a chunk visualization.
    fn supports_visualization(&self) -> bool {
        false
    }
    /// Draws summary information about the sub-allocator.
    fn draw_base_info(&self, ui: &mut Ui) {
        ui.label("No sub allocator information available");
    }
    /// Draws the chunk visualization; the default draws nothing.
    fn draw_visualization(
        &self,
        _color_scheme: &ColorScheme,
        _ui: &mut Ui,
        _settings: &MemoryChunksVisualizationSettings,
    ) {
    }
}
diff --git a/third_party/rust/gpu-allocator/src/vulkan/mod.rs b/third_party/rust/gpu-allocator/src/vulkan/mod.rs
new file mode 100644
index 0000000000..39344b509d
--- /dev/null
+++ b/third_party/rust/gpu-allocator/src/vulkan/mod.rs
@@ -0,0 +1,993 @@
+#![deny(clippy::unimplemented, clippy::unwrap_used, clippy::ok_expect)]
+
+#[cfg(feature = "visualizer")]
+mod visualizer;
+#[cfg(feature = "visualizer")]
+pub use visualizer::AllocatorVisualizer;
+
+use std::{backtrace::Backtrace, fmt, marker::PhantomData, sync::Arc};
+
+use ash::vk;
+use log::{debug, Level};
+
+use super::allocator::{self, AllocationType};
+use crate::{
+ allocator::fmt_bytes, AllocationError, AllocationSizes, AllocatorDebugSettings, MemoryLocation,
+ Result,
+};
+
/// How the memory backing an allocation should be obtained.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum AllocationScheme {
    /// Perform a dedicated, driver-managed allocation for the given buffer, allowing
    /// it to perform optimizations on this type of allocation.
    DedicatedBuffer(vk::Buffer),
    /// Perform a dedicated, driver-managed allocation for the given image, allowing
    /// it to perform optimizations on this type of allocation.
    DedicatedImage(vk::Image),
    /// The memory for this resource will be allocated and managed by gpu-allocator.
    GpuAllocatorManaged,
}
+
/// Parameters describing a single requested allocation.
#[derive(Clone, Debug)]
pub struct AllocationCreateDesc<'a> {
    /// Name of the allocation, for tracking and debugging purposes
    pub name: &'a str,
    /// Vulkan memory requirements for an allocation
    pub requirements: vk::MemoryRequirements,
    /// Location where the memory allocation should be stored
    pub location: MemoryLocation,
    /// If the resource is linear (buffer / linear texture) or a regular (tiled) texture.
    pub linear: bool,
    /// Determines how this allocation should be managed.
    pub allocation_scheme: AllocationScheme,
}
+
/// Wrapper type to only mark a raw pointer [`Send`] + [`Sync`] without having to
/// mark the entire [`Allocation`] as such, instead relying on the compiler to
/// auto-implement this or fail if fields are added that violate this constraint
#[derive(Clone, Copy, Debug)]
pub(crate) struct SendSyncPtr(std::ptr::NonNull<std::ffi::c_void>);
// SAFETY: Sending is fine because mapped_ptr does not change based on the thread we are in
unsafe impl Send for SendSyncPtr {}
// SAFETY: Sync is also okay because Sending &Allocation is safe: a mutable reference
// to the data in mapped_ptr is never exposed while `self` is immutably borrowed.
// In order to break safety guarantees, the user needs to `unsafe`ly dereference
// `mapped_ptr` themselves.
unsafe impl Sync for SendSyncPtr {}
+
/// Construction parameters for the Vulkan allocator.
pub struct AllocatorCreateDesc {
    /// Vulkan instance the allocator operates on.
    pub instance: ash::Instance,
    /// Logical device used for all memory allocations.
    pub device: ash::Device,
    /// Physical device corresponding to `device`.
    pub physical_device: ash::vk::PhysicalDevice,
    /// Debug/logging options.
    pub debug_settings: AllocatorDebugSettings,
    /// When true, memory is allocated with `MemoryAllocateFlags::DEVICE_ADDRESS`
    /// so buffer device addresses can be queried from it.
    pub buffer_device_address: bool,
    /// Memory block sizes used for sub-allocated (non-dedicated) memory.
    pub allocation_sizes: AllocationSizes,
}
+
+/// A piece of allocated memory.
+///
+/// Could be contained in its own individual underlying memory object or as a sub-region
+/// of a larger allocation.
+///
+/// # Copying data into a CPU-mapped [`Allocation`]
+///
+/// You'll very likely want to copy data into CPU-mapped [`Allocation`]s in order to send that data to the GPU.
+/// Doing this data transfer correctly without invoking undefined behavior can be quite fraught and non-obvious<sup>[\[1\]]</sup>.
+///
+/// To help you do this correctly, [`Allocation`] implements [`presser::Slab`], which means you can directly
+/// pass it in to many of `presser`'s [helper functions] (for example, [`copy_from_slice_to_offset`]).
+///
+/// In most cases, this will work perfectly. However, note that if you try to use an [`Allocation`] as a
+/// [`Slab`] and it is not valid to do so (if it is not CPU-mapped or if its `size > isize::MAX`),
+/// you will cause a panic. If you aren't sure about these conditions, you may use [`Allocation::try_as_mapped_slab`].
+///
+/// ## Example
+///
+/// Say we've created an [`Allocation`] called `my_allocation`, which is CPU-mapped.
+/// ```ignore
+/// let mut my_allocation: Allocation = my_allocator.allocate(...)?;
+/// ```
+///
+/// And we want to fill it with some data in the form of a `my_gpu_data: Vec<MyGpuVector>`, defined as such:
+///
+/// ```ignore
+/// // note that this is size(12) but align(16), thus we have 4 padding bytes.
+/// // this would mean a `&[MyGpuVector]` is invalid to cast as a `&[u8]`, but
+/// // we can still use `presser` to copy it directly in a valid manner.
+/// #[repr(C, align(16))]
+/// #[derive(Clone, Copy)]
+/// struct MyGpuVector {
+/// x: f32,
+/// y: f32,
+/// z: f32,
+/// }
+///
+/// let my_gpu_data: Vec<MyGpuVector> = make_vertex_data();
+/// ```
+///
+/// Depending on how the data we're copying will be used, the vulkan device may have a minimum
+/// alignment requirement for that data:
+///
+/// ```ignore
+/// let min_gpu_align = my_vulkan_device_specifications.min_alignment_thing;
+/// ```
+///
+/// Finally, we can use [`presser::copy_from_slice_to_offset_with_align`] to perform the copy,
+/// simply passing `&mut my_allocation` since [`Allocation`] implements [`Slab`].
+///
+/// ```ignore
+/// let copy_record = presser::copy_from_slice_to_offset_with_align(
+/// &my_gpu_data[..], // a slice containing all elements of my_gpu_data
+/// &mut my_allocation, // our Allocation
+/// 0, // start as close to the beginning of the allocation as possible
+/// min_gpu_align, // the minimum alignment we queried previously
+/// )?;
+/// ```
+///
+/// It's important to note that the data may not have actually been copied starting at the requested
+/// `start_offset` (0 in the example above) depending on the alignment of the underlying allocation
+/// as well as the alignment requirements of `MyGpuVector` and the `min_gpu_align` we passed in. Thus,
+/// we can query the `copy_record` for the actual starting offset:
+///
+/// ```ignore
+/// let actual_data_start_offset = copy_record.copy_start_offset;
+/// ```
+///
+/// ## Safety
+///
+/// It is technically not fully safe to use an [`Allocation`] as a [`presser::Slab`] because we can't validate that the
+/// GPU is not using the data in the buffer while `self` is borrowed. However, trying
+/// to validate this statically is really hard and the community has basically decided that
+/// requiring `unsafe` for functions like this creates too much "unsafe-noise", ultimately making it
+/// harder to debug more insidious unsafety that is unrelated to GPU-CPU sync issues.
+///
+/// So, as would always be the case, you must ensure the GPU
+/// is not using the data in `self` for the duration that you hold the returned [`MappedAllocationSlab`].
+///
+/// [`Slab`]: presser::Slab
+/// [`copy_from_slice_to_offset`]: presser::copy_from_slice_to_offset
+/// [helper functions]: presser#functions
+/// [\[1\]]: presser#motivation
#[derive(Debug)]
pub struct Allocation {
    // `None` marks a null/default allocation (see `is_null`).
    chunk_id: Option<std::num::NonZeroU64>,
    // Offset of this allocation within `device_memory`.
    offset: u64,
    // Size of the allocation in bytes.
    size: u64,
    // Indices into the allocator's internal memory-type/block tables.
    memory_block_index: usize,
    memory_type_index: usize,
    // Backing memory object; may be shared with other allocations (see `memory`).
    device_memory: vk::DeviceMemory,
    // CPU address of this allocation's mapped region, when host visible.
    mapped_ptr: Option<SendSyncPtr>,
    // Whether this allocation uses a dedicated underlying allocation.
    dedicated_allocation: bool,
    memory_properties: vk::MemoryPropertyFlags,
    name: Option<Box<str>>,
}
+
impl Allocation {
    /// Tries to borrow the CPU-mapped memory that backs this allocation as a [`presser::Slab`], which you can then
    /// use to safely copy data into the raw, potentially-uninitialized buffer.
    /// See [the documentation of Allocation][Allocation#example] for an example of this.
    ///
    /// Returns [`None`] if `self.mapped_ptr()` is `None`, or if `self.size()` is greater than `isize::MAX` because
    /// this could lead to undefined behavior.
    ///
    /// Note that [`Allocation`] implements [`Slab`] natively, so you can actually pass this allocation as a [`Slab`]
    /// directly. However, if `self` is not actually a valid [`Slab`] (this function would return `None` as described above),
    /// then trying to use it as a [`Slab`] will panic.
    ///
    /// # Safety
    ///
    /// See the note about safety in [the documentation of Allocation][Allocation#safety]
    ///
    /// [`Slab`]: presser::Slab
    // best to be explicit where the lifetime is coming from since we're doing unsafe things
    // and relying on an inferred lifetime type in the PhantomData below
    #[allow(clippy::needless_lifetimes)]
    pub fn try_as_mapped_slab<'a>(&'a mut self) -> Option<MappedAllocationSlab<'a>> {
        let mapped_ptr = self.mapped_ptr()?.cast().as_ptr();

        if self.size > isize::MAX as _ {
            return None;
        }

        // this will always succeed since size is <= isize::MAX which is < usize::MAX
        let size = self.size as usize;

        Some(MappedAllocationSlab {
            _borrowed_alloc: PhantomData,
            mapped_ptr,
            size,
        })
    }

    /// Returns the ID of the sub-allocator chunk backing this allocation,
    /// or `None` for a null allocation.
    pub fn chunk_id(&self) -> Option<std::num::NonZeroU64> {
        self.chunk_id
    }

    /// Returns the [`vk::MemoryPropertyFlags`] of this allocation.
    pub fn memory_properties(&self) -> vk::MemoryPropertyFlags {
        self.memory_properties
    }

    /// Returns the [`vk::DeviceMemory`] object that is backing this allocation.
    /// This memory object can be shared with multiple other allocations and shouldn't be freed (or allocated from)
    /// without this library, because that will lead to undefined behavior.
    ///
    /// # Safety
    /// The result of this function can safely be used to pass into [`ash::Device::bind_buffer_memory()`],
    /// [`ash::Device::bind_image_memory()`] etc. It is exposed for this reason. Keep in mind to also
    /// pass [`Self::offset()`] along to those.
    pub unsafe fn memory(&self) -> vk::DeviceMemory {
        self.device_memory
    }

    /// Returns [`true`] if this allocation is using a dedicated underlying allocation.
    pub fn is_dedicated(&self) -> bool {
        self.dedicated_allocation
    }

    /// Returns the offset of the allocation on the [`vk::DeviceMemory`].
    /// When binding the memory to a buffer or image, this offset needs to be supplied as well.
    pub fn offset(&self) -> u64 {
        self.offset
    }

    /// Returns the size of the allocation
    pub fn size(&self) -> u64 {
        self.size
    }

    /// Returns a valid mapped pointer if the memory is host visible, otherwise it will return None.
    /// The pointer already points to the exact memory region of the suballocation, so no offset needs to be applied.
    pub fn mapped_ptr(&self) -> Option<std::ptr::NonNull<std::ffi::c_void>> {
        self.mapped_ptr.map(|SendSyncPtr(p)| p)
    }

    /// Returns a valid mapped slice if the memory is host visible, otherwise it will return None.
    /// The slice already references the exact memory region of the allocation, so no offset needs to be applied.
    pub fn mapped_slice(&self) -> Option<&[u8]> {
        self.mapped_ptr().map(|ptr| unsafe {
            std::slice::from_raw_parts(ptr.cast().as_ptr(), self.size as usize)
        })
    }

    /// Returns a valid mapped mutable slice if the memory is host visible, otherwise it will return None.
    /// The slice already references the exact memory region of the allocation, so no offset needs to be applied.
    pub fn mapped_slice_mut(&mut self) -> Option<&mut [u8]> {
        self.mapped_ptr().map(|ptr| unsafe {
            std::slice::from_raw_parts_mut(ptr.cast().as_ptr(), self.size as usize)
        })
    }

    /// Returns [`true`] if this is a null/default allocation without a backing chunk.
    pub fn is_null(&self) -> bool {
        self.chunk_id.is_none()
    }
}
+
+impl Default for Allocation {
+ fn default() -> Self {
+ Self {
+ chunk_id: None,
+ offset: 0,
+ size: 0,
+ memory_block_index: !0,
+ memory_type_index: !0,
+ device_memory: vk::DeviceMemory::null(),
+ mapped_ptr: None,
+ memory_properties: vk::MemoryPropertyFlags::empty(),
+ name: None,
+ dedicated_allocation: false,
+ }
+ }
+}
+
/// A wrapper struct over a borrowed [`Allocation`] that infallibly implements [`presser::Slab`].
///
/// This type should be acquired by calling [`Allocation::try_as_mapped_slab`].
pub struct MappedAllocationSlab<'a> {
    // Ties the slab's lifetime to the mutable borrow of the allocation.
    _borrowed_alloc: PhantomData<&'a mut Allocation>,
    // CPU-mapped base address; validated by `try_as_mapped_slab`.
    mapped_ptr: *mut u8,
    // Size in bytes; guaranteed <= isize::MAX by `try_as_mapped_slab`.
    size: usize,
}
+
// SAFETY: See the safety comment of Allocation::as_mapped_slab above.
unsafe impl<'a> presser::Slab for MappedAllocationSlab<'a> {
    // Base pointer was validated non-null/mapped on construction.
    fn base_ptr(&self) -> *const u8 {
        self.mapped_ptr
    }

    fn base_ptr_mut(&mut self) -> *mut u8 {
        self.mapped_ptr
    }

    // Size was validated <= isize::MAX on construction, so this is infallible.
    fn size(&self) -> usize {
        self.size
    }
}
+
// SAFETY: See the safety comment of Allocation::as_mapped_slab above.
// Unlike `MappedAllocationSlab`, this impl panics (rather than returning
// `None`) when the allocation is not CPU-mapped or is too large; use
// `Allocation::try_as_mapped_slab` to check these conditions up front.
unsafe impl presser::Slab for Allocation {
    fn base_ptr(&self) -> *const u8 {
        self.mapped_ptr
            .expect("tried to use a non-mapped Allocation as a Slab")
            .0
            .as_ptr()
            .cast()
    }

    fn base_ptr_mut(&mut self) -> *mut u8 {
        self.mapped_ptr
            .expect("tried to use a non-mapped Allocation as a Slab")
            .0
            .as_ptr()
            .cast()
    }

    fn size(&self) -> usize {
        if self.size > isize::MAX as _ {
            panic!("tried to use an Allocation with size > isize::MAX as a Slab")
        }
        // this will always work if the above passed
        self.size as usize
    }
}
+
+/// A single `vk::DeviceMemory` allocation, optionally persistently mapped, whose
+/// contents are parceled out by a sub-allocator (free-list for shared blocks,
+/// dedicated-block otherwise — see `MemoryBlock::new`).
+#[derive(Debug)]
+pub(crate) struct MemoryBlock {
+ pub(crate) device_memory: vk::DeviceMemory,
+ pub(crate) size: u64,
+ // Base of the persistent mapping, present iff the block was created with `mapped: true`.
+ pub(crate) mapped_ptr: Option<SendSyncPtr>,
+ pub(crate) sub_allocator: Box<dyn allocator::SubAllocator>,
+ #[cfg(feature = "visualizer")]
+ pub(crate) dedicated_allocation: bool,
+}
+
+impl MemoryBlock {
+ /// Allocates a new `size`-byte device memory block from memory type `mem_type_index`.
+ ///
+ /// * `mapped` — persistently map the whole block (`vk::WHOLE_SIZE`) on creation.
+ /// * `buffer_device_address` — chain `vk::MemoryAllocateFlagsInfo` with the
+ ///   `DEVICE_ADDRESS` flag into the allocation.
+ /// * `allocation_scheme` — dedicated buffer/image schemes chain a
+ ///   `vk::MemoryDedicatedAllocateInfo` naming the resource.
+ /// * `requires_personal_block` — force a dedicated-block sub-allocator even for
+ ///   `GpuAllocatorManaged` (used for allocations larger than the memblock size).
+ ///
+ /// Errors with `AllocationError::OutOfMemory` on `ERROR_OUT_OF_DEVICE_MEMORY`,
+ /// and `AllocationError::FailedToMap` if the requested mapping fails.
+ fn new(
+ device: &ash::Device,
+ size: u64,
+ mem_type_index: usize,
+ mapped: bool,
+ buffer_device_address: bool,
+ allocation_scheme: AllocationScheme,
+ requires_personal_block: bool,
+ ) -> Result<Self> {
+ let device_memory = {
+ let alloc_info = vk::MemoryAllocateInfo::builder()
+ .allocation_size(size)
+ .memory_type_index(mem_type_index as u32);
+
+ let allocation_flags = vk::MemoryAllocateFlags::DEVICE_ADDRESS;
+ let mut flags_info = vk::MemoryAllocateFlagsInfo::builder().flags(allocation_flags);
+ // TODO(manon): Test this based on if the device has this feature enabled or not
+ let alloc_info = if buffer_device_address {
+ alloc_info.push_next(&mut flags_info)
+ } else {
+ alloc_info
+ };
+
+ // Flag the memory as dedicated if required.
+ let mut dedicated_memory_info = vk::MemoryDedicatedAllocateInfo::builder();
+ let alloc_info = match allocation_scheme {
+ AllocationScheme::DedicatedBuffer(buffer) => {
+ dedicated_memory_info = dedicated_memory_info.buffer(buffer);
+ alloc_info.push_next(&mut dedicated_memory_info)
+ }
+ AllocationScheme::DedicatedImage(image) => {
+ dedicated_memory_info = dedicated_memory_info.image(image);
+ alloc_info.push_next(&mut dedicated_memory_info)
+ }
+ AllocationScheme::GpuAllocatorManaged => alloc_info,
+ };
+
+ unsafe { device.allocate_memory(&alloc_info, None) }.map_err(|e| match e {
+ vk::Result::ERROR_OUT_OF_DEVICE_MEMORY => AllocationError::OutOfMemory,
+ e => AllocationError::Internal(format!(
+ "Unexpected error in vkAllocateMemory: {:?}",
+ e
+ )),
+ })?
+ };
+
+ // Persistently map the whole block up front; on any mapping failure the freshly
+ // allocated device memory is released again so nothing leaks.
+ let mapped_ptr = mapped
+ .then(|| {
+ unsafe {
+ device.map_memory(
+ device_memory,
+ 0,
+ vk::WHOLE_SIZE,
+ vk::MemoryMapFlags::empty(),
+ )
+ }
+ .map_err(|e| {
+ unsafe { device.free_memory(device_memory, None) };
+ AllocationError::FailedToMap(e.to_string())
+ })
+ .and_then(|p| {
+ std::ptr::NonNull::new(p).map(SendSyncPtr).ok_or_else(|| {
+ AllocationError::FailedToMap("Returned mapped pointer is null".to_owned())
+ })
+ })
+ })
+ .transpose()?;
+
+ // Dedicated/personal blocks hold exactly one allocation; shared blocks are
+ // managed by a free-list sub-allocator.
+ let sub_allocator: Box<dyn allocator::SubAllocator> = if allocation_scheme
+ != AllocationScheme::GpuAllocatorManaged
+ || requires_personal_block
+ {
+ Box::new(allocator::DedicatedBlockAllocator::new(size))
+ } else {
+ Box::new(allocator::FreeListAllocator::new(size))
+ };
+
+ Ok(Self {
+ device_memory,
+ size,
+ mapped_ptr,
+ sub_allocator,
+ #[cfg(feature = "visualizer")]
+ dedicated_allocation: allocation_scheme != AllocationScheme::GpuAllocatorManaged,
+ })
+ }
+
+ /// Unmaps the block (if it was mapped) and frees its `vk::DeviceMemory`.
+ /// Consumes the block so it cannot be used afterwards.
+ fn destroy(self, device: &ash::Device) {
+ if self.mapped_ptr.is_some() {
+ unsafe { device.unmap_memory(self.device_memory) };
+ }
+
+ unsafe { device.free_memory(self.device_memory, None) };
+ }
+}
+
+/// Per-Vulkan-memory-type state: the live memory blocks for that type plus cached
+/// properties consulted when selecting and creating blocks.
+#[derive(Debug)]
+pub(crate) struct MemoryType {
+ // `None` entries are free slots left behind by destroyed blocks; they are
+ // reused before the vector is grown.
+ pub(crate) memory_blocks: Vec<Option<MemoryBlock>>,
+ pub(crate) memory_properties: vk::MemoryPropertyFlags,
+ pub(crate) memory_type_index: usize,
+ pub(crate) heap_index: usize,
+ pub(crate) mappable: bool,
+ // Count of live shared (free-list) blocks; `free` keeps at least one around.
+ pub(crate) active_general_blocks: usize,
+ pub(crate) buffer_device_address: bool,
+}
+
+impl MemoryType {
+ /// Sub-allocates `desc.requirements.size` bytes from this memory type.
+ ///
+ /// Dedicated allocations (non-`GpuAllocatorManaged` schemes) and allocations
+ /// larger than the configured memblock size each get their own block. Otherwise
+ /// existing shared blocks are tried newest-first, and a new shared block of
+ /// `memblock_size` (host or device size, depending on `HOST_VISIBLE`) is created
+ /// only when none has room. `granularity` is the device's
+ /// `bufferImageGranularity` limit, forwarded to the sub-allocator; `backtrace`
+ /// is recorded per chunk for leak reporting.
+ fn allocate(
+ &mut self,
+ device: &ash::Device,
+ desc: &AllocationCreateDesc<'_>,
+ granularity: u64,
+ backtrace: Arc<Backtrace>,
+ allocation_sizes: &AllocationSizes,
+ ) -> Result<Allocation> {
+ let allocation_type = if desc.linear {
+ AllocationType::Linear
+ } else {
+ AllocationType::NonLinear
+ };
+
+ // Host-visible memory types use the (typically smaller) host memblock size.
+ let memblock_size = if self
+ .memory_properties
+ .contains(vk::MemoryPropertyFlags::HOST_VISIBLE)
+ {
+ allocation_sizes.host_memblock_size
+ } else {
+ allocation_sizes.device_memblock_size
+ };
+
+ let size = desc.requirements.size;
+ let alignment = desc.requirements.alignment;
+
+ let dedicated_allocation = desc.allocation_scheme != AllocationScheme::GpuAllocatorManaged;
+ let requires_personal_block = size > memblock_size;
+
+ // Create a dedicated block for large memory allocations or allocations that require dedicated memory allocations.
+ if dedicated_allocation || requires_personal_block {
+ let mem_block = MemoryBlock::new(
+ device,
+ size,
+ self.memory_type_index,
+ self.mappable,
+ self.buffer_device_address,
+ desc.allocation_scheme,
+ requires_personal_block,
+ )?;
+
+ // Reuse the first vacated slot, if any, to keep block indices dense.
+ let mut block_index = None;
+ for (i, block) in self.memory_blocks.iter().enumerate() {
+ if block.is_none() {
+ block_index = Some(i);
+ break;
+ }
+ }
+
+ let block_index = match block_index {
+ Some(i) => {
+ self.memory_blocks[i].replace(mem_block);
+ i
+ }
+ None => {
+ self.memory_blocks.push(Some(mem_block));
+ self.memory_blocks.len() - 1
+ }
+ };
+
+ let mem_block = self.memory_blocks[block_index]
+ .as_mut()
+ .ok_or_else(|| AllocationError::Internal("Memory block must be Some".into()))?;
+
+ let (offset, chunk_id) = mem_block.sub_allocator.allocate(
+ size,
+ alignment,
+ allocation_type,
+ granularity,
+ desc.name,
+ backtrace,
+ )?;
+
+ return Ok(Allocation {
+ chunk_id: Some(chunk_id),
+ offset,
+ size,
+ memory_block_index: block_index,
+ memory_type_index: self.memory_type_index,
+ device_memory: mem_block.device_memory,
+ mapped_ptr: mem_block.mapped_ptr,
+ memory_properties: self.memory_properties,
+ name: Some(desc.name.into()),
+ dedicated_allocation,
+ });
+ }
+
+ // Shared path: scan existing blocks (newest first) for free space, remembering
+ // the first empty slot for potential reuse below.
+ let mut empty_block_index = None;
+ for (mem_block_i, mem_block) in self.memory_blocks.iter_mut().enumerate().rev() {
+ if let Some(mem_block) = mem_block {
+ let allocation = mem_block.sub_allocator.allocate(
+ size,
+ alignment,
+ allocation_type,
+ granularity,
+ desc.name,
+ backtrace.clone(),
+ );
+
+ match allocation {
+ Ok((offset, chunk_id)) => {
+ // Offset the block's base mapping to this allocation's region.
+ let mapped_ptr = if let Some(SendSyncPtr(mapped_ptr)) = mem_block.mapped_ptr
+ {
+ let offset_ptr = unsafe { mapped_ptr.as_ptr().add(offset as usize) };
+ std::ptr::NonNull::new(offset_ptr).map(SendSyncPtr)
+ } else {
+ None
+ };
+ return Ok(Allocation {
+ chunk_id: Some(chunk_id),
+ offset,
+ size,
+ memory_block_index: mem_block_i,
+ memory_type_index: self.memory_type_index,
+ device_memory: mem_block.device_memory,
+ memory_properties: self.memory_properties,
+ mapped_ptr,
+ dedicated_allocation: false,
+ name: Some(desc.name.into()),
+ });
+ }
+ Err(err) => match err {
+ AllocationError::OutOfMemory => {} // Block is full, continue search.
+ _ => return Err(err), // Unhandled error, return.
+ },
+ }
+ } else if empty_block_index.is_none() {
+ empty_block_index = Some(mem_block_i);
+ }
+ }
+
+ // No existing block had room: create a fresh shared block of the standard size.
+ let new_memory_block = MemoryBlock::new(
+ device,
+ memblock_size,
+ self.memory_type_index,
+ self.mappable,
+ self.buffer_device_address,
+ desc.allocation_scheme,
+ false,
+ )?;
+
+ let new_block_index = if let Some(block_index) = empty_block_index {
+ self.memory_blocks[block_index] = Some(new_memory_block);
+ block_index
+ } else {
+ self.memory_blocks.push(Some(new_memory_block));
+ self.memory_blocks.len() - 1
+ };
+
+ self.active_general_blocks += 1;
+
+ let mem_block = self.memory_blocks[new_block_index]
+ .as_mut()
+ .ok_or_else(|| AllocationError::Internal("Memory block must be Some".into()))?;
+ let allocation = mem_block.sub_allocator.allocate(
+ size,
+ alignment,
+ allocation_type,
+ granularity,
+ desc.name,
+ backtrace,
+ );
+ // A brand-new block must fit the request (size <= memblock_size was checked
+ // above), so OutOfMemory here indicates an allocator bug.
+ let (offset, chunk_id) = match allocation {
+ Ok(value) => value,
+ Err(err) => match err {
+ AllocationError::OutOfMemory => {
+ return Err(AllocationError::Internal(
+ "Allocation that must succeed failed. This is a bug in the allocator."
+ .into(),
+ ))
+ }
+ _ => return Err(err),
+ },
+ };
+
+ let mapped_ptr = if let Some(SendSyncPtr(mapped_ptr)) = mem_block.mapped_ptr {
+ let offset_ptr = unsafe { mapped_ptr.as_ptr().add(offset as usize) };
+ std::ptr::NonNull::new(offset_ptr).map(SendSyncPtr)
+ } else {
+ None
+ };
+
+ Ok(Allocation {
+ chunk_id: Some(chunk_id),
+ offset,
+ size,
+ memory_block_index: new_block_index,
+ memory_type_index: self.memory_type_index,
+ device_memory: mem_block.device_memory,
+ mapped_ptr,
+ memory_properties: self.memory_properties,
+ name: Some(desc.name.into()),
+ dedicated_allocation: false,
+ })
+ }
+
+ /// Returns `allocation`'s chunk to its block's sub-allocator. Blocks that become
+ /// empty are destroyed, except that the last shared (free-list) block is kept
+ /// alive to avoid churn; dedicated blocks are always destroyed when emptied.
+ #[allow(clippy::needless_pass_by_value)]
+ fn free(&mut self, allocation: Allocation, device: &ash::Device) -> Result<()> {
+ let block_idx = allocation.memory_block_index;
+
+ let mem_block = self.memory_blocks[block_idx]
+ .as_mut()
+ .ok_or_else(|| AllocationError::Internal("Memory block must be Some.".into()))?;
+
+ mem_block.sub_allocator.free(allocation.chunk_id)?;
+
+ if mem_block.sub_allocator.is_empty() {
+ if mem_block.sub_allocator.supports_general_allocations() {
+ if self.active_general_blocks > 1 {
+ let block = self.memory_blocks[block_idx].take();
+ let block = block.ok_or_else(|| {
+ AllocationError::Internal("Memory block must be Some.".into())
+ })?;
+ block.destroy(device);
+
+ self.active_general_blocks -= 1;
+ }
+ } else {
+ let block = self.memory_blocks[block_idx].take();
+ let block = block.ok_or_else(|| {
+ AllocationError::Internal("Memory block must be Some.".into())
+ })?;
+ block.destroy(device);
+ }
+ }
+
+ Ok(())
+ }
+}
+
+/// The Vulkan allocator: owns per-memory-type block lists and sub-allocates user
+/// requests out of them. Construct with [`Allocator::new`].
+pub struct Allocator {
+ pub(crate) memory_types: Vec<MemoryType>,
+ pub(crate) memory_heaps: Vec<vk::MemoryHeap>,
+ device: ash::Device,
+ // Cached `bufferImageGranularity` device limit, forwarded to sub-allocators.
+ pub(crate) buffer_image_granularity: u64,
+ pub(crate) debug_settings: AllocatorDebugSettings,
+ allocation_sizes: AllocationSizes,
+}
+
+// `Debug` renders a human-readable allocation breakdown: total used vs. reserved
+// bytes, then each live allocation sorted largest-first. A format precision
+// (e.g. `{:.10?}`) caps how many entries are printed.
+impl fmt::Debug for Allocator {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let mut allocation_report = vec![];
+ let mut total_reserved_size_in_bytes = 0;
+
+ // Gather every live allocation from every block; reserved = sum of block sizes.
+ for memory_type in &self.memory_types {
+ for block in memory_type.memory_blocks.iter().flatten() {
+ total_reserved_size_in_bytes += block.size;
+ allocation_report.extend(block.sub_allocator.report_allocations())
+ }
+ }
+
+ let total_used_size_in_bytes = allocation_report.iter().map(|report| report.size).sum();
+
+ allocation_report.sort_by_key(|alloc| std::cmp::Reverse(alloc.size));
+
+ writeln!(
+ f,
+ "================================================================",
+ )?;
+ writeln!(
+ f,
+ "ALLOCATION BREAKDOWN ({} / {})",
+ fmt_bytes(total_used_size_in_bytes),
+ fmt_bytes(total_reserved_size_in_bytes),
+ )?;
+
+ let max_num_allocations_to_print = f.precision().map_or(usize::MAX, |n| n);
+ for (idx, alloc) in allocation_report.iter().enumerate() {
+ if idx >= max_num_allocations_to_print {
+ break;
+ }
+
+ // Names are padded/truncated to the visualizer table width for alignment.
+ writeln!(
+ f,
+ "{:max_len$.max_len$}\t- {}",
+ alloc.name,
+ fmt_bytes(alloc.size),
+ max_len = allocator::VISUALIZER_TABLE_MAX_ENTRY_NAME_LEN,
+ )?;
+ }
+
+ Ok(())
+ }
+}
+
+impl Allocator {
+ /// Creates an allocator for `desc.device`, querying and caching the physical
+ /// device's memory types, heaps and `bufferImageGranularity` limit.
+ ///
+ /// # Errors
+ /// Returns `InvalidAllocatorCreateDesc` if `desc.physical_device` is null.
+ pub fn new(desc: &AllocatorCreateDesc) -> Result<Self> {
+ if desc.physical_device == ash::vk::PhysicalDevice::null() {
+ return Err(AllocationError::InvalidAllocatorCreateDesc(
+ "AllocatorCreateDesc field `physical_device` is null.".into(),
+ ));
+ }
+
+ let mem_props = unsafe {
+ desc.instance
+ .get_physical_device_memory_properties(desc.physical_device)
+ };
+
+ let memory_types = &mem_props.memory_types[..mem_props.memory_type_count as _];
+ let memory_heaps = mem_props.memory_heaps[..mem_props.memory_heap_count as _].to_vec();
+
+ if desc.debug_settings.log_memory_information {
+ debug!("memory type count: {}", mem_props.memory_type_count);
+ debug!("memory heap count: {}", mem_props.memory_heap_count);
+
+ for (i, mem_type) in memory_types.iter().enumerate() {
+ let flags = mem_type.property_flags;
+ debug!(
+ "memory type[{}]: prop flags: 0x{:x}, heap[{}]",
+ i,
+ flags.as_raw(),
+ mem_type.heap_index,
+ );
+ }
+ for (i, heap) in memory_heaps.iter().enumerate() {
+ debug!(
+ "heap[{}] flags: 0x{:x}, size: {} MiB",
+ i,
+ heap.flags.as_raw(),
+ heap.size / (1024 * 1024)
+ );
+ }
+ }
+
+ // One `MemoryType` entry per device memory type, initially without any blocks.
+ let memory_types = memory_types
+ .iter()
+ .enumerate()
+ .map(|(i, mem_type)| MemoryType {
+ memory_blocks: Vec::default(),
+ memory_properties: mem_type.property_flags,
+ memory_type_index: i,
+ heap_index: mem_type.heap_index as usize,
+ mappable: mem_type
+ .property_flags
+ .contains(vk::MemoryPropertyFlags::HOST_VISIBLE),
+ active_general_blocks: 0,
+ buffer_device_address: desc.buffer_device_address,
+ })
+ .collect::<Vec<_>>();
+
+ let physical_device_properties = unsafe {
+ desc.instance
+ .get_physical_device_properties(desc.physical_device)
+ };
+
+ let granularity = physical_device_properties.limits.buffer_image_granularity;
+
+ Ok(Self {
+ memory_types,
+ memory_heaps,
+ device: desc.device.clone(),
+ buffer_image_granularity: granularity,
+ debug_settings: desc.debug_settings,
+ allocation_sizes: AllocationSizes::default(),
+ })
+ }
+
+ /// Allocates memory matching `desc`.
+ ///
+ /// Memory type selection first tries the preferred property flags for
+ /// `desc.location`, then falls back to the minimal required flags. For
+ /// `CpuToGpu`, a failed allocation is retried once against plain
+ /// host-visible/coherent memory (dropping the DEVICE_LOCAL preference).
+ ///
+ /// # Errors
+ /// `InvalidAllocationCreateDesc` for zero size or non-power-of-two alignment,
+ /// `NoCompatibleMemoryTypeFound` if no memory type matches, plus any error
+ /// propagated from block creation.
+ pub fn allocate(&mut self, desc: &AllocationCreateDesc<'_>) -> Result<Allocation> {
+ let size = desc.requirements.size;
+ let alignment = desc.requirements.alignment;
+
+ let backtrace = Arc::new(if self.debug_settings.store_stack_traces {
+ Backtrace::force_capture()
+ } else {
+ Backtrace::disabled()
+ });
+
+ if self.debug_settings.log_allocations {
+ debug!(
+ "Allocating `{}` of {} bytes with an alignment of {}.",
+ &desc.name, size, alignment
+ );
+ if self.debug_settings.log_stack_traces {
+ let backtrace = Backtrace::force_capture();
+ debug!("Allocation stack trace: {}", backtrace);
+ }
+ }
+
+ if size == 0 || !alignment.is_power_of_two() {
+ return Err(AllocationError::InvalidAllocationCreateDesc);
+ }
+
+ // Preferred flags per requested location (best-case memory type).
+ let mem_loc_preferred_bits = match desc.location {
+ MemoryLocation::GpuOnly => vk::MemoryPropertyFlags::DEVICE_LOCAL,
+ MemoryLocation::CpuToGpu => {
+ vk::MemoryPropertyFlags::HOST_VISIBLE
+ | vk::MemoryPropertyFlags::HOST_COHERENT
+ | vk::MemoryPropertyFlags::DEVICE_LOCAL
+ }
+ MemoryLocation::GpuToCpu => {
+ vk::MemoryPropertyFlags::HOST_VISIBLE
+ | vk::MemoryPropertyFlags::HOST_COHERENT
+ | vk::MemoryPropertyFlags::HOST_CACHED
+ }
+ MemoryLocation::Unknown => vk::MemoryPropertyFlags::empty(),
+ };
+ let mut memory_type_index_opt =
+ self.find_memorytype_index(&desc.requirements, mem_loc_preferred_bits);
+
+ // Fall back to the minimal required flags if no preferred type exists.
+ if memory_type_index_opt.is_none() {
+ let mem_loc_required_bits = match desc.location {
+ MemoryLocation::GpuOnly => vk::MemoryPropertyFlags::DEVICE_LOCAL,
+ MemoryLocation::CpuToGpu | MemoryLocation::GpuToCpu => {
+ vk::MemoryPropertyFlags::HOST_VISIBLE | vk::MemoryPropertyFlags::HOST_COHERENT
+ }
+ MemoryLocation::Unknown => vk::MemoryPropertyFlags::empty(),
+ };
+
+ memory_type_index_opt =
+ self.find_memorytype_index(&desc.requirements, mem_loc_required_bits);
+ }
+
+ let memory_type_index = match memory_type_index_opt {
+ Some(x) => x as usize,
+ None => return Err(AllocationError::NoCompatibleMemoryTypeFound),
+ };
+
+ //Do not try to create a block if the heap is smaller than the required size (avoids validation warnings).
+ let memory_type = &mut self.memory_types[memory_type_index];
+ let allocation = if size > self.memory_heaps[memory_type.heap_index].size {
+ Err(AllocationError::OutOfMemory)
+ } else {
+ memory_type.allocate(
+ &self.device,
+ desc,
+ self.buffer_image_granularity,
+ backtrace.clone(),
+ &self.allocation_sizes,
+ )
+ };
+
+ // CpuToGpu fallback: a DEVICE_LOCAL|HOST_VISIBLE heap (BAR) may be exhausted;
+ // retry against plain host-visible/coherent memory before giving up.
+ if desc.location == MemoryLocation::CpuToGpu {
+ if allocation.is_err() {
+ let mem_loc_preferred_bits =
+ vk::MemoryPropertyFlags::HOST_VISIBLE | vk::MemoryPropertyFlags::HOST_COHERENT;
+
+ let memory_type_index_opt =
+ self.find_memorytype_index(&desc.requirements, mem_loc_preferred_bits);
+
+ let memory_type_index = match memory_type_index_opt {
+ Some(x) => x as usize,
+ None => return Err(AllocationError::NoCompatibleMemoryTypeFound),
+ };
+
+ self.memory_types[memory_type_index].allocate(
+ &self.device,
+ desc,
+ self.buffer_image_granularity,
+ backtrace,
+ &self.allocation_sizes,
+ )
+ } else {
+ allocation
+ }
+ } else {
+ allocation
+ }
+ }
+
+ /// Frees `allocation`. Freeing a null allocation (see [`Allocation::is_null`])
+ /// is a no-op, so `Allocation::default()` values are safe to pass here.
+ pub fn free(&mut self, allocation: Allocation) -> Result<()> {
+ if self.debug_settings.log_frees {
+ let name = allocation.name.as_deref().unwrap_or("<null>");
+ debug!("Freeing `{}`.", name);
+ if self.debug_settings.log_stack_traces {
+ let backtrace = Backtrace::force_capture();
+ debug!("Free stack trace: {}", backtrace);
+ }
+ }
+
+ if allocation.is_null() {
+ return Ok(());
+ }
+
+ self.memory_types[allocation.memory_type_index].free(allocation, &self.device)?;
+
+ Ok(())
+ }
+
+ /// Renames `allocation` both on the `Allocation` itself and in the owning
+ /// sub-allocator's bookkeeping. No-op (beyond the local rename) for null allocations.
+ pub fn rename_allocation(&mut self, allocation: &mut Allocation, name: &str) -> Result<()> {
+ allocation.name = Some(name.into());
+
+ if allocation.is_null() {
+ return Ok(());
+ }
+
+ let mem_type = &mut self.memory_types[allocation.memory_type_index];
+ let mem_block = mem_type.memory_blocks[allocation.memory_block_index]
+ .as_mut()
+ .ok_or_else(|| AllocationError::Internal("Memory block must be Some.".into()))?;
+
+ mem_block
+ .sub_allocator
+ .rename_allocation(allocation.chunk_id, name)?;
+
+ Ok(())
+ }
+
+ /// Logs every allocation still live in any block at the given `log_level`;
+ /// used on drop when `log_leaks_on_shutdown` is enabled.
+ pub fn report_memory_leaks(&self, log_level: Level) {
+ for (mem_type_i, mem_type) in self.memory_types.iter().enumerate() {
+ for (block_i, mem_block) in mem_type.memory_blocks.iter().enumerate() {
+ if let Some(mem_block) = mem_block {
+ mem_block
+ .sub_allocator
+ .report_memory_leaks(log_level, mem_type_i, block_i);
+ }
+ }
+ }
+ }
+
+ /// Returns the index of the first memory type that is allowed by
+ /// `memory_req.memory_type_bits` and has all of `flags` set, or `None`.
+ fn find_memorytype_index(
+ &self,
+ memory_req: &vk::MemoryRequirements,
+ flags: vk::MemoryPropertyFlags,
+ ) -> Option<u32> {
+ self.memory_types
+ .iter()
+ .find(|memory_type| {
+ (1 << memory_type.memory_type_index) & memory_req.memory_type_bits != 0
+ && memory_type.memory_properties.contains(flags)
+ })
+ .map(|memory_type| memory_type.memory_type_index as _)
+ }
+}
+
+// On drop, optionally report leaks, then destroy every remaining memory block so
+// no `vk::DeviceMemory` outlives the allocator.
+impl Drop for Allocator {
+ fn drop(&mut self) {
+ if self.debug_settings.log_leaks_on_shutdown {
+ self.report_memory_leaks(Level::Warn);
+ }
+
+ // Free all remaining memory blocks
+ for mem_type in self.memory_types.iter_mut() {
+ for mem_block in mem_type.memory_blocks.iter_mut() {
+ let block = mem_block.take();
+ if let Some(block) = block {
+ block.destroy(&self.device);
+ }
+ }
+ }
+ }
+}
diff --git a/third_party/rust/gpu-allocator/src/vulkan/visualizer.rs b/third_party/rust/gpu-allocator/src/vulkan/visualizer.rs
new file mode 100644
index 0000000000..8f274fe583
--- /dev/null
+++ b/third_party/rust/gpu-allocator/src/vulkan/visualizer.rs
@@ -0,0 +1,226 @@
+#![allow(clippy::new_without_default)]
+
+use super::Allocator;
+use crate::visualizer::{
+ render_allocation_reports_ui, AllocationReportVisualizeSettings, ColorScheme,
+ MemoryChunksVisualizationSettings,
+};
+
+// One open "Block Visualizer" egui window: which block it shows plus its
+// per-window visualization settings.
+struct AllocatorVisualizerBlockWindow {
+ memory_type_index: usize,
+ block_index: usize,
+ settings: MemoryChunksVisualizationSettings,
+}
+impl AllocatorVisualizerBlockWindow {
+ // Creates a window descriptor for the given block with default settings.
+ fn new(memory_type_index: usize, block_index: usize) -> Self {
+ Self {
+ memory_type_index,
+ block_index,
+ settings: Default::default(),
+ }
+ }
+}
+
+/// egui-based debug UI over an [`Allocator`]: browsable memory-block tree,
+/// per-block chunk visualizations, and an allocation-report breakdown table.
+pub struct AllocatorVisualizer {
+ // Blocks the user has opened a visualization window for.
+ selected_blocks: Vec<AllocatorVisualizerBlockWindow>,
+ color_scheme: ColorScheme,
+ breakdown_settings: AllocationReportVisualizeSettings,
+}
+
+impl AllocatorVisualizer {
+ /// Creates a visualizer with no open block windows and default settings.
+ pub fn new() -> Self {
+ Self {
+ selected_blocks: Vec::default(),
+ color_scheme: ColorScheme::default(),
+ breakdown_settings: Default::default(),
+ }
+ }
+
+ /// Overrides the color scheme used by the chunk visualizations.
+ pub fn set_color_scheme(&mut self, color_scheme: ColorScheme) {
+ self.color_scheme = color_scheme;
+ }
+
+ /// Renders the collapsible heap/type/block tree for `alloc` into `ui`.
+ /// Clicking a block's "visualize" button queues a visualization window for it
+ /// (deduplicated per block).
+ pub fn render_memory_block_ui(&mut self, ui: &mut egui::Ui, alloc: &Allocator) {
+ ui.label(format!(
+ "buffer image granularity: {:?}",
+ alloc.buffer_image_granularity
+ ));
+
+ ui.collapsing(
+ format!("Memory Heaps ({} heaps)", alloc.memory_heaps.len()),
+ |ui| {
+ for (i, heap) in alloc.memory_heaps.iter().enumerate() {
+ ui.collapsing(format!("Heap: {}", i), |ui| {
+ ui.label(format!("flags: {:?}", heap.flags));
+ ui.label(format!(
+ "size: {} MiB",
+ heap.size as f64 / (1024 * 1024) as f64
+ ));
+ });
+ }
+ },
+ );
+
+ ui.collapsing(
+ format!("Memory Types: ({} types)", alloc.memory_types.len()),
+ |ui| {
+ for (mem_type_idx, mem_type) in alloc.memory_types.iter().enumerate() {
+ ui.collapsing(
+ format!(
+ "Type: {} ({} blocks)",
+ mem_type_idx,
+ mem_type.memory_blocks.len(),
+ ),
+ |ui| {
+ // Aggregate stats across this type's live blocks.
+ let mut total_block_size = 0;
+ let mut total_allocated = 0;
+
+ for block in mem_type.memory_blocks.iter().flatten() {
+ total_block_size += block.size;
+ total_allocated += block.sub_allocator.allocated();
+ }
+
+ let active_block_count = mem_type
+ .memory_blocks
+ .iter()
+ .filter(|block| block.is_some())
+ .count();
+
+ ui.label(format!("properties: {:?}", mem_type.memory_properties));
+ ui.label(format!("heap index: {}", mem_type.heap_index));
+ ui.label(format!("total block size: {} KiB", total_block_size / 1024));
+ ui.label(format!("total allocated: {} KiB", total_allocated / 1024));
+ ui.label(format!("block count: {}", active_block_count));
+
+ for (block_idx, block) in mem_type.memory_blocks.iter().enumerate() {
+ let Some(block) = block else { continue };
+
+ ui.collapsing(format!("Block: {}", block_idx), |ui| {
+ use ash::vk::Handle;
+
+ ui.label(format!("size: {} KiB", block.size / 1024));
+ ui.label(format!(
+ "allocated: {} KiB",
+ block.sub_allocator.allocated() / 1024
+ ));
+ ui.label(format!(
+ "vk device memory: 0x{:x}",
+ block.device_memory.as_raw()
+ ));
+ if let Some(mapped_ptr) = block.mapped_ptr {
+ ui.label(format!(
+ "mapped pointer: {:#p}",
+ mapped_ptr.0.as_ptr()
+ ));
+ }
+ if block.dedicated_allocation {
+ ui.label("Dedicated Allocation");
+ }
+
+ block.sub_allocator.draw_base_info(ui);
+
+ // Open a visualization window unless one already exists
+ // for this exact (type, block) pair.
+ if block.sub_allocator.supports_visualization()
+ && ui.button("visualize").clicked()
+ && !self.selected_blocks.iter().enumerate().any(|(_, x)| {
+ x.memory_type_index == mem_type_idx
+ && x.block_index == block_idx
+ })
+ {
+ self.selected_blocks.push(
+ AllocatorVisualizerBlockWindow::new(
+ mem_type_idx,
+ block_idx,
+ ),
+ );
+ }
+ });
+ }
+ },
+ );
+ }
+ },
+ );
+ }
+
+ /// Shows the top-level "Allocator Memory Blocks" window; `open` is cleared when
+ /// the user closes it.
+ // NOTE(review): the window body calls `render_breakdown_ui`, not
+ // `render_memory_block_ui` — presumably intentional upstream; confirm before
+ // relying on this window to show the block tree.
+ pub fn render_memory_block_window(
+ &mut self,
+ ctx: &egui::Context,
+ allocator: &Allocator,
+ open: &mut bool,
+ ) {
+ egui::Window::new("Allocator Memory Blocks")
+ .open(open)
+ .show(ctx, |ui| self.render_breakdown_ui(ui, allocator));
+ }
+
+ /// Draws one window per previously selected block, dropping entries whose
+ /// window the user has closed (via `retain_mut`).
+ pub fn render_memory_block_visualization_windows(
+ &mut self,
+ ctx: &egui::Context,
+ allocator: &Allocator,
+ ) {
+ // Draw each window.
+ let color_scheme = &self.color_scheme;
+
+ self.selected_blocks.retain_mut(|window| {
+ let mut open = true;
+
+ egui::Window::new(format!(
+ "Block Visualizer {}:{}",
+ window.memory_type_index, window.block_index
+ ))
+ .default_size([1920.0 * 0.5, 1080.0 * 0.5])
+ .open(&mut open)
+ .show(ctx, |ui| {
+ // The block may have been freed since the window was opened.
+ let memblock = &allocator.memory_types[window.memory_type_index].memory_blocks
+ [window.block_index]
+ .as_ref();
+ if let Some(memblock) = memblock {
+ ui.label(format!(
+ "Memory type {}, Memory block {}, Block size: {} KiB",
+ window.memory_type_index,
+ window.block_index,
+ memblock.size / 1024
+ ));
+
+ window
+ .settings
+ .ui(ui, allocator.debug_settings.store_stack_traces);
+
+ ui.separator();
+
+ memblock
+ .sub_allocator
+ .draw_visualization(color_scheme, ui, &window.settings);
+ } else {
+ ui.label("Deallocated memory block");
+ }
+ });
+
+ open
+ });
+ }
+
+ /// Renders the allocation-report breakdown table over every live allocation in
+ /// every block of `allocator`.
+ pub fn render_breakdown_ui(&mut self, ui: &mut egui::Ui, allocator: &Allocator) {
+ render_allocation_reports_ui(
+ ui,
+ &mut self.breakdown_settings,
+ allocator
+ .memory_types
+ .iter()
+ .flat_map(|memory_type| memory_type.memory_blocks.iter())
+ .flatten()
+ .flat_map(|memory_block| memory_block.sub_allocator.report_allocations()),
+ );
+ }
+
+ /// Shows the "Allocator Breakdown" window; `open` is cleared when the user
+ /// closes it.
+ pub fn render_breakdown_window(
+ &mut self,
+ ctx: &egui::Context,
+ allocator: &Allocator,
+ open: &mut bool,
+ ) {
+ egui::Window::new("Allocator Breakdown")
+ .open(open)
+ .show(ctx, |ui| self.render_breakdown_ui(ui, allocator));
+ }
+}