Diffstat (limited to 'third_party/rust/bytemuck')
-rw-r--r-- third_party/rust/bytemuck/.cargo-checksum.json | 1
-rw-r--r-- third_party/rust/bytemuck/Cargo.toml | 33
-rw-r--r-- third_party/rust/bytemuck/LICENSE-ZLIB.md | 11
-rw-r--r-- third_party/rust/bytemuck/README.md | 26
-rw-r--r-- third_party/rust/bytemuck/appveyor.yml | 45
-rw-r--r-- third_party/rust/bytemuck/bors.toml | 1
-rw-r--r-- third_party/rust/bytemuck/changelog.md | 25
-rw-r--r-- third_party/rust/bytemuck/pedantic.bat | 1
-rw-r--r-- third_party/rust/bytemuck/rustfmt.toml | 8
-rwxr-xr-x third_party/rust/bytemuck/scripts/travis.sh | 77
-rw-r--r-- third_party/rust/bytemuck/src/allocation.rs | 119
-rw-r--r-- third_party/rust/bytemuck/src/contiguous.rs | 203
-rw-r--r-- third_party/rust/bytemuck/src/lib.rs | 433
-rw-r--r-- third_party/rust/bytemuck/src/offset_of.rs | 103
-rw-r--r-- third_party/rust/bytemuck/src/pod.rs | 99
-rw-r--r-- third_party/rust/bytemuck/src/transparent.rs | 133
-rw-r--r-- third_party/rust/bytemuck/src/zeroable.rs | 142
-rw-r--r-- third_party/rust/bytemuck/tests/cast_slice_tests.rs | 90
-rw-r--r-- third_party/rust/bytemuck/tests/doc_tests.rs | 121
-rw-r--r-- third_party/rust/bytemuck/tests/std_tests.rs | 29
20 files changed, 1700 insertions, 0 deletions
diff --git a/third_party/rust/bytemuck/.cargo-checksum.json b/third_party/rust/bytemuck/.cargo-checksum.json
new file mode 100644
index 0000000000..29bcc92287
--- /dev/null
+++ b/third_party/rust/bytemuck/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"Cargo.toml":"671c72410a736a19fa60d743233baa254c1588a443d9847e25edb3c7e04f2829","LICENSE-ZLIB.md":"84b34dd7608f7fb9b17bd588a6bf392bf7de504e2716f024a77d89f1b145a151","README.md":"4a27f4dcc0e3fbd2b6c4495d310e9179fb7fa4b77a3504821442f769ff4841ba","appveyor.yml":"09c69d96f1d6298a909d514e5a4fcdf0562be65619bdd2cdb966041ade217ef3","bors.toml":"1d8a7a56c5c76925a3daa8c50a40cc82cbfc638f521f864106bd60b1e8a219a2","changelog.md":"cff011496dbe7d4ca419b079e990f568354c0ffc230101486a08261d57c2da8a","pedantic.bat":"afd79f32caf7dc86e0390838992030decc5024c1348c86eb1c519c9c832bfe5e","rustfmt.toml":"1717bca34bc413693c82b6c50d633be8023545fa9a387b2da817ae848e2f1fc1","scripts/travis.sh":"a57fcf5ece149dd6da26481ebb429f359ccebd733a73de2e87f451371302142b","src/allocation.rs":"5bee031d7a2e4e7201543b0a4181c4f95e461049b045d3a3ab489819677847d9","src/contiguous.rs":"288aa77eca807f47d28c4372f6eb3fd87d885dcaf886fb725c10fdbaf1fd27d0","src/lib.rs":"32baa9a75add0916856e25fa37b3f5082c319d20f523e09d747b0b46333f3e0a","src/offset_of.rs":"aa89eb88ab3acd5694936e9bc922de5d0923e991afe732803946e4b66d7f2ef2","src/pod.rs":"b64399dac0d0dcc6179b4da48c02a15dee881afe858d27aed58253775016f4da","src/transparent.rs":"7d72eaa199c8b8656df324e7a846eb5589cb848080ecb4a75cbbef3b284ee46b","src/zeroable.rs":"c1ab8a5b9af7094fa710338529ee31588e616e2f954db1df0c98b15bbd1a18f6","tests/cast_slice_tests.rs":"de4a5879b0ef74df96ffe04412d7da49364725812e8ba1770e43867d58d8952c","tests/doc_tests.rs":"0008789fc7281f581c8c91eac13ea4683f82cdeadadc4119c7b21b38f7d41577","tests/std_tests.rs":"69661f26dc385c38d6c2bd37a62ba476e81ef88b4ed6565f3a47dd173133365c"},"package":"37fa13df2292ecb479ec23aa06f4507928bef07839be9ef15281411076629431"} \ No newline at end of file
diff --git a/third_party/rust/bytemuck/Cargo.toml b/third_party/rust/bytemuck/Cargo.toml
new file mode 100644
index 0000000000..e06c9c0f75
--- /dev/null
+++ b/third_party/rust/bytemuck/Cargo.toml
@@ -0,0 +1,33 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies
+#
+# If you believe there's an error in this file please file an
+# issue against the rust-lang/cargo repository. If you're
+# editing this file be aware that the upstream Cargo.toml
+# will likely look very different (and much more reasonable)
+
+[package]
+edition = "2018"
+name = "bytemuck"
+version = "1.2.0"
+authors = ["Lokathor <zefria@gmail.com>"]
+description = "A crate for mucking around with piles of bytes."
+readme = "README.md"
+keywords = ["transmute", "bytes", "casting"]
+categories = ["encoding", "no-std"]
+license = "Zlib"
+repository = "https://github.com/Lokathor/bytemuck"
+[package.metadata.docs.rs]
+all-features = true
+
+[features]
+extern_crate_alloc = []
+[badges.appveyor]
+repository = "Lokathor/bytemuck"
+
+[badges.travis-ci]
+repository = "Lokathor/bytemuck"
diff --git a/third_party/rust/bytemuck/LICENSE-ZLIB.md b/third_party/rust/bytemuck/LICENSE-ZLIB.md
new file mode 100644
index 0000000000..d70707c75a
--- /dev/null
+++ b/third_party/rust/bytemuck/LICENSE-ZLIB.md
@@ -0,0 +1,11 @@
+Copyright (c) 2019 Daniel "Lokathor" Gee.
+
+This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software.
+
+Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions:
+
+1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
+
+2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
+
+3. This notice may not be removed or altered from any source distribution.
diff --git a/third_party/rust/bytemuck/README.md b/third_party/rust/bytemuck/README.md
new file mode 100644
index 0000000000..94080c5759
--- /dev/null
+++ b/third_party/rust/bytemuck/README.md
@@ -0,0 +1,26 @@
+[![License:Zlib](https://img.shields.io/badge/License-Zlib-brightgreen.svg)](https://opensource.org/licenses/Zlib)
+![Minimum Rust Version](https://img.shields.io/badge/Min%20Rust-1.34-green.svg)
+[![travis.ci](https://travis-ci.org/Lokathor/bytemuck.svg?branch=master)](https://travis-ci.org/Lokathor/bytemuck)
+[![AppVeyor](https://ci.appveyor.com/api/projects/status/hgr4if0snmkmqj88/branch/master?svg=true)](https://ci.appveyor.com/project/Lokathor/bytemuck/branch/master)
+[![crates.io](https://img.shields.io/crates/v/bytemuck.svg)](https://crates.io/crates/bytemuck)
+[![docs.rs](https://docs.rs/bytemuck/badge.svg)](https://docs.rs/bytemuck/)
+
+# bytemuck
+
+A crate for mucking around with piles of bytes.
+
+## Extensions
+
+There is experimental support for deriving the `Zeroable` trait through a
+proc-macro. I'm not the author of that crate; please file bugs against it in
+its own repo:
+
+* https://github.com/rodrimati1992/zeroable_crates
+
+## Stability
+
+The goal is to stay at 1.y.z until _at least_ the next edition of Rust.
+
+I consider any increase of the Minimum Rust Version to be a semver breaking change,
+so `rustc-1.34` will continue to be supported for at least the rest of the
+`bytemuck-1.y.z` series of the crate.
diff --git a/third_party/rust/bytemuck/appveyor.yml b/third_party/rust/bytemuck/appveyor.yml
new file mode 100644
index 0000000000..d6c4f5e921
--- /dev/null
+++ b/third_party/rust/bytemuck/appveyor.yml
@@ -0,0 +1,45 @@
+
+os: Visual Studio 2015
+
+branches:
+ only:
+ - staging
+ - trying
+ - master
+ - dev
+
+matrix:
+ fast_finish: true
+
+environment:
+ matrix:
+ # Stable
+ - channel: 1.34.0
+ target: i686-pc-windows-msvc
+ - channel: 1.34.0
+ target: i686-pc-windows-gnu
+ - channel: 1.34.0
+ target: x86_64-pc-windows-msvc
+ - channel: 1.34.0
+ target: x86_64-pc-windows-gnu
+ # Beta and Nightly are checked by TravisCI since builds there run in
+ # parallel.
+
+install:
+ - appveyor DownloadFile https://win.rustup.rs/ -FileName rustup-init.exe
+ - rustup-init -y --default-toolchain %channel% --default-host %target%
+ - set PATH=%PATH%;%USERPROFILE%\.cargo\bin
+ - rustup component add rustfmt
+ - rustup component add clippy
+ - rustc -vV
+ - cargo -vV
+
+# On advice of retep we skip the "build" script phase
+build: false
+
+test_script:
+ - cargo fmt -- --check
+ - cargo clippy
+ - cargo test --no-default-features
+ - cargo test
+ #- cargo test --all-features
diff --git a/third_party/rust/bytemuck/bors.toml b/third_party/rust/bytemuck/bors.toml
new file mode 100644
index 0000000000..359f8947ba
--- /dev/null
+++ b/third_party/rust/bytemuck/bors.toml
@@ -0,0 +1 @@
+status = ["continuous-integration/travis-ci/push"]
diff --git a/third_party/rust/bytemuck/changelog.md b/third_party/rust/bytemuck/changelog.md
new file mode 100644
index 0000000000..9c2e2a9579
--- /dev/null
+++ b/third_party/rust/bytemuck/changelog.md
@@ -0,0 +1,25 @@
+# `bytemuck` changelog
+
+## 1.2.0
+
+* [thomcc](https://github.com/thomcc) added many things:
+ * A fully sound `offset_of!` macro [#10](https://github.com/Lokathor/bytemuck/pull/10)
+ * A `Contiguous` trait for when you've got enums with declared values
+ all in a row [#12](https://github.com/Lokathor/bytemuck/pull/12)
+ * A `TransparentWrapper` marker trait for when you want to more clearly
+ enable adding and removing a wrapper struct to its inner value
+ [#15](https://github.com/Lokathor/bytemuck/pull/15)
+ * Now MIRI is run on CI in every single push!
+ [#16](https://github.com/Lokathor/bytemuck/pull/16)
+
+## 1.1.0
+
+* [SimonSapin](https://github.com/SimonSapin) added `from_bytes`,
+ `from_bytes_mut`, `try_from_bytes`, and `try_from_bytes_mut` ([PR
+ Link](https://github.com/Lokathor/bytemuck/pull/8))
+
+## 1.0.1
+
+* Changed to the [zlib](https://opensource.org/licenses/Zlib) license.
+* Added much more proper documentation.
+* Reduced the minimum Rust version to 1.34
diff --git a/third_party/rust/bytemuck/pedantic.bat b/third_party/rust/bytemuck/pedantic.bat
new file mode 100644
index 0000000000..d6323bf4b5
--- /dev/null
+++ b/third_party/rust/bytemuck/pedantic.bat
@@ -0,0 +1 @@
+cargo clippy -- -W clippy::pedantic
diff --git a/third_party/rust/bytemuck/rustfmt.toml b/third_party/rust/bytemuck/rustfmt.toml
new file mode 100644
index 0000000000..50860b8230
--- /dev/null
+++ b/third_party/rust/bytemuck/rustfmt.toml
@@ -0,0 +1,8 @@
+error_on_line_overflow = false
+merge_imports = true
+reorder_imports = true
+use_try_shorthand = true
+tab_spaces = 2
+max_width = 80
+color = "Never"
+use_small_heuristics = "Max"
diff --git a/third_party/rust/bytemuck/scripts/travis.sh b/third_party/rust/bytemuck/scripts/travis.sh
new file mode 100755
index 0000000000..31201ce6d2
--- /dev/null
+++ b/third_party/rust/bytemuck/scripts/travis.sh
@@ -0,0 +1,77 @@
+#!/bin/bash
+
+set -e
+
+if [[ "$RUN_MIRI" != "" ]]; then
+
+ cargo clean
+
+ # Install and run the latest version of nightly where miri built successfully.
+ # Taken from: https://github.com/rust-lang/miri#running-miri-on-ci
+
+ MIRI_NIGHTLY=nightly-$(curl -s https://rust-lang.github.io/rustup-components-history/x86_64-unknown-linux-gnu/miri)
+ echo "Installing latest nightly with Miri: $MIRI_NIGHTLY"
+ rustup set profile minimal
+ rustup default "$MIRI_NIGHTLY"
+
+ rustup component add miri
+ cargo miri setup
+
+ cargo miri test --verbose
+ cargo miri test --verbose --no-default-features
+ cargo miri test --verbose --all-features
+
+else
+
+ rustup component add clippy
+ cargo clippy
+
+ if [[ "$TARGET" != "" ]]; then rustup target install $TARGET; fi
+
+ if [[ "$TARGET" == "wasm32-"* && "$TARGET" != "wasm32-wasi" ]]; then
+ cargo-web --version || cargo install cargo-web
+ cargo web test --no-default-features $FLAGS --target=$TARGET
+ cargo web test $FLAGS --target=$TARGET
+ #cargo web test --all-features $FLAGS --target=$TARGET
+
+ elif [[ "$TARGET" == *"-linux-android"* ]]; then
+ export PATH=/usr/local/android-sdk/ndk-bundle/toolchains/llvm/prebuilt/linux-x86_64/bin:$PATH
+ pushd linux-android
+ cargo build --no-default-features --target=$TARGET $FLAGS
+ cargo build --target=$TARGET $FLAGS
+ #cargo build --all-features --target=$TARGET $FLAGS
+ # Don't test, can't run android emulators successfully on travis currently
+ popd
+
+ elif [[ "$TARGET" == *"-apple-ios" || "$TARGET" == "wasm32-wasi" ]]; then
+ cargo build --no-default-features --target=$TARGET $FLAGS
+ cargo build --target=$TARGET $FLAGS
+ #cargo build --all-features --target=$TARGET $FLAGS
+ # Don't test
+ # iOS simulator setup/teardown is complicated
+ # cargo-web doesn't support wasm32-wasi yet, nor can wasm-pack test specify a target
+
+ elif [[ "$TARGET" == *"-unknown-linux-gnueabihf" ]]; then
+ #sudo apt-get update
+ #sudo apt-get install -y gcc-arm-linux-gnueabihf g++-arm-linux-gnueabihf
+ pushd generic-cross
+ cargo build --no-default-features --target=$TARGET $FLAGS
+ cargo build --target=$TARGET $FLAGS
+ #cargo build --all-features --target=$TARGET $FLAGS
+ # Don't test
+ popd
+
+ elif [[ "$TARGET" != "" ]]; then
+ pushd generic-cross
+ cargo test --no-default-features --target=$TARGET $FLAGS
+ cargo test --target=$TARGET $FLAGS
+ #cargo test --all-features --target=$TARGET $FLAGS
+ popd
+
+ else
+ # No pushd needed here; build and test for the host CPU architecture
+ cargo test --no-default-features $FLAGS
+ cargo test $FLAGS
+ fi
+
+fi
diff --git a/third_party/rust/bytemuck/src/allocation.rs b/third_party/rust/bytemuck/src/allocation.rs
new file mode 100644
index 0000000000..0676f3f4bf
--- /dev/null
+++ b/third_party/rust/bytemuck/src/allocation.rs
@@ -0,0 +1,119 @@
+//! Stuff to boost things in the `alloc` crate.
+//!
+//! * You must enable the `extern_crate_alloc` feature of `bytemuck` or you will
+//! not be able to use this module!
+
+use super::*;
+use alloc::{
+ alloc::{alloc_zeroed, Layout},
+ boxed::Box,
+ vec::Vec,
+};
+
+/// As [`try_cast_box`](try_cast_box), but unwraps for you.
+#[inline]
+pub fn cast_box<A: Pod, B: Pod>(input: Box<A>) -> Box<B> {
+ try_cast_box(input).map_err(|(e, _v)| e).unwrap()
+}
+
+/// Attempts to cast the content type of a [`Box`](alloc::boxed::Box).
+///
+/// On failure you get back an error along with the starting `Box`.
+///
+/// ## Failure
+///
+/// * The start and end content type of the `Box` must have the exact same
+/// alignment.
+/// * The start and end content type of the `Box` must have the exact same
+/// size.
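+///
+/// ## Example
+///
+/// A minimal sketch (illustrative; requires the `extern_crate_alloc`
+/// feature). `u32` and `i32` match in both size and alignment, so the cast
+/// succeeds:
+///
+/// ```
+/// # use bytemuck::try_cast_box;
+/// let boxed_u32: Box<u32> = Box::new(1);
+/// let boxed_i32: Box<i32> = try_cast_box(boxed_u32).unwrap();
+/// assert_eq!(*boxed_i32, 1);
+/// ```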
+#[inline]
+pub fn try_cast_box<A: Pod, B: Pod>(
+ input: Box<A>,
+) -> Result<Box<B>, (PodCastError, Box<A>)> {
+ if align_of::<A>() != align_of::<B>() {
+ Err((PodCastError::AlignmentMismatch, input))
+ } else if size_of::<A>() != size_of::<B>() {
+ Err((PodCastError::SizeMismatch, input))
+ } else {
+ // Note(Lokathor): This is much simpler than with the Vec casting!
+ let ptr: *mut B = Box::into_raw(input) as *mut B;
+ Ok(unsafe { Box::from_raw(ptr) })
+ }
+}
+
+/// Allocates a `Box<T>` with all of the contents being zeroed out.
+///
+/// This uses the global allocator to create a zeroed allocation and _then_
+/// turns it into a Box. In other words, it's 100% assured that the zeroed data
+/// won't be put temporarily on the stack. You can make a box of any size
+/// without fear of a stack overflow.
+///
+/// ## Failure
+///
+/// This fails if the allocation fails.
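+///
+/// ## Example
+///
+/// A quick sketch (illustrative; requires the `extern_crate_alloc` feature):
+///
+/// ```
+/// # use bytemuck::try_zeroed_box;
+/// // This large zeroed array never exists on the stack.
+/// let big: Box<[u64; 4096]> = try_zeroed_box().unwrap();
+/// assert!(big.iter().all(|&x| x == 0));
+/// ```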
+#[inline]
+pub fn try_zeroed_box<T: Zeroable>() -> Result<Box<T>, ()> {
+ if size_of::<T>() == 0 {
+ return Ok(Box::new(T::zeroed()));
+ }
+ let layout =
+ Layout::from_size_align(size_of::<T>(), align_of::<T>()).unwrap();
+ let ptr = unsafe { alloc_zeroed(layout) };
+ if ptr.is_null() {
+ // we don't know what the error is because `alloc_zeroed` is a dumb API
+ Err(())
+ } else {
+ Ok(unsafe { Box::<T>::from_raw(ptr as *mut T) })
+ }
+}
+
+/// As [`try_zeroed_box`], but unwraps for you.
+#[inline]
+pub fn zeroed_box<T: Zeroable>() -> Box<T> {
+ try_zeroed_box().unwrap()
+}
+
+/// As [`try_cast_vec`](try_cast_vec), but unwraps for you.
+#[inline]
+pub fn cast_vec<A: Pod, B: Pod>(input: Vec<A>) -> Vec<B> {
+ try_cast_vec(input).map_err(|(e, _v)| e).unwrap()
+}
+
+/// Attempts to cast the content type of a [`Vec`](alloc::vec::Vec).
+///
+/// On failure you get back an error along with the starting `Vec`.
+///
+/// ## Failure
+///
+/// * The start and end content type of the `Vec` must have the exact same
+/// alignment.
+/// * The start and end content type of the `Vec` must have the exact same
+/// size.
+/// * In the future this second restriction might be lessened by having the
+/// capacity and length get adjusted during transmutation, but for now it's
+/// absolute.
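+///
+/// ## Example
+///
+/// A minimal sketch (illustrative; requires the `extern_crate_alloc`
+/// feature):
+///
+/// ```
+/// # use bytemuck::try_cast_vec;
+/// let data: Vec<u32> = vec![1, 2, 3];
+/// // Same size and alignment, so no reallocation or copying happens.
+/// let ints: Vec<i32> = try_cast_vec(data).unwrap();
+/// assert_eq!(ints, [1, 2, 3]);
+/// ```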
+#[inline]
+pub fn try_cast_vec<A: Pod, B: Pod>(
+ input: Vec<A>,
+) -> Result<Vec<B>, (PodCastError, Vec<A>)> {
+ if align_of::<A>() != align_of::<B>() {
+ Err((PodCastError::AlignmentMismatch, input))
+ } else if size_of::<A>() != size_of::<B>() {
+ // Note(Lokathor): Under some conditions it would be possible to cast
+ // between Vec content types of the same alignment but different sizes by
+ // changing the capacity and len values in the output Vec. However, we will
+ // not attempt that for now.
+ Err((PodCastError::SizeMismatch, input))
+ } else {
+ // Note(Lokathor): First we record the length and capacity, which don't have
+ // any secret provenance metadata.
+ let length: usize = input.len();
+ let capacity: usize = input.capacity();
+ // Note(Lokathor): Next we "pre-forget" the old Vec by wrapping with
+ // ManuallyDrop, because if we used `core::mem::forget` after taking the
+ // pointer then that would invalidate our pointer. In nightly there's a
+ // "into raw parts" method, which we can switch this too eventually.
+ let mut manual_drop_vec = ManuallyDrop::new(input);
+ let vec_ptr: *mut A = manual_drop_vec.as_mut_ptr();
+ let ptr: *mut B = vec_ptr as *mut B;
+ Ok(unsafe { Vec::from_raw_parts(ptr, length, capacity) })
+ }
+}
diff --git a/third_party/rust/bytemuck/src/contiguous.rs b/third_party/rust/bytemuck/src/contiguous.rs
new file mode 100644
index 0000000000..30709a7cb3
--- /dev/null
+++ b/third_party/rust/bytemuck/src/contiguous.rs
@@ -0,0 +1,203 @@
+use super::*;
+use core::mem::{size_of, transmute_copy};
+
+/// A trait indicating that:
+///
+/// 1. A type has an equivalent representation to some known integral type.
+/// 2. All instances of this type fall in a fixed range of values.
+/// 3. Within that range, there are no gaps.
+///
+/// This is generally useful for fieldless enums (aka "c-style" enums), however
+/// it's important that it only be used for those with an explicit `#[repr]`, as
+/// `#[repr(Rust)]` fieldless enums have an unspecified layout.
+///
+/// Additionally, you shouldn't assume that all implementations are enums. Any
+/// type which meets the requirements above while following the rules under
+/// "Safety" below is valid.
+///
+/// # Example
+///
+/// ```
+/// # use bytemuck::Contiguous;
+/// #[repr(u8)]
+/// #[derive(Debug, Copy, Clone, PartialEq)]
+/// enum Foo {
+/// A = 0,
+/// B = 1,
+/// C = 2,
+/// D = 3,
+/// E = 4,
+/// }
+/// unsafe impl Contiguous for Foo {
+/// type Int = u8;
+/// const MIN_VALUE: u8 = Foo::A as u8;
+/// const MAX_VALUE: u8 = Foo::E as u8;
+/// }
+/// assert_eq!(Foo::from_integer(3).unwrap(), Foo::D);
+/// assert_eq!(Foo::from_integer(8), None);
+/// assert_eq!(Foo::C.into_integer(), 2);
+/// ```
+/// # Safety
+///
+/// This is an unsafe trait, and incorrectly implementing it is undefined
+/// behavior.
+///
+/// Informally, by implementing it, you're asserting that `C` is identical to
+/// the integral type `C::Int`, and that every `C` falls between `C::MIN_VALUE`
+/// and `C::MAX_VALUE` exactly once, without any gaps.
+///
+/// Precisely, the guarantees you must uphold when implementing `Contiguous` for
+/// some type `C` are:
+///
+/// 1. The size of `C` and `C::Int` must be the same, and neither may be a ZST.
+/// (Note: alignment is explicitly allowed to differ)
+///
+/// 2. `C::Int` must be a primitive integer, and not a wrapper type. In the
+/// future, this may be lifted to include cases where the behavior is
+/// identical for a relevant set of traits (Ord, arithmetic, ...).
+///
+/// 3. All `C::Int`s which are in the *inclusive* range between `C::MIN_VALUE`
+/// and `C::MAX_VALUE` are bitwise identical to unique valid instances of
+/// `C`.
+///
+/// 4. There exist no instances of `C` such that their bitpatterns, when
+/// interpreted as instances of `C::Int`, fall outside of the `MAX_VALUE` /
+/// `MIN_VALUE` range -- It is legal for unsafe code to assume that if it
+/// gets a `C` that implements `Contiguous`, it is in the appropriate range.
+///
+/// 5. Finally, you promise not to provide overridden implementations of
+/// `Contiguous::from_integer` and `Contiguous::into_integer`.
+///
+/// For clarity, the following rules could be derived from the above, but are
+/// listed explicitly:
+///
+/// - `C::MAX_VALUE` must be greater or equal to `C::MIN_VALUE` (therefore, `C`
+/// must be an inhabited type).
+///
+/// - There exist no two values between `MIN_VALUE` and `MAX_VALUE` such that
+/// when interpreted as a `C` they are considered identical (by, say, match).
+pub unsafe trait Contiguous: Copy + 'static {
+ /// The primitive integer type with an identical representation to this
+ /// type.
+ ///
+ /// Contiguous is broadly intended for use with fieldless enums, and for
+ /// these the correct integer type is easy: The enum should have a
+ /// `#[repr(Int)]` or `#[repr(C)]` attribute, (if it does not, it is
+ /// *unsound* to implement `Contiguous`!).
+ ///
+ /// - For `#[repr(Int)]`, use the listed `Int`. e.g. `#[repr(u8)]` should
+ /// use `type Int = u8`.
+ ///
+ /// - For `#[repr(C)]`, use whichever type the C compiler will use to
+ /// represent the given enum. This is usually `c_int` (from `std::os::raw`
+ /// or `libc`), but it's up to you to make the determination as the
+ /// implementer of the unsafe trait.
+ ///
+ /// For precise rules, see the list under "Safety" above.
+ type Int: Copy + Ord;
+
+ /// The upper *inclusive* bound for valid instances of this type.
+ const MAX_VALUE: Self::Int;
+
+ /// The lower *inclusive* bound for valid instances of this type.
+ const MIN_VALUE: Self::Int;
+
+ /// If `value` is within the range for valid instances of this type,
+ /// returns `Some(converted_value)`, otherwise, returns `None`.
+ ///
+ /// This is a trait method so that you can write `T::from_integer(value)` in
+ /// generic code. It is a contract of this trait that if you implement
+ /// `Contiguous` on your type you **must not** override this method.
+ ///
+ /// # Panics
+ ///
+ /// We will not panic for any correct implementation of `Contiguous`, but
+ /// *may* panic if we detect an incorrect one.
+ ///
+ /// This is undefined behavior regardless, so it could have been the nasal
+ /// demons at that point anyway ;).
+ #[inline]
+ fn from_integer(value: Self::Int) -> Option<Self> {
+ // Guard against an illegal implementation of Contiguous. Annoyingly we
+ // can't rely on `transmute` to do this for us (see below), but
+ // whatever, this gets compiled into nothing in release.
+ assert!(size_of::<Self>() == size_of::<Self::Int>());
+ if Self::MIN_VALUE <= value && value <= Self::MAX_VALUE {
+ // SAFETY: We've checked their bounds (and their size, even though
+ // they've sworn under the Oath Of Unsafe Rust that that already
+ // matched) so this is allowed by `Contiguous`'s unsafe contract.
+ //
+ // So, the `transmute_copy`. Ideally we'd use `transmute` here, which
+ // is more obviously safe. Sadly, we can't, as these types still
+ // have unspecified sizes.
+ Some(unsafe { transmute_copy::<Self::Int, Self>(&value) })
+ } else {
+ None
+ }
+ }
+
+ /// Perform the conversion from `C` into the underlying integral type. This
+ /// mostly exists because otherwise generic code would need `unsafe` to do
+ /// the equivalent of a `value as integer` cast.
+ ///
+ /// This is a trait method so that you can write `value.into_integer()` in
+ /// your code. It is a contract of this trait that if you implement
+ /// `Contiguous` on your type you **must not** override this method.
+ ///
+ /// # Panics
+ ///
+ /// We will not panic for any correct implementation of `Contiguous`, but
+ /// *may* panic if we detect an incorrect one.
+ ///
+ /// This is undefined behavior regardless, so it could have been the nasal
+ /// demons at that point anyway ;).
+ #[inline]
+ fn into_integer(self) -> Self::Int {
+ // Guard against an illegal implementation of Contiguous. Annoyingly we
+ // can't rely on `transmute` to do the size check for us (see
+ // `from_integer`'s comment), but whatever, this gets compiled into
+ // nothing in release. Note that we don't check the result of the cast.
+ assert!(size_of::<Self>() == size_of::<Self::Int>());
+
+ // SAFETY: The unsafe contract requires that these have identical
+ // representations, and that the range be entirely valid. Using
+ // transmute_copy instead of transmute here is annoying, but is required
+ // as `Self` and `Self::Int` have unspecified sizes still.
+ unsafe { transmute_copy::<Self, Self::Int>(&self) }
+ }
+}
+
+macro_rules! impl_contiguous {
+ ($($src:ty as $repr:ident in [$min:expr, $max:expr];)*) => {$(
+ unsafe impl Contiguous for $src {
+ type Int = $repr;
+ const MAX_VALUE: $repr = $max;
+ const MIN_VALUE: $repr = $min;
+ }
+ )*};
+}
+
+impl_contiguous! {
+ bool as u8 in [0, 1];
+
+ u8 as u8 in [0, u8::max_value()];
+ u16 as u16 in [0, u16::max_value()];
+ u32 as u32 in [0, u32::max_value()];
+ u64 as u64 in [0, u64::max_value()];
+ u128 as u128 in [0, u128::max_value()];
+ usize as usize in [0, usize::max_value()];
+
+ i8 as i8 in [i8::min_value(), i8::max_value()];
+ i16 as i16 in [i16::min_value(), i16::max_value()];
+ i32 as i32 in [i32::min_value(), i32::max_value()];
+ i64 as i64 in [i64::min_value(), i64::max_value()];
+ i128 as i128 in [i128::min_value(), i128::max_value()];
+ isize as isize in [isize::min_value(), isize::max_value()];
+
+ NonZeroU8 as u8 in [1, u8::max_value()];
+ NonZeroU16 as u16 in [1, u16::max_value()];
+ NonZeroU32 as u32 in [1, u32::max_value()];
+ NonZeroU64 as u64 in [1, u64::max_value()];
+ NonZeroU128 as u128 in [1, u128::max_value()];
+ NonZeroUsize as usize in [1, usize::max_value()];
+}
diff --git a/third_party/rust/bytemuck/src/lib.rs b/third_party/rust/bytemuck/src/lib.rs
new file mode 100644
index 0000000000..a90199d52a
--- /dev/null
+++ b/third_party/rust/bytemuck/src/lib.rs
@@ -0,0 +1,433 @@
+#![no_std]
+#![warn(missing_docs)]
+
+//! This crate gives small utilities for casting between plain data types.
+//!
+//! ## Basics
+//!
+//! Data comes in five basic forms in Rust, so we have five basic casting
+//! functions:
+//!
+//! * `T` uses [`cast`]
+//! * `&T` uses [`cast_ref`]
+//! * `&mut T` uses [`cast_mut`]
+//! * `&[T]` uses [`cast_slice`]
+//! * `&mut [T]` uses [`cast_slice_mut`]
+//!
+//! Some casts will never fail (eg: `cast::<u32, f32>` always works), other
+//! casts might fail (eg: `cast_ref::<[u8; 4], u32>` will fail if the reference
+//! isn't already aligned to 4). Each casting function has a "try" version which
+//! will return a `Result`, and the "normal" version which will simply panic on
+//! invalid input.
+//!
+//! ## Using Your Own Types
+//!
+//! All the functions here are guarded by the [`Pod`] trait, which is a
+//! sub-trait of the [`Zeroable`] trait.
+//!
+//! If you're very sure that your type is eligible, you can implement those
+//! traits for your type and then they'll have full casting support. However,
+//! these traits are `unsafe`, and you should carefully read the requirements
+//! before adding them to your own types.
+//!
+//! ## Features
+//!
+//! * This crate is core only by default, but if you're using Rust 1.36 or later
+//! you can enable the `extern_crate_alloc` cargo feature for some additional
+//! methods related to `Box` and `Vec`. Note that the `docs.rs` documentation
+//! is always built with `extern_crate_alloc` cargo feature enabled.
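+//!
+//! A quick sketch of the basics (illustrative only):
+//!
+//! ```
+//! // A same-size value cast: 1.0_f32 has the bit pattern 0x3F80_0000.
+//! let one_bits: u32 = bytemuck::cast(1.0_f32);
+//! assert_eq!(one_bits, 0x3F80_0000);
+//!
+//! // A slice cast changes the length to match the element size.
+//! let bytes: &[u8] = bytemuck::cast_slice(&[1_u32, 2]);
+//! assert_eq!(bytes.len(), 8);
+//! ```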
+
+#[cfg(target_arch = "x86")]
+use core::arch::x86;
+#[cfg(target_arch = "x86_64")]
+use core::arch::x86_64;
+//
+use core::{marker::*, mem::*, num::*, ptr::*};
+
+// Used from macros to ensure we aren't using some locally defined name and
+// actually are referencing libcore. This also would allow pre-2018 edition
+// crates to use our macros, but I'm not sure how important that is.
+#[doc(hidden)]
+pub use ::core as __core;
+
+macro_rules! impl_unsafe_marker_for_array {
+ ( $marker:ident , $( $n:expr ),* ) => {
+ $(unsafe impl<T> $marker for [T; $n] where T: $marker {})*
+ }
+}
+
+#[cfg(feature = "extern_crate_alloc")]
+extern crate alloc;
+#[cfg(feature = "extern_crate_alloc")]
+pub mod allocation;
+#[cfg(feature = "extern_crate_alloc")]
+pub use allocation::*;
+
+mod zeroable;
+pub use zeroable::*;
+
+mod pod;
+pub use pod::*;
+
+mod contiguous;
+pub use contiguous::*;
+
+mod offset_of;
+pub use offset_of::*;
+
+mod transparent;
+pub use transparent::*;
+
+/*
+
+Note(Lokathor): We've switched all of the `unwrap` to `match` because there is
+apparently a bug: https://github.com/rust-lang/rust/issues/68667
+and it doesn't seem to show up in simple godbolt examples but has been reported
+as having an impact when there's a cast mixed in with other more complicated
+code around it. Rustc/LLVM ends up missing that the `Err` can't ever happen for
+particular type combinations, and then it doesn't fully eliminated the panic
+possibility code branch.
+
+*/
+
+/// Immediately panics.
+#[cold]
+#[inline(never)]
+fn something_went_wrong(src: &str, err: PodCastError) -> ! {
+ // Note(Lokathor): Keeping the panic here makes the panic _formatting_ go
+ // here too, which helps assembly readability and also helps keep down
+ // the inline pressure.
+ panic!("{src}>{err:?}", src = src, err = err)
+}
+
+/// Re-interprets `&T` as `&[u8]`.
+///
+/// Any ZST becomes an empty slice, and in that case the pointer value of that
+/// empty slice might not match the pointer value of the input reference.
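+///
+/// A small sketch (illustrative; the byte order is the host's endianness):
+///
+/// ```
+/// let x = 0x0102_0304_u32;
+/// let bytes = bytemuck::bytes_of(&x);
+/// assert_eq!(bytes.len(), 4);
+/// if cfg!(target_endian = "little") {
+///   assert_eq!(bytes, &[4, 3, 2, 1]);
+/// }
+/// ```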
+#[inline]
+pub fn bytes_of<T: Pod>(t: &T) -> &[u8] {
+ match try_cast_slice::<T, u8>(core::slice::from_ref(t)) {
+ Ok(s) => s,
+ Err(_) => unreachable!(),
+ }
+}
+
+/// Re-interprets `&mut T` as `&mut [u8]`.
+///
+/// Any ZST becomes an empty slice, and in that case the pointer value of that
+/// empty slice might not match the pointer value of the input reference.
+#[inline]
+pub fn bytes_of_mut<T: Pod>(t: &mut T) -> &mut [u8] {
+ match try_cast_slice_mut::<T, u8>(core::slice::from_mut(t)) {
+ Ok(s) => s,
+ Err(_) => unreachable!(),
+ }
+}
+
+/// Re-interprets `&[u8]` as `&T`.
+///
+/// ## Panics
+///
+/// This is [`try_from_bytes`] but will panic on error.
+#[inline]
+pub fn from_bytes<T: Pod>(s: &[u8]) -> &T {
+ match try_from_bytes(s) {
+ Ok(t) => t,
+ Err(e) => something_went_wrong("from_bytes", e),
+ }
+}
+
+/// Re-interprets `&mut [u8]` as `&mut T`.
+///
+/// ## Panics
+///
+/// This is [`try_from_bytes_mut`] but will panic on error.
+#[inline]
+pub fn from_bytes_mut<T: Pod>(s: &mut [u8]) -> &mut T {
+ match try_from_bytes_mut(s) {
+ Ok(t) => t,
+ Err(e) => something_went_wrong("from_bytes_mut", e),
+ }
+}
+
+/// Re-interprets `&[u8]` as `&T`.
+///
+/// ## Failure
+///
+/// * If the slice isn't aligned for the new type
+/// * If the slice's length isn't exactly the size of the new type
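+///
+/// ## Example
+///
+/// A minimal sketch (illustrative):
+///
+/// ```
+/// # use bytemuck::{bytes_of, try_from_bytes, PodCastError};
+/// let x = 12345_u32;
+/// let bytes = bytes_of(&x);
+/// // The bytes of a `u32` are aligned and sized for `u32`, so this works.
+/// assert_eq!(try_from_bytes::<u32>(bytes), Ok(&x));
+/// // The wrong length fails.
+/// assert_eq!(try_from_bytes::<u32>(&bytes[..3]), Err(PodCastError::SizeMismatch));
+/// ```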
+#[inline]
+pub fn try_from_bytes<T: Pod>(s: &[u8]) -> Result<&T, PodCastError> {
+ if s.len() != size_of::<T>() {
+ Err(PodCastError::SizeMismatch)
+ } else if (s.as_ptr() as usize) % align_of::<T>() != 0 {
+ Err(PodCastError::AlignmentMismatch)
+ } else {
+ Ok(unsafe { &*(s.as_ptr() as *const T) })
+ }
+}
+
+/// Re-interprets `&mut [u8]` as `&mut T`.
+///
+/// ## Failure
+///
+/// * If the slice isn't aligned for the new type
+/// * If the slice's length isn't exactly the size of the new type
+#[inline]
+pub fn try_from_bytes_mut<T: Pod>(
+ s: &mut [u8],
+) -> Result<&mut T, PodCastError> {
+ if s.len() != size_of::<T>() {
+ Err(PodCastError::SizeMismatch)
+ } else if (s.as_ptr() as usize) % align_of::<T>() != 0 {
+ Err(PodCastError::AlignmentMismatch)
+ } else {
+ Ok(unsafe { &mut *(s.as_mut_ptr() as *mut T) })
+ }
+}
+
+/// The things that can go wrong when casting between [`Pod`] data forms.
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub enum PodCastError {
+ /// You tried to cast a slice to an element type with a higher alignment
+ /// requirement but the slice wasn't aligned.
+ TargetAlignmentGreaterAndInputNotAligned,
+ /// If the element size changes then the output slice changes length
+ /// accordingly. If the output slice wouldn't be a whole number of elements
+ /// then the conversion fails.
+ OutputSliceWouldHaveSlop,
+ /// When casting a slice you can't convert between ZST elements and non-ZST
+ /// elements. When casting an individual `T`, `&T`, or `&mut T` value the
+ /// source size and destination size must be an exact match.
+ SizeMismatch,
+ /// For this type of cast the alignments must be exactly the same and they
+ /// were not so now you're sad.
+ AlignmentMismatch,
+}
+
+/// Cast `T` into `U`.
+///
+/// ## Panics
+///
+/// This is [`try_cast`] but will panic on error.
+#[inline]
+pub fn cast<A: Pod, B: Pod>(a: A) -> B {
+ if size_of::<A>() == size_of::<B>() {
+ // Plz mr compiler, just notice that we can't ever hit Err in this case.
+ match try_cast(a) {
+ Ok(b) => b,
+ Err(_) => unreachable!(),
+ }
+ } else {
+ match try_cast(a) {
+ Ok(b) => b,
+ Err(e) => something_went_wrong("cast", e),
+ }
+ }
+}
+
+/// Cast `&mut T` into `&mut U`.
+///
+/// ## Panics
+///
+/// This is [`try_cast_mut`] but will panic on error.
+#[inline]
+pub fn cast_mut<A: Pod, B: Pod>(a: &mut A) -> &mut B {
+ if size_of::<A>() == size_of::<B>() && align_of::<A>() >= align_of::<B>() {
+ // Plz mr compiler, just notice that we can't ever hit Err in this case.
+ match try_cast_mut(a) {
+ Ok(b) => b,
+ Err(_) => unreachable!(),
+ }
+ } else {
+ match try_cast_mut(a) {
+ Ok(b) => b,
+ Err(e) => something_went_wrong("cast_mut", e),
+ }
+ }
+}
+
+/// Cast `&T` into `&U`.
+///
+/// ## Panics
+///
+/// This is [`try_cast_ref`] but will panic on error.
+#[inline]
+pub fn cast_ref<A: Pod, B: Pod>(a: &A) -> &B {
+ if size_of::<A>() == size_of::<B>() && align_of::<A>() >= align_of::<B>() {
+ // Plz mr compiler, just notice that we can't ever hit Err in this case.
+ match try_cast_ref(a) {
+ Ok(b) => b,
+ Err(_) => unreachable!(),
+ }
+ } else {
+ match try_cast_ref(a) {
+ Ok(b) => b,
+ Err(e) => something_went_wrong("cast_ref", e),
+ }
+ }
+}
+
+/// Cast `&[T]` into `&[U]`.
+///
+/// ## Panics
+///
+/// This is [`try_cast_slice`] but will panic on error.
+#[inline]
+pub fn cast_slice<A: Pod, B: Pod>(a: &[A]) -> &[B] {
+ match try_cast_slice(a) {
+ Ok(b) => b,
+ Err(e) => something_went_wrong("cast_slice", e),
+ }
+}
+
+/// Cast `&mut [T]` into `&mut [U]`.
+///
+/// ## Panics
+///
+/// This is [`try_cast_slice_mut`] but will panic on error.
+#[inline]
+pub fn cast_slice_mut<A: Pod, B: Pod>(a: &mut [A]) -> &mut [B] {
+ match try_cast_slice_mut(a) {
+ Ok(b) => b,
+ Err(e) => something_went_wrong("cast_slice_mut", e),
+ }
+}
+
+/// As `align_to`, but safe because of the [`Pod`] bound.
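+///
+/// A quick sketch (illustrative; where the slice splits depends on the
+/// runtime address):
+///
+/// ```
+/// let bytes = [0_u8; 13];
+/// let (pre, mid, post) = bytemuck::pod_align_to::<u8, u32>(&bytes);
+/// // However the split falls, no bytes are lost.
+/// assert_eq!(pre.len() + mid.len() * 4 + post.len(), 13);
+/// ```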
+#[inline]
+pub fn pod_align_to<T: Pod, U: Pod>(vals: &[T]) -> (&[T], &[U], &[T]) {
+ unsafe { vals.align_to::<U>() }
+}
+
+/// As `align_to_mut`, but safe because of the [`Pod`] bound.
+#[inline]
+pub fn pod_align_to_mut<T: Pod, U: Pod>(
+ vals: &mut [T],
+) -> (&mut [T], &mut [U], &mut [T]) {
+ unsafe { vals.align_to_mut::<U>() }
+}
+
+/// Try to cast `T` into `U`.
+///
+/// ## Failure
+///
+/// * If the types don't have the same size this fails.
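+///
+/// A minimal sketch (illustrative):
+///
+/// ```
+/// # use bytemuck::{try_cast, PodCastError};
+/// // Same size: succeeds (1.0_f32 has the bit pattern 0x3F80_0000).
+/// assert_eq!(try_cast::<f32, u32>(1.0), Ok(0x3F80_0000));
+/// // Different size: fails.
+/// assert_eq!(try_cast::<u32, u8>(1), Err(PodCastError::SizeMismatch));
+/// ```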
+#[inline]
+pub fn try_cast<A: Pod, B: Pod>(a: A) -> Result<B, PodCastError> {
+ if size_of::<A>() == size_of::<B>() {
+ let mut b = B::zeroed();
+ // Note(Lokathor): We copy in terms of `u8` because that allows us to bypass
+ // any potential alignment difficulties.
+ let ap = &a as *const A as *const u8;
+ let bp = &mut b as *mut B as *mut u8;
+ unsafe { ap.copy_to_nonoverlapping(bp, size_of::<A>()) };
+ Ok(b)
+ } else {
+ Err(PodCastError::SizeMismatch)
+ }
+}
+
+/// Try to convert a `&T` into `&U`.
+///
+/// ## Failure
+///
+/// * If the reference isn't aligned in the new type
+/// * If the source type and target type aren't the same size.
+#[inline]
+pub fn try_cast_ref<A: Pod, B: Pod>(a: &A) -> Result<&B, PodCastError> {
+ // Note(Lokathor): everything with `align_of` and `size_of` will optimize away
+ // after monomorphization.
+ if align_of::<B>() > align_of::<A>()
+ && (a as *const A as usize) % align_of::<B>() != 0
+ {
+ Err(PodCastError::TargetAlignmentGreaterAndInputNotAligned)
+ } else if size_of::<B>() == size_of::<A>() {
+ Ok(unsafe { &*(a as *const A as *const B) })
+ } else {
+ Err(PodCastError::SizeMismatch)
+ }
+}
+
+/// Try to convert a `&mut T` into `&mut U`.
+///
+/// As [`try_cast_ref`], but `mut`.
+#[inline]
+pub fn try_cast_mut<A: Pod, B: Pod>(a: &mut A) -> Result<&mut B, PodCastError> {
+ // Note(Lokathor): everything with `align_of` and `size_of` will optimize away
+ // after monomorphization.
+ if align_of::<B>() > align_of::<A>()
+ && (a as *mut A as usize) % align_of::<B>() != 0
+ {
+ Err(PodCastError::TargetAlignmentGreaterAndInputNotAligned)
+ } else if size_of::<B>() == size_of::<A>() {
+ Ok(unsafe { &mut *(a as *mut A as *mut B) })
+ } else {
+ Err(PodCastError::SizeMismatch)
+ }
+}
+
+/// Try to convert `&[T]` into `&[U]` (possibly with a change in length).
+///
+/// On success, the output slice covers exactly the same region of memory:
+///
+/// * `input.as_ptr() as usize == output.as_ptr() as usize`
+/// * `input.len() * size_of::<A>() == output.len() * size_of::<B>()`
+///
+/// ## Failure
+///
+/// * If the target type has a greater alignment requirement and the input slice
+/// isn't aligned.
+/// * If the target element type is a different size from the current element
+/// type, and the output slice wouldn't be a whole number of elements when
+/// accounting for the size change (eg: 3 `u16` values is 1.5 `u32` values, so
+/// that's a failure).
+/// * Similarly, you can't convert between a
+/// [ZST](https://doc.rust-lang.org/nomicon/exotic-sizes.html#zero-sized-types-zsts)
+/// and a non-ZST.
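+///
+/// ## Example
+///
+/// A minimal sketch (illustrative):
+///
+/// ```
+/// # use bytemuck::try_cast_slice;
+/// let ints: &[u32] = &[1, 2];
+/// // Every `&[u32]` is valid as `&[u8]`, at four times the length.
+/// let bytes: &[u8] = try_cast_slice(ints).unwrap();
+/// assert_eq!(bytes.len(), 8);
+/// ```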
+#[inline]
+pub fn try_cast_slice<A: Pod, B: Pod>(a: &[A]) -> Result<&[B], PodCastError> {
+ // Note(Lokathor): everything with `align_of` and `size_of` will optimize away
+ // after monomorphization.
+ if align_of::<B>() > align_of::<A>()
+ && (a.as_ptr() as usize) % align_of::<B>() != 0
+ {
+ Err(PodCastError::TargetAlignmentGreaterAndInputNotAligned)
+ } else if size_of::<B>() == size_of::<A>() {
+ Ok(unsafe { core::slice::from_raw_parts(a.as_ptr() as *const B, a.len()) })
+ } else if size_of::<A>() == 0 || size_of::<B>() == 0 {
+ Err(PodCastError::SizeMismatch)
+ } else if core::mem::size_of_val(a) % size_of::<B>() == 0 {
+ let new_len = core::mem::size_of_val(a) / size_of::<B>();
+ Ok(unsafe { core::slice::from_raw_parts(a.as_ptr() as *const B, new_len) })
+ } else {
+ Err(PodCastError::OutputSliceWouldHaveSlop)
+ }
+}
+
+/// Try to convert `&mut [T]` into `&mut [U]` (possibly with a change in length).
+///
+/// As [`try_cast_slice`], but `&mut`.
+#[inline]
+pub fn try_cast_slice_mut<A: Pod, B: Pod>(
+ a: &mut [A],
+) -> Result<&mut [B], PodCastError> {
+ // Note(Lokathor): everything with `align_of` and `size_of` will optimize away
+ // after monomorphization.
+ if align_of::<B>() > align_of::<A>()
+ && (a.as_mut_ptr() as usize) % align_of::<B>() != 0
+ {
+ Err(PodCastError::TargetAlignmentGreaterAndInputNotAligned)
+ } else if size_of::<B>() == size_of::<A>() {
+ Ok(unsafe {
+ core::slice::from_raw_parts_mut(a.as_mut_ptr() as *mut B, a.len())
+ })
+ } else if size_of::<A>() == 0 || size_of::<B>() == 0 {
+ Err(PodCastError::SizeMismatch)
+ } else if core::mem::size_of_val(a) % size_of::<B>() == 0 {
+ let new_len = core::mem::size_of_val(a) / size_of::<B>();
+ Ok(unsafe {
+ core::slice::from_raw_parts_mut(a.as_mut_ptr() as *mut B, new_len)
+ })
+ } else {
+ Err(PodCastError::OutputSliceWouldHaveSlop)
+ }
+}
diff --git a/third_party/rust/bytemuck/src/offset_of.rs b/third_party/rust/bytemuck/src/offset_of.rs
new file mode 100644
index 0000000000..fa8572733b
--- /dev/null
+++ b/third_party/rust/bytemuck/src/offset_of.rs
@@ -0,0 +1,103 @@
+#![forbid(unsafe_code)]
+
+/// Find the offset in bytes of the given `$field` of `$Type`, using `$instance`
+/// as an already-initialized value to work with.
+///
+/// This is similar to the macro from `memoffset`, however it's fully well
+/// defined even in current versions of Rust (and uses no unsafe code).
+///
+/// It does this by using the `$instance` argument to have an
+/// already-initialized instance of `$Type` rather than trying to find a way
+/// to access the fields of an uninitialized one without hitting soundness
+/// problems. The value passed to
+/// the macro is referenced but not moved.
+///
+/// This means the API is more limited, but it's also sound even in rather
+/// extreme cases, like some of the examples.
+///
+/// ## Caveats
+///
+/// 1. The offset is in bytes, and so you will likely have to cast your base
+/// pointers to `*const u8`/`*mut u8` before getting field addresses.
+///
+/// 2. The offset values of repr(Rust) types are not stable, and may change
+/// wildly between releases of the compiler. Use repr(C) if you can.
+///
+/// 3. The value of the `$instance` parameter has no bearing on the output of
+/// this macro. It is just used to avoid soundness problems. The only
+/// requirement is that it be initialized. In particular, the value returned
+/// is not a field pointer, or anything like that.
+///
+/// ## Examples
+///
+/// ### Use with zeroable types
+/// A common requirement in GPU APIs is to specify the layout of vertices. These
+/// will generally be [`Zeroable`] (if not [`Pod`]), and are a good fit for
+/// `offset_of!`.
+/// ```
+/// # use bytemuck::{Zeroable, offset_of};
+/// #[repr(C)]
+/// struct Vertex {
+/// pos: [f32; 2],
+/// uv: [u16; 2],
+/// color: [u8; 4],
+/// }
+/// unsafe impl Zeroable for Vertex {}
+///
+/// let pos = offset_of!(Zeroable::zeroed(), Vertex, pos);
+/// let uv = offset_of!(Zeroable::zeroed(), Vertex, uv);
+/// let color = offset_of!(Zeroable::zeroed(), Vertex, color);
+///
+/// assert_eq!(pos, 0);
+/// assert_eq!(uv, 8);
+/// assert_eq!(color, 12);
+/// ```
+///
+/// ### Use with other types
+///
+/// More esoteric uses are possible too, including with types generally not
+/// safe to otherwise use with bytemuck: `String`s, `Vec`s, etc.
+///
+/// ```
+/// #[derive(Default)]
+/// struct Foo {
+/// a: u8,
+/// b: &'static str,
+/// c: i32,
+/// }
+///
+/// let a_offset = bytemuck::offset_of!(Default::default(), Foo, a);
+/// let b_offset = bytemuck::offset_of!(Default::default(), Foo, b);
+/// let c_offset = bytemuck::offset_of!(Default::default(), Foo, c);
+///
+/// assert_ne!(a_offset, b_offset);
+/// assert_ne!(b_offset, c_offset);
+/// // We can't check against hardcoded values for a repr(Rust) type,
+/// // but prove to ourself this way.
+///
+/// let foo = Foo::default();
+/// // Note: offsets are in bytes.
+/// let as_bytes = &foo as *const _ as *const u8;
+///
+/// // we're using `wrapping_add` here because it's not worth
+/// // the unsafe block, but it would be valid to use `add` instead,
+/// // as it cannot overflow.
+/// assert_eq!(&foo.a as *const _ as usize, as_bytes.wrapping_add(a_offset) as usize);
+/// assert_eq!(&foo.b as *const _ as usize, as_bytes.wrapping_add(b_offset) as usize);
+/// assert_eq!(&foo.c as *const _ as usize, as_bytes.wrapping_add(c_offset) as usize);
+/// ```
+#[macro_export]
+macro_rules! offset_of {
+ ($instance:expr, $Type:path, $field:tt) => {{
+ // This helps us guard against field access going through a Deref impl.
+ #[allow(clippy::unneeded_field_pattern)]
+ let $Type { $field: _, .. };
+ let reference: &$Type = &$instance;
+ let address = reference as *const _ as usize;
+ let field_pointer = &reference.$field as *const _ as usize;
+ // These asserts/unwraps are compiled away at release, and defend against
+ // the case where somehow a deref impl is still invoked.
+ let result = field_pointer.checked_sub(address).unwrap();
+ assert!(result <= $crate::__core::mem::size_of::<$Type>());
+ result
+ }};
+}
diff --git a/third_party/rust/bytemuck/src/pod.rs b/third_party/rust/bytemuck/src/pod.rs
new file mode 100644
index 0000000000..e5cc6938ec
--- /dev/null
+++ b/third_party/rust/bytemuck/src/pod.rs
@@ -0,0 +1,99 @@
+use super::*;
+
+/// Marker trait for "plain old data".
+///
+/// The point of this trait is that once something is marked "plain old data"
+/// you can really go to town with the bit fiddling and bit casting. Therefore,
+/// it's a relatively strong claim to make about a type. Do not add this to your
+/// type casually.
+///
+/// **Reminder:** The results of casting around bytes between data types are
+/// _endian-dependent_. Little-endian machines are the most common, but
+/// big-endian machines do exist (and big-endian is also used for "network
+/// order" bytes).
+///
+/// ## Safety
+///
+/// * The type must be inhabited (eg: no
+/// [Infallible](core::convert::Infallible)).
+/// * The type must allow any bit pattern (eg: no `bool` or `char`, which have
+/// illegal bit patterns).
+/// * The type must not contain any padding bytes, either in the middle or on
+/// the end (eg: no `#[repr(C)] struct Foo(u8, u16)`, which has padding in the
+/// middle, and also no `#[repr(C)] struct Foo(u16, u8)`, which has padding on
+/// the end).
+/// * The type needs to have all fields also be `Pod`.
+/// * The type needs to be `repr(C)` or `repr(transparent)`. In the case of
+/// `repr(C)`, the `packed` and `align` repr modifiers can be used as long as
+/// all other rules end up being followed.
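+///
+/// ## Example
+///
+/// A minimal sketch of opting in a user-defined type (the `Rgba` struct here
+/// is purely illustrative):
+///
+/// ```
+/// # use bytemuck::{Pod, Zeroable};
+/// #[derive(Clone, Copy)]
+/// #[repr(C)]
+/// struct Rgba {
+///   r: u8,
+///   g: u8,
+///   b: u8,
+///   a: u8,
+/// }
+/// // Safety: inhabited, any bit pattern allowed, no padding, all fields are
+/// // `Pod`, and the type is `repr(C)`.
+/// unsafe impl Zeroable for Rgba {}
+/// unsafe impl Pod for Rgba {}
+///
+/// let px = Rgba { r: 1, g: 2, b: 3, a: 4 };
+/// assert_eq!(bytemuck::bytes_of(&px), &[1, 2, 3, 4]);
+/// ```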
+pub unsafe trait Pod: Zeroable + Copy + 'static {}
+
+unsafe impl Pod for () {}
+unsafe impl Pod for u8 {}
+unsafe impl Pod for i8 {}
+unsafe impl Pod for u16 {}
+unsafe impl Pod for i16 {}
+unsafe impl Pod for u32 {}
+unsafe impl Pod for i32 {}
+unsafe impl Pod for u64 {}
+unsafe impl Pod for i64 {}
+unsafe impl Pod for usize {}
+unsafe impl Pod for isize {}
+unsafe impl Pod for u128 {}
+unsafe impl Pod for i128 {}
+unsafe impl Pod for f32 {}
+unsafe impl Pod for f64 {}
+unsafe impl<T: Pod> Pod for Wrapping<T> {}
+
+unsafe impl Pod for Option<NonZeroI8> {}
+unsafe impl Pod for Option<NonZeroI16> {}
+unsafe impl Pod for Option<NonZeroI32> {}
+unsafe impl Pod for Option<NonZeroI64> {}
+unsafe impl Pod for Option<NonZeroI128> {}
+unsafe impl Pod for Option<NonZeroIsize> {}
+unsafe impl Pod for Option<NonZeroU8> {}
+unsafe impl Pod for Option<NonZeroU16> {}
+unsafe impl Pod for Option<NonZeroU32> {}
+unsafe impl Pod for Option<NonZeroU64> {}
+unsafe impl Pod for Option<NonZeroU128> {}
+unsafe impl Pod for Option<NonZeroUsize> {}
+
+unsafe impl<T: 'static> Pod for *mut T {}
+unsafe impl<T: 'static> Pod for *const T {}
+unsafe impl<T: 'static> Pod for Option<NonNull<T>> {}
+unsafe impl<T: Pod> Pod for PhantomData<T> {}
+unsafe impl<T: Pod> Pod for ManuallyDrop<T> {}
+
+// Note(Lokathor): MaybeUninit can NEVER be Pod.
+
+impl_unsafe_marker_for_array!(
+ Pod, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 48, 64, 96, 128, 256,
+ 512, 1024, 2048, 4096
+);
+
+#[cfg(target_arch = "x86")]
+unsafe impl Pod for x86::__m128i {}
+#[cfg(target_arch = "x86")]
+unsafe impl Pod for x86::__m128 {}
+#[cfg(target_arch = "x86")]
+unsafe impl Pod for x86::__m128d {}
+#[cfg(target_arch = "x86")]
+unsafe impl Pod for x86::__m256i {}
+#[cfg(target_arch = "x86")]
+unsafe impl Pod for x86::__m256 {}
+#[cfg(target_arch = "x86")]
+unsafe impl Pod for x86::__m256d {}
+
+#[cfg(target_arch = "x86_64")]
+unsafe impl Pod for x86_64::__m128i {}
+#[cfg(target_arch = "x86_64")]
+unsafe impl Pod for x86_64::__m128 {}
+#[cfg(target_arch = "x86_64")]
+unsafe impl Pod for x86_64::__m128d {}
+#[cfg(target_arch = "x86_64")]
+unsafe impl Pod for x86_64::__m256i {}
+#[cfg(target_arch = "x86_64")]
+unsafe impl Pod for x86_64::__m256 {}
+#[cfg(target_arch = "x86_64")]
+unsafe impl Pod for x86_64::__m256d {}
diff --git a/third_party/rust/bytemuck/src/transparent.rs b/third_party/rust/bytemuck/src/transparent.rs
new file mode 100644
index 0000000000..b77a8706fb
--- /dev/null
+++ b/third_party/rust/bytemuck/src/transparent.rs
@@ -0,0 +1,133 @@
+use super::*;
+
+/// A trait which indicates that a type is a `repr(transparent)` wrapper around
+/// the `Wrapped` value.
+///
+/// This allows safely creating references to `T` from those to the `Wrapped`
+/// type, using the `wrap_ref` and `wrap_mut` functions.
+///
+/// # Safety
+///
+/// The safety contract of `TransparentWrapper` is relatively simple:
+///
+/// For a given `Wrapper` which implements `TransparentWrapper<Wrapped>`:
+///
+/// 1. Wrapper must be a `#[repr(transparent)]` wrapper around `Wrapped`. This
+/// means that it must be a `#[repr(transparent)]` struct which contains
+/// either a field of type `Wrapped` (or a field of some other transparent
+/// wrapper for `Wrapped`) as the only non-ZST field.
+///
+/// 2. Any fields *other* than the `Wrapped` field must be trivially
+/// constructible ZSTs, for example `PhantomData`, `PhantomPinned`, etc.
+///
+/// 3. The `Wrapper` may not impose additional alignment requirements over
+/// `Wrapped`.
+/// - Note: this is currently guaranteed by repr(transparent), but there
+/// have been discussions of lifting it, so it's stated here explicitly.
+///
+/// 4. The `wrap_ref` and `wrap_mut` functions on `TransparentWrapper` may not
+/// be overridden.
+///
+/// ## Caveats
+///
+/// If the wrapper imposes additional constraints upon the wrapped type which
+/// are required for safety, it's responsible for ensuring those still hold --
+/// this generally requires preventing access to instances of the wrapped type,
+/// as implementing `TransparentWrapper<U> for T` means anybody can call
+/// `T::cast_ref(any_instance_of_u)`.
+///
+/// For example, because of this it would be invalid to implement
+/// `TransparentWrapper<[u8]>` for `str`.
+///
+/// # Examples
+///
+/// ## Basic
+///
+/// ```
+/// use bytemuck::TransparentWrapper;
+/// # #[derive(Default)]
+/// # struct SomeStruct(u32);
+///
+/// #[repr(transparent)]
+/// struct MyWrapper(SomeStruct);
+///
+/// unsafe impl TransparentWrapper<SomeStruct> for MyWrapper {}
+///
+/// // interpret a reference to &SomeStruct as a &MyWrapper
+/// let thing = SomeStruct::default();
+/// let wrapped_ref: &MyWrapper = MyWrapper::wrap_ref(&thing);
+///
+/// // Works with &mut too.
+/// let mut mut_thing = SomeStruct::default();
+/// let wrapped_mut: &mut MyWrapper = MyWrapper::wrap_mut(&mut mut_thing);
+///
+/// # let _ = (wrapped_ref, wrapped_mut); // silence warnings
+/// ```
+///
+/// ## Use with dynamically sized types
+///
+/// ```
+/// use bytemuck::TransparentWrapper;
+///
+/// #[repr(transparent)]
+/// struct Slice<T>([T]);
+///
+/// unsafe impl<T> TransparentWrapper<[T]> for Slice<T> {}
+///
+/// let s = Slice::wrap_ref(&[1u32, 2, 3]);
+/// assert_eq!(&s.0, &[1, 2, 3]);
+///
+/// let mut buf = [1, 2, 3u8];
+/// let sm = Slice::wrap_mut(&mut buf);
+/// ```
+pub unsafe trait TransparentWrapper<Wrapped: ?Sized> {
+ /// Convert a reference to a wrapped type into a reference to the wrapper.
+ ///
+ /// This is a trait method so that you can write `MyType::wrap_ref(...)` in
+ /// your code. It is part of the safety contract for this trait that if you
+ /// implement `TransparentWrapper<_>` for your type you **must not** override
+ /// this method.
+ #[inline]
+ fn wrap_ref(s: &Wrapped) -> &Self {
+ unsafe {
+ assert!(size_of::<*const Wrapped>() == size_of::<*const Self>());
+ // Using a pointer cast doesn't work here because rustc can't tell that the
+ // vtables match (if we lifted the ?Sized restriction, this would go away),
+ // and transmute doesn't work for the usual reasons it doesn't work inside
+ // generic functions.
+ //
+ // SAFETY: The unsafe contract requires that these have identical
+ // representations. Using this transmute_copy instead of transmute here is
+ // annoying, but is required as `Self` and `Wrapped` have unspecified
+ // sizes still.
+ let wrapped_ptr = s as *const Wrapped;
+ let wrapper_ptr: *const Self = transmute_copy(&wrapped_ptr);
+ &*wrapper_ptr
+ }
+ }
+
+ /// Convert a mut reference to a wrapped type into a mut reference to the
+ /// wrapper.
+ ///
+ /// This is a trait method so that you can write `MyType::wrap_mut(...)` in
+ /// your code. It is part of the safety contract for this trait that if you implement
+ /// `TransparentWrapper<_>` for your type you **must not** override this method.
+ #[inline]
+ fn wrap_mut(s: &mut Wrapped) -> &mut Self {
+ unsafe {
+ assert!(size_of::<*mut Wrapped>() == size_of::<*mut Self>());
+ // Using a pointer cast doesn't work here because rustc can't tell that the
+ // vtables match (if we lifted the ?Sized restriction, this would go away),
+ // and transmute doesn't work for the usual reasons it doesn't work inside
+ // generic functions.
+ //
+ // SAFETY: The unsafe contract requires that these have identical
+ // representations. Using this transmute_copy instead of transmute here is
+ // annoying, but is required as `Self` and `Wrapped` have unspecified
+ // sizes still.
+ let wrapped_ptr = s as *mut Wrapped;
+ let wrapper_ptr: *mut Self = transmute_copy(&wrapped_ptr);
+ &mut *wrapper_ptr
+ }
+ }
+}
diff --git a/third_party/rust/bytemuck/src/zeroable.rs b/third_party/rust/bytemuck/src/zeroable.rs
new file mode 100644
index 0000000000..fb9620431e
--- /dev/null
+++ b/third_party/rust/bytemuck/src/zeroable.rs
@@ -0,0 +1,142 @@
+use super::*;
+
+/// Trait for types that can be safely created with
+/// [`zeroed`](core::mem::zeroed).
+///
+/// An all-zeroes value may or may not be the same value as the
+/// [Default](core::default::Default) value of the type.
+///
+/// ## Safety
+///
+/// * Your type must be inhabited (eg: no
+/// [Infallible](core::convert::Infallible)).
+/// * Your type must be allowed to be an "all zeroes" bit pattern (eg: no
+/// [`NonNull<T>`](core::ptr::NonNull)).
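+///
+/// ## Example
+///
+/// A quick sketch (illustrative):
+///
+/// ```
+/// # use bytemuck::Zeroable;
+/// let floats: [f32; 4] = Zeroable::zeroed();
+/// assert_eq!(floats, [0.0; 4]);
+/// ```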
+pub unsafe trait Zeroable: Sized {
+ /// Calls [`zeroed`](core::mem::zeroed).
+ ///
+ /// This is a trait method so that you can write `MyType::zeroed()` in your
+ /// code. It is a contract of this trait that if you implement it on your type
+ /// you **must not** override this method.
+ #[inline]
+ fn zeroed() -> Self {
+ unsafe { core::mem::zeroed() }
+ }
+}
+unsafe impl Zeroable for () {}
+unsafe impl Zeroable for bool {}
+unsafe impl Zeroable for char {}
+unsafe impl Zeroable for u8 {}
+unsafe impl Zeroable for i8 {}
+unsafe impl Zeroable for u16 {}
+unsafe impl Zeroable for i16 {}
+unsafe impl Zeroable for u32 {}
+unsafe impl Zeroable for i32 {}
+unsafe impl Zeroable for u64 {}
+unsafe impl Zeroable for i64 {}
+unsafe impl Zeroable for usize {}
+unsafe impl Zeroable for isize {}
+unsafe impl Zeroable for u128 {}
+unsafe impl Zeroable for i128 {}
+unsafe impl Zeroable for f32 {}
+unsafe impl Zeroable for f64 {}
+unsafe impl<T: Zeroable> Zeroable for Wrapping<T> {}
+
+unsafe impl Zeroable for Option<NonZeroI8> {}
+unsafe impl Zeroable for Option<NonZeroI16> {}
+unsafe impl Zeroable for Option<NonZeroI32> {}
+unsafe impl Zeroable for Option<NonZeroI64> {}
+unsafe impl Zeroable for Option<NonZeroI128> {}
+unsafe impl Zeroable for Option<NonZeroIsize> {}
+unsafe impl Zeroable for Option<NonZeroU8> {}
+unsafe impl Zeroable for Option<NonZeroU16> {}
+unsafe impl Zeroable for Option<NonZeroU32> {}
+unsafe impl Zeroable for Option<NonZeroU64> {}
+unsafe impl Zeroable for Option<NonZeroU128> {}
+unsafe impl Zeroable for Option<NonZeroUsize> {}
+
+unsafe impl<T> Zeroable for *mut T {}
+unsafe impl<T> Zeroable for *const T {}
+unsafe impl<T> Zeroable for Option<NonNull<T>> {}
+unsafe impl<T: Zeroable> Zeroable for PhantomData<T> {}
+unsafe impl<T: Zeroable> Zeroable for ManuallyDrop<T> {}
+
+// 2.0: add MaybeUninit
+//unsafe impl<T> Zeroable for MaybeUninit<T> {}
+
+unsafe impl<A: Zeroable> Zeroable for (A,) {}
+unsafe impl<A: Zeroable, B: Zeroable> Zeroable for (A, B) {}
+unsafe impl<A: Zeroable, B: Zeroable, C: Zeroable> Zeroable for (A, B, C) {}
+unsafe impl<A: Zeroable, B: Zeroable, C: Zeroable, D: Zeroable> Zeroable
+ for (A, B, C, D)
+{
+}
+unsafe impl<A: Zeroable, B: Zeroable, C: Zeroable, D: Zeroable, E: Zeroable>
+ Zeroable for (A, B, C, D, E)
+{
+}
+unsafe impl<
+ A: Zeroable,
+ B: Zeroable,
+ C: Zeroable,
+ D: Zeroable,
+ E: Zeroable,
+ F: Zeroable,
+ > Zeroable for (A, B, C, D, E, F)
+{
+}
+unsafe impl<
+ A: Zeroable,
+ B: Zeroable,
+ C: Zeroable,
+ D: Zeroable,
+ E: Zeroable,
+ F: Zeroable,
+ G: Zeroable,
+ > Zeroable for (A, B, C, D, E, F, G)
+{
+}
+unsafe impl<
+ A: Zeroable,
+ B: Zeroable,
+ C: Zeroable,
+ D: Zeroable,
+ E: Zeroable,
+ F: Zeroable,
+ G: Zeroable,
+ H: Zeroable,
+ > Zeroable for (A, B, C, D, E, F, G, H)
+{
+}
+
+impl_unsafe_marker_for_array!(
+ Zeroable, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+ 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 48, 64, 96, 128, 256,
+ 512, 1024, 2048, 4096
+);
+
+#[cfg(target_arch = "x86")]
+unsafe impl Zeroable for x86::__m128i {}
+#[cfg(target_arch = "x86")]
+unsafe impl Zeroable for x86::__m128 {}
+#[cfg(target_arch = "x86")]
+unsafe impl Zeroable for x86::__m128d {}
+#[cfg(target_arch = "x86")]
+unsafe impl Zeroable for x86::__m256i {}
+#[cfg(target_arch = "x86")]
+unsafe impl Zeroable for x86::__m256 {}
+#[cfg(target_arch = "x86")]
+unsafe impl Zeroable for x86::__m256d {}
+
+#[cfg(target_arch = "x86_64")]
+unsafe impl Zeroable for x86_64::__m128i {}
+#[cfg(target_arch = "x86_64")]
+unsafe impl Zeroable for x86_64::__m128 {}
+#[cfg(target_arch = "x86_64")]
+unsafe impl Zeroable for x86_64::__m128d {}
+#[cfg(target_arch = "x86_64")]
+unsafe impl Zeroable for x86_64::__m256i {}
+#[cfg(target_arch = "x86_64")]
+unsafe impl Zeroable for x86_64::__m256 {}
+#[cfg(target_arch = "x86_64")]
+unsafe impl Zeroable for x86_64::__m256d {}
diff --git a/third_party/rust/bytemuck/tests/cast_slice_tests.rs b/third_party/rust/bytemuck/tests/cast_slice_tests.rs
new file mode 100644
index 0000000000..1177a7fb0b
--- /dev/null
+++ b/third_party/rust/bytemuck/tests/cast_slice_tests.rs
@@ -0,0 +1,90 @@
+use core::mem::size_of;
+
+use bytemuck::*;
+
+#[test]
+fn test_try_cast_slice() {
+ // some align4 data
+ let u32_slice: &[u32] = &[4, 5, 6];
+ // the same data as align1
+ let the_bytes: &[u8] = try_cast_slice(u32_slice).unwrap();
+
+ assert_eq!(
+ u32_slice.as_ptr() as *const u32 as usize,
+ the_bytes.as_ptr() as *const u8 as usize
+ );
+ assert_eq!(
+ u32_slice.len() * size_of::<u32>(),
+ the_bytes.len() * size_of::<u8>()
+ );
+
+ // by taking one byte off the front, we're definitely mis-aligned for u32.
+ let mis_aligned_bytes = &the_bytes[1..];
+ assert_eq!(
+ try_cast_slice::<u8, u32>(mis_aligned_bytes),
+ Err(PodCastError::TargetAlignmentGreaterAndInputNotAligned)
+ );
+
+ // by taking one byte off the end, we're aligned but would have slop bytes for u32
+ let the_bytes_len_minus1 = the_bytes.len() - 1;
+ let slop_bytes = &the_bytes[..the_bytes_len_minus1];
+ assert_eq!(
+ try_cast_slice::<u8, u32>(slop_bytes),
+ Err(PodCastError::OutputSliceWouldHaveSlop)
+ );
+
+ // if we don't mess with it we can up-alignment cast
+ try_cast_slice::<u8, u32>(the_bytes).unwrap();
+}
+
+#[test]
+fn test_try_cast_slice_mut() {
+ // some align4 data
+ let u32_slice: &mut [u32] = &mut [4, 5, 6];
+ let u32_len = u32_slice.len();
+ let u32_ptr = u32_slice.as_ptr();
+
+ // the same data as align1
+ let the_bytes: &mut [u8] = try_cast_slice_mut(u32_slice).unwrap();
+ let the_bytes_len = the_bytes.len();
+ let the_bytes_ptr = the_bytes.as_ptr();
+
+ assert_eq!(
+ u32_ptr as *const u32 as usize,
+ the_bytes_ptr as *const u8 as usize
+ );
+ assert_eq!(u32_len * size_of::<u32>(), the_bytes_len * size_of::<u8>());
+
+ // by taking one byte off the front, we're definitely mis-aligned for u32.
+ let mis_aligned_bytes = &mut the_bytes[1..];
+ assert_eq!(
+ try_cast_slice_mut::<u8, u32>(mis_aligned_bytes),
+ Err(PodCastError::TargetAlignmentGreaterAndInputNotAligned)
+ );
+
+ // by taking one byte off the end, we're aligned but would have slop bytes for u32
+ let the_bytes_len_minus1 = the_bytes.len() - 1;
+ let slop_bytes = &mut the_bytes[..the_bytes_len_minus1];
+ assert_eq!(
+ try_cast_slice_mut::<u8, u32>(slop_bytes),
+ Err(PodCastError::OutputSliceWouldHaveSlop)
+ );
+
+ // if we don't mess with it we can up-alignment cast
+ try_cast_slice_mut::<u8, u32>(the_bytes).unwrap();
+}
+
+#[test]
+fn test_types() {
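+ // These all compile because `f32` and `i32` are both `Pod` and the same
+ // size, so every cast flavor (value, ref, mut, slice) is available.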
+ let _: i32 = cast(1.0_f32);
+ let _: &mut i32 = cast_mut(&mut 1.0_f32);
+ let _: &i32 = cast_ref(&1.0_f32);
+ let _: &[i32] = cast_slice(&[1.0_f32]);
+ let _: &mut [i32] = cast_slice_mut(&mut [1.0_f32]);
+ //
+ let _: Result<i32, PodCastError> = try_cast(1.0_f32);
+ let _: Result<&mut i32, PodCastError> = try_cast_mut(&mut 1.0_f32);
+ let _: Result<&i32, PodCastError> = try_cast_ref(&1.0_f32);
+ let _: Result<&[i32], PodCastError> = try_cast_slice(&[1.0_f32]);
+ let _: Result<&mut [i32], PodCastError> = try_cast_slice_mut(&mut [1.0_f32]);
+}
diff --git a/third_party/rust/bytemuck/tests/doc_tests.rs b/third_party/rust/bytemuck/tests/doc_tests.rs
new file mode 100644
index 0000000000..e5a80db44f
--- /dev/null
+++ b/third_party/rust/bytemuck/tests/doc_tests.rs
@@ -0,0 +1,121 @@
+//! Cargo miri doesn't run doctests yet, so we duplicate them here. It's
+//! probably not worth sweating over keeping them perfectly up to date, but we
+//! should try to catch the cases where a doctest is the primary test.
+use bytemuck::*;
+
+// Miri doesn't run on doctests, so... copypaste to the rescue.
+#[test]
+fn test_transparent_slice() {
+ #[repr(transparent)]
+ struct Slice<T>([T]);
+
+ unsafe impl<T> TransparentWrapper<[T]> for Slice<T> {}
+
+ let s = Slice::wrap_ref(&[1u32, 2, 3]);
+ assert_eq!(&s.0, &[1, 2, 3]);
+
+ let mut buf = [1, 2, 3u8];
+ let _sm = Slice::wrap_mut(&mut buf);
+}
+
+#[test]
+fn test_transparent_basic() {
+ #[derive(Default)]
+ struct SomeStruct(u32);
+
+ #[repr(transparent)]
+ struct MyWrapper(SomeStruct);
+
+ unsafe impl TransparentWrapper<SomeStruct> for MyWrapper {}
+
+ // interpret a reference to &SomeStruct as a &MyWrapper
+ let thing = SomeStruct::default();
+ let wrapped_ref: &MyWrapper = MyWrapper::wrap_ref(&thing);
+
+ // Works with &mut too.
+ let mut mut_thing = SomeStruct::default();
+ let wrapped_mut: &mut MyWrapper = MyWrapper::wrap_mut(&mut mut_thing);
+ let _ = (wrapped_ref, wrapped_mut);
+}
+
+// Work around miri not running doctests
+#[test]
+fn test_contiguous_doc() {
+ #[repr(u8)]
+ #[derive(Debug, Copy, Clone, PartialEq)]
+ enum Foo {
+ A = 0,
+ B = 1,
+ C = 2,
+ D = 3,
+ E = 4,
+ }
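+ // This impl upholds the `Contiguous` contract: Foo's discriminants cover
+ // 0..=4 with no gaps, exactly the MIN_VALUE..=MAX_VALUE range claimed here.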
+ unsafe impl Contiguous for Foo {
+ type Int = u8;
+ const MIN_VALUE: u8 = Foo::A as u8;
+ const MAX_VALUE: u8 = Foo::E as u8;
+ }
+
+ assert_eq!(Foo::from_integer(3).unwrap(), Foo::D);
+ assert_eq!(Foo::from_integer(8), None);
+ assert_eq!(Foo::C.into_integer(), 2);
+ assert_eq!(Foo::B.into_integer(), Foo::B as u8);
+}
+
+#[test]
+fn test_offsetof_vertex() {
+ #[repr(C)]
+ struct Vertex {
+ pos: [f32; 2],
+ uv: [u16; 2],
+ color: [u8; 4],
+ }
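+ // Sound: every field is an array of primitives, and all-zeroes is a valid
+ // value for each of them.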
+ unsafe impl Zeroable for Vertex {}
+
+ let pos = offset_of!(Zeroable::zeroed(), Vertex, pos);
+ let uv = offset_of!(Zeroable::zeroed(), Vertex, uv);
+ let color = offset_of!(Zeroable::zeroed(), Vertex, color);
+
+ assert_eq!(pos, 0);
+ assert_eq!(uv, 8);
+ assert_eq!(color, 12);
+}
+
+#[test]
+fn test_offsetof_nonpod() {
+ #[derive(Default)]
+ struct Foo {
+ a: u8,
+ b: &'static str,
+ c: i32,
+ }
+
+ let a_offset = offset_of!(Default::default(), Foo, a);
+ let b_offset = offset_of!(Default::default(), Foo, b);
+ let c_offset = offset_of!(Default::default(), Foo, c);
+
+ assert_ne!(a_offset, b_offset);
+ assert_ne!(b_offset, c_offset);
+ // We can't check against hardcoded offsets for a repr(Rust) type, but we
+ // can prove to ourselves that the offsets are right this way.
+
+ let foo = Foo::default();
+ // Note: offsets are in bytes.
+ let as_bytes = &foo as *const _ as *const u8;
+
+ // We're using `wrapping_add` here because it's not worth an unsafe block;
+ // it would also be valid to use `add` instead, since these offsets cannot
+ // overflow.
+ assert_eq!(
+ &foo.a as *const _ as usize,
+ as_bytes.wrapping_add(a_offset) as usize
+ );
+ assert_eq!(
+ &foo.b as *const _ as usize,
+ as_bytes.wrapping_add(b_offset) as usize
+ );
+ assert_eq!(
+ &foo.c as *const _ as usize,
+ as_bytes.wrapping_add(c_offset) as usize
+ );
+}
diff --git a/third_party/rust/bytemuck/tests/std_tests.rs b/third_party/rust/bytemuck/tests/std_tests.rs
new file mode 100644
index 0000000000..0e0fb48710
--- /dev/null
+++ b/third_party/rust/bytemuck/tests/std_tests.rs
@@ -0,0 +1,29 @@
+//! The integration tests always seem to have `std` linked, so tests that
+//! depend on `std` can go here.
+
+use bytemuck::*;
+
+#[test]
+fn test_transparent_vtabled() {
+ use core::fmt::Display;
+
+ #[repr(transparent)]
+ struct DisplayTraitObj(dyn Display);
+
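+ // `dyn Display` is unsized, so `&DisplayTraitObj` is a fat pointer; the
+ // `wrap_ref`/`wrap_mut` calls below must carry the vtable half across
+ // unchanged.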
+ unsafe impl TransparentWrapper<dyn Display> for DisplayTraitObj {}
+
+ impl Display for DisplayTraitObj {
+ fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+ self.0.fmt(f)
+ }
+ }
+
+ let v = DisplayTraitObj::wrap_ref(&5i32);
+ let s = format!("{}", v);
+ assert_eq!(s, "5");
+
+ let mut x = 100i32;
+ let v_mut = DisplayTraitObj::wrap_mut(&mut x);
+ let s = format!("{}", v_mut);
+ assert_eq!(s, "100");
+}