author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-28 14:29:10 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-28 14:29:10 +0000
commit    2aa4a82499d4becd2284cdb482213d541b8804dd (patch)
tree      b80bf8bf13c3766139fbacc530efd0dd9d54394c /third_party/rust/bytemuck/src
parent    Initial commit. (diff)
Adding upstream version 86.0.1. (upstream/86.0.1, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'third_party/rust/bytemuck/src')
-rw-r--r--  third_party/rust/bytemuck/src/allocation.rs   | 119
-rw-r--r--  third_party/rust/bytemuck/src/contiguous.rs   | 203
-rw-r--r--  third_party/rust/bytemuck/src/lib.rs          | 433
-rw-r--r--  third_party/rust/bytemuck/src/offset_of.rs    | 103
-rw-r--r--  third_party/rust/bytemuck/src/pod.rs          |  99
-rw-r--r--  third_party/rust/bytemuck/src/transparent.rs  | 133
-rw-r--r--  third_party/rust/bytemuck/src/zeroable.rs     | 142
7 files changed, 1232 insertions, 0 deletions
diff --git a/third_party/rust/bytemuck/src/allocation.rs b/third_party/rust/bytemuck/src/allocation.rs
new file mode 100644
index 0000000000..0676f3f4bf
--- /dev/null
+++ b/third_party/rust/bytemuck/src/allocation.rs
@@ -0,0 +1,119 @@
+//! Stuff to boost things in the `alloc` crate.
+//!
+//! * You must enable the `extern_crate_alloc` feature of `bytemuck` or you will
+//! not be able to use this module!
+
+use super::*;
+use alloc::{
+ alloc::{alloc_zeroed, Layout},
+ boxed::Box,
+ vec::Vec,
+};
+
+/// As [`try_cast_box`], but unwraps for you.
+#[inline]
+pub fn cast_box<A: Pod, B: Pod>(input: Box<A>) -> Box<B> {
+ try_cast_box(input).map_err(|(e, _v)| e).unwrap()
+}
+
+/// Attempts to cast the content type of a [`Box`](alloc::boxed::Box).
+///
+/// On failure you get back an error along with the starting `Box`.
+///
+/// ## Failure
+///
+/// * The start and end content type of the `Box` must have the exact same
+/// alignment.
+/// * The start and end content type of the `Box` must have the exact same
+/// size.
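+///
+/// A minimal sketch of a successful cast (assumes the `extern_crate_alloc`
+/// feature; `u32` and `i32` share size and alignment):
+///
+/// ```
+/// # use bytemuck::try_cast_box;
+/// let boxed: Box<u32> = Box::new(12345);
+/// let cast: Box<i32> = try_cast_box(boxed).unwrap();
+/// assert_eq!(*cast, 12345);
+/// ```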
+#[inline]
+pub fn try_cast_box<A: Pod, B: Pod>(
+ input: Box<A>,
+) -> Result<Box<B>, (PodCastError, Box<A>)> {
+ if align_of::<A>() != align_of::<B>() {
+ Err((PodCastError::AlignmentMismatch, input))
+ } else if size_of::<A>() != size_of::<B>() {
+ Err((PodCastError::SizeMismatch, input))
+ } else {
+ // Note(Lokathor): This is much simpler than with the Vec casting!
+ let ptr: *mut B = Box::into_raw(input) as *mut B;
+ Ok(unsafe { Box::from_raw(ptr) })
+ }
+}
+
+/// Allocates a `Box<T>` with all of the contents being zeroed out.
+///
+/// This uses the global allocator to create a zeroed allocation and _then_
+/// turns it into a Box. In other words, it's 100% assured that the zeroed data
+/// won't be put temporarily on the stack. You can make a box of any size
+/// without fear of a stack overflow.
+///
+/// ## Failure
+///
+/// This fails if the allocation fails.
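+///
+/// A minimal sketch (assumes the `extern_crate_alloc` feature):
+///
+/// ```
+/// # use bytemuck::try_zeroed_box;
+/// let big: Box<[u32; 1024]> = try_zeroed_box().unwrap();
+/// assert_eq!(big[512], 0);
+/// ```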
+#[inline]
+pub fn try_zeroed_box<T: Zeroable>() -> Result<Box<T>, ()> {
+ if size_of::<T>() == 0 {
+ return Ok(Box::new(T::zeroed()));
+ }
+ let layout =
+ Layout::from_size_align(size_of::<T>(), align_of::<T>()).unwrap();
+ let ptr = unsafe { alloc_zeroed(layout) };
+ if ptr.is_null() {
+ // we don't know what the error is because `alloc_zeroed` is a dumb API
+ Err(())
+ } else {
+ Ok(unsafe { Box::<T>::from_raw(ptr as *mut T) })
+ }
+}
+
+/// As [`try_zeroed_box`], but unwraps for you.
+#[inline]
+pub fn zeroed_box<T: Zeroable>() -> Box<T> {
+ try_zeroed_box().unwrap()
+}
+
+/// As [`try_cast_vec`], but unwraps for you.
+#[inline]
+pub fn cast_vec<A: Pod, B: Pod>(input: Vec<A>) -> Vec<B> {
+ try_cast_vec(input).map_err(|(e, _v)| e).unwrap()
+}
+
+/// Attempts to cast the content type of a [`Vec`](alloc::vec::Vec).
+///
+/// On failure you get back an error along with the starting `Vec`.
+///
+/// ## Failure
+///
+/// * The start and end content type of the `Vec` must have the exact same
+/// alignment.
+/// * The start and end content type of the `Vec` must have the exact same
+/// size.
+/// * In the future this second restriction might be lessened by having the
+/// capacity and length get adjusted during transmutation, but for now it's
+/// absolute.
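+///
+/// A minimal sketch (assumes the `extern_crate_alloc` feature; `u32` and
+/// `i32` match in both size and alignment):
+///
+/// ```
+/// # use bytemuck::try_cast_vec;
+/// let v: Vec<u32> = vec![1, 2, 3];
+/// let v2: Vec<i32> = try_cast_vec(v).unwrap();
+/// assert_eq!(v2, [1, 2, 3]);
+/// ```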
+#[inline]
+pub fn try_cast_vec<A: Pod, B: Pod>(
+ input: Vec<A>,
+) -> Result<Vec<B>, (PodCastError, Vec<A>)> {
+ if align_of::<A>() != align_of::<B>() {
+ Err((PodCastError::AlignmentMismatch, input))
+ } else if size_of::<A>() != size_of::<B>() {
+ // Note(Lokathor): Under some conditions it would be possible to cast
+ // between Vec content types of the same alignment but different sizes by
+ // changing the capacity and len values in the output Vec. However, we will
+ // not attempt that for now.
+ Err((PodCastError::SizeMismatch, input))
+ } else {
+ // Note(Lokathor): First we record the length and capacity, which don't have
+ // any secret provenance metadata.
+ let length: usize = input.len();
+ let capacity: usize = input.capacity();
+ // Note(Lokathor): Next we "pre-forget" the old Vec by wrapping with
+ // ManuallyDrop, because if we used `core::mem::forget` after taking the
+ // pointer then that would invalidate our pointer. In nightly there's a
+    // "into raw parts" method, which we can switch to eventually.
+ let mut manual_drop_vec = ManuallyDrop::new(input);
+ let vec_ptr: *mut A = manual_drop_vec.as_mut_ptr();
+ let ptr: *mut B = vec_ptr as *mut B;
+ Ok(unsafe { Vec::from_raw_parts(ptr, length, capacity) })
+ }
+}
diff --git a/third_party/rust/bytemuck/src/contiguous.rs b/third_party/rust/bytemuck/src/contiguous.rs
new file mode 100644
index 0000000000..30709a7cb3
--- /dev/null
+++ b/third_party/rust/bytemuck/src/contiguous.rs
@@ -0,0 +1,203 @@
+use super::*;
+use core::mem::{size_of, transmute_copy};
+
+/// A trait indicating that:
+///
+/// 1. A type has an equivalent representation to some known integral type.
+/// 2. All instances of this type fall in a fixed range of values.
+/// 3. Within that range, there are no gaps.
+///
+/// This is generally useful for fieldless enums (aka "C-style" enums), however
+/// it's important that it only be used for those with an explicit `#[repr]`, as
+/// `#[repr(Rust)]` fieldless enums have an unspecified layout.
+///
+/// Additionally, you shouldn't assume that all implementations are enums. Any
+/// type which meets the requirements above while following the rules under
+/// "Safety" below is valid.
+///
+/// # Example
+///
+/// ```
+/// # use bytemuck::Contiguous;
+/// #[repr(u8)]
+/// #[derive(Debug, Copy, Clone, PartialEq)]
+/// enum Foo {
+/// A = 0,
+/// B = 1,
+/// C = 2,
+/// D = 3,
+/// E = 4,
+/// }
+/// unsafe impl Contiguous for Foo {
+/// type Int = u8;
+/// const MIN_VALUE: u8 = Foo::A as u8;
+/// const MAX_VALUE: u8 = Foo::E as u8;
+/// }
+/// assert_eq!(Foo::from_integer(3).unwrap(), Foo::D);
+/// assert_eq!(Foo::from_integer(8), None);
+/// assert_eq!(Foo::C.into_integer(), 2);
+/// ```
+/// # Safety
+///
+/// This is an unsafe trait, and incorrectly implementing it is undefined
+/// behavior.
+///
+/// Informally, by implementing it, you're asserting that `C` is identical to
+/// the integral type `C::Int`, and that every `C` falls between `C::MIN_VALUE`
+/// and `C::MAX_VALUE` exactly once, without any gaps.
+///
+/// Precisely, the guarantees you must uphold when implementing `Contiguous` for
+/// some type `C` are:
+///
+/// 1. The size of `C` and `C::Int` must be the same, and neither may be a ZST.
+/// (Note: alignment is explicitly allowed to differ)
+///
+/// 2. `C::Int` must be a primitive integer, and not a wrapper type. In the
+/// future, this may be lifted to include cases where the behavior is
+/// identical for a relevant set of traits (Ord, arithmetic, ...).
+///
+/// 3. All `C::Int`s which are in the *inclusive* range between `C::MIN_VALUE`
+/// and `C::MAX_VALUE` are bitwise identical to unique valid instances of
+/// `C`.
+///
+/// 4. There exist no instances of `C` such that their bitpatterns, when
+/// interpreted as instances of `C::Int`, fall outside of the `MAX_VALUE` /
+/// `MIN_VALUE` range -- It is legal for unsafe code to assume that if it
+/// gets a `C` that implements `Contiguous`, it is in the appropriate range.
+///
+/// 5. Finally, you promise not to provide overridden implementations of
+/// `Contiguous::from_integer` and `Contiguous::into_integer`.
+///
+/// For clarity, the following rules could be derived from the above, but are
+/// listed explicitly:
+///
+/// - `C::MAX_VALUE` must be greater or equal to `C::MIN_VALUE` (therefore, `C`
+/// must be an inhabited type).
+///
+/// - There exist no two values between `MIN_VALUE` and `MAX_VALUE` such that
+/// when interpreted as a `C` they are considered identical (by, say, match).
+pub unsafe trait Contiguous: Copy + 'static {
+ /// The primitive integer type with an identical representation to this
+ /// type.
+ ///
+ /// Contiguous is broadly intended for use with fieldless enums, and for
+ /// these the correct integer type is easy: The enum should have a
+  /// `#[repr(Int)]` or `#[repr(C)]` attribute (if it does not, it is
+  /// *unsound* to implement `Contiguous`!).
+ ///
+ /// - For `#[repr(Int)]`, use the listed `Int`. e.g. `#[repr(u8)]` should
+ /// use `type Int = u8`.
+ ///
+ /// - For `#[repr(C)]`, use whichever type the C compiler will use to
+ /// represent the given enum. This is usually `c_int` (from `std::os::raw`
+ /// or `libc`), but it's up to you to make the determination as the
+ /// implementer of the unsafe trait.
+ ///
+ /// For precise rules, see the list under "Safety" above.
+ type Int: Copy + Ord;
+
+ /// The upper *inclusive* bound for valid instances of this type.
+ const MAX_VALUE: Self::Int;
+
+ /// The lower *inclusive* bound for valid instances of this type.
+ const MIN_VALUE: Self::Int;
+
+ /// If `value` is within the range for valid instances of this type,
+ /// returns `Some(converted_value)`, otherwise, returns `None`.
+ ///
+ /// This is a trait method so that you can write `value.into_integer()` in
+ /// your code. It is a contract of this trait that if you implement
+ /// `Contiguous` on your type you **must not** override this method.
+ ///
+ /// # Panics
+ ///
+ /// We will not panic for any correct implementation of `Contiguous`, but
+ /// *may* panic if we detect an incorrect one.
+ ///
+ /// This is undefined behavior regardless, so it could have been the nasal
+ /// demons at that point anyway ;).
+ #[inline]
+ fn from_integer(value: Self::Int) -> Option<Self> {
+ // Guard against an illegal implementation of Contiguous. Annoyingly we
+ // can't rely on `transmute` to do this for us (see below), but
+ // whatever, this gets compiled into nothing in release.
+ assert!(size_of::<Self>() == size_of::<Self::Int>());
+ if Self::MIN_VALUE <= value && value <= Self::MAX_VALUE {
+ // SAFETY: We've checked their bounds (and their size, even though
+ // they've sworn under the Oath Of Unsafe Rust that that already
+ // matched) so this is allowed by `Contiguous`'s unsafe contract.
+ //
+      // So, about the `transmute_copy`: ideally we'd use `transmute` here,
+      // which is more obviously safe. Sadly, we can't, as these generic
+      // types still have unspecified sizes.
+ Some(unsafe { transmute_copy::<Self::Int, Self>(&value) })
+ } else {
+ None
+ }
+ }
+
+  /// Perform the conversion from `C` into the underlying integral type. This
+  /// mostly exists so that generic code doesn't need `unsafe` to do the
+  /// `value as Int` conversion itself.
+ ///
+ /// This is a trait method so that you can write `value.into_integer()` in
+ /// your code. It is a contract of this trait that if you implement
+ /// `Contiguous` on your type you **must not** override this method.
+ ///
+ /// # Panics
+ ///
+ /// We will not panic for any correct implementation of `Contiguous`, but
+ /// *may* panic if we detect an incorrect one.
+ ///
+ /// This is undefined behavior regardless, so it could have been the nasal
+ /// demons at that point anyway ;).
+ #[inline]
+ fn into_integer(self) -> Self::Int {
+    // Guard against an illegal implementation of Contiguous. Annoyingly we
+    // can't rely on `transmute` to do the size check for us (see the comment
+    // in `from_integer`), but whatever, this gets compiled into nothing in
+    // release. Note that we don't check the value itself: the unsafe
+    // contract guarantees it's already in range.
+    assert!(size_of::<Self>() == size_of::<Self::Int>());
+
+ // SAFETY: The unsafe contract requires that these have identical
+ // representations, and that the range be entirely valid. Using
+ // transmute_copy instead of transmute here is annoying, but is required
+ // as `Self` and `Self::Int` have unspecified sizes still.
+ unsafe { transmute_copy::<Self, Self::Int>(&self) }
+ }
+}
+
+macro_rules! impl_contiguous {
+ ($($src:ty as $repr:ident in [$min:expr, $max:expr];)*) => {$(
+ unsafe impl Contiguous for $src {
+ type Int = $repr;
+ const MAX_VALUE: $repr = $max;
+ const MIN_VALUE: $repr = $min;
+ }
+ )*};
+}
+
+impl_contiguous! {
+ bool as u8 in [0, 1];
+
+ u8 as u8 in [0, u8::max_value()];
+ u16 as u16 in [0, u16::max_value()];
+ u32 as u32 in [0, u32::max_value()];
+ u64 as u64 in [0, u64::max_value()];
+ u128 as u128 in [0, u128::max_value()];
+ usize as usize in [0, usize::max_value()];
+
+ i8 as i8 in [i8::min_value(), i8::max_value()];
+ i16 as i16 in [i16::min_value(), i16::max_value()];
+ i32 as i32 in [i32::min_value(), i32::max_value()];
+ i64 as i64 in [i64::min_value(), i64::max_value()];
+ i128 as i128 in [i128::min_value(), i128::max_value()];
+ isize as isize in [isize::min_value(), isize::max_value()];
+
+ NonZeroU8 as u8 in [1, u8::max_value()];
+ NonZeroU16 as u16 in [1, u16::max_value()];
+ NonZeroU32 as u32 in [1, u32::max_value()];
+ NonZeroU64 as u64 in [1, u64::max_value()];
+ NonZeroU128 as u128 in [1, u128::max_value()];
+ NonZeroUsize as usize in [1, usize::max_value()];
+}
diff --git a/third_party/rust/bytemuck/src/lib.rs b/third_party/rust/bytemuck/src/lib.rs
new file mode 100644
index 0000000000..a90199d52a
--- /dev/null
+++ b/third_party/rust/bytemuck/src/lib.rs
@@ -0,0 +1,433 @@
+#![no_std]
+#![warn(missing_docs)]
+
+//! This crate gives small utilities for casting between plain data types.
+//!
+//! ## Basics
+//!
+//! Data comes in five basic forms in Rust, so we have five basic casting
+//! functions:
+//!
+//! * `T` uses [`cast`]
+//! * `&T` uses [`cast_ref`]
+//! * `&mut T` uses [`cast_mut`]
+//! * `&[T]` uses [`cast_slice`]
+//! * `&mut [T]` uses [`cast_slice_mut`]
+//!
+//! Some casts will never fail (eg: `cast::<u32, f32>` always works), other
+//! casts might fail (eg: `cast_ref::<[u8; 4], u32>` will fail if the reference
+//! isn't already aligned to 4). Each casting function has a "try" version which
+//! will return a `Result`, and the "normal" version which will simply panic on
+//! invalid input.
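+//!
+//! A minimal sketch, restricted to casts that can never fail:
+//!
+//! ```
+//! let x: f32 = bytemuck::cast(1u32); // same size, so this never panics
+//! assert_eq!(x, f32::from_bits(1));
+//!
+//! let ints = [1u32, 2, 3];
+//! let bytes: &[u8] = bytemuck::cast_slice(&ints); // u8 has alignment 1
+//! assert_eq!(bytes.len(), 12);
+//! ```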
+//!
+//! ## Using Your Own Types
+//!
+//! All the functions here are guarded by the [`Pod`] trait, which is a
+//! sub-trait of the [`Zeroable`] trait.
+//!
+//! If you're very sure that your type is eligible, you can implement those
+//! traits for your type and then they'll have full casting support. However,
+//! these traits are `unsafe`, and you should carefully read the requirements
+//! before adding them to your own types.
+//!
+//! ## Features
+//!
+//! * This crate is core only by default, but if you're using Rust 1.36 or later
+//! you can enable the `extern_crate_alloc` cargo feature for some additional
+//! methods related to `Box` and `Vec`. Note that the `docs.rs` documentation
+//! is always built with `extern_crate_alloc` cargo feature enabled.
+
+#[cfg(target_arch = "x86")]
+use core::arch::x86;
+#[cfg(target_arch = "x86_64")]
+use core::arch::x86_64;
+//
+use core::{marker::*, mem::*, num::*, ptr::*};
+
+// Used from macros to ensure we aren't using some locally defined name and
+// actually are referencing libcore. This also would allow pre-2018 edition
+// crates to use our macros, but I'm not sure how important that is.
+#[doc(hidden)]
+pub use ::core as __core;
+
+macro_rules! impl_unsafe_marker_for_array {
+ ( $marker:ident , $( $n:expr ),* ) => {
+ $(unsafe impl<T> $marker for [T; $n] where T: $marker {})*
+ }
+}
+
+#[cfg(feature = "extern_crate_alloc")]
+extern crate alloc;
+#[cfg(feature = "extern_crate_alloc")]
+pub mod allocation;
+#[cfg(feature = "extern_crate_alloc")]
+pub use allocation::*;
+
+mod zeroable;
+pub use zeroable::*;
+
+mod pod;
+pub use pod::*;
+
+mod contiguous;
+pub use contiguous::*;
+
+mod offset_of;
+pub use offset_of::*;
+
+mod transparent;
+pub use transparent::*;
+
+/*
+
+Note(Lokathor): We've switched all of the `unwrap` to `match` because there is
+apparently a bug: https://github.com/rust-lang/rust/issues/68667
+and it doesn't seem to show up in simple godbolt examples but has been reported
+as having an impact when there's a cast mixed in with other more complicated
+code around it. Rustc/LLVM ends up missing that the `Err` can't ever happen for
+particular type combinations, and then it doesn't fully eliminate the panic
+possibility code branch.
+
+*/
+
+/// Immediately panics.
+#[cold]
+#[inline(never)]
+fn something_went_wrong(src: &str, err: PodCastError) -> ! {
+ // Note(Lokathor): Keeping the panic here makes the panic _formatting_ go
+ // here too, which helps assembly readability and also helps keep down
+ // the inline pressure.
+ panic!("{src}>{err:?}", src = src, err = err)
+}
+
+/// Re-interprets `&T` as `&[u8]`.
+///
+/// Any ZST becomes an empty slice, and in that case the pointer value of that
+/// empty slice might not match the pointer value of the input reference.
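+///
+/// A minimal sketch:
+///
+/// ```
+/// let x = 0u16;
+/// assert_eq!(bytemuck::bytes_of(&x), &[0, 0]);
+/// ```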
+#[inline]
+pub fn bytes_of<T: Pod>(t: &T) -> &[u8] {
+ match try_cast_slice::<T, u8>(core::slice::from_ref(t)) {
+ Ok(s) => s,
+ Err(_) => unreachable!(),
+ }
+}
+
+/// Re-interprets `&mut T` as `&mut [u8]`.
+///
+/// Any ZST becomes an empty slice, and in that case the pointer value of that
+/// empty slice might not match the pointer value of the input reference.
+#[inline]
+pub fn bytes_of_mut<T: Pod>(t: &mut T) -> &mut [u8] {
+ match try_cast_slice_mut::<T, u8>(core::slice::from_mut(t)) {
+ Ok(s) => s,
+ Err(_) => unreachable!(),
+ }
+}
+
+/// Re-interprets `&[u8]` as `&T`.
+///
+/// ## Panics
+///
+/// This is [`try_from_bytes`] but will panic on error.
+#[inline]
+pub fn from_bytes<T: Pod>(s: &[u8]) -> &T {
+ match try_from_bytes(s) {
+ Ok(t) => t,
+ Err(e) => something_went_wrong("from_bytes", e),
+ }
+}
+
+/// Re-interprets `&mut [u8]` as `&mut T`.
+///
+/// ## Panics
+///
+/// This is [`try_from_bytes_mut`] but will panic on error.
+#[inline]
+pub fn from_bytes_mut<T: Pod>(s: &mut [u8]) -> &mut T {
+ match try_from_bytes_mut(s) {
+ Ok(t) => t,
+ Err(e) => something_went_wrong("from_bytes_mut", e),
+ }
+}
+
+/// Re-interprets `&[u8]` as `&T`.
+///
+/// ## Failure
+///
+/// * If the slice isn't aligned for the new type
+/// * If the slice's length isn't exactly the size of the new type
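+///
+/// A minimal sketch; note that a plain `[u8]` buffer has no alignment
+/// guarantee, so the alignment check can fail at runtime:
+///
+/// ```
+/// # use bytemuck::try_from_bytes;
+/// let bytes = [0u8; 4];
+/// match try_from_bytes::<u32>(&bytes) {
+///   Ok(x) => assert_eq!(*x, 0),
+///   Err(e) => println!("misaligned for u32: {:?}", e),
+/// }
+/// ```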
+#[inline]
+pub fn try_from_bytes<T: Pod>(s: &[u8]) -> Result<&T, PodCastError> {
+ if s.len() != size_of::<T>() {
+ Err(PodCastError::SizeMismatch)
+ } else if (s.as_ptr() as usize) % align_of::<T>() != 0 {
+ Err(PodCastError::AlignmentMismatch)
+ } else {
+ Ok(unsafe { &*(s.as_ptr() as *const T) })
+ }
+}
+
+/// Re-interprets `&mut [u8]` as `&mut T`.
+///
+/// ## Failure
+///
+/// * If the slice isn't aligned for the new type
+/// * If the slice's length isn't exactly the size of the new type
+#[inline]
+pub fn try_from_bytes_mut<T: Pod>(
+ s: &mut [u8],
+) -> Result<&mut T, PodCastError> {
+ if s.len() != size_of::<T>() {
+ Err(PodCastError::SizeMismatch)
+ } else if (s.as_ptr() as usize) % align_of::<T>() != 0 {
+ Err(PodCastError::AlignmentMismatch)
+ } else {
+ Ok(unsafe { &mut *(s.as_mut_ptr() as *mut T) })
+ }
+}
+
+/// The things that can go wrong when casting between [`Pod`] data forms.
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub enum PodCastError {
+ /// You tried to cast a slice to an element type with a higher alignment
+ /// requirement but the slice wasn't aligned.
+ TargetAlignmentGreaterAndInputNotAligned,
+ /// If the element size changes then the output slice changes length
+ /// accordingly. If the output slice wouldn't be a whole number of elements
+ /// then the conversion fails.
+ OutputSliceWouldHaveSlop,
+ /// When casting a slice you can't convert between ZST elements and non-ZST
+ /// elements. When casting an individual `T`, `&T`, or `&mut T` value the
+ /// source size and destination size must be an exact match.
+ SizeMismatch,
+ /// For this type of cast the alignments must be exactly the same and they
+ /// were not so now you're sad.
+ AlignmentMismatch,
+}
+
+/// Cast `T` into `U`.
+///
+/// ## Panics
+///
+/// This is [`try_cast`] but will panic on error.
+#[inline]
+pub fn cast<A: Pod, B: Pod>(a: A) -> B {
+ if size_of::<A>() == size_of::<B>() {
+ // Plz mr compiler, just notice that we can't ever hit Err in this case.
+ match try_cast(a) {
+ Ok(b) => b,
+ Err(_) => unreachable!(),
+ }
+ } else {
+ match try_cast(a) {
+ Ok(b) => b,
+ Err(e) => something_went_wrong("cast", e),
+ }
+ }
+}
+
+/// Cast `&mut T` into `&mut U`.
+///
+/// ## Panics
+///
+/// This is [`try_cast_mut`] but will panic on error.
+#[inline]
+pub fn cast_mut<A: Pod, B: Pod>(a: &mut A) -> &mut B {
+ if size_of::<A>() == size_of::<B>() && align_of::<A>() >= align_of::<B>() {
+ // Plz mr compiler, just notice that we can't ever hit Err in this case.
+ match try_cast_mut(a) {
+ Ok(b) => b,
+ Err(_) => unreachable!(),
+ }
+ } else {
+ match try_cast_mut(a) {
+ Ok(b) => b,
+ Err(e) => something_went_wrong("cast_mut", e),
+ }
+ }
+}
+
+/// Cast `&T` into `&U`.
+///
+/// ## Panics
+///
+/// This is [`try_cast_ref`] but will panic on error.
+#[inline]
+pub fn cast_ref<A: Pod, B: Pod>(a: &A) -> &B {
+ if size_of::<A>() == size_of::<B>() && align_of::<A>() >= align_of::<B>() {
+ // Plz mr compiler, just notice that we can't ever hit Err in this case.
+ match try_cast_ref(a) {
+ Ok(b) => b,
+ Err(_) => unreachable!(),
+ }
+ } else {
+ match try_cast_ref(a) {
+ Ok(b) => b,
+ Err(e) => something_went_wrong("cast_ref", e),
+ }
+ }
+}
+
+/// Cast `&[T]` into `&[U]`.
+///
+/// ## Panics
+///
+/// This is [`try_cast_slice`] but will panic on error.
+#[inline]
+pub fn cast_slice<A: Pod, B: Pod>(a: &[A]) -> &[B] {
+ match try_cast_slice(a) {
+ Ok(b) => b,
+ Err(e) => something_went_wrong("cast_slice", e),
+ }
+}
+
+/// Cast `&mut [T]` into `&mut [U]`.
+///
+/// ## Panics
+///
+/// This is [`try_cast_slice_mut`] but will panic on error.
+#[inline]
+pub fn cast_slice_mut<A: Pod, B: Pod>(a: &mut [A]) -> &mut [B] {
+ match try_cast_slice_mut(a) {
+ Ok(b) => b,
+ Err(e) => something_went_wrong("cast_slice_mut", e),
+ }
+}
+
+/// As `align_to`, but safe because of the [`Pod`] bound.
+#[inline]
+pub fn pod_align_to<T: Pod, U: Pod>(vals: &[T]) -> (&[T], &[U], &[T]) {
+ unsafe { vals.align_to::<U>() }
+}
+
+/// As `align_to_mut`, but safe because of the [`Pod`] bound.
+#[inline]
+pub fn pod_align_to_mut<T: Pod, U: Pod>(
+ vals: &mut [T],
+) -> (&mut [T], &mut [U], &mut [T]) {
+ unsafe { vals.align_to_mut::<U>() }
+}
+
+/// Try to cast `T` into `U`.
+///
+/// ## Failure
+///
+/// * If the types don't have the same size this fails.
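+///
+/// A minimal sketch:
+///
+/// ```
+/// # use bytemuck::try_cast;
+/// assert_eq!(try_cast::<u32, f32>(1), Ok(f32::from_bits(1)));
+/// assert!(try_cast::<u32, u8>(1).is_err()); // size mismatch
+/// ```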
+#[inline]
+pub fn try_cast<A: Pod, B: Pod>(a: A) -> Result<B, PodCastError> {
+ if size_of::<A>() == size_of::<B>() {
+ let mut b = B::zeroed();
+ // Note(Lokathor): We copy in terms of `u8` because that allows us to bypass
+ // any potential alignment difficulties.
+ let ap = &a as *const A as *const u8;
+ let bp = &mut b as *mut B as *mut u8;
+ unsafe { ap.copy_to_nonoverlapping(bp, size_of::<A>()) };
+ Ok(b)
+ } else {
+ Err(PodCastError::SizeMismatch)
+ }
+}
+
+/// Try to convert a `&T` into `&U`.
+///
+/// ## Failure
+///
+/// * If the reference isn't aligned in the new type
+/// * If the source type and target type aren't the same size.
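+///
+/// A minimal sketch (`u32` and `i32` share size and alignment):
+///
+/// ```
+/// # use bytemuck::try_cast_ref;
+/// let x = 5u32;
+/// let y: &i32 = try_cast_ref(&x).unwrap();
+/// assert_eq!(*y, 5);
+/// ```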
+#[inline]
+pub fn try_cast_ref<A: Pod, B: Pod>(a: &A) -> Result<&B, PodCastError> {
+ // Note(Lokathor): everything with `align_of` and `size_of` will optimize away
+ // after monomorphization.
+ if align_of::<B>() > align_of::<A>()
+ && (a as *const A as usize) % align_of::<B>() != 0
+ {
+ Err(PodCastError::TargetAlignmentGreaterAndInputNotAligned)
+ } else if size_of::<B>() == size_of::<A>() {
+ Ok(unsafe { &*(a as *const A as *const B) })
+ } else {
+ Err(PodCastError::SizeMismatch)
+ }
+}
+
+/// Try to convert a `&mut T` into `&mut U`.
+///
+/// As [`try_cast_ref`], but `mut`.
+#[inline]
+pub fn try_cast_mut<A: Pod, B: Pod>(a: &mut A) -> Result<&mut B, PodCastError> {
+ // Note(Lokathor): everything with `align_of` and `size_of` will optimize away
+ // after monomorphization.
+ if align_of::<B>() > align_of::<A>()
+ && (a as *mut A as usize) % align_of::<B>() != 0
+ {
+ Err(PodCastError::TargetAlignmentGreaterAndInputNotAligned)
+ } else if size_of::<B>() == size_of::<A>() {
+ Ok(unsafe { &mut *(a as *mut A as *mut B) })
+ } else {
+ Err(PodCastError::SizeMismatch)
+ }
+}
+
+/// Try to convert `&[T]` into `&[U]` (possibly with a change in length).
+///
+/// * `input.as_ptr() as usize == output.as_ptr() as usize`
+/// * `input.len() * size_of::<A>() == output.len() * size_of::<B>()`
+///
+/// ## Failure
+///
+/// * If the target type has a greater alignment requirement and the input slice
+/// isn't aligned.
+/// * If the target element type is a different size from the current element
+/// type, and the output slice wouldn't be a whole number of elements when
+/// accounting for the size change (eg: 3 `u16` values is 1.5 `u32` values, so
+/// that's a failure).
+/// * Similarly, you can't convert between a
+/// [ZST](https://doc.rust-lang.org/nomicon/exotic-sizes.html#zero-sized-types-zsts)
+/// and a non-ZST.
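+///
+/// A minimal sketch of a narrowing cast, which can't fail because `u8` has
+/// alignment 1 and every total byte size is a whole number of `u8`s:
+///
+/// ```
+/// # use bytemuck::try_cast_slice;
+/// let ints = [1u32, 2];
+/// let bytes: &[u8] = try_cast_slice(&ints).unwrap();
+/// assert_eq!(bytes.len(), 8); // 2 * 4 bytes
+/// ```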
+#[inline]
+pub fn try_cast_slice<A: Pod, B: Pod>(a: &[A]) -> Result<&[B], PodCastError> {
+ // Note(Lokathor): everything with `align_of` and `size_of` will optimize away
+ // after monomorphization.
+ if align_of::<B>() > align_of::<A>()
+ && (a.as_ptr() as usize) % align_of::<B>() != 0
+ {
+ Err(PodCastError::TargetAlignmentGreaterAndInputNotAligned)
+ } else if size_of::<B>() == size_of::<A>() {
+ Ok(unsafe { core::slice::from_raw_parts(a.as_ptr() as *const B, a.len()) })
+ } else if size_of::<A>() == 0 || size_of::<B>() == 0 {
+ Err(PodCastError::SizeMismatch)
+ } else if core::mem::size_of_val(a) % size_of::<B>() == 0 {
+ let new_len = core::mem::size_of_val(a) / size_of::<B>();
+ Ok(unsafe { core::slice::from_raw_parts(a.as_ptr() as *const B, new_len) })
+ } else {
+ Err(PodCastError::OutputSliceWouldHaveSlop)
+ }
+}
+
+/// Try to convert `&mut [T]` into `&mut [U]` (possibly with a change in length).
+///
+/// As [`try_cast_slice`], but `&mut`.
+#[inline]
+pub fn try_cast_slice_mut<A: Pod, B: Pod>(
+ a: &mut [A],
+) -> Result<&mut [B], PodCastError> {
+ // Note(Lokathor): everything with `align_of` and `size_of` will optimize away
+ // after monomorphization.
+ if align_of::<B>() > align_of::<A>()
+ && (a.as_mut_ptr() as usize) % align_of::<B>() != 0
+ {
+ Err(PodCastError::TargetAlignmentGreaterAndInputNotAligned)
+ } else if size_of::<B>() == size_of::<A>() {
+ Ok(unsafe {
+ core::slice::from_raw_parts_mut(a.as_mut_ptr() as *mut B, a.len())
+ })
+ } else if size_of::<A>() == 0 || size_of::<B>() == 0 {
+ Err(PodCastError::SizeMismatch)
+ } else if core::mem::size_of_val(a) % size_of::<B>() == 0 {
+ let new_len = core::mem::size_of_val(a) / size_of::<B>();
+ Ok(unsafe {
+ core::slice::from_raw_parts_mut(a.as_mut_ptr() as *mut B, new_len)
+ })
+ } else {
+ Err(PodCastError::OutputSliceWouldHaveSlop)
+ }
+}
diff --git a/third_party/rust/bytemuck/src/offset_of.rs b/third_party/rust/bytemuck/src/offset_of.rs
new file mode 100644
index 0000000000..fa8572733b
--- /dev/null
+++ b/third_party/rust/bytemuck/src/offset_of.rs
@@ -0,0 +1,103 @@
+#![forbid(unsafe_code)]
+
+/// Find the offset in bytes of the given `$field` of `$Type`, using `$instance`
+/// as an already-initialized value to work with.
+///
+/// This is similar to the macro from `memoffset`, however it's fully well
+/// defined even in current versions of Rust (and uses no unsafe code).
+///
+/// It does this by using the `$instance` argument to have an already-initialized
+/// instance of `$Type` rather than trying to find a way to access the fields of
+/// an uninitialized one without hitting soundness problems. The value passed to
+/// the macro is referenced but not moved.
+///
+/// This means the API is more limited, but it's also sound even in rather
+/// extreme cases, like some of the examples.
+///
+/// ## Caveats
+///
+/// 1. The offset is in bytes, and so you will likely have to cast your base
+/// pointers to `*const u8`/`*mut u8` before getting field addresses.
+///
+/// 2. The offset values of repr(Rust) types are not stable, and may change
+/// wildly between releases of the compiler. Use repr(C) if you can.
+///
+/// 3. The value of the `$instance` parameter has no bearing on the output of
+/// this macro. It is just used to avoid soundness problems. The only
+/// requirement is that it be initialized. In particular, the value returned
+/// is not a field pointer, or anything like that.
+///
+/// ## Examples
+///
+/// ### Use with zeroable types
+/// A common requirement in GPU apis is to specify the layout of vertices. These
+/// will generally be [`Zeroable`] (if not [`Pod`]), and are a good fit for
+/// `offset_of!`.
+/// ```
+/// # use bytemuck::{Zeroable, offset_of};
+/// #[repr(C)]
+/// struct Vertex {
+/// pos: [f32; 2],
+/// uv: [u16; 2],
+/// color: [u8; 4],
+/// }
+/// unsafe impl Zeroable for Vertex {}
+///
+/// let pos = offset_of!(Zeroable::zeroed(), Vertex, pos);
+/// let uv = offset_of!(Zeroable::zeroed(), Vertex, uv);
+/// let color = offset_of!(Zeroable::zeroed(), Vertex, color);
+///
+/// assert_eq!(pos, 0);
+/// assert_eq!(uv, 8);
+/// assert_eq!(color, 12);
+/// ```
+///
+/// ### Use with other types
+///
+/// More esoteric uses are possible too, including with types generally not safe
+/// to otherwise use with bytemuck: `String`s, `Vec`s, etc.
+///
+/// ```
+/// #[derive(Default)]
+/// struct Foo {
+/// a: u8,
+/// b: &'static str,
+/// c: i32,
+/// }
+///
+/// let a_offset = bytemuck::offset_of!(Default::default(), Foo, a);
+/// let b_offset = bytemuck::offset_of!(Default::default(), Foo, b);
+/// let c_offset = bytemuck::offset_of!(Default::default(), Foo, c);
+///
+/// assert_ne!(a_offset, b_offset);
+/// assert_ne!(b_offset, c_offset);
+/// // We can't check against hardcoded values for a repr(Rust) type,
+/// // but prove to ourself this way.
+///
+/// let foo = Foo::default();
+/// // Note: offsets are in bytes.
+/// let as_bytes = &foo as *const _ as *const u8;
+///
+/// // we're using `wrapping_add` here because it's not worth
+/// // the unsafe block, but it would be valid to use `add` instead,
+/// // as it cannot overflow.
+/// assert_eq!(&foo.a as *const _ as usize, as_bytes.wrapping_add(a_offset) as usize);
+/// assert_eq!(&foo.b as *const _ as usize, as_bytes.wrapping_add(b_offset) as usize);
+/// assert_eq!(&foo.c as *const _ as usize, as_bytes.wrapping_add(c_offset) as usize);
+/// ```
+#[macro_export]
+macro_rules! offset_of {
+ ($instance:expr, $Type:path, $field:tt) => {{
+ // This helps us guard against field access going through a Deref impl.
+ #[allow(clippy::unneeded_field_pattern)]
+ let $Type { $field: _, .. };
+ let reference: &$Type = &$instance;
+ let address = reference as *const _ as usize;
+ let field_pointer = &reference.$field as *const _ as usize;
+ // These asserts/unwraps are compiled away at release, and defend against
+ // the case where somehow a deref impl is still invoked.
+ let result = field_pointer.checked_sub(address).unwrap();
+ assert!(result <= $crate::__core::mem::size_of::<$Type>());
+ result
+ }};
+}
diff --git a/third_party/rust/bytemuck/src/pod.rs b/third_party/rust/bytemuck/src/pod.rs
new file mode 100644
index 0000000000..e5cc6938ec
--- /dev/null
+++ b/third_party/rust/bytemuck/src/pod.rs
@@ -0,0 +1,99 @@
+use super::*;
+
+/// Marker trait for "plain old data".
+///
+/// The point of this trait is that once something is marked "plain old data"
+/// you can really go to town with the bit fiddling and bit casting. Therefore,
+/// it's a relatively strong claim to make about a type. Do not add this to your
+/// type casually.
+///
+/// **Reminder:** The results of casting around bytes between data types are
+/// _endian dependent_. Little-endian machines are the most common, but
+/// big-endian machines do exist (and big-endian is also used for "network
+/// order" bytes).
+///
+/// ## Safety
+///
+/// * The type must be inhabited (eg: no
+/// [Infallible](core::convert::Infallible)).
+/// * The type must allow any bit pattern (eg: no `bool` or `char`, which have
+/// illegal bit patterns).
+/// * The type must not contain any padding bytes, either in the middle or on
+/// the end (eg: no `#[repr(C)] struct Foo(u8, u16)`, which has padding in the
+/// middle, and also no `#[repr(C)] struct Foo(u16, u8)`, which has padding on
+/// the end).
+/// * The type needs to have all fields also be `Pod`.
+/// * The type needs to be `repr(C)` or `repr(transparent)`. In the case of
+/// `repr(C)`, the `packed` and `align` repr modifiers can be used as long as
+/// all other rules end up being followed.
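+///
+/// ## Example
+///
+/// A sketch of a manual impl for a hypothetical padding-free `repr(C)` struct
+/// (real code must check every rule above before writing the `unsafe impl`s):
+///
+/// ```
+/// # use bytemuck::{Pod, Zeroable};
+/// #[repr(C)]
+/// #[derive(Clone, Copy)]
+/// struct Rgba { r: u8, g: u8, b: u8, a: u8 }
+///
+/// // SAFETY: inhabited, any bit pattern is valid, no padding, all fields Pod.
+/// unsafe impl Zeroable for Rgba {}
+/// unsafe impl Pod for Rgba {}
+///
+/// let px = Rgba { r: 1, g: 2, b: 3, a: 4 };
+/// assert_eq!(bytemuck::bytes_of(&px), &[1, 2, 3, 4]);
+/// ```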
+pub unsafe trait Pod: Zeroable + Copy + 'static {}
+
+unsafe impl Pod for () {}
+unsafe impl Pod for u8 {}
+unsafe impl Pod for i8 {}
+unsafe impl Pod for u16 {}
+unsafe impl Pod for i16 {}
+unsafe impl Pod for u32 {}
+unsafe impl Pod for i32 {}
+unsafe impl Pod for u64 {}
+unsafe impl Pod for i64 {}
+unsafe impl Pod for usize {}
+unsafe impl Pod for isize {}
+unsafe impl Pod for u128 {}
+unsafe impl Pod for i128 {}
+unsafe impl Pod for f32 {}
+unsafe impl Pod for f64 {}
+unsafe impl<T: Pod> Pod for Wrapping<T> {}
+
+unsafe impl Pod for Option<NonZeroI8> {}
+unsafe impl Pod for Option<NonZeroI16> {}
+unsafe impl Pod for Option<NonZeroI32> {}
+unsafe impl Pod for Option<NonZeroI64> {}
+unsafe impl Pod for Option<NonZeroI128> {}
+unsafe impl Pod for Option<NonZeroIsize> {}
+unsafe impl Pod for Option<NonZeroU8> {}
+unsafe impl Pod for Option<NonZeroU16> {}
+unsafe impl Pod for Option<NonZeroU32> {}
+unsafe impl Pod for Option<NonZeroU64> {}
+unsafe impl Pod for Option<NonZeroU128> {}
+unsafe impl Pod for Option<NonZeroUsize> {}
+
+unsafe impl<T: 'static> Pod for *mut T {}
+unsafe impl<T: 'static> Pod for *const T {}
+unsafe impl<T: 'static> Pod for Option<NonNull<T>> {}
+unsafe impl<T: Pod> Pod for PhantomData<T> {}
+unsafe impl<T: Pod> Pod for ManuallyDrop<T> {}
+
+// Note(Lokathor): MaybeUninit can NEVER be Pod.
+
+impl_unsafe_marker_for_array!(
+ Pod, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 48, 64, 96, 128, 256,
+ 512, 1024, 2048, 4096
+);
+
+#[cfg(target_arch = "x86")]
+unsafe impl Pod for x86::__m128i {}
+#[cfg(target_arch = "x86")]
+unsafe impl Pod for x86::__m128 {}
+#[cfg(target_arch = "x86")]
+unsafe impl Pod for x86::__m128d {}
+#[cfg(target_arch = "x86")]
+unsafe impl Pod for x86::__m256i {}
+#[cfg(target_arch = "x86")]
+unsafe impl Pod for x86::__m256 {}
+#[cfg(target_arch = "x86")]
+unsafe impl Pod for x86::__m256d {}
+
+#[cfg(target_arch = "x86_64")]
+unsafe impl Pod for x86_64::__m128i {}
+#[cfg(target_arch = "x86_64")]
+unsafe impl Pod for x86_64::__m128 {}
+#[cfg(target_arch = "x86_64")]
+unsafe impl Pod for x86_64::__m128d {}
+#[cfg(target_arch = "x86_64")]
+unsafe impl Pod for x86_64::__m256i {}
+#[cfg(target_arch = "x86_64")]
+unsafe impl Pod for x86_64::__m256 {}
+#[cfg(target_arch = "x86_64")]
+unsafe impl Pod for x86_64::__m256d {}
diff --git a/third_party/rust/bytemuck/src/transparent.rs b/third_party/rust/bytemuck/src/transparent.rs
new file mode 100644
index 0000000000..b77a8706fb
--- /dev/null
+++ b/third_party/rust/bytemuck/src/transparent.rs
@@ -0,0 +1,133 @@
+use super::*;
+
+/// A trait which indicates that a type is a `repr(transparent)` wrapper around
+/// the `Wrapped` value.
+///
+/// This allows safely creating references to `T` from those to the `Wrapped`
+/// type, using the `wrap_ref` and `wrap_mut` functions.
+///
+/// # Safety
+///
+/// The safety contract of `TransparentWrapper` is relatively simple:
+///
+/// For a given `Wrapper` which implements `TransparentWrapper<Wrapped>`:
+///
+/// 1. Wrapper must be a `#[repr(transparent)]` wrapper around `Wrapped`. This
+/// means that it must be a `#[repr(transparent)]` struct which contains a
+/// field of type `Wrapped` (or a field of some other transparent wrapper for
+/// `Wrapped`) as the only non-ZST field.
+///
+/// 2. Any fields *other* than the `Wrapped` field must be trivially
+/// constructable ZSTs, for example `PhantomData`, `PhantomPinned`, etc.
+///
+/// 3. The `Wrapper` may not impose additional alignment requirements over
+/// `Wrapped`.
+/// - Note: this is currently guaranteed by repr(transparent), but there
+///     have been discussions of lifting it, so it's stated here explicitly.
+///
+/// 4. The `wrap_ref` and `wrap_mut` functions on `TransparentWrapper` may not
+/// be overridden.
+///
+/// ## Caveats
+///
+/// If the wrapper imposes additional constraints upon the wrapped type which
+/// are required for safety, it's responsible for ensuring those still hold --
+/// this generally requires preventing access to instances of the wrapped type,
+/// as implementing `TransparentWrapper<U> for T` means anybody can call
+/// `T::cast_ref(any_instance_of_u)`.
+///
+/// For example, it would be invalid to implement `TransparentWrapper<[u8]>`
+/// for `str` because of this.
+///
+/// # Examples
+///
+/// ## Basic
+///
+/// ```
+/// use bytemuck::TransparentWrapper;
+/// # #[derive(Default)]
+/// # struct SomeStruct(u32);
+///
+/// #[repr(transparent)]
+/// struct MyWrapper(SomeStruct);
+///
+/// unsafe impl TransparentWrapper<SomeStruct> for MyWrapper {}
+///
+/// // interpret a reference to &SomeStruct as a &MyWrapper
+/// let thing = SomeStruct::default();
+/// let wrapped_ref: &MyWrapper = MyWrapper::wrap_ref(&thing);
+///
+/// // Works with &mut too.
+/// let mut mut_thing = SomeStruct::default();
+/// let wrapped_mut: &mut MyWrapper = MyWrapper::wrap_mut(&mut mut_thing);
+///
+/// # let _ = (wrapped_ref, wrapped_mut); // silence warnings
+/// ```
+///
+/// ## Use with dynamically sized types
+///
+/// ```
+/// use bytemuck::TransparentWrapper;
+///
+/// #[repr(transparent)]
+/// struct Slice<T>([T]);
+///
+/// unsafe impl<T> TransparentWrapper<[T]> for Slice<T> {}
+///
+/// let s = Slice::wrap_ref(&[1u32, 2, 3]);
+/// assert_eq!(&s.0, &[1, 2, 3]);
+///
+/// let mut buf = [1, 2, 3u8];
+/// let sm = Slice::wrap_mut(&mut buf);
+/// ```
+pub unsafe trait TransparentWrapper<Wrapped: ?Sized> {
+ /// Convert a reference to a wrapped type into a reference to the wrapper.
+ ///
+ /// This is a trait method so that you can write `MyType::wrap_ref(...)` in
+ /// your code. It is part of the safety contract for this trait that if you
+ /// implement `TransparentWrapper<_>` for your type you **must not** override
+ /// this method.
+ #[inline]
+ fn wrap_ref(s: &Wrapped) -> &Self {
+ unsafe {
+ assert!(size_of::<*const Wrapped>() == size_of::<*const Self>());
+ // Using a pointer cast doesn't work here because rustc can't tell that the
+ // vtables match (if we lifted the ?Sized restriction, this would go away),
+ // and transmute doesn't work for the usual reasons it doesn't work inside
+ // generic functions.
+ //
+ // SAFETY: The unsafe contract requires that these have identical
+ // representations. Using this transmute_copy instead of transmute here is
+ // annoying, but is required as `Self` and `Wrapped` have unspecified
+ // sizes still.
+ let wrapped_ptr = s as *const Wrapped;
+ let wrapper_ptr: *const Self = transmute_copy(&wrapped_ptr);
+ &*wrapper_ptr
+ }
+ }
+
+ /// Convert a mut reference to a wrapped type into a mut reference to the
+ /// wrapper.
+ ///
+ /// This is a trait method so that you can write `MyType::wrap_mut(...)` in
+  /// your code. It is part of the safety contract for this trait that if you
+  /// implement `TransparentWrapper<_>` for your type you **must not** override
+  /// this method.
+ #[inline]
+ fn wrap_mut(s: &mut Wrapped) -> &mut Self {
+ unsafe {
+ assert!(size_of::<*mut Wrapped>() == size_of::<*mut Self>());
+ // Using a pointer cast doesn't work here because rustc can't tell that the
+ // vtables match (if we lifted the ?Sized restriction, this would go away),
+ // and transmute doesn't work for the usual reasons it doesn't work inside
+ // generic functions.
+ //
+ // SAFETY: The unsafe contract requires that these have identical
+ // representations. Using this transmute_copy instead of transmute here is
+ // annoying, but is required as `Self` and `Wrapped` have unspecified
+ // sizes still.
+ let wrapped_ptr = s as *mut Wrapped;
+ let wrapper_ptr: *mut Self = transmute_copy(&wrapped_ptr);
+ &mut *wrapper_ptr
+ }
+ }
+}
diff --git a/third_party/rust/bytemuck/src/zeroable.rs b/third_party/rust/bytemuck/src/zeroable.rs
new file mode 100644
index 0000000000..fb9620431e
--- /dev/null
+++ b/third_party/rust/bytemuck/src/zeroable.rs
@@ -0,0 +1,142 @@
+use super::*;
+
+/// Trait for types that can be safely created with
+/// [`zeroed`](core::mem::zeroed).
+///
+/// An all-zeroes value may or may not be the same value as the
+/// [Default](core::default::Default) value of the type.
+///
+/// ## Safety
+///
+/// * Your type must be inhabited (eg: no
+/// [Infallible](core::convert::Infallible)).
+/// * Your type must be allowed to be an "all zeroes" bit pattern (eg: no
+/// [`NonNull<T>`](core::ptr::NonNull)).
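+///
+/// A minimal sketch:
+///
+/// ```
+/// # use bytemuck::Zeroable;
+/// let pair: (u8, f32) = Zeroable::zeroed();
+/// assert_eq!(pair, (0, 0.0));
+/// ```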
+pub unsafe trait Zeroable: Sized {
+ /// Calls [`zeroed`](core::mem::zeroed).
+ ///
+ /// This is a trait method so that you can write `MyType::zeroed()` in your
+ /// code. It is a contract of this trait that if you implement it on your type
+ /// you **must not** override this method.
+ #[inline]
+ fn zeroed() -> Self {
+ unsafe { core::mem::zeroed() }
+ }
+}
+unsafe impl Zeroable for () {}
+unsafe impl Zeroable for bool {}
+unsafe impl Zeroable for char {}
+unsafe impl Zeroable for u8 {}
+unsafe impl Zeroable for i8 {}
+unsafe impl Zeroable for u16 {}
+unsafe impl Zeroable for i16 {}
+unsafe impl Zeroable for u32 {}
+unsafe impl Zeroable for i32 {}
+unsafe impl Zeroable for u64 {}
+unsafe impl Zeroable for i64 {}
+unsafe impl Zeroable for usize {}
+unsafe impl Zeroable for isize {}
+unsafe impl Zeroable for u128 {}
+unsafe impl Zeroable for i128 {}
+unsafe impl Zeroable for f32 {}
+unsafe impl Zeroable for f64 {}
+unsafe impl<T: Zeroable> Zeroable for Wrapping<T> {}
+
+unsafe impl Zeroable for Option<NonZeroI8> {}
+unsafe impl Zeroable for Option<NonZeroI16> {}
+unsafe impl Zeroable for Option<NonZeroI32> {}
+unsafe impl Zeroable for Option<NonZeroI64> {}
+unsafe impl Zeroable for Option<NonZeroI128> {}
+unsafe impl Zeroable for Option<NonZeroIsize> {}
+unsafe impl Zeroable for Option<NonZeroU8> {}
+unsafe impl Zeroable for Option<NonZeroU16> {}
+unsafe impl Zeroable for Option<NonZeroU32> {}
+unsafe impl Zeroable for Option<NonZeroU64> {}
+unsafe impl Zeroable for Option<NonZeroU128> {}
+unsafe impl Zeroable for Option<NonZeroUsize> {}
+
+unsafe impl<T> Zeroable for *mut T {}
+unsafe impl<T> Zeroable for *const T {}
+unsafe impl<T> Zeroable for Option<NonNull<T>> {}
+unsafe impl<T: Zeroable> Zeroable for PhantomData<T> {}
+unsafe impl<T: Zeroable> Zeroable for ManuallyDrop<T> {}
+
+// 2.0: add MaybeUninit
+//unsafe impl<T> Zeroable for MaybeUninit<T> {}
+
+unsafe impl<A: Zeroable> Zeroable for (A,) {}
+unsafe impl<A: Zeroable, B: Zeroable> Zeroable for (A, B) {}
+unsafe impl<A: Zeroable, B: Zeroable, C: Zeroable> Zeroable for (A, B, C) {}
+unsafe impl<A: Zeroable, B: Zeroable, C: Zeroable, D: Zeroable> Zeroable
+ for (A, B, C, D)
+{
+}
+unsafe impl<A: Zeroable, B: Zeroable, C: Zeroable, D: Zeroable, E: Zeroable>
+ Zeroable for (A, B, C, D, E)
+{
+}
+unsafe impl<
+ A: Zeroable,
+ B: Zeroable,
+ C: Zeroable,
+ D: Zeroable,
+ E: Zeroable,
+ F: Zeroable,
+ > Zeroable for (A, B, C, D, E, F)
+{
+}
+unsafe impl<
+ A: Zeroable,
+ B: Zeroable,
+ C: Zeroable,
+ D: Zeroable,
+ E: Zeroable,
+ F: Zeroable,
+ G: Zeroable,
+ > Zeroable for (A, B, C, D, E, F, G)
+{
+}
+unsafe impl<
+ A: Zeroable,
+ B: Zeroable,
+ C: Zeroable,
+ D: Zeroable,
+ E: Zeroable,
+ F: Zeroable,
+ G: Zeroable,
+ H: Zeroable,
+ > Zeroable for (A, B, C, D, E, F, G, H)
+{
+}
+
+impl_unsafe_marker_for_array!(
+ Zeroable, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+ 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 48, 64, 96, 128, 256,
+ 512, 1024, 2048, 4096
+);
+
+#[cfg(target_arch = "x86")]
+unsafe impl Zeroable for x86::__m128i {}
+#[cfg(target_arch = "x86")]
+unsafe impl Zeroable for x86::__m128 {}
+#[cfg(target_arch = "x86")]
+unsafe impl Zeroable for x86::__m128d {}
+#[cfg(target_arch = "x86")]
+unsafe impl Zeroable for x86::__m256i {}
+#[cfg(target_arch = "x86")]
+unsafe impl Zeroable for x86::__m256 {}
+#[cfg(target_arch = "x86")]
+unsafe impl Zeroable for x86::__m256d {}
+
+#[cfg(target_arch = "x86_64")]
+unsafe impl Zeroable for x86_64::__m128i {}
+#[cfg(target_arch = "x86_64")]
+unsafe impl Zeroable for x86_64::__m128 {}
+#[cfg(target_arch = "x86_64")]
+unsafe impl Zeroable for x86_64::__m128d {}
+#[cfg(target_arch = "x86_64")]
+unsafe impl Zeroable for x86_64::__m256i {}
+#[cfg(target_arch = "x86_64")]
+unsafe impl Zeroable for x86_64::__m256 {}
+#[cfg(target_arch = "x86_64")]
+unsafe impl Zeroable for x86_64::__m256d {}