Diffstat:
-rw-r--r--  third_party/rust/zerocopy/src/byteorder.rs                                 | 1075
-rw-r--r--  third_party/rust/zerocopy/src/lib.rs                                       | 8256
-rw-r--r--  third_party/rust/zerocopy/src/macro_util.rs                                |  670
-rw-r--r--  third_party/rust/zerocopy/src/macros.rs                                    |  417
-rw-r--r--  third_party/rust/zerocopy/src/post_monomorphization_compile_fail_tests.rs  |  118
-rw-r--r--  third_party/rust/zerocopy/src/third_party/rust/LICENSE-APACHE              |  176
-rw-r--r--  third_party/rust/zerocopy/src/third_party/rust/LICENSE-MIT                 |   23
-rw-r--r--  third_party/rust/zerocopy/src/third_party/rust/README.fuchsia              |    7
-rw-r--r--  third_party/rust/zerocopy/src/third_party/rust/layout.rs                   |   45
-rw-r--r--  third_party/rust/zerocopy/src/util.rs                                      |  808
-rw-r--r--  third_party/rust/zerocopy/src/wrappers.rs                                  |  503
11 files changed, 12098 insertions(+), 0 deletions(-)
diff --git a/third_party/rust/zerocopy/src/byteorder.rs b/third_party/rust/zerocopy/src/byteorder.rs
new file mode 100644
index 0000000000..2769410451
--- /dev/null
+++ b/third_party/rust/zerocopy/src/byteorder.rs
@@ -0,0 +1,1075 @@
+// Copyright 2019 The Fuchsia Authors
+//
+// Licensed under a BSD-style license <LICENSE-BSD>, Apache License, Version 2.0
+// <LICENSE-APACHE or https://www.apache.org/licenses/LICENSE-2.0>, or the MIT
+// license <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your option.
+// This file may not be copied, modified, or distributed except according to
+// those terms.
+
+//! Byte order-aware numeric primitives.
+//!
+//! This module contains equivalents of the native multi-byte integer types
+//! that have no alignment requirement and support byte order conversions.
+//!
+//! For each native multi-byte integer type - `u16`, `i16`, `u32`, etc - and
+//! floating point type - `f32` and `f64` - an equivalent type is defined by
+//! this module - [`U16`], [`I16`], [`U32`], [`F64`], etc. Unlike their native
+//! counterparts, these types have alignment 1, and take a type parameter
+//! specifying the byte order in which the bytes are stored in memory. Each type
+//! implements the [`FromBytes`], [`AsBytes`], and [`Unaligned`] traits.
+//!
+//! These two properties, taken together, make these types useful for defining
+//! data structures whose memory layout matches a wire format such as that of a
+//! network protocol or a file format. Such formats often have multi-byte values
+//! at offsets that do not respect the alignment requirements of the equivalent
+//! native types, and stored in a byte order not necessarily the same as that of
+//! the target platform.
+//!
+//! Type aliases are provided for common byte orders in the [`big_endian`],
+//! [`little_endian`], [`network_endian`], and [`native_endian`] submodules.
+//!
+//! # Example
+//!
+//! One use of these types is for representing network packet formats, such as
+//! UDP:
+//!
+//! ```rust,edition2021
+//! # #[cfg(feature = "derive")] { // This example uses derives, and won't compile without them
+//! use zerocopy::{AsBytes, ByteSlice, FromBytes, FromZeroes, Ref, Unaligned};
+//! use zerocopy::byteorder::network_endian::U16;
+//!
+//! #[derive(FromZeroes, FromBytes, AsBytes, Unaligned)]
+//! #[repr(C)]
+//! struct UdpHeader {
+//! src_port: U16,
+//! dst_port: U16,
+//! length: U16,
+//! checksum: U16,
+//! }
+//!
+//! struct UdpPacket<B: ByteSlice> {
+//! header: Ref<B, UdpHeader>,
+//! body: B,
+//! }
+//!
+//! impl<B: ByteSlice> UdpPacket<B> {
+//! fn parse(bytes: B) -> Option<UdpPacket<B>> {
+//! let (header, body) = Ref::new_from_prefix(bytes)?;
+//! Some(UdpPacket { header, body })
+//! }
+//!
+//! fn src_port(&self) -> u16 {
+//! self.header.src_port.get()
+//! }
+//!
+//! // more getters...
+//! }
+//! # }
+//! ```
+
+use core::{
+ convert::{TryFrom, TryInto},
+ fmt::{self, Binary, Debug, Display, Formatter, LowerHex, Octal, UpperHex},
+ marker::PhantomData,
+ num::TryFromIntError,
+};
+
+// We don't reexport `WriteBytesExt` or `ReadBytesExt` because those are only
+// available with the `std` feature enabled, and zerocopy is `no_std` by
+// default.
+pub use ::byteorder::{BigEndian, ByteOrder, LittleEndian, NativeEndian, NetworkEndian, BE, LE};
+
+use super::*;
+
+macro_rules! impl_fmt_trait {
+ ($name:ident, $native:ident, $trait:ident) => {
+ impl<O: ByteOrder> $trait for $name<O> {
+ #[inline(always)]
+ fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+ $trait::fmt(&self.get(), f)
+ }
+ }
+ };
+}
+
+macro_rules! impl_fmt_traits {
+ ($name:ident, $native:ident, "floating point number") => {
+ impl_fmt_trait!($name, $native, Display);
+ };
+ ($name:ident, $native:ident, "unsigned integer") => {
+ impl_fmt_traits!($name, $native, @all_types);
+ };
+ ($name:ident, $native:ident, "signed integer") => {
+ impl_fmt_traits!($name, $native, @all_types);
+ };
+ ($name:ident, $native:ident, @all_types) => {
+ impl_fmt_trait!($name, $native, Display);
+ impl_fmt_trait!($name, $native, Octal);
+ impl_fmt_trait!($name, $native, LowerHex);
+ impl_fmt_trait!($name, $native, UpperHex);
+ impl_fmt_trait!($name, $native, Binary);
+ };
+}
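+
+// A sketch of the effect of these impls (hypothetical usage, not part of this
+// file): formatting forwards to the native value via `get`, so a `U16` formats
+// exactly like a `u16`:
+//
+//     use zerocopy::byteorder::{BigEndian, U16};
+//
+//     let x = U16::<BigEndian>::new(42);
+//     assert_eq!(format!("{}", x), "42");      // Display
+//     assert_eq!(format!("{:#x}", x), "0x2a"); // LowerHex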
+
+macro_rules! impl_ops_traits {
+ ($name:ident, $native:ident, "floating point number") => {
+ impl_ops_traits!($name, $native, @all_types);
+ impl_ops_traits!($name, $native, @signed_integer_floating_point);
+ };
+ ($name:ident, $native:ident, "unsigned integer") => {
+ impl_ops_traits!($name, $native, @signed_unsigned_integer);
+ impl_ops_traits!($name, $native, @all_types);
+ };
+ ($name:ident, $native:ident, "signed integer") => {
+ impl_ops_traits!($name, $native, @signed_unsigned_integer);
+ impl_ops_traits!($name, $native, @signed_integer_floating_point);
+ impl_ops_traits!($name, $native, @all_types);
+ };
+ ($name:ident, $native:ident, @signed_unsigned_integer) => {
+ impl_ops_traits!(@without_byteorder_swap $name, $native, BitAnd, bitand, BitAndAssign, bitand_assign);
+ impl_ops_traits!(@without_byteorder_swap $name, $native, BitOr, bitor, BitOrAssign, bitor_assign);
+ impl_ops_traits!(@without_byteorder_swap $name, $native, BitXor, bitxor, BitXorAssign, bitxor_assign);
+ impl_ops_traits!(@with_byteorder_swap $name, $native, Shl, shl, ShlAssign, shl_assign);
+ impl_ops_traits!(@with_byteorder_swap $name, $native, Shr, shr, ShrAssign, shr_assign);
+
+ impl<O> core::ops::Not for $name<O> {
+ type Output = $name<O>;
+
+ #[inline(always)]
+ fn not(self) -> $name<O> {
+ let self_native = $native::from_ne_bytes(self.0);
+ $name((!self_native).to_ne_bytes(), PhantomData)
+ }
+ }
+ };
+ ($name:ident, $native:ident, @signed_integer_floating_point) => {
+ impl<O: ByteOrder> core::ops::Neg for $name<O> {
+ type Output = $name<O>;
+
+ #[inline(always)]
+ fn neg(self) -> $name<O> {
+ let self_native: $native = self.get();
+ #[allow(clippy::arithmetic_side_effects)]
+ $name::<O>::new(-self_native)
+ }
+ }
+ };
+ ($name:ident, $native:ident, @all_types) => {
+ impl_ops_traits!(@with_byteorder_swap $name, $native, Add, add, AddAssign, add_assign);
+ impl_ops_traits!(@with_byteorder_swap $name, $native, Div, div, DivAssign, div_assign);
+ impl_ops_traits!(@with_byteorder_swap $name, $native, Mul, mul, MulAssign, mul_assign);
+ impl_ops_traits!(@with_byteorder_swap $name, $native, Rem, rem, RemAssign, rem_assign);
+ impl_ops_traits!(@with_byteorder_swap $name, $native, Sub, sub, SubAssign, sub_assign);
+ };
+ (@with_byteorder_swap $name:ident, $native:ident, $trait:ident, $method:ident, $trait_assign:ident, $method_assign:ident) => {
+ impl<O: ByteOrder> core::ops::$trait for $name<O> {
+ type Output = $name<O>;
+
+ #[inline(always)]
+ fn $method(self, rhs: $name<O>) -> $name<O> {
+ let self_native: $native = self.get();
+ let rhs_native: $native = rhs.get();
+ let result_native = core::ops::$trait::$method(self_native, rhs_native);
+ $name::<O>::new(result_native)
+ }
+ }
+
+ impl<O: ByteOrder> core::ops::$trait_assign for $name<O> {
+ #[inline(always)]
+ fn $method_assign(&mut self, rhs: $name<O>) {
+ *self = core::ops::$trait::$method(*self, rhs);
+ }
+ }
+ };
+ // Implement traits in terms of the same trait on the native type, but
+ // without performing a byte order swap. This only works for bitwise
+ // operations like `&`, `|`, etc.
+ (@without_byteorder_swap $name:ident, $native:ident, $trait:ident, $method:ident, $trait_assign:ident, $method_assign:ident) => {
+ impl<O: ByteOrder> core::ops::$trait for $name<O> {
+ type Output = $name<O>;
+
+ #[inline(always)]
+ fn $method(self, rhs: $name<O>) -> $name<O> {
+ let self_native = $native::from_ne_bytes(self.0);
+ let rhs_native = $native::from_ne_bytes(rhs.0);
+ let result_native = core::ops::$trait::$method(self_native, rhs_native);
+ $name(result_native.to_ne_bytes(), PhantomData)
+ }
+ }
+
+ impl<O: ByteOrder> core::ops::$trait_assign for $name<O> {
+ #[inline(always)]
+ fn $method_assign(&mut self, rhs: $name<O>) {
+ *self = core::ops::$trait::$method(*self, rhs);
+ }
+ }
+ };
+}
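+
+// An illustrative sketch of why `@without_byteorder_swap` is sound
+// (hypothetical usage, not part of this file): bitwise operations act on each
+// byte independently, so the result is the same in any byte order and no swap
+// is needed:
+//
+//     use zerocopy::byteorder::{BigEndian, LittleEndian, U16};
+//
+//     let be = U16::<BigEndian>::new(0x00FF) & U16::<BigEndian>::new(0x0F0F);
+//     let le = U16::<LittleEndian>::new(0x00FF) & U16::<LittleEndian>::new(0x0F0F);
+//     assert_eq!(be.get(), 0x000F); // operates directly on the stored bytes
+//     assert_eq!(le.get(), 0x000F); // same result; no endianness swap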
+
+macro_rules! doc_comment {
+ ($x:expr, $($tt:tt)*) => {
+ #[doc = $x]
+ $($tt)*
+ };
+}
+
+macro_rules! define_max_value_constant {
+ ($name:ident, $bytes:expr, "unsigned integer") => {
+ /// The maximum value.
+ ///
+ /// This constant should be preferred to constructing a new value using
+ /// `new`, as `new` may perform an endianness swap depending on the
+ /// endianness `O` and the endianness of the platform.
+ pub const MAX_VALUE: $name<O> = $name([0xFFu8; $bytes], PhantomData);
+ };
+ // We don't provide maximum and minimum value constants for signed values
+ // and floats because there's no way to do it generically - it would require
+ // a different value depending on the value of the `ByteOrder` type
+    // parameter. Currently, one workaround would be to provide these constants
+    // only for concrete implementations of that trait. In the long term, if we are
+ // ever able to make the `new` constructor a const fn, we could use that
+ // instead.
+ ($name:ident, $bytes:expr, "signed integer") => {};
+ ($name:ident, $bytes:expr, "floating point number") => {};
+}
+
+macro_rules! define_type {
+ ($article:ident,
+ $name:ident,
+ $native:ident,
+ $bits:expr,
+ $bytes:expr,
+ $read_method:ident,
+ $write_method:ident,
+ $number_kind:tt,
+ [$($larger_native:ty),*],
+ [$($larger_native_try:ty),*],
+ [$($larger_byteorder:ident),*],
+ [$($larger_byteorder_try:ident),*]) => {
+ doc_comment! {
+ concat!("A ", stringify!($bits), "-bit ", $number_kind,
+ " stored in a given byte order.
+
+`", stringify!($name), "` is like the native `", stringify!($native), "` type with
+two major differences: First, it has no alignment requirement (its alignment is 1).
+Second, the endianness of its memory layout is given by the type parameter `O`,
+which can be any type which implements [`ByteOrder`]. In particular, this refers
+to [`BigEndian`], [`LittleEndian`], [`NativeEndian`], and [`NetworkEndian`].
+
+", stringify!($article), " `", stringify!($name), "` can be constructed using
+the [`new`] method, and its contained value can be obtained as a native
+`",stringify!($native), "` using the [`get`] method, or updated in place with
+the [`set`] method. In all cases, if the endianness `O` is not the same as the
+endianness of the current platform, an endianness swap will be performed in
+order to uphold the invariants that a) the layout of `", stringify!($name), "`
+has endianness `O` and that, b) the layout of `", stringify!($native), "` has
+the platform's native endianness.
+
+`", stringify!($name), "` implements [`FromBytes`], [`AsBytes`], and [`Unaligned`],
+making it useful for parsing and serialization. See the module documentation for an
+example of how it can be used for parsing UDP packets.
+
+[`new`]: crate::byteorder::", stringify!($name), "::new
+[`get`]: crate::byteorder::", stringify!($name), "::get
+[`set`]: crate::byteorder::", stringify!($name), "::set
+[`FromBytes`]: crate::FromBytes
+[`AsBytes`]: crate::AsBytes
+[`Unaligned`]: crate::Unaligned"),
+ #[derive(Copy, Clone, Eq, PartialEq, Hash)]
+ #[cfg_attr(any(feature = "derive", test), derive(KnownLayout, FromZeroes, FromBytes, AsBytes, Unaligned))]
+ #[repr(transparent)]
+ pub struct $name<O>([u8; $bytes], PhantomData<O>);
+ }
+
+ #[cfg(not(any(feature = "derive", test)))]
+ impl_known_layout!(O => $name<O>);
+
+ safety_comment! {
+ /// SAFETY:
+ /// `$name<O>` is `repr(transparent)`, and so it has the same layout
+            /// as its only non-zero-sized field, which is a `u8` array. `u8` arrays
+ /// are `FromZeroes`, `FromBytes`, `AsBytes`, and `Unaligned`.
+ impl_or_verify!(O => FromZeroes for $name<O>);
+ impl_or_verify!(O => FromBytes for $name<O>);
+ impl_or_verify!(O => AsBytes for $name<O>);
+ impl_or_verify!(O => Unaligned for $name<O>);
+ }
+
+ impl<O> Default for $name<O> {
+ #[inline(always)]
+ fn default() -> $name<O> {
+ $name::ZERO
+ }
+ }
+
+ impl<O> $name<O> {
+ /// The value zero.
+ ///
+ /// This constant should be preferred to constructing a new value
+ /// using `new`, as `new` may perform an endianness swap depending
+ /// on the endianness and platform.
+ pub const ZERO: $name<O> = $name([0u8; $bytes], PhantomData);
+
+ define_max_value_constant!($name, $bytes, $number_kind);
+
+ /// Constructs a new value from bytes which are already in the
+ /// endianness `O`.
+ #[inline(always)]
+ pub const fn from_bytes(bytes: [u8; $bytes]) -> $name<O> {
+ $name(bytes, PhantomData)
+ }
+ }
+
+ impl<O: ByteOrder> $name<O> {
+ // TODO(joshlf): Make these const fns if the `ByteOrder` methods
+ // ever become const fns.
+
+ /// Constructs a new value, possibly performing an endianness swap
+ /// to guarantee that the returned value has endianness `O`.
+ #[inline(always)]
+ pub fn new(n: $native) -> $name<O> {
+ let mut out = $name::default();
+ O::$write_method(&mut out.0[..], n);
+ out
+ }
+
+ /// Returns the value as a primitive type, possibly performing an
+ /// endianness swap to guarantee that the return value has the
+ /// endianness of the native platform.
+ #[inline(always)]
+ pub fn get(self) -> $native {
+ O::$read_method(&self.0[..])
+ }
+
+ /// Updates the value in place as a primitive type, possibly
+ /// performing an endianness swap to guarantee that the stored value
+ /// has the endianness `O`.
+ #[inline(always)]
+ pub fn set(&mut self, n: $native) {
+ O::$write_method(&mut self.0[..], n);
+ }
+ }
+
+ // The reasoning behind which traits to implement here is to only
+ // implement traits which won't cause inference issues. Notably,
+ // comparison traits like PartialEq and PartialOrd tend to cause
+ // inference issues.
+
+ impl<O: ByteOrder> From<$name<O>> for [u8; $bytes] {
+ #[inline(always)]
+ fn from(x: $name<O>) -> [u8; $bytes] {
+ x.0
+ }
+ }
+
+ impl<O: ByteOrder> From<[u8; $bytes]> for $name<O> {
+ #[inline(always)]
+ fn from(bytes: [u8; $bytes]) -> $name<O> {
+ $name(bytes, PhantomData)
+ }
+ }
+
+ impl<O: ByteOrder> From<$name<O>> for $native {
+ #[inline(always)]
+ fn from(x: $name<O>) -> $native {
+ x.get()
+ }
+ }
+
+ impl<O: ByteOrder> From<$native> for $name<O> {
+ #[inline(always)]
+ fn from(x: $native) -> $name<O> {
+ $name::new(x)
+ }
+ }
+
+ $(
+ impl<O: ByteOrder> From<$name<O>> for $larger_native {
+ #[inline(always)]
+ fn from(x: $name<O>) -> $larger_native {
+ x.get().into()
+ }
+ }
+ )*
+
+ $(
+ impl<O: ByteOrder> TryFrom<$larger_native_try> for $name<O> {
+ type Error = TryFromIntError;
+ #[inline(always)]
+ fn try_from(x: $larger_native_try) -> Result<$name<O>, TryFromIntError> {
+ $native::try_from(x).map($name::new)
+ }
+ }
+ )*
+
+ $(
+ impl<O: ByteOrder, P: ByteOrder> From<$name<O>> for $larger_byteorder<P> {
+ #[inline(always)]
+ fn from(x: $name<O>) -> $larger_byteorder<P> {
+ $larger_byteorder::new(x.get().into())
+ }
+ }
+ )*
+
+ $(
+ impl<O: ByteOrder, P: ByteOrder> TryFrom<$larger_byteorder_try<P>> for $name<O> {
+ type Error = TryFromIntError;
+ #[inline(always)]
+ fn try_from(x: $larger_byteorder_try<P>) -> Result<$name<O>, TryFromIntError> {
+ x.get().try_into().map($name::new)
+ }
+ }
+ )*
+
+ impl<O: ByteOrder> AsRef<[u8; $bytes]> for $name<O> {
+ #[inline(always)]
+ fn as_ref(&self) -> &[u8; $bytes] {
+ &self.0
+ }
+ }
+
+ impl<O: ByteOrder> AsMut<[u8; $bytes]> for $name<O> {
+ #[inline(always)]
+ fn as_mut(&mut self) -> &mut [u8; $bytes] {
+ &mut self.0
+ }
+ }
+
+ impl<O: ByteOrder> PartialEq<$name<O>> for [u8; $bytes] {
+ #[inline(always)]
+ fn eq(&self, other: &$name<O>) -> bool {
+ self.eq(&other.0)
+ }
+ }
+
+ impl<O: ByteOrder> PartialEq<[u8; $bytes]> for $name<O> {
+ #[inline(always)]
+ fn eq(&self, other: &[u8; $bytes]) -> bool {
+ self.0.eq(other)
+ }
+ }
+
+ impl_fmt_traits!($name, $native, $number_kind);
+ impl_ops_traits!($name, $native, $number_kind);
+
+ impl<O: ByteOrder> Debug for $name<O> {
+ #[inline]
+ fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+ // This results in a format like "U16(42)".
+ f.debug_tuple(stringify!($name)).field(&self.get()).finish()
+ }
+ }
+ };
+}
+
+define_type!(
+ A,
+ U16,
+ u16,
+ 16,
+ 2,
+ read_u16,
+ write_u16,
+ "unsigned integer",
+ [u32, u64, u128, usize],
+ [u32, u64, u128, usize],
+ [U32, U64, U128],
+ [U32, U64, U128]
+);
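+// An illustrative sketch of the API the invocation above generates for `U16`
+// (a hypothetical summary, not literal macro output):
+//
+//     pub struct U16<O>([u8; 2], PhantomData<O>);
+//
+//     impl<O: ByteOrder> U16<O> {
+//         pub fn new(n: u16) -> U16<O>; // stores `n` in byte order `O`
+//         pub fn get(self) -> u16;      // loads, swapping to native order
+//         pub fn set(&mut self, n: u16);
+//     }
+//
+// plus `From`/`TryFrom` conversions between `U16<O>`, `u16`, `[u8; 2]`, and
+// the larger types listed in the trailing brackets (`u32`, `u64`, `u128`,
+// `usize`, `U32`, `U64`, `U128`).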
+define_type!(
+ A,
+ U32,
+ u32,
+ 32,
+ 4,
+ read_u32,
+ write_u32,
+ "unsigned integer",
+ [u64, u128],
+ [u64, u128],
+ [U64, U128],
+ [U64, U128]
+);
+define_type!(
+ A,
+ U64,
+ u64,
+ 64,
+ 8,
+ read_u64,
+ write_u64,
+ "unsigned integer",
+ [u128],
+ [u128],
+ [U128],
+ [U128]
+);
+define_type!(A, U128, u128, 128, 16, read_u128, write_u128, "unsigned integer", [], [], [], []);
+define_type!(
+ An,
+ I16,
+ i16,
+ 16,
+ 2,
+ read_i16,
+ write_i16,
+ "signed integer",
+ [i32, i64, i128, isize],
+ [i32, i64, i128, isize],
+ [I32, I64, I128],
+ [I32, I64, I128]
+);
+define_type!(
+ An,
+ I32,
+ i32,
+ 32,
+ 4,
+ read_i32,
+ write_i32,
+ "signed integer",
+ [i64, i128],
+ [i64, i128],
+ [I64, I128],
+ [I64, I128]
+);
+define_type!(
+ An,
+ I64,
+ i64,
+ 64,
+ 8,
+ read_i64,
+ write_i64,
+ "signed integer",
+ [i128],
+ [i128],
+ [I128],
+ [I128]
+);
+define_type!(An, I128, i128, 128, 16, read_i128, write_i128, "signed integer", [], [], [], []);
+define_type!(
+ An,
+ F32,
+ f32,
+ 32,
+ 4,
+ read_f32,
+ write_f32,
+ "floating point number",
+ [f64],
+ [],
+ [F64],
+ []
+);
+define_type!(An, F64, f64, 64, 8, read_f64, write_f64, "floating point number", [], [], [], []);
+
+macro_rules! module {
+ ($name:ident, $trait:ident, $endianness_str:expr) => {
+ /// Numeric primitives stored in
+ #[doc = $endianness_str]
+ /// byte order.
+ pub mod $name {
+ use byteorder::$trait;
+
+ module!(@ty U16, $trait, "16-bit unsigned integer", $endianness_str);
+ module!(@ty U32, $trait, "32-bit unsigned integer", $endianness_str);
+ module!(@ty U64, $trait, "64-bit unsigned integer", $endianness_str);
+ module!(@ty U128, $trait, "128-bit unsigned integer", $endianness_str);
+ module!(@ty I16, $trait, "16-bit signed integer", $endianness_str);
+ module!(@ty I32, $trait, "32-bit signed integer", $endianness_str);
+ module!(@ty I64, $trait, "64-bit signed integer", $endianness_str);
+ module!(@ty I128, $trait, "128-bit signed integer", $endianness_str);
+ module!(@ty F32, $trait, "32-bit floating point number", $endianness_str);
+ module!(@ty F64, $trait, "64-bit floating point number", $endianness_str);
+ }
+ };
+ (@ty $ty:ident, $trait:ident, $desc_str:expr, $endianness_str:expr) => {
+ /// A
+ #[doc = $desc_str]
+ /// stored in
+ #[doc = $endianness_str]
+ /// byte order.
+ pub type $ty = crate::byteorder::$ty<$trait>;
+ };
+}
+
+module!(big_endian, BigEndian, "big-endian");
+module!(little_endian, LittleEndian, "little-endian");
+module!(network_endian, NetworkEndian, "network-endian");
+module!(native_endian, NativeEndian, "native-endian");
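+
+// Illustrative usage of the generated alias modules (a sketch, not part of
+// this file):
+//
+//     use zerocopy::byteorder::big_endian::U32;
+//
+//     let x = U32::new(0xDEADBEEF);
+//     let bytes: &[u8; 4] = x.as_ref();
+//     assert_eq!(bytes, &[0xDE, 0xAD, 0xBE, 0xEF]);
+//     assert_eq!(x.get(), 0xDEADBEEF); // `get` swaps on little-endian targets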
+
+#[cfg(any(test, kani))]
+mod tests {
+ use ::byteorder::NativeEndian;
+
+ use {
+ super::*,
+ crate::{AsBytes, FromBytes, Unaligned},
+ };
+
+ #[cfg(not(kani))]
+ mod compatibility {
+ pub(super) use rand::{
+ distributions::{Distribution, Standard},
+ rngs::SmallRng,
+ Rng, SeedableRng,
+ };
+
+ pub(crate) trait Arbitrary {}
+
+ impl<T> Arbitrary for T {}
+ }
+
+ #[cfg(kani)]
+ mod compatibility {
+ pub(crate) use kani::Arbitrary;
+
+ pub(crate) struct SmallRng;
+
+ impl SmallRng {
+ pub(crate) fn seed_from_u64(_state: u64) -> Self {
+ Self
+ }
+ }
+
+ pub(crate) trait Rng {
+ fn sample<T, D: Distribution<T>>(&mut self, _distr: D) -> T
+ where
+ T: Arbitrary,
+ {
+ kani::any()
+ }
+ }
+
+ impl Rng for SmallRng {}
+
+ pub(crate) trait Distribution<T> {}
+ impl<T, U> Distribution<T> for U {}
+
+ pub(crate) struct Standard;
+ }
+
+ use compatibility::*;
+
+    // A native numeric type (`u16`, `i32`, `f64`, etc.).
+ trait Native: Arbitrary + FromBytes + AsBytes + Copy + PartialEq + Debug {
+ const ZERO: Self;
+ const MAX_VALUE: Self;
+
+ type Distribution: Distribution<Self>;
+ const DIST: Self::Distribution;
+
+ fn rand<R: Rng>(rng: &mut R) -> Self {
+ rng.sample(Self::DIST)
+ }
+
+ #[cfg(kani)]
+ fn any() -> Self {
+ kani::any()
+ }
+
+ fn checked_add(self, rhs: Self) -> Option<Self>;
+ fn checked_div(self, rhs: Self) -> Option<Self>;
+ fn checked_mul(self, rhs: Self) -> Option<Self>;
+ fn checked_rem(self, rhs: Self) -> Option<Self>;
+ fn checked_sub(self, rhs: Self) -> Option<Self>;
+ fn checked_shl(self, rhs: Self) -> Option<Self>;
+ fn checked_shr(self, rhs: Self) -> Option<Self>;
+
+ fn is_nan(self) -> bool;
+
+ /// For `f32` and `f64`, NaN values are not considered equal to
+ /// themselves. This method is like `assert_eq!`, but it treats NaN
+ /// values as equal.
+ fn assert_eq_or_nan(self, other: Self) {
+ let slf = (!self.is_nan()).then(|| self);
+ let other = (!other.is_nan()).then(|| other);
+ assert_eq!(slf, other);
+ }
+ }
+
+ trait ByteArray:
+ FromBytes + AsBytes + Copy + AsRef<[u8]> + AsMut<[u8]> + Debug + Default + Eq
+ {
+ /// Invert the order of the bytes in the array.
+ fn invert(self) -> Self;
+ }
+
+ trait ByteOrderType: FromBytes + AsBytes + Unaligned + Copy + Eq + Debug {
+ type Native: Native;
+ type ByteArray: ByteArray;
+
+ const ZERO: Self;
+
+ fn new(native: Self::Native) -> Self;
+ fn get(self) -> Self::Native;
+ fn set(&mut self, native: Self::Native);
+ fn from_bytes(bytes: Self::ByteArray) -> Self;
+ fn into_bytes(self) -> Self::ByteArray;
+
+ /// For `f32` and `f64`, NaN values are not considered equal to
+ /// themselves. This method is like `assert_eq!`, but it treats NaN
+ /// values as equal.
+ fn assert_eq_or_nan(self, other: Self) {
+ let slf = (!self.get().is_nan()).then(|| self);
+ let other = (!other.get().is_nan()).then(|| other);
+ assert_eq!(slf, other);
+ }
+ }
+
+ trait ByteOrderTypeUnsigned: ByteOrderType {
+ const MAX_VALUE: Self;
+ }
+
+ macro_rules! impl_byte_array {
+ ($bytes:expr) => {
+ impl ByteArray for [u8; $bytes] {
+ fn invert(mut self) -> [u8; $bytes] {
+ self.reverse();
+ self
+ }
+ }
+ };
+ }
+
+ impl_byte_array!(2);
+ impl_byte_array!(4);
+ impl_byte_array!(8);
+ impl_byte_array!(16);
+
+ macro_rules! impl_byte_order_type_unsigned {
+ ($name:ident, unsigned) => {
+ impl<O: ByteOrder> ByteOrderTypeUnsigned for $name<O> {
+ const MAX_VALUE: $name<O> = $name::MAX_VALUE;
+ }
+ };
+ ($name:ident, signed) => {};
+ }
+
+ macro_rules! impl_traits {
+ ($name:ident, $native:ident, $bytes:expr, $sign:ident $(, @$float:ident)?) => {
+ impl Native for $native {
+ // For some types, `0 as $native` is required (for example, when
+ // `$native` is a floating-point type; `0` is an integer), but
+ // for other types, it's a trivial cast. In all cases, Clippy
+ // thinks it's dangerous.
+ #[allow(trivial_numeric_casts, clippy::as_conversions)]
+ const ZERO: $native = 0 as $native;
+ const MAX_VALUE: $native = $native::MAX;
+
+ type Distribution = Standard;
+ const DIST: Standard = Standard;
+
+ impl_traits!(@float_dependent_methods $(@$float)?);
+ }
+
+ impl<O: ByteOrder> ByteOrderType for $name<O> {
+ type Native = $native;
+ type ByteArray = [u8; $bytes];
+
+ const ZERO: $name<O> = $name::ZERO;
+
+ fn new(native: $native) -> $name<O> {
+ $name::new(native)
+ }
+
+ fn get(self) -> $native {
+ $name::get(self)
+ }
+
+ fn set(&mut self, native: $native) {
+ $name::set(self, native)
+ }
+
+ fn from_bytes(bytes: [u8; $bytes]) -> $name<O> {
+ $name::from(bytes)
+ }
+
+ fn into_bytes(self) -> [u8; $bytes] {
+ <[u8; $bytes]>::from(self)
+ }
+ }
+
+ impl_byte_order_type_unsigned!($name, $sign);
+ };
+ (@float_dependent_methods) => {
+ fn checked_add(self, rhs: Self) -> Option<Self> { self.checked_add(rhs) }
+ fn checked_div(self, rhs: Self) -> Option<Self> { self.checked_div(rhs) }
+ fn checked_mul(self, rhs: Self) -> Option<Self> { self.checked_mul(rhs) }
+ fn checked_rem(self, rhs: Self) -> Option<Self> { self.checked_rem(rhs) }
+ fn checked_sub(self, rhs: Self) -> Option<Self> { self.checked_sub(rhs) }
+ fn checked_shl(self, rhs: Self) -> Option<Self> { self.checked_shl(rhs.try_into().unwrap_or(u32::MAX)) }
+ fn checked_shr(self, rhs: Self) -> Option<Self> { self.checked_shr(rhs.try_into().unwrap_or(u32::MAX)) }
+ fn is_nan(self) -> bool { false }
+ };
+ (@float_dependent_methods @float) => {
+ fn checked_add(self, rhs: Self) -> Option<Self> { Some(self + rhs) }
+ fn checked_div(self, rhs: Self) -> Option<Self> { Some(self / rhs) }
+ fn checked_mul(self, rhs: Self) -> Option<Self> { Some(self * rhs) }
+ fn checked_rem(self, rhs: Self) -> Option<Self> { Some(self % rhs) }
+ fn checked_sub(self, rhs: Self) -> Option<Self> { Some(self - rhs) }
+ fn checked_shl(self, _rhs: Self) -> Option<Self> { unimplemented!() }
+ fn checked_shr(self, _rhs: Self) -> Option<Self> { unimplemented!() }
+ fn is_nan(self) -> bool { self.is_nan() }
+ };
+ }
+
+ impl_traits!(U16, u16, 2, unsigned);
+ impl_traits!(U32, u32, 4, unsigned);
+ impl_traits!(U64, u64, 8, unsigned);
+ impl_traits!(U128, u128, 16, unsigned);
+ impl_traits!(I16, i16, 2, signed);
+ impl_traits!(I32, i32, 4, signed);
+ impl_traits!(I64, i64, 8, signed);
+ impl_traits!(I128, i128, 16, signed);
+ impl_traits!(F32, f32, 4, signed, @float);
+ impl_traits!(F64, f64, 8, signed, @float);
+
+ macro_rules! call_for_unsigned_types {
+ ($fn:ident, $byteorder:ident) => {
+ $fn::<U16<$byteorder>>();
+ $fn::<U32<$byteorder>>();
+ $fn::<U64<$byteorder>>();
+ $fn::<U128<$byteorder>>();
+ };
+ }
+
+ macro_rules! call_for_signed_types {
+ ($fn:ident, $byteorder:ident) => {
+ $fn::<I16<$byteorder>>();
+ $fn::<I32<$byteorder>>();
+ $fn::<I64<$byteorder>>();
+ $fn::<I128<$byteorder>>();
+ };
+ }
+
+ macro_rules! call_for_float_types {
+ ($fn:ident, $byteorder:ident) => {
+ $fn::<F32<$byteorder>>();
+ $fn::<F64<$byteorder>>();
+ };
+ }
+
+ macro_rules! call_for_all_types {
+ ($fn:ident, $byteorder:ident) => {
+ call_for_unsigned_types!($fn, $byteorder);
+ call_for_signed_types!($fn, $byteorder);
+ call_for_float_types!($fn, $byteorder);
+ };
+ }
+
+ #[cfg(target_endian = "big")]
+ type NonNativeEndian = LittleEndian;
+ #[cfg(target_endian = "little")]
+ type NonNativeEndian = BigEndian;
+
+ // We use a `u64` seed so that we can use `SeedableRng::seed_from_u64`.
+ // `SmallRng`'s `SeedableRng::Seed` differs by platform, so if we wanted to
+ // call `SeedableRng::from_seed`, which takes a `Seed`, we would need
+ // conditional compilation by `target_pointer_width`.
+ const RNG_SEED: u64 = 0x7A03CAE2F32B5B8F;
+
+ const RAND_ITERS: usize = if cfg!(any(miri, kani)) {
+ // The tests below which use this constant used to take a very long time
+ // on Miri, which slows down local development and CI jobs. We're not
+ // using Miri to check for the correctness of our code, but rather its
+ // soundness, and at least in the context of these particular tests, a
+ // single loop iteration is just as good for surfacing UB as multiple
+ // iterations are.
+ //
+ // As of the writing of this comment, here's one set of measurements:
+ //
+ // $ # RAND_ITERS == 1
+ // $ cargo miri test -- -Z unstable-options --report-time endian
+ // test byteorder::tests::test_native_endian ... ok <0.049s>
+ // test byteorder::tests::test_non_native_endian ... ok <0.061s>
+ //
+ // $ # RAND_ITERS == 1024
+ // $ cargo miri test -- -Z unstable-options --report-time endian
+ // test byteorder::tests::test_native_endian ... ok <25.716s>
+ // test byteorder::tests::test_non_native_endian ... ok <38.127s>
+ 1
+ } else {
+ 1024
+ };
+
+ #[cfg_attr(test, test)]
+ #[cfg_attr(kani, kani::proof)]
+ fn test_zero() {
+ fn test_zero<T: ByteOrderType>() {
+ assert_eq!(T::ZERO.get(), T::Native::ZERO);
+ }
+
+ call_for_all_types!(test_zero, NativeEndian);
+ call_for_all_types!(test_zero, NonNativeEndian);
+ }
+
+ #[cfg_attr(test, test)]
+ #[cfg_attr(kani, kani::proof)]
+ fn test_max_value() {
+ fn test_max_value<T: ByteOrderTypeUnsigned>() {
+ assert_eq!(T::MAX_VALUE.get(), T::Native::MAX_VALUE);
+ }
+
+ call_for_unsigned_types!(test_max_value, NativeEndian);
+ call_for_unsigned_types!(test_max_value, NonNativeEndian);
+ }
+
+ #[cfg_attr(test, test)]
+ #[cfg_attr(kani, kani::proof)]
+ fn test_endian() {
+ fn test<T: ByteOrderType>(invert: bool) {
+ let mut r = SmallRng::seed_from_u64(RNG_SEED);
+ for _ in 0..RAND_ITERS {
+ let native = T::Native::rand(&mut r);
+ let mut bytes = T::ByteArray::default();
+ bytes.as_bytes_mut().copy_from_slice(native.as_bytes());
+ if invert {
+ bytes = bytes.invert();
+ }
+ let mut from_native = T::new(native);
+ let from_bytes = T::from_bytes(bytes);
+
+ from_native.assert_eq_or_nan(from_bytes);
+ from_native.get().assert_eq_or_nan(native);
+ from_bytes.get().assert_eq_or_nan(native);
+
+ assert_eq!(from_native.into_bytes(), bytes);
+ assert_eq!(from_bytes.into_bytes(), bytes);
+
+ let updated = T::Native::rand(&mut r);
+ from_native.set(updated);
+ from_native.get().assert_eq_or_nan(updated);
+ }
+ }
+
+ fn test_native<T: ByteOrderType>() {
+ test::<T>(false);
+ }
+
+ fn test_non_native<T: ByteOrderType>() {
+ test::<T>(true);
+ }
+
+ call_for_all_types!(test_native, NativeEndian);
+ call_for_all_types!(test_non_native, NonNativeEndian);
+ }
+
+ #[test]
+ fn test_ops_impls() {
+ // Test implementations of traits in `core::ops`. Some of these are
+ // fairly banal, but some are optimized to perform the operation without
+ // swapping byte order (namely, bit-wise operations which are identical
+ // regardless of byte order). These are important to test, and while
+ // we're testing those anyway, it's trivial to test all of the impls.
+
+ fn test<T, F, G, H>(op: F, op_native: G, op_native_checked: Option<H>)
+ where
+ T: ByteOrderType,
+ F: Fn(T, T) -> T,
+ G: Fn(T::Native, T::Native) -> T::Native,
+ H: Fn(T::Native, T::Native) -> Option<T::Native>,
+ {
+ let mut r = SmallRng::seed_from_u64(RNG_SEED);
+ for _ in 0..RAND_ITERS {
+ let n0 = T::Native::rand(&mut r);
+ let n1 = T::Native::rand(&mut r);
+ let t0 = T::new(n0);
+ let t1 = T::new(n1);
+
+ // If this operation would overflow/underflow, skip it rather
+ // than attempt to catch and recover from panics.
+ if matches!(&op_native_checked, Some(checked) if checked(n0, n1).is_none()) {
+ continue;
+ }
+
+ let n_res = op_native(n0, n1);
+ let t_res = op(t0, t1);
+
+                // For `f32` and `f64`, NaN values are not considered equal to
+                // themselves. We wrap both results in `Option`s, mapping NaN to
+                // `None`, so that they can still be compared.
+ let n_res = (!T::Native::is_nan(n_res)).then(|| n_res);
+ let t_res = (!T::Native::is_nan(t_res.get())).then(|| t_res.get());
+ assert_eq!(n_res, t_res);
+ }
+ }
+
+ macro_rules! test {
+ (@binary $trait:ident, $method:ident $([$checked_method:ident])?, $($call_for_macros:ident),*) => {{
+ test!(
+ @inner $trait,
+ core::ops::$trait::$method,
+ core::ops::$trait::$method,
+ {
+ #[allow(unused_mut, unused_assignments)]
+ let mut op_native_checked = None::<fn(T::Native, T::Native) -> Option<T::Native>>;
+ $(
+ op_native_checked = Some(T::Native::$checked_method);
+ )?
+ op_native_checked
+ },
+ $($call_for_macros),*
+ );
+ }};
+ (@unary $trait:ident, $method:ident $([$checked_method:ident])?, $($call_for_macros:ident),*) => {{
+ test!(
+ @inner $trait,
+ |slf, _rhs| core::ops::$trait::$method(slf),
+ |slf, _rhs| core::ops::$trait::$method(slf),
+ {
+ #[allow(unused_mut, unused_assignments)]
+ let mut op_native_checked = None::<fn(T::Native, T::Native) -> Option<T::Native>>;
+ $(
+ op_native_checked = Some(|slf, _rhs| T::Native::$checked_method(slf));
+ )?
+ op_native_checked
+ },
+ $($call_for_macros),*
+ );
+ }};
+ (@inner $trait:ident, $op:expr, $op_native:expr, $op_native_checked:expr, $($call_for_macros:ident),*) => {{
+ fn t<T: ByteOrderType + core::ops::$trait<Output = T>>()
+ where
+ T::Native: core::ops::$trait<Output = T::Native>,
+ {
+ test::<T, _, _, _>(
+ $op,
+ $op_native,
+ $op_native_checked,
+ );
+ }
+
+ $(
+ $call_for_macros!(t, NativeEndian);
+ $call_for_macros!(t, NonNativeEndian);
+ )*
+ }};
+ }
+
+ test!(@binary Add, add[checked_add], call_for_all_types);
+ test!(@binary Div, div[checked_div], call_for_all_types);
+ test!(@binary Mul, mul[checked_mul], call_for_all_types);
+ test!(@binary Rem, rem[checked_rem], call_for_all_types);
+ test!(@binary Sub, sub[checked_sub], call_for_all_types);
+
+ test!(@binary BitAnd, bitand, call_for_unsigned_types, call_for_signed_types);
+ test!(@binary BitOr, bitor, call_for_unsigned_types, call_for_signed_types);
+ test!(@binary BitXor, bitxor, call_for_unsigned_types, call_for_signed_types);
+ test!(@binary Shl, shl[checked_shl], call_for_unsigned_types, call_for_signed_types);
+ test!(@binary Shr, shr[checked_shr], call_for_unsigned_types, call_for_signed_types);
+
+ test!(@unary Not, not, call_for_signed_types, call_for_unsigned_types);
+ test!(@unary Neg, neg, call_for_signed_types, call_for_float_types);
+ }
+
+ #[test]
+ fn test_debug_impl() {
+ // Ensure that Debug applies format options to the inner value.
+ let val = U16::<LE>::new(10);
+ assert_eq!(format!("{:?}", val), "U16(10)");
+ assert_eq!(format!("{:03?}", val), "U16(010)");
+ assert_eq!(format!("{:x?}", val), "U16(a)");
+ }
+}
diff --git a/third_party/rust/zerocopy/src/lib.rs b/third_party/rust/zerocopy/src/lib.rs
new file mode 100644
index 0000000000..1e826439ff
--- /dev/null
+++ b/third_party/rust/zerocopy/src/lib.rs
@@ -0,0 +1,8256 @@
+// Copyright 2018 The Fuchsia Authors
+//
+// Licensed under the 2-Clause BSD License <LICENSE-BSD or
+// https://opensource.org/license/bsd-2-clause>, Apache License, Version 2.0
+// <LICENSE-APACHE or https://www.apache.org/licenses/LICENSE-2.0>, or the MIT
+// license <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your option.
+// This file may not be copied, modified, or distributed except according to
+// those terms.
+
+// After updating the following doc comment, make sure to run the following
+// command to update `README.md` based on its contents:
+//
+// ./generate-readme.sh > README.md
+
+//! *<span style="font-size: 100%; color:grey;">Want to help improve zerocopy?
+//! Fill out our [user survey][user-survey]!</span>*
+//!
+//! ***<span style="font-size: 140%">Fast, safe, <span
+//! style="color:red;">compile error</span>. Pick two.</span>***
+//!
+//! Zerocopy makes zero-cost memory manipulation effortless. We write `unsafe`
+//! so you don't have to.
+//!
+//! # Overview
+//!
+//! Zerocopy provides four core marker traits, each of which can be derived
+//! (e.g., `#[derive(FromZeroes)]`):
+//! - [`FromZeroes`] indicates that a sequence of zero bytes represents a valid
+//! instance of a type
+//! - [`FromBytes`] indicates that a type may safely be converted from an
+//! arbitrary byte sequence
+//! - [`AsBytes`] indicates that a type may safely be converted *to* a byte
+//! sequence
+//! - [`Unaligned`] indicates that a type's alignment requirement is 1
+//!
+//! Types which implement a subset of these traits can then be converted to/from
+//! byte sequences with little to no runtime overhead.
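+//!
+//! For example (a minimal sketch; `Point` is a hypothetical type, not part of
+//! this crate):
+//!
+//! ```rust,edition2021
+//! # #[cfg(feature = "derive")] { // This example uses derives, and won't compile without them
+//! use zerocopy::{AsBytes, FromBytes, FromZeroes};
+//!
+//! #[derive(FromZeroes, FromBytes, AsBytes)]
+//! #[repr(C)]
+//! struct Point {
+//!     x: u32,
+//!     y: u32,
+//! }
+//!
+//! let p = Point { x: 1, y: 2 };
+//! let bytes: &[u8] = p.as_bytes();          // zero-copy serialization
+//! let q = Point::read_from(bytes).unwrap(); // zero-copy parsing
+//! assert_eq!(q.x, 1);
+//! # }
+//! ```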
+//!
+//! Zerocopy also provides byte-order aware integer types that support these
+//! conversions; see the [`byteorder`] module. These types are especially useful
+//! for network parsing.
+//!
+//! [user-survey]: https://docs.google.com/forms/d/e/1FAIpQLSdzBNTN9tzwsmtyZxRFNL02K36IWCdHWW2ZBckyQS2xiO3i8Q/viewform?usp=published_options
+//!
+//! # Cargo Features
+//!
+//! - **`alloc`**
+//! By default, `zerocopy` is `no_std`. When the `alloc` feature is enabled,
+//! the `alloc` crate is added as a dependency, and some allocation-related
+//! functionality is added.
+//!
+//! - **`byteorder`** (enabled by default)
+//! Adds the [`byteorder`] module and a dependency on the `byteorder` crate.
+//! The `byteorder` module provides byte order-aware equivalents of the
+//! multi-byte primitive numerical types. Unlike their primitive equivalents,
+//! the types in this module have no alignment requirement and support byte
+//! order conversions. This can be useful in handling file formats, network
+//! packet layouts, etc which don't provide alignment guarantees and which may
+//! use a byte order different from that of the execution platform.
+//!
+//! - **`derive`**
+//! Provides derives for the core marker traits via the `zerocopy-derive`
+//! crate. These derives are re-exported from `zerocopy`, so it is not
+//! necessary to depend on `zerocopy-derive` directly.
+//!
+//! However, you may experience better compile times if you instead directly
+//! depend on both `zerocopy` and `zerocopy-derive` in your `Cargo.toml`,
+//! since doing so will allow Rust to compile these crates in parallel. To do
+//! so, do *not* enable the `derive` feature, and list both dependencies in
+//!   your `Cargo.toml` with the same leading non-zero version number; e.g.:
+//!
+//! ```toml
+//! [dependencies]
+//! zerocopy = "0.X"
+//! zerocopy-derive = "0.X"
+//! ```
+//!
+//! - **`simd`**
+//! When the `simd` feature is enabled, `FromZeroes`, `FromBytes`, and
+//! `AsBytes` impls are emitted for all stable SIMD types which exist on the
+//! target platform. Note that the layout of SIMD types is not yet stabilized,
+//! so these impls may be removed in the future if layout changes make them
+//! invalid. For more information, see the Unsafe Code Guidelines Reference
+//! page on the [layout of packed SIMD vectors][simd-layout].
+//!
+//! - **`simd-nightly`**
+//! Enables the `simd` feature and adds support for SIMD types which are only
+//! available on nightly. Since these types are unstable, support for any type
+//! may be removed at any point in the future.
+//!
+//! [simd-layout]: https://rust-lang.github.io/unsafe-code-guidelines/layout/packed-simd-vectors.html
+//!
+//! # Security Ethos
+//!
+//! Zerocopy is expressly designed for use in security-critical contexts. We
+//! strive to ensure that zerocopy code is sound under Rust's current
+//! memory model, and *any future memory model*. We ensure this by:
+//! - **...not 'guessing' about Rust's semantics.**
+//! We annotate `unsafe` code with a precise rationale for its soundness that
+//! cites a relevant section of Rust's official documentation. When Rust's
+//! documented semantics are unclear, we work with the Rust Operational
+//! Semantics Team to clarify Rust's documentation.
+//! - **...rigorously testing our implementation.**
+//! We run tests using [Miri], ensuring that zerocopy is sound across a wide
+//! array of supported target platforms of varying endianness and pointer
+//! width, and across both current and experimental memory models of Rust.
+//! - **...formally proving the correctness of our implementation.**
+//! We apply formal verification tools like [Kani][kani] to prove zerocopy's
+//! correctness.
+//!
+//! For more information, see our full [soundness policy].
+//!
+//! [Miri]: https://github.com/rust-lang/miri
+//! [Kani]: https://github.com/model-checking/kani
+//! [soundness policy]: https://github.com/google/zerocopy/blob/main/POLICIES.md#soundness
+//!
+//! # Relationship to Project Safe Transmute
+//!
+//! [Project Safe Transmute] is an official initiative of the Rust Project to
+//! develop language-level support for safer transmutation. The Project consults
+//! with crates like zerocopy to identify aspects of safer transmutation that
+//! would benefit from compiler support, and has developed an [experimental,
+//! compiler-supported analysis][mcp-transmutability] which determines whether,
+//! for a given type, any value of that type may be soundly transmuted into
+//! another type. Once this functionality is sufficiently mature, zerocopy
+//! intends to replace its internal transmutability analysis (implemented by our
+//! custom derives) with the compiler-supported one. This change will likely be
+//! an implementation detail that is invisible to zerocopy's users.
+//!
+//! Project Safe Transmute will not replace the need for most of zerocopy's
+//! higher-level abstractions. The experimental compiler analysis is a tool for
+//! checking the soundness of `unsafe` code, not a tool to avoid writing
+//! `unsafe` code altogether. For the foreseeable future, crates like zerocopy
+//! will still be required in order to provide higher-level abstractions on top
+//! of the building block provided by Project Safe Transmute.
+//!
+//! [Project Safe Transmute]: https://rust-lang.github.io/rfcs/2835-project-safe-transmute.html
+//! [mcp-transmutability]: https://github.com/rust-lang/compiler-team/issues/411
+//!
+//! # MSRV
+//!
+//! See our [MSRV policy].
+//!
+//! [MSRV policy]: https://github.com/google/zerocopy/blob/main/POLICIES.md#msrv
+//!
+//! # Changelog
+//!
+//! Zerocopy uses [GitHub Releases].
+//!
+//! [GitHub Releases]: https://github.com/google/zerocopy/releases
+
+// Sometimes we want to use lints which were added after our MSRV.
+// `unknown_lints` is `warn` by default and we deny warnings in CI, so without
+// this attribute, any unknown lint would cause a CI failure when testing with
+// our MSRV.
+#![allow(unknown_lints)]
+#![deny(renamed_and_removed_lints)]
+#![deny(
+ anonymous_parameters,
+ deprecated_in_future,
+ illegal_floating_point_literal_pattern,
+ late_bound_lifetime_arguments,
+ missing_copy_implementations,
+ missing_debug_implementations,
+ missing_docs,
+ path_statements,
+ patterns_in_fns_without_body,
+ rust_2018_idioms,
+ trivial_numeric_casts,
+ unreachable_pub,
+ unsafe_op_in_unsafe_fn,
+ unused_extern_crates,
+ unused_qualifications,
+ variant_size_differences
+)]
+#![cfg_attr(
+ __INTERNAL_USE_ONLY_NIGHLTY_FEATURES_IN_TESTS,
+ deny(fuzzy_provenance_casts, lossy_provenance_casts)
+)]
+#![deny(
+ clippy::all,
+ clippy::alloc_instead_of_core,
+ clippy::arithmetic_side_effects,
+ clippy::as_underscore,
+ clippy::assertions_on_result_states,
+ clippy::as_conversions,
+ clippy::correctness,
+ clippy::dbg_macro,
+ clippy::decimal_literal_representation,
+ clippy::get_unwrap,
+ clippy::indexing_slicing,
+ clippy::missing_inline_in_public_items,
+ clippy::missing_safety_doc,
+ clippy::obfuscated_if_else,
+ clippy::perf,
+ clippy::print_stdout,
+ clippy::std_instead_of_core,
+ clippy::style,
+ clippy::suspicious,
+ clippy::todo,
+ clippy::undocumented_unsafe_blocks,
+ clippy::unimplemented,
+ clippy::unnested_or_patterns,
+ clippy::unwrap_used,
+ clippy::use_debug
+)]
+#![deny(
+ rustdoc::bare_urls,
+ rustdoc::broken_intra_doc_links,
+ rustdoc::invalid_codeblock_attributes,
+ rustdoc::invalid_html_tags,
+ rustdoc::invalid_rust_codeblocks,
+ rustdoc::missing_crate_level_docs,
+ rustdoc::private_intra_doc_links
+)]
+// In test code, it makes sense to weight more heavily towards concise, readable
+// code over correct or debuggable code.
+#![cfg_attr(any(test, kani), allow(
+ // In tests, you get line numbers and have access to source code, so panic
+ // messages are less important. You also often unwrap a lot, which would
+ // make expect'ing instead very verbose.
+ clippy::unwrap_used,
+ // In tests, there's no harm to "panic risks" - the worst that can happen is
+ // that your test will fail, and you'll fix it. By contrast, panic risks in
+    // production code introduce the possibility of code panicking unexpectedly "in
+ // the field".
+ clippy::arithmetic_side_effects,
+ clippy::indexing_slicing,
+))]
+#![cfg_attr(not(test), no_std)]
+#![cfg_attr(feature = "simd-nightly", feature(stdsimd))]
+#![cfg_attr(doc_cfg, feature(doc_cfg))]
+#![cfg_attr(
+ __INTERNAL_USE_ONLY_NIGHLTY_FEATURES_IN_TESTS,
+ feature(layout_for_ptr, strict_provenance)
+)]
+
+// This is a hack to allow zerocopy-derive derives to work in this crate. They
+// assume that zerocopy is linked as an extern crate, so they access items from
+// it as `zerocopy::Xxx`. This makes that still work.
+#[cfg(any(feature = "derive", test))]
+extern crate self as zerocopy;
+
+#[macro_use]
+mod macros;
+
+#[cfg(feature = "byteorder")]
+#[cfg_attr(doc_cfg, doc(cfg(feature = "byteorder")))]
+pub mod byteorder;
+#[doc(hidden)]
+pub mod macro_util;
+mod post_monomorphization_compile_fail_tests;
+mod util;
+// TODO(#252): If we make this pub, come up with a better name.
+mod wrappers;
+
+#[cfg(feature = "byteorder")]
+#[cfg_attr(doc_cfg, doc(cfg(feature = "byteorder")))]
+pub use crate::byteorder::*;
+pub use crate::wrappers::*;
+
+#[cfg(any(feature = "derive", test))]
+#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
+pub use zerocopy_derive::Unaligned;
+
+// `pub use` separately here so that we can mark it `#[doc(hidden)]`.
+//
+// TODO(#29): Remove this or add a doc comment.
+#[cfg(any(feature = "derive", test))]
+#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
+#[doc(hidden)]
+pub use zerocopy_derive::KnownLayout;
+
+use core::{
+ cell::{self, RefMut},
+ cmp::Ordering,
+ fmt::{self, Debug, Display, Formatter},
+ hash::Hasher,
+ marker::PhantomData,
+ mem::{self, ManuallyDrop, MaybeUninit},
+ num::{
+ NonZeroI128, NonZeroI16, NonZeroI32, NonZeroI64, NonZeroI8, NonZeroIsize, NonZeroU128,
+ NonZeroU16, NonZeroU32, NonZeroU64, NonZeroU8, NonZeroUsize, Wrapping,
+ },
+ ops::{Deref, DerefMut},
+ ptr::{self, NonNull},
+ slice,
+};
+
+#[cfg(feature = "alloc")]
+extern crate alloc;
+#[cfg(feature = "alloc")]
+use alloc::{boxed::Box, vec::Vec};
+
+#[cfg(any(feature = "alloc", kani))]
+use core::alloc::Layout;
+
+// Used by `TryFromBytes::is_bit_valid`.
+#[doc(hidden)]
+pub use crate::util::ptr::Ptr;
+
+// For each polyfill, as soon as the corresponding feature is stable, the
+// polyfill import will be unused because method/function resolution will prefer
+// the inherent method/function over a trait method/function. Thus, we suppress
+// the `unused_imports` warning.
+//
+// See the documentation on `util::polyfills` for more information.
+#[allow(unused_imports)]
+use crate::util::polyfills::NonNullExt as _;
+
+#[rustversion::nightly]
+#[cfg(all(test, not(__INTERNAL_USE_ONLY_NIGHLTY_FEATURES_IN_TESTS)))]
+const _: () = {
+ #[deprecated = "some tests may be skipped due to missing RUSTFLAGS=\"--cfg __INTERNAL_USE_ONLY_NIGHLTY_FEATURES_IN_TESTS\""]
+ const _WARNING: () = ();
+ #[warn(deprecated)]
+ _WARNING
+};
+
+/// The target pointer width, counted in bits.
+const POINTER_WIDTH_BITS: usize = mem::size_of::<usize>() * 8;
+
+/// The layout of a type which might be dynamically-sized.
+///
+/// `DstLayout` describes the layout of sized types, slice types, and "slice
+/// DSTs" - ie, those that are known by the type system to have a trailing slice
+/// (as distinguished from `dyn Trait` types - such types *might* have a
+/// trailing slice type, but the type system isn't aware of it).
+///
+/// # Safety
+///
+/// Unlike [`core::alloc::Layout`], `DstLayout` is only used to describe full
+/// Rust types - ie, those that satisfy the layout requirements outlined by
+/// [the reference]. Callers may assume that an instance of `DstLayout`
+/// satisfies any conditions imposed on Rust types by the reference.
+///
+/// If `layout: DstLayout` describes a type, `T`, then it is guaranteed that:
+/// - `layout.align` is equal to `T`'s alignment
+/// - If `layout.size_info` is `SizeInfo::Sized { size }`, then `T: Sized` and
+/// `size_of::<T>() == size`
+/// - If `layout.size_info` is `SizeInfo::SliceDst(slice_layout)`, then
+/// - `T` is a slice DST
+/// - The `size` of an instance of `T` with `elems` trailing slice elements is
+/// equal to `slice_layout.offset + slice_layout.elem_size * elems` rounded up
+/// to the nearest multiple of `layout.align`. Any bytes in the range
+/// `[slice_layout.offset + slice_layout.elem_size * elems, size)` are padding
+/// and must not be assumed to be initialized.
+///
+/// [the reference]: https://doc.rust-lang.org/reference/type-layout.html
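+//
+// A worked example of the size formula above (a sketch; `Foo` is a
+// hypothetical type, not part of this crate):
+//
+//     #[repr(C)]
+//     struct Foo {
+//         a: u16,  // offset 0, size 2
+//         b: u8,   // offset 2, size 1
+//         c: [u8], // offset 3, elem_size 1
+//     }
+//
+// Here `layout.align == 2`, `slice_layout.offset == 3`, and
+// `slice_layout.elem_size == 1`. With `elems == 4`, the formula gives
+// `3 + 1 * 4 == 7`, which rounds up to a size of 8; the byte in the range
+// `[7, 8)` is trailing padding.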
+#[doc(hidden)]
+#[allow(missing_debug_implementations, missing_copy_implementations)]
+#[cfg_attr(any(kani, test), derive(Copy, Clone, Debug, PartialEq, Eq))]
+pub struct DstLayout {
+ align: NonZeroUsize,
+ size_info: SizeInfo,
+}
+
+#[cfg_attr(any(kani, test), derive(Copy, Clone, Debug, PartialEq, Eq))]
+enum SizeInfo<E = usize> {
+ Sized { _size: usize },
+ SliceDst(TrailingSliceLayout<E>),
+}
+
+#[cfg_attr(any(kani, test), derive(Copy, Clone, Debug, PartialEq, Eq))]
+struct TrailingSliceLayout<E = usize> {
+ // The offset of the first byte of the trailing slice field. Note that this
+ // is NOT the same as the minimum size of the type. For example, consider
+ // the following type:
+ //
+ // struct Foo {
+ // a: u16,
+ // b: u8,
+ // c: [u8],
+ // }
+ //
+ // In `Foo`, `c` is at byte offset 3. When `c.len() == 0`, `c` is followed
+ // by a padding byte.
+ _offset: usize,
+ // The size of the element type of the trailing slice field.
+ _elem_size: E,
+}
+
+impl SizeInfo {
+ /// Attempts to create a `SizeInfo` from `Self` in which `elem_size` is a
+ /// `NonZeroUsize`. If `elem_size` is 0, returns `None`.
+ #[allow(unused)]
+ const fn try_to_nonzero_elem_size(&self) -> Option<SizeInfo<NonZeroUsize>> {
+ Some(match *self {
+ SizeInfo::Sized { _size } => SizeInfo::Sized { _size },
+ SizeInfo::SliceDst(TrailingSliceLayout { _offset, _elem_size }) => {
+ if let Some(_elem_size) = NonZeroUsize::new(_elem_size) {
+ SizeInfo::SliceDst(TrailingSliceLayout { _offset, _elem_size })
+ } else {
+ return None;
+ }
+ }
+ })
+ }
+}
+
+#[doc(hidden)]
+#[derive(Copy, Clone)]
+#[cfg_attr(test, derive(Debug))]
+#[allow(missing_debug_implementations)]
+pub enum _CastType {
+ _Prefix,
+ _Suffix,
+}
+
+impl DstLayout {
+ /// The minimum possible alignment of a type.
+ const MIN_ALIGN: NonZeroUsize = match NonZeroUsize::new(1) {
+ Some(min_align) => min_align,
+ None => unreachable!(),
+ };
+
+    /// The maximum theoretically possible alignment of a type.
+ ///
+ /// For compatibility with future Rust versions, this is defined as the
+ /// maximum power-of-two that fits into a `usize`. See also
+ /// [`DstLayout::CURRENT_MAX_ALIGN`].
+ const THEORETICAL_MAX_ALIGN: NonZeroUsize =
+ match NonZeroUsize::new(1 << (POINTER_WIDTH_BITS - 1)) {
+ Some(max_align) => max_align,
+ None => unreachable!(),
+ };
+
+ /// The current, documented max alignment of a type \[1\].
+ ///
+ /// \[1\] Per <https://doc.rust-lang.org/reference/type-layout.html#the-alignment-modifiers>:
+ ///
+ /// The alignment value must be a power of two from 1 up to
+ /// 2<sup>29</sup>.
+ #[cfg(not(kani))]
+ const CURRENT_MAX_ALIGN: NonZeroUsize = match NonZeroUsize::new(1 << 28) {
+ Some(max_align) => max_align,
+ None => unreachable!(),
+ };
+
+ /// Constructs a `DstLayout` for a zero-sized type with `repr_align`
+ /// alignment (or 1). If `repr_align` is provided, then it must be a power
+ /// of two.
+ ///
+ /// # Panics
+ ///
+ /// This function panics if the supplied `repr_align` is not a power of two.
+ ///
+ /// # Safety
+ ///
+ /// Unsafe code may assume that the contract of this function is satisfied.
+ #[doc(hidden)]
+ #[inline]
+ pub const fn new_zst(repr_align: Option<NonZeroUsize>) -> DstLayout {
+ let align = match repr_align {
+ Some(align) => align,
+ None => Self::MIN_ALIGN,
+ };
+
+ assert!(align.is_power_of_two());
+
+ DstLayout { align, size_info: SizeInfo::Sized { _size: 0 } }
+ }
+
+ /// Constructs a `DstLayout` which describes `T`.
+ ///
+ /// # Safety
+ ///
+ /// Unsafe code may assume that `DstLayout` is the correct layout for `T`.
+ #[doc(hidden)]
+ #[inline]
+ pub const fn for_type<T>() -> DstLayout {
+ // SAFETY: `align` is correct by construction. `T: Sized`, and so it is
+ // sound to initialize `size_info` to `SizeInfo::Sized { size }`; the
+ // `size` field is also correct by construction.
+ DstLayout {
+ align: match NonZeroUsize::new(mem::align_of::<T>()) {
+ Some(align) => align,
+ None => unreachable!(),
+ },
+ size_info: SizeInfo::Sized { _size: mem::size_of::<T>() },
+ }
+ }
+
+ /// Constructs a `DstLayout` which describes `[T]`.
+ ///
+ /// # Safety
+ ///
+ /// Unsafe code may assume that `DstLayout` is the correct layout for `[T]`.
+ const fn for_slice<T>() -> DstLayout {
+ // SAFETY: The alignment of a slice is equal to the alignment of its
+ // element type, and so `align` is initialized correctly.
+ //
+ // Since this is just a slice type, there is no offset between the
+ // beginning of the type and the beginning of the slice, so it is
+ // correct to set `offset: 0`. The `elem_size` is correct by
+ // construction. Since `[T]` is a (degenerate case of a) slice DST, it
+ // is correct to initialize `size_info` to `SizeInfo::SliceDst`.
+ DstLayout {
+ align: match NonZeroUsize::new(mem::align_of::<T>()) {
+ Some(align) => align,
+ None => unreachable!(),
+ },
+ size_info: SizeInfo::SliceDst(TrailingSliceLayout {
+ _offset: 0,
+ _elem_size: mem::size_of::<T>(),
+ }),
+ }
+ }
+
+ /// Like `Layout::extend`, this creates a layout that describes a record
+    /// whose layout consists of `self` followed by `field`, including the
+    /// necessary inter-field padding but not any trailing padding.
+ ///
+ /// In order to match the layout of a `#[repr(C)]` struct, this method
+ /// should be invoked for each field in declaration order. To add trailing
+ /// padding, call `DstLayout::pad_to_align` after extending the layout for
+ /// all fields. If `self` corresponds to a type marked with
+ /// `repr(packed(N))`, then `repr_packed` should be set to `Some(N)`,
+ /// otherwise `None`.
+ ///
+ /// This method cannot be used to match the layout of a record with the
+ /// default representation, as that representation is mostly unspecified.
+ ///
+ /// # Safety
+ ///
+ /// If a (potentially hypothetical) valid `repr(C)` Rust type begins with
+    /// fields whose layout is described by `self`, and those fields are immediately
+ /// followed by a field whose layout is `field`, then unsafe code may rely
+ /// on `self.extend(field, repr_packed)` producing a layout that correctly
+ /// encompasses those two components.
+ ///
+    /// We make no guarantees about the behavior of this method if these
+ /// cannot appear in a valid Rust type (e.g., the concatenation of the
+ /// layouts would lead to a size larger than `isize::MAX`).
+ #[doc(hidden)]
+ #[inline]
+ pub const fn extend(self, field: DstLayout, repr_packed: Option<NonZeroUsize>) -> Self {
+ use util::{core_layout::padding_needed_for, max, min};
+
+ // If `repr_packed` is `None`, there are no alignment constraints, and
+ // the value can be defaulted to `THEORETICAL_MAX_ALIGN`.
+ let max_align = match repr_packed {
+ Some(max_align) => max_align,
+ None => Self::THEORETICAL_MAX_ALIGN,
+ };
+
+ assert!(max_align.is_power_of_two());
+
+ // We use Kani to prove that this method is robust to future increases
+ // in Rust's maximum allowed alignment. However, if such a change ever
+ // actually occurs, we'd like to be notified via assertion failures.
+ #[cfg(not(kani))]
+ {
+ debug_assert!(self.align.get() <= DstLayout::CURRENT_MAX_ALIGN.get());
+ debug_assert!(field.align.get() <= DstLayout::CURRENT_MAX_ALIGN.get());
+ if let Some(repr_packed) = repr_packed {
+ debug_assert!(repr_packed.get() <= DstLayout::CURRENT_MAX_ALIGN.get());
+ }
+ }
+
+ // The field's alignment is clamped by `repr_packed` (i.e., the
+ // `repr(packed(N))` attribute, if any) [1].
+ //
+ // [1] Per https://doc.rust-lang.org/reference/type-layout.html#the-alignment-modifiers:
+ //
+ // The alignments of each field, for the purpose of positioning
+ // fields, is the smaller of the specified alignment and the alignment
+ // of the field's type.
+ let field_align = min(field.align, max_align);
+
+ // The struct's alignment is the maximum of its previous alignment and
+ // `field_align`.
+ let align = max(self.align, field_align);
+
+ let size_info = match self.size_info {
+ // If the layout is already a DST, we panic; DSTs cannot be extended
+ // with additional fields.
+ SizeInfo::SliceDst(..) => panic!("Cannot extend a DST with additional fields."),
+
+ SizeInfo::Sized { _size: preceding_size } => {
+ // Compute the minimum amount of inter-field padding needed to
+ // satisfy the field's alignment, and offset of the trailing
+ // field. [1]
+ //
+ // [1] Per https://doc.rust-lang.org/reference/type-layout.html#the-alignment-modifiers:
+ //
+ // Inter-field padding is guaranteed to be the minimum
+ // required in order to satisfy each field's (possibly
+ // altered) alignment.
+ let padding = padding_needed_for(preceding_size, field_align);
+
+ // This will not panic (and is proven to not panic, with Kani)
+ // if the layout components can correspond to a leading layout
+ // fragment of a valid Rust type, but may panic otherwise (e.g.,
+ // combining or aligning the components would create a size
+ // exceeding `isize::MAX`).
+ let offset = match preceding_size.checked_add(padding) {
+ Some(offset) => offset,
+ None => panic!("Adding padding to `self`'s size overflows `usize`."),
+ };
+
+ match field.size_info {
+ SizeInfo::Sized { _size: field_size } => {
+ // If the trailing field is sized, the resulting layout
+ // will be sized. Its size will be the sum of the
+                        // preceding layout, the size of the new field, and the
+ // size of inter-field padding between the two.
+ //
+ // This will not panic (and is proven with Kani to not
+ // panic) if the layout components can correspond to a
+ // leading layout fragment of a valid Rust type, but may
+ // panic otherwise (e.g., combining or aligning the
+ // components would create a size exceeding
+ // `usize::MAX`).
+ let size = match offset.checked_add(field_size) {
+ Some(size) => size,
+ None => panic!("`field` cannot be appended without the total size overflowing `usize`"),
+ };
+ SizeInfo::Sized { _size: size }
+ }
+ SizeInfo::SliceDst(TrailingSliceLayout {
+ _offset: trailing_offset,
+ _elem_size,
+ }) => {
+ // If the trailing field is dynamically sized, so too
+ // will the resulting layout. The offset of the trailing
+ // slice component is the sum of the offset of the
+ // trailing field and the trailing slice offset within
+ // that field.
+ //
+ // This will not panic (and is proven with Kani to not
+ // panic) if the layout components can correspond to a
+ // leading layout fragment of a valid Rust type, but may
+ // panic otherwise (e.g., combining or aligning the
+ // components would create a size exceeding
+ // `usize::MAX`).
+ let offset = match offset.checked_add(trailing_offset) {
+ Some(offset) => offset,
+ None => panic!("`field` cannot be appended without the total size overflowing `usize`"),
+ };
+ SizeInfo::SliceDst(TrailingSliceLayout { _offset: offset, _elem_size })
+ }
+ }
+ }
+ };
+
+ DstLayout { align, size_info }
+ }
+
+    /// Like `Layout::pad_to_align`, this routine rounds the size of this
+    /// layout up to the nearest multiple of its alignment (for a type marked
+    /// `repr(packed(N))`, that alignment has already been clamped by
+    /// [`DstLayout::extend`]). This method leaves DST layouts unchanged, since
+    /// the trailing padding of DSTs is computed at runtime.
+    ///
+    /// In order to match the layout of a `#[repr(C)]` struct, this method
+    /// should be invoked after all invocations of [`DstLayout::extend`].
+ ///
+ /// This method cannot be used to match the layout of a record with the
+ /// default representation, as that representation is mostly unspecified.
+ ///
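+    /// For example, a sized layout with size 5 and alignment 4 pads to size
+    /// 8, while a sized layout whose size is already a multiple of its
+    /// alignment is returned unchanged.
+    ///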
+ /// # Safety
+ ///
+ /// If a (potentially hypothetical) valid `repr(C)` type begins with fields
+    /// whose layout is `self`, followed only by zero or more bytes of trailing
+    /// padding (not included in `self`), then unsafe code may rely on
+    /// `self.pad_to_align()` producing a layout that correctly
+ /// encapsulates the layout of that type.
+ ///
+    /// We make no guarantees about the behavior of this method if `self` cannot
+ /// appear in a valid Rust type (e.g., because the addition of trailing
+ /// padding would lead to a size larger than `isize::MAX`).
+ #[doc(hidden)]
+ #[inline]
+ pub const fn pad_to_align(self) -> Self {
+ use util::core_layout::padding_needed_for;
+
+ let size_info = match self.size_info {
+ // For sized layouts, we add the minimum amount of trailing padding
+ // needed to satisfy alignment.
+ SizeInfo::Sized { _size: unpadded_size } => {
+ let padding = padding_needed_for(unpadded_size, self.align);
+ let size = match unpadded_size.checked_add(padding) {
+ Some(size) => size,
+ None => panic!("Adding padding caused size to overflow `usize`."),
+ };
+ SizeInfo::Sized { _size: size }
+ }
+ // For DST layouts, trailing padding depends on the length of the
+ // trailing DST and is computed at runtime. This does not alter the
+ // offset or element size of the layout, so we leave `size_info`
+ // unchanged.
+ size_info @ SizeInfo::SliceDst(_) => size_info,
+ };
+
+ DstLayout { align: self.align, size_info }
+ }
+
+ /// Validates that a cast is sound from a layout perspective.
+ ///
+ /// Validates that the size and alignment requirements of a type with the
+ /// layout described in `self` would not be violated by performing a
+ /// `cast_type` cast from a pointer with address `addr` which refers to a
+ /// memory region of size `bytes_len`.
+ ///
+ /// If the cast is valid, `validate_cast_and_convert_metadata` returns
+ /// `(elems, split_at)`. If `self` describes a dynamically-sized type, then
+ /// `elems` is the maximum number of trailing slice elements for which a
+    /// cast would be valid (for sized types, `elems` is meaningless and should
+ /// be ignored). `split_at` is the index at which to split the memory region
+ /// in order for the prefix (suffix) to contain the result of the cast, and
+ /// in order for the remaining suffix (prefix) to contain the leftover
+ /// bytes.
+ ///
+ /// There are three conditions under which a cast can fail:
+ /// - The smallest possible value for the type is larger than the provided
+ /// memory region
+ /// - A prefix cast is requested, and `addr` does not satisfy `self`'s
+ /// alignment requirement
+ /// - A suffix cast is requested, and `addr + bytes_len` does not satisfy
+ /// `self`'s alignment requirement (as a consequence, since all instances
+ /// of the type are a multiple of its alignment, no size for the type will
+ /// result in a starting address which is properly aligned)
+ ///
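+    /// As a worked example: for a DST layout with alignment 2, trailing slice
+    /// offset 4, and element size 2, a prefix cast into a region with `addr =
+    /// 0` and `bytes_len = 11` yields `Some((3, 10))`: at most three trailing
+    /// elements fit (`4 + 3 * 2 = 10 <= 11`, and `10` is a multiple of the
+    /// alignment), and the region is split at byte `10`.
+    ///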
+ /// # Safety
+ ///
+ /// The caller may assume that this implementation is correct, and may rely
+ /// on that assumption for the soundness of their code. In particular, the
+ /// caller may assume that, if `validate_cast_and_convert_metadata` returns
+ /// `Some((elems, split_at))`, then:
+ /// - A pointer to the type (for dynamically sized types, this includes
+ /// `elems` as its pointer metadata) describes an object of size `size <=
+ /// bytes_len`
+ /// - If this is a prefix cast:
+ /// - `addr` satisfies `self`'s alignment
+ /// - `size == split_at`
+ /// - If this is a suffix cast:
+ /// - `split_at == bytes_len - size`
+ /// - `addr + split_at` satisfies `self`'s alignment
+ ///
+ /// Note that this method does *not* ensure that a pointer constructed from
+ /// its return values will be a valid pointer. In particular, this method
+ /// does not reason about `isize` overflow, which is a requirement of many
+ /// Rust pointer APIs, and may at some point be determined to be a validity
+ /// invariant of pointer types themselves. This should never be a problem so
+ /// long as the arguments to this method are derived from a known-valid
+ /// pointer (e.g., one derived from a safe Rust reference), but it is
+ /// nonetheless the caller's responsibility to justify that pointer
+ /// arithmetic will not overflow based on a safety argument *other than* the
+ /// mere fact that this method returned successfully.
+ ///
+ /// # Panics
+ ///
+ /// `validate_cast_and_convert_metadata` will panic if `self` describes a
+ /// DST whose trailing slice element is zero-sized.
+ ///
+ /// If `addr + bytes_len` overflows `usize`,
+ /// `validate_cast_and_convert_metadata` may panic, or it may return
+ /// incorrect results. No guarantees are made about when
+ /// `validate_cast_and_convert_metadata` will panic. The caller should not
+ /// rely on `validate_cast_and_convert_metadata` panicking in any particular
+ /// condition, even if `debug_assertions` are enabled.
+ #[allow(unused)]
+ const fn validate_cast_and_convert_metadata(
+ &self,
+ addr: usize,
+ bytes_len: usize,
+ cast_type: _CastType,
+ ) -> Option<(usize, usize)> {
+ // `debug_assert!`, but with `#[allow(clippy::arithmetic_side_effects)]`.
+ macro_rules! __debug_assert {
+ ($e:expr $(, $msg:expr)?) => {
+ debug_assert!({
+ #[allow(clippy::arithmetic_side_effects)]
+ let e = $e;
+ e
+ } $(, $msg)?);
+ };
+ }
+
+ // Note that, in practice, `self` is always a compile-time constant. We
+ // do this check earlier than needed to ensure that we always panic as a
+ // result of bugs in the program (such as calling this function on an
+ // invalid type) instead of allowing this panic to be hidden if the cast
+ // would have failed anyway for runtime reasons (such as a too-small
+ // memory region).
+ //
+ // TODO(#67): Once our MSRV is 1.65, use let-else:
+ // https://blog.rust-lang.org/2022/11/03/Rust-1.65.0.html#let-else-statements
+ let size_info = match self.size_info.try_to_nonzero_elem_size() {
+ Some(size_info) => size_info,
+ None => panic!("attempted to cast to slice type with zero-sized element"),
+ };
+
+ // Precondition
+ __debug_assert!(addr.checked_add(bytes_len).is_some(), "`addr` + `bytes_len` > usize::MAX");
+
+ // Alignment checks go in their own block to avoid introducing variables
+ // into the top-level scope.
+ {
+ // We check alignment for `addr` (for prefix casts) or `addr +
+ // bytes_len` (for suffix casts). For a prefix cast, the correctness
+ // of this check is trivial - `addr` is the address the object will
+ // live at.
+ //
+ // For a suffix cast, we know that all valid sizes for the type are
+ // a multiple of the alignment (and by safety precondition, we know
+ // `DstLayout` may only describe valid Rust types). Thus, a
+ // validly-sized instance which lives at a validly-aligned address
+ // must also end at a validly-aligned address. Thus, if the end
+ // address for a suffix cast (`addr + bytes_len`) is not aligned,
+ // then no valid start address will be aligned either.
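+            //
+            // For example, if `align == 4` and `addr + bytes_len` is a
+            // multiple of 4, then an instance of size 8 ending there starts
+            // at `addr + bytes_len - 8`, which is also a multiple of 4; if
+            // the end address is misaligned, every candidate start address
+            // is misaligned too.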
+ let offset = match cast_type {
+ _CastType::_Prefix => 0,
+ _CastType::_Suffix => bytes_len,
+ };
+
+ // Addition is guaranteed not to overflow because `offset <=
+ // bytes_len`, and `addr + bytes_len <= usize::MAX` is a
+ // precondition of this method. Modulus is guaranteed not to divide
+ // by 0 because `align` is non-zero.
+ #[allow(clippy::arithmetic_side_effects)]
+ if (addr + offset) % self.align.get() != 0 {
+ return None;
+ }
+ }
+
+ let (elems, self_bytes) = match size_info {
+ SizeInfo::Sized { _size: size } => {
+ if size > bytes_len {
+ return None;
+ }
+ (0, size)
+ }
+ SizeInfo::SliceDst(TrailingSliceLayout { _offset: offset, _elem_size: elem_size }) => {
+ // Calculate the maximum number of bytes that could be consumed
+ // - any number of bytes larger than this will either not be a
+ // multiple of the alignment, or will be larger than
+ // `bytes_len`.
+ let max_total_bytes =
+ util::round_down_to_next_multiple_of_alignment(bytes_len, self.align);
+ // Calculate the maximum number of bytes that could be consumed
+ // by the trailing slice.
+ //
+ // TODO(#67): Once our MSRV is 1.65, use let-else:
+ // https://blog.rust-lang.org/2022/11/03/Rust-1.65.0.html#let-else-statements
+ let max_slice_and_padding_bytes = match max_total_bytes.checked_sub(offset) {
+ Some(max) => max,
+ // `bytes_len` too small even for 0 trailing slice elements.
+ None => return None,
+ };
+
+ // Calculate the number of elements that fit in
+ // `max_slice_and_padding_bytes`; any remaining bytes will be
+ // considered padding.
+ //
+ // Guaranteed not to divide by zero: `elem_size` is non-zero.
+ #[allow(clippy::arithmetic_side_effects)]
+ let elems = max_slice_and_padding_bytes / elem_size.get();
+ // Guaranteed not to overflow on multiplication: `usize::MAX >=
+ // max_slice_and_padding_bytes >= (max_slice_and_padding_bytes /
+ // elem_size) * elem_size`.
+ //
+ // Guaranteed not to overflow on addition:
+ // - max_slice_and_padding_bytes == max_total_bytes - offset
+ // - elems * elem_size <= max_slice_and_padding_bytes == max_total_bytes - offset
+ // - elems * elem_size + offset <= max_total_bytes <= usize::MAX
+ #[allow(clippy::arithmetic_side_effects)]
+ let without_padding = offset + elems * elem_size.get();
+ // `self_bytes` is equal to the offset bytes plus the bytes
+ // consumed by the trailing slice plus any padding bytes
+ // required to satisfy the alignment. Note that we have computed
+ // the maximum number of trailing slice elements that could fit
+ // in `self_bytes`, so any padding is guaranteed to be less than
+ // the size of an extra element.
+ //
+ // Guaranteed not to overflow:
+ // - By previous comment: without_padding == elems * elem_size +
+ // offset <= max_total_bytes
+ // - By construction, `max_total_bytes` is a multiple of
+ // `self.align`.
+ // - At most, adding padding needed to round `without_padding`
+ // up to the next multiple of the alignment will bring
+ // `self_bytes` up to `max_total_bytes`.
+ #[allow(clippy::arithmetic_side_effects)]
+ let self_bytes = without_padding
+ + util::core_layout::padding_needed_for(without_padding, self.align);
+ (elems, self_bytes)
+ }
+ };
+
+ __debug_assert!(self_bytes <= bytes_len);
+
+ let split_at = match cast_type {
+ _CastType::_Prefix => self_bytes,
+ // Guaranteed not to underflow:
+ // - In the `Sized` branch, only returns `size` if `size <=
+ // bytes_len`.
+ // - In the `SliceDst` branch, calculates `self_bytes <=
+            //   max_total_bytes`, which is upper-bounded by `bytes_len`.
+ #[allow(clippy::arithmetic_side_effects)]
+ _CastType::_Suffix => bytes_len - self_bytes,
+ };
+
+ Some((elems, split_at))
+ }
+}
+
+/// A trait which carries information about a type's layout that is used by the
+/// internals of this crate.
+///
+/// This trait is not meant for consumption by code outside of this crate. While
+/// the normal semver stability guarantees apply with respect to which types
+/// implement this trait and which trait implementations are implied by this
+/// trait, no semver stability guarantees are made regarding its internals; they
+/// may change at any time, and code which makes use of them may break.
+///
+/// # Safety
+///
+/// This trait does not convey any safety guarantees to code outside this crate.
+#[doc(hidden)] // TODO: Remove this once KnownLayout is used by other APIs
+pub unsafe trait KnownLayout {
+ // The `Self: Sized` bound makes it so that `KnownLayout` can still be
+ // object safe. It's not currently object safe thanks to `const LAYOUT`, and
+ // it likely won't be in the future, but there's no reason not to be
+ // forwards-compatible with object safety.
+ #[doc(hidden)]
+ fn only_derive_is_allowed_to_implement_this_trait()
+ where
+ Self: Sized;
+
+ #[doc(hidden)]
+ const LAYOUT: DstLayout;
+
+ /// SAFETY: The returned pointer has the same address and provenance as
+ /// `bytes`. If `Self` is a DST, the returned pointer's referent has `elems`
+ /// elements in its trailing slice. If `Self` is sized, `elems` is ignored.
+ #[doc(hidden)]
+ fn raw_from_ptr_len(bytes: NonNull<u8>, elems: usize) -> NonNull<Self>;
+}
+
+// SAFETY: Delegates safety to `DstLayout::for_slice`.
+unsafe impl<T: KnownLayout> KnownLayout for [T] {
+ #[allow(clippy::missing_inline_in_public_items)]
+ fn only_derive_is_allowed_to_implement_this_trait()
+ where
+ Self: Sized,
+ {
+ }
+ const LAYOUT: DstLayout = DstLayout::for_slice::<T>();
+
+ // SAFETY: `.cast` preserves address and provenance. The returned pointer
+ // refers to an object with `elems` elements by construction.
+ #[inline(always)]
+ fn raw_from_ptr_len(data: NonNull<u8>, elems: usize) -> NonNull<Self> {
+ // TODO(#67): Remove this allow. See NonNullExt for more details.
+ #[allow(unstable_name_collisions)]
+ NonNull::slice_from_raw_parts(data.cast::<T>(), elems)
+ }
+}
+
+#[rustfmt::skip]
+impl_known_layout!(
+ (),
+ u8, i8, u16, i16, u32, i32, u64, i64, u128, i128, usize, isize, f32, f64,
+ bool, char,
+ NonZeroU8, NonZeroI8, NonZeroU16, NonZeroI16, NonZeroU32, NonZeroI32,
+ NonZeroU64, NonZeroI64, NonZeroU128, NonZeroI128, NonZeroUsize, NonZeroIsize
+);
+#[rustfmt::skip]
+impl_known_layout!(
+ T => Option<T>,
+ T: ?Sized => PhantomData<T>,
+ T => Wrapping<T>,
+ T => MaybeUninit<T>,
+ T: ?Sized => *const T,
+ T: ?Sized => *mut T,
+);
+impl_known_layout!(const N: usize, T => [T; N]);
+
+safety_comment! {
+ /// SAFETY:
+ /// `str` and `ManuallyDrop<[T]>` [1] have the same representations as
+    /// `[u8]` and `[T]` respectively. `str` has different bit validity than
+ /// `[u8]`, but that doesn't affect the soundness of this impl.
+ ///
+ /// [1] Per https://doc.rust-lang.org/nightly/core/mem/struct.ManuallyDrop.html:
+ ///
+ /// `ManuallyDrop<T>` is guaranteed to have the same layout and bit
+ /// validity as `T`
+ ///
+ /// TODO(#429):
+ /// - Add quotes from docs.
+ /// - Once [1] (added in
+ /// https://github.com/rust-lang/rust/pull/115522) is available on stable,
+ /// quote the stable docs instead of the nightly docs.
+ unsafe_impl_known_layout!(#[repr([u8])] str);
+ unsafe_impl_known_layout!(T: ?Sized + KnownLayout => #[repr(T)] ManuallyDrop<T>);
+}
+
+/// Analyzes whether a type is [`FromZeroes`].
+///
+/// This derive analyzes, at compile time, whether the annotated type satisfies
+/// the [safety conditions] of `FromZeroes` and implements `FromZeroes` if it is
+/// sound to do so. This derive can be applied to structs, enums, and unions;
+/// e.g.:
+///
+/// ```
+/// # use zerocopy_derive::FromZeroes;
+/// #[derive(FromZeroes)]
+/// struct MyStruct {
+/// # /*
+/// ...
+/// # */
+/// }
+///
+/// #[derive(FromZeroes)]
+/// #[repr(u8)]
+/// enum MyEnum {
+/// # Variant0,
+/// # /*
+/// ...
+/// # */
+/// }
+///
+/// #[derive(FromZeroes)]
+/// union MyUnion {
+/// # variant: u8,
+/// # /*
+/// ...
+/// # */
+/// }
+/// ```
+///
+/// [safety conditions]: trait@FromZeroes#safety
+///
+/// # Analysis
+///
+/// *This section describes, roughly, the analysis performed by this derive to
+/// determine whether it is sound to implement `FromZeroes` for a given type.
+/// Unless you are modifying the implementation of this derive, or attempting to
+/// manually implement `FromZeroes` for a type yourself, you don't need to read
+/// this section.*
+///
+/// If a type has the following properties, then this derive can implement
+/// `FromZeroes` for that type:
+///
+/// - If the type is a struct, all of its fields must be `FromZeroes`.
+/// - If the type is an enum, it must be C-like (meaning that all variants have
+/// no fields) and it must have a variant with a discriminant of `0`. See [the
+/// reference] for a description of how discriminant values are chosen.
+/// - The type must not contain any [`UnsafeCell`]s (this is required in order
+/// for it to be sound to construct a `&[u8]` and a `&T` to the same region of
+/// memory). The type may contain references or pointers to `UnsafeCell`s so
+/// long as those values can themselves be initialized from zeroes
+/// (`FromZeroes` is not currently implemented for, e.g.,
+/// `Option<&UnsafeCell<_>>`, but it could be one day).
+///
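+/// For example, this analysis rejects the following enum, since no variant
+/// has a discriminant of `0` (a sketch; the exact diagnostic emitted by the
+/// derive is not specified here):
+///
+/// ```compile_fail
+/// # use zerocopy_derive::FromZeroes;
+/// #[derive(FromZeroes)]
+/// #[repr(u8)]
+/// enum NoZero {
+///     One = 1,
+///     Two = 2,
+/// }
+/// ```
+///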
+/// This analysis is subject to change. Unsafe code may *only* rely on the
+/// documented [safety conditions] of `FromZeroes`, and must *not* rely on the
+/// implementation details of this derive.
+///
+/// [the reference]: https://doc.rust-lang.org/reference/items/enumerations.html#custom-discriminant-values-for-fieldless-enumerations
+/// [`UnsafeCell`]: core::cell::UnsafeCell
+///
+/// ## Why isn't an explicit representation required for structs?
+///
+/// Neither this derive, nor the [safety conditions] of `FromZeroes`, requires
+/// that structs are marked with `#[repr(C)]`.
+///
+/// Per the [Rust reference][reference],
+///
+/// > The representation of a type can change the padding between fields, but
+/// does not change the layout of the fields themselves.
+///
+/// [reference]: https://doc.rust-lang.org/reference/type-layout.html#representations
+///
+/// Since the layout of structs only consists of padding bytes and field bytes,
+/// a struct is soundly `FromZeroes` if:
+/// 1. its padding is soundly `FromZeroes`, and
+/// 2. its fields are soundly `FromZeroes`.
+///
+/// The first condition is always satisfied: padding bytes do not have
+/// any validity constraints. A [discussion] of this point in the Unsafe Code
+/// Guidelines Working Group concluded that it would be virtually unimaginable
+/// for future versions of rustc to add validity constraints to padding bytes.
+///
+/// [discussion]: https://github.com/rust-lang/unsafe-code-guidelines/issues/174
+///
+/// Whether a struct is soundly `FromZeroes` therefore solely depends on whether
+/// its fields are `FromZeroes`.
+// TODO(#146): Document why we don't require an enum to have an explicit `repr`
+// attribute.
+#[cfg(any(feature = "derive", test))]
+#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
+pub use zerocopy_derive::FromZeroes;
+
+/// Types whose validity can be checked at runtime, allowing them to be
+/// conditionally converted from byte slices.
+///
+/// WARNING: Do not implement this trait yourself! Instead, use
+/// `#[derive(TryFromBytes)]`.
+///
+/// `TryFromBytes` types can safely be deserialized from an untrusted sequence
+/// of bytes by performing a runtime check that the byte sequence contains a
+/// valid instance of `Self`.
+///
+/// `TryFromBytes` is ignorant of byte order. For byte order-aware types, see
+/// the [`byteorder`] module.
+///
+/// # What is a "valid instance"?
+///
+/// In Rust, each type has *bit validity*, which refers to the set of bit
+/// patterns which may appear in an instance of that type. It is impossible for
+/// safe Rust code to produce values which violate bit validity (i.e., values
+/// outside of the "valid" set of bit patterns). If `unsafe` code produces an
+/// invalid value, this is considered [undefined behavior].
+///
+/// Rust's bit validity rules are still being decided. As a result, a type's
+/// bit patterns fall into three classes: those which are definitely valid,
+/// and whose validity is documented in the language; those which may or may not
+/// be considered valid at some point in the future; and those which are
+/// definitely invalid.
+///
+/// Zerocopy takes a conservative approach, and only considers a bit pattern to
+/// be valid if its validity is a documented guarantee provided by the
+/// language.
+///
+/// For most use cases, Rust's current guarantees align with programmers'
+/// intuitions about what ought to be valid. As a result, zerocopy's
+/// conservatism should not affect most users. One notable exception is unions,
+/// whose bit validity is very up in the air; zerocopy does not permit
+/// implementing `TryFromBytes` for any union type.
+///
+/// If you are negatively affected by lack of support for a particular type,
+/// we encourage you to let us know by [filing an issue][github-repo].
+///
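+/// # Example
+///
+/// ```ignore
+/// // A minimal sketch (`ignore`d because this API is unstable and hidden).
+/// // It relies on this crate implementing `TryFromBytes` and `KnownLayout`
+/// // for `bool`, whose only valid bit patterns are `0` and `1`.
+/// use zerocopy::TryFromBytes;
+///
+/// assert!(bool::try_from_ref(&[1]).is_some());
+/// assert!(bool::try_from_ref(&[2]).is_none());
+/// ```
+///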
+/// # Safety
+///
+/// On its own, `T: TryFromBytes` does not make any guarantees about the layout
+/// or representation of `T`. It merely provides the ability to perform a
+/// validity check at runtime via methods like [`try_from_ref`].
+///
+/// Currently, it is not possible to stably implement `TryFromBytes` other than
+/// by using `#[derive(TryFromBytes)]`. While there are `#[doc(hidden)]` items
+/// on this trait that provide well-defined safety invariants, no stability
+/// guarantees are made with respect to these items. In particular, future
+/// releases of zerocopy may make backwards-breaking changes to these items,
+/// including changes that only affect soundness, which may cause code which
+/// uses those items to silently become unsound.
+///
+/// [undefined behavior]: https://raphlinus.github.io/programming/rust/2018/08/17/undefined-behavior.html
+/// [github-repo]: https://github.com/google/zerocopy
+/// [`try_from_ref`]: TryFromBytes::try_from_ref
+// TODO(#5): Update `try_from_ref` doc link once it exists
+#[doc(hidden)]
+pub unsafe trait TryFromBytes {
+ /// Does a given memory range contain a valid instance of `Self`?
+ ///
+ /// # Safety
+ ///
+ /// ## Preconditions
+ ///
+ /// The memory referenced by `candidate` may only be accessed via reads for
+ /// the duration of this method call. This prohibits writes through mutable
+ /// references and through [`UnsafeCell`]s. There may exist immutable
+ /// references to the same memory which contain `UnsafeCell`s so long as:
+ /// - Those `UnsafeCell`s exist at the same byte ranges as `UnsafeCell`s in
+ /// `Self`. This is a bidirectional property: `Self` may not contain
+ /// `UnsafeCell`s where other references to the same memory do not, and
+ /// vice-versa.
+ /// - Those `UnsafeCell`s are never used to perform mutation for the
+ /// duration of this method call.
+ ///
+ /// The memory referenced by `candidate` may not be referenced by any
+ /// mutable references even if these references are not used to perform
+ /// mutation.
+ ///
+ /// `candidate` is not required to refer to a valid `Self`. However, it must
+ /// satisfy the requirement that uninitialized bytes may only be present
+ /// where it is possible for them to be present in `Self`. This is a dynamic
+ /// property: if, at a particular byte offset, a valid enum discriminant is
+ /// set, the subsequent bytes may only have uninitialized bytes as
+    /// specified by the corresponding enum.
+ ///
+ /// Formally, given `len = size_of_val_raw(candidate)`, at every byte
+ /// offset, `b`, in the range `[0, len)`:
+ /// - If, in all instances `s: Self` of length `len`, the byte at offset `b`
+ /// in `s` is initialized, then the byte at offset `b` within `*candidate`
+ /// must be initialized.
+ /// - Let `c` be the contents of the byte range `[0, b)` in `*candidate`.
+ /// Let `S` be the subset of valid instances of `Self` of length `len`
+ /// which contain `c` in the offset range `[0, b)`. If, for all instances
+ /// of `s: Self` in `S`, the byte at offset `b` in `s` is initialized,
+ /// then the byte at offset `b` in `*candidate` must be initialized.
+ ///
+ /// Pragmatically, this means that if `*candidate` is guaranteed to
+ /// contain an enum type at a particular offset, and the enum discriminant
+ /// stored in `*candidate` corresponds to a valid variant of that enum
+ /// type, then it is guaranteed that the appropriate bytes of `*candidate`
+ /// are initialized as defined by that variant's bit validity (although
+ /// note that the variant may contain another enum type, in which case the
+ /// same rules apply depending on the state of its discriminant, and so on
+ /// recursively).
+ ///
+ /// ## Postconditions
+ ///
+ /// Unsafe code may assume that, if `is_bit_valid(candidate)` returns true,
+ /// `*candidate` contains a valid `Self`.
+ ///
+ /// # Panics
+ ///
+ /// `is_bit_valid` may panic. Callers are responsible for ensuring that any
+ /// `unsafe` code remains sound even in the face of `is_bit_valid`
+ /// panicking. (We support user-defined validation routines; so long as
+ /// these routines are not required to be `unsafe`, there is no way to
+ /// ensure that these do not generate panics.)
+ ///
+ /// [`UnsafeCell`]: core::cell::UnsafeCell
+ #[doc(hidden)]
+ unsafe fn is_bit_valid(candidate: Ptr<'_, Self>) -> bool;
+
+ /// Attempts to interpret a byte slice as a `Self`.
+ ///
+ /// `try_from_ref` validates that `bytes` contains a valid `Self`, and that
+ /// it satisfies `Self`'s alignment requirement. If it does, then `bytes` is
+ /// reinterpreted as a `Self`.
+ ///
+ /// Note that Rust's bit validity rules are still being decided. As such,
+ /// there exist types whose bit validity is ambiguous. See the
+ /// `TryFromBytes` docs for a discussion of how these cases are handled.
+ // TODO(#251): In a future in which we distinguish between `FromBytes` and
+ // `RefFromBytes`, this requires `where Self: RefFromBytes` to disallow
+ // interior mutability.
+ #[inline]
+ #[doc(hidden)] // TODO(#5): Finalize name before remove this attribute.
+ fn try_from_ref(bytes: &[u8]) -> Option<&Self>
+ where
+ Self: KnownLayout,
+ {
+ let maybe_self = Ptr::from(bytes).try_cast_into_no_leftover::<Self>()?;
+
+ // SAFETY:
+ // - Since `bytes` is an immutable reference, we know that no mutable
+ // references exist to this memory region.
+ // - Since `[u8]` contains no `UnsafeCell`s, we know there are no
+ // `&UnsafeCell` references to this memory region.
+ // - Since we don't permit implementing `TryFromBytes` for types which
+ // contain `UnsafeCell`s, there are no `UnsafeCell`s in `Self`, and so
+ // the requirement that all references contain `UnsafeCell`s at the
+ // same offsets is trivially satisfied.
+ // - All bytes of `bytes` are initialized.
+ //
+ // This call may panic. If that happens, it doesn't cause any soundness
+ // issues, as we have not generated any invalid state which we need to
+ // fix before returning.
+ if unsafe { !Self::is_bit_valid(maybe_self) } {
+ return None;
+ }
+
+ // SAFETY:
+ // - Preconditions for `as_ref`:
+ // - `is_bit_valid` guarantees that `*maybe_self` contains a valid
+ // `Self`. Since `&[u8]` does not permit interior mutation, this
+ // cannot be invalidated after this method returns.
+ // - Since the argument and return types are immutable references,
+ // Rust will prevent the caller from producing any mutable
+ // references to the same memory region.
+ // - Since `Self` is not allowed to contain any `UnsafeCell`s and the
+ // same is true of `[u8]`, interior mutation is not possible. Thus,
+ // no mutation is possible. For the same reason, there is no
+ // mismatch between the two types in terms of which byte ranges are
+ // referenced as `UnsafeCell`s.
+ // - Since interior mutation isn't possible within `Self`, there's no
+ // way for the returned reference to be used to modify the byte range,
+ // and thus there's no way for the returned reference to be used to
+ // write an invalid `[u8]` which would be observable via the original
+ // `&[u8]`.
+ Some(unsafe { maybe_self.as_ref() })
+ }
+}
+
+/// Types for which a sequence of bytes all set to zero represents a valid
+/// instance of the type.
+///
+/// Any memory region of the appropriate length which is guaranteed to contain
+/// only zero bytes can be viewed as any `FromZeroes` type with no runtime
+/// overhead. This is useful whenever memory is known to be in a zeroed state,
+/// such as memory returned from some allocation routines.
+///
+/// # Implementation
+///
+/// **Do not implement this trait yourself!** Instead, use
+/// [`#[derive(FromZeroes)]`][derive] (requires the `derive` Cargo feature);
+/// e.g.:
+///
+/// ```
+/// # use zerocopy_derive::FromZeroes;
+/// #[derive(FromZeroes)]
+/// struct MyStruct {
+/// # /*
+/// ...
+/// # */
+/// }
+///
+/// #[derive(FromZeroes)]
+/// #[repr(u8)]
+/// enum MyEnum {
+/// # Variant0,
+/// # /*
+/// ...
+/// # */
+/// }
+///
+/// #[derive(FromZeroes)]
+/// union MyUnion {
+/// # variant: u8,
+/// # /*
+/// ...
+/// # */
+/// }
+/// ```
+///
+/// This derive performs a sophisticated, compile-time safety analysis to
+/// determine whether a type is `FromZeroes`.
+///
+/// # Safety
+///
+/// *This section describes what is required in order for `T: FromZeroes`, and
+/// what unsafe code may assume of such types. If you don't plan on implementing
+/// `FromZeroes` manually, and you don't plan on writing unsafe code that
+/// operates on `FromZeroes` types, then you don't need to read this section.*
+///
+/// If `T: FromZeroes`, then unsafe code may assume that:
+/// - It is sound to treat any initialized sequence of zero bytes of length
+/// `size_of::<T>()` as a `T`.
+/// - Given `b: &[u8]` where `b.len() == size_of::<T>()`, `b` is aligned to
+/// `align_of::<T>()`, and `b` contains only zero bytes, it is sound to
+/// construct a `t: &T` at the same address as `b`, and it is sound for both
+/// `b` and `t` to be live at the same time.
+///
+/// If a type is marked as `FromZeroes` which violates this contract, it may
+/// cause undefined behavior.
+///
+/// `#[derive(FromZeroes)]` only permits [types which satisfy these
+/// requirements][derive-analysis].
+///
+#[cfg_attr(
+ feature = "derive",
+ doc = "[derive]: zerocopy_derive::FromZeroes",
+ doc = "[derive-analysis]: zerocopy_derive::FromZeroes#analysis"
+)]
+#[cfg_attr(
+ not(feature = "derive"),
+ doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromZeroes.html"),
+ doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromZeroes.html#analysis"),
+)]
+pub unsafe trait FromZeroes {
+ // The `Self: Sized` bound makes it so that `FromZeroes` is still object
+ // safe.
+ #[doc(hidden)]
+ fn only_derive_is_allowed_to_implement_this_trait()
+ where
+ Self: Sized;
+
+ /// Overwrites `self` with zeroes.
+ ///
+ /// Sets every byte in `self` to 0. While this is similar to doing `*self =
+ /// Self::new_zeroed()`, it differs in that `zero` does not semantically
+ /// drop the current value and replace it with a new one - it simply
+ /// modifies the bytes of the existing value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use zerocopy::FromZeroes;
+ /// # use zerocopy_derive::*;
+ /// #
+ /// #[derive(FromZeroes)]
+ /// #[repr(C)]
+ /// struct PacketHeader {
+ /// src_port: [u8; 2],
+ /// dst_port: [u8; 2],
+ /// length: [u8; 2],
+ /// checksum: [u8; 2],
+ /// }
+ ///
+ /// let mut header = PacketHeader {
+ /// src_port: 100u16.to_be_bytes(),
+ /// dst_port: 200u16.to_be_bytes(),
+ /// length: 300u16.to_be_bytes(),
+ /// checksum: 400u16.to_be_bytes(),
+ /// };
+ ///
+ /// header.zero();
+ ///
+ /// assert_eq!(header.src_port, [0, 0]);
+ /// assert_eq!(header.dst_port, [0, 0]);
+ /// assert_eq!(header.length, [0, 0]);
+ /// assert_eq!(header.checksum, [0, 0]);
+ /// ```
+ #[inline(always)]
+ fn zero(&mut self) {
+ let slf: *mut Self = self;
+ let len = mem::size_of_val(self);
+ // SAFETY:
+ // - `self` is guaranteed by the type system to be valid for writes of
+ // size `size_of_val(self)`.
+ // - `u8`'s alignment is 1, and thus `self` is guaranteed to be aligned
+ // as required by `u8`.
+ // - Since `Self: FromZeroes`, the all-zeroes instance is a valid
+        //   instance of `Self`.
+ //
+ // TODO(#429): Add references to docs and quotes.
+ unsafe { ptr::write_bytes(slf.cast::<u8>(), 0, len) };
+ }
+
+ /// Creates an instance of `Self` from zeroed bytes.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use zerocopy::FromZeroes;
+ /// # use zerocopy_derive::*;
+ /// #
+ /// #[derive(FromZeroes)]
+ /// #[repr(C)]
+ /// struct PacketHeader {
+ /// src_port: [u8; 2],
+ /// dst_port: [u8; 2],
+ /// length: [u8; 2],
+ /// checksum: [u8; 2],
+ /// }
+ ///
+ /// let header: PacketHeader = FromZeroes::new_zeroed();
+ ///
+ /// assert_eq!(header.src_port, [0, 0]);
+ /// assert_eq!(header.dst_port, [0, 0]);
+ /// assert_eq!(header.length, [0, 0]);
+ /// assert_eq!(header.checksum, [0, 0]);
+ /// ```
+ #[inline(always)]
+ fn new_zeroed() -> Self
+ where
+ Self: Sized,
+ {
+ // SAFETY: `FromZeroes` says that the all-zeroes bit pattern is legal.
+ unsafe { mem::zeroed() }
+ }
+
+ /// Creates a `Box<Self>` from zeroed bytes.
+ ///
+ /// This function is useful for allocating large values on the heap and
+ /// zero-initializing them, without ever creating a temporary instance of
+ /// `Self` on the stack. For example, `<[u8; 1048576]>::new_box_zeroed()`
+ /// will allocate `[u8; 1048576]` directly on the heap; it does not require
+ /// storing `[u8; 1048576]` in a temporary variable on the stack.
+ ///
+ /// On systems that use a heap implementation that supports allocating from
+ /// pre-zeroed memory, using `new_box_zeroed` (or related functions) may
+ /// have performance benefits.
+ ///
+    /// Note that `Box<Self>` can later be converted into other container
+    /// types such as `Arc<Self>`, though such conversions may themselves
+    /// copy and reallocate.
+ ///
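+    /// # Examples
+    ///
+    /// A `u32` allocated and zero-initialized directly on the heap:
+    ///
+    /// ```
+    /// # use zerocopy::FromZeroes;
+    /// let b: Box<u32> = u32::new_box_zeroed();
+    ///
+    /// assert_eq!(*b, 0);
+    /// ```
+    ///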
+ /// # Panics
+ ///
+ /// Panics if allocation of `size_of::<Self>()` bytes fails.
+ #[cfg(feature = "alloc")]
+ #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
+ #[inline]
+ fn new_box_zeroed() -> Box<Self>
+ where
+ Self: Sized,
+ {
+        // If `Self` is a ZST, then return a proper boxed instance of it. There is
+ // no allocation, but `Box` does require a correct dangling pointer.
+ let layout = Layout::new::<Self>();
+ if layout.size() == 0 {
+ return Box::new(Self::new_zeroed());
+ }
+
+ // TODO(#429): Add a "SAFETY" comment and remove this `allow`.
+ #[allow(clippy::undocumented_unsafe_blocks)]
+ let ptr = unsafe { alloc::alloc::alloc_zeroed(layout).cast::<Self>() };
+ if ptr.is_null() {
+ alloc::alloc::handle_alloc_error(layout);
+ }
+ // TODO(#429): Add a "SAFETY" comment and remove this `allow`.
+ #[allow(clippy::undocumented_unsafe_blocks)]
+ unsafe {
+ Box::from_raw(ptr)
+ }
+ }
+
+ /// Creates a `Box<[Self]>` (a boxed slice) from zeroed bytes.
+ ///
+ /// This function is useful for allocating large values of `[Self]` on the
+ /// heap and zero-initializing them, without ever creating a temporary
+ /// instance of `[Self; _]` on the stack. For example,
+ /// `u8::new_box_slice_zeroed(1048576)` will allocate the slice directly on
+ /// the heap; it does not require storing the slice on the stack.
+ ///
+ /// On systems that use a heap implementation that supports allocating from
+ /// pre-zeroed memory, using `new_box_slice_zeroed` may have performance
+ /// benefits.
+ ///
+ /// If `Self` is a zero-sized type, then this function will return a
+ /// `Box<[Self]>` that has the correct `len`. Such a box cannot contain any
+ /// actual information, but its `len()` property will report the correct
+ /// value.
+ ///
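+    /// # Examples
+    ///
+    /// A boxed slice of four zeroed `u16`s:
+    ///
+    /// ```
+    /// # use zerocopy::FromZeroes;
+    /// let s: Box<[u16]> = u16::new_box_slice_zeroed(4);
+    ///
+    /// assert_eq!(&*s, &[0u16, 0, 0, 0][..]);
+    /// ```
+    ///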
+ /// # Panics
+ ///
+ /// * Panics if `size_of::<Self>() * len` overflows.
+ /// * Panics if allocation of `size_of::<Self>() * len` bytes fails.
+ #[cfg(feature = "alloc")]
+ #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
+ #[inline]
+ fn new_box_slice_zeroed(len: usize) -> Box<[Self]>
+ where
+ Self: Sized,
+ {
+ let size = mem::size_of::<Self>()
+ .checked_mul(len)
+ .expect("mem::size_of::<Self>() * len overflows `usize`");
+ let align = mem::align_of::<Self>();
+ // On stable Rust versions <= 1.64.0, `Layout::from_size_align` has a
+ // bug in which sufficiently-large allocations (those which, when
+ // rounded up to the alignment, overflow `isize`) are not rejected,
+ // which can cause undefined behavior. See #64 for details.
+ //
+ // TODO(#67): Once our MSRV is > 1.64.0, remove this assertion.
+ #[allow(clippy::as_conversions)]
+ let max_alloc = (isize::MAX as usize).saturating_sub(align);
+ assert!(size <= max_alloc);
+ // TODO(https://github.com/rust-lang/rust/issues/55724): Use
+ // `Layout::repeat` once it's stabilized.
+ let layout =
+ Layout::from_size_align(size, align).expect("total allocation size overflows `isize`");
+
+ let ptr = if layout.size() != 0 {
+ // TODO(#429): Add a "SAFETY" comment and remove this `allow`.
+ #[allow(clippy::undocumented_unsafe_blocks)]
+ let ptr = unsafe { alloc::alloc::alloc_zeroed(layout).cast::<Self>() };
+ if ptr.is_null() {
+ alloc::alloc::handle_alloc_error(layout);
+ }
+ ptr
+ } else {
+ // `Box<[T]>` does not allocate when `T` is zero-sized or when `len`
+ // is zero, but it does require a non-null dangling pointer for its
+ // allocation.
+ NonNull::<Self>::dangling().as_ptr()
+ };
+
+ // TODO(#429): Add a "SAFETY" comment and remove this `allow`.
+ #[allow(clippy::undocumented_unsafe_blocks)]
+ unsafe {
+ Box::from_raw(slice::from_raw_parts_mut(ptr, len))
+ }
+ }
+
+ /// Creates a `Vec<Self>` from zeroed bytes.
+ ///
+ /// This function is useful for allocating large values of `Vec`s and
+ /// zero-initializing them, without ever creating a temporary instance of
+ /// `[Self; _]` (or many temporary instances of `Self`) on the stack. For
+ /// example, `u8::new_vec_zeroed(1048576)` will allocate directly on the
+ /// heap; it does not require storing intermediate values on the stack.
+ ///
+ /// On systems that use a heap implementation that supports allocating from
+ /// pre-zeroed memory, using `new_vec_zeroed` may have performance benefits.
+ ///
+ /// If `Self` is a zero-sized type, then this function will return a
+ /// `Vec<Self>` that has the correct `len`. Such a `Vec` cannot contain any
+ /// actual information, but its `len()` property will report the correct
+ /// value.
+ ///
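+    /// # Examples
+    ///
+    /// A `Vec` of three zeroed bytes:
+    ///
+    /// ```
+    /// # use zerocopy::FromZeroes;
+    /// let v: Vec<u8> = u8::new_vec_zeroed(3);
+    ///
+    /// assert_eq!(v, [0u8, 0, 0]);
+    /// ```
+    ///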
+ /// # Panics
+ ///
+ /// * Panics if `size_of::<Self>() * len` overflows.
+ /// * Panics if allocation of `size_of::<Self>() * len` bytes fails.
+ #[cfg(feature = "alloc")]
+ #[cfg_attr(doc_cfg, doc(cfg(feature = "new_vec_zeroed")))]
+ #[inline(always)]
+ fn new_vec_zeroed(len: usize) -> Vec<Self>
+ where
+ Self: Sized,
+ {
+ Self::new_box_slice_zeroed(len).into()
+ }
+}
+
+/// Analyzes whether a type is [`FromBytes`].
+///
+/// This derive analyzes, at compile time, whether the annotated type satisfies
+/// the [safety conditions] of `FromBytes` and implements `FromBytes` if it is
+/// sound to do so. This derive can be applied to structs, enums, and unions;
+/// e.g.:
+///
+/// ```
+/// # use zerocopy_derive::{FromBytes, FromZeroes};
+/// #[derive(FromZeroes, FromBytes)]
+/// struct MyStruct {
+/// # /*
+/// ...
+/// # */
+/// }
+///
+/// #[derive(FromZeroes, FromBytes)]
+/// #[repr(u8)]
+/// enum MyEnum {
+/// # V00, V01, V02, V03, V04, V05, V06, V07, V08, V09, V0A, V0B, V0C, V0D, V0E,
+/// # V0F, V10, V11, V12, V13, V14, V15, V16, V17, V18, V19, V1A, V1B, V1C, V1D,
+/// # V1E, V1F, V20, V21, V22, V23, V24, V25, V26, V27, V28, V29, V2A, V2B, V2C,
+/// # V2D, V2E, V2F, V30, V31, V32, V33, V34, V35, V36, V37, V38, V39, V3A, V3B,
+/// # V3C, V3D, V3E, V3F, V40, V41, V42, V43, V44, V45, V46, V47, V48, V49, V4A,
+/// # V4B, V4C, V4D, V4E, V4F, V50, V51, V52, V53, V54, V55, V56, V57, V58, V59,
+/// # V5A, V5B, V5C, V5D, V5E, V5F, V60, V61, V62, V63, V64, V65, V66, V67, V68,
+/// # V69, V6A, V6B, V6C, V6D, V6E, V6F, V70, V71, V72, V73, V74, V75, V76, V77,
+/// # V78, V79, V7A, V7B, V7C, V7D, V7E, V7F, V80, V81, V82, V83, V84, V85, V86,
+/// # V87, V88, V89, V8A, V8B, V8C, V8D, V8E, V8F, V90, V91, V92, V93, V94, V95,
+/// # V96, V97, V98, V99, V9A, V9B, V9C, V9D, V9E, V9F, VA0, VA1, VA2, VA3, VA4,
+/// # VA5, VA6, VA7, VA8, VA9, VAA, VAB, VAC, VAD, VAE, VAF, VB0, VB1, VB2, VB3,
+/// # VB4, VB5, VB6, VB7, VB8, VB9, VBA, VBB, VBC, VBD, VBE, VBF, VC0, VC1, VC2,
+/// # VC3, VC4, VC5, VC6, VC7, VC8, VC9, VCA, VCB, VCC, VCD, VCE, VCF, VD0, VD1,
+/// # VD2, VD3, VD4, VD5, VD6, VD7, VD8, VD9, VDA, VDB, VDC, VDD, VDE, VDF, VE0,
+/// # VE1, VE2, VE3, VE4, VE5, VE6, VE7, VE8, VE9, VEA, VEB, VEC, VED, VEE, VEF,
+/// # VF0, VF1, VF2, VF3, VF4, VF5, VF6, VF7, VF8, VF9, VFA, VFB, VFC, VFD, VFE,
+/// # VFF,
+/// # /*
+/// ...
+/// # */
+/// }
+///
+/// #[derive(FromZeroes, FromBytes)]
+/// union MyUnion {
+/// # variant: u8,
+/// # /*
+/// ...
+/// # */
+/// }
+/// ```
+///
+/// [safety conditions]: trait@FromBytes#safety
+///
+/// # Analysis
+///
+/// *This section describes, roughly, the analysis performed by this derive to
+/// determine whether it is sound to implement `FromBytes` for a given type.
+/// Unless you are modifying the implementation of this derive, or attempting to
+/// manually implement `FromBytes` for a type yourself, you don't need to read
+/// this section.*
+///
+/// If a type has the following properties, then this derive can implement
+/// `FromBytes` for that type:
+///
+/// - If the type is a struct, all of its fields must be `FromBytes`.
+/// - If the type is an enum:
+/// - It must be a C-like enum (meaning that all variants have no fields).
+/// - It must have a defined representation (`repr`s `C`, `u8`, `u16`, `u32`,
+/// `u64`, `usize`, `i8`, `i16`, `i32`, `i64`, or `isize`).
+/// - The maximum number of discriminants must be used (so that every possible
+/// bit pattern is a valid one). Be very careful when using the `C`,
+/// `usize`, or `isize` representations, as their size is
+/// platform-dependent.
+/// - The type must not contain any [`UnsafeCell`]s (this is required in order
+/// for it to be sound to construct a `&[u8]` and a `&T` to the same region of
+/// memory). The type may contain references or pointers to `UnsafeCell`s so
+///   long as those values can themselves be constructed from any byte pattern
+/// (`FromBytes` is not currently implemented for, e.g., `Option<*const
+/// UnsafeCell<_>>`, but it could be one day).
+///
+/// [`UnsafeCell`]: core::cell::UnsafeCell
+///
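+/// For example, this analysis rejects the following enum, since its single
+/// variant does not cover all 256 possible bit patterns of its `repr(u8)`
+/// discriminant (a sketch; the exact diagnostic emitted by the derive is not
+/// specified here):
+///
+/// ```compile_fail
+/// # use zerocopy_derive::{FromBytes, FromZeroes};
+/// #[derive(FromZeroes, FromBytes)]
+/// #[repr(u8)]
+/// enum Partial {
+///     Zero = 0,
+/// }
+/// ```
+///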
+/// This analysis is subject to change. Unsafe code may *only* rely on the
+/// documented [safety conditions] of `FromBytes`, and must *not* rely on the
+/// implementation details of this derive.
+///
+/// ## Why isn't an explicit representation required for structs?
+///
+/// Neither this derive, nor the [safety conditions] of `FromBytes`, requires
+/// that structs are marked with `#[repr(C)]`.
+///
+/// Per the [Rust reference][reference],
+///
+/// > The representation of a type can change the padding between fields, but
+/// does not change the layout of the fields themselves.
+///
+/// [reference]: https://doc.rust-lang.org/reference/type-layout.html#representations
+///
+/// Since the layout of structs only consists of padding bytes and field bytes,
+/// a struct is soundly `FromBytes` if:
+/// 1. its padding is soundly `FromBytes`, and
+/// 2. its fields are soundly `FromBytes`.
+///
+/// The first condition is always satisfied: padding bytes do not have
+/// any validity constraints. A [discussion] of this point in the Unsafe Code
+/// Guidelines Working Group concluded that it would be virtually unimaginable
+/// for future versions of rustc to add validity constraints to padding bytes.
+///
+/// [discussion]: https://github.com/rust-lang/unsafe-code-guidelines/issues/174
+///
+/// Whether a struct is soundly `FromBytes` therefore solely depends on whether
+/// its fields are `FromBytes`.
+// TODO(#146): Document why we don't require an enum to have an explicit `repr`
+// attribute.
+#[cfg(any(feature = "derive", test))]
+#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
+pub use zerocopy_derive::FromBytes;
+
+/// Types for which any bit pattern is valid.
+///
+/// Any memory region of the appropriate length which contains initialized bytes
+/// can be viewed as any `FromBytes` type with no runtime overhead. This is
+/// useful for efficiently parsing bytes as structured data.
+///
+/// # Implementation
+///
+/// **Do not implement this trait yourself!** Instead, use
+/// [`#[derive(FromBytes)]`][derive] (requires the `derive` Cargo feature);
+/// e.g.:
+///
+/// ```
+/// # use zerocopy_derive::{FromBytes, FromZeroes};
+/// #[derive(FromZeroes, FromBytes)]
+/// struct MyStruct {
+/// # /*
+/// ...
+/// # */
+/// }
+///
+/// #[derive(FromZeroes, FromBytes)]
+/// #[repr(u8)]
+/// enum MyEnum {
+/// # V00, V01, V02, V03, V04, V05, V06, V07, V08, V09, V0A, V0B, V0C, V0D, V0E,
+/// # V0F, V10, V11, V12, V13, V14, V15, V16, V17, V18, V19, V1A, V1B, V1C, V1D,
+/// # V1E, V1F, V20, V21, V22, V23, V24, V25, V26, V27, V28, V29, V2A, V2B, V2C,
+/// # V2D, V2E, V2F, V30, V31, V32, V33, V34, V35, V36, V37, V38, V39, V3A, V3B,
+/// # V3C, V3D, V3E, V3F, V40, V41, V42, V43, V44, V45, V46, V47, V48, V49, V4A,
+/// # V4B, V4C, V4D, V4E, V4F, V50, V51, V52, V53, V54, V55, V56, V57, V58, V59,
+/// # V5A, V5B, V5C, V5D, V5E, V5F, V60, V61, V62, V63, V64, V65, V66, V67, V68,
+/// # V69, V6A, V6B, V6C, V6D, V6E, V6F, V70, V71, V72, V73, V74, V75, V76, V77,
+/// # V78, V79, V7A, V7B, V7C, V7D, V7E, V7F, V80, V81, V82, V83, V84, V85, V86,
+/// # V87, V88, V89, V8A, V8B, V8C, V8D, V8E, V8F, V90, V91, V92, V93, V94, V95,
+/// # V96, V97, V98, V99, V9A, V9B, V9C, V9D, V9E, V9F, VA0, VA1, VA2, VA3, VA4,
+/// # VA5, VA6, VA7, VA8, VA9, VAA, VAB, VAC, VAD, VAE, VAF, VB0, VB1, VB2, VB3,
+/// # VB4, VB5, VB6, VB7, VB8, VB9, VBA, VBB, VBC, VBD, VBE, VBF, VC0, VC1, VC2,
+/// # VC3, VC4, VC5, VC6, VC7, VC8, VC9, VCA, VCB, VCC, VCD, VCE, VCF, VD0, VD1,
+/// # VD2, VD3, VD4, VD5, VD6, VD7, VD8, VD9, VDA, VDB, VDC, VDD, VDE, VDF, VE0,
+/// # VE1, VE2, VE3, VE4, VE5, VE6, VE7, VE8, VE9, VEA, VEB, VEC, VED, VEE, VEF,
+/// # VF0, VF1, VF2, VF3, VF4, VF5, VF6, VF7, VF8, VF9, VFA, VFB, VFC, VFD, VFE,
+/// # VFF,
+/// # /*
+/// ...
+/// # */
+/// }
+///
+/// #[derive(FromZeroes, FromBytes)]
+/// union MyUnion {
+/// # variant: u8,
+/// # /*
+/// ...
+/// # */
+/// }
+/// ```
+///
+/// This derive performs a sophisticated, compile-time safety analysis to
+/// determine whether a type is `FromBytes`.
+///
+/// # Safety
+///
+/// *This section describes what is required in order for `T: FromBytes`, and
+/// what unsafe code may assume of such types. If you don't plan on implementing
+/// `FromBytes` manually, and you don't plan on writing unsafe code that
+/// operates on `FromBytes` types, then you don't need to read this section.*
+///
+/// If `T: FromBytes`, then unsafe code may assume that:
+/// - It is sound to treat any initialized sequence of bytes of length
+/// `size_of::<T>()` as a `T`.
+/// - Given `b: &[u8]` where `b.len() == size_of::<T>()`, `b` is aligned to
+/// `align_of::<T>()` it is sound to construct a `t: &T` at the same address
+/// as `b`, and it is sound for both `b` and `t` to be live at the same time.
+///
+/// If a type is marked as `FromBytes` which violates this contract, it may
+/// cause undefined behavior.
+///
+/// `#[derive(FromBytes)]` only permits [types which satisfy these
+/// requirements][derive-analysis].
+///
+#[cfg_attr(
+ feature = "derive",
+ doc = "[derive]: zerocopy_derive::FromBytes",
+ doc = "[derive-analysis]: zerocopy_derive::FromBytes#analysis"
+)]
+#[cfg_attr(
+ not(feature = "derive"),
+ doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromBytes.html"),
+ doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromBytes.html#analysis"),
+)]
+pub unsafe trait FromBytes: FromZeroes {
+ // The `Self: Sized` bound makes it so that `FromBytes` is still object
+ // safe.
+ #[doc(hidden)]
+ fn only_derive_is_allowed_to_implement_this_trait()
+ where
+ Self: Sized;
+
+ /// Interprets the given `bytes` as a `&Self` without copying.
+ ///
+ /// If `bytes.len() != size_of::<Self>()` or `bytes` is not aligned to
+ /// `align_of::<Self>()`, this returns `None`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use zerocopy::FromBytes;
+ /// # use zerocopy_derive::*;
+ ///
+ /// #[derive(FromZeroes, FromBytes)]
+ /// #[repr(C)]
+ /// struct PacketHeader {
+ /// src_port: [u8; 2],
+ /// dst_port: [u8; 2],
+ /// length: [u8; 2],
+ /// checksum: [u8; 2],
+ /// }
+ ///
+ /// // These bytes encode a `PacketHeader`.
+ /// let bytes = [0, 1, 2, 3, 4, 5, 6, 7].as_slice();
+ ///
+ /// let header = PacketHeader::ref_from(bytes).unwrap();
+ ///
+ /// assert_eq!(header.src_port, [0, 1]);
+ /// assert_eq!(header.dst_port, [2, 3]);
+ /// assert_eq!(header.length, [4, 5]);
+ /// assert_eq!(header.checksum, [6, 7]);
+ /// ```
+ #[inline]
+ fn ref_from(bytes: &[u8]) -> Option<&Self>
+ where
+ Self: Sized,
+ {
+ Ref::<&[u8], Self>::new(bytes).map(Ref::into_ref)
+ }
+
+ /// Interprets the prefix of the given `bytes` as a `&Self` without copying.
+ ///
+ /// `ref_from_prefix` returns a reference to the first `size_of::<Self>()`
+ /// bytes of `bytes`. If `bytes.len() < size_of::<Self>()` or `bytes` is not
+ /// aligned to `align_of::<Self>()`, this returns `None`.
+ ///
+ /// To also access the prefix bytes, use [`Ref::new_from_prefix`]. Then, use
+ /// [`Ref::into_ref`] to get a `&Self` with the same lifetime.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use zerocopy::FromBytes;
+ /// # use zerocopy_derive::*;
+ ///
+ /// #[derive(FromZeroes, FromBytes)]
+ /// #[repr(C)]
+ /// struct PacketHeader {
+ /// src_port: [u8; 2],
+ /// dst_port: [u8; 2],
+ /// length: [u8; 2],
+ /// checksum: [u8; 2],
+ /// }
+ ///
+ /// // These are more bytes than are needed to encode a `PacketHeader`.
+ /// let bytes = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9].as_slice();
+ ///
+ /// let header = PacketHeader::ref_from_prefix(bytes).unwrap();
+ ///
+ /// assert_eq!(header.src_port, [0, 1]);
+ /// assert_eq!(header.dst_port, [2, 3]);
+ /// assert_eq!(header.length, [4, 5]);
+ /// assert_eq!(header.checksum, [6, 7]);
+ /// ```
+ #[inline]
+ fn ref_from_prefix(bytes: &[u8]) -> Option<&Self>
+ where
+ Self: Sized,
+ {
+ Ref::<&[u8], Self>::new_from_prefix(bytes).map(|(r, _)| r.into_ref())
+ }
+
+ /// Interprets the suffix of the given `bytes` as a `&Self` without copying.
+ ///
+ /// `ref_from_suffix` returns a reference to the last `size_of::<Self>()`
+ /// bytes of `bytes`. If `bytes.len() < size_of::<Self>()` or the suffix of
+ /// `bytes` is not aligned to `align_of::<Self>()`, this returns `None`.
+ ///
+ /// To also access the suffix bytes, use [`Ref::new_from_suffix`]. Then, use
+ /// [`Ref::into_ref`] to get a `&Self` with the same lifetime.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use zerocopy::FromBytes;
+ /// # use zerocopy_derive::*;
+ ///
+ /// #[derive(FromZeroes, FromBytes)]
+ /// #[repr(C)]
+ /// struct PacketTrailer {
+ /// frame_check_sequence: [u8; 4],
+ /// }
+ ///
+ /// // These are more bytes than are needed to encode a `PacketTrailer`.
+ /// let bytes = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9].as_slice();
+ ///
+ /// let trailer = PacketTrailer::ref_from_suffix(bytes).unwrap();
+ ///
+ /// assert_eq!(trailer.frame_check_sequence, [6, 7, 8, 9]);
+ /// ```
+ #[inline]
+ fn ref_from_suffix(bytes: &[u8]) -> Option<&Self>
+ where
+ Self: Sized,
+ {
+ Ref::<&[u8], Self>::new_from_suffix(bytes).map(|(_, r)| r.into_ref())
+ }
+
+ /// Interprets the given `bytes` as a `&mut Self` without copying.
+ ///
+ /// If `bytes.len() != size_of::<Self>()` or `bytes` is not aligned to
+ /// `align_of::<Self>()`, this returns `None`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use zerocopy::FromBytes;
+ /// # use zerocopy_derive::*;
+ ///
+ /// #[derive(AsBytes, FromZeroes, FromBytes)]
+ /// #[repr(C)]
+ /// struct PacketHeader {
+ /// src_port: [u8; 2],
+ /// dst_port: [u8; 2],
+ /// length: [u8; 2],
+ /// checksum: [u8; 2],
+ /// }
+ ///
+ /// // These bytes encode a `PacketHeader`.
+ /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7][..];
+ ///
+ /// let header = PacketHeader::mut_from(bytes).unwrap();
+ ///
+ /// assert_eq!(header.src_port, [0, 1]);
+ /// assert_eq!(header.dst_port, [2, 3]);
+ /// assert_eq!(header.length, [4, 5]);
+ /// assert_eq!(header.checksum, [6, 7]);
+ ///
+ /// header.checksum = [0, 0];
+ ///
+ /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 0, 0]);
+ /// ```
+ #[inline]
+ fn mut_from(bytes: &mut [u8]) -> Option<&mut Self>
+ where
+ Self: Sized + AsBytes,
+ {
+ Ref::<&mut [u8], Self>::new(bytes).map(Ref::into_mut)
+ }
+
+ /// Interprets the prefix of the given `bytes` as a `&mut Self` without
+ /// copying.
+ ///
+ /// `mut_from_prefix` returns a reference to the first `size_of::<Self>()`
+ /// bytes of `bytes`. If `bytes.len() < size_of::<Self>()` or `bytes` is not
+ /// aligned to `align_of::<Self>()`, this returns `None`.
+ ///
+ /// To also access the prefix bytes, use [`Ref::new_from_prefix`]. Then, use
+ /// [`Ref::into_mut`] to get a `&mut Self` with the same lifetime.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use zerocopy::FromBytes;
+ /// # use zerocopy_derive::*;
+ ///
+ /// #[derive(AsBytes, FromZeroes, FromBytes)]
+ /// #[repr(C)]
+ /// struct PacketHeader {
+ /// src_port: [u8; 2],
+ /// dst_port: [u8; 2],
+ /// length: [u8; 2],
+ /// checksum: [u8; 2],
+ /// }
+ ///
+ /// // These are more bytes than are needed to encode a `PacketHeader`.
+ /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
+ ///
+ /// let header = PacketHeader::mut_from_prefix(bytes).unwrap();
+ ///
+ /// assert_eq!(header.src_port, [0, 1]);
+ /// assert_eq!(header.dst_port, [2, 3]);
+ /// assert_eq!(header.length, [4, 5]);
+ /// assert_eq!(header.checksum, [6, 7]);
+ ///
+ /// header.checksum = [0, 0];
+ ///
+ /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 0, 0, 8, 9]);
+ /// ```
+ #[inline]
+ fn mut_from_prefix(bytes: &mut [u8]) -> Option<&mut Self>
+ where
+ Self: Sized + AsBytes,
+ {
+ Ref::<&mut [u8], Self>::new_from_prefix(bytes).map(|(r, _)| r.into_mut())
+ }
+
+ /// Interprets the suffix of the given `bytes` as a `&mut Self` without copying.
+ ///
+ /// `mut_from_suffix` returns a reference to the last `size_of::<Self>()`
+ /// bytes of `bytes`. If `bytes.len() < size_of::<Self>()` or the suffix of
+ /// `bytes` is not aligned to `align_of::<Self>()`, this returns `None`.
+ ///
+ /// To also access the suffix bytes, use [`Ref::new_from_suffix`]. Then,
+ /// use [`Ref::into_mut`] to get a `&mut Self` with the same lifetime.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use zerocopy::FromBytes;
+ /// # use zerocopy_derive::*;
+ ///
+ /// #[derive(AsBytes, FromZeroes, FromBytes)]
+ /// #[repr(C)]
+ /// struct PacketTrailer {
+ /// frame_check_sequence: [u8; 4],
+ /// }
+ ///
+ /// // These are more bytes than are needed to encode a `PacketTrailer`.
+ /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
+ ///
+ /// let trailer = PacketTrailer::mut_from_suffix(bytes).unwrap();
+ ///
+ /// assert_eq!(trailer.frame_check_sequence, [6, 7, 8, 9]);
+ ///
+ /// trailer.frame_check_sequence = [0, 0, 0, 0];
+ ///
+ /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 0, 0, 0, 0]);
+ /// ```
+ #[inline]
+ fn mut_from_suffix(bytes: &mut [u8]) -> Option<&mut Self>
+ where
+ Self: Sized + AsBytes,
+ {
+ Ref::<&mut [u8], Self>::new_from_suffix(bytes).map(|(_, r)| r.into_mut())
+ }
+
+ /// Interprets the given `bytes` as a `&[Self]` without copying.
+ ///
+ /// If `bytes.len() % size_of::<Self>() != 0` or `bytes` is not aligned to
+ /// `align_of::<Self>()`, this returns `None`.
+ ///
+ /// If you need to convert a specific number of slice elements, see
+ /// [`slice_from_prefix`](FromBytes::slice_from_prefix) or
+ /// [`slice_from_suffix`](FromBytes::slice_from_suffix).
+ ///
+ /// # Panics
+ ///
+ /// If `Self` is a zero-sized type.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use zerocopy::FromBytes;
+ /// # use zerocopy_derive::*;
+ ///
+ /// # #[derive(Debug, PartialEq, Eq)]
+ /// #[derive(FromZeroes, FromBytes)]
+ /// #[repr(C)]
+ /// struct Pixel {
+ /// r: u8,
+ /// g: u8,
+ /// b: u8,
+ /// a: u8,
+ /// }
+ ///
+ /// // These bytes encode two `Pixel`s.
+ /// let bytes = [0, 1, 2, 3, 4, 5, 6, 7].as_slice();
+ ///
+ /// let pixels = Pixel::slice_from(bytes).unwrap();
+ ///
+ /// assert_eq!(pixels, &[
+ /// Pixel { r: 0, g: 1, b: 2, a: 3 },
+ /// Pixel { r: 4, g: 5, b: 6, a: 7 },
+ /// ]);
+ /// ```
+ #[inline]
+ fn slice_from(bytes: &[u8]) -> Option<&[Self]>
+ where
+ Self: Sized,
+ {
+ Ref::<_, [Self]>::new_slice(bytes).map(|r| r.into_slice())
+ }
+
+ /// Interprets the prefix of the given `bytes` as a `&[Self]` with length
+ /// equal to `count` without copying.
+ ///
+    /// This method verifies that `bytes.len() >= size_of::<Self>() * count`
+    /// and that `bytes` is aligned to `align_of::<Self>()`. It consumes the
+    /// first `size_of::<Self>() * count` bytes from `bytes` to construct a
+    /// `&[Self]`, and returns the remaining bytes to the caller. It also
+    /// ensures that `size_of::<Self>() * count` does not overflow a `usize`.
+    /// If any of the length, alignment, or overflow checks fail, it returns
+    /// `None`.
+    ///
+    /// # Panics
+    ///
+    /// If `Self` is a zero-sized type.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use zerocopy::FromBytes;
+ /// # use zerocopy_derive::*;
+ ///
+ /// # #[derive(Debug, PartialEq, Eq)]
+ /// #[derive(FromZeroes, FromBytes)]
+ /// #[repr(C)]
+ /// struct Pixel {
+ /// r: u8,
+ /// g: u8,
+ /// b: u8,
+ /// a: u8,
+ /// }
+ ///
+ /// // These are more bytes than are needed to encode two `Pixel`s.
+ /// let bytes = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9].as_slice();
+ ///
+ /// let (pixels, rest) = Pixel::slice_from_prefix(bytes, 2).unwrap();
+ ///
+ /// assert_eq!(pixels, &[
+ /// Pixel { r: 0, g: 1, b: 2, a: 3 },
+ /// Pixel { r: 4, g: 5, b: 6, a: 7 },
+ /// ]);
+ ///
+ /// assert_eq!(rest, &[8, 9]);
+ /// ```
+ #[inline]
+ fn slice_from_prefix(bytes: &[u8], count: usize) -> Option<(&[Self], &[u8])>
+ where
+ Self: Sized,
+ {
+ Ref::<_, [Self]>::new_slice_from_prefix(bytes, count).map(|(r, b)| (r.into_slice(), b))
+ }
+
+ /// Interprets the suffix of the given `bytes` as a `&[Self]` with length
+ /// equal to `count` without copying.
+ ///
+    /// This method verifies that `bytes.len() >= size_of::<Self>() * count`
+    /// and that `bytes` is aligned to `align_of::<Self>()`. It consumes the
+    /// last `size_of::<Self>() * count` bytes from `bytes` to construct a
+    /// `&[Self]`, and returns the preceding bytes to the caller. It also
+    /// ensures that `size_of::<Self>() * count` does not overflow a `usize`.
+    /// If any of the length, alignment, or overflow checks fail, it returns
+    /// `None`.
+    ///
+    /// # Panics
+    ///
+    /// If `Self` is a zero-sized type.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use zerocopy::FromBytes;
+ /// # use zerocopy_derive::*;
+ ///
+ /// # #[derive(Debug, PartialEq, Eq)]
+ /// #[derive(FromZeroes, FromBytes)]
+ /// #[repr(C)]
+ /// struct Pixel {
+ /// r: u8,
+ /// g: u8,
+ /// b: u8,
+ /// a: u8,
+ /// }
+ ///
+ /// // These are more bytes than are needed to encode two `Pixel`s.
+ /// let bytes = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9].as_slice();
+ ///
+ /// let (rest, pixels) = Pixel::slice_from_suffix(bytes, 2).unwrap();
+ ///
+ /// assert_eq!(rest, &[0, 1]);
+ ///
+ /// assert_eq!(pixels, &[
+ /// Pixel { r: 2, g: 3, b: 4, a: 5 },
+ /// Pixel { r: 6, g: 7, b: 8, a: 9 },
+ /// ]);
+ /// ```
+ #[inline]
+ fn slice_from_suffix(bytes: &[u8], count: usize) -> Option<(&[u8], &[Self])>
+ where
+ Self: Sized,
+ {
+ Ref::<_, [Self]>::new_slice_from_suffix(bytes, count).map(|(b, r)| (b, r.into_slice()))
+ }
+
+ /// Interprets the given `bytes` as a `&mut [Self]` without copying.
+ ///
+    /// If `bytes.len() % size_of::<Self>() != 0` or `bytes` is not aligned
+    /// to `align_of::<Self>()`, this returns `None`.
+    ///
+    /// If you need to convert a specific number of slice elements, see
+    /// [`mut_slice_from_prefix`](FromBytes::mut_slice_from_prefix) or
+    /// [`mut_slice_from_suffix`](FromBytes::mut_slice_from_suffix).
+    ///
+    /// # Panics
+    ///
+    /// If `Self` is a zero-sized type.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use zerocopy::FromBytes;
+ /// # use zerocopy_derive::*;
+ ///
+ /// # #[derive(Debug, PartialEq, Eq)]
+ /// #[derive(AsBytes, FromZeroes, FromBytes)]
+ /// #[repr(C)]
+ /// struct Pixel {
+ /// r: u8,
+ /// g: u8,
+ /// b: u8,
+ /// a: u8,
+ /// }
+ ///
+ /// // These bytes encode two `Pixel`s.
+ /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7][..];
+ ///
+ /// let pixels = Pixel::mut_slice_from(bytes).unwrap();
+ ///
+ /// assert_eq!(pixels, &[
+ /// Pixel { r: 0, g: 1, b: 2, a: 3 },
+ /// Pixel { r: 4, g: 5, b: 6, a: 7 },
+ /// ]);
+ ///
+ /// pixels[1] = Pixel { r: 0, g: 0, b: 0, a: 0 };
+ ///
+ /// assert_eq!(bytes, [0, 1, 2, 3, 0, 0, 0, 0]);
+ /// ```
+ #[inline]
+ fn mut_slice_from(bytes: &mut [u8]) -> Option<&mut [Self]>
+ where
+ Self: Sized + AsBytes,
+ {
+ Ref::<_, [Self]>::new_slice(bytes).map(|r| r.into_mut_slice())
+ }
+
+ /// Interprets the prefix of the given `bytes` as a `&mut [Self]` with length
+ /// equal to `count` without copying.
+ ///
+    /// This method verifies that `bytes.len() >= size_of::<Self>() * count`
+    /// and that `bytes` is aligned to `align_of::<Self>()`. It consumes the
+    /// first `size_of::<Self>() * count` bytes from `bytes` to construct a
+    /// `&mut [Self]`, and returns the remaining bytes to the caller. It also
+    /// ensures that `size_of::<Self>() * count` does not overflow a `usize`.
+    /// If any of the length, alignment, or overflow checks fail, it returns
+    /// `None`.
+    ///
+    /// # Panics
+    ///
+    /// If `Self` is a zero-sized type.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use zerocopy::FromBytes;
+ /// # use zerocopy_derive::*;
+ ///
+ /// # #[derive(Debug, PartialEq, Eq)]
+ /// #[derive(AsBytes, FromZeroes, FromBytes)]
+ /// #[repr(C)]
+ /// struct Pixel {
+ /// r: u8,
+ /// g: u8,
+ /// b: u8,
+ /// a: u8,
+ /// }
+ ///
+ /// // These are more bytes than are needed to encode two `Pixel`s.
+ /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
+ ///
+ /// let (pixels, rest) = Pixel::mut_slice_from_prefix(bytes, 2).unwrap();
+ ///
+ /// assert_eq!(pixels, &[
+ /// Pixel { r: 0, g: 1, b: 2, a: 3 },
+ /// Pixel { r: 4, g: 5, b: 6, a: 7 },
+ /// ]);
+ ///
+ /// assert_eq!(rest, &[8, 9]);
+ ///
+ /// pixels[1] = Pixel { r: 0, g: 0, b: 0, a: 0 };
+ ///
+ /// assert_eq!(bytes, [0, 1, 2, 3, 0, 0, 0, 0, 8, 9]);
+ /// ```
+ #[inline]
+ fn mut_slice_from_prefix(bytes: &mut [u8], count: usize) -> Option<(&mut [Self], &mut [u8])>
+ where
+ Self: Sized + AsBytes,
+ {
+ Ref::<_, [Self]>::new_slice_from_prefix(bytes, count).map(|(r, b)| (r.into_mut_slice(), b))
+ }
+
+ /// Interprets the suffix of the given `bytes` as a `&mut [Self]` with length
+ /// equal to `count` without copying.
+ ///
+    /// This method verifies that `bytes.len() >= size_of::<Self>() * count`
+    /// and that `bytes` is aligned to `align_of::<Self>()`. It consumes the
+    /// last `size_of::<Self>() * count` bytes from `bytes` to construct a
+    /// `&mut [Self]`, and returns the preceding bytes to the caller. It also
+    /// ensures that `size_of::<Self>() * count` does not overflow a `usize`.
+    /// If any of the length, alignment, or overflow checks fail, it returns
+    /// `None`.
+    ///
+    /// # Panics
+    ///
+    /// If `Self` is a zero-sized type.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use zerocopy::FromBytes;
+ /// # use zerocopy_derive::*;
+ ///
+ /// # #[derive(Debug, PartialEq, Eq)]
+ /// #[derive(AsBytes, FromZeroes, FromBytes)]
+ /// #[repr(C)]
+ /// struct Pixel {
+ /// r: u8,
+ /// g: u8,
+ /// b: u8,
+ /// a: u8,
+ /// }
+ ///
+ /// // These are more bytes than are needed to encode two `Pixel`s.
+ /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
+ ///
+ /// let (rest, pixels) = Pixel::mut_slice_from_suffix(bytes, 2).unwrap();
+ ///
+ /// assert_eq!(rest, &[0, 1]);
+ ///
+ /// assert_eq!(pixels, &[
+ /// Pixel { r: 2, g: 3, b: 4, a: 5 },
+ /// Pixel { r: 6, g: 7, b: 8, a: 9 },
+ /// ]);
+ ///
+ /// pixels[1] = Pixel { r: 0, g: 0, b: 0, a: 0 };
+ ///
+ /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 0, 0, 0, 0]);
+ /// ```
+ #[inline]
+ fn mut_slice_from_suffix(bytes: &mut [u8], count: usize) -> Option<(&mut [u8], &mut [Self])>
+ where
+ Self: Sized + AsBytes,
+ {
+ Ref::<_, [Self]>::new_slice_from_suffix(bytes, count).map(|(b, r)| (b, r.into_mut_slice()))
+ }
+
+ /// Reads a copy of `Self` from `bytes`.
+ ///
+ /// If `bytes.len() != size_of::<Self>()`, `read_from` returns `None`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use zerocopy::FromBytes;
+ /// # use zerocopy_derive::*;
+ ///
+ /// #[derive(FromZeroes, FromBytes)]
+ /// #[repr(C)]
+ /// struct PacketHeader {
+ /// src_port: [u8; 2],
+ /// dst_port: [u8; 2],
+ /// length: [u8; 2],
+ /// checksum: [u8; 2],
+ /// }
+ ///
+ /// // These bytes encode a `PacketHeader`.
+ /// let bytes = [0, 1, 2, 3, 4, 5, 6, 7].as_slice();
+ ///
+ /// let header = PacketHeader::read_from(bytes).unwrap();
+ ///
+ /// assert_eq!(header.src_port, [0, 1]);
+ /// assert_eq!(header.dst_port, [2, 3]);
+ /// assert_eq!(header.length, [4, 5]);
+ /// assert_eq!(header.checksum, [6, 7]);
+ /// ```
+ #[inline]
+ fn read_from(bytes: &[u8]) -> Option<Self>
+ where
+ Self: Sized,
+ {
+ Ref::<_, Unalign<Self>>::new_unaligned(bytes).map(|r| r.read().into_inner())
+ }
+
+ /// Reads a copy of `Self` from the prefix of `bytes`.
+ ///
+ /// `read_from_prefix` reads a `Self` from the first `size_of::<Self>()`
+ /// bytes of `bytes`. If `bytes.len() < size_of::<Self>()`, it returns
+ /// `None`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use zerocopy::FromBytes;
+ /// # use zerocopy_derive::*;
+ ///
+ /// #[derive(FromZeroes, FromBytes)]
+ /// #[repr(C)]
+ /// struct PacketHeader {
+ /// src_port: [u8; 2],
+ /// dst_port: [u8; 2],
+ /// length: [u8; 2],
+ /// checksum: [u8; 2],
+ /// }
+ ///
+ /// // These are more bytes than are needed to encode a `PacketHeader`.
+ /// let bytes = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9].as_slice();
+ ///
+ /// let header = PacketHeader::read_from_prefix(bytes).unwrap();
+ ///
+ /// assert_eq!(header.src_port, [0, 1]);
+ /// assert_eq!(header.dst_port, [2, 3]);
+ /// assert_eq!(header.length, [4, 5]);
+ /// assert_eq!(header.checksum, [6, 7]);
+ /// ```
+ #[inline]
+ fn read_from_prefix(bytes: &[u8]) -> Option<Self>
+ where
+ Self: Sized,
+ {
+ Ref::<_, Unalign<Self>>::new_unaligned_from_prefix(bytes)
+ .map(|(r, _)| r.read().into_inner())
+ }
+
+ /// Reads a copy of `Self` from the suffix of `bytes`.
+ ///
+ /// `read_from_suffix` reads a `Self` from the last `size_of::<Self>()`
+ /// bytes of `bytes`. If `bytes.len() < size_of::<Self>()`, it returns
+ /// `None`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use zerocopy::FromBytes;
+ /// # use zerocopy_derive::*;
+ ///
+ /// #[derive(FromZeroes, FromBytes)]
+ /// #[repr(C)]
+ /// struct PacketTrailer {
+ /// frame_check_sequence: [u8; 4],
+ /// }
+ ///
+ /// // These are more bytes than are needed to encode a `PacketTrailer`.
+ /// let bytes = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9].as_slice();
+ ///
+ /// let trailer = PacketTrailer::read_from_suffix(bytes).unwrap();
+ ///
+ /// assert_eq!(trailer.frame_check_sequence, [6, 7, 8, 9]);
+ /// ```
+ #[inline]
+ fn read_from_suffix(bytes: &[u8]) -> Option<Self>
+ where
+ Self: Sized,
+ {
+ Ref::<_, Unalign<Self>>::new_unaligned_from_suffix(bytes)
+ .map(|(_, r)| r.read().into_inner())
+ }
+}
+
+/// Analyzes whether a type is [`AsBytes`].
+///
+/// This derive analyzes, at compile time, whether the annotated type satisfies
+/// the [safety conditions] of `AsBytes` and implements `AsBytes` if it is
+/// sound to do so. This derive can be applied to structs, enums, and unions;
+/// e.g.:
+///
+/// ```
+/// # use zerocopy_derive::{AsBytes};
+/// #[derive(AsBytes)]
+/// #[repr(C)]
+/// struct MyStruct {
+/// # /*
+/// ...
+/// # */
+/// }
+///
+/// #[derive(AsBytes)]
+/// #[repr(u8)]
+/// enum MyEnum {
+/// # Variant,
+/// # /*
+/// ...
+/// # */
+/// }
+///
+/// #[derive(AsBytes)]
+/// #[repr(C)]
+/// union MyUnion {
+/// # variant: u8,
+/// # /*
+/// ...
+/// # */
+/// }
+/// ```
+///
+/// [safety conditions]: trait@AsBytes#safety
+///
+/// # Error Messages
+///
+/// Due to the way that the custom derive for `AsBytes` is implemented, you may
+/// get an error like this:
+///
+/// ```text
+/// error[E0277]: the trait bound `HasPadding<Foo, true>: ShouldBe<false>` is not satisfied
+/// --> lib.rs:23:10
+/// |
+/// 1 | #[derive(AsBytes)]
+/// | ^^^^^^^ the trait `ShouldBe<false>` is not implemented for `HasPadding<Foo, true>`
+/// |
+/// = help: the trait `ShouldBe<VALUE>` is implemented for `HasPadding<T, VALUE>`
+/// ```
+///
+/// This error indicates that the type being annotated has padding bytes, which
+/// is illegal for `AsBytes` types. Consider reducing the alignment of some
+/// fields by using types in the [`byteorder`] module, adding explicit struct
+/// fields where those padding bytes would be, or using `#[repr(packed)]`. See
+/// the Rust Reference's page on [type layout] for more information
+/// about type layout and padding.
+///
+/// [type layout]: https://doc.rust-lang.org/reference/type-layout.html
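+///
+/// For example (an illustrative sketch; the struct and field names are
+/// hypothetical), this `repr(C)` struct has one byte of padding before `tag`,
+/// so deriving `AsBytes` on it would produce the error above, while replacing
+/// the `u16` field with the alignment-1 `U16` type from the [`byteorder`]
+/// module removes the padding:
+///
+/// ```
+/// # use zerocopy_derive::AsBytes;
+/// use zerocopy::byteorder::network_endian::U16;
+///
+/// // #[derive(AsBytes)] // Error: `HasPadding<Padded, true>: ShouldBe<false>`.
+/// #[repr(C)]
+/// struct Padded {
+///     version: u8,
+///     tag: u16, // One padding byte is inserted before this field.
+/// }
+///
+/// #[derive(AsBytes)]
+/// #[repr(C)]
+/// struct Unpadded {
+///     version: u8,
+///     tag: U16, // `U16` has alignment 1, so no padding is inserted.
+/// }
+/// ```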
+///
+/// # Analysis
+///
+/// *This section describes, roughly, the analysis performed by this derive to
+/// determine whether it is sound to implement `AsBytes` for a given type.
+/// Unless you are modifying the implementation of this derive, or attempting to
+/// manually implement `AsBytes` for a type yourself, you don't need to read
+/// this section.*
+///
+/// If a type has the following properties, then this derive can implement
+/// `AsBytes` for that type:
+///
+/// - If the type is a struct:
+/// - It must have a defined representation (`repr(C)`, `repr(transparent)`,
+/// or `repr(packed)`).
+/// - All of its fields must be `AsBytes`.
+/// - Its layout must have no padding. This is always true for
+/// `repr(transparent)` and `repr(packed)`. For `repr(C)`, see the layout
+/// algorithm described in the [Rust Reference].
+/// - If the type is an enum:
+/// - It must be a C-like enum (meaning that all variants have no fields).
+/// - It must have a defined representation (`repr`s `C`, `u8`, `u16`, `u32`,
+/// `u64`, `usize`, `i8`, `i16`, `i32`, `i64`, or `isize`).
+/// - The type must not contain any [`UnsafeCell`]s (this is required in order
+/// for it to be sound to construct a `&[u8]` and a `&T` to the same region of
+/// memory). The type may contain references or pointers to `UnsafeCell`s so
+/// long as those values can themselves be initialized from zeroes (`AsBytes`
+/// is not currently implemented for, e.g., `Option<&UnsafeCell<_>>`, but it
+/// could be one day).
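+///
+/// For example (an illustrative sketch), this C-like enum has a defined
+/// representation and no variant fields, so it satisfies the enum conditions
+/// above and the derive will implement `AsBytes` for it:
+///
+/// ```
+/// # use zerocopy_derive::AsBytes;
+/// #[derive(AsBytes)]
+/// #[repr(u8)]
+/// enum Opcode {
+///     Load = 0x01,
+///     Store = 0x02,
+/// }
+/// ```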
+///
+/// [`UnsafeCell`]: core::cell::UnsafeCell
+///
+/// This analysis is subject to change. Unsafe code may *only* rely on the
+/// documented [safety conditions] of `AsBytes`, and must *not* rely on the
+/// implementation details of this derive.
+///
+/// [Rust Reference]: https://doc.rust-lang.org/reference/type-layout.html
+#[cfg(any(feature = "derive", test))]
+#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
+pub use zerocopy_derive::AsBytes;
+
+/// Types that can be viewed as an immutable slice of initialized bytes.
+///
+/// Any `AsBytes` type can be viewed as a slice of initialized bytes of the same
+/// size. This is useful for efficiently serializing structured data as raw
+/// bytes.
+///
+/// # Implementation
+///
+/// **Do not implement this trait yourself!** Instead, use
+/// [`#[derive(AsBytes)]`][derive] (requires the `derive` Cargo feature); e.g.:
+///
+/// ```
+/// # use zerocopy_derive::AsBytes;
+/// #[derive(AsBytes)]
+/// #[repr(C)]
+/// struct MyStruct {
+/// # /*
+/// ...
+/// # */
+/// }
+///
+/// #[derive(AsBytes)]
+/// #[repr(u8)]
+/// enum MyEnum {
+/// # Variant0,
+/// # /*
+/// ...
+/// # */
+/// }
+///
+/// #[derive(AsBytes)]
+/// #[repr(C)]
+/// union MyUnion {
+/// # variant: u8,
+/// # /*
+/// ...
+/// # */
+/// }
+/// ```
+///
+/// This derive performs a sophisticated, compile-time safety analysis to
+/// determine whether a type is `AsBytes`. See the [derive
+/// documentation][derive] for guidance on how to interpret error messages
+/// produced by the derive's analysis.
+///
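+/// # Example
+///
+/// Because `AsBytes::as_bytes` is available on every `AsBytes` type,
+/// serialization code can be written generically. A minimal sketch (the
+/// `checksum` helper is illustrative, not part of this crate):
+///
+/// ```
+/// use zerocopy::AsBytes;
+///
+/// // XOR-folds the bytes of any `AsBytes` value (illustrative only).
+/// fn checksum<T: AsBytes + ?Sized>(t: &T) -> u8 {
+///     t.as_bytes().iter().fold(0, |acc, b| acc ^ b)
+/// }
+///
+/// // Works for primitives and slices alike.
+/// assert_eq!(checksum(&0xFF00u16), 0xFF);
+/// assert_eq!(checksum::<[u8]>(&[0b1010, 0b0101]), 0b1111);
+/// ```
+///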
+/// # Safety
+///
+/// *This section describes what is required in order for `T: AsBytes`, and
+/// what unsafe code may assume of such types. If you don't plan on implementing
+/// `AsBytes` manually, and you don't plan on writing unsafe code that
+/// operates on `AsBytes` types, then you don't need to read this section.*
+///
+/// If `T: AsBytes`, then unsafe code may assume that:
+/// - It is sound to treat any `t: T` as an immutable `[u8]` of length
+/// `size_of_val(t)`.
+/// - Given `t: &T`, it is sound to construct a `b: &[u8]` where `b.len() ==
+/// size_of_val(t)` at the same address as `t`, and it is sound for both `b`
+/// and `t` to be live at the same time.
+///
+/// If a type is marked as `AsBytes` which violates this contract, it may cause
+/// undefined behavior.
+///
+/// `#[derive(AsBytes)]` only permits [types which satisfy these
+/// requirements][derive-analysis].
+///
+#[cfg_attr(
+ feature = "derive",
+ doc = "[derive]: zerocopy_derive::AsBytes",
+ doc = "[derive-analysis]: zerocopy_derive::AsBytes#analysis"
+)]
+#[cfg_attr(
+ not(feature = "derive"),
+ doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.AsBytes.html"),
+ doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.AsBytes.html#analysis"),
+)]
+pub unsafe trait AsBytes {
+ // The `Self: Sized` bound makes it so that this function doesn't prevent
+ // `AsBytes` from being object safe. Note that other `AsBytes` methods
+ // prevent object safety, but those provide a benefit in exchange for object
+ // safety. If at some point we remove those methods, change their type
+ // signatures, or move them out of this trait so that `AsBytes` is object
+ // safe again, it's important that this function not prevent object safety.
+ #[doc(hidden)]
+ fn only_derive_is_allowed_to_implement_this_trait()
+ where
+ Self: Sized;
+
+ /// Gets the bytes of this value.
+ ///
+ /// `as_bytes` provides access to the bytes of this value as an immutable
+ /// byte slice.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use zerocopy::AsBytes;
+ /// # use zerocopy_derive::*;
+ ///
+ /// #[derive(AsBytes)]
+ /// #[repr(C)]
+ /// struct PacketHeader {
+ /// src_port: [u8; 2],
+ /// dst_port: [u8; 2],
+ /// length: [u8; 2],
+ /// checksum: [u8; 2],
+ /// }
+ ///
+ /// let header = PacketHeader {
+ /// src_port: [0, 1],
+ /// dst_port: [2, 3],
+ /// length: [4, 5],
+ /// checksum: [6, 7],
+ /// };
+ ///
+ /// let bytes = header.as_bytes();
+ ///
+ /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7]);
+ /// ```
+ #[inline(always)]
+ fn as_bytes(&self) -> &[u8] {
+ // Note that this method does not have a `Self: Sized` bound;
+ // `size_of_val` works for unsized values too.
+ let len = mem::size_of_val(self);
+ let slf: *const Self = self;
+
+ // SAFETY:
+ // - `slf.cast::<u8>()` is valid for reads for `len *
+ // mem::size_of::<u8>()` many bytes because...
+ // - `slf` is the same pointer as `self`, and `self` is a reference
+ // which points to an object whose size is `len`. Thus...
+ // - The entire region of `len` bytes starting at `slf` is contained
+ // within a single allocation.
+ // - `slf` is non-null.
+ // - `slf` is trivially aligned to `align_of::<u8>() == 1`.
+ // - `Self: AsBytes` ensures that all of the bytes of `slf` are
+ // initialized.
+ // - Since `slf` is derived from `self`, and `self` is an immutable
+ // reference, the only other references to this memory region that
+ // could exist are other immutable references, and those don't allow
+ // mutation. `AsBytes` prohibits types which contain `UnsafeCell`s,
+ // which are the only types for which this rule wouldn't be sufficient.
+ // - The total size of the resulting slice is no larger than
+ // `isize::MAX` because no allocation produced by safe code can be
+ // larger than `isize::MAX`.
+ //
+ // TODO(#429): Add references to docs and quotes.
+ unsafe { slice::from_raw_parts(slf.cast::<u8>(), len) }
+ }
+
+ /// Gets the bytes of this value mutably.
+ ///
+ /// `as_bytes_mut` provides access to the bytes of this value as a mutable
+ /// byte slice.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use zerocopy::AsBytes;
+ /// # use zerocopy_derive::*;
+ ///
+ /// # #[derive(Eq, PartialEq, Debug)]
+ /// #[derive(AsBytes, FromZeroes, FromBytes)]
+ /// #[repr(C)]
+ /// struct PacketHeader {
+ /// src_port: [u8; 2],
+ /// dst_port: [u8; 2],
+ /// length: [u8; 2],
+ /// checksum: [u8; 2],
+ /// }
+ ///
+ /// let mut header = PacketHeader {
+ /// src_port: [0, 1],
+ /// dst_port: [2, 3],
+ /// length: [4, 5],
+ /// checksum: [6, 7],
+ /// };
+ ///
+ /// let bytes = header.as_bytes_mut();
+ ///
+ /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7]);
+ ///
+ /// bytes.reverse();
+ ///
+ /// assert_eq!(header, PacketHeader {
+ /// src_port: [7, 6],
+ /// dst_port: [5, 4],
+ /// length: [3, 2],
+ /// checksum: [1, 0],
+ /// });
+ /// ```
+ #[inline(always)]
+ fn as_bytes_mut(&mut self) -> &mut [u8]
+ where
+ Self: FromBytes,
+ {
+ // Note that this method does not have a `Self: Sized` bound;
+ // `size_of_val` works for unsized values too.
+ let len = mem::size_of_val(self);
+ let slf: *mut Self = self;
+
+ // SAFETY:
+ // - `slf.cast::<u8>()` is valid for reads and writes for `len *
+ // mem::size_of::<u8>()` many bytes because...
+ // - `slf` is the same pointer as `self`, and `self` is a reference
+ // which points to an object whose size is `len`. Thus...
+ // - The entire region of `len` bytes starting at `slf` is contained
+ // within a single allocation.
+ // - `slf` is non-null.
+ // - `slf` is trivially aligned to `align_of::<u8>() == 1`.
+ // - `Self: AsBytes` ensures that all of the bytes of `slf` are
+ // initialized.
+ // - `Self: FromBytes` ensures that no write to this memory region
+ // could result in it containing an invalid `Self`.
+ // - Since `slf` is derived from `self`, and `self` is a mutable
+ // reference, no other references to this memory region can exist.
+ // - The total size of the resulting slice is no larger than
+ // `isize::MAX` because no allocation produced by safe code can be
+ // larger than `isize::MAX`.
+ //
+ // TODO(#429): Add references to docs and quotes.
+ unsafe { slice::from_raw_parts_mut(slf.cast::<u8>(), len) }
+ }
+
+ /// Writes a copy of `self` to `bytes`.
+ ///
+ /// If `bytes.len() != size_of_val(self)`, `write_to` returns `None`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use zerocopy::AsBytes;
+ /// # use zerocopy_derive::*;
+ ///
+ /// #[derive(AsBytes)]
+ /// #[repr(C)]
+ /// struct PacketHeader {
+ /// src_port: [u8; 2],
+ /// dst_port: [u8; 2],
+ /// length: [u8; 2],
+ /// checksum: [u8; 2],
+ /// }
+ ///
+ /// let header = PacketHeader {
+ /// src_port: [0, 1],
+ /// dst_port: [2, 3],
+ /// length: [4, 5],
+ /// checksum: [6, 7],
+ /// };
+ ///
+ /// let mut bytes = [0, 0, 0, 0, 0, 0, 0, 0];
+ ///
+ /// header.write_to(&mut bytes[..]);
+ ///
+ /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7]);
+ /// ```
+ ///
+ /// If too many or too few target bytes are provided, `write_to` returns
+ /// `None` and leaves the target bytes unmodified:
+ ///
+ /// ```
+ /// # use zerocopy::AsBytes;
+ /// # let header = u128::MAX;
+ /// let mut excessive_bytes = &mut [0u8; 128][..];
+ ///
+ /// let write_result = header.write_to(excessive_bytes);
+ ///
+ /// assert!(write_result.is_none());
+ /// assert_eq!(excessive_bytes, [0u8; 128]);
+ /// ```
+ #[inline]
+ fn write_to(&self, bytes: &mut [u8]) -> Option<()> {
+ if bytes.len() != mem::size_of_val(self) {
+ return None;
+ }
+
+ bytes.copy_from_slice(self.as_bytes());
+ Some(())
+ }
+
+ /// Writes a copy of `self` to the prefix of `bytes`.
+ ///
+ /// `write_to_prefix` writes `self` to the first `size_of_val(self)` bytes
+ /// of `bytes`. If `bytes.len() < size_of_val(self)`, it returns `None`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use zerocopy::AsBytes;
+ /// # use zerocopy_derive::*;
+ ///
+ /// #[derive(AsBytes)]
+ /// #[repr(C)]
+ /// struct PacketHeader {
+ /// src_port: [u8; 2],
+ /// dst_port: [u8; 2],
+ /// length: [u8; 2],
+ /// checksum: [u8; 2],
+ /// }
+ ///
+ /// let header = PacketHeader {
+ /// src_port: [0, 1],
+ /// dst_port: [2, 3],
+ /// length: [4, 5],
+ /// checksum: [6, 7],
+ /// };
+ ///
+ /// let mut bytes = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
+ ///
+ /// header.write_to_prefix(&mut bytes[..]);
+ ///
+ /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7, 0, 0]);
+ /// ```
+ ///
+ /// If insufficient target bytes are provided, `write_to_prefix` returns
+ /// `None` and leaves the target bytes unmodified:
+ ///
+ /// ```
+ /// # use zerocopy::AsBytes;
+ /// # let header = u128::MAX;
+    /// let mut insufficient_bytes = &mut [0, 0][..];
+    ///
+    /// let write_result = header.write_to_prefix(insufficient_bytes);
+    ///
+    /// assert!(write_result.is_none());
+    /// assert_eq!(insufficient_bytes, [0, 0]);
+ /// ```
+ #[inline]
+ fn write_to_prefix(&self, bytes: &mut [u8]) -> Option<()> {
+ let size = mem::size_of_val(self);
+ bytes.get_mut(..size)?.copy_from_slice(self.as_bytes());
+ Some(())
+ }
+
+ /// Writes a copy of `self` to the suffix of `bytes`.
+ ///
+ /// `write_to_suffix` writes `self` to the last `size_of_val(self)` bytes of
+ /// `bytes`. If `bytes.len() < size_of_val(self)`, it returns `None`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use zerocopy::AsBytes;
+ /// # use zerocopy_derive::*;
+ ///
+ /// #[derive(AsBytes)]
+ /// #[repr(C)]
+ /// struct PacketHeader {
+ /// src_port: [u8; 2],
+ /// dst_port: [u8; 2],
+ /// length: [u8; 2],
+ /// checksum: [u8; 2],
+ /// }
+ ///
+ /// let header = PacketHeader {
+ /// src_port: [0, 1],
+ /// dst_port: [2, 3],
+ /// length: [4, 5],
+ /// checksum: [6, 7],
+ /// };
+ ///
+ /// let mut bytes = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
+ ///
+ /// header.write_to_suffix(&mut bytes[..]);
+ ///
+ /// assert_eq!(bytes, [0, 0, 0, 1, 2, 3, 4, 5, 6, 7]);
+    /// ```
+ ///
+ /// If insufficient target bytes are provided, `write_to_suffix` returns
+ /// `None` and leaves the target bytes unmodified:
+ ///
+ /// ```
+ /// # use zerocopy::AsBytes;
+ /// # let header = u128::MAX;
+    /// let mut insufficient_bytes = &mut [0, 0][..];
+    ///
+    /// let write_result = header.write_to_suffix(insufficient_bytes);
+    ///
+    /// assert!(write_result.is_none());
+    /// assert_eq!(insufficient_bytes, [0, 0]);
+ /// ```
+ #[inline]
+ fn write_to_suffix(&self, bytes: &mut [u8]) -> Option<()> {
+ let start = bytes.len().checked_sub(mem::size_of_val(self))?;
+ bytes
+ .get_mut(start..)
+ .expect("`start` should be in-bounds of `bytes`")
+ .copy_from_slice(self.as_bytes());
+ Some(())
+ }
+}
+
+/// Types with no alignment requirement.
+///
+/// WARNING: Do not implement this trait yourself! Instead, use
+/// `#[derive(Unaligned)]` (requires the `derive` Cargo feature).
+///
+/// If `T: Unaligned`, then `align_of::<T>() == 1`.
+///
+/// # Safety
+///
+/// *This section describes what is required in order for `T: Unaligned`, and
+/// what unsafe code may assume of such types. `#[derive(Unaligned)]` only
+/// permits types which satisfy these requirements. If you don't plan on
+/// implementing `Unaligned` manually, and you don't plan on writing unsafe code
+/// that operates on `Unaligned` types, then you don't need to read this
+/// section.*
+///
+/// If `T: Unaligned`, then unsafe code may assume that it is sound to produce a
+/// reference to `T` at any memory location regardless of alignment. If a type
+/// is marked as `Unaligned` which violates this contract, it may cause
+/// undefined behavior.
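+///
+/// # Example
+///
+/// A minimal sketch of deriving `Unaligned` (the struct here is illustrative;
+/// for a `repr(C)` struct, every field must itself be `Unaligned`):
+///
+/// ```
+/// # use zerocopy_derive::Unaligned;
+/// #[derive(Unaligned)]
+/// #[repr(C)]
+/// struct Flags {
+///     kind: u8,
+///     bits: [u8; 3],
+/// }
+/// ```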
+pub unsafe trait Unaligned {
+ // The `Self: Sized` bound makes it so that `Unaligned` is still object
+ // safe.
+ #[doc(hidden)]
+ fn only_derive_is_allowed_to_implement_this_trait()
+ where
+ Self: Sized;
+}
+
+safety_comment! {
+ /// SAFETY:
+ /// Per the reference [1], "the unit tuple (`()`) ... is guaranteed as a
+ /// zero-sized type to have a size of 0 and an alignment of 1."
+ /// - `TryFromBytes` (with no validator), `FromZeroes`, `FromBytes`: There
+ /// is only one possible sequence of 0 bytes, and `()` is inhabited.
+ /// - `AsBytes`: Since `()` has size 0, it contains no padding bytes.
+ /// - `Unaligned`: `()` has alignment 1.
+ ///
+ /// [1] https://doc.rust-lang.org/reference/type-layout.html#tuple-layout
+ unsafe_impl!((): TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned);
+ assert_unaligned!(());
+}
+
+safety_comment! {
+ /// SAFETY:
+ /// - `TryFromBytes` (with no validator), `FromZeroes`, `FromBytes`: all bit
+ /// patterns are valid for numeric types [1]
+ /// - `AsBytes`: numeric types have no padding bytes [1]
+ /// - `Unaligned` (`u8` and `i8` only): The reference [2] specifies the size
+ /// of `u8` and `i8` as 1 byte. We also know that:
+ /// - Alignment is >= 1 [3]
+ /// - Size is an integer multiple of alignment [4]
+ /// - The only value >= 1 for which 1 is an integer multiple is 1
+ /// Therefore, the only possible alignment for `u8` and `i8` is 1.
+ ///
+ /// [1] Per https://doc.rust-lang.org/beta/reference/types/numeric.html#bit-validity:
+ ///
+ /// For every numeric type, `T`, the bit validity of `T` is equivalent to
+ /// the bit validity of `[u8; size_of::<T>()]`. An uninitialized byte is
+ /// not a valid `u8`.
+ ///
+ /// TODO(https://github.com/rust-lang/reference/pull/1392): Once this text
+ /// is available on the Stable docs, cite those instead.
+ ///
+ /// [2] https://doc.rust-lang.org/reference/type-layout.html#primitive-data-layout
+ ///
+ /// [3] Per https://doc.rust-lang.org/reference/type-layout.html#size-and-alignment:
+ ///
+ /// Alignment is measured in bytes, and must be at least 1.
+ ///
+ /// [4] Per https://doc.rust-lang.org/reference/type-layout.html#size-and-alignment:
+ ///
+ /// The size of a value is always a multiple of its alignment.
+ ///
+ /// TODO(#278): Once we've updated the trait docs to refer to `u8`s rather
+ /// than bits or bytes, update this comment, especially the reference to
+ /// [1].
+ unsafe_impl!(u8: TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned);
+ unsafe_impl!(i8: TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned);
+ assert_unaligned!(u8, i8);
+ unsafe_impl!(u16: TryFromBytes, FromZeroes, FromBytes, AsBytes);
+ unsafe_impl!(i16: TryFromBytes, FromZeroes, FromBytes, AsBytes);
+ unsafe_impl!(u32: TryFromBytes, FromZeroes, FromBytes, AsBytes);
+ unsafe_impl!(i32: TryFromBytes, FromZeroes, FromBytes, AsBytes);
+ unsafe_impl!(u64: TryFromBytes, FromZeroes, FromBytes, AsBytes);
+ unsafe_impl!(i64: TryFromBytes, FromZeroes, FromBytes, AsBytes);
+ unsafe_impl!(u128: TryFromBytes, FromZeroes, FromBytes, AsBytes);
+ unsafe_impl!(i128: TryFromBytes, FromZeroes, FromBytes, AsBytes);
+ unsafe_impl!(usize: TryFromBytes, FromZeroes, FromBytes, AsBytes);
+ unsafe_impl!(isize: TryFromBytes, FromZeroes, FromBytes, AsBytes);
+ unsafe_impl!(f32: TryFromBytes, FromZeroes, FromBytes, AsBytes);
+ unsafe_impl!(f64: TryFromBytes, FromZeroes, FromBytes, AsBytes);
+}
+
+safety_comment! {
+ /// SAFETY:
+ /// - `FromZeroes`: Valid since "[t]he value false has the bit pattern
+ /// 0x00" [1].
+ /// - `AsBytes`: Since "the boolean type has a size and alignment of 1 each"
+ /// and "The value false has the bit pattern 0x00 and the value true has
+ /// the bit pattern 0x01" [1]. Thus, the only byte of the bool is always
+ /// initialized.
+ /// - `Unaligned`: Per the reference [1], "[a]n object with the boolean type
+ /// has a size and alignment of 1 each."
+ ///
+ /// [1] https://doc.rust-lang.org/reference/types/boolean.html
+ unsafe_impl!(bool: FromZeroes, AsBytes, Unaligned);
+ assert_unaligned!(bool);
+ /// SAFETY:
+ /// - The safety requirements for `unsafe_impl!` with an `is_bit_valid`
+ /// closure:
+ /// - Given `t: *mut bool` and `let r = *mut u8`, `r` refers to an object
+ /// of the same size as that referred to by `t`. This is true because
+ /// `bool` and `u8` have the same size (1 byte) [1].
+ /// - Since the closure takes a `&u8` argument, given a `Ptr<'a, bool>`
+ /// which satisfies the preconditions of
+ /// `TryFromBytes::<bool>::is_bit_valid`, it must be guaranteed that the
+ /// memory referenced by that `Ptr` always contains a valid `u8`. Since
+ /// `bool`'s single byte is always initialized, `is_bit_valid`'s
+ /// precondition requires that the same is true of its argument. Since
+ /// `u8`'s only bit validity invariant is that its single byte must be
+ /// initialized, this memory is guaranteed to contain a valid `u8`.
+ /// - The alignment of `bool` is equal to the alignment of `u8`. [1] [2]
+ /// - The impl must only return `true` for its argument if the original
+ /// `Ptr<bool>` refers to a valid `bool`. We only return true if the
+ /// `u8` value is 0 or 1, and both of these are valid values for `bool`.
+ /// [3]
+ ///
+ /// [1] Per https://doc.rust-lang.org/reference/type-layout.html#primitive-data-layout:
+ ///
+ /// The size of most primitives is given in this table.
+ ///
+ /// | Type | `size_of::<Type>() ` |
+ /// |-----------|----------------------|
+ /// | `bool` | 1 |
+ /// | `u8`/`i8` | 1 |
+ ///
+ /// [2] Per https://doc.rust-lang.org/reference/type-layout.html#size-and-alignment:
+ ///
+ /// The size of a value is always a multiple of its alignment.
+ ///
+ /// [3] Per https://doc.rust-lang.org/reference/types/boolean.html:
+ ///
+ /// The value false has the bit pattern 0x00 and the value true has the
+ /// bit pattern 0x01.
+ unsafe_impl!(bool: TryFromBytes; |byte: &u8| *byte < 2);
+}
+safety_comment! {
+ /// SAFETY:
+ /// - `FromZeroes`: Per reference [1], "[a] value of type char is a Unicode
+ /// scalar value (i.e. a code point that is not a surrogate), represented
+ /// as a 32-bit unsigned word in the 0x0000 to 0xD7FF or 0xE000 to
+ /// 0x10FFFF range" which contains 0x0000.
+ /// - `AsBytes`: `char` is per reference [1] "represented as a 32-bit
+ /// unsigned word" (`u32`) which is `AsBytes`. Note that unlike `u32`, not
+ /// all bit patterns are valid for `char`.
+ ///
+ /// [1] https://doc.rust-lang.org/reference/types/textual.html
+ unsafe_impl!(char: FromZeroes, AsBytes);
+ /// SAFETY:
+ /// - The safety requirements for `unsafe_impl!` with an `is_bit_valid`
+ /// closure:
+ /// - Given `t: *mut char` and `let r = *mut u32`, `r` refers to an object
+ /// of the same size as that referred to by `t`. This is true because
+ /// `char` and `u32` have the same size [1].
+ /// - Since the closure takes a `&u32` argument, given a `Ptr<'a, char>`
+ /// which satisfies the preconditions of
+ /// `TryFromBytes::<char>::is_bit_valid`, it must be guaranteed that the
+ /// memory referenced by that `Ptr` always contains a valid `u32`. Since
+ /// `char`'s bytes are always initialized [2], `is_bit_valid`'s
+ /// precondition requires that the same is true of its argument. Since
+ /// `u32`'s only bit validity invariant is that its bytes must be
+ /// initialized, this memory is guaranteed to contain a valid `u32`.
+ /// - The alignment of `char` is equal to the alignment of `u32`. [1]
+ /// - The impl must only return `true` for its argument if the original
+ /// `Ptr<char>` refers to a valid `char`. `char::from_u32` guarantees
+ /// that it returns `None` if its input is not a valid `char`. [3]
+ ///
+ /// [1] Per https://doc.rust-lang.org/nightly/reference/types/textual.html#layout-and-bit-validity:
+ ///
+ /// `char` is guaranteed to have the same size and alignment as `u32` on
+ /// all platforms.
+ ///
+ /// [2] Per https://doc.rust-lang.org/core/primitive.char.html#method.from_u32:
+ ///
+ /// Every byte of a `char` is guaranteed to be initialized.
+ ///
+ /// [3] Per https://doc.rust-lang.org/core/primitive.char.html#method.from_u32:
+ ///
+ /// `from_u32()` will return `None` if the input is not a valid value for
+ /// a `char`.
+ unsafe_impl!(char: TryFromBytes; |candidate: &u32| char::from_u32(*candidate).is_some());
+}
+safety_comment! {
+ /// SAFETY:
+ /// - `FromZeroes`, `AsBytes`, `Unaligned`: Per the reference [1], `str`
+ /// has the same layout as `[u8]`, and `[u8]` is `FromZeroes`, `AsBytes`,
+ /// and `Unaligned`.
+ ///
+ /// Note that we don't `assert_unaligned!(str)` because `assert_unaligned!`
+ /// uses `align_of`, which only works for `Sized` types.
+ ///
+ /// TODO(#429): Add quotes from documentation.
+ ///
+ /// [1] https://doc.rust-lang.org/reference/type-layout.html#str-layout
+ unsafe_impl!(str: FromZeroes, AsBytes, Unaligned);
+ /// SAFETY:
+ /// - The safety requirements for `unsafe_impl!` with an `is_bit_valid`
+ /// closure:
+ /// - Given `t: *mut str` and `let r = *mut [u8]`, `r` refers to an object
+ /// of the same size as that referred to by `t`. This is true because
+ /// `str` and `[u8]` have the same representation. [1]
+ /// - Since the closure takes a `&[u8]` argument, given a `Ptr<'a, str>`
+ /// which satisfies the preconditions of
+ /// `TryFromBytes::<str>::is_bit_valid`, it must be guaranteed that the
+ /// memory referenced by that `Ptr` always contains a valid `[u8]`.
+ /// Since `str`'s bytes are always initialized [1], `is_bit_valid`'s
+ /// precondition requires that the same is true of its argument. Since
+ /// `[u8]`'s only bit validity invariant is that its bytes must be
+ /// initialized, this memory is guaranteed to contain a valid `[u8]`.
+ /// - The alignment of `str` is equal to the alignment of `[u8]`. [1]
+ /// - The impl must only return `true` for its argument if the original
+ /// `Ptr<str>` refers to a valid `str`. `str::from_utf8` guarantees that
+ /// it returns `Err` if its input is not a valid `str`. [2]
+ ///
+ /// [1] Per https://doc.rust-lang.org/reference/types/textual.html:
+ ///
+    ///   A value of type `str` is represented the same way as `[u8]`.
+ ///
+ /// [2] Per https://doc.rust-lang.org/core/str/fn.from_utf8.html#errors:
+ ///
+ /// Returns `Err` if the slice is not UTF-8.
+ unsafe_impl!(str: TryFromBytes; |candidate: &[u8]| core::str::from_utf8(candidate).is_ok());
+}
+
+safety_comment! {
+ // `NonZeroXxx` is `AsBytes`, but not `FromZeroes` or `FromBytes`.
+ //
+ /// SAFETY:
+ /// - `AsBytes`: `NonZeroXxx` has the same layout as its associated
+ /// primitive. Since it is the same size, this guarantees it has no
+ /// padding - integers have no padding, and there's no room for padding
+ /// if it can represent all of the same values except 0.
+ /// - `Unaligned`: `NonZeroU8` and `NonZeroI8` document that
+ /// `Option<NonZeroU8>` and `Option<NonZeroI8>` both have size 1. [1] [2]
+ /// This is worded in a way that makes it unclear whether it's meant as a
+ /// guarantee, but given the purpose of those types, it's virtually
+    ///   unthinkable that that would ever change. `Option` cannot be smaller
+    ///   than its contained type, which implies that `NonZeroU8` and
+    ///   `NonZeroI8` each have size 0 or 1. A `NonZeroX8` can represent
+    ///   multiple states, so it cannot have size 0, which means that it must
+    ///   have size 1. The only valid alignment for a 1-byte type is 1.
+ ///
+ /// TODO(#429): Add quotes from documentation.
+ ///
+ /// [1] https://doc.rust-lang.org/stable/std/num/struct.NonZeroU8.html
+ /// [2] https://doc.rust-lang.org/stable/std/num/struct.NonZeroI8.html
+ /// TODO(https://github.com/rust-lang/rust/pull/104082): Cite documentation
+ /// that layout is the same as primitive layout.
+ unsafe_impl!(NonZeroU8: AsBytes, Unaligned);
+ unsafe_impl!(NonZeroI8: AsBytes, Unaligned);
+ assert_unaligned!(NonZeroU8, NonZeroI8);
+ unsafe_impl!(NonZeroU16: AsBytes);
+ unsafe_impl!(NonZeroI16: AsBytes);
+ unsafe_impl!(NonZeroU32: AsBytes);
+ unsafe_impl!(NonZeroI32: AsBytes);
+ unsafe_impl!(NonZeroU64: AsBytes);
+ unsafe_impl!(NonZeroI64: AsBytes);
+ unsafe_impl!(NonZeroU128: AsBytes);
+ unsafe_impl!(NonZeroI128: AsBytes);
+ unsafe_impl!(NonZeroUsize: AsBytes);
+ unsafe_impl!(NonZeroIsize: AsBytes);
+ /// SAFETY:
+ /// - The safety requirements for `unsafe_impl!` with an `is_bit_valid`
+ /// closure:
+ /// - Given `t: *mut NonZeroXxx` and `let r = *mut xxx`, `r` refers to an
+ /// object of the same size as that referred to by `t`. This is true
+ /// because `NonZeroXxx` and `xxx` have the same size. [1]
+ /// - Since the closure takes a `&xxx` argument, given a `Ptr<'a,
+ /// NonZeroXxx>` which satisfies the preconditions of
+ /// `TryFromBytes::<NonZeroXxx>::is_bit_valid`, it must be guaranteed
+ /// that the memory referenced by that `Ptr` always contains a valid
+ /// `xxx`. Since `NonZeroXxx`'s bytes are always initialized [1],
+ /// `is_bit_valid`'s precondition requires that the same is true of its
+ /// argument. Since `xxx`'s only bit validity invariant is that its
+ /// bytes must be initialized, this memory is guaranteed to contain a
+ /// valid `xxx`.
+ /// - The alignment of `NonZeroXxx` is equal to the alignment of `xxx`.
+ /// [1]
+ /// - The impl must only return `true` for its argument if the original
+ /// `Ptr<NonZeroXxx>` refers to a valid `NonZeroXxx`. The only `xxx`
+ /// which is not also a valid `NonZeroXxx` is 0. [1]
+ ///
+ /// [1] Per https://doc.rust-lang.org/core/num/struct.NonZeroU16.html:
+ ///
+ /// `NonZeroU16` is guaranteed to have the same layout and bit validity as
+ /// `u16` with the exception that `0` is not a valid instance.
+ unsafe_impl!(NonZeroU8: TryFromBytes; |n: &u8| *n != 0);
+ unsafe_impl!(NonZeroI8: TryFromBytes; |n: &i8| *n != 0);
+ unsafe_impl!(NonZeroU16: TryFromBytes; |n: &u16| *n != 0);
+ unsafe_impl!(NonZeroI16: TryFromBytes; |n: &i16| *n != 0);
+ unsafe_impl!(NonZeroU32: TryFromBytes; |n: &u32| *n != 0);
+ unsafe_impl!(NonZeroI32: TryFromBytes; |n: &i32| *n != 0);
+ unsafe_impl!(NonZeroU64: TryFromBytes; |n: &u64| *n != 0);
+ unsafe_impl!(NonZeroI64: TryFromBytes; |n: &i64| *n != 0);
+ unsafe_impl!(NonZeroU128: TryFromBytes; |n: &u128| *n != 0);
+ unsafe_impl!(NonZeroI128: TryFromBytes; |n: &i128| *n != 0);
+ unsafe_impl!(NonZeroUsize: TryFromBytes; |n: &usize| *n != 0);
+ unsafe_impl!(NonZeroIsize: TryFromBytes; |n: &isize| *n != 0);
+}
+safety_comment! {
+ /// SAFETY:
+ /// - `TryFromBytes` (with no validator), `FromZeroes`, `FromBytes`,
+    ///   `AsBytes`: The Rust compiler reuses the `0` value to represent `None`, so
+ /// `size_of::<Option<NonZeroXxx>>() == size_of::<xxx>()`; see
+ /// `NonZeroXxx` documentation.
+ /// - `Unaligned`: `NonZeroU8` and `NonZeroI8` document that
+ /// `Option<NonZeroU8>` and `Option<NonZeroI8>` both have size 1. [1] [2]
+ /// This is worded in a way that makes it unclear whether it's meant as a
+ /// guarantee, but given the purpose of those types, it's virtually
+ /// unthinkable that that would ever change. The only valid alignment for
+ /// a 1-byte type is 1.
+ ///
+ /// TODO(#429): Add quotes from documentation.
+ ///
+ /// [1] https://doc.rust-lang.org/stable/std/num/struct.NonZeroU8.html
+ /// [2] https://doc.rust-lang.org/stable/std/num/struct.NonZeroI8.html
+ ///
+ /// TODO(https://github.com/rust-lang/rust/pull/104082): Cite documentation
+ /// for layout guarantees.
+ unsafe_impl!(Option<NonZeroU8>: TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned);
+ unsafe_impl!(Option<NonZeroI8>: TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned);
+ assert_unaligned!(Option<NonZeroU8>, Option<NonZeroI8>);
+ unsafe_impl!(Option<NonZeroU16>: TryFromBytes, FromZeroes, FromBytes, AsBytes);
+ unsafe_impl!(Option<NonZeroI16>: TryFromBytes, FromZeroes, FromBytes, AsBytes);
+ unsafe_impl!(Option<NonZeroU32>: TryFromBytes, FromZeroes, FromBytes, AsBytes);
+ unsafe_impl!(Option<NonZeroI32>: TryFromBytes, FromZeroes, FromBytes, AsBytes);
+ unsafe_impl!(Option<NonZeroU64>: TryFromBytes, FromZeroes, FromBytes, AsBytes);
+ unsafe_impl!(Option<NonZeroI64>: TryFromBytes, FromZeroes, FromBytes, AsBytes);
+ unsafe_impl!(Option<NonZeroU128>: TryFromBytes, FromZeroes, FromBytes, AsBytes);
+ unsafe_impl!(Option<NonZeroI128>: TryFromBytes, FromZeroes, FromBytes, AsBytes);
+ unsafe_impl!(Option<NonZeroUsize>: TryFromBytes, FromZeroes, FromBytes, AsBytes);
+ unsafe_impl!(Option<NonZeroIsize>: TryFromBytes, FromZeroes, FromBytes, AsBytes);
+}
+
+safety_comment! {
+ /// SAFETY:
+ /// The following types can be transmuted from `[0u8; size_of::<T>()]`. [1]
+ /// None of them contain `UnsafeCell`s, and so they all soundly implement
+ /// `FromZeroes`.
+ ///
+ /// [1] Per
+ /// https://doc.rust-lang.org/nightly/core/option/index.html#representation:
+ ///
+ /// Rust guarantees to optimize the following types `T` such that
+ /// [`Option<T>`] has the same size and alignment as `T`. In some of these
+ /// cases, Rust further guarantees that `transmute::<_, Option<T>>([0u8;
+ /// size_of::<T>()])` is sound and produces `Option::<T>::None`. These
+ /// cases are identified by the second column:
+ ///
+ /// | `T` | `transmute::<_, Option<T>>([0u8; size_of::<T>()])` sound? |
+ /// |-----------------------|-----------------------------------------------------------|
+ /// | [`Box<U>`] | when `U: Sized` |
+ /// | `&U` | when `U: Sized` |
+ /// | `&mut U` | when `U: Sized` |
+ /// | [`ptr::NonNull<U>`] | when `U: Sized` |
+ /// | `fn`, `extern "C" fn` | always |
+ ///
+ /// TODO(#429), TODO(https://github.com/rust-lang/rust/pull/115333): Cite
+ /// the Stable docs once they're available.
+ #[cfg(feature = "alloc")]
+ unsafe_impl!(
+ #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
+ T => FromZeroes for Option<Box<T>>
+ );
+ unsafe_impl!(T => FromZeroes for Option<&'_ T>);
+ unsafe_impl!(T => FromZeroes for Option<&'_ mut T>);
+ unsafe_impl!(T => FromZeroes for Option<NonNull<T>>);
+ unsafe_impl_for_power_set!(A, B, C, D, E, F, G, H, I, J, K, L -> M => FromZeroes for opt_fn!(...));
+ unsafe_impl_for_power_set!(A, B, C, D, E, F, G, H, I, J, K, L -> M => FromZeroes for opt_extern_c_fn!(...));
+}
+
+safety_comment! {
+ /// SAFETY:
+ /// Per reference [1]:
+ /// "For all T, the following are guaranteed:
+ /// size_of::<PhantomData<T>>() == 0
+ /// align_of::<PhantomData<T>>() == 1".
+ /// This gives:
+ /// - `TryFromBytes` (with no validator), `FromZeroes`, `FromBytes`: There
+ /// is only one possible sequence of 0 bytes, and `PhantomData` is
+ /// inhabited.
+ /// - `AsBytes`: Since `PhantomData` has size 0, it contains no padding
+ /// bytes.
+ /// - `Unaligned`: Per the preceding reference, `PhantomData` has alignment
+ /// 1.
+ ///
+ /// [1] https://doc.rust-lang.org/std/marker/struct.PhantomData.html#layout-1
+ unsafe_impl!(T: ?Sized => TryFromBytes for PhantomData<T>);
+ unsafe_impl!(T: ?Sized => FromZeroes for PhantomData<T>);
+ unsafe_impl!(T: ?Sized => FromBytes for PhantomData<T>);
+ unsafe_impl!(T: ?Sized => AsBytes for PhantomData<T>);
+ unsafe_impl!(T: ?Sized => Unaligned for PhantomData<T>);
+ assert_unaligned!(PhantomData<()>, PhantomData<u8>, PhantomData<u64>);
+}
+safety_comment! {
+ /// SAFETY:
+ /// `Wrapping<T>` is guaranteed by its docs [1] to have the same layout and
+ /// bit validity as `T`. Also, `Wrapping<T>` is `#[repr(transparent)]`, and
+ /// has a single field, which is `pub`. Per the reference [2], this means
+ /// that the `#[repr(transparent)]` attribute is "considered part of the
+ /// public ABI".
+ ///
+ /// - `TryFromBytes`: The safety requirements for `unsafe_impl!` with an
+ /// `is_bit_valid` closure:
+ /// - Given `t: *mut Wrapping<T>` and `let r = *mut T`, `r` refers to an
+ /// object of the same size as that referred to by `t`. This is true
+ /// because `Wrapping<T>` and `T` have the same layout
+ /// - The alignment of `Wrapping<T>` is equal to the alignment of `T`.
+ /// - The impl must only return `true` for its argument if the original
+ /// `Ptr<Wrapping<T>>` refers to a valid `Wrapping<T>`. Since
+ /// `Wrapping<T>` has the same bit validity as `T`, and since our impl
+ /// just calls `T::is_bit_valid`, our impl returns `true` exactly when
+ /// its argument contains a valid `Wrapping<T>`.
+ /// - `FromBytes`: Since `Wrapping<T>` has the same bit validity as `T`, if
+ /// `T: FromBytes`, then all initialized byte sequences are valid
+ /// instances of `Wrapping<T>`. Similarly, if `T: FromBytes`, then
+ /// `Wrapping<T>` doesn't contain any `UnsafeCell`s. Thus, `impl FromBytes
+ /// for Wrapping<T> where T: FromBytes` is a sound impl.
+ /// - `AsBytes`: Since `Wrapping<T>` has the same bit validity as `T`, if
+ /// `T: AsBytes`, then all valid instances of `Wrapping<T>` have all of
+ /// their bytes initialized. Similarly, if `T: AsBytes`, then
+ /// `Wrapping<T>` doesn't contain any `UnsafeCell`s. Thus, `impl AsBytes
+ /// for Wrapping<T> where T: AsBytes` is a valid impl.
+ /// - `Unaligned`: Since `Wrapping<T>` has the same layout as `T`,
+ /// `Wrapping<T>` has alignment 1 exactly when `T` does.
+ ///
+    /// TODO(#429): Add quotes from documentation.
+ ///
+ /// [1] TODO(https://doc.rust-lang.org/nightly/core/num/struct.Wrapping.html#layout-1):
+ /// Reference this documentation once it's available on stable.
+ ///
+ /// [2] https://doc.rust-lang.org/nomicon/other-reprs.html#reprtransparent
+ unsafe_impl!(T: TryFromBytes => TryFromBytes for Wrapping<T>; |candidate: Ptr<T>| {
+ // SAFETY:
+ // - Since `T` and `Wrapping<T>` have the same layout and bit validity
+ // and contain the same fields, `T` contains `UnsafeCell`s exactly
+ // where `Wrapping<T>` does. Thus, all memory and `UnsafeCell`
+ // preconditions of `T::is_bit_valid` hold exactly when the same
+ // preconditions for `Wrapping<T>::is_bit_valid` hold.
+ // - By the same token, since `candidate` is guaranteed to have its
+ // bytes initialized where there are always initialized bytes in
+ // `Wrapping<T>`, the same is true for `T`.
+ unsafe { T::is_bit_valid(candidate) }
+ });
+ unsafe_impl!(T: FromZeroes => FromZeroes for Wrapping<T>);
+ unsafe_impl!(T: FromBytes => FromBytes for Wrapping<T>);
+ unsafe_impl!(T: AsBytes => AsBytes for Wrapping<T>);
+ unsafe_impl!(T: Unaligned => Unaligned for Wrapping<T>);
+ assert_unaligned!(Wrapping<()>, Wrapping<u8>);
+}
+safety_comment! {
+ // `MaybeUninit<T>` is `FromZeroes` and `FromBytes`, but never `AsBytes`
+ // since it may contain uninitialized bytes.
+ //
+ /// SAFETY:
+ /// - `TryFromBytes` (with no validator), `FromZeroes`, `FromBytes`:
+ /// `MaybeUninit<T>` has no restrictions on its contents. Unfortunately,
+ /// in addition to bit validity, `TryFromBytes`, `FromZeroes` and
+ /// `FromBytes` also require that implementers contain no `UnsafeCell`s.
+ /// Thus, we require `T: Trait` in order to ensure that `T` - and thus
+    ///   `MaybeUninit<T>` - contains no `UnsafeCell`s. Thus, requiring that
+    ///   `T` implement each of these traits is sufficient.
+ /// - `Unaligned`: "MaybeUninit<T> is guaranteed to have the same size,
+ /// alignment, and ABI as T" [1]
+ ///
+ /// [1] https://doc.rust-lang.org/stable/core/mem/union.MaybeUninit.html#layout-1
+ ///
+ /// TODO(https://github.com/google/zerocopy/issues/251): If we split
+ /// `FromBytes` and `RefFromBytes`, or if we introduce a separate
+ /// `NoCell`/`Freeze` trait, we can relax the trait bounds for `FromZeroes`
+ /// and `FromBytes`.
+ unsafe_impl!(T: TryFromBytes => TryFromBytes for MaybeUninit<T>);
+ unsafe_impl!(T: FromZeroes => FromZeroes for MaybeUninit<T>);
+ unsafe_impl!(T: FromBytes => FromBytes for MaybeUninit<T>);
+ unsafe_impl!(T: Unaligned => Unaligned for MaybeUninit<T>);
+ assert_unaligned!(MaybeUninit<()>, MaybeUninit<u8>);
+}
+safety_comment! {
+ /// SAFETY:
+ /// `ManuallyDrop` has the same layout and bit validity as `T` [1], and
+ /// accessing the inner value is safe (meaning that it's unsound to leave
+ /// the inner value uninitialized while exposing the `ManuallyDrop` to safe
+ /// code).
+ /// - `FromZeroes`, `FromBytes`: Since it has the same layout as `T`, any
+ /// valid `T` is a valid `ManuallyDrop<T>`. If `T: FromZeroes`, a sequence
+ /// of zero bytes is a valid `T`, and thus a valid `ManuallyDrop<T>`. If
+ /// `T: FromBytes`, any sequence of bytes is a valid `T`, and thus a valid
+ /// `ManuallyDrop<T>`.
+ /// - `AsBytes`: Since it has the same layout as `T`, and since it's unsound
+ /// to let safe code access a `ManuallyDrop` whose inner value is
+ /// uninitialized, safe code can only ever access a `ManuallyDrop` whose
+ /// contents are a valid `T`. Since `T: AsBytes`, this means that safe
+ /// code can only ever access a `ManuallyDrop` with all initialized bytes.
+ /// - `Unaligned`: `ManuallyDrop` has the same layout (and thus alignment)
+ /// as `T`, and `T: Unaligned` guarantees that that alignment is 1.
+ ///
+    /// [1] Per https://doc.rust-lang.org/nightly/core/mem/struct.ManuallyDrop.html:
+    ///
+    ///   `ManuallyDrop<T>` is guaranteed to have the same layout and bit
+    ///   validity as `T`
+    ///
+    /// TODO(#429):
+    /// - Once [1] (added in
+    ///   https://github.com/rust-lang/rust/pull/115522) is available on stable,
+    ///   quote the stable docs instead of the nightly docs.
+ unsafe_impl!(T: ?Sized + FromZeroes => FromZeroes for ManuallyDrop<T>);
+ unsafe_impl!(T: ?Sized + FromBytes => FromBytes for ManuallyDrop<T>);
+ unsafe_impl!(T: ?Sized + AsBytes => AsBytes for ManuallyDrop<T>);
+ unsafe_impl!(T: ?Sized + Unaligned => Unaligned for ManuallyDrop<T>);
+ assert_unaligned!(ManuallyDrop<()>, ManuallyDrop<u8>);
+}
+safety_comment! {
+ /// SAFETY:
+ /// Per the reference [1]:
+ ///
+ /// An array of `[T; N]` has a size of `size_of::<T>() * N` and the same
+ /// alignment of `T`. Arrays are laid out so that the zero-based `nth`
+ /// element of the array is offset from the start of the array by `n *
+ /// size_of::<T>()` bytes.
+ ///
+ /// ...
+ ///
+ /// Slices have the same layout as the section of the array they slice.
+ ///
+ /// In other words, the layout of a `[T]` or `[T; N]` is a sequence of `T`s
+ /// laid out back-to-back with no bytes in between. Therefore, `[T]` or `[T;
+ /// N]` are `TryFromBytes`, `FromZeroes`, `FromBytes`, and `AsBytes` if `T`
+ /// is (respectively). Furthermore, since an array/slice has "the same
+ /// alignment of `T`", `[T]` and `[T; N]` are `Unaligned` if `T` is.
+ ///
+ /// Note that we don't `assert_unaligned!` for slice types because
+ /// `assert_unaligned!` uses `align_of`, which only works for `Sized` types.
+ ///
+ /// [1] https://doc.rust-lang.org/reference/type-layout.html#array-layout
+ unsafe_impl!(const N: usize, T: FromZeroes => FromZeroes for [T; N]);
+ unsafe_impl!(const N: usize, T: FromBytes => FromBytes for [T; N]);
+ unsafe_impl!(const N: usize, T: AsBytes => AsBytes for [T; N]);
+ unsafe_impl!(const N: usize, T: Unaligned => Unaligned for [T; N]);
+ assert_unaligned!([(); 0], [(); 1], [u8; 0], [u8; 1]);
+ unsafe_impl!(T: TryFromBytes => TryFromBytes for [T]; |c: Ptr<[T]>| {
+ // SAFETY: Assuming the preconditions of `is_bit_valid` are satisfied,
+ // so too will the postcondition: that, if `is_bit_valid(candidate)`
+ // returns true, `*candidate` contains a valid `Self`. Per the reference
+ // [1]:
+ //
+ // An array of `[T; N]` has a size of `size_of::<T>() * N` and the
+ // same alignment of `T`. Arrays are laid out so that the zero-based
+ // `nth` element of the array is offset from the start of the array by
+ // `n * size_of::<T>()` bytes.
+ //
+ // ...
+ //
+ // Slices have the same layout as the section of the array they slice.
+ //
+        // In other words, the layout of a `[T]` is a sequence of `T`s laid out
+ // back-to-back with no bytes in between. If all elements in `candidate`
+ // are `is_bit_valid`, so too is `candidate`.
+ //
+ // Note that any of the below calls may panic, but it would still be
+ // sound even if it did. `is_bit_valid` does not promise that it will
+ // not panic (in fact, it explicitly warns that it's a possibility), and
+ // we have not violated any safety invariants that we must fix before
+ // returning.
+ c.iter().all(|elem|
+ // SAFETY: We uphold the safety contract of `is_bit_valid(elem)`, by
+ // precondition on the surrounding call to `is_bit_valid`. The
+ // memory referenced by `elem` is contained entirely within `c`, and
+ // satisfies the preconditions satisfied by `c`. By axiom, we assume
+            // that `Iterator::all` does not invalidate these preconditions
+            // (e.g., by writing to `elem`). Since `elem` is derived from `c`,
+ // it is only possible for uninitialized bytes to occur in `elem` at
+ // the same bytes they occur within `c`.
+ unsafe { <T as TryFromBytes>::is_bit_valid(elem) }
+ )
+ });
+ unsafe_impl!(T: FromZeroes => FromZeroes for [T]);
+ unsafe_impl!(T: FromBytes => FromBytes for [T]);
+ unsafe_impl!(T: AsBytes => AsBytes for [T]);
+ unsafe_impl!(T: Unaligned => Unaligned for [T]);
+}
+safety_comment! {
+ /// SAFETY:
+ /// - `FromZeroes`: For thin pointers (note that `T: Sized`), the zero
+ /// pointer is considered "null". [1] No operations which require
+ /// provenance are legal on null pointers, so this is not a footgun.
+ ///
+ /// NOTE(#170): Implementing `FromBytes` and `AsBytes` for raw pointers
+ /// would be sound, but carries provenance footguns. We want to support
+ /// `FromBytes` and `AsBytes` for raw pointers eventually, but we are
+ /// holding off until we can figure out how to address those footguns.
+ ///
+ /// [1] TODO(https://github.com/rust-lang/rust/pull/116988): Cite the
+ /// documentation once this PR lands.
+ unsafe_impl!(T => FromZeroes for *const T);
+ unsafe_impl!(T => FromZeroes for *mut T);
+}
+
+// SIMD support
+//
+// Per the Unsafe Code Guidelines Reference [1]:
+//
+// Packed SIMD vector types are `repr(simd)` homogeneous tuple-structs
+// containing `N` elements of type `T` where `N` is a power-of-two and the
+// size and alignment requirements of `T` are equal:
+//
+// ```rust
+// #[repr(simd)]
+// struct Vector<T, N>(T_0, ..., T_(N - 1));
+// ```
+//
+// ...
+//
+// The size of `Vector` is `N * size_of::<T>()` and its alignment is an
+// implementation-defined function of `T` and `N` greater than or equal to
+// `align_of::<T>()`.
+//
+// ...
+//
+// Vector elements are laid out in source field order, enabling random access
+// to vector elements by reinterpreting the vector as an array:
+//
+// ```rust
+// union U {
+// vec: Vector<T, N>,
+// arr: [T; N]
+// }
+//
+// assert_eq!(size_of::<Vector<T, N>>(), size_of::<[T; N]>());
+// assert!(align_of::<Vector<T, N>>() >= align_of::<[T; N]>());
+//
+// unsafe {
+// let u = U { vec: Vector<T, N>(t_0, ..., t_(N - 1)) };
+//
+// assert_eq!(u.vec.0, u.arr[0]);
+// // ...
+// assert_eq!(u.vec.(N - 1), u.arr[N - 1]);
+// }
+// ```
+//
+// Given this background, we can observe that:
+// - The size and bit pattern requirements of a SIMD type match those of the
+//   corresponding array type. Thus, for any SIMD type whose primitive `T` is
+// `TryFromBytes`, `FromZeroes`, `FromBytes`, or `AsBytes`, that SIMD type is
+// also `TryFromBytes`, `FromZeroes`, `FromBytes`, or `AsBytes` respectively.
+// - Since no upper bound is placed on the alignment, no SIMD type can be
+// guaranteed to be `Unaligned`.
+//
+// Also per [1]:
+//
+// This chapter represents the consensus from issue #38. The statements in
+// here are not (yet) "guaranteed" not to change until an RFC ratifies them.
+//
+// See issue #38 [2]. While this behavior is not technically guaranteed, the
+// likelihood that the behavior will change such that SIMD types are no longer
+// `TryFromBytes`, `FromZeroes`, `FromBytes`, or `AsBytes` is next to zero, as
+// that would defeat the entire purpose of SIMD types. Nonetheless, we put this
+// behavior behind the `simd` Cargo feature, which requires consumers to opt
+// into this stability hazard.
+//
+// [1] https://rust-lang.github.io/unsafe-code-guidelines/layout/packed-simd-vectors.html
+// [2] https://github.com/rust-lang/unsafe-code-guidelines/issues/38
+#[cfg(feature = "simd")]
+#[cfg_attr(doc_cfg, doc(cfg(feature = "simd")))]
+mod simd {
+ /// Defines a module which implements `TryFromBytes`, `FromZeroes`,
+ /// `FromBytes`, and `AsBytes` for a set of types from a module in
+ /// `core::arch`.
+ ///
+    /// `$arch` is the name of the module in `core::arch` from which to draw
+    /// types, `$mod` is the name of the module defined here, and `$typ` is the
+    /// list of items from `core::arch::$arch` for which to implement
+    /// `TryFromBytes`, `FromZeroes`, `FromBytes`, and `AsBytes`.
+ #[allow(unused_macros)] // `allow(unused_macros)` is needed because some
+ // target/feature combinations don't emit any impls
+ // and thus don't use this macro.
+ macro_rules! simd_arch_mod {
+ (#[cfg $cfg:tt] $arch:ident, $mod:ident, $($typ:ident),*) => {
+ #[cfg $cfg]
+ #[cfg_attr(doc_cfg, doc(cfg $cfg))]
+ mod $mod {
+ use core::arch::$arch::{$($typ),*};
+
+ use crate::*;
+ impl_known_layout!($($typ),*);
+ safety_comment! {
+ /// SAFETY:
+ /// See comment on module definition for justification.
+ $( unsafe_impl!($typ: TryFromBytes, FromZeroes, FromBytes, AsBytes); )*
+ }
+ }
+ };
+ }
+
+ #[rustfmt::skip]
+ const _: () = {
+ simd_arch_mod!(
+ #[cfg(target_arch = "x86")]
+ x86, x86, __m128, __m128d, __m128i, __m256, __m256d, __m256i
+ );
+ simd_arch_mod!(
+ #[cfg(all(feature = "simd-nightly", target_arch = "x86"))]
+ x86, x86_nightly, __m512bh, __m512, __m512d, __m512i
+ );
+ simd_arch_mod!(
+ #[cfg(target_arch = "x86_64")]
+ x86_64, x86_64, __m128, __m128d, __m128i, __m256, __m256d, __m256i
+ );
+ simd_arch_mod!(
+ #[cfg(all(feature = "simd-nightly", target_arch = "x86_64"))]
+ x86_64, x86_64_nightly, __m512bh, __m512, __m512d, __m512i
+ );
+ simd_arch_mod!(
+ #[cfg(target_arch = "wasm32")]
+ wasm32, wasm32, v128
+ );
+ simd_arch_mod!(
+ #[cfg(all(feature = "simd-nightly", target_arch = "powerpc"))]
+ powerpc, powerpc, vector_bool_long, vector_double, vector_signed_long, vector_unsigned_long
+ );
+ simd_arch_mod!(
+ #[cfg(all(feature = "simd-nightly", target_arch = "powerpc64"))]
+ powerpc64, powerpc64, vector_bool_long, vector_double, vector_signed_long, vector_unsigned_long
+ );
+ simd_arch_mod!(
+ #[cfg(target_arch = "aarch64")]
+ aarch64, aarch64, float32x2_t, float32x4_t, float64x1_t, float64x2_t, int8x8_t, int8x8x2_t,
+ int8x8x3_t, int8x8x4_t, int8x16_t, int8x16x2_t, int8x16x3_t, int8x16x4_t, int16x4_t,
+ int16x8_t, int32x2_t, int32x4_t, int64x1_t, int64x2_t, poly8x8_t, poly8x8x2_t, poly8x8x3_t,
+ poly8x8x4_t, poly8x16_t, poly8x16x2_t, poly8x16x3_t, poly8x16x4_t, poly16x4_t, poly16x8_t,
+ poly64x1_t, poly64x2_t, uint8x8_t, uint8x8x2_t, uint8x8x3_t, uint8x8x4_t, uint8x16_t,
+ uint8x16x2_t, uint8x16x3_t, uint8x16x4_t, uint16x4_t, uint16x8_t, uint32x2_t, uint32x4_t,
+ uint64x1_t, uint64x2_t
+ );
+ simd_arch_mod!(
+ #[cfg(all(feature = "simd-nightly", target_arch = "arm"))]
+ arm, arm, int8x4_t, uint8x4_t
+ );
+ };
+}
+
+/// Safely transmutes a value of one type to a value of another type of the same
+/// size.
+///
+/// The expression `$e` must have a concrete type, `T`, which implements
+/// `AsBytes`. The `transmute!` expression must also have a concrete type, `U`
+/// (`U` is inferred from the calling context), and `U` must implement
+/// `FromBytes`.
+///
+/// Note that the `T` produced by the expression `$e` will *not* be dropped.
+/// Semantically, its bits will be copied into a new value of type `U`, the
+/// original `T` will be forgotten, and the value of type `U` will be returned.
+///
+/// # Examples
+///
+/// ```
+/// # use zerocopy::transmute;
+/// let one_dimensional: [u8; 8] = [0, 1, 2, 3, 4, 5, 6, 7];
+///
+/// let two_dimensional: [[u8; 4]; 2] = transmute!(one_dimensional);
+///
+/// assert_eq!(two_dimensional, [[0, 1, 2, 3], [4, 5, 6, 7]]);
+/// ```
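+///
+/// A second, minimal sketch of the same mechanics, assuming only the `u32:
+/// FromBytes` and `[u8; 4]: AsBytes` impls provided by this crate: the four
+/// bytes are copied bit-for-bit into a native-endian `u32`.
+///
+/// ```
+/// # use zerocopy::transmute;
+/// let bytes: [u8; 4] = [0x12, 0x34, 0x56, 0x78];
+/// let word: u32 = transmute!(bytes);
+/// assert_eq!(word, u32::from_ne_bytes([0x12, 0x34, 0x56, 0x78]));
+/// ```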
+#[macro_export]
+macro_rules! transmute {
+ ($e:expr) => {{
+ // NOTE: This must be a macro (rather than a function with trait bounds)
+ // because there's no way, in a generic context, to enforce that two
+ // types have the same size. `core::mem::transmute` uses compiler magic
+ // to enforce this so long as the types are concrete.
+
+ let e = $e;
+ if false {
+ // This branch, though never taken, ensures that the type of `e` is
+ // `AsBytes` and that the type of this macro invocation expression
+ // is `FromBytes`.
+
+ struct AssertIsAsBytes<T: $crate::AsBytes>(T);
+ let _ = AssertIsAsBytes(e);
+
+ struct AssertIsFromBytes<U: $crate::FromBytes>(U);
+ #[allow(unused, unreachable_code)]
+ let u = AssertIsFromBytes(loop {});
+ u.0
+ } else {
+ // SAFETY: `core::mem::transmute` ensures that the type of `e` and
+ // the type of this macro invocation expression have the same size.
+ // We know this transmute is safe thanks to the `AsBytes` and
+ // `FromBytes` bounds enforced by the `false` branch.
+ //
+ // We use this reexport of `core::mem::transmute` because we know it
+ // will always be available for crates which are using the 2015
+ // edition of Rust. By contrast, if we were to use
+ // `std::mem::transmute`, this macro would not work for such crates
+ // in `no_std` contexts, and if we were to use
+ // `core::mem::transmute`, this macro would not work in `std`
+ // contexts in which `core` was not manually imported. This is not a
+ // problem for 2018 edition crates.
+ unsafe {
+ // Clippy: It's okay to transmute a type to itself.
+ #[allow(clippy::useless_transmute)]
+ $crate::macro_util::core_reexport::mem::transmute(e)
+ }
+ }
+ }}
+}
+
+/// Safely transmutes a mutable or immutable reference of one type to an
+/// immutable reference of another type of the same size.
+///
+/// The expression `$e` must have a concrete type, `&T` or `&mut T`, where `T:
+/// Sized + AsBytes`. The `transmute_ref!` expression must also have a concrete
+/// type, `&U` (`U` is inferred from the calling context), where `U: Sized +
+/// FromBytes`. It must be the case that `align_of::<T>() >= align_of::<U>()`.
+///
+/// The lifetime of the input type, `&T` or `&mut T`, must be the same as or
+/// outlive the lifetime of the output type, `&U`.
+///
+/// # Examples
+///
+/// ```
+/// # use zerocopy::transmute_ref;
+/// let one_dimensional: [u8; 8] = [0, 1, 2, 3, 4, 5, 6, 7];
+///
+/// let two_dimensional: &[[u8; 4]; 2] = transmute_ref!(&one_dimensional);
+///
+/// assert_eq!(two_dimensional, &[[0, 1, 2, 3], [4, 5, 6, 7]]);
+/// ```
+///
+/// # Alignment increase error message
+///
+/// Because of limitations on macros, the error message generated when
+/// `transmute_ref!` is used to transmute from a type of lower alignment to a
+/// type of higher alignment is somewhat confusing. For example, the following
+/// code:
+///
+/// ```compile_fail
+/// const INCREASE_ALIGNMENT: &u16 = zerocopy::transmute_ref!(&[0u8; 2]);
+/// ```
+///
+/// ...generates the following error:
+///
+/// ```text
+/// error[E0512]: cannot transmute between types of different sizes, or dependently-sized types
+/// --> src/lib.rs:1524:34
+/// |
+/// 5 | const INCREASE_ALIGNMENT: &u16 = zerocopy::transmute_ref!(&[0u8; 2]);
+/// | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+/// |
+/// = note: source type: `AlignOf<[u8; 2]>` (8 bits)
+/// = note: target type: `MaxAlignsOf<[u8; 2], u16>` (16 bits)
+/// = note: this error originates in the macro `$crate::assert_align_gt_eq` which comes from the expansion of the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info)
+/// ```
+///
+/// This is saying that `max(align_of::<T>(), align_of::<U>()) !=
+/// align_of::<T>()`, which is equivalent to `align_of::<T>() <
+/// align_of::<U>()`.
+#[macro_export]
+macro_rules! transmute_ref {
+ ($e:expr) => {{
+ // NOTE: This must be a macro (rather than a function with trait bounds)
+ // because there's no way, in a generic context, to enforce that two
+ // types have the same size or alignment.
+
+ // Ensure that the source type is a reference or a mutable reference
+ // (note that mutable references are implicitly reborrowed here).
+ let e: &_ = $e;
+
+ #[allow(unused, clippy::diverging_sub_expression)]
+ if false {
+ // This branch, though never taken, ensures that the type of `e` is
+ // `&T` where `T: 't + Sized + AsBytes`, that the type of this macro
+ // expression is `&U` where `U: 'u + Sized + FromBytes`, and that
+ // `'t` outlives `'u`.
+
+ struct AssertIsAsBytes<'a, T: ::core::marker::Sized + $crate::AsBytes>(&'a T);
+ let _ = AssertIsAsBytes(e);
+
+ struct AssertIsFromBytes<'a, U: ::core::marker::Sized + $crate::FromBytes>(&'a U);
+ #[allow(unused, unreachable_code)]
+ let u = AssertIsFromBytes(loop {});
+ u.0
+ } else if false {
+ // This branch, though never taken, ensures that `size_of::<T>() ==
+            // size_of::<U>()` and that `align_of::<T>() >=
+ // align_of::<U>()`.
+
+ // `t` is inferred to have type `T` because it's assigned to `e` (of
+ // type `&T`) as `&t`.
+ let mut t = unreachable!();
+ e = &t;
+
+ // `u` is inferred to have type `U` because it's used as `&u` as the
+ // value returned from this branch.
+ let u;
+
+ $crate::assert_size_eq!(t, u);
+ $crate::assert_align_gt_eq!(t, u);
+
+ &u
+ } else {
+ // SAFETY: For source type `Src` and destination type `Dst`:
+ // - We know that `Src: AsBytes` and `Dst: FromBytes` thanks to the
+ // uses of `AssertIsAsBytes` and `AssertIsFromBytes` above.
+ // - We know that `size_of::<Src>() == size_of::<Dst>()` thanks to
+ // the use of `assert_size_eq!` above.
+ // - We know that `align_of::<Src>() >= align_of::<Dst>()` thanks to
+ // the use of `assert_align_gt_eq!` above.
+ unsafe { $crate::macro_util::transmute_ref(e) }
+ }
+ }}
+}
+
+/// Safely transmutes a mutable reference of one type to a mutable reference of
+/// another type of the same size.
+///
+/// The expression `$e` must have a concrete type, `&mut T`, where `T: Sized +
+/// AsBytes`. The `transmute_mut!` expression must also have a concrete type,
+/// `&mut U` (`U` is inferred from the calling context), where `U: Sized +
+/// FromBytes`. It must be the case that `align_of::<T>() >= align_of::<U>()`.
+///
+/// The lifetime of the input type, `&mut T`, must be the same as or outlive the
+/// lifetime of the output type, `&mut U`.
+///
+/// # Examples
+///
+/// ```
+/// # use zerocopy::transmute_mut;
+/// let mut one_dimensional: [u8; 8] = [0, 1, 2, 3, 4, 5, 6, 7];
+///
+/// let two_dimensional: &mut [[u8; 4]; 2] = transmute_mut!(&mut one_dimensional);
+///
+/// assert_eq!(two_dimensional, &[[0, 1, 2, 3], [4, 5, 6, 7]]);
+///
+/// two_dimensional.reverse();
+///
+/// assert_eq!(one_dimensional, [4, 5, 6, 7, 0, 1, 2, 3]);
+/// ```
+///
+/// # Alignment increase error message
+///
+/// Because of limitations on macros, the error message generated when
+/// `transmute_mut!` is used to transmute from a type of lower alignment to a
+/// type of higher alignment is somewhat confusing. For example, the following
+/// code:
+///
+/// ```compile_fail
+/// const INCREASE_ALIGNMENT: &mut u16 = zerocopy::transmute_mut!(&mut [0u8; 2]);
+/// ```
+///
+/// ...generates the following error:
+///
+/// ```text
+/// error[E0512]: cannot transmute between types of different sizes, or dependently-sized types
+/// --> src/lib.rs:1524:34
+/// |
+/// 5 | const INCREASE_ALIGNMENT: &mut u16 = zerocopy::transmute_mut!(&mut [0u8; 2]);
+/// | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+/// |
+/// = note: source type: `AlignOf<[u8; 2]>` (8 bits)
+/// = note: target type: `MaxAlignsOf<[u8; 2], u16>` (16 bits)
+/// = note: this error originates in the macro `$crate::assert_align_gt_eq` which comes from the expansion of the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info)
+/// ```
+///
+/// This is saying that `max(align_of::<T>(), align_of::<U>()) !=
+/// align_of::<T>()`, which is equivalent to `align_of::<T>() <
+/// align_of::<U>()`.
+#[macro_export]
+macro_rules! transmute_mut {
+ ($e:expr) => {{
+ // NOTE: This must be a macro (rather than a function with trait bounds)
+ // because there's no way, in a generic context, to enforce that two
+ // types have the same size or alignment.
+
+ // Ensure that the source type is a mutable reference.
+ let e: &mut _ = $e;
+
+ #[allow(unused, clippy::diverging_sub_expression)]
+ if false {
+ // This branch, though never taken, ensures that the type of `e` is
+        // `&mut T` where `T: 't + Sized + FromBytes + AsBytes`, and that the
+ // type of this macro expression is `&mut U` where `U: 'u + Sized +
+ // FromBytes + AsBytes`.
+
+ // We use immutable references here rather than mutable so that, if
+ // this macro is used in a const context (in which, as of this
+ // writing, mutable references are banned), the error message
+ // appears to originate in the user's code rather than in the
+ // internals of this macro.
+ struct AssertSrcIsFromBytes<'a, T: ::core::marker::Sized + $crate::FromBytes>(&'a T);
+ struct AssertSrcIsAsBytes<'a, T: ::core::marker::Sized + $crate::AsBytes>(&'a T);
+ struct AssertDstIsFromBytes<'a, T: ::core::marker::Sized + $crate::FromBytes>(&'a T);
+ struct AssertDstIsAsBytes<'a, T: ::core::marker::Sized + $crate::AsBytes>(&'a T);
+
+ if true {
+ let _ = AssertSrcIsFromBytes(&*e);
+ } else {
+ let _ = AssertSrcIsAsBytes(&*e);
+ }
+
+ if true {
+ #[allow(unused, unreachable_code)]
+ let u = AssertDstIsFromBytes(loop {});
+ &mut *u.0
+ } else {
+ #[allow(unused, unreachable_code)]
+ let u = AssertDstIsAsBytes(loop {});
+ &mut *u.0
+ }
+ } else if false {
+ // This branch, though never taken, ensures that `size_of::<T>() ==
+            // size_of::<U>()` and that `align_of::<T>() >=
+ // align_of::<U>()`.
+
+ // `t` is inferred to have type `T` because it's assigned to `e` (of
+ // type `&mut T`) as `&mut t`.
+ let mut t = unreachable!();
+ e = &mut t;
+
+ // `u` is inferred to have type `U` because it's used as `&mut u` as
+ // the value returned from this branch.
+ let u;
+
+ $crate::assert_size_eq!(t, u);
+ $crate::assert_align_gt_eq!(t, u);
+
+ &mut u
+ } else {
+ // SAFETY: For source type `Src` and destination type `Dst`:
+ // - We know that `Src: FromBytes + AsBytes` and `Dst: FromBytes +
+ // AsBytes` thanks to the uses of `AssertSrcIsFromBytes`,
+ // `AssertSrcIsAsBytes`, `AssertDstIsFromBytes`, and
+ // `AssertDstIsAsBytes` above.
+ // - We know that `size_of::<Src>() == size_of::<Dst>()` thanks to
+ // the use of `assert_size_eq!` above.
+ // - We know that `align_of::<Src>() >= align_of::<Dst>()` thanks to
+ // the use of `assert_align_gt_eq!` above.
+ unsafe { $crate::macro_util::transmute_mut(e) }
+ }
+ }}
+}
+
+/// Includes a file and safely transmutes it to a value of an arbitrary type.
+///
+/// The file will be included as a byte array, `[u8; N]`, which will be
+/// transmuted to another type, `T`. `T` is inferred from the calling context,
+/// and must implement [`FromBytes`].
+///
+/// The file is located relative to the current file (similarly to how modules
+/// are found). The provided path is interpreted in a platform-specific way at
+/// compile time. So, for instance, an invocation with a Windows path containing
+/// backslashes `\` would not compile correctly on Unix.
+///
+/// `include_value!` is ignorant of byte order. For byte order-aware types, see
+/// the [`byteorder`] module.
+///
+/// # Examples
+///
+/// Assume there are two files in the same directory with the following
+/// contents:
+///
+/// File `data` (no trailing newline):
+///
+/// ```text
+/// abcd
+/// ```
+///
+/// File `main.rs`:
+///
+/// ```rust
+/// use zerocopy::include_value;
+/// # macro_rules! include_value {
+/// # ($file:expr) => { zerocopy::include_value!(concat!("../testdata/include_value/", $file)) };
+/// # }
+///
+/// fn main() {
+/// let as_u32: u32 = include_value!("data");
+/// assert_eq!(as_u32, u32::from_ne_bytes([b'a', b'b', b'c', b'd']));
+/// let as_i32: i32 = include_value!("data");
+/// assert_eq!(as_i32, i32::from_ne_bytes([b'a', b'b', b'c', b'd']));
+/// }
+/// ```
+#[doc(alias("include_bytes", "include_data", "include_type"))]
+#[macro_export]
+macro_rules! include_value {
+ ($file:expr $(,)?) => {
+ $crate::transmute!(*::core::include_bytes!($file))
+ };
+}
+
+/// A typed reference derived from a byte slice.
+///
+/// A `Ref<B, T>` is a reference to a `T` which is stored in a byte slice, `B`.
+/// Unlike a native reference (`&T` or `&mut T`), `Ref<B, T>` has the same
+/// mutability as the byte slice it was constructed from (`B`).
+///
+/// # Examples
+///
+/// `Ref` can be used to treat a sequence of bytes as a structured type, and to
+/// read and write the fields of that type as if the byte slice reference were
+/// simply a reference to that type.
+///
+/// ```rust
+/// # #[cfg(feature = "derive")] { // This example uses derives, and won't compile without them
+/// use zerocopy::{AsBytes, ByteSlice, ByteSliceMut, FromBytes, FromZeroes, Ref, Unaligned};
+///
+/// #[derive(FromZeroes, FromBytes, AsBytes, Unaligned)]
+/// #[repr(C)]
+/// struct UdpHeader {
+/// src_port: [u8; 2],
+/// dst_port: [u8; 2],
+/// length: [u8; 2],
+/// checksum: [u8; 2],
+/// }
+///
+/// struct UdpPacket<B> {
+/// header: Ref<B, UdpHeader>,
+/// body: B,
+/// }
+///
+/// impl<B: ByteSlice> UdpPacket<B> {
+/// pub fn parse(bytes: B) -> Option<UdpPacket<B>> {
+/// let (header, body) = Ref::new_unaligned_from_prefix(bytes)?;
+/// Some(UdpPacket { header, body })
+/// }
+///
+/// pub fn get_src_port(&self) -> [u8; 2] {
+/// self.header.src_port
+/// }
+/// }
+///
+/// impl<B: ByteSliceMut> UdpPacket<B> {
+/// pub fn set_src_port(&mut self, src_port: [u8; 2]) {
+/// self.header.src_port = src_port;
+/// }
+/// }
+/// # }
+/// ```
+pub struct Ref<B, T: ?Sized>(B, PhantomData<T>);
+
+/// Deprecated: prefer [`Ref`] instead.
+#[deprecated(since = "0.7.0", note = "LayoutVerified has been renamed to Ref")]
+#[doc(hidden)]
+pub type LayoutVerified<B, T> = Ref<B, T>;
+
+impl<B, T> Ref<B, T>
+where
+ B: ByteSlice,
+{
+ /// Constructs a new `Ref`.
+ ///
+ /// `new` verifies that `bytes.len() == size_of::<T>()` and that `bytes` is
+ /// aligned to `align_of::<T>()`, and constructs a new `Ref`. If either of
+ /// these checks fail, it returns `None`.
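+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch using the alignment-1 `U32` wrapper from the
+    /// [`byteorder`] module, so that only the length check can fail:
+    ///
+    /// ```
+    /// use zerocopy::Ref;
+    /// use zerocopy::byteorder::native_endian::U32;
+    ///
+    /// let bytes = [1u8, 0, 0, 0];
+    /// // Exactly `size_of::<U32>()` bytes, and `U32` has alignment 1.
+    /// let r: Ref<&[u8], U32> = Ref::new(&bytes[..]).unwrap();
+    /// assert_eq!(r.read().get(), u32::from_ne_bytes([1, 0, 0, 0]));
+    /// ```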
+ #[inline]
+ pub fn new(bytes: B) -> Option<Ref<B, T>> {
+ if bytes.len() != mem::size_of::<T>() || !util::aligned_to::<_, T>(bytes.deref()) {
+ return None;
+ }
+ Some(Ref(bytes, PhantomData))
+ }
+
+ /// Constructs a new `Ref` from the prefix of a byte slice.
+ ///
+ /// `new_from_prefix` verifies that `bytes.len() >= size_of::<T>()` and that
+ /// `bytes` is aligned to `align_of::<T>()`. It consumes the first
+ /// `size_of::<T>()` bytes from `bytes` to construct a `Ref`, and returns
+ /// the remaining bytes to the caller. If either the length or alignment
+ /// checks fail, it returns `None`.
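+    ///
+    /// # Examples
+    ///
+    /// A sketch parsing a 2-byte network-endian length field off the front of
+    /// a buffer (using the alignment-1 `U16` from [`byteorder`]):
+    ///
+    /// ```
+    /// use zerocopy::Ref;
+    /// use zerocopy::byteorder::network_endian::U16;
+    ///
+    /// let bytes = [0u8, 5, b'h', b'e', b'l', b'l', b'o'];
+    /// let (len, body) = Ref::<_, U16>::new_from_prefix(&bytes[..]).unwrap();
+    /// assert_eq!(len.get(), 5);
+    /// assert_eq!(body, &b"hello"[..]);
+    /// ```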
+ #[inline]
+ pub fn new_from_prefix(bytes: B) -> Option<(Ref<B, T>, B)> {
+ if bytes.len() < mem::size_of::<T>() || !util::aligned_to::<_, T>(bytes.deref()) {
+ return None;
+ }
+ let (bytes, suffix) = bytes.split_at(mem::size_of::<T>());
+ Some((Ref(bytes, PhantomData), suffix))
+ }
+
+ /// Constructs a new `Ref` from the suffix of a byte slice.
+ ///
+ /// `new_from_suffix` verifies that `bytes.len() >= size_of::<T>()` and that
+ /// the last `size_of::<T>()` bytes of `bytes` are aligned to
+ /// `align_of::<T>()`. It consumes the last `size_of::<T>()` bytes from
+ /// `bytes` to construct a `Ref`, and returns the preceding bytes to the
+ /// caller. If either the length or alignment checks fail, it returns
+ /// `None`.
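+    ///
+    /// # Examples
+    ///
+    /// A sketch splitting a trailing network-endian checksum off the end of a
+    /// buffer:
+    ///
+    /// ```
+    /// use zerocopy::Ref;
+    /// use zerocopy::byteorder::network_endian::U16;
+    ///
+    /// let bytes = [b'h', b'i', 0u8, 7];
+    /// let (body, checksum) = Ref::<_, U16>::new_from_suffix(&bytes[..]).unwrap();
+    /// assert_eq!(body, &b"hi"[..]);
+    /// assert_eq!(checksum.get(), 7);
+    /// ```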
+ #[inline]
+ pub fn new_from_suffix(bytes: B) -> Option<(B, Ref<B, T>)> {
+ let bytes_len = bytes.len();
+ let split_at = bytes_len.checked_sub(mem::size_of::<T>())?;
+ let (prefix, bytes) = bytes.split_at(split_at);
+ if !util::aligned_to::<_, T>(bytes.deref()) {
+ return None;
+ }
+ Some((prefix, Ref(bytes, PhantomData)))
+ }
+}
+
+impl<B, T> Ref<B, [T]>
+where
+ B: ByteSlice,
+{
+ /// Constructs a new `Ref` of a slice type.
+ ///
+ /// `new_slice` verifies that `bytes.len()` is a multiple of
+ /// `size_of::<T>()` and that `bytes` is aligned to `align_of::<T>()`, and
+ /// constructs a new `Ref`. If either of these checks fail, it returns
+ /// `None`.
+ ///
+ /// # Panics
+ ///
+ /// `new_slice` panics if `T` is a zero-sized type.
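+    ///
+    /// # Examples
+    ///
+    /// A sketch viewing a buffer as a slice of network-endian `U16`s:
+    ///
+    /// ```
+    /// use zerocopy::Ref;
+    /// use zerocopy::byteorder::network_endian::U16;
+    ///
+    /// let bytes = [0u8, 1, 0, 2, 0, 3];
+    /// let words: Ref<&[u8], [U16]> = Ref::new_slice(&bytes[..]).unwrap();
+    /// let values: Vec<u16> = words.iter().map(|w| w.get()).collect();
+    /// assert_eq!(values, [1, 2, 3]);
+    /// ```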
+ #[inline]
+ pub fn new_slice(bytes: B) -> Option<Ref<B, [T]>> {
+ let remainder = bytes
+ .len()
+ .checked_rem(mem::size_of::<T>())
+ .expect("Ref::new_slice called on a zero-sized type");
+ if remainder != 0 || !util::aligned_to::<_, T>(bytes.deref()) {
+ return None;
+ }
+ Some(Ref(bytes, PhantomData))
+ }
+
+ /// Constructs a new `Ref` of a slice type from the prefix of a byte slice.
+ ///
+ /// `new_slice_from_prefix` verifies that `bytes.len() >= size_of::<T>() *
+ /// count` and that `bytes` is aligned to `align_of::<T>()`. It consumes the
+ /// first `size_of::<T>() * count` bytes from `bytes` to construct a `Ref`,
+ /// and returns the remaining bytes to the caller. It also ensures that
+    /// `size_of::<T>() * count` does not overflow a `usize`. If any of the
+ /// length, alignment, or overflow checks fail, it returns `None`.
+ ///
+ /// # Panics
+ ///
+ /// `new_slice_from_prefix` panics if `T` is a zero-sized type.
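+    ///
+    /// # Examples
+    ///
+    /// A sketch taking a two-element run of network-endian `U16`s off the
+    /// front of a buffer and returning the rest:
+    ///
+    /// ```
+    /// use zerocopy::Ref;
+    /// use zerocopy::byteorder::network_endian::U16;
+    ///
+    /// let bytes = [0u8, 1, 0, 2, 0xFF];
+    /// let (words, rest) = Ref::<_, [U16]>::new_slice_from_prefix(&bytes[..], 2).unwrap();
+    /// assert_eq!(words.len(), 2);
+    /// assert_eq!(rest, &[0xFFu8][..]);
+    /// ```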
+ #[inline]
+ pub fn new_slice_from_prefix(bytes: B, count: usize) -> Option<(Ref<B, [T]>, B)> {
+ let expected_len = match mem::size_of::<T>().checked_mul(count) {
+ Some(len) => len,
+ None => return None,
+ };
+ if bytes.len() < expected_len {
+ return None;
+ }
+ let (prefix, bytes) = bytes.split_at(expected_len);
+ Self::new_slice(prefix).map(move |l| (l, bytes))
+ }
+
+ /// Constructs a new `Ref` of a slice type from the suffix of a byte slice.
+ ///
+ /// `new_slice_from_suffix` verifies that `bytes.len() >= size_of::<T>() *
+ /// count` and that `bytes` is aligned to `align_of::<T>()`. It consumes the
+ /// last `size_of::<T>() * count` bytes from `bytes` to construct a `Ref`,
+ /// and returns the preceding bytes to the caller. It also ensures that
+    /// `size_of::<T>() * count` does not overflow a `usize`. If any of the
+ /// length, alignment, or overflow checks fail, it returns `None`.
+ ///
+ /// # Panics
+ ///
+ /// `new_slice_from_suffix` panics if `T` is a zero-sized type.
+ #[inline]
+ pub fn new_slice_from_suffix(bytes: B, count: usize) -> Option<(B, Ref<B, [T]>)> {
+ let expected_len = match mem::size_of::<T>().checked_mul(count) {
+ Some(len) => len,
+ None => return None,
+ };
+ let split_at = bytes.len().checked_sub(expected_len)?;
+ let (bytes, suffix) = bytes.split_at(split_at);
+ Self::new_slice(suffix).map(move |l| (bytes, l))
+ }
+}
+
+fn map_zeroed<B: ByteSliceMut, T: ?Sized>(opt: Option<Ref<B, T>>) -> Option<Ref<B, T>> {
+ match opt {
+ Some(mut r) => {
+ r.0.fill(0);
+ Some(r)
+ }
+ None => None,
+ }
+}
+
+fn map_prefix_tuple_zeroed<B: ByteSliceMut, T: ?Sized>(
+ opt: Option<(Ref<B, T>, B)>,
+) -> Option<(Ref<B, T>, B)> {
+ match opt {
+ Some((mut r, rest)) => {
+ r.0.fill(0);
+ Some((r, rest))
+ }
+ None => None,
+ }
+}
+
+fn map_suffix_tuple_zeroed<B: ByteSliceMut, T: ?Sized>(
+ opt: Option<(B, Ref<B, T>)>,
+) -> Option<(B, Ref<B, T>)> {
+ map_prefix_tuple_zeroed(opt.map(|(a, b)| (b, a))).map(|(a, b)| (b, a))
+}
+
+impl<B, T> Ref<B, T>
+where
+ B: ByteSliceMut,
+{
+ /// Constructs a new `Ref` after zeroing the bytes.
+ ///
+ /// `new_zeroed` verifies that `bytes.len() == size_of::<T>()` and that
+ /// `bytes` is aligned to `align_of::<T>()`, and constructs a new `Ref`. If
+ /// either of these checks fail, it returns `None`.
+ ///
+ /// If the checks succeed, then `bytes` will be initialized to zero. This
+ /// can be useful when re-using buffers to ensure that sensitive data
+ /// previously stored in the buffer is not leaked.
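+    ///
+    /// # Examples
+    ///
+    /// A sketch re-using a buffer while scrubbing its previous contents:
+    ///
+    /// ```
+    /// use zerocopy::Ref;
+    /// use zerocopy::byteorder::native_endian::U32;
+    ///
+    /// let mut buf = [0xAAu8; 4]; // stale, possibly sensitive bytes
+    /// let r: Ref<&mut [u8], U32> = Ref::new_zeroed(&mut buf[..]).unwrap();
+    /// assert_eq!(r.read().get(), 0);
+    /// assert_eq!(buf, [0, 0, 0, 0]);
+    /// ```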
+ #[inline(always)]
+ pub fn new_zeroed(bytes: B) -> Option<Ref<B, T>> {
+ map_zeroed(Self::new(bytes))
+ }
+
+ /// Constructs a new `Ref` from the prefix of a byte slice, zeroing the
+ /// prefix.
+ ///
+ /// `new_from_prefix_zeroed` verifies that `bytes.len() >= size_of::<T>()`
+ /// and that `bytes` is aligned to `align_of::<T>()`. It consumes the first
+ /// `size_of::<T>()` bytes from `bytes` to construct a `Ref`, and returns
+ /// the remaining bytes to the caller. If either the length or alignment
+ /// checks fail, it returns `None`.
+ ///
+ /// If the checks succeed, then the prefix which is consumed will be
+ /// initialized to zero. This can be useful when re-using buffers to ensure
+ /// that sensitive data previously stored in the buffer is not leaked.
+ #[inline(always)]
+ pub fn new_from_prefix_zeroed(bytes: B) -> Option<(Ref<B, T>, B)> {
+ map_prefix_tuple_zeroed(Self::new_from_prefix(bytes))
+ }
+
+ /// Constructs a new `Ref` from the suffix of a byte slice, zeroing the
+ /// suffix.
+ ///
+ /// `new_from_suffix_zeroed` verifies that `bytes.len() >= size_of::<T>()`
+ /// and that the last `size_of::<T>()` bytes of `bytes` are aligned to
+ /// `align_of::<T>()`. It consumes the last `size_of::<T>()` bytes from
+ /// `bytes` to construct a `Ref`, and returns the preceding bytes to the
+ /// caller. If either the length or alignment checks fail, it returns
+ /// `None`.
+ ///
+ /// If the checks succeed, then the suffix which is consumed will be
+ /// initialized to zero. This can be useful when re-using buffers to ensure
+ /// that sensitive data previously stored in the buffer is not leaked.
+ #[inline(always)]
+ pub fn new_from_suffix_zeroed(bytes: B) -> Option<(B, Ref<B, T>)> {
+ map_suffix_tuple_zeroed(Self::new_from_suffix(bytes))
+ }
+}
+
+impl<B, T> Ref<B, [T]>
+where
+ B: ByteSliceMut,
+{
+ /// Constructs a new `Ref` of a slice type after zeroing the bytes.
+ ///
+ /// `new_slice_zeroed` verifies that `bytes.len()` is a multiple of
+ /// `size_of::<T>()` and that `bytes` is aligned to `align_of::<T>()`, and
+ /// constructs a new `Ref`. If either of these checks fail, it returns
+ /// `None`.
+ ///
+ /// If the checks succeed, then `bytes` will be initialized to zero. This
+ /// can be useful when re-using buffers to ensure that sensitive data
+ /// previously stored in the buffer is not leaked.
+ ///
+ /// # Panics
+ ///
+    /// `new_slice_zeroed` panics if `T` is a zero-sized type.
+ #[inline(always)]
+ pub fn new_slice_zeroed(bytes: B) -> Option<Ref<B, [T]>> {
+ map_zeroed(Self::new_slice(bytes))
+ }
+
+ /// Constructs a new `Ref` of a slice type from the prefix of a byte slice,
+ /// after zeroing the bytes.
+ ///
+    /// `new_slice_from_prefix_zeroed` verifies that `bytes.len() >=
+    /// size_of::<T>() * count` and that `bytes` is aligned to
+    /// `align_of::<T>()`. It consumes the first `size_of::<T>() * count` bytes
+    /// from `bytes` to construct a `Ref`, and returns the remaining bytes to
+    /// the caller. It also ensures that `size_of::<T>() * count` does not
+    /// overflow a `usize`. If any of the length, alignment, or overflow checks
+    /// fail, it returns `None`.
+ ///
+    /// If the checks succeed, then the prefix which is consumed will be
+ /// initialized to zero. This can be useful when re-using buffers to ensure
+ /// that sensitive data previously stored in the buffer is not leaked.
+ ///
+ /// # Panics
+ ///
+ /// `new_slice_from_prefix_zeroed` panics if `T` is a zero-sized type.
+ #[inline(always)]
+ pub fn new_slice_from_prefix_zeroed(bytes: B, count: usize) -> Option<(Ref<B, [T]>, B)> {
+ map_prefix_tuple_zeroed(Self::new_slice_from_prefix(bytes, count))
+ }
+
+    /// Constructs a new `Ref` of a slice type from the suffix of a byte slice,
+ /// after zeroing the bytes.
+ ///
+    /// `new_slice_from_suffix_zeroed` verifies that `bytes.len() >=
+    /// size_of::<T>() * count` and that `bytes` is aligned to
+    /// `align_of::<T>()`. It consumes the last `size_of::<T>() * count` bytes
+    /// from `bytes` to construct a `Ref`, and returns the preceding bytes to
+    /// the caller. It also ensures that `size_of::<T>() * count` does not
+    /// overflow a `usize`. If any of the length, alignment, or overflow checks
+    /// fail, it returns `None`.
+ ///
+ /// If the checks succeed, then the consumed suffix will be initialized to
+ /// zero. This can be useful when re-using buffers to ensure that sensitive
+ /// data previously stored in the buffer is not leaked.
+ ///
+ /// # Panics
+ ///
+ /// `new_slice_from_suffix_zeroed` panics if `T` is a zero-sized type.
+ #[inline(always)]
+ pub fn new_slice_from_suffix_zeroed(bytes: B, count: usize) -> Option<(B, Ref<B, [T]>)> {
+ map_suffix_tuple_zeroed(Self::new_slice_from_suffix(bytes, count))
+ }
+}
+
+impl<B, T> Ref<B, T>
+where
+ B: ByteSlice,
+ T: Unaligned,
+{
+ /// Constructs a new `Ref` for a type with no alignment requirement.
+ ///
+ /// `new_unaligned` verifies that `bytes.len() == size_of::<T>()` and
+ /// constructs a new `Ref`. If the check fails, it returns `None`.
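+    ///
+    /// # Examples
+    ///
+    /// A sketch with `[u8; 4]`, which is `Unaligned`, so no alignment check is
+    /// needed:
+    ///
+    /// ```
+    /// use zerocopy::Ref;
+    ///
+    /// let bytes = [0u8, 1, 2, 3];
+    /// let r: Ref<&[u8], [u8; 4]> = Ref::new_unaligned(&bytes[..]).unwrap();
+    /// assert_eq!(*r, [0, 1, 2, 3]);
+    /// ```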
+ #[inline(always)]
+ pub fn new_unaligned(bytes: B) -> Option<Ref<B, T>> {
+ Ref::new(bytes)
+ }
+
+ /// Constructs a new `Ref` from the prefix of a byte slice for a type with
+ /// no alignment requirement.
+ ///
+ /// `new_unaligned_from_prefix` verifies that `bytes.len() >=
+ /// size_of::<T>()`. It consumes the first `size_of::<T>()` bytes from
+ /// `bytes` to construct a `Ref`, and returns the remaining bytes to the
+ /// caller. If the length check fails, it returns `None`.
+ #[inline(always)]
+ pub fn new_unaligned_from_prefix(bytes: B) -> Option<(Ref<B, T>, B)> {
+ Ref::new_from_prefix(bytes)
+ }
+
+ /// Constructs a new `Ref` from the suffix of a byte slice for a type with
+ /// no alignment requirement.
+ ///
+ /// `new_unaligned_from_suffix` verifies that `bytes.len() >=
+ /// size_of::<T>()`. It consumes the last `size_of::<T>()` bytes from
+ /// `bytes` to construct a `Ref`, and returns the preceding bytes to the
+ /// caller. If the length check fails, it returns `None`.
+ #[inline(always)]
+ pub fn new_unaligned_from_suffix(bytes: B) -> Option<(B, Ref<B, T>)> {
+ Ref::new_from_suffix(bytes)
+ }
+}
+
+impl<B, T> Ref<B, [T]>
+where
+ B: ByteSlice,
+ T: Unaligned,
+{
+ /// Constructs a new `Ref` of a slice type with no alignment requirement.
+ ///
+ /// `new_slice_unaligned` verifies that `bytes.len()` is a multiple of
+ /// `size_of::<T>()` and constructs a new `Ref`. If the check fails, it
+ /// returns `None`.
+ ///
+ /// # Panics
+ ///
+    /// `new_slice_unaligned` panics if `T` is a zero-sized type.
+ #[inline(always)]
+ pub fn new_slice_unaligned(bytes: B) -> Option<Ref<B, [T]>> {
+ Ref::new_slice(bytes)
+ }
+
+ /// Constructs a new `Ref` of a slice type with no alignment requirement
+ /// from the prefix of a byte slice.
+ ///
+    /// `new_slice_unaligned_from_prefix` verifies that `bytes.len() >=
+    /// size_of::<T>() * count`. It consumes the first `size_of::<T>() * count`
+    /// bytes from `bytes` to construct a `Ref`, and returns the remaining bytes
+    /// to the caller. It also ensures that `size_of::<T>() * count` does not
+    /// overflow a `usize`. If either the length or overflow check fails, it
+    /// returns `None`.
+ ///
+ /// # Panics
+ ///
+ /// `new_slice_unaligned_from_prefix` panics if `T` is a zero-sized type.
+ #[inline(always)]
+ pub fn new_slice_unaligned_from_prefix(bytes: B, count: usize) -> Option<(Ref<B, [T]>, B)> {
+ Ref::new_slice_from_prefix(bytes, count)
+ }
+
+ /// Constructs a new `Ref` of a slice type with no alignment requirement
+ /// from the suffix of a byte slice.
+ ///
+    /// `new_slice_unaligned_from_suffix` verifies that `bytes.len() >=
+    /// size_of::<T>() * count`. It consumes the last `size_of::<T>() * count`
+    /// bytes from `bytes` to construct a `Ref`, and returns the preceding bytes
+    /// to the caller. It also ensures that `size_of::<T>() * count` does not
+    /// overflow a `usize`. If either the length or overflow check fails, it
+    /// returns `None`.
+ ///
+ /// # Panics
+ ///
+ /// `new_slice_unaligned_from_suffix` panics if `T` is a zero-sized type.
+ #[inline(always)]
+ pub fn new_slice_unaligned_from_suffix(bytes: B, count: usize) -> Option<(B, Ref<B, [T]>)> {
+ Ref::new_slice_from_suffix(bytes, count)
+ }
+}
+
+impl<B, T> Ref<B, T>
+where
+ B: ByteSliceMut,
+ T: Unaligned,
+{
+ /// Constructs a new `Ref` for a type with no alignment requirement, zeroing
+ /// the bytes.
+ ///
+ /// `new_unaligned_zeroed` verifies that `bytes.len() == size_of::<T>()` and
+ /// constructs a new `Ref`. If the check fails, it returns `None`.
+ ///
+ /// If the check succeeds, then `bytes` will be initialized to zero. This
+ /// can be useful when re-using buffers to ensure that sensitive data
+ /// previously stored in the buffer is not leaked.
+ #[inline(always)]
+ pub fn new_unaligned_zeroed(bytes: B) -> Option<Ref<B, T>> {
+ map_zeroed(Self::new_unaligned(bytes))
+ }
+
+ /// Constructs a new `Ref` from the prefix of a byte slice for a type with
+ /// no alignment requirement, zeroing the prefix.
+ ///
+ /// `new_unaligned_from_prefix_zeroed` verifies that `bytes.len() >=
+ /// size_of::<T>()`. It consumes the first `size_of::<T>()` bytes from
+ /// `bytes` to construct a `Ref`, and returns the remaining bytes to the
+ /// caller. If the length check fails, it returns `None`.
+ ///
+ /// If the check succeeds, then the prefix which is consumed will be
+ /// initialized to zero. This can be useful when re-using buffers to ensure
+ /// that sensitive data previously stored in the buffer is not leaked.
+ #[inline(always)]
+ pub fn new_unaligned_from_prefix_zeroed(bytes: B) -> Option<(Ref<B, T>, B)> {
+ map_prefix_tuple_zeroed(Self::new_unaligned_from_prefix(bytes))
+ }
+
+ /// Constructs a new `Ref` from the suffix of a byte slice for a type with
+ /// no alignment requirement, zeroing the suffix.
+ ///
+ /// `new_unaligned_from_suffix_zeroed` verifies that `bytes.len() >=
+ /// size_of::<T>()`. It consumes the last `size_of::<T>()` bytes from
+ /// `bytes` to construct a `Ref`, and returns the preceding bytes to the
+ /// caller. If the length check fails, it returns `None`.
+ ///
+ /// If the check succeeds, then the suffix which is consumed will be
+ /// initialized to zero. This can be useful when re-using buffers to ensure
+ /// that sensitive data previously stored in the buffer is not leaked.
+ #[inline(always)]
+ pub fn new_unaligned_from_suffix_zeroed(bytes: B) -> Option<(B, Ref<B, T>)> {
+ map_suffix_tuple_zeroed(Self::new_unaligned_from_suffix(bytes))
+ }
+}
+
+impl<B, T> Ref<B, [T]>
+where
+ B: ByteSliceMut,
+ T: Unaligned,
+{
+ /// Constructs a new `Ref` for a slice type with no alignment requirement,
+ /// zeroing the bytes.
+ ///
+ /// `new_slice_unaligned_zeroed` verifies that `bytes.len()` is a multiple
+ /// of `size_of::<T>()` and constructs a new `Ref`. If the check fails, it
+ /// returns `None`.
+ ///
+ /// If the check succeeds, then `bytes` will be initialized to zero. This
+ /// can be useful when re-using buffers to ensure that sensitive data
+ /// previously stored in the buffer is not leaked.
+ ///
+ /// # Panics
+ ///
+    /// `new_slice_unaligned_zeroed` panics if `T` is a zero-sized type.
+ #[inline(always)]
+ pub fn new_slice_unaligned_zeroed(bytes: B) -> Option<Ref<B, [T]>> {
+ map_zeroed(Self::new_slice_unaligned(bytes))
+ }
+
+ /// Constructs a new `Ref` of a slice type with no alignment requirement
+ /// from the prefix of a byte slice, after zeroing the bytes.
+ ///
+    /// `new_slice_unaligned_from_prefix_zeroed` verifies that `bytes.len() >=
+    /// size_of::<T>() * count`. It consumes the first `size_of::<T>() * count`
+    /// bytes from `bytes` to construct a `Ref`, and returns the remaining bytes
+    /// to the caller. It also ensures that `size_of::<T>() * count` does not
+    /// overflow a `usize`. If either the length or overflow check fails, it
+    /// returns `None`.
+ ///
+ /// If the checks succeed, then the prefix will be initialized to zero. This
+ /// can be useful when re-using buffers to ensure that sensitive data
+ /// previously stored in the buffer is not leaked.
+ ///
+ /// # Panics
+ ///
+ /// `new_slice_unaligned_from_prefix_zeroed` panics if `T` is a zero-sized
+ /// type.
+ #[inline(always)]
+ pub fn new_slice_unaligned_from_prefix_zeroed(
+ bytes: B,
+ count: usize,
+ ) -> Option<(Ref<B, [T]>, B)> {
+ map_prefix_tuple_zeroed(Self::new_slice_unaligned_from_prefix(bytes, count))
+ }
+
+ /// Constructs a new `Ref` of a slice type with no alignment requirement
+ /// from the suffix of a byte slice, after zeroing the bytes.
+ ///
+    /// `new_slice_unaligned_from_suffix_zeroed` verifies that `bytes.len() >=
+    /// size_of::<T>() * count`. It consumes the last `size_of::<T>() * count`
+    /// bytes from `bytes` to construct a `Ref`, and returns the preceding bytes
+    /// to the caller. It also ensures that `size_of::<T>() * count` does not
+    /// overflow a `usize`. If either the length or overflow check fails, it
+    /// returns `None`.
+ ///
+ /// If the checks succeed, then the suffix will be initialized to zero. This
+ /// can be useful when re-using buffers to ensure that sensitive data
+ /// previously stored in the buffer is not leaked.
+ ///
+ /// # Panics
+ ///
+ /// `new_slice_unaligned_from_suffix_zeroed` panics if `T` is a zero-sized
+ /// type.
+ #[inline(always)]
+ pub fn new_slice_unaligned_from_suffix_zeroed(
+ bytes: B,
+ count: usize,
+ ) -> Option<(B, Ref<B, [T]>)> {
+ map_suffix_tuple_zeroed(Self::new_slice_unaligned_from_suffix(bytes, count))
+ }
+}
+
+impl<'a, B, T> Ref<B, T>
+where
+ B: 'a + ByteSlice,
+ T: FromBytes,
+{
+ /// Converts this `Ref` into a reference.
+ ///
+ /// `into_ref` consumes the `Ref`, and returns a reference to `T`.
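+    ///
+    /// # Examples
+    ///
+    /// A sketch extracting a `&U16` that outlives the `Ref` itself (sound for
+    /// `B = &[u8]`):
+    ///
+    /// ```
+    /// use zerocopy::Ref;
+    /// use zerocopy::byteorder::network_endian::U16;
+    ///
+    /// let bytes = [0u8, 9];
+    /// let r = Ref::<_, U16>::new(&bytes[..]).unwrap();
+    /// let n: &U16 = r.into_ref();
+    /// assert_eq!(n.get(), 9);
+    /// ```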
+ #[inline(always)]
+ pub fn into_ref(self) -> &'a T {
+ assert!(B::INTO_REF_INTO_MUT_ARE_SOUND);
+
+ // SAFETY: According to the safety preconditions on
+ // `ByteSlice::INTO_REF_INTO_MUT_ARE_SOUND`, the preceding assert
+ // ensures that, given `B: 'a`, it is sound to drop `self` and still
+ // access the underlying memory using reads for `'a`.
+ unsafe { self.deref_helper() }
+ }
+}
+
+impl<'a, B, T> Ref<B, T>
+where
+ B: 'a + ByteSliceMut,
+ T: FromBytes + AsBytes,
+{
+ /// Converts this `Ref` into a mutable reference.
+ ///
+ /// `into_mut` consumes the `Ref`, and returns a mutable reference to `T`.
+ #[inline(always)]
+ pub fn into_mut(mut self) -> &'a mut T {
+ assert!(B::INTO_REF_INTO_MUT_ARE_SOUND);
+
+ // SAFETY: According to the safety preconditions on
+ // `ByteSlice::INTO_REF_INTO_MUT_ARE_SOUND`, the preceding assert
+ // ensures that, given `B: 'a + ByteSliceMut`, it is sound to drop
+ // `self` and still access the underlying memory using both reads and
+ // writes for `'a`.
+ unsafe { self.deref_mut_helper() }
+ }
+}
+
+impl<'a, B, T> Ref<B, [T]>
+where
+ B: 'a + ByteSlice,
+ T: FromBytes,
+{
+ /// Converts this `Ref` into a slice reference.
+ ///
+ /// `into_slice` consumes the `Ref`, and returns a reference to `[T]`.
+ #[inline(always)]
+ pub fn into_slice(self) -> &'a [T] {
+ assert!(B::INTO_REF_INTO_MUT_ARE_SOUND);
+
+ // SAFETY: According to the safety preconditions on
+ // `ByteSlice::INTO_REF_INTO_MUT_ARE_SOUND`, the preceding assert
+ // ensures that, given `B: 'a`, it is sound to drop `self` and still
+ // access the underlying memory using reads for `'a`.
+ unsafe { self.deref_slice_helper() }
+ }
+}
+
+impl<'a, B, T> Ref<B, [T]>
+where
+ B: 'a + ByteSliceMut,
+ T: FromBytes + AsBytes,
+{
+ /// Converts this `Ref` into a mutable slice reference.
+ ///
+ /// `into_mut_slice` consumes the `Ref`, and returns a mutable reference to
+ /// `[T]`.
+ #[inline(always)]
+ pub fn into_mut_slice(mut self) -> &'a mut [T] {
+ assert!(B::INTO_REF_INTO_MUT_ARE_SOUND);
+
+ // SAFETY: According to the safety preconditions on
+ // `ByteSlice::INTO_REF_INTO_MUT_ARE_SOUND`, the preceding assert
+ // ensures that, given `B: 'a + ByteSliceMut`, it is sound to drop
+ // `self` and still access the underlying memory using both reads and
+ // writes for `'a`.
+ unsafe { self.deref_mut_slice_helper() }
+ }
+}
+
+impl<B, T> Ref<B, T>
+where
+ B: ByteSlice,
+ T: FromBytes,
+{
+ /// Creates an immutable reference to `T` with a specific lifetime.
+ ///
+ /// # Safety
+ ///
+ /// The type bounds on this method guarantee that it is safe to create an
+ /// immutable reference to `T` from `self`. However, since the lifetime `'a`
+ /// is not required to be shorter than the lifetime of the reference to
+ /// `self`, the caller must guarantee that the lifetime `'a` is valid for
+ /// this reference. In particular, the referent must exist for all of `'a`,
+ /// and no mutable references to the same memory may be constructed during
+ /// `'a`.
+ unsafe fn deref_helper<'a>(&self) -> &'a T {
+ // TODO(#429): Add a "SAFETY" comment and remove this `allow`.
+ #[allow(clippy::undocumented_unsafe_blocks)]
+ unsafe {
+ &*self.0.as_ptr().cast::<T>()
+ }
+ }
+}
+
+impl<B, T> Ref<B, T>
+where
+ B: ByteSliceMut,
+ T: FromBytes + AsBytes,
+{
+ /// Creates a mutable reference to `T` with a specific lifetime.
+ ///
+ /// # Safety
+ ///
+ /// The type bounds on this method guarantee that it is safe to create a
+ /// mutable reference to `T` from `self`. However, since the lifetime `'a`
+ /// is not required to be shorter than the lifetime of the reference to
+ /// `self`, the caller must guarantee that the lifetime `'a` is valid for
+ /// this reference. In particular, the referent must exist for all of `'a`,
+ /// and no other references - mutable or immutable - to the same memory may
+ /// be constructed during `'a`.
+ unsafe fn deref_mut_helper<'a>(&mut self) -> &'a mut T {
+ // TODO(#429): Add a "SAFETY" comment and remove this `allow`.
+ #[allow(clippy::undocumented_unsafe_blocks)]
+ unsafe {
+ &mut *self.0.as_mut_ptr().cast::<T>()
+ }
+ }
+}
+
+impl<B, T> Ref<B, [T]>
+where
+ B: ByteSlice,
+ T: FromBytes,
+{
+ /// Creates an immutable reference to `[T]` with a specific lifetime.
+ ///
+ /// # Safety
+ ///
+ /// `deref_slice_helper` has the same safety requirements as `deref_helper`.
+ unsafe fn deref_slice_helper<'a>(&self) -> &'a [T] {
+ let len = self.0.len();
+ let elem_size = mem::size_of::<T>();
+ debug_assert_ne!(elem_size, 0);
+ // `Ref<_, [T]>` maintains the invariant that `size_of::<T>() > 0`.
+ // Thus, neither the mod nor division operations here can panic.
+ #[allow(clippy::arithmetic_side_effects)]
+ let elems = {
+ debug_assert_eq!(len % elem_size, 0);
+ len / elem_size
+ };
+ // TODO(#429): Add a "SAFETY" comment and remove this `allow`.
+ #[allow(clippy::undocumented_unsafe_blocks)]
+ unsafe {
+ slice::from_raw_parts(self.0.as_ptr().cast::<T>(), elems)
+ }
+ }
+}
+
+impl<B, T> Ref<B, [T]>
+where
+ B: ByteSliceMut,
+ T: FromBytes + AsBytes,
+{
+ /// Creates a mutable reference to `[T]` with a specific lifetime.
+ ///
+ /// # Safety
+ ///
+ /// `deref_mut_slice_helper` has the same safety requirements as
+ /// `deref_mut_helper`.
+ unsafe fn deref_mut_slice_helper<'a>(&mut self) -> &'a mut [T] {
+ let len = self.0.len();
+ let elem_size = mem::size_of::<T>();
+ debug_assert_ne!(elem_size, 0);
+ // `Ref<_, [T]>` maintains the invariant that `size_of::<T>() > 0`.
+ // Thus, neither the mod nor division operations here can panic.
+ #[allow(clippy::arithmetic_side_effects)]
+ let elems = {
+ debug_assert_eq!(len % elem_size, 0);
+ len / elem_size
+ };
+ // TODO(#429): Add a "SAFETY" comment and remove this `allow`.
+ #[allow(clippy::undocumented_unsafe_blocks)]
+ unsafe {
+ slice::from_raw_parts_mut(self.0.as_mut_ptr().cast::<T>(), elems)
+ }
+ }
+}
+
+impl<B, T> Ref<B, T>
+where
+ B: ByteSlice,
+ T: ?Sized,
+{
+ /// Gets the underlying bytes.
+ #[inline]
+ pub fn bytes(&self) -> &[u8] {
+ &self.0
+ }
+}
+
+impl<B, T> Ref<B, T>
+where
+ B: ByteSliceMut,
+ T: ?Sized,
+{
+ /// Gets the underlying bytes mutably.
+ #[inline]
+ pub fn bytes_mut(&mut self) -> &mut [u8] {
+ &mut self.0
+ }
+}
+
+impl<B, T> Ref<B, T>
+where
+ B: ByteSlice,
+ T: FromBytes,
+{
+ /// Reads a copy of `T`.
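+    ///
+    /// # Examples
+    ///
+    /// A sketch showing that `read` returns an owned copy, leaving the `Ref`
+    /// usable afterwards:
+    ///
+    /// ```
+    /// use zerocopy::Ref;
+    /// use zerocopy::byteorder::network_endian::U16;
+    ///
+    /// let bytes = [0u8, 42];
+    /// let r = Ref::<_, U16>::new(&bytes[..]).unwrap();
+    /// let a = r.read();
+    /// let b = r.read();
+    /// assert_eq!(a.get(), 42);
+    /// assert_eq!(b.get(), 42);
+    /// ```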
+ #[inline]
+ pub fn read(&self) -> T {
+ // SAFETY: Because of the invariants on `Ref`, we know that `self.0` is
+ // at least `size_of::<T>()` bytes long, and that it is at least as
+ // aligned as `align_of::<T>()`. Because `T: FromBytes`, it is sound to
+ // interpret these bytes as a `T`.
+ unsafe { ptr::read(self.0.as_ptr().cast::<T>()) }
+ }
+}
+
+impl<B, T> Ref<B, T>
+where
+ B: ByteSliceMut,
+ T: AsBytes,
+{
+ /// Writes the bytes of `t` and then forgets `t`.
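+    ///
+    /// # Examples
+    ///
+    /// A sketch overwriting a buffer in place through a `Ref`:
+    ///
+    /// ```
+    /// use zerocopy::Ref;
+    /// use zerocopy::byteorder::network_endian::U16;
+    ///
+    /// let mut bytes = [0u8, 0];
+    /// let mut r = Ref::<_, U16>::new(&mut bytes[..]).unwrap();
+    /// r.write(U16::new(258));
+    /// assert_eq!(bytes, [1, 2]); // 258 == 0x0102, stored big-endian
+    /// ```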
+ #[inline]
+ pub fn write(&mut self, t: T) {
+ // SAFETY: Because of the invariants on `Ref`, we know that `self.0` is
+ // at least `size_of::<T>()` bytes long, and that it is at least as
+ // aligned as `align_of::<T>()`. Writing `t` to the buffer will allow
+ // all of the bytes of `t` to be accessed as a `[u8]`, but because `T:
+ // AsBytes`, we know this is sound.
+ unsafe { ptr::write(self.0.as_mut_ptr().cast::<T>(), t) }
+ }
+}
+
+impl<B, T> Deref for Ref<B, T>
+where
+ B: ByteSlice,
+ T: FromBytes,
+{
+ type Target = T;
+ #[inline]
+ fn deref(&self) -> &T {
+ // SAFETY: This is sound because the lifetime of `self` is the same as
+ // the lifetime of the return value, meaning that a) the returned
+ // reference cannot outlive `self` and, b) no mutable methods on `self`
+ // can be called during the lifetime of the returned reference. See the
+ // documentation on `deref_helper` for what invariants we are required
+ // to uphold.
+ unsafe { self.deref_helper() }
+ }
+}
+
+impl<B, T> DerefMut for Ref<B, T>
+where
+ B: ByteSliceMut,
+ T: FromBytes + AsBytes,
+{
+ #[inline]
+ fn deref_mut(&mut self) -> &mut T {
+ // SAFETY: This is sound because the lifetime of `self` is the same as
+ // the lifetime of the return value, meaning that a) the returned
+ // reference cannot outlive `self` and, b) no other methods on `self`
+ // can be called during the lifetime of the returned reference. See the
+ // documentation on `deref_mut_helper` for what invariants we are
+ // required to uphold.
+ unsafe { self.deref_mut_helper() }
+ }
+}
+
+impl<B, T> Deref for Ref<B, [T]>
+where
+ B: ByteSlice,
+ T: FromBytes,
+{
+ type Target = [T];
+ #[inline]
+ fn deref(&self) -> &[T] {
+ // SAFETY: This is sound because the lifetime of `self` is the same as
+ // the lifetime of the return value, meaning that a) the returned
+ // reference cannot outlive `self` and, b) no mutable methods on `self`
+ // can be called during the lifetime of the returned reference. See the
+ // documentation on `deref_slice_helper` for what invariants we are
+ // required to uphold.
+ unsafe { self.deref_slice_helper() }
+ }
+}
+
+impl<B, T> DerefMut for Ref<B, [T]>
+where
+ B: ByteSliceMut,
+ T: FromBytes + AsBytes,
+{
+ #[inline]
+ fn deref_mut(&mut self) -> &mut [T] {
+ // SAFETY: This is sound because the lifetime of `self` is the same as
+ // the lifetime of the return value, meaning that a) the returned
+ // reference cannot outlive `self` and, b) no other methods on `self`
+ // can be called during the lifetime of the returned reference. See the
+ // documentation on `deref_mut_slice_helper` for what invariants we are
+ // required to uphold.
+ unsafe { self.deref_mut_slice_helper() }
+ }
+}
+
+impl<T, B> Display for Ref<B, T>
+where
+ B: ByteSlice,
+ T: FromBytes + Display,
+{
+ #[inline]
+ fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
+ let inner: &T = self;
+ inner.fmt(fmt)
+ }
+}
+
+impl<T, B> Display for Ref<B, [T]>
+where
+ B: ByteSlice,
+ T: FromBytes,
+ [T]: Display,
+{
+ #[inline]
+ fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
+ let inner: &[T] = self;
+ inner.fmt(fmt)
+ }
+}
+
+impl<T, B> Debug for Ref<B, T>
+where
+ B: ByteSlice,
+ T: FromBytes + Debug,
+{
+ #[inline]
+ fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
+ let inner: &T = self;
+ fmt.debug_tuple("Ref").field(&inner).finish()
+ }
+}
+
+impl<T, B> Debug for Ref<B, [T]>
+where
+ B: ByteSlice,
+ T: FromBytes + Debug,
+{
+ #[inline]
+ fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
+ let inner: &[T] = self;
+ fmt.debug_tuple("Ref").field(&inner).finish()
+ }
+}
+
+impl<T, B> Eq for Ref<B, T>
+where
+ B: ByteSlice,
+ T: FromBytes + Eq,
+{
+}
+
+impl<T, B> Eq for Ref<B, [T]>
+where
+ B: ByteSlice,
+ T: FromBytes + Eq,
+{
+}
+
+impl<T, B> PartialEq for Ref<B, T>
+where
+ B: ByteSlice,
+ T: FromBytes + PartialEq,
+{
+ #[inline]
+ fn eq(&self, other: &Self) -> bool {
+ self.deref().eq(other.deref())
+ }
+}
+
+impl<T, B> PartialEq for Ref<B, [T]>
+where
+ B: ByteSlice,
+ T: FromBytes + PartialEq,
+{
+ #[inline]
+ fn eq(&self, other: &Self) -> bool {
+ self.deref().eq(other.deref())
+ }
+}
+
+impl<T, B> Ord for Ref<B, T>
+where
+ B: ByteSlice,
+ T: FromBytes + Ord,
+{
+ #[inline]
+ fn cmp(&self, other: &Self) -> Ordering {
+ let inner: &T = self;
+ let other_inner: &T = other;
+ inner.cmp(other_inner)
+ }
+}
+
+impl<T, B> Ord for Ref<B, [T]>
+where
+ B: ByteSlice,
+ T: FromBytes + Ord,
+{
+ #[inline]
+ fn cmp(&self, other: &Self) -> Ordering {
+ let inner: &[T] = self;
+ let other_inner: &[T] = other;
+ inner.cmp(other_inner)
+ }
+}
+
+impl<T, B> PartialOrd for Ref<B, T>
+where
+ B: ByteSlice,
+ T: FromBytes + PartialOrd,
+{
+ #[inline]
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ let inner: &T = self;
+ let other_inner: &T = other;
+ inner.partial_cmp(other_inner)
+ }
+}
+
+impl<T, B> PartialOrd for Ref<B, [T]>
+where
+ B: ByteSlice,
+ T: FromBytes + PartialOrd,
+{
+ #[inline]
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ let inner: &[T] = self;
+ let other_inner: &[T] = other;
+ inner.partial_cmp(other_inner)
+ }
+}
+
+mod sealed {
+ pub trait ByteSliceSealed {}
+}
+
+// ByteSlice and ByteSliceMut abstract over [u8] references (&[u8], &mut [u8],
+// Ref<[u8]>, RefMut<[u8]>, etc). We rely on various behaviors of these
+// references, such as that a given reference will never change its length
+// between calls to deref() or deref_mut(), and that split_at() works as
+// expected. If ByteSlice or ByteSliceMut were not sealed, consumers could
+// implement them in a way that violated these behaviors, which would break our
+// unsafe code. Thus, we seal them and implement them only for known-good
+// reference types. For the same reason, they're unsafe traits.
+
+#[allow(clippy::missing_safety_doc)] // TODO(fxbug.dev/99068)
+/// A mutable or immutable reference to a byte slice.
+///
+/// `ByteSlice` abstracts over the mutability of a byte slice reference, and is
+/// implemented for various special reference types such as `Ref<[u8]>` and
+/// `RefMut<[u8]>`.
+///
+/// Note that, while it would be technically possible, `ByteSlice` is not
+/// implemented for [`Vec<u8>`], as the only way to implement the [`split_at`]
+/// method would involve reallocation, and `split_at` must be a very cheap
+/// operation in order for the utilities in this crate to perform as designed.
+///
+/// [`split_at`]: crate::ByteSlice::split_at
+// It may seem overkill to go to this length to ensure that this doc link never
+// breaks. We do this because it simplifies CI - it means that generating docs
+// always succeeds, so we don't need special logic to only generate docs under
+// certain features.
+#[cfg_attr(feature = "alloc", doc = "[`Vec<u8>`]: alloc::vec::Vec")]
+#[cfg_attr(
+ not(feature = "alloc"),
+ doc = "[`Vec<u8>`]: https://doc.rust-lang.org/std/vec/struct.Vec.html"
+)]
+pub unsafe trait ByteSlice:
+ Deref<Target = [u8]> + Sized + self::sealed::ByteSliceSealed
+{
+ /// Are the [`Ref::into_ref`] and [`Ref::into_mut`] methods sound when used
+ /// with `Self`? If not, evaluating this constant must panic at compile
+ /// time.
+ ///
+ /// This exists to work around #716 on versions of zerocopy prior to 0.8.
+ ///
+ /// # Safety
+ ///
+    /// This may only be set to `true` if the following holds. Given:
+ /// - `Self: 'a`
+ /// - `bytes: Self`
+ /// - `let ptr = bytes.as_ptr()`
+ ///
+ /// ...then:
+ /// - Using `ptr` to read the memory previously addressed by `bytes` is
+ /// sound for `'a` even after `bytes` has been dropped.
+ /// - If `Self: ByteSliceMut`, using `ptr` to write the memory previously
+ /// addressed by `bytes` is sound for `'a` even after `bytes` has been
+ /// dropped.
+ #[doc(hidden)]
+ const INTO_REF_INTO_MUT_ARE_SOUND: bool;
+
+ /// Gets a raw pointer to the first byte in the slice.
+ #[inline]
+ fn as_ptr(&self) -> *const u8 {
+ <[u8]>::as_ptr(self)
+ }
+
+ /// Splits the slice at the midpoint.
+ ///
+ /// `x.split_at(mid)` returns `x[..mid]` and `x[mid..]`.
+ ///
+ /// # Panics
+ ///
+ /// `x.split_at(mid)` panics if `mid > x.len()`.
+ fn split_at(self, mid: usize) -> (Self, Self);
+}
+
+#[allow(clippy::missing_safety_doc)] // TODO(fxbug.dev/99068)
+/// A mutable reference to a byte slice.
+///
+/// `ByteSliceMut` abstracts over various ways of storing a mutable reference to
+/// a byte slice, and is implemented for various special reference types such as
+/// `RefMut<[u8]>`.
+pub unsafe trait ByteSliceMut: ByteSlice + DerefMut {
+ /// Gets a mutable raw pointer to the first byte in the slice.
+ #[inline]
+ fn as_mut_ptr(&mut self) -> *mut u8 {
+ <[u8]>::as_mut_ptr(self)
+ }
+}
+
+impl<'a> sealed::ByteSliceSealed for &'a [u8] {}
+// TODO(#429): Add a "SAFETY" comment and remove this `allow`.
+#[allow(clippy::undocumented_unsafe_blocks)]
+unsafe impl<'a> ByteSlice for &'a [u8] {
+ // SAFETY: If `&'b [u8]: 'a`, then the underlying memory is treated as
+ // borrowed immutably for `'a` even if the slice itself is dropped.
+ const INTO_REF_INTO_MUT_ARE_SOUND: bool = true;
+
+ #[inline]
+ fn split_at(self, mid: usize) -> (Self, Self) {
+ <[u8]>::split_at(self, mid)
+ }
+}
+
+impl<'a> sealed::ByteSliceSealed for &'a mut [u8] {}
+// TODO(#429): Add a "SAFETY" comment and remove this `allow`.
+#[allow(clippy::undocumented_unsafe_blocks)]
+unsafe impl<'a> ByteSlice for &'a mut [u8] {
+ // SAFETY: If `&'b mut [u8]: 'a`, then the underlying memory is treated as
+ // borrowed mutably for `'a` even if the slice itself is dropped.
+ const INTO_REF_INTO_MUT_ARE_SOUND: bool = true;
+
+ #[inline]
+ fn split_at(self, mid: usize) -> (Self, Self) {
+ <[u8]>::split_at_mut(self, mid)
+ }
+}
+
+impl<'a> sealed::ByteSliceSealed for cell::Ref<'a, [u8]> {}
+// TODO(#429): Add a "SAFETY" comment and remove this `allow`.
+#[allow(clippy::undocumented_unsafe_blocks)]
+unsafe impl<'a> ByteSlice for cell::Ref<'a, [u8]> {
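+ // `cell::Ref` is a `RefCell` borrow guard: dropping it releases the
+ // dynamic borrow, after which the cell's contents may be borrowed
+ // mutably. A raw pointer derived from the guard therefore must not
+ // outlive the guard, so the soundness condition documented on
+ // `INTO_REF_INTO_MUT_ARE_SOUND` cannot hold here (see #716).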
+ const INTO_REF_INTO_MUT_ARE_SOUND: bool = if !cfg!(doc) {
+ panic!("Ref::into_ref and Ref::into_mut are unsound when used with core::cell::Ref; see https://github.com/google/zerocopy/issues/716")
+ } else {
+ // When compiling documentation, allow the evaluation of this constant
+ // to succeed. This doesn't represent a soundness hole - it just delays
+ // any error to runtime. The reason we need this is that, otherwise,
+ // `rustdoc` will fail when trying to document this item.
+ false
+ };
+
+ #[inline]
+ fn split_at(self, mid: usize) -> (Self, Self) {
+ cell::Ref::map_split(self, |slice| <[u8]>::split_at(slice, mid))
+ }
+}
+
+impl<'a> sealed::ByteSliceSealed for RefMut<'a, [u8]> {}
+// TODO(#429): Add a "SAFETY" comment and remove this `allow`.
+#[allow(clippy::undocumented_unsafe_blocks)]
+unsafe impl<'a> ByteSlice for RefMut<'a, [u8]> {
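+ // As with `cell::Ref` above, a pointer derived from the guard must not
+ // outlive the guard, so the soundness condition cannot hold.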
+ const INTO_REF_INTO_MUT_ARE_SOUND: bool = if !cfg!(doc) {
+ panic!("Ref::into_ref and Ref::into_mut are unsound when used with core::cell::RefMut; see https://github.com/google/zerocopy/issues/716")
+ } else {
+ // When compiling documentation, allow the evaluation of this constant
+ // to succeed. This doesn't represent a soundness hole - it just delays
+ // any error to runtime. The reason we need this is that, otherwise,
+ // `rustdoc` will fail when trying to document this item.
+ false
+ };
+
+ #[inline]
+ fn split_at(self, mid: usize) -> (Self, Self) {
+ RefMut::map_split(self, |slice| <[u8]>::split_at_mut(slice, mid))
+ }
+}
+
+// TODO(#429): Add a "SAFETY" comment and remove this `allow`.
+#[allow(clippy::undocumented_unsafe_blocks)]
+unsafe impl<'a> ByteSliceMut for &'a mut [u8] {}
+
+// TODO(#429): Add a "SAFETY" comment and remove this `allow`.
+#[allow(clippy::undocumented_unsafe_blocks)]
+unsafe impl<'a> ByteSliceMut for RefMut<'a, [u8]> {}
+
+#[cfg(feature = "alloc")]
+#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
+mod alloc_support {
+ use alloc::vec::Vec;
+
+ use super::*;
+
+ /// Extends a `Vec<T>` by pushing `additional` new items onto the end of the
+ /// vector. The new items are initialized with zeroes.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `Vec::reserve(additional)` fails to reserve enough memory.
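+ ///
+ /// # Example
+ ///
+ /// A quick sketch of the observable behavior; the function is re-exported
+ /// at the crate root when the `alloc` feature is enabled:
+ ///
+ /// ```rust
+ /// use zerocopy::extend_vec_zeroed;
+ ///
+ /// let mut v = vec![1u8, 2, 3];
+ /// extend_vec_zeroed(&mut v, 2);
+ /// assert_eq!(&*v, &[1, 2, 3, 0, 0]);
+ /// ```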
+ #[inline(always)]
+ pub fn extend_vec_zeroed<T: FromZeroes>(v: &mut Vec<T>, additional: usize) {
+ insert_vec_zeroed(v, v.len(), additional);
+ }
+
+ /// Inserts `additional` new items into `Vec<T>` at `position`.
+ /// The new items are initialized with zeroes.
+ ///
+ /// # Panics
+ ///
+ /// * Panics if `position > v.len()`.
+ /// * Panics if `Vec::reserve(additional)` fails to reserve enough memory.
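+ ///
+ /// # Example
+ ///
+ /// A quick sketch of the observable behavior; the function is re-exported
+ /// at the crate root when the `alloc` feature is enabled:
+ ///
+ /// ```rust
+ /// use zerocopy::insert_vec_zeroed;
+ ///
+ /// let mut v = vec![1u8, 2, 3];
+ /// insert_vec_zeroed(&mut v, 1, 2);
+ /// assert_eq!(&*v, &[1, 0, 0, 2, 3]);
+ /// ```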
+ #[inline]
+ pub fn insert_vec_zeroed<T: FromZeroes>(v: &mut Vec<T>, position: usize, additional: usize) {
+ assert!(position <= v.len());
+ v.reserve(additional);
+ // SAFETY: The `reserve` call guarantees that these cannot overflow:
+ // * `ptr.add(position)`
+ // * `position + additional`
+ // * `v.len() + additional`
+ //
+ // `v.len() - position` cannot overflow because we asserted that
+ // `position <= v.len()`.
+ unsafe {
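+ // Shift the `v.len() - position` tail elements `additional` slots to
+ // the right, zero the gap left behind, then extend the length to
+ // cover the newly-initialized elements.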
+ // This is a potentially overlapping copy.
+ let ptr = v.as_mut_ptr();
+ #[allow(clippy::arithmetic_side_effects)]
+ ptr.add(position).copy_to(ptr.add(position + additional), v.len() - position);
+ ptr.add(position).write_bytes(0, additional);
+ #[allow(clippy::arithmetic_side_effects)]
+ v.set_len(v.len() + additional);
+ }
+ }
+
+ #[cfg(test)]
+ mod tests {
+ use core::convert::TryFrom as _;
+
+ use super::*;
+
+ #[test]
+ fn test_extend_vec_zeroed() {
+ // Test extending when there is an existing allocation.
+ let mut v = vec![100u64, 200, 300];
+ extend_vec_zeroed(&mut v, 3);
+ assert_eq!(v.len(), 6);
+ assert_eq!(&*v, &[100, 200, 300, 0, 0, 0]);
+ drop(v);
+
+ // Test extending when there is no existing allocation.
+ let mut v: Vec<u64> = Vec::new();
+ extend_vec_zeroed(&mut v, 3);
+ assert_eq!(v.len(), 3);
+ assert_eq!(&*v, &[0, 0, 0]);
+ drop(v);
+ }
+
+ #[test]
+ fn test_extend_vec_zeroed_zst() {
+ // Test extending when there is an existing (fake) allocation.
+ let mut v = vec![(), (), ()];
+ extend_vec_zeroed(&mut v, 3);
+ assert_eq!(v.len(), 6);
+ assert_eq!(&*v, &[(), (), (), (), (), ()]);
+ drop(v);
+
+ // Test extending when there is no existing (fake) allocation.
+ let mut v: Vec<()> = Vec::new();
+ extend_vec_zeroed(&mut v, 3);
+ assert_eq!(&*v, &[(), (), ()]);
+ drop(v);
+ }
+
+ #[test]
+ fn test_insert_vec_zeroed() {
+ // Insert at start (no existing allocation).
+ let mut v: Vec<u64> = Vec::new();
+ insert_vec_zeroed(&mut v, 0, 2);
+ assert_eq!(v.len(), 2);
+ assert_eq!(&*v, &[0, 0]);
+ drop(v);
+
+ // Insert at start.
+ let mut v = vec![100u64, 200, 300];
+ insert_vec_zeroed(&mut v, 0, 2);
+ assert_eq!(v.len(), 5);
+ assert_eq!(&*v, &[0, 0, 100, 200, 300]);
+ drop(v);
+
+ // Insert at middle.
+ let mut v = vec![100u64, 200, 300];
+ insert_vec_zeroed(&mut v, 1, 1);
+ assert_eq!(v.len(), 4);
+ assert_eq!(&*v, &[100, 0, 200, 300]);
+ drop(v);
+
+ // Insert at end.
+ let mut v = vec![100u64, 200, 300];
+ insert_vec_zeroed(&mut v, 3, 1);
+ assert_eq!(v.len(), 4);
+ assert_eq!(&*v, &[100, 200, 300, 0]);
+ drop(v);
+ }
+
+ #[test]
+ fn test_insert_vec_zeroed_zst() {
+ // Insert at start (no existing fake allocation).
+ let mut v: Vec<()> = Vec::new();
+ insert_vec_zeroed(&mut v, 0, 2);
+ assert_eq!(v.len(), 2);
+ assert_eq!(&*v, &[(), ()]);
+ drop(v);
+
+ // Insert at start.
+ let mut v = vec![(), (), ()];
+ insert_vec_zeroed(&mut v, 0, 2);
+ assert_eq!(v.len(), 5);
+ assert_eq!(&*v, &[(), (), (), (), ()]);
+ drop(v);
+
+ // Insert at middle.
+ let mut v = vec![(), (), ()];
+ insert_vec_zeroed(&mut v, 1, 1);
+ assert_eq!(v.len(), 4);
+ assert_eq!(&*v, &[(), (), (), ()]);
+ drop(v);
+
+ // Insert at end.
+ let mut v = vec![(), (), ()];
+ insert_vec_zeroed(&mut v, 3, 1);
+ assert_eq!(v.len(), 4);
+ assert_eq!(&*v, &[(), (), (), ()]);
+ drop(v);
+ }
+
+ #[test]
+ fn test_new_box_zeroed() {
+ assert_eq!(*u64::new_box_zeroed(), 0);
+ }
+
+ #[test]
+ fn test_new_box_zeroed_array() {
+ drop(<[u32; 0x1000]>::new_box_zeroed());
+ }
+
+ #[test]
+ fn test_new_box_zeroed_zst() {
+ // This test exists in order to exercise unsafe code, especially
+ // when running under Miri.
+ #[allow(clippy::unit_cmp)]
+ {
+ assert_eq!(*<()>::new_box_zeroed(), ());
+ }
+ }
+
+ #[test]
+ fn test_new_box_slice_zeroed() {
+ let mut s: Box<[u64]> = u64::new_box_slice_zeroed(3);
+ assert_eq!(s.len(), 3);
+ assert_eq!(&*s, &[0, 0, 0]);
+ s[1] = 3;
+ assert_eq!(&*s, &[0, 3, 0]);
+ }
+
+ #[test]
+ fn test_new_box_slice_zeroed_empty() {
+ let s: Box<[u64]> = u64::new_box_slice_zeroed(0);
+ assert_eq!(s.len(), 0);
+ }
+
+ #[test]
+ fn test_new_box_slice_zeroed_zst() {
+ let mut s: Box<[()]> = <()>::new_box_slice_zeroed(3);
+ assert_eq!(s.len(), 3);
+ assert!(s.get(10).is_none());
+ // This test exists in order to exercise unsafe code, especially
+ // when running under Miri.
+ #[allow(clippy::unit_cmp)]
+ {
+ assert_eq!(s[1], ());
+ }
+ s[2] = ();
+ }
+
+ #[test]
+ fn test_new_box_slice_zeroed_zst_empty() {
+ let s: Box<[()]> = <()>::new_box_slice_zeroed(0);
+ assert_eq!(s.len(), 0);
+ }
+
+ #[test]
+ #[should_panic(expected = "mem::size_of::<Self>() * len overflows `usize`")]
+ fn test_new_box_slice_zeroed_panics_mul_overflow() {
+ let _ = u16::new_box_slice_zeroed(usize::MAX);
+ }
+
+ #[test]
+ #[should_panic(expected = "assertion failed: size <= max_alloc")]
+ fn test_new_box_slice_zeroed_panics_isize_overflow() {
+ let max = usize::try_from(isize::MAX).unwrap();
+ let _ = u16::new_box_slice_zeroed((max / mem::size_of::<u16>()) + 1);
+ }
+ }
+}
+
+#[cfg(feature = "alloc")]
+#[doc(inline)]
+pub use alloc_support::*;
+
+#[cfg(test)]
+mod tests {
+ #![allow(clippy::unreadable_literal)]
+
+ use core::{cell::UnsafeCell, convert::TryInto as _, ops::Deref};
+
+ use static_assertions::assert_impl_all;
+
+ use super::*;
+ use crate::util::testutil::*;
+
+ // An unsized type.
+ //
+ // This is used to test the custom derives of our traits. The `[u8]` type
+ // gets a hand-rolled impl, so it doesn't exercise our custom derives.
+ #[derive(Debug, Eq, PartialEq, FromZeroes, FromBytes, AsBytes, Unaligned)]
+ #[repr(transparent)]
+ struct Unsized([u8]);
+
+ impl Unsized {
+ fn from_mut_slice(slc: &mut [u8]) -> &mut Unsized {
+ // SAFETY: This is *probably* sound - since the layouts of `[u8]` and
+ // `Unsized` are the same, so are the layouts of `&mut [u8]` and
+ // `&mut Unsized`. [1] Even if it turns out that this isn't actually
+ // guaranteed by the language spec, we can just change this since
+ // it's in test code.
+ //
+ // [1] https://github.com/rust-lang/unsafe-code-guidelines/issues/375
+ unsafe { mem::transmute(slc) }
+ }
+ }
+
+ /// Tests of when a sized `DstLayout` is extended with a sized field.
+ #[allow(clippy::decimal_literal_representation)]
+ #[test]
+ fn test_dst_layout_extend_sized_with_sized() {
+ // This macro constructs a layout corresponding to a `u8` and extends it
+ // with a zero-sized trailing field of given alignment `n`. The macro
+ // tests that the resulting layout has both size and alignment `min(n,
+ // P)` for all valid values of `repr(packed(P))`.
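+ // For example, under `repr(packed(4))`, a trailing field with alignment
+ // 16 is placed at alignment `min(16, 4) = 4`, so the composite layout
+ // ends up with size and alignment 4.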
+ macro_rules! test_align_is_size {
+ ($n:expr) => {
+ let base = DstLayout::for_type::<u8>();
+ let trailing_field = DstLayout::for_type::<elain::Align<$n>>();
+
+ let packs =
+ core::iter::once(None).chain((0..29).map(|p| NonZeroUsize::new(2usize.pow(p))));
+
+ for pack in packs {
+ let composite = base.extend(trailing_field, pack);
+ let max_align = pack.unwrap_or(DstLayout::CURRENT_MAX_ALIGN);
+ let align = $n.min(max_align.get());
+ assert_eq!(
+ composite,
+ DstLayout {
+ align: NonZeroUsize::new(align).unwrap(),
+ size_info: SizeInfo::Sized { _size: align }
+ }
+ )
+ }
+ };
+ }
+
+ test_align_is_size!(1);
+ test_align_is_size!(2);
+ test_align_is_size!(4);
+ test_align_is_size!(8);
+ test_align_is_size!(16);
+ test_align_is_size!(32);
+ test_align_is_size!(64);
+ test_align_is_size!(128);
+ test_align_is_size!(256);
+ test_align_is_size!(512);
+ test_align_is_size!(1024);
+ test_align_is_size!(2048);
+ test_align_is_size!(4096);
+ test_align_is_size!(8192);
+ test_align_is_size!(16384);
+ test_align_is_size!(32768);
+ test_align_is_size!(65536);
+ test_align_is_size!(131072);
+ test_align_is_size!(262144);
+ test_align_is_size!(524288);
+ test_align_is_size!(1048576);
+ test_align_is_size!(2097152);
+ test_align_is_size!(4194304);
+ test_align_is_size!(8388608);
+ test_align_is_size!(16777216);
+ test_align_is_size!(33554432);
+ test_align_is_size!(67108864);
+ test_align_is_size!(134217728);
+ test_align_is_size!(268435456);
+ }
+
+ /// Tests of when a sized `DstLayout` is extended with a DST field.
+ #[test]
+ fn test_dst_layout_extend_sized_with_dst() {
+ // Test that, for all combinations of real-world alignments and
+ // `repr_packed` values, the extension of a sized `DstLayout` with a
+ // DST field correctly computes the trailing offset in the composite
+ // layout.
+
+ let aligns = (0..29).map(|p| NonZeroUsize::new(2usize.pow(p)).unwrap());
+ let packs = core::iter::once(None).chain(aligns.clone().map(Some));
+
+ for align in aligns {
+ for pack in packs.clone() {
+ let base = DstLayout::for_type::<u8>();
+ let elem_size = 42;
+ let trailing_field_offset = 11;
+
+ let trailing_field = DstLayout {
+ align,
+ size_info: SizeInfo::SliceDst(TrailingSliceLayout {
+ _elem_size: elem_size,
+ _offset: trailing_field_offset,
+ }),
+ };
+
+ let composite = base.extend(trailing_field, pack);
+
+ let max_align = pack.unwrap_or(DstLayout::CURRENT_MAX_ALIGN).get();
+
+ let align = align.get().min(max_align);
+
+ assert_eq!(
+ composite,
+ DstLayout {
+ align: NonZeroUsize::new(align).unwrap(),
+ size_info: SizeInfo::SliceDst(TrailingSliceLayout {
+ _elem_size: elem_size,
+ _offset: align + trailing_field_offset,
+ }),
+ }
+ )
+ }
+ }
+ }
+
+ /// Tests that calling `pad_to_align` on a sized `DstLayout` adds the
+ /// expected amount of trailing padding.
+ #[test]
+ fn test_dst_layout_pad_to_align_with_sized() {
+ // For all valid alignments `align`, construct a one-byte layout aligned
+ // to `align`, call `pad_to_align`, and assert that the size of the
+ // resulting layout is equal to `align`.
+ for align in (0..29).map(|p| NonZeroUsize::new(2usize.pow(p)).unwrap()) {
+ let layout = DstLayout { align, size_info: SizeInfo::Sized { _size: 1 } };
+
+ assert_eq!(
+ layout.pad_to_align(),
+ DstLayout { align, size_info: SizeInfo::Sized { _size: align.get() } }
+ );
+ }
+
+ // Test explicitly-provided combinations of unpadded and padded
+ // counterparts.
+
+ macro_rules! test {
+ (unpadded { size: $unpadded_size:expr, align: $unpadded_align:expr }
+ => padded { size: $padded_size:expr, align: $padded_align:expr }) => {
+ let unpadded = DstLayout {
+ align: NonZeroUsize::new($unpadded_align).unwrap(),
+ size_info: SizeInfo::Sized { _size: $unpadded_size },
+ };
+ let padded = unpadded.pad_to_align();
+
+ assert_eq!(
+ padded,
+ DstLayout {
+ align: NonZeroUsize::new($padded_align).unwrap(),
+ size_info: SizeInfo::Sized { _size: $padded_size },
+ }
+ );
+ };
+ }
+
+ test!(unpadded { size: 0, align: 4 } => padded { size: 0, align: 4 });
+ test!(unpadded { size: 1, align: 4 } => padded { size: 4, align: 4 });
+ test!(unpadded { size: 2, align: 4 } => padded { size: 4, align: 4 });
+ test!(unpadded { size: 3, align: 4 } => padded { size: 4, align: 4 });
+ test!(unpadded { size: 4, align: 4 } => padded { size: 4, align: 4 });
+ test!(unpadded { size: 5, align: 4 } => padded { size: 8, align: 4 });
+ test!(unpadded { size: 6, align: 4 } => padded { size: 8, align: 4 });
+ test!(unpadded { size: 7, align: 4 } => padded { size: 8, align: 4 });
+ test!(unpadded { size: 8, align: 4 } => padded { size: 8, align: 4 });
+
+ let current_max_align = DstLayout::CURRENT_MAX_ALIGN.get();
+
+ test!(unpadded { size: 1, align: current_max_align }
+ => padded { size: current_max_align, align: current_max_align });
+
+ test!(unpadded { size: current_max_align + 1, align: current_max_align }
+ => padded { size: current_max_align * 2, align: current_max_align });
+ }
+
+ /// Tests that calling `pad_to_align` on a DST `DstLayout` is a no-op.
+ #[test]
+ fn test_dst_layout_pad_to_align_with_dst() {
+ for align in (0..29).map(|p| NonZeroUsize::new(2usize.pow(p)).unwrap()) {
+ for offset in 0..10 {
+ for elem_size in 0..10 {
+ let layout = DstLayout {
+ align,
+ size_info: SizeInfo::SliceDst(TrailingSliceLayout {
+ _offset: offset,
+ _elem_size: elem_size,
+ }),
+ };
+ assert_eq!(layout.pad_to_align(), layout);
+ }
+ }
+ }
+ }
+
+ // This test takes a long time when running under Miri, so we skip it in
+ // that case. This is acceptable because this is a logic test that doesn't
+ // attempt to expose UB.
+ #[test]
+ #[cfg_attr(miri, ignore)]
+ fn test_validate_cast_and_convert_metadata() {
+ impl From<usize> for SizeInfo {
+ fn from(_size: usize) -> SizeInfo {
+ SizeInfo::Sized { _size }
+ }
+ }
+
+ impl From<(usize, usize)> for SizeInfo {
+ fn from((_offset, _elem_size): (usize, usize)) -> SizeInfo {
+ SizeInfo::SliceDst(TrailingSliceLayout { _offset, _elem_size })
+ }
+ }
+
+ fn layout<S: Into<SizeInfo>>(s: S, align: usize) -> DstLayout {
+ DstLayout { size_info: s.into(), align: NonZeroUsize::new(align).unwrap() }
+ }
+
+ /// This macro accepts arguments in the form of:
+ ///
+ ///           layout(_, _, _).validate(_, _, _), Ok(Some((_, _)))
+ ///                  |  |  |           |  |  |            |  |
+ ///    base_size ----+  |  |           |  |  |            |  |
+ ///    align -----------+  |           |  |  |            |  |
+ ///    trailing_size ------+           |  |  |            |  |
+ ///    addr ---------------------------+  |  |            |  |
+ ///    bytes_len -------------------------+  |            |  |
+ ///    cast_type ----------------------------+            |  |
+ ///    elems ----------------------------------------------+  |
+ ///    split_at -----------------------------------------------+
+ ///
+ /// `.validate` is shorthand for `.validate_cast_and_convert_metadata`
+ /// for brevity.
+ ///
+ /// Each argument can either be an iterator or a wildcard. Each
+ /// wildcarded variable is implicitly replaced by an iterator over a
+ /// representative sample of values for that variable. Each `test!`
+ /// invocation iterates over every combination of values provided by
+ /// each variable's iterator (ie, the cartesian product) and validates
+ /// that the results are expected.
+ ///
+ /// The final argument uses the same syntax, but it has a different
+ /// meaning:
+ /// - If it is `Ok(pat)`, then the pattern `pat` is supplied to
+ /// `assert_matches!` to validate the computed result for each
+ /// combination of input values.
+ /// - If it is `Err(msg)`, then `test!` validates that the call to
+ /// `validate_cast_and_convert_metadata` panics with the given panic
+ /// message.
+ ///
+ /// Note that the meta-variables that match these variables have the
+ /// `tt` type, and some valid expressions are not valid `tt`s (such as
+ /// `a..b`). In this case, wrap the expression in parentheses, and it
+ /// will become valid `tt`.
+ macro_rules! test {
+ ($(:$sizes:expr =>)?
+ layout($size:tt, $align:tt)
+ .validate($addr:tt, $bytes_len:tt, $cast_type:tt), $expect:pat $(,)?
+ ) => {
+ itertools::iproduct!(
+ test!(@generate_size $size),
+ test!(@generate_align $align),
+ test!(@generate_usize $addr),
+ test!(@generate_usize $bytes_len),
+ test!(@generate_cast_type $cast_type)
+ ).for_each(|(size_info, align, addr, bytes_len, cast_type)| {
+ // Temporarily disable the panic hook installed by the test
+ // harness. If we don't do this, all panic messages will be
+ // kept in an internal log. On its own, this isn't a
+ // problem, but if a non-caught panic ever happens (ie, in
+ // code later in this test not in this macro), all of the
+ // previously-buffered messages will be dumped, hiding the
+ // real culprit.
+ let previous_hook = std::panic::take_hook();
+ // I don't understand why, but this seems to be required in
+ // addition to the previous line.
+ std::panic::set_hook(Box::new(|_| {}));
+ let actual = std::panic::catch_unwind(|| {
+ layout(size_info, align).validate_cast_and_convert_metadata(addr, bytes_len, cast_type)
+ }).map_err(|d| {
+ *d.downcast::<&'static str>().expect("expected string panic message").as_ref()
+ });
+ std::panic::set_hook(previous_hook);
+
+ assert_matches::assert_matches!(
+ actual, $expect,
+ "layout({size_info:?}, {align}).validate_cast_and_convert_metadata({addr}, {bytes_len}, {cast_type:?})",
+ );
+ });
+ };
+ (@generate_usize _) => { 0..8 };
+ // Generate sizes for both Sized and !Sized types.
+ (@generate_size _) => {
+ test!(@generate_size (_)).chain(test!(@generate_size (_, _)))
+ };
+ // Generate sizes for both Sized and !Sized types by chaining
+ // specified iterators for each.
+ (@generate_size ($sized_sizes:tt | $unsized_sizes:tt)) => {
+ test!(@generate_size ($sized_sizes)).chain(test!(@generate_size $unsized_sizes))
+ };
+ // Generate sizes for Sized types.
+ (@generate_size (_)) => { test!(@generate_size (0..8)) };
+ (@generate_size ($sizes:expr)) => { $sizes.into_iter().map(Into::<SizeInfo>::into) };
+ // Generate sizes for !Sized types.
+ (@generate_size ($min_sizes:tt, $elem_sizes:tt)) => {
+ itertools::iproduct!(
+ test!(@generate_min_size $min_sizes),
+ test!(@generate_elem_size $elem_sizes)
+ ).map(Into::<SizeInfo>::into)
+ };
+ (@generate_fixed_size _) => { (0..8).into_iter().map(Into::<SizeInfo>::into) };
+ (@generate_min_size _) => { 0..8 };
+ (@generate_elem_size _) => { 1..8 };
+ (@generate_align _) => { [1, 2, 4, 8, 16] };
+ (@generate_opt_usize _) => { [None].into_iter().chain((0..8).map(Some).into_iter()) };
+ (@generate_cast_type _) => { [_CastType::_Prefix, _CastType::_Suffix] };
+ (@generate_cast_type $variant:ident) => { [_CastType::$variant] };
+ // Some expressions need to be wrapped in parentheses in order to be
+ // valid `tt`s (required by the top match pattern). See the comment
+ // below for more details. This arm removes these parentheses to
+ // avoid generating an `unused_parens` warning.
+ (@$_:ident ($vals:expr)) => { $vals };
+ (@$_:ident $vals:expr) => { $vals };
+ }
+
+ const EVENS: [usize; 8] = [0, 2, 4, 6, 8, 10, 12, 14];
+ const ODDS: [usize; 8] = [1, 3, 5, 7, 9, 11, 13, 15];
+
+ // base_size is too big for the memory region.
+ test!(layout(((1..8) | ((1..8), (1..8))), _).validate(_, [0], _), Ok(None));
+ test!(layout(((2..8) | ((2..8), (2..8))), _).validate(_, [1], _), Ok(None));
+
+ // addr is unaligned for prefix cast
+ test!(layout(_, [2]).validate(ODDS, _, _Prefix), Ok(None));
+
+ // addr is aligned, but end of buffer is unaligned for suffix cast
+ test!(layout(_, [2]).validate(EVENS, ODDS, _Suffix), Ok(None));
+
+ // Unfortunately, these constants cannot easily be used in the
+ // implementation of `validate_cast_and_convert_metadata`, since
+ // `panic!` requires a string literal, not an expression.
+ //
+ // It's important that these messages be in a separate module. If they
+ // were at the function's top level, we'd pass them to `test!` as, e.g.,
+ // `Err(TRAILING)`, which would run into a subtle Rust footgun - the
+ // `TRAILING` identifier would be treated as a pattern to match rather
+ // than a value to check for equality.
+ mod msgs {
+ pub(super) const TRAILING: &str =
+ "attempted to cast to slice type with zero-sized element";
+ pub(super) const OVERFLOW: &str = "`addr` + `bytes_len` > usize::MAX";
+ }
+
+ // casts with ZST trailing element types are unsupported
+ test!(layout((_, [0]), _).validate(_, _, _), Err(msgs::TRAILING),);
+
+ // addr + bytes_len must not overflow usize
+ test!(layout(_, _).validate([usize::MAX], (1..100), _), Err(msgs::OVERFLOW));
+ test!(layout(_, _).validate((1..100), [usize::MAX], _), Err(msgs::OVERFLOW));
+ test!(
+ layout(_, _).validate(
+ [usize::MAX / 2 + 1, usize::MAX],
+ [usize::MAX / 2 + 1, usize::MAX],
+ _
+ ),
+ Err(msgs::OVERFLOW)
+ );
+
+ // Validates that `validate_cast_and_convert_metadata` satisfies its own
+ // documented safety postconditions, and also a few other properties
+ // that aren't documented but we want to guarantee anyway.
+ fn validate_behavior(
+ (layout, addr, bytes_len, cast_type): (DstLayout, usize, usize, _CastType),
+ ) {
+ if let Some((elems, split_at)) =
+ layout.validate_cast_and_convert_metadata(addr, bytes_len, cast_type)
+ {
+ let (size_info, align) = (layout.size_info, layout.align);
+ let debug_str = format!(
+ "layout({size_info:?}, {align}).validate_cast_and_convert_metadata({addr}, {bytes_len}, {cast_type:?}) => ({elems}, {split_at})",
+ );
+
+ // If this is a sized type (no trailing slice), then `elems` is
+ // meaningless, but in practice we set it to 0. Callers are not
+ // allowed to rely on this, but a lot of math is nicer if
+ // they're able to, and some callers might accidentally do that.
+ let sized = matches!(layout.size_info, SizeInfo::Sized { .. });
+ assert!(!(sized && elems != 0), "{}", debug_str);
+
+ let resulting_size = match layout.size_info {
+ SizeInfo::Sized { _size } => _size,
+ SizeInfo::SliceDst(TrailingSliceLayout {
+ _offset: offset,
+ _elem_size: elem_size,
+ }) => {
+ let padded_size = |elems| {
+ let without_padding = offset + elems * elem_size;
+ without_padding
+ + util::core_layout::padding_needed_for(without_padding, align)
+ };
+
+ let resulting_size = padded_size(elems);
+ // Test that `validate_cast_and_convert_metadata`
+ // computed the largest possible value that fits in the
+ // given range.
+ assert!(padded_size(elems + 1) > bytes_len, "{}", debug_str);
+ resulting_size
+ }
+ };
+
+ // Test safety postconditions guaranteed by
+ // `validate_cast_and_convert_metadata`.
+ assert!(resulting_size <= bytes_len, "{}", debug_str);
+ match cast_type {
+ _CastType::_Prefix => {
+ assert_eq!(addr % align, 0, "{}", debug_str);
+ assert_eq!(resulting_size, split_at, "{}", debug_str);
+ }
+ _CastType::_Suffix => {
+ assert_eq!(split_at, bytes_len - resulting_size, "{}", debug_str);
+ assert_eq!((addr + split_at) % align, 0, "{}", debug_str);
+ }
+ }
+ } else {
+ let min_size = match layout.size_info {
+ SizeInfo::Sized { _size } => _size,
+ SizeInfo::SliceDst(TrailingSliceLayout { _offset, .. }) => {
+ _offset + util::core_layout::padding_needed_for(_offset, layout.align)
+ }
+ };
+
+ // If a cast is invalid, it is either because...
+ // 1. there are insufficient bytes at the given region for the type:
+ let insufficient_bytes = bytes_len < min_size;
+ // 2. performing the cast would misalign the type:
+ let base = match cast_type {
+ _CastType::_Prefix => 0,
+ _CastType::_Suffix => bytes_len,
+ };
+ let misaligned = (base + addr) % layout.align != 0;
+
+ assert!(insufficient_bytes || misaligned);
+ }
+ }
+
+ let sizes = 0..8;
+ let elem_sizes = 1..8;
+ let size_infos = sizes
+ .clone()
+ .map(Into::<SizeInfo>::into)
+ .chain(itertools::iproduct!(sizes, elem_sizes).map(Into::<SizeInfo>::into));
+ let layouts = itertools::iproduct!(size_infos, [1, 2, 4, 8, 16, 32])
+ .filter(|(size_info, align)| !matches!(size_info, SizeInfo::Sized { _size } if _size % align != 0))
+ .map(|(size_info, align)| layout(size_info, align));
+ itertools::iproduct!(layouts, 0..8, 0..8, [_CastType::_Prefix, _CastType::_Suffix])
+ .for_each(validate_behavior);
+ }
+
+ #[test]
+ #[cfg(__INTERNAL_USE_ONLY_NIGHLTY_FEATURES_IN_TESTS)]
+ fn test_validate_rust_layout() {
+ use core::ptr::NonNull;
+
+ // This test synthesizes pointers with various metadata and uses Rust's
+ // built-in APIs to confirm that Rust makes decisions about type layout
+ // which are consistent with what we believe is guaranteed by the
+ // language. If this test fails, it doesn't just mean our code is wrong
+ // - it means we're misunderstanding the language's guarantees.
+
+ #[derive(Debug)]
+ struct MacroArgs {
+ offset: usize,
+ align: NonZeroUsize,
+ elem_size: Option<usize>,
+ }
+
+ /// # Safety
+ ///
+ /// `test` promises to only call `addr_of_slice_field` on a `NonNull<T>`
+ /// which points to a valid `T`.
+ ///
+ /// `with_elems` must produce a pointer which points to a valid `T`.
+ fn test<T: ?Sized, W: Fn(usize) -> NonNull<T>>(
+ args: MacroArgs,
+ with_elems: W,
+ addr_of_slice_field: Option<fn(NonNull<T>) -> NonNull<u8>>,
+ ) {
+ let dst = args.elem_size.is_some();
+ let layout = {
+ let size_info = match args.elem_size {
+ Some(elem_size) => SizeInfo::SliceDst(TrailingSliceLayout {
+ _offset: args.offset,
+ _elem_size: elem_size,
+ }),
+ None => SizeInfo::Sized {
+ // Rust only supports types whose sizes are a multiple
+ // of their alignment. If the macro created a type like
+ // this:
+ //
+ // #[repr(C, align(2))]
+ // struct Foo([u8; 1]);
+ //
+ // ...then Rust will automatically round the type's size
+ // up to 2.
+ _size: args.offset
+ + util::core_layout::padding_needed_for(args.offset, args.align),
+ },
+ };
+ DstLayout { size_info, align: args.align }
+ };
+
+ for elems in 0..128 {
+ let ptr = with_elems(elems);
+
+ if let Some(addr_of_slice_field) = addr_of_slice_field {
+ let slc_field_ptr = addr_of_slice_field(ptr).as_ptr();
+ // SAFETY: Both `slc_field_ptr` and `ptr` are pointers to
+ // the same valid Rust object.
+ let offset: usize =
+ unsafe { slc_field_ptr.byte_offset_from(ptr.as_ptr()).try_into().unwrap() };
+ assert_eq!(offset, args.offset);
+ }
+
+ // SAFETY: `ptr` points to a valid `T`.
+ let (size, align) = unsafe {
+ (mem::size_of_val_raw(ptr.as_ptr()), mem::align_of_val_raw(ptr.as_ptr()))
+ };
+
+ // Avoid expensive allocation when running under Miri.
+ let assert_msg = if !cfg!(miri) {
+ format!("\n{args:?}\nsize:{size}, align:{align}")
+ } else {
+ String::new()
+ };
+
+ let without_padding =
+ args.offset + args.elem_size.map(|elem_size| elems * elem_size).unwrap_or(0);
+ assert!(size >= without_padding, "{}", assert_msg);
+ assert_eq!(align, args.align.get(), "{}", assert_msg);
+
+ // This encodes the most important part of the test: our
+ // understanding of how Rust determines the layout of repr(C)
+ // types. Sized repr(C) types are trivial, but DST types have
+ // some subtlety. Note that:
+ // - For sized types, `without_padding` is just the size of the
+ // type that we constructed for `Foo`. Since we may have
+ // requested a larger alignment, `Foo` may actually be larger
+ // than this, hence `padding_needed_for`.
+ // - For unsized types, `without_padding` is dynamically
+ // computed from the offset, the element size, and element
+ // count. We expect that the size of the object should be
+ // `offset + elem_size * elems` rounded up to the next
+ // alignment.
+ let expected_size = without_padding
+ + util::core_layout::padding_needed_for(without_padding, args.align);
+ assert_eq!(expected_size, size, "{}", assert_msg);
+
+ // For zero-sized element types,
+ // `validate_cast_and_convert_metadata` just panics, so we skip
+ // testing those types.
+ if args.elem_size.map(|elem_size| elem_size > 0).unwrap_or(true) {
+ let addr = ptr.addr().get();
+ let (got_elems, got_split_at) = layout
+ .validate_cast_and_convert_metadata(addr, size, _CastType::_Prefix)
+ .unwrap();
+ // Avoid expensive allocation when running under Miri.
+ let assert_msg = if !cfg!(miri) {
+ format!(
+ "{}\nvalidate_cast_and_convert_metadata({addr}, {size})",
+ assert_msg
+ )
+ } else {
+ String::new()
+ };
+ assert_eq!(got_split_at, size, "{}", assert_msg);
+ if dst {
+ assert!(got_elems >= elems, "{}", assert_msg);
+ if got_elems != elems {
+ // If `validate_cast_and_convert_metadata`
+ // returned more elements than `elems`, that
+ // means that `elems` is not the maximum number
+ // of elements that can fit in `size` - in other
+ // words, there is enough padding at the end of
+ // the value to fit at least one more element.
+ // If we use this metadata to synthesize a
+ // pointer, despite having a different element
+ // count, we still expect it to have the same
+ // size.
+ let got_ptr = with_elems(got_elems);
+ // SAFETY: `got_ptr` is a pointer to a valid `T`.
+ let size_of_got_ptr = unsafe { mem::size_of_val_raw(got_ptr.as_ptr()) };
+ assert_eq!(size_of_got_ptr, size, "{}", assert_msg);
+ }
+ } else {
+ // For sized casts, the returned element value is
+ // technically meaningless, and we don't guarantee any
+ // particular value. In practice, it's always zero.
+ assert_eq!(got_elems, 0, "{}", assert_msg)
+ }
+ }
+ }
+ }
+
+ macro_rules! validate_against_rust {
+ ($offset:literal, $align:literal $(, $elem_size:literal)?) => {{
+ #[repr(C, align($align))]
+ struct Foo([u8; $offset]$(, [[u8; $elem_size]])?);
+
+ let args = MacroArgs {
+ offset: $offset,
+ align: $align.try_into().unwrap(),
+ elem_size: {
+ #[allow(unused)]
+ let ret = None::<usize>;
+ $(let ret = Some($elem_size);)?
+ ret
+ }
+ };
+
+ #[repr(C, align($align))]
+ struct FooAlign;
+ // Create an aligned buffer to use in order to synthesize
+ // pointers to `Foo`. We don't ever load values from these
+ // pointers - we just do arithmetic on them - so having a "real"
+ // block of memory as opposed to a validly-aligned-but-dangling
+ // pointer is only necessary to make Miri happy since we run it
+ // with "strict provenance" checking enabled.
+ let aligned_buf = Align::<_, FooAlign>::new([0u8; 1024]);
+ let with_elems = |elems| {
+ let slc = NonNull::slice_from_raw_parts(NonNull::from(&aligned_buf.t), elems);
+ #[allow(clippy::as_conversions)]
+ NonNull::new(slc.as_ptr() as *mut Foo).unwrap()
+ };
+ let addr_of_slice_field = {
+ #[allow(unused)]
+ let f = None::<fn(NonNull<Foo>) -> NonNull<u8>>;
+ $(
+ // SAFETY: `test` promises to only call `f` with a `ptr`
+ // to a valid `Foo`.
+ let f: Option<fn(NonNull<Foo>) -> NonNull<u8>> = Some(|ptr: NonNull<Foo>| unsafe {
+ NonNull::new(ptr::addr_of_mut!((*ptr.as_ptr()).1)).unwrap().cast::<u8>()
+ });
+ let _ = $elem_size;
+ )?
+ f
+ };
+
+ test::<Foo, _>(args, with_elems, addr_of_slice_field);
+ }};
+ }
+
+ // Every permutation of:
+ // - offset in [0, 4]
+ // - align in [1, 16]
+ // - elem_size in [0, 4] (plus no elem_size)
+ validate_against_rust!(0, 1);
+ validate_against_rust!(0, 1, 0);
+ validate_against_rust!(0, 1, 1);
+ validate_against_rust!(0, 1, 2);
+ validate_against_rust!(0, 1, 3);
+ validate_against_rust!(0, 1, 4);
+ validate_against_rust!(0, 2);
+ validate_against_rust!(0, 2, 0);
+ validate_against_rust!(0, 2, 1);
+ validate_against_rust!(0, 2, 2);
+ validate_against_rust!(0, 2, 3);
+ validate_against_rust!(0, 2, 4);
+ validate_against_rust!(0, 4);
+ validate_against_rust!(0, 4, 0);
+ validate_against_rust!(0, 4, 1);
+ validate_against_rust!(0, 4, 2);
+ validate_against_rust!(0, 4, 3);
+ validate_against_rust!(0, 4, 4);
+ validate_against_rust!(0, 8);
+ validate_against_rust!(0, 8, 0);
+ validate_against_rust!(0, 8, 1);
+ validate_against_rust!(0, 8, 2);
+ validate_against_rust!(0, 8, 3);
+ validate_against_rust!(0, 8, 4);
+ validate_against_rust!(0, 16);
+ validate_against_rust!(0, 16, 0);
+ validate_against_rust!(0, 16, 1);
+ validate_against_rust!(0, 16, 2);
+ validate_against_rust!(0, 16, 3);
+ validate_against_rust!(0, 16, 4);
+ validate_against_rust!(1, 1);
+ validate_against_rust!(1, 1, 0);
+ validate_against_rust!(1, 1, 1);
+ validate_against_rust!(1, 1, 2);
+ validate_against_rust!(1, 1, 3);
+ validate_against_rust!(1, 1, 4);
+ validate_against_rust!(1, 2);
+ validate_against_rust!(1, 2, 0);
+ validate_against_rust!(1, 2, 1);
+ validate_against_rust!(1, 2, 2);
+ validate_against_rust!(1, 2, 3);
+ validate_against_rust!(1, 2, 4);
+ validate_against_rust!(1, 4);
+ validate_against_rust!(1, 4, 0);
+ validate_against_rust!(1, 4, 1);
+ validate_against_rust!(1, 4, 2);
+ validate_against_rust!(1, 4, 3);
+ validate_against_rust!(1, 4, 4);
+ validate_against_rust!(1, 8);
+ validate_against_rust!(1, 8, 0);
+ validate_against_rust!(1, 8, 1);
+ validate_against_rust!(1, 8, 2);
+ validate_against_rust!(1, 8, 3);
+ validate_against_rust!(1, 8, 4);
+ validate_against_rust!(1, 16);
+ validate_against_rust!(1, 16, 0);
+ validate_against_rust!(1, 16, 1);
+ validate_against_rust!(1, 16, 2);
+ validate_against_rust!(1, 16, 3);
+ validate_against_rust!(1, 16, 4);
+ validate_against_rust!(2, 1);
+ validate_against_rust!(2, 1, 0);
+ validate_against_rust!(2, 1, 1);
+ validate_against_rust!(2, 1, 2);
+ validate_against_rust!(2, 1, 3);
+ validate_against_rust!(2, 1, 4);
+ validate_against_rust!(2, 2);
+ validate_against_rust!(2, 2, 0);
+ validate_against_rust!(2, 2, 1);
+ validate_against_rust!(2, 2, 2);
+ validate_against_rust!(2, 2, 3);
+ validate_against_rust!(2, 2, 4);
+ validate_against_rust!(2, 4);
+ validate_against_rust!(2, 4, 0);
+ validate_against_rust!(2, 4, 1);
+ validate_against_rust!(2, 4, 2);
+ validate_against_rust!(2, 4, 3);
+ validate_against_rust!(2, 4, 4);
+ validate_against_rust!(2, 8);
+ validate_against_rust!(2, 8, 0);
+ validate_against_rust!(2, 8, 1);
+ validate_against_rust!(2, 8, 2);
+ validate_against_rust!(2, 8, 3);
+ validate_against_rust!(2, 8, 4);
+ validate_against_rust!(2, 16);
+ validate_against_rust!(2, 16, 0);
+ validate_against_rust!(2, 16, 1);
+ validate_against_rust!(2, 16, 2);
+ validate_against_rust!(2, 16, 3);
+ validate_against_rust!(2, 16, 4);
+ validate_against_rust!(3, 1);
+ validate_against_rust!(3, 1, 0);
+ validate_against_rust!(3, 1, 1);
+ validate_against_rust!(3, 1, 2);
+ validate_against_rust!(3, 1, 3);
+ validate_against_rust!(3, 1, 4);
+ validate_against_rust!(3, 2);
+ validate_against_rust!(3, 2, 0);
+ validate_against_rust!(3, 2, 1);
+ validate_against_rust!(3, 2, 2);
+ validate_against_rust!(3, 2, 3);
+ validate_against_rust!(3, 2, 4);
+ validate_against_rust!(3, 4);
+ validate_against_rust!(3, 4, 0);
+ validate_against_rust!(3, 4, 1);
+ validate_against_rust!(3, 4, 2);
+ validate_against_rust!(3, 4, 3);
+ validate_against_rust!(3, 4, 4);
+ validate_against_rust!(3, 8);
+ validate_against_rust!(3, 8, 0);
+ validate_against_rust!(3, 8, 1);
+ validate_against_rust!(3, 8, 2);
+ validate_against_rust!(3, 8, 3);
+ validate_against_rust!(3, 8, 4);
+ validate_against_rust!(3, 16);
+ validate_against_rust!(3, 16, 0);
+ validate_against_rust!(3, 16, 1);
+ validate_against_rust!(3, 16, 2);
+ validate_against_rust!(3, 16, 3);
+ validate_against_rust!(3, 16, 4);
+ validate_against_rust!(4, 1);
+ validate_against_rust!(4, 1, 0);
+ validate_against_rust!(4, 1, 1);
+ validate_against_rust!(4, 1, 2);
+ validate_against_rust!(4, 1, 3);
+ validate_against_rust!(4, 1, 4);
+ validate_against_rust!(4, 2);
+ validate_against_rust!(4, 2, 0);
+ validate_against_rust!(4, 2, 1);
+ validate_against_rust!(4, 2, 2);
+ validate_against_rust!(4, 2, 3);
+ validate_against_rust!(4, 2, 4);
+ validate_against_rust!(4, 4);
+ validate_against_rust!(4, 4, 0);
+ validate_against_rust!(4, 4, 1);
+ validate_against_rust!(4, 4, 2);
+ validate_against_rust!(4, 4, 3);
+ validate_against_rust!(4, 4, 4);
+ validate_against_rust!(4, 8);
+ validate_against_rust!(4, 8, 0);
+ validate_against_rust!(4, 8, 1);
+ validate_against_rust!(4, 8, 2);
+ validate_against_rust!(4, 8, 3);
+ validate_against_rust!(4, 8, 4);
+ validate_against_rust!(4, 16);
+ validate_against_rust!(4, 16, 0);
+ validate_against_rust!(4, 16, 1);
+ validate_against_rust!(4, 16, 2);
+ validate_against_rust!(4, 16, 3);
+ validate_against_rust!(4, 16, 4);
+ }
+
+ #[test]
+ fn test_known_layout() {
+ // Test that `$ty` and `ManuallyDrop<$ty>` have the expected layout.
+ // Test that `PhantomData<$ty>` has the same layout as `()` regardless
+ // of `$ty`.
+ macro_rules! test {
+ ($ty:ty, $expect:expr) => {
+ let expect = $expect;
+ assert_eq!(<$ty as KnownLayout>::LAYOUT, expect);
+ assert_eq!(<ManuallyDrop<$ty> as KnownLayout>::LAYOUT, expect);
+ assert_eq!(<PhantomData<$ty> as KnownLayout>::LAYOUT, <() as KnownLayout>::LAYOUT);
+ };
+ }
+
+ let layout = |offset, align, _trailing_slice_elem_size| DstLayout {
+ align: NonZeroUsize::new(align).unwrap(),
+ size_info: match _trailing_slice_elem_size {
+ None => SizeInfo::Sized { _size: offset },
+ Some(elem_size) => SizeInfo::SliceDst(TrailingSliceLayout {
+ _offset: offset,
+ _elem_size: elem_size,
+ }),
+ },
+ };
+
+ test!((), layout(0, 1, None));
+ test!(u8, layout(1, 1, None));
+ // Use `align_of` because `u64` alignment may be smaller than 8 on some
+ // platforms.
+ test!(u64, layout(8, mem::align_of::<u64>(), None));
+ test!(AU64, layout(8, 8, None));
+
+ test!(Option<&'static ()>, usize::LAYOUT);
+
+ test!([()], layout(0, 1, Some(0)));
+ test!([u8], layout(0, 1, Some(1)));
+ test!(str, layout(0, 1, Some(1)));
+ }
+
+ #[cfg(feature = "derive")]
+ #[test]
+ fn test_known_layout_derive() {
+ // In this and other files (`late_compile_pass.rs`,
+ // `mid_compile_pass.rs`, and `struct.rs`), we test success and failure
+ // modes of `derive(KnownLayout)` for the following combination of
+ // properties:
+ //
+ // +------------+--------------------------------------+-----------+
+ // |            |      trailing field properties       |           |
+ // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
+ // |------------+----------+----------------+----------+-----------|
+ // |          N |        N |              N |        N |      KL00 |
+ // |          N |        N |              N |        Y |      KL01 |
+ // |          N |        N |              Y |        N |      KL02 |
+ // |          N |        N |              Y |        Y |      KL03 |
+ // |          N |        Y |              N |        N |      KL04 |
+ // |          N |        Y |              N |        Y |      KL05 |
+ // |          N |        Y |              Y |        N |      KL06 |
+ // |          N |        Y |              Y |        Y |      KL07 |
+ // |          Y |        N |              N |        N |      KL08 |
+ // |          Y |        N |              N |        Y |      KL09 |
+ // |          Y |        N |              Y |        N |      KL10 |
+ // |          Y |        N |              Y |        Y |      KL11 |
+ // |          Y |        Y |              N |        N |      KL12 |
+ // |          Y |        Y |              N |        Y |      KL13 |
+ // |          Y |        Y |              Y |        N |      KL14 |
+ // |          Y |        Y |              Y |        Y |      KL15 |
+ // +------------+----------+----------------+----------+-----------+
+
+ struct NotKnownLayout<T = ()> {
+ _t: T,
+ }
+
+ #[derive(KnownLayout)]
+ #[repr(C)]
+ struct AlignSize<const ALIGN: usize, const SIZE: usize>
+ where
+ elain::Align<ALIGN>: elain::Alignment,
+ {
+ _align: elain::Align<ALIGN>,
+ _size: [u8; SIZE],
+ }
+
+ type AU16 = AlignSize<2, 2>;
+ type AU32 = AlignSize<4, 4>;
+
+ fn _assert_kl<T: ?Sized + KnownLayout>(_: &T) {}
+
+ let sized_layout = |align, size| DstLayout {
+ align: NonZeroUsize::new(align).unwrap(),
+ size_info: SizeInfo::Sized { _size: size },
+ };
+
+ let unsized_layout = |align, elem_size, offset| DstLayout {
+ align: NonZeroUsize::new(align).unwrap(),
+ size_info: SizeInfo::SliceDst(TrailingSliceLayout {
+ _offset: offset,
+ _elem_size: elem_size,
+ }),
+ };
+
+ // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
+ // |          N |        N |              N |        Y |      KL01 |
+ #[derive(KnownLayout)]
+ struct KL01(NotKnownLayout<AU32>, NotKnownLayout<AU16>);
+
+ let expected = DstLayout::for_type::<KL01>();
+
+ assert_eq!(<KL01 as KnownLayout>::LAYOUT, expected);
+ assert_eq!(<KL01 as KnownLayout>::LAYOUT, sized_layout(4, 8));
+
+ // ...with `align(N)`:
+ #[derive(KnownLayout)]
+ #[repr(align(64))]
+ struct KL01Align(NotKnownLayout<AU32>, NotKnownLayout<AU16>);
+
+ let expected = DstLayout::for_type::<KL01Align>();
+
+ assert_eq!(<KL01Align as KnownLayout>::LAYOUT, expected);
+ assert_eq!(<KL01Align as KnownLayout>::LAYOUT, sized_layout(64, 64));
+
+ // ...with `packed`:
+ #[derive(KnownLayout)]
+ #[repr(packed)]
+ struct KL01Packed(NotKnownLayout<AU32>, NotKnownLayout<AU16>);
+
+ let expected = DstLayout::for_type::<KL01Packed>();
+
+ assert_eq!(<KL01Packed as KnownLayout>::LAYOUT, expected);
+ assert_eq!(<KL01Packed as KnownLayout>::LAYOUT, sized_layout(1, 6));
+
+ // ...with `packed(N)`:
+ #[derive(KnownLayout)]
+ #[repr(packed(2))]
+ struct KL01PackedN(NotKnownLayout<AU32>, NotKnownLayout<AU16>);
+
+ assert_impl_all!(KL01PackedN: KnownLayout);
+
+ let expected = DstLayout::for_type::<KL01PackedN>();
+
+ assert_eq!(<KL01PackedN as KnownLayout>::LAYOUT, expected);
+ assert_eq!(<KL01PackedN as KnownLayout>::LAYOUT, sized_layout(2, 6));
+
+ // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
+ // |          N |        N |              Y |        Y |      KL03 |
+ #[derive(KnownLayout)]
+ struct KL03(NotKnownLayout, u8);
+
+ let expected = DstLayout::for_type::<KL03>();
+
+ assert_eq!(<KL03 as KnownLayout>::LAYOUT, expected);
+ assert_eq!(<KL03 as KnownLayout>::LAYOUT, sized_layout(1, 1));
+
+ // ... with `align(N)`
+ #[derive(KnownLayout)]
+ #[repr(align(64))]
+ struct KL03Align(NotKnownLayout<AU32>, u8);
+
+ let expected = DstLayout::for_type::<KL03Align>();
+
+ assert_eq!(<KL03Align as KnownLayout>::LAYOUT, expected);
+ assert_eq!(<KL03Align as KnownLayout>::LAYOUT, sized_layout(64, 64));
+
+ // ... with `packed`:
+ #[derive(KnownLayout)]
+ #[repr(packed)]
+ struct KL03Packed(NotKnownLayout<AU32>, u8);
+
+ let expected = DstLayout::for_type::<KL03Packed>();
+
+ assert_eq!(<KL03Packed as KnownLayout>::LAYOUT, expected);
+ assert_eq!(<KL03Packed as KnownLayout>::LAYOUT, sized_layout(1, 5));
+
+ // ... with `packed(N)`
+ #[derive(KnownLayout)]
+ #[repr(packed(2))]
+ struct KL03PackedN(NotKnownLayout<AU32>, u8);
+
+ assert_impl_all!(KL03PackedN: KnownLayout);
+
+ let expected = DstLayout::for_type::<KL03PackedN>();
+
+ assert_eq!(<KL03PackedN as KnownLayout>::LAYOUT, expected);
+ assert_eq!(<KL03PackedN as KnownLayout>::LAYOUT, sized_layout(2, 6));
+
+ // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
+ // |          N |        Y |              N |        Y |      KL05 |
+ #[derive(KnownLayout)]
+ struct KL05<T>(u8, T);
+
+ fn _test_kl05<T>(t: T) -> impl KnownLayout {
+ KL05(0u8, t)
+ }
+
+ // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
+ // |          N |        Y |              Y |        Y |      KL07 |
+ #[derive(KnownLayout)]
+ struct KL07<T: KnownLayout>(u8, T);
+
+ fn _test_kl07<T: KnownLayout>(t: T) -> impl KnownLayout {
+ let _ = KL07(0u8, t);
+ }
+
+ // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
+ // |          Y |        N |              Y |        N |      KL10 |
+ #[derive(KnownLayout)]
+ #[repr(C)]
+ struct KL10(NotKnownLayout<AU32>, [u8]);
+
+ let expected = DstLayout::new_zst(None)
+ .extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), None)
+ .extend(<[u8] as KnownLayout>::LAYOUT, None)
+ .pad_to_align();
+
+ assert_eq!(<KL10 as KnownLayout>::LAYOUT, expected);
+ assert_eq!(<KL10 as KnownLayout>::LAYOUT, unsized_layout(4, 1, 4));
+
+ // ...with `align(N)`:
+ #[derive(KnownLayout)]
+ #[repr(C, align(64))]
+ struct KL10Align(NotKnownLayout<AU32>, [u8]);
+
+ let repr_align = NonZeroUsize::new(64);
+
+ let expected = DstLayout::new_zst(repr_align)
+ .extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), None)
+ .extend(<[u8] as KnownLayout>::LAYOUT, None)
+ .pad_to_align();
+
+ assert_eq!(<KL10Align as KnownLayout>::LAYOUT, expected);
+ assert_eq!(<KL10Align as KnownLayout>::LAYOUT, unsized_layout(64, 1, 4));
+
+ // ...with `packed`:
+ #[derive(KnownLayout)]
+ #[repr(C, packed)]
+ struct KL10Packed(NotKnownLayout<AU32>, [u8]);
+
+ let repr_packed = NonZeroUsize::new(1);
+
+ let expected = DstLayout::new_zst(None)
+ .extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), repr_packed)
+ .extend(<[u8] as KnownLayout>::LAYOUT, repr_packed)
+ .pad_to_align();
+
+ assert_eq!(<KL10Packed as KnownLayout>::LAYOUT, expected);
+ assert_eq!(<KL10Packed as KnownLayout>::LAYOUT, unsized_layout(1, 1, 4));
+
+ // ...with `packed(N)`:
+ #[derive(KnownLayout)]
+ #[repr(C, packed(2))]
+ struct KL10PackedN(NotKnownLayout<AU32>, [u8]);
+
+ let repr_packed = NonZeroUsize::new(2);
+
+ let expected = DstLayout::new_zst(None)
+ .extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), repr_packed)
+ .extend(<[u8] as KnownLayout>::LAYOUT, repr_packed)
+ .pad_to_align();
+
+ assert_eq!(<KL10PackedN as KnownLayout>::LAYOUT, expected);
+ assert_eq!(<KL10PackedN as KnownLayout>::LAYOUT, unsized_layout(2, 1, 4));
+
+ // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
+ // |          Y |        N |              Y |        Y |      KL11 |
+ #[derive(KnownLayout)]
+ #[repr(C)]
+ struct KL11(NotKnownLayout<AU64>, u8);
+
+ let expected = DstLayout::new_zst(None)
+ .extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), None)
+ .extend(<u8 as KnownLayout>::LAYOUT, None)
+ .pad_to_align();
+
+ assert_eq!(<KL11 as KnownLayout>::LAYOUT, expected);
+ assert_eq!(<KL11 as KnownLayout>::LAYOUT, sized_layout(8, 16));
+
+ // ...with `align(N)`:
+ #[derive(KnownLayout)]
+ #[repr(C, align(64))]
+ struct KL11Align(NotKnownLayout<AU64>, u8);
+
+ let repr_align = NonZeroUsize::new(64);
+
+ let expected = DstLayout::new_zst(repr_align)
+ .extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), None)
+ .extend(<u8 as KnownLayout>::LAYOUT, None)
+ .pad_to_align();
+
+ assert_eq!(<KL11Align as KnownLayout>::LAYOUT, expected);
+ assert_eq!(<KL11Align as KnownLayout>::LAYOUT, sized_layout(64, 64));
+
+ // ...with `packed`:
+ #[derive(KnownLayout)]
+ #[repr(C, packed)]
+ struct KL11Packed(NotKnownLayout<AU64>, u8);
+
+ let repr_packed = NonZeroUsize::new(1);
+
+ let expected = DstLayout::new_zst(None)
+ .extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), repr_packed)
+ .extend(<u8 as KnownLayout>::LAYOUT, repr_packed)
+ .pad_to_align();
+
+ assert_eq!(<KL11Packed as KnownLayout>::LAYOUT, expected);
+ assert_eq!(<KL11Packed as KnownLayout>::LAYOUT, sized_layout(1, 9));
+
+ // ...with `packed(N)`:
+ #[derive(KnownLayout)]
+ #[repr(C, packed(2))]
+ struct KL11PackedN(NotKnownLayout<AU64>, u8);
+
+ let repr_packed = NonZeroUsize::new(2);
+
+ let expected = DstLayout::new_zst(None)
+ .extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), repr_packed)
+ .extend(<u8 as KnownLayout>::LAYOUT, repr_packed)
+ .pad_to_align();
+
+ assert_eq!(<KL11PackedN as KnownLayout>::LAYOUT, expected);
+ assert_eq!(<KL11PackedN as KnownLayout>::LAYOUT, sized_layout(2, 10));
+
+ // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
+ // |          Y |        Y |              Y |        N |      KL14 |
+ #[derive(KnownLayout)]
+ #[repr(C)]
+ struct KL14<T: ?Sized + KnownLayout>(u8, T);
+
+ fn _test_kl14<T: ?Sized + KnownLayout>(kl: &KL14<T>) {
+ _assert_kl(kl)
+ }
+
+ // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
+ // |          Y |        Y |              Y |        Y |      KL15 |
+ #[derive(KnownLayout)]
+ #[repr(C)]
+ struct KL15<T: KnownLayout>(u8, T);
+
+ fn _test_kl15<T: KnownLayout>(t: T) -> impl KnownLayout {
+ let _ = KL15(0u8, t);
+ }
+
+ // Test a variety of combinations of field types:
+ // - ()
+ // - u8
+ // - AU16
+ // - [()]
+ // - [u8]
+ // - [AU16]
+
+ #[allow(clippy::upper_case_acronyms)]
+ #[derive(KnownLayout)]
+ #[repr(C)]
+ struct KLTU<T, U: ?Sized>(T, U);
+
+ assert_eq!(<KLTU<(), ()> as KnownLayout>::LAYOUT, sized_layout(1, 0));
+
+ assert_eq!(<KLTU<(), u8> as KnownLayout>::LAYOUT, sized_layout(1, 1));
+
+ assert_eq!(<KLTU<(), AU16> as KnownLayout>::LAYOUT, sized_layout(2, 2));
+
+ assert_eq!(<KLTU<(), [()]> as KnownLayout>::LAYOUT, unsized_layout(1, 0, 0));
+
+ assert_eq!(<KLTU<(), [u8]> as KnownLayout>::LAYOUT, unsized_layout(1, 1, 0));
+
+ assert_eq!(<KLTU<(), [AU16]> as KnownLayout>::LAYOUT, unsized_layout(2, 2, 0));
+
+ assert_eq!(<KLTU<u8, ()> as KnownLayout>::LAYOUT, sized_layout(1, 1));
+
+ assert_eq!(<KLTU<u8, u8> as KnownLayout>::LAYOUT, sized_layout(1, 2));
+
+ assert_eq!(<KLTU<u8, AU16> as KnownLayout>::LAYOUT, sized_layout(2, 4));
+
+ assert_eq!(<KLTU<u8, [()]> as KnownLayout>::LAYOUT, unsized_layout(1, 0, 1));
+
+ assert_eq!(<KLTU<u8, [u8]> as KnownLayout>::LAYOUT, unsized_layout(1, 1, 1));
+
+ assert_eq!(<KLTU<u8, [AU16]> as KnownLayout>::LAYOUT, unsized_layout(2, 2, 2));
+
+ assert_eq!(<KLTU<AU16, ()> as KnownLayout>::LAYOUT, sized_layout(2, 2));
+
+ assert_eq!(<KLTU<AU16, u8> as KnownLayout>::LAYOUT, sized_layout(2, 4));
+
+ assert_eq!(<KLTU<AU16, AU16> as KnownLayout>::LAYOUT, sized_layout(2, 4));
+
+ assert_eq!(<KLTU<AU16, [()]> as KnownLayout>::LAYOUT, unsized_layout(2, 0, 2));
+
+ assert_eq!(<KLTU<AU16, [u8]> as KnownLayout>::LAYOUT, unsized_layout(2, 1, 2));
+
+ assert_eq!(<KLTU<AU16, [AU16]> as KnownLayout>::LAYOUT, unsized_layout(2, 2, 2));
+
+ // Test a variety of field counts.
+
+ #[derive(KnownLayout)]
+ #[repr(C)]
+ struct KLF0;
+
+ assert_eq!(<KLF0 as KnownLayout>::LAYOUT, sized_layout(1, 0));
+
+ #[derive(KnownLayout)]
+ #[repr(C)]
+ struct KLF1([u8]);
+
+ assert_eq!(<KLF1 as KnownLayout>::LAYOUT, unsized_layout(1, 1, 0));
+
+ #[derive(KnownLayout)]
+ #[repr(C)]
+ struct KLF2(NotKnownLayout<u8>, [u8]);
+
+ assert_eq!(<KLF2 as KnownLayout>::LAYOUT, unsized_layout(1, 1, 1));
+
+ #[derive(KnownLayout)]
+ #[repr(C)]
+ struct KLF3(NotKnownLayout<u8>, NotKnownLayout<AU16>, [u8]);
+
+ assert_eq!(<KLF3 as KnownLayout>::LAYOUT, unsized_layout(2, 1, 4));
+
+ #[derive(KnownLayout)]
+ #[repr(C)]
+ struct KLF4(NotKnownLayout<u8>, NotKnownLayout<AU16>, NotKnownLayout<AU32>, [u8]);
+
+ assert_eq!(<KLF4 as KnownLayout>::LAYOUT, unsized_layout(4, 1, 8));
+ }
+
+ #[test]
+ fn test_object_safety() {
+ fn _takes_from_zeroes(_: &dyn FromZeroes) {}
+ fn _takes_from_bytes(_: &dyn FromBytes) {}
+ fn _takes_unaligned(_: &dyn Unaligned) {}
+ }
+
+ #[test]
+ fn test_from_zeroes_only() {
+ // Test types that implement `FromZeroes` but not `FromBytes`.
+
+ assert!(!bool::new_zeroed());
+ assert_eq!(char::new_zeroed(), '\0');
+
+ #[cfg(feature = "alloc")]
+ {
+ assert_eq!(bool::new_box_zeroed(), Box::new(false));
+ assert_eq!(char::new_box_zeroed(), Box::new('\0'));
+
+ assert_eq!(bool::new_box_slice_zeroed(3).as_ref(), [false, false, false]);
+ assert_eq!(char::new_box_slice_zeroed(3).as_ref(), ['\0', '\0', '\0']);
+
+ assert_eq!(bool::new_vec_zeroed(3).as_ref(), [false, false, false]);
+ assert_eq!(char::new_vec_zeroed(3).as_ref(), ['\0', '\0', '\0']);
+ }
+
+ let mut string = "hello".to_string();
+ let s: &mut str = string.as_mut();
+ assert_eq!(s, "hello");
+ s.zero();
+ assert_eq!(s, "\0\0\0\0\0");
+ }
+
+ #[test]
+ fn test_read_write() {
+ const VAL: u64 = 0x12345678;
+ #[cfg(target_endian = "big")]
+ const VAL_BYTES: [u8; 8] = VAL.to_be_bytes();
+ #[cfg(target_endian = "little")]
+ const VAL_BYTES: [u8; 8] = VAL.to_le_bytes();
+
+ // Test `FromBytes::{read_from, read_from_prefix, read_from_suffix}`.
+
+ assert_eq!(u64::read_from(&VAL_BYTES[..]), Some(VAL));
+ // The first 8 bytes are from `VAL_BYTES` and the second 8 bytes are all
+ // zeroes.
+ let bytes_with_prefix: [u8; 16] = transmute!([VAL_BYTES, [0; 8]]);
+ assert_eq!(u64::read_from_prefix(&bytes_with_prefix[..]), Some(VAL));
+ assert_eq!(u64::read_from_suffix(&bytes_with_prefix[..]), Some(0));
+ // The first 8 bytes are all zeroes and the second 8 bytes are from
+ // `VAL_BYTES`.
+ let bytes_with_suffix: [u8; 16] = transmute!([[0; 8], VAL_BYTES]);
+ assert_eq!(u64::read_from_prefix(&bytes_with_suffix[..]), Some(0));
+ assert_eq!(u64::read_from_suffix(&bytes_with_suffix[..]), Some(VAL));
+
+ // Test `AsBytes::{write_to, write_to_prefix, write_to_suffix}`.
+
+ let mut bytes = [0u8; 8];
+ assert_eq!(VAL.write_to(&mut bytes[..]), Some(()));
+ assert_eq!(bytes, VAL_BYTES);
+ let mut bytes = [0u8; 16];
+ assert_eq!(VAL.write_to_prefix(&mut bytes[..]), Some(()));
+ let want: [u8; 16] = transmute!([VAL_BYTES, [0; 8]]);
+ assert_eq!(bytes, want);
+ let mut bytes = [0u8; 16];
+ assert_eq!(VAL.write_to_suffix(&mut bytes[..]), Some(()));
+ let want: [u8; 16] = transmute!([[0; 8], VAL_BYTES]);
+ assert_eq!(bytes, want);
+ }
+
+ #[test]
+ fn test_transmute() {
+ // Test that memory is transmuted as expected.
+ let array_of_u8s = [0u8, 1, 2, 3, 4, 5, 6, 7];
+ let array_of_arrays = [[0, 1], [2, 3], [4, 5], [6, 7]];
+ let x: [[u8; 2]; 4] = transmute!(array_of_u8s);
+ assert_eq!(x, array_of_arrays);
+ let x: [u8; 8] = transmute!(array_of_arrays);
+ assert_eq!(x, array_of_u8s);
+
+ // Test that the source expression's value is forgotten rather than
+ // dropped.
+ #[derive(AsBytes)]
+ #[repr(transparent)]
+ struct PanicOnDrop(());
+ impl Drop for PanicOnDrop {
+ fn drop(&mut self) {
+ panic!("PanicOnDrop::drop");
+ }
+ }
+ #[allow(clippy::let_unit_value)]
+ let _: () = transmute!(PanicOnDrop(()));
+
+ // Test that `transmute!` is legal in a const context.
+ const ARRAY_OF_U8S: [u8; 8] = [0u8, 1, 2, 3, 4, 5, 6, 7];
+ const ARRAY_OF_ARRAYS: [[u8; 2]; 4] = [[0, 1], [2, 3], [4, 5], [6, 7]];
+ const X: [[u8; 2]; 4] = transmute!(ARRAY_OF_U8S);
+ assert_eq!(X, ARRAY_OF_ARRAYS);
+ }
+
+ #[test]
+ fn test_transmute_ref() {
+ // Test that memory is transmuted as expected.
+ let array_of_u8s = [0u8, 1, 2, 3, 4, 5, 6, 7];
+ let array_of_arrays = [[0, 1], [2, 3], [4, 5], [6, 7]];
+ let x: &[[u8; 2]; 4] = transmute_ref!(&array_of_u8s);
+ assert_eq!(*x, array_of_arrays);
+ let x: &[u8; 8] = transmute_ref!(&array_of_arrays);
+ assert_eq!(*x, array_of_u8s);
+
+ // Test that `transmute_ref!` is legal in a const context.
+ const ARRAY_OF_U8S: [u8; 8] = [0u8, 1, 2, 3, 4, 5, 6, 7];
+ const ARRAY_OF_ARRAYS: [[u8; 2]; 4] = [[0, 1], [2, 3], [4, 5], [6, 7]];
+ #[allow(clippy::redundant_static_lifetimes)]
+ const X: &'static [[u8; 2]; 4] = transmute_ref!(&ARRAY_OF_U8S);
+ assert_eq!(*X, ARRAY_OF_ARRAYS);
+
+ // Test that it's legal to transmute a reference while shrinking the
+ // lifetime (note that `X` has the lifetime `'static`).
+ let x: &[u8; 8] = transmute_ref!(X);
+ assert_eq!(*x, ARRAY_OF_U8S);
+
+ // Test that `transmute_ref!` supports decreasing alignment.
+ let u = AU64(0);
+ let array = [0, 0, 0, 0, 0, 0, 0, 0];
+ let x: &[u8; 8] = transmute_ref!(&u);
+ assert_eq!(*x, array);
+
+ // Test that a mutable reference can be turned into an immutable one.
+ let mut x = 0u8;
+ #[allow(clippy::useless_transmute)]
+ let y: &u8 = transmute_ref!(&mut x);
+ assert_eq!(*y, 0);
+ }
+
+ #[test]
+ fn test_transmute_mut() {
+ // Test that memory is transmuted as expected.
+ let mut array_of_u8s = [0u8, 1, 2, 3, 4, 5, 6, 7];
+ let mut array_of_arrays = [[0, 1], [2, 3], [4, 5], [6, 7]];
+ let x: &mut [[u8; 2]; 4] = transmute_mut!(&mut array_of_u8s);
+ assert_eq!(*x, array_of_arrays);
+ let x: &mut [u8; 8] = transmute_mut!(&mut array_of_arrays);
+ assert_eq!(*x, array_of_u8s);
+
+ {
+ // Test that it's legal to transmute a reference while shrinking the
+ // lifetime.
+ let x: &mut [u8; 8] = transmute_mut!(&mut array_of_arrays);
+ assert_eq!(*x, array_of_u8s);
+ }
+ // Test that `transmute_mut!` supports decreasing alignment.
+ let mut u = AU64(0);
+ let array = [0, 0, 0, 0, 0, 0, 0, 0];
+ let x: &[u8; 8] = transmute_mut!(&mut u);
+ assert_eq!(*x, array);
+
+ // Test that a mutable reference can be turned into an immutable one.
+ let mut x = 0u8;
+ #[allow(clippy::useless_transmute)]
+ let y: &u8 = transmute_mut!(&mut x);
+ assert_eq!(*y, 0);
+ }
+
+ #[test]
+ fn test_macros_evaluate_args_once() {
+ let mut ctr = 0;
+ let _: usize = transmute!({
+ ctr += 1;
+ 0usize
+ });
+ assert_eq!(ctr, 1);
+
+ let mut ctr = 0;
+ let _: &usize = transmute_ref!({
+ ctr += 1;
+ &0usize
+ });
+ assert_eq!(ctr, 1);
+ }
+
+ #[test]
+ fn test_include_value() {
+ const AS_U32: u32 = include_value!("../testdata/include_value/data");
+ assert_eq!(AS_U32, u32::from_ne_bytes([b'a', b'b', b'c', b'd']));
+ const AS_I32: i32 = include_value!("../testdata/include_value/data");
+ assert_eq!(AS_I32, i32::from_ne_bytes([b'a', b'b', b'c', b'd']));
+ }
+
+ #[test]
+ fn test_address() {
+ // Test that the `Deref` and `DerefMut` implementations return a
+ // reference which points to the right region of memory.
+
+ let buf = [0];
+ let r = Ref::<_, u8>::new(&buf[..]).unwrap();
+ let buf_ptr = buf.as_ptr();
+ let deref_ptr: *const u8 = r.deref();
+ assert_eq!(buf_ptr, deref_ptr);
+
+ let buf = [0];
+ let r = Ref::<_, [u8]>::new_slice(&buf[..]).unwrap();
+ let buf_ptr = buf.as_ptr();
+ let deref_ptr = r.deref().as_ptr();
+ assert_eq!(buf_ptr, deref_ptr);
+ }
+
+ // Verify that values written to a `Ref` are properly shared between the
+ // typed and untyped representations, that reads via `deref` and `read`
+ // behave the same, and that writes via `deref_mut` and `write` behave the
+ // same.
+ fn test_new_helper(mut r: Ref<&mut [u8], AU64>) {
+        // Assert that the value starts out zeroed.
+ assert_eq!(*r, AU64(0));
+ assert_eq!(r.read(), AU64(0));
+
+ // Assert that values written to the typed value are reflected in the
+ // byte slice.
+ const VAL1: AU64 = AU64(0xFF00FF00FF00FF00);
+ *r = VAL1;
+ assert_eq!(r.bytes(), &VAL1.to_bytes());
+ *r = AU64(0);
+ r.write(VAL1);
+ assert_eq!(r.bytes(), &VAL1.to_bytes());
+
+ // Assert that values written to the byte slice are reflected in the
+ // typed value.
+ const VAL2: AU64 = AU64(!VAL1.0); // different from `VAL1`
+ r.bytes_mut().copy_from_slice(&VAL2.to_bytes()[..]);
+ assert_eq!(*r, VAL2);
+ assert_eq!(r.read(), VAL2);
+ }
+
+ // Verify that values written to a `Ref` are properly shared between the
+ // typed and untyped representations; pass a value with `typed_len` `AU64`s
+ // backed by an array of `typed_len * 8` bytes.
+ fn test_new_helper_slice(mut r: Ref<&mut [u8], [AU64]>, typed_len: usize) {
+ // Assert that the value starts out zeroed.
+ assert_eq!(&*r, vec![AU64(0); typed_len].as_slice());
+
+        // Check that the backing storage is the exact same slice.
+ let untyped_len = typed_len * 8;
+ assert_eq!(r.bytes().len(), untyped_len);
+ assert_eq!(r.bytes().as_ptr(), r.as_ptr().cast::<u8>());
+
+ // Assert that values written to the typed value are reflected in the
+ // byte slice.
+ const VAL1: AU64 = AU64(0xFF00FF00FF00FF00);
+ for typed in &mut *r {
+ *typed = VAL1;
+ }
+ assert_eq!(r.bytes(), VAL1.0.to_ne_bytes().repeat(typed_len).as_slice());
+
+ // Assert that values written to the byte slice are reflected in the
+ // typed value.
+ const VAL2: AU64 = AU64(!VAL1.0); // different from VAL1
+ r.bytes_mut().copy_from_slice(&VAL2.0.to_ne_bytes().repeat(typed_len));
+ assert!(r.iter().copied().all(|x| x == VAL2));
+ }
+
+ // Verify that values written to a `Ref` are properly shared between the
+ // typed and untyped representations, that reads via `deref` and `read`
+ // behave the same, and that writes via `deref_mut` and `write` behave the
+ // same.
+ fn test_new_helper_unaligned(mut r: Ref<&mut [u8], [u8; 8]>) {
+        // Assert that the value starts out zeroed.
+ assert_eq!(*r, [0; 8]);
+ assert_eq!(r.read(), [0; 8]);
+
+ // Assert that values written to the typed value are reflected in the
+ // byte slice.
+ const VAL1: [u8; 8] = [0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00];
+ *r = VAL1;
+ assert_eq!(r.bytes(), &VAL1);
+ *r = [0; 8];
+ r.write(VAL1);
+ assert_eq!(r.bytes(), &VAL1);
+
+ // Assert that values written to the byte slice are reflected in the
+ // typed value.
+ const VAL2: [u8; 8] = [0x00, 0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00, 0xFF]; // different from VAL1
+ r.bytes_mut().copy_from_slice(&VAL2[..]);
+ assert_eq!(*r, VAL2);
+ assert_eq!(r.read(), VAL2);
+ }
+
+ // Verify that values written to a `Ref` are properly shared between the
+ // typed and untyped representations; pass a value with `len` `u8`s backed
+ // by an array of `len` bytes.
+ fn test_new_helper_slice_unaligned(mut r: Ref<&mut [u8], [u8]>, len: usize) {
+ // Assert that the value starts out zeroed.
+ assert_eq!(&*r, vec![0u8; len].as_slice());
+
+        // Check that the backing storage is the exact same slice.
+ assert_eq!(r.bytes().len(), len);
+ assert_eq!(r.bytes().as_ptr(), r.as_ptr());
+
+ // Assert that values written to the typed value are reflected in the
+ // byte slice.
+ let mut expected_bytes = [0xFF, 0x00].iter().copied().cycle().take(len).collect::<Vec<_>>();
+ r.copy_from_slice(&expected_bytes);
+ assert_eq!(r.bytes(), expected_bytes.as_slice());
+
+ // Assert that values written to the byte slice are reflected in the
+ // typed value.
+ for byte in &mut expected_bytes {
+            *byte = !*byte; // different from the old `expected_bytes`
+ }
+ r.bytes_mut().copy_from_slice(&expected_bytes);
+ assert_eq!(&*r, expected_bytes.as_slice());
+ }
+
+ #[test]
+ fn test_new_aligned_sized() {
+        // Test that a properly-aligned, properly-sized buffer works for
+        // `new`, `new_from_prefix`, and `new_from_suffix`, and that
+        // `new_from_prefix` and `new_from_suffix` return empty slices. Test
+        // that a properly-aligned buffer whose length is a multiple of the
+        // element size works for `new_slice`. Test that `xxx_zeroed` behaves
+        // the same, and zeroes the memory.
+
+ // A buffer with an alignment of 8.
+ let mut buf = Align::<[u8; 8], AU64>::default();
+ // `buf.t` should be aligned to 8, so this should always succeed.
+ test_new_helper(Ref::<_, AU64>::new(&mut buf.t[..]).unwrap());
+ let ascending: [u8; 8] = (0..8).collect::<Vec<_>>().try_into().unwrap();
+ buf.t = ascending;
+ test_new_helper(Ref::<_, AU64>::new_zeroed(&mut buf.t[..]).unwrap());
+ {
+ // In a block so that `r` and `suffix` don't live too long.
+ buf.set_default();
+ let (r, suffix) = Ref::<_, AU64>::new_from_prefix(&mut buf.t[..]).unwrap();
+ assert!(suffix.is_empty());
+ test_new_helper(r);
+ }
+ {
+ buf.t = ascending;
+ let (r, suffix) = Ref::<_, AU64>::new_from_prefix_zeroed(&mut buf.t[..]).unwrap();
+ assert!(suffix.is_empty());
+ test_new_helper(r);
+ }
+ {
+ buf.set_default();
+ let (prefix, r) = Ref::<_, AU64>::new_from_suffix(&mut buf.t[..]).unwrap();
+ assert!(prefix.is_empty());
+ test_new_helper(r);
+ }
+ {
+ buf.t = ascending;
+ let (prefix, r) = Ref::<_, AU64>::new_from_suffix_zeroed(&mut buf.t[..]).unwrap();
+ assert!(prefix.is_empty());
+ test_new_helper(r);
+ }
+
+ // A buffer with alignment 8 and length 24. We choose this length very
+ // intentionally: if we instead used length 16, then the prefix and
+ // suffix lengths would be identical. In the past, we used length 16,
+ // which resulted in this test failing to discover the bug uncovered in
+ // #506.
+ let mut buf = Align::<[u8; 24], AU64>::default();
+ // `buf.t` should be aligned to 8 and have a length which is a multiple
+ // of `size_of::<AU64>()`, so this should always succeed.
+ test_new_helper_slice(Ref::<_, [AU64]>::new_slice(&mut buf.t[..]).unwrap(), 3);
+ let ascending: [u8; 24] = (0..24).collect::<Vec<_>>().try_into().unwrap();
+ // 16 ascending bytes followed by 8 zeros.
+ let mut ascending_prefix = ascending;
+ ascending_prefix[16..].copy_from_slice(&[0, 0, 0, 0, 0, 0, 0, 0]);
+ // 8 zeros followed by 16 ascending bytes.
+ let mut ascending_suffix = ascending;
+ ascending_suffix[..8].copy_from_slice(&[0, 0, 0, 0, 0, 0, 0, 0]);
+ test_new_helper_slice(Ref::<_, [AU64]>::new_slice_zeroed(&mut buf.t[..]).unwrap(), 3);
+
+ {
+ buf.t = ascending_suffix;
+ let (r, suffix) = Ref::<_, [AU64]>::new_slice_from_prefix(&mut buf.t[..], 1).unwrap();
+ assert_eq!(suffix, &ascending[8..]);
+ test_new_helper_slice(r, 1);
+ }
+ {
+ buf.t = ascending_suffix;
+ let (r, suffix) =
+ Ref::<_, [AU64]>::new_slice_from_prefix_zeroed(&mut buf.t[..], 1).unwrap();
+ assert_eq!(suffix, &ascending[8..]);
+ test_new_helper_slice(r, 1);
+ }
+ {
+ buf.t = ascending_prefix;
+ let (prefix, r) = Ref::<_, [AU64]>::new_slice_from_suffix(&mut buf.t[..], 1).unwrap();
+ assert_eq!(prefix, &ascending[..16]);
+ test_new_helper_slice(r, 1);
+ }
+ {
+ buf.t = ascending_prefix;
+ let (prefix, r) =
+ Ref::<_, [AU64]>::new_slice_from_suffix_zeroed(&mut buf.t[..], 1).unwrap();
+ assert_eq!(prefix, &ascending[..16]);
+ test_new_helper_slice(r, 1);
+ }
+ }
+
+ #[test]
+ fn test_new_unaligned_sized() {
+        // Test that an unaligned, properly-sized buffer works for
+        // `new_unaligned`, `new_unaligned_from_prefix`, and
+        // `new_unaligned_from_suffix`, and that `new_unaligned_from_prefix`
+        // and `new_unaligned_from_suffix` return empty slices. Test that an
+        // unaligned buffer whose length is a multiple of the element size
+        // works for `new_slice_unaligned`. Test that `xxx_zeroed` behaves the
+        // same, and zeroes the memory.
+
+ let mut buf = [0u8; 8];
+ test_new_helper_unaligned(Ref::<_, [u8; 8]>::new_unaligned(&mut buf[..]).unwrap());
+ buf = [0xFFu8; 8];
+ test_new_helper_unaligned(Ref::<_, [u8; 8]>::new_unaligned_zeroed(&mut buf[..]).unwrap());
+ {
+ // In a block so that `r` and `suffix` don't live too long.
+ buf = [0u8; 8];
+ let (r, suffix) = Ref::<_, [u8; 8]>::new_unaligned_from_prefix(&mut buf[..]).unwrap();
+ assert!(suffix.is_empty());
+ test_new_helper_unaligned(r);
+ }
+ {
+ buf = [0xFFu8; 8];
+ let (r, suffix) =
+ Ref::<_, [u8; 8]>::new_unaligned_from_prefix_zeroed(&mut buf[..]).unwrap();
+ assert!(suffix.is_empty());
+ test_new_helper_unaligned(r);
+ }
+ {
+ buf = [0u8; 8];
+ let (prefix, r) = Ref::<_, [u8; 8]>::new_unaligned_from_suffix(&mut buf[..]).unwrap();
+ assert!(prefix.is_empty());
+ test_new_helper_unaligned(r);
+ }
+ {
+ buf = [0xFFu8; 8];
+ let (prefix, r) =
+ Ref::<_, [u8; 8]>::new_unaligned_from_suffix_zeroed(&mut buf[..]).unwrap();
+ assert!(prefix.is_empty());
+ test_new_helper_unaligned(r);
+ }
+
+ let mut buf = [0u8; 16];
+        // `new_slice_unaligned` imposes no alignment requirement, and `buf`'s
+        // length is a multiple of `size_of::<u8>()`, so this should always
+        // succeed.
+ test_new_helper_slice_unaligned(
+ Ref::<_, [u8]>::new_slice_unaligned(&mut buf[..]).unwrap(),
+ 16,
+ );
+ buf = [0xFFu8; 16];
+ test_new_helper_slice_unaligned(
+ Ref::<_, [u8]>::new_slice_unaligned_zeroed(&mut buf[..]).unwrap(),
+ 16,
+ );
+
+ {
+ buf = [0u8; 16];
+ let (r, suffix) =
+ Ref::<_, [u8]>::new_slice_unaligned_from_prefix(&mut buf[..], 8).unwrap();
+ assert_eq!(suffix, [0; 8]);
+ test_new_helper_slice_unaligned(r, 8);
+ }
+ {
+ buf = [0xFFu8; 16];
+ let (r, suffix) =
+ Ref::<_, [u8]>::new_slice_unaligned_from_prefix_zeroed(&mut buf[..], 8).unwrap();
+ assert_eq!(suffix, [0xFF; 8]);
+ test_new_helper_slice_unaligned(r, 8);
+ }
+ {
+ buf = [0u8; 16];
+ let (prefix, r) =
+ Ref::<_, [u8]>::new_slice_unaligned_from_suffix(&mut buf[..], 8).unwrap();
+ assert_eq!(prefix, [0; 8]);
+ test_new_helper_slice_unaligned(r, 8);
+ }
+ {
+ buf = [0xFFu8; 16];
+ let (prefix, r) =
+ Ref::<_, [u8]>::new_slice_unaligned_from_suffix_zeroed(&mut buf[..], 8).unwrap();
+ assert_eq!(prefix, [0xFF; 8]);
+ test_new_helper_slice_unaligned(r, 8);
+ }
+ }
+
+ #[test]
+ fn test_new_oversized() {
+ // Test that a properly-aligned, overly-sized buffer works for
+ // `new_from_prefix` and `new_from_suffix`, and that they return the
+ // remainder and prefix of the slice respectively. Test that
+ // `xxx_zeroed` behaves the same, and zeroes the memory.
+
+ let mut buf = Align::<[u8; 16], AU64>::default();
+ {
+ // In a block so that `r` and `suffix` don't live too long. `buf.t`
+ // should be aligned to 8, so this should always succeed.
+ let (r, suffix) = Ref::<_, AU64>::new_from_prefix(&mut buf.t[..]).unwrap();
+ assert_eq!(suffix.len(), 8);
+ test_new_helper(r);
+ }
+ {
+ buf.t = [0xFFu8; 16];
+ // `buf.t` should be aligned to 8, so this should always succeed.
+ let (r, suffix) = Ref::<_, AU64>::new_from_prefix_zeroed(&mut buf.t[..]).unwrap();
+ // Assert that the suffix wasn't zeroed.
+ assert_eq!(suffix, &[0xFFu8; 8]);
+ test_new_helper(r);
+ }
+ {
+ buf.set_default();
+ // `buf.t` should be aligned to 8, so this should always succeed.
+ let (prefix, r) = Ref::<_, AU64>::new_from_suffix(&mut buf.t[..]).unwrap();
+ assert_eq!(prefix.len(), 8);
+ test_new_helper(r);
+ }
+ {
+ buf.t = [0xFFu8; 16];
+ // `buf.t` should be aligned to 8, so this should always succeed.
+ let (prefix, r) = Ref::<_, AU64>::new_from_suffix_zeroed(&mut buf.t[..]).unwrap();
+ // Assert that the prefix wasn't zeroed.
+ assert_eq!(prefix, &[0xFFu8; 8]);
+ test_new_helper(r);
+ }
+ }
+
+ #[test]
+ fn test_new_unaligned_oversized() {
+        // Test that an unaligned, overly-sized buffer works for
+ // `new_unaligned_from_prefix` and `new_unaligned_from_suffix`, and that
+ // they return the remainder and prefix of the slice respectively. Test
+ // that `xxx_zeroed` behaves the same, and zeroes the memory.
+
+ let mut buf = [0u8; 16];
+ {
+ // In a block so that `r` and `suffix` don't live too long.
+ let (r, suffix) = Ref::<_, [u8; 8]>::new_unaligned_from_prefix(&mut buf[..]).unwrap();
+ assert_eq!(suffix.len(), 8);
+ test_new_helper_unaligned(r);
+ }
+ {
+ buf = [0xFFu8; 16];
+ let (r, suffix) =
+ Ref::<_, [u8; 8]>::new_unaligned_from_prefix_zeroed(&mut buf[..]).unwrap();
+ // Assert that the suffix wasn't zeroed.
+ assert_eq!(suffix, &[0xFF; 8]);
+ test_new_helper_unaligned(r);
+ }
+ {
+ buf = [0u8; 16];
+ let (prefix, r) = Ref::<_, [u8; 8]>::new_unaligned_from_suffix(&mut buf[..]).unwrap();
+ assert_eq!(prefix.len(), 8);
+ test_new_helper_unaligned(r);
+ }
+ {
+ buf = [0xFFu8; 16];
+ let (prefix, r) =
+ Ref::<_, [u8; 8]>::new_unaligned_from_suffix_zeroed(&mut buf[..]).unwrap();
+ // Assert that the prefix wasn't zeroed.
+ assert_eq!(prefix, &[0xFF; 8]);
+ test_new_helper_unaligned(r);
+ }
+ }
+
+ #[test]
+ fn test_ref_from_mut_from() {
+        // Test `FromBytes::{ref_from, mut_from}{,_prefix,_suffix}` success
+        // cases. Exhaustive coverage of these methods is provided by the
+        // `Ref` tests above, to which these helper methods defer.
+
+ let mut buf =
+ Align::<[u8; 16], AU64>::new([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);
+
+ assert_eq!(
+ AU64::ref_from(&buf.t[8..]).unwrap().0.to_ne_bytes(),
+ [8, 9, 10, 11, 12, 13, 14, 15]
+ );
+ let suffix = AU64::mut_from(&mut buf.t[8..]).unwrap();
+ suffix.0 = 0x0101010101010101;
+        // `[u8; 9]` is deliberately not half the size of the full buffer; this
+        // catches `from_prefix` having the same implementation as
+        // `from_suffix` (issues #506, #511).
+ assert_eq!(<[u8; 9]>::ref_from_suffix(&buf.t[..]).unwrap(), &[7u8, 1, 1, 1, 1, 1, 1, 1, 1]);
+ let suffix = AU64::mut_from_suffix(&mut buf.t[1..]).unwrap();
+ suffix.0 = 0x0202020202020202;
+ <[u8; 10]>::mut_from_suffix(&mut buf.t[..]).unwrap()[0] = 42;
+ assert_eq!(<[u8; 9]>::ref_from_prefix(&buf.t[..]).unwrap(), &[0, 1, 2, 3, 4, 5, 42, 7, 2]);
+ <[u8; 2]>::mut_from_prefix(&mut buf.t[..]).unwrap()[1] = 30;
+ assert_eq!(buf.t, [0, 30, 2, 3, 4, 5, 42, 7, 2, 2, 2, 2, 2, 2, 2, 2]);
+ }
+
+ #[test]
+ fn test_ref_from_mut_from_error() {
+ // Test `FromBytes::{ref_from, mut_from}{,_prefix,_suffix}` error cases.
+
+ // Fail because the buffer is too large.
+ let mut buf = Align::<[u8; 16], AU64>::default();
+ // `buf.t` should be aligned to 8, so only the length check should fail.
+ assert!(AU64::ref_from(&buf.t[..]).is_none());
+ assert!(AU64::mut_from(&mut buf.t[..]).is_none());
+ assert!(<[u8; 8]>::ref_from(&buf.t[..]).is_none());
+ assert!(<[u8; 8]>::mut_from(&mut buf.t[..]).is_none());
+
+ // Fail because the buffer is too small.
+ let mut buf = Align::<[u8; 4], AU64>::default();
+ assert!(AU64::ref_from(&buf.t[..]).is_none());
+ assert!(AU64::mut_from(&mut buf.t[..]).is_none());
+ assert!(<[u8; 8]>::ref_from(&buf.t[..]).is_none());
+ assert!(<[u8; 8]>::mut_from(&mut buf.t[..]).is_none());
+ assert!(AU64::ref_from_prefix(&buf.t[..]).is_none());
+ assert!(AU64::mut_from_prefix(&mut buf.t[..]).is_none());
+ assert!(AU64::ref_from_suffix(&buf.t[..]).is_none());
+ assert!(AU64::mut_from_suffix(&mut buf.t[..]).is_none());
+ assert!(<[u8; 8]>::ref_from_prefix(&buf.t[..]).is_none());
+ assert!(<[u8; 8]>::mut_from_prefix(&mut buf.t[..]).is_none());
+ assert!(<[u8; 8]>::ref_from_suffix(&buf.t[..]).is_none());
+ assert!(<[u8; 8]>::mut_from_suffix(&mut buf.t[..]).is_none());
+
+ // Fail because the alignment is insufficient.
+ let mut buf = Align::<[u8; 13], AU64>::default();
+ assert!(AU64::ref_from(&buf.t[1..]).is_none());
+ assert!(AU64::mut_from(&mut buf.t[1..]).is_none());
+ assert!(AU64::ref_from_prefix(&buf.t[1..]).is_none());
+ assert!(AU64::mut_from_prefix(&mut buf.t[1..]).is_none());
+ assert!(AU64::ref_from_suffix(&buf.t[..]).is_none());
+ assert!(AU64::mut_from_suffix(&mut buf.t[..]).is_none());
+ }
+
+ #[test]
+ #[allow(clippy::cognitive_complexity)]
+ fn test_new_error() {
+ // Fail because the buffer is too large.
+
+ // A buffer with an alignment of 8.
+ let mut buf = Align::<[u8; 16], AU64>::default();
+ // `buf.t` should be aligned to 8, so only the length check should fail.
+ assert!(Ref::<_, AU64>::new(&buf.t[..]).is_none());
+ assert!(Ref::<_, AU64>::new_zeroed(&mut buf.t[..]).is_none());
+ assert!(Ref::<_, [u8; 8]>::new_unaligned(&buf.t[..]).is_none());
+ assert!(Ref::<_, [u8; 8]>::new_unaligned_zeroed(&mut buf.t[..]).is_none());
+
+ // Fail because the buffer is too small.
+
+ // A buffer with an alignment of 8.
+ let mut buf = Align::<[u8; 4], AU64>::default();
+ // `buf.t` should be aligned to 8, so only the length check should fail.
+ assert!(Ref::<_, AU64>::new(&buf.t[..]).is_none());
+ assert!(Ref::<_, AU64>::new_zeroed(&mut buf.t[..]).is_none());
+ assert!(Ref::<_, [u8; 8]>::new_unaligned(&buf.t[..]).is_none());
+ assert!(Ref::<_, [u8; 8]>::new_unaligned_zeroed(&mut buf.t[..]).is_none());
+ assert!(Ref::<_, AU64>::new_from_prefix(&buf.t[..]).is_none());
+ assert!(Ref::<_, AU64>::new_from_prefix_zeroed(&mut buf.t[..]).is_none());
+ assert!(Ref::<_, AU64>::new_from_suffix(&buf.t[..]).is_none());
+ assert!(Ref::<_, AU64>::new_from_suffix_zeroed(&mut buf.t[..]).is_none());
+ assert!(Ref::<_, [u8; 8]>::new_unaligned_from_prefix(&buf.t[..]).is_none());
+ assert!(Ref::<_, [u8; 8]>::new_unaligned_from_prefix_zeroed(&mut buf.t[..]).is_none());
+ assert!(Ref::<_, [u8; 8]>::new_unaligned_from_suffix(&buf.t[..]).is_none());
+ assert!(Ref::<_, [u8; 8]>::new_unaligned_from_suffix_zeroed(&mut buf.t[..]).is_none());
+
+ // Fail because the length is not a multiple of the element size.
+
+ let mut buf = Align::<[u8; 12], AU64>::default();
+ // `buf.t` has length 12, but element size is 8.
+ assert!(Ref::<_, [AU64]>::new_slice(&buf.t[..]).is_none());
+ assert!(Ref::<_, [AU64]>::new_slice_zeroed(&mut buf.t[..]).is_none());
+ assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned(&buf.t[..]).is_none());
+ assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_zeroed(&mut buf.t[..]).is_none());
+
+ // Fail because the buffer is too short.
+ let mut buf = Align::<[u8; 12], AU64>::default();
+ // `buf.t` has length 12, but the element size is 8 (and we're expecting
+ // two of them).
+ assert!(Ref::<_, [AU64]>::new_slice_from_prefix(&buf.t[..], 2).is_none());
+ assert!(Ref::<_, [AU64]>::new_slice_from_prefix_zeroed(&mut buf.t[..], 2).is_none());
+ assert!(Ref::<_, [AU64]>::new_slice_from_suffix(&buf.t[..], 2).is_none());
+ assert!(Ref::<_, [AU64]>::new_slice_from_suffix_zeroed(&mut buf.t[..], 2).is_none());
+ assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_from_prefix(&buf.t[..], 2).is_none());
+ assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_from_prefix_zeroed(&mut buf.t[..], 2)
+ .is_none());
+ assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_from_suffix(&buf.t[..], 2).is_none());
+ assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_from_suffix_zeroed(&mut buf.t[..], 2)
+ .is_none());
+
+ // Fail because the alignment is insufficient.
+
+ // A buffer with an alignment of 8. An odd buffer size is chosen so that
+ // the last byte of the buffer has odd alignment.
+ let mut buf = Align::<[u8; 13], AU64>::default();
+ // Slicing from 1, we get a buffer with size 12 (so the length check
+ // should succeed) but an alignment of only 1, which is insufficient.
+ assert!(Ref::<_, AU64>::new(&buf.t[1..]).is_none());
+ assert!(Ref::<_, AU64>::new_zeroed(&mut buf.t[1..]).is_none());
+ assert!(Ref::<_, AU64>::new_from_prefix(&buf.t[1..]).is_none());
+ assert!(Ref::<_, AU64>::new_from_prefix_zeroed(&mut buf.t[1..]).is_none());
+ assert!(Ref::<_, [AU64]>::new_slice(&buf.t[1..]).is_none());
+ assert!(Ref::<_, [AU64]>::new_slice_zeroed(&mut buf.t[1..]).is_none());
+ assert!(Ref::<_, [AU64]>::new_slice_from_prefix(&buf.t[1..], 1).is_none());
+ assert!(Ref::<_, [AU64]>::new_slice_from_prefix_zeroed(&mut buf.t[1..], 1).is_none());
+ assert!(Ref::<_, [AU64]>::new_slice_from_suffix(&buf.t[1..], 1).is_none());
+ assert!(Ref::<_, [AU64]>::new_slice_from_suffix_zeroed(&mut buf.t[1..], 1).is_none());
+ // Slicing is unnecessary here because `new_from_suffix[_zeroed]` use
+ // the suffix of the slice, which has odd alignment.
+ assert!(Ref::<_, AU64>::new_from_suffix(&buf.t[..]).is_none());
+ assert!(Ref::<_, AU64>::new_from_suffix_zeroed(&mut buf.t[..]).is_none());
+
+ // Fail due to arithmetic overflow.
+
+ let mut buf = Align::<[u8; 16], AU64>::default();
+ let unreasonable_len = usize::MAX / mem::size_of::<AU64>() + 1;
+ assert!(Ref::<_, [AU64]>::new_slice_from_prefix(&buf.t[..], unreasonable_len).is_none());
+ assert!(Ref::<_, [AU64]>::new_slice_from_prefix_zeroed(&mut buf.t[..], unreasonable_len)
+ .is_none());
+ assert!(Ref::<_, [AU64]>::new_slice_from_suffix(&buf.t[..], unreasonable_len).is_none());
+ assert!(Ref::<_, [AU64]>::new_slice_from_suffix_zeroed(&mut buf.t[..], unreasonable_len)
+ .is_none());
+ assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_from_prefix(&buf.t[..], unreasonable_len)
+ .is_none());
+ assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_from_prefix_zeroed(
+ &mut buf.t[..],
+ unreasonable_len
+ )
+ .is_none());
+ assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_from_suffix(&buf.t[..], unreasonable_len)
+ .is_none());
+ assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_from_suffix_zeroed(
+ &mut buf.t[..],
+ unreasonable_len
+ )
+ .is_none());
+ }
+
+ // Tests for ensuring that, if a ZST is passed into a slice-like function,
+ // we always panic. Since these tests need to be separate per-function, and
+ // they tend to take up a lot of space, we generate them using a macro in a
+ // submodule instead. The submodule ensures that we can just re-use the name
+ // of the function under test for the name of the test itself.
+ mod test_zst_panics {
+ macro_rules! zst_test {
+ ($name:ident($($tt:tt)*), $constructor_in_panic_msg:tt) => {
+ #[test]
+ #[should_panic = concat!("Ref::", $constructor_in_panic_msg, " called on a zero-sized type")]
+ fn $name() {
+ let mut buffer = [0u8];
+ let r = $crate::Ref::<_, [()]>::$name(&mut buffer[..], $($tt)*);
+ unreachable!("should have panicked, got {:?}", r);
+ }
+ }
+ }
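+        // For example, `zst_test!(new_slice(), "new_slice")` expands to a
+        // test fn named `new_slice` which calls `Ref::<_, [()]>::new_slice`
+        // on a 1-byte buffer and asserts, via `should_panic`, that it panics
+        // with a message beginning "Ref::new_slice called on a zero-sized
+        // type".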
+ zst_test!(new_slice(), "new_slice");
+ zst_test!(new_slice_zeroed(), "new_slice");
+ zst_test!(new_slice_from_prefix(1), "new_slice");
+ zst_test!(new_slice_from_prefix_zeroed(1), "new_slice");
+ zst_test!(new_slice_from_suffix(1), "new_slice");
+ zst_test!(new_slice_from_suffix_zeroed(1), "new_slice");
+ zst_test!(new_slice_unaligned(), "new_slice_unaligned");
+ zst_test!(new_slice_unaligned_zeroed(), "new_slice_unaligned");
+ zst_test!(new_slice_unaligned_from_prefix(1), "new_slice_unaligned");
+ zst_test!(new_slice_unaligned_from_prefix_zeroed(1), "new_slice_unaligned");
+ zst_test!(new_slice_unaligned_from_suffix(1), "new_slice_unaligned");
+ zst_test!(new_slice_unaligned_from_suffix_zeroed(1), "new_slice_unaligned");
+ }
+
+ #[test]
+ fn test_as_bytes_methods() {
+ /// Run a series of tests by calling `AsBytes` methods on `t`.
+ ///
+ /// `bytes` is the expected byte sequence returned from `t.as_bytes()`
+ /// before `t` has been modified. `post_mutation` is the expected
+ /// sequence returned from `t.as_bytes()` after `t.as_bytes_mut()[0]`
+ /// has had its bits flipped (by applying `^= 0xFF`).
+ ///
+ /// `N` is the size of `t` in bytes.
+ fn test<T: FromBytes + AsBytes + Debug + Eq + ?Sized, const N: usize>(
+ t: &mut T,
+ bytes: &[u8],
+ post_mutation: &T,
+ ) {
+ // Test that we can access the underlying bytes, and that we get the
+ // right bytes and the right number of bytes.
+ assert_eq!(t.as_bytes(), bytes);
+
+ // Test that changes to the underlying byte slices are reflected in
+ // the original object.
+ t.as_bytes_mut()[0] ^= 0xFF;
+ assert_eq!(t, post_mutation);
+ t.as_bytes_mut()[0] ^= 0xFF;
+
+ // `write_to` rejects slices that are too small or too large.
+ assert_eq!(t.write_to(&mut vec![0; N - 1][..]), None);
+ assert_eq!(t.write_to(&mut vec![0; N + 1][..]), None);
+
+ // `write_to` works as expected.
+ let mut bytes = [0; N];
+ assert_eq!(t.write_to(&mut bytes[..]), Some(()));
+ assert_eq!(bytes, t.as_bytes());
+
+ // `write_to_prefix` rejects slices that are too small.
+ assert_eq!(t.write_to_prefix(&mut vec![0; N - 1][..]), None);
+
+ // `write_to_prefix` works with exact-sized slices.
+ let mut bytes = [0; N];
+ assert_eq!(t.write_to_prefix(&mut bytes[..]), Some(()));
+ assert_eq!(bytes, t.as_bytes());
+
+ // `write_to_prefix` works with too-large slices, and any bytes past
+ // the prefix aren't modified.
+ let mut too_many_bytes = vec![0; N + 1];
+ too_many_bytes[N] = 123;
+ assert_eq!(t.write_to_prefix(&mut too_many_bytes[..]), Some(()));
+ assert_eq!(&too_many_bytes[..N], t.as_bytes());
+ assert_eq!(too_many_bytes[N], 123);
+
+ // `write_to_suffix` rejects slices that are too small.
+ assert_eq!(t.write_to_suffix(&mut vec![0; N - 1][..]), None);
+
+ // `write_to_suffix` works with exact-sized slices.
+ let mut bytes = [0; N];
+ assert_eq!(t.write_to_suffix(&mut bytes[..]), Some(()));
+ assert_eq!(bytes, t.as_bytes());
+
+ // `write_to_suffix` works with too-large slices, and any bytes
+ // before the suffix aren't modified.
+ let mut too_many_bytes = vec![0; N + 1];
+ too_many_bytes[0] = 123;
+ assert_eq!(t.write_to_suffix(&mut too_many_bytes[..]), Some(()));
+ assert_eq!(&too_many_bytes[1..], t.as_bytes());
+ assert_eq!(too_many_bytes[0], 123);
+ }
+
+ #[derive(Debug, Eq, PartialEq, FromZeroes, FromBytes, AsBytes)]
+ #[repr(C)]
+ struct Foo {
+ a: u32,
+ b: Wrapping<u32>,
+ c: Option<NonZeroU32>,
+ }
+
+ let expected_bytes: Vec<u8> = if cfg!(target_endian = "little") {
+ vec![1, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0]
+ } else {
+ vec![0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 0]
+ };
+ let post_mutation_expected_a =
+ if cfg!(target_endian = "little") { 0x00_00_00_FE } else { 0xFF_00_00_01 };
+ test::<_, 12>(
+ &mut Foo { a: 1, b: Wrapping(2), c: None },
+ expected_bytes.as_bytes(),
+ &Foo { a: post_mutation_expected_a, b: Wrapping(2), c: None },
+ );
+ test::<_, 3>(
+ Unsized::from_mut_slice(&mut [1, 2, 3]),
+ &[1, 2, 3],
+ Unsized::from_mut_slice(&mut [0xFE, 2, 3]),
+ );
+ }
+
+ #[test]
+ fn test_array() {
+ #[derive(FromZeroes, FromBytes, AsBytes)]
+ #[repr(C)]
+ struct Foo {
+ a: [u16; 33],
+ }
+
+ let foo = Foo { a: [0xFFFF; 33] };
+ let expected = [0xFFu8; 66];
+ assert_eq!(foo.as_bytes(), &expected[..]);
+ }
+
+ #[test]
+ fn test_display_debug() {
+ let buf = Align::<[u8; 8], u64>::default();
+ let r = Ref::<_, u64>::new(&buf.t[..]).unwrap();
+ assert_eq!(format!("{}", r), "0");
+ assert_eq!(format!("{:?}", r), "Ref(0)");
+
+ let buf = Align::<[u8; 8], u64>::default();
+ let r = Ref::<_, [u64]>::new_slice(&buf.t[..]).unwrap();
+ assert_eq!(format!("{:?}", r), "Ref([0])");
+ }
+
+ #[test]
+ fn test_eq() {
+ let buf1 = 0_u64;
+ let r1 = Ref::<_, u64>::new(buf1.as_bytes()).unwrap();
+ let buf2 = 0_u64;
+ let r2 = Ref::<_, u64>::new(buf2.as_bytes()).unwrap();
+ assert_eq!(r1, r2);
+ }
+
+ #[test]
+ fn test_ne() {
+ let buf1 = 0_u64;
+ let r1 = Ref::<_, u64>::new(buf1.as_bytes()).unwrap();
+ let buf2 = 1_u64;
+ let r2 = Ref::<_, u64>::new(buf2.as_bytes()).unwrap();
+ assert_ne!(r1, r2);
+ }
+
+ #[test]
+ fn test_ord() {
+ let buf1 = 0_u64;
+ let r1 = Ref::<_, u64>::new(buf1.as_bytes()).unwrap();
+ let buf2 = 1_u64;
+ let r2 = Ref::<_, u64>::new(buf2.as_bytes()).unwrap();
+ assert!(r1 < r2);
+ }
+
+ #[test]
+ fn test_new_zeroed() {
+ assert!(!bool::new_zeroed());
+ assert_eq!(u64::new_zeroed(), 0);
+ // This test exists in order to exercise unsafe code, especially when
+ // running under Miri.
+ #[allow(clippy::unit_cmp)]
+ {
+ assert_eq!(<()>::new_zeroed(), ());
+ }
+ }
+
+ #[test]
+ fn test_transparent_packed_generic_struct() {
+ #[derive(AsBytes, FromZeroes, FromBytes, Unaligned)]
+ #[repr(transparent)]
+ struct Foo<T> {
+ _t: T,
+ _phantom: PhantomData<()>,
+ }
+
+ assert_impl_all!(Foo<u32>: FromZeroes, FromBytes, AsBytes);
+ assert_impl_all!(Foo<u8>: Unaligned);
+
+ #[derive(AsBytes, FromZeroes, FromBytes, Unaligned)]
+ #[repr(packed)]
+ struct Bar<T, U> {
+ _t: T,
+ _u: U,
+ }
+
+ assert_impl_all!(Bar<u8, AU64>: FromZeroes, FromBytes, AsBytes, Unaligned);
+ }
+
+ #[test]
+ fn test_impls() {
+ use core::borrow::Borrow;
+
+ // A type that can supply test cases for testing
+ // `TryFromBytes::is_bit_valid`. All types passed to `assert_impls!`
+ // must implement this trait; that macro uses it to generate runtime
+ // tests for `TryFromBytes` impls.
+ //
+ // All `T: FromBytes` types are provided with a blanket impl. Other
+        // types must implement `TryFromBytesTestable` directly (i.e., using
+ // `impl_try_from_bytes_testable!`).
+ trait TryFromBytesTestable {
+ fn with_passing_test_cases<F: Fn(&Self)>(f: F);
+ fn with_failing_test_cases<F: Fn(&[u8])>(f: F);
+ }
+
+ impl<T: FromBytes> TryFromBytesTestable for T {
+ fn with_passing_test_cases<F: Fn(&Self)>(f: F) {
+ // Test with a zeroed value.
+ f(&Self::new_zeroed());
+
+ let ffs = {
+ let mut t = Self::new_zeroed();
+ let ptr: *mut T = &mut t;
+ // SAFETY: `T: FromBytes`
+ unsafe { ptr::write_bytes(ptr.cast::<u8>(), 0xFF, mem::size_of::<T>()) };
+ t
+ };
+
+ // Test with a value initialized with 0xFF.
+ f(&ffs);
+ }
+
+ fn with_failing_test_cases<F: Fn(&[u8])>(_f: F) {}
+ }
+
+ // Implements `TryFromBytesTestable`.
+ macro_rules! impl_try_from_bytes_testable {
+ // Base case for recursion (when the list of types has run out).
+ (=> @success $($success_case:expr),* $(, @failure $($failure_case:expr),*)?) => {};
+ // Implements for type(s) with no type parameters.
+ ($ty:ty $(,$tys:ty)* => @success $($success_case:expr),* $(, @failure $($failure_case:expr),*)?) => {
+ impl TryFromBytesTestable for $ty {
+ impl_try_from_bytes_testable!(
+ @methods @success $($success_case),*
+ $(, @failure $($failure_case),*)?
+ );
+ }
+ impl_try_from_bytes_testable!($($tys),* => @success $($success_case),* $(, @failure $($failure_case),*)?);
+ };
+ // Implements for multiple types with no type parameters.
+ ($($($ty:ty),* => @success $($success_case:expr), * $(, @failure $($failure_case:expr),*)?;)*) => {
+ $(
+ impl_try_from_bytes_testable!($($ty),* => @success $($success_case),* $(, @failure $($failure_case),*)*);
+ )*
+ };
+ // Implements only the methods; caller must invoke this from inside
+ // an impl block.
+ (@methods @success $($success_case:expr),* $(, @failure $($failure_case:expr),*)?) => {
+ fn with_passing_test_cases<F: Fn(&Self)>(_f: F) {
+ $(
+ _f($success_case.borrow());
+ )*
+ }
+
+ fn with_failing_test_cases<F: Fn(&[u8])>(_f: F) {
+ $($(
+ // `unused_qualifications` is spuriously triggered on
+ // `Option::<Self>::None`.
+ #[allow(unused_qualifications)]
+ let case = $failure_case.as_bytes();
+ _f(case.as_bytes());
+ )*)?
+ }
+ };
+ }
+
+ // Note that these impls are only for types which are not `FromBytes`.
+ // `FromBytes` types are covered by a preceding blanket impl.
+ impl_try_from_bytes_testable!(
+ bool => @success true, false,
+ @failure 2u8, 3u8, 0xFFu8;
+ char => @success '\u{0}', '\u{D7FF}', '\u{E000}', '\u{10FFFF}',
+ @failure 0xD800u32, 0xDFFFu32, 0x110000u32;
+ str => @success "", "hello", "❤️🧡💛💚💙💜",
+ @failure [0, 159, 146, 150];
+ [u8] => @success [], [0, 1, 2];
+ NonZeroU8, NonZeroI8, NonZeroU16, NonZeroI16, NonZeroU32,
+ NonZeroI32, NonZeroU64, NonZeroI64, NonZeroU128, NonZeroI128,
+ NonZeroUsize, NonZeroIsize
+ => @success Self::new(1).unwrap(),
+ // Doing this instead of `0` ensures that we always satisfy
+ // the size and alignment requirements of `Self` (whereas
+ // `0` may be any integer type with a different size or
+ // alignment than some `NonZeroXxx` types).
+ @failure Option::<Self>::None;
+ [bool]
+ => @success [true, false], [false, true],
+ @failure [2u8], [3u8], [0xFFu8], [0u8, 1u8, 2u8];
+ );
+
+ // Asserts that `$ty` implements any `$trait` and doesn't implement any
+ // `!$trait`. Note that all `$trait`s must come before any `!$trait`s.
+ //
+ // For `T: TryFromBytes`, uses `TryFromBytesTestable` to test success
+ // and failure cases for `TryFromBytes::is_bit_valid`.
+ macro_rules! assert_impls {
+ ($ty:ty: TryFromBytes) => {
+ <$ty as TryFromBytesTestable>::with_passing_test_cases(|val| {
+ let c = Ptr::from(val);
+ // SAFETY:
+                // - Since `val` is a normal reference, `c` is guaranteed to
+ // be aligned, to point to a single allocation, and to
+ // have a size which doesn't overflow `isize`.
+ // - Since `val` is a valid `$ty`, `c`'s referent satisfies
+ // the bit validity constraints of `is_bit_valid`, which
+ // are a superset of the bit validity constraints of
+ // `$ty`.
+ let res = unsafe { <$ty as TryFromBytes>::is_bit_valid(c) };
+ assert!(res, "{}::is_bit_valid({:?}): got false, expected true", stringify!($ty), val);
+
+ // TODO(#5): In addition to testing `is_bit_valid`, test the
+ // methods built on top of it. This would both allow us to
+ // test their implementations and actually convert the bytes
+ // to `$ty`, giving Miri a chance to catch if this is
+                // unsound (i.e., if our `is_bit_valid` impl is buggy).
+ //
+ // The following code was tried, but it doesn't work because
+ // a) some types are not `AsBytes` and, b) some types are
+ // not `Sized`.
+ //
+ // let r = <$ty as TryFromBytes>::try_from_ref(val.as_bytes()).unwrap();
+ // assert_eq!(r, &val);
+ // let r = <$ty as TryFromBytes>::try_from_mut(val.as_bytes_mut()).unwrap();
+ // assert_eq!(r, &mut val);
+ // let v = <$ty as TryFromBytes>::try_read_from(val.as_bytes()).unwrap();
+ // assert_eq!(v, val);
+ });
+ #[allow(clippy::as_conversions)]
+ <$ty as TryFromBytesTestable>::with_failing_test_cases(|c| {
+ let res = <$ty as TryFromBytes>::try_from_ref(c);
+ assert!(res.is_none(), "{}::is_bit_valid({:?}): got true, expected false", stringify!($ty), c);
+ });
+
+ #[allow(dead_code)]
+ const _: () = { static_assertions::assert_impl_all!($ty: TryFromBytes); };
+ };
+ ($ty:ty: $trait:ident) => {
+ #[allow(dead_code)]
+ const _: () = { static_assertions::assert_impl_all!($ty: $trait); };
+ };
+ ($ty:ty: !$trait:ident) => {
+ #[allow(dead_code)]
+ const _: () = { static_assertions::assert_not_impl_any!($ty: $trait); };
+ };
+ ($ty:ty: $($trait:ident),* $(,)? $(!$negative_trait:ident),*) => {
+ $(
+ assert_impls!($ty: $trait);
+ )*
+
+ $(
+ assert_impls!($ty: !$negative_trait);
+ )*
+ };
+ }
+
+ // NOTE: The negative impl assertions here are not necessarily
+ // prescriptive. They merely serve as change detectors to make sure
+ // we're aware of what trait impls are getting added with a given
+ // change. Of course, some impls would be invalid (e.g., `bool:
+ // FromBytes`), and so this change detection is very important.
+
+ assert_impls!((): KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned);
+ assert_impls!(u8: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned);
+ assert_impls!(i8: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned);
+ assert_impls!(u16: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
+ assert_impls!(i16: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
+ assert_impls!(u32: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
+ assert_impls!(i32: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
+ assert_impls!(u64: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
+ assert_impls!(i64: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
+ assert_impls!(u128: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
+ assert_impls!(i128: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
+ assert_impls!(usize: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
+ assert_impls!(isize: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
+ assert_impls!(f32: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
+ assert_impls!(f64: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
+
+ assert_impls!(bool: KnownLayout, TryFromBytes, FromZeroes, AsBytes, Unaligned, !FromBytes);
+ assert_impls!(char: KnownLayout, TryFromBytes, FromZeroes, AsBytes, !FromBytes, !Unaligned);
+ assert_impls!(str: KnownLayout, TryFromBytes, FromZeroes, AsBytes, Unaligned, !FromBytes);
+
+ assert_impls!(NonZeroU8: KnownLayout, TryFromBytes, AsBytes, Unaligned, !FromZeroes, !FromBytes);
+ assert_impls!(NonZeroI8: KnownLayout, TryFromBytes, AsBytes, Unaligned, !FromZeroes, !FromBytes);
+ assert_impls!(NonZeroU16: KnownLayout, TryFromBytes, AsBytes, !FromBytes, !Unaligned);
+ assert_impls!(NonZeroI16: KnownLayout, TryFromBytes, AsBytes, !FromBytes, !Unaligned);
+ assert_impls!(NonZeroU32: KnownLayout, TryFromBytes, AsBytes, !FromBytes, !Unaligned);
+ assert_impls!(NonZeroI32: KnownLayout, TryFromBytes, AsBytes, !FromBytes, !Unaligned);
+ assert_impls!(NonZeroU64: KnownLayout, TryFromBytes, AsBytes, !FromBytes, !Unaligned);
+ assert_impls!(NonZeroI64: KnownLayout, TryFromBytes, AsBytes, !FromBytes, !Unaligned);
+ assert_impls!(NonZeroU128: KnownLayout, TryFromBytes, AsBytes, !FromBytes, !Unaligned);
+ assert_impls!(NonZeroI128: KnownLayout, TryFromBytes, AsBytes, !FromBytes, !Unaligned);
+ assert_impls!(NonZeroUsize: KnownLayout, TryFromBytes, AsBytes, !FromBytes, !Unaligned);
+ assert_impls!(NonZeroIsize: KnownLayout, TryFromBytes, AsBytes, !FromBytes, !Unaligned);
+
+ assert_impls!(Option<NonZeroU8>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned);
+ assert_impls!(Option<NonZeroI8>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned);
+ assert_impls!(Option<NonZeroU16>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
+ assert_impls!(Option<NonZeroI16>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
+ assert_impls!(Option<NonZeroU32>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
+ assert_impls!(Option<NonZeroI32>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
+ assert_impls!(Option<NonZeroU64>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
+ assert_impls!(Option<NonZeroI64>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
+ assert_impls!(Option<NonZeroU128>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
+ assert_impls!(Option<NonZeroI128>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
+ assert_impls!(Option<NonZeroUsize>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
+ assert_impls!(Option<NonZeroIsize>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
+
+        // Implements none of the zerocopy traits.
+ struct NotZerocopy;
+
+ #[rustfmt::skip]
+ type FnManyArgs = fn(
+ NotZerocopy, u8, u8, u8, u8, u8, u8, u8, u8, u8, u8, u8,
+ ) -> (NotZerocopy, NotZerocopy);
+
+ // Allowed, because we're not actually using this type for FFI.
+ #[allow(improper_ctypes_definitions)]
+ #[rustfmt::skip]
+ type ECFnManyArgs = extern "C" fn(
+ NotZerocopy, u8, u8, u8, u8, u8, u8, u8, u8, u8, u8, u8,
+ ) -> (NotZerocopy, NotZerocopy);
+
+ #[cfg(feature = "alloc")]
+ assert_impls!(Option<Box<UnsafeCell<NotZerocopy>>>: KnownLayout, FromZeroes, !TryFromBytes, !FromBytes, !AsBytes, !Unaligned);
+ assert_impls!(Option<Box<[UnsafeCell<NotZerocopy>]>>: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
+ assert_impls!(Option<&'static UnsafeCell<NotZerocopy>>: KnownLayout, FromZeroes, !TryFromBytes, !FromBytes, !AsBytes, !Unaligned);
+ assert_impls!(Option<&'static [UnsafeCell<NotZerocopy>]>: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
+ assert_impls!(Option<&'static mut UnsafeCell<NotZerocopy>>: KnownLayout, FromZeroes, !TryFromBytes, !FromBytes, !AsBytes, !Unaligned);
+ assert_impls!(Option<&'static mut [UnsafeCell<NotZerocopy>]>: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
+ assert_impls!(Option<NonNull<UnsafeCell<NotZerocopy>>>: KnownLayout, FromZeroes, !TryFromBytes, !FromBytes, !AsBytes, !Unaligned);
+ assert_impls!(Option<NonNull<[UnsafeCell<NotZerocopy>]>>: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
+ assert_impls!(Option<fn()>: KnownLayout, FromZeroes, !TryFromBytes, !FromBytes, !AsBytes, !Unaligned);
+ assert_impls!(Option<FnManyArgs>: KnownLayout, FromZeroes, !TryFromBytes, !FromBytes, !AsBytes, !Unaligned);
+ assert_impls!(Option<extern "C" fn()>: KnownLayout, FromZeroes, !TryFromBytes, !FromBytes, !AsBytes, !Unaligned);
+ assert_impls!(Option<ECFnManyArgs>: KnownLayout, FromZeroes, !TryFromBytes, !FromBytes, !AsBytes, !Unaligned);
+
+ assert_impls!(PhantomData<NotZerocopy>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned);
+ assert_impls!(PhantomData<[u8]>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned);
+
+ assert_impls!(ManuallyDrop<u8>: KnownLayout, FromZeroes, FromBytes, AsBytes, Unaligned, !TryFromBytes);
+ assert_impls!(ManuallyDrop<[u8]>: KnownLayout, FromZeroes, FromBytes, AsBytes, Unaligned, !TryFromBytes);
+ assert_impls!(ManuallyDrop<NotZerocopy>: !TryFromBytes, !KnownLayout, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
+ assert_impls!(ManuallyDrop<[NotZerocopy]>: !TryFromBytes, !KnownLayout, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
+
+ assert_impls!(MaybeUninit<u8>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, Unaligned, !AsBytes);
+ assert_impls!(MaybeUninit<NotZerocopy>: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
+
+ assert_impls!(Wrapping<u8>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned);
+ assert_impls!(Wrapping<NotZerocopy>: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
+
+ assert_impls!(Unalign<u8>: KnownLayout, FromZeroes, FromBytes, AsBytes, Unaligned, !TryFromBytes);
+ assert_impls!(Unalign<NotZerocopy>: Unaligned, !KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes);
+
+ assert_impls!([u8]: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned);
+ assert_impls!([bool]: KnownLayout, TryFromBytes, FromZeroes, AsBytes, Unaligned, !FromBytes);
+ assert_impls!([NotZerocopy]: !KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
+ assert_impls!([u8; 0]: KnownLayout, FromZeroes, FromBytes, AsBytes, Unaligned, !TryFromBytes);
+ assert_impls!([NotZerocopy; 0]: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
+ assert_impls!([u8; 1]: KnownLayout, FromZeroes, FromBytes, AsBytes, Unaligned, !TryFromBytes);
+ assert_impls!([NotZerocopy; 1]: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
+
+ assert_impls!(*const NotZerocopy: KnownLayout, FromZeroes, !TryFromBytes, !FromBytes, !AsBytes, !Unaligned);
+ assert_impls!(*mut NotZerocopy: KnownLayout, FromZeroes, !TryFromBytes, !FromBytes, !AsBytes, !Unaligned);
+ assert_impls!(*const [NotZerocopy]: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
+ assert_impls!(*mut [NotZerocopy]: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
+ assert_impls!(*const dyn Debug: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
+ assert_impls!(*mut dyn Debug: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
+
+ #[cfg(feature = "simd")]
+ {
+ #[allow(unused_macros)]
+ macro_rules! test_simd_arch_mod {
+ ($arch:ident, $($typ:ident),*) => {
+ {
+ use core::arch::$arch::{$($typ),*};
+ use crate::*;
+ $( assert_impls!($typ: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned); )*
+ }
+ };
+ }
+ #[cfg(target_arch = "x86")]
+ test_simd_arch_mod!(x86, __m128, __m128d, __m128i, __m256, __m256d, __m256i);
+
+ #[cfg(all(feature = "simd-nightly", target_arch = "x86"))]
+ test_simd_arch_mod!(x86, __m512bh, __m512, __m512d, __m512i);
+
+ #[cfg(target_arch = "x86_64")]
+ test_simd_arch_mod!(x86_64, __m128, __m128d, __m128i, __m256, __m256d, __m256i);
+
+ #[cfg(all(feature = "simd-nightly", target_arch = "x86_64"))]
+ test_simd_arch_mod!(x86_64, __m512bh, __m512, __m512d, __m512i);
+
+ #[cfg(target_arch = "wasm32")]
+ test_simd_arch_mod!(wasm32, v128);
+
+ #[cfg(all(feature = "simd-nightly", target_arch = "powerpc"))]
+ test_simd_arch_mod!(
+ powerpc,
+ vector_bool_long,
+ vector_double,
+ vector_signed_long,
+ vector_unsigned_long
+ );
+
+ #[cfg(all(feature = "simd-nightly", target_arch = "powerpc64"))]
+ test_simd_arch_mod!(
+ powerpc64,
+ vector_bool_long,
+ vector_double,
+ vector_signed_long,
+ vector_unsigned_long
+ );
+ #[cfg(target_arch = "aarch64")]
+ #[rustfmt::skip]
+ test_simd_arch_mod!(
+ aarch64, float32x2_t, float32x4_t, float64x1_t, float64x2_t, int8x8_t, int8x8x2_t,
+ int8x8x3_t, int8x8x4_t, int8x16_t, int8x16x2_t, int8x16x3_t, int8x16x4_t, int16x4_t,
+ int16x8_t, int32x2_t, int32x4_t, int64x1_t, int64x2_t, poly8x8_t, poly8x8x2_t, poly8x8x3_t,
+ poly8x8x4_t, poly8x16_t, poly8x16x2_t, poly8x16x3_t, poly8x16x4_t, poly16x4_t, poly16x8_t,
+ poly64x1_t, poly64x2_t, uint8x8_t, uint8x8x2_t, uint8x8x3_t, uint8x8x4_t, uint8x16_t,
+ uint8x16x2_t, uint8x16x3_t, uint8x16x4_t, uint16x4_t, uint16x8_t, uint32x2_t, uint32x4_t,
+ uint64x1_t, uint64x2_t
+ );
+ #[cfg(all(feature = "simd-nightly", target_arch = "arm"))]
+ #[rustfmt::skip]
+ test_simd_arch_mod!(arm, int8x4_t, uint8x4_t);
+ }
+ }
+}
+
+#[cfg(kani)]
+mod proofs {
+ use super::*;
+
+ impl kani::Arbitrary for DstLayout {
+ fn any() -> Self {
+ let align: NonZeroUsize = kani::any();
+ let size_info: SizeInfo = kani::any();
+
+ kani::assume(align.is_power_of_two());
+ kani::assume(align < DstLayout::THEORETICAL_MAX_ALIGN);
+
+            // For testing purposes, we care most about instantiations of
+ // `DstLayout` that can correspond to actual Rust types. We use
+ // `Layout` to verify that our `DstLayout` satisfies the validity
+ // conditions of Rust layouts.
+ kani::assume(
+ match size_info {
+ SizeInfo::Sized { _size } => Layout::from_size_align(_size, align.get()),
+ SizeInfo::SliceDst(TrailingSliceLayout { _offset, _elem_size }) => {
+                        // `SliceDst` cannot encode an exact size, but we know
+ // it is at least `_offset` bytes.
+ Layout::from_size_align(_offset, align.get())
+ }
+ }
+ .is_ok(),
+ );
+
+        Self { align, size_info }
+ }
+ }
+
+ impl kani::Arbitrary for SizeInfo {
+ fn any() -> Self {
+ let is_sized: bool = kani::any();
+
+ match is_sized {
+ true => {
+ let size: usize = kani::any();
+
+ kani::assume(size <= isize::MAX as _);
+
+ SizeInfo::Sized { _size: size }
+ }
+ false => SizeInfo::SliceDst(kani::any()),
+ }
+ }
+ }
+
+ impl kani::Arbitrary for TrailingSliceLayout {
+ fn any() -> Self {
+ let elem_size: usize = kani::any();
+ let offset: usize = kani::any();
+
+ kani::assume(elem_size < isize::MAX as _);
+ kani::assume(offset < isize::MAX as _);
+
+ TrailingSliceLayout { _elem_size: elem_size, _offset: offset }
+ }
+ }
+
+ #[kani::proof]
+ fn prove_dst_layout_extend() {
+ use crate::util::{core_layout::padding_needed_for, max, min};
+
+ let base: DstLayout = kani::any();
+ let field: DstLayout = kani::any();
+ let packed: Option<NonZeroUsize> = kani::any();
+
+ if let Some(max_align) = packed {
+ kani::assume(max_align.is_power_of_two());
+ kani::assume(base.align <= max_align);
+ }
+
+ // The base can only be extended if it's sized.
+ kani::assume(matches!(base.size_info, SizeInfo::Sized { .. }));
+ let base_size = if let SizeInfo::Sized { _size: size } = base.size_info {
+ size
+ } else {
+ unreachable!();
+ };
+
+ // Under the above conditions, `DstLayout::extend` will not panic.
+ let composite = base.extend(field, packed);
+
+ // The field's alignment is clamped by `max_align` (i.e., the
+ // `packed` attribute, if any) [1].
+ //
+ // [1] Per https://doc.rust-lang.org/reference/type-layout.html#the-alignment-modifiers:
+ //
+        //   The alignment of each field, for the purpose of positioning
+ // fields, is the smaller of the specified alignment and the
+ // alignment of the field's type.
+ let field_align = min(field.align, packed.unwrap_or(DstLayout::THEORETICAL_MAX_ALIGN));
+
+ // The struct's alignment is the maximum of its previous alignment and
+ // `field_align`.
+ assert_eq!(composite.align, max(base.align, field_align));
+
+        // Compute the minimum amount of inter-field padding needed to
+        // satisfy the field's alignment, and the offset of the trailing
+        // field. [1]
+ //
+ // [1] Per https://doc.rust-lang.org/reference/type-layout.html#the-alignment-modifiers:
+ //
+ // Inter-field padding is guaranteed to be the minimum required in
+ // order to satisfy each field's (possibly altered) alignment.
+ let padding = padding_needed_for(base_size, field_align);
+ let offset = base_size + padding;
+
+ // For testing purposes, we'll also construct `alloc::Layout`
+ // stand-ins for `DstLayout`, and show that `extend` behaves
+ // comparably on both types.
+ let base_analog = Layout::from_size_align(base_size, base.align.get()).unwrap();
+
+ match field.size_info {
+ SizeInfo::Sized { _size: field_size } => {
+ if let SizeInfo::Sized { _size: composite_size } = composite.size_info {
+ // If the trailing field is sized, the resulting layout
+ // will be sized. Its size will be the sum of the
+                    // preceding layout, the size of the new field, and the
+ // size of inter-field padding between the two.
+ assert_eq!(composite_size, offset + field_size);
+
+ let field_analog =
+ Layout::from_size_align(field_size, field_align.get()).unwrap();
+
+ if let Ok((actual_composite, actual_offset)) = base_analog.extend(field_analog)
+ {
+ assert_eq!(actual_offset, offset);
+ assert_eq!(actual_composite.size(), composite_size);
+ assert_eq!(actual_composite.align(), composite.align.get());
+ } else {
+                        // An error here reflects that the composite of `base`
+ // and `field` cannot correspond to a real Rust type
+ // fragment, because such a fragment would violate
+ // the basic invariants of a valid Rust layout. At
+ // the time of writing, `DstLayout` is a little more
+ // permissive than `Layout`, so we don't assert
+ // anything in this branch (e.g., unreachability).
+ }
+ } else {
+ panic!("The composite of two sized layouts must be sized.")
+ }
+ }
+ SizeInfo::SliceDst(TrailingSliceLayout {
+ _offset: field_offset,
+ _elem_size: field_elem_size,
+ }) => {
+ if let SizeInfo::SliceDst(TrailingSliceLayout {
+ _offset: composite_offset,
+ _elem_size: composite_elem_size,
+ }) = composite.size_info
+ {
+ // The offset of the trailing slice component is the sum
+ // of the offset of the trailing field and the trailing
+ // slice offset within that field.
+ assert_eq!(composite_offset, offset + field_offset);
+ // The elem size is unchanged.
+ assert_eq!(composite_elem_size, field_elem_size);
+
+ let field_analog =
+ Layout::from_size_align(field_offset, field_align.get()).unwrap();
+
+ if let Ok((actual_composite, actual_offset)) = base_analog.extend(field_analog)
+ {
+ assert_eq!(actual_offset, offset);
+ assert_eq!(actual_composite.size(), composite_offset);
+ assert_eq!(actual_composite.align(), composite.align.get());
+ } else {
+                        // An error here reflects that the composite of `base`
+ // and `field` cannot correspond to a real Rust type
+ // fragment, because such a fragment would violate
+ // the basic invariants of a valid Rust layout. At
+ // the time of writing, `DstLayout` is a little more
+ // permissive than `Layout`, so we don't assert
+ // anything in this branch (e.g., unreachability).
+ }
+ } else {
+ panic!("The extension of a layout with a DST must result in a DST.")
+ }
+ }
+ }
+ }
+
+ #[kani::proof]
+ #[kani::should_panic]
+ fn prove_dst_layout_extend_dst_panics() {
+ let base: DstLayout = kani::any();
+ let field: DstLayout = kani::any();
+ let packed: Option<NonZeroUsize> = kani::any();
+
+ if let Some(max_align) = packed {
+ kani::assume(max_align.is_power_of_two());
+ kani::assume(base.align <= max_align);
+ }
+
+ kani::assume(matches!(base.size_info, SizeInfo::SliceDst(..)));
+
+ let _ = base.extend(field, packed);
+ }
+
+ #[kani::proof]
+ fn prove_dst_layout_pad_to_align() {
+ use crate::util::core_layout::padding_needed_for;
+
+ let layout: DstLayout = kani::any();
+
+ let padded: DstLayout = layout.pad_to_align();
+
+ // Calling `pad_to_align` does not alter the `DstLayout`'s alignment.
+ assert_eq!(padded.align, layout.align);
+
+ if let SizeInfo::Sized { _size: unpadded_size } = layout.size_info {
+ if let SizeInfo::Sized { _size: padded_size } = padded.size_info {
+                // If the layout is sized, it will remain sized after padding
+                // is added. Its size will be the sum of its unpadded size and
+                // the size of the trailing padding needed to satisfy its
+                // alignment requirements.
+ let padding = padding_needed_for(unpadded_size, layout.align);
+ assert_eq!(padded_size, unpadded_size + padding);
+
+ // Prove that calling `DstLayout::pad_to_align` behaves
+ // identically to `Layout::pad_to_align`.
+ let layout_analog =
+ Layout::from_size_align(unpadded_size, layout.align.get()).unwrap();
+ let padded_analog = layout_analog.pad_to_align();
+ assert_eq!(padded_analog.align(), layout.align.get());
+ assert_eq!(padded_analog.size(), padded_size);
+ } else {
+ panic!("The padding of a sized layout must result in a sized layout.")
+ }
+ } else {
+ // If the layout is a DST, padding cannot be statically added.
+ assert_eq!(padded.size_info, layout.size_info);
+ }
+ }
+}
diff --git a/third_party/rust/zerocopy/src/macro_util.rs b/third_party/rust/zerocopy/src/macro_util.rs
new file mode 100644
index 0000000000..24fec4f015
--- /dev/null
+++ b/third_party/rust/zerocopy/src/macro_util.rs
@@ -0,0 +1,670 @@
+// Copyright 2022 The Fuchsia Authors
+//
+// Licensed under a BSD-style license <LICENSE-BSD>, Apache License, Version 2.0
+// <LICENSE-APACHE or https://www.apache.org/licenses/LICENSE-2.0>, or the MIT
+// license <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your option.
+// This file may not be copied, modified, or distributed except according to
+// those terms.
+
+//! Utilities used by macros and by `zerocopy-derive`.
+//!
+//! These are defined here in `zerocopy` rather than in code generated by macros or
+//! by `zerocopy-derive` so that they can be compiled once rather than
+//! recompiled for every invocation (e.g., if they were defined in generated
+//! code, then deriving `AsBytes` and `FromBytes` on three different types would
+//! result in the code in question being emitted and compiled six different
+//! times).
+
+#![allow(missing_debug_implementations)]
+
+use core::{marker::PhantomData, mem::ManuallyDrop};
+
+// TODO(#29), TODO(https://github.com/rust-lang/rust/issues/69835): Remove this
+// `cfg` when `size_of_val_raw` is stabilized.
+#[cfg(__INTERNAL_USE_ONLY_NIGHLTY_FEATURES_IN_TESTS)]
+use core::ptr::{self, NonNull};
+
+/// A compile-time check that should be one particular value.
+pub trait ShouldBe<const VALUE: bool> {}
+
+/// A struct for checking whether `T` contains padding.
+pub struct HasPadding<T: ?Sized, const VALUE: bool>(PhantomData<T>);
+
+impl<T: ?Sized, const VALUE: bool> ShouldBe<VALUE> for HasPadding<T, VALUE> {}
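+
+// Together, `ShouldBe` and `HasPadding` form a compile-time assertion:
+// `HasPadding<T, VALUE>` implements `ShouldBe` only for its own `VALUE`, so a
+// bound of the shape (a hypothetical sketch, not the exact code emitted by
+// `zerocopy-derive`)
+//
+//     where HasPadding<Foo, { /* const expr computing padding */ }>: ShouldBe<false>
+//
+// is satisfiable only when the computed expression is `false`, i.e., only
+// when `Foo` has no padding.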
+
+/// A type whose size is equal to `align_of::<T>()`.
+#[repr(C)]
+pub struct AlignOf<T> {
+ // This field ensures that:
+ // - The size is always at least 1 (the minimum possible alignment).
+ // - If the alignment is greater than 1, Rust has to round up to the next
+    //   multiple of it in order to make sure that `AlignOf`'s size is a multiple
+ // of that alignment. Without this field, its size could be 0, which is a
+ // valid multiple of any alignment.
+ _u: u8,
+ _a: [T; 0],
+}
+
+impl<T> AlignOf<T> {
+ #[inline(never)] // Make `missing_inline_in_public_items` happy.
+ pub fn into_t(self) -> T {
+ unreachable!()
+ }
+}
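+
+// To illustrate the trick above: `AlignOf<T>` holds no data of nonzero size,
+// so its size is determined entirely by its alignment, which `_a: [T; 0]`
+// raises to `align_of::<T>()`. For example, on a target where `u64` has
+// alignment 8, one would expect:
+//
+//     assert_eq!(core::mem::size_of::<AlignOf<u64>>(), 8);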
+
+/// A type whose size is equal to `max(align_of::<T>(), align_of::<U>())`.
+#[repr(C)]
+pub union MaxAlignsOf<T, U> {
+ _t: ManuallyDrop<AlignOf<T>>,
+ _u: ManuallyDrop<AlignOf<U>>,
+}
+
+impl<T, U> MaxAlignsOf<T, U> {
+ #[inline(never)] // Make `missing_inline_in_public_items` happy.
+ pub fn new(_t: T, _u: U) -> MaxAlignsOf<T, U> {
+ unreachable!()
+ }
+}
+
+const _64K: usize = 1 << 16;
+
+// TODO(#29), TODO(https://github.com/rust-lang/rust/issues/69835): Remove this
+// `cfg` when `size_of_val_raw` is stabilized.
+#[cfg(__INTERNAL_USE_ONLY_NIGHLTY_FEATURES_IN_TESTS)]
+#[repr(C, align(65536))]
+struct Aligned64kAllocation([u8; _64K]);
+
+/// A pointer to an aligned allocation of size 2^16.
+///
+/// # Safety
+///
+/// `ALIGNED_64K_ALLOCATION` is guaranteed to point to the entirety of an
+/// allocation with size and alignment 2^16, and to have valid provenance.
+// TODO(#29), TODO(https://github.com/rust-lang/rust/issues/69835): Remove this
+// `cfg` when `size_of_val_raw` is stabilized.
+#[cfg(__INTERNAL_USE_ONLY_NIGHLTY_FEATURES_IN_TESTS)]
+pub const ALIGNED_64K_ALLOCATION: NonNull<[u8]> = {
+ const REF: &Aligned64kAllocation = &Aligned64kAllocation([0; _64K]);
+ let ptr: *const Aligned64kAllocation = REF;
+ let ptr: *const [u8] = ptr::slice_from_raw_parts(ptr.cast(), _64K);
+ // SAFETY:
+ // - `ptr` is derived from a Rust reference, which is guaranteed to be
+ // non-null.
+ // - `ptr` is derived from an `&Aligned64kAllocation`, which has size and
+ // alignment `_64K` as promised. Its length is initialized to `_64K`,
+ // which means that it refers to the entire allocation.
+ // - `ptr` is derived from a Rust reference, which is guaranteed to have
+ // valid provenance.
+ //
+ // TODO(#429): Once `NonNull::new_unchecked` docs document that it preserves
+ // provenance, cite those docs.
+ // TODO: Replace this `as` with `ptr.cast_mut()` once our MSRV >= 1.65
+ #[allow(clippy::as_conversions)]
+ unsafe {
+ NonNull::new_unchecked(ptr as *mut _)
+ }
+};
+
+/// Computes the offset of the base of the field `$trailing_field_name` within
+/// the type `$ty`.
+///
+/// `trailing_field_offset!` produces code which is valid in a `const` context.
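+///
+/// For example (a sketch mirroring `test_trailing_field_offset` below; tuple
+/// struct fields are named by index, and the expected value assumes a target
+/// on which `u64` is 8-aligned):
+///
+/// ```rust,ignore
+/// #[repr(C)]
+/// struct Test(u8, [u64]);
+///
+/// assert_eq!(trailing_field_offset!(Test, 1), Some(8));
+/// ```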
+// TODO(#29), TODO(https://github.com/rust-lang/rust/issues/69835): Remove this
+// `cfg` when `size_of_val_raw` is stabilized.
+#[cfg(__INTERNAL_USE_ONLY_NIGHLTY_FEATURES_IN_TESTS)]
+#[doc(hidden)] // `#[macro_export]` bypasses this module's `#[doc(hidden)]`.
+#[macro_export]
+macro_rules! trailing_field_offset {
+ ($ty:ty, $trailing_field_name:tt) => {{
+ let min_size = {
+ let zero_elems: *const [()] =
+ $crate::macro_util::core_reexport::ptr::slice_from_raw_parts(
+ $crate::macro_util::core_reexport::ptr::NonNull::<()>::dangling()
+ .as_ptr()
+ .cast_const(),
+ 0,
+ );
+ // SAFETY:
+ // - If `$ty` is `Sized`, `size_of_val_raw` is always safe to call.
+ // - Otherwise:
+ // - If `$ty` is not a slice DST, this pointer conversion will
+ // fail due to "mismatched vtable kinds", and compilation will
+ // fail.
+ // - If `$ty` is a slice DST, the safety requirement is that "the
+ // length of the slice tail must be an initialized integer, and
+ // the size of the entire value (dynamic tail length +
+ // statically sized prefix) must fit in isize." The length is
+ // initialized to 0 above, and Rust guarantees that no type's
+ // minimum size may overflow `isize`. [1]
+ //
+ // [1] TODO(#429),
+ // TODO(https://github.com/rust-lang/unsafe-code-guidelines/issues/465#issuecomment-1782206516):
+ // Citation for this?
+ unsafe {
+ #[allow(clippy::as_conversions)]
+ $crate::macro_util::core_reexport::mem::size_of_val_raw(zero_elems as *const $ty)
+ }
+ };
+
+ assert!(min_size <= _64K);
+
+ #[allow(clippy::as_conversions)]
+ let ptr = ALIGNED_64K_ALLOCATION.as_ptr() as *const $ty;
+
+ // SAFETY:
+ // - Thanks to the preceding `assert!`, we know that the value with zero
+ // elements fits in `_64K` bytes, and thus in the allocation addressed
+ // by `ALIGNED_64K_ALLOCATION`. The offset of the trailing field is
+ // guaranteed to be no larger than this size, so this field projection
+ // is guaranteed to remain in-bounds of its allocation.
+ // - Because the minimum size is no larger than `_64K` bytes, and
+ // because an object's size must always be a multiple of its alignment
+ // [1], we know that `$ty`'s alignment is no larger than `_64K`. The
+ // allocation addressed by `ALIGNED_64K_ALLOCATION` is guaranteed to
+ // be aligned to `_64K`, so `ptr` is guaranteed to satisfy `$ty`'s
+ // alignment.
+ //
+ // Note that, as of [2], this requirement is technically unnecessary
+    // for Rust versions >= 1.75.0, but there is no harm in guaranteeing it
+    // anyway until we bump our MSRV.
+ //
+ // [1] Per https://doc.rust-lang.org/reference/type-layout.html:
+ //
+ // The size of a value is always a multiple of its alignment.
+ //
+ // [2] https://github.com/rust-lang/reference/pull/1387
+ let field = unsafe {
+ $crate::macro_util::core_reexport::ptr::addr_of!((*ptr).$trailing_field_name)
+ };
+ // SAFETY:
+ // - Both `ptr` and `field` are derived from the same allocated object.
+ // - By the preceding safety comment, `field` is in bounds of that
+ // allocated object.
+ // - The distance, in bytes, between `ptr` and `field` is required to be
+ // a multiple of the size of `u8`, which is trivially true because
+ // `u8`'s size is 1.
+ // - The distance, in bytes, cannot overflow `isize`. This is guaranteed
+ // because no allocated object can have a size larger than can fit in
+ // `isize`. [1]
+ // - The distance being in-bounds cannot rely on wrapping around the
+ // address space. This is guaranteed because the same is guaranteed of
+ // allocated objects. [1]
+ //
+ // [1] TODO(#429), TODO(https://github.com/rust-lang/rust/pull/116675):
+ // Once these are guaranteed in the Reference, cite it.
+ let offset = unsafe { field.cast::<u8>().offset_from(ptr.cast::<u8>()) };
+ // Guaranteed not to be lossy: `field` comes after `ptr`, so the offset
+ // from `ptr` to `field` is guaranteed to be positive.
+ assert!(offset >= 0);
+ Some(
+ #[allow(clippy::as_conversions)]
+ {
+ offset as usize
+ },
+ )
+ }};
+}
+
+/// Computes alignment of `$ty: ?Sized`.
+///
+/// `align_of!` produces code which is valid in a `const` context.
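+///
+/// For example (a sketch; unlike `core::mem::align_of`, this macro also
+/// accepts unsized types such as slices, and the expected value assumes a
+/// target on which `u16` is 2-aligned):
+///
+/// ```rust,ignore
+/// assert_eq!(align_of!([u16]), Some(2));
+/// ```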
+// TODO(#29), TODO(https://github.com/rust-lang/rust/issues/69835): Remove this
+// `cfg` when `size_of_val_raw` is stabilized.
+#[cfg(__INTERNAL_USE_ONLY_NIGHLTY_FEATURES_IN_TESTS)]
+#[doc(hidden)] // `#[macro_export]` bypasses this module's `#[doc(hidden)]`.
+#[macro_export]
+macro_rules! align_of {
+ ($ty:ty) => {{
+ // SAFETY: `OffsetOfTrailingIsAlignment` is `repr(C)`, and its layout is
+ // guaranteed [1] to begin with the single-byte layout for `_byte`,
+ // followed by the padding needed to align `_trailing`, then the layout
+ // for `_trailing`, and finally any trailing padding bytes needed to
+        // correctly align the entire struct.
+ //
+ // This macro computes the alignment of `$ty` by counting the number of
+        // bytes preceding `_trailing`. For instance, if the alignment of `$ty`
+        // is `1`, then no padding is required to align `_trailing`, and it
+        // will be located immediately after `_byte` at offset 1. If the
+        // alignment of `$ty` is 2, then a single padding byte is required
+        // before `_trailing`, and `_trailing` will be located at offset 2.
+        //
+ // This correspondence between offset and alignment holds for all valid
+ // Rust alignments, and we confirm this exhaustively (or, at least up to
+ // the maximum alignment supported by `trailing_field_offset!`) in
+ // `test_align_of_dst`.
+ //
+ // [1]: https://doc.rust-lang.org/nomicon/other-reprs.html#reprc
+
+ #[repr(C)]
+ struct OffsetOfTrailingIsAlignment {
+ _byte: u8,
+ _trailing: $ty,
+ }
+
+ trailing_field_offset!(OffsetOfTrailingIsAlignment, _trailing)
+ }};
+}
+
+/// Does the struct type `$t` have padding?
+///
+/// `$ts` is the list of the type of every field in `$t`. `$t` must be a
+/// struct type, or else `struct_has_padding!`'s result may be meaningless.
+///
+/// Note that `struct_has_padding!`'s results are independent of `repr` since
+/// they only consider the size of the type and the sizes of the fields.
+/// Whatever the repr, the size of the type already takes into account any
+/// padding that the compiler has decided to add. Structs with well-defined
+/// representations (such as `repr(C)`) can use this macro to check for padding.
+/// Note that while this may yield some consistent value for some `repr(Rust)`
+/// structs, it is not guaranteed across platforms or compilations.
+#[doc(hidden)] // `#[macro_export]` bypasses this module's `#[doc(hidden)]`.
+#[macro_export]
+macro_rules! struct_has_padding {
+ ($t:ty, $($ts:ty),*) => {
+ core::mem::size_of::<$t>() > 0 $(+ core::mem::size_of::<$ts>())*
+ };
+}
+
+/// Does the union type `$t` have padding?
+///
+/// `$ts` is the list of the type of every field in `$t`. `$t` must be a
+/// union type, or else `union_has_padding!`'s result may be meaningless.
+///
+/// Note that `union_has_padding!`'s results are independent of `repr` since
+/// they only consider the size of the type and the sizes of the fields.
+/// Whatever the repr, the size of the type already takes into account any
+/// padding that the compiler has decided to add. Unions with well-defined
+/// representations (such as `repr(C)`) can use this macro to check for padding.
+/// Note that while this may yield some consistent value for some `repr(Rust)`
+/// unions, it is not guaranteed across platforms or compilations.
+#[doc(hidden)] // `#[macro_export]` bypasses this module's `#[doc(hidden)]`.
+#[macro_export]
+macro_rules! union_has_padding {
+ ($t:ty, $($ts:ty),*) => {
+ false $(|| core::mem::size_of::<$t>() != core::mem::size_of::<$ts>())*
+ };
+}
+
+/// Does the type of `t` have alignment greater than or equal to the alignment
+/// of the type of `u`? If not, this macro produces a compile error. It must be
+/// invoked in a dead codepath. It is used in `transmute_ref!` and
+/// `transmute_mut!`.
+#[doc(hidden)] // `#[macro_export]` bypasses this module's `#[doc(hidden)]`.
+#[macro_export]
+macro_rules! assert_align_gt_eq {
+ ($t:ident, $u: ident) => {{
+ // The comments here should be read in the context of this macro's
+ // invocations in `transmute_ref!` and `transmute_mut!`.
+ if false {
+ // The type wildcard in this bound is inferred to be `T` because
+ // `align_of.into_t()` is assigned to `t` (which has type `T`).
+ let align_of: $crate::macro_util::AlignOf<_> = unreachable!();
+ $t = align_of.into_t();
+ // `max_aligns` is inferred to have type `MaxAlignsOf<T, U>` because
+ // of the inferred types of `t` and `u`.
+ let mut max_aligns = $crate::macro_util::MaxAlignsOf::new($t, $u);
+
+ // This transmute will only compile successfully if
+ // `align_of::<T>() == max(align_of::<T>(), align_of::<U>())` - in
+ // other words, if `align_of::<T>() >= align_of::<U>()`.
+ //
+ // SAFETY: This code is never run.
+ max_aligns = unsafe { $crate::macro_util::core_reexport::mem::transmute(align_of) };
+ } else {
+ loop {}
+ }
+ }};
+}
+
+/// Do `t` and `u` have the same size? If not, this macro produces a compile
+/// error. It must be invoked in a dead codepath. This is used in
+/// `transmute_ref!` and `transmute_mut!`.
+#[doc(hidden)] // `#[macro_export]` bypasses this module's `#[doc(hidden)]`.
+#[macro_export]
+macro_rules! assert_size_eq {
+ ($t:ident, $u: ident) => {{
+ // The comments here should be read in the context of this macro's
+ // invocations in `transmute_ref!` and `transmute_mut!`.
+ if false {
+ // SAFETY: This code is never run.
+ $u = unsafe {
+ // Clippy: It's okay to transmute a type to itself.
+ #[allow(clippy::useless_transmute)]
+ $crate::macro_util::core_reexport::mem::transmute($t)
+ };
+ } else {
+ loop {}
+ }
+ }};
+}
+
+/// Transmutes a reference of one type to a reference of another type.
+///
+/// # Safety
+///
+/// The caller must guarantee that:
+/// - `Src: AsBytes`
+/// - `Dst: FromBytes`
+/// - `size_of::<Src>() == size_of::<Dst>()`
+/// - `align_of::<Src>() >= align_of::<Dst>()`
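+///
+/// # Example
+///
+/// A sketch of a sound call (in practice, callers go through the
+/// `transmute_ref!` macro, which verifies these preconditions at compile
+/// time):
+///
+/// ```rust,ignore
+/// let src: &u32 = &0x01020304;
+/// // SAFETY: `u32: AsBytes`, `[u8; 4]: FromBytes`, both types have size 4,
+/// // and `align_of::<u32>() >= align_of::<[u8; 4]>()`.
+/// let dst: &[u8; 4] = unsafe { transmute_ref(src) };
+/// ```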
+#[inline(always)]
+pub const unsafe fn transmute_ref<'dst, 'src: 'dst, Src: 'src, Dst: 'dst>(
+ src: &'src Src,
+) -> &'dst Dst {
+ let src: *const Src = src;
+ let dst = src.cast::<Dst>();
+ // SAFETY:
+ // - We know that it is sound to view the target type of the input reference
+ // (`Src`) as the target type of the output reference (`Dst`) because the
+ // caller has guaranteed that `Src: AsBytes`, `Dst: FromBytes`, and
+ // `size_of::<Src>() == size_of::<Dst>()`.
+ // - We know that there are no `UnsafeCell`s, and thus we don't have to
+ // worry about `UnsafeCell` overlap, because `Src: AsBytes` and `Dst:
+ // FromBytes` both forbid `UnsafeCell`s.
+ // - The caller has guaranteed that alignment is not increased.
+ // - We know that the returned lifetime will not outlive the input lifetime
+ // thanks to the lifetime bounds on this function.
+ unsafe { &*dst }
+}
+
+/// Transmutes a mutable reference of one type to a mutable reference of another
+/// type.
+///
+/// # Safety
+///
+/// The caller must guarantee that:
+/// - `Src: FromBytes + AsBytes`
+/// - `Dst: FromBytes + AsBytes`
+/// - `size_of::<Src>() == size_of::<Dst>()`
+/// - `align_of::<Src>() >= align_of::<Dst>()`
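+///
+/// # Example
+///
+/// A sketch of a sound call (in practice, callers go through the
+/// `transmute_mut!` macro, which verifies these preconditions at compile
+/// time):
+///
+/// ```rust,ignore
+/// let mut src: u32 = 0x01020304;
+/// // SAFETY: `u32` and `[u8; 4]` are both `FromBytes + AsBytes`, both have
+/// // size 4, and alignment does not increase.
+/// let dst: &mut [u8; 4] = unsafe { transmute_mut(&mut src) };
+/// dst[0] = 0xff;
+/// ```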
+#[inline(always)]
+pub unsafe fn transmute_mut<'dst, 'src: 'dst, Src: 'src, Dst: 'dst>(
+ src: &'src mut Src,
+) -> &'dst mut Dst {
+ let src: *mut Src = src;
+ let dst = src.cast::<Dst>();
+ // SAFETY:
+ // - We know that it is sound to view the target type of the input reference
+ // (`Src`) as the target type of the output reference (`Dst`) and
+ // vice-versa because the caller has guaranteed that `Src: FromBytes +
+ // AsBytes`, `Dst: FromBytes + AsBytes`, and `size_of::<Src>() ==
+ // size_of::<Dst>()`.
+ // - We know that there are no `UnsafeCell`s, and thus we don't have to
+ // worry about `UnsafeCell` overlap, because `Src: FromBytes + AsBytes`
+ // and `Dst: FromBytes + AsBytes` forbid `UnsafeCell`s.
+ // - The caller has guaranteed that alignment is not increased.
+ // - We know that the returned lifetime will not outlive the input lifetime
+ // thanks to the lifetime bounds on this function.
+ unsafe { &mut *dst }
+}
+
+// NOTE: We can't change this to a `pub use core as core_reexport` until [1] is
+// fixed or we update to a semver-breaking version (as of this writing, 0.8.0)
+// on the `main` branch.
+//
+// [1] https://github.com/obi1kenobi/cargo-semver-checks/issues/573
+pub mod core_reexport {
+ pub use core::*;
+
+ pub mod mem {
+ pub use core::mem::*;
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use core::mem;
+
+ use super::*;
+ use crate::util::testutil::*;
+
+ #[test]
+ fn test_align_of() {
+ macro_rules! test {
+ ($ty:ty) => {
+ assert_eq!(mem::size_of::<AlignOf<$ty>>(), mem::align_of::<$ty>());
+ };
+ }
+
+ test!(());
+ test!(u8);
+ test!(AU64);
+ test!([AU64; 2]);
+ }
+
+ #[test]
+ fn test_max_aligns_of() {
+ macro_rules! test {
+ ($t:ty, $u:ty) => {
+ assert_eq!(
+ mem::size_of::<MaxAlignsOf<$t, $u>>(),
+ core::cmp::max(mem::align_of::<$t>(), mem::align_of::<$u>())
+ );
+ };
+ }
+
+ test!(u8, u8);
+ test!(u8, AU64);
+ test!(AU64, u8);
+ }
+
+ #[test]
+ fn test_typed_align_check() {
+ // Test that the type-based alignment check used in
+ // `assert_align_gt_eq!` behaves as expected.
+
+ macro_rules! assert_t_align_gteq_u_align {
+ ($t:ty, $u:ty, $gteq:expr) => {
+ assert_eq!(
+ mem::size_of::<MaxAlignsOf<$t, $u>>() == mem::size_of::<AlignOf<$t>>(),
+ $gteq
+ );
+ };
+ }
+
+ assert_t_align_gteq_u_align!(u8, u8, true);
+ assert_t_align_gteq_u_align!(AU64, AU64, true);
+ assert_t_align_gteq_u_align!(AU64, u8, true);
+ assert_t_align_gteq_u_align!(u8, AU64, false);
+ }
+
+ // TODO(#29), TODO(https://github.com/rust-lang/rust/issues/69835): Remove
+ // this `cfg` when `size_of_val_raw` is stabilized.
+ #[allow(clippy::decimal_literal_representation)]
+ #[cfg(__INTERNAL_USE_ONLY_NIGHLTY_FEATURES_IN_TESTS)]
+ #[test]
+ fn test_trailing_field_offset() {
+ assert_eq!(mem::align_of::<Aligned64kAllocation>(), _64K);
+
+ macro_rules! test {
+ (#[$cfg:meta] ($($ts:ty),* ; $trailing_field_ty:ty) => $expect:expr) => {{
+ #[$cfg]
+ struct Test($($ts,)* $trailing_field_ty);
+ assert_eq!(test!(@offset $($ts),* ; $trailing_field_ty), $expect);
+ }};
+ (#[$cfg:meta] $(#[$cfgs:meta])* ($($ts:ty),* ; $trailing_field_ty:ty) => $expect:expr) => {
+ test!(#[$cfg] ($($ts),* ; $trailing_field_ty) => $expect);
+ test!($(#[$cfgs])* ($($ts),* ; $trailing_field_ty) => $expect);
+ };
+ (@offset ; $_trailing:ty) => { trailing_field_offset!(Test, 0) };
+ (@offset $_t:ty ; $_trailing:ty) => { trailing_field_offset!(Test, 1) };
+ }
+
+ test!(#[repr(C)] #[repr(transparent)] #[repr(packed)](; u8) => Some(0));
+ test!(#[repr(C)] #[repr(transparent)] #[repr(packed)](; [u8]) => Some(0));
+ test!(#[repr(C)] #[repr(packed)] (u8; u8) => Some(1));
+ test!(#[repr(C)] (; AU64) => Some(0));
+ test!(#[repr(C)] (; [AU64]) => Some(0));
+ test!(#[repr(C)] (u8; AU64) => Some(8));
+ test!(#[repr(C)] (u8; [AU64]) => Some(8));
+ test!(#[repr(C)] (; Nested<u8, AU64>) => Some(0));
+ test!(#[repr(C)] (; Nested<u8, [AU64]>) => Some(0));
+ test!(#[repr(C)] (u8; Nested<u8, AU64>) => Some(8));
+ test!(#[repr(C)] (u8; Nested<u8, [AU64]>) => Some(8));
+
+ // Test that `packed(N)` limits the offset of the trailing field.
+ test!(#[repr(C, packed( 1))] (u8; elain::Align< 2>) => Some( 1));
+ test!(#[repr(C, packed( 2))] (u8; elain::Align< 4>) => Some( 2));
+ test!(#[repr(C, packed( 4))] (u8; elain::Align< 8>) => Some( 4));
+ test!(#[repr(C, packed( 8))] (u8; elain::Align< 16>) => Some( 8));
+ test!(#[repr(C, packed( 16))] (u8; elain::Align< 32>) => Some( 16));
+ test!(#[repr(C, packed( 32))] (u8; elain::Align< 64>) => Some( 32));
+ test!(#[repr(C, packed( 64))] (u8; elain::Align< 128>) => Some( 64));
+ test!(#[repr(C, packed( 128))] (u8; elain::Align< 256>) => Some( 128));
+ test!(#[repr(C, packed( 256))] (u8; elain::Align< 512>) => Some( 256));
+ test!(#[repr(C, packed( 512))] (u8; elain::Align< 1024>) => Some( 512));
+ test!(#[repr(C, packed( 1024))] (u8; elain::Align< 2048>) => Some( 1024));
+ test!(#[repr(C, packed( 2048))] (u8; elain::Align< 4096>) => Some( 2048));
+ test!(#[repr(C, packed( 4096))] (u8; elain::Align< 8192>) => Some( 4096));
+ test!(#[repr(C, packed( 8192))] (u8; elain::Align< 16384>) => Some( 8192));
+ test!(#[repr(C, packed( 16384))] (u8; elain::Align< 32768>) => Some( 16384));
+ test!(#[repr(C, packed( 32768))] (u8; elain::Align< 65536>) => Some( 32768));
+ test!(#[repr(C, packed( 65536))] (u8; elain::Align< 131072>) => Some( 65536));
+ /* Alignments above 65536 are not yet supported.
+ test!(#[repr(C, packed( 131072))] (u8; elain::Align< 262144>) => Some( 131072));
+ test!(#[repr(C, packed( 262144))] (u8; elain::Align< 524288>) => Some( 262144));
+ test!(#[repr(C, packed( 524288))] (u8; elain::Align< 1048576>) => Some( 524288));
+ test!(#[repr(C, packed( 1048576))] (u8; elain::Align< 2097152>) => Some( 1048576));
+ test!(#[repr(C, packed( 2097152))] (u8; elain::Align< 4194304>) => Some( 2097152));
+ test!(#[repr(C, packed( 4194304))] (u8; elain::Align< 8388608>) => Some( 4194304));
+ test!(#[repr(C, packed( 8388608))] (u8; elain::Align< 16777216>) => Some( 8388608));
+ test!(#[repr(C, packed( 16777216))] (u8; elain::Align< 33554432>) => Some( 16777216));
+ test!(#[repr(C, packed( 33554432))] (u8; elain::Align< 67108864>) => Some( 33554432));
+ test!(#[repr(C, packed( 67108864))] (u8; elain::Align< 33554432>) => Some( 67108864));
+ test!(#[repr(C, packed( 33554432))] (u8; elain::Align<134217728>) => Some( 33554432));
+ test!(#[repr(C, packed(134217728))] (u8; elain::Align<268435456>) => Some(134217728));
+ test!(#[repr(C, packed(268435456))] (u8; elain::Align<268435456>) => Some(268435456));
+ */
+
+ // Test that `align(N)` does not limit the offset of the trailing field.
+ test!(#[repr(C, align( 1))] (u8; elain::Align< 2>) => Some( 2));
+ test!(#[repr(C, align( 2))] (u8; elain::Align< 4>) => Some( 4));
+ test!(#[repr(C, align( 4))] (u8; elain::Align< 8>) => Some( 8));
+ test!(#[repr(C, align( 8))] (u8; elain::Align< 16>) => Some( 16));
+ test!(#[repr(C, align( 16))] (u8; elain::Align< 32>) => Some( 32));
+ test!(#[repr(C, align( 32))] (u8; elain::Align< 64>) => Some( 64));
+ test!(#[repr(C, align( 64))] (u8; elain::Align< 128>) => Some( 128));
+ test!(#[repr(C, align( 128))] (u8; elain::Align< 256>) => Some( 256));
+ test!(#[repr(C, align( 256))] (u8; elain::Align< 512>) => Some( 512));
+ test!(#[repr(C, align( 512))] (u8; elain::Align< 1024>) => Some( 1024));
+ test!(#[repr(C, align( 1024))] (u8; elain::Align< 2048>) => Some( 2048));
+ test!(#[repr(C, align( 2048))] (u8; elain::Align< 4096>) => Some( 4096));
+ test!(#[repr(C, align( 4096))] (u8; elain::Align< 8192>) => Some( 8192));
+ test!(#[repr(C, align( 8192))] (u8; elain::Align< 16384>) => Some( 16384));
+ test!(#[repr(C, align( 16384))] (u8; elain::Align< 32768>) => Some( 32768));
+ test!(#[repr(C, align( 32768))] (u8; elain::Align< 65536>) => Some( 65536));
+ /* Alignments above 65536 are not yet supported.
+ test!(#[repr(C, align( 65536))] (u8; elain::Align< 131072>) => Some( 131072));
+ test!(#[repr(C, align( 131072))] (u8; elain::Align< 262144>) => Some( 262144));
+ test!(#[repr(C, align( 262144))] (u8; elain::Align< 524288>) => Some( 524288));
+ test!(#[repr(C, align( 524288))] (u8; elain::Align< 1048576>) => Some( 1048576));
+ test!(#[repr(C, align( 1048576))] (u8; elain::Align< 2097152>) => Some( 2097152));
+ test!(#[repr(C, align( 2097152))] (u8; elain::Align< 4194304>) => Some( 4194304));
+ test!(#[repr(C, align( 4194304))] (u8; elain::Align< 8388608>) => Some( 8388608));
+ test!(#[repr(C, align( 8388608))] (u8; elain::Align< 16777216>) => Some( 16777216));
+ test!(#[repr(C, align( 16777216))] (u8; elain::Align< 33554432>) => Some( 33554432));
+ test!(#[repr(C, align( 33554432))] (u8; elain::Align< 67108864>) => Some( 67108864));
+ test!(#[repr(C, align( 67108864))] (u8; elain::Align< 33554432>) => Some( 33554432));
+ test!(#[repr(C, align( 33554432))] (u8; elain::Align<134217728>) => Some(134217728));
+ test!(#[repr(C, align(134217728))] (u8; elain::Align<268435456>) => Some(268435456));
+ */
+ }
+
+ // TODO(#29), TODO(https://github.com/rust-lang/rust/issues/69835): Remove
+ // this `cfg` when `size_of_val_raw` is stabilized.
+ #[allow(clippy::decimal_literal_representation)]
+ #[cfg(__INTERNAL_USE_ONLY_NIGHLTY_FEATURES_IN_TESTS)]
+ #[test]
+ fn test_align_of_dst() {
+ // Test that `align_of!` correctly computes the alignment of DSTs.
+ assert_eq!(align_of!([elain::Align<1>]), Some(1));
+ assert_eq!(align_of!([elain::Align<2>]), Some(2));
+ assert_eq!(align_of!([elain::Align<4>]), Some(4));
+ assert_eq!(align_of!([elain::Align<8>]), Some(8));
+ assert_eq!(align_of!([elain::Align<16>]), Some(16));
+ assert_eq!(align_of!([elain::Align<32>]), Some(32));
+ assert_eq!(align_of!([elain::Align<64>]), Some(64));
+ assert_eq!(align_of!([elain::Align<128>]), Some(128));
+ assert_eq!(align_of!([elain::Align<256>]), Some(256));
+ assert_eq!(align_of!([elain::Align<512>]), Some(512));
+ assert_eq!(align_of!([elain::Align<1024>]), Some(1024));
+ assert_eq!(align_of!([elain::Align<2048>]), Some(2048));
+ assert_eq!(align_of!([elain::Align<4096>]), Some(4096));
+ assert_eq!(align_of!([elain::Align<8192>]), Some(8192));
+ assert_eq!(align_of!([elain::Align<16384>]), Some(16384));
+ assert_eq!(align_of!([elain::Align<32768>]), Some(32768));
+ assert_eq!(align_of!([elain::Align<65536>]), Some(65536));
+ /* Alignments above 65536 are not yet supported.
+ assert_eq!(align_of!([elain::Align<131072>]), Some(131072));
+ assert_eq!(align_of!([elain::Align<262144>]), Some(262144));
+ assert_eq!(align_of!([elain::Align<524288>]), Some(524288));
+ assert_eq!(align_of!([elain::Align<1048576>]), Some(1048576));
+ assert_eq!(align_of!([elain::Align<2097152>]), Some(2097152));
+ assert_eq!(align_of!([elain::Align<4194304>]), Some(4194304));
+ assert_eq!(align_of!([elain::Align<8388608>]), Some(8388608));
+ assert_eq!(align_of!([elain::Align<16777216>]), Some(16777216));
+ assert_eq!(align_of!([elain::Align<33554432>]), Some(33554432));
+ assert_eq!(align_of!([elain::Align<67108864>]), Some(67108864));
+ assert_eq!(align_of!([elain::Align<33554432>]), Some(33554432));
+ assert_eq!(align_of!([elain::Align<134217728>]), Some(134217728));
+ assert_eq!(align_of!([elain::Align<268435456>]), Some(268435456));
+ */
+ }
+
+ #[test]
+ fn test_struct_has_padding() {
+ // Test that, for each provided repr, `struct_has_padding!` reports the
+ // expected value.
+ macro_rules! test {
+ (#[$cfg:meta] ($($ts:ty),*) => $expect:expr) => {{
+ #[$cfg]
+ struct Test($($ts),*);
+ assert_eq!(struct_has_padding!(Test, $($ts),*), $expect);
+ }};
+ (#[$cfg:meta] $(#[$cfgs:meta])* ($($ts:ty),*) => $expect:expr) => {
+ test!(#[$cfg] ($($ts),*) => $expect);
+ test!($(#[$cfgs])* ($($ts),*) => $expect);
+ };
+ }
+
+ test!(#[repr(C)] #[repr(transparent)] #[repr(packed)] () => false);
+ test!(#[repr(C)] #[repr(transparent)] #[repr(packed)] (u8) => false);
+ test!(#[repr(C)] #[repr(transparent)] #[repr(packed)] (u8, ()) => false);
+ test!(#[repr(C)] #[repr(packed)] (u8, u8) => false);
+
+ test!(#[repr(C)] (u8, AU64) => true);
+ // Rust won't let you put `#[repr(packed)]` on a type which contains a
+ // `#[repr(align(n > 1))]` type (`AU64`), so we have to use `u64` here.
+ // It's not ideal, but it definitely has align > 1 on /some/ of our CI
+ // targets, and this isn't a particularly complex macro we're testing
+ // anyway.
+ test!(#[repr(packed)] (u8, u64) => false);
+ }
+
+ #[test]
+ fn test_union_has_padding() {
+ // Test that, for each provided repr, `union_has_padding!` reports the
+ // expected value.
+ macro_rules! test {
+ (#[$cfg:meta] {$($fs:ident: $ts:ty),*} => $expect:expr) => {{
+ #[$cfg]
+ #[allow(unused)] // fields are never read
+ union Test{ $($fs: $ts),* }
+ assert_eq!(union_has_padding!(Test, $($ts),*), $expect);
+ }};
+ (#[$cfg:meta] $(#[$cfgs:meta])* {$($fs:ident: $ts:ty),*} => $expect:expr) => {
+ test!(#[$cfg] {$($fs: $ts),*} => $expect);
+ test!($(#[$cfgs])* {$($fs: $ts),*} => $expect);
+ };
+ }
+
+ test!(#[repr(C)] #[repr(packed)] {a: u8} => false);
+ test!(#[repr(C)] #[repr(packed)] {a: u8, b: u8} => false);
+
+ // Rust won't let you put `#[repr(packed)]` on a type which contains a
+ // `#[repr(align(n > 1))]` type (`AU64`), so we have to use `u64` here.
+ // It's not ideal, but it definitely has align > 1 on /some/ of our CI
+ // targets, and this isn't a particularly complex macro we're testing
+ // anyway.
+ test!(#[repr(C)] #[repr(packed)] {a: u8, b: u64} => true);
+ }
+}
diff --git a/third_party/rust/zerocopy/src/macros.rs b/third_party/rust/zerocopy/src/macros.rs
new file mode 100644
index 0000000000..2da78af7df
--- /dev/null
+++ b/third_party/rust/zerocopy/src/macros.rs
@@ -0,0 +1,417 @@
+// Copyright 2023 The Fuchsia Authors
+//
+// Licensed under a BSD-style license <LICENSE-BSD>, Apache License, Version 2.0
+// <LICENSE-APACHE or https://www.apache.org/licenses/LICENSE-2.0>, or the MIT
+// license <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your option.
+// This file may not be copied, modified, or distributed except according to
+// those terms.
+
+/// Documents multiple unsafe blocks with a single safety comment.
+///
+/// Invoked as:
+///
+/// ```rust,ignore
+/// safety_comment! {
+/// // Non-doc comments come first.
+/// /// SAFETY:
+/// /// Safety comment starts on its own line.
+/// macro_1!(args);
+/// macro_2! { args };
+/// /// SAFETY:
+/// /// Subsequent safety comments are allowed but not required.
+/// macro_3! { args };
+/// }
+/// ```
+///
+/// The macro invocations are emitted, each decorated with the following
+/// attribute: `#[allow(clippy::undocumented_unsafe_blocks)]`.
+macro_rules! safety_comment {
+ (#[doc = r" SAFETY:"] $($(#[$attr:meta])* $macro:ident!$args:tt;)*) => {
+ #[allow(clippy::undocumented_unsafe_blocks, unused_attributes)]
+ const _: () = { $($(#[$attr])* $macro!$args;)* };
+ }
+}
+
+/// Unsafely implements trait(s) for a type.
+///
+/// # Safety
+///
+/// The trait impl must be sound.
+///
+/// When implementing `TryFromBytes`:
+/// - If no `is_bit_valid` impl is provided, then it must be valid for
+/// `is_bit_valid` to unconditionally return `true`. In other words, it must
+/// be the case that any initialized sequence of bytes constitutes a valid
+/// instance of `$ty`.
+/// - If an `is_bit_valid` impl is provided, then:
+/// - Regardless of whether the provided closure takes a `Ptr<$repr>` or
+/// `&$repr` argument, it must be the case that, given `t: *mut $ty` and
+/// `let r = t as *mut $repr`, `r` refers to an object of equal or lesser
+/// size than the object referred to by `t`.
+/// - If the provided closure takes a `&$repr` argument, then given a `Ptr<'a,
+/// $ty>` which satisfies the preconditions of
+/// `TryFromBytes::<$ty>::is_bit_valid`, it must be guaranteed that the
+/// memory referenced by that `Ptr` always contains a valid `$repr`.
+/// - The alignment of `$repr` is less than or equal to the alignment of
+/// `$ty`.
+/// - The impl of `is_bit_valid` must only return `true` for its argument
+/// `Ptr<$repr>` if the original `Ptr<$ty>` refers to a valid `$ty`.
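+///
+/// # Example
+///
+/// A sketch of the simplest invocation form (elsewhere in this crate,
+/// invocations are wrapped in `safety_comment!`):
+///
+/// ```rust,ignore
+/// safety_comment! {
+///     /// SAFETY:
+///     /// `u8` has no alignment requirement, and all of its bit patterns
+///     /// are valid.
+///     unsafe_impl!(u8: FromZeroes, FromBytes, AsBytes, Unaligned);
+/// }
+/// ```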
+macro_rules! unsafe_impl {
+ // Implement `$trait` for `$ty` with no bounds.
+ ($(#[$attr:meta])* $ty:ty: $trait:ident $(; |$candidate:ident: &$repr:ty| $is_bit_valid:expr)?) => {
+ $(#[$attr])*
+ unsafe impl $trait for $ty {
+ unsafe_impl!(@method $trait $(; |$candidate: &$repr| $is_bit_valid)?);
+ }
+ };
+ // Implement all `$traits` for `$ty` with no bounds.
+ ($ty:ty: $($traits:ident),*) => {
+ $( unsafe_impl!($ty: $traits); )*
+ };
+ // This arm is identical to the following one, except it contains a
+ // preceding `const`. If we attempt to handle these with a single arm, there
+ // is an inherent ambiguity between `const` (the keyword) and `const` (the
+ // ident match for `$tyvar:ident`).
+ //
+ // To explain how this works, consider the following invocation:
+ //
+ // unsafe_impl!(const N: usize, T: ?Sized + Copy => Clone for Foo<T>);
+ //
+ // In this invocation, here are the assignments to meta-variables:
+ //
+ // |---------------|------------|
+ // | Meta-variable | Assignment |
+ // |---------------|------------|
+ // | $constname | N |
+ // | $constty | usize |
+ // | $tyvar | T |
+ // | $optbound | Sized |
+ // | $bound | Copy |
+ // | $trait | Clone |
+ // | $ty | Foo<T> |
+ // |---------------|------------|
+ //
+ // The following arm has the same behavior with the exception of the lack of
+ // support for a leading `const` parameter.
+ (
+ $(#[$attr:meta])*
+ const $constname:ident : $constty:ident $(,)?
+ $($tyvar:ident $(: $(? $optbound:ident $(+)?)* $($bound:ident $(+)?)* )?),*
+ => $trait:ident for $ty:ty $(; |$candidate:ident $(: &$ref_repr:ty)? $(: Ptr<$ptr_repr:ty>)?| $is_bit_valid:expr)?
+ ) => {
+ unsafe_impl!(
+ @inner
+ $(#[$attr])*
+ @const $constname: $constty,
+ $($tyvar $(: $(? $optbound +)* + $($bound +)*)?,)*
+ => $trait for $ty $(; |$candidate $(: &$ref_repr)? $(: Ptr<$ptr_repr>)?| $is_bit_valid)?
+ );
+ };
+ (
+ $(#[$attr:meta])*
+ $($tyvar:ident $(: $(? $optbound:ident $(+)?)* $($bound:ident $(+)?)* )?),*
+ => $trait:ident for $ty:ty $(; |$candidate:ident $(: &$ref_repr:ty)? $(: Ptr<$ptr_repr:ty>)?| $is_bit_valid:expr)?
+ ) => {
+ unsafe_impl!(
+ @inner
+ $(#[$attr])*
+ $($tyvar $(: $(? $optbound +)* + $($bound +)*)?,)*
+ => $trait for $ty $(; |$candidate $(: &$ref_repr)? $(: Ptr<$ptr_repr>)?| $is_bit_valid)?
+ );
+ };
+ (
+ @inner
+ $(#[$attr:meta])*
+ $(@const $constname:ident : $constty:ident,)*
+ $($tyvar:ident $(: $(? $optbound:ident +)* + $($bound:ident +)* )?,)*
+ => $trait:ident for $ty:ty $(; |$candidate:ident $(: &$ref_repr:ty)? $(: Ptr<$ptr_repr:ty>)?| $is_bit_valid:expr)?
+ ) => {
+ $(#[$attr])*
+ unsafe impl<$(const $constname: $constty,)* $($tyvar $(: $(? $optbound +)* $($bound +)*)?),*> $trait for $ty {
+ unsafe_impl!(@method $trait $(; |$candidate: $(&$ref_repr)? $(Ptr<$ptr_repr>)?| $is_bit_valid)?);
+ }
+ };
+
+ (@method TryFromBytes ; |$candidate:ident: &$repr:ty| $is_bit_valid:expr) => {
+ #[inline]
+ unsafe fn is_bit_valid(candidate: Ptr<'_, Self>) -> bool {
+ // SAFETY:
+ // - The argument to `cast_unsized` is `|p| p as *mut _` as required
+ // by that method's safety precondition.
+ // - The caller has promised that the cast results in an object of
+ // equal or lesser size.
+ // - The caller has promised that `$repr`'s alignment is less than
+ // or equal to `Self`'s alignment.
+ #[allow(clippy::as_conversions)]
+ let candidate = unsafe { candidate.cast_unsized::<$repr, _>(|p| p as *mut _) };
+ // SAFETY:
+ // - The caller has promised that the referenced memory region will
+ // contain a valid `$repr` for `'a`.
+ // - The memory may not be referenced by any mutable references.
+ // This is a precondition of `is_bit_valid`.
+ // - The memory may not be mutated even via `UnsafeCell`s. This is a
+ // precondition of `is_bit_valid`.
+ // - There must not exist any references to the same memory region
+ // which contain `UnsafeCell`s at byte ranges which are not
+ // identical to the byte ranges at which `T` contains
+ // `UnsafeCell`s. This is a precondition of `is_bit_valid`.
+ let $candidate: &$repr = unsafe { candidate.as_ref() };
+ $is_bit_valid
+ }
+ };
+ (@method TryFromBytes ; |$candidate:ident: Ptr<$repr:ty>| $is_bit_valid:expr) => {
+ #[inline]
+ unsafe fn is_bit_valid(candidate: Ptr<'_, Self>) -> bool {
+ // SAFETY:
+ // - The argument to `cast_unsized` is `|p| p as *mut _` as required
+ // by that method's safety precondition.
+ // - The caller has promised that the cast results in an object of
+ // equal or lesser size.
+ // - The caller has promised that `$repr`'s alignment is less than
+ // or equal to `Self`'s alignment.
+ #[allow(clippy::as_conversions)]
+ let $candidate = unsafe { candidate.cast_unsized::<$repr, _>(|p| p as *mut _) };
+ $is_bit_valid
+ }
+ };
+ (@method TryFromBytes) => { #[inline(always)] unsafe fn is_bit_valid(_: Ptr<'_, Self>) -> bool { true } };
+ (@method $trait:ident) => {
+ #[allow(clippy::missing_inline_in_public_items)]
+ fn only_derive_is_allowed_to_implement_this_trait() {}
+ };
+ (@method $trait:ident; |$_candidate:ident $(: &$_ref_repr:ty)? $(: NonNull<$_ptr_repr:ty>)?| $_is_bit_valid:expr) => {
+ compile_error!("Can't provide `is_bit_valid` impl for trait other than `TryFromBytes`");
+ };
+}
+
+/// Implements a trait for a type, bounding on each member of the power set of
+/// a set of type variables. This is useful for implementing traits for tuples
+/// or `fn` types.
+///
+/// The last argument is the name of a macro which will be called in every
+/// `impl` block, and is expected to expand to the name of the type for which to
+/// implement the trait.
+///
+/// For example, the invocation:
+/// ```ignore
+/// unsafe_impl_for_power_set!(A, B => Foo for type!(...))
+/// ```
+/// ...expands to:
+/// ```ignore
+/// unsafe impl Foo for type!() { ... }
+/// unsafe impl<B> Foo for type!(B) { ... }
+/// unsafe impl<A, B> Foo for type!(A, B) { ... }
+/// ```
+macro_rules! unsafe_impl_for_power_set {
+ ($first:ident $(, $rest:ident)* $(-> $ret:ident)? => $trait:ident for $macro:ident!(...)) => {
+ unsafe_impl_for_power_set!($($rest),* $(-> $ret)? => $trait for $macro!(...));
+ unsafe_impl_for_power_set!(@impl $first $(, $rest)* $(-> $ret)? => $trait for $macro!(...));
+ };
+ ($(-> $ret:ident)? => $trait:ident for $macro:ident!(...)) => {
+ unsafe_impl_for_power_set!(@impl $(-> $ret)? => $trait for $macro!(...));
+ };
+ (@impl $($vars:ident),* $(-> $ret:ident)? => $trait:ident for $macro:ident!(...)) => {
+ unsafe impl<$($vars,)* $($ret)?> $trait for $macro!($($vars),* $(-> $ret)?) {
+ #[allow(clippy::missing_inline_in_public_items)]
+ fn only_derive_is_allowed_to_implement_this_trait() {}
+ }
+ };
+}
+
+/// Expands to an `Option<extern "C" fn>` type with the given argument types and
+/// return type. Designed for use with `unsafe_impl_for_power_set`.
+macro_rules! opt_extern_c_fn {
+ ($($args:ident),* -> $ret:ident) => { Option<extern "C" fn($($args),*) -> $ret> };
+}
+
+/// Expands to an `Option<fn>` type with the given argument types and return
+/// type. Designed for use with `unsafe_impl_for_power_set`.
+macro_rules! opt_fn {
+ ($($args:ident),* -> $ret:ident) => { Option<fn($($args),*) -> $ret> };
+}
+
+/// Implements trait(s) for a type or verifies the given implementation by
+/// referencing an existing (derived) implementation.
+///
+/// This macro exists so that we can provide zerocopy-derive as an optional
+/// dependency and still get the benefit of using its derives to validate that
+/// our trait impls are sound.
+///
+/// When compiling without `--cfg 'feature = "derive"'` and without `--cfg test`,
+/// `impl_or_verify!` emits the provided trait impl. When compiling with either
+/// of those cfgs, it is expected that the type in question is deriving the
+/// traits instead. In this case, `impl_or_verify!` emits code which validates
+/// that the given trait impl is at least as restrictive as the impl emitted
+/// by the custom derive. This has the effect of confirming that the impl which
+/// is emitted when the `derive` feature is disabled is actually sound (on the
+/// assumption that the impl emitted by the custom derive is sound).
+///
+/// The caller is still required to provide a safety comment (e.g. using the
+/// `safety_comment!` macro). The reason for this restriction is that, while
+/// `impl_or_verify!` can guarantee that the provided impl is sound when it is
+/// compiled with the appropriate cfgs, there is no way to guarantee that it is
+/// ever compiled with those cfgs. In particular, it would be possible to
+/// accidentally place an `impl_or_verify!` call in a context that is only ever
+/// compiled when the `derive` feature is disabled. If that were to happen,
+/// there would be nothing to prevent an unsound trait impl from being emitted.
+/// Requiring a safety comment reduces the likelihood of emitting an unsound
+/// impl in this case, and also provides useful documentation for readers of the
+/// code.
+///
+/// ## Example
+///
+/// ```rust,ignore
+/// // Note that these derives are gated by `feature = "derive"`
+/// #[cfg_attr(any(feature = "derive", test), derive(FromZeroes, FromBytes, AsBytes, Unaligned))]
+/// #[repr(transparent)]
+/// struct Wrapper<T>(T);
+///
+/// safety_comment! {
+/// /// SAFETY:
+/// /// `Wrapper<T>` is `repr(transparent)`, so it is sound to implement any
+/// /// zerocopy trait if `T` implements that trait.
+/// impl_or_verify!(T: FromZeroes => FromZeroes for Wrapper<T>);
+/// impl_or_verify!(T: FromBytes => FromBytes for Wrapper<T>);
+/// impl_or_verify!(T: AsBytes => AsBytes for Wrapper<T>);
+/// impl_or_verify!(T: Unaligned => Unaligned for Wrapper<T>);
+/// }
+/// ```
+macro_rules! impl_or_verify {
+ // The following two match arms follow the same pattern as their
+ // counterparts in `unsafe_impl!`; see the documentation on those arms for
+ // more details.
+ (
+ const $constname:ident : $constty:ident $(,)?
+ $($tyvar:ident $(: $(? $optbound:ident $(+)?)* $($bound:ident $(+)?)* )?),*
+ => $trait:ident for $ty:ty
+ ) => {
+ impl_or_verify!(@impl { unsafe_impl!(
+ const $constname: $constty, $($tyvar $(: $(? $optbound +)* $($bound +)*)?),* => $trait for $ty
+ ); });
+ impl_or_verify!(@verify $trait, {
+ impl<const $constname: $constty, $($tyvar $(: $(? $optbound +)* $($bound +)*)?),*> Subtrait for $ty {}
+ });
+ };
+ (
+ $($tyvar:ident $(: $(? $optbound:ident $(+)?)* $($bound:ident $(+)?)* )?),*
+ => $trait:ident for $ty:ty
+ ) => {
+ impl_or_verify!(@impl { unsafe_impl!(
+ $($tyvar $(: $(? $optbound +)* $($bound +)*)?),* => $trait for $ty
+ ); });
+ impl_or_verify!(@verify $trait, {
+ impl<$($tyvar $(: $(? $optbound +)* $($bound +)*)?),*> Subtrait for $ty {}
+ });
+ };
+ (
+ $($tyvar:ident $(: $(? $optbound:ident $(+)?)* $($bound:ident $(+)?)* )?),*
+ => $trait:ident for $ty:ty
+ ) => {
+ unsafe_impl!(
+ @inner
+ $($tyvar $(: $(? $optbound +)* + $($bound +)*)?,)*
+ => $trait for $ty
+ );
+ };
+ (@impl $impl_block:tt) => {
+ #[cfg(not(any(feature = "derive", test)))]
+ const _: () = { $impl_block };
+ };
+ (@verify $trait:ident, $impl_block:tt) => {
+ #[cfg(any(feature = "derive", test))]
+ const _: () = {
+ trait Subtrait: $trait {}
+ $impl_block
+ };
+ };
+}
+
+/// Implements `KnownLayout` for a sized type.
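+///
+/// A sketch of the supported invocation forms, mirroring the macro arms
+/// below:
+///
+/// ```rust,ignore
+/// impl_known_layout!(u8, u16, u32);
+/// impl_known_layout!(T => Option<T>);
+/// impl_known_layout!(const N: usize, T => [T; N]);
+/// ```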
+macro_rules! impl_known_layout {
+ ($(const $constvar:ident : $constty:ty, $tyvar:ident $(: ?$optbound:ident)? => $ty:ty),* $(,)?) => {
+ $(impl_known_layout!(@inner const $constvar: $constty, $tyvar $(: ?$optbound)? => $ty);)*
+ };
+ ($($tyvar:ident $(: ?$optbound:ident)? => $ty:ty),* $(,)?) => {
+ $(impl_known_layout!(@inner , $tyvar $(: ?$optbound)? => $ty);)*
+ };
+ ($($ty:ty),*) => { $(impl_known_layout!(@inner , => $ty);)* };
+ (@inner $(const $constvar:ident : $constty:ty)? , $($tyvar:ident $(: ?$optbound:ident)?)? => $ty:ty) => {
+ const _: () = {
+ use core::ptr::NonNull;
+
+ // SAFETY: Delegates safety to `DstLayout::for_type`.
+ unsafe impl<$(const $constvar : $constty,)? $($tyvar $(: ?$optbound)?)?> KnownLayout for $ty {
+ #[allow(clippy::missing_inline_in_public_items)]
+ fn only_derive_is_allowed_to_implement_this_trait() where Self: Sized {}
+
+ const LAYOUT: DstLayout = DstLayout::for_type::<$ty>();
+
+ // SAFETY: `.cast` preserves address and provenance.
+ //
+ // TODO(#429): Add documentation to `.cast` that promises that
+ // it preserves provenance.
+ #[inline(always)]
+ fn raw_from_ptr_len(bytes: NonNull<u8>, _elems: usize) -> NonNull<Self> {
+ bytes.cast::<Self>()
+ }
+ }
+ };
+ };
+}
+
+/// Implements `KnownLayout` for a type in terms of the implementation of
+/// another type with the same representation.
+///
+/// # Safety
+///
+/// - `$ty` and `$repr` must have the same:
+/// - Fixed prefix size
+/// - Alignment
+/// - (For DSTs) trailing slice element size
+/// - It must be valid to perform an `as` cast from `*mut $repr` to `*mut $ty`,
+///   and this operation must preserve referent size (i.e., `size_of_val_raw`).
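+///
+/// For example (a sketch; `str` has the same representation as `[u8]`, so its
+/// `KnownLayout` impl can be expressed in terms of `[u8]`'s):
+///
+/// ```rust,ignore
+/// unsafe_impl_known_layout!(#[repr([u8])] str);
+/// ```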
+macro_rules! unsafe_impl_known_layout {
+ ($($tyvar:ident: ?Sized + KnownLayout =>)? #[repr($repr:ty)] $ty:ty) => {
+ const _: () = {
+ use core::ptr::NonNull;
+
+ unsafe impl<$($tyvar: ?Sized + KnownLayout)?> KnownLayout for $ty {
+ #[allow(clippy::missing_inline_in_public_items)]
+ fn only_derive_is_allowed_to_implement_this_trait() {}
+
+ const LAYOUT: DstLayout = <$repr as KnownLayout>::LAYOUT;
+
+ // SAFETY: All operations preserve address and provenance.
+ // Caller has promised that the `as` cast preserves size.
+ //
+ // TODO(#429): Add documentation to `NonNull::new_unchecked`
+ // that it preserves provenance.
+ #[inline(always)]
+ #[allow(unused_qualifications)] // for `core::ptr::NonNull`
+ fn raw_from_ptr_len(bytes: NonNull<u8>, elems: usize) -> NonNull<Self> {
+ #[allow(clippy::as_conversions)]
+ let ptr = <$repr>::raw_from_ptr_len(bytes, elems).as_ptr() as *mut Self;
+ // SAFETY: `ptr` was converted from `bytes`, which is non-null.
+ unsafe { NonNull::new_unchecked(ptr) }
+ }
+ }
+ };
+ };
+}
+
+/// Uses `align_of` to confirm that a type or set of types have alignment 1.
+///
+/// Note that `align_of::<T>()` requires `T: Sized`, so this macro doesn't work for
+/// unsized types.
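+///
+/// For example (a sketch; `u8`, `i8`, and `bool` are all guaranteed to have
+/// alignment 1):
+///
+/// ```rust,ignore
+/// assert_unaligned!(u8, i8, bool);
+/// ```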
+macro_rules! assert_unaligned {
+ ($ty:ty) => {
+ // We only compile this assertion under `cfg(test)` to avoid taking an
+ // extra non-dev dependency (and making this crate more expensive to
+ // compile for our dependents).
+ #[cfg(test)]
+ static_assertions::const_assert_eq!(core::mem::align_of::<$ty>(), 1);
+ };
+ ($($ty:ty),*) => {
+ $(assert_unaligned!($ty);)*
+ };
+}
diff --git a/third_party/rust/zerocopy/src/post_monomorphization_compile_fail_tests.rs b/third_party/rust/zerocopy/src/post_monomorphization_compile_fail_tests.rs
new file mode 100644
index 0000000000..32505b6693
--- /dev/null
+++ b/third_party/rust/zerocopy/src/post_monomorphization_compile_fail_tests.rs
@@ -0,0 +1,118 @@
+// Copyright 2018 The Fuchsia Authors
+//
+// Licensed under the 2-Clause BSD License <LICENSE-BSD or
+// https://opensource.org/license/bsd-2-clause>, Apache License, Version 2.0
+// <LICENSE-APACHE or https://www.apache.org/licenses/LICENSE-2.0>, or the MIT
+// license <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your option.
+// This file may not be copied, modified, or distributed except according to
+// those terms.
+
+//! Code that should fail to compile during the post-monomorphization compiler
+//! pass.
+//!
+//! Due to [a limitation with the `trybuild` crate][trybuild-issue], we cannot
+//! use our UI testing framework to test compilation failures that are
+//! encountered after monomorphization has completed. This module has one item
+//! for each such test we would prefer to have as a UI test, with the code in
+//! question appearing as a rustdoc example which is marked with `compile_fail`.
+//! This has the effect of causing doctests to fail if any of these examples
+//! compile successfully.
+//!
+//! This is very much a hack and not a complete replacement for UI tests - most
+//! notably because this only provides a single "compile vs fail" bit of
+//! information, but does not allow us to depend upon the specific error that
+//! causes compilation to fail.
+//!
+//! [trybuild-issue]: https://github.com/dtolnay/trybuild/issues/241
+
+// Miri doesn't detect post-monomorphization failures as compile-time failures,
+// but instead as runtime failures.
+#![cfg(not(miri))]
+
+/// ```compile_fail
+/// use core::cell::{Ref, RefCell};
+///
+/// let refcell = RefCell::new([0u8, 1, 2, 3]);
+/// let core_ref = refcell.borrow();
+/// let core_ref = Ref::map(core_ref, |bytes| &bytes[..]);
+///
+/// // `zc_ref` now stores `core_ref` internally.
+/// let zc_ref = zerocopy::Ref::<_, u32>::new(core_ref).unwrap();
+///
+/// // This causes `core_ref` to get dropped and synthesizes a Rust
+/// // reference to the memory `core_ref` was pointing at.
+/// let rust_ref = zc_ref.into_ref();
+///
+/// // UB!!! This mutates `rust_ref`'s referent while it's alive.
+/// *refcell.borrow_mut() = [0, 0, 0, 0];
+///
+/// println!("{}", rust_ref);
+/// ```
+#[allow(unused)]
+const REFCELL_REF_INTO_REF: () = ();
+
+/// ```compile_fail
+/// use core::cell::{RefCell, RefMut};
+///
+/// let refcell = RefCell::new([0u8, 1, 2, 3]);
+/// let core_ref_mut = refcell.borrow_mut();
+/// let core_ref_mut = RefMut::map(core_ref_mut, |bytes| &mut bytes[..]);
+///
+/// // `zc_ref` now stores `core_ref_mut` internally.
+/// let zc_ref = zerocopy::Ref::<_, u32>::new(core_ref_mut).unwrap();
+///
+/// // This causes `core_ref_mut` to get dropped and synthesizes a Rust
+/// // reference to the memory `core_ref_mut` was pointing at.
+/// let rust_ref_mut = zc_ref.into_mut();
+///
+/// // UB!!! This mutates `rust_ref_mut`'s referent while it's alive.
+/// *refcell.borrow_mut() = [0, 0, 0, 0];
+///
+/// println!("{}", rust_ref_mut);
+/// ```
+#[allow(unused)]
+const REFCELL_REFMUT_INTO_MUT: () = ();
+
+/// ```compile_fail
+/// use core::cell::{Ref, RefCell};
+///
+/// let refcell = RefCell::new([0u8, 1, 2, 3]);
+/// let core_ref = refcell.borrow();
+/// let core_ref = Ref::map(core_ref, |bytes| &bytes[..]);
+///
+/// // `zc_ref` now stores `core_ref` internally.
+/// let zc_ref = zerocopy::Ref::<_, [u16]>::new_slice(core_ref).unwrap();
+///
+/// // This causes `core_ref` to get dropped and synthesizes a Rust
+/// // reference to the memory `core_ref` was pointing at.
+/// let rust_ref = zc_ref.into_slice();
+///
+/// // UB!!! This mutates `rust_ref`'s referent while it's alive.
+/// *refcell.borrow_mut() = [0, 0, 0, 0];
+///
+/// println!("{:?}", rust_ref);
+/// ```
+#[allow(unused)]
+const REFCELL_REF_INTO_SLICE: () = ();
+
+/// ```compile_fail
+/// use core::cell::{RefCell, RefMut};
+///
+/// let refcell = RefCell::new([0u8, 1, 2, 3]);
+/// let core_ref_mut = refcell.borrow_mut();
+/// let core_ref_mut = RefMut::map(core_ref_mut, |bytes| &mut bytes[..]);
+///
+/// // `zc_ref` now stores `core_ref_mut` internally.
+/// let zc_ref = zerocopy::Ref::<_, [u16]>::new_slice(core_ref_mut).unwrap();
+///
+/// // This causes `core_ref_mut` to get dropped and synthesizes a Rust
+/// // reference to the memory `core_ref_mut` was pointing at.
+/// let rust_ref_mut = zc_ref.into_mut_slice();
+///
+/// // UB!!! This mutates `rust_ref_mut`'s referent while it's alive.
+/// *refcell.borrow_mut() = [0, 0, 0, 0];
+///
+/// println!("{:?}", rust_ref_mut);
+/// ```
+#[allow(unused)]
+const REFCELL_REFMUT_INTO_MUT_SLICE: () = ();
diff --git a/third_party/rust/zerocopy/src/third_party/rust/LICENSE-APACHE b/third_party/rust/zerocopy/src/third_party/rust/LICENSE-APACHE
new file mode 100644
index 0000000000..1b5ec8b78e
--- /dev/null
+++ b/third_party/rust/zerocopy/src/third_party/rust/LICENSE-APACHE
@@ -0,0 +1,176 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
diff --git a/third_party/rust/zerocopy/src/third_party/rust/LICENSE-MIT b/third_party/rust/zerocopy/src/third_party/rust/LICENSE-MIT
new file mode 100644
index 0000000000..31aa79387f
--- /dev/null
+++ b/third_party/rust/zerocopy/src/third_party/rust/LICENSE-MIT
@@ -0,0 +1,23 @@
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/third_party/rust/zerocopy/src/third_party/rust/README.fuchsia b/third_party/rust/zerocopy/src/third_party/rust/README.fuchsia
new file mode 100644
index 0000000000..e0a23dd8e5
--- /dev/null
+++ b/third_party/rust/zerocopy/src/third_party/rust/README.fuchsia
@@ -0,0 +1,7 @@
+Name: rust
+License File: LICENSE-APACHE
+License File: LICENSE-MIT
+Description:
+
+See https://github.com/google/zerocopy/pull/492 for an explanation of why this
+file exists.
diff --git a/third_party/rust/zerocopy/src/third_party/rust/layout.rs b/third_party/rust/zerocopy/src/third_party/rust/layout.rs
new file mode 100644
index 0000000000..19ef7c6982
--- /dev/null
+++ b/third_party/rust/zerocopy/src/third_party/rust/layout.rs
@@ -0,0 +1,45 @@
+use core::num::NonZeroUsize;
+
+/// Returns the amount of padding we must insert after `len` bytes to ensure
+/// that the following address will satisfy `align` (measured in bytes).
+///
+/// e.g., if `len` is 9, then `padding_needed_for(len, 4)` returns 3, because
+/// that is the minimum number of bytes of padding required to get a 4-aligned
+/// address (assuming that the corresponding memory block starts at a 4-aligned
+/// address).
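+///
+/// A doc-test-style sketch of the same example (marked `ignore` here because
+/// this function is crate-private):
+///
+/// ```rust,ignore
+/// use core::num::NonZeroUsize;
+///
+/// let align = NonZeroUsize::new(4).unwrap();
+/// assert_eq!(padding_needed_for(9, align), 3);  // 9 + 3 == 12, 4-aligned
+/// assert_eq!(padding_needed_for(12, align), 0); // already 4-aligned
+/// ```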
+///
+/// The return value of this function has no meaning if `align` is not a
+/// power-of-two.
+///
+/// # Panics
+///
+/// May panic if `align` is not a power of two.
+//
+// TODO(#419): Replace `len` with a witness type for region size.
+#[allow(unused)]
+#[inline(always)]
+pub(crate) const fn padding_needed_for(len: usize, align: NonZeroUsize) -> usize {
+ // Rounded up value is:
+ // len_rounded_up = (len + align - 1) & !(align - 1);
+ // and then we return the padding difference: `len_rounded_up - len`.
+ //
+ // We use modular arithmetic throughout:
+ //
+ // 1. align is guaranteed to be > 0, so align - 1 is always
+ // valid.
+ //
+ // 2. `len + align - 1` can overflow by at most `align - 1`,
+ // so the &-mask with `!(align - 1)` will ensure that in the
+ // case of overflow, `len_rounded_up` will itself be 0.
+ // Thus the returned padding, when added to `len`, yields 0,
+ // which trivially satisfies the alignment `align`.
+ //
+ // (Of course, attempts to allocate blocks of memory whose
+ // size and padding overflow in the above manner should cause
+ // the allocator to yield an error anyway.)
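+    //
+    // Worked overflow example (illustrative): with `len == usize::MAX` and
+    // `align == 2`, `len.wrapping_add(2).wrapping_sub(1)` wraps to 0, the
+    // mask keeps it 0, and the returned padding is
+    // `0usize.wrapping_sub(usize::MAX) == 1`; `len + 1` then wraps to 0,
+    // which is trivially 2-aligned.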
+
+ let align = align.get();
+ debug_assert!(align.is_power_of_two());
+ let len_rounded_up = len.wrapping_add(align).wrapping_sub(1) & !align.wrapping_sub(1);
+ len_rounded_up.wrapping_sub(len)
+}
diff --git a/third_party/rust/zerocopy/src/util.rs b/third_party/rust/zerocopy/src/util.rs
new file mode 100644
index 0000000000..b35cc079c1
--- /dev/null
+++ b/third_party/rust/zerocopy/src/util.rs
@@ -0,0 +1,808 @@
+// Copyright 2023 The Fuchsia Authors
+//
+// Licensed under a BSD-style license <LICENSE-BSD>, Apache License, Version 2.0
+// <LICENSE-APACHE or https://www.apache.org/licenses/LICENSE-2.0>, or the MIT
+// license <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your option.
+// This file may not be copied, modified, or distributed except according to
+// those terms.
+
+#[path = "third_party/rust/layout.rs"]
+pub(crate) mod core_layout;
+
+use core::{mem, num::NonZeroUsize};
+
+pub(crate) mod ptr {
+ use core::{
+ fmt::{Debug, Formatter},
+ marker::PhantomData,
+ ptr::NonNull,
+ };
+
+ use crate::{util::AsAddress, KnownLayout, _CastType};
+
+ /// A raw pointer with more restrictions.
+ ///
+ /// `Ptr<T>` is similar to `NonNull<T>`, but it is more restrictive in the
+ /// following ways:
+ /// - It must derive from a valid allocation
+ /// - It must reference a byte range which is contained inside the
+ /// allocation from which it derives
+ /// - As a consequence, the byte range it references must have a size
+ /// which does not overflow `isize`
+ /// - It must satisfy `T`'s alignment requirement
+ ///
+ /// Thanks to these restrictions, it is easier to prove the soundness of
+ /// some operations using `Ptr`s.
+ ///
+ /// `Ptr<'a, T>` is [covariant] in `'a` and `T`.
+ ///
+ /// [covariant]: https://doc.rust-lang.org/reference/subtyping.html
+ pub struct Ptr<'a, T: 'a + ?Sized> {
+ // INVARIANTS:
+ // 1. `ptr` is derived from some valid Rust allocation, `A`
+ // 2. `ptr` has the same provenance as `A`
+ // 3. `ptr` addresses a byte range which is entirely contained in `A`
+ // 4. `ptr` addresses a byte range whose length fits in an `isize`
+ // 5. `ptr` addresses a byte range which does not wrap around the address
+ // space
+ // 6. `ptr` is validly-aligned for `T`
+ // 7. `A` is guaranteed to live for at least `'a`
+ // 8. `T: 'a`
+ ptr: NonNull<T>,
+ _lifetime: PhantomData<&'a ()>,
+ }
+
+ impl<'a, T: ?Sized> Copy for Ptr<'a, T> {}
+ impl<'a, T: ?Sized> Clone for Ptr<'a, T> {
+ #[inline]
+ fn clone(&self) -> Self {
+ *self
+ }
+ }
+
+ impl<'a, T: ?Sized> Ptr<'a, T> {
+ /// Returns a shared reference to the value.
+ ///
+ /// # Safety
+ ///
+ /// For the duration of `'a`:
+        /// - The referenced memory must contain a validly-initialized `T`.
+ /// - The referenced memory must not also be referenced by any mutable
+ /// references.
+ /// - The referenced memory must not be mutated, even via an
+ /// [`UnsafeCell`].
+ /// - There must not exist any references to the same memory region
+ /// which contain `UnsafeCell`s at byte ranges which are not identical
+ /// to the byte ranges at which `T` contains `UnsafeCell`s.
+ ///
+ /// [`UnsafeCell`]: core::cell::UnsafeCell
+ // TODO(#429): The safety requirements are likely overly-restrictive.
+ // Notably, mutation via `UnsafeCell`s is probably fine. Once the rules
+ // are more clearly defined, we should relax the safety requirements.
+ // For an example of why this is subtle, see:
+ // https://github.com/rust-lang/unsafe-code-guidelines/issues/463#issuecomment-1736771593
+ #[allow(unused)]
+ pub(crate) unsafe fn as_ref(&self) -> &'a T {
+ // SAFETY:
+ // - By invariant, `self.ptr` is properly-aligned for `T`.
+ // - By invariant, `self.ptr` is "dereferenceable" in that it points
+ // to a single allocation.
+ // - By invariant, the allocation is live for `'a`.
+ // - The caller promises that no mutable references exist to this
+ // region during `'a`.
+ // - The caller promises that `UnsafeCell`s match exactly.
+ // - The caller promises that no mutation will happen during `'a`,
+ // even via `UnsafeCell`s.
+ // - The caller promises that the memory region contains a
+            //   validly-initialized `T`.
+ unsafe { self.ptr.as_ref() }
+ }
+
+ /// Casts to a different (unsized) target type.
+ ///
+ /// # Safety
+ ///
+ /// The caller promises that
+ /// - `cast(p)` is implemented exactly as follows: `|p: *mut T| p as
+ /// *mut U`.
+ /// - The size of the object referenced by the resulting pointer is less
+ /// than or equal to the size of the object referenced by `self`.
+ /// - The alignment of `U` is less than or equal to the alignment of
+ /// `T`.
+ pub(crate) unsafe fn cast_unsized<U: 'a + ?Sized, F: FnOnce(*mut T) -> *mut U>(
+ self,
+ cast: F,
+ ) -> Ptr<'a, U> {
+ let ptr = cast(self.ptr.as_ptr());
+ // SAFETY: Caller promises that `cast` is just an `as` cast. We call
+ // `cast` on `self.ptr.as_ptr()`, which is non-null by construction.
+ let ptr = unsafe { NonNull::new_unchecked(ptr) };
+ // SAFETY:
+ // - By invariant, `self.ptr` is derived from some valid Rust
+ // allocation, and since `ptr` is just `self.ptr as *mut U`, so is
+ // `ptr`.
+ // - By invariant, `self.ptr` has the same provenance as `A`, and so
+ // the same is true of `ptr`.
+ // - By invariant, `self.ptr` addresses a byte range which is
+ // entirely contained in `A`, and so the same is true of `ptr`.
+ // - By invariant, `self.ptr` addresses a byte range whose length
+ // fits in an `isize`, and so the same is true of `ptr`.
+ // - By invariant, `self.ptr` addresses a byte range which does not
+ // wrap around the address space, and so the same is true of
+ // `ptr`.
+ // - By invariant, `self.ptr` is validly-aligned for `T`. Since
+ // `ptr` has the same address, and since the caller promises that
+ // the alignment of `U` is less than or equal to the alignment of
+ // `T`, `ptr` is validly-aligned for `U`.
+ // - By invariant, `A` is guaranteed to live for at least `'a`.
+ // - `U: 'a`
+ Ptr { ptr, _lifetime: PhantomData }
+ }
+ }
+
+ impl<'a> Ptr<'a, [u8]> {
+ /// Attempts to cast `self` to a `U` using the given cast type.
+ ///
+ /// Returns `None` if the resulting `U` would be invalidly-aligned or if
+ /// no `U` can fit in `self`. On success, returns a pointer to the
+ /// largest-possible `U` which fits in `self`.
+ ///
+ /// # Safety
+ ///
+ /// The caller may assume that this implementation is correct, and may
+ /// rely on that assumption for the soundness of their code. In
+ /// particular, the caller may assume that, if `try_cast_into` returns
+ /// `Some((ptr, split_at))`, then:
+ /// - If this is a prefix cast, `ptr` refers to the byte range `[0,
+ /// split_at)` in `self`.
+ /// - If this is a suffix cast, `ptr` refers to the byte range
+ /// `[split_at, self.len())` in `self`.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `U` is a DST whose trailing slice element is zero-sized.
+ pub(crate) fn try_cast_into<U: 'a + ?Sized + KnownLayout>(
+ &self,
+ cast_type: _CastType,
+ ) -> Option<(Ptr<'a, U>, usize)> {
+ // PANICS: By invariant, the byte range addressed by `self.ptr` does
+ // not wrap around the address space. This implies that the sum of
+ // the address (represented as a `usize`) and length do not overflow
+ // `usize`, as required by `validate_cast_and_convert_metadata`.
+ // Thus, this call to `validate_cast_and_convert_metadata` won't
+ // panic.
+ let (elems, split_at) = U::LAYOUT.validate_cast_and_convert_metadata(
+ AsAddress::addr(self.ptr.as_ptr()),
+ self.len(),
+ cast_type,
+ )?;
+ let offset = match cast_type {
+ _CastType::_Prefix => 0,
+ _CastType::_Suffix => split_at,
+ };
+
+ let ptr = self.ptr.cast::<u8>().as_ptr();
+ // SAFETY: `offset` is either `0` or `split_at`.
+ // `validate_cast_and_convert_metadata` promises that `split_at` is
+ // in the range `[0, self.len()]`. Thus, in both cases, `offset` is
+ // in `[0, self.len()]`. Thus:
+ // - The resulting pointer is in or one byte past the end of the
+ // same byte range as `self.ptr`. Since, by invariant, `self.ptr`
+ // addresses a byte range entirely contained within a single
+ // allocation, the pointer resulting from this operation is within
+ // or one byte past the end of that same allocation.
+ // - By invariant, `self.len() <= isize::MAX`. Since `offset <=
+ // self.len()`, `offset <= isize::MAX`.
+ // - By invariant, `self.ptr` addresses a byte range which does not
+ // wrap around the address space. This means that the base pointer
+            //   plus `self.len()` does not overflow `usize`. Since `offset
+ // <= self.len()`, this addition does not overflow `usize`.
+ let base = unsafe { ptr.add(offset) };
+ // SAFETY: Since `add` is not allowed to wrap around, the preceding line
+ // produces a pointer whose address is greater than or equal to that of
+ // `ptr`. Since `ptr` is a `NonNull`, `base` is also non-null.
+ let base = unsafe { NonNull::new_unchecked(base) };
+ let ptr = U::raw_from_ptr_len(base, elems);
+ // SAFETY:
+ // - By invariant, `self.ptr` is derived from some valid Rust
+ // allocation, `A`, and has the same provenance as `A`. All
+ // operations performed on `self.ptr` and values derived from it
+ // in this method preserve provenance, so:
+ // - `ptr` is derived from a valid Rust allocation, `A`.
+ // - `ptr` has the same provenance as `A`.
+ // - `validate_cast_and_convert_metadata` promises that the object
+ // described by `elems` and `split_at` lives at a byte range which
+ // is a subset of the input byte range. Thus:
+ // - Since, by invariant, `self.ptr` addresses a byte range
+ // entirely contained in `A`, so does `ptr`.
+ // - Since, by invariant, `self.ptr` addresses a range whose
+ // length is not longer than `isize::MAX` bytes, so does `ptr`.
+ // - Since, by invariant, `self.ptr` addresses a range which does
+ // not wrap around the address space, so does `ptr`.
+ // - `validate_cast_and_convert_metadata` promises that the object
+ // described by `split_at` is validly-aligned for `U`.
+ // - By invariant on `self`, `A` is guaranteed to live for at least
+ // `'a`.
+ // - `U: 'a` by trait bound.
+ Some((Ptr { ptr, _lifetime: PhantomData }, split_at))
+ }
+
+        /// Attempts to cast `self` into a `U`, failing unless all of the
+        /// bytes of `self` can be treated as a `U`.
+ ///
+ /// In particular, this method fails if `self` is not validly-aligned
+ /// for `U` or if `self`'s size is not a valid size for `U`.
+ ///
+ /// # Safety
+ ///
+ /// On success, the caller may assume that the returned pointer
+ /// references the same byte range as `self`.
+ #[allow(unused)]
+ #[inline(always)]
+ pub(crate) fn try_cast_into_no_leftover<U: 'a + ?Sized + KnownLayout>(
+ &self,
+ ) -> Option<Ptr<'a, U>> {
+            // TODO(#67): Remove this allow. See the `NonNullExt` polyfill
+            // for more details.
+ #[allow(unstable_name_collisions)]
+ match self.try_cast_into(_CastType::_Prefix) {
+ Some((slf, split_at)) if split_at == self.len() => Some(slf),
+ Some(_) | None => None,
+ }
+ }
+ }
+
+ impl<'a, T> Ptr<'a, [T]> {
+ /// The number of slice elements referenced by `self`.
+ ///
+ /// # Safety
+ ///
+        /// Unsafe code may rely on `len` satisfying the above contract.
+ fn len(&self) -> usize {
+ #[allow(clippy::as_conversions)]
+ let slc = self.ptr.as_ptr() as *const [()];
+ // SAFETY:
+ // - `()` has alignment 1, so `slc` is trivially aligned.
+ // - `slc` was derived from a non-null pointer.
+ // - The size is 0 regardless of the length, so it is sound to
+ // materialize a reference regardless of location.
+ // - By invariant, `self.ptr` has valid provenance.
+ let slc = unsafe { &*slc };
+ // This is correct because the preceding `as` cast preserves the
+ // number of slice elements. Per
+ // https://doc.rust-lang.org/nightly/reference/expressions/operator-expr.html#slice-dst-pointer-to-pointer-cast:
+ //
+ // For slice types like `[T]` and `[U]`, the raw pointer types
+ // `*const [T]`, `*mut [T]`, `*const [U]`, and `*mut [U]` encode
+ // the number of elements in this slice. Casts between these raw
+ // pointer types preserve the number of elements. Note that, as a
+ // consequence, such casts do *not* necessarily preserve the size
+ // of the pointer's referent (e.g., casting `*const [u16]` to
+ // `*const [u8]` will result in a raw pointer which refers to an
+ // object of half the size of the original). The same holds for
+ // `str` and any compound type whose unsized tail is a slice type,
+ // such as struct `Foo(i32, [u8])` or `(u64, Foo)`.
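+            //
+            // e.g. (illustrative, not from the reference): casting a
+            // `*const [u16]` with 3 elements to `*const [()]` preserves
+            // `len() == 3` while shrinking the referent from 6 bytes to 0.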
+ //
+ // TODO(#429),
+ // TODO(https://github.com/rust-lang/reference/pull/1417): Once this
+ // text is available on the Stable docs, cite those instead of the
+ // Nightly docs.
+ slc.len()
+ }
+
+ pub(crate) fn iter(&self) -> impl Iterator<Item = Ptr<'a, T>> {
+ // TODO(#429): Once `NonNull::cast` documents that it preserves
+ // provenance, cite those docs.
+ let base = self.ptr.cast::<T>().as_ptr();
+ (0..self.len()).map(move |i| {
+ // TODO(https://github.com/rust-lang/rust/issues/74265): Use
+ // `NonNull::get_unchecked_mut`.
+
+            // SAFETY: If the following conditions are not satisfied,
+            // `pointer::add` may induce Undefined Behavior [1]:
+ // > 1. Both the starting and resulting pointer must be either
+ // > in bounds or one byte past the end of the same allocated
+ // > object.
+ // > 2. The computed offset, in bytes, cannot overflow an
+ // > `isize`.
+ // > 3. The offset being in bounds cannot rely on “wrapping
+ // > around” the address space. That is, the
+ // > infinite-precision sum must fit in a `usize`.
+ //
+ // [1] https://doc.rust-lang.org/std/primitive.pointer.html#method.add
+ //
+ // We satisfy all three of these conditions here:
+ // 1. `base` (by invariant on `self`) points to an allocated
+ // object. By contract, `self.len()` accurately reflects the
+            //    number of elements in the slice. `i` is in bounds of
+            //    `self.len()` by construction, and so the result of this
+            //    addition cannot overflow past the end of the allocation
+            //    referred to by `self`.
+ // 2. By invariant on `Ptr`, `self` addresses a byte range whose
+ // length fits in an `isize`. Since `elem` is contained in
+ // `self`, the computed offset of `elem` must fit within
+ // `isize.`
+ // 3. By invariant on `Ptr`, `self` addresses a byte range which
+ // does not wrap around the address space. Since `elem` is
+            //    contained in `self`, the computed offset of `elem` must
+            //    not wrap around the address space.
+ //
+ // TODO(#429): Once `pointer::add` documents that it preserves
+ // provenance, cite those docs.
+ let elem = unsafe { base.add(i) };
+
+ // SAFETY:
+ // - `elem` must not be null. `base` is constructed from a
+ // `NonNull` pointer, and the addition that produces `elem`
+ // must not overflow or wrap around, so `elem >= base > 0`.
+ //
+ // TODO(#429): Once `NonNull::new_unchecked` documents that it
+ // preserves provenance, cite those docs.
+ let elem = unsafe { NonNull::new_unchecked(elem) };
+
+ // SAFETY: The safety invariants of `Ptr` (see definition) are
+ // satisfied:
+ // 1. `elem` is derived from a valid Rust allocation, because
+ // `self` is derived from a valid Rust allocation, by
+ // invariant on `Ptr`
+ // 2. `elem` has the same provenance as `self`, because it
+ // derived from `self` using a series of
+ // provenance-preserving operations
+ // 3. `elem` is entirely contained in the allocation of `self`
+ // (see above)
+ // 4. `elem` addresses a byte range whose length fits in an
+ // `isize` (see above)
+ // 5. `elem` addresses a byte range which does not wrap around
+ // the address space (see above)
+ // 6. `elem` is validly-aligned for `T`. `self`, which
+ // represents a `[T]` is validly aligned for `T`, and `elem`
+ // is an element within that `[T]`
+ // 7. The allocation of `elem` is guaranteed to live for at
+ // least `'a`, because `elem` is entirely contained in
+ // `self`, which lives for at least `'a` by invariant on
+ // `Ptr`.
+ // 8. `T: 'a`, because `elem` is an element within `[T]`, and
+ // `[T]: 'a` by invariant on `Ptr`
+ Ptr { ptr: elem, _lifetime: PhantomData }
+ })
+ }
+ }
+
+ impl<'a, T: 'a + ?Sized> From<&'a T> for Ptr<'a, T> {
+ #[inline(always)]
+ fn from(t: &'a T) -> Ptr<'a, T> {
+ // SAFETY: `t` points to a valid Rust allocation, `A`, by
+ // construction. Thus:
+ // - `ptr` is derived from `A`
+ // - Since we use `NonNull::from`, which preserves provenance, `ptr`
+ // has the same provenance as `A`
+ // - Since `NonNull::from` creates a pointer which addresses the
+ // same bytes as `t`, `ptr` addresses a byte range entirely
+ // contained in (in this case, identical to) `A`
+ // - Since `t: &T`, it addresses no more than `isize::MAX` bytes [1]
+ // - Since `t: &T`, it addresses a byte range which does not wrap
+ // around the address space [2]
+ // - Since it is constructed from a valid `&T`, `ptr` is
+ // validly-aligned for `T`
+ // - Since `t: &'a T`, the allocation `A` is guaranteed to live for
+ // at least `'a`
+ // - `T: 'a` by trait bound
+ //
+ // TODO(#429),
+ // TODO(https://github.com/rust-lang/rust/issues/116181): Once it's
+ // documented, reference the guarantee that `NonNull::from`
+ // preserves provenance.
+ //
+ // TODO(#429),
+ // TODO(https://github.com/rust-lang/unsafe-code-guidelines/issues/465):
+ // - [1] Where does the reference document that allocations fit in
+ // `isize`?
+ // - [2] Where does the reference document that allocations don't
+ // wrap around the address space?
+ Ptr { ptr: NonNull::from(t), _lifetime: PhantomData }
+ }
+ }
+
+ impl<'a, T: 'a + ?Sized> Debug for Ptr<'a, T> {
+ #[inline]
+ fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
+ self.ptr.fmt(f)
+ }
+ }
+
+ #[cfg(test)]
+ mod tests {
+ use core::mem::{self, MaybeUninit};
+
+ use super::*;
+ use crate::{util::testutil::AU64, FromBytes};
+
+ #[test]
+        fn test_ptr_try_cast_into_soundness() {
+ // This test is designed so that if `Ptr::try_cast_into_xxx` are
+ // buggy, it will manifest as unsoundness that Miri can detect.
+
+ // - If `size_of::<T>() == 0`, `N == 4`
+ // - Else, `N == 4 * size_of::<T>()`
+ fn test<const N: usize, T: ?Sized + KnownLayout + FromBytes>() {
+ let mut bytes = [MaybeUninit::<u8>::uninit(); N];
+ let initialized = [MaybeUninit::new(0u8); N];
+ for start in 0..=bytes.len() {
+ for end in start..=bytes.len() {
+ // Set all bytes to uninitialized other than those in
+                    // the range we're going to pass to `try_cast_into`.
+ // This allows Miri to detect out-of-bounds reads
+ // because they read uninitialized memory. Without this,
+ // some out-of-bounds reads would still be in-bounds of
+ // `bytes`, and so might spuriously be accepted.
+ bytes = [MaybeUninit::<u8>::uninit(); N];
+ let bytes = &mut bytes[start..end];
+ // Initialize only the byte range we're going to pass to
+                    // `try_cast_into`.
+ bytes.copy_from_slice(&initialized[start..end]);
+
+ let bytes = {
+ let bytes: *const [MaybeUninit<u8>] = bytes;
+ #[allow(clippy::as_conversions)]
+ let bytes = bytes as *const [u8];
+ // SAFETY: We just initialized these bytes to valid
+ // `u8`s.
+ unsafe { &*bytes }
+ };
+
+ /// # Safety
+ ///
+ /// - `slf` must reference a byte range which is
+ /// entirely initialized.
+ /// - `slf` must reference a byte range which is only
+ /// referenced by shared references which do not
+ /// contain `UnsafeCell`s during its lifetime.
+ unsafe fn validate_and_get_len<T: ?Sized + KnownLayout + FromBytes>(
+ slf: Ptr<'_, T>,
+ ) -> usize {
+ // SAFETY:
+ // - Since all bytes in `slf` are initialized and
+ // `T: FromBytes`, `slf` contains a valid `T`.
+ // - The caller promises that the referenced memory
+ // is not also referenced by any mutable
+ // references.
+ // - The caller promises that the referenced memory
+ // is not also referenced as a type which contains
+ // `UnsafeCell`s.
+ let t = unsafe { slf.as_ref() };
+
+ let bytes = {
+ let len = mem::size_of_val(t);
+ let t: *const T = t;
+ // SAFETY:
+ // - We know `t`'s bytes are all initialized
+ // because we just read it from `slf`, which
+ // points to an initialized range of bytes. If
+ // there's a bug and this doesn't hold, then
+ // that's exactly what we're hoping Miri will
+ // catch!
+ // - Since `T: FromBytes`, `T` doesn't contain
+ // any `UnsafeCell`s, so it's okay for `t: T`
+ // and a `&[u8]` to the same memory to be
+ // alive concurrently.
+ unsafe { core::slice::from_raw_parts(t.cast::<u8>(), len) }
+ };
+
+ // This assertion ensures that `t`'s bytes are read
+ // and compared to another value, which in turn
+ // ensures that Miri gets a chance to notice if any
+ // of `t`'s bytes are uninitialized, which they
+ // shouldn't be (see the comment above).
+ assert_eq!(bytes, vec![0u8; bytes.len()]);
+
+ mem::size_of_val(t)
+ }
+
+ for cast_type in [_CastType::_Prefix, _CastType::_Suffix] {
+ if let Some((slf, split_at)) =
+ Ptr::from(bytes).try_cast_into::<T>(cast_type)
+ {
+ // SAFETY: All bytes in `bytes` have been
+ // initialized.
+ let len = unsafe { validate_and_get_len(slf) };
+ match cast_type {
+ _CastType::_Prefix => assert_eq!(split_at, len),
+ _CastType::_Suffix => assert_eq!(split_at, bytes.len() - len),
+ }
+ }
+ }
+
+ if let Some(slf) = Ptr::from(bytes).try_cast_into_no_leftover::<T>() {
+ // SAFETY: All bytes in `bytes` have been
+ // initialized.
+ let len = unsafe { validate_and_get_len(slf) };
+ assert_eq!(len, bytes.len());
+ }
+ }
+ }
+ }
+
+ macro_rules! test {
+ ($($ty:ty),*) => {
+ $({
+ const S: usize = core::mem::size_of::<$ty>();
+ const N: usize = if S == 0 { 4 } else { S * 4 };
+ test::<N, $ty>();
+ // We don't support casting into DSTs whose trailing slice
+ // element is a ZST.
+ if S > 0 {
+ test::<N, [$ty]>();
+ }
+ // TODO: Test with a slice DST once we have any that
+ // implement `KnownLayout + FromBytes`.
+ })*
+ };
+ }
+
+ test!(());
+ test!(u8, u16, u32, u64, u128, usize, AU64);
+ test!(i8, i16, i32, i64, i128, isize);
+ test!(f32, f64);
+ }
+ }
+}
+
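+/// A type which can be converted to the integer address of the memory region
+/// it points or refers to.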
+pub(crate) trait AsAddress {
+ fn addr(self) -> usize;
+}
+
+impl<'a, T: ?Sized> AsAddress for &'a T {
+ #[inline(always)]
+ fn addr(self) -> usize {
+ let ptr: *const T = self;
+ AsAddress::addr(ptr)
+ }
+}
+
+impl<'a, T: ?Sized> AsAddress for &'a mut T {
+ #[inline(always)]
+ fn addr(self) -> usize {
+ let ptr: *const T = self;
+ AsAddress::addr(ptr)
+ }
+}
+
+impl<T: ?Sized> AsAddress for *const T {
+ #[inline(always)]
+ fn addr(self) -> usize {
+ // TODO(#181), TODO(https://github.com/rust-lang/rust/issues/95228): Use
+ // `.addr()` instead of `as usize` once it's stable, and get rid of this
+ // `allow`. Currently, `as usize` is the only way to accomplish this.
+ #[allow(clippy::as_conversions)]
+ #[cfg_attr(__INTERNAL_USE_ONLY_NIGHLTY_FEATURES_IN_TESTS, allow(lossy_provenance_casts))]
+ return self.cast::<()>() as usize;
+ }
+}
+
+impl<T: ?Sized> AsAddress for *mut T {
+ #[inline(always)]
+ fn addr(self) -> usize {
+ let ptr: *const T = self;
+ AsAddress::addr(ptr)
+ }
+}
+
+/// Is `t` aligned to `mem::align_of::<U>()`?
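+///
+/// e.g., a `t` whose address is `0x1004` is aligned for `u32`
+/// (`0x1004 % 4 == 0`) but, on targets where `align_of::<u64>() == 8`, not
+/// for `u64` (`0x1004 % 8 == 4`).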
+#[inline(always)]
+pub(crate) fn aligned_to<T: AsAddress, U>(t: T) -> bool {
+ // `mem::align_of::<U>()` is guaranteed to return a non-zero value, which in
+ // turn guarantees that this mod operation will not panic.
+ #[allow(clippy::arithmetic_side_effects)]
+ let remainder = t.addr() % mem::align_of::<U>();
+ remainder == 0
+}
+
+/// Round `n` down to the largest value `m` such that `m <= n` and `m % align ==
+/// 0`.
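+///
+/// e.g., `round_down_to_next_multiple_of_alignment(13, four)`, where `four`
+/// is `NonZeroUsize::new(4).unwrap()`, returns `12`, the largest multiple of
+/// 4 not exceeding 13.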
+///
+/// # Panics
+///
+/// May panic if `align` is not a power of two. Even if it doesn't panic in this
+/// case, it will produce nonsense results.
+#[inline(always)]
+pub(crate) const fn round_down_to_next_multiple_of_alignment(
+ n: usize,
+ align: NonZeroUsize,
+) -> usize {
+ let align = align.get();
+ debug_assert!(align.is_power_of_two());
+
+ // Subtraction can't underflow because `align.get() >= 1`.
+ #[allow(clippy::arithmetic_side_effects)]
+ let mask = !(align - 1);
+ n & mask
+}
+
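+/// Returns the larger of `a` and `b`.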
+pub(crate) const fn max(a: NonZeroUsize, b: NonZeroUsize) -> NonZeroUsize {
+ if a.get() < b.get() {
+ b
+ } else {
+ a
+ }
+}
+
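+/// Returns the smaller of `a` and `b`.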
+pub(crate) const fn min(a: NonZeroUsize, b: NonZeroUsize) -> NonZeroUsize {
+ if a.get() > b.get() {
+ b
+ } else {
+ a
+ }
+}
+
+/// Since we support multiple versions of Rust, there are often features which
+/// have been stabilized in the most recent stable release which do not yet
+/// exist (stably) on our MSRV. This module provides polyfills for those
+/// features so that we can write more "modern" code, and just remove the
+/// polyfill once our MSRV supports the corresponding feature. Without this,
+/// we'd have to write worse/more verbose code and leave TODO comments sprinkled
+/// throughout the codebase to update to the new pattern once it's stabilized.
+///
+/// Each trait is imported as `_` at the crate root; each polyfill should "just
+/// work" at usage sites.
+pub(crate) mod polyfills {
+ use core::ptr::{self, NonNull};
+
+ // A polyfill for `NonNull::slice_from_raw_parts` that we can use before our
+ // MSRV is 1.70, when that function was stabilized.
+ //
+ // TODO(#67): Once our MSRV is 1.70, remove this.
+ pub(crate) trait NonNullExt<T> {
+ fn slice_from_raw_parts(data: Self, len: usize) -> NonNull<[T]>;
+ }
+
+ impl<T> NonNullExt<T> for NonNull<T> {
+ #[inline(always)]
+ fn slice_from_raw_parts(data: Self, len: usize) -> NonNull<[T]> {
+ let ptr = ptr::slice_from_raw_parts_mut(data.as_ptr(), len);
+ // SAFETY: `ptr` is converted from `data`, which is non-null.
+ unsafe { NonNull::new_unchecked(ptr) }
+ }
+ }
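+
+    // Usage note (illustrative): with `use crate::util::polyfills::NonNullExt
+    // as _;` in scope, `NonNull::slice_from_raw_parts(data, len)` resolves to
+    // this polyfill on pre-1.70 toolchains (hence the
+    // `unstable_name_collisions` allows at call sites).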
+}
+
+#[cfg(test)]
+pub(crate) mod testutil {
+ use core::fmt::{self, Display, Formatter};
+
+ use crate::*;
+
+ /// A `T` which is aligned to at least `align_of::<A>()`.
+ #[derive(Default)]
+ pub(crate) struct Align<T, A> {
+ pub(crate) t: T,
+ _a: [A; 0],
+ }
+
+ impl<T: Default, A> Align<T, A> {
+ pub(crate) fn set_default(&mut self) {
+ self.t = T::default();
+ }
+ }
+
+ impl<T, A> Align<T, A> {
+ pub(crate) const fn new(t: T) -> Align<T, A> {
+ Align { t, _a: [] }
+ }
+ }
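+
+    // Illustrative: `Align::<[u8; 2], AU64>::new([1, 2])` places the wrapped
+    // array at an 8-aligned address, because the zero-length `[A; 0]` field
+    // raises the struct's alignment to at least `align_of::<A>()`.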
+
+ // A `u64` with alignment 8.
+ //
+ // Though `u64` has alignment 8 on some platforms, it's not guaranteed.
+ // By contrast, `AU64` is guaranteed to have alignment 8.
+ #[derive(
+ KnownLayout,
+ FromZeroes,
+ FromBytes,
+ AsBytes,
+ Eq,
+ PartialEq,
+ Ord,
+ PartialOrd,
+ Default,
+ Debug,
+ Copy,
+ Clone,
+ )]
+ #[repr(C, align(8))]
+ pub(crate) struct AU64(pub(crate) u64);
+
+ impl AU64 {
+ // Converts this `AU64` to bytes using this platform's endianness.
+ pub(crate) fn to_bytes(self) -> [u8; 8] {
+ crate::transmute!(self)
+ }
+ }
+
+ impl Display for AU64 {
+ fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+ Display::fmt(&self.0, f)
+ }
+ }
+
+ #[derive(
+ FromZeroes, FromBytes, Eq, PartialEq, Ord, PartialOrd, Default, Debug, Copy, Clone,
+ )]
+ #[repr(C)]
+ pub(crate) struct Nested<T, U: ?Sized> {
+ _t: T,
+ _u: U,
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_round_down_to_next_multiple_of_alignment() {
+ fn alt_impl(n: usize, align: NonZeroUsize) -> usize {
+ let mul = n / align.get();
+ mul * align.get()
+ }
+
+ for align in [1, 2, 4, 8, 16] {
+ for n in 0..256 {
+ let align = NonZeroUsize::new(align).unwrap();
+ let want = alt_impl(n, align);
+ let got = round_down_to_next_multiple_of_alignment(n, align);
+ assert_eq!(got, want, "round_down_to_next_multiple_of_alignment({n}, {align})");
+ }
+ }
+ }
+}
+
+#[cfg(kani)]
+mod proofs {
+ use super::*;
+
+ #[kani::proof]
+ fn prove_round_down_to_next_multiple_of_alignment() {
+ fn model_impl(n: usize, align: NonZeroUsize) -> usize {
+ assert!(align.get().is_power_of_two());
+ let mul = n / align.get();
+ mul * align.get()
+ }
+
+ let align: NonZeroUsize = kani::any();
+ kani::assume(align.get().is_power_of_two());
+ let n: usize = kani::any();
+
+ let expected = model_impl(n, align);
+ let actual = round_down_to_next_multiple_of_alignment(n, align);
+ assert_eq!(expected, actual, "round_down_to_next_multiple_of_alignment({n}, {align})");
+ }
+
+ // Restricted to nightly since we use the unstable `usize::next_multiple_of`
+ // in our model implementation.
+ #[cfg(__INTERNAL_USE_ONLY_NIGHLTY_FEATURES_IN_TESTS)]
+ #[kani::proof]
+ fn prove_padding_needed_for() {
+ fn model_impl(len: usize, align: NonZeroUsize) -> usize {
+ let padded = len.next_multiple_of(align.get());
+ let padding = padded - len;
+ padding
+ }
+
+ let align: NonZeroUsize = kani::any();
+ kani::assume(align.get().is_power_of_two());
+ let len: usize = kani::any();
+ // Constrain `len` to valid Rust lengths, since our model implementation
+ // isn't robust to overflow.
+ kani::assume(len <= isize::MAX as usize);
+ kani::assume(align.get() < 1 << 29);
+
+ let expected = model_impl(len, align);
+ let actual = core_layout::padding_needed_for(len, align);
+ assert_eq!(expected, actual, "padding_needed_for({len}, {align})");
+
+ let padded_len = actual + len;
+ assert_eq!(padded_len % align, 0);
+ assert!(padded_len / align >= len / align);
+ }
+}
diff --git a/third_party/rust/zerocopy/src/wrappers.rs b/third_party/rust/zerocopy/src/wrappers.rs
new file mode 100644
index 0000000000..532d872978
--- /dev/null
+++ b/third_party/rust/zerocopy/src/wrappers.rs
@@ -0,0 +1,503 @@
+// Copyright 2023 The Fuchsia Authors
+//
+// Licensed under a BSD-style license <LICENSE-BSD>, Apache License, Version 2.0
+// <LICENSE-APACHE or https://www.apache.org/licenses/LICENSE-2.0>, or the MIT
+// license <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your option.
+// This file may not be copied, modified, or distributed except according to
+// those terms.
+
+use core::{
+ cmp::Ordering,
+ fmt::{self, Debug, Display, Formatter},
+ hash::Hash,
+ mem::{self, ManuallyDrop},
+ ops::{Deref, DerefMut},
+ ptr,
+};
+
+use super::*;
+
+/// A type with no alignment requirement.
+///
+/// An `Unalign` wraps a `T`, removing any alignment requirement. `Unalign<T>`
+/// has the same size and bit validity as `T`, but not necessarily the same
+/// alignment [or ABI]. This is useful if a type with an alignment requirement
+/// needs to be read from a chunk of memory which provides no alignment
+/// guarantees.
+///
+/// Since `Unalign` has no alignment requirement, the inner `T` may not be
+/// properly aligned in memory. There are five ways to access the inner `T`:
+/// - by value, using [`get`] or [`into_inner`]
+/// - by reference inside of a callback, using [`update`]
+/// - fallibly by reference, using [`try_deref`] or [`try_deref_mut`]; these can
+/// fail if the `Unalign` does not satisfy `T`'s alignment requirement at
+/// runtime
+/// - unsafely by reference, using [`deref_unchecked`] or
+/// [`deref_mut_unchecked`]; it is the caller's responsibility to ensure that
+/// the `Unalign` satisfies `T`'s alignment requirement
+/// - (where `T: Unaligned`) infallibly by reference, using [`Deref::deref`] or
+/// [`DerefMut::deref_mut`]
+///
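+/// # Example
+///
+/// A minimal sketch of by-value access (see the methods above for the other
+/// access patterns):
+///
+/// ```rust
+/// use zerocopy::Unalign;
+///
+/// // `u32` is normally 4-aligned; `Unalign<u32>` has no alignment requirement.
+/// let u = Unalign::new(0xdeadbeef_u32);
+/// assert_eq!(u.get(), 0xdeadbeef);
+/// ```
+///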
+/// [or ABI]: https://github.com/google/zerocopy/issues/164
+/// [`get`]: Unalign::get
+/// [`into_inner`]: Unalign::into_inner
+/// [`update`]: Unalign::update
+/// [`try_deref`]: Unalign::try_deref
+/// [`try_deref_mut`]: Unalign::try_deref_mut
+/// [`deref_unchecked`]: Unalign::deref_unchecked
+/// [`deref_mut_unchecked`]: Unalign::deref_mut_unchecked
+// NOTE: This type is sound to use with types that need to be dropped. The
+// reason is that the compiler-generated drop code automatically moves all
+// values to aligned memory slots before dropping them in-place. This is not
+// well-documented, but it's hinted at in places like [1] and [2]. However, this
+// also means that `T` must be `Sized`; unless something changes, we can never
+// support unsized `T`. [3]
+//
+// [1] https://github.com/rust-lang/rust/issues/54148#issuecomment-420529646
+// [2] https://github.com/google/zerocopy/pull/126#discussion_r1018512323
+// [3] https://github.com/google/zerocopy/issues/209
+#[allow(missing_debug_implementations)]
+#[derive(Default, Copy)]
+#[cfg_attr(
+ any(feature = "derive", test),
+ derive(KnownLayout, FromZeroes, FromBytes, AsBytes, Unaligned)
+)]
+#[repr(C, packed)]
+pub struct Unalign<T>(T);
+
+#[cfg(not(any(feature = "derive", test)))]
+impl_known_layout!(T => Unalign<T>);
+
+safety_comment! {
+ /// SAFETY:
+ /// - `Unalign<T>` is `repr(packed)`, so it is unaligned regardless of the
+ /// alignment of `T`, and so we don't require that `T: Unaligned`
+ /// - `Unalign<T>` has the same bit validity as `T`, and so it is
+ /// `FromZeroes`, `FromBytes`, or `AsBytes` exactly when `T` is as well.
+ impl_or_verify!(T => Unaligned for Unalign<T>);
+ impl_or_verify!(T: FromZeroes => FromZeroes for Unalign<T>);
+ impl_or_verify!(T: FromBytes => FromBytes for Unalign<T>);
+ impl_or_verify!(T: AsBytes => AsBytes for Unalign<T>);
+}
+
+// Note that `Unalign: Clone` only if `T: Copy`. Since the inner `T` may not be
+// aligned, there's no way to safely call `T::clone`, and so a `T: Clone` bound
+// is not sufficient to implement `Clone` for `Unalign`.
+impl<T: Copy> Clone for Unalign<T> {
+ #[inline(always)]
+ fn clone(&self) -> Unalign<T> {
+ *self
+ }
+}
+
+impl<T> Unalign<T> {
+ /// Constructs a new `Unalign`.
+ #[inline(always)]
+ pub const fn new(val: T) -> Unalign<T> {
+ Unalign(val)
+ }
+
+ /// Consumes `self`, returning the inner `T`.
+ #[inline(always)]
+ pub const fn into_inner(self) -> T {
+ // Use this instead of `mem::transmute` since the latter can't tell
+ // that `Unalign<T>` and `T` have the same size.
+ #[repr(C)]
+ union Transmute<T> {
+ u: ManuallyDrop<Unalign<T>>,
+ t: ManuallyDrop<T>,
+ }
+
+ // SAFETY: Since `Unalign` is `#[repr(C, packed)]`, it has the same
+ // layout as `T`. `ManuallyDrop<U>` is guaranteed to have the same
+ // layout as `U`, and so `ManuallyDrop<Unalign<T>>` has the same layout
+ // as `ManuallyDrop<T>`. Since `Transmute<T>` is `#[repr(C)]`, its `t`
+ // and `u` fields both start at the same offset (namely, 0) within the
+ // union.
+ //
+ // We do this instead of just destructuring in order to prevent
+ // `Unalign`'s `Drop::drop` from being run, since dropping is not
+ // supported in `const fn`s.
+ //
+ // TODO(https://github.com/rust-lang/rust/issues/73255): Destructure
+ // instead of using unsafe.
+ unsafe { ManuallyDrop::into_inner(Transmute { u: ManuallyDrop::new(self) }.t) }
+ }
+
+ /// Attempts to return a reference to the wrapped `T`, failing if `self` is
+ /// not properly aligned.
+ ///
+ /// If `self` does not satisfy `mem::align_of::<T>()`, then it is unsound to
+ /// return a reference to the wrapped `T`, and `try_deref` returns `None`.
+ ///
+ /// If `T: Unaligned`, then `Unalign<T>` implements [`Deref`], and callers
+ /// may prefer [`Deref::deref`], which is infallible.
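+    ///
+    /// # Example
+    ///
+    /// A sketch; whether `try_deref` succeeds depends on where this particular
+    /// `Unalign` happens to land in memory:
+    ///
+    /// ```rust
+    /// use zerocopy::Unalign;
+    ///
+    /// let u = Unalign::new(7u32);
+    /// match u.try_deref() {
+    ///     Some(r) => assert_eq!(*r, 7),
+    ///     None => {} // `u` was not 4-aligned on this run
+    /// }
+    /// ```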
+ #[inline(always)]
+ pub fn try_deref(&self) -> Option<&T> {
+ if !crate::util::aligned_to::<_, T>(self) {
+ return None;
+ }
+
+ // SAFETY: `deref_unchecked`'s safety requirement is that `self` is
+ // aligned to `align_of::<T>()`, which we just checked.
+ unsafe { Some(self.deref_unchecked()) }
+ }
+
+ /// Attempts to return a mutable reference to the wrapped `T`, failing if
+ /// `self` is not properly aligned.
+ ///
+ /// If `self` does not satisfy `mem::align_of::<T>()`, then it is unsound to
+ /// return a reference to the wrapped `T`, and `try_deref_mut` returns
+ /// `None`.
+ ///
+ /// If `T: Unaligned`, then `Unalign<T>` implements [`DerefMut`], and
+ /// callers may prefer [`DerefMut::deref_mut`], which is infallible.
+ #[inline(always)]
+ pub fn try_deref_mut(&mut self) -> Option<&mut T> {
+ if !crate::util::aligned_to::<_, T>(&*self) {
+ return None;
+ }
+
+ // SAFETY: `deref_mut_unchecked`'s safety requirement is that `self` is
+ // aligned to `align_of::<T>()`, which we just checked.
+ unsafe { Some(self.deref_mut_unchecked()) }
+ }
+
+ /// Returns a reference to the wrapped `T` without checking alignment.
+ ///
+    /// If `T: Unaligned`, then `Unalign<T>` implements [`Deref`], and callers
+ /// may prefer [`Deref::deref`], which is safe.
+ ///
+ /// # Safety
+ ///
+ /// If `self` does not satisfy `mem::align_of::<T>()`, then
+ /// `self.deref_unchecked()` may cause undefined behavior.
+ #[inline(always)]
+ pub const unsafe fn deref_unchecked(&self) -> &T {
+        // SAFETY: `Unalign<T>` is `repr(C, packed)`, so its single `T` field
+        // lives at offset 0, i.e., at the same memory location as `self`.
+        // That `T` has no alignment guarantee,
+ // but the caller has promised that `self` is properly aligned, so we
+ // know that it is sound to create a reference to `T` at this memory
+ // location.
+ //
+ // We use `mem::transmute` instead of `&*self.get_ptr()` because
+ // dereferencing pointers is not stable in `const` on our current MSRV
+ // (1.56 as of this writing).
+ unsafe { mem::transmute(self) }
+ }
+
+ /// Returns a mutable reference to the wrapped `T` without checking
+ /// alignment.
+ ///
+    /// If `T: Unaligned`, then `Unalign<T>` implements [`DerefMut`], and
+ /// callers may prefer [`DerefMut::deref_mut`], which is safe.
+ ///
+ /// # Safety
+ ///
+ /// If `self` does not satisfy `mem::align_of::<T>()`, then
+ /// `self.deref_mut_unchecked()` may cause undefined behavior.
+ #[inline(always)]
+ pub unsafe fn deref_mut_unchecked(&mut self) -> &mut T {
+ // SAFETY: `self.get_mut_ptr()` returns a raw pointer to a valid `T` at
+ // the same memory location as `self`. It has no alignment guarantee,
+ // but the caller has promised that `self` is properly aligned, so we
+ // know that the pointer itself is aligned, and thus that it is sound to
+ // create a reference to a `T` at this memory location.
+ unsafe { &mut *self.get_mut_ptr() }
+ }
+
+ /// Gets an unaligned raw pointer to the inner `T`.
+ ///
+ /// # Safety
+ ///
+ /// The returned raw pointer is not necessarily aligned to
+ /// `align_of::<T>()`. Most functions which operate on raw pointers require
+ /// those pointers to be aligned, so calling those functions with the result
+ /// of `get_ptr` will be undefined behavior if alignment is not guaranteed
+ /// using some out-of-band mechanism. In general, the only functions which
+ /// are safe to call with this pointer are those which are explicitly
+ /// documented as being sound to use with an unaligned pointer, such as
+ /// [`read_unaligned`].
+ ///
+ /// [`read_unaligned`]: core::ptr::read_unaligned
+ #[inline(always)]
+ pub const fn get_ptr(&self) -> *const T {
+ ptr::addr_of!(self.0)
+ }
+
+ /// Gets an unaligned mutable raw pointer to the inner `T`.
+ ///
+ /// # Safety
+ ///
+ /// The returned raw pointer is not necessarily aligned to
+ /// `align_of::<T>()`. Most functions which operate on raw pointers require
+ /// those pointers to be aligned, so calling those functions with the result
+    /// of `get_mut_ptr` will be undefined behavior if alignment is not guaranteed
+ /// using some out-of-band mechanism. In general, the only functions which
+ /// are safe to call with this pointer are those which are explicitly
+ /// documented as being sound to use with an unaligned pointer, such as
+ /// [`read_unaligned`].
+ ///
+ /// [`read_unaligned`]: core::ptr::read_unaligned
+ // TODO(https://github.com/rust-lang/rust/issues/57349): Make this `const`.
+ #[inline(always)]
+ pub fn get_mut_ptr(&mut self) -> *mut T {
+ ptr::addr_of_mut!(self.0)
+ }
+
+ /// Sets the inner `T`, dropping the previous value.
+ // TODO(https://github.com/rust-lang/rust/issues/57349): Make this `const`.
+ #[inline(always)]
+ pub fn set(&mut self, t: T) {
+ *self = Unalign::new(t);
+ }
+
+ /// Updates the inner `T` by calling a function on it.
+ ///
+ /// If [`T: Unaligned`], then `Unalign<T>` implements [`DerefMut`], and that
+ /// impl should be preferred over this method when performing updates, as it
+ /// will usually be faster and more ergonomic.
+ ///
+ /// For large types, this method may be expensive, as it requires copying
+ /// `2 * size_of::<T>()` bytes. \[1\]
+ ///
+ /// \[1\] Since the inner `T` may not be aligned, it would not be sound to
+ /// invoke `f` on it directly. Instead, `update` moves it into a
+ /// properly-aligned location in the local stack frame, calls `f` on it, and
+ /// then moves it back to its original location in `self`.
+ ///
+ /// [`T: Unaligned`]: Unaligned
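+    ///
+    /// # Example
+    ///
+    /// A minimal sketch (mirrors `test_unalign_update` below):
+    ///
+    /// ```rust
+    /// use zerocopy::Unalign;
+    ///
+    /// let mut u = Unalign::new(123u64);
+    /// u.update(|n| *n += 1);
+    /// assert_eq!(u.get(), 124);
+    /// ```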
+ #[inline]
+ pub fn update<O, F: FnOnce(&mut T) -> O>(&mut self, f: F) -> O {
+ // On drop, this moves `copy` out of itself and uses `ptr::write` to
+ // overwrite `slf`.
+ struct WriteBackOnDrop<T> {
+ copy: ManuallyDrop<T>,
+ slf: *mut Unalign<T>,
+ }
+
+ impl<T> Drop for WriteBackOnDrop<T> {
+ fn drop(&mut self) {
+ // SAFETY: We never use `copy` again as required by
+ // `ManuallyDrop::take`.
+ let copy = unsafe { ManuallyDrop::take(&mut self.copy) };
+ // SAFETY: `slf` is the raw pointer value of `self`. We know it
+ // is valid for writes and properly aligned because `self` is a
+ // mutable reference, which guarantees both of these properties.
+ unsafe { ptr::write(self.slf, Unalign::new(copy)) };
+ }
+ }
+
+ // SAFETY: We know that `self` is valid for reads, properly aligned, and
+ // points to an initialized `Unalign<T>` because it is a mutable
+ // reference, which guarantees all of these properties.
+ //
+ // Since `T: !Copy`, it would be unsound in the general case to allow
+ // both the original `Unalign<T>` and the copy to be used by safe code.
+ // We guarantee that the copy is used to overwrite the original in the
+ // `Drop::drop` impl of `WriteBackOnDrop`. So long as this `drop` is
+ // called before any other safe code executes, soundness is upheld.
+ // While this method can terminate in two ways (by returning normally or
+ // by unwinding due to a panic in `f`), in both cases, `write_back` is
+ // dropped - and its `drop` called - before any other safe code can
+ // execute.
+ let copy = unsafe { ptr::read(self) }.into_inner();
+ let mut write_back = WriteBackOnDrop { copy: ManuallyDrop::new(copy), slf: self };
+
+ let ret = f(&mut write_back.copy);
+
+ drop(write_back);
+ ret
+ }
+}
+
+impl<T: Copy> Unalign<T> {
+ /// Gets a copy of the inner `T`.
+ // TODO(https://github.com/rust-lang/rust/issues/57349): Make this `const`.
+ #[inline(always)]
+ pub fn get(&self) -> T {
+ let Unalign(val) = *self;
+ val
+ }
+}
+
+impl<T: Unaligned> Deref for Unalign<T> {
+ type Target = T;
+
+ #[inline(always)]
+ fn deref(&self) -> &T {
+ // SAFETY: `deref_unchecked`'s safety requirement is that `self` is
+ // aligned to `align_of::<T>()`. `T: Unaligned` guarantees that
+ // `align_of::<T>() == 1`, and all pointers are one-aligned because all
+ // addresses are divisible by 1.
+ unsafe { self.deref_unchecked() }
+ }
+}
+
+impl<T: Unaligned> DerefMut for Unalign<T> {
+ #[inline(always)]
+ fn deref_mut(&mut self) -> &mut T {
+ // SAFETY: `deref_mut_unchecked`'s safety requirement is that `self` is
+ // aligned to `align_of::<T>()`. `T: Unaligned` guarantees that
+ // `align_of::<T>() == 1`, and all pointers are one-aligned because all
+ // addresses are divisible by 1.
+ unsafe { self.deref_mut_unchecked() }
+ }
+}
+
+impl<T: Unaligned + PartialOrd> PartialOrd<Unalign<T>> for Unalign<T> {
+ #[inline(always)]
+ fn partial_cmp(&self, other: &Unalign<T>) -> Option<Ordering> {
+ PartialOrd::partial_cmp(self.deref(), other.deref())
+ }
+}
+
+impl<T: Unaligned + Ord> Ord for Unalign<T> {
+ #[inline(always)]
+ fn cmp(&self, other: &Unalign<T>) -> Ordering {
+ Ord::cmp(self.deref(), other.deref())
+ }
+}
+
+impl<T: Unaligned + PartialEq> PartialEq<Unalign<T>> for Unalign<T> {
+ #[inline(always)]
+ fn eq(&self, other: &Unalign<T>) -> bool {
+ PartialEq::eq(self.deref(), other.deref())
+ }
+}
+
+impl<T: Unaligned + Eq> Eq for Unalign<T> {}
+
+impl<T: Unaligned + Hash> Hash for Unalign<T> {
+ #[inline(always)]
+ fn hash<H>(&self, state: &mut H)
+ where
+ H: Hasher,
+ {
+ self.deref().hash(state);
+ }
+}
+
+impl<T: Unaligned + Debug> Debug for Unalign<T> {
+ #[inline(always)]
+ fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+ Debug::fmt(self.deref(), f)
+ }
+}
+
+impl<T: Unaligned + Display> Display for Unalign<T> {
+ #[inline(always)]
+ fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+ Display::fmt(self.deref(), f)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use core::panic::AssertUnwindSafe;
+
+ use super::*;
+ use crate::util::testutil::*;
+
+ /// A `T` which is guaranteed not to satisfy `align_of::<A>()`.
+ ///
+ /// It must be the case that `align_of::<T>() < align_of::<A>()` in order
+    /// for this type to work properly.
+ #[repr(C)]
+ struct ForceUnalign<T, A> {
+ // The outer struct is aligned to `A`, and, thanks to `repr(C)`, `t` is
+ // placed at the minimum offset that guarantees its alignment. If
+ // `align_of::<T>() < align_of::<A>()`, then that offset will be
+ // guaranteed *not* to satisfy `align_of::<A>()`.
+ _u: u8,
+ t: T,
+ _a: [A; 0],
+ }
+
+ impl<T, A> ForceUnalign<T, A> {
+ const fn new(t: T) -> ForceUnalign<T, A> {
+ ForceUnalign { _u: 0, t, _a: [] }
+ }
+ }
+
+ #[test]
+ fn test_unalign() {
+ // Test methods that don't depend on alignment.
+ let mut u = Unalign::new(AU64(123));
+ assert_eq!(u.get(), AU64(123));
+ assert_eq!(u.into_inner(), AU64(123));
+ assert_eq!(u.get_ptr(), <*const _>::cast::<AU64>(&u));
+ assert_eq!(u.get_mut_ptr(), <*mut _>::cast::<AU64>(&mut u));
+ u.set(AU64(321));
+ assert_eq!(u.get(), AU64(321));
+
+ // Test methods that depend on alignment (when alignment is satisfied).
+ let mut u: Align<_, AU64> = Align::new(Unalign::new(AU64(123)));
+ assert_eq!(u.t.try_deref(), Some(&AU64(123)));
+ assert_eq!(u.t.try_deref_mut(), Some(&mut AU64(123)));
+ // SAFETY: The `Align<_, AU64>` guarantees proper alignment.
+ assert_eq!(unsafe { u.t.deref_unchecked() }, &AU64(123));
+ // SAFETY: The `Align<_, AU64>` guarantees proper alignment.
+ assert_eq!(unsafe { u.t.deref_mut_unchecked() }, &mut AU64(123));
+ *u.t.try_deref_mut().unwrap() = AU64(321);
+ assert_eq!(u.t.get(), AU64(321));
+
+ // Test methods that depend on alignment (when alignment is not
+ // satisfied).
+ let mut u: ForceUnalign<_, AU64> = ForceUnalign::new(Unalign::new(AU64(123)));
+ assert_eq!(u.t.try_deref(), None);
+ assert_eq!(u.t.try_deref_mut(), None);
+
+ // Test methods that depend on `T: Unaligned`.
+ let mut u = Unalign::new(123u8);
+ assert_eq!(u.try_deref(), Some(&123));
+ assert_eq!(u.try_deref_mut(), Some(&mut 123));
+ assert_eq!(u.deref(), &123);
+ assert_eq!(u.deref_mut(), &mut 123);
+ *u = 21;
+ assert_eq!(u.get(), 21);
+
+ // Test that some `Unalign` functions and methods are `const`.
+ const _UNALIGN: Unalign<u64> = Unalign::new(0);
+ const _UNALIGN_PTR: *const u64 = _UNALIGN.get_ptr();
+ const _U64: u64 = _UNALIGN.into_inner();
+ // Make sure all code is considered "used".
+ //
+ // TODO(https://github.com/rust-lang/rust/issues/104084): Remove this
+ // attribute.
+ #[allow(dead_code)]
+ const _: () = {
+ let x: Align<_, AU64> = Align::new(Unalign::new(AU64(123)));
+ // Make sure that `deref_unchecked` is `const`.
+ //
+ // SAFETY: The `Align<_, AU64>` guarantees proper alignment.
+ let au64 = unsafe { x.t.deref_unchecked() };
+ match au64 {
+ AU64(123) => {}
+ _ => unreachable!(),
+ }
+ };
+ }
+
+ #[test]
+ fn test_unalign_update() {
+ let mut u = Unalign::new(AU64(123));
+ u.update(|a| a.0 += 1);
+ assert_eq!(u.get(), AU64(124));
+
+ // Test that, even if the callback panics, the original is still
+ // correctly overwritten. Use a `Box` so that Miri is more likely to
+ // catch any unsoundness (which would likely result in two `Box`es for
+ // the same heap object, which is the sort of thing that Miri would
+ // probably catch).
+ let mut u = Unalign::new(Box::new(AU64(123)));
+ let res = std::panic::catch_unwind(AssertUnwindSafe(|| {
+ u.update(|a| {
+ a.0 += 1;
+ panic!();
+ })
+ }));
+ assert!(res.is_err());
+ assert_eq!(u.into_inner(), Box::new(AU64(124)));
+ }
+}