Diffstat:
 library/portable-simd/crates/core_simd/src/core_simd_docs.md | 4
 library/portable-simd/crates/core_simd/src/elements.rs | 11
 library/portable-simd/crates/core_simd/src/elements/float.rs | 357
 library/portable-simd/crates/core_simd/src/elements/int.rs | 298
 library/portable-simd/crates/core_simd/src/elements/uint.rs | 139
 library/portable-simd/crates/core_simd/src/eq.rs | 73
 library/portable-simd/crates/core_simd/src/fmt.rs | 39
 library/portable-simd/crates/core_simd/src/intrinsics.rs | 153
 library/portable-simd/crates/core_simd/src/iter.rs | 58
 library/portable-simd/crates/core_simd/src/lane_count.rs | 46
 library/portable-simd/crates/core_simd/src/lib.rs | 22
 library/portable-simd/crates/core_simd/src/masks.rs | 595
 library/portable-simd/crates/core_simd/src/masks/bitmask.rs | 246
 library/portable-simd/crates/core_simd/src/masks/full_masks.rs | 323
 library/portable-simd/crates/core_simd/src/masks/to_bitmask.rs | 93
 library/portable-simd/crates/core_simd/src/mod.rs | 32
 library/portable-simd/crates/core_simd/src/ops.rs | 254
 library/portable-simd/crates/core_simd/src/ops/assign.rs | 124
 library/portable-simd/crates/core_simd/src/ops/deref.rs | 124
 library/portable-simd/crates/core_simd/src/ops/unary.rs | 78
 library/portable-simd/crates/core_simd/src/ord.rs | 213
 library/portable-simd/crates/core_simd/src/select.rs | 59
 library/portable-simd/crates/core_simd/src/swizzle.rs | 385
 library/portable-simd/crates/core_simd/src/to_bytes.rs | 41
 library/portable-simd/crates/core_simd/src/vector.rs | 742
 library/portable-simd/crates/core_simd/src/vector/float.rs | 24
 library/portable-simd/crates/core_simd/src/vector/int.rs | 63
 library/portable-simd/crates/core_simd/src/vector/ptr.rs | 51
 library/portable-simd/crates/core_simd/src/vector/uint.rs | 63
 library/portable-simd/crates/core_simd/src/vendor.rs | 31
 library/portable-simd/crates/core_simd/src/vendor/arm.rs | 76
 library/portable-simd/crates/core_simd/src/vendor/powerpc.rs | 11
 library/portable-simd/crates/core_simd/src/vendor/wasm32.rs | 30
 library/portable-simd/crates/core_simd/src/vendor/x86.rs | 63
 34 files changed, 4921 insertions, 0 deletions
diff --git a/library/portable-simd/crates/core_simd/src/core_simd_docs.md b/library/portable-simd/crates/core_simd/src/core_simd_docs.md
new file mode 100644
index 000000000..15e8ed025
--- /dev/null
+++ b/library/portable-simd/crates/core_simd/src/core_simd_docs.md
@@ -0,0 +1,4 @@
+Portable SIMD module.
+
+This module offers a portable abstraction for SIMD operations
+that is not bound to any particular hardware architecture.
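A minimal sketch of the kind of code this module enables (illustrative only; assumes a nightly toolchain where the module is exposed as `std::simd` and the `f32x4` alias from `vector/float.rs`):

```
#![feature(portable_simd)]
use std::simd::f32x4;

fn main() {
    // The same lanewise code compiles to whatever vector instructions the
    // target provides (SSE/AVX, NEON, WASM SIMD, ...), or to scalar code.
    let a = f32x4::from_array([1.0, 2.0, 3.0, 4.0]);
    let b = f32x4::splat(10.0);
    assert_eq!((a * b).to_array(), [10.0, 20.0, 30.0, 40.0]);
}
```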
diff --git a/library/portable-simd/crates/core_simd/src/elements.rs b/library/portable-simd/crates/core_simd/src/elements.rs
new file mode 100644
index 000000000..701eb66b2
--- /dev/null
+++ b/library/portable-simd/crates/core_simd/src/elements.rs
@@ -0,0 +1,11 @@
+mod float;
+mod int;
+mod uint;
+
+mod sealed {
+ pub trait Sealed {}
+}
+
+pub use float::*;
+pub use int::*;
+pub use uint::*;
diff --git a/library/portable-simd/crates/core_simd/src/elements/float.rs b/library/portable-simd/crates/core_simd/src/elements/float.rs
new file mode 100644
index 000000000..d60223270
--- /dev/null
+++ b/library/portable-simd/crates/core_simd/src/elements/float.rs
@@ -0,0 +1,357 @@
+use super::sealed::Sealed;
+use crate::simd::{
+ intrinsics, LaneCount, Mask, Simd, SimdElement, SimdPartialEq, SimdPartialOrd,
+ SupportedLaneCount,
+};
+
+/// Operations on SIMD vectors of floats.
+pub trait SimdFloat: Copy + Sealed {
+ /// Mask type used for manipulating this SIMD vector type.
+ type Mask;
+
+ /// Scalar type contained by this SIMD vector type.
+ type Scalar;
+
+ /// Bit representation of this SIMD vector type.
+ type Bits;
+
+ /// Raw transmutation to an unsigned integer vector type with the
+ /// same size and number of lanes.
+ #[must_use = "method returns a new vector and does not mutate the original value"]
+ fn to_bits(self) -> Self::Bits;
+
+ /// Raw transmutation from an unsigned integer vector type with the
+ /// same size and number of lanes.
+ #[must_use = "method returns a new vector and does not mutate the original value"]
+ fn from_bits(bits: Self::Bits) -> Self;
+
+ /// Produces a vector where every lane has the absolute value of the
+ /// equivalently-indexed lane in `self`.
+ #[must_use = "method returns a new vector and does not mutate the original value"]
+ fn abs(self) -> Self;
+
+ /// Takes the reciprocal (inverse) of each lane, `1/x`.
+ #[must_use = "method returns a new vector and does not mutate the original value"]
+ fn recip(self) -> Self;
+
+ /// Converts each lane from radians to degrees.
+ #[must_use = "method returns a new vector and does not mutate the original value"]
+ fn to_degrees(self) -> Self;
+
+ /// Converts each lane from degrees to radians.
+ #[must_use = "method returns a new vector and does not mutate the original value"]
+ fn to_radians(self) -> Self;
+
+ /// Returns true for each lane if it has a positive sign, including
+ /// `+0.0`, `NaN`s with positive sign bit and positive infinity.
+ #[must_use = "method returns a new mask and does not mutate the original value"]
+ fn is_sign_positive(self) -> Self::Mask;
+
+ /// Returns true for each lane if it has a negative sign, including
+ /// `-0.0`, `NaN`s with negative sign bit and negative infinity.
+ #[must_use = "method returns a new mask and does not mutate the original value"]
+ fn is_sign_negative(self) -> Self::Mask;
+
+ /// Returns true for each lane if its value is `NaN`.
+ #[must_use = "method returns a new mask and does not mutate the original value"]
+ fn is_nan(self) -> Self::Mask;
+
+ /// Returns true for each lane if its value is positive infinity or negative infinity.
+ #[must_use = "method returns a new mask and does not mutate the original value"]
+ fn is_infinite(self) -> Self::Mask;
+
+ /// Returns true for each lane if its value is neither infinite nor `NaN`.
+ #[must_use = "method returns a new mask and does not mutate the original value"]
+ fn is_finite(self) -> Self::Mask;
+
+ /// Returns true for each lane if its value is subnormal.
+ #[must_use = "method returns a new mask and does not mutate the original value"]
+ fn is_subnormal(self) -> Self::Mask;
+
+ /// Returns true for each lane if its value is neither zero, infinite,
+ /// subnormal, nor `NaN`.
+ #[must_use = "method returns a new mask and does not mutate the original value"]
+ fn is_normal(self) -> Self::Mask;
+
+ /// Replaces each lane with a number that represents its sign.
+ ///
+ /// * `1.0` if the number is positive, `+0.0`, or `INFINITY`
+ /// * `-1.0` if the number is negative, `-0.0`, or `NEG_INFINITY`
+ /// * `NAN` if the number is `NAN`
+ #[must_use = "method returns a new vector and does not mutate the original value"]
+ fn signum(self) -> Self;
+
+ /// Returns each lane with the magnitude of `self` and the sign of `sign`.
+ ///
+ /// For any lane containing a `NAN`, a `NAN` with the sign of `sign` is returned.
+ #[must_use = "method returns a new vector and does not mutate the original value"]
+ fn copysign(self, sign: Self) -> Self;
+
+ /// Returns the minimum of each lane.
+ ///
+ /// If one of the values is `NAN`, then the other value is returned.
+ #[must_use = "method returns a new vector and does not mutate the original value"]
+ fn simd_min(self, other: Self) -> Self;
+
+ /// Returns the maximum of each lane.
+ ///
+ /// If one of the values is `NAN`, then the other value is returned.
+ #[must_use = "method returns a new vector and does not mutate the original value"]
+ fn simd_max(self, other: Self) -> Self;
+
+ /// Restrict each lane to a certain interval unless it is NaN.
+ ///
+ /// For each lane in `self`, returns the corresponding lane in `max` if the lane is
+ /// greater than `max`, and the corresponding lane in `min` if the lane is less
+ /// than `min`. Otherwise returns the lane in `self`.
+ #[must_use = "method returns a new vector and does not mutate the original value"]
+ fn simd_clamp(self, min: Self, max: Self) -> Self;
+
+ /// Returns the sum of the lanes of the vector.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #![feature(portable_simd)]
+ /// # #[cfg(feature = "as_crate")] use core_simd::simd;
+ /// # #[cfg(not(feature = "as_crate"))] use core::simd;
+ /// # use simd::{f32x2, SimdFloat};
+ /// let v = f32x2::from_array([1., 2.]);
+ /// assert_eq!(v.reduce_sum(), 3.);
+ /// ```
+ fn reduce_sum(self) -> Self::Scalar;
+
+ /// Reducing multiply. Returns the product of the lanes of the vector.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #![feature(portable_simd)]
+ /// # #[cfg(feature = "as_crate")] use core_simd::simd;
+ /// # #[cfg(not(feature = "as_crate"))] use core::simd;
+ /// # use simd::{f32x2, SimdFloat};
+ /// let v = f32x2::from_array([3., 4.]);
+ /// assert_eq!(v.reduce_product(), 12.);
+ /// ```
+ fn reduce_product(self) -> Self::Scalar;
+
+ /// Returns the maximum lane in the vector.
+ ///
+ /// Returns values based on equality, so a vector containing both `0.` and `-0.` may
+ /// return either.
+ ///
+ /// This function will not return `NaN` unless all lanes are `NaN`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #![feature(portable_simd)]
+ /// # #[cfg(feature = "as_crate")] use core_simd::simd;
+ /// # #[cfg(not(feature = "as_crate"))] use core::simd;
+ /// # use simd::{f32x2, SimdFloat};
+ /// let v = f32x2::from_array([1., 2.]);
+ /// assert_eq!(v.reduce_max(), 2.);
+ ///
+ /// // NaN values are skipped...
+ /// let v = f32x2::from_array([1., f32::NAN]);
+ /// assert_eq!(v.reduce_max(), 1.);
+ ///
+ /// // ...unless all values are NaN
+ /// let v = f32x2::from_array([f32::NAN, f32::NAN]);
+ /// assert!(v.reduce_max().is_nan());
+ /// ```
+ fn reduce_max(self) -> Self::Scalar;
+
+ /// Returns the minimum lane in the vector.
+ ///
+ /// Returns values based on equality, so a vector containing both `0.` and `-0.` may
+ /// return either.
+ ///
+ /// This function will not return `NaN` unless all lanes are `NaN`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #![feature(portable_simd)]
+ /// # #[cfg(feature = "as_crate")] use core_simd::simd;
+ /// # #[cfg(not(feature = "as_crate"))] use core::simd;
+ /// # use simd::{f32x2, SimdFloat};
+ /// let v = f32x2::from_array([3., 7.]);
+ /// assert_eq!(v.reduce_min(), 3.);
+ ///
+ /// // NaN values are skipped...
+ /// let v = f32x2::from_array([1., f32::NAN]);
+ /// assert_eq!(v.reduce_min(), 1.);
+ ///
+ /// // ...unless all values are NaN
+ /// let v = f32x2::from_array([f32::NAN, f32::NAN]);
+ /// assert!(v.reduce_min().is_nan());
+ /// ```
+ fn reduce_min(self) -> Self::Scalar;
+}
+
+macro_rules! impl_trait {
+ { $($ty:ty { bits: $bits_ty:ty, mask: $mask_ty:ty }),* } => {
+ $(
+ impl<const LANES: usize> Sealed for Simd<$ty, LANES>
+ where
+ LaneCount<LANES>: SupportedLaneCount,
+ {
+ }
+
+ impl<const LANES: usize> SimdFloat for Simd<$ty, LANES>
+ where
+ LaneCount<LANES>: SupportedLaneCount,
+ {
+ type Mask = Mask<<$mask_ty as SimdElement>::Mask, LANES>;
+ type Scalar = $ty;
+ type Bits = Simd<$bits_ty, LANES>;
+
+ #[inline]
+ fn to_bits(self) -> Simd<$bits_ty, LANES> {
+ assert_eq!(core::mem::size_of::<Self>(), core::mem::size_of::<Self::Bits>());
+ // Safety: transmuting between vector types is safe
+ unsafe { core::mem::transmute_copy(&self) }
+ }
+
+ #[inline]
+ fn from_bits(bits: Simd<$bits_ty, LANES>) -> Self {
+ assert_eq!(core::mem::size_of::<Self>(), core::mem::size_of::<Self::Bits>());
+ // Safety: transmuting between vector types is safe
+ unsafe { core::mem::transmute_copy(&bits) }
+ }
+
+ #[inline]
+ fn abs(self) -> Self {
+ // Safety: `self` is a float vector
+ unsafe { intrinsics::simd_fabs(self) }
+ }
+
+ #[inline]
+ fn recip(self) -> Self {
+ Self::splat(1.0) / self
+ }
+
+ #[inline]
+ fn to_degrees(self) -> Self {
+ // to_degrees uses a special constant for better precision, so extract that constant
+ self * Self::splat(Self::Scalar::to_degrees(1.))
+ }
+
+ #[inline]
+ fn to_radians(self) -> Self {
+ self * Self::splat(Self::Scalar::to_radians(1.))
+ }
+
+ #[inline]
+ fn is_sign_positive(self) -> Self::Mask {
+ !self.is_sign_negative()
+ }
+
+ #[inline]
+ fn is_sign_negative(self) -> Self::Mask {
+ let sign_bits = self.to_bits() & Simd::splat((!0 >> 1) + 1);
+ sign_bits.simd_gt(Simd::splat(0))
+ }
+
+ #[inline]
+ fn is_nan(self) -> Self::Mask {
+ self.simd_ne(self)
+ }
+
+ #[inline]
+ fn is_infinite(self) -> Self::Mask {
+ self.abs().simd_eq(Self::splat(Self::Scalar::INFINITY))
+ }
+
+ #[inline]
+ fn is_finite(self) -> Self::Mask {
+ self.abs().simd_lt(Self::splat(Self::Scalar::INFINITY))
+ }
+
+ #[inline]
+ fn is_subnormal(self) -> Self::Mask {
+ self.abs().simd_ne(Self::splat(0.0)) & (self.to_bits() & Self::splat(Self::Scalar::INFINITY).to_bits()).simd_eq(Simd::splat(0))
+ }
+
+ #[inline]
+ #[must_use = "method returns a new mask and does not mutate the original value"]
+ fn is_normal(self) -> Self::Mask {
+ !(self.abs().simd_eq(Self::splat(0.0)) | self.is_nan() | self.is_subnormal() | self.is_infinite())
+ }
+
+ #[inline]
+ fn signum(self) -> Self {
+ self.is_nan().select(Self::splat(Self::Scalar::NAN), Self::splat(1.0).copysign(self))
+ }
+
+ #[inline]
+ fn copysign(self, sign: Self) -> Self {
+ let sign_bit = sign.to_bits() & Self::splat(-0.).to_bits();
+ let magnitude = self.to_bits() & !Self::splat(-0.).to_bits();
+ Self::from_bits(sign_bit | magnitude)
+ }
+
+ #[inline]
+ fn simd_min(self, other: Self) -> Self {
+ // Safety: `self` and `other` are float vectors
+ unsafe { intrinsics::simd_fmin(self, other) }
+ }
+
+ #[inline]
+ fn simd_max(self, other: Self) -> Self {
+ // Safety: `self` and `other` are floating point vectors
+ unsafe { intrinsics::simd_fmax(self, other) }
+ }
+
+ #[inline]
+ fn simd_clamp(self, min: Self, max: Self) -> Self {
+ assert!(
+ min.simd_le(max).all(),
+ "each lane in `min` must be less than or equal to the corresponding lane in `max`",
+ );
+ let mut x = self;
+ x = x.simd_lt(min).select(min, x);
+ x = x.simd_gt(max).select(max, x);
+ x
+ }
+
+ #[inline]
+ fn reduce_sum(self) -> Self::Scalar {
+ // LLVM sum is inaccurate on i586
+ if cfg!(all(target_arch = "x86", not(target_feature = "sse2"))) {
+ self.as_array().iter().sum()
+ } else {
+ // Safety: `self` is a float vector
+ unsafe { intrinsics::simd_reduce_add_ordered(self, 0.) }
+ }
+ }
+
+ #[inline]
+ fn reduce_product(self) -> Self::Scalar {
+ // LLVM product is inaccurate on i586
+ if cfg!(all(target_arch = "x86", not(target_feature = "sse2"))) {
+ self.as_array().iter().product()
+ } else {
+ // Safety: `self` is a float vector
+ unsafe { intrinsics::simd_reduce_mul_ordered(self, 1.) }
+ }
+ }
+
+ #[inline]
+ fn reduce_max(self) -> Self::Scalar {
+ // Safety: `self` is a float vector
+ unsafe { intrinsics::simd_reduce_max(self) }
+ }
+
+ #[inline]
+ fn reduce_min(self) -> Self::Scalar {
+ // Safety: `self` is a float vector
+ unsafe { intrinsics::simd_reduce_min(self) }
+ }
+ }
+ )*
+ }
+}
+
+impl_trait! { f32 { bits: u32, mask: i32 }, f64 { bits: u64, mask: i64 } }
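A usage sketch of the `SimdFloat` operations defined above (illustrative only; assumes nightly `std::simd`):

```
#![feature(portable_simd)]
use std::simd::{f32x4, SimdFloat};

fn main() {
    let x = f32x4::from_array([1.0, -2.5, f32::NAN, 8.0]);
    // Lanewise queries return masks, not bools.
    assert!(x.is_nan().test(2));
    assert_eq!(x.abs().to_array()[1], 2.5);
    // Clamp every lane into [0.0, 4.0]; the NaN lane propagates unchanged.
    let clamped = x.simd_clamp(f32x4::splat(0.0), f32x4::splat(4.0));
    assert_eq!(clamped.to_array()[3], 4.0);
    // Horizontal reductions skip NaN lanes, per the docs above.
    assert_eq!(x.reduce_max(), 8.0);
}
```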
diff --git a/library/portable-simd/crates/core_simd/src/elements/int.rs b/library/portable-simd/crates/core_simd/src/elements/int.rs
new file mode 100644
index 000000000..9b8c37ed4
--- /dev/null
+++ b/library/portable-simd/crates/core_simd/src/elements/int.rs
@@ -0,0 +1,298 @@
+use super::sealed::Sealed;
+use crate::simd::{
+ intrinsics, LaneCount, Mask, Simd, SimdElement, SimdPartialOrd, SupportedLaneCount,
+};
+
+/// Operations on SIMD vectors of signed integers.
+pub trait SimdInt: Copy + Sealed {
+ /// Mask type used for manipulating this SIMD vector type.
+ type Mask;
+
+ /// Scalar type contained by this SIMD vector type.
+ type Scalar;
+
+ /// Lanewise saturating add.
+ ///
+ /// # Examples
+ /// ```
+ /// # #![feature(portable_simd)]
+ /// # #[cfg(feature = "as_crate")] use core_simd::simd;
+ /// # #[cfg(not(feature = "as_crate"))] use core::simd;
+ /// # use simd::{Simd, SimdInt};
+ /// use core::i32::{MIN, MAX};
+ /// let x = Simd::from_array([MIN, 0, 1, MAX]);
+ /// let max = Simd::splat(MAX);
+ /// let unsat = x + max;
+ /// let sat = x.saturating_add(max);
+ /// assert_eq!(unsat, Simd::from_array([-1, MAX, MIN, -2]));
+ /// assert_eq!(sat, Simd::from_array([-1, MAX, MAX, MAX]));
+ /// ```
+ fn saturating_add(self, second: Self) -> Self;
+
+ /// Lanewise saturating subtract.
+ ///
+ /// # Examples
+ /// ```
+ /// # #![feature(portable_simd)]
+ /// # #[cfg(feature = "as_crate")] use core_simd::simd;
+ /// # #[cfg(not(feature = "as_crate"))] use core::simd;
+ /// # use simd::{Simd, SimdInt};
+ /// use core::i32::{MIN, MAX};
+ /// let x = Simd::from_array([MIN, -2, -1, MAX]);
+ /// let max = Simd::splat(MAX);
+ /// let unsat = x - max;
+ /// let sat = x.saturating_sub(max);
+ /// assert_eq!(unsat, Simd::from_array([1, MAX, MIN, 0]));
+ /// assert_eq!(sat, Simd::from_array([MIN, MIN, MIN, 0]));
+ /// ```
+ fn saturating_sub(self, second: Self) -> Self;
+
+ /// Lanewise absolute value, implemented in Rust.
+ /// Every lane becomes its absolute value.
+ ///
+ /// # Examples
+ /// ```
+ /// # #![feature(portable_simd)]
+ /// # #[cfg(feature = "as_crate")] use core_simd::simd;
+ /// # #[cfg(not(feature = "as_crate"))] use core::simd;
+ /// # use simd::{Simd, SimdInt};
+ /// use core::i32::{MIN, MAX};
+ /// let xs = Simd::from_array([MIN, MIN + 1, -5, 0]);
+ /// assert_eq!(xs.abs(), Simd::from_array([MIN, MAX, 5, 0]));
+ /// ```
+ fn abs(self) -> Self;
+
+ /// Lanewise saturating absolute value, implemented in Rust.
+ /// As abs(), except the MIN value becomes MAX instead of itself.
+ ///
+ /// # Examples
+ /// ```
+ /// # #![feature(portable_simd)]
+ /// # #[cfg(feature = "as_crate")] use core_simd::simd;
+ /// # #[cfg(not(feature = "as_crate"))] use core::simd;
+ /// # use simd::{Simd, SimdInt};
+ /// use core::i32::{MIN, MAX};
+ /// let xs = Simd::from_array([MIN, -2, 0, 3]);
+ /// let unsat = xs.abs();
+ /// let sat = xs.saturating_abs();
+ /// assert_eq!(unsat, Simd::from_array([MIN, 2, 0, 3]));
+ /// assert_eq!(sat, Simd::from_array([MAX, 2, 0, 3]));
+ /// ```
+ fn saturating_abs(self) -> Self;
+
+ /// Lanewise saturating negation, implemented in Rust.
+ /// As neg(), except the MIN value becomes MAX instead of itself.
+ ///
+ /// # Examples
+ /// ```
+ /// # #![feature(portable_simd)]
+ /// # #[cfg(feature = "as_crate")] use core_simd::simd;
+ /// # #[cfg(not(feature = "as_crate"))] use core::simd;
+ /// # use simd::{Simd, SimdInt};
+ /// use core::i32::{MIN, MAX};
+ /// let x = Simd::from_array([MIN, -2, 3, MAX]);
+ /// let unsat = -x;
+ /// let sat = x.saturating_neg();
+ /// assert_eq!(unsat, Simd::from_array([MIN, 2, -3, MIN + 1]));
+ /// assert_eq!(sat, Simd::from_array([MAX, 2, -3, MIN + 1]));
+ /// ```
+ fn saturating_neg(self) -> Self;
+
+ /// Returns true for each positive lane and false if it is zero or negative.
+ fn is_positive(self) -> Self::Mask;
+
+ /// Returns true for each negative lane and false if it is zero or positive.
+ fn is_negative(self) -> Self::Mask;
+
+ /// Returns numbers representing the sign of each lane.
+ /// * `0` if the number is zero
+ /// * `1` if the number is positive
+ /// * `-1` if the number is negative
+ fn signum(self) -> Self;
+
+ /// Returns the sum of the lanes of the vector, with wrapping addition.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #![feature(portable_simd)]
+ /// # #[cfg(feature = "as_crate")] use core_simd::simd;
+ /// # #[cfg(not(feature = "as_crate"))] use core::simd;
+ /// # use simd::{i32x4, SimdInt};
+ /// let v = i32x4::from_array([1, 2, 3, 4]);
+ /// assert_eq!(v.reduce_sum(), 10);
+ ///
+ /// // SIMD integer addition is always wrapping
+ /// let v = i32x4::from_array([i32::MAX, 1, 0, 0]);
+ /// assert_eq!(v.reduce_sum(), i32::MIN);
+ /// ```
+ fn reduce_sum(self) -> Self::Scalar;
+
+ /// Returns the product of the lanes of the vector, with wrapping multiplication.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #![feature(portable_simd)]
+ /// # #[cfg(feature = "as_crate")] use core_simd::simd;
+ /// # #[cfg(not(feature = "as_crate"))] use core::simd;
+ /// # use simd::{i32x4, SimdInt};
+ /// let v = i32x4::from_array([1, 2, 3, 4]);
+ /// assert_eq!(v.reduce_product(), 24);
+ ///
+ /// // SIMD integer multiplication is always wrapping
+ /// let v = i32x4::from_array([i32::MAX, 2, 1, 1]);
+ /// assert!(v.reduce_product() < i32::MAX);
+ /// ```
+ fn reduce_product(self) -> Self::Scalar;
+
+ /// Returns the maximum lane in the vector.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #![feature(portable_simd)]
+ /// # #[cfg(feature = "as_crate")] use core_simd::simd;
+ /// # #[cfg(not(feature = "as_crate"))] use core::simd;
+ /// # use simd::{i32x4, SimdInt};
+ /// let v = i32x4::from_array([1, 2, 3, 4]);
+ /// assert_eq!(v.reduce_max(), 4);
+ /// ```
+ fn reduce_max(self) -> Self::Scalar;
+
+ /// Returns the minimum lane in the vector.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #![feature(portable_simd)]
+ /// # #[cfg(feature = "as_crate")] use core_simd::simd;
+ /// # #[cfg(not(feature = "as_crate"))] use core::simd;
+ /// # use simd::{i32x4, SimdInt};
+ /// let v = i32x4::from_array([1, 2, 3, 4]);
+ /// assert_eq!(v.reduce_min(), 1);
+ /// ```
+ fn reduce_min(self) -> Self::Scalar;
+
+ /// Returns the cumulative bitwise "and" across the lanes of the vector.
+ fn reduce_and(self) -> Self::Scalar;
+
+ /// Returns the cumulative bitwise "or" across the lanes of the vector.
+ fn reduce_or(self) -> Self::Scalar;
+
+ /// Returns the cumulative bitwise "xor" across the lanes of the vector.
+ fn reduce_xor(self) -> Self::Scalar;
+}
+
+macro_rules! impl_trait {
+ { $($ty:ty),* } => {
+ $(
+ impl<const LANES: usize> Sealed for Simd<$ty, LANES>
+ where
+ LaneCount<LANES>: SupportedLaneCount,
+ {
+ }
+
+ impl<const LANES: usize> SimdInt for Simd<$ty, LANES>
+ where
+ LaneCount<LANES>: SupportedLaneCount,
+ {
+ type Mask = Mask<<$ty as SimdElement>::Mask, LANES>;
+ type Scalar = $ty;
+
+ #[inline]
+ fn saturating_add(self, second: Self) -> Self {
+ // Safety: `self` is a vector
+ unsafe { intrinsics::simd_saturating_add(self, second) }
+ }
+
+ #[inline]
+ fn saturating_sub(self, second: Self) -> Self {
+ // Safety: `self` is a vector
+ unsafe { intrinsics::simd_saturating_sub(self, second) }
+ }
+
+ #[inline]
+ fn abs(self) -> Self {
+ const SHR: $ty = <$ty>::BITS as $ty - 1;
+ let m = self >> Simd::splat(SHR);
+ (self^m) - m
+ }
+
+ #[inline]
+ fn saturating_abs(self) -> Self {
+ // arith shift for -1 or 0 mask based on sign bit, giving 2s complement
+ const SHR: $ty = <$ty>::BITS as $ty - 1;
+ let m = self >> Simd::splat(SHR);
+ (self^m).saturating_sub(m)
+ }
+
+ #[inline]
+ fn saturating_neg(self) -> Self {
+ Self::splat(0).saturating_sub(self)
+ }
+
+ #[inline]
+ fn is_positive(self) -> Self::Mask {
+ self.simd_gt(Self::splat(0))
+ }
+
+ #[inline]
+ fn is_negative(self) -> Self::Mask {
+ self.simd_lt(Self::splat(0))
+ }
+
+ #[inline]
+ fn signum(self) -> Self {
+ self.is_positive().select(
+ Self::splat(1),
+ self.is_negative().select(Self::splat(-1), Self::splat(0))
+ )
+ }
+
+ #[inline]
+ fn reduce_sum(self) -> Self::Scalar {
+ // Safety: `self` is an integer vector
+ unsafe { intrinsics::simd_reduce_add_ordered(self, 0) }
+ }
+
+ #[inline]
+ fn reduce_product(self) -> Self::Scalar {
+ // Safety: `self` is an integer vector
+ unsafe { intrinsics::simd_reduce_mul_ordered(self, 1) }
+ }
+
+ #[inline]
+ fn reduce_max(self) -> Self::Scalar {
+ // Safety: `self` is an integer vector
+ unsafe { intrinsics::simd_reduce_max(self) }
+ }
+
+ #[inline]
+ fn reduce_min(self) -> Self::Scalar {
+ // Safety: `self` is an integer vector
+ unsafe { intrinsics::simd_reduce_min(self) }
+ }
+
+ #[inline]
+ fn reduce_and(self) -> Self::Scalar {
+ // Safety: `self` is an integer vector
+ unsafe { intrinsics::simd_reduce_and(self) }
+ }
+
+ #[inline]
+ fn reduce_or(self) -> Self::Scalar {
+ // Safety: `self` is an integer vector
+ unsafe { intrinsics::simd_reduce_or(self) }
+ }
+
+ #[inline]
+ fn reduce_xor(self) -> Self::Scalar {
+ // Safety: `self` is an integer vector
+ unsafe { intrinsics::simd_reduce_xor(self) }
+ }
+ }
+ )*
+ }
+}
+
+impl_trait! { i8, i16, i32, i64, isize }
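A usage sketch of the `SimdInt` operations above (illustrative only; assumes nightly `std::simd`):

```
#![feature(portable_simd)]
use std::simd::{i32x4, SimdInt};

fn main() {
    let v = i32x4::from_array([i32::MIN, -7, 0, i32::MAX]);
    // Saturating arithmetic clamps at the type's bounds instead of wrapping.
    assert_eq!(
        v.saturating_add(i32x4::splat(1)).to_array(),
        [i32::MIN + 1, -6, 1, i32::MAX]
    );
    // signum maps each lane to -1, 0, or 1.
    assert_eq!(v.signum().to_array(), [-1, -1, 0, 1]);
    // Reductions fold all lanes into one scalar; integer sums wrap on overflow.
    assert_eq!(i32x4::from_array([1, 2, 3, 4]).reduce_sum(), 10);
}
```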
diff --git a/library/portable-simd/crates/core_simd/src/elements/uint.rs b/library/portable-simd/crates/core_simd/src/elements/uint.rs
new file mode 100644
index 000000000..21e7e76eb
--- /dev/null
+++ b/library/portable-simd/crates/core_simd/src/elements/uint.rs
@@ -0,0 +1,139 @@
+use super::sealed::Sealed;
+use crate::simd::{intrinsics, LaneCount, Simd, SupportedLaneCount};
+
+/// Operations on SIMD vectors of unsigned integers.
+pub trait SimdUint: Copy + Sealed {
+ /// Scalar type contained by this SIMD vector type.
+ type Scalar;
+
+ /// Lanewise saturating add.
+ ///
+ /// # Examples
+ /// ```
+ /// # #![feature(portable_simd)]
+ /// # #[cfg(feature = "as_crate")] use core_simd::simd;
+ /// # #[cfg(not(feature = "as_crate"))] use core::simd;
+ /// # use simd::{Simd, SimdUint};
+ /// use core::u32::MAX;
+ /// let x = Simd::from_array([2, 1, 0, MAX]);
+ /// let max = Simd::splat(MAX);
+ /// let unsat = x + max;
+ /// let sat = x.saturating_add(max);
+ /// assert_eq!(unsat, Simd::from_array([1, 0, MAX, MAX - 1]));
+ /// assert_eq!(sat, max);
+ /// ```
+ fn saturating_add(self, second: Self) -> Self;
+
+ /// Lanewise saturating subtract.
+ ///
+ /// # Examples
+ /// ```
+ /// # #![feature(portable_simd)]
+ /// # #[cfg(feature = "as_crate")] use core_simd::simd;
+ /// # #[cfg(not(feature = "as_crate"))] use core::simd;
+ /// # use simd::{Simd, SimdUint};
+ /// use core::u32::MAX;
+ /// let x = Simd::from_array([2, 1, 0, MAX]);
+ /// let max = Simd::splat(MAX);
+ /// let unsat = x - max;
+ /// let sat = x.saturating_sub(max);
+ /// assert_eq!(unsat, Simd::from_array([3, 2, 1, 0]));
+ /// assert_eq!(sat, Simd::splat(0));
+ /// ```
+ fn saturating_sub(self, second: Self) -> Self;
+
+ /// Returns the sum of the lanes of the vector, with wrapping addition.
+ fn reduce_sum(self) -> Self::Scalar;
+
+ /// Returns the product of the lanes of the vector, with wrapping multiplication.
+ fn reduce_product(self) -> Self::Scalar;
+
+ /// Returns the maximum lane in the vector.
+ fn reduce_max(self) -> Self::Scalar;
+
+ /// Returns the minimum lane in the vector.
+ fn reduce_min(self) -> Self::Scalar;
+
+ /// Returns the cumulative bitwise "and" across the lanes of the vector.
+ fn reduce_and(self) -> Self::Scalar;
+
+ /// Returns the cumulative bitwise "or" across the lanes of the vector.
+ fn reduce_or(self) -> Self::Scalar;
+
+ /// Returns the cumulative bitwise "xor" across the lanes of the vector.
+ fn reduce_xor(self) -> Self::Scalar;
+}
+
+macro_rules! impl_trait {
+ { $($ty:ty),* } => {
+ $(
+ impl<const LANES: usize> Sealed for Simd<$ty, LANES>
+ where
+ LaneCount<LANES>: SupportedLaneCount,
+ {
+ }
+
+ impl<const LANES: usize> SimdUint for Simd<$ty, LANES>
+ where
+ LaneCount<LANES>: SupportedLaneCount,
+ {
+ type Scalar = $ty;
+
+ #[inline]
+ fn saturating_add(self, second: Self) -> Self {
+ // Safety: `self` is a vector
+ unsafe { intrinsics::simd_saturating_add(self, second) }
+ }
+
+ #[inline]
+ fn saturating_sub(self, second: Self) -> Self {
+ // Safety: `self` is a vector
+ unsafe { intrinsics::simd_saturating_sub(self, second) }
+ }
+
+ #[inline]
+ fn reduce_sum(self) -> Self::Scalar {
+ // Safety: `self` is an integer vector
+ unsafe { intrinsics::simd_reduce_add_ordered(self, 0) }
+ }
+
+ #[inline]
+ fn reduce_product(self) -> Self::Scalar {
+ // Safety: `self` is an integer vector
+ unsafe { intrinsics::simd_reduce_mul_ordered(self, 1) }
+ }
+
+ #[inline]
+ fn reduce_max(self) -> Self::Scalar {
+ // Safety: `self` is an integer vector
+ unsafe { intrinsics::simd_reduce_max(self) }
+ }
+
+ #[inline]
+ fn reduce_min(self) -> Self::Scalar {
+ // Safety: `self` is an integer vector
+ unsafe { intrinsics::simd_reduce_min(self) }
+ }
+
+ #[inline]
+ fn reduce_and(self) -> Self::Scalar {
+ // Safety: `self` is an integer vector
+ unsafe { intrinsics::simd_reduce_and(self) }
+ }
+
+ #[inline]
+ fn reduce_or(self) -> Self::Scalar {
+ // Safety: `self` is an integer vector
+ unsafe { intrinsics::simd_reduce_or(self) }
+ }
+
+ #[inline]
+ fn reduce_xor(self) -> Self::Scalar {
+ // Safety: `self` is an integer vector
+ unsafe { intrinsics::simd_reduce_xor(self) }
+ }
+ }
+ )*
+ }
+}
+
+impl_trait! { u8, u16, u32, u64, usize }
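And the unsigned counterpart (illustrative only):

```
#![feature(portable_simd)]
use std::simd::{u32x4, SimdUint};

fn main() {
    let x = u32x4::from_array([3, 0, 7, u32::MAX]);
    // Unsigned saturating subtraction floors at zero instead of wrapping around.
    assert_eq!(
        x.saturating_sub(u32x4::splat(5)).to_array(),
        [0, 0, 2, u32::MAX - 5]
    );
    // Bitwise and min/max reductions fold every lane with the given operation.
    assert_eq!(x.reduce_or(), u32::MAX);
    assert_eq!(x.reduce_min(), 0);
}
```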
diff --git a/library/portable-simd/crates/core_simd/src/eq.rs b/library/portable-simd/crates/core_simd/src/eq.rs
new file mode 100644
index 000000000..c7111f720
--- /dev/null
+++ b/library/portable-simd/crates/core_simd/src/eq.rs
@@ -0,0 +1,73 @@
+use crate::simd::{intrinsics, LaneCount, Mask, Simd, SimdElement, SupportedLaneCount};
+
+/// Parallel `PartialEq`.
+pub trait SimdPartialEq {
+ /// The mask type returned by each comparison.
+ type Mask;
+
+ /// Test if each lane is equal to the corresponding lane in `other`.
+ #[must_use = "method returns a new mask and does not mutate the original value"]
+ fn simd_eq(self, other: Self) -> Self::Mask;
+
+ /// Test if each lane is not equal to the corresponding lane in `other`.
+ #[must_use = "method returns a new mask and does not mutate the original value"]
+ fn simd_ne(self, other: Self) -> Self::Mask;
+}
+
+macro_rules! impl_number {
+ { $($number:ty),* } => {
+ $(
+ impl<const LANES: usize> SimdPartialEq for Simd<$number, LANES>
+ where
+ LaneCount<LANES>: SupportedLaneCount,
+ {
+ type Mask = Mask<<$number as SimdElement>::Mask, LANES>;
+
+ #[inline]
+ fn simd_eq(self, other: Self) -> Self::Mask {
+ // Safety: `self` is a vector, and the result of the comparison
+ // is always a valid mask.
+ unsafe { Mask::from_int_unchecked(intrinsics::simd_eq(self, other)) }
+ }
+
+ #[inline]
+ fn simd_ne(self, other: Self) -> Self::Mask {
+ // Safety: `self` is a vector, and the result of the comparison
+ // is always a valid mask.
+ unsafe { Mask::from_int_unchecked(intrinsics::simd_ne(self, other)) }
+ }
+ }
+ )*
+ }
+}
+
+impl_number! { f32, f64, u8, u16, u32, u64, usize, i8, i16, i32, i64, isize }
+
+macro_rules! impl_mask {
+ { $($integer:ty),* } => {
+ $(
+ impl<const LANES: usize> SimdPartialEq for Mask<$integer, LANES>
+ where
+ LaneCount<LANES>: SupportedLaneCount,
+ {
+ type Mask = Self;
+
+ #[inline]
+ fn simd_eq(self, other: Self) -> Self::Mask {
+ // Safety: `self` is a vector, and the result of the comparison
+ // is always a valid mask.
+ unsafe { Self::from_int_unchecked(intrinsics::simd_eq(self.to_int(), other.to_int())) }
+ }
+
+ #[inline]
+ fn simd_ne(self, other: Self) -> Self::Mask {
+ // Safety: `self` is a vector, and the result of the comparison
+ // is always a valid mask.
+ unsafe { Self::from_int_unchecked(intrinsics::simd_ne(self.to_int(), other.to_int())) }
+ }
+ }
+ )*
+ }
+}
+
+impl_mask! { i8, i16, i32, i64, isize }
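Lanewise equality returns a mask rather than a single `bool`; a sketch (illustrative only; `Mask::select` is defined in `select.rs`, listed in the diffstat above):

```
#![feature(portable_simd)]
use std::simd::{i32x4, SimdPartialEq};

fn main() {
    let a = i32x4::from_array([1, 2, 3, 4]);
    let b = i32x4::from_array([1, 0, 3, 0]);
    // simd_eq/simd_ne compare lane by lane and produce a Mask.
    let eq = a.simd_eq(b);
    assert_eq!(eq.to_array(), [true, false, true, false]);
    // The mask can then drive lanewise selection.
    assert_eq!(eq.select(a, i32x4::splat(-1)).to_array(), [1, -1, 3, -1]);
}
```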
diff --git a/library/portable-simd/crates/core_simd/src/fmt.rs b/library/portable-simd/crates/core_simd/src/fmt.rs
new file mode 100644
index 000000000..dbd9839c4
--- /dev/null
+++ b/library/portable-simd/crates/core_simd/src/fmt.rs
@@ -0,0 +1,39 @@
+use crate::simd::{LaneCount, Simd, SimdElement, SupportedLaneCount};
+use core::fmt;
+
+macro_rules! impl_fmt_trait {
+ { $($trait:ident,)* } => {
+ $(
+ impl<T, const LANES: usize> fmt::$trait for Simd<T, LANES>
+ where
+ LaneCount<LANES>: SupportedLaneCount,
+ T: SimdElement + fmt::$trait,
+ {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ #[repr(transparent)]
+ struct Wrapper<'a, T: fmt::$trait>(&'a T);
+
+ impl<T: fmt::$trait> fmt::Debug for Wrapper<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.0.fmt(f)
+ }
+ }
+
+ f.debug_list()
+ .entries(self.as_array().iter().map(|x| Wrapper(x)))
+ .finish()
+ }
+ }
+ )*
+ }
+}
+
+impl_fmt_trait! {
+ Debug,
+ Binary,
+ LowerExp,
+ UpperExp,
+ Octal,
+ LowerHex,
+ UpperHex,
+}
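The formatting impls forward each lane to the element's implementation, so the alternative notations work lanewise too (illustrative only):

```
#![feature(portable_simd)]
use std::simd::u32x4;

fn main() {
    let v = u32x4::from_array([1, 2, 3, 255]);
    // Debug prints the lanes as a list...
    assert_eq!(format!("{:?}", v), "[1, 2, 3, 255]");
    // ...and LowerHex (and friends) format each lane in that notation.
    assert_eq!(format!("{:x}", v), "[1, 2, 3, ff]");
}
```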
diff --git a/library/portable-simd/crates/core_simd/src/intrinsics.rs b/library/portable-simd/crates/core_simd/src/intrinsics.rs
new file mode 100644
index 000000000..962c83a78
--- /dev/null
+++ b/library/portable-simd/crates/core_simd/src/intrinsics.rs
@@ -0,0 +1,153 @@
+//! This module contains the LLVM intrinsics bindings that provide the functionality for this
+//! crate.
+//!
+//! The LLVM assembly language is documented here: <https://llvm.org/docs/LangRef.html>
+//!
+//! A quick glossary of jargon that may appear in this module, mostly paraphrasing LLVM's LangRef:
+//! - poison: "undefined behavior as a value". specifically, it is like uninit memory (such as padding bytes). it is "safe" to create poison, BUT
+//! poison MUST NOT be observed from safe code, as operations on poison return poison, like NaN. unlike NaN, which has defined comparisons,
+//! poison is neither true nor false, and LLVM may also convert it to undef (at which point it is both). so, it can't be conditioned on, either.
+//! - undef: "a value that is every value". functionally like poison, insofar as Rust is concerned. poison may become this. note:
+//! this means that division by poison or undef is like division by zero, which means it inflicts...
+//! - "UB": poison and undef cover most of what people call "UB". "UB" means this operation immediately invalidates the program:
+//! LLVM is allowed to lower it to `ud2` or other opcodes that may cause an illegal instruction exception, and this is the "good end".
+//! The "bad end" is that LLVM may reverse time to the moment control flow diverged on a path towards undefined behavior,
+//! and destroy the other branch, potentially deleting safe code and violating Rust's `unsafe` contract.
+//!
+//! Note that according to LLVM, vectors are not arrays, but they are equivalent when stored to and loaded from memory.
+//!
+//! Unless stated otherwise, all intrinsics for binary operations require SIMD vectors of equal types and lengths.
+
+// These intrinsics aren't linked directly from LLVM and are mostly undocumented; however, they are
+// lowered to the matching LLVM instructions by the compiler in a fairly straightforward manner.
+// The associated LLVM instruction or intrinsic is documented alongside each Rust intrinsic function.
+extern "platform-intrinsic" {
+ /// add/fadd
+ pub(crate) fn simd_add<T>(x: T, y: T) -> T;
+
+ /// sub/fsub
+ pub(crate) fn simd_sub<T>(lhs: T, rhs: T) -> T;
+
+ /// mul/fmul
+ pub(crate) fn simd_mul<T>(x: T, y: T) -> T;
+
+ /// udiv/sdiv/fdiv
+ /// ints and uints: {s,u}div incur UB if division by zero occurs.
+ /// ints: sdiv is UB for int::MIN / -1.
+ /// floats: fdiv is never UB, but may create NaNs or infinities.
+ pub(crate) fn simd_div<T>(lhs: T, rhs: T) -> T;
+
+ /// urem/srem/frem
+ /// ints and uints: {s,u}rem incur UB if division by zero occurs.
+ /// ints: srem is UB for int::MIN / -1.
+ /// floats: frem is equivalent to libm::fmod in the "default" floating point environment, sans errno.
+ pub(crate) fn simd_rem<T>(lhs: T, rhs: T) -> T;
+
+ /// shl
+ /// for (u)ints. poison if rhs >= lhs::BITS
+ pub(crate) fn simd_shl<T>(lhs: T, rhs: T) -> T;
+
+ /// ints: ashr
+ /// uints: lshr
+ /// poison if rhs >= lhs::BITS
+ pub(crate) fn simd_shr<T>(lhs: T, rhs: T) -> T;
+
+ /// and
+ pub(crate) fn simd_and<T>(x: T, y: T) -> T;
+
+ /// or
+ pub(crate) fn simd_or<T>(x: T, y: T) -> T;
+
+ /// xor
+ pub(crate) fn simd_xor<T>(x: T, y: T) -> T;
+
+ /// getelementptr (without inbounds)
+ pub(crate) fn simd_arith_offset<T, U>(ptrs: T, offsets: U) -> T;
+
+ /// fptoui/fptosi/uitofp/sitofp
+ /// casting floats to integers is truncating, so it is safe to convert values like 1.5,
+ /// but the truncated value must fit in the target type or the result is poison.
+ /// use `simd_as` instead for a cast that performs a saturating conversion.
+ pub(crate) fn simd_cast<T, U>(x: T) -> U;
+ /// follows Rust's `T as U` semantics, including saturating float casts
+ /// which amounts to the same as `simd_cast` for many cases
+ pub(crate) fn simd_as<T, U>(x: T) -> U;
+
+ /// neg/fneg
+ /// ints: ultimately becomes a call to cg_ssa's BuilderMethods::neg. cg_llvm equates this to `simd_sub(Simd::splat(0), x)`.
+ /// floats: LLVM's fneg, which changes the floating point sign bit. Some arches have instructions for it.
+ /// Rust panics for Neg::neg(int::MIN) due to overflow, but it is not UB in LLVM without `nsw`.
+ pub(crate) fn simd_neg<T>(x: T) -> T;
+
+ /// fabs
+ pub(crate) fn simd_fabs<T>(x: T) -> T;
+
+ // minnum/maxnum
+ pub(crate) fn simd_fmin<T>(x: T, y: T) -> T;
+ pub(crate) fn simd_fmax<T>(x: T, y: T) -> T;
+
+ // these return Simd<int, N> with the same BITS size as the inputs
+ pub(crate) fn simd_eq<T, U>(x: T, y: T) -> U;
+ pub(crate) fn simd_ne<T, U>(x: T, y: T) -> U;
+ pub(crate) fn simd_lt<T, U>(x: T, y: T) -> U;
+ pub(crate) fn simd_le<T, U>(x: T, y: T) -> U;
+ pub(crate) fn simd_gt<T, U>(x: T, y: T) -> U;
+ pub(crate) fn simd_ge<T, U>(x: T, y: T) -> U;
+
+ // shufflevector
+ // idx: LLVM calls it a "shuffle mask vector constant", a vector of i32s
+ pub(crate) fn simd_shuffle<T, U, V>(x: T, y: T, idx: U) -> V;
+
+ /// llvm.masked.gather
+ /// like a loop of pointer reads
+ /// val: vector of values to select if a lane is masked
+ /// ptr: vector of pointers to read from
+ /// mask: a "wide" mask of integers, selects as if simd_select(mask, read(ptr), val)
+ /// note, the LLVM intrinsic accepts a mask vector of <N x i1>
+ /// FIXME: review this if/when we fix up our mask story in general?
+ pub(crate) fn simd_gather<T, U, V>(val: T, ptr: U, mask: V) -> T;
+ /// llvm.masked.scatter
+ /// like gather, but more spicy, as it writes instead of reads
+ pub(crate) fn simd_scatter<T, U, V>(val: T, ptr: U, mask: V);
+
+ // {s,u}add.sat
+ pub(crate) fn simd_saturating_add<T>(x: T, y: T) -> T;
+
+ // {s,u}sub.sat
+ pub(crate) fn simd_saturating_sub<T>(lhs: T, rhs: T) -> T;
+
+ // reductions
+ // llvm.vector.reduce.{add,fadd}
+ pub(crate) fn simd_reduce_add_ordered<T, U>(x: T, y: U) -> U;
+ // llvm.vector.reduce.{mul,fmul}
+ pub(crate) fn simd_reduce_mul_ordered<T, U>(x: T, y: U) -> U;
+ #[allow(unused)]
+ pub(crate) fn simd_reduce_all<T>(x: T) -> bool;
+ #[allow(unused)]
+ pub(crate) fn simd_reduce_any<T>(x: T) -> bool;
+ pub(crate) fn simd_reduce_max<T, U>(x: T) -> U;
+ pub(crate) fn simd_reduce_min<T, U>(x: T) -> U;
+ pub(crate) fn simd_reduce_and<T, U>(x: T) -> U;
+ pub(crate) fn simd_reduce_or<T, U>(x: T) -> U;
+ pub(crate) fn simd_reduce_xor<T, U>(x: T) -> U;
+
+ // truncate integer vector to bitmask
+ // `fn simd_bitmask(vector) -> unsigned integer` takes a vector of integers and
+ // returns either an unsigned integer or array of `u8`.
+ // Every element in the vector becomes a single bit in the returned bitmask.
+ // If the vector has fewer than 8 lanes, a u8 is returned with zeroed trailing bits.
+ // The bit order of the result depends on the byte endianness. LSB-first for little
+ // endian and MSB-first for big endian.
+ //
+ // UB if called on a vector with values other than 0 and -1.
+ #[allow(unused)]
+ pub(crate) fn simd_bitmask<T, U>(x: T) -> U;
+
+ // select
+ // first argument is a vector of integers, -1 (all bits 1) is "true"
+ // logically equivalent to (yes & m) | (no & (m ^ -1)),
+ // but you can use it on floats.
+ pub(crate) fn simd_select<M, T>(m: M, yes: T, no: T) -> T;
+ #[allow(unused)]
+ pub(crate) fn simd_select_bitmask<M, T>(m: M, yes: T, no: T) -> T;
+}
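These bindings are `pub(crate)`; the safe wrappers elsewhere in the crate are responsible for ruling out the poison/UB cases listed above before reaching an intrinsic. A hypothetical guard in that spirit (illustrative only; it uses the public shift operator rather than the raw intrinsic, and `checked_shl` is not part of the patch):

```
#![feature(portable_simd)]
use std::simd::u32x4;

// Hypothetical helper: reject shift counts the notes above call out as poison
// (rhs >= the element's bit width) before any shift is performed.
fn checked_shl(x: u32x4, shift: u32x4) -> Option<u32x4> {
    if shift.to_array().iter().any(|&s| s >= u32::BITS) {
        None
    } else {
        Some(x << shift)
    }
}

fn main() {
    assert_eq!(checked_shl(u32x4::splat(1), u32x4::splat(3)), Some(u32x4::splat(8)));
    assert_eq!(checked_shl(u32x4::splat(1), u32x4::splat(32)), None);
}
```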
diff --git a/library/portable-simd/crates/core_simd/src/iter.rs b/library/portable-simd/crates/core_simd/src/iter.rs
new file mode 100644
index 000000000..3275b4db8
--- /dev/null
+++ b/library/portable-simd/crates/core_simd/src/iter.rs
@@ -0,0 +1,58 @@
+use crate::simd::{LaneCount, Simd, SupportedLaneCount};
+use core::{
+ iter::{Product, Sum},
+ ops::{Add, Mul},
+};
+
+macro_rules! impl_traits {
+ { $type:ty } => {
+ impl<const LANES: usize> Sum<Self> for Simd<$type, LANES>
+ where
+ LaneCount<LANES>: SupportedLaneCount,
+ {
+ fn sum<I: Iterator<Item = Self>>(iter: I) -> Self {
+ iter.fold(Simd::splat(0 as $type), Add::add)
+ }
+ }
+
+ impl<const LANES: usize> Product<Self> for Simd<$type, LANES>
+ where
+ LaneCount<LANES>: SupportedLaneCount,
+ {
+ fn product<I: Iterator<Item = Self>>(iter: I) -> Self {
+ iter.fold(Simd::splat(1 as $type), Mul::mul)
+ }
+ }
+
+ impl<'a, const LANES: usize> Sum<&'a Self> for Simd<$type, LANES>
+ where
+ LaneCount<LANES>: SupportedLaneCount,
+ {
+ fn sum<I: Iterator<Item = &'a Self>>(iter: I) -> Self {
+ iter.fold(Simd::splat(0 as $type), Add::add)
+ }
+ }
+
+ impl<'a, const LANES: usize> Product<&'a Self> for Simd<$type, LANES>
+ where
+ LaneCount<LANES>: SupportedLaneCount,
+ {
+ fn product<I: Iterator<Item = &'a Self>>(iter: I) -> Self {
+ iter.fold(Simd::splat(1 as $type), Mul::mul)
+ }
+ }
+ }
+}
+
+impl_traits! { f32 }
+impl_traits! { f64 }
+impl_traits! { u8 }
+impl_traits! { u16 }
+impl_traits! { u32 }
+impl_traits! { u64 }
+impl_traits! { usize }
+impl_traits! { i8 }
+impl_traits! { i16 }
+impl_traits! { i32 }
+impl_traits! { i64 }
+impl_traits! { isize }
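`Sum` and `Product` fold an iterator of vectors lanewise, starting from `splat(0)` and `splat(1)` respectively (illustrative only):

```
#![feature(portable_simd)]
use std::simd::f32x4;

fn main() {
    let chunks = [
        f32x4::from_array([1.0, 2.0, 3.0, 4.0]),
        f32x4::from_array([5.0, 6.0, 7.0, 8.0]),
    ];
    // The `Sum<&Self>` impl lets us sum borrowed vectors directly.
    let total: f32x4 = chunks.iter().sum();
    assert_eq!(total.to_array(), [6.0, 8.0, 10.0, 12.0]);
}
```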
diff --git a/library/portable-simd/crates/core_simd/src/lane_count.rs b/library/portable-simd/crates/core_simd/src/lane_count.rs
new file mode 100644
index 000000000..63723e2ec
--- /dev/null
+++ b/library/portable-simd/crates/core_simd/src/lane_count.rs
@@ -0,0 +1,46 @@
+mod sealed {
+ pub trait Sealed {}
+}
+use sealed::Sealed;
+
+/// Specifies the number of lanes in a SIMD vector as a type.
+pub struct LaneCount<const LANES: usize>;
+
+impl<const LANES: usize> LaneCount<LANES> {
+ /// The number of bytes in a bitmask with this many lanes.
+ pub const BITMASK_LEN: usize = (LANES + 7) / 8;
+}
+
+/// Statically guarantees that a lane count is marked as supported.
+///
+/// This trait is *sealed*: the list of implementors below is total.
+/// Users do not have the ability to mark additional `LaneCount<N>` values as supported.
+/// Only SIMD vectors with supported lane counts are constructable.
+pub trait SupportedLaneCount: Sealed {
+ #[doc(hidden)]
+ type BitMask: Copy + Default + AsRef<[u8]> + AsMut<[u8]>;
+}
+
+impl<const LANES: usize> Sealed for LaneCount<LANES> {}
+
+impl SupportedLaneCount for LaneCount<1> {
+ type BitMask = [u8; 1];
+}
+impl SupportedLaneCount for LaneCount<2> {
+ type BitMask = [u8; 1];
+}
+impl SupportedLaneCount for LaneCount<4> {
+ type BitMask = [u8; 1];
+}
+impl SupportedLaneCount for LaneCount<8> {
+ type BitMask = [u8; 1];
+}
+impl SupportedLaneCount for LaneCount<16> {
+ type BitMask = [u8; 2];
+}
+impl SupportedLaneCount for LaneCount<32> {
+ type BitMask = [u8; 4];
+}
+impl SupportedLaneCount for LaneCount<64> {
+ type BitMask = [u8; 8];
+}
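Generic code uses this bound to stay within the supported lane counts (illustrative only):

```
#![feature(portable_simd)]
use std::simd::{LaneCount, Simd, SupportedLaneCount};

// `N` is restricted to the lane counts implemented above: 1, 2, 4, 8, 16, 32, 64.
fn double<const N: usize>(x: Simd<u32, N>) -> Simd<u32, N>
where
    LaneCount<N>: SupportedLaneCount,
{
    x + x
}

fn main() {
    assert_eq!(double(Simd::from_array([1u32, 2, 3, 4])).to_array(), [2, 4, 6, 8]);
}
```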
diff --git a/library/portable-simd/crates/core_simd/src/lib.rs b/library/portable-simd/crates/core_simd/src/lib.rs
new file mode 100644
index 000000000..715f258f6
--- /dev/null
+++ b/library/portable-simd/crates/core_simd/src/lib.rs
@@ -0,0 +1,22 @@
+#![no_std]
+#![feature(
+ convert_float_to_int,
+ decl_macro,
+ intra_doc_pointers,
+ platform_intrinsics,
+ repr_simd,
+ simd_ffi,
+ staged_api,
+ stdsimd
+)]
+#![cfg_attr(feature = "generic_const_exprs", feature(generic_const_exprs))]
+#![cfg_attr(feature = "generic_const_exprs", allow(incomplete_features))]
+#![warn(missing_docs)]
+#![deny(unsafe_op_in_unsafe_fn, clippy::undocumented_unsafe_blocks)]
+#![unstable(feature = "portable_simd", issue = "86656")]
+//! Portable SIMD module.
+
+#[path = "mod.rs"]
+mod core_simd;
+pub use self::core_simd::simd;
+pub use simd::*;
diff --git a/library/portable-simd/crates/core_simd/src/masks.rs b/library/portable-simd/crates/core_simd/src/masks.rs
new file mode 100644
index 000000000..c36c336d8
--- /dev/null
+++ b/library/portable-simd/crates/core_simd/src/masks.rs
@@ -0,0 +1,595 @@
+//! Types and traits associated with masking lanes of vectors.
+#![allow(non_camel_case_types)]
+
+#[cfg_attr(
+ not(all(target_arch = "x86_64", target_feature = "avx512f")),
+ path = "masks/full_masks.rs"
+)]
+#[cfg_attr(
+ all(target_arch = "x86_64", target_feature = "avx512f"),
+ path = "masks/bitmask.rs"
+)]
+mod mask_impl;
+
+mod to_bitmask;
+pub use to_bitmask::ToBitMask;
+
+#[cfg(feature = "generic_const_exprs")]
+pub use to_bitmask::{bitmask_len, ToBitMaskArray};
+
+use crate::simd::{intrinsics, LaneCount, Simd, SimdElement, SimdPartialEq, SupportedLaneCount};
+use core::cmp::Ordering;
+use core::{fmt, mem};
+
+mod sealed {
+ use super::*;
+
+ /// Not only does this seal the `MaskElement` trait, but these functions prevent other traits
+ /// from bleeding into the parent bounds.
+ ///
+ /// For example, `eq` could be provided by requiring `MaskElement: PartialEq`, but that would
+ /// prevent us from ever removing that bound, or from implementing `MaskElement` on
+ /// non-`PartialEq` types in the future.
+ pub trait Sealed {
+ fn valid<const LANES: usize>(values: Simd<Self, LANES>) -> bool
+ where
+ LaneCount<LANES>: SupportedLaneCount,
+ Self: SimdElement;
+
+ fn eq(self, other: Self) -> bool;
+
+ const TRUE: Self;
+
+ const FALSE: Self;
+ }
+}
+use sealed::Sealed;
+
+/// Marker trait for types that may be used as SIMD mask elements.
+///
+/// # Safety
+/// Type must be a signed integer.
+pub unsafe trait MaskElement: SimdElement + Sealed {}
+
+macro_rules! impl_element {
+ { $ty:ty } => {
+ impl Sealed for $ty {
+ fn valid<const LANES: usize>(value: Simd<Self, LANES>) -> bool
+ where
+ LaneCount<LANES>: SupportedLaneCount,
+ {
+ (value.simd_eq(Simd::splat(0 as _)) | value.simd_eq(Simd::splat(-1 as _))).all()
+ }
+
+ fn eq(self, other: Self) -> bool { self == other }
+
+ const TRUE: Self = -1;
+ const FALSE: Self = 0;
+ }
+
+ // Safety: this is a valid mask element type
+ unsafe impl MaskElement for $ty {}
+ }
+}
+
+impl_element! { i8 }
+impl_element! { i16 }
+impl_element! { i32 }
+impl_element! { i64 }
+impl_element! { isize }
+
+/// A SIMD vector mask for `LANES` elements of width specified by `Element`.
+///
+/// Masks represent boolean inclusion/exclusion on a per-lane basis.
+///
+/// The layout of this type is unspecified.
+#[repr(transparent)]
+pub struct Mask<T, const LANES: usize>(mask_impl::Mask<T, LANES>)
+where
+ T: MaskElement,
+ LaneCount<LANES>: SupportedLaneCount;
+
+impl<T, const LANES: usize> Copy for Mask<T, LANES>
+where
+ T: MaskElement,
+ LaneCount<LANES>: SupportedLaneCount,
+{
+}
+
+impl<T, const LANES: usize> Clone for Mask<T, LANES>
+where
+ T: MaskElement,
+ LaneCount<LANES>: SupportedLaneCount,
+{
+ fn clone(&self) -> Self {
+ *self
+ }
+}
+
+impl<T, const LANES: usize> Mask<T, LANES>
+where
+ T: MaskElement,
+ LaneCount<LANES>: SupportedLaneCount,
+{
+ /// Construct a mask by setting all lanes to the given value.
+ pub fn splat(value: bool) -> Self {
+ Self(mask_impl::Mask::splat(value))
+ }
+
+ /// Converts an array of bools to a SIMD mask.
+ pub fn from_array(array: [bool; LANES]) -> Self {
+ // SAFETY: Rust's bool has a layout of 1 byte (u8) with a value of
+ // true: 0b_0000_0001
+ // false: 0b_0000_0000
+ // Thus, an array of bools is also a valid array of bytes: [u8; N]
+ // This would be hypothetically valid as an "in-place" transmute,
+ // but these are "dependently-sized" types, so copy elision it is!
+ unsafe {
+ let bytes: [u8; LANES] = mem::transmute_copy(&array);
+ let bools: Simd<i8, LANES> =
+ intrinsics::simd_ne(Simd::from_array(bytes), Simd::splat(0u8));
+ Mask::from_int_unchecked(intrinsics::simd_cast(bools))
+ }
+ }
+
+ /// Converts a SIMD mask to an array of bools.
+ pub fn to_array(self) -> [bool; LANES] {
+ // This follows mostly the same logic as from_array.
+ // SAFETY: Rust's bool has a layout of 1 byte (u8) with a value of
+ // true: 0b_0000_0001
+ // false: 0b_0000_0000
+ // Thus, an array of bools is also a valid array of bytes: [u8; N]
+ // Since our masks are equal to integers where all bits are set,
+ // we can simply convert them to i8s, and then bitand them by the
+ // bitpattern for Rust's "true" bool.
+ // This would be hypothetically valid as an "in-place" transmute,
+ // but these are "dependently-sized" types, so copy elision it is!
+ unsafe {
+ let mut bytes: Simd<i8, LANES> = intrinsics::simd_cast(self.to_int());
+ bytes &= Simd::splat(1i8);
+ mem::transmute_copy(&bytes)
+ }
+ }
+
+ /// Converts a vector of integers to a mask, where 0 represents `false` and -1
+ /// represents `true`.
+ ///
+ /// # Safety
+ /// All lanes must be either 0 or -1.
+ #[inline]
+ #[must_use = "method returns a new mask and does not mutate the original value"]
+ pub unsafe fn from_int_unchecked(value: Simd<T, LANES>) -> Self {
+ // Safety: the caller must confirm this invariant
+ unsafe { Self(mask_impl::Mask::from_int_unchecked(value)) }
+ }
+
+ /// Converts a vector of integers to a mask, where 0 represents `false` and -1
+ /// represents `true`.
+ ///
+ /// # Panics
+ /// Panics if any lane is not 0 or -1.
+ #[inline]
+ #[must_use = "method returns a new mask and does not mutate the original value"]
+ pub fn from_int(value: Simd<T, LANES>) -> Self {
+ assert!(T::valid(value), "all values must be either 0 or -1",);
+ // Safety: the validity has been checked
+ unsafe { Self::from_int_unchecked(value) }
+ }
+
+ /// Converts the mask to a vector of integers, where 0 represents `false` and -1
+ /// represents `true`.
+ #[inline]
+ #[must_use = "method returns a new vector and does not mutate the original value"]
+ pub fn to_int(self) -> Simd<T, LANES> {
+ self.0.to_int()
+ }
+
+ /// Converts the mask to a mask with a different element type, keeping the lane count.
+ #[inline]
+ #[must_use = "method returns a new mask and does not mutate the original value"]
+ pub fn cast<U: MaskElement>(self) -> Mask<U, LANES> {
+ Mask(self.0.convert())
+ }
+
+ /// Tests the value of the specified lane.
+ ///
+ /// # Safety
+ /// `lane` must be less than `LANES`.
+ #[inline]
+ #[must_use = "method returns a new bool and does not mutate the original value"]
+ pub unsafe fn test_unchecked(&self, lane: usize) -> bool {
+ // Safety: the caller must confirm this invariant
+ unsafe { self.0.test_unchecked(lane) }
+ }
+
+ /// Tests the value of the specified lane.
+ ///
+ /// # Panics
+ /// Panics if `lane` is greater than or equal to the number of lanes in the vector.
+ #[inline]
+ #[must_use = "method returns a new bool and does not mutate the original value"]
+ pub fn test(&self, lane: usize) -> bool {
+ assert!(lane < LANES, "lane index out of range");
+ // Safety: the lane index has been checked
+ unsafe { self.test_unchecked(lane) }
+ }
+
+ /// Sets the value of the specified lane.
+ ///
+ /// # Safety
+ /// `lane` must be less than `LANES`.
+ #[inline]
+ pub unsafe fn set_unchecked(&mut self, lane: usize, value: bool) {
+ // Safety: the caller must confirm this invariant
+ unsafe {
+ self.0.set_unchecked(lane, value);
+ }
+ }
+
+ /// Sets the value of the specified lane.
+ ///
+ /// # Panics
+ /// Panics if `lane` is greater than or equal to the number of lanes in the vector.
+ #[inline]
+ pub fn set(&mut self, lane: usize, value: bool) {
+ assert!(lane < LANES, "lane index out of range");
+ // Safety: the lane index has been checked
+ unsafe {
+ self.set_unchecked(lane, value);
+ }
+ }
+
+ /// Returns true if any lane is set, or false otherwise.
+ #[inline]
+ #[must_use = "method returns a new bool and does not mutate the original value"]
+ pub fn any(self) -> bool {
+ self.0.any()
+ }
+
+ /// Returns true if all lanes are set, or false otherwise.
+ #[inline]
+ #[must_use = "method returns a new bool and does not mutate the original value"]
+ pub fn all(self) -> bool {
+ self.0.all()
+ }
+}
+
+// vector/array conversion
+impl<T, const LANES: usize> From<[bool; LANES]> for Mask<T, LANES>
+where
+ T: MaskElement,
+ LaneCount<LANES>: SupportedLaneCount,
+{
+ fn from(array: [bool; LANES]) -> Self {
+ Self::from_array(array)
+ }
+}
+
+impl<T, const LANES: usize> From<Mask<T, LANES>> for [bool; LANES]
+where
+ T: MaskElement,
+ LaneCount<LANES>: SupportedLaneCount,
+{
+ fn from(vector: Mask<T, LANES>) -> Self {
+ vector.to_array()
+ }
+}
+
+impl<T, const LANES: usize> Default for Mask<T, LANES>
+where
+ T: MaskElement,
+ LaneCount<LANES>: SupportedLaneCount,
+{
+ #[inline]
+ #[must_use = "method returns a defaulted mask with all lanes set to false (0)"]
+ fn default() -> Self {
+ Self::splat(false)
+ }
+}
+
+impl<T, const LANES: usize> PartialEq for Mask<T, LANES>
+where
+ T: MaskElement + PartialEq,
+ LaneCount<LANES>: SupportedLaneCount,
+{
+ #[inline]
+ #[must_use = "method returns a new bool and does not mutate the original value"]
+ fn eq(&self, other: &Self) -> bool {
+ self.0 == other.0
+ }
+}
+
+impl<T, const LANES: usize> PartialOrd for Mask<T, LANES>
+where
+ T: MaskElement + PartialOrd,
+ LaneCount<LANES>: SupportedLaneCount,
+{
+ #[inline]
+ #[must_use = "method returns a new Ordering and does not mutate the original value"]
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ self.0.partial_cmp(&other.0)
+ }
+}
+
+impl<T, const LANES: usize> fmt::Debug for Mask<T, LANES>
+where
+ T: MaskElement + fmt::Debug,
+ LaneCount<LANES>: SupportedLaneCount,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list()
+ .entries((0..LANES).map(|lane| self.test(lane)))
+ .finish()
+ }
+}
+
+impl<T, const LANES: usize> core::ops::BitAnd for Mask<T, LANES>
+where
+ T: MaskElement,
+ LaneCount<LANES>: SupportedLaneCount,
+{
+ type Output = Self;
+ #[inline]
+ #[must_use = "method returns a new mask and does not mutate the original value"]
+ fn bitand(self, rhs: Self) -> Self {
+ Self(self.0 & rhs.0)
+ }
+}
+
+impl<T, const LANES: usize> core::ops::BitAnd<bool> for Mask<T, LANES>
+where
+ T: MaskElement,
+ LaneCount<LANES>: SupportedLaneCount,
+{
+ type Output = Self;
+ #[inline]
+ #[must_use = "method returns a new mask and does not mutate the original value"]
+ fn bitand(self, rhs: bool) -> Self {
+ self & Self::splat(rhs)
+ }
+}
+
+impl<T, const LANES: usize> core::ops::BitAnd<Mask<T, LANES>> for bool
+where
+ T: MaskElement,
+ LaneCount<LANES>: SupportedLaneCount,
+{
+ type Output = Mask<T, LANES>;
+ #[inline]
+ #[must_use = "method returns a new mask and does not mutate the original value"]
+ fn bitand(self, rhs: Mask<T, LANES>) -> Mask<T, LANES> {
+ Mask::splat(self) & rhs
+ }
+}
+
+impl<T, const LANES: usize> core::ops::BitOr for Mask<T, LANES>
+where
+ T: MaskElement,
+ LaneCount<LANES>: SupportedLaneCount,
+{
+ type Output = Self;
+ #[inline]
+ #[must_use = "method returns a new mask and does not mutate the original value"]
+ fn bitor(self, rhs: Self) -> Self {
+ Self(self.0 | rhs.0)
+ }
+}
+
+impl<T, const LANES: usize> core::ops::BitOr<bool> for Mask<T, LANES>
+where
+ T: MaskElement,
+ LaneCount<LANES>: SupportedLaneCount,
+{
+ type Output = Self;
+ #[inline]
+ #[must_use = "method returns a new mask and does not mutate the original value"]
+ fn bitor(self, rhs: bool) -> Self {
+ self | Self::splat(rhs)
+ }
+}
+
+impl<T, const LANES: usize> core::ops::BitOr<Mask<T, LANES>> for bool
+where
+ T: MaskElement,
+ LaneCount<LANES>: SupportedLaneCount,
+{
+ type Output = Mask<T, LANES>;
+ #[inline]
+ #[must_use = "method returns a new mask and does not mutate the original value"]
+ fn bitor(self, rhs: Mask<T, LANES>) -> Mask<T, LANES> {
+ Mask::splat(self) | rhs
+ }
+}
+
+impl<T, const LANES: usize> core::ops::BitXor for Mask<T, LANES>
+where
+ T: MaskElement,
+ LaneCount<LANES>: SupportedLaneCount,
+{
+ type Output = Self;
+ #[inline]
+ #[must_use = "method returns a new mask and does not mutate the original value"]
+ fn bitxor(self, rhs: Self) -> Self::Output {
+ Self(self.0 ^ rhs.0)
+ }
+}
+
+impl<T, const LANES: usize> core::ops::BitXor<bool> for Mask<T, LANES>
+where
+ T: MaskElement,
+ LaneCount<LANES>: SupportedLaneCount,
+{
+ type Output = Self;
+ #[inline]
+ #[must_use = "method returns a new mask and does not mutate the original value"]
+ fn bitxor(self, rhs: bool) -> Self::Output {
+ self ^ Self::splat(rhs)
+ }
+}
+
+impl<T, const LANES: usize> core::ops::BitXor<Mask<T, LANES>> for bool
+where
+ T: MaskElement,
+ LaneCount<LANES>: SupportedLaneCount,
+{
+ type Output = Mask<T, LANES>;
+ #[inline]
+ #[must_use = "method returns a new mask and does not mutate the original value"]
+ fn bitxor(self, rhs: Mask<T, LANES>) -> Self::Output {
+ Mask::splat(self) ^ rhs
+ }
+}
+
+impl<T, const LANES: usize> core::ops::Not for Mask<T, LANES>
+where
+ T: MaskElement,
+ LaneCount<LANES>: SupportedLaneCount,
+{
+ type Output = Mask<T, LANES>;
+ #[inline]
+ #[must_use = "method returns a new mask and does not mutate the original value"]
+ fn not(self) -> Self::Output {
+ Self(!self.0)
+ }
+}
+
+impl<T, const LANES: usize> core::ops::BitAndAssign for Mask<T, LANES>
+where
+ T: MaskElement,
+ LaneCount<LANES>: SupportedLaneCount,
+{
+ #[inline]
+ fn bitand_assign(&mut self, rhs: Self) {
+ self.0 = self.0 & rhs.0;
+ }
+}
+
+impl<T, const LANES: usize> core::ops::BitAndAssign<bool> for Mask<T, LANES>
+where
+ T: MaskElement,
+ LaneCount<LANES>: SupportedLaneCount,
+{
+ #[inline]
+ fn bitand_assign(&mut self, rhs: bool) {
+ *self &= Self::splat(rhs);
+ }
+}
+
+impl<T, const LANES: usize> core::ops::BitOrAssign for Mask<T, LANES>
+where
+ T: MaskElement,
+ LaneCount<LANES>: SupportedLaneCount,
+{
+ #[inline]
+ fn bitor_assign(&mut self, rhs: Self) {
+ self.0 = self.0 | rhs.0;
+ }
+}
+
+impl<T, const LANES: usize> core::ops::BitOrAssign<bool> for Mask<T, LANES>
+where
+ T: MaskElement,
+ LaneCount<LANES>: SupportedLaneCount,
+{
+ #[inline]
+ fn bitor_assign(&mut self, rhs: bool) {
+ *self |= Self::splat(rhs);
+ }
+}
+
+impl<T, const LANES: usize> core::ops::BitXorAssign for Mask<T, LANES>
+where
+ T: MaskElement,
+ LaneCount<LANES>: SupportedLaneCount,
+{
+ #[inline]
+ fn bitxor_assign(&mut self, rhs: Self) {
+ self.0 = self.0 ^ rhs.0;
+ }
+}
+
+impl<T, const LANES: usize> core::ops::BitXorAssign<bool> for Mask<T, LANES>
+where
+ T: MaskElement,
+ LaneCount<LANES>: SupportedLaneCount,
+{
+ #[inline]
+ fn bitxor_assign(&mut self, rhs: bool) {
+ *self ^= Self::splat(rhs);
+ }
+}
+
+/// A mask for SIMD vectors with eight elements of 8 bits.
+pub type mask8x8 = Mask<i8, 8>;
+
+/// A mask for SIMD vectors with 16 elements of 8 bits.
+pub type mask8x16 = Mask<i8, 16>;
+
+/// A mask for SIMD vectors with 32 elements of 8 bits.
+pub type mask8x32 = Mask<i8, 32>;
+
+/// A mask for SIMD vectors with 64 elements of 8 bits.
+pub type mask8x64 = Mask<i8, 64>;
+
+/// A mask for SIMD vectors with four elements of 16 bits.
+pub type mask16x4 = Mask<i16, 4>;
+
+/// A mask for SIMD vectors with eight elements of 16 bits.
+pub type mask16x8 = Mask<i16, 8>;
+
+/// A mask for SIMD vectors with 16 elements of 16 bits.
+pub type mask16x16 = Mask<i16, 16>;
+
+/// A mask for SIMD vectors with 32 elements of 16 bits.
+pub type mask16x32 = Mask<i16, 32>;
+
+/// A mask for SIMD vectors with two elements of 32 bits.
+pub type mask32x2 = Mask<i32, 2>;
+
+/// A mask for SIMD vectors with four elements of 32 bits.
+pub type mask32x4 = Mask<i32, 4>;
+
+/// A mask for SIMD vectors with eight elements of 32 bits.
+pub type mask32x8 = Mask<i32, 8>;
+
+/// A mask for SIMD vectors with 16 elements of 32 bits.
+pub type mask32x16 = Mask<i32, 16>;
+
+/// A mask for SIMD vectors with two elements of 64 bits.
+pub type mask64x2 = Mask<i64, 2>;
+
+/// A mask for SIMD vectors with four elements of 64 bits.
+pub type mask64x4 = Mask<i64, 4>;
+
+/// A mask for SIMD vectors with eight elements of 64 bits.
+pub type mask64x8 = Mask<i64, 8>;
+
+/// A mask for SIMD vectors with two elements of pointer width.
+pub type masksizex2 = Mask<isize, 2>;
+
+/// A mask for SIMD vectors with four elements of pointer width.
+pub type masksizex4 = Mask<isize, 4>;
+
+/// A mask for SIMD vectors with eight elements of pointer width.
+pub type masksizex8 = Mask<isize, 8>;
+
+macro_rules! impl_from {
+ { $from:ty => $($to:ty),* } => {
+ $(
+ impl<const LANES: usize> From<Mask<$from, LANES>> for Mask<$to, LANES>
+ where
+ LaneCount<LANES>: SupportedLaneCount,
+ {
+ fn from(value: Mask<$from, LANES>) -> Self {
+ value.cast()
+ }
+ }
+ )*
+ }
+}
+impl_from! { i8 => i16, i32, i64, isize }
+impl_from! { i16 => i32, i64, isize, i8 }
+impl_from! { i32 => i64, isize, i8, i16 }
+impl_from! { i64 => isize, i8, i16, i32 }
+impl_from! { isize => i8, i16, i32, i64 }
diff --git a/library/portable-simd/crates/core_simd/src/masks/bitmask.rs b/library/portable-simd/crates/core_simd/src/masks/bitmask.rs
new file mode 100644
index 000000000..365ecc0a3
--- /dev/null
+++ b/library/portable-simd/crates/core_simd/src/masks/bitmask.rs
@@ -0,0 +1,246 @@
+#![allow(unused_imports)]
+use super::MaskElement;
+use crate::simd::intrinsics;
+use crate::simd::{LaneCount, Simd, SupportedLaneCount, ToBitMask};
+use core::marker::PhantomData;
+
+/// A mask where each lane is represented by a single bit.
+#[repr(transparent)]
+pub struct Mask<T, const LANES: usize>(
+ <LaneCount<LANES> as SupportedLaneCount>::BitMask,
+ PhantomData<T>,
+)
+where
+ T: MaskElement,
+ LaneCount<LANES>: SupportedLaneCount;
+
+impl<T, const LANES: usize> Copy for Mask<T, LANES>
+where
+ T: MaskElement,
+ LaneCount<LANES>: SupportedLaneCount,
+{
+}
+
+impl<T, const LANES: usize> Clone for Mask<T, LANES>
+where
+ T: MaskElement,
+ LaneCount<LANES>: SupportedLaneCount,
+{
+ fn clone(&self) -> Self {
+ *self
+ }
+}
+
+impl<T, const LANES: usize> PartialEq for Mask<T, LANES>
+where
+ T: MaskElement,
+ LaneCount<LANES>: SupportedLaneCount,
+{
+ fn eq(&self, other: &Self) -> bool {
+ self.0.as_ref() == other.0.as_ref()
+ }
+}
+
+impl<T, const LANES: usize> PartialOrd for Mask<T, LANES>
+where
+ T: MaskElement,
+ LaneCount<LANES>: SupportedLaneCount,
+{
+ fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
+ self.0.as_ref().partial_cmp(other.0.as_ref())
+ }
+}
+
+impl<T, const LANES: usize> Eq for Mask<T, LANES>
+where
+ T: MaskElement,
+ LaneCount<LANES>: SupportedLaneCount,
+{
+}
+
+impl<T, const LANES: usize> Ord for Mask<T, LANES>
+where
+ T: MaskElement,
+ LaneCount<LANES>: SupportedLaneCount,
+{
+ fn cmp(&self, other: &Self) -> core::cmp::Ordering {
+ self.0.as_ref().cmp(other.0.as_ref())
+ }
+}
+
+impl<T, const LANES: usize> Mask<T, LANES>
+where
+ T: MaskElement,
+ LaneCount<LANES>: SupportedLaneCount,
+{
+ #[inline]
+ #[must_use = "method returns a new mask and does not mutate the original value"]
+ pub fn splat(value: bool) -> Self {
+ let mut mask = <LaneCount<LANES> as SupportedLaneCount>::BitMask::default();
+ if value {
+ mask.as_mut().fill(u8::MAX)
+ } else {
+ mask.as_mut().fill(u8::MIN)
+ }
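+ // Zero the unused (padding) bits of the last byte so the bitmask stays canonical.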
+ if LANES % 8 > 0 {
+ *mask.as_mut().last_mut().unwrap() &= u8::MAX >> (8 - LANES % 8);
+ }
+ Self(mask, PhantomData)
+ }
+
+ #[inline]
+ #[must_use = "method returns a new bool and does not mutate the original value"]
+ pub unsafe fn test_unchecked(&self, lane: usize) -> bool {
+ (self.0.as_ref()[lane / 8] >> (lane % 8)) & 0x1 > 0
+ }
+
+ #[inline]
+ pub unsafe fn set_unchecked(&mut self, lane: usize, value: bool) {
+ unsafe {
+ self.0.as_mut()[lane / 8] ^= ((value ^ self.test_unchecked(lane)) as u8) << (lane % 8)
+ }
+ }
+
+ #[inline]
+ #[must_use = "method returns a new vector and does not mutate the original value"]
+ pub fn to_int(self) -> Simd<T, LANES> {
+ unsafe {
+ intrinsics::simd_select_bitmask(self.0, Simd::splat(T::TRUE), Simd::splat(T::FALSE))
+ }
+ }
+
+ #[inline]
+ #[must_use = "method returns a new mask and does not mutate the original value"]
+ pub unsafe fn from_int_unchecked(value: Simd<T, LANES>) -> Self {
+ unsafe { Self(intrinsics::simd_bitmask(value), PhantomData) }
+ }
+
+ #[cfg(feature = "generic_const_exprs")]
+ #[inline]
+ #[must_use = "method returns a new array and does not mutate the original value"]
+ pub fn to_bitmask_array<const N: usize>(self) -> [u8; N] {
+ assert!(core::mem::size_of::<Self>() == N);
+
+ // Safety: converting an integer to an array of bytes of the same size is safe
+ unsafe { core::mem::transmute_copy(&self.0) }
+ }
+
+ #[cfg(feature = "generic_const_exprs")]
+ #[inline]
+ #[must_use = "method returns a new mask and does not mutate the original value"]
+ pub fn from_bitmask_array<const N: usize>(bitmask: [u8; N]) -> Self {
+ assert!(core::mem::size_of::<Self>() == N);
+
+ // Safety: converting an array of bytes to an integer of the same size is safe
+ Self(unsafe { core::mem::transmute_copy(&bitmask) }, PhantomData)
+ }
+
+ #[inline]
+ pub fn to_bitmask_integer<U>(self) -> U
+ where
+ super::Mask<T, LANES>: ToBitMask<BitMask = U>,
+ {
+ // Safety: these are the same types
+ unsafe { core::mem::transmute_copy(&self.0) }
+ }
+
+ #[inline]
+ pub fn from_bitmask_integer<U>(bitmask: U) -> Self
+ where
+ super::Mask<T, LANES>: ToBitMask<BitMask = U>,
+ {
+ // Safety: these are the same types
+ unsafe { Self(core::mem::transmute_copy(&bitmask), PhantomData) }
+ }
+
+ #[inline]
+ #[must_use = "method returns a new mask and does not mutate the original value"]
+ pub fn convert<U>(self) -> Mask<U, LANES>
+ where
+ U: MaskElement,
+ {
+ // Safety: bitmask layout does not depend on the element width
+ unsafe { core::mem::transmute_copy(&self) }
+ }
+
+ #[inline]
+ #[must_use = "method returns a new bool and does not mutate the original value"]
+ pub fn any(self) -> bool {
+ self != Self::splat(false)
+ }
+
+ #[inline]
+ #[must_use = "method returns a new bool and does not mutate the original value"]
+ pub fn all(self) -> bool {
+ self == Self::splat(true)
+ }
+}
+
+impl<T, const LANES: usize> core::ops::BitAnd for Mask<T, LANES>
+where
+ T: MaskElement,
+ LaneCount<LANES>: SupportedLaneCount,
+ <LaneCount<LANES> as SupportedLaneCount>::BitMask: AsRef<[u8]> + AsMut<[u8]>,
+{
+ type Output = Self;
+ #[inline]
+ #[must_use = "method returns a new mask and does not mutate the original value"]
+ fn bitand(mut self, rhs: Self) -> Self {
+ for (l, r) in self.0.as_mut().iter_mut().zip(rhs.0.as_ref().iter()) {
+ *l &= r;
+ }
+ self
+ }
+}
+
+impl<T, const LANES: usize> core::ops::BitOr for Mask<T, LANES>
+where
+ T: MaskElement,
+ LaneCount<LANES>: SupportedLaneCount,
+ <LaneCount<LANES> as SupportedLaneCount>::BitMask: AsRef<[u8]> + AsMut<[u8]>,
+{
+ type Output = Self;
+ #[inline]
+ #[must_use = "method returns a new mask and does not mutate the original value"]
+ fn bitor(mut self, rhs: Self) -> Self {
+ for (l, r) in self.0.as_mut().iter_mut().zip(rhs.0.as_ref().iter()) {
+ *l |= r;
+ }
+ self
+ }
+}
+
+impl<T, const LANES: usize> core::ops::BitXor for Mask<T, LANES>
+where
+ T: MaskElement,
+ LaneCount<LANES>: SupportedLaneCount,
+{
+ type Output = Self;
+ #[inline]
+ #[must_use = "method returns a new mask and does not mutate the original value"]
+ fn bitxor(mut self, rhs: Self) -> Self::Output {
+ for (l, r) in self.0.as_mut().iter_mut().zip(rhs.0.as_ref().iter()) {
+ *l ^= r;
+ }
+ self
+ }
+}
+
+impl<T, const LANES: usize> core::ops::Not for Mask<T, LANES>
+where
+ T: MaskElement,
+ LaneCount<LANES>: SupportedLaneCount,
+{
+ type Output = Self;
+ #[inline]
+ #[must_use = "method returns a new mask and does not mutate the original value"]
+ fn not(mut self) -> Self::Output {
+ for x in self.0.as_mut() {
+ *x = !*x;
+ }
+ if LANES % 8 > 0 {
+ *self.0.as_mut().last_mut().unwrap() &= u8::MAX >> (8 - LANES % 8);
+ }
+ self
+ }
+}
diff --git a/library/portable-simd/crates/core_simd/src/masks/full_masks.rs b/library/portable-simd/crates/core_simd/src/masks/full_masks.rs
new file mode 100644
index 000000000..adf0fcbea
--- /dev/null
+++ b/library/portable-simd/crates/core_simd/src/masks/full_masks.rs
@@ -0,0 +1,323 @@
+//! Masks that take up full SIMD vector registers.
+
+use super::MaskElement;
+use crate::simd::intrinsics;
+use crate::simd::{LaneCount, Simd, SupportedLaneCount, ToBitMask};
+
+#[cfg(feature = "generic_const_exprs")]
+use crate::simd::ToBitMaskArray;
+
+#[repr(transparent)]
+pub struct Mask<T, const LANES: usize>(Simd<T, LANES>)
+where
+ T: MaskElement,
+ LaneCount<LANES>: SupportedLaneCount;
+
+impl<T, const LANES: usize> Copy for Mask<T, LANES>
+where
+ T: MaskElement,
+ LaneCount<LANES>: SupportedLaneCount,
+{
+}
+
+impl<T, const LANES: usize> Clone for Mask<T, LANES>
+where
+ T: MaskElement,
+ LaneCount<LANES>: SupportedLaneCount,
+{
+ #[inline]
+ #[must_use = "method returns a new mask and does not mutate the original value"]
+ fn clone(&self) -> Self {
+ *self
+ }
+}
+
+impl<T, const LANES: usize> PartialEq for Mask<T, LANES>
+where
+ T: MaskElement + PartialEq,
+ LaneCount<LANES>: SupportedLaneCount,
+{
+ fn eq(&self, other: &Self) -> bool {
+ self.0.eq(&other.0)
+ }
+}
+
+impl<T, const LANES: usize> PartialOrd for Mask<T, LANES>
+where
+ T: MaskElement + PartialOrd,
+ LaneCount<LANES>: SupportedLaneCount,
+{
+ fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
+ self.0.partial_cmp(&other.0)
+ }
+}
+
+impl<T, const LANES: usize> Eq for Mask<T, LANES>
+where
+ T: MaskElement + Eq,
+ LaneCount<LANES>: SupportedLaneCount,
+{
+}
+
+impl<T, const LANES: usize> Ord for Mask<T, LANES>
+where
+ T: MaskElement + Ord,
+ LaneCount<LANES>: SupportedLaneCount,
+{
+ fn cmp(&self, other: &Self) -> core::cmp::Ordering {
+ self.0.cmp(&other.0)
+ }
+}
+
+// Used for bitmask bit order workaround
+pub(crate) trait ReverseBits {
+ // Reverse the least significant `n` bits of `self`.
+ // (Remaining bits must be 0.)
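+ // e.g. reversing the low 4 bits of `0b0001u8` yields `0b1000`.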
+ fn reverse_bits(self, n: usize) -> Self;
+}
+
+macro_rules! impl_reverse_bits {
+ { $($int:ty),* } => {
+ $(
+ impl ReverseBits for $int {
+ #[inline(always)]
+ fn reverse_bits(self, n: usize) -> Self {
+ let rev = <$int>::reverse_bits(self);
+ let bitsize = core::mem::size_of::<$int>() * 8;
+ if n < bitsize {
+ // Shift things back to the right
+ rev >> (bitsize - n)
+ } else {
+ rev
+ }
+ }
+ }
+ )*
+ }
+}
+
+impl_reverse_bits! { u8, u16, u32, u64 }
+
+impl<T, const LANES: usize> Mask<T, LANES>
+where
+ T: MaskElement,
+ LaneCount<LANES>: SupportedLaneCount,
+{
+ #[inline]
+ #[must_use = "method returns a new mask and does not mutate the original value"]
+ pub fn splat(value: bool) -> Self {
+ Self(Simd::splat(if value { T::TRUE } else { T::FALSE }))
+ }
+
+ #[inline]
+ #[must_use = "method returns a new bool and does not mutate the original value"]
+ pub unsafe fn test_unchecked(&self, lane: usize) -> bool {
+ T::eq(self.0[lane], T::TRUE)
+ }
+
+ #[inline]
+ pub unsafe fn set_unchecked(&mut self, lane: usize, value: bool) {
+ self.0[lane] = if value { T::TRUE } else { T::FALSE }
+ }
+
+ #[inline]
+ #[must_use = "method returns a new vector and does not mutate the original value"]
+ pub fn to_int(self) -> Simd<T, LANES> {
+ self.0
+ }
+
+ #[inline]
+ #[must_use = "method returns a new mask and does not mutate the original value"]
+ pub unsafe fn from_int_unchecked(value: Simd<T, LANES>) -> Self {
+ Self(value)
+ }
+
+ #[inline]
+ #[must_use = "method returns a new mask and does not mutate the original value"]
+ pub fn convert<U>(self) -> Mask<U, LANES>
+ where
+ U: MaskElement,
+ {
+ // Safety: masks are simply integer vectors of 0 and -1, and we can cast the element type.
+ unsafe { Mask(intrinsics::simd_cast(self.0)) }
+ }
+
+ #[cfg(feature = "generic_const_exprs")]
+ #[inline]
+ #[must_use = "method returns a new array and does not mutate the original value"]
+ pub fn to_bitmask_array<const N: usize>(self) -> [u8; N]
+ where
+ super::Mask<T, LANES>: ToBitMaskArray,
+ [(); <super::Mask<T, LANES> as ToBitMaskArray>::BYTES]: Sized,
+ {
+ assert_eq!(<super::Mask<T, LANES> as ToBitMaskArray>::BYTES, N);
+
+ // Safety: N is the correct bitmask size
+ unsafe {
+ // Compute the bitmask
+ let bitmask: [u8; <super::Mask<T, LANES> as ToBitMaskArray>::BYTES] =
+ intrinsics::simd_bitmask(self.0);
+
+ // Transmute to the return type, previously asserted to be the same size
+ let mut bitmask: [u8; N] = core::mem::transmute_copy(&bitmask);
+
+ // LLVM assumes bit order should match endianness
+ if cfg!(target_endian = "big") {
+ for x in bitmask.as_mut() {
+ *x = x.reverse_bits();
+ }
+ };
+
+ bitmask
+ }
+ }
+
+ #[cfg(feature = "generic_const_exprs")]
+ #[inline]
+ #[must_use = "method returns a new mask and does not mutate the original value"]
+ pub fn from_bitmask_array<const N: usize>(mut bitmask: [u8; N]) -> Self
+ where
+ super::Mask<T, LANES>: ToBitMaskArray,
+ [(); <super::Mask<T, LANES> as ToBitMaskArray>::BYTES]: Sized,
+ {
+ assert_eq!(<super::Mask<T, LANES> as ToBitMaskArray>::BYTES, N);
+
+ // Safety: N is the correct bitmask size
+ unsafe {
+ // LLVM assumes bit order should match endianness
+ if cfg!(target_endian = "big") {
+ for x in bitmask.as_mut() {
+ *x = x.reverse_bits();
+ }
+ }
+
+ // Transmute to the bitmask type, previously asserted to be the same size
+ let bitmask: [u8; <super::Mask<T, LANES> as ToBitMaskArray>::BYTES] =
+ core::mem::transmute_copy(&bitmask);
+
+ // Compute the regular mask
+ Self::from_int_unchecked(intrinsics::simd_select_bitmask(
+ bitmask,
+ Self::splat(true).to_int(),
+ Self::splat(false).to_int(),
+ ))
+ }
+ }
+
+ #[inline]
+ pub(crate) fn to_bitmask_integer<U: ReverseBits>(self) -> U
+ where
+ super::Mask<T, LANES>: ToBitMask<BitMask = U>,
+ {
+ // Safety: U is required to be the appropriate bitmask type
+ let bitmask: U = unsafe { intrinsics::simd_bitmask(self.0) };
+
+ // LLVM assumes bit order should match endianness
+ if cfg!(target_endian = "big") {
+ bitmask.reverse_bits(LANES)
+ } else {
+ bitmask
+ }
+ }
+
+ #[inline]
+ pub(crate) fn from_bitmask_integer<U: ReverseBits>(bitmask: U) -> Self
+ where
+ super::Mask<T, LANES>: ToBitMask<BitMask = U>,
+ {
+ // LLVM assumes bit order should match endianness
+ let bitmask = if cfg!(target_endian = "big") {
+ bitmask.reverse_bits(LANES)
+ } else {
+ bitmask
+ };
+
+ // Safety: U is required to be the appropriate bitmask type
+ unsafe {
+ Self::from_int_unchecked(intrinsics::simd_select_bitmask(
+ bitmask,
+ Self::splat(true).to_int(),
+ Self::splat(false).to_int(),
+ ))
+ }
+ }
+
+ #[inline]
+ #[must_use = "method returns a new bool and does not mutate the original value"]
+ pub fn any(self) -> bool {
+ // Safety: use `self` as an integer vector
+ unsafe { intrinsics::simd_reduce_any(self.to_int()) }
+ }
+
+ #[inline]
+ #[must_use = "method returns a new bool and does not mutate the original value"]
+ pub fn all(self) -> bool {
+ // Safety: use `self` as an integer vector
+ unsafe { intrinsics::simd_reduce_all(self.to_int()) }
+ }
+}
+
+impl<T, const LANES: usize> core::convert::From<Mask<T, LANES>> for Simd<T, LANES>
+where
+ T: MaskElement,
+ LaneCount<LANES>: SupportedLaneCount,
+{
+ fn from(value: Mask<T, LANES>) -> Self {
+ value.0
+ }
+}
+
+impl<T, const LANES: usize> core::ops::BitAnd for Mask<T, LANES>
+where
+ T: MaskElement,
+ LaneCount<LANES>: SupportedLaneCount,
+{
+ type Output = Self;
+ #[inline]
+ #[must_use = "method returns a new mask and does not mutate the original value"]
+ fn bitand(self, rhs: Self) -> Self {
+ // Safety: `self` is an integer vector
+ unsafe { Self(intrinsics::simd_and(self.0, rhs.0)) }
+ }
+}
+
+impl<T, const LANES: usize> core::ops::BitOr for Mask<T, LANES>
+where
+ T: MaskElement,
+ LaneCount<LANES>: SupportedLaneCount,
+{
+ type Output = Self;
+ #[inline]
+ #[must_use = "method returns a new mask and does not mutate the original value"]
+ fn bitor(self, rhs: Self) -> Self {
+ // Safety: `self` is an integer vector
+ unsafe { Self(intrinsics::simd_or(self.0, rhs.0)) }
+ }
+}
+
+impl<T, const LANES: usize> core::ops::BitXor for Mask<T, LANES>
+where
+ T: MaskElement,
+ LaneCount<LANES>: SupportedLaneCount,
+{
+ type Output = Self;
+ #[inline]
+ #[must_use = "method returns a new mask and does not mutate the original value"]
+ fn bitxor(self, rhs: Self) -> Self {
+ // Safety: `self` is an integer vector
+ unsafe { Self(intrinsics::simd_xor(self.0, rhs.0)) }
+ }
+}
+
+impl<T, const LANES: usize> core::ops::Not for Mask<T, LANES>
+where
+ T: MaskElement,
+ LaneCount<LANES>: SupportedLaneCount,
+{
+ type Output = Self;
+ #[inline]
+ #[must_use = "method returns a new mask and does not mutate the original value"]
+ fn not(self) -> Self::Output {
+ Self::splat(true) ^ self
+ }
+}
diff --git a/library/portable-simd/crates/core_simd/src/masks/to_bitmask.rs b/library/portable-simd/crates/core_simd/src/masks/to_bitmask.rs
new file mode 100644
index 000000000..65d3ce9be
--- /dev/null
+++ b/library/portable-simd/crates/core_simd/src/masks/to_bitmask.rs
@@ -0,0 +1,93 @@
+use super::{mask_impl, Mask, MaskElement};
+use crate::simd::{LaneCount, SupportedLaneCount};
+
+mod sealed {
+ pub trait Sealed {}
+}
+pub use sealed::Sealed;
+
+impl<T, const LANES: usize> Sealed for Mask<T, LANES>
+where
+ T: MaskElement,
+ LaneCount<LANES>: SupportedLaneCount,
+{
+}
+
+/// Converts masks to and from integer bitmasks.
+///
+/// Each bit of the bitmask corresponds to a mask lane, starting with the LSB.
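+///
+/// A small sketch of the round trip (lane 0 maps to the least significant bit; values are illustrative):
+/// ```
+/// # #![feature(portable_simd)]
+/// # use core::simd::{Mask, ToBitMask};
+/// let mask = Mask::<i8, 4>::from_array([true, false, true, false]);
+/// assert_eq!(mask.to_bitmask(), 0b0101);
+/// assert_eq!(Mask::<i8, 4>::from_bitmask(0b0101), mask);
+/// ```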
+pub trait ToBitMask: Sealed {
+ /// The integer bitmask type.
+ type BitMask;
+
+ /// Converts a mask to a bitmask.
+ fn to_bitmask(self) -> Self::BitMask;
+
+ /// Converts a bitmask to a mask.
+ fn from_bitmask(bitmask: Self::BitMask) -> Self;
+}
+
+/// Converts masks to and from byte array bitmasks.
+///
+/// Each bit of the bitmask corresponds to a mask lane, starting with the LSB of the first byte.
+#[cfg(feature = "generic_const_exprs")]
+pub trait ToBitMaskArray: Sealed {
+ /// The length of the bitmask array.
+ const BYTES: usize;
+
+ /// Converts a mask to a bitmask.
+ fn to_bitmask_array(self) -> [u8; Self::BYTES];
+
+ /// Converts a bitmask to a mask.
+ fn from_bitmask_array(bitmask: [u8; Self::BYTES]) -> Self;
+}
+
+macro_rules! impl_integer_intrinsic {
+ { $(impl ToBitMask<BitMask=$int:ty> for Mask<_, $lanes:literal>)* } => {
+ $(
+ impl<T: MaskElement> ToBitMask for Mask<T, $lanes> {
+ type BitMask = $int;
+
+ fn to_bitmask(self) -> $int {
+ self.0.to_bitmask_integer()
+ }
+
+ fn from_bitmask(bitmask: $int) -> Self {
+ Self(mask_impl::Mask::from_bitmask_integer(bitmask))
+ }
+ }
+ )*
+ }
+}
+
+impl_integer_intrinsic! {
+ impl ToBitMask<BitMask=u8> for Mask<_, 1>
+ impl ToBitMask<BitMask=u8> for Mask<_, 2>
+ impl ToBitMask<BitMask=u8> for Mask<_, 4>
+ impl ToBitMask<BitMask=u8> for Mask<_, 8>
+ impl ToBitMask<BitMask=u16> for Mask<_, 16>
+ impl ToBitMask<BitMask=u32> for Mask<_, 32>
+ impl ToBitMask<BitMask=u64> for Mask<_, 64>
+}
+
+/// Returns the minimum number of bytes in a bitmask with `lanes` lanes.
+#[cfg(feature = "generic_const_exprs")]
+pub const fn bitmask_len(lanes: usize) -> usize {
+ (lanes + 7) / 8
+}
+
+#[cfg(feature = "generic_const_exprs")]
+impl<T: MaskElement, const LANES: usize> ToBitMaskArray for Mask<T, LANES>
+where
+ LaneCount<LANES>: SupportedLaneCount,
+{
+ const BYTES: usize = bitmask_len(LANES);
+
+ fn to_bitmask_array(self) -> [u8; Self::BYTES] {
+ self.0.to_bitmask_array()
+ }
+
+ fn from_bitmask_array(bitmask: [u8; Self::BYTES]) -> Self {
+ Mask(mask_impl::Mask::from_bitmask_array(bitmask))
+ }
+}
diff --git a/library/portable-simd/crates/core_simd/src/mod.rs b/library/portable-simd/crates/core_simd/src/mod.rs
new file mode 100644
index 000000000..b472aa3ab
--- /dev/null
+++ b/library/portable-simd/crates/core_simd/src/mod.rs
@@ -0,0 +1,32 @@
+#[macro_use]
+mod swizzle;
+
+pub(crate) mod intrinsics;
+
+#[cfg(feature = "generic_const_exprs")]
+mod to_bytes;
+
+mod elements;
+mod eq;
+mod fmt;
+mod iter;
+mod lane_count;
+mod masks;
+mod ops;
+mod ord;
+mod select;
+mod vector;
+mod vendor;
+
+#[doc = include_str!("core_simd_docs.md")]
+pub mod simd {
+ pub(crate) use crate::core_simd::intrinsics;
+
+ pub use crate::core_simd::elements::*;
+ pub use crate::core_simd::eq::*;
+ pub use crate::core_simd::lane_count::{LaneCount, SupportedLaneCount};
+ pub use crate::core_simd::masks::*;
+ pub use crate::core_simd::ord::*;
+ pub use crate::core_simd::swizzle::*;
+ pub use crate::core_simd::vector::*;
+}
diff --git a/library/portable-simd/crates/core_simd/src/ops.rs b/library/portable-simd/crates/core_simd/src/ops.rs
new file mode 100644
index 000000000..5a077a469
--- /dev/null
+++ b/library/portable-simd/crates/core_simd/src/ops.rs
@@ -0,0 +1,254 @@
+use crate::simd::{LaneCount, Simd, SimdElement, SimdPartialEq, SupportedLaneCount};
+use core::ops::{Add, Mul};
+use core::ops::{BitAnd, BitOr, BitXor};
+use core::ops::{Div, Rem, Sub};
+use core::ops::{Shl, Shr};
+
+mod assign;
+mod deref;
+mod unary;
+
+impl<I, T, const LANES: usize> core::ops::Index<I> for Simd<T, LANES>
+where
+ T: SimdElement,
+ LaneCount<LANES>: SupportedLaneCount,
+ I: core::slice::SliceIndex<[T]>,
+{
+ type Output = I::Output;
+ fn index(&self, index: I) -> &Self::Output {
+ &self.as_array()[index]
+ }
+}
+
+impl<I, T, const LANES: usize> core::ops::IndexMut<I> for Simd<T, LANES>
+where
+ T: SimdElement,
+ LaneCount<LANES>: SupportedLaneCount,
+ I: core::slice::SliceIndex<[T]>,
+{
+ fn index_mut(&mut self, index: I) -> &mut Self::Output {
+ &mut self.as_mut_array()[index]
+ }
+}
+
+macro_rules! unsafe_base {
+ ($lhs:ident, $rhs:ident, {$simd_call:ident}, $($_:tt)*) => {
+ // Safety: $lhs and $rhs are vectors
+ unsafe { $crate::simd::intrinsics::$simd_call($lhs, $rhs) }
+ };
+}
+
+/// SAFETY: This macro should not be used for anything except Shl or Shr, and passed the appropriate shift intrinsic.
+/// It handles performing a bitand in addition to calling the shift operator, so that the result
+/// is well-defined: LLVM can return a poison value from shl, lshr, or ashr when rhs >= <Int>::BITS.
+/// At worst, this will maybe add another instruction and cycle,
+/// at best, it may open up more optimization opportunities,
+/// or simply be elided entirely, especially for SIMD ISAs which default to this.
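+/// For example, with `i32` lanes a shift count of 33 is masked to `33 & 31 == 1` before shifting.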
+///
+// FIXME: Consider implementing this in cg_llvm instead?
+// cg_clif defaults to this, and scalar MIR shifts also default to wrapping
+macro_rules! wrap_bitshift {
+ ($lhs:ident, $rhs:ident, {$simd_call:ident}, $int:ident) => {
+ #[allow(clippy::suspicious_arithmetic_impl)]
+ // Safety: $lhs and the bitand result are vectors
+ unsafe {
+ $crate::simd::intrinsics::$simd_call(
+ $lhs,
+ $rhs.bitand(Simd::splat(<$int>::BITS as $int - 1)),
+ )
+ }
+ };
+}
+
+/// SAFETY: This macro must only be used to impl Div or Rem and given the matching intrinsic.
+/// It guards against LLVM's UB conditions for integer div or rem using masks and selects,
+/// thus guaranteeing a Rust value returns instead.
+///
+/// | | LLVM | Rust
+/// | :--------------: | :--- | :----------
+/// | N {/,%} 0 | UB | panic!()
+/// | <$int>::MIN / -1 | UB | <$int>::MIN
+/// | <$int>::MIN % -1 | UB | 0
+///
+macro_rules! int_divrem_guard {
+ ( $lhs:ident,
+ $rhs:ident,
+ { const PANIC_ZERO: &'static str = $zero:literal;
+ $simd_call:ident
+ },
+ $int:ident ) => {
+ if $rhs.simd_eq(Simd::splat(0 as _)).any() {
+ panic!($zero);
+ } else {
+ // Prevent otherwise-UB overflow on the MIN / -1 case.
+ let rhs = if <$int>::MIN != 0 {
+ // This should, at worst, optimize to a few branchless logical ops
+ // Ideally, this entire conditional should evaporate
+ // Fire LLVM and implement those manually if it doesn't get the hint
+ ($lhs.simd_eq(Simd::splat(<$int>::MIN))
+ // type inference can break here, so cut an SInt to size
+ & $rhs.simd_eq(Simd::splat(-1i64 as _)))
+ .select(Simd::splat(1 as _), $rhs)
+ } else {
+ // Nice base case to make it easy to const-fold away the other branch.
+ $rhs
+ };
+ // Safety: $lhs and rhs are vectors
+ unsafe { $crate::simd::intrinsics::$simd_call($lhs, rhs) }
+ }
+ };
+}
+
+macro_rules! for_base_types {
+ ( T = ($($scalar:ident),*);
+ type Lhs = Simd<T, N>;
+ type Rhs = Simd<T, N>;
+ type Output = $out:ty;
+
+ impl $op:ident::$call:ident {
+ $macro_impl:ident $inner:tt
+ }) => {
+ $(
+ impl<const N: usize> $op<Self> for Simd<$scalar, N>
+ where
+ $scalar: SimdElement,
+ LaneCount<N>: SupportedLaneCount,
+ {
+ type Output = $out;
+
+ #[inline]
+ #[must_use = "operator returns a new vector without mutating the inputs"]
+ fn $call(self, rhs: Self) -> Self::Output {
+ $macro_impl!(self, rhs, $inner, $scalar)
+ }
+ })*
+ }
+}
+
+// A "TokenTree muncher": takes a set of scalar types `T = {};`
+// type parameters for the ops it implements, `Op::fn` names,
+// and a macro that expands into an expr, substituting in an intrinsic.
+// It passes that to for_base_types, which expands an impl for the types,
+// using the expanded expr in the function, and recurses with itself.
+//
+// tl;dr impls a set of ops::{Traits} for a set of types
+macro_rules! for_base_ops {
+ (
+ T = $types:tt;
+ type Lhs = Simd<T, N>;
+ type Rhs = Simd<T, N>;
+ type Output = $out:ident;
+ impl $op:ident::$call:ident
+ $inner:tt
+ $($rest:tt)*
+ ) => {
+ for_base_types! {
+ T = $types;
+ type Lhs = Simd<T, N>;
+ type Rhs = Simd<T, N>;
+ type Output = $out;
+ impl $op::$call
+ $inner
+ }
+ for_base_ops! {
+ T = $types;
+ type Lhs = Simd<T, N>;
+ type Rhs = Simd<T, N>;
+ type Output = $out;
+ $($rest)*
+ }
+ };
+ ($($done:tt)*) => {
+ // Done.
+ }
+}
+
+// Integers can always accept add, mul, sub, bitand, bitor, and bitxor.
+// For all of these operations, simd_* intrinsics apply wrapping logic.
+for_base_ops! {
+ T = (i8, i16, i32, i64, isize, u8, u16, u32, u64, usize);
+ type Lhs = Simd<T, N>;
+ type Rhs = Simd<T, N>;
+ type Output = Self;
+
+ impl Add::add {
+ unsafe_base { simd_add }
+ }
+
+ impl Mul::mul {
+ unsafe_base { simd_mul }
+ }
+
+ impl Sub::sub {
+ unsafe_base { simd_sub }
+ }
+
+ impl BitAnd::bitand {
+ unsafe_base { simd_and }
+ }
+
+ impl BitOr::bitor {
+ unsafe_base { simd_or }
+ }
+
+ impl BitXor::bitxor {
+ unsafe_base { simd_xor }
+ }
+
+ impl Div::div {
+ int_divrem_guard {
+ const PANIC_ZERO: &'static str = "attempt to divide by zero";
+ simd_div
+ }
+ }
+
+ impl Rem::rem {
+ int_divrem_guard {
+ const PANIC_ZERO: &'static str = "attempt to calculate the remainder with a divisor of zero";
+ simd_rem
+ }
+ }
+
+ // The only question is how to handle shifts >= <Int>::BITS.
+ // Our current solution uses wrapping logic.
+ impl Shl::shl {
+ wrap_bitshift { simd_shl }
+ }
+
+ impl Shr::shr {
+ wrap_bitshift {
+ // This automatically monomorphizes to lshr or ashr, depending,
+ // so it's fine to use it for both UInts and SInts.
+ simd_shr
+ }
+ }
+}
+
+// We don't need any special precautions here:
+// Floats always accept arithmetic ops, but may become NaN.
+for_base_ops! {
+ T = (f32, f64);
+ type Lhs = Simd<T, N>;
+ type Rhs = Simd<T, N>;
+ type Output = Self;
+
+ impl Add::add {
+ unsafe_base { simd_add }
+ }
+
+ impl Mul::mul {
+ unsafe_base { simd_mul }
+ }
+
+ impl Sub::sub {
+ unsafe_base { simd_sub }
+ }
+
+ impl Div::div {
+ unsafe_base { simd_div }
+ }
+
+ impl Rem::rem {
+ unsafe_base { simd_rem }
+ }
+}
diff --git a/library/portable-simd/crates/core_simd/src/ops/assign.rs b/library/portable-simd/crates/core_simd/src/ops/assign.rs
new file mode 100644
index 000000000..d2b48614f
--- /dev/null
+++ b/library/portable-simd/crates/core_simd/src/ops/assign.rs
@@ -0,0 +1,124 @@
+//! Assignment operators
+use super::*;
+use core::ops::{AddAssign, MulAssign}; // commutative binary op-assignment
+use core::ops::{BitAndAssign, BitOrAssign, BitXorAssign}; // commutative bit binary op-assignment
+use core::ops::{DivAssign, RemAssign, SubAssign}; // non-commutative binary op-assignment
+use core::ops::{ShlAssign, ShrAssign}; // non-commutative bit binary op-assignment
+
+// Arithmetic
+
+macro_rules! assign_ops {
+ ($(impl<T, U, const LANES: usize> $assignTrait:ident<U> for Simd<T, LANES>
+ where
+ Self: $trait:ident,
+ {
+ fn $assign_call:ident(rhs: U) {
+ $call:ident
+ }
+ })*) => {
+ $(impl<T, U, const LANES: usize> $assignTrait<U> for Simd<T, LANES>
+ where
+ Self: $trait<U, Output = Self>,
+ T: SimdElement,
+ LaneCount<LANES>: SupportedLaneCount,
+ {
+ #[inline]
+ fn $assign_call(&mut self, rhs: U) {
+ *self = self.$call(rhs);
+ }
+ })*
+ }
+}
+
+assign_ops! {
+ // Arithmetic
+ impl<T, U, const LANES: usize> AddAssign<U> for Simd<T, LANES>
+ where
+ Self: Add,
+ {
+ fn add_assign(rhs: U) {
+ add
+ }
+ }
+
+ impl<T, U, const LANES: usize> MulAssign<U> for Simd<T, LANES>
+ where
+ Self: Mul,
+ {
+ fn mul_assign(rhs: U) {
+ mul
+ }
+ }
+
+ impl<T, U, const LANES: usize> SubAssign<U> for Simd<T, LANES>
+ where
+ Self: Sub,
+ {
+ fn sub_assign(rhs: U) {
+ sub
+ }
+ }
+
+ impl<T, U, const LANES: usize> DivAssign<U> for Simd<T, LANES>
+ where
+ Self: Div,
+ {
+ fn div_assign(rhs: U) {
+ div
+ }
+ }
+ impl<T, U, const LANES: usize> RemAssign<U> for Simd<T, LANES>
+ where
+ Self: Rem,
+ {
+ fn rem_assign(rhs: U) {
+ rem
+ }
+ }
+
+ // Bitops
+ impl<T, U, const LANES: usize> BitAndAssign<U> for Simd<T, LANES>
+ where
+ Self: BitAnd,
+ {
+ fn bitand_assign(rhs: U) {
+ bitand
+ }
+ }
+
+ impl<T, U, const LANES: usize> BitOrAssign<U> for Simd<T, LANES>
+ where
+ Self: BitOr,
+ {
+ fn bitor_assign(rhs: U) {
+ bitor
+ }
+ }
+
+ impl<T, U, const LANES: usize> BitXorAssign<U> for Simd<T, LANES>
+ where
+ Self: BitXor,
+ {
+ fn bitxor_assign(rhs: U) {
+ bitxor
+ }
+ }
+
+ impl<T, U, const LANES: usize> ShlAssign<U> for Simd<T, LANES>
+ where
+ Self: Shl,
+ {
+ fn shl_assign(rhs: U) {
+ shl
+ }
+ }
+
+ impl<T, U, const LANES: usize> ShrAssign<U> for Simd<T, LANES>
+ where
+ Self: Shr,
+ {
+ fn shr_assign(rhs: U) {
+ shr
+ }
+ }
+}
diff --git a/library/portable-simd/crates/core_simd/src/ops/deref.rs b/library/portable-simd/crates/core_simd/src/ops/deref.rs
new file mode 100644
index 000000000..9883a74c9
--- /dev/null
+++ b/library/portable-simd/crates/core_simd/src/ops/deref.rs
@@ -0,0 +1,124 @@
+//! This module hacks in "implicit deref" for Simd's operators.
+//! Ideally, Rust would take care of this itself,
+//! and method calls usually handle the LHS implicitly.
+//! But this is not the case with arithmetic ops.
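+//!
+//! The impls below make reference operands work as well: `&a + b`, `a + &b`, and
+//! `&a + &b` all behave like `a + b` for `Simd` values.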
+use super::*;
+
+macro_rules! deref_lhs {
+ (impl<T, const LANES: usize> $trait:ident for $simd:ty {
+ fn $call:ident
+ }) => {
+ impl<T, const LANES: usize> $trait<$simd> for &$simd
+ where
+ T: SimdElement,
+ $simd: $trait<$simd, Output = $simd>,
+ LaneCount<LANES>: SupportedLaneCount,
+ {
+ type Output = Simd<T, LANES>;
+
+ #[inline]
+ #[must_use = "operator returns a new vector without mutating the inputs"]
+ fn $call(self, rhs: $simd) -> Self::Output {
+ (*self).$call(rhs)
+ }
+ }
+ };
+}
+
+macro_rules! deref_rhs {
+ (impl<T, const LANES: usize> $trait:ident for $simd:ty {
+ fn $call:ident
+ }) => {
+ impl<T, const LANES: usize> $trait<&$simd> for $simd
+ where
+ T: SimdElement,
+ $simd: $trait<$simd, Output = $simd>,
+ LaneCount<LANES>: SupportedLaneCount,
+ {
+ type Output = Simd<T, LANES>;
+
+ #[inline]
+ #[must_use = "operator returns a new vector without mutating the inputs"]
+ fn $call(self, rhs: &$simd) -> Self::Output {
+ self.$call(*rhs)
+ }
+ }
+ };
+}
+
+macro_rules! deref_ops {
+ ($(impl<T, const LANES: usize> $trait:ident for $simd:ty {
+ fn $call:ident
+ })*) => {
+ $(
+ deref_rhs! {
+ impl<T, const LANES: usize> $trait for $simd {
+ fn $call
+ }
+ }
+ deref_lhs! {
+ impl<T, const LANES: usize> $trait for $simd {
+ fn $call
+ }
+ }
+ impl<'lhs, 'rhs, T, const LANES: usize> $trait<&'rhs $simd> for &'lhs $simd
+ where
+ T: SimdElement,
+ $simd: $trait<$simd, Output = $simd>,
+ LaneCount<LANES>: SupportedLaneCount,
+ {
+ type Output = $simd;
+
+ #[inline]
+ #[must_use = "operator returns a new vector without mutating the inputs"]
+ fn $call(self, rhs: &$simd) -> Self::Output {
+ (*self).$call(*rhs)
+ }
+ }
+ )*
+ }
+}
+
+deref_ops! {
+ // Arithmetic
+ impl<T, const LANES: usize> Add for Simd<T, LANES> {
+ fn add
+ }
+
+ impl<T, const LANES: usize> Mul for Simd<T, LANES> {
+ fn mul
+ }
+
+ impl<T, const LANES: usize> Sub for Simd<T, LANES> {
+ fn sub
+ }
+
+ impl<T, const LANES: usize> Div for Simd<T, LANES> {
+ fn div
+ }
+
+ impl<T, const LANES: usize> Rem for Simd<T, LANES> {
+ fn rem
+ }
+
+ // Bitops
+ impl<T, const LANES: usize> BitAnd for Simd<T, LANES> {
+ fn bitand
+ }
+
+ impl<T, const LANES: usize> BitOr for Simd<T, LANES> {
+ fn bitor
+ }
+
+ impl<T, const LANES: usize> BitXor for Simd<T, LANES> {
+ fn bitxor
+ }
+
+ impl<T, const LANES: usize> Shl for Simd<T, LANES> {
+ fn shl
+ }
+
+ impl<T, const LANES: usize> Shr for Simd<T, LANES> {
+ fn shr
+ }
+}
diff --git a/library/portable-simd/crates/core_simd/src/ops/unary.rs b/library/portable-simd/crates/core_simd/src/ops/unary.rs
new file mode 100644
index 000000000..4ad022150
--- /dev/null
+++ b/library/portable-simd/crates/core_simd/src/ops/unary.rs
@@ -0,0 +1,78 @@
+use crate::simd::intrinsics;
+use crate::simd::{LaneCount, Simd, SimdElement, SupportedLaneCount};
+use core::ops::{Neg, Not}; // unary ops
+
+macro_rules! neg {
+ ($(impl<const LANES: usize> Neg for Simd<$scalar:ty, LANES>)*) => {
+ $(impl<const LANES: usize> Neg for Simd<$scalar, LANES>
+ where
+ $scalar: SimdElement,
+ LaneCount<LANES>: SupportedLaneCount,
+ {
+ type Output = Self;
+
+ #[inline]
+ #[must_use = "operator returns a new vector without mutating the input"]
+ fn neg(self) -> Self::Output {
+ // Safety: `self` is a signed vector
+ unsafe { intrinsics::simd_neg(self) }
+ }
+ })*
+ }
+}
+
+neg! {
+ impl<const LANES: usize> Neg for Simd<f32, LANES>
+
+ impl<const LANES: usize> Neg for Simd<f64, LANES>
+
+ impl<const LANES: usize> Neg for Simd<i8, LANES>
+
+ impl<const LANES: usize> Neg for Simd<i16, LANES>
+
+ impl<const LANES: usize> Neg for Simd<i32, LANES>
+
+ impl<const LANES: usize> Neg for Simd<i64, LANES>
+
+ impl<const LANES: usize> Neg for Simd<isize, LANES>
+}
+
+macro_rules! not {
+ ($(impl<const LANES: usize> Not for Simd<$scalar:ty, LANES>)*) => {
+ $(impl<const LANES: usize> Not for Simd<$scalar, LANES>
+ where
+ $scalar: SimdElement,
+ LaneCount<LANES>: SupportedLaneCount,
+ {
+ type Output = Self;
+
+ #[inline]
+ #[must_use = "operator returns a new vector without mutating the input"]
+ fn not(self) -> Self::Output {
+ self ^ (Simd::splat(!(0 as $scalar)))
+ }
+ })*
+ }
+}
+
+not! {
+ impl<const LANES: usize> Not for Simd<i8, LANES>
+
+ impl<const LANES: usize> Not for Simd<i16, LANES>
+
+ impl<const LANES: usize> Not for Simd<i32, LANES>
+
+ impl<const LANES: usize> Not for Simd<i64, LANES>
+
+ impl<const LANES: usize> Not for Simd<isize, LANES>
+
+ impl<const LANES: usize> Not for Simd<u8, LANES>
+
+ impl<const LANES: usize> Not for Simd<u16, LANES>
+
+ impl<const LANES: usize> Not for Simd<u32, LANES>
+
+ impl<const LANES: usize> Not for Simd<u64, LANES>
+
+ impl<const LANES: usize> Not for Simd<usize, LANES>
+}
diff --git a/library/portable-simd/crates/core_simd/src/ord.rs b/library/portable-simd/crates/core_simd/src/ord.rs
new file mode 100644
index 000000000..9a87bc2e3
--- /dev/null
+++ b/library/portable-simd/crates/core_simd/src/ord.rs
@@ -0,0 +1,213 @@
+use crate::simd::{intrinsics, LaneCount, Mask, Simd, SimdPartialEq, SupportedLaneCount};
+
+/// Parallel `PartialOrd`.
+pub trait SimdPartialOrd: SimdPartialEq {
+ /// Test if each lane is less than the corresponding lane in `other`.
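+ ///
+ /// A brief sketch with illustrative `i32` lanes:
+ /// ```
+ /// # #![feature(portable_simd)]
+ /// # use core::simd::{Simd, SimdPartialOrd};
+ /// let a = Simd::from_array([1, 2, 3, 4]);
+ /// let b = Simd::from_array([4, 3, 2, 1]);
+ /// assert_eq!(a.simd_lt(b).to_array(), [true, true, false, false]);
+ /// ```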
+ #[must_use = "method returns a new mask and does not mutate the original value"]
+ fn simd_lt(self, other: Self) -> Self::Mask;
+
+ /// Test if each lane is less than or equal to the corresponding lane in `other`.
+ #[must_use = "method returns a new mask and does not mutate the original value"]
+ fn simd_le(self, other: Self) -> Self::Mask;
+
+ /// Test if each lane is greater than the corresponding lane in `other`.
+ #[must_use = "method returns a new mask and does not mutate the original value"]
+ fn simd_gt(self, other: Self) -> Self::Mask;
+
+ /// Test if each lane is greater than or equal to the corresponding lane in `other`.
+ #[must_use = "method returns a new mask and does not mutate the original value"]
+ fn simd_ge(self, other: Self) -> Self::Mask;
+}
+
+/// Parallel `Ord`.
+pub trait SimdOrd: SimdPartialOrd {
+ /// Returns the lane-wise maximum with `other`.
+ #[must_use = "method returns a new vector and does not mutate the original value"]
+ fn simd_max(self, other: Self) -> Self;
+
+ /// Returns the lane-wise minimum with `other`.
+ #[must_use = "method returns a new vector and does not mutate the original value"]
+ fn simd_min(self, other: Self) -> Self;
+
+ /// Restrict each lane to a certain interval.
+ ///
+ /// For each lane, returns `max` if `self` is greater than `max`, and `min` if `self` is
+ /// less than `min`. Otherwise returns `self`.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `min > max` on any lane.
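+ ///
+ /// A quick sketch with illustrative `i32` lanes:
+ /// ```
+ /// # #![feature(portable_simd)]
+ /// # use core::simd::{Simd, SimdOrd};
+ /// let v = Simd::from_array([-5, 0, 5, 10]);
+ /// assert_eq!(v.simd_clamp(Simd::splat(0), Simd::splat(8)).to_array(), [0, 0, 5, 8]);
+ /// ```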
+ #[must_use = "method returns a new vector and does not mutate the original value"]
+ fn simd_clamp(self, min: Self, max: Self) -> Self;
+}
+
+macro_rules! impl_integer {
+ { $($integer:ty),* } => {
+ $(
+ impl<const LANES: usize> SimdPartialOrd for Simd<$integer, LANES>
+ where
+ LaneCount<LANES>: SupportedLaneCount,
+ {
+ #[inline]
+ fn simd_lt(self, other: Self) -> Self::Mask {
+ // Safety: `self` is a vector, and the result of the comparison
+ // is always a valid mask.
+ unsafe { Mask::from_int_unchecked(intrinsics::simd_lt(self, other)) }
+ }
+
+ #[inline]
+ fn simd_le(self, other: Self) -> Self::Mask {
+ // Safety: `self` is a vector, and the result of the comparison
+ // is always a valid mask.
+ unsafe { Mask::from_int_unchecked(intrinsics::simd_le(self, other)) }
+ }
+
+ #[inline]
+ fn simd_gt(self, other: Self) -> Self::Mask {
+ // Safety: `self` is a vector, and the result of the comparison
+ // is always a valid mask.
+ unsafe { Mask::from_int_unchecked(intrinsics::simd_gt(self, other)) }
+ }
+
+ #[inline]
+ fn simd_ge(self, other: Self) -> Self::Mask {
+ // Safety: `self` is a vector, and the result of the comparison
+ // is always a valid mask.
+ unsafe { Mask::from_int_unchecked(intrinsics::simd_ge(self, other)) }
+ }
+ }
+
+ impl<const LANES: usize> SimdOrd for Simd<$integer, LANES>
+ where
+ LaneCount<LANES>: SupportedLaneCount,
+ {
+ #[inline]
+ fn simd_max(self, other: Self) -> Self {
+ self.simd_lt(other).select(other, self)
+ }
+
+ #[inline]
+ fn simd_min(self, other: Self) -> Self {
+ self.simd_gt(other).select(other, self)
+ }
+
+ #[inline]
+ fn simd_clamp(self, min: Self, max: Self) -> Self {
+ assert!(
+ min.simd_le(max).all(),
+ "each lane in `min` must be less than or equal to the corresponding lane in `max`",
+ );
+ self.simd_max(min).simd_min(max)
+ }
+ }
+ )*
+ }
+}
+
+impl_integer! { u8, u16, u32, u64, usize, i8, i16, i32, i64, isize }
+
+macro_rules! impl_float {
+ { $($float:ty),* } => {
+ $(
+ impl<const LANES: usize> SimdPartialOrd for Simd<$float, LANES>
+ where
+ LaneCount<LANES>: SupportedLaneCount,
+ {
+ #[inline]
+ fn simd_lt(self, other: Self) -> Self::Mask {
+ // Safety: `self` is a vector, and the result of the comparison
+ // is always a valid mask.
+ unsafe { Mask::from_int_unchecked(intrinsics::simd_lt(self, other)) }
+ }
+
+ #[inline]
+ fn simd_le(self, other: Self) -> Self::Mask {
+ // Safety: `self` is a vector, and the result of the comparison
+ // is always a valid mask.
+ unsafe { Mask::from_int_unchecked(intrinsics::simd_le(self, other)) }
+ }
+
+ #[inline]
+ fn simd_gt(self, other: Self) -> Self::Mask {
+ // Safety: `self` is a vector, and the result of the comparison
+ // is always a valid mask.
+ unsafe { Mask::from_int_unchecked(intrinsics::simd_gt(self, other)) }
+ }
+
+ #[inline]
+ fn simd_ge(self, other: Self) -> Self::Mask {
+ // Safety: `self` is a vector, and the result of the comparison
+ // is always a valid mask.
+ unsafe { Mask::from_int_unchecked(intrinsics::simd_ge(self, other)) }
+ }
+ }
+ )*
+ }
+}
+
+impl_float! { f32, f64 }
+
+macro_rules! impl_mask {
+ { $($integer:ty),* } => {
+ $(
+ impl<const LANES: usize> SimdPartialOrd for Mask<$integer, LANES>
+ where
+ LaneCount<LANES>: SupportedLaneCount,
+ {
+ #[inline]
+ fn simd_lt(self, other: Self) -> Self::Mask {
+ // Safety: `self` is a vector, and the result of the comparison
+ // is always a valid mask.
+ unsafe { Self::from_int_unchecked(intrinsics::simd_lt(self.to_int(), other.to_int())) }
+ }
+
+ #[inline]
+ fn simd_le(self, other: Self) -> Self::Mask {
+ // Safety: `self` is a vector, and the result of the comparison
+ // is always a valid mask.
+ unsafe { Self::from_int_unchecked(intrinsics::simd_le(self.to_int(), other.to_int())) }
+ }
+
+ #[inline]
+ fn simd_gt(self, other: Self) -> Self::Mask {
+ // Safety: `self` is a vector, and the result of the comparison
+ // is always a valid mask.
+ unsafe { Self::from_int_unchecked(intrinsics::simd_gt(self.to_int(), other.to_int())) }
+ }
+
+ #[inline]
+ fn simd_ge(self, other: Self) -> Self::Mask {
+ // Safety: `self` is a vector, and the result of the comparison
+ // is always a valid mask.
+ unsafe { Self::from_int_unchecked(intrinsics::simd_ge(self.to_int(), other.to_int())) }
+ }
+ }
+
+ impl<const LANES: usize> SimdOrd for Mask<$integer, LANES>
+ where
+ LaneCount<LANES>: SupportedLaneCount,
+ {
+ #[inline]
+ fn simd_max(self, other: Self) -> Self {
+ self.simd_gt(other).select_mask(other, self)
+ }
+
+ #[inline]
+ fn simd_min(self, other: Self) -> Self {
+ self.simd_lt(other).select_mask(other, self)
+ }
+
+ #[inline]
+ fn simd_clamp(self, min: Self, max: Self) -> Self {
+ assert!(
+ min.simd_le(max).all(),
+ "each lane in `min` must be less than or equal to the corresponding lane in `max`",
+ );
+ self.simd_max(min).simd_min(max)
+ }
+ }
+ )*
+ }
+}
+
+impl_mask! { i8, i16, i32, i64, isize }
diff --git a/library/portable-simd/crates/core_simd/src/select.rs b/library/portable-simd/crates/core_simd/src/select.rs
new file mode 100644
index 000000000..065c5987d
--- /dev/null
+++ b/library/portable-simd/crates/core_simd/src/select.rs
@@ -0,0 +1,59 @@
+use crate::simd::intrinsics;
+use crate::simd::{LaneCount, Mask, MaskElement, Simd, SimdElement, SupportedLaneCount};
+
+impl<T, const LANES: usize> Mask<T, LANES>
+where
+ T: MaskElement,
+ LaneCount<LANES>: SupportedLaneCount,
+{
+ /// Choose lanes from two vectors.
+ ///
+ /// For each lane in the mask, choose the corresponding lane from `true_values` if
+ /// that lane mask is true, and `false_values` if that lane mask is false.
+ ///
+ /// # Examples
+ /// ```
+ /// # #![feature(portable_simd)]
+ /// # use core::simd::{Simd, Mask};
+ /// let a = Simd::from_array([0, 1, 2, 3]);
+ /// let b = Simd::from_array([4, 5, 6, 7]);
+ /// let mask = Mask::from_array([true, false, false, true]);
+ /// let c = mask.select(a, b);
+ /// assert_eq!(c.to_array(), [0, 5, 6, 3]);
+ /// ```
+ #[inline]
+ #[must_use = "method returns a new vector and does not mutate the original inputs"]
+ pub fn select<U>(
+ self,
+ true_values: Simd<U, LANES>,
+ false_values: Simd<U, LANES>,
+ ) -> Simd<U, LANES>
+ where
+ U: SimdElement<Mask = T>,
+ {
+ // Safety: The mask has been cast to a vector of integers,
+ // and the operands to select between are vectors of the same type and length.
+ unsafe { intrinsics::simd_select(self.to_int(), true_values, false_values) }
+ }
+
+ /// Choose lanes from two masks.
+ ///
+ /// For each lane in the mask, choose the corresponding lane from `true_values` if
+ /// that lane mask is true, and `false_values` if that lane mask is false.
+ ///
+ /// # Examples
+ /// ```
+ /// # #![feature(portable_simd)]
+ /// # use core::simd::Mask;
+ /// let a = Mask::<i32, 4>::from_array([true, true, false, false]);
+ /// let b = Mask::<i32, 4>::from_array([false, false, true, true]);
+ /// let mask = Mask::<i32, 4>::from_array([true, false, false, true]);
+ /// let c = mask.select_mask(a, b);
+ /// assert_eq!(c.to_array(), [true, false, true, false]);
+ /// ```
+ #[inline]
+ #[must_use = "method returns a new mask and does not mutate the original inputs"]
+ pub fn select_mask(self, true_values: Self, false_values: Self) -> Self {
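+ // A bitwise mux: lanes set in `self` come from `true_values`, the rest from `false_values`.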
+ self & true_values | !self & false_values
+ }
+}
diff --git a/library/portable-simd/crates/core_simd/src/swizzle.rs b/library/portable-simd/crates/core_simd/src/swizzle.rs
new file mode 100644
index 000000000..22999d249
--- /dev/null
+++ b/library/portable-simd/crates/core_simd/src/swizzle.rs
@@ -0,0 +1,385 @@
+use crate::simd::intrinsics;
+use crate::simd::{LaneCount, Simd, SimdElement, SupportedLaneCount};
+
+/// Constructs a new SIMD vector by copying elements from selected lanes in other vectors.
+///
+/// When swizzling one vector, lanes are selected by a `const` array of `usize`,
+/// like [`Swizzle`].
+///
+/// When swizzling two vectors, lanes are selected by a `const` array of [`Which`],
+/// like [`Swizzle2`].
+///
+/// # Examples
+///
+/// With a single SIMD vector, the const array specifies lane indices in that vector:
+/// ```
+/// # #![feature(portable_simd)]
+/// # use core::simd::{u32x2, u32x4, simd_swizzle};
+/// let v = u32x4::from_array([10, 11, 12, 13]);
+///
+/// // Keeping the same size
+/// let r: u32x4 = simd_swizzle!(v, [3, 0, 1, 2]);
+/// assert_eq!(r.to_array(), [13, 10, 11, 12]);
+///
+/// // Changing the number of lanes
+/// let r: u32x2 = simd_swizzle!(v, [3, 1]);
+/// assert_eq!(r.to_array(), [13, 11]);
+/// ```
+///
+/// With two input SIMD vectors, the const array uses `Which` to specify the source of each index:
+/// ```
+/// # #![feature(portable_simd)]
+/// # use core::simd::{u32x2, u32x4, simd_swizzle, Which};
+/// use Which::{First, Second};
+/// let a = u32x4::from_array([0, 1, 2, 3]);
+/// let b = u32x4::from_array([4, 5, 6, 7]);
+///
+/// // Keeping the same size
+/// let r: u32x4 = simd_swizzle!(a, b, [First(0), First(1), Second(2), Second(3)]);
+/// assert_eq!(r.to_array(), [0, 1, 6, 7]);
+///
+/// // Changing the number of lanes
+/// let r: u32x2 = simd_swizzle!(a, b, [First(0), Second(0)]);
+/// assert_eq!(r.to_array(), [0, 4]);
+/// ```
+#[allow(unused_macros)]
+pub macro simd_swizzle {
+ (
+ $vector:expr, $index:expr $(,)?
+ ) => {
+ {
+ use $crate::simd::Swizzle;
+ struct Impl;
+ impl<const LANES: usize> Swizzle<LANES, {$index.len()}> for Impl {
+ const INDEX: [usize; {$index.len()}] = $index;
+ }
+ Impl::swizzle($vector)
+ }
+ },
+ (
+ $first:expr, $second:expr, $index:expr $(,)?
+ ) => {
+ {
+ use $crate::simd::{Which, Swizzle2};
+ struct Impl;
+ impl<const LANES: usize> Swizzle2<LANES, {$index.len()}> for Impl {
+ const INDEX: [Which; {$index.len()}] = $index;
+ }
+ Impl::swizzle2($first, $second)
+ }
+ }
+}
+
+/// Specifies a lane index into one of two SIMD vectors.
+///
+/// This is an input type for [Swizzle2] and helper macros like [simd_swizzle].
+#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
+pub enum Which {
+ /// Index of a lane in the first input SIMD vector.
+ First(usize),
+ /// Index of a lane in the second input SIMD vector.
+ Second(usize),
+}
+
+/// Create a vector from the elements of another vector.
+pub trait Swizzle<const INPUT_LANES: usize, const OUTPUT_LANES: usize> {
+ /// Map from the lanes of the input vector to the output vector.
+ const INDEX: [usize; OUTPUT_LANES];
+
+ /// Create a new vector from the lanes of `vector`.
+ ///
+ /// Lane `i` of the output is `vector[Self::INDEX[i]]`.
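+ ///
+ /// A sketch of a direct implementation, using a hypothetical `Broadcast0` swizzle that repeats lane 0:
+ /// ```
+ /// # #![feature(portable_simd)]
+ /// # use core::simd::{Simd, Swizzle};
+ /// struct Broadcast0;
+ /// impl Swizzle<4, 4> for Broadcast0 {
+ /// const INDEX: [usize; 4] = [0, 0, 0, 0];
+ /// }
+ /// let v = Simd::from_array([10, 11, 12, 13]);
+ /// assert_eq!(Broadcast0::swizzle(v).to_array(), [10, 10, 10, 10]);
+ /// ```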
+ #[inline]
+ #[must_use = "method returns a new vector and does not mutate the original inputs"]
+ fn swizzle<T>(vector: Simd<T, INPUT_LANES>) -> Simd<T, OUTPUT_LANES>
+ where
+ T: SimdElement,
+ LaneCount<INPUT_LANES>: SupportedLaneCount,
+ LaneCount<OUTPUT_LANES>: SupportedLaneCount,
+ {
+ // Safety: `vector` is a vector, and `INDEX_IMPL` is a const array of u32.
+ unsafe { intrinsics::simd_shuffle(vector, vector, Self::INDEX_IMPL) }
+ }
+}
+
+/// Create a vector from the elements of two other vectors.
+pub trait Swizzle2<const INPUT_LANES: usize, const OUTPUT_LANES: usize> {
+ /// Map from the lanes of the input vectors to the output vector
+ const INDEX: [Which; OUTPUT_LANES];
+
+ /// Create a new vector from the lanes of `first` and `second`.
+ ///
+ /// Lane `i` is `first[j]` when `Self::INDEX[i]` is `First(j)`, or `second[j]` when it is
+ /// `Second(j)`.
+ #[inline]
+ #[must_use = "method returns a new vector and does not mutate the original inputs"]
+ fn swizzle2<T>(
+ first: Simd<T, INPUT_LANES>,
+ second: Simd<T, INPUT_LANES>,
+ ) -> Simd<T, OUTPUT_LANES>
+ where
+ T: SimdElement,
+ LaneCount<INPUT_LANES>: SupportedLaneCount,
+ LaneCount<OUTPUT_LANES>: SupportedLaneCount,
+ {
+ // Safety: `first` and `second` are vectors, and `INDEX_IMPL` is a const array of u32.
+ unsafe { intrinsics::simd_shuffle(first, second, Self::INDEX_IMPL) }
+ }
+}
+
+/// The `simd_shuffle` intrinsic expects `u32`, so do error checking and conversion here.
+/// This trait hides `INDEX_IMPL` from the public API.
+trait SwizzleImpl<const INPUT_LANES: usize, const OUTPUT_LANES: usize> {
+ const INDEX_IMPL: [u32; OUTPUT_LANES];
+}
+
+impl<T, const INPUT_LANES: usize, const OUTPUT_LANES: usize> SwizzleImpl<INPUT_LANES, OUTPUT_LANES>
+ for T
+where
+ T: Swizzle<INPUT_LANES, OUTPUT_LANES> + ?Sized,
+{
+ const INDEX_IMPL: [u32; OUTPUT_LANES] = {
+ let mut output = [0; OUTPUT_LANES];
+ let mut i = 0;
+ while i < OUTPUT_LANES {
+ let index = Self::INDEX[i];
+ assert!(index as u32 as usize == index);
+ assert!(index < INPUT_LANES, "source lane exceeds input lane count");
+ output[i] = index as u32;
+ i += 1;
+ }
+ output
+ };
+}
+
+/// The `simd_shuffle` intrinsic expects `u32`, so do error checking and conversion here.
+/// This trait hides `INDEX_IMPL` from the public API.
+trait Swizzle2Impl<const INPUT_LANES: usize, const OUTPUT_LANES: usize> {
+ const INDEX_IMPL: [u32; OUTPUT_LANES];
+}
+
+impl<T, const INPUT_LANES: usize, const OUTPUT_LANES: usize> Swizzle2Impl<INPUT_LANES, OUTPUT_LANES>
+ for T
+where
+ T: Swizzle2<INPUT_LANES, OUTPUT_LANES> + ?Sized,
+{
+ const INDEX_IMPL: [u32; OUTPUT_LANES] = {
+ let mut output = [0; OUTPUT_LANES];
+ let mut i = 0;
+ while i < OUTPUT_LANES {
+ let (offset, index) = match Self::INDEX[i] {
+ Which::First(index) => (false, index),
+ Which::Second(index) => (true, index),
+ };
+ assert!(index < INPUT_LANES, "source lane exceeds input lane count");
+
+ // lanes are indexed by the first vector, then second vector
+ let index = if offset { index + INPUT_LANES } else { index };
+ assert!(index as u32 as usize == index);
+ output[i] = index as u32;
+ i += 1;
+ }
+ output
+ };
+}
+
+impl<T, const LANES: usize> Simd<T, LANES>
+where
+ T: SimdElement,
+ LaneCount<LANES>: SupportedLaneCount,
+{
+ /// Reverse the order of the lanes in the vector.
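+ ///
+ /// A small sketch with illustrative values:
+ /// ```
+ /// # #![feature(portable_simd)]
+ /// # use core::simd::Simd;
+ /// let v = Simd::from_array([0, 1, 2, 3]);
+ /// assert_eq!(v.reverse().to_array(), [3, 2, 1, 0]);
+ /// ```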
+ #[inline]
+ #[must_use = "method returns a new vector and does not mutate the original inputs"]
+ pub fn reverse(self) -> Self {
+ const fn reverse_index<const LANES: usize>() -> [usize; LANES] {
+ let mut index = [0; LANES];
+ let mut i = 0;
+ while i < LANES {
+ index[i] = LANES - i - 1;
+ i += 1;
+ }
+ index
+ }
+
+ struct Reverse;
+
+ impl<const LANES: usize> Swizzle<LANES, LANES> for Reverse {
+ const INDEX: [usize; LANES] = reverse_index::<LANES>();
+ }
+
+ Reverse::swizzle(self)
+ }
+
+ /// Rotates the vector such that the first `OFFSET` elements move to the end
+ /// while the last `LANES - OFFSET` elements move to the front. After calling `rotate_lanes_left`,
+ /// the element previously in lane `OFFSET` will become the first element in the vector.
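+ ///
+ /// A small sketch with `OFFSET = 1` and illustrative values:
+ /// ```
+ /// # #![feature(portable_simd)]
+ /// # use core::simd::Simd;
+ /// let v = Simd::from_array([0, 1, 2, 3]);
+ /// assert_eq!(v.rotate_lanes_left::<1>().to_array(), [1, 2, 3, 0]);
+ /// ```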
+ #[inline]
+ #[must_use = "method returns a new vector and does not mutate the original inputs"]
+ pub fn rotate_lanes_left<const OFFSET: usize>(self) -> Self {
+ const fn rotate_index<const OFFSET: usize, const LANES: usize>() -> [usize; LANES] {
+ let offset = OFFSET % LANES;
+ let mut index = [0; LANES];
+ let mut i = 0;
+ while i < LANES {
+ index[i] = (i + offset) % LANES;
+ i += 1;
+ }
+ index
+ }
+
+ struct Rotate<const OFFSET: usize>;
+
+ impl<const OFFSET: usize, const LANES: usize> Swizzle<LANES, LANES> for Rotate<OFFSET> {
+ const INDEX: [usize; LANES] = rotate_index::<OFFSET, LANES>();
+ }
+
+ Rotate::<OFFSET>::swizzle(self)
+ }
+
+ /// Rotates the vector such that the first `LANES - OFFSET` elements of the vector move to
+ /// the end while the last `OFFSET` elements move to the front. After calling `rotate_lanes_right`,
+ /// the element previously at index `LANES - OFFSET` will become the first element in the vector.
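+ ///
+ /// A small sketch with `OFFSET = 1` and illustrative values:
+ /// ```
+ /// # #![feature(portable_simd)]
+ /// # use core::simd::Simd;
+ /// let v = Simd::from_array([0, 1, 2, 3]);
+ /// assert_eq!(v.rotate_lanes_right::<1>().to_array(), [3, 0, 1, 2]);
+ /// ```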
+ #[inline]
+ #[must_use = "method returns a new vector and does not mutate the original inputs"]
+ pub fn rotate_lanes_right<const OFFSET: usize>(self) -> Self {
+ const fn rotate_index<const OFFSET: usize, const LANES: usize>() -> [usize; LANES] {
+ let offset = LANES - OFFSET % LANES;
+ let mut index = [0; LANES];
+ let mut i = 0;
+ while i < LANES {
+ index[i] = (i + offset) % LANES;
+ i += 1;
+ }
+ index
+ }
+
+ struct Rotate<const OFFSET: usize>;
+
+ impl<const OFFSET: usize, const LANES: usize> Swizzle<LANES, LANES> for Rotate<OFFSET> {
+ const INDEX: [usize; LANES] = rotate_index::<OFFSET, LANES>();
+ }
+
+ Rotate::<OFFSET>::swizzle(self)
+ }
+
+ /// Interleave two vectors.
+ ///
+ /// Produces two vectors with lanes taken alternately from `self` and `other`.
+ ///
+ /// The first result contains the first `LANES / 2` lanes from `self` and `other`,
+ /// alternating, starting with the first lane of `self`.
+ ///
+ /// The second result contains the last `LANES / 2` lanes from `self` and `other`,
+ /// alternating, starting with the lane `LANES / 2` from the start of `self`.
+ ///
+ /// ```
+ /// #![feature(portable_simd)]
+ /// # use core::simd::Simd;
+ /// let a = Simd::from_array([0, 1, 2, 3]);
+ /// let b = Simd::from_array([4, 5, 6, 7]);
+ /// let (x, y) = a.interleave(b);
+ /// assert_eq!(x.to_array(), [0, 4, 1, 5]);
+ /// assert_eq!(y.to_array(), [2, 6, 3, 7]);
+ /// ```
+ #[inline]
+ #[must_use = "method returns a new vector and does not mutate the original inputs"]
+ pub fn interleave(self, other: Self) -> (Self, Self) {
+ const fn lo<const LANES: usize>() -> [Which; LANES] {
+ let mut idx = [Which::First(0); LANES];
+ let mut i = 0;
+ while i < LANES {
+ let offset = i / 2;
+ idx[i] = if i % 2 == 0 {
+ Which::First(offset)
+ } else {
+ Which::Second(offset)
+ };
+ i += 1;
+ }
+ idx
+ }
+ const fn hi<const LANES: usize>() -> [Which; LANES] {
+ let mut idx = [Which::First(0); LANES];
+ let mut i = 0;
+ while i < LANES {
+ let offset = (LANES + i) / 2;
+ idx[i] = if i % 2 == 0 {
+ Which::First(offset)
+ } else {
+ Which::Second(offset)
+ };
+ i += 1;
+ }
+ idx
+ }
+
+ struct Lo;
+ struct Hi;
+
+ impl<const LANES: usize> Swizzle2<LANES, LANES> for Lo {
+ const INDEX: [Which; LANES] = lo::<LANES>();
+ }
+
+ impl<const LANES: usize> Swizzle2<LANES, LANES> for Hi {
+ const INDEX: [Which; LANES] = hi::<LANES>();
+ }
+
+ (Lo::swizzle2(self, other), Hi::swizzle2(self, other))
+ }
+
+ /// Deinterleave two vectors.
+ ///
+ /// The first result takes every other lane of `self` and then `other`, starting with
+ /// the first lane.
+ ///
+ /// The second result takes every other lane of `self` and then `other`, starting with
+ /// the second lane.
+ ///
+ /// ```
+ /// #![feature(portable_simd)]
+ /// # use core::simd::Simd;
+ /// let a = Simd::from_array([0, 4, 1, 5]);
+ /// let b = Simd::from_array([2, 6, 3, 7]);
+ /// let (x, y) = a.deinterleave(b);
+ /// assert_eq!(x.to_array(), [0, 1, 2, 3]);
+ /// assert_eq!(y.to_array(), [4, 5, 6, 7]);
+ /// ```
+ #[inline]
+ #[must_use = "method returns a new vector and does not mutate the original inputs"]
+ pub fn deinterleave(self, other: Self) -> (Self, Self) {
+ const fn even<const LANES: usize>() -> [Which; LANES] {
+ let mut idx = [Which::First(0); LANES];
+ let mut i = 0;
+ while i < LANES / 2 {
+ idx[i] = Which::First(2 * i);
+ idx[i + LANES / 2] = Which::Second(2 * i);
+ i += 1;
+ }
+ idx
+ }
+ const fn odd<const LANES: usize>() -> [Which; LANES] {
+ let mut idx = [Which::First(0); LANES];
+ let mut i = 0;
+ while i < LANES / 2 {
+ idx[i] = Which::First(2 * i + 1);
+ idx[i + LANES / 2] = Which::Second(2 * i + 1);
+ i += 1;
+ }
+ idx
+ }
+
+ struct Even;
+ struct Odd;
+
+ impl<const LANES: usize> Swizzle2<LANES, LANES> for Even {
+ const INDEX: [Which; LANES] = even::<LANES>();
+ }
+
+ impl<const LANES: usize> Swizzle2<LANES, LANES> for Odd {
+ const INDEX: [Which; LANES] = odd::<LANES>();
+ }
+
+ (Even::swizzle2(self, other), Odd::swizzle2(self, other))
+ }
+}
diff --git a/library/portable-simd/crates/core_simd/src/to_bytes.rs b/library/portable-simd/crates/core_simd/src/to_bytes.rs
new file mode 100644
index 000000000..b36b1a347
--- /dev/null
+++ b/library/portable-simd/crates/core_simd/src/to_bytes.rs
@@ -0,0 +1,41 @@
+macro_rules! impl_to_bytes {
+ { $ty:ty, $size:literal } => {
+ impl<const LANES: usize> crate::simd::Simd<$ty, LANES>
+ where
+ crate::simd::LaneCount<LANES>: crate::simd::SupportedLaneCount,
+ crate::simd::LaneCount<{{ $size * LANES }}>: crate::simd::SupportedLaneCount,
+ {
+ /// Return the memory representation of this integer as a byte array in native byte
+ /// order.
+ pub fn to_ne_bytes(self) -> crate::simd::Simd<u8, {{ $size * LANES }}> {
+ // Safety: transmuting between vectors is safe
+ unsafe { core::mem::transmute_copy(&self) }
+ }
+
+ /// Create a native endian integer value from its memory representation as a byte array
+ /// in native endianness.
+ pub fn from_ne_bytes(bytes: crate::simd::Simd<u8, {{ $size * LANES }}>) -> Self {
+ // Safety: transmuting between vectors is safe
+ unsafe { core::mem::transmute_copy(&bytes) }
+ }
+ }
+ }
+}
+
+impl_to_bytes! { u8, 1 }
+impl_to_bytes! { u16, 2 }
+impl_to_bytes! { u32, 4 }
+impl_to_bytes! { u64, 8 }
+#[cfg(target_pointer_width = "32")]
+impl_to_bytes! { usize, 4 }
+#[cfg(target_pointer_width = "64")]
+impl_to_bytes! { usize, 8 }
+
+impl_to_bytes! { i8, 1 }
+impl_to_bytes! { i16, 2 }
+impl_to_bytes! { i32, 4 }
+impl_to_bytes! { i64, 8 }
+#[cfg(target_pointer_width = "32")]
+impl_to_bytes! { isize, 4 }
+#[cfg(target_pointer_width = "64")]
+impl_to_bytes! { isize, 8 }
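The generated `to_ne_bytes`/`from_ne_bytes` pair is byte-order dependent; a small sketch of what that means, assuming a little-endian target (the first assertion would differ on big-endian hardware):

```rust
#![feature(portable_simd)]
use core::simd::Simd;

fn main() {
    let v = Simd::from_array([0x0102_0304u32, 0x0506_0708]);
    let bytes = v.to_ne_bytes(); // Simd<u8, 8> in native byte order

    // On little-endian hardware each u32 is emitted least-significant byte first.
    #[cfg(target_endian = "little")]
    assert_eq!(bytes.to_array(), [4, 3, 2, 1, 8, 7, 6, 5]);

    // The round trip works regardless of endianness.
    assert_eq!(Simd::<u32, 2>::from_ne_bytes(bytes), v);
}
```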
diff --git a/library/portable-simd/crates/core_simd/src/vector.rs b/library/portable-simd/crates/core_simd/src/vector.rs
new file mode 100644
index 000000000..78f56402e
--- /dev/null
+++ b/library/portable-simd/crates/core_simd/src/vector.rs
@@ -0,0 +1,742 @@
+mod float;
+mod int;
+mod uint;
+
+pub use float::*;
+pub use int::*;
+pub use uint::*;
+
+// Vectors of pointers are not for public use at the current time.
+pub(crate) mod ptr;
+
+use crate::simd::{
+ intrinsics, LaneCount, Mask, MaskElement, SimdPartialOrd, SupportedLaneCount, Swizzle,
+};
+
+/// A SIMD vector of `LANES` elements of type `T`. `Simd<T, N>` has the same shape as [`[T; N]`](array), but operates like `T`.
+///
+/// Two vectors of the same type and length will, by convention, support the operators (+, *, etc.) that `T` does.
+/// These take the lanes at each index on the left-hand side and right-hand side, perform the operation,
+/// and return the result in the same lane in a vector of equal size. For a given operator, this is equivalent to zipping
+/// the two arrays together and mapping the operator over each lane.
+///
+/// ```rust
+/// # #![feature(array_zip, portable_simd)]
+/// # use core::simd::{Simd};
+/// let a0: [i32; 4] = [-2, 0, 2, 4];
+/// let a1 = [10, 9, 8, 7];
+/// let zm_add = a0.zip(a1).map(|(lhs, rhs)| lhs + rhs);
+/// let zm_mul = a0.zip(a1).map(|(lhs, rhs)| lhs * rhs);
+///
+/// // `Simd<T, N>` implements `From<[T; N]>`.
+/// let (v0, v1) = (Simd::from(a0), Simd::from(a1));
+/// // Which means arrays implement `Into<Simd<T, N>>`.
+/// assert_eq!(v0 + v1, zm_add.into());
+/// assert_eq!(v0 * v1, zm_mul.into());
+/// ```
+///
+/// `Simd` with integers has the quirk that these operations are also inherently wrapping, as if `T` was [`Wrapping<T>`].
+/// Thus, `Simd` does not implement `wrapping_add`, because that is the default behavior.
+/// This means there is no warning on overflows, even in "debug" builds.
+/// For most applications where `Simd` is appropriate, it is "not a bug" to wrap,
+/// and even "debug builds" are unlikely to tolerate the loss of performance.
+/// You may want to consider using explicitly checked arithmetic if such is required.
+/// Division by zero still causes a panic, so you may want to consider using floating point numbers if that is unacceptable.
+///
+/// [`Wrapping<T>`]: core::num::Wrapping
+///
+/// # Layout
+/// `Simd<T, N>` has a layout similar to `[T; N]` (identical "shapes"), but with a greater alignment.
+/// `[T; N]` is aligned to `T`, but `Simd<T, N>` will have an alignment based on both `T` and `N`.
+/// It is thus sound to [`transmute`] `Simd<T, N>` to `[T; N]`, and this will typically optimize to zero cost,
+/// but the reverse transmutation is more likely to require a copy the compiler cannot simply elide.
+///
+/// # ABI "Features"
+/// Due to Rust's safety guarantees, `Simd<T, N>` is currently passed to and from functions via memory, not SIMD registers,
+/// except as an optimization. `#[inline]` hints are recommended on functions that accept `Simd<T, N>` or return it.
+/// The need for this may be corrected in the future.
+///
+/// # Safe SIMD with Unsafe Rust
+///
+/// Operations with `Simd` are typically safe, but there are many reasons to want to combine SIMD with `unsafe` code.
+/// Care must be taken to respect differences between `Simd` and other types it may be transformed into or derived from.
+/// In particular, the layout of `Simd<T, N>` may be similar to `[T; N]`, and may allow some transmutations,
+/// but references to `[T; N]` are not interchangeable with those to `Simd<T, N>`.
+/// Thus, when using `unsafe` Rust to read and write `Simd<T, N>` through [raw pointers], it is a good idea to first try with
+/// [`read_unaligned`] and [`write_unaligned`]. This is because:
+/// - [`read`] and [`write`] require full alignment (in this case, `Simd<T, N>`'s alignment)
+/// - the likely source for reading or destination for writing `Simd<T, N>` is [`[T]`](slice) and similar types, aligned to `T`
+/// - combining these actions would violate the `unsafe` contract and explode the program into a puff of **undefined behavior**
+/// - the compiler can implicitly adjust layouts to make unaligned reads or writes fully aligned if it sees the optimization
+/// - most contemporary processors suffer no performance penalty for "unaligned" reads and writes that are aligned at runtime
+///
+/// By imposing fewer obligations, unaligned functions are less likely to make the program unsound,
+/// and may be just as fast as stricter alternatives.
+/// When trying to guarantee alignment, [`[T]::as_simd`][as_simd] is an option for converting `[T]` to `[Simd<T, N>]`,
+/// and allows soundly operating on an aligned SIMD body, but it may cost more time when handling the scalar head and tail.
+/// If these are not sufficient, then it is most ideal to design data structures to be already aligned
+/// to the `Simd<T, N>` you wish to use before using `unsafe` Rust to read or write.
+/// More conventional ways to compensate for these facts, like materializing `Simd` to or from an array first,
+/// are handled by safe methods like [`Simd::from_array`] and [`Simd::from_slice`].
+///
+/// [`transmute`]: core::mem::transmute
+/// [raw pointers]: pointer
+/// [`read_unaligned`]: pointer::read_unaligned
+/// [`write_unaligned`]: pointer::write_unaligned
+/// [`read`]: pointer::read
+/// [`write`]: pointer::write
+/// [as_simd]: slice::as_simd
+#[repr(simd)]
+pub struct Simd<T, const LANES: usize>([T; LANES])
+where
+ T: SimdElement,
+ LaneCount<LANES>: SupportedLaneCount;
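The "Safe SIMD with Unsafe Rust" guidance above can be made concrete. Below is an editorial sketch, not part of the module, that loads a vector out of an ordinary `&[u32]` with `read_unaligned`, so only `u32` alignment is assumed; the helper name `load_first_vector` is invented for illustration:

```rust
#![feature(portable_simd)]
use core::simd::Simd;

/// Loads the first four lanes of `data` without assuming `Simd`'s larger alignment.
fn load_first_vector(data: &[u32]) -> Simd<u32, 4> {
    assert!(data.len() >= 4);
    // Safety: the first 4 elements are in bounds, and `read_unaligned` imposes no
    // alignment requirement beyond what the `*const u32` already satisfies.
    unsafe { data.as_ptr().cast::<Simd<u32, 4>>().read_unaligned() }
}

fn main() {
    let data = [1u32, 2, 3, 4, 5];
    assert_eq!(load_first_vector(&data).to_array(), [1, 2, 3, 4]);
}
```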
+
+impl<T, const LANES: usize> Simd<T, LANES>
+where
+ LaneCount<LANES>: SupportedLaneCount,
+ T: SimdElement,
+{
+ /// Number of lanes in this vector.
+ pub const LANES: usize = LANES;
+
+ /// Returns the number of lanes in this SIMD vector.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #![feature(portable_simd)]
+ /// # use core::simd::u32x4;
+ /// let v = u32x4::splat(0);
+ /// assert_eq!(v.lanes(), 4);
+ /// ```
+ pub const fn lanes(&self) -> usize {
+ LANES
+ }
+
+ /// Constructs a new SIMD vector with all lanes set to the given value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #![feature(portable_simd)]
+ /// # use core::simd::u32x4;
+ /// let v = u32x4::splat(8);
+ /// assert_eq!(v.as_array(), &[8, 8, 8, 8]);
+ /// ```
+ pub fn splat(value: T) -> Self {
+ // This is preferred over `[value; LANES]`, since it's explicitly a splat:
+ // https://github.com/rust-lang/rust/issues/97804
+ struct Splat;
+ impl<const LANES: usize> Swizzle<1, LANES> for Splat {
+ const INDEX: [usize; LANES] = [0; LANES];
+ }
+ Splat::swizzle(Simd::<T, 1>::from([value]))
+ }
+
+ /// Returns an array reference containing the entire SIMD vector.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #![feature(portable_simd)]
+ /// # use core::simd::{Simd, u64x4};
+ /// let v: u64x4 = Simd::from_array([0, 1, 2, 3]);
+ /// assert_eq!(v.as_array(), &[0, 1, 2, 3]);
+ /// ```
+ pub const fn as_array(&self) -> &[T; LANES] {
+ &self.0
+ }
+
+ /// Returns a mutable array reference containing the entire SIMD vector.
+ pub fn as_mut_array(&mut self) -> &mut [T; LANES] {
+ &mut self.0
+ }
+
+ /// Converts an array to a SIMD vector.
+ pub const fn from_array(array: [T; LANES]) -> Self {
+ Self(array)
+ }
+
+ /// Converts a SIMD vector to an array.
+ pub const fn to_array(self) -> [T; LANES] {
+ self.0
+ }
+
+ /// Converts a slice to a SIMD vector containing `slice[..LANES]`.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the slice's length is less than the vector's `Simd::LANES`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #![feature(portable_simd)]
+ /// # use core::simd::u32x4;
+ /// let source = vec![1, 2, 3, 4, 5, 6];
+ /// let v = u32x4::from_slice(&source);
+ /// assert_eq!(v.as_array(), &[1, 2, 3, 4]);
+ /// ```
+ #[must_use]
+ pub const fn from_slice(slice: &[T]) -> Self {
+ assert!(slice.len() >= LANES, "slice length must be at least the number of lanes");
+ let mut array = [slice[0]; LANES];
+ let mut i = 0;
+ while i < LANES {
+ array[i] = slice[i];
+ i += 1;
+ }
+ Self(array)
+ }
+
+ /// Performs lanewise conversion of a SIMD vector's elements to another SIMD-valid type.
+ ///
+ /// This follows the semantics of Rust's `as` conversion for casting
+ /// integers to unsigned integers (interpreting as the other type, so `-1` to `MAX`),
+ /// and from floats to integers (truncating, or saturating at the limits) for each lane,
+ /// or vice versa.
+ ///
+ /// # Examples
+ /// ```
+ /// # #![feature(portable_simd)]
+ /// # use core::simd::Simd;
+ /// let floats: Simd<f32, 4> = Simd::from_array([1.9, -4.5, f32::INFINITY, f32::NAN]);
+ /// let ints = floats.cast::<i32>();
+ /// assert_eq!(ints, Simd::from_array([1, -4, i32::MAX, 0]));
+ ///
+ /// // Formally equivalent, but `Simd::cast` can optimize better.
+ /// assert_eq!(ints, Simd::from_array(floats.to_array().map(|x| x as i32)));
+ ///
+ /// // The float conversion does not round-trip.
+ /// let floats_again = ints.cast();
+ /// assert_ne!(floats, floats_again);
+ /// assert_eq!(floats_again, Simd::from_array([1.0, -4.0, 2147483647.0, 0.0]));
+ /// ```
+ #[must_use]
+ #[inline]
+ pub fn cast<U: SimdElement>(self) -> Simd<U, LANES> {
+ // Safety: The input argument is a vector of a valid SIMD element type.
+ unsafe { intrinsics::simd_as(self) }
+ }
+
+ /// Rounds toward zero and converts to the same-width integer type, assuming that
+ /// the value is finite and fits in that type.
+ ///
+ /// # Safety
+ /// The value must:
+ ///
+ /// * Not be NaN
+ /// * Not be infinite
+ /// * Be representable in the return type, after truncating off its fractional part
+ ///
+ /// If these requirements are infeasible or costly, consider using the safe function [cast],
+ /// which saturates on conversion.
+ ///
+ /// [cast]: Simd::cast
+ #[inline]
+ pub unsafe fn to_int_unchecked<I>(self) -> Simd<I, LANES>
+ where
+ T: core::convert::FloatToInt<I>,
+ I: SimdElement,
+ {
+ // Safety: `self` is a vector, and `FloatToInt` ensures the type can be casted to
+ // an integer.
+ unsafe { intrinsics::simd_cast(self) }
+ }
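A sketch of when the contract above is satisfied: every lane is finite and its truncation fits in `i32`, so the unchecked conversion is sound; `cast` remains the safe, saturating fallback.

```rust
#![feature(portable_simd)]
use core::simd::Simd;

fn main() {
    let floats = Simd::from_array([1.5f32, -2.5, 100.0, 0.0]);

    // Safety: every lane is finite and in range for i32 after truncation,
    // meeting the requirements documented on `to_int_unchecked`.
    let ints: Simd<i32, 4> = unsafe { floats.to_int_unchecked() };
    assert_eq!(ints.to_array(), [1, -2, 100, 0]);

    // The safe alternative saturates instead of requiring those guarantees.
    assert_eq!(floats.cast::<i32>().to_array(), [1, -2, 100, 0]);
}
```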
+
+ /// Reads from potentially discontiguous indices in `slice` to construct a SIMD vector.
+ /// If an index is out-of-bounds, the lane is instead selected from the `or` vector.
+ ///
+ /// # Examples
+ /// ```
+ /// # #![feature(portable_simd)]
+ /// # use core::simd::Simd;
+ /// let vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18];
+ /// let idxs = Simd::from_array([9, 3, 0, 5]);
+ /// let alt = Simd::from_array([-5, -4, -3, -2]);
+ ///
+ /// let result = Simd::gather_or(&vec, idxs, alt); // Note the lane that is out-of-bounds.
+ /// assert_eq!(result, Simd::from_array([-5, 13, 10, 15]));
+ /// ```
+ #[must_use]
+ #[inline]
+ pub fn gather_or(slice: &[T], idxs: Simd<usize, LANES>, or: Self) -> Self {
+ Self::gather_select(slice, Mask::splat(true), idxs, or)
+ }
+
+ /// Reads from potentially discontiguous indices in `slice` to construct a SIMD vector.
+ /// If an index is out-of-bounds, the lane is set to the default value for the type.
+ ///
+ /// # Examples
+ /// ```
+ /// # #![feature(portable_simd)]
+ /// # use core::simd::Simd;
+ /// let vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18];
+ /// let idxs = Simd::from_array([9, 3, 0, 5]);
+ ///
+ /// let result = Simd::gather_or_default(&vec, idxs); // Note the lane that is out-of-bounds.
+ /// assert_eq!(result, Simd::from_array([0, 13, 10, 15]));
+ /// ```
+ #[must_use]
+ #[inline]
+ pub fn gather_or_default(slice: &[T], idxs: Simd<usize, LANES>) -> Self
+ where
+ T: Default,
+ {
+ Self::gather_or(slice, idxs, Self::splat(T::default()))
+ }
+
+ /// Reads from potentially discontiguous indices in `slice` to construct a SIMD vector.
+ /// The mask `enable`s all `true` lanes and disables all `false` lanes.
+ /// If an index is disabled or is out-of-bounds, the lane is selected from the `or` vector.
+ ///
+ /// # Examples
+ /// ```
+ /// # #![feature(portable_simd)]
+ /// # use core::simd::{Simd, Mask};
+ /// let vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18];
+ /// let idxs = Simd::from_array([9, 3, 0, 5]);
+ /// let alt = Simd::from_array([-5, -4, -3, -2]);
+ /// let enable = Mask::from_array([true, true, true, false]); // Note the mask of the last lane.
+ ///
+ /// let result = Simd::gather_select(&vec, enable, idxs, alt); // Note the lane that is out-of-bounds.
+ /// assert_eq!(result, Simd::from_array([-5, 13, 10, -2]));
+ /// ```
+ #[must_use]
+ #[inline]
+ pub fn gather_select(
+ slice: &[T],
+ enable: Mask<isize, LANES>,
+ idxs: Simd<usize, LANES>,
+ or: Self,
+ ) -> Self {
+ let enable: Mask<isize, LANES> = enable & idxs.simd_lt(Simd::splat(slice.len()));
+ // Safety: We have masked-off out-of-bounds lanes.
+ unsafe { Self::gather_select_unchecked(slice, enable, idxs, or) }
+ }
+
+ /// Reads from potentially discontiguous indices in `slice` to construct a SIMD vector.
+ /// The mask `enable`s all `true` lanes and disables all `false` lanes.
+ /// If an index is disabled, the lane is selected from the `or` vector.
+ ///
+ /// # Safety
+ ///
+ /// Calling this function with an `enable`d out-of-bounds index is *[undefined behavior]*
+ /// even if the resulting value is not used.
+ ///
+ /// # Examples
+ /// ```
+ /// # #![feature(portable_simd)]
+ /// # #[cfg(feature = "as_crate")] use core_simd::simd;
+ /// # #[cfg(not(feature = "as_crate"))] use core::simd;
+ /// # use simd::{Simd, SimdPartialOrd, Mask};
+ /// let vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18];
+ /// let idxs = Simd::from_array([9, 3, 0, 5]);
+ /// let alt = Simd::from_array([-5, -4, -3, -2]);
+ /// let enable = Mask::from_array([true, true, true, false]); // Note the final mask lane.
+ /// // If this mask was used to gather, it would be unsound. Let's fix that.
+ /// let enable = enable & idxs.simd_lt(Simd::splat(vec.len()));
+ ///
+ /// // We have masked the OOB lane, so it's safe to gather now.
+ /// let result = unsafe { Simd::gather_select_unchecked(&vec, enable, idxs, alt) };
+ /// assert_eq!(result, Simd::from_array([-5, 13, 10, -2]));
+ /// ```
+ /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+ #[must_use]
+ #[inline]
+ pub unsafe fn gather_select_unchecked(
+ slice: &[T],
+ enable: Mask<isize, LANES>,
+ idxs: Simd<usize, LANES>,
+ or: Self,
+ ) -> Self {
+ let base_ptr = crate::simd::ptr::SimdConstPtr::splat(slice.as_ptr());
+ // Ferris forgive me, I have done pointer arithmetic here.
+ let ptrs = base_ptr.wrapping_add(idxs);
+ // Safety: The ptrs have been bounds-masked to prevent memory-unsafe reads insha'allah
+ unsafe { intrinsics::simd_gather(or, ptrs, enable.to_int()) }
+ }
+
+ /// Writes the values in a SIMD vector to potentially discontiguous indices in `slice`.
+ /// If two lanes in the scattered vector would write to the same index,
+ /// only the last lane is guaranteed to actually be written.
+ ///
+ /// # Examples
+ /// ```
+ /// # #![feature(portable_simd)]
+ /// # use core::simd::Simd;
+ /// let mut vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18];
+ /// let idxs = Simd::from_array([9, 3, 0, 0]);
+ /// let vals = Simd::from_array([-27, 82, -41, 124]);
+ ///
+ /// vals.scatter(&mut vec, idxs); // index 0 receives two writes.
+ /// assert_eq!(vec, vec![124, 11, 12, 82, 14, 15, 16, 17, 18]);
+ /// ```
+ #[inline]
+ pub fn scatter(self, slice: &mut [T], idxs: Simd<usize, LANES>) {
+ self.scatter_select(slice, Mask::splat(true), idxs)
+ }
+
+ /// Writes the values in a SIMD vector to multiple potentially discontiguous indices in `slice`.
+ /// The mask `enable`s all `true` lanes and disables all `false` lanes.
+ /// If an enabled index is out-of-bounds, the lane is not written.
+ /// If two enabled lanes in the scattered vector would write to the same index,
+ /// only the last lane is guaranteed to actually be written.
+ ///
+ /// # Examples
+ /// ```
+ /// # #![feature(portable_simd)]
+ /// # #[cfg(feature = "as_crate")] use core_simd::simd;
+ /// # #[cfg(not(feature = "as_crate"))] use core::simd;
+ /// # use simd::{Simd, Mask};
+ /// let mut vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18];
+ /// let idxs = Simd::from_array([9, 3, 0, 0]);
+ /// let vals = Simd::from_array([-27, 82, -41, 124]);
+ /// let enable = Mask::from_array([true, true, true, false]); // Note the mask of the last lane.
+ ///
+ /// vals.scatter_select(&mut vec, enable, idxs); // index 0's second write is masked, thus omitted.
+ /// assert_eq!(vec, vec![-41, 11, 12, 82, 14, 15, 16, 17, 18]);
+ /// ```
+ #[inline]
+ pub fn scatter_select(
+ self,
+ slice: &mut [T],
+ enable: Mask<isize, LANES>,
+ idxs: Simd<usize, LANES>,
+ ) {
+ let enable: Mask<isize, LANES> = enable & idxs.simd_lt(Simd::splat(slice.len()));
+ // Safety: We have masked-off out-of-bounds lanes.
+ unsafe { self.scatter_select_unchecked(slice, enable, idxs) }
+ }
+
+ /// Writes the values in a SIMD vector to multiple potentially discontiguous indices in `slice`.
+ /// The mask `enable`s all `true` lanes and disables all `false` lanes.
+ /// If two enabled lanes in the scattered vector would write to the same index,
+ /// only the last lane is guaranteed to actually be written.
+ ///
+ /// # Safety
+ ///
+ /// Calling this function with an enabled out-of-bounds index is *[undefined behavior]*,
+ /// and may lead to memory corruption.
+ ///
+ /// # Examples
+ /// ```
+ /// # #![feature(portable_simd)]
+ /// # #[cfg(feature = "as_crate")] use core_simd::simd;
+ /// # #[cfg(not(feature = "as_crate"))] use core::simd;
+ /// # use simd::{Simd, SimdPartialOrd, Mask};
+ /// let mut vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18];
+ /// let idxs = Simd::from_array([9, 3, 0, 0]);
+ /// let vals = Simd::from_array([-27, 82, -41, 124]);
+ /// let enable = Mask::from_array([true, true, true, false]); // Note the mask of the last lane.
+ /// // If this mask was used to scatter, it would be unsound. Let's fix that.
+ /// let enable = enable & idxs.simd_lt(Simd::splat(vec.len()));
+ ///
+ /// // We have masked the OOB lane, so it's safe to scatter now.
+ /// unsafe { vals.scatter_select_unchecked(&mut vec, enable, idxs); }
+ /// // index 0's second write is masked, thus was omitted.
+ /// assert_eq!(vec, vec![-41, 11, 12, 82, 14, 15, 16, 17, 18]);
+ /// ```
+ /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+ #[inline]
+ pub unsafe fn scatter_select_unchecked(
+ self,
+ slice: &mut [T],
+ enable: Mask<isize, LANES>,
+ idxs: Simd<usize, LANES>,
+ ) {
+ // Safety: This block works with *mut T derived from &mut 'a [T],
+ // which means it is delicate in Rust's borrowing model, circa 2021:
+ // &mut 'a [T] asserts uniqueness, so deriving &'a [T] invalidates live *mut Ts!
+ // Even though this block is largely safe methods, it must be exactly this way
+ // to prevent invalidating the raw ptrs while they're live.
+ // Thus, entering this block requires that all values it will use are already ready:
+ // 0. idxs we want to write to, which are used to construct the mask.
+ // 1. enable, which depends on an initial &'a [T] and the idxs.
+ // 2. actual values to scatter (self).
+ // 3. &mut [T] which will become our base ptr.
+ unsafe {
+ // Now Entering ☢️ *mut T Zone
+ let base_ptr = crate::simd::ptr::SimdMutPtr::splat(slice.as_mut_ptr());
+ // Ferris forgive me, I have done pointer arithmetic here.
+ let ptrs = base_ptr.wrapping_add(idxs);
+ // The ptrs have been bounds-masked to prevent memory-unsafe writes insha'allah
+ intrinsics::simd_scatter(self, ptrs, enable.to_int())
+ // Cleared ☢️ *mut T Zone
+ }
+ }
+}
+
+impl<T, const LANES: usize> Copy for Simd<T, LANES>
+where
+ T: SimdElement,
+ LaneCount<LANES>: SupportedLaneCount,
+{
+}
+
+impl<T, const LANES: usize> Clone for Simd<T, LANES>
+where
+ T: SimdElement,
+ LaneCount<LANES>: SupportedLaneCount,
+{
+ fn clone(&self) -> Self {
+ *self
+ }
+}
+
+impl<T, const LANES: usize> Default for Simd<T, LANES>
+where
+ LaneCount<LANES>: SupportedLaneCount,
+ T: SimdElement + Default,
+{
+ #[inline]
+ fn default() -> Self {
+ Self::splat(T::default())
+ }
+}
+
+impl<T, const LANES: usize> PartialEq for Simd<T, LANES>
+where
+ LaneCount<LANES>: SupportedLaneCount,
+ T: SimdElement + PartialEq,
+{
+ #[inline]
+ fn eq(&self, other: &Self) -> bool {
+ // Safety: All SIMD vectors are SimdPartialEq, and the comparison produces a valid mask.
+ let mask = unsafe {
+ let tfvec: Simd<<T as SimdElement>::Mask, LANES> = intrinsics::simd_eq(*self, *other);
+ Mask::from_int_unchecked(tfvec)
+ };
+
+ // Two vectors are equal if all lanes tested true for vertical equality.
+ mask.all()
+ }
+
+ #[allow(clippy::partialeq_ne_impl)]
+ #[inline]
+ fn ne(&self, other: &Self) -> bool {
+ // Safety: All SIMD vectors are SimdPartialEq, and the comparison produces a valid mask.
+ let mask = unsafe {
+ let tfvec: Simd<<T as SimdElement>::Mask, LANES> = intrinsics::simd_ne(*self, *other);
+ Mask::from_int_unchecked(tfvec)
+ };
+
+ // Two vectors are non-equal if any lane tested true for vertical non-equality.
+ mask.any()
+ }
+}
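The `PartialEq` impl collapses a lanewise comparison into a single `bool`; a brief sketch of how that differs from the lanewise `SimdPartialEq::simd_eq`, assuming nightly with `portable_simd`:

```rust
#![feature(portable_simd)]
use core::simd::{Simd, SimdPartialEq};

fn main() {
    let a = Simd::from_array([1u32, 2, 3, 4]);
    let b = Simd::from_array([1u32, 2, 0, 4]);

    // `==` / `!=` answer "are all lanes equal?" with a plain bool...
    assert!(a != b);

    // ...while the lanewise comparison keeps per-lane results in a mask.
    let lanes_eq = a.simd_eq(b);
    assert_eq!(lanes_eq.to_array(), [true, true, false, true]);
    assert!(!lanes_eq.all());
}
```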
+
+impl<T, const LANES: usize> PartialOrd for Simd<T, LANES>
+where
+ LaneCount<LANES>: SupportedLaneCount,
+ T: SimdElement + PartialOrd,
+{
+ #[inline]
+ fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
+ // TODO use SIMD equality
+ self.to_array().partial_cmp(other.as_ref())
+ }
+}
+
+impl<T, const LANES: usize> Eq for Simd<T, LANES>
+where
+ LaneCount<LANES>: SupportedLaneCount,
+ T: SimdElement + Eq,
+{
+}
+
+impl<T, const LANES: usize> Ord for Simd<T, LANES>
+where
+ LaneCount<LANES>: SupportedLaneCount,
+ T: SimdElement + Ord,
+{
+ #[inline]
+ fn cmp(&self, other: &Self) -> core::cmp::Ordering {
+ // TODO use SIMD equality
+ self.to_array().cmp(other.as_ref())
+ }
+}
+
+impl<T, const LANES: usize> core::hash::Hash for Simd<T, LANES>
+where
+ LaneCount<LANES>: SupportedLaneCount,
+ T: SimdElement + core::hash::Hash,
+{
+ #[inline]
+ fn hash<H>(&self, state: &mut H)
+ where
+ H: core::hash::Hasher,
+ {
+ self.as_array().hash(state)
+ }
+}
+
+// array references
+impl<T, const LANES: usize> AsRef<[T; LANES]> for Simd<T, LANES>
+where
+ LaneCount<LANES>: SupportedLaneCount,
+ T: SimdElement,
+{
+ #[inline]
+ fn as_ref(&self) -> &[T; LANES] {
+ &self.0
+ }
+}
+
+impl<T, const LANES: usize> AsMut<[T; LANES]> for Simd<T, LANES>
+where
+ LaneCount<LANES>: SupportedLaneCount,
+ T: SimdElement,
+{
+ #[inline]
+ fn as_mut(&mut self) -> &mut [T; LANES] {
+ &mut self.0
+ }
+}
+
+// slice references
+impl<T, const LANES: usize> AsRef<[T]> for Simd<T, LANES>
+where
+ LaneCount<LANES>: SupportedLaneCount,
+ T: SimdElement,
+{
+ #[inline]
+ fn as_ref(&self) -> &[T] {
+ &self.0
+ }
+}
+
+impl<T, const LANES: usize> AsMut<[T]> for Simd<T, LANES>
+where
+ LaneCount<LANES>: SupportedLaneCount,
+ T: SimdElement,
+{
+ #[inline]
+ fn as_mut(&mut self) -> &mut [T] {
+ &mut self.0
+ }
+}
+
+// vector/array conversion
+impl<T, const LANES: usize> From<[T; LANES]> for Simd<T, LANES>
+where
+ LaneCount<LANES>: SupportedLaneCount,
+ T: SimdElement,
+{
+ fn from(array: [T; LANES]) -> Self {
+ Self(array)
+ }
+}
+
+impl<T, const LANES: usize> From<Simd<T, LANES>> for [T; LANES]
+where
+ LaneCount<LANES>: SupportedLaneCount,
+ T: SimdElement,
+{
+ fn from(vector: Simd<T, LANES>) -> Self {
+ vector.to_array()
+ }
+}
+
+mod sealed {
+ pub trait Sealed {}
+}
+use sealed::Sealed;
+
+/// Marker trait for types that may be used as SIMD vector elements.
+///
+/// # Safety
+/// This trait, when implemented, asserts the compiler can monomorphize
+/// `#[repr(simd)]` structs with the marked type as an element.
+/// Strictly, it is valid to impl if the vector will not be miscompiled.
+/// Practically, it is user-unfriendly to impl it if the vector won't compile,
+/// even when no soundness guarantees are broken by allowing the user to try.
+pub unsafe trait SimdElement: Sealed + Copy {
+ /// The mask element type corresponding to this element type.
+ type Mask: MaskElement;
+}
+
+impl Sealed for u8 {}
+
+// Safety: u8 is a valid SIMD element type, and is supported by this API
+unsafe impl SimdElement for u8 {
+ type Mask = i8;
+}
+
+impl Sealed for u16 {}
+
+// Safety: u16 is a valid SIMD element type, and is supported by this API
+unsafe impl SimdElement for u16 {
+ type Mask = i16;
+}
+
+impl Sealed for u32 {}
+
+// Safety: u32 is a valid SIMD element type, and is supported by this API
+unsafe impl SimdElement for u32 {
+ type Mask = i32;
+}
+
+impl Sealed for u64 {}
+
+// Safety: u64 is a valid SIMD element type, and is supported by this API
+unsafe impl SimdElement for u64 {
+ type Mask = i64;
+}
+
+impl Sealed for usize {}
+
+// Safety: usize is a valid SIMD element type, and is supported by this API
+unsafe impl SimdElement for usize {
+ type Mask = isize;
+}
+
+impl Sealed for i8 {}
+
+// Safety: i8 is a valid SIMD element type, and is supported by this API
+unsafe impl SimdElement for i8 {
+ type Mask = i8;
+}
+
+impl Sealed for i16 {}
+
+// Safety: i16 is a valid SIMD element type, and is supported by this API
+unsafe impl SimdElement for i16 {
+ type Mask = i16;
+}
+
+impl Sealed for i32 {}
+
+// Safety: i32 is a valid SIMD element type, and is supported by this API
+unsafe impl SimdElement for i32 {
+ type Mask = i32;
+}
+
+impl Sealed for i64 {}
+
+// Safety: i64 is a valid SIMD element type, and is supported by this API
+unsafe impl SimdElement for i64 {
+ type Mask = i64;
+}
+
+impl Sealed for isize {}
+
+// Safety: isize is a valid SIMD element type, and is supported by this API
+unsafe impl SimdElement for isize {
+ type Mask = isize;
+}
+
+impl Sealed for f32 {}
+
+// Safety: f32 is a valid SIMD element type, and is supported by this API
+unsafe impl SimdElement for f32 {
+ type Mask = i32;
+}
+
+impl Sealed for f64 {}
+
+// Safety: f64 is a valid SIMD element type, and is supported by this API
+unsafe impl SimdElement for f64 {
+ type Mask = i64;
+}
diff --git a/library/portable-simd/crates/core_simd/src/vector/float.rs b/library/portable-simd/crates/core_simd/src/vector/float.rs
new file mode 100644
index 000000000..f836c99b1
--- /dev/null
+++ b/library/portable-simd/crates/core_simd/src/vector/float.rs
@@ -0,0 +1,24 @@
+#![allow(non_camel_case_types)]
+
+use crate::simd::Simd;
+
+/// A 64-bit SIMD vector with two elements of type `f32`.
+pub type f32x2 = Simd<f32, 2>;
+
+/// A 128-bit SIMD vector with four elements of type `f32`.
+pub type f32x4 = Simd<f32, 4>;
+
+/// A 256-bit SIMD vector with eight elements of type `f32`.
+pub type f32x8 = Simd<f32, 8>;
+
+/// A 512-bit SIMD vector with 16 elements of type `f32`.
+pub type f32x16 = Simd<f32, 16>;
+
+/// A 128-bit SIMD vector with two elements of type `f64`.
+pub type f64x2 = Simd<f64, 2>;
+
+/// A 256-bit SIMD vector with four elements of type `f64`.
+pub type f64x4 = Simd<f64, 4>;
+
+/// A 512-bit SIMD vector with eight elements of type `f64`.
+pub type f64x8 = Simd<f64, 8>;
diff --git a/library/portable-simd/crates/core_simd/src/vector/int.rs b/library/portable-simd/crates/core_simd/src/vector/int.rs
new file mode 100644
index 000000000..20e56c7dc
--- /dev/null
+++ b/library/portable-simd/crates/core_simd/src/vector/int.rs
@@ -0,0 +1,63 @@
+#![allow(non_camel_case_types)]
+
+use crate::simd::Simd;
+
+/// A SIMD vector with two elements of type `isize`.
+pub type isizex2 = Simd<isize, 2>;
+
+/// A SIMD vector with four elements of type `isize`.
+pub type isizex4 = Simd<isize, 4>;
+
+/// A SIMD vector with eight elements of type `isize`.
+pub type isizex8 = Simd<isize, 8>;
+
+/// A 32-bit SIMD vector with two elements of type `i16`.
+pub type i16x2 = Simd<i16, 2>;
+
+/// A 64-bit SIMD vector with four elements of type `i16`.
+pub type i16x4 = Simd<i16, 4>;
+
+/// A 128-bit SIMD vector with eight elements of type `i16`.
+pub type i16x8 = Simd<i16, 8>;
+
+/// A 256-bit SIMD vector with 16 elements of type `i16`.
+pub type i16x16 = Simd<i16, 16>;
+
+/// A 512-bit SIMD vector with 32 elements of type `i16`.
+pub type i16x32 = Simd<i16, 32>;
+
+/// A 64-bit SIMD vector with two elements of type `i32`.
+pub type i32x2 = Simd<i32, 2>;
+
+/// A 128-bit SIMD vector with four elements of type `i32`.
+pub type i32x4 = Simd<i32, 4>;
+
+/// A 256-bit SIMD vector with eight elements of type `i32`.
+pub type i32x8 = Simd<i32, 8>;
+
+/// A 512-bit SIMD vector with 16 elements of type `i32`.
+pub type i32x16 = Simd<i32, 16>;
+
+/// A 128-bit SIMD vector with two elements of type `i64`.
+pub type i64x2 = Simd<i64, 2>;
+
+/// A 256-bit SIMD vector with four elements of type `i64`.
+pub type i64x4 = Simd<i64, 4>;
+
+/// A 512-bit SIMD vector with eight elements of type `i64`.
+pub type i64x8 = Simd<i64, 8>;
+
+/// A 32-bit SIMD vector with four elements of type `i8`.
+pub type i8x4 = Simd<i8, 4>;
+
+/// A 64-bit SIMD vector with eight elements of type `i8`.
+pub type i8x8 = Simd<i8, 8>;
+
+/// A 128-bit SIMD vector with 16 elements of type `i8`.
+pub type i8x16 = Simd<i8, 16>;
+
+/// A 256-bit SIMD vector with 32 elements of type `i8`.
+pub type i8x32 = Simd<i8, 32>;
+
+/// A 512-bit SIMD vector with 64 elements of type `i8`.
+pub type i8x64 = Simd<i8, 64>;
diff --git a/library/portable-simd/crates/core_simd/src/vector/ptr.rs b/library/portable-simd/crates/core_simd/src/vector/ptr.rs
new file mode 100644
index 000000000..fa756344d
--- /dev/null
+++ b/library/portable-simd/crates/core_simd/src/vector/ptr.rs
@@ -0,0 +1,51 @@
+//! Private implementation details of public gather/scatter APIs.
+use crate::simd::intrinsics;
+use crate::simd::{LaneCount, Simd, SupportedLaneCount};
+
+/// A vector of *const T.
+#[derive(Debug, Copy, Clone)]
+#[repr(simd)]
+pub(crate) struct SimdConstPtr<T, const LANES: usize>([*const T; LANES]);
+
+impl<T, const LANES: usize> SimdConstPtr<T, LANES>
+where
+ LaneCount<LANES>: SupportedLaneCount,
+ T: Sized,
+{
+ #[inline]
+ #[must_use]
+ pub fn splat(ptr: *const T) -> Self {
+ Self([ptr; LANES])
+ }
+
+ #[inline]
+ #[must_use]
+ pub fn wrapping_add(self, addend: Simd<usize, LANES>) -> Self {
+ // Safety: this intrinsic doesn't have a precondition
+ unsafe { intrinsics::simd_arith_offset(self, addend) }
+ }
+}
+
+/// A vector of *mut T. Be very careful around potential aliasing.
+#[derive(Debug, Copy, Clone)]
+#[repr(simd)]
+pub(crate) struct SimdMutPtr<T, const LANES: usize>([*mut T; LANES]);
+
+impl<T, const LANES: usize> SimdMutPtr<T, LANES>
+where
+ LaneCount<LANES>: SupportedLaneCount,
+ T: Sized,
+{
+ #[inline]
+ #[must_use]
+ pub fn splat(ptr: *mut T) -> Self {
+ Self([ptr; LANES])
+ }
+
+ #[inline]
+ #[must_use]
+ pub fn wrapping_add(self, addend: Simd<usize, LANES>) -> Self {
+ // Safety: this intrinsic doesn't have a precondition
+ unsafe { intrinsics::simd_arith_offset(self, addend) }
+ }
+}
diff --git a/library/portable-simd/crates/core_simd/src/vector/uint.rs b/library/portable-simd/crates/core_simd/src/vector/uint.rs
new file mode 100644
index 000000000..b4a69c443
--- /dev/null
+++ b/library/portable-simd/crates/core_simd/src/vector/uint.rs
@@ -0,0 +1,63 @@
+#![allow(non_camel_case_types)]
+
+use crate::simd::Simd;
+
+/// A SIMD vector with two elements of type `usize`.
+pub type usizex2 = Simd<usize, 2>;
+
+/// A SIMD vector with four elements of type `usize`.
+pub type usizex4 = Simd<usize, 4>;
+
+/// A SIMD vector with eight elements of type `usize`.
+pub type usizex8 = Simd<usize, 8>;
+
+/// A 32-bit SIMD vector with two elements of type `u16`.
+pub type u16x2 = Simd<u16, 2>;
+
+/// A 64-bit SIMD vector with four elements of type `u16`.
+pub type u16x4 = Simd<u16, 4>;
+
+/// A 128-bit SIMD vector with eight elements of type `u16`.
+pub type u16x8 = Simd<u16, 8>;
+
+/// A 256-bit SIMD vector with 16 elements of type `u16`.
+pub type u16x16 = Simd<u16, 16>;
+
+/// A 512-bit SIMD vector with 32 elements of type `u16`.
+pub type u16x32 = Simd<u16, 32>;
+
+/// A 64-bit SIMD vector with two elements of type `u32`.
+pub type u32x2 = Simd<u32, 2>;
+
+/// A 128-bit SIMD vector with four elements of type `u32`.
+pub type u32x4 = Simd<u32, 4>;
+
+/// A 256-bit SIMD vector with eight elements of type `u32`.
+pub type u32x8 = Simd<u32, 8>;
+
+/// A 512-bit SIMD vector with 16 elements of type `u32`.
+pub type u32x16 = Simd<u32, 16>;
+
+/// A 128-bit SIMD vector with two elements of type `u64`.
+pub type u64x2 = Simd<u64, 2>;
+
+/// A 256-bit SIMD vector with four elements of type `u64`.
+pub type u64x4 = Simd<u64, 4>;
+
+/// A 512-bit SIMD vector with eight elements of type `u64`.
+pub type u64x8 = Simd<u64, 8>;
+
+/// A 32-bit SIMD vector with four elements of type `u8`.
+pub type u8x4 = Simd<u8, 4>;
+
+/// A 64-bit SIMD vector with eight elements of type `u8`.
+pub type u8x8 = Simd<u8, 8>;
+
+/// A 128-bit SIMD vector with 16 elements of type `u8`.
+pub type u8x16 = Simd<u8, 16>;
+
+/// A 256-bit SIMD vector with 32 elements of type `u8`.
+pub type u8x32 = Simd<u8, 32>;
+
+/// A 512-bit SIMD vector with 64 elements of type `u8`.
+pub type u8x64 = Simd<u8, 64>;
diff --git a/library/portable-simd/crates/core_simd/src/vendor.rs b/library/portable-simd/crates/core_simd/src/vendor.rs
new file mode 100644
index 000000000..9fb70218c
--- /dev/null
+++ b/library/portable-simd/crates/core_simd/src/vendor.rs
@@ -0,0 +1,31 @@
+/// Provides implementations of `From<$a> for $b` and `From<$b> for $a` that transmutes the value.
+#[allow(unused)]
+macro_rules! from_transmute {
+ { unsafe $a:ty => $b:ty } => {
+ from_transmute!{ @impl $a => $b }
+ from_transmute!{ @impl $b => $a }
+ };
+ { @impl $from:ty => $to:ty } => {
+ impl core::convert::From<$from> for $to {
+ #[inline]
+ fn from(value: $from) -> $to {
+ // Safety: transmuting between vectors is safe, but the caller of this macro
+ // checks the invariants
+ unsafe { core::mem::transmute(value) }
+ }
+ }
+ };
+}
+
+/// Conversions to x86's SIMD types.
+#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
+mod x86;
+
+#[cfg(any(target_arch = "wasm32"))]
+mod wasm32;
+
+#[cfg(any(target_arch = "aarch64", target_arch = "arm",))]
+mod arm;
+
+#[cfg(any(target_arch = "powerpc", target_arch = "powerpc64"))]
+mod powerpc;
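A sketch of what the `from_transmute!` impls enable, using x86_64 as the example (the available pairs depend on the target, per the vendor modules that follow): a portable vector converts to and from the arch type with plain `From`/`Into`, so vendor intrinsics can be dropped in where needed.

```rust
#![feature(portable_simd)]

#[cfg(target_arch = "x86_64")]
fn main() {
    use core::arch::x86_64::{__m128, _mm_add_ps};
    use core::simd::f32x4;

    let a = f32x4::from_array([1.0, 2.0, 3.0, 4.0]);
    let b = f32x4::splat(0.5);

    // These conversions are the `From` impls generated by from_transmute!.
    let (raw_a, raw_b): (__m128, __m128) = (a.into(), b.into());

    // Use a vendor intrinsic directly, then come back to the portable type.
    let sum: f32x4 = unsafe { _mm_add_ps(raw_a, raw_b) }.into();
    assert_eq!(sum.to_array(), [1.5, 2.5, 3.5, 4.5]);
}

#[cfg(not(target_arch = "x86_64"))]
fn main() {}
```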
diff --git a/library/portable-simd/crates/core_simd/src/vendor/arm.rs b/library/portable-simd/crates/core_simd/src/vendor/arm.rs
new file mode 100644
index 000000000..ff3b69ccf
--- /dev/null
+++ b/library/portable-simd/crates/core_simd/src/vendor/arm.rs
@@ -0,0 +1,76 @@
+#![allow(unused)]
+use crate::simd::*;
+
+#[cfg(target_arch = "arm")]
+use core::arch::arm::*;
+
+#[cfg(target_arch = "aarch64")]
+use core::arch::aarch64::*;
+
+#[cfg(any(
+ target_arch = "aarch64",
+ all(target_arch = "arm", target_feature = "v7"),
+))]
+mod neon {
+ use super::*;
+
+ from_transmute! { unsafe f32x2 => float32x2_t }
+ from_transmute! { unsafe f32x4 => float32x4_t }
+
+ from_transmute! { unsafe u8x8 => uint8x8_t }
+ from_transmute! { unsafe u8x16 => uint8x16_t }
+ from_transmute! { unsafe i8x8 => int8x8_t }
+ from_transmute! { unsafe i8x16 => int8x16_t }
+ from_transmute! { unsafe u8x8 => poly8x8_t }
+ from_transmute! { unsafe u8x16 => poly8x16_t }
+
+ from_transmute! { unsafe u16x4 => uint16x4_t }
+ from_transmute! { unsafe u16x8 => uint16x8_t }
+ from_transmute! { unsafe i16x4 => int16x4_t }
+ from_transmute! { unsafe i16x8 => int16x8_t }
+ from_transmute! { unsafe u16x4 => poly16x4_t }
+ from_transmute! { unsafe u16x8 => poly16x8_t }
+
+ from_transmute! { unsafe u32x2 => uint32x2_t }
+ from_transmute! { unsafe u32x4 => uint32x4_t }
+ from_transmute! { unsafe i32x2 => int32x2_t }
+ from_transmute! { unsafe i32x4 => int32x4_t }
+
+ from_transmute! { unsafe Simd<u64, 1> => uint64x1_t }
+ from_transmute! { unsafe u64x2 => uint64x2_t }
+ from_transmute! { unsafe Simd<i64, 1> => int64x1_t }
+ from_transmute! { unsafe i64x2 => int64x2_t }
+ from_transmute! { unsafe Simd<u64, 1> => poly64x1_t }
+ from_transmute! { unsafe u64x2 => poly64x2_t }
+}
+
+#[cfg(any(
+ all(target_feature = "v5te", not(target_feature = "mclass")),
+ all(target_feature = "mclass", target_feature = "dsp"),
+))]
+mod dsp {
+ use super::*;
+
+ from_transmute! { unsafe Simd<u16, 2> => uint16x2_t }
+ from_transmute! { unsafe Simd<i16, 2> => int16x2_t }
+}
+
+#[cfg(any(
+ all(target_feature = "v6", not(target_feature = "mclass")),
+ all(target_feature = "mclass", target_feature = "dsp"),
+))]
+mod simd32 {
+ use super::*;
+
+ from_transmute! { unsafe Simd<u8, 4> => uint8x4_t }
+ from_transmute! { unsafe Simd<i8, 4> => int8x4_t }
+}
+
+#[cfg(target_arch = "aarch64")]
+mod aarch64 {
+ use super::neon::*;
+ use super::*;
+
+ from_transmute! { unsafe Simd<f64, 1> => float64x1_t }
+ from_transmute! { unsafe f64x2 => float64x2_t }
+}
diff --git a/library/portable-simd/crates/core_simd/src/vendor/powerpc.rs b/library/portable-simd/crates/core_simd/src/vendor/powerpc.rs
new file mode 100644
index 000000000..92f97d471
--- /dev/null
+++ b/library/portable-simd/crates/core_simd/src/vendor/powerpc.rs
@@ -0,0 +1,11 @@
+use crate::simd::*;
+
+#[cfg(target_arch = "powerpc")]
+use core::arch::powerpc::*;
+
+#[cfg(target_arch = "powerpc64")]
+use core::arch::powerpc64::*;
+
+from_transmute! { unsafe f64x2 => vector_double }
+from_transmute! { unsafe i64x2 => vector_signed_long }
+from_transmute! { unsafe u64x2 => vector_unsigned_long }
diff --git a/library/portable-simd/crates/core_simd/src/vendor/wasm32.rs b/library/portable-simd/crates/core_simd/src/vendor/wasm32.rs
new file mode 100644
index 000000000..ef3baf885
--- /dev/null
+++ b/library/portable-simd/crates/core_simd/src/vendor/wasm32.rs
@@ -0,0 +1,30 @@
+use crate::simd::*;
+use core::arch::wasm32::v128;
+
+from_transmute! { unsafe u8x16 => v128 }
+from_transmute! { unsafe i8x16 => v128 }
+
+from_transmute! { unsafe u16x8 => v128 }
+from_transmute! { unsafe i16x8 => v128 }
+
+from_transmute! { unsafe u32x4 => v128 }
+from_transmute! { unsafe i32x4 => v128 }
+from_transmute! { unsafe f32x4 => v128 }
+
+from_transmute! { unsafe u64x2 => v128 }
+from_transmute! { unsafe i64x2 => v128 }
+from_transmute! { unsafe f64x2 => v128 }
+
+#[cfg(target_pointer_width = "32")]
+mod p32 {
+ use super::*;
+ from_transmute! { unsafe usizex4 => v128 }
+ from_transmute! { unsafe isizex4 => v128 }
+}
+
+#[cfg(target_pointer_width = "64")]
+mod p64 {
+ use super::*;
+ from_transmute! { unsafe usizex2 => v128 }
+ from_transmute! { unsafe isizex2 => v128 }
+}
diff --git a/library/portable-simd/crates/core_simd/src/vendor/x86.rs b/library/portable-simd/crates/core_simd/src/vendor/x86.rs
new file mode 100644
index 000000000..0dd47015e
--- /dev/null
+++ b/library/portable-simd/crates/core_simd/src/vendor/x86.rs
@@ -0,0 +1,63 @@
+use crate::simd::*;
+
+#[cfg(any(target_arch = "x86"))]
+use core::arch::x86::*;
+
+#[cfg(target_arch = "x86_64")]
+use core::arch::x86_64::*;
+
+from_transmute! { unsafe u8x16 => __m128i }
+from_transmute! { unsafe u8x32 => __m256i }
+from_transmute! { unsafe u8x64 => __m512i }
+from_transmute! { unsafe i8x16 => __m128i }
+from_transmute! { unsafe i8x32 => __m256i }
+from_transmute! { unsafe i8x64 => __m512i }
+
+from_transmute! { unsafe u16x8 => __m128i }
+from_transmute! { unsafe u16x16 => __m256i }
+from_transmute! { unsafe u16x32 => __m512i }
+from_transmute! { unsafe i16x8 => __m128i }
+from_transmute! { unsafe i16x16 => __m256i }
+from_transmute! { unsafe i16x32 => __m512i }
+
+from_transmute! { unsafe u32x4 => __m128i }
+from_transmute! { unsafe u32x8 => __m256i }
+from_transmute! { unsafe u32x16 => __m512i }
+from_transmute! { unsafe i32x4 => __m128i }
+from_transmute! { unsafe i32x8 => __m256i }
+from_transmute! { unsafe i32x16 => __m512i }
+from_transmute! { unsafe f32x4 => __m128 }
+from_transmute! { unsafe f32x8 => __m256 }
+from_transmute! { unsafe f32x16 => __m512 }
+
+from_transmute! { unsafe u64x2 => __m128i }
+from_transmute! { unsafe u64x4 => __m256i }
+from_transmute! { unsafe u64x8 => __m512i }
+from_transmute! { unsafe i64x2 => __m128i }
+from_transmute! { unsafe i64x4 => __m256i }
+from_transmute! { unsafe i64x8 => __m512i }
+from_transmute! { unsafe f64x2 => __m128d }
+from_transmute! { unsafe f64x4 => __m256d }
+from_transmute! { unsafe f64x8 => __m512d }
+
+#[cfg(target_pointer_width = "32")]
+mod p32 {
+ use super::*;
+ from_transmute! { unsafe usizex4 => __m128i }
+ from_transmute! { unsafe usizex8 => __m256i }
+ from_transmute! { unsafe Simd<usize, 16> => __m512i }
+ from_transmute! { unsafe isizex4 => __m128i }
+ from_transmute! { unsafe isizex8 => __m256i }
+ from_transmute! { unsafe Simd<isize, 16> => __m512i }
+}
+
+#[cfg(target_pointer_width = "64")]
+mod p64 {
+ use super::*;
+ from_transmute! { unsafe usizex2 => __m128i }
+ from_transmute! { unsafe usizex4 => __m256i }
+ from_transmute! { unsafe usizex8 => __m512i }
+ from_transmute! { unsafe isizex2 => __m128i }
+ from_transmute! { unsafe isizex4 => __m256i }
+ from_transmute! { unsafe isizex8 => __m512i }
+}