author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 02:49:50 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 02:49:50 +0000
commit    9835e2ae736235810b4ea1c162ca5e65c547e770 (patch)
tree      3fcebf40ed70e581d776a8a4c65923e8ec20e026 /library/portable-simd/crates/core_simd/src
parent    Releasing progress-linux version 1.70.0+dfsg2-1~progress7.99u1. (diff)
download  rustc-9835e2ae736235810b4ea1c162ca5e65c547e770.tar.xz
          rustc-9835e2ae736235810b4ea1c162ca5e65c547e770.zip
Merging upstream version 1.71.1+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'library/portable-simd/crates/core_simd/src')
-rw-r--r--  library/portable-simd/crates/core_simd/src/alias.rs | 227
-rw-r--r--  library/portable-simd/crates/core_simd/src/cast.rs | 55
-rw-r--r--  library/portable-simd/crates/core_simd/src/elements.rs | 4
-rw-r--r--  library/portable-simd/crates/core_simd/src/elements/const_ptr.rs | 141
-rw-r--r--  library/portable-simd/crates/core_simd/src/elements/mut_ptr.rs | 136
-rw-r--r--  library/portable-simd/crates/core_simd/src/eq.rs | 38
-rw-r--r--  library/portable-simd/crates/core_simd/src/fmt.rs | 50
-rw-r--r--  library/portable-simd/crates/core_simd/src/intrinsics.rs | 16
-rw-r--r--  library/portable-simd/crates/core_simd/src/lane_count.rs | 36
-rw-r--r--  library/portable-simd/crates/core_simd/src/lib.rs | 8
-rw-r--r--  library/portable-simd/crates/core_simd/src/masks.rs | 69
-rw-r--r--  library/portable-simd/crates/core_simd/src/masks/bitmask.rs | 4
-rw-r--r--  library/portable-simd/crates/core_simd/src/masks/full_masks.rs | 4
-rw-r--r--  library/portable-simd/crates/core_simd/src/masks/to_bitmask.rs | 4
-rw-r--r--  library/portable-simd/crates/core_simd/src/mod.rs | 6
-rw-r--r--  library/portable-simd/crates/core_simd/src/ops/deref.rs | 2
-rw-r--r--  library/portable-simd/crates/core_simd/src/ord.rs | 102
-rw-r--r--  library/portable-simd/crates/core_simd/src/swizzle.rs | 72
-rw-r--r--  library/portable-simd/crates/core_simd/src/swizzle_dyn.rs | 157
-rw-r--r--  library/portable-simd/crates/core_simd/src/vector.rs | 665
-rw-r--r--  library/portable-simd/crates/core_simd/src/vector/float.rs | 24
-rw-r--r--  library/portable-simd/crates/core_simd/src/vector/int.rs | 63
-rw-r--r--  library/portable-simd/crates/core_simd/src/vector/ptr.rs | 51
-rw-r--r--  library/portable-simd/crates/core_simd/src/vector/uint.rs | 63
24 files changed, 1443 insertions(+), 554 deletions(-)
diff --git a/library/portable-simd/crates/core_simd/src/alias.rs b/library/portable-simd/crates/core_simd/src/alias.rs
new file mode 100644
index 000000000..23f121c46
--- /dev/null
+++ b/library/portable-simd/crates/core_simd/src/alias.rs
@@ -0,0 +1,227 @@
+macro_rules! number {
+ { 1 } => { "one" };
+ { 2 } => { "two" };
+ { 4 } => { "four" };
+ { 8 } => { "eight" };
+ { $x:literal } => { stringify!($x) };
+}
+
+macro_rules! plural {
+ { 1 } => { "" };
+ { $x:literal } => { "s" };
+}
+
+macro_rules! alias {
+ {
+ $(
+ $element_ty:ty = {
+ $($alias:ident $num_elements:tt)*
+ }
+ )*
+ } => {
+ $(
+ $(
+ #[doc = concat!("A SIMD vector with ", number!($num_elements), " element", plural!($num_elements), " of type [`", stringify!($element_ty), "`].")]
+ #[allow(non_camel_case_types)]
+ pub type $alias = $crate::simd::Simd<$element_ty, $num_elements>;
+ )*
+ )*
+ }
+}
+
+macro_rules! mask_alias {
+ {
+ $(
+ $element_ty:ty : $size:literal = {
+ $($alias:ident $num_elements:tt)*
+ }
+ )*
+ } => {
+ $(
+ $(
+ #[doc = concat!("A SIMD mask with ", number!($num_elements), " element", plural!($num_elements), " for vectors with ", $size, " element types.")]
+ ///
+ #[doc = concat!(
+ "The layout of this type is unspecified, and may change between platforms and/or Rust versions, and code should not assume that it is equivalent to `[",
+ stringify!($element_ty), "; ", $num_elements, "]`."
+ )]
+ #[allow(non_camel_case_types)]
+ pub type $alias = $crate::simd::Mask<$element_ty, $num_elements>;
+ )*
+ )*
+ }
+}
+
+alias! {
+ i8 = {
+ i8x1 1
+ i8x2 2
+ i8x4 4
+ i8x8 8
+ i8x16 16
+ i8x32 32
+ i8x64 64
+ }
+
+ i16 = {
+ i16x1 1
+ i16x2 2
+ i16x4 4
+ i16x8 8
+ i16x16 16
+ i16x32 32
+ i16x64 64
+ }
+
+ i32 = {
+ i32x1 1
+ i32x2 2
+ i32x4 4
+ i32x8 8
+ i32x16 16
+ i32x32 32
+ i32x64 64
+ }
+
+ i64 = {
+ i64x1 1
+ i64x2 2
+ i64x4 4
+ i64x8 8
+ i64x16 16
+ i64x32 32
+ i64x64 64
+ }
+
+ isize = {
+ isizex1 1
+ isizex2 2
+ isizex4 4
+ isizex8 8
+ isizex16 16
+ isizex32 32
+ isizex64 64
+ }
+
+ u8 = {
+ u8x1 1
+ u8x2 2
+ u8x4 4
+ u8x8 8
+ u8x16 16
+ u8x32 32
+ u8x64 64
+ }
+
+ u16 = {
+ u16x1 1
+ u16x2 2
+ u16x4 4
+ u16x8 8
+ u16x16 16
+ u16x32 32
+ u16x64 64
+ }
+
+ u32 = {
+ u32x1 1
+ u32x2 2
+ u32x4 4
+ u32x8 8
+ u32x16 16
+ u32x32 32
+ u32x64 64
+ }
+
+ u64 = {
+ u64x1 1
+ u64x2 2
+ u64x4 4
+ u64x8 8
+ u64x16 16
+ u64x32 32
+ u64x64 64
+ }
+
+ usize = {
+ usizex1 1
+ usizex2 2
+ usizex4 4
+ usizex8 8
+ usizex16 16
+ usizex32 32
+ usizex64 64
+ }
+
+ f32 = {
+ f32x1 1
+ f32x2 2
+ f32x4 4
+ f32x8 8
+ f32x16 16
+ f32x32 32
+ f32x64 64
+ }
+
+ f64 = {
+ f64x1 1
+ f64x2 2
+ f64x4 4
+ f64x8 8
+ f64x16 16
+ f64x32 32
+ f64x64 64
+ }
+}
+
+mask_alias! {
+ i8 : "8-bit" = {
+ mask8x1 1
+ mask8x2 2
+ mask8x4 4
+ mask8x8 8
+ mask8x16 16
+ mask8x32 32
+ mask8x64 64
+ }
+
+ i16 : "16-bit" = {
+ mask16x1 1
+ mask16x2 2
+ mask16x4 4
+ mask16x8 8
+ mask16x16 16
+ mask16x32 32
+ mask16x64 64
+ }
+
+ i32 : "32-bit" = {
+ mask32x1 1
+ mask32x2 2
+ mask32x4 4
+ mask32x8 8
+ mask32x16 16
+ mask32x32 32
+ mask32x64 64
+ }
+
+ i64 : "64-bit" = {
+ mask64x1 1
+ mask64x2 2
+ mask64x4 4
+ mask64x8 8
+ mask64x16 16
+ mask64x32 32
+ mask64x64 64
+ }
+
+ isize : "pointer-sized" = {
+ masksizex1 1
+ masksizex2 2
+ masksizex4 4
+ masksizex8 8
+ masksizex16 16
+ masksizex32 32
+ masksizex64 64
+ }
+}
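The two macros above generate nothing but type aliases, so `i32x4` and `Simd<i32, 4>` name the same type. A quick sketch (not part of the patch), assuming a nightly toolchain with `portable_simd`:

```rust
#![feature(portable_simd)]
use core::simd::{i32x4, Simd};

fn main() {
    // `alias!` expands to `pub type i32x4 = Simd<i32, 4>;`
    let a: i32x4 = Simd::from_array([1, 2, 3, 4]);
    let b: Simd<i32, 4> = a; // same type, no conversion involved
    assert_eq!(b.to_array(), [1, 2, 3, 4]);
}
```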
diff --git a/library/portable-simd/crates/core_simd/src/cast.rs b/library/portable-simd/crates/core_simd/src/cast.rs
new file mode 100644
index 000000000..65a3f845f
--- /dev/null
+++ b/library/portable-simd/crates/core_simd/src/cast.rs
@@ -0,0 +1,55 @@
+use crate::simd::SimdElement;
+
+/// Supporting trait for `Simd::cast`. Typically doesn't need to be used directly.
+///
+/// # Safety
+/// Implementing this trait asserts that the type is a valid vector element for the `simd_cast` or
+/// `simd_as` intrinsics.
+pub unsafe trait SimdCast: SimdElement {}
+
+// Safety: primitive number types can be cast to other primitive number types
+unsafe impl SimdCast for i8 {}
+// Safety: primitive number types can be cast to other primitive number types
+unsafe impl SimdCast for i16 {}
+// Safety: primitive number types can be cast to other primitive number types
+unsafe impl SimdCast for i32 {}
+// Safety: primitive number types can be cast to other primitive number types
+unsafe impl SimdCast for i64 {}
+// Safety: primitive number types can be cast to other primitive number types
+unsafe impl SimdCast for isize {}
+// Safety: primitive number types can be cast to other primitive number types
+unsafe impl SimdCast for u8 {}
+// Safety: primitive number types can be cast to other primitive number types
+unsafe impl SimdCast for u16 {}
+// Safety: primitive number types can be cast to other primitive number types
+unsafe impl SimdCast for u32 {}
+// Safety: primitive number types can be cast to other primitive number types
+unsafe impl SimdCast for u64 {}
+// Safety: primitive number types can be cast to other primitive number types
+unsafe impl SimdCast for usize {}
+// Safety: primitive number types can be cast to other primitive number types
+unsafe impl SimdCast for f32 {}
+// Safety: primitive number types can be cast to other primitive number types
+unsafe impl SimdCast for f64 {}
+
+/// Supporting trait for `Simd::cast_ptr`. Typically doesn't need to be used directly.
+///
+/// # Safety
+/// Implementing this trait asserts that the type is a valid vector element for the `simd_cast_ptr`
+/// intrinsic.
+pub unsafe trait SimdCastPtr<T> {}
+
+// Safety: pointers can be cast to other pointer types
+unsafe impl<T, U> SimdCastPtr<T> for *const U
+where
+ U: core::ptr::Pointee,
+ T: core::ptr::Pointee<Metadata = U::Metadata>,
+{
+}
+// Safety: pointers can be cast to other pointer types
+unsafe impl<T, U> SimdCastPtr<T> for *mut U
+where
+ U: core::ptr::Pointee,
+ T: core::ptr::Pointee<Metadata = U::Metadata>,
+{
+}
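`SimdCast` backs `Simd::cast`, which performs elementwise `as`-style conversions. A hedged nightly sketch: float-to-int casts saturate and NaN becomes 0, matching scalar `as` semantics.

```rust
#![feature(portable_simd)]
use core::simd::Simd;

fn main() {
    let floats = Simd::<f32, 4>::from_array([1.9, -4.5, f32::MAX, f32::NAN]);
    let ints = floats.cast::<i32>(); // elementwise, like `x as i32`
    assert_eq!(ints.to_array(), [1, -4, i32::MAX, 0]);
}
```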
diff --git a/library/portable-simd/crates/core_simd/src/elements.rs b/library/portable-simd/crates/core_simd/src/elements.rs
index 701eb66b2..dc7f52a4d 100644
--- a/library/portable-simd/crates/core_simd/src/elements.rs
+++ b/library/portable-simd/crates/core_simd/src/elements.rs
@@ -1,11 +1,15 @@
+mod const_ptr;
mod float;
mod int;
+mod mut_ptr;
mod uint;
mod sealed {
pub trait Sealed {}
}
+pub use const_ptr::*;
pub use float::*;
pub use int::*;
+pub use mut_ptr::*;
pub use uint::*;
diff --git a/library/portable-simd/crates/core_simd/src/elements/const_ptr.rs b/library/portable-simd/crates/core_simd/src/elements/const_ptr.rs
new file mode 100644
index 000000000..0ef9802b5
--- /dev/null
+++ b/library/portable-simd/crates/core_simd/src/elements/const_ptr.rs
@@ -0,0 +1,141 @@
+use super::sealed::Sealed;
+use crate::simd::{intrinsics, LaneCount, Mask, Simd, SimdPartialEq, SupportedLaneCount};
+
+/// Operations on SIMD vectors of constant pointers.
+pub trait SimdConstPtr: Copy + Sealed {
+ /// Vector of `usize` with the same number of lanes.
+ type Usize;
+
+ /// Vector of `isize` with the same number of lanes.
+ type Isize;
+
+ /// Vector of mutable pointers to the same type.
+ type MutPtr;
+
+ /// Mask type used for manipulating this SIMD vector type.
+ type Mask;
+
+ /// Returns `true` for each lane that is null.
+ fn is_null(self) -> Self::Mask;
+
+ /// Changes constness without changing the type.
+ ///
+ /// Equivalent to calling [`pointer::cast_mut`] on each lane.
+ fn cast_mut(self) -> Self::MutPtr;
+
+ /// Gets the "address" portion of the pointer.
+ ///
+ /// This method discards pointer semantic metadata, so the result cannot be
+ /// directly cast into a valid pointer.
+ ///
+ /// This method semantically discards *provenance* and
+ /// *address-space* information. To properly restore that information, use [`Self::with_addr`].
+ ///
+ /// Equivalent to calling [`pointer::addr`] on each lane.
+ fn addr(self) -> Self::Usize;
+
+ /// Creates a new pointer with the given address.
+ ///
+ /// This performs the same operation as a cast, but copies the *address-space* and
+ /// *provenance* of `self` to the new pointer.
+ ///
+ /// Equivalent to calling [`pointer::with_addr`] on each lane.
+ fn with_addr(self, addr: Self::Usize) -> Self;
+
+ /// Gets the "address" portion of the pointer, and "exposes" the provenance part for future use
+ /// in [`Self::from_exposed_addr`].
+ fn expose_addr(self) -> Self::Usize;
+
+ /// Convert an address back to a pointer, picking up a previously "exposed" provenance.
+ ///
+ /// Equivalent to calling [`core::ptr::from_exposed_addr`] on each lane.
+ fn from_exposed_addr(addr: Self::Usize) -> Self;
+
+ /// Calculates the offset from a pointer using wrapping arithmetic.
+ ///
+ /// Equivalent to calling [`pointer::wrapping_offset`] on each lane.
+ fn wrapping_offset(self, offset: Self::Isize) -> Self;
+
+ /// Calculates the offset from a pointer using wrapping arithmetic.
+ ///
+ /// Equivalent to calling [`pointer::wrapping_add`] on each lane.
+ fn wrapping_add(self, count: Self::Usize) -> Self;
+
+ /// Calculates the offset from a pointer using wrapping arithmetic.
+ ///
+ /// Equivalent to calling [`pointer::wrapping_sub`] on each lane.
+ fn wrapping_sub(self, count: Self::Usize) -> Self;
+}
+
+impl<T, const LANES: usize> Sealed for Simd<*const T, LANES> where
+ LaneCount<LANES>: SupportedLaneCount
+{
+}
+
+impl<T, const LANES: usize> SimdConstPtr for Simd<*const T, LANES>
+where
+ LaneCount<LANES>: SupportedLaneCount,
+{
+ type Usize = Simd<usize, LANES>;
+ type Isize = Simd<isize, LANES>;
+ type MutPtr = Simd<*mut T, LANES>;
+ type Mask = Mask<isize, LANES>;
+
+ #[inline]
+ fn is_null(self) -> Self::Mask {
+ Simd::splat(core::ptr::null()).simd_eq(self)
+ }
+
+ #[inline]
+ fn cast_mut(self) -> Self::MutPtr {
+ self.cast_ptr()
+ }
+
+ #[inline]
+ fn addr(self) -> Self::Usize {
+ // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
+ // SAFETY: Pointer-to-integer transmutes are valid (if you are okay with losing the
+ // provenance).
+ unsafe { core::mem::transmute_copy(&self) }
+ }
+
+ #[inline]
+ fn with_addr(self, addr: Self::Usize) -> Self {
+ // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
+ //
+ // In the meantime, this operation is defined to be "as if" it was
+ // a wrapping_offset, so we can emulate it as such. This should properly
+ // restore pointer provenance even under today's compiler.
+ self.cast_ptr::<*const u8>()
+ .wrapping_offset(addr.cast::<isize>() - self.addr().cast::<isize>())
+ .cast_ptr()
+ }
+
+ #[inline]
+ fn expose_addr(self) -> Self::Usize {
+ // Safety: `self` is a pointer vector
+ unsafe { intrinsics::simd_expose_addr(self) }
+ }
+
+ #[inline]
+ fn from_exposed_addr(addr: Self::Usize) -> Self {
+ // Safety: `self` is a pointer vector
+ unsafe { intrinsics::simd_from_exposed_addr(addr) }
+ }
+
+ #[inline]
+ fn wrapping_offset(self, count: Self::Isize) -> Self {
+ // Safety: simd_arith_offset takes a vector of pointers and a vector of offsets
+ unsafe { intrinsics::simd_arith_offset(self, count) }
+ }
+
+ #[inline]
+ fn wrapping_add(self, count: Self::Usize) -> Self {
+ self.wrapping_offset(count.cast())
+ }
+
+ #[inline]
+ fn wrapping_sub(self, count: Self::Usize) -> Self {
+ self.wrapping_offset(-count.cast::<isize>())
+ }
+}
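To make the new trait concrete, a small sketch (not from the patch) of null checks and wrapping arithmetic on a vector of `*const` pointers, assuming nightly with the same features the crate enables:

```rust
#![feature(portable_simd, strict_provenance)]
use core::simd::{Simd, SimdConstPtr};

fn main() {
    let data = [1i32, 2, 3, 4];
    // wrapping_add counts in units of T, like pointer::wrapping_add.
    let ptrs = Simd::splat(data.as_ptr()).wrapping_add(Simd::from_array([0usize, 1, 2, 3]));
    assert!(!ptrs.is_null().any());
    // addr() strips provenance; lane i sits at base + i * size_of::<i32>() bytes.
    let base = data.as_ptr().addr();
    assert_eq!(ptrs.addr().to_array(), [base, base + 4, base + 8, base + 12]);
}
```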
diff --git a/library/portable-simd/crates/core_simd/src/elements/mut_ptr.rs b/library/portable-simd/crates/core_simd/src/elements/mut_ptr.rs
new file mode 100644
index 000000000..d87986b4a
--- /dev/null
+++ b/library/portable-simd/crates/core_simd/src/elements/mut_ptr.rs
@@ -0,0 +1,136 @@
+use super::sealed::Sealed;
+use crate::simd::{intrinsics, LaneCount, Mask, Simd, SimdPartialEq, SupportedLaneCount};
+
+/// Operations on SIMD vectors of mutable pointers.
+pub trait SimdMutPtr: Copy + Sealed {
+ /// Vector of `usize` with the same number of lanes.
+ type Usize;
+
+ /// Vector of `isize` with the same number of lanes.
+ type Isize;
+
+ /// Vector of constant pointers to the same type.
+ type ConstPtr;
+
+ /// Mask type used for manipulating this SIMD vector type.
+ type Mask;
+
+ /// Returns `true` for each lane that is null.
+ fn is_null(self) -> Self::Mask;
+
+ /// Changes constness without changing the type.
+ ///
+ /// Equivalent to calling [`pointer::cast_const`] on each lane.
+ fn cast_const(self) -> Self::ConstPtr;
+
+ /// Gets the "address" portion of the pointer.
+ ///
+ /// This method discards pointer semantic metadata, so the result cannot be
+ /// directly cast into a valid pointer.
+ ///
+ /// Equivalent to calling [`pointer::addr`] on each lane.
+ fn addr(self) -> Self::Usize;
+
+ /// Creates a new pointer with the given address.
+ ///
+ /// This performs the same operation as a cast, but copies the *address-space* and
+ /// *provenance* of `self` to the new pointer.
+ ///
+ /// Equivalent to calling [`pointer::with_addr`] on each lane.
+ fn with_addr(self, addr: Self::Usize) -> Self;
+
+ /// Gets the "address" portion of the pointer, and "exposes" the provenance part for future use
+ /// in [`Self::from_exposed_addr`].
+ fn expose_addr(self) -> Self::Usize;
+
+ /// Convert an address back to a pointer, picking up a previously "exposed" provenance.
+ ///
+ /// Equivalent to calling [`core::ptr::from_exposed_addr_mut`] on each lane.
+ fn from_exposed_addr(addr: Self::Usize) -> Self;
+
+ /// Calculates the offset from a pointer using wrapping arithmetic.
+ ///
+ /// Equivalent to calling [`pointer::wrapping_offset`] on each lane.
+ fn wrapping_offset(self, offset: Self::Isize) -> Self;
+
+ /// Calculates the offset from a pointer using wrapping arithmetic.
+ ///
+ /// Equivalent to calling [`pointer::wrapping_add`] on each lane.
+ fn wrapping_add(self, count: Self::Usize) -> Self;
+
+ /// Calculates the offset from a pointer using wrapping arithmetic.
+ ///
+ /// Equivalent to calling [`pointer::wrapping_sub`] on each lane.
+ fn wrapping_sub(self, count: Self::Usize) -> Self;
+}
+
+impl<T, const LANES: usize> Sealed for Simd<*mut T, LANES> where LaneCount<LANES>: SupportedLaneCount
+{}
+
+impl<T, const LANES: usize> SimdMutPtr for Simd<*mut T, LANES>
+where
+ LaneCount<LANES>: SupportedLaneCount,
+{
+ type Usize = Simd<usize, LANES>;
+ type Isize = Simd<isize, LANES>;
+ type ConstPtr = Simd<*const T, LANES>;
+ type Mask = Mask<isize, LANES>;
+
+ #[inline]
+ fn is_null(self) -> Self::Mask {
+ Simd::splat(core::ptr::null_mut()).simd_eq(self)
+ }
+
+ #[inline]
+ fn cast_const(self) -> Self::ConstPtr {
+ self.cast_ptr()
+ }
+
+ #[inline]
+ fn addr(self) -> Self::Usize {
+ // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
+ // SAFETY: Pointer-to-integer transmutes are valid (if you are okay with losing the
+ // provenance).
+ unsafe { core::mem::transmute_copy(&self) }
+ }
+
+ #[inline]
+ fn with_addr(self, addr: Self::Usize) -> Self {
+ // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
+ //
+ // In the meantime, this operation is defined to be "as if" it was
+ // a wrapping_offset, so we can emulate it as such. This should properly
+ // restore pointer provenance even under today's compiler.
+ self.cast_ptr::<*mut u8>()
+ .wrapping_offset(addr.cast::<isize>() - self.addr().cast::<isize>())
+ .cast_ptr()
+ }
+
+ #[inline]
+ fn expose_addr(self) -> Self::Usize {
+ // Safety: `self` is a pointer vector
+ unsafe { intrinsics::simd_expose_addr(self) }
+ }
+
+ #[inline]
+ fn from_exposed_addr(addr: Self::Usize) -> Self {
+ // Safety: `self` is a pointer vector
+ unsafe { intrinsics::simd_from_exposed_addr(addr) }
+ }
+
+ #[inline]
+ fn wrapping_offset(self, count: Self::Isize) -> Self {
+ // Safety: simd_arith_offset takes a vector of pointers and a vector of offsets
+ unsafe { intrinsics::simd_arith_offset(self, count) }
+ }
+
+ #[inline]
+ fn wrapping_add(self, count: Self::Usize) -> Self {
+ self.wrapping_offset(count.cast())
+ }
+
+ #[inline]
+ fn wrapping_sub(self, count: Self::Usize) -> Self {
+ self.wrapping_offset(-count.cast::<isize>())
+ }
+}
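Same shape as the `*const` impl. One extra sketch (illustrative, nightly): `with_addr` replaces each lane's address while keeping its provenance, which for `*mut u8` coincides with a plain `wrapping_add`, as the "as if it was a wrapping_offset" comment in the impl describes.

```rust
#![feature(portable_simd, strict_provenance)]
use core::simd::{Simd, SimdMutPtr};

fn main() {
    let mut buf = [0u8; 4];
    let ptrs = Simd::splat(buf.as_mut_ptr());
    let offsets = Simd::from_array([0usize, 1, 2, 3]);
    // New addresses, same per-lane provenance.
    let spread = ptrs.with_addr(ptrs.addr() + offsets);
    assert_eq!(spread.addr(), ptrs.wrapping_add(offsets).addr());
}
```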
diff --git a/library/portable-simd/crates/core_simd/src/eq.rs b/library/portable-simd/crates/core_simd/src/eq.rs
index c7111f720..80763c072 100644
--- a/library/portable-simd/crates/core_simd/src/eq.rs
+++ b/library/portable-simd/crates/core_simd/src/eq.rs
@@ -1,4 +1,6 @@
-use crate::simd::{intrinsics, LaneCount, Mask, Simd, SimdElement, SupportedLaneCount};
+use crate::simd::{
+ intrinsics, LaneCount, Mask, Simd, SimdConstPtr, SimdElement, SimdMutPtr, SupportedLaneCount,
+};
/// Parallel `PartialEq`.
pub trait SimdPartialEq {
@@ -71,3 +73,37 @@ macro_rules! impl_mask {
}
impl_mask! { i8, i16, i32, i64, isize }
+
+impl<T, const LANES: usize> SimdPartialEq for Simd<*const T, LANES>
+where
+ LaneCount<LANES>: SupportedLaneCount,
+{
+ type Mask = Mask<isize, LANES>;
+
+ #[inline]
+ fn simd_eq(self, other: Self) -> Self::Mask {
+ self.addr().simd_eq(other.addr())
+ }
+
+ #[inline]
+ fn simd_ne(self, other: Self) -> Self::Mask {
+ self.addr().simd_ne(other.addr())
+ }
+}
+
+impl<T, const LANES: usize> SimdPartialEq for Simd<*mut T, LANES>
+where
+ LaneCount<LANES>: SupportedLaneCount,
+{
+ type Mask = Mask<isize, LANES>;
+
+ #[inline]
+ fn simd_eq(self, other: Self) -> Self::Mask {
+ self.addr().simd_eq(other.addr())
+ }
+
+ #[inline]
+ fn simd_ne(self, other: Self) -> Self::Mask {
+ self.addr().simd_ne(other.addr())
+ }
+}
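Since both impls defer to `addr()`, pointer-vector equality is purely a lane-wise address comparison. An illustrative nightly sketch:

```rust
#![feature(portable_simd)]
use core::simd::{Simd, SimdPartialEq};

fn main() {
    let data = [1u8, 2, 3, 4];
    let p = Simd::splat(data.as_ptr());
    let q = Simd::from_array([data.as_ptr(), core::ptr::null(), data.as_ptr(), core::ptr::null()]);
    assert_eq!(p.simd_eq(q).to_array(), [true, false, true, false]);
}
```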
diff --git a/library/portable-simd/crates/core_simd/src/fmt.rs b/library/portable-simd/crates/core_simd/src/fmt.rs
index dbd9839c4..b7317969c 100644
--- a/library/portable-simd/crates/core_simd/src/fmt.rs
+++ b/library/portable-simd/crates/core_simd/src/fmt.rs
@@ -1,39 +1,21 @@
use crate::simd::{LaneCount, Simd, SimdElement, SupportedLaneCount};
use core::fmt;
-macro_rules! impl_fmt_trait {
- { $($trait:ident,)* } => {
- $(
- impl<T, const LANES: usize> fmt::$trait for Simd<T, LANES>
- where
- LaneCount<LANES>: SupportedLaneCount,
- T: SimdElement + fmt::$trait,
- {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- #[repr(transparent)]
- struct Wrapper<'a, T: fmt::$trait>(&'a T);
-
- impl<T: fmt::$trait> fmt::Debug for Wrapper<'_, T> {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- self.0.fmt(f)
- }
- }
-
- f.debug_list()
- .entries(self.as_array().iter().map(|x| Wrapper(x)))
- .finish()
- }
- }
- )*
+impl<T, const LANES: usize> fmt::Debug for Simd<T, LANES>
+where
+ LaneCount<LANES>: SupportedLaneCount,
+ T: SimdElement + fmt::Debug,
+{
+ /// A `Simd<T, N>` has a debug format like the one for `[T]`:
+ /// ```
+ /// # #![feature(portable_simd)]
+ /// # #[cfg(feature = "as_crate")] use core_simd::simd::Simd;
+ /// # #[cfg(not(feature = "as_crate"))] use core::simd::Simd;
+ /// let floats = Simd::<f32, 4>::splat(-1.0);
+ /// assert_eq!(format!("{:?}", [-1.0; 4]), format!("{:?}", floats));
+ /// ```
+ #[inline]
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ <[T] as fmt::Debug>::fmt(self.as_array(), f)
}
}
-
-impl_fmt_trait! {
- Debug,
- Binary,
- LowerExp,
- UpperExp,
- Octal,
- LowerHex,
- UpperHex,
-}
diff --git a/library/portable-simd/crates/core_simd/src/intrinsics.rs b/library/portable-simd/crates/core_simd/src/intrinsics.rs
index 704e6ed01..dd6698e2b 100644
--- a/library/portable-simd/crates/core_simd/src/intrinsics.rs
+++ b/library/portable-simd/crates/core_simd/src/intrinsics.rs
@@ -61,9 +61,6 @@ extern "platform-intrinsic" {
/// xor
pub(crate) fn simd_xor<T>(x: T, y: T) -> T;
- /// getelementptr (without inbounds)
- pub(crate) fn simd_arith_offset<T, U>(ptrs: T, offsets: U) -> T;
-
/// fptoui/fptosi/uitofp/sitofp
/// casting floats to integers is truncating, so it is safe to convert values like e.g. 1.5
/// but the truncated value must fit in the target type or the result is poison.
@@ -150,4 +147,17 @@ extern "platform-intrinsic" {
pub(crate) fn simd_select<M, T>(m: M, yes: T, no: T) -> T;
#[allow(unused)]
pub(crate) fn simd_select_bitmask<M, T>(m: M, yes: T, no: T) -> T;
+
+ /// getelementptr (without inbounds)
+ /// equivalent to wrapping_offset
+ pub(crate) fn simd_arith_offset<T, U>(ptr: T, offset: U) -> T;
+
+ /// equivalent to `T as U` semantics, specifically for pointers
+ pub(crate) fn simd_cast_ptr<T, U>(ptr: T) -> U;
+
+ /// expose a pointer as an address
+ pub(crate) fn simd_expose_addr<T, U>(ptr: T) -> U;
+
+ /// convert an exposed address back to a pointer
+ pub(crate) fn simd_from_exposed_addr<T, U>(addr: T) -> U;
}
diff --git a/library/portable-simd/crates/core_simd/src/lane_count.rs b/library/portable-simd/crates/core_simd/src/lane_count.rs
index 63723e2ec..2b91eb9e8 100644
--- a/library/portable-simd/crates/core_simd/src/lane_count.rs
+++ b/library/portable-simd/crates/core_simd/src/lane_count.rs
@@ -23,24 +23,20 @@ pub trait SupportedLaneCount: Sealed {
impl<const LANES: usize> Sealed for LaneCount<LANES> {}
-impl SupportedLaneCount for LaneCount<1> {
- type BitMask = [u8; 1];
-}
-impl SupportedLaneCount for LaneCount<2> {
- type BitMask = [u8; 1];
-}
-impl SupportedLaneCount for LaneCount<4> {
- type BitMask = [u8; 1];
-}
-impl SupportedLaneCount for LaneCount<8> {
- type BitMask = [u8; 1];
-}
-impl SupportedLaneCount for LaneCount<16> {
- type BitMask = [u8; 2];
-}
-impl SupportedLaneCount for LaneCount<32> {
- type BitMask = [u8; 4];
-}
-impl SupportedLaneCount for LaneCount<64> {
- type BitMask = [u8; 8];
+macro_rules! supported_lane_count {
+ ($($lanes:literal),+) => {
+ $(
+ impl SupportedLaneCount for LaneCount<$lanes> {
+ type BitMask = [u8; ($lanes + 7) / 8];
+ }
+ )+
+ };
}
+
+supported_lane_count!(1, 2, 4, 8, 16, 32, 64);
+#[cfg(feature = "all_lane_counts")]
+supported_lane_count!(
+ 3, 5, 6, 7, 9, 10, 11, 12, 13, 14, 15, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+ 31, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 63
+);
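The macro collapses the per-count impls into one rule: a bitmask needs `($lanes + 7) / 8` bytes, i.e. ceiling division by 8. A plain-Rust check that this reproduces every deleted impl above:

```rust
// Ceiling division by 8, mirroring the `($lanes + 7) / 8` in the macro.
const fn bitmask_bytes(lanes: usize) -> usize {
    (lanes + 7) / 8
}

fn main() {
    assert_eq!(bitmask_bytes(1), 1);  // LaneCount<1>  => [u8; 1]
    assert_eq!(bitmask_bytes(8), 1);  // LaneCount<8>  => [u8; 1]
    assert_eq!(bitmask_bytes(16), 2); // LaneCount<16> => [u8; 2]
    assert_eq!(bitmask_bytes(64), 8); // LaneCount<64> => [u8; 8]
}
```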
diff --git a/library/portable-simd/crates/core_simd/src/lib.rs b/library/portable-simd/crates/core_simd/src/lib.rs
index 715f258f6..e5307de21 100644
--- a/library/portable-simd/crates/core_simd/src/lib.rs
+++ b/library/portable-simd/crates/core_simd/src/lib.rs
@@ -1,5 +1,8 @@
#![no_std]
#![feature(
+ const_refs_to_cell,
+ const_maybe_uninit_as_mut_ptr,
+ const_mut_refs,
convert_float_to_int,
decl_macro,
intra_doc_pointers,
@@ -7,7 +10,9 @@
repr_simd,
simd_ffi,
staged_api,
- stdsimd
+ stdsimd,
+ strict_provenance,
+ ptr_metadata
)]
#![cfg_attr(feature = "generic_const_exprs", feature(generic_const_exprs))]
#![cfg_attr(feature = "generic_const_exprs", allow(incomplete_features))]
@@ -19,4 +24,3 @@
#[path = "mod.rs"]
mod core_simd;
pub use self::core_simd::simd;
-pub use simd::*;
diff --git a/library/portable-simd/crates/core_simd/src/masks.rs b/library/portable-simd/crates/core_simd/src/masks.rs
index c36c336d8..e0f3c7bee 100644
--- a/library/portable-simd/crates/core_simd/src/masks.rs
+++ b/library/portable-simd/crates/core_simd/src/masks.rs
@@ -55,6 +55,7 @@ pub unsafe trait MaskElement: SimdElement + Sealed {}
macro_rules! impl_element {
{ $ty:ty } => {
impl Sealed for $ty {
+ #[inline]
fn valid<const LANES: usize>(value: Simd<Self, LANES>) -> bool
where
LaneCount<LANES>: SupportedLaneCount,
@@ -62,6 +63,7 @@ macro_rules! impl_element {
(value.simd_eq(Simd::splat(0 as _)) | value.simd_eq(Simd::splat(-1 as _))).all()
}
+ #[inline]
fn eq(self, other: Self) -> bool { self == other }
const TRUE: Self = -1;
@@ -83,8 +85,10 @@ impl_element! { isize }
///
/// Masks represent boolean inclusion/exclusion on a per-lane basis.
///
-/// The layout of this type is unspecified.
-#[repr(transparent)]
+/// The layout of this type is unspecified, and may change between platforms
+/// and/or Rust versions, and code should not assume that it is equivalent to
+/// `[T; LANES]`.
+#[cfg_attr(not(doc), repr(transparent))] // work around https://github.com/rust-lang/rust/issues/90435
pub struct Mask<T, const LANES: usize>(mask_impl::Mask<T, LANES>)
where
T: MaskElement,
@@ -102,6 +106,7 @@ where
T: MaskElement,
LaneCount<LANES>: SupportedLaneCount,
{
+ #[inline]
fn clone(&self) -> Self {
*self
}
@@ -113,11 +118,13 @@ where
LaneCount<LANES>: SupportedLaneCount,
{
/// Construct a mask by setting all lanes to the given value.
+ #[inline]
pub fn splat(value: bool) -> Self {
Self(mask_impl::Mask::splat(value))
}
/// Converts an array of bools to a SIMD mask.
+ #[inline]
pub fn from_array(array: [bool; LANES]) -> Self {
// SAFETY: Rust's bool has a layout of 1 byte (u8) with a value of
// true: 0b_0000_0001
@@ -134,6 +141,7 @@ where
}
/// Converts a SIMD mask to an array of bools.
+ #[inline]
pub fn to_array(self) -> [bool; LANES] {
// This follows mostly the same logic as from_array.
// SAFETY: Rust's bool has a layout of 1 byte (u8) with a value of
@@ -261,6 +269,7 @@ where
T: MaskElement,
LaneCount<LANES>: SupportedLaneCount,
{
+ #[inline]
fn from(array: [bool; LANES]) -> Self {
Self::from_array(array)
}
@@ -271,6 +280,7 @@ where
T: MaskElement,
LaneCount<LANES>: SupportedLaneCount,
{
+ #[inline]
fn from(vector: Mask<T, LANES>) -> Self {
vector.to_array()
}
@@ -520,60 +530,6 @@ where
}
}
-/// A mask for SIMD vectors with eight elements of 8 bits.
-pub type mask8x8 = Mask<i8, 8>;
-
-/// A mask for SIMD vectors with 16 elements of 8 bits.
-pub type mask8x16 = Mask<i8, 16>;
-
-/// A mask for SIMD vectors with 32 elements of 8 bits.
-pub type mask8x32 = Mask<i8, 32>;
-
-/// A mask for SIMD vectors with 64 elements of 8 bits.
-pub type mask8x64 = Mask<i8, 64>;
-
-/// A mask for SIMD vectors with four elements of 16 bits.
-pub type mask16x4 = Mask<i16, 4>;
-
-/// A mask for SIMD vectors with eight elements of 16 bits.
-pub type mask16x8 = Mask<i16, 8>;
-
-/// A mask for SIMD vectors with 16 elements of 16 bits.
-pub type mask16x16 = Mask<i16, 16>;
-
-/// A mask for SIMD vectors with 32 elements of 16 bits.
-pub type mask16x32 = Mask<i16, 32>;
-
-/// A mask for SIMD vectors with two elements of 32 bits.
-pub type mask32x2 = Mask<i32, 2>;
-
-/// A mask for SIMD vectors with four elements of 32 bits.
-pub type mask32x4 = Mask<i32, 4>;
-
-/// A mask for SIMD vectors with eight elements of 32 bits.
-pub type mask32x8 = Mask<i32, 8>;
-
-/// A mask for SIMD vectors with 16 elements of 32 bits.
-pub type mask32x16 = Mask<i32, 16>;
-
-/// A mask for SIMD vectors with two elements of 64 bits.
-pub type mask64x2 = Mask<i64, 2>;
-
-/// A mask for SIMD vectors with four elements of 64 bits.
-pub type mask64x4 = Mask<i64, 4>;
-
-/// A mask for SIMD vectors with eight elements of 64 bits.
-pub type mask64x8 = Mask<i64, 8>;
-
-/// A mask for SIMD vectors with two elements of pointer width.
-pub type masksizex2 = Mask<isize, 2>;
-
-/// A mask for SIMD vectors with four elements of pointer width.
-pub type masksizex4 = Mask<isize, 4>;
-
-/// A mask for SIMD vectors with eight elements of pointer width.
-pub type masksizex8 = Mask<isize, 8>;
-
macro_rules! impl_from {
{ $from:ty => $($to:ty),* } => {
$(
@@ -581,6 +537,7 @@ macro_rules! impl_from {
where
LaneCount<LANES>: SupportedLaneCount,
{
+ #[inline]
fn from(value: Mask<$from, LANES>) -> Self {
value.cast()
}
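With the hand-written aliases gone (they now come from `alias.rs`), masks are used exactly as before. A minimal nightly sketch of the bool round trip and `select` that the SAFETY comments above underpin:

```rust
#![feature(portable_simd)]
use core::simd::{Mask, Simd};

fn main() {
    let m = Mask::<i32, 4>::from_array([true, false, true, false]);
    assert_eq!(m.to_array(), [true, false, true, false]);
    // Lanes where the mask is true come from the first vector.
    let picked = m.select(Simd::from_array([1, 2, 3, 4]), Simd::splat(0));
    assert_eq!(picked.to_array(), [1, 0, 3, 0]);
}
```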
diff --git a/library/portable-simd/crates/core_simd/src/masks/bitmask.rs b/library/portable-simd/crates/core_simd/src/masks/bitmask.rs
index 365ecc0a3..20465ba9b 100644
--- a/library/portable-simd/crates/core_simd/src/masks/bitmask.rs
+++ b/library/portable-simd/crates/core_simd/src/masks/bitmask.rs
@@ -26,6 +26,7 @@ where
T: MaskElement,
LaneCount<LANES>: SupportedLaneCount,
{
+ #[inline]
fn clone(&self) -> Self {
*self
}
@@ -36,6 +37,7 @@ where
T: MaskElement,
LaneCount<LANES>: SupportedLaneCount,
{
+ #[inline]
fn eq(&self, other: &Self) -> bool {
self.0.as_ref() == other.0.as_ref()
}
@@ -46,6 +48,7 @@ where
T: MaskElement,
LaneCount<LANES>: SupportedLaneCount,
{
+ #[inline]
fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
self.0.as_ref().partial_cmp(other.0.as_ref())
}
@@ -63,6 +66,7 @@ where
T: MaskElement,
LaneCount<LANES>: SupportedLaneCount,
{
+ #[inline]
fn cmp(&self, other: &Self) -> core::cmp::Ordering {
self.0.as_ref().cmp(other.0.as_ref())
}
diff --git a/library/portable-simd/crates/core_simd/src/masks/full_masks.rs b/library/portable-simd/crates/core_simd/src/masks/full_masks.rs
index b5ba198e5..1d13c45b8 100644
--- a/library/portable-simd/crates/core_simd/src/masks/full_masks.rs
+++ b/library/portable-simd/crates/core_simd/src/masks/full_masks.rs
@@ -37,6 +37,7 @@ where
T: MaskElement + PartialEq,
LaneCount<LANES>: SupportedLaneCount,
{
+ #[inline]
fn eq(&self, other: &Self) -> bool {
self.0.eq(&other.0)
}
@@ -47,6 +48,7 @@ where
T: MaskElement + PartialOrd,
LaneCount<LANES>: SupportedLaneCount,
{
+ #[inline]
fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
self.0.partial_cmp(&other.0)
}
@@ -64,6 +66,7 @@ where
T: MaskElement + Ord,
LaneCount<LANES>: SupportedLaneCount,
{
+ #[inline]
fn cmp(&self, other: &Self) -> core::cmp::Ordering {
self.0.cmp(&other.0)
}
@@ -262,6 +265,7 @@ where
T: MaskElement,
LaneCount<LANES>: SupportedLaneCount,
{
+ #[inline]
fn from(value: Mask<T, LANES>) -> Self {
value.0
}
diff --git a/library/portable-simd/crates/core_simd/src/masks/to_bitmask.rs b/library/portable-simd/crates/core_simd/src/masks/to_bitmask.rs
index 2235f016c..fc7d6b781 100644
--- a/library/portable-simd/crates/core_simd/src/masks/to_bitmask.rs
+++ b/library/portable-simd/crates/core_simd/src/masks/to_bitmask.rs
@@ -48,10 +48,12 @@ macro_rules! impl_integer_intrinsic {
impl<T: MaskElement> ToBitMask for Mask<T, $lanes> {
type BitMask = $int;
+ #[inline]
fn to_bitmask(self) -> $int {
self.0.to_bitmask_integer()
}
+ #[inline]
fn from_bitmask(bitmask: $int) -> Self {
Self(mask_impl::Mask::from_bitmask_integer(bitmask))
}
@@ -83,10 +85,12 @@ where
{
const BYTES: usize = bitmask_len(LANES);
+ #[inline]
fn to_bitmask_array(self) -> [u8; Self::BYTES] {
self.0.to_bitmask_array()
}
+ #[inline]
fn from_bitmask_array(bitmask: [u8; Self::BYTES]) -> Self {
Mask(mask_impl::Mask::from_bitmask_array(bitmask))
}
diff --git a/library/portable-simd/crates/core_simd/src/mod.rs b/library/portable-simd/crates/core_simd/src/mod.rs
index b472aa3ab..35c659b7a 100644
--- a/library/portable-simd/crates/core_simd/src/mod.rs
+++ b/library/portable-simd/crates/core_simd/src/mod.rs
@@ -6,6 +6,8 @@ pub(crate) mod intrinsics;
#[cfg(feature = "generic_const_exprs")]
mod to_bytes;
+mod alias;
+mod cast;
mod elements;
mod eq;
mod fmt;
@@ -15,6 +17,7 @@ mod masks;
mod ops;
mod ord;
mod select;
+mod swizzle_dyn;
mod vector;
mod vendor;
@@ -22,11 +25,14 @@ mod vendor;
pub mod simd {
pub(crate) use crate::core_simd::intrinsics;
+ pub use crate::core_simd::alias::*;
+ pub use crate::core_simd::cast::*;
pub use crate::core_simd::elements::*;
pub use crate::core_simd::eq::*;
pub use crate::core_simd::lane_count::{LaneCount, SupportedLaneCount};
pub use crate::core_simd::masks::*;
pub use crate::core_simd::ord::*;
pub use crate::core_simd::swizzle::*;
+ pub use crate::core_simd::swizzle_dyn::*;
pub use crate::core_simd::vector::*;
}
diff --git a/library/portable-simd/crates/core_simd/src/ops/deref.rs b/library/portable-simd/crates/core_simd/src/ops/deref.rs
index 9883a74c9..302bf148b 100644
--- a/library/portable-simd/crates/core_simd/src/ops/deref.rs
+++ b/library/portable-simd/crates/core_simd/src/ops/deref.rs
@@ -71,7 +71,7 @@ macro_rules! deref_ops {
#[inline]
#[must_use = "operator returns a new vector without mutating the inputs"]
- fn $call(self, rhs: &$simd) -> Self::Output {
+ fn $call(self, rhs: &'rhs $simd) -> Self::Output {
(*self).$call(*rhs)
}
}
diff --git a/library/portable-simd/crates/core_simd/src/ord.rs b/library/portable-simd/crates/core_simd/src/ord.rs
index 9a87bc2e3..1ae9cd061 100644
--- a/library/portable-simd/crates/core_simd/src/ord.rs
+++ b/library/portable-simd/crates/core_simd/src/ord.rs
@@ -1,4 +1,6 @@
-use crate::simd::{intrinsics, LaneCount, Mask, Simd, SimdPartialEq, SupportedLaneCount};
+use crate::simd::{
+ intrinsics, LaneCount, Mask, Simd, SimdConstPtr, SimdMutPtr, SimdPartialEq, SupportedLaneCount,
+};
/// Parallel `PartialOrd`.
pub trait SimdPartialOrd: SimdPartialEq {
@@ -211,3 +213,101 @@ macro_rules! impl_mask {
}
impl_mask! { i8, i16, i32, i64, isize }
+
+impl<T, const LANES: usize> SimdPartialOrd for Simd<*const T, LANES>
+where
+ LaneCount<LANES>: SupportedLaneCount,
+{
+ #[inline]
+ fn simd_lt(self, other: Self) -> Self::Mask {
+ self.addr().simd_lt(other.addr())
+ }
+
+ #[inline]
+ fn simd_le(self, other: Self) -> Self::Mask {
+ self.addr().simd_le(other.addr())
+ }
+
+ #[inline]
+ fn simd_gt(self, other: Self) -> Self::Mask {
+ self.addr().simd_gt(other.addr())
+ }
+
+ #[inline]
+ fn simd_ge(self, other: Self) -> Self::Mask {
+ self.addr().simd_ge(other.addr())
+ }
+}
+
+impl<T, const LANES: usize> SimdOrd for Simd<*const T, LANES>
+where
+ LaneCount<LANES>: SupportedLaneCount,
+{
+ #[inline]
+ fn simd_max(self, other: Self) -> Self {
+ self.simd_lt(other).select(other, self)
+ }
+
+ #[inline]
+ fn simd_min(self, other: Self) -> Self {
+ self.simd_gt(other).select(other, self)
+ }
+
+ #[inline]
+ fn simd_clamp(self, min: Self, max: Self) -> Self {
+ assert!(
+ min.simd_le(max).all(),
+ "each lane in `min` must be less than or equal to the corresponding lane in `max`",
+ );
+ self.simd_max(min).simd_min(max)
+ }
+}
+
+impl<T, const LANES: usize> SimdPartialOrd for Simd<*mut T, LANES>
+where
+ LaneCount<LANES>: SupportedLaneCount,
+{
+ #[inline]
+ fn simd_lt(self, other: Self) -> Self::Mask {
+ self.addr().simd_lt(other.addr())
+ }
+
+ #[inline]
+ fn simd_le(self, other: Self) -> Self::Mask {
+ self.addr().simd_le(other.addr())
+ }
+
+ #[inline]
+ fn simd_gt(self, other: Self) -> Self::Mask {
+ self.addr().simd_gt(other.addr())
+ }
+
+ #[inline]
+ fn simd_ge(self, other: Self) -> Self::Mask {
+ self.addr().simd_ge(other.addr())
+ }
+}
+
+impl<T, const LANES: usize> SimdOrd for Simd<*mut T, LANES>
+where
+ LaneCount<LANES>: SupportedLaneCount,
+{
+ #[inline]
+ fn simd_max(self, other: Self) -> Self {
+ self.simd_lt(other).select(other, self)
+ }
+
+ #[inline]
+ fn simd_min(self, other: Self) -> Self {
+ self.simd_gt(other).select(other, self)
+ }
+
+ #[inline]
+ fn simd_clamp(self, min: Self, max: Self) -> Self {
+ assert!(
+ min.simd_le(max).all(),
+ "each lane in `min` must be less than or equal to the corresponding lane in `max`",
+ );
+ self.simd_max(min).simd_min(max)
+ }
+}
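As with equality, ordering on pointer vectors is by address, and `simd_max`/`simd_min`/`simd_clamp` are built from `simd_lt`/`simd_gt` plus `select`. A hedged nightly sketch:

```rust
#![feature(portable_simd)]
use core::simd::{Simd, SimdConstPtr, SimdOrd};

fn main() {
    let data = [0u8; 4];
    let lo = Simd::splat(data.as_ptr());
    let hi = lo.wrapping_add(Simd::splat(1)); // one byte past each lane of `lo`
    assert_eq!(lo.simd_max(hi), hi);
    assert_eq!(hi.simd_clamp(lo, lo), lo); // clamped down to `lo`
}
```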
diff --git a/library/portable-simd/crates/core_simd/src/swizzle.rs b/library/portable-simd/crates/core_simd/src/swizzle.rs
index 22999d249..68f20516c 100644
--- a/library/portable-simd/crates/core_simd/src/swizzle.rs
+++ b/library/portable-simd/crates/core_simd/src/swizzle.rs
@@ -265,16 +265,13 @@ where
/// Interleave two vectors.
///
- /// Produces two vectors with lanes taken alternately from `self` and `other`.
+ /// The resulting vectors contain lanes taken alternately from `self` and `other`, first
+ /// filling the first result, and then the second.
///
- /// The first result contains the first `LANES / 2` lanes from `self` and `other`,
- /// alternating, starting with the first lane of `self`.
- ///
- /// The second result contains the last `LANES / 2` lanes from `self` and `other`,
- /// alternating, starting with the lane `LANES / 2` from the start of `self`.
+ /// The reverse of this operation is [`Simd::deinterleave`].
///
/// ```
- /// #![feature(portable_simd)]
+ /// # #![feature(portable_simd)]
/// # use core::simd::Simd;
/// let a = Simd::from_array([0, 1, 2, 3]);
/// let b = Simd::from_array([4, 5, 6, 7]);
@@ -285,29 +282,17 @@ where
#[inline]
#[must_use = "method returns a new vector and does not mutate the original inputs"]
pub fn interleave(self, other: Self) -> (Self, Self) {
- const fn lo<const LANES: usize>() -> [Which; LANES] {
- let mut idx = [Which::First(0); LANES];
- let mut i = 0;
- while i < LANES {
- let offset = i / 2;
- idx[i] = if i % 2 == 0 {
- Which::First(offset)
- } else {
- Which::Second(offset)
- };
- i += 1;
- }
- idx
- }
- const fn hi<const LANES: usize>() -> [Which; LANES] {
+ const fn interleave<const LANES: usize>(high: bool) -> [Which; LANES] {
let mut idx = [Which::First(0); LANES];
let mut i = 0;
while i < LANES {
- let offset = (LANES + i) / 2;
- idx[i] = if i % 2 == 0 {
- Which::First(offset)
+ // Treat the source as a concatenated vector
+ let dst_index = if high { i + LANES } else { i };
+ let src_index = dst_index / 2 + (dst_index % 2) * LANES;
+ idx[i] = if src_index < LANES {
+ Which::First(src_index)
} else {
- Which::Second(offset)
+ Which::Second(src_index % LANES)
};
i += 1;
}
@@ -318,11 +303,11 @@ where
struct Hi;
impl<const LANES: usize> Swizzle2<LANES, LANES> for Lo {
- const INDEX: [Which; LANES] = lo::<LANES>();
+ const INDEX: [Which; LANES] = interleave::<LANES>(false);
}
impl<const LANES: usize> Swizzle2<LANES, LANES> for Hi {
- const INDEX: [Which; LANES] = hi::<LANES>();
+ const INDEX: [Which; LANES] = interleave::<LANES>(true);
}
(Lo::swizzle2(self, other), Hi::swizzle2(self, other))
@@ -336,8 +321,10 @@ where
/// The second result takes every other lane of `self` and then `other`, starting with
/// the second lane.
///
+ /// The reverse of this operation is [`Simd::interleave`].
+ ///
/// ```
- /// #![feature(portable_simd)]
+ /// # #![feature(portable_simd)]
/// # use core::simd::Simd;
/// let a = Simd::from_array([0, 4, 1, 5]);
/// let b = Simd::from_array([2, 6, 3, 7]);
@@ -348,22 +335,17 @@ where
#[inline]
#[must_use = "method returns a new vector and does not mutate the original inputs"]
pub fn deinterleave(self, other: Self) -> (Self, Self) {
- const fn even<const LANES: usize>() -> [Which; LANES] {
- let mut idx = [Which::First(0); LANES];
- let mut i = 0;
- while i < LANES / 2 {
- idx[i] = Which::First(2 * i);
- idx[i + LANES / 2] = Which::Second(2 * i);
- i += 1;
- }
- idx
- }
- const fn odd<const LANES: usize>() -> [Which; LANES] {
+ const fn deinterleave<const LANES: usize>(second: bool) -> [Which; LANES] {
let mut idx = [Which::First(0); LANES];
let mut i = 0;
- while i < LANES / 2 {
- idx[i] = Which::First(2 * i + 1);
- idx[i + LANES / 2] = Which::Second(2 * i + 1);
+ while i < LANES {
+ // Treat the source as a concatenated vector
+ let src_index = i * 2 + second as usize;
+ idx[i] = if src_index < LANES {
+ Which::First(src_index)
+ } else {
+ Which::Second(src_index % LANES)
+ };
i += 1;
}
idx
@@ -373,11 +355,11 @@ where
struct Odd;
impl<const LANES: usize> Swizzle2<LANES, LANES> for Even {
- const INDEX: [Which; LANES] = even::<LANES>();
+ const INDEX: [Which; LANES] = deinterleave::<LANES>(false);
}
impl<const LANES: usize> Swizzle2<LANES, LANES> for Odd {
- const INDEX: [Which; LANES] = odd::<LANES>();
+ const INDEX: [Which; LANES] = deinterleave::<LANES>(true);
}
(Even::swizzle2(self, other), Odd::swizzle2(self, other))
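The rewrite unifies the old `lo`/`hi` (and `even`/`odd`) index builders into one function each by treating `(self, other)` as a single concatenated `2 * LANES` vector. A plain-Rust check of the interleave formula against the doc-test values:

```rust
// `dst_index / 2 + (dst_index % 2) * LANES`, where a flat index >= LANES
// means "lane (i - LANES) of `other`", i.e. Which::Second in the patch.
fn interleave_indices(lanes: usize, high: bool) -> Vec<usize> {
    (0..lanes)
        .map(|i| {
            let dst_index = if high { i + lanes } else { i };
            dst_index / 2 + (dst_index % 2) * lanes
        })
        .collect()
}

fn main() {
    // With a = [0, 1, 2, 3] and b = [4, 5, 6, 7] (flat indices 4..8 = b),
    // this reproduces the doc-test: x = [0, 4, 1, 5], y = [2, 6, 3, 7].
    assert_eq!(interleave_indices(4, false), vec![0, 4, 1, 5]);
    assert_eq!(interleave_indices(4, true), vec![2, 6, 3, 7]);
}
```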
diff --git a/library/portable-simd/crates/core_simd/src/swizzle_dyn.rs b/library/portable-simd/crates/core_simd/src/swizzle_dyn.rs
new file mode 100644
index 000000000..6065d6459
--- /dev/null
+++ b/library/portable-simd/crates/core_simd/src/swizzle_dyn.rs
@@ -0,0 +1,157 @@
+use crate::simd::{LaneCount, Simd, SupportedLaneCount};
+use core::mem;
+
+impl<const N: usize> Simd<u8, N>
+where
+ LaneCount<N>: SupportedLaneCount,
+{
+ /// Swizzle a vector of bytes according to the index vector.
+ /// Indices within range select the appropriate byte.
+ /// Indices "out of bounds" instead select 0.
+ ///
+ /// Note that the current implementation is selected at build time
+ /// of the standard library, so `cargo build -Zbuild-std` may be necessary
+ /// to unlock better performance, especially for larger vectors.
+ /// A planned compiler improvement will enable using `#[target_feature]` instead.
+ #[inline]
+ pub fn swizzle_dyn(self, idxs: Simd<u8, N>) -> Self {
+ #![allow(unused_imports, unused_unsafe)]
+ #[cfg(target_arch = "aarch64")]
+ use core::arch::aarch64::{uint8x8_t, vqtbl1q_u8, vtbl1_u8};
+ #[cfg(all(target_arch = "arm", target_feature = "v7", target_feature = "neon"))]
+ use core::arch::arm::{uint8x8_t, vtbl1_u8};
+ #[cfg(target_arch = "wasm32")]
+ use core::arch::wasm32 as wasm;
+ #[cfg(target_arch = "x86")]
+ use core::arch::x86;
+ #[cfg(target_arch = "x86_64")]
+ use core::arch::x86_64 as x86;
+ // SAFETY: Intrinsics covered by cfg
+ unsafe {
+ match N {
+ #[cfg(target_feature = "neon")]
+ 8 => transize(vtbl1_u8, self, idxs),
+ #[cfg(target_feature = "ssse3")]
+ 16 => transize(x86::_mm_shuffle_epi8, self, idxs),
+ #[cfg(target_feature = "simd128")]
+ 16 => transize(wasm::i8x16_swizzle, self, idxs),
+ #[cfg(all(target_arch = "aarch64", target_feature = "neon"))]
+ 16 => transize(vqtbl1q_u8, self, idxs),
+ #[cfg(all(target_feature = "avx2", not(target_feature = "avx512vbmi")))]
+ 32 => transize_raw(avx2_pshufb, self, idxs),
+ #[cfg(target_feature = "avx512vl,avx512vbmi")]
+ 32 => transize(x86::_mm256_permutexvar_epi8, self, idxs),
+ // Notable absence: avx512bw shuffle
+ // If avx512bw is available, odds of avx512vbmi are good
+ // FIXME: initial AVX512VBMI variant didn't actually pass muster
+ // #[cfg(target_feature = "avx512vbmi")]
+ // 64 => transize(x86::_mm512_permutexvar_epi8, self, idxs),
+ _ => {
+ let mut array = [0; N];
+ for (i, k) in idxs.to_array().into_iter().enumerate() {
+ if (k as usize) < N {
+ array[i] = self[k as usize];
+ };
+ }
+ array.into()
+ }
+ }
+ }
+ }
+}
+
+/// "vpshufb like it was meant to be" on AVX2
+///
+/// # Safety
+/// This requires AVX2 to work
+#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
+#[target_feature(enable = "avx2")]
+#[allow(unused)]
+#[inline]
+#[allow(clippy::let_and_return)]
+unsafe fn avx2_pshufb(bytes: Simd<u8, 32>, idxs: Simd<u8, 32>) -> Simd<u8, 32> {
+ use crate::simd::SimdPartialOrd;
+ #[cfg(target_arch = "x86")]
+ use core::arch::x86;
+ #[cfg(target_arch = "x86_64")]
+ use core::arch::x86_64 as x86;
+ use x86::_mm256_permute2x128_si256 as avx2_cross_shuffle;
+ use x86::_mm256_shuffle_epi8 as avx2_half_pshufb;
+ let mid = Simd::splat(16u8);
+ let high = mid + mid;
+ // SAFETY: Caller promised AVX2
+ unsafe {
+ // This is ordering sensitive, and LLVM will order these how you put them.
+ // Most AVX2 impls use ~5 "ports", and only 1 or 2 are capable of permutes.
+ // But the "compose" step will lower to ops that can also use at least 1 other port.
+ // So this tries to break up permutes so composition flows through "open" ports.
+ // Comparative benches should be done on multiple AVX2 CPUs before reordering this
+
+ let hihi = avx2_cross_shuffle::<0x11>(bytes.into(), bytes.into());
+ let hi_shuf = Simd::from(avx2_half_pshufb(
+ hihi, // duplicate the vector's top half
+ idxs.into(), // so that using only 4 bits of an index still picks bytes 16-31
+ ));
+ // A zero-fill during the compose step gives the "all-Neon-like" OOB-is-0 semantics
+ let compose = idxs.simd_lt(high).select(hi_shuf, Simd::splat(0));
+ let lolo = avx2_cross_shuffle::<0x00>(bytes.into(), bytes.into());
+ let lo_shuf = Simd::from(avx2_half_pshufb(lolo, idxs.into()));
+ // Repeat, then pick indices < 16, overwriting indices 0-15 from previous compose step
+ let compose = idxs.simd_lt(mid).select(lo_shuf, compose);
+ compose
+ }
+}
+
+/// This sets up a call to an architecture-specific function, and in doing so
+/// it persuades rustc that everything is the correct size. Which it is.
+/// This would not be needed if one could convince Rust that, by matching on N,
+/// N is that value, and thus it would be valid to substitute e.g. 16.
+///
+/// # Safety
+/// The correctness of this function hinges on the sizes agreeing in actuality.
+#[allow(dead_code)]
+#[inline(always)]
+unsafe fn transize<T, const N: usize>(
+ f: unsafe fn(T, T) -> T,
+ bytes: Simd<u8, N>,
+ idxs: Simd<u8, N>,
+) -> Simd<u8, N>
+where
+ LaneCount<N>: SupportedLaneCount,
+{
+ let idxs = zeroing_idxs(idxs);
+ // SAFETY: Same obligation to use this function as to use mem::transmute_copy.
+ unsafe { mem::transmute_copy(&f(mem::transmute_copy(&bytes), mem::transmute_copy(&idxs))) }
+}
+
+/// Make indices that yield 0 for this architecture
+#[inline(always)]
+fn zeroing_idxs<const N: usize>(idxs: Simd<u8, N>) -> Simd<u8, N>
+where
+ LaneCount<N>: SupportedLaneCount,
+{
+ // On x86, make sure the top bit is set.
+ #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
+ let idxs = {
+ use crate::simd::SimdPartialOrd;
+ idxs.simd_lt(Simd::splat(N as u8))
+ .select(idxs, Simd::splat(u8::MAX))
+ };
+ // Simply do nothing on most architectures.
+ idxs
+}
+
+/// As transize but no implicit call to `zeroing_idxs`.
+#[allow(dead_code)]
+#[inline(always)]
+unsafe fn transize_raw<T, const N: usize>(
+ f: unsafe fn(T, T) -> T,
+ bytes: Simd<u8, N>,
+ idxs: Simd<u8, N>,
+) -> Simd<u8, N>
+where
+ LaneCount<N>: SupportedLaneCount,
+{
+ // SAFETY: Same obligation to use this function as to use mem::transmute_copy.
+ unsafe { mem::transmute_copy(&f(mem::transmute_copy(&bytes), mem::transmute_copy(&idxs))) }
+}
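Whichever accelerated arm is compiled in, the observable semantics match the scalar fallback above: in-range indices pick that byte, out-of-range indices yield 0. A nightly sketch:

```rust
#![feature(portable_simd)]
use core::simd::Simd;

fn main() {
    let bytes = Simd::from_array([10u8, 20, 30, 40, 50, 60, 70, 80]);
    let idxs = Simd::from_array([7u8, 0, 0, 3, 255, 1, 2, 9]);
    // 255 and 9 are out of range for N = 8, so those lanes become 0.
    assert_eq!(bytes.swizzle_dyn(idxs).to_array(), [80, 10, 10, 40, 0, 20, 30, 0]);
}
```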
diff --git a/library/portable-simd/crates/core_simd/src/vector.rs b/library/portable-simd/crates/core_simd/src/vector.rs
index d52d1ac4d..3809cc961 100644
--- a/library/portable-simd/crates/core_simd/src/vector.rs
+++ b/library/portable-simd/crates/core_simd/src/vector.rs
@@ -1,60 +1,63 @@
-mod float;
-mod int;
-mod uint;
-
-pub use float::*;
-pub use int::*;
-pub use uint::*;
-
-// Vectors of pointers are not for public use at the current time.
-pub(crate) mod ptr;
-
use crate::simd::{
- intrinsics, LaneCount, Mask, MaskElement, SimdPartialOrd, SupportedLaneCount, Swizzle,
+ intrinsics, LaneCount, Mask, MaskElement, SimdCast, SimdCastPtr, SimdConstPtr, SimdMutPtr,
+ SimdPartialOrd, SupportedLaneCount, Swizzle,
};
+use core::convert::{TryFrom, TryInto};
-/// A SIMD vector of `LANES` elements of type `T`. `Simd<T, N>` has the same shape as [`[T; N]`](array), but operates like `T`.
+/// A SIMD vector with the shape of `[T; N]` but the operations of `T`.
///
-/// Two vectors of the same type and length will, by convention, support the operators (+, *, etc.) that `T` does.
-/// These take the lanes at each index on the left-hand side and right-hand side, perform the operation,
-/// and return the result in the same lane in a vector of equal size. For a given operator, this is equivalent to zipping
-/// the two arrays together and mapping the operator over each lane.
+/// `Simd<T, N>` supports the operators (+, *, etc.) that `T` does in "elementwise" fashion.
+/// These take the element at each index from the left-hand side and right-hand side,
+/// perform the operation, then return the result in the same index in a vector of equal size.
+/// However, `Simd` differs from normal iteration and normal arrays:
+/// - `Simd<T, N>` executes `N` operations in a single step with no `break`s
+/// - `Simd<T, N>` can have an alignment greater than `T`, for better mechanical sympathy
+///
+/// By always imposing these constraints on `Simd`, it is easier to compile elementwise operations
+/// into machine instructions that can themselves be executed in parallel.
///
/// ```rust
-/// # #![feature(array_zip, portable_simd)]
+/// # #![feature(portable_simd)]
/// # use core::simd::{Simd};
-/// let a0: [i32; 4] = [-2, 0, 2, 4];
-/// let a1 = [10, 9, 8, 7];
-/// let zm_add = a0.zip(a1).map(|(lhs, rhs)| lhs + rhs);
-/// let zm_mul = a0.zip(a1).map(|(lhs, rhs)| lhs * rhs);
+/// # use core::array;
+/// let a: [i32; 4] = [-2, 0, 2, 4];
+/// let b = [10, 9, 8, 7];
+/// let sum = array::from_fn(|i| a[i] + b[i]);
+/// let prod = array::from_fn(|i| a[i] * b[i]);
///
/// // `Simd<T, N>` implements `From<[T; N]>`
-/// let (v0, v1) = (Simd::from(a0), Simd::from(a1));
+/// let (v, w) = (Simd::from(a), Simd::from(b));
/// // Which means arrays implement `Into<Simd<T, N>>`.
-/// assert_eq!(v0 + v1, zm_add.into());
-/// assert_eq!(v0 * v1, zm_mul.into());
+/// assert_eq!(v + w, sum.into());
+/// assert_eq!(v * w, prod.into());
/// ```
///
-/// `Simd` with integers has the quirk that these operations are also inherently wrapping, as if `T` was [`Wrapping<T>`].
+///
+/// `Simd` with integer elements treats operators as wrapping, as if `T` was [`Wrapping<T>`].
/// Thus, `Simd` does not implement `wrapping_add`, because that is the default behavior.
/// This means there is no warning on overflows, even in "debug" builds.
/// For most applications where `Simd` is appropriate, it is "not a bug" to wrap,
/// and even "debug builds" are unlikely to tolerate the loss of performance.
/// You may want to consider using explicitly checked arithmetic if such is required.
-/// Division by zero still causes a panic, so you may want to consider using floating point numbers if that is unacceptable.
+/// Division by zero on integers still causes a panic, so
+/// you may want to consider using `f32` or `f64` if that is unacceptable.
///
/// [`Wrapping<T>`]: core::num::Wrapping
///
/// # Layout
-/// `Simd<T, N>` has a layout similar to `[T; N]` (identical "shapes"), but with a greater alignment.
+/// `Simd<T, N>` has a layout similar to `[T; N]` (identical "shapes"), with a greater alignment.
/// `[T; N]` is aligned to `T`, but `Simd<T, N>` will have an alignment based on both `T` and `N`.
-/// It is thus sound to [`transmute`] `Simd<T, N>` to `[T; N]`, and will typically optimize to zero cost,
-/// but the reverse transmutation is more likely to require a copy the compiler cannot simply elide.
+/// Thus it is sound to [`transmute`] `Simd<T, N>` to `[T; N]` and should optimize to "zero cost",
+/// but the reverse transmutation may require a copy the compiler cannot simply elide.
///
/// # ABI "Features"
-/// Due to Rust's safety guarantees, `Simd<T, N>` is currently passed to and from functions via memory, not SIMD registers,
-/// except as an optimization. `#[inline]` hints are recommended on functions that accept `Simd<T, N>` or return it.
-/// The need for this may be corrected in the future.
+/// Due to Rust's safety guarantees, `Simd<T, N>` is currently passed and returned via memory,
+/// not SIMD registers, except as an optimization. Using `#[inline]` on functions that accept
+/// `Simd<T, N>` or return it is recommended, at the cost of code generation time, as
+/// inlining SIMD-using functions can omit a large function prolog or epilog and thus
+/// improve both speed and code size. The need for this may be corrected in the future.
+///
+/// Using `#[inline(always)]` still requires additional care.
///
/// # Safe SIMD with Unsafe Rust
///
@@ -65,18 +68,22 @@ use crate::simd::{
/// Thus, when using `unsafe` Rust to read and write `Simd<T, N>` through [raw pointers], it is a good idea to first try with
/// [`read_unaligned`] and [`write_unaligned`]. This is because:
/// - [`read`] and [`write`] require full alignment (in this case, `Simd<T, N>`'s alignment)
-/// - the likely source for reading or destination for writing `Simd<T, N>` is [`[T]`](slice) and similar types, aligned to `T`
-/// - combining these actions would violate the `unsafe` contract and explode the program into a puff of **undefined behavior**
-/// - the compiler can implicitly adjust layouts to make unaligned reads or writes fully aligned if it sees the optimization
-/// - most contemporary processors suffer no performance penalty for "unaligned" reads and writes that are aligned at runtime
+/// - `Simd<T, N>` is often read from or written to [`[T]`](slice) and other types aligned to `T`
+/// - combining these actions violates the `unsafe` contract and explodes the program into
+/// a puff of **undefined behavior**
+/// - the compiler can implicitly adjust layouts to make unaligned reads or writes fully aligned
+/// if it sees the optimization
+/// - most contemporary processors with "aligned" and "unaligned" read and write instructions
+/// exhibit no performance difference if the "unaligned" variant is aligned at runtime
///
-/// By imposing less obligations, unaligned functions are less likely to make the program unsound,
+/// Fewer obligations mean unaligned reads and writes are less likely to make the program unsound,
/// and may be just as fast as stricter alternatives.
-/// When trying to guarantee alignment, [`[T]::as_simd`][as_simd] is an option for converting `[T]` to `[Simd<T, N>]`,
-/// and allows soundly operating on an aligned SIMD body, but it may cost more time when handling the scalar head and tail.
-/// If these are not sufficient, then it is most ideal to design data structures to be already aligned
-/// to the `Simd<T, N>` you wish to use before using `unsafe` Rust to read or write.
-/// More conventional ways to compensate for these facts, like materializing `Simd` to or from an array first,
+/// When trying to guarantee alignment, [`[T]::as_simd`][as_simd] is an option for
+/// converting `[T]` to `[Simd<T, N>]`, and allows soundly operating on an aligned SIMD body,
+/// but it may cost more time when handling the scalar head and tail.
+/// If these are not enough, it is most ideal to design data structures to be already aligned
+/// to `mem::align_of::<Simd<T, N>>()` before using `unsafe` Rust to read or write.
+/// Other ways to compensate for these facts, like materializing `Simd` to or from an array first,
/// are handled by safe methods like [`Simd::from_array`] and [`Simd::from_slice`].
///
/// [`transmute`]: core::mem::transmute
@@ -86,21 +93,26 @@ use crate::simd::{
/// [`read`]: pointer::read
/// [`write`]: pointer::write
/// [as_simd]: slice::as_simd
+//
+// NOTE: Accessing the inner array directly in any way (e.g. by using the `.0` field syntax) or
+// directly constructing an instance of the type (i.e. `let vector = Simd(array)`) should be
+// avoided, as it will likely become illegal on `#[repr(simd)]` structs in the future. It also
+// causes rustc to emit illegal LLVM IR in some cases.
#[repr(simd)]
-pub struct Simd<T, const LANES: usize>([T; LANES])
+pub struct Simd<T, const N: usize>([T; N])
where
- T: SimdElement,
- LaneCount<LANES>: SupportedLaneCount;
+ LaneCount<N>: SupportedLaneCount,
+ T: SimdElement;
-impl<T, const LANES: usize> Simd<T, LANES>
+impl<T, const N: usize> Simd<T, N>
where
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
T: SimdElement,
{
- /// Number of lanes in this vector.
- pub const LANES: usize = LANES;
+ /// Number of elements in this vector.
+ pub const LANES: usize = N;
- /// Returns the number of lanes in this SIMD vector.
+ /// Returns the number of elements in this SIMD vector.
///
/// # Examples
///
@@ -111,10 +123,10 @@ where
/// assert_eq!(v.lanes(), 4);
/// ```
pub const fn lanes(&self) -> usize {
- LANES
+ Self::LANES
}
- /// Constructs a new SIMD vector with all lanes set to the given value.
+ /// Constructs a new SIMD vector with all elements set to the given value.
///
/// # Examples
///
@@ -125,11 +137,11 @@ where
/// assert_eq!(v.as_array(), &[8, 8, 8, 8]);
/// ```
pub fn splat(value: T) -> Self {
- // This is preferred over `[value; LANES]`, since it's explicitly a splat:
+ // This is preferred over `[value; N]`, since it's explicitly a splat:
// https://github.com/rust-lang/rust/issues/97804
struct Splat;
- impl<const LANES: usize> Swizzle<1, LANES> for Splat {
- const INDEX: [usize; LANES] = [0; LANES];
+ impl<const N: usize> Swizzle<1, N> for Splat {
+ const INDEX: [usize; N] = [0; N];
}
Splat::swizzle(Simd::<T, 1>::from([value]))
}
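For readers new to the `Swizzle` trait used by `splat` here: a swizzle selects each output element by a constant index, so an all-zero `INDEX` broadcasts element 0. A hedged sketch of the same machinery (nightly assumed; `Reverse` is a made-up impl for illustration):

```rust
#![feature(portable_simd)]
use core::simd::{Simd, Swizzle};

struct Reverse;
impl Swizzle<4, 4> for Reverse {
    // Output element i takes input element INDEX[i].
    const INDEX: [usize; 4] = [3, 2, 1, 0];
}

fn main() {
    let v = Simd::from_array([1, 2, 3, 4]);
    assert_eq!(Reverse::swizzle(v).to_array(), [4, 3, 2, 1]);
    // `splat` is the degenerate case: Swizzle<1, N> with INDEX = [0; N].
}
```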
@@ -144,32 +156,100 @@ where
/// let v: u64x4 = Simd::from_array([0, 1, 2, 3]);
/// assert_eq!(v.as_array(), &[0, 1, 2, 3]);
/// ```
- pub const fn as_array(&self) -> &[T; LANES] {
- &self.0
+ pub const fn as_array(&self) -> &[T; N] {
+ // SAFETY: `Simd<T, N>` is just an overaligned `[T; N]` with
+ // potential padding at the end, so pointer casting to a
+ // `&[T; N]` is safe.
+ //
+ // NOTE: This deliberately doesn't just use `&self.0`, see the comment
+ // on the struct definition for details.
+ unsafe { &*(self as *const Self as *const [T; N]) }
}
/// Returns a mutable array reference containing the entire SIMD vector.
- pub fn as_mut_array(&mut self) -> &mut [T; LANES] {
- &mut self.0
+ pub fn as_mut_array(&mut self) -> &mut [T; N] {
+ // SAFETY: `Simd<T, N>` is just an overaligned `[T; N]` with
+ // potential padding at the end, so pointer casting to a
+ // `&mut [T; N]` is safe.
+ //
+ // NOTE: This deliberately doesn't just use `&mut self.0`, see the comment
+ // on the struct definition for details.
+ unsafe { &mut *(self as *mut Self as *mut [T; N]) }
+ }
+
+ /// Load a vector from an array of `T`.
+ ///
+ /// This function is necessary since `repr(simd)` has padding for non-power-of-2 vectors (at the time of writing).
+ /// With padding, `read_unaligned` will read past the end of an array of N elements.
+ ///
+ /// # Safety
+ /// Reading `ptr` must be safe, as if by `<*const [T; N]>::read_unaligned`.
+ const unsafe fn load(ptr: *const [T; N]) -> Self {
+ // There are potentially simpler ways to write this function, but this should result in
+ // LLVM `load <N x T>`
+
+ let mut tmp = core::mem::MaybeUninit::<Self>::uninit();
+ // SAFETY: `Simd<T, N>` always contains `N` elements of type `T`. It may have padding
+ // which does not need to be initialized. The safety of reading `ptr` is ensured by the
+ // caller.
+ unsafe {
+ core::ptr::copy_nonoverlapping(ptr, tmp.as_mut_ptr().cast(), 1);
+ tmp.assume_init()
+ }
+ }
+
+ /// Store a vector to an array of `T`.
+ ///
+ /// See `load` as to why this function is necessary.
+ ///
+ /// # Safety
+ /// Writing to `ptr` must be safe, as if by `<*mut [T; N]>::write_unaligned`.
+ const unsafe fn store(self, ptr: *mut [T; N]) {
+ // There are potentially simpler ways to write this function, but this should result in
+ // LLVM `store <N x T>`
+
+ // Creating a temporary helps LLVM turn the memcpy into a store.
+ let tmp = self;
+ // SAFETY: `Simd<T, N>` always contains `N` elements of type `T`. The safety of writing
+ // `ptr` is ensured by the caller.
+ unsafe { core::ptr::copy_nonoverlapping(tmp.as_array(), ptr, 1) }
}
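To make the padding hazard concrete, a sketch under the stated assumption that `repr(simd)` currently pads non-power-of-two vectors:

```rust
#![feature(portable_simd)]
use core::simd::Simd;

fn main() {
    // At the time of writing, Simd<u8, 3> is padded, so a `read_unaligned`
    // through `*const Simd<u8, 3>` aimed at a `[u8; 3]` would read past the
    // array. `load` instead copies exactly one `[u8; 3]` into a
    // MaybeUninit<Simd<u8, 3>>, leaving the padding uninitialized and never
    // touching memory beyond the array.
    assert!(core::mem::size_of::<Simd<u8, 3>>() >= core::mem::size_of::<[u8; 3]>());
    let arr = [1u8, 2, 3];
    let v = Simd::<u8, 3>::from_array(arr); // goes through the safe `load` path
    assert_eq!(v.to_array(), arr);
}
```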
/// Converts an array to a SIMD vector.
- pub const fn from_array(array: [T; LANES]) -> Self {
- Self(array)
+ pub const fn from_array(array: [T; N]) -> Self {
+ // SAFETY: `&array` is safe to read.
+ //
+ // FIXME: We currently use a pointer load instead of `transmute_copy` because `repr(simd)`
+ // results in padding for non-power-of-2 vectors (so vectors are larger than arrays).
+ //
+ // NOTE: This deliberately doesn't just use `Self(array)`, see the comment
+ // on the struct definition for details.
+ unsafe { Self::load(&array) }
}
/// Converts a SIMD vector to an array.
- pub const fn to_array(self) -> [T; LANES] {
- self.0
+ pub const fn to_array(self) -> [T; N] {
+ let mut tmp = core::mem::MaybeUninit::uninit();
+ // SAFETY: writing to `tmp` is safe and initializes it.
+ //
+ // FIXME: We currently use a pointer store instead of `transmute_copy` because `repr(simd)`
+ // results in padding for non-power-of-2 vectors (so vectors are larger than arrays).
+ //
+ // NOTE: This deliberately doesn't just use `self.0`, see the comment
+ // on the struct definition for details.
+ unsafe {
+ self.store(tmp.as_mut_ptr());
+ tmp.assume_init()
+ }
}
- /// Converts a slice to a SIMD vector containing `slice[..LANES]`.
+ /// Converts a slice to a SIMD vector containing `slice[..N]`.
///
/// # Panics
///
- /// Panics if the slice's length is less than the vector's `Simd::LANES`.
+ /// Panics if the slice's length is less than `N`, the vector's number of elements.
///
- /// # Examples
+ /// # Example
///
/// ```
/// # #![feature(portable_simd)]
@@ -180,22 +260,49 @@ where
/// ```
#[must_use]
pub const fn from_slice(slice: &[T]) -> Self {
- assert!(slice.len() >= LANES, "slice length must be at least the number of lanes");
- let mut array = [slice[0]; LANES];
- let mut i = 0;
- while i < LANES {
- array[i] = slice[i];
- i += 1;
- }
- Self(array)
+ assert!(
+ slice.len() >= Self::LANES,
+ "slice length must be at least the number of elements"
+ );
+ // SAFETY: We just checked that the slice contains
+ // at least `N` elements.
+ unsafe { Self::load(slice.as_ptr().cast()) }
+ }
+
+ /// Writes a SIMD vector to the first `N` elements of a slice.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the slice's length is less than `N`, the vector's number of elements.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// # #![feature(portable_simd)]
+ /// # #[cfg(feature = "as_crate")] use core_simd::simd;
+ /// # #[cfg(not(feature = "as_crate"))] use core::simd;
+ /// # use simd::u32x4;
+ /// let mut dest = vec![0; 6];
+ /// let v = u32x4::from_array([1, 2, 3, 4]);
+ /// v.copy_to_slice(&mut dest);
+ /// assert_eq!(&dest, &[1, 2, 3, 4, 0, 0]);
+ /// ```
+ pub fn copy_to_slice(self, slice: &mut [T]) {
+ assert!(
+ slice.len() >= Self::LANES,
+ "slice length must be at least the number of elements"
+ );
+ // SAFETY: We just checked that the slice contains
+ // at least `N` elements.
+ unsafe { self.store(slice.as_mut_ptr().cast()) }
}
- /// Performs lanewise conversion of a SIMD vector's elements to another SIMD-valid type.
+ /// Performs elementwise conversion of a SIMD vector's elements to another SIMD-valid type.
///
- /// This follows the semantics of Rust's `as` conversion for casting
- /// integers to unsigned integers (interpreting as the other type, so `-1` to `MAX`),
- /// and from floats to integers (truncating, or saturating at the limits) for each lane,
- /// or vice versa.
+ /// This follows the semantics of Rust's `as` conversion for casting integers between
+ /// signed and unsigned (interpreting integers as two's complement, so `-1` becomes `U::MAX` and
+ /// `1 << (U::BITS - 1)` becomes `I::MIN`), and from floats to integers (truncating,
+ /// or saturating at the limits) for each element.
///
/// # Examples
/// ```
@@ -215,11 +322,26 @@ where
/// ```
#[must_use]
#[inline]
- pub fn cast<U: SimdElement>(self) -> Simd<U, LANES> {
- // Safety: The input argument is a vector of a valid SIMD element type.
+ pub fn cast<U: SimdCast>(self) -> Simd<U, N>
+ where
+ T: SimdCast,
+ {
+ // Safety: supported types are guaranteed by SimdCast
unsafe { intrinsics::simd_as(self) }
}
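A short sketch of the `as` semantics described above (nightly assumed):

```rust
#![feature(portable_simd)]
use core::simd::Simd;

fn main() {
    let ints = Simd::from_array([-1i32, i32::MIN, 0, 1]);
    // Signed -> unsigned reinterprets the two's complement bits, like scalar `as`.
    assert_eq!(ints.cast::<u32>().to_array(), [u32::MAX, 1 << 31, 0, 1]);

    let floats = Simd::from_array([1.9f32, -2.9, f32::MAX, f32::NAN]);
    // Float -> int truncates toward zero and saturates; NaN becomes 0.
    assert_eq!(floats.cast::<i32>().to_array(), [1, -2, i32::MAX, 0]);
}
```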
+ /// Casts a vector of pointers to another pointer type.
+ #[must_use]
+ #[inline]
+ pub fn cast_ptr<U>(self) -> Simd<U, N>
+ where
+ T: SimdCastPtr<U>,
+ U: SimdElement,
+ {
+ // Safety: supported types are guaranteed by SimdCastPtr
+ unsafe { intrinsics::simd_cast_ptr(self) }
+ }
+
/// Rounds toward zero and converts to the same-width integer type, assuming that
/// the value is finite and fits in that type.
///
@@ -235,90 +357,90 @@ where
///
/// [cast]: Simd::cast
#[inline]
- pub unsafe fn to_int_unchecked<I>(self) -> Simd<I, LANES>
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub unsafe fn to_int_unchecked<I>(self) -> Simd<I, N>
where
- T: core::convert::FloatToInt<I>,
- I: SimdElement,
+ T: core::convert::FloatToInt<I> + SimdCast,
+ I: SimdCast,
{
- // Safety: `self` is a vector, and `FloatToInt` ensures the type can be casted to
- // an integer.
+ // Safety: supported types are guaranteed by SimdCast, the caller is responsible for the extra invariants
unsafe { intrinsics::simd_cast(self) }
}
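And a sketch of the unchecked variant; the caller must guarantee every element is finite and in range for the target type (nightly assumed):

```rust
#![feature(portable_simd)]
use core::simd::Simd;

fn main() {
    let v = Simd::from_array([1.9f32, -2.9, 0.5, -0.5]);
    // Safety: every element is finite and fits in i32 after truncation.
    let ints: Simd<i32, 4> = unsafe { v.to_int_unchecked() };
    assert_eq!(ints.to_array(), [1, -2, 0, 0]);
}
```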
/// Reads from potentially discontiguous indices in `slice` to construct a SIMD vector.
- /// If an index is out-of-bounds, the lane is instead selected from the `or` vector.
+ /// If an index is out-of-bounds, the element is instead selected from the `or` vector.
///
/// # Examples
/// ```
/// # #![feature(portable_simd)]
/// # use core::simd::Simd;
/// let vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18];
- /// let idxs = Simd::from_array([9, 3, 0, 5]);
+ /// let idxs = Simd::from_array([9, 3, 0, 5]); // Note the index that is out-of-bounds
/// let alt = Simd::from_array([-5, -4, -3, -2]);
///
- /// let result = Simd::gather_or(&vec, idxs, alt); // Note the lane that is out-of-bounds.
+ /// let result = Simd::gather_or(&vec, idxs, alt);
/// assert_eq!(result, Simd::from_array([-5, 13, 10, 15]));
/// ```
#[must_use]
#[inline]
- pub fn gather_or(slice: &[T], idxs: Simd<usize, LANES>, or: Self) -> Self {
+ pub fn gather_or(slice: &[T], idxs: Simd<usize, N>, or: Self) -> Self {
Self::gather_select(slice, Mask::splat(true), idxs, or)
}
- /// Reads from potentially discontiguous indices in `slice` to construct a SIMD vector.
- /// If an index is out-of-bounds, the lane is set to the default value for the type.
+ /// Reads from indices in `slice` to construct a SIMD vector.
+ /// If an index is out-of-bounds, the element is set to the default given by `T: Default`.
///
/// # Examples
/// ```
/// # #![feature(portable_simd)]
/// # use core::simd::Simd;
/// let vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18];
- /// let idxs = Simd::from_array([9, 3, 0, 5]);
+ /// let idxs = Simd::from_array([9, 3, 0, 5]); // Note the index that is out-of-bounds
///
- /// let result = Simd::gather_or_default(&vec, idxs); // Note the lane that is out-of-bounds.
+ /// let result = Simd::gather_or_default(&vec, idxs);
/// assert_eq!(result, Simd::from_array([0, 13, 10, 15]));
/// ```
#[must_use]
#[inline]
- pub fn gather_or_default(slice: &[T], idxs: Simd<usize, LANES>) -> Self
+ pub fn gather_or_default(slice: &[T], idxs: Simd<usize, N>) -> Self
where
T: Default,
{
Self::gather_or(slice, idxs, Self::splat(T::default()))
}
- /// Reads from potentially discontiguous indices in `slice` to construct a SIMD vector.
- /// The mask `enable`s all `true` lanes and disables all `false` lanes.
- /// If an index is disabled or is out-of-bounds, the lane is selected from the `or` vector.
+ /// Reads from indices in `slice` to construct a SIMD vector.
+ /// The mask `enable`s all `true` indices and disables all `false` indices.
+ /// If an index is disabled or is out-of-bounds, the element is selected from the `or` vector.
///
/// # Examples
/// ```
/// # #![feature(portable_simd)]
/// # use core::simd::{Simd, Mask};
/// let vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18];
- /// let idxs = Simd::from_array([9, 3, 0, 5]);
+ /// let idxs = Simd::from_array([9, 3, 0, 5]); // Includes an out-of-bounds index
/// let alt = Simd::from_array([-5, -4, -3, -2]);
- /// let enable = Mask::from_array([true, true, true, false]); // Note the mask of the last lane.
+ /// let enable = Mask::from_array([true, true, true, false]); // Includes a masked element
///
- /// let result = Simd::gather_select(&vec, enable, idxs, alt); // Note the lane that is out-of-bounds.
+ /// let result = Simd::gather_select(&vec, enable, idxs, alt);
/// assert_eq!(result, Simd::from_array([-5, 13, 10, -2]));
/// ```
#[must_use]
#[inline]
pub fn gather_select(
slice: &[T],
- enable: Mask<isize, LANES>,
- idxs: Simd<usize, LANES>,
+ enable: Mask<isize, N>,
+ idxs: Simd<usize, N>,
or: Self,
) -> Self {
- let enable: Mask<isize, LANES> = enable & idxs.simd_lt(Simd::splat(slice.len()));
- // Safety: We have masked-off out-of-bounds lanes.
+ let enable: Mask<isize, N> = enable & idxs.simd_lt(Simd::splat(slice.len()));
+ // Safety: We have masked-off out-of-bounds indices.
unsafe { Self::gather_select_unchecked(slice, enable, idxs, or) }
}
- /// Reads from potentially discontiguous indices in `slice` to construct a SIMD vector.
- /// The mask `enable`s all `true` lanes and disables all `false` lanes.
- /// If an index is disabled, the lane is selected from the `or` vector.
+ /// Reads from indices in `slice` to construct a SIMD vector.
+ /// The mask `enable`s all `true` indices and disables all `false` indices.
+ /// If an index is disabled, the element is selected from the `or` vector.
///
/// # Safety
///
@@ -332,57 +454,123 @@ where
/// # #[cfg(not(feature = "as_crate"))] use core::simd;
/// # use simd::{Simd, SimdPartialOrd, Mask};
/// let vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18];
- /// let idxs = Simd::from_array([9, 3, 0, 5]);
+ /// let idxs = Simd::from_array([9, 3, 0, 5]); // Includes an out-of-bounds index
/// let alt = Simd::from_array([-5, -4, -3, -2]);
- /// let enable = Mask::from_array([true, true, true, false]); // Note the final mask lane.
+ /// let enable = Mask::from_array([true, true, true, false]); // Includes a masked element
/// // If this mask were used to gather, it would be unsound. Let's fix that.
/// let enable = enable & idxs.simd_lt(Simd::splat(vec.len()));
///
- /// // We have masked the OOB lane, so it's safe to gather now.
+ /// // The out-of-bounds index has been masked, so it's safe to gather now.
/// let result = unsafe { Simd::gather_select_unchecked(&vec, enable, idxs, alt) };
/// assert_eq!(result, Simd::from_array([-5, 13, 10, -2]));
/// ```
/// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
#[must_use]
#[inline]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub unsafe fn gather_select_unchecked(
slice: &[T],
- enable: Mask<isize, LANES>,
- idxs: Simd<usize, LANES>,
+ enable: Mask<isize, N>,
+ idxs: Simd<usize, N>,
or: Self,
) -> Self {
- let base_ptr = crate::simd::ptr::SimdConstPtr::splat(slice.as_ptr());
+ let base_ptr = Simd::<*const T, N>::splat(slice.as_ptr());
// Ferris forgive me, I have done pointer arithmetic here.
let ptrs = base_ptr.wrapping_add(idxs);
- // Safety: The ptrs have been bounds-masked to prevent memory-unsafe reads insha'allah
- unsafe { intrinsics::simd_gather(or, ptrs, enable.to_int()) }
+ // Safety: The caller is responsible for determining the indices are okay to read
+ unsafe { Self::gather_select_ptr(ptrs, enable, or) }
+ }
+
+ /// Read elementwise from pointers into a SIMD vector.
+ ///
+ /// # Safety
+ ///
+ /// Each read must satisfy the same conditions as [`core::ptr::read`].
+ ///
+ /// # Example
+ /// ```
+ /// # #![feature(portable_simd)]
+ /// # #[cfg(feature = "as_crate")] use core_simd::simd;
+ /// # #[cfg(not(feature = "as_crate"))] use core::simd;
+ /// # use simd::{Simd, SimdConstPtr};
+ /// let values = [6, 2, 4, 9];
+ /// let offsets = Simd::from_array([1, 0, 0, 3]);
+ /// let source = Simd::splat(values.as_ptr()).wrapping_add(offsets);
+ /// let gathered = unsafe { Simd::gather_ptr(source) };
+ /// assert_eq!(gathered, Simd::from_array([2, 6, 6, 9]));
+ /// ```
+ #[must_use]
+ #[inline]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub unsafe fn gather_ptr(source: Simd<*const T, N>) -> Self
+ where
+ T: Default,
+ {
+ // TODO: add an intrinsic that doesn't use a passthru vector, and remove the T: Default bound
+ // Safety: The caller is responsible for upholding all invariants
+ unsafe { Self::gather_select_ptr(source, Mask::splat(true), Self::default()) }
+ }
+
+ /// Conditionally read elementwise from pointers into a SIMD vector.
+ /// The mask `enable`s all `true` pointers and disables all `false` pointers.
+ /// If a pointer is disabled, the element is selected from the `or` vector,
+ /// and no read is performed.
+ ///
+ /// # Safety
+ ///
+ /// Enabled elements must satisfy the same conditions as [`core::ptr::read`].
+ ///
+ /// # Example
+ /// ```
+ /// # #![feature(portable_simd)]
+ /// # #[cfg(feature = "as_crate")] use core_simd::simd;
+ /// # #[cfg(not(feature = "as_crate"))] use core::simd;
+ /// # use simd::{Mask, Simd, SimdConstPtr};
+ /// let values = [6, 2, 4, 9];
+ /// let enable = Mask::from_array([true, true, false, true]);
+ /// let offsets = Simd::from_array([1, 0, 0, 3]);
+ /// let source = Simd::splat(values.as_ptr()).wrapping_add(offsets);
+ /// let gathered = unsafe { Simd::gather_select_ptr(source, enable, Simd::splat(0)) };
+ /// assert_eq!(gathered, Simd::from_array([2, 6, 0, 9]));
+ /// ```
+ #[must_use]
+ #[inline]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub unsafe fn gather_select_ptr(
+ source: Simd<*const T, N>,
+ enable: Mask<isize, N>,
+ or: Self,
+ ) -> Self {
+ // Safety: The caller is responsible for upholding all invariants
+ unsafe { intrinsics::simd_gather(or, source, enable.to_int()) }
}
/// Writes the values in a SIMD vector to potentially discontiguous indices in `slice`.
- /// If two lanes in the scattered vector would write to the same index
- /// only the last lane is guaranteed to actually be written.
+ /// If an index is out-of-bounds, the write is suppressed without panicking.
+ /// If two elements in the scattered vector would write to the same index,
+ /// only the last element is guaranteed to actually be written.
///
/// # Examples
/// ```
/// # #![feature(portable_simd)]
/// # use core::simd::Simd;
/// let mut vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18];
- /// let idxs = Simd::from_array([9, 3, 0, 0]);
+ /// let idxs = Simd::from_array([9, 3, 0, 0]); // Note the duplicate index.
/// let vals = Simd::from_array([-27, 82, -41, 124]);
///
- /// vals.scatter(&mut vec, idxs); // index 0 receives two writes.
+ /// vals.scatter(&mut vec, idxs); // two writes to index 0 mean the last one wins.
/// assert_eq!(vec, vec![124, 11, 12, 82, 14, 15, 16, 17, 18]);
/// ```
#[inline]
- pub fn scatter(self, slice: &mut [T], idxs: Simd<usize, LANES>) {
+ pub fn scatter(self, slice: &mut [T], idxs: Simd<usize, N>) {
self.scatter_select(slice, Mask::splat(true), idxs)
}
- /// Writes the values in a SIMD vector to multiple potentially discontiguous indices in `slice`.
- /// The mask `enable`s all `true` lanes and disables all `false` lanes.
- /// If an enabled index is out-of-bounds, the lane is not written.
- /// If two enabled lanes in the scattered vector would write to the same index,
- /// only the last lane is guaranteed to actually be written.
+ /// Writes values from a SIMD vector to multiple potentially discontiguous indices in `slice`.
+ /// The mask `enable`s all `true` indices and disables all `false` indices.
+ /// If an enabled index is out-of-bounds, the write is suppressed without panicking.
+ /// If two enabled elements in the scattered vector would write to the same index,
+ /// only the last element is guaranteed to actually be written.
///
/// # Examples
/// ```
@@ -391,29 +579,24 @@ where
/// # #[cfg(not(feature = "as_crate"))] use core::simd;
/// # use simd::{Simd, Mask};
/// let mut vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18];
- /// let idxs = Simd::from_array([9, 3, 0, 0]);
+ /// let idxs = Simd::from_array([9, 3, 0, 0]); // Includes an out-of-bounds index
/// let vals = Simd::from_array([-27, 82, -41, 124]);
- /// let enable = Mask::from_array([true, true, true, false]); // Note the mask of the last lane.
+ /// let enable = Mask::from_array([true, true, true, false]); // Includes a masked element
///
- /// vals.scatter_select(&mut vec, enable, idxs); // index 0's second write is masked, thus omitted.
+ /// vals.scatter_select(&mut vec, enable, idxs); // The last write is masked, thus omitted.
/// assert_eq!(vec, vec![-41, 11, 12, 82, 14, 15, 16, 17, 18]);
/// ```
#[inline]
- pub fn scatter_select(
- self,
- slice: &mut [T],
- enable: Mask<isize, LANES>,
- idxs: Simd<usize, LANES>,
- ) {
- let enable: Mask<isize, LANES> = enable & idxs.simd_lt(Simd::splat(slice.len()));
- // Safety: We have masked-off out-of-bounds lanes.
+ pub fn scatter_select(self, slice: &mut [T], enable: Mask<isize, N>, idxs: Simd<usize, N>) {
+ let enable: Mask<isize, N> = enable & idxs.simd_lt(Simd::splat(slice.len()));
+ // Safety: We have masked-off out-of-bounds indices.
unsafe { self.scatter_select_unchecked(slice, enable, idxs) }
}
- /// Writes the values in a SIMD vector to multiple potentially discontiguous indices in `slice`.
- /// The mask `enable`s all `true` lanes and disables all `false` lanes.
- /// If two enabled lanes in the scattered vector would write to the same index,
- /// only the last lane is guaranteed to actually be written.
+ /// Writes values from a SIMD vector to multiple potentially discontiguous indices in `slice`.
+ /// The mask `enable`s all `true` indices and disables all `false` indices.
+ /// If two enabled elements in the scattered vector would write to the same index,
+ /// only the last element is guaranteed to actually be written.
///
/// # Safety
///
@@ -429,22 +612,23 @@ where
/// let mut vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18];
/// let idxs = Simd::from_array([9, 3, 0, 0]);
/// let vals = Simd::from_array([-27, 82, -41, 124]);
- /// let enable = Mask::from_array([true, true, true, false]); // Note the mask of the last lane.
+ /// let enable = Mask::from_array([true, true, true, false]); // Masks the final index
/// // If this mask were used to scatter, it would be unsound. Let's fix that.
/// let enable = enable & idxs.simd_lt(Simd::splat(vec.len()));
///
- /// // We have masked the OOB lane, so it's safe to scatter now.
+ /// // We have masked the OOB index, so it's safe to scatter now.
/// unsafe { vals.scatter_select_unchecked(&mut vec, enable, idxs); }
- /// // index 0's second write is masked, thus was omitted.
+ /// // The second write to index 0 was masked, thus omitted.
/// assert_eq!(vec, vec![-41, 11, 12, 82, 14, 15, 16, 17, 18]);
/// ```
/// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
#[inline]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub unsafe fn scatter_select_unchecked(
self,
slice: &mut [T],
- enable: Mask<isize, LANES>,
- idxs: Simd<usize, LANES>,
+ enable: Mask<isize, N>,
+ idxs: Simd<usize, N>,
) {
// Safety: This block works with *mut T derived from &mut 'a [T],
// which means it is delicate in Rust's borrowing model, circa 2021:
@@ -458,36 +642,89 @@ where
// 3. &mut [T] which will become our base ptr.
unsafe {
// Now Entering ☢️ *mut T Zone
- let base_ptr = crate::simd::ptr::SimdMutPtr::splat(slice.as_mut_ptr());
+ let base_ptr = Simd::<*mut T, N>::splat(slice.as_mut_ptr());
// Ferris forgive me, I have done pointer arithmetic here.
let ptrs = base_ptr.wrapping_add(idxs);
// The ptrs have been bounds-masked to prevent memory-unsafe writes insha'allah
- intrinsics::simd_scatter(self, ptrs, enable.to_int())
+ self.scatter_select_ptr(ptrs, enable);
// Cleared ☢️ *mut T Zone
}
}
+
+ /// Write elementwise from a SIMD vector to pointers.
+ ///
+ /// # Safety
+ ///
+ /// Each write must satisfy the same conditions as [`core::ptr::write`].
+ ///
+ /// # Example
+ /// ```
+ /// # #![feature(portable_simd)]
+ /// # #[cfg(feature = "as_crate")] use core_simd::simd;
+ /// # #[cfg(not(feature = "as_crate"))] use core::simd;
+ /// # use simd::{Simd, SimdMutPtr};
+ /// let mut values = [0; 4];
+ /// let offset = Simd::from_array([3, 2, 1, 0]);
+ /// let ptrs = Simd::splat(values.as_mut_ptr()).wrapping_add(offset);
+ /// unsafe { Simd::from_array([6, 3, 5, 7]).scatter_ptr(ptrs); }
+ /// assert_eq!(values, [7, 5, 3, 6]);
+ /// ```
+ #[inline]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub unsafe fn scatter_ptr(self, dest: Simd<*mut T, N>) {
+ // Safety: The caller is responsible for upholding all invariants
+ unsafe { self.scatter_select_ptr(dest, Mask::splat(true)) }
+ }
+
+ /// Conditionally write elementwise from a SIMD vector to pointers.
+ /// The mask `enable`s all `true` pointers and disables all `false` pointers.
+ /// If a pointer is disabled, the write to its pointee is skipped.
+ ///
+ /// # Safety
+ ///
+ /// Enabled pointers must satisfy the same conditions as [`core::ptr::write`].
+ ///
+ /// # Example
+ /// ```
+ /// # #![feature(portable_simd)]
+ /// # #[cfg(feature = "as_crate")] use core_simd::simd;
+ /// # #[cfg(not(feature = "as_crate"))] use core::simd;
+ /// # use simd::{Mask, Simd, SimdMutPtr};
+ /// let mut values = [0; 4];
+ /// let offset = Simd::from_array([3, 2, 1, 0]);
+ /// let ptrs = Simd::splat(values.as_mut_ptr()).wrapping_add(offset);
+ /// let enable = Mask::from_array([true, true, false, false]);
+ /// unsafe { Simd::from_array([6, 3, 5, 7]).scatter_select_ptr(ptrs, enable); }
+ /// assert_eq!(values, [0, 0, 3, 6]);
+ /// ```
+ #[inline]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub unsafe fn scatter_select_ptr(self, dest: Simd<*mut T, N>, enable: Mask<isize, N>) {
+ // Safety: The caller is responsible for upholding all invariants
+ unsafe { intrinsics::simd_scatter(self, dest, enable.to_int()) }
+ }
}
-impl<T, const LANES: usize> Copy for Simd<T, LANES>
+impl<T, const N: usize> Copy for Simd<T, N>
where
+ LaneCount<N>: SupportedLaneCount,
T: SimdElement,
- LaneCount<LANES>: SupportedLaneCount,
{
}
-impl<T, const LANES: usize> Clone for Simd<T, LANES>
+impl<T, const N: usize> Clone for Simd<T, N>
where
+ LaneCount<N>: SupportedLaneCount,
T: SimdElement,
- LaneCount<LANES>: SupportedLaneCount,
{
fn clone(&self) -> Self {
*self
}
}
-impl<T, const LANES: usize> Default for Simd<T, LANES>
+impl<T, const N: usize> Default for Simd<T, N>
where
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
T: SimdElement + Default,
{
#[inline]
@@ -496,20 +733,20 @@ where
}
}
-impl<T, const LANES: usize> PartialEq for Simd<T, LANES>
+impl<T, const N: usize> PartialEq for Simd<T, N>
where
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
T: SimdElement + PartialEq,
{
#[inline]
fn eq(&self, other: &Self) -> bool {
// Safety: All SIMD vectors are SimdPartialEq, and the comparison produces a valid mask.
let mask = unsafe {
- let tfvec: Simd<<T as SimdElement>::Mask, LANES> = intrinsics::simd_eq(*self, *other);
+ let tfvec: Simd<<T as SimdElement>::Mask, N> = intrinsics::simd_eq(*self, *other);
Mask::from_int_unchecked(tfvec)
};
- // Two vectors are equal if all lanes tested true for vertical equality.
+ // Two vectors are equal if all elements are equal when compared elementwise
mask.all()
}
@@ -518,18 +755,18 @@ where
fn ne(&self, other: &Self) -> bool {
// Safety: All SIMD vectors are SimdPartialEq, and the comparison produces a valid mask.
let mask = unsafe {
- let tfvec: Simd<<T as SimdElement>::Mask, LANES> = intrinsics::simd_ne(*self, *other);
+ let tfvec: Simd<<T as SimdElement>::Mask, N> = intrinsics::simd_ne(*self, *other);
Mask::from_int_unchecked(tfvec)
};
- // Two vectors are non-equal if any lane tested true for vertical non-equality.
+ // Two vectors are non-equal if any elements are non-equal when compared elementwise
mask.any()
}
}
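So `==` on whole vectors is an elementwise comparison reduced with `all`, and `!=` reduces with `any`. A quick sketch (nightly assumed):

```rust
#![feature(portable_simd)]
use core::simd::u32x4;

fn main() {
    let a = u32x4::from_array([1, 2, 3, 4]);
    let b = u32x4::from_array([1, 2, 0, 4]);
    // One differing element is enough to make the vectors unequal.
    assert!(a != b);
    assert_eq!(a, u32x4::from_array([1, 2, 3, 4]));
}
```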
-impl<T, const LANES: usize> PartialOrd for Simd<T, LANES>
+impl<T, const N: usize> PartialOrd for Simd<T, N>
where
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
T: SimdElement + PartialOrd,
{
#[inline]
@@ -539,16 +776,16 @@ where
}
}
-impl<T, const LANES: usize> Eq for Simd<T, LANES>
+impl<T, const N: usize> Eq for Simd<T, N>
where
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
T: SimdElement + Eq,
{
}
-impl<T, const LANES: usize> Ord for Simd<T, LANES>
+impl<T, const N: usize> Ord for Simd<T, N>
where
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
T: SimdElement + Ord,
{
#[inline]
@@ -558,9 +795,9 @@ where
}
}
-impl<T, const LANES: usize> core::hash::Hash for Simd<T, LANES>
+impl<T, const N: usize> core::hash::Hash for Simd<T, N>
where
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
T: SimdElement + core::hash::Hash,
{
#[inline]
@@ -573,72 +810,96 @@ where
}
// array references
-impl<T, const LANES: usize> AsRef<[T; LANES]> for Simd<T, LANES>
+impl<T, const N: usize> AsRef<[T; N]> for Simd<T, N>
where
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
T: SimdElement,
{
#[inline]
- fn as_ref(&self) -> &[T; LANES] {
- &self.0
+ fn as_ref(&self) -> &[T; N] {
+ self.as_array()
}
}
-impl<T, const LANES: usize> AsMut<[T; LANES]> for Simd<T, LANES>
+impl<T, const N: usize> AsMut<[T; N]> for Simd<T, N>
where
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
T: SimdElement,
{
#[inline]
- fn as_mut(&mut self) -> &mut [T; LANES] {
- &mut self.0
+ fn as_mut(&mut self) -> &mut [T; N] {
+ self.as_mut_array()
}
}
// slice references
-impl<T, const LANES: usize> AsRef<[T]> for Simd<T, LANES>
+impl<T, const N: usize> AsRef<[T]> for Simd<T, N>
where
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
T: SimdElement,
{
#[inline]
fn as_ref(&self) -> &[T] {
- &self.0
+ self.as_array()
}
}
-impl<T, const LANES: usize> AsMut<[T]> for Simd<T, LANES>
+impl<T, const N: usize> AsMut<[T]> for Simd<T, N>
where
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
T: SimdElement,
{
#[inline]
fn as_mut(&mut self) -> &mut [T] {
- &mut self.0
+ self.as_mut_array()
}
}
// vector/array conversion
-impl<T, const LANES: usize> From<[T; LANES]> for Simd<T, LANES>
+impl<T, const N: usize> From<[T; N]> for Simd<T, N>
where
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
T: SimdElement,
{
- fn from(array: [T; LANES]) -> Self {
- Self(array)
+ fn from(array: [T; N]) -> Self {
+ Self::from_array(array)
}
}
-impl<T, const LANES: usize> From<Simd<T, LANES>> for [T; LANES]
+impl<T, const N: usize> From<Simd<T, N>> for [T; N]
where
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
T: SimdElement,
{
- fn from(vector: Simd<T, LANES>) -> Self {
+ fn from(vector: Simd<T, N>) -> Self {
vector.to_array()
}
}
+impl<T, const N: usize> TryFrom<&[T]> for Simd<T, N>
+where
+ LaneCount<N>: SupportedLaneCount,
+ T: SimdElement,
+{
+ type Error = core::array::TryFromSliceError;
+
+ fn try_from(slice: &[T]) -> Result<Self, core::array::TryFromSliceError> {
+ Ok(Self::from_array(slice.try_into()?))
+ }
+}
+
+impl<T, const N: usize> TryFrom<&mut [T]> for Simd<T, N>
+where
+ LaneCount<N>: SupportedLaneCount,
+ T: SimdElement,
+{
+ type Error = core::array::TryFromSliceError;
+
+ fn try_from(slice: &mut [T]) -> Result<Self, core::array::TryFromSliceError> {
+ Ok(Self::from_array(slice.try_into()?))
+ }
+}
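A usage sketch for the new `TryFrom` impls; unlike `from_slice`, the slice length must equal `N` exactly or a `TryFromSliceError` is returned (nightly assumed):

```rust
#![feature(portable_simd)]
use core::simd::u32x4;

fn main() {
    let exact: &[u32] = &[1, 2, 3, 4];
    assert!(u32x4::try_from(exact).is_ok());

    // A longer (or shorter) slice is an error, not a prefix read.
    let too_long: &[u32] = &[1, 2, 3, 4, 5];
    assert!(u32x4::try_from(too_long).is_err());
}
```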
+
mod sealed {
pub trait Sealed {}
}
@@ -740,3 +1001,27 @@ impl Sealed for f64 {}
unsafe impl SimdElement for f64 {
type Mask = i64;
}
+
+impl<T> Sealed for *const T {}
+
+// Safety: (thin) const pointers are valid SIMD element types, and are supported by this API
+//
+// Fat pointers may be supported in the future.
+unsafe impl<T> SimdElement for *const T
+where
+ T: core::ptr::Pointee<Metadata = ()>,
+{
+ type Mask = isize;
+}
+
+impl<T> Sealed for *mut T {}
+
+// Safety: (thin) mut pointers are valid SIMD element types, and are supported by this API
+//
+// Fat pointers may be supported in the future.
+unsafe impl<T> SimdElement for *mut T
+where
+ T: core::ptr::Pointee<Metadata = ()>,
+{
+ type Mask = isize;
+}
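A small sketch of what these pointer-element impls enable, together with the `SimdConstPtr` operations defined elsewhere in this crate (nightly assumed):

```rust
#![feature(portable_simd)]
use core::simd::{Simd, SimdConstPtr};

fn main() {
    let data = [10u32, 11, 12, 13];
    let offsets = Simd::from_array([3, 1, 0, 2]);
    // Vectors of thin pointers are now first-class SIMD values.
    let ptrs = Simd::splat(data.as_ptr()).wrapping_add(offsets);
    // The mask type for pointer elements is isize, per `type Mask = isize`.
    assert!(!ptrs.is_null().any());
    // Safety: every pointer targets an in-bounds element of `data`.
    let gathered = unsafe { Simd::gather_ptr(ptrs) };
    assert_eq!(gathered.to_array(), [13, 11, 10, 12]);
}
```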
diff --git a/library/portable-simd/crates/core_simd/src/vector/float.rs b/library/portable-simd/crates/core_simd/src/vector/float.rs
deleted file mode 100644
index f836c99b1..000000000
--- a/library/portable-simd/crates/core_simd/src/vector/float.rs
+++ /dev/null
@@ -1,24 +0,0 @@
-#![allow(non_camel_case_types)]
-
-use crate::simd::Simd;
-
-/// A 64-bit SIMD vector with two elements of type `f32`.
-pub type f32x2 = Simd<f32, 2>;
-
-/// A 128-bit SIMD vector with four elements of type `f32`.
-pub type f32x4 = Simd<f32, 4>;
-
-/// A 256-bit SIMD vector with eight elements of type `f32`.
-pub type f32x8 = Simd<f32, 8>;
-
-/// A 512-bit SIMD vector with 16 elements of type `f32`.
-pub type f32x16 = Simd<f32, 16>;
-
-/// A 128-bit SIMD vector with two elements of type `f64`.
-pub type f64x2 = Simd<f64, 2>;
-
-/// A 256-bit SIMD vector with four elements of type `f64`.
-pub type f64x4 = Simd<f64, 4>;
-
-/// A 512-bit SIMD vector with eight elements of type `f64`.
-pub type f64x8 = Simd<f64, 8>;
diff --git a/library/portable-simd/crates/core_simd/src/vector/int.rs b/library/portable-simd/crates/core_simd/src/vector/int.rs
deleted file mode 100644
index 20e56c7dc..000000000
--- a/library/portable-simd/crates/core_simd/src/vector/int.rs
+++ /dev/null
@@ -1,63 +0,0 @@
-#![allow(non_camel_case_types)]
-
-use crate::simd::Simd;
-
-/// A SIMD vector with two elements of type `isize`.
-pub type isizex2 = Simd<isize, 2>;
-
-/// A SIMD vector with four elements of type `isize`.
-pub type isizex4 = Simd<isize, 4>;
-
-/// A SIMD vector with eight elements of type `isize`.
-pub type isizex8 = Simd<isize, 8>;
-
-/// A 32-bit SIMD vector with two elements of type `i16`.
-pub type i16x2 = Simd<i16, 2>;
-
-/// A 64-bit SIMD vector with four elements of type `i16`.
-pub type i16x4 = Simd<i16, 4>;
-
-/// A 128-bit SIMD vector with eight elements of type `i16`.
-pub type i16x8 = Simd<i16, 8>;
-
-/// A 256-bit SIMD vector with 16 elements of type `i16`.
-pub type i16x16 = Simd<i16, 16>;
-
-/// A 512-bit SIMD vector with 32 elements of type `i16`.
-pub type i16x32 = Simd<i16, 32>;
-
-/// A 64-bit SIMD vector with two elements of type `i32`.
-pub type i32x2 = Simd<i32, 2>;
-
-/// A 128-bit SIMD vector with four elements of type `i32`.
-pub type i32x4 = Simd<i32, 4>;
-
-/// A 256-bit SIMD vector with eight elements of type `i32`.
-pub type i32x8 = Simd<i32, 8>;
-
-/// A 512-bit SIMD vector with 16 elements of type `i32`.
-pub type i32x16 = Simd<i32, 16>;
-
-/// A 128-bit SIMD vector with two elements of type `i64`.
-pub type i64x2 = Simd<i64, 2>;
-
-/// A 256-bit SIMD vector with four elements of type `i64`.
-pub type i64x4 = Simd<i64, 4>;
-
-/// A 512-bit SIMD vector with eight elements of type `i64`.
-pub type i64x8 = Simd<i64, 8>;
-
-/// A 32-bit SIMD vector with four elements of type `i8`.
-pub type i8x4 = Simd<i8, 4>;
-
-/// A 64-bit SIMD vector with eight elements of type `i8`.
-pub type i8x8 = Simd<i8, 8>;
-
-/// A 128-bit SIMD vector with 16 elements of type `i8`.
-pub type i8x16 = Simd<i8, 16>;
-
-/// A 256-bit SIMD vector with 32 elements of type `i8`.
-pub type i8x32 = Simd<i8, 32>;
-
-/// A 512-bit SIMD vector with 64 elements of type `i8`.
-pub type i8x64 = Simd<i8, 64>;
diff --git a/library/portable-simd/crates/core_simd/src/vector/ptr.rs b/library/portable-simd/crates/core_simd/src/vector/ptr.rs
deleted file mode 100644
index fa756344d..000000000
--- a/library/portable-simd/crates/core_simd/src/vector/ptr.rs
+++ /dev/null
@@ -1,51 +0,0 @@
-//! Private implementation details of public gather/scatter APIs.
-use crate::simd::intrinsics;
-use crate::simd::{LaneCount, Simd, SupportedLaneCount};
-
-/// A vector of *const T.
-#[derive(Debug, Copy, Clone)]
-#[repr(simd)]
-pub(crate) struct SimdConstPtr<T, const LANES: usize>([*const T; LANES]);
-
-impl<T, const LANES: usize> SimdConstPtr<T, LANES>
-where
- LaneCount<LANES>: SupportedLaneCount,
- T: Sized,
-{
- #[inline]
- #[must_use]
- pub fn splat(ptr: *const T) -> Self {
- Self([ptr; LANES])
- }
-
- #[inline]
- #[must_use]
- pub fn wrapping_add(self, addend: Simd<usize, LANES>) -> Self {
- // Safety: this intrinsic doesn't have a precondition
- unsafe { intrinsics::simd_arith_offset(self, addend) }
- }
-}
-
-/// A vector of *mut T. Be very careful around potential aliasing.
-#[derive(Debug, Copy, Clone)]
-#[repr(simd)]
-pub(crate) struct SimdMutPtr<T, const LANES: usize>([*mut T; LANES]);
-
-impl<T, const LANES: usize> SimdMutPtr<T, LANES>
-where
- LaneCount<LANES>: SupportedLaneCount,
- T: Sized,
-{
- #[inline]
- #[must_use]
- pub fn splat(ptr: *mut T) -> Self {
- Self([ptr; LANES])
- }
-
- #[inline]
- #[must_use]
- pub fn wrapping_add(self, addend: Simd<usize, LANES>) -> Self {
- // Safety: this intrinsic doesn't have a precondition
- unsafe { intrinsics::simd_arith_offset(self, addend) }
- }
-}
diff --git a/library/portable-simd/crates/core_simd/src/vector/uint.rs b/library/portable-simd/crates/core_simd/src/vector/uint.rs
deleted file mode 100644
index b4a69c443..000000000
--- a/library/portable-simd/crates/core_simd/src/vector/uint.rs
+++ /dev/null
@@ -1,63 +0,0 @@
-#![allow(non_camel_case_types)]
-
-use crate::simd::Simd;
-
-/// A SIMD vector with two elements of type `usize`.
-pub type usizex2 = Simd<usize, 2>;
-
-/// A SIMD vector with four elements of type `usize`.
-pub type usizex4 = Simd<usize, 4>;
-
-/// A SIMD vector with eight elements of type `usize`.
-pub type usizex8 = Simd<usize, 8>;
-
-/// A 32-bit SIMD vector with two elements of type `u16`.
-pub type u16x2 = Simd<u16, 2>;
-
-/// A 64-bit SIMD vector with four elements of type `u16`.
-pub type u16x4 = Simd<u16, 4>;
-
-/// A 128-bit SIMD vector with eight elements of type `u16`.
-pub type u16x8 = Simd<u16, 8>;
-
-/// A 256-bit SIMD vector with 16 elements of type `u16`.
-pub type u16x16 = Simd<u16, 16>;
-
-/// A 512-bit SIMD vector with 32 elements of type `u16`.
-pub type u16x32 = Simd<u16, 32>;
-
-/// A 64-bit SIMD vector with two elements of type `u32`.
-pub type u32x2 = Simd<u32, 2>;
-
-/// A 128-bit SIMD vector with four elements of type `u32`.
-pub type u32x4 = Simd<u32, 4>;
-
-/// A 256-bit SIMD vector with eight elements of type `u32`.
-pub type u32x8 = Simd<u32, 8>;
-
-/// A 512-bit SIMD vector with 16 elements of type `u32`.
-pub type u32x16 = Simd<u32, 16>;
-
-/// A 128-bit SIMD vector with two elements of type `u64`.
-pub type u64x2 = Simd<u64, 2>;
-
-/// A 256-bit SIMD vector with four elements of type `u64`.
-pub type u64x4 = Simd<u64, 4>;
-
-/// A 512-bit SIMD vector with eight elements of type `u64`.
-pub type u64x8 = Simd<u64, 8>;
-
-/// A 32-bit SIMD vector with four elements of type `u8`.
-pub type u8x4 = Simd<u8, 4>;
-
-/// A 64-bit SIMD vector with eight elements of type `u8`.
-pub type u8x8 = Simd<u8, 8>;
-
-/// A 128-bit SIMD vector with 16 elements of type `u8`.
-pub type u8x16 = Simd<u8, 16>;
-
-/// A 256-bit SIMD vector with 32 elements of type `u8`.
-pub type u8x32 = Simd<u8, 32>;
-
-/// A 512-bit SIMD vector with 64 elements of type `u8`.
-pub type u8x64 = Simd<u8, 64>;