Diffstat (limited to 'vendor/packed_simd_2/src/codegen')
 vendor/packed_simd_2/src/codegen/bit_manip.rs | 17
 vendor/packed_simd_2/src/codegen/llvm.rs | 207
 vendor/packed_simd_2/src/codegen/math.rs | 2
 vendor/packed_simd_2/src/codegen/math/float.rs | 30
 vendor/packed_simd_2/src/codegen/math/float/abs.rs | 2
 vendor/packed_simd_2/src/codegen/math/float/cos.rs | 2
 vendor/packed_simd_2/src/codegen/math/float/cos_pi.rs | 2
 vendor/packed_simd_2/src/codegen/math/float/exp.rs | 2
 vendor/packed_simd_2/src/codegen/math/float/ln.rs | 2
 vendor/packed_simd_2/src/codegen/math/float/macros.rs | 133
 vendor/packed_simd_2/src/codegen/math/float/mul_add.rs | 2
 vendor/packed_simd_2/src/codegen/math/float/mul_adde.rs | 10
 vendor/packed_simd_2/src/codegen/math/float/powf.rs | 2
 vendor/packed_simd_2/src/codegen/math/float/sin.rs | 2
 vendor/packed_simd_2/src/codegen/math/float/sin_cos_pi.rs | 23
 vendor/packed_simd_2/src/codegen/math/float/sin_pi.rs | 2
 vendor/packed_simd_2/src/codegen/math/float/sqrt.rs | 2
 vendor/packed_simd_2/src/codegen/math/float/sqrte.rs | 2
 vendor/packed_simd_2/src/codegen/math/float/tanh.rs | 8
 vendor/packed_simd_2/src/codegen/pointer_sized_int.rs | 24
 vendor/packed_simd_2/src/codegen/reductions.rs | 2
 vendor/packed_simd_2/src/codegen/reductions/mask.rs | 6
 vendor/packed_simd_2/src/codegen/reductions/mask/aarch64.rs | 38
 vendor/packed_simd_2/src/codegen/reductions/mask/arm.rs | 26
 vendor/packed_simd_2/src/codegen/reductions/mask/fallback.rs | 4
 vendor/packed_simd_2/src/codegen/reductions/mask/x86.rs | 70
 vendor/packed_simd_2/src/codegen/reductions/mask/x86/avx.rs | 10
 vendor/packed_simd_2/src/codegen/reductions/mask/x86/sse.rs | 3
 vendor/packed_simd_2/src/codegen/reductions/mask/x86/sse2.rs | 6
 vendor/packed_simd_2/src/codegen/shuffle.rs | 4
 vendor/packed_simd_2/src/codegen/shuffle1_dyn.rs | 25
 vendor/packed_simd_2/src/codegen/swap_bytes.rs | 52
 vendor/packed_simd_2/src/codegen/vPtr.rs | 2
 vendor/packed_simd_2/src/codegen/vSize.rs | 33
 34 files changed, 316 insertions(+), 441 deletions(-)
diff --git a/vendor/packed_simd_2/src/codegen/bit_manip.rs b/vendor/packed_simd_2/src/codegen/bit_manip.rs
index 83c7d1987..32d8d717a 100644
--- a/vendor/packed_simd_2/src/codegen/bit_manip.rs
+++ b/vendor/packed_simd_2/src/codegen/bit_manip.rs
@@ -1,7 +1,7 @@
//! LLVM bit manipulation intrinsics.
#[rustfmt::skip]
-use crate::*;
+pub(crate) use crate::*;
#[allow(improper_ctypes, dead_code)]
extern "C" {
@@ -147,7 +147,7 @@ extern "C" {
fn ctpop_u128x4(x: u128x4) -> u128x4;
}
-crate trait BitManip {
+pub(crate) trait BitManip {
fn ctpop(self) -> Self;
fn ctlz(self) -> Self;
fn cttz(self) -> Self;
@@ -212,8 +212,7 @@ macro_rules! impl_bit_manip {
fn ctpop(self) -> Self {
let mut ones = self;
for i in 0..Self::lanes() {
- ones = ones
- .replace(i, self.extract(i).count_ones() as $scalar);
+ ones = ones.replace(i, self.extract(i).count_ones() as $scalar);
}
ones
}
@@ -222,10 +221,7 @@ macro_rules! impl_bit_manip {
fn ctlz(self) -> Self {
let mut lz = self;
for i in 0..Self::lanes() {
- lz = lz.replace(
- i,
- self.extract(i).leading_zeros() as $scalar,
- );
+ lz = lz.replace(i, self.extract(i).leading_zeros() as $scalar);
}
lz
}
@@ -234,10 +230,7 @@ macro_rules! impl_bit_manip {
fn cttz(self) -> Self {
let mut tz = self;
for i in 0..Self::lanes() {
- tz = tz.replace(
- i,
- self.extract(i).trailing_zeros() as $scalar,
- );
+ tz = tz.replace(i, self.extract(i).trailing_zeros() as $scalar);
}
tz
}
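Note: the reformatted bodies above are the crate's scalar fallback, which maps `count_ones`/`leading_zeros`/`trailing_zeros` over each lane when no LLVM intrinsic fits. A minimal sketch of that pattern on plain arrays, with indexing standing in for the crate's `extract`/`replace` lane accessors:

    fn ctpop4(x: [u32; 4]) -> [u32; 4] {
        let mut ones = x;
        for i in 0..4 {
            // population count per lane, exactly what the macro emits per $scalar
            ones[i] = x[i].count_ones();
        }
        ones
    }

    fn main() {
        assert_eq!(ctpop4([0b1011, 0, u32::MAX, 1]), [3, 0, 32, 1]);
    }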
diff --git a/vendor/packed_simd_2/src/codegen/llvm.rs b/vendor/packed_simd_2/src/codegen/llvm.rs
index 93c6ce6b7..b4c09849b 100644
--- a/vendor/packed_simd_2/src/codegen/llvm.rs
+++ b/vendor/packed_simd_2/src/codegen/llvm.rs
@@ -7,101 +7,122 @@ use crate::sealed::Simd;
// Shuffle intrinsics: expanded in users' crates, therefore public.
extern "platform-intrinsic" {
- // FIXME: Passing this intrinsics an `idx` array with an index that is
- // out-of-bounds will produce a monomorphization-time error.
- // https://github.com/rust-lang-nursery/packed_simd/issues/21
- #[rustc_args_required_const(2)]
- pub fn simd_shuffle2<T, U>(x: T, y: T, idx: [u32; 2]) -> U
- where
- T: Simd,
- <T as Simd>::Element: Shuffle<[u32; 2], Output = U>;
-
- #[rustc_args_required_const(2)]
- pub fn simd_shuffle4<T, U>(x: T, y: T, idx: [u32; 4]) -> U
- where
- T: Simd,
- <T as Simd>::Element: Shuffle<[u32; 4], Output = U>;
-
- #[rustc_args_required_const(2)]
- pub fn simd_shuffle8<T, U>(x: T, y: T, idx: [u32; 8]) -> U
- where
- T: Simd,
- <T as Simd>::Element: Shuffle<[u32; 8], Output = U>;
-
- #[rustc_args_required_const(2)]
- pub fn simd_shuffle16<T, U>(x: T, y: T, idx: [u32; 16]) -> U
- where
- T: Simd,
- <T as Simd>::Element: Shuffle<[u32; 16], Output = U>;
-
- #[rustc_args_required_const(2)]
- pub fn simd_shuffle32<T, U>(x: T, y: T, idx: [u32; 32]) -> U
- where
- T: Simd,
- <T as Simd>::Element: Shuffle<[u32; 32], Output = U>;
-
- #[rustc_args_required_const(2)]
- pub fn simd_shuffle64<T, U>(x: T, y: T, idx: [u32; 64]) -> U
- where
- T: Simd,
- <T as Simd>::Element: Shuffle<[u32; 64], Output = U>;
+ pub fn simd_shuffle2<T, U>(x: T, y: T, idx: [u32; 2]) -> U;
+ pub fn simd_shuffle4<T, U>(x: T, y: T, idx: [u32; 4]) -> U;
+ pub fn simd_shuffle8<T, U>(x: T, y: T, idx: [u32; 8]) -> U;
+ pub fn simd_shuffle16<T, U>(x: T, y: T, idx: [u32; 16]) -> U;
+ pub fn simd_shuffle32<T, U>(x: T, y: T, idx: [u32; 32]) -> U;
+ pub fn simd_shuffle64<T, U>(x: T, y: T, idx: [u32; 64]) -> U;
}
-pub use self::simd_shuffle16 as __shuffle_vector16;
-pub use self::simd_shuffle2 as __shuffle_vector2;
-pub use self::simd_shuffle32 as __shuffle_vector32;
-pub use self::simd_shuffle4 as __shuffle_vector4;
-pub use self::simd_shuffle64 as __shuffle_vector64;
-pub use self::simd_shuffle8 as __shuffle_vector8;
+#[allow(clippy::missing_safety_doc)]
+#[inline]
+pub unsafe fn __shuffle_vector2<const IDX: [u32; 2], T, U>(x: T, y: T) -> U
+where
+ T: Simd,
+ <T as Simd>::Element: Shuffle<[u32; 2], Output = U>,
+{
+ simd_shuffle2(x, y, IDX)
+}
+
+#[allow(clippy::missing_safety_doc)]
+#[inline]
+pub unsafe fn __shuffle_vector4<const IDX: [u32; 4], T, U>(x: T, y: T) -> U
+where
+ T: Simd,
+ <T as Simd>::Element: Shuffle<[u32; 4], Output = U>,
+{
+ simd_shuffle4(x, y, IDX)
+}
+
+#[allow(clippy::missing_safety_doc)]
+#[inline]
+pub unsafe fn __shuffle_vector8<const IDX: [u32; 8], T, U>(x: T, y: T) -> U
+where
+ T: Simd,
+ <T as Simd>::Element: Shuffle<[u32; 8], Output = U>,
+{
+ simd_shuffle8(x, y, IDX)
+}
+
+#[allow(clippy::missing_safety_doc)]
+#[inline]
+pub unsafe fn __shuffle_vector16<const IDX: [u32; 16], T, U>(x: T, y: T) -> U
+where
+ T: Simd,
+ <T as Simd>::Element: Shuffle<[u32; 16], Output = U>,
+{
+ simd_shuffle16(x, y, IDX)
+}
+
+#[allow(clippy::missing_safety_doc)]
+#[inline]
+pub unsafe fn __shuffle_vector32<const IDX: [u32; 32], T, U>(x: T, y: T) -> U
+where
+ T: Simd,
+ <T as Simd>::Element: Shuffle<[u32; 32], Output = U>,
+{
+ simd_shuffle32(x, y, IDX)
+}
+
+#[allow(clippy::missing_safety_doc)]
+#[inline]
+pub unsafe fn __shuffle_vector64<const IDX: [u32; 64], T, U>(x: T, y: T) -> U
+where
+ T: Simd,
+ <T as Simd>::Element: Shuffle<[u32; 64], Output = U>,
+{
+ simd_shuffle64(x, y, IDX)
+}
extern "platform-intrinsic" {
- crate fn simd_eq<T, U>(x: T, y: T) -> U;
- crate fn simd_ne<T, U>(x: T, y: T) -> U;
- crate fn simd_lt<T, U>(x: T, y: T) -> U;
- crate fn simd_le<T, U>(x: T, y: T) -> U;
- crate fn simd_gt<T, U>(x: T, y: T) -> U;
- crate fn simd_ge<T, U>(x: T, y: T) -> U;
-
- crate fn simd_insert<T, U>(x: T, idx: u32, val: U) -> T;
- crate fn simd_extract<T, U>(x: T, idx: u32) -> U;
-
- crate fn simd_cast<T, U>(x: T) -> U;
-
- crate fn simd_add<T>(x: T, y: T) -> T;
- crate fn simd_sub<T>(x: T, y: T) -> T;
- crate fn simd_mul<T>(x: T, y: T) -> T;
- crate fn simd_div<T>(x: T, y: T) -> T;
- crate fn simd_rem<T>(x: T, y: T) -> T;
- crate fn simd_shl<T>(x: T, y: T) -> T;
- crate fn simd_shr<T>(x: T, y: T) -> T;
- crate fn simd_and<T>(x: T, y: T) -> T;
- crate fn simd_or<T>(x: T, y: T) -> T;
- crate fn simd_xor<T>(x: T, y: T) -> T;
-
- crate fn simd_reduce_add_unordered<T, U>(x: T) -> U;
- crate fn simd_reduce_mul_unordered<T, U>(x: T) -> U;
- crate fn simd_reduce_add_ordered<T, U>(x: T, acc: U) -> U;
- crate fn simd_reduce_mul_ordered<T, U>(x: T, acc: U) -> U;
- crate fn simd_reduce_min<T, U>(x: T) -> U;
- crate fn simd_reduce_max<T, U>(x: T) -> U;
- crate fn simd_reduce_min_nanless<T, U>(x: T) -> U;
- crate fn simd_reduce_max_nanless<T, U>(x: T) -> U;
- crate fn simd_reduce_and<T, U>(x: T) -> U;
- crate fn simd_reduce_or<T, U>(x: T) -> U;
- crate fn simd_reduce_xor<T, U>(x: T) -> U;
- crate fn simd_reduce_all<T>(x: T) -> bool;
- crate fn simd_reduce_any<T>(x: T) -> bool;
-
- crate fn simd_select<M, T>(m: M, a: T, b: T) -> T;
-
- crate fn simd_fmin<T>(a: T, b: T) -> T;
- crate fn simd_fmax<T>(a: T, b: T) -> T;
-
- crate fn simd_fsqrt<T>(a: T) -> T;
- crate fn simd_fma<T>(a: T, b: T, c: T) -> T;
-
- crate fn simd_gather<T, P, M>(value: T, pointers: P, mask: M) -> T;
- crate fn simd_scatter<T, P, M>(value: T, pointers: P, mask: M);
-
- crate fn simd_bitmask<T, U>(value: T) -> U;
+ pub(crate) fn simd_eq<T, U>(x: T, y: T) -> U;
+ pub(crate) fn simd_ne<T, U>(x: T, y: T) -> U;
+ pub(crate) fn simd_lt<T, U>(x: T, y: T) -> U;
+ pub(crate) fn simd_le<T, U>(x: T, y: T) -> U;
+ pub(crate) fn simd_gt<T, U>(x: T, y: T) -> U;
+ pub(crate) fn simd_ge<T, U>(x: T, y: T) -> U;
+
+ pub(crate) fn simd_insert<T, U>(x: T, idx: u32, val: U) -> T;
+ pub(crate) fn simd_extract<T, U>(x: T, idx: u32) -> U;
+
+ pub(crate) fn simd_cast<T, U>(x: T) -> U;
+
+ pub(crate) fn simd_add<T>(x: T, y: T) -> T;
+ pub(crate) fn simd_sub<T>(x: T, y: T) -> T;
+ pub(crate) fn simd_mul<T>(x: T, y: T) -> T;
+ pub(crate) fn simd_div<T>(x: T, y: T) -> T;
+ pub(crate) fn simd_rem<T>(x: T, y: T) -> T;
+ pub(crate) fn simd_shl<T>(x: T, y: T) -> T;
+ pub(crate) fn simd_shr<T>(x: T, y: T) -> T;
+ pub(crate) fn simd_and<T>(x: T, y: T) -> T;
+ pub(crate) fn simd_or<T>(x: T, y: T) -> T;
+ pub(crate) fn simd_xor<T>(x: T, y: T) -> T;
+
+ pub(crate) fn simd_reduce_add_unordered<T, U>(x: T) -> U;
+ pub(crate) fn simd_reduce_mul_unordered<T, U>(x: T) -> U;
+ pub(crate) fn simd_reduce_add_ordered<T, U>(x: T, acc: U) -> U;
+ pub(crate) fn simd_reduce_mul_ordered<T, U>(x: T, acc: U) -> U;
+ pub(crate) fn simd_reduce_min<T, U>(x: T) -> U;
+ pub(crate) fn simd_reduce_max<T, U>(x: T) -> U;
+ pub(crate) fn simd_reduce_min_nanless<T, U>(x: T) -> U;
+ pub(crate) fn simd_reduce_max_nanless<T, U>(x: T) -> U;
+ pub(crate) fn simd_reduce_and<T, U>(x: T) -> U;
+ pub(crate) fn simd_reduce_or<T, U>(x: T) -> U;
+ pub(crate) fn simd_reduce_xor<T, U>(x: T) -> U;
+ pub(crate) fn simd_reduce_all<T>(x: T) -> bool;
+ pub(crate) fn simd_reduce_any<T>(x: T) -> bool;
+
+ pub(crate) fn simd_select<M, T>(m: M, a: T, b: T) -> T;
+
+ pub(crate) fn simd_fmin<T>(a: T, b: T) -> T;
+ pub(crate) fn simd_fmax<T>(a: T, b: T) -> T;
+
+ pub(crate) fn simd_fsqrt<T>(a: T) -> T;
+ pub(crate) fn simd_fma<T>(a: T, b: T, c: T) -> T;
+
+ pub(crate) fn simd_gather<T, P, M>(value: T, pointers: P, mask: M) -> T;
+ pub(crate) fn simd_scatter<T, P, M>(value: T, pointers: P, mask: M);
+
+ pub(crate) fn simd_bitmask<T, U>(value: T) -> U;
}
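Note: the substantive change in this file is the first hunk, which drops the deprecated `#[rustc_args_required_const(2)]` attribute in favor of `__shuffle_vector*` wrappers that take the index array as a const generic parameter, so the shuffle indices are compile-time constants by construction (array-valued const parameters still need nightly features). A stable-Rust sketch of the same idea, using scalar const generics and plain arrays in place of the SIMD vectors:

    fn shuffle2<const I0: usize, const I1: usize>(x: [u32; 2], y: [u32; 2]) -> [u32; 2] {
        // lanes 0..2 come from x and lanes 2..4 from y, as in simd_shuffle2
        let both = [x[0], x[1], y[0], y[1]];
        [both[I0], both[I1]]
    }

    fn main() {
        // selects lane 1 of x and lane 0 of y
        assert_eq!(shuffle2::<1, 2>([10, 20], [30, 40]), [20, 30]);
    }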
diff --git a/vendor/packed_simd_2/src/codegen/math.rs b/vendor/packed_simd_2/src/codegen/math.rs
index f3997c7f1..9a0ea7a4e 100644
--- a/vendor/packed_simd_2/src/codegen/math.rs
+++ b/vendor/packed_simd_2/src/codegen/math.rs
@@ -1,3 +1,3 @@
//! Vertical math operations
-crate mod float;
+pub(crate) mod float;
diff --git a/vendor/packed_simd_2/src/codegen/math/float.rs b/vendor/packed_simd_2/src/codegen/math/float.rs
index 3743b4990..10d21831f 100644
--- a/vendor/packed_simd_2/src/codegen/math/float.rs
+++ b/vendor/packed_simd_2/src/codegen/math/float.rs
@@ -2,18 +2,18 @@
#![allow(clippy::useless_transmute)]
#[macro_use]
-crate mod macros;
-crate mod abs;
-crate mod cos;
-crate mod cos_pi;
-crate mod exp;
-crate mod ln;
-crate mod mul_add;
-crate mod mul_adde;
-crate mod powf;
-crate mod sin;
-crate mod sin_cos_pi;
-crate mod sin_pi;
-crate mod sqrt;
-crate mod sqrte;
-crate mod tanh;
+pub(crate) mod macros;
+pub(crate) mod abs;
+pub(crate) mod cos;
+pub(crate) mod cos_pi;
+pub(crate) mod exp;
+pub(crate) mod ln;
+pub(crate) mod mul_add;
+pub(crate) mod mul_adde;
+pub(crate) mod powf;
+pub(crate) mod sin;
+pub(crate) mod sin_cos_pi;
+pub(crate) mod sin_pi;
+pub(crate) mod sqrt;
+pub(crate) mod sqrte;
+pub(crate) mod tanh;
diff --git a/vendor/packed_simd_2/src/codegen/math/float/abs.rs b/vendor/packed_simd_2/src/codegen/math/float/abs.rs
index bc4421f61..34aacc25b 100644
--- a/vendor/packed_simd_2/src/codegen/math/float/abs.rs
+++ b/vendor/packed_simd_2/src/codegen/math/float/abs.rs
@@ -5,7 +5,7 @@
use crate::*;
-crate trait Abs {
+pub(crate) trait Abs {
fn abs(self) -> Self;
}
diff --git a/vendor/packed_simd_2/src/codegen/math/float/cos.rs b/vendor/packed_simd_2/src/codegen/math/float/cos.rs
index 50f6c16da..dec390cb7 100644
--- a/vendor/packed_simd_2/src/codegen/math/float/cos.rs
+++ b/vendor/packed_simd_2/src/codegen/math/float/cos.rs
@@ -5,7 +5,7 @@
use crate::*;
-crate trait Cos {
+pub(crate) trait Cos {
fn cos(self) -> Self;
}
diff --git a/vendor/packed_simd_2/src/codegen/math/float/cos_pi.rs b/vendor/packed_simd_2/src/codegen/math/float/cos_pi.rs
index ebff5fd1c..e283280ee 100644
--- a/vendor/packed_simd_2/src/codegen/math/float/cos_pi.rs
+++ b/vendor/packed_simd_2/src/codegen/math/float/cos_pi.rs
@@ -5,7 +5,7 @@
use crate::*;
-crate trait CosPi {
+pub(crate) trait CosPi {
fn cos_pi(self) -> Self;
}
diff --git a/vendor/packed_simd_2/src/codegen/math/float/exp.rs b/vendor/packed_simd_2/src/codegen/math/float/exp.rs
index 00d10e9fa..a7b20580e 100644
--- a/vendor/packed_simd_2/src/codegen/math/float/exp.rs
+++ b/vendor/packed_simd_2/src/codegen/math/float/exp.rs
@@ -5,7 +5,7 @@
use crate::*;
-crate trait Exp {
+pub(crate) trait Exp {
fn exp(self) -> Self;
}
diff --git a/vendor/packed_simd_2/src/codegen/math/float/ln.rs b/vendor/packed_simd_2/src/codegen/math/float/ln.rs
index 88a5a6c6c..a5e38cb40 100644
--- a/vendor/packed_simd_2/src/codegen/math/float/ln.rs
+++ b/vendor/packed_simd_2/src/codegen/math/float/ln.rs
@@ -5,7 +5,7 @@
use crate::*;
-crate trait Ln {
+pub(crate) trait Ln {
fn ln(self) -> Self;
}
diff --git a/vendor/packed_simd_2/src/codegen/math/float/macros.rs b/vendor/packed_simd_2/src/codegen/math/float/macros.rs
index 02d0ca3f5..8daee1afe 100644
--- a/vendor/packed_simd_2/src/codegen/math/float/macros.rs
+++ b/vendor/packed_simd_2/src/codegen/math/float/macros.rs
@@ -1,7 +1,6 @@
//! Utility macros
#![allow(unused)]
-
macro_rules! impl_unary_ {
// implementation mapping 1:1
(vec | $trait_id:ident, $trait_method:ident, $vec_id:ident,
@@ -64,10 +63,8 @@ macro_rules! impl_unary_ {
let mut halves = U { vec: self }.halves;
- *halves.get_unchecked_mut(0) =
- transmute($fun(transmute(*halves.get_unchecked(0))));
- *halves.get_unchecked_mut(1) =
- transmute($fun(transmute(*halves.get_unchecked(1))));
+ *halves.get_unchecked_mut(0) = transmute($fun(transmute(*halves.get_unchecked(0))));
+ *halves.get_unchecked_mut(1) = transmute($fun(transmute(*halves.get_unchecked(1))));
U { halves }.vec
}
@@ -89,14 +86,10 @@ macro_rules! impl_unary_ {
let mut quarters = U { vec: self }.quarters;
- *quarters.get_unchecked_mut(0) =
- transmute($fun(transmute(*quarters.get_unchecked(0))));
- *quarters.get_unchecked_mut(1) =
- transmute($fun(transmute(*quarters.get_unchecked(1))));
- *quarters.get_unchecked_mut(2) =
- transmute($fun(transmute(*quarters.get_unchecked(2))));
- *quarters.get_unchecked_mut(3) =
- transmute($fun(transmute(*quarters.get_unchecked(3))));
+ *quarters.get_unchecked_mut(0) = transmute($fun(transmute(*quarters.get_unchecked(0))));
+ *quarters.get_unchecked_mut(1) = transmute($fun(transmute(*quarters.get_unchecked(1))));
+ *quarters.get_unchecked_mut(2) = transmute($fun(transmute(*quarters.get_unchecked(2))));
+ *quarters.get_unchecked_mut(3) = transmute($fun(transmute(*quarters.get_unchecked(3))));
U { quarters }.vec
}
@@ -137,43 +130,19 @@ macro_rules! gen_unary_impl_table {
impl_unary_!(gen | $trait_id, $trait_method, $vid, $fun);
};
($vid:ident[$sid:ident; $sc:expr]: $fun:ident) => {
- impl_unary_!(
- scalar | $trait_id,
- $trait_method,
- $vid,
- [$sid; $sc],
- $fun
- );
+ impl_unary_!(scalar | $trait_id, $trait_method, $vid, [$sid; $sc], $fun);
};
($vid:ident[s]: $fun:ident) => {
impl_unary_!(scalar | $trait_id, $trait_method, $vid, $fun);
};
($vid:ident[h => $vid_h:ident]: $fun:ident) => {
- impl_unary_!(
- halves | $trait_id,
- $trait_method,
- $vid,
- $vid_h,
- $fun
- );
+ impl_unary_!(halves | $trait_id, $trait_method, $vid, $vid_h, $fun);
};
($vid:ident[q => $vid_q:ident]: $fun:ident) => {
- impl_unary_!(
- quarter | $trait_id,
- $trait_method,
- $vid,
- $vid_q,
- $fun
- );
+ impl_unary_!(quarter | $trait_id, $trait_method, $vid, $vid_q, $fun);
};
($vid:ident[t => $vid_t:ident]: $fun:ident) => {
- impl_unary_!(
- twice | $trait_id,
- $trait_method,
- $vid,
- $vid_t,
- $fun
- );
+ impl_unary_!(twice | $trait_id, $trait_method, $vid, $vid_t, $fun);
};
}
};
@@ -188,11 +157,7 @@ macro_rules! impl_tertiary_ {
fn $trait_method(self, y: Self, z: Self) -> Self {
unsafe {
use crate::mem::transmute;
- transmute($fun(
- transmute(self),
- transmute(y),
- transmute(z),
- ))
+ transmute($fun(transmute(self), transmute(y), transmute(z)))
}
}
}
@@ -314,11 +279,8 @@ macro_rules! impl_tertiary_ {
let x_twice = U { vec: [self, uninitialized()] }.twice;
let y_twice = U { vec: [y, uninitialized()] }.twice;
let z_twice = U { vec: [z, uninitialized()] }.twice;
- let twice: $vect_id = transmute($fun(
- transmute(x_twice),
- transmute(y_twice),
- transmute(z_twice),
- ));
+ let twice: $vect_id =
+ transmute($fun(transmute(x_twice), transmute(y_twice), transmute(z_twice)));
*(U { twice }.vec.get_unchecked(0))
}
@@ -334,43 +296,19 @@ macro_rules! gen_tertiary_impl_table {
impl_tertiary_!(vec | $trait_id, $trait_method, $vid, $fun);
};
($vid:ident[$sid:ident; $sc:expr]: $fun:ident) => {
- impl_tertiary_!(
- scalar | $trait_id,
- $trait_method,
- $vid,
- [$sid; $sc],
- $fun
- );
+ impl_tertiary_!(scalar | $trait_id, $trait_method, $vid, [$sid; $sc], $fun);
};
($vid:ident[s]: $fun:ident) => {
impl_tertiary_!(scalar | $trait_id, $trait_method, $vid, $fun);
};
($vid:ident[h => $vid_h:ident]: $fun:ident) => {
- impl_tertiary_!(
- halves | $trait_id,
- $trait_method,
- $vid,
- $vid_h,
- $fun
- );
+ impl_tertiary_!(halves | $trait_id, $trait_method, $vid, $vid_h, $fun);
};
($vid:ident[q => $vid_q:ident]: $fun:ident) => {
- impl_tertiary_!(
- quarter | $trait_id,
- $trait_method,
- $vid,
- $vid_q,
- $fun
- );
+ impl_tertiary_!(quarter | $trait_id, $trait_method, $vid, $vid_q, $fun);
};
($vid:ident[t => $vid_t:ident]: $fun:ident) => {
- impl_tertiary_!(
- twice | $trait_id,
- $trait_method,
- $vid,
- $vid_t,
- $fun
- );
+ impl_tertiary_!(twice | $trait_id, $trait_method, $vid, $vid_t, $fun);
};
}
};
@@ -497,10 +435,7 @@ macro_rules! impl_binary_ {
let x_twice = U { vec: [self, uninitialized()] }.twice;
let y_twice = U { vec: [y, uninitialized()] }.twice;
- let twice: $vect_id = transmute($fun(
- transmute(x_twice),
- transmute(y_twice),
- ));
+ let twice: $vect_id = transmute($fun(transmute(x_twice), transmute(y_twice)));
*(U { twice }.vec.get_unchecked(0))
}
@@ -516,43 +451,19 @@ macro_rules! gen_binary_impl_table {
impl_binary_!(vec | $trait_id, $trait_method, $vid, $fun);
};
($vid:ident[$sid:ident; $sc:expr]: $fun:ident) => {
- impl_binary_!(
- scalar | $trait_id,
- $trait_method,
- $vid,
- [$sid; $sc],
- $fun
- );
+ impl_binary_!(scalar | $trait_id, $trait_method, $vid, [$sid; $sc], $fun);
};
($vid:ident[s]: $fun:ident) => {
impl_binary_!(scalar | $trait_id, $trait_method, $vid, $fun);
};
($vid:ident[h => $vid_h:ident]: $fun:ident) => {
- impl_binary_!(
- halves | $trait_id,
- $trait_method,
- $vid,
- $vid_h,
- $fun
- );
+ impl_binary_!(halves | $trait_id, $trait_method, $vid, $vid_h, $fun);
};
($vid:ident[q => $vid_q:ident]: $fun:ident) => {
- impl_binary_!(
- quarter | $trait_id,
- $trait_method,
- $vid,
- $vid_q,
- $fun
- );
+ impl_binary_!(quarter | $trait_id, $trait_method, $vid, $vid_q, $fun);
};
($vid:ident[t => $vid_t:ident]: $fun:ident) => {
- impl_binary_!(
- twice | $trait_id,
- $trait_method,
- $vid,
- $vid_t,
- $fun
- );
+ impl_binary_!(twice | $trait_id, $trait_method, $vid, $vid_t, $fun);
};
}
};
diff --git a/vendor/packed_simd_2/src/codegen/math/float/mul_add.rs b/vendor/packed_simd_2/src/codegen/math/float/mul_add.rs
index f48a57dc4..d37f30fa8 100644
--- a/vendor/packed_simd_2/src/codegen/math/float/mul_add.rs
+++ b/vendor/packed_simd_2/src/codegen/math/float/mul_add.rs
@@ -4,7 +4,7 @@ use crate::*;
// FIXME: 64-bit 1 element mul_add
-crate trait MulAdd {
+pub(crate) trait MulAdd {
fn mul_add(self, y: Self, z: Self) -> Self;
}
diff --git a/vendor/packed_simd_2/src/codegen/math/float/mul_adde.rs b/vendor/packed_simd_2/src/codegen/math/float/mul_adde.rs
index 8c41fb131..c0baeacec 100644
--- a/vendor/packed_simd_2/src/codegen/math/float/mul_adde.rs
+++ b/vendor/packed_simd_2/src/codegen/math/float/mul_adde.rs
@@ -3,7 +3,7 @@ use crate::*;
// FIXME: 64-bit 1 element mul_adde
-crate trait MulAddE {
+pub(crate) trait MulAddE {
fn mul_adde(self, y: Self, z: Self) -> Self;
}
@@ -38,13 +38,7 @@ macro_rules! impl_mul_adde {
#[cfg(not(target_arch = "s390x"))]
{
use crate::mem::transmute;
- unsafe {
- transmute($fn(
- transmute(self),
- transmute(y),
- transmute(z),
- ))
- }
+ unsafe { transmute($fn(transmute(self), transmute(y), transmute(z))) }
}
#[cfg(target_arch = "s390x")]
{
diff --git a/vendor/packed_simd_2/src/codegen/math/float/powf.rs b/vendor/packed_simd_2/src/codegen/math/float/powf.rs
index bc15067d7..89ca52e96 100644
--- a/vendor/packed_simd_2/src/codegen/math/float/powf.rs
+++ b/vendor/packed_simd_2/src/codegen/math/float/powf.rs
@@ -5,7 +5,7 @@
use crate::*;
-crate trait Powf {
+pub(crate) trait Powf {
fn powf(self, x: Self) -> Self;
}
diff --git a/vendor/packed_simd_2/src/codegen/math/float/sin.rs b/vendor/packed_simd_2/src/codegen/math/float/sin.rs
index 7b014d07d..d88141590 100644
--- a/vendor/packed_simd_2/src/codegen/math/float/sin.rs
+++ b/vendor/packed_simd_2/src/codegen/math/float/sin.rs
@@ -5,7 +5,7 @@
use crate::*;
-crate trait Sin {
+pub(crate) trait Sin {
fn sin(self) -> Self;
}
diff --git a/vendor/packed_simd_2/src/codegen/math/float/sin_cos_pi.rs b/vendor/packed_simd_2/src/codegen/math/float/sin_cos_pi.rs
index 0f1249ec8..b283d1111 100644
--- a/vendor/packed_simd_2/src/codegen/math/float/sin_cos_pi.rs
+++ b/vendor/packed_simd_2/src/codegen/math/float/sin_cos_pi.rs
@@ -5,7 +5,7 @@
use crate::*;
-crate trait SinCosPi: Sized {
+pub(crate) trait SinCosPi: Sized {
type Output;
fn sin_cos_pi(self) -> Self::Output;
}
@@ -85,17 +85,14 @@ macro_rules! impl_unary_t {
let halves = U { vec: self }.halves;
- let res_0: ($vid_h, $vid_h) =
- transmute($fun(transmute(*halves.get_unchecked(0))));
- let res_1: ($vid_h, $vid_h) =
- transmute($fun(transmute(*halves.get_unchecked(1))));
+ let res_0: ($vid_h, $vid_h) = transmute($fun(transmute(*halves.get_unchecked(0))));
+ let res_1: ($vid_h, $vid_h) = transmute($fun(transmute(*halves.get_unchecked(1))));
union R {
result: ($vid, $vid),
halves: ([$vid_h; 2], [$vid_h; 2]),
}
- R { halves: ([res_0.0, res_1.0], [res_0.1, res_1.1]) }
- .result
+ R { halves: ([res_0.0, res_1.0], [res_0.1, res_1.1]) }.result
}
}
}
@@ -114,14 +111,10 @@ macro_rules! impl_unary_t {
let quarters = U { vec: self }.quarters;
- let res_0: ($vid_q, $vid_q) =
- transmute($fun(transmute(*quarters.get_unchecked(0))));
- let res_1: ($vid_q, $vid_q) =
- transmute($fun(transmute(*quarters.get_unchecked(1))));
- let res_2: ($vid_q, $vid_q) =
- transmute($fun(transmute(*quarters.get_unchecked(2))));
- let res_3: ($vid_q, $vid_q) =
- transmute($fun(transmute(*quarters.get_unchecked(3))));
+ let res_0: ($vid_q, $vid_q) = transmute($fun(transmute(*quarters.get_unchecked(0))));
+ let res_1: ($vid_q, $vid_q) = transmute($fun(transmute(*quarters.get_unchecked(1))));
+ let res_2: ($vid_q, $vid_q) = transmute($fun(transmute(*quarters.get_unchecked(2))));
+ let res_3: ($vid_q, $vid_q) = transmute($fun(transmute(*quarters.get_unchecked(3))));
union R {
result: ($vid, $vid),
diff --git a/vendor/packed_simd_2/src/codegen/math/float/sin_pi.rs b/vendor/packed_simd_2/src/codegen/math/float/sin_pi.rs
index 72df98c93..0c8f6bb12 100644
--- a/vendor/packed_simd_2/src/codegen/math/float/sin_pi.rs
+++ b/vendor/packed_simd_2/src/codegen/math/float/sin_pi.rs
@@ -5,7 +5,7 @@
use crate::*;
-crate trait SinPi {
+pub(crate) trait SinPi {
fn sin_pi(self) -> Self;
}
diff --git a/vendor/packed_simd_2/src/codegen/math/float/sqrt.rs b/vendor/packed_simd_2/src/codegen/math/float/sqrt.rs
index 7ce31df62..67bb0a2a9 100644
--- a/vendor/packed_simd_2/src/codegen/math/float/sqrt.rs
+++ b/vendor/packed_simd_2/src/codegen/math/float/sqrt.rs
@@ -5,7 +5,7 @@
use crate::*;
-crate trait Sqrt {
+pub(crate) trait Sqrt {
fn sqrt(self) -> Self;
}
diff --git a/vendor/packed_simd_2/src/codegen/math/float/sqrte.rs b/vendor/packed_simd_2/src/codegen/math/float/sqrte.rs
index c1e379c34..58a1de1f4 100644
--- a/vendor/packed_simd_2/src/codegen/math/float/sqrte.rs
+++ b/vendor/packed_simd_2/src/codegen/math/float/sqrte.rs
@@ -6,7 +6,7 @@
use crate::llvm::simd_fsqrt;
use crate::*;
-crate trait Sqrte {
+pub(crate) trait Sqrte {
fn sqrte(self) -> Self;
}
diff --git a/vendor/packed_simd_2/src/codegen/math/float/tanh.rs b/vendor/packed_simd_2/src/codegen/math/float/tanh.rs
index 5220c7d10..2c0dd3dc3 100644
--- a/vendor/packed_simd_2/src/codegen/math/float/tanh.rs
+++ b/vendor/packed_simd_2/src/codegen/math/float/tanh.rs
@@ -5,12 +5,11 @@
use crate::*;
-crate trait Tanh {
+pub(crate) trait Tanh {
fn tanh(self) -> Self;
}
macro_rules! define_tanh {
-
($name:ident, $basetype:ty, $simdtype:ty, $lanes:expr, $trait:path) => {
fn $name(x: $simdtype) -> $simdtype {
use core::intrinsics::transmute;
@@ -31,8 +30,9 @@ macro_rules! define_tanh {
};
}
-// llvm does not seem to expose the hyperbolic versions of trigonometric functions;
-// we thus call the classical rust versions on all of them (which stem from cmath).
+// llvm does not seem to expose the hyperbolic versions of trigonometric
+// functions; we thus call the classical rust versions on all of them (which
+// stem from cmath).
define_tanh!(f32 => tanh_v2f32, f32x2, 2);
define_tanh!(f32 => tanh_v4f32, f32x4, 4);
define_tanh!(f32 => tanh_v8f32, f32x8, 8);
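Note: as the rewrapped comment says, there is no vector tanh intrinsic to lower to, so every lane goes through the scalar, cmath-derived implementation. A minimal sketch of that lane-by-lane fallback, using `f32::tanh` from std in place of the crate's `define_tanh!` plumbing:

    fn tanh_v4f32(x: [f32; 4]) -> [f32; 4] {
        let mut r = x;
        for i in 0..4 {
            r[i] = x[i].tanh(); // one scalar libm call per lane
        }
        r
    }

    fn main() {
        let y = tanh_v4f32([0.0, 1.0, -1.0, 10.0]);
        assert_eq!(y[0], 0.0);
        assert!(y[1] > 0.76 && y[2] < -0.76 && y[3] > 0.999);
    }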
diff --git a/vendor/packed_simd_2/src/codegen/pointer_sized_int.rs b/vendor/packed_simd_2/src/codegen/pointer_sized_int.rs
index 39f493d3b..55cbc297a 100644
--- a/vendor/packed_simd_2/src/codegen/pointer_sized_int.rs
+++ b/vendor/packed_simd_2/src/codegen/pointer_sized_int.rs
@@ -4,24 +4,24 @@ use cfg_if::cfg_if;
cfg_if! {
if #[cfg(target_pointer_width = "8")] {
- crate type isize_ = i8;
- crate type usize_ = u8;
+ pub(crate) type isize_ = i8;
+ pub(crate) type usize_ = u8;
} else if #[cfg(target_pointer_width = "16")] {
- crate type isize_ = i16;
- crate type usize_ = u16;
+ pub(crate) type isize_ = i16;
+ pub(crate) type usize_ = u16;
} else if #[cfg(target_pointer_width = "32")] {
- crate type isize_ = i32;
- crate type usize_ = u32;
+ pub(crate) type isize_ = i32;
+ pub(crate) type usize_ = u32;
} else if #[cfg(target_pointer_width = "64")] {
- crate type isize_ = i64;
- crate type usize_ = u64;
+ pub(crate) type isize_ = i64;
+ pub(crate) type usize_ = u64;
} else if #[cfg(target_pointer_width = "64")] {
- crate type isize_ = i64;
- crate type usize_ = u64;
+ pub(crate) type isize_ = i64;
+ pub(crate) type usize_ = u64;
} else if #[cfg(target_pointer_width = "128")] {
- crate type isize_ = i128;
- crate type usize_ = u128;
+ pub(crate) type isize_ = i128;
+ pub(crate) type usize_ = u128;
} else {
compile_error!("unsupported target_pointer_width");
}
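Note: the `cfg_if!` chain simply aliases `isize_`/`usize_` to the fixed-width integers matching the target's pointer width. A quick sanity check of that premise on a 64-bit target (sketch):

    #[cfg(target_pointer_width = "64")]
    fn main() {
        // the "64" branch above makes usize_ = u64, which matches usize exactly
        assert_eq!(core::mem::size_of::<usize>(), core::mem::size_of::<u64>());
    }
    #[cfg(not(target_pointer_width = "64"))]
    fn main() {}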
diff --git a/vendor/packed_simd_2/src/codegen/reductions.rs b/vendor/packed_simd_2/src/codegen/reductions.rs
index 7be4f5fab..302ca6d88 100644
--- a/vendor/packed_simd_2/src/codegen/reductions.rs
+++ b/vendor/packed_simd_2/src/codegen/reductions.rs
@@ -1 +1 @@
-crate mod mask;
+pub(crate) mod mask;
diff --git a/vendor/packed_simd_2/src/codegen/reductions/mask.rs b/vendor/packed_simd_2/src/codegen/reductions/mask.rs
index 97260c6d4..a78bcc563 100644
--- a/vendor/packed_simd_2/src/codegen/reductions/mask.rs
+++ b/vendor/packed_simd_2/src/codegen/reductions/mask.rs
@@ -1,17 +1,17 @@
//! Code generation workaround for `all()` mask horizontal reduction.
//!
-//! Works arround [LLVM bug 36702].
+//! Works around [LLVM bug 36702].
//!
//! [LLVM bug 36702]: https://bugs.llvm.org/show_bug.cgi?id=36702
#![allow(unused_macros)]
use crate::*;
-crate trait All: crate::marker::Sized {
+pub(crate) trait All: crate::marker::Sized {
unsafe fn all(self) -> bool;
}
-crate trait Any: crate::marker::Sized {
+pub(crate) trait Any: crate::marker::Sized {
unsafe fn any(self) -> bool;
}
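Note: whatever target-specific trick each backend uses, the `All`/`Any` impls must agree with the plain horizontal reductions: `all` is an AND across the mask lanes and `any` an OR. A portable sketch of that contract on a bool array:

    fn all(m: [bool; 4]) -> bool { m.iter().all(|&b| b) }
    fn any(m: [bool; 4]) -> bool { m.iter().any(|&b| b) }

    fn main() {
        assert!(all([true; 4]) && !all([true, true, false, true]));
        assert!(any([false, false, true, false]) && !any([false; 4]));
    }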
diff --git a/vendor/packed_simd_2/src/codegen/reductions/mask/aarch64.rs b/vendor/packed_simd_2/src/codegen/reductions/mask/aarch64.rs
index e9586eace..b2db52c89 100644
--- a/vendor/packed_simd_2/src/codegen/reductions/mask/aarch64.rs
+++ b/vendor/packed_simd_2/src/codegen/reductions/mask/aarch64.rs
@@ -19,7 +19,7 @@ macro_rules! aarch64_128_neon_impl {
$vmax(crate::mem::transmute(self)) != 0
}
}
- }
+ };
}
/// 64-bit wide vectors
@@ -35,9 +35,7 @@ macro_rules! aarch64_64_neon_impl {
halves: ($id, $id),
vec: $vec128,
}
- U {
- halves: (self, self),
- }.vec.all()
+ U { halves: (self, self) }.vec.all()
}
}
impl Any for $id {
@@ -48,9 +46,7 @@ macro_rules! aarch64_64_neon_impl {
halves: ($id, $id),
vec: $vec128,
}
- U {
- halves: (self, self),
- }.vec.any()
+ U { halves: (self, self) }.vec.any()
}
}
};
@@ -59,13 +55,27 @@ macro_rules! aarch64_64_neon_impl {
/// Mask reduction implementation for `aarch64` targets
macro_rules! impl_mask_reductions {
// 64-bit wide masks
- (m8x8) => { aarch64_64_neon_impl!(m8x8, m8x16); };
- (m16x4) => { aarch64_64_neon_impl!(m16x4, m16x8); };
- (m32x2) => { aarch64_64_neon_impl!(m32x2, m32x4); };
+ (m8x8) => {
+ aarch64_64_neon_impl!(m8x8, m8x16);
+ };
+ (m16x4) => {
+ aarch64_64_neon_impl!(m16x4, m16x8);
+ };
+ (m32x2) => {
+ aarch64_64_neon_impl!(m32x2, m32x4);
+ };
// 128-bit wide masks
- (m8x16) => { aarch64_128_neon_impl!(m8x16, vminvq_u8, vmaxvq_u8); };
- (m16x8) => { aarch64_128_neon_impl!(m16x8, vminvq_u16, vmaxvq_u16); };
- (m32x4) => { aarch64_128_neon_impl!(m32x4, vminvq_u32, vmaxvq_u32); };
+ (m8x16) => {
+ aarch64_128_neon_impl!(m8x16, vminvq_u8, vmaxvq_u8);
+ };
+ (m16x8) => {
+ aarch64_128_neon_impl!(m16x8, vminvq_u16, vmaxvq_u16);
+ };
+ (m32x4) => {
+ aarch64_128_neon_impl!(m32x4, vminvq_u32, vmaxvq_u32);
+ };
// Fallback to LLVM's default code-generation:
- ($id:ident) => { fallback_impl!($id); };
+ ($id:ident) => {
+ fallback_impl!($id);
+ };
}
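Note: the aarch64 impls rest on the mask representation: a true lane is all-ones (0xFF for `m8x16`) and a false lane is zero, so the across-lane minimum (`vminvq_u8`) is nonzero iff every lane is true, and the across-lane maximum (`vmaxvq_u8`) is nonzero iff any lane is. A portable sketch of the same test:

    fn all_m8x16(m: [u8; 16]) -> bool {
        m.iter().copied().min().unwrap() != 0 // stands in for vminvq_u8
    }
    fn any_m8x16(m: [u8; 16]) -> bool {
        m.iter().copied().max().unwrap() != 0 // stands in for vmaxvq_u8
    }

    fn main() {
        let mut m = [0xFF; 16];
        assert!(all_m8x16(m) && any_m8x16(m));
        m[7] = 0; // one false lane
        assert!(!all_m8x16(m) && any_m8x16(m));
        assert!(!any_m8x16([0; 16]));
    }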
diff --git a/vendor/packed_simd_2/src/codegen/reductions/mask/arm.rs b/vendor/packed_simd_2/src/codegen/reductions/mask/arm.rs
index 1987af7a9..41c3cbc58 100644
--- a/vendor/packed_simd_2/src/codegen/reductions/mask/arm.rs
+++ b/vendor/packed_simd_2/src/codegen/reductions/mask/arm.rs
@@ -15,10 +15,7 @@ macro_rules! arm_128_v7_neon_impl {
vec: $id,
}
let halves = U { vec: self }.halves;
- let h: $half = transmute($vpmin(
- transmute(halves.0),
- transmute(halves.1),
- ));
+ let h: $half = transmute($vpmin(transmute(halves.0), transmute(halves.1)));
h.all()
}
}
@@ -33,10 +30,7 @@ macro_rules! arm_128_v7_neon_impl {
vec: $id,
}
let halves = U { vec: self }.halves;
- let h: $half = transmute($vpmax(
- transmute(halves.0),
- transmute(halves.1),
- ));
+ let h: $half = transmute($vpmax(transmute(halves.0), transmute(halves.1)));
h.any()
}
}
@@ -46,9 +40,17 @@ macro_rules! arm_128_v7_neon_impl {
/// Mask reduction implementation for `arm` targets
macro_rules! impl_mask_reductions {
// 128-bit wide masks
- (m8x16) => { arm_128_v7_neon_impl!(m8x16, m8x8, vpmin_u8, vpmax_u8); };
- (m16x8) => { arm_128_v7_neon_impl!(m16x8, m16x4, vpmin_u16, vpmax_u16); };
- (m32x4) => { arm_128_v7_neon_impl!(m32x4, m32x2, vpmin_u32, vpmax_u32); };
+ (m8x16) => {
+ arm_128_v7_neon_impl!(m8x16, m8x8, vpmin_u8, vpmax_u8);
+ };
+ (m16x8) => {
+ arm_128_v7_neon_impl!(m16x8, m16x4, vpmin_u16, vpmax_u16);
+ };
+ (m32x4) => {
+ arm_128_v7_neon_impl!(m32x4, m32x2, vpmin_u32, vpmax_u32);
+ };
// Fallback to LLVM's default code-generation:
- ($id:ident) => { fallback_impl!($id); };
+ ($id:ident) => {
+ fallback_impl!($id);
+ };
}
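Note: ARMv7 NEON has no across-lane reduction, so the 128-bit impls use one pairwise `vpmin`/`vpmax` to fold the two 64-bit halves into a single 64-bit mask and let the 64-bit impl finish. A portable model of the pairwise-min step, matching `vpmin_u8` (result lanes 0..4 fold `a`, lanes 4..8 fold `b`):

    use core::convert::TryInto;

    fn pairwise_min(a: [u8; 8], b: [u8; 8]) -> [u8; 8] {
        let mut out = [0u8; 8];
        for i in 0..4 {
            out[i] = a[2 * i].min(a[2 * i + 1]);
            out[4 + i] = b[2 * i].min(b[2 * i + 1]);
        }
        out
    }

    fn main() {
        let mut m = [0xFFu8; 16];
        m[3] = 0; // one false lane in the low half
        let lo: [u8; 8] = m[..8].try_into().unwrap();
        let hi: [u8; 8] = m[8..].try_into().unwrap();
        // one fold halves the mask; `all` over the 8 lanes finishes the reduction
        assert!(!pairwise_min(lo, hi).iter().all(|&x| x != 0));
    }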
diff --git a/vendor/packed_simd_2/src/codegen/reductions/mask/fallback.rs b/vendor/packed_simd_2/src/codegen/reductions/mask/fallback.rs
index 25e5c813a..4c377a687 100644
--- a/vendor/packed_simd_2/src/codegen/reductions/mask/fallback.rs
+++ b/vendor/packed_simd_2/src/codegen/reductions/mask/fallback.rs
@@ -2,5 +2,7 @@
/// Default mask reduction implementation
macro_rules! impl_mask_reductions {
- ($id:ident) => { fallback_impl!($id); };
+ ($id:ident) => {
+ fallback_impl!($id);
+ };
}
diff --git a/vendor/packed_simd_2/src/codegen/reductions/mask/x86.rs b/vendor/packed_simd_2/src/codegen/reductions/mask/x86.rs
index bcfb1a6e1..4bf509806 100644
--- a/vendor/packed_simd_2/src/codegen/reductions/mask/x86.rs
+++ b/vendor/packed_simd_2/src/codegen/reductions/mask/x86.rs
@@ -114,17 +114,17 @@ macro_rules! x86_m64x4_impl {
/// Fallback implementation.
macro_rules! x86_intr_impl {
($id:ident) => {
- impl All for $id {
- #[inline]
- unsafe fn all(self) -> bool {
- use crate::llvm::simd_reduce_all;
- simd_reduce_all(self.0)
+ impl All for $id {
+ #[inline]
+ unsafe fn all(self) -> bool {
+ use crate::llvm::simd_reduce_all;
+ simd_reduce_all(self.0)
+ }
}
- }
impl Any for $id {
#[inline]
unsafe fn any(self) -> bool {
- use crate::llvm::simd_reduce_any;
+ use crate::llvm::simd_reduce_any;
simd_reduce_any(self.0)
}
}
@@ -134,21 +134,47 @@ macro_rules! x86_intr_impl {
/// Mask reduction implementation for `x86` and `x86_64` targets
macro_rules! impl_mask_reductions {
// 64-bit wide masks
- (m8x8) => { x86_m8x8_impl!(m8x8); };
- (m16x4) => { x86_m8x8_impl!(m16x4); };
- (m32x2) => { x86_m8x8_impl!(m32x2); };
+ (m8x8) => {
+ x86_m8x8_impl!(m8x8);
+ };
+ (m16x4) => {
+ x86_m8x8_impl!(m16x4);
+ };
+ (m32x2) => {
+ x86_m8x8_impl!(m32x2);
+ };
// 128-bit wide masks
- (m8x16) => { x86_m8x16_impl!(m8x16); };
- (m16x8) => { x86_m8x16_impl!(m16x8); };
- (m32x4) => { x86_m32x4_impl!(m32x4); };
- (m64x2) => { x86_m64x2_impl!(m64x2); };
- (m128x1) => { x86_intr_impl!(m128x1); };
+ (m8x16) => {
+ x86_m8x16_impl!(m8x16);
+ };
+ (m16x8) => {
+ x86_m8x16_impl!(m16x8);
+ };
+ (m32x4) => {
+ x86_m32x4_impl!(m32x4);
+ };
+ (m64x2) => {
+ x86_m64x2_impl!(m64x2);
+ };
+ (m128x1) => {
+ x86_intr_impl!(m128x1);
+ };
// 256-bit wide masks:
- (m8x32) => { x86_m8x32_impl!(m8x32, m8x16); };
- (m16x16) => { x86_m8x32_impl!(m16x16, m16x8); };
- (m32x8) => { x86_m32x8_impl!(m32x8, m32x4); };
- (m64x4) => { x86_m64x4_impl!(m64x4, m64x2); };
- (m128x2) => { x86_intr_impl!(m128x2); };
+ (m8x32) => {
+ x86_m8x32_impl!(m8x32, m8x16);
+ };
+ (m16x16) => {
+ x86_m8x32_impl!(m16x16, m16x8);
+ };
+ (m32x8) => {
+ x86_m32x8_impl!(m32x8, m32x4);
+ };
+ (m64x4) => {
+ x86_m64x4_impl!(m64x4, m64x2);
+ };
+ (m128x2) => {
+ x86_intr_impl!(m128x2);
+ };
(msizex2) => {
cfg_if! {
if #[cfg(target_pointer_width = "64")] {
@@ -184,5 +210,7 @@ macro_rules! impl_mask_reductions {
};
// Fallback to LLVM's default code-generation:
- ($id:ident) => { fallback_impl!($id); };
+ ($id:ident) => {
+ fallback_impl!($id);
+ };
}
diff --git a/vendor/packed_simd_2/src/codegen/reductions/mask/x86/avx.rs b/vendor/packed_simd_2/src/codegen/reductions/mask/x86/avx.rs
index d18736fb0..61f352d22 100644
--- a/vendor/packed_simd_2/src/codegen/reductions/mask/x86/avx.rs
+++ b/vendor/packed_simd_2/src/codegen/reductions/mask/x86/avx.rs
@@ -13,10 +13,7 @@ macro_rules! x86_m8x32_avx_impl {
use crate::arch::x86::_mm256_testc_si256;
#[cfg(target_arch = "x86_64")]
use crate::arch::x86_64::_mm256_testc_si256;
- _mm256_testc_si256(
- crate::mem::transmute(self),
- crate::mem::transmute($id::splat(true)),
- ) != 0
+ _mm256_testc_si256(crate::mem::transmute(self), crate::mem::transmute($id::splat(true))) != 0
}
}
impl Any for $id {
@@ -27,10 +24,7 @@ macro_rules! x86_m8x32_avx_impl {
use crate::arch::x86::_mm256_testz_si256;
#[cfg(target_arch = "x86_64")]
use crate::arch::x86_64::_mm256_testz_si256;
- _mm256_testz_si256(
- crate::mem::transmute(self),
- crate::mem::transmute(self),
- ) == 0
+ _mm256_testz_si256(crate::mem::transmute(self), crate::mem::transmute(self)) == 0
}
}
};
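Note: the AVX impls lean on two bit-tests over the whole 256-bit mask: `_mm256_testc_si256(a, ones)` sets CF iff every bit of `a` is set (so `all` compares against an all-true splat), and `_mm256_testz_si256(a, a)` sets ZF iff `a` is zero (so `any` is the negation). A portable model of both predicates, with the 256 bits held as four `u64`s:

    fn testc(a: [u64; 4], b: [u64; 4]) -> bool {
        // CF: set iff (!a & b) == 0, i.e. every bit of b is also set in a
        a.iter().zip(&b).all(|(&x, &y)| (!x & y) == 0)
    }
    fn testz(a: [u64; 4], b: [u64; 4]) -> bool {
        // ZF: set iff (a & b) == 0
        a.iter().zip(&b).all(|(&x, &y)| (x & y) == 0)
    }

    fn main() {
        let ones = [u64::MAX; 4];
        assert!(testc(ones, ones)); // all lanes true
        let mut m = ones;
        m[1] &= !0xFF; // clear one 8-bit lane
        assert!(!testc(m, ones) && !testz(m, m)); // not all, but still any
        assert!(testz([0; 4], [0; 4])); // no lane true
    }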
diff --git a/vendor/packed_simd_2/src/codegen/reductions/mask/x86/sse.rs b/vendor/packed_simd_2/src/codegen/reductions/mask/x86/sse.rs
index eb1ef7fac..e0c9aee92 100644
--- a/vendor/packed_simd_2/src/codegen/reductions/mask/x86/sse.rs
+++ b/vendor/packed_simd_2/src/codegen/reductions/mask/x86/sse.rs
@@ -16,8 +16,7 @@ macro_rules! x86_m32x4_sse_impl {
// most significant bit of each lane of `a`. If all
// bits are set, then all 4 lanes of the mask are
// true.
- _mm_movemask_ps(crate::mem::transmute(self))
- == 0b_1111_i32
+ _mm_movemask_ps(crate::mem::transmute(self)) == 0b_1111_i32
}
}
impl Any for $id {
diff --git a/vendor/packed_simd_2/src/codegen/reductions/mask/x86/sse2.rs b/vendor/packed_simd_2/src/codegen/reductions/mask/x86/sse2.rs
index a99c606f5..bbb52fa47 100644
--- a/vendor/packed_simd_2/src/codegen/reductions/mask/x86/sse2.rs
+++ b/vendor/packed_simd_2/src/codegen/reductions/mask/x86/sse2.rs
@@ -16,8 +16,7 @@ macro_rules! x86_m64x2_sse2_impl {
// most significant bit of each lane of `a`. If all
// bits are set, then all 2 lanes of the mask are
// true.
- _mm_movemask_pd(crate::mem::transmute(self))
- == 0b_11_i32
+ _mm_movemask_pd(crate::mem::transmute(self)) == 0b_11_i32
}
}
impl Any for $id {
@@ -50,8 +49,7 @@ macro_rules! x86_m8x16_sse2_impl {
// most significant bit of each byte of `a`. If all
// bits are set, then all 16 lanes of the mask are
// true.
- _mm_movemask_epi8(crate::mem::transmute(self))
- == i32::from(u16::max_value())
+ _mm_movemask_epi8(crate::mem::transmute(self)) == i32::from(u16::max_value())
}
}
impl Any for $id {
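Note: all of the SSE/SSE2 variants reduce through a movemask: gather the most significant bit of each lane into a small integer, then compare against the all-ones pattern (0b1111 for the 4-lane `_mm_movemask_ps` case, 0xFFFF for the 16-byte case above) for `all`, or against zero for `any`. A portable model of `_mm_movemask_epi8`:

    fn movemask_epi8(m: [u8; 16]) -> i32 {
        m.iter()
            .enumerate()
            .fold(0, |acc, (i, &b)| acc | (((b >> 7) as i32) << i))
    }

    fn main() {
        assert_eq!(movemask_epi8([0xFF; 16]), 0xFFFF); // all() holds
        let mut m = [0xFF; 16];
        m[5] = 0;
        let bits = movemask_epi8(m);
        assert!(bits != 0xFFFF && bits != 0); // not all(), but any()
    }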
diff --git a/vendor/packed_simd_2/src/codegen/shuffle.rs b/vendor/packed_simd_2/src/codegen/shuffle.rs
index d92c9ee22..d3acd48f5 100644
--- a/vendor/packed_simd_2/src/codegen/shuffle.rs
+++ b/vendor/packed_simd_2/src/codegen/shuffle.rs
@@ -2,7 +2,7 @@
//! lanes and vector element types.
use crate::masks::*;
-use crate::sealed::{Shuffle, Seal};
+use crate::sealed::{Seal, Shuffle};
macro_rules! impl_shuffle {
($array:ty, $base:ty, $out:ty) => {
@@ -10,7 +10,7 @@ macro_rules! impl_shuffle {
impl Shuffle<$array> for $base {
type Output = $out;
}
- }
+ };
}
impl_shuffle! { [u32; 2], i8, crate::codegen::i8x2 }
diff --git a/vendor/packed_simd_2/src/codegen/shuffle1_dyn.rs b/vendor/packed_simd_2/src/codegen/shuffle1_dyn.rs
index 8d9577b26..19d457a45 100644
--- a/vendor/packed_simd_2/src/codegen/shuffle1_dyn.rs
+++ b/vendor/packed_simd_2/src/codegen/shuffle1_dyn.rs
@@ -16,8 +16,7 @@ macro_rules! impl_fallback {
fn shuffle1_dyn(self, indices: Self::Indices) -> Self {
let mut result = Self::splat(0);
for i in 0..$id::lanes() {
- result = result
- .replace(i, self.extract(indices.extract(i) as usize));
+ result = result.replace(i, self.extract(indices.extract(i) as usize));
}
result
}
@@ -31,7 +30,7 @@ macro_rules! impl_shuffle1_dyn {
if #[cfg(all(
any(
all(target_arch = "aarch64", target_feature = "neon"),
- all(target_arch = "arm", target_feature = "v7",
+ all(target_arch = "doesnotexist", target_feature = "v7",
target_feature = "neon")
),
any(feature = "core_arch", libcore_neon)
@@ -43,7 +42,7 @@ macro_rules! impl_shuffle1_dyn {
fn shuffle1_dyn(self, indices: Self::Indices) -> Self {
#[cfg(target_arch = "aarch64")]
use crate::arch::aarch64::vtbl1_u8;
- #[cfg(target_arch = "arm")]
+ #[cfg(target_arch = "doesnotexist")]
use crate::arch::arm::vtbl1_u8;
// This is safe because the binary is compiled with
@@ -104,7 +103,7 @@ macro_rules! impl_shuffle1_dyn {
}
}
}
- } else if #[cfg(all(target_arch = "arm", target_feature = "v7",
+ } else if #[cfg(all(target_arch = "doesnotexist", target_feature = "v7",
target_feature = "neon",
any(feature = "core_arch", libcore_neon)))] {
impl Shuffle1Dyn for u8x16 {
@@ -150,16 +149,12 @@ macro_rules! impl_shuffle1_dyn {
#[inline]
fn shuffle1_dyn(self, indices: Self::Indices) -> Self {
let indices: u8x8 = (indices * 2).cast();
- let indices: u8x16 = shuffle!(
- indices, [0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7]
- );
- let v = u8x16::new(
- 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1
- );
+ let indices: u8x16 = shuffle!(indices, [0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7]);
+ let v = u8x16::new(0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1);
let indices = indices + v;
unsafe {
- let s: u8x16 =crate::mem::transmute(self);
- crate::mem::transmute(s.shuffle1_dyn(indices))
+ let s: u8x16 = crate::mem::transmute(self);
+ crate::mem::transmute(s.shuffle1_dyn(indices))
}
}
}
@@ -268,7 +263,9 @@ macro_rules! impl_shuffle1_dyn {
}
}
};
- ($id:ident) => { impl_fallback!($id); }
+ ($id:ident) => {
+ impl_fallback!($id);
+ };
}
impl_shuffle1_dyn!(u8x2);
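Note: the fallback reformatted above is just a lane gather: result lane i is the lane of `self` picked by lane i of `indices` (the `u16x8` path widens each 16-bit index into a pair of byte indices so the byte shuffle can do the work). A portable model of the fallback:

    fn shuffle1_dyn(x: [u8; 4], indices: [u8; 4]) -> [u8; 4] {
        let mut result = [0u8; 4];
        for i in 0..4 {
            result[i] = x[indices[i] as usize];
        }
        result
    }

    fn main() {
        assert_eq!(shuffle1_dyn([10, 20, 30, 40], [3, 3, 0, 1]), [40, 40, 10, 20]);
    }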
diff --git a/vendor/packed_simd_2/src/codegen/swap_bytes.rs b/vendor/packed_simd_2/src/codegen/swap_bytes.rs
index b435fb5da..9cf34a3e0 100644
--- a/vendor/packed_simd_2/src/codegen/swap_bytes.rs
+++ b/vendor/packed_simd_2/src/codegen/swap_bytes.rs
@@ -5,7 +5,7 @@
use crate::*;
-crate trait SwapBytes {
+pub(crate) trait SwapBytes {
fn swap_bytes(self) -> Self;
}
@@ -15,7 +15,7 @@ macro_rules! impl_swap_bytes {
impl SwapBytes for $id {
#[inline]
fn swap_bytes(self) -> Self {
- unsafe { shuffle!(self, [1, 0]) }
+ shuffle!(self, [1, 0])
}
}
)+
@@ -119,52 +119,12 @@ macro_rules! impl_swap_bytes {
impl_swap_bytes!(v16: u8x2, i8x2,);
impl_swap_bytes!(v32: u8x4, i8x4, u16x2, i16x2,);
// FIXME: 64-bit single element vector
-impl_swap_bytes!(
- v64: u8x8,
- i8x8,
- u16x4,
- i16x4,
- u32x2,
- i32x2, /* u64x1, i64x1, */
-);
+impl_swap_bytes!(v64: u8x8, i8x8, u16x4, i16x4, u32x2, i32x2 /* u64x1, i64x1, */,);
-impl_swap_bytes!(
- v128: u8x16,
- i8x16,
- u16x8,
- i16x8,
- u32x4,
- i32x4,
- u64x2,
- i64x2,
- u128x1,
- i128x1,
-);
-impl_swap_bytes!(
- v256: u8x32,
- i8x32,
- u16x16,
- i16x16,
- u32x8,
- i32x8,
- u64x4,
- i64x4,
- u128x2,
- i128x2,
-);
+impl_swap_bytes!(v128: u8x16, i8x16, u16x8, i16x8, u32x4, i32x4, u64x2, i64x2, u128x1, i128x1,);
+impl_swap_bytes!(v256: u8x32, i8x32, u16x16, i16x16, u32x8, i32x8, u64x4, i64x4, u128x2, i128x2,);
-impl_swap_bytes!(
- v512: u8x64,
- i8x64,
- u16x32,
- i16x32,
- u32x16,
- i32x16,
- u64x8,
- i64x8,
- u128x4,
- i128x4,
-);
+impl_swap_bytes!(v512: u8x64, i8x64, u16x32, i16x32, u32x16, i32x16, u64x8, i64x8, u128x4, i128x4,);
cfg_if! {
if #[cfg(target_pointer_width = "8")] {
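Note: the `u8x2` impl above swaps the vector's two bytes directly; the wider impls in this file transmute to a byte vector, apply a constant reversing shuffle, and transmute back. A portable model of that step for a 32-bit-wide vector:

    fn swap_bytes_v32(x: [u8; 4]) -> [u8; 4] {
        [x[3], x[2], x[1], x[0]] // the shuffle!(bytes, [3, 2, 1, 0]) step
    }

    fn main() {
        let v = [0x12u8, 0x34, 0x56, 0x78];
        assert_eq!(swap_bytes_v32(v), [0x78, 0x56, 0x34, 0x12]);
        // same as a scalar byte-order swap of the 32-bit value
        assert_eq!(swap_bytes_v32(v), u32::from_le_bytes(v).swap_bytes().to_le_bytes());
    }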
diff --git a/vendor/packed_simd_2/src/codegen/vPtr.rs b/vendor/packed_simd_2/src/codegen/vPtr.rs
index cf4765538..abd3aa877 100644
--- a/vendor/packed_simd_2/src/codegen/vPtr.rs
+++ b/vendor/packed_simd_2/src/codegen/vPtr.rs
@@ -5,7 +5,7 @@ macro_rules! impl_simd_ptr {
| $($tys:ty),*) => {
#[derive(Copy, Clone)]
#[repr(simd)]
- pub struct $tuple_id<$ty>($(crate $tys),*);
+ pub struct $tuple_id<$ty>($(pub(crate) $tys),*);
//^^^^^^^ leaked through SimdArray
impl<$ty> crate::sealed::Seal for [$ptr_ty; $elem_count] {}
diff --git a/vendor/packed_simd_2/src/codegen/vSize.rs b/vendor/packed_simd_2/src/codegen/vSize.rs
index 3911b2134..d5db03991 100644
--- a/vendor/packed_simd_2/src/codegen/vSize.rs
+++ b/vendor/packed_simd_2/src/codegen/vSize.rs
@@ -11,33 +11,6 @@ impl_simd_array!([isize; 4]: isizex4 | isize_, isize_, isize_, isize_);
impl_simd_array!([usize; 4]: usizex4 | usize_, usize_, usize_, usize_);
impl_simd_array!([msize; 4]: msizex4 | isize_, isize_, isize_, isize_);
-impl_simd_array!(
- [isize; 8]: isizex8 | isize_,
- isize_,
- isize_,
- isize_,
- isize_,
- isize_,
- isize_,
- isize_
-);
-impl_simd_array!(
- [usize; 8]: usizex8 | usize_,
- usize_,
- usize_,
- usize_,
- usize_,
- usize_,
- usize_,
- usize_
-);
-impl_simd_array!(
- [msize; 8]: msizex8 | isize_,
- isize_,
- isize_,
- isize_,
- isize_,
- isize_,
- isize_,
- isize_
-);
+impl_simd_array!([isize; 8]: isizex8 | isize_, isize_, isize_, isize_, isize_, isize_, isize_, isize_);
+impl_simd_array!([usize; 8]: usizex8 | usize_, usize_, usize_, usize_, usize_, usize_, usize_, usize_);
+impl_simd_array!([msize; 8]: msizex8 | isize_, isize_, isize_, isize_, isize_, isize_, isize_, isize_);