author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:19:13 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:19:13 +0000
commit    218caa410aa38c29984be31a5229b9fa717560ee (patch)
tree      c54bd55eeb6e4c508940a30e94c0032fbd45d677 /vendor/packed_simd_2/src
parent    Releasing progress-linux version 1.67.1+dfsg1-1~progress7.99u1. (diff)
Merging upstream version 1.68.2+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'vendor/packed_simd_2/src')
-rw-r--r--  vendor/packed_simd_2/src/api.rs | 4
-rw-r--r--  vendor/packed_simd_2/src/api/cast/v128.rs | 297
-rw-r--r--  vendor/packed_simd_2/src/api/cast/v16.rs | 63
-rw-r--r--  vendor/packed_simd_2/src/api/cast/v256.rs | 297
-rw-r--r--  vendor/packed_simd_2/src/api/cast/v32.rs | 126
-rw-r--r--  vendor/packed_simd_2/src/api/cast/v512.rs | 205
-rw-r--r--  vendor/packed_simd_2/src/api/cast/v64.rs | 201
-rw-r--r--  vendor/packed_simd_2/src/api/cmp/partial_eq.rs | 4
-rw-r--r--  vendor/packed_simd_2/src/api/cmp/partial_ord.rs | 8
-rw-r--r--  vendor/packed_simd_2/src/api/fmt/binary.rs | 4
-rw-r--r--  vendor/packed_simd_2/src/api/fmt/debug.rs | 4
-rw-r--r--  vendor/packed_simd_2/src/api/fmt/lower_hex.rs | 4
-rw-r--r--  vendor/packed_simd_2/src/api/fmt/octal.rs | 4
-rw-r--r--  vendor/packed_simd_2/src/api/fmt/upper_hex.rs | 4
-rw-r--r--  vendor/packed_simd_2/src/api/into_bits.rs | 4
-rw-r--r--  vendor/packed_simd_2/src/api/into_bits/arch_specific.rs | 272
-rw-r--r--  vendor/packed_simd_2/src/api/into_bits/macros.rs | 2
-rw-r--r--  vendor/packed_simd_2/src/api/into_bits/v128.rs | 232
-rw-r--r--  vendor/packed_simd_2/src/api/into_bits/v256.rs | 231
-rw-r--r--  vendor/packed_simd_2/src/api/into_bits/v512.rs | 231
-rw-r--r--  vendor/packed_simd_2/src/api/math/float/consts.rs | 36
-rw-r--r--  vendor/packed_simd_2/src/api/ops/scalar_shifts.rs | 7
-rw-r--r--  vendor/packed_simd_2/src/api/ops/vector_rotates.rs | 2
-rw-r--r--  vendor/packed_simd_2/src/api/ops/vector_shifts.rs | 7
-rw-r--r--  vendor/packed_simd_2/src/api/ptr/gather_scatter.rs | 21
-rw-r--r--  vendor/packed_simd_2/src/api/reductions/float_arithmetic.rs | 8
-rw-r--r--  vendor/packed_simd_2/src/api/reductions/integer_arithmetic.rs | 8
-rw-r--r--  vendor/packed_simd_2/src/api/reductions/min_max.rs | 65
-rw-r--r--  vendor/packed_simd_2/src/api/select.rs | 4
-rw-r--r--  vendor/packed_simd_2/src/api/shuffle.rs | 54
-rw-r--r--  vendor/packed_simd_2/src/api/slice/from_slice.rs | 28
-rw-r--r--  vendor/packed_simd_2/src/api/slice/write_to_slice.rs | 35
-rw-r--r--  vendor/packed_simd_2/src/codegen.rs | 50
-rw-r--r--  vendor/packed_simd_2/src/codegen/bit_manip.rs | 17
-rw-r--r--  vendor/packed_simd_2/src/codegen/llvm.rs | 207
-rw-r--r--  vendor/packed_simd_2/src/codegen/math.rs | 2
-rw-r--r--  vendor/packed_simd_2/src/codegen/math/float.rs | 30
-rw-r--r--  vendor/packed_simd_2/src/codegen/math/float/abs.rs | 2
-rw-r--r--  vendor/packed_simd_2/src/codegen/math/float/cos.rs | 2
-rw-r--r--  vendor/packed_simd_2/src/codegen/math/float/cos_pi.rs | 2
-rw-r--r--  vendor/packed_simd_2/src/codegen/math/float/exp.rs | 2
-rw-r--r--  vendor/packed_simd_2/src/codegen/math/float/ln.rs | 2
-rw-r--r--  vendor/packed_simd_2/src/codegen/math/float/macros.rs | 133
-rw-r--r--  vendor/packed_simd_2/src/codegen/math/float/mul_add.rs | 2
-rw-r--r--  vendor/packed_simd_2/src/codegen/math/float/mul_adde.rs | 10
-rw-r--r--  vendor/packed_simd_2/src/codegen/math/float/powf.rs | 2
-rw-r--r--  vendor/packed_simd_2/src/codegen/math/float/sin.rs | 2
-rw-r--r--  vendor/packed_simd_2/src/codegen/math/float/sin_cos_pi.rs | 23
-rw-r--r--  vendor/packed_simd_2/src/codegen/math/float/sin_pi.rs | 2
-rw-r--r--  vendor/packed_simd_2/src/codegen/math/float/sqrt.rs | 2
-rw-r--r--  vendor/packed_simd_2/src/codegen/math/float/sqrte.rs | 2
-rw-r--r--  vendor/packed_simd_2/src/codegen/math/float/tanh.rs | 8
-rw-r--r--  vendor/packed_simd_2/src/codegen/pointer_sized_int.rs | 24
-rw-r--r--  vendor/packed_simd_2/src/codegen/reductions.rs | 2
-rw-r--r--  vendor/packed_simd_2/src/codegen/reductions/mask.rs | 6
-rw-r--r--  vendor/packed_simd_2/src/codegen/reductions/mask/aarch64.rs | 38
-rw-r--r--  vendor/packed_simd_2/src/codegen/reductions/mask/arm.rs | 26
-rw-r--r--  vendor/packed_simd_2/src/codegen/reductions/mask/fallback.rs | 4
-rw-r--r--  vendor/packed_simd_2/src/codegen/reductions/mask/x86.rs | 70
-rw-r--r--  vendor/packed_simd_2/src/codegen/reductions/mask/x86/avx.rs | 10
-rw-r--r--  vendor/packed_simd_2/src/codegen/reductions/mask/x86/sse.rs | 3
-rw-r--r--  vendor/packed_simd_2/src/codegen/reductions/mask/x86/sse2.rs | 6
-rw-r--r--  vendor/packed_simd_2/src/codegen/shuffle.rs | 4
-rw-r--r--  vendor/packed_simd_2/src/codegen/shuffle1_dyn.rs | 25
-rw-r--r--  vendor/packed_simd_2/src/codegen/swap_bytes.rs | 52
-rw-r--r--  vendor/packed_simd_2/src/codegen/vPtr.rs | 2
-rw-r--r--  vendor/packed_simd_2/src/codegen/vSize.rs | 33
-rw-r--r--  vendor/packed_simd_2/src/lib.rs | 60
-rw-r--r--  vendor/packed_simd_2/src/masks.rs | 8
-rw-r--r--  vendor/packed_simd_2/src/testing.rs | 2
-rw-r--r--  vendor/packed_simd_2/src/testing/macros.rs | 40
-rw-r--r--  vendor/packed_simd_2/src/testing/utils.rs | 34
72 files changed, 2428 insertions(+), 995 deletions(-)
diff --git a/vendor/packed_simd_2/src/api.rs b/vendor/packed_simd_2/src/api.rs
index 953685925..262fc4ee6 100644
--- a/vendor/packed_simd_2/src/api.rs
+++ b/vendor/packed_simd_2/src/api.rs
@@ -2,7 +2,7 @@
#[macro_use]
mod bitmask;
-crate mod cast;
+pub(crate) mod cast;
#[macro_use]
mod cmp;
#[macro_use]
@@ -37,7 +37,7 @@ mod swap_bytes;
mod bit_manip;
#[cfg(feature = "into_bits")]
-crate mod into_bits;
+pub(crate) mod into_bits;
macro_rules! impl_i {
([$elem_ty:ident; $elem_n:expr]: $tuple_id:ident, $mask_ty:ident
diff --git a/vendor/packed_simd_2/src/api/cast/v128.rs b/vendor/packed_simd_2/src/api/cast/v128.rs
index ab47ddc00..2e10b97b7 100644
--- a/vendor/packed_simd_2/src/api/cast/v128.rs
+++ b/vendor/packed_simd_2/src/api/cast/v128.rs
@@ -3,74 +3,297 @@
use crate::*;
-impl_from_cast!(
- i8x16[test_v128]: u8x16, m8x16, i16x16, u16x16, m16x16, i32x16, u32x16, f32x16, m32x16
-);
-impl_from_cast!(
- u8x16[test_v128]: i8x16, m8x16, i16x16, u16x16, m16x16, i32x16, u32x16, f32x16, m32x16
-);
-impl_from_cast_mask!(
- m8x16[test_v128]: i8x16, u8x16, i16x16, u16x16, m16x16, i32x16, u32x16, f32x16, m32x16
-);
+impl_from_cast!(i8x16[test_v128]: u8x16, m8x16, i16x16, u16x16, m16x16, i32x16, u32x16, f32x16, m32x16);
+impl_from_cast!(u8x16[test_v128]: i8x16, m8x16, i16x16, u16x16, m16x16, i32x16, u32x16, f32x16, m32x16);
+impl_from_cast_mask!(m8x16[test_v128]: i8x16, u8x16, i16x16, u16x16, m16x16, i32x16, u32x16, f32x16, m32x16);
impl_from_cast!(
- i16x8[test_v128]: i8x8, u8x8, m8x8, u16x8, m16x8, i32x8, u32x8, f32x8, m32x8,
- i64x8, u64x8, f64x8, m64x8, isizex8, usizex8, msizex8
+ i16x8[test_v128]: i8x8,
+ u8x8,
+ m8x8,
+ u16x8,
+ m16x8,
+ i32x8,
+ u32x8,
+ f32x8,
+ m32x8,
+ i64x8,
+ u64x8,
+ f64x8,
+ m64x8,
+ isizex8,
+ usizex8,
+ msizex8
);
impl_from_cast!(
- u16x8[test_v128]: i8x8, u8x8, m8x8, i16x8, m16x8, i32x8, u32x8, f32x8, m32x8,
- i64x8, u64x8, f64x8, m64x8, isizex8, usizex8, msizex8
+ u16x8[test_v128]: i8x8,
+ u8x8,
+ m8x8,
+ i16x8,
+ m16x8,
+ i32x8,
+ u32x8,
+ f32x8,
+ m32x8,
+ i64x8,
+ u64x8,
+ f64x8,
+ m64x8,
+ isizex8,
+ usizex8,
+ msizex8
);
impl_from_cast_mask!(
- m16x8[test_v128]: i8x8, u8x8, m8x8, i16x8, u16x8, i32x8, u32x8, f32x8, m32x8,
- i64x8, u64x8, f64x8, m64x8, isizex8, usizex8, msizex8
+ m16x8[test_v128]: i8x8,
+ u8x8,
+ m8x8,
+ i16x8,
+ u16x8,
+ i32x8,
+ u32x8,
+ f32x8,
+ m32x8,
+ i64x8,
+ u64x8,
+ f64x8,
+ m64x8,
+ isizex8,
+ usizex8,
+ msizex8
);
impl_from_cast!(
- i32x4[test_v128]: i8x4, u8x4, m8x4, i16x4, u16x4, m16x4, u32x4, f32x4, m32x4,
- i64x4, u64x4, f64x4, m64x4, i128x4, u128x4, m128x4, isizex4, usizex4, msizex4
+ i32x4[test_v128]: i8x4,
+ u8x4,
+ m8x4,
+ i16x4,
+ u16x4,
+ m16x4,
+ u32x4,
+ f32x4,
+ m32x4,
+ i64x4,
+ u64x4,
+ f64x4,
+ m64x4,
+ i128x4,
+ u128x4,
+ m128x4,
+ isizex4,
+ usizex4,
+ msizex4
);
impl_from_cast!(
- u32x4[test_v128]: i8x4, u8x4, m8x4, i16x4, u16x4, m16x4, i32x4, f32x4, m32x4,
- i64x4, u64x4, f64x4, m64x4, i128x4, u128x4, m128x4, isizex4, usizex4, msizex4
+ u32x4[test_v128]: i8x4,
+ u8x4,
+ m8x4,
+ i16x4,
+ u16x4,
+ m16x4,
+ i32x4,
+ f32x4,
+ m32x4,
+ i64x4,
+ u64x4,
+ f64x4,
+ m64x4,
+ i128x4,
+ u128x4,
+ m128x4,
+ isizex4,
+ usizex4,
+ msizex4
);
impl_from_cast!(
- f32x4[test_v128]: i8x4, u8x4, m8x4, i16x4, u16x4, m16x4, i32x4, u32x4, m32x4,
- i64x4, u64x4, f64x4, m64x4, i128x4, u128x4, m128x4, isizex4, usizex4, msizex4
+ f32x4[test_v128]: i8x4,
+ u8x4,
+ m8x4,
+ i16x4,
+ u16x4,
+ m16x4,
+ i32x4,
+ u32x4,
+ m32x4,
+ i64x4,
+ u64x4,
+ f64x4,
+ m64x4,
+ i128x4,
+ u128x4,
+ m128x4,
+ isizex4,
+ usizex4,
+ msizex4
);
impl_from_cast_mask!(
- m32x4[test_v128]: i8x4, u8x4, m8x4, i16x4, u16x4, m16x4, i32x4, u32x4, f32x4,
- i64x4, u64x4, f64x4, m64x4, i128x4, u128x4, m128x4, isizex4, usizex4, msizex4
+ m32x4[test_v128]: i8x4,
+ u8x4,
+ m8x4,
+ i16x4,
+ u16x4,
+ m16x4,
+ i32x4,
+ u32x4,
+ f32x4,
+ i64x4,
+ u64x4,
+ f64x4,
+ m64x4,
+ i128x4,
+ u128x4,
+ m128x4,
+ isizex4,
+ usizex4,
+ msizex4
);
impl_from_cast!(
- i64x2[test_v128]: i8x2, u8x2, m8x2, i16x2, u16x2, m16x2, i32x2, u32x2, f32x2, m32x2,
- u64x2, f64x2, m64x2, i128x2, u128x2, m128x2, isizex2, usizex2, msizex2
+ i64x2[test_v128]: i8x2,
+ u8x2,
+ m8x2,
+ i16x2,
+ u16x2,
+ m16x2,
+ i32x2,
+ u32x2,
+ f32x2,
+ m32x2,
+ u64x2,
+ f64x2,
+ m64x2,
+ i128x2,
+ u128x2,
+ m128x2,
+ isizex2,
+ usizex2,
+ msizex2
);
impl_from_cast!(
- u64x2[test_v128]: i8x2, u8x2, m8x2, i16x2, u16x2, m16x2, i32x2, u32x2, f32x2, m32x2,
- i64x2, f64x2, m64x2, i128x2, u128x2, m128x2, isizex2, usizex2, msizex2
+ u64x2[test_v128]: i8x2,
+ u8x2,
+ m8x2,
+ i16x2,
+ u16x2,
+ m16x2,
+ i32x2,
+ u32x2,
+ f32x2,
+ m32x2,
+ i64x2,
+ f64x2,
+ m64x2,
+ i128x2,
+ u128x2,
+ m128x2,
+ isizex2,
+ usizex2,
+ msizex2
);
impl_from_cast!(
- f64x2[test_v128]: i8x2, u8x2, m8x2, i16x2, u16x2, m16x2, i32x2, u32x2, f32x2, m32x2,
- i64x2, u64x2, m64x2, i128x2, u128x2, m128x2, isizex2, usizex2, msizex2
+ f64x2[test_v128]: i8x2,
+ u8x2,
+ m8x2,
+ i16x2,
+ u16x2,
+ m16x2,
+ i32x2,
+ u32x2,
+ f32x2,
+ m32x2,
+ i64x2,
+ u64x2,
+ m64x2,
+ i128x2,
+ u128x2,
+ m128x2,
+ isizex2,
+ usizex2,
+ msizex2
);
impl_from_cast_mask!(
- m64x2[test_v128]: i8x2, u8x2, m8x2, i16x2, u16x2, m16x2, i32x2, u32x2, f32x2, m32x2,
- i64x2, u64x2, f64x2, i128x2, u128x2, m128x2, isizex2, usizex2, msizex2
+ m64x2[test_v128]: i8x2,
+ u8x2,
+ m8x2,
+ i16x2,
+ u16x2,
+ m16x2,
+ i32x2,
+ u32x2,
+ f32x2,
+ m32x2,
+ i64x2,
+ u64x2,
+ f64x2,
+ i128x2,
+ u128x2,
+ m128x2,
+ isizex2,
+ usizex2,
+ msizex2
);
impl_from_cast!(
- isizex2[test_v128]: i8x2, u8x2, m8x2, i16x2, u16x2, m16x2, i32x2, u32x2, f32x2, m32x2,
- i64x2, u64x2, f64x2, m64x2, i128x2, u128x2, m128x2, usizex2, msizex2
+ isizex2[test_v128]: i8x2,
+ u8x2,
+ m8x2,
+ i16x2,
+ u16x2,
+ m16x2,
+ i32x2,
+ u32x2,
+ f32x2,
+ m32x2,
+ i64x2,
+ u64x2,
+ f64x2,
+ m64x2,
+ i128x2,
+ u128x2,
+ m128x2,
+ usizex2,
+ msizex2
);
impl_from_cast!(
- usizex2[test_v128]: i8x2, u8x2, m8x2, i16x2, u16x2, m16x2, i32x2, u32x2, f32x2, m32x2,
- i64x2, u64x2, f64x2, m64x2, i128x2, u128x2, m128x2, isizex2, msizex2
+ usizex2[test_v128]: i8x2,
+ u8x2,
+ m8x2,
+ i16x2,
+ u16x2,
+ m16x2,
+ i32x2,
+ u32x2,
+ f32x2,
+ m32x2,
+ i64x2,
+ u64x2,
+ f64x2,
+ m64x2,
+ i128x2,
+ u128x2,
+ m128x2,
+ isizex2,
+ msizex2
);
impl_from_cast_mask!(
- msizex2[test_v128]: i8x2, u8x2, m8x2, i16x2, u16x2, m16x2, i32x2, u32x2, f32x2, m32x2,
- i64x2, u64x2, f64x2, m64x2, i128x2, u128x2, m128x2, isizex2, usizex2
+ msizex2[test_v128]: i8x2,
+ u8x2,
+ m8x2,
+ i16x2,
+ u16x2,
+ m16x2,
+ i32x2,
+ u32x2,
+ f32x2,
+ m32x2,
+ i64x2,
+ u64x2,
+ f64x2,
+ m64x2,
+ i128x2,
+ u128x2,
+ m128x2,
+ isizex2,
+ usizex2
);
// FIXME[test_v128]: 64-bit single element vectors into_cast impls
diff --git a/vendor/packed_simd_2/src/api/cast/v16.rs b/vendor/packed_simd_2/src/api/cast/v16.rs
index cf974bb08..896febacb 100644
--- a/vendor/packed_simd_2/src/api/cast/v16.rs
+++ b/vendor/packed_simd_2/src/api/cast/v16.rs
@@ -4,14 +4,65 @@
use crate::*;
impl_from_cast!(
- i8x2[test_v16]: u8x2, m8x2, i16x2, u16x2, m16x2, i32x2, u32x2, f32x2, m32x2,
- i64x2, u64x2, f64x2, m64x2, i128x2, u128x2, m128x2, isizex2, usizex2, msizex2
+ i8x2[test_v16]: u8x2,
+ m8x2,
+ i16x2,
+ u16x2,
+ m16x2,
+ i32x2,
+ u32x2,
+ f32x2,
+ m32x2,
+ i64x2,
+ u64x2,
+ f64x2,
+ m64x2,
+ i128x2,
+ u128x2,
+ m128x2,
+ isizex2,
+ usizex2,
+ msizex2
);
impl_from_cast!(
- u8x2[test_v16]: i8x2, m8x2, i16x2, u16x2, m16x2, i32x2, u32x2, f32x2, m32x2,
- i64x2, u64x2, f64x2, m64x2, i128x2, u128x2, m128x2, isizex2, usizex2, msizex2
+ u8x2[test_v16]: i8x2,
+ m8x2,
+ i16x2,
+ u16x2,
+ m16x2,
+ i32x2,
+ u32x2,
+ f32x2,
+ m32x2,
+ i64x2,
+ u64x2,
+ f64x2,
+ m64x2,
+ i128x2,
+ u128x2,
+ m128x2,
+ isizex2,
+ usizex2,
+ msizex2
);
impl_from_cast_mask!(
- m8x2[test_v16]: i8x2, u8x2, i16x2, u16x2, m16x2, i32x2, u32x2, f32x2, m32x2,
- i64x2, u64x2, f64x2, m64x2, i128x2, u128x2, m128x2, isizex2, usizex2, msizex2
+ m8x2[test_v16]: i8x2,
+ u8x2,
+ i16x2,
+ u16x2,
+ m16x2,
+ i32x2,
+ u32x2,
+ f32x2,
+ m32x2,
+ i64x2,
+ u64x2,
+ f64x2,
+ m64x2,
+ i128x2,
+ u128x2,
+ m128x2,
+ isizex2,
+ usizex2,
+ msizex2
);
diff --git a/vendor/packed_simd_2/src/api/cast/v256.rs b/vendor/packed_simd_2/src/api/cast/v256.rs
index 9389dcb4c..fe0c835e3 100644
--- a/vendor/packed_simd_2/src/api/cast/v256.rs
+++ b/vendor/packed_simd_2/src/api/cast/v256.rs
@@ -7,75 +7,292 @@ impl_from_cast!(i8x32[test_v256]: u8x32, m8x32, i16x32, u16x32, m16x32);
impl_from_cast!(u8x32[test_v256]: i8x32, m8x32, i16x32, u16x32, m16x32);
impl_from_cast_mask!(m8x32[test_v256]: i8x32, u8x32, i16x32, u16x32, m16x32);
-impl_from_cast!(
- i16x16[test_v256]: i8x16, u8x16, m8x16, u16x16, m16x16,
- i32x16, u32x16, f32x16, m32x16
-);
-impl_from_cast!(
- u16x16[test_v256]: i8x16, u8x16, m8x16, i16x16, m16x16,
- i32x16, u32x16, f32x16, m32x16
-);
-impl_from_cast_mask!(
- m16x16[test_v256]: i8x16, u8x16, m8x16, i16x16, u16x16,
- i32x16, u32x16, f32x16, m32x16
-);
+impl_from_cast!(i16x16[test_v256]: i8x16, u8x16, m8x16, u16x16, m16x16, i32x16, u32x16, f32x16, m32x16);
+impl_from_cast!(u16x16[test_v256]: i8x16, u8x16, m8x16, i16x16, m16x16, i32x16, u32x16, f32x16, m32x16);
+impl_from_cast_mask!(m16x16[test_v256]: i8x16, u8x16, m8x16, i16x16, u16x16, i32x16, u32x16, f32x16, m32x16);
impl_from_cast!(
- i32x8[test_v256]: i8x8, u8x8, m8x8, i16x8, u16x8, m16x8, u32x8, f32x8, m32x8,
- i64x8, u64x8, f64x8, m64x8, isizex8, usizex8, msizex8
+ i32x8[test_v256]: i8x8,
+ u8x8,
+ m8x8,
+ i16x8,
+ u16x8,
+ m16x8,
+ u32x8,
+ f32x8,
+ m32x8,
+ i64x8,
+ u64x8,
+ f64x8,
+ m64x8,
+ isizex8,
+ usizex8,
+ msizex8
);
impl_from_cast!(
- u32x8[test_v256]: i8x8, u8x8, m8x8, i16x8, u16x8, m16x8, i32x8, f32x8, m32x8,
- i64x8, u64x8, f64x8, m64x8, isizex8, usizex8, msizex8
+ u32x8[test_v256]: i8x8,
+ u8x8,
+ m8x8,
+ i16x8,
+ u16x8,
+ m16x8,
+ i32x8,
+ f32x8,
+ m32x8,
+ i64x8,
+ u64x8,
+ f64x8,
+ m64x8,
+ isizex8,
+ usizex8,
+ msizex8
);
impl_from_cast!(
- f32x8[test_v256]: i8x8, u8x8, m8x8, i16x8, u16x8, m16x8, i32x8, u32x8, m32x8,
- i64x8, u64x8, f64x8, m64x8, isizex8, usizex8, msizex8
+ f32x8[test_v256]: i8x8,
+ u8x8,
+ m8x8,
+ i16x8,
+ u16x8,
+ m16x8,
+ i32x8,
+ u32x8,
+ m32x8,
+ i64x8,
+ u64x8,
+ f64x8,
+ m64x8,
+ isizex8,
+ usizex8,
+ msizex8
);
impl_from_cast_mask!(
- m32x8[test_v256]: i8x8, u8x8, m8x8, i16x8, u16x8, m16x8, i32x8, u32x8, f32x8,
- i64x8, u64x8, f64x8, m64x8, isizex8, usizex8, msizex8
+ m32x8[test_v256]: i8x8,
+ u8x8,
+ m8x8,
+ i16x8,
+ u16x8,
+ m16x8,
+ i32x8,
+ u32x8,
+ f32x8,
+ i64x8,
+ u64x8,
+ f64x8,
+ m64x8,
+ isizex8,
+ usizex8,
+ msizex8
);
impl_from_cast!(
- i64x4[test_v256]: i8x4, u8x4, m8x4, i16x4, u16x4, m16x4, i32x4, u32x4, f32x4, m32x4,
- u64x4, f64x4, m64x4, i128x4, u128x4, m128x4, isizex4, usizex4, msizex4
+ i64x4[test_v256]: i8x4,
+ u8x4,
+ m8x4,
+ i16x4,
+ u16x4,
+ m16x4,
+ i32x4,
+ u32x4,
+ f32x4,
+ m32x4,
+ u64x4,
+ f64x4,
+ m64x4,
+ i128x4,
+ u128x4,
+ m128x4,
+ isizex4,
+ usizex4,
+ msizex4
);
impl_from_cast!(
- u64x4[test_v256]: i8x4, u8x4, m8x4, i16x4, u16x4, m16x4, i32x4, u32x4, f32x4, m32x4,
- i64x4, f64x4, m64x4, i128x4, u128x4, m128x4, isizex4, usizex4, msizex4
+ u64x4[test_v256]: i8x4,
+ u8x4,
+ m8x4,
+ i16x4,
+ u16x4,
+ m16x4,
+ i32x4,
+ u32x4,
+ f32x4,
+ m32x4,
+ i64x4,
+ f64x4,
+ m64x4,
+ i128x4,
+ u128x4,
+ m128x4,
+ isizex4,
+ usizex4,
+ msizex4
);
impl_from_cast!(
- f64x4[test_v256]: i8x4, u8x4, m8x4, i16x4, u16x4, m16x4, i32x4, u32x4, f32x4, m32x4,
- i64x4, u64x4, m64x4, i128x4, u128x4, m128x4, isizex4, usizex4, msizex4
+ f64x4[test_v256]: i8x4,
+ u8x4,
+ m8x4,
+ i16x4,
+ u16x4,
+ m16x4,
+ i32x4,
+ u32x4,
+ f32x4,
+ m32x4,
+ i64x4,
+ u64x4,
+ m64x4,
+ i128x4,
+ u128x4,
+ m128x4,
+ isizex4,
+ usizex4,
+ msizex4
);
impl_from_cast_mask!(
- m64x4[test_v256]: i8x4, u8x4, m8x4, i16x4, u16x4, m16x4, i32x4, u32x4, f32x4, m32x4,
- i64x4, u64x4, f64x4, i128x4, u128x4, m128x4, isizex4, usizex4, msizex4
+ m64x4[test_v256]: i8x4,
+ u8x4,
+ m8x4,
+ i16x4,
+ u16x4,
+ m16x4,
+ i32x4,
+ u32x4,
+ f32x4,
+ m32x4,
+ i64x4,
+ u64x4,
+ f64x4,
+ i128x4,
+ u128x4,
+ m128x4,
+ isizex4,
+ usizex4,
+ msizex4
);
impl_from_cast!(
- i128x2[test_v256]: i8x2, u8x2, m8x2, i16x2, u16x2, m16x2, i32x2, u32x2, f32x2, m32x2,
- i64x2, u64x2, f64x2, m64x2, u128x2, m128x2, isizex2, usizex2, msizex2
+ i128x2[test_v256]: i8x2,
+ u8x2,
+ m8x2,
+ i16x2,
+ u16x2,
+ m16x2,
+ i32x2,
+ u32x2,
+ f32x2,
+ m32x2,
+ i64x2,
+ u64x2,
+ f64x2,
+ m64x2,
+ u128x2,
+ m128x2,
+ isizex2,
+ usizex2,
+ msizex2
);
impl_from_cast!(
- u128x2[test_v256]: i8x2, u8x2, m8x2, i16x2, u16x2, m16x2, i32x2, u32x2, f32x2, m32x2,
- i64x2, u64x2, f64x2, m64x2, i128x2, m128x2, isizex2, usizex2, msizex2
+ u128x2[test_v256]: i8x2,
+ u8x2,
+ m8x2,
+ i16x2,
+ u16x2,
+ m16x2,
+ i32x2,
+ u32x2,
+ f32x2,
+ m32x2,
+ i64x2,
+ u64x2,
+ f64x2,
+ m64x2,
+ i128x2,
+ m128x2,
+ isizex2,
+ usizex2,
+ msizex2
);
impl_from_cast_mask!(
- m128x2[test_v256]: i8x2, u8x2, m8x2, i16x2, u16x2, m16x2, i32x2, u32x2, f32x2, m32x2,
- i64x2, u64x2, m64x2, f64x2, i128x2, u128x2, isizex2, usizex2, msizex2
+ m128x2[test_v256]: i8x2,
+ u8x2,
+ m8x2,
+ i16x2,
+ u16x2,
+ m16x2,
+ i32x2,
+ u32x2,
+ f32x2,
+ m32x2,
+ i64x2,
+ u64x2,
+ m64x2,
+ f64x2,
+ i128x2,
+ u128x2,
+ isizex2,
+ usizex2,
+ msizex2
);
impl_from_cast!(
- isizex4[test_v256]: i8x4, u8x4, m8x4, i16x4, u16x4, m16x4, i32x4, u32x4, f32x4, m32x4,
- i64x4, u64x4, f64x4, m64x4, i128x4, u128x4, m128x4, usizex4, msizex4
+ isizex4[test_v256]: i8x4,
+ u8x4,
+ m8x4,
+ i16x4,
+ u16x4,
+ m16x4,
+ i32x4,
+ u32x4,
+ f32x4,
+ m32x4,
+ i64x4,
+ u64x4,
+ f64x4,
+ m64x4,
+ i128x4,
+ u128x4,
+ m128x4,
+ usizex4,
+ msizex4
);
impl_from_cast!(
- usizex4[test_v256]: i8x4, u8x4, m8x4, i16x4, u16x4, m16x4, i32x4, u32x4, f32x4, m32x4,
- i64x4, u64x4, f64x4, m64x4, i128x4, u128x4, m128x4, isizex4, msizex4
+ usizex4[test_v256]: i8x4,
+ u8x4,
+ m8x4,
+ i16x4,
+ u16x4,
+ m16x4,
+ i32x4,
+ u32x4,
+ f32x4,
+ m32x4,
+ i64x4,
+ u64x4,
+ f64x4,
+ m64x4,
+ i128x4,
+ u128x4,
+ m128x4,
+ isizex4,
+ msizex4
);
impl_from_cast_mask!(
- msizex4[test_v256]: i8x4, u8x4, m8x4, i16x4, u16x4, m16x4, i32x4, u32x4, f32x4, m32x4,
- i64x4, u64x4, f64x4, m64x4, i128x4, u128x4, m128x4, isizex4, usizex4
+ msizex4[test_v256]: i8x4,
+ u8x4,
+ m8x4,
+ i16x4,
+ u16x4,
+ m16x4,
+ i32x4,
+ u32x4,
+ f32x4,
+ m32x4,
+ i64x4,
+ u64x4,
+ f64x4,
+ m64x4,
+ i128x4,
+ u128x4,
+ m128x4,
+ isizex4,
+ usizex4
);
diff --git a/vendor/packed_simd_2/src/api/cast/v32.rs b/vendor/packed_simd_2/src/api/cast/v32.rs
index 2b254ba0c..4ad1cbf74 100644
--- a/vendor/packed_simd_2/src/api/cast/v32.rs
+++ b/vendor/packed_simd_2/src/api/cast/v32.rs
@@ -4,27 +4,129 @@
use crate::*;
impl_from_cast!(
- i8x4[test_v32]: u8x4, m8x4, i16x4, u16x4, m16x4, i32x4, u32x4, f32x4, m32x4,
- i64x4, u64x4, f64x4, m64x4, i128x4, u128x4, m128x4, isizex4, usizex4, msizex4
+ i8x4[test_v32]: u8x4,
+ m8x4,
+ i16x4,
+ u16x4,
+ m16x4,
+ i32x4,
+ u32x4,
+ f32x4,
+ m32x4,
+ i64x4,
+ u64x4,
+ f64x4,
+ m64x4,
+ i128x4,
+ u128x4,
+ m128x4,
+ isizex4,
+ usizex4,
+ msizex4
);
impl_from_cast!(
- u8x4[test_v32]: i8x4, m8x4, i16x4, u16x4, m16x4, i32x4, u32x4, f32x4, m32x4,
- i64x4, u64x4, f64x4, m64x4, i128x4, u128x4, m128x4, isizex4, usizex4, msizex4
+ u8x4[test_v32]: i8x4,
+ m8x4,
+ i16x4,
+ u16x4,
+ m16x4,
+ i32x4,
+ u32x4,
+ f32x4,
+ m32x4,
+ i64x4,
+ u64x4,
+ f64x4,
+ m64x4,
+ i128x4,
+ u128x4,
+ m128x4,
+ isizex4,
+ usizex4,
+ msizex4
);
impl_from_cast_mask!(
- m8x4[test_v32]: i8x4, u8x4, i16x4, u16x4, m16x4, i32x4, u32x4, f32x4, m32x4,
- i64x4, u64x4, f64x4, m64x4, i128x4, u128x4, m128x4, isizex4, usizex4, msizex4
+ m8x4[test_v32]: i8x4,
+ u8x4,
+ i16x4,
+ u16x4,
+ m16x4,
+ i32x4,
+ u32x4,
+ f32x4,
+ m32x4,
+ i64x4,
+ u64x4,
+ f64x4,
+ m64x4,
+ i128x4,
+ u128x4,
+ m128x4,
+ isizex4,
+ usizex4,
+ msizex4
);
impl_from_cast!(
- i16x2[test_v32]: i8x2, u8x2, m8x2, u16x2, m16x2, i32x2, u32x2, f32x2, m32x2,
- i64x2, u64x2, f64x2, m64x2, i128x2, u128x2, m128x2, isizex2, usizex2, msizex2
+ i16x2[test_v32]: i8x2,
+ u8x2,
+ m8x2,
+ u16x2,
+ m16x2,
+ i32x2,
+ u32x2,
+ f32x2,
+ m32x2,
+ i64x2,
+ u64x2,
+ f64x2,
+ m64x2,
+ i128x2,
+ u128x2,
+ m128x2,
+ isizex2,
+ usizex2,
+ msizex2
);
impl_from_cast!(
- u16x2[test_v32]: i8x2, u8x2, m8x2, i16x2, m16x2, i32x2, u32x2, f32x2, m32x2,
- i64x2, u64x2, f64x2, m64x2, i128x2, u128x2, m128x2, isizex2, usizex2, msizex2
+ u16x2[test_v32]: i8x2,
+ u8x2,
+ m8x2,
+ i16x2,
+ m16x2,
+ i32x2,
+ u32x2,
+ f32x2,
+ m32x2,
+ i64x2,
+ u64x2,
+ f64x2,
+ m64x2,
+ i128x2,
+ u128x2,
+ m128x2,
+ isizex2,
+ usizex2,
+ msizex2
);
impl_from_cast_mask!(
- m16x2[test_v32]: i8x2, u8x2, m8x2, i16x2, u16x2, i32x2, u32x2, f32x2, m32x2,
- i64x2, u64x2, f64x2, m64x2, i128x2, u128x2, m128x2, isizex2, usizex2, msizex2
+ m16x2[test_v32]: i8x2,
+ u8x2,
+ m8x2,
+ i16x2,
+ u16x2,
+ i32x2,
+ u32x2,
+ f32x2,
+ m32x2,
+ i64x2,
+ u64x2,
+ f64x2,
+ m64x2,
+ i128x2,
+ u128x2,
+ m128x2,
+ isizex2,
+ usizex2,
+ msizex2
);
diff --git a/vendor/packed_simd_2/src/api/cast/v512.rs b/vendor/packed_simd_2/src/api/cast/v512.rs
index 5a10ab066..b64605045 100644
--- a/vendor/packed_simd_2/src/api/cast/v512.rs
+++ b/vendor/packed_simd_2/src/api/cast/v512.rs
@@ -11,58 +11,199 @@ impl_from_cast!(i16x32[test_v512]: i8x32, u8x32, m8x32, u16x32, m16x32);
impl_from_cast!(u16x32[test_v512]: i8x32, u8x32, m8x32, i16x32, m16x32);
impl_from_cast_mask!(m16x32[test_v512]: i8x32, u8x32, m8x32, i16x32, u16x32);
-impl_from_cast!(
- i32x16[test_v512]: i8x16, u8x16, m8x16, i16x16, u16x16, m16x16, u32x16, f32x16, m32x16
-);
-impl_from_cast!(
- u32x16[test_v512]: i8x16, u8x16, m8x16, i16x16, u16x16, m16x16, i32x16, f32x16, m32x16
-);
-impl_from_cast!(
- f32x16[test_v512]: i8x16, u8x16, m8x16, i16x16, u16x16, m16x16, i32x16, u32x16, m32x16
-);
-impl_from_cast_mask!(
- m32x16[test_v512]: i8x16, u8x16, m8x16, i16x16, u16x16, m16x16, i32x16, u32x16, f32x16
-);
+impl_from_cast!(i32x16[test_v512]: i8x16, u8x16, m8x16, i16x16, u16x16, m16x16, u32x16, f32x16, m32x16);
+impl_from_cast!(u32x16[test_v512]: i8x16, u8x16, m8x16, i16x16, u16x16, m16x16, i32x16, f32x16, m32x16);
+impl_from_cast!(f32x16[test_v512]: i8x16, u8x16, m8x16, i16x16, u16x16, m16x16, i32x16, u32x16, m32x16);
+impl_from_cast_mask!(m32x16[test_v512]: i8x16, u8x16, m8x16, i16x16, u16x16, m16x16, i32x16, u32x16, f32x16);
impl_from_cast!(
- i64x8[test_v512]: i8x8, u8x8, m8x8, i16x8, u16x8, m16x8, i32x8, u32x8, f32x8, m32x8,
- u64x8, f64x8, m64x8, isizex8, usizex8, msizex8
+ i64x8[test_v512]: i8x8,
+ u8x8,
+ m8x8,
+ i16x8,
+ u16x8,
+ m16x8,
+ i32x8,
+ u32x8,
+ f32x8,
+ m32x8,
+ u64x8,
+ f64x8,
+ m64x8,
+ isizex8,
+ usizex8,
+ msizex8
);
impl_from_cast!(
- u64x8[test_v512]: i8x8, u8x8, m8x8, i16x8, u16x8, m16x8, i32x8, u32x8, f32x8, m32x8,
- i64x8, f64x8, m64x8, isizex8, usizex8, msizex8
+ u64x8[test_v512]: i8x8,
+ u8x8,
+ m8x8,
+ i16x8,
+ u16x8,
+ m16x8,
+ i32x8,
+ u32x8,
+ f32x8,
+ m32x8,
+ i64x8,
+ f64x8,
+ m64x8,
+ isizex8,
+ usizex8,
+ msizex8
);
impl_from_cast!(
- f64x8[test_v512]: i8x8, u8x8, m8x8, i16x8, u16x8, m16x8, i32x8, u32x8, f32x8, m32x8,
- i64x8, u64x8, m64x8, isizex8, usizex8, msizex8
+ f64x8[test_v512]: i8x8,
+ u8x8,
+ m8x8,
+ i16x8,
+ u16x8,
+ m16x8,
+ i32x8,
+ u32x8,
+ f32x8,
+ m32x8,
+ i64x8,
+ u64x8,
+ m64x8,
+ isizex8,
+ usizex8,
+ msizex8
);
impl_from_cast_mask!(
- m64x8[test_v512]: i8x8, u8x8, m8x8, i16x8, u16x8, m16x8, i32x8, u32x8, f32x8, m32x8,
- i64x8, u64x8, f64x8, isizex8, usizex8, msizex8
+ m64x8[test_v512]: i8x8,
+ u8x8,
+ m8x8,
+ i16x8,
+ u16x8,
+ m16x8,
+ i32x8,
+ u32x8,
+ f32x8,
+ m32x8,
+ i64x8,
+ u64x8,
+ f64x8,
+ isizex8,
+ usizex8,
+ msizex8
);
impl_from_cast!(
- i128x4[test_v512]: i8x4, u8x4, m8x4, i16x4, u16x4, m16x4, i32x4, u32x4, f32x4, m32x4,
- i64x4, u64x4, f64x4, m64x4, u128x4, m128x4, isizex4, usizex4, msizex4
+ i128x4[test_v512]: i8x4,
+ u8x4,
+ m8x4,
+ i16x4,
+ u16x4,
+ m16x4,
+ i32x4,
+ u32x4,
+ f32x4,
+ m32x4,
+ i64x4,
+ u64x4,
+ f64x4,
+ m64x4,
+ u128x4,
+ m128x4,
+ isizex4,
+ usizex4,
+ msizex4
);
impl_from_cast!(
- u128x4[test_v512]: i8x4, u8x4, m8x4, i16x4, u16x4, m16x4, i32x4, u32x4, f32x4, m32x4,
- i64x4, u64x4, f64x4, m64x4, i128x4, m128x4, isizex4, usizex4, msizex4
+ u128x4[test_v512]: i8x4,
+ u8x4,
+ m8x4,
+ i16x4,
+ u16x4,
+ m16x4,
+ i32x4,
+ u32x4,
+ f32x4,
+ m32x4,
+ i64x4,
+ u64x4,
+ f64x4,
+ m64x4,
+ i128x4,
+ m128x4,
+ isizex4,
+ usizex4,
+ msizex4
);
impl_from_cast_mask!(
- m128x4[test_v512]: i8x4, u8x4, m8x4, i16x4, u16x4, m16x4, i32x4, u32x4, f32x4, m32x4,
- i64x4, u64x4, m64x4, f64x4, i128x4, u128x4, isizex4, usizex4, msizex4
+ m128x4[test_v512]: i8x4,
+ u8x4,
+ m8x4,
+ i16x4,
+ u16x4,
+ m16x4,
+ i32x4,
+ u32x4,
+ f32x4,
+ m32x4,
+ i64x4,
+ u64x4,
+ m64x4,
+ f64x4,
+ i128x4,
+ u128x4,
+ isizex4,
+ usizex4,
+ msizex4
);
impl_from_cast!(
- isizex8[test_v512]: i8x8, u8x8, m8x8, i16x8, u16x8, m16x8, i32x8, u32x8, f32x8, m32x8,
- i64x8, u64x8, f64x8, m64x8, usizex8, msizex8
+ isizex8[test_v512]: i8x8,
+ u8x8,
+ m8x8,
+ i16x8,
+ u16x8,
+ m16x8,
+ i32x8,
+ u32x8,
+ f32x8,
+ m32x8,
+ i64x8,
+ u64x8,
+ f64x8,
+ m64x8,
+ usizex8,
+ msizex8
);
impl_from_cast!(
- usizex8[test_v512]: i8x8, u8x8, m8x8, i16x8, u16x8, m16x8, i32x8, u32x8, f32x8, m32x8,
- i64x8, u64x8, f64x8, m64x8, isizex8, msizex8
+ usizex8[test_v512]: i8x8,
+ u8x8,
+ m8x8,
+ i16x8,
+ u16x8,
+ m16x8,
+ i32x8,
+ u32x8,
+ f32x8,
+ m32x8,
+ i64x8,
+ u64x8,
+ f64x8,
+ m64x8,
+ isizex8,
+ msizex8
);
impl_from_cast_mask!(
- msizex8[test_v512]: i8x8, u8x8, m8x8, i16x8, u16x8, m16x8, i32x8, u32x8, f32x8, m32x8,
- i64x8, u64x8, f64x8, m64x8, isizex8, usizex8
+ msizex8[test_v512]: i8x8,
+ u8x8,
+ m8x8,
+ i16x8,
+ u16x8,
+ m16x8,
+ i32x8,
+ u32x8,
+ f32x8,
+ m32x8,
+ i64x8,
+ u64x8,
+ f64x8,
+ m64x8,
+ isizex8,
+ usizex8
);
diff --git a/vendor/packed_simd_2/src/api/cast/v64.rs b/vendor/packed_simd_2/src/api/cast/v64.rs
index 192a4638a..b23d1a491 100644
--- a/vendor/packed_simd_2/src/api/cast/v64.rs
+++ b/vendor/packed_simd_2/src/api/cast/v64.rs
@@ -4,44 +4,205 @@
use crate::*;
impl_from_cast!(
- i8x8[test_v64]: u8x8, m8x8, i16x8, u16x8, m16x8, i32x8, u32x8, f32x8, m32x8,
- i64x8, u64x8, f64x8, m64x8, isizex8, usizex8, msizex8
+ i8x8[test_v64]: u8x8,
+ m8x8,
+ i16x8,
+ u16x8,
+ m16x8,
+ i32x8,
+ u32x8,
+ f32x8,
+ m32x8,
+ i64x8,
+ u64x8,
+ f64x8,
+ m64x8,
+ isizex8,
+ usizex8,
+ msizex8
);
impl_from_cast!(
- u8x8[test_v64]: i8x8, m8x8, i16x8, u16x8, m16x8, i32x8, u32x8, f32x8, m32x8,
- i64x8, u64x8, f64x8, m64x8, isizex8, usizex8, msizex8
+ u8x8[test_v64]: i8x8,
+ m8x8,
+ i16x8,
+ u16x8,
+ m16x8,
+ i32x8,
+ u32x8,
+ f32x8,
+ m32x8,
+ i64x8,
+ u64x8,
+ f64x8,
+ m64x8,
+ isizex8,
+ usizex8,
+ msizex8
);
impl_from_cast_mask!(
- m8x8[test_v64]: i8x8, u8x8, i16x8, u16x8, m16x8, i32x8, u32x8, f32x8, m32x8,
- i64x8, u64x8, f64x8, m64x8, isizex8, usizex8, msizex8
+ m8x8[test_v64]: i8x8,
+ u8x8,
+ i16x8,
+ u16x8,
+ m16x8,
+ i32x8,
+ u32x8,
+ f32x8,
+ m32x8,
+ i64x8,
+ u64x8,
+ f64x8,
+ m64x8,
+ isizex8,
+ usizex8,
+ msizex8
);
impl_from_cast!(
- i16x4[test_v64]: i8x4, u8x4, m8x4, u16x4, m16x4, i32x4, u32x4, f32x4, m32x4,
- i64x4, u64x4, f64x4, m64x4, i128x4, u128x4, m128x4, isizex4, usizex4, msizex4
+ i16x4[test_v64]: i8x4,
+ u8x4,
+ m8x4,
+ u16x4,
+ m16x4,
+ i32x4,
+ u32x4,
+ f32x4,
+ m32x4,
+ i64x4,
+ u64x4,
+ f64x4,
+ m64x4,
+ i128x4,
+ u128x4,
+ m128x4,
+ isizex4,
+ usizex4,
+ msizex4
);
impl_from_cast!(
- u16x4[test_v64]: i8x4, u8x4, m8x4, i16x4, m16x4, i32x4, u32x4, f32x4, m32x4,
- i64x4, u64x4, f64x4, m64x4, i128x4, u128x4, m128x4, isizex4, usizex4, msizex4
+ u16x4[test_v64]: i8x4,
+ u8x4,
+ m8x4,
+ i16x4,
+ m16x4,
+ i32x4,
+ u32x4,
+ f32x4,
+ m32x4,
+ i64x4,
+ u64x4,
+ f64x4,
+ m64x4,
+ i128x4,
+ u128x4,
+ m128x4,
+ isizex4,
+ usizex4,
+ msizex4
);
impl_from_cast_mask!(
- m16x4[test_v64]: i8x4, u8x4, m8x4, i16x4, u16x4, i32x4, u32x4, f32x4, m32x4,
- i64x4, u64x4, f64x4, m64x4, i128x4, u128x4, m128x4, isizex4, usizex4, msizex4
+ m16x4[test_v64]: i8x4,
+ u8x4,
+ m8x4,
+ i16x4,
+ u16x4,
+ i32x4,
+ u32x4,
+ f32x4,
+ m32x4,
+ i64x4,
+ u64x4,
+ f64x4,
+ m64x4,
+ i128x4,
+ u128x4,
+ m128x4,
+ isizex4,
+ usizex4,
+ msizex4
);
impl_from_cast!(
- i32x2[test_v64]: i8x2, u8x2, m8x2, i16x2, u16x2, m16x2, u32x2, f32x2, m32x2,
- i64x2, u64x2, f64x2, m64x2, i128x2, u128x2, m128x2, isizex2, usizex2, msizex2
+ i32x2[test_v64]: i8x2,
+ u8x2,
+ m8x2,
+ i16x2,
+ u16x2,
+ m16x2,
+ u32x2,
+ f32x2,
+ m32x2,
+ i64x2,
+ u64x2,
+ f64x2,
+ m64x2,
+ i128x2,
+ u128x2,
+ m128x2,
+ isizex2,
+ usizex2,
+ msizex2
);
impl_from_cast!(
- u32x2[test_v64]: i8x2, u8x2, m8x2, i16x2, u16x2, m16x2, i32x2, f32x2, m32x2,
- i64x2, u64x2, f64x2, m64x2, i128x2, u128x2, m128x2, isizex2, usizex2, msizex2
+ u32x2[test_v64]: i8x2,
+ u8x2,
+ m8x2,
+ i16x2,
+ u16x2,
+ m16x2,
+ i32x2,
+ f32x2,
+ m32x2,
+ i64x2,
+ u64x2,
+ f64x2,
+ m64x2,
+ i128x2,
+ u128x2,
+ m128x2,
+ isizex2,
+ usizex2,
+ msizex2
);
impl_from_cast!(
- f32x2[test_v64]: i8x2, u8x2, m8x2, i16x2, u16x2, m16x2, i32x2, u32x2, m32x2,
- i64x2, u64x2, f64x2, m64x2, i128x2, u128x2, m128x2, isizex2, usizex2, msizex2
+ f32x2[test_v64]: i8x2,
+ u8x2,
+ m8x2,
+ i16x2,
+ u16x2,
+ m16x2,
+ i32x2,
+ u32x2,
+ m32x2,
+ i64x2,
+ u64x2,
+ f64x2,
+ m64x2,
+ i128x2,
+ u128x2,
+ m128x2,
+ isizex2,
+ usizex2,
+ msizex2
);
impl_from_cast_mask!(
- m32x2[test_v64]: i8x2, u8x2, m8x2, i16x2, u16x2, m16x2, i32x2, u32x2, f32x2,
- i64x2, u64x2, f64x2, m64x2, i128x2, u128x2, m128x2, isizex2, usizex2, msizex2
+ m32x2[test_v64]: i8x2,
+ u8x2,
+ m8x2,
+ i16x2,
+ u16x2,
+ m16x2,
+ i32x2,
+ u32x2,
+ f32x2,
+ i64x2,
+ u64x2,
+ f64x2,
+ m64x2,
+ i128x2,
+ u128x2,
+ m128x2,
+ isizex2,
+ usizex2,
+ msizex2
);
diff --git a/vendor/packed_simd_2/src/api/cmp/partial_eq.rs b/vendor/packed_simd_2/src/api/cmp/partial_eq.rs
index 1712a0de5..d69dd4742 100644
--- a/vendor/packed_simd_2/src/api/cmp/partial_eq.rs
+++ b/vendor/packed_simd_2/src/api/cmp/partial_eq.rs
@@ -21,9 +21,7 @@ macro_rules! impl_cmp_partial_eq {
// FIXME: https://github.com/rust-lang-nursery/rust-clippy/issues/2892
#[allow(clippy::partialeq_ne_impl)]
- impl crate::cmp::PartialEq<LexicographicallyOrdered<$id>>
- for LexicographicallyOrdered<$id>
- {
+ impl crate::cmp::PartialEq<LexicographicallyOrdered<$id>> for LexicographicallyOrdered<$id> {
#[inline]
fn eq(&self, other: &Self) -> bool {
self.0 == other.0
diff --git a/vendor/packed_simd_2/src/api/cmp/partial_ord.rs b/vendor/packed_simd_2/src/api/cmp/partial_ord.rs
index a2292918b..76ed9ebe4 100644
--- a/vendor/packed_simd_2/src/api/cmp/partial_ord.rs
+++ b/vendor/packed_simd_2/src/api/cmp/partial_ord.rs
@@ -12,13 +12,9 @@ macro_rules! impl_cmp_partial_ord {
}
}
- impl crate::cmp::PartialOrd<LexicographicallyOrdered<$id>>
- for LexicographicallyOrdered<$id>
- {
+ impl crate::cmp::PartialOrd<LexicographicallyOrdered<$id>> for LexicographicallyOrdered<$id> {
#[inline]
- fn partial_cmp(
- &self, other: &Self,
- ) -> Option<crate::cmp::Ordering> {
+ fn partial_cmp(&self, other: &Self) -> Option<crate::cmp::Ordering> {
if PartialEq::eq(self, other) {
Some(crate::cmp::Ordering::Equal)
} else if PartialOrd::lt(self, other) {
diff --git a/vendor/packed_simd_2/src/api/fmt/binary.rs b/vendor/packed_simd_2/src/api/fmt/binary.rs
index b60769082..91c082555 100644
--- a/vendor/packed_simd_2/src/api/fmt/binary.rs
+++ b/vendor/packed_simd_2/src/api/fmt/binary.rs
@@ -4,9 +4,7 @@ macro_rules! impl_fmt_binary {
([$elem_ty:ident; $elem_count:expr]: $id:ident | $test_tt:tt) => {
impl crate::fmt::Binary for $id {
#[allow(clippy::missing_inline_in_public_items)]
- fn fmt(
- &self, f: &mut crate::fmt::Formatter<'_>,
- ) -> crate::fmt::Result {
+ fn fmt(&self, f: &mut crate::fmt::Formatter<'_>) -> crate::fmt::Result {
write!(f, "{}(", stringify!($id))?;
for i in 0..$elem_count {
if i > 0 {
diff --git a/vendor/packed_simd_2/src/api/fmt/debug.rs b/vendor/packed_simd_2/src/api/fmt/debug.rs
index ad0b8a59a..1e209b3bf 100644
--- a/vendor/packed_simd_2/src/api/fmt/debug.rs
+++ b/vendor/packed_simd_2/src/api/fmt/debug.rs
@@ -44,9 +44,7 @@ macro_rules! impl_fmt_debug {
([$elem_ty:ty; $elem_count:expr]: $id:ident | $test_tt:tt) => {
impl crate::fmt::Debug for $id {
#[allow(clippy::missing_inline_in_public_items)]
- fn fmt(
- &self, f: &mut crate::fmt::Formatter<'_>,
- ) -> crate::fmt::Result {
+ fn fmt(&self, f: &mut crate::fmt::Formatter<'_>) -> crate::fmt::Result {
write!(f, "{}(", stringify!($id))?;
for i in 0..$elem_count {
if i > 0 {
diff --git a/vendor/packed_simd_2/src/api/fmt/lower_hex.rs b/vendor/packed_simd_2/src/api/fmt/lower_hex.rs
index 5a7aa14b5..8f11d3119 100644
--- a/vendor/packed_simd_2/src/api/fmt/lower_hex.rs
+++ b/vendor/packed_simd_2/src/api/fmt/lower_hex.rs
@@ -4,9 +4,7 @@ macro_rules! impl_fmt_lower_hex {
([$elem_ty:ident; $elem_count:expr]: $id:ident | $test_tt:tt) => {
impl crate::fmt::LowerHex for $id {
#[allow(clippy::missing_inline_in_public_items)]
- fn fmt(
- &self, f: &mut crate::fmt::Formatter<'_>,
- ) -> crate::fmt::Result {
+ fn fmt(&self, f: &mut crate::fmt::Formatter<'_>) -> crate::fmt::Result {
write!(f, "{}(", stringify!($id))?;
for i in 0..$elem_count {
if i > 0 {
diff --git a/vendor/packed_simd_2/src/api/fmt/octal.rs b/vendor/packed_simd_2/src/api/fmt/octal.rs
index 83ac8abc7..e708e094c 100644
--- a/vendor/packed_simd_2/src/api/fmt/octal.rs
+++ b/vendor/packed_simd_2/src/api/fmt/octal.rs
@@ -4,9 +4,7 @@ macro_rules! impl_fmt_octal {
([$elem_ty:ident; $elem_count:expr]: $id:ident | $test_tt:tt) => {
impl crate::fmt::Octal for $id {
#[allow(clippy::missing_inline_in_public_items)]
- fn fmt(
- &self, f: &mut crate::fmt::Formatter<'_>,
- ) -> crate::fmt::Result {
+ fn fmt(&self, f: &mut crate::fmt::Formatter<'_>) -> crate::fmt::Result {
write!(f, "{}(", stringify!($id))?;
for i in 0..$elem_count {
if i > 0 {
diff --git a/vendor/packed_simd_2/src/api/fmt/upper_hex.rs b/vendor/packed_simd_2/src/api/fmt/upper_hex.rs
index aa88f673a..5ad455706 100644
--- a/vendor/packed_simd_2/src/api/fmt/upper_hex.rs
+++ b/vendor/packed_simd_2/src/api/fmt/upper_hex.rs
@@ -4,9 +4,7 @@ macro_rules! impl_fmt_upper_hex {
([$elem_ty:ident; $elem_count:expr]: $id:ident | $test_tt:tt) => {
impl crate::fmt::UpperHex for $id {
#[allow(clippy::missing_inline_in_public_items)]
- fn fmt(
- &self, f: &mut crate::fmt::Formatter<'_>,
- ) -> crate::fmt::Result {
+ fn fmt(&self, f: &mut crate::fmt::Formatter<'_>) -> crate::fmt::Result {
write!(f, "{}(", stringify!($id))?;
for i in 0..$elem_count {
if i > 0 {
diff --git a/vendor/packed_simd_2/src/api/into_bits.rs b/vendor/packed_simd_2/src/api/into_bits.rs
index f2cc1bae5..32b6d2ddc 100644
--- a/vendor/packed_simd_2/src/api/into_bits.rs
+++ b/vendor/packed_simd_2/src/api/into_bits.rs
@@ -19,9 +19,7 @@ where
{
#[inline]
fn into_bits(self) -> U {
- debug_assert!(
- crate::mem::size_of::<Self>() == crate::mem::size_of::<U>()
- );
+ debug_assert!(crate::mem::size_of::<Self>() == crate::mem::size_of::<U>());
U::from_bits(self)
}
}
diff --git a/vendor/packed_simd_2/src/api/into_bits/arch_specific.rs b/vendor/packed_simd_2/src/api/into_bits/arch_specific.rs
index fee614005..bfac91557 100644
--- a/vendor/packed_simd_2/src/api/into_bits/arch_specific.rs
+++ b/vendor/packed_simd_2/src/api/into_bits/arch_specific.rs
@@ -84,15 +84,48 @@ macro_rules! impl_arch {
// FIXME: 64-bit single element types
// FIXME: arm/aarch float16x4_t missing
impl_arch!(
- [arm["arm"]: int8x8_t, uint8x8_t, poly8x8_t, int16x4_t, uint16x4_t,
- poly16x4_t, int32x2_t, uint32x2_t, float32x2_t, int64x1_t,
- uint64x1_t],
- [aarch64["aarch64"]: int8x8_t, uint8x8_t, poly8x8_t, int16x4_t, uint16x4_t,
- poly16x4_t, int32x2_t, uint32x2_t, float32x2_t, int64x1_t, uint64x1_t,
- float64x1_t] |
- from: i8x8, u8x8, m8x8, i16x4, u16x4, m16x4, i32x2, u32x2, f32x2, m32x2 |
- into: i8x8, u8x8, i16x4, u16x4, i32x2, u32x2, f32x2 |
- test: test_v64
+ [
+ arm["arm"]: int8x8_t,
+ uint8x8_t,
+ poly8x8_t,
+ int16x4_t,
+ uint16x4_t,
+ poly16x4_t,
+ int32x2_t,
+ uint32x2_t,
+ float32x2_t,
+ int64x1_t,
+ uint64x1_t
+ ],
+ [
+ aarch64["aarch64"]: int8x8_t,
+ uint8x8_t,
+ poly8x8_t,
+ int16x4_t,
+ uint16x4_t,
+ poly16x4_t,
+ int32x2_t,
+ uint32x2_t,
+ float32x2_t,
+ int64x1_t,
+ uint64x1_t,
+ float64x1_t
+ ] | from: i8x8,
+ u8x8,
+ m8x8,
+ i16x4,
+ u16x4,
+ m16x4,
+ i32x2,
+ u32x2,
+ f32x2,
+ m32x2 | into: i8x8,
+ u8x8,
+ i16x4,
+ u16x4,
+ i32x2,
+ u32x2,
+ f32x2 | test: test_v64
);
////////////////////////////////////////////////////////////////////////////////
@@ -108,67 +141,169 @@ impl_arch!(
// FIXME: ppc64 vector_unsigned___int128 missing
impl_arch!(
[x86["x86"]: __m128, __m128i, __m128d],
- [x86_64["x86_64"]: __m128, __m128i, __m128d],
- [arm["arm"]: int8x16_t, uint8x16_t, poly8x16_t, int16x8_t, uint16x8_t,
- poly16x8_t, int32x4_t, uint32x4_t, float32x4_t, int64x2_t, uint64x2_t],
- [aarch64["aarch64"]: int8x16_t, uint8x16_t, poly8x16_t, int16x8_t,
- uint16x8_t, poly16x8_t, int32x4_t, uint32x4_t, float32x4_t, int64x2_t,
- uint64x2_t, float64x2_t],
- [powerpc["powerpc"]: vector_signed_char, vector_unsigned_char,
- vector_signed_short, vector_unsigned_short, vector_signed_int,
- vector_unsigned_int, vector_float],
- [powerpc64["powerpc64"]: vector_signed_char, vector_unsigned_char,
- vector_signed_short, vector_unsigned_short, vector_signed_int,
- vector_unsigned_int, vector_float, vector_signed_long,
- vector_unsigned_long, vector_double] |
- from: i8x16, u8x16, m8x16, i16x8, u16x8, m16x8, i32x4, u32x4, f32x4, m32x4,
- i64x2, u64x2, f64x2, m64x2, i128x1, u128x1, m128x1 |
- into: i8x16, u8x16, i16x8, u16x8, i32x4, u32x4, f32x4, i64x2, u64x2, f64x2,
- i128x1, u128x1 |
- test: test_v128
+ [x86_64["x86_64"]: __m128, __m128i, __m128d],
+ [
+ arm["arm"]: int8x16_t,
+ uint8x16_t,
+ poly8x16_t,
+ int16x8_t,
+ uint16x8_t,
+ poly16x8_t,
+ int32x4_t,
+ uint32x4_t,
+ float32x4_t,
+ int64x2_t,
+ uint64x2_t
+ ],
+ [
+ aarch64["aarch64"]: int8x16_t,
+ uint8x16_t,
+ poly8x16_t,
+ int16x8_t,
+ uint16x8_t,
+ poly16x8_t,
+ int32x4_t,
+ uint32x4_t,
+ float32x4_t,
+ int64x2_t,
+ uint64x2_t,
+ float64x2_t
+ ],
+ [
+ powerpc["powerpc"]: vector_signed_char,
+ vector_unsigned_char,
+ vector_signed_short,
+ vector_unsigned_short,
+ vector_signed_int,
+ vector_unsigned_int,
+ vector_float
+ ],
+ [
+ powerpc64["powerpc64"]: vector_signed_char,
+ vector_unsigned_char,
+ vector_signed_short,
+ vector_unsigned_short,
+ vector_signed_int,
+ vector_unsigned_int,
+ vector_float,
+ vector_signed_long,
+ vector_unsigned_long,
+ vector_double
+ ] | from: i8x16,
+ u8x16,
+ m8x16,
+ i16x8,
+ u16x8,
+ m16x8,
+ i32x4,
+ u32x4,
+ f32x4,
+ m32x4,
+ i64x2,
+ u64x2,
+ f64x2,
+ m64x2,
+ i128x1,
+ u128x1,
+ m128x1 | into: i8x16,
+ u8x16,
+ i16x8,
+ u16x8,
+ i32x4,
+ u32x4,
+ f32x4,
+ i64x2,
+ u64x2,
+ f64x2,
+ i128x1,
+ u128x1 | test: test_v128
);
impl_arch!(
[powerpc["powerpc"]: vector_bool_char],
- [powerpc64["powerpc64"]: vector_bool_char] |
- from: m8x16, m16x8, m32x4, m64x2, m128x1 |
- into: i8x16, u8x16, i16x8, u16x8, i32x4, u32x4, f32x4,
- i64x2, u64x2, f64x2, i128x1, u128x1,
+ [powerpc64["powerpc64"]: vector_bool_char] | from: m8x16,
+ m16x8,
+ m32x4,
+ m64x2,
+ m128x1 | into: i8x16,
+ u8x16,
+ i16x8,
+ u16x8,
+ i32x4,
+ u32x4,
+ f32x4,
+ i64x2,
+ u64x2,
+ f64x2,
+ i128x1,
+ u128x1,
// Masks:
- m8x16 |
- test: test_v128
+ m8x16 | test: test_v128
);
impl_arch!(
[powerpc["powerpc"]: vector_bool_short],
- [powerpc64["powerpc64"]: vector_bool_short] |
- from: m16x8, m32x4, m64x2, m128x1 |
- into: i8x16, u8x16, i16x8, u16x8, i32x4, u32x4, f32x4,
- i64x2, u64x2, f64x2, i128x1, u128x1,
+ [powerpc64["powerpc64"]: vector_bool_short] | from: m16x8,
+ m32x4,
+ m64x2,
+ m128x1 | into: i8x16,
+ u8x16,
+ i16x8,
+ u16x8,
+ i32x4,
+ u32x4,
+ f32x4,
+ i64x2,
+ u64x2,
+ f64x2,
+ i128x1,
+ u128x1,
// Masks:
- m8x16, m16x8 |
- test: test_v128
+ m8x16,
+ m16x8 | test: test_v128
);
impl_arch!(
[powerpc["powerpc"]: vector_bool_int],
- [powerpc64["powerpc64"]: vector_bool_int] |
- from: m32x4, m64x2, m128x1 |
- into: i8x16, u8x16, i16x8, u16x8, i32x4, u32x4, f32x4,
- i64x2, u64x2, f64x2, i128x1, u128x1,
+ [powerpc64["powerpc64"]: vector_bool_int] | from: m32x4,
+ m64x2,
+ m128x1 | into: i8x16,
+ u8x16,
+ i16x8,
+ u16x8,
+ i32x4,
+ u32x4,
+ f32x4,
+ i64x2,
+ u64x2,
+ f64x2,
+ i128x1,
+ u128x1,
// Masks:
- m8x16, m16x8, m32x4 |
- test: test_v128
+ m8x16,
+ m16x8,
+ m32x4 | test: test_v128
);
impl_arch!(
- [powerpc64["powerpc64"]: vector_bool_long] |
- from: m64x2, m128x1 |
- into: i8x16, u8x16, i16x8, u16x8, i32x4, u32x4, f32x4,
- i64x2, u64x2, f64x2, i128x1, u128x1,
+ [powerpc64["powerpc64"]: vector_bool_long] | from: m64x2,
+ m128x1 | into: i8x16,
+ u8x16,
+ i16x8,
+ u16x8,
+ i32x4,
+ u32x4,
+ f32x4,
+ i64x2,
+ u64x2,
+ f64x2,
+ i128x1,
+ u128x1,
// Masks:
- m8x16, m16x8, m32x4, m64x2 |
- test: test_v128
+ m8x16,
+ m16x8,
+ m32x4,
+ m64x2 | test: test_v128
);
////////////////////////////////////////////////////////////////////////////////
@@ -176,13 +311,34 @@ impl_arch!(
impl_arch!(
[x86["x86"]: __m256, __m256i, __m256d],
- [x86_64["x86_64"]: __m256, __m256i, __m256d] |
- from: i8x32, u8x32, m8x32, i16x16, u16x16, m16x16,
- i32x8, u32x8, f32x8, m32x8,
- i64x4, u64x4, f64x4, m64x4, i128x2, u128x2, m128x2 |
- into: i8x32, u8x32, i16x16, u16x16, i32x8, u32x8, f32x8,
- i64x4, u64x4, f64x4, i128x2, u128x2 |
- test: test_v256
+ [x86_64["x86_64"]: __m256, __m256i, __m256d] | from: i8x32,
+ u8x32,
+ m8x32,
+ i16x16,
+ u16x16,
+ m16x16,
+ i32x8,
+ u32x8,
+ f32x8,
+ m32x8,
+ i64x4,
+ u64x4,
+ f64x4,
+ m64x4,
+ i128x2,
+ u128x2,
+ m128x2 | into: i8x32,
+ u8x32,
+ i16x16,
+ u16x16,
+ i32x8,
+ u32x8,
+ f32x8,
+ i64x4,
+ u64x4,
+ f64x4,
+ i128x2,
+ u128x2 | test: test_v256
);
////////////////////////////////////////////////////////////////////////////////
diff --git a/vendor/packed_simd_2/src/api/into_bits/macros.rs b/vendor/packed_simd_2/src/api/into_bits/macros.rs
index 8cec5b004..265ab34ae 100644
--- a/vendor/packed_simd_2/src/api/into_bits/macros.rs
+++ b/vendor/packed_simd_2/src/api/into_bits/macros.rs
@@ -24,7 +24,7 @@ macro_rules! impl_from_bits_ {
use crate::IntoBits;
assert_eq!(size_of::<$id>(),
size_of::<$from_ty>());
- // This is safe becasue we never create a reference to
+ // This is safe because we never create a reference to
// uninitialized memory:
let a: $from_ty = unsafe { zeroed() };
diff --git a/vendor/packed_simd_2/src/api/into_bits/v128.rs b/vendor/packed_simd_2/src/api/into_bits/v128.rs
index e32cd7f9f..639c09c2c 100644
--- a/vendor/packed_simd_2/src/api/into_bits/v128.rs
+++ b/vendor/packed_simd_2/src/api/into_bits/v128.rs
@@ -4,25 +4,229 @@
#[allow(unused)] // wasm_bindgen_test
use crate::*;
-impl_from_bits!(i8x16[test_v128]: u8x16, m8x16, i16x8, u16x8, m16x8, i32x4, u32x4, f32x4, m32x4, i64x2, u64x2, f64x2, m64x2, i128x1, u128x1, m128x1);
-impl_from_bits!(u8x16[test_v128]: i8x16, m8x16, i16x8, u16x8, m16x8, i32x4, u32x4, f32x4, m32x4, i64x2, u64x2, f64x2, m64x2, i128x1, u128x1, m128x1);
+impl_from_bits!(
+ i8x16[test_v128]: u8x16,
+ m8x16,
+ i16x8,
+ u16x8,
+ m16x8,
+ i32x4,
+ u32x4,
+ f32x4,
+ m32x4,
+ i64x2,
+ u64x2,
+ f64x2,
+ m64x2,
+ i128x1,
+ u128x1,
+ m128x1
+);
+impl_from_bits!(
+ u8x16[test_v128]: i8x16,
+ m8x16,
+ i16x8,
+ u16x8,
+ m16x8,
+ i32x4,
+ u32x4,
+ f32x4,
+ m32x4,
+ i64x2,
+ u64x2,
+ f64x2,
+ m64x2,
+ i128x1,
+ u128x1,
+ m128x1
+);
impl_from_bits!(m8x16[test_v128]: m16x8, m32x4, m64x2, m128x1);
-impl_from_bits!(i16x8[test_v128]: i8x16, u8x16, m8x16, u16x8, m16x8, i32x4, u32x4, f32x4, m32x4, i64x2, u64x2, f64x2, m64x2, i128x1, u128x1, m128x1);
-impl_from_bits!(u16x8[test_v128]: i8x16, u8x16, m8x16, i16x8, m16x8, i32x4, u32x4, f32x4, m32x4, i64x2, u64x2, f64x2, m64x2, i128x1, u128x1, m128x1);
+impl_from_bits!(
+ i16x8[test_v128]: i8x16,
+ u8x16,
+ m8x16,
+ u16x8,
+ m16x8,
+ i32x4,
+ u32x4,
+ f32x4,
+ m32x4,
+ i64x2,
+ u64x2,
+ f64x2,
+ m64x2,
+ i128x1,
+ u128x1,
+ m128x1
+);
+impl_from_bits!(
+ u16x8[test_v128]: i8x16,
+ u8x16,
+ m8x16,
+ i16x8,
+ m16x8,
+ i32x4,
+ u32x4,
+ f32x4,
+ m32x4,
+ i64x2,
+ u64x2,
+ f64x2,
+ m64x2,
+ i128x1,
+ u128x1,
+ m128x1
+);
impl_from_bits!(m16x8[test_v128]: m32x4, m64x2, m128x1);
-impl_from_bits!(i32x4[test_v128]: i8x16, u8x16, m8x16, i16x8, u16x8, m16x8, u32x4, f32x4, m32x4, i64x2, u64x2, f64x2, m64x2, i128x1, u128x1, m128x1);
-impl_from_bits!(u32x4[test_v128]: i8x16, u8x16, m8x16, i16x8, u16x8, m16x8, i32x4, f32x4, m32x4, i64x2, u64x2, f64x2, m64x2, i128x1, u128x1, m128x1);
-impl_from_bits!(f32x4[test_v128]: i8x16, u8x16, m8x16, i16x8, u16x8, m16x8, i32x4, u32x4, m32x4, i64x2, u64x2, f64x2, m64x2, i128x1, u128x1, m128x1);
+impl_from_bits!(
+ i32x4[test_v128]: i8x16,
+ u8x16,
+ m8x16,
+ i16x8,
+ u16x8,
+ m16x8,
+ u32x4,
+ f32x4,
+ m32x4,
+ i64x2,
+ u64x2,
+ f64x2,
+ m64x2,
+ i128x1,
+ u128x1,
+ m128x1
+);
+impl_from_bits!(
+ u32x4[test_v128]: i8x16,
+ u8x16,
+ m8x16,
+ i16x8,
+ u16x8,
+ m16x8,
+ i32x4,
+ f32x4,
+ m32x4,
+ i64x2,
+ u64x2,
+ f64x2,
+ m64x2,
+ i128x1,
+ u128x1,
+ m128x1
+);
+impl_from_bits!(
+ f32x4[test_v128]: i8x16,
+ u8x16,
+ m8x16,
+ i16x8,
+ u16x8,
+ m16x8,
+ i32x4,
+ u32x4,
+ m32x4,
+ i64x2,
+ u64x2,
+ f64x2,
+ m64x2,
+ i128x1,
+ u128x1,
+ m128x1
+);
impl_from_bits!(m32x4[test_v128]: m64x2, m128x1);
-impl_from_bits!(i64x2[test_v128]: i8x16, u8x16, m8x16, i16x8, u16x8, m16x8, i32x4, u32x4, f32x4, m32x4, u64x2, f64x2, m64x2, i128x1, u128x1, m128x1);
-impl_from_bits!(u64x2[test_v128]: i8x16, u8x16, m8x16, i16x8, u16x8, m16x8, i32x4, u32x4, f32x4, m32x4, i64x2, f64x2, m64x2, i128x1, u128x1, m128x1);
-impl_from_bits!(f64x2[test_v128]: i8x16, u8x16, m8x16, i16x8, u16x8, m16x8, i32x4, u32x4, f32x4, m32x4, i64x2, u64x2, m64x2, i128x1, u128x1, m128x1);
+impl_from_bits!(
+ i64x2[test_v128]: i8x16,
+ u8x16,
+ m8x16,
+ i16x8,
+ u16x8,
+ m16x8,
+ i32x4,
+ u32x4,
+ f32x4,
+ m32x4,
+ u64x2,
+ f64x2,
+ m64x2,
+ i128x1,
+ u128x1,
+ m128x1
+);
+impl_from_bits!(
+ u64x2[test_v128]: i8x16,
+ u8x16,
+ m8x16,
+ i16x8,
+ u16x8,
+ m16x8,
+ i32x4,
+ u32x4,
+ f32x4,
+ m32x4,
+ i64x2,
+ f64x2,
+ m64x2,
+ i128x1,
+ u128x1,
+ m128x1
+);
+impl_from_bits!(
+ f64x2[test_v128]: i8x16,
+ u8x16,
+ m8x16,
+ i16x8,
+ u16x8,
+ m16x8,
+ i32x4,
+ u32x4,
+ f32x4,
+ m32x4,
+ i64x2,
+ u64x2,
+ m64x2,
+ i128x1,
+ u128x1,
+ m128x1
+);
impl_from_bits!(m64x2[test_v128]: m128x1);
-impl_from_bits!(i128x1[test_v128]: i8x16, u8x16, m8x16, i16x8, u16x8, m16x8, i32x4, u32x4, f32x4, m32x4, i64x2, u64x2, f64x2, m64x2, u128x1, m128x1);
-impl_from_bits!(u128x1[test_v128]: i8x16, u8x16, m8x16, i16x8, u16x8, m16x8, i32x4, u32x4, f32x4, m32x4, i64x2, u64x2, f64x2, m64x2, i128x1, m128x1);
-// note: m128x1 cannot be constructed from all the other masks bit patterns in here
-
+impl_from_bits!(
+ i128x1[test_v128]: i8x16,
+ u8x16,
+ m8x16,
+ i16x8,
+ u16x8,
+ m16x8,
+ i32x4,
+ u32x4,
+ f32x4,
+ m32x4,
+ i64x2,
+ u64x2,
+ f64x2,
+ m64x2,
+ u128x1,
+ m128x1
+);
+impl_from_bits!(
+ u128x1[test_v128]: i8x16,
+ u8x16,
+ m8x16,
+ i16x8,
+ u16x8,
+ m16x8,
+ i32x4,
+ u32x4,
+ f32x4,
+ m32x4,
+ i64x2,
+ u64x2,
+ f64x2,
+ m64x2,
+ i128x1,
+ m128x1
+);
+// note: m128x1 cannot be constructed from all the other masks bit patterns in
+// here
diff --git a/vendor/packed_simd_2/src/api/into_bits/v256.rs b/vendor/packed_simd_2/src/api/into_bits/v256.rs
index c4c373e0d..e432bbbc9 100644
--- a/vendor/packed_simd_2/src/api/into_bits/v256.rs
+++ b/vendor/packed_simd_2/src/api/into_bits/v256.rs
@@ -4,24 +4,229 @@
#[allow(unused)] // wasm_bindgen_test
use crate::*;
-impl_from_bits!(i8x32[test_v256]: u8x32, m8x32, i16x16, u16x16, m16x16, i32x8, u32x8, f32x8, m32x8, i64x4, u64x4, f64x4, m64x4, i128x2, u128x2, m128x2);
-impl_from_bits!(u8x32[test_v256]: i8x32, m8x32, i16x16, u16x16, m16x16, i32x8, u32x8, f32x8, m32x8, i64x4, u64x4, f64x4, m64x4, i128x2, u128x2, m128x2);
+impl_from_bits!(
+ i8x32[test_v256]: u8x32,
+ m8x32,
+ i16x16,
+ u16x16,
+ m16x16,
+ i32x8,
+ u32x8,
+ f32x8,
+ m32x8,
+ i64x4,
+ u64x4,
+ f64x4,
+ m64x4,
+ i128x2,
+ u128x2,
+ m128x2
+);
+impl_from_bits!(
+ u8x32[test_v256]: i8x32,
+ m8x32,
+ i16x16,
+ u16x16,
+ m16x16,
+ i32x8,
+ u32x8,
+ f32x8,
+ m32x8,
+ i64x4,
+ u64x4,
+ f64x4,
+ m64x4,
+ i128x2,
+ u128x2,
+ m128x2
+);
impl_from_bits!(m8x32[test_v256]: m16x16, m32x8, m64x4, m128x2);
-impl_from_bits!(i16x16[test_v256]: i8x32, u8x32, m8x32, u16x16, m16x16, i32x8, u32x8, f32x8, m32x8, i64x4, u64x4, f64x4, m64x4, i128x2, u128x2, m128x2);
-impl_from_bits!(u16x16[test_v256]: i8x32, u8x32, m8x32, i16x16, m16x16, i32x8, u32x8, f32x8, m32x8, i64x4, u64x4, f64x4, m64x4, i128x2, u128x2, m128x2);
+impl_from_bits!(
+ i16x16[test_v256]: i8x32,
+ u8x32,
+ m8x32,
+ u16x16,
+ m16x16,
+ i32x8,
+ u32x8,
+ f32x8,
+ m32x8,
+ i64x4,
+ u64x4,
+ f64x4,
+ m64x4,
+ i128x2,
+ u128x2,
+ m128x2
+);
+impl_from_bits!(
+ u16x16[test_v256]: i8x32,
+ u8x32,
+ m8x32,
+ i16x16,
+ m16x16,
+ i32x8,
+ u32x8,
+ f32x8,
+ m32x8,
+ i64x4,
+ u64x4,
+ f64x4,
+ m64x4,
+ i128x2,
+ u128x2,
+ m128x2
+);
impl_from_bits!(m16x16[test_v256]: m32x8, m64x4, m128x2);
-impl_from_bits!(i32x8[test_v256]: i8x32, u8x32, m8x32, i16x16, u16x16, m16x16, u32x8, f32x8, m32x8, i64x4, u64x4, f64x4, m64x4, i128x2, u128x2, m128x2);
-impl_from_bits!(u32x8[test_v256]: i8x32, u8x32, m8x32, i16x16, u16x16, m16x16, i32x8, f32x8, m32x8, i64x4, u64x4, f64x4, m64x4, i128x2, u128x2, m128x2);
-impl_from_bits!(f32x8[test_v256]: i8x32, u8x32, m8x32, i16x16, u16x16, m16x16, i32x8, u32x8, m32x8, i64x4, u64x4, f64x4, m64x4, i128x2, u128x2, m128x2);
+impl_from_bits!(
+ i32x8[test_v256]: i8x32,
+ u8x32,
+ m8x32,
+ i16x16,
+ u16x16,
+ m16x16,
+ u32x8,
+ f32x8,
+ m32x8,
+ i64x4,
+ u64x4,
+ f64x4,
+ m64x4,
+ i128x2,
+ u128x2,
+ m128x2
+);
+impl_from_bits!(
+ u32x8[test_v256]: i8x32,
+ u8x32,
+ m8x32,
+ i16x16,
+ u16x16,
+ m16x16,
+ i32x8,
+ f32x8,
+ m32x8,
+ i64x4,
+ u64x4,
+ f64x4,
+ m64x4,
+ i128x2,
+ u128x2,
+ m128x2
+);
+impl_from_bits!(
+ f32x8[test_v256]: i8x32,
+ u8x32,
+ m8x32,
+ i16x16,
+ u16x16,
+ m16x16,
+ i32x8,
+ u32x8,
+ m32x8,
+ i64x4,
+ u64x4,
+ f64x4,
+ m64x4,
+ i128x2,
+ u128x2,
+ m128x2
+);
impl_from_bits!(m32x8[test_v256]: m64x4, m128x2);
-impl_from_bits!(i64x4[test_v256]: i8x32, u8x32, m8x32, i16x16, u16x16, m16x16, i32x8, u32x8, f32x8, m32x8, u64x4, f64x4, m64x4, i128x2, u128x2, m128x2);
-impl_from_bits!(u64x4[test_v256]: i8x32, u8x32, m8x32, i16x16, u16x16, m16x16, i32x8, u32x8, f32x8, m32x8, i64x4, f64x4, m64x4, i128x2, u128x2, m128x2);
-impl_from_bits!(f64x4[test_v256]: i8x32, u8x32, m8x32, i16x16, u16x16, m16x16, i32x8, u32x8, f32x8, m32x8, i64x4, u64x4, m64x4, i128x2, u128x2, m128x2);
+impl_from_bits!(
+ i64x4[test_v256]: i8x32,
+ u8x32,
+ m8x32,
+ i16x16,
+ u16x16,
+ m16x16,
+ i32x8,
+ u32x8,
+ f32x8,
+ m32x8,
+ u64x4,
+ f64x4,
+ m64x4,
+ i128x2,
+ u128x2,
+ m128x2
+);
+impl_from_bits!(
+ u64x4[test_v256]: i8x32,
+ u8x32,
+ m8x32,
+ i16x16,
+ u16x16,
+ m16x16,
+ i32x8,
+ u32x8,
+ f32x8,
+ m32x8,
+ i64x4,
+ f64x4,
+ m64x4,
+ i128x2,
+ u128x2,
+ m128x2
+);
+impl_from_bits!(
+ f64x4[test_v256]: i8x32,
+ u8x32,
+ m8x32,
+ i16x16,
+ u16x16,
+ m16x16,
+ i32x8,
+ u32x8,
+ f32x8,
+ m32x8,
+ i64x4,
+ u64x4,
+ m64x4,
+ i128x2,
+ u128x2,
+ m128x2
+);
impl_from_bits!(m64x4[test_v256]: m128x2);
-impl_from_bits!(i128x2[test_v256]: i8x32, u8x32, m8x32, i16x16, u16x16, m16x16, i32x8, u32x8, f32x8, m32x8, i64x4, u64x4, f64x4, m64x4, u128x2, m128x2);
-impl_from_bits!(u128x2[test_v256]: i8x32, u8x32, m8x32, i16x16, u16x16, m16x16, i32x8, u32x8, f32x8, m32x8, i64x4, u64x4, f64x4, m64x4, i128x2, m128x2);
-// note: m128x2 cannot be constructed from all the other masks bit patterns in here
+impl_from_bits!(
+ i128x2[test_v256]: i8x32,
+ u8x32,
+ m8x32,
+ i16x16,
+ u16x16,
+ m16x16,
+ i32x8,
+ u32x8,
+ f32x8,
+ m32x8,
+ i64x4,
+ u64x4,
+ f64x4,
+ m64x4,
+ u128x2,
+ m128x2
+);
+impl_from_bits!(
+ u128x2[test_v256]: i8x32,
+ u8x32,
+ m8x32,
+ i16x16,
+ u16x16,
+ m16x16,
+ i32x8,
+ u32x8,
+ f32x8,
+ m32x8,
+ i64x4,
+ u64x4,
+ f64x4,
+ m64x4,
+ i128x2,
+ m128x2
+);
+// note: m128x2 cannot be constructed from all the other masks bit patterns in
+// here
diff --git a/vendor/packed_simd_2/src/api/into_bits/v512.rs b/vendor/packed_simd_2/src/api/into_bits/v512.rs
index 4a771962c..f6e9bb8bf 100644
--- a/vendor/packed_simd_2/src/api/into_bits/v512.rs
+++ b/vendor/packed_simd_2/src/api/into_bits/v512.rs
@@ -4,24 +4,229 @@
#[allow(unused)] // wasm_bindgen_test
use crate::*;
-impl_from_bits!(i8x64[test_v512]: u8x64, m8x64, i16x32, u16x32, m16x32, i32x16, u32x16, f32x16, m32x16, i64x8, u64x8, f64x8, m64x8, i128x4, u128x4, m128x4);
-impl_from_bits!(u8x64[test_v512]: i8x64, m8x64, i16x32, u16x32, m16x32, i32x16, u32x16, f32x16, m32x16, i64x8, u64x8, f64x8, m64x8, i128x4, u128x4, m128x4);
+impl_from_bits!(
+ i8x64[test_v512]: u8x64,
+ m8x64,
+ i16x32,
+ u16x32,
+ m16x32,
+ i32x16,
+ u32x16,
+ f32x16,
+ m32x16,
+ i64x8,
+ u64x8,
+ f64x8,
+ m64x8,
+ i128x4,
+ u128x4,
+ m128x4
+);
+impl_from_bits!(
+ u8x64[test_v512]: i8x64,
+ m8x64,
+ i16x32,
+ u16x32,
+ m16x32,
+ i32x16,
+ u32x16,
+ f32x16,
+ m32x16,
+ i64x8,
+ u64x8,
+ f64x8,
+ m64x8,
+ i128x4,
+ u128x4,
+ m128x4
+);
impl_from_bits!(m8x64[test_v512]: m16x32, m32x16, m64x8, m128x4);
-impl_from_bits!(i16x32[test_v512]: i8x64, u8x64, m8x64, u16x32, m16x32, i32x16, u32x16, f32x16, m32x16, i64x8, u64x8, f64x8, m64x8, i128x4, u128x4, m128x4);
-impl_from_bits!(u16x32[test_v512]: i8x64, u8x64, m8x64, i16x32, m16x32, i32x16, u32x16, f32x16, m32x16, i64x8, u64x8, f64x8, m64x8, i128x4, u128x4, m128x4);
+impl_from_bits!(
+ i16x32[test_v512]: i8x64,
+ u8x64,
+ m8x64,
+ u16x32,
+ m16x32,
+ i32x16,
+ u32x16,
+ f32x16,
+ m32x16,
+ i64x8,
+ u64x8,
+ f64x8,
+ m64x8,
+ i128x4,
+ u128x4,
+ m128x4
+);
+impl_from_bits!(
+ u16x32[test_v512]: i8x64,
+ u8x64,
+ m8x64,
+ i16x32,
+ m16x32,
+ i32x16,
+ u32x16,
+ f32x16,
+ m32x16,
+ i64x8,
+ u64x8,
+ f64x8,
+ m64x8,
+ i128x4,
+ u128x4,
+ m128x4
+);
impl_from_bits!(m16x32[test_v512]: m32x16, m64x8, m128x4);
-impl_from_bits!(i32x16[test_v512]: i8x64, u8x64, m8x64, i16x32, u16x32, m16x32, u32x16, f32x16, m32x16, i64x8, u64x8, f64x8, m64x8, i128x4, u128x4, m128x4);
-impl_from_bits!(u32x16[test_v512]: i8x64, u8x64, m8x64, i16x32, u16x32, m16x32, i32x16, f32x16, m32x16, i64x8, u64x8, f64x8, m64x8, i128x4, u128x4, m128x4);
-impl_from_bits!(f32x16[test_v512]: i8x64, u8x64, m8x64, i16x32, u16x32, m16x32, i32x16, u32x16, m32x16, i64x8, u64x8, f64x8, m64x8, i128x4, u128x4, m128x4);
+impl_from_bits!(
+ i32x16[test_v512]: i8x64,
+ u8x64,
+ m8x64,
+ i16x32,
+ u16x32,
+ m16x32,
+ u32x16,
+ f32x16,
+ m32x16,
+ i64x8,
+ u64x8,
+ f64x8,
+ m64x8,
+ i128x4,
+ u128x4,
+ m128x4
+);
+impl_from_bits!(
+ u32x16[test_v512]: i8x64,
+ u8x64,
+ m8x64,
+ i16x32,
+ u16x32,
+ m16x32,
+ i32x16,
+ f32x16,
+ m32x16,
+ i64x8,
+ u64x8,
+ f64x8,
+ m64x8,
+ i128x4,
+ u128x4,
+ m128x4
+);
+impl_from_bits!(
+ f32x16[test_v512]: i8x64,
+ u8x64,
+ m8x64,
+ i16x32,
+ u16x32,
+ m16x32,
+ i32x16,
+ u32x16,
+ m32x16,
+ i64x8,
+ u64x8,
+ f64x8,
+ m64x8,
+ i128x4,
+ u128x4,
+ m128x4
+);
impl_from_bits!(m32x16[test_v512]: m64x8, m128x4);
-impl_from_bits!(i64x8[test_v512]: i8x64, u8x64, m8x64, i16x32, u16x32, m16x32, i32x16, u32x16, f32x16, m32x16, u64x8, f64x8, m64x8, i128x4, u128x4, m128x4);
-impl_from_bits!(u64x8[test_v512]: i8x64, u8x64, m8x64, i16x32, u16x32, m16x32, i32x16, u32x16, f32x16, m32x16, i64x8, f64x8, m64x8, i128x4, u128x4, m128x4);
-impl_from_bits!(f64x8[test_v512]: i8x64, u8x64, m8x64, i16x32, u16x32, m16x32, i32x16, u32x16, f32x16, m32x16, i64x8, u64x8, m64x8, i128x4, u128x4, m128x4);
+impl_from_bits!(
+ i64x8[test_v512]: i8x64,
+ u8x64,
+ m8x64,
+ i16x32,
+ u16x32,
+ m16x32,
+ i32x16,
+ u32x16,
+ f32x16,
+ m32x16,
+ u64x8,
+ f64x8,
+ m64x8,
+ i128x4,
+ u128x4,
+ m128x4
+);
+impl_from_bits!(
+ u64x8[test_v512]: i8x64,
+ u8x64,
+ m8x64,
+ i16x32,
+ u16x32,
+ m16x32,
+ i32x16,
+ u32x16,
+ f32x16,
+ m32x16,
+ i64x8,
+ f64x8,
+ m64x8,
+ i128x4,
+ u128x4,
+ m128x4
+);
+impl_from_bits!(
+ f64x8[test_v512]: i8x64,
+ u8x64,
+ m8x64,
+ i16x32,
+ u16x32,
+ m16x32,
+ i32x16,
+ u32x16,
+ f32x16,
+ m32x16,
+ i64x8,
+ u64x8,
+ m64x8,
+ i128x4,
+ u128x4,
+ m128x4
+);
impl_from_bits!(m64x8[test_v512]: m128x4);
-impl_from_bits!(i128x4[test_v512]: i8x64, u8x64, m8x64, i16x32, u16x32, m16x32, i32x16, u32x16, f32x16, m32x16, i64x8, u64x8, f64x8, m64x8, u128x4, m128x4);
-impl_from_bits!(u128x4[test_v512]: i8x64, u8x64, m8x64, i16x32, u16x32, m16x32, i32x16, u32x16, f32x16, m32x16, i64x8, u64x8, f64x8, m64x8, i128x4, m128x4);
-// note: m128x4 cannot be constructed from all the other masks bit patterns in here
+impl_from_bits!(
+ i128x4[test_v512]: i8x64,
+ u8x64,
+ m8x64,
+ i16x32,
+ u16x32,
+ m16x32,
+ i32x16,
+ u32x16,
+ f32x16,
+ m32x16,
+ i64x8,
+ u64x8,
+ f64x8,
+ m64x8,
+ u128x4,
+ m128x4
+);
+impl_from_bits!(
+ u128x4[test_v512]: i8x64,
+ u8x64,
+ m8x64,
+ i16x32,
+ u16x32,
+ m16x32,
+ i32x16,
+ u32x16,
+ f32x16,
+ m32x16,
+ i64x8,
+ u64x8,
+ f64x8,
+ m64x8,
+ i128x4,
+ m128x4
+);
+// note: m128x4 cannot be constructed from all the other masks bit patterns in
+// here
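Editor's note (not part of the diff): the `impl_from_bits!` invocations above only change formatting; the macro still generates the `FromBits`/`IntoBits` conversions between vectors of the same bit width. A minimal usage sketch, assuming the vendored packed_simd_2 crate on a nightly toolchain:

use packed_simd_2::{f32x16, u32x16, IntoBits};

fn to_bit_pattern(v: f32x16) -> u32x16 {
    // Reinterpret the same 512-bit pattern lane by lane.
    v.into_bits()
}

fn main() {
    let ones = f32x16::splat(1.0);
    // 1.0f32 has the bit pattern 0x3f80_0000 in every lane.
    assert_eq!(to_bit_pattern(ones), u32x16::splat(0x3f80_0000));
}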
diff --git a/vendor/packed_simd_2/src/api/math/float/consts.rs b/vendor/packed_simd_2/src/api/math/float/consts.rs
index 89f93a6d6..7f41acbf1 100644
--- a/vendor/packed_simd_2/src/api/math/float/consts.rs
+++ b/vendor/packed_simd_2/src/api/math/float/consts.rs
@@ -8,8 +8,7 @@ macro_rules! impl_float_consts {
pub const MIN: $id = $id::splat(core::$elem_ty::MIN);
/// Smallest positive normal value.
- pub const MIN_POSITIVE: $id =
- $id::splat(core::$elem_ty::MIN_POSITIVE);
+ pub const MIN_POSITIVE: $id = $id::splat(core::$elem_ty::MIN_POSITIVE);
/// Largest finite value.
pub const MAX: $id = $id::splat(core::$elem_ty::MAX);
@@ -21,50 +20,40 @@ macro_rules! impl_float_consts {
pub const INFINITY: $id = $id::splat(core::$elem_ty::INFINITY);
/// Negative infinity (-∞).
- pub const NEG_INFINITY: $id =
- $id::splat(core::$elem_ty::NEG_INFINITY);
+ pub const NEG_INFINITY: $id = $id::splat(core::$elem_ty::NEG_INFINITY);
            /// Archimedes' constant (π)
pub const PI: $id = $id::splat(core::$elem_ty::consts::PI);
/// π/2
- pub const FRAC_PI_2: $id =
- $id::splat(core::$elem_ty::consts::FRAC_PI_2);
+ pub const FRAC_PI_2: $id = $id::splat(core::$elem_ty::consts::FRAC_PI_2);
/// π/3
- pub const FRAC_PI_3: $id =
- $id::splat(core::$elem_ty::consts::FRAC_PI_3);
+ pub const FRAC_PI_3: $id = $id::splat(core::$elem_ty::consts::FRAC_PI_3);
/// π/4
- pub const FRAC_PI_4: $id =
- $id::splat(core::$elem_ty::consts::FRAC_PI_4);
+ pub const FRAC_PI_4: $id = $id::splat(core::$elem_ty::consts::FRAC_PI_4);
/// π/6
- pub const FRAC_PI_6: $id =
- $id::splat(core::$elem_ty::consts::FRAC_PI_6);
+ pub const FRAC_PI_6: $id = $id::splat(core::$elem_ty::consts::FRAC_PI_6);
/// π/8
- pub const FRAC_PI_8: $id =
- $id::splat(core::$elem_ty::consts::FRAC_PI_8);
+ pub const FRAC_PI_8: $id = $id::splat(core::$elem_ty::consts::FRAC_PI_8);
            /// 1/π
- pub const FRAC_1_PI: $id =
- $id::splat(core::$elem_ty::consts::FRAC_1_PI);
+ pub const FRAC_1_PI: $id = $id::splat(core::$elem_ty::consts::FRAC_1_PI);
            /// 2/π
- pub const FRAC_2_PI: $id =
- $id::splat(core::$elem_ty::consts::FRAC_2_PI);
+ pub const FRAC_2_PI: $id = $id::splat(core::$elem_ty::consts::FRAC_2_PI);
            /// 2/sqrt(π)
- pub const FRAC_2_SQRT_PI: $id =
- $id::splat(core::$elem_ty::consts::FRAC_2_SQRT_PI);
+ pub const FRAC_2_SQRT_PI: $id = $id::splat(core::$elem_ty::consts::FRAC_2_SQRT_PI);
/// sqrt(2)
pub const SQRT_2: $id = $id::splat(core::$elem_ty::consts::SQRT_2);
/// 1/sqrt(2)
- pub const FRAC_1_SQRT_2: $id =
- $id::splat(core::$elem_ty::consts::FRAC_1_SQRT_2);
+ pub const FRAC_1_SQRT_2: $id = $id::splat(core::$elem_ty::consts::FRAC_1_SQRT_2);
/// Euler's number (e)
pub const E: $id = $id::splat(core::$elem_ty::consts::E);
@@ -73,8 +62,7 @@ macro_rules! impl_float_consts {
pub const LOG2_E: $id = $id::splat(core::$elem_ty::consts::LOG2_E);
/// log<sub>10</sub>(e)
- pub const LOG10_E: $id =
- $id::splat(core::$elem_ty::consts::LOG10_E);
+ pub const LOG10_E: $id = $id::splat(core::$elem_ty::consts::LOG10_E);
/// ln(2)
pub const LN_2: $id = $id::splat(core::$elem_ty::consts::LN_2);
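Editor's note (not part of the diff): `impl_float_consts!` defines splatted associated constants on each float vector type, so they read like their scalar counterparts. A minimal sketch, assuming the vendored packed_simd_2 crate on a nightly toolchain:

use packed_simd_2::f32x4;

fn circle_areas(radii: f32x4) -> f32x4 {
    // `f32x4::PI` is the splatted Archimedes' constant defined by the macro.
    f32x4::PI * radii * radii
}

fn main() {
    println!("{:?}", circle_areas(f32x4::new(1.0, 2.0, 3.0, 4.0)));
}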
diff --git a/vendor/packed_simd_2/src/api/ops/scalar_shifts.rs b/vendor/packed_simd_2/src/api/ops/scalar_shifts.rs
index 9c164ad56..4a7a09626 100644
--- a/vendor/packed_simd_2/src/api/ops/scalar_shifts.rs
+++ b/vendor/packed_simd_2/src/api/ops/scalar_shifts.rs
@@ -36,11 +36,10 @@ macro_rules! impl_ops_scalar_shifts {
use super::*;
#[cfg_attr(not(target_arch = "wasm32"), test)] #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
#[cfg_attr(any(target_arch = "s390x", target_arch = "sparc64"),
- allow(unreachable_code,
- unused_variables,
- unused_mut)
+ allow(unreachable_code, unused_variables)
)]
- // ^^^ FIXME: https://github.com/rust-lang/rust/issues/55344
+ #[cfg(not(target_arch = "aarch64"))]
+ //~^ FIXME: https://github.com/rust-lang/packed_simd/issues/317
fn ops_scalar_shifts() {
let z = $id::splat(0 as $elem_ty);
let o = $id::splat(1 as $elem_ty);
diff --git a/vendor/packed_simd_2/src/api/ops/vector_rotates.rs b/vendor/packed_simd_2/src/api/ops/vector_rotates.rs
index 6c794ecf4..147fc2e37 100644
--- a/vendor/packed_simd_2/src/api/ops/vector_rotates.rs
+++ b/vendor/packed_simd_2/src/api/ops/vector_rotates.rs
@@ -47,6 +47,8 @@ macro_rules! impl_ops_vector_rotates {
pub mod [<$id _ops_vector_rotate>] {
use super::*;
#[cfg_attr(not(target_arch = "wasm32"), test)] #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+ #[cfg(not(target_arch = "aarch64"))]
+ //~^ FIXME: https://github.com/rust-lang/packed_simd/issues/317
fn rotate_ops() {
let z = $id::splat(0 as $elem_ty);
let o = $id::splat(1 as $elem_ty);
diff --git a/vendor/packed_simd_2/src/api/ops/vector_shifts.rs b/vendor/packed_simd_2/src/api/ops/vector_shifts.rs
index 22e1fbc0e..8bb5ac2fc 100644
--- a/vendor/packed_simd_2/src/api/ops/vector_shifts.rs
+++ b/vendor/packed_simd_2/src/api/ops/vector_shifts.rs
@@ -37,11 +37,10 @@ macro_rules! impl_ops_vector_shifts {
use super::*;
#[cfg_attr(not(target_arch = "wasm32"), test)] #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
#[cfg_attr(any(target_arch = "s390x", target_arch = "sparc64"),
- allow(unreachable_code,
- unused_variables,
- unused_mut)
+ allow(unreachable_code, unused_variables)
)]
- // ^^^ FIXME: https://github.com/rust-lang/rust/issues/55344
+ #[cfg(not(target_arch = "aarch64"))]
+ //~^ FIXME: https://github.com/rust-lang/packed_simd/issues/317
fn ops_vector_shifts() {
let z = $id::splat(0 as $elem_ty);
let o = $id::splat(1 as $elem_ty);
diff --git a/vendor/packed_simd_2/src/api/ptr/gather_scatter.rs b/vendor/packed_simd_2/src/api/ptr/gather_scatter.rs
index 430435620..374482ac3 100644
--- a/vendor/packed_simd_2/src/api/ptr/gather_scatter.rs
+++ b/vendor/packed_simd_2/src/api/ptr/gather_scatter.rs
@@ -22,7 +22,8 @@ macro_rules! impl_ptr_read {
/// pointers must be aligned to `mem::align_of::<T>()`.
#[inline]
pub unsafe fn read<M>(
- self, mask: Simd<[M; $elem_count]>,
+ self,
+ mask: Simd<[M; $elem_count]>,
value: Simd<[T; $elem_count]>,
) -> Simd<[T; $elem_count]>
where
@@ -128,10 +129,8 @@ macro_rules! impl_ptr_write {
/// This method is unsafe because it dereferences raw pointers. The
/// pointers must be aligned to `mem::align_of::<T>()`.
#[inline]
- pub unsafe fn write<M>(
- self, mask: Simd<[M; $elem_count]>,
- value: Simd<[T; $elem_count]>,
- ) where
+ pub unsafe fn write<M>(self, mask: Simd<[M; $elem_count]>, value: Simd<[T; $elem_count]>)
+ where
M: sealed::Mask,
[M; $elem_count]: sealed::SimdArray,
{
@@ -147,8 +146,8 @@ macro_rules! impl_ptr_write {
use super::*;
#[test]
fn write() {
- // fourty_two = [42, 42, 42, ...]
- let fourty_two
+ // forty_two = [42, 42, 42, ...]
+ let forty_two
= Simd::<[i32; $elem_count]>::splat(42_i32);
// This test will write to this array
@@ -166,11 +165,11 @@ macro_rules! impl_ptr_write {
}
// ptr = [&arr[0], &arr[1], ...]
- // write `fourty_two` to all elements of `v`
+ // write `forty_two` to all elements of `v`
{
let backup = arr;
unsafe {
- ptr.write($mask_ty::splat(true), fourty_two)
+ ptr.write($mask_ty::splat(true), forty_two)
};
assert_eq!(arr, [42_i32; $elem_count]);
arr = backup; // arr = [0, 1, 2, ...]
@@ -196,7 +195,7 @@ macro_rules! impl_ptr_write {
}
let backup = arr;
- unsafe { ptr.write(mask, fourty_two) };
+ unsafe { ptr.write(mask, forty_two) };
assert_eq!(arr, r);
arr = backup; // arr = [0, 1, 2, 3, ...]
}
@@ -205,7 +204,7 @@ macro_rules! impl_ptr_write {
{
let backup = arr;
unsafe {
- ptr.write($mask_ty::splat(false), fourty_two)
+ ptr.write($mask_ty::splat(false), forty_two)
};
assert_eq!(arr, backup);
}
diff --git a/vendor/packed_simd_2/src/api/reductions/float_arithmetic.rs b/vendor/packed_simd_2/src/api/reductions/float_arithmetic.rs
index 4a47452e5..9dc8783db 100644
--- a/vendor/packed_simd_2/src/api/reductions/float_arithmetic.rs
+++ b/vendor/packed_simd_2/src/api/reductions/float_arithmetic.rs
@@ -144,8 +144,6 @@ macro_rules! impl_reduction_float_arithmetic {
#[cfg_attr(not(target_arch = "wasm32"), test)]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
#[allow(unreachable_code)]
- #[allow(unused_mut)]
- // ^^^ FIXME: https://github.com/rust-lang/rust/issues/55344
fn sum_nan() {
// FIXME: https://bugs.llvm.org/show_bug.cgi?id=36732
// https://github.com/rust-lang-nursery/packed_simd/issues/6
@@ -175,8 +173,6 @@ macro_rules! impl_reduction_float_arithmetic {
#[cfg_attr(not(target_arch = "wasm32"), test)]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
#[allow(unreachable_code)]
- #[allow(unused_mut)]
- // ^^^ FIXME: https://github.com/rust-lang/rust/issues/55344
fn product_nan() {
// FIXME: https://bugs.llvm.org/show_bug.cgi?id=36732
// https://github.com/rust-lang-nursery/packed_simd/issues/6
@@ -247,7 +243,7 @@ macro_rules! impl_reduction_float_arithmetic {
tree_bits - red_bits
} < 2,
"vector: {:?} | simd_reduction: {:?} | \
- tree_reduction: {} | scalar_reduction: {}",
+tree_reduction: {} | scalar_reduction: {}",
v,
simd_reduction,
tree_reduction,
@@ -303,7 +299,7 @@ macro_rules! impl_reduction_float_arithmetic {
tree_bits - red_bits
} < ulp_limit.try_into().unwrap(),
"vector: {:?} | simd_reduction: {:?} | \
- tree_reduction: {} | scalar_reduction: {}",
+tree_reduction: {} | scalar_reduction: {}",
v,
simd_reduction,
tree_reduction,
diff --git a/vendor/packed_simd_2/src/api/reductions/integer_arithmetic.rs b/vendor/packed_simd_2/src/api/reductions/integer_arithmetic.rs
index 91dffad31..e99e6cb5d 100644
--- a/vendor/packed_simd_2/src/api/reductions/integer_arithmetic.rs
+++ b/vendor/packed_simd_2/src/api/reductions/integer_arithmetic.rs
@@ -18,9 +18,7 @@ macro_rules! impl_reduction_integer_arithmetic {
#[cfg(not(target_arch = "aarch64"))]
{
use crate::llvm::simd_reduce_add_ordered;
- let v: $ielem_ty = unsafe {
- simd_reduce_add_ordered(self.0, 0 as $ielem_ty)
- };
+ let v: $ielem_ty = unsafe { simd_reduce_add_ordered(self.0, 0 as $ielem_ty) };
v as $elem_ty
}
#[cfg(target_arch = "aarch64")]
@@ -49,9 +47,7 @@ macro_rules! impl_reduction_integer_arithmetic {
#[cfg(not(target_arch = "aarch64"))]
{
use crate::llvm::simd_reduce_mul_ordered;
- let v: $ielem_ty = unsafe {
- simd_reduce_mul_ordered(self.0, 1 as $ielem_ty)
- };
+ let v: $ielem_ty = unsafe { simd_reduce_mul_ordered(self.0, 1 as $ielem_ty) };
v as $elem_ty
}
#[cfg(target_arch = "aarch64")]
diff --git a/vendor/packed_simd_2/src/api/reductions/min_max.rs b/vendor/packed_simd_2/src/api/reductions/min_max.rs
index c4c1400a8..a3ce13a45 100644
--- a/vendor/packed_simd_2/src/api/reductions/min_max.rs
+++ b/vendor/packed_simd_2/src/api/reductions/min_max.rs
@@ -123,7 +123,7 @@ macro_rules! impl_reduction_min_max {
macro_rules! test_reduction_float_min_max {
([$elem_ty:ident; $elem_count:expr]: $id:ident | $test_tt:tt) => {
- test_if!{
+ test_if! {
$test_tt:
paste::item! {
// Comparisons use integer casts within mantissa^1 range.
@@ -160,20 +160,7 @@ macro_rules! test_reduction_float_min_max {
// targets:
if i == $id::lanes() - 1 &&
target_with_broken_last_lane_nan {
- // FIXME:
- // https://github.com/rust-lang-nursery/packed_simd/issues/5
- //
- // If there is a NaN, the result should always
- // the smallest element, but currently when the
- // last element is NaN the current
- // implementation incorrectly returns NaN.
- //
- // The targets mentioned above use different
- // codegen that produces the correct result.
- //
- // These asserts detect if this behavior changes
- assert!(v.min_element().is_nan(),
- // FIXME: ^^^ should be -3.
+ assert_eq!(v.min_element(), -3.,
"[A]: nan at {} => {} | {:?}",
i, v.min_element(), v);
@@ -181,14 +168,17 @@ macro_rules! test_reduction_float_min_max {
// up-to the `i-th` lane with `NaN`s, the result
// is still always `-3.` unless all elements of
// the vector are `NaN`s:
- //
- // This is also broken:
for j in 0..i {
v = v.replace(j, n);
- assert!(v.min_element().is_nan(),
- // FIXME: ^^^ should be -3.
+ if j == i-1 {
+ assert!(v.min_element().is_nan(),
+ "[B]: nan at {} => {} | {:?}",
+ i, v.min_element(), v);
+ } else {
+ assert_eq!(v.min_element(), -3.,
"[B]: nan at {} => {} | {:?}",
i, v.min_element(), v);
+ }
}
// We are done here, since we were in the last
@@ -203,7 +193,7 @@ macro_rules! test_reduction_float_min_max {
if $id::lanes() == 1 {
assert!(v.min_element().is_nan(),
"[C]: all nans | v={:?} | min={} | \
- is_nan: {}",
+is_nan: {}",
v, v.min_element(),
v.min_element().is_nan()
);
@@ -235,7 +225,7 @@ macro_rules! test_reduction_float_min_max {
// "i - 1" does not overflow.
assert!(v.min_element().is_nan(),
"[E]: all nans | v={:?} | min={} | \
- is_nan: {}",
+is_nan: {}",
v, v.min_element(),
v.min_element().is_nan());
} else {
@@ -280,21 +270,7 @@ macro_rules! test_reduction_float_min_max {
// targets:
if i == $id::lanes() - 1 &&
target_with_broken_last_lane_nan {
- // FIXME:
- // https://github.com/rust-lang-nursery/packed_simd/issues/5
- //
- // If there is a NaN, the result should
- // always the largest element, but currently
- // when the last element is NaN the current
- // implementation incorrectly returns NaN.
- //
- // The targets mentioned above use different
- // codegen that produces the correct result.
- //
- // These asserts detect if this behavior
- // changes
- assert!(v.max_element().is_nan(),
- // FIXME: ^^^ should be -3.
+ assert_eq!(v.max_element(), -3.,
"[A]: nan at {} => {} | {:?}",
i, v.max_element(), v);
@@ -302,14 +278,17 @@ macro_rules! test_reduction_float_min_max {
// up-to the `i-th` lane with `NaN`s, the result
// is still always `-3.` unless all elements of
// the vector are `NaN`s:
- //
- // This is also broken:
for j in 0..i {
v = v.replace(j, n);
- assert!(v.max_element().is_nan(),
- // FIXME: ^^^ should be -3.
+ if j == i-1 {
+                                assert!(v.max_element().is_nan(),
+                                    "[B]: nan at {} => {} | {:?}",
+                                    i, v.max_element(), v);
+ } else {
+ assert_eq!(v.max_element(), -3.,
"[B]: nan at {} => {} | {:?}",
i, v.max_element(), v);
+ }
}
// We are done here, since we were in the last
@@ -324,7 +303,7 @@ macro_rules! test_reduction_float_min_max {
if $id::lanes() == 1 {
assert!(v.max_element().is_nan(),
"[C]: all nans | v={:?} | min={} | \
- is_nan: {}",
+is_nan: {}",
v, v.max_element(),
v.max_element().is_nan());
@@ -355,7 +334,7 @@ macro_rules! test_reduction_float_min_max {
// "i - 1" does not overflow.
assert!(v.max_element().is_nan(),
"[E]: all nans | v={:?} | max={} | \
- is_nan: {}",
+is_nan: {}",
v, v.max_element(),
v.max_element().is_nan());
} else {
@@ -377,5 +356,5 @@ macro_rules! test_reduction_float_min_max {
}
}
}
- }
+ };
}
diff --git a/vendor/packed_simd_2/src/api/select.rs b/vendor/packed_simd_2/src/api/select.rs
index 24525df56..daf629472 100644
--- a/vendor/packed_simd_2/src/api/select.rs
+++ b/vendor/packed_simd_2/src/api/select.rs
@@ -12,9 +12,7 @@ macro_rules! impl_select {
#[inline]
pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T>
where
- T: sealed::SimdArray<
- NT = <[$elem_ty; $elem_count] as sealed::SimdArray>::NT,
- >,
+ T: sealed::SimdArray<NT = <[$elem_ty; $elem_count] as sealed::SimdArray>::NT>,
{
use crate::llvm::simd_select;
Simd(unsafe { simd_select(self.0, a.0, b.0) })
diff --git a/vendor/packed_simd_2/src/api/shuffle.rs b/vendor/packed_simd_2/src/api/shuffle.rs
index 13a7fae5f..fda29ccdd 100644
--- a/vendor/packed_simd_2/src/api/shuffle.rs
+++ b/vendor/packed_simd_2/src/api/shuffle.rs
@@ -27,9 +27,7 @@
/// Shuffling elements of two vectors:
///
/// ```
-/// # #[macro_use]
-/// # extern crate packed_simd;
-/// # use packed_simd::*;
+/// # use packed_simd_2::*;
/// # fn main() {
/// // Shuffle allows reordering the elements:
/// let x = i32x4::new(1, 2, 3, 4);
@@ -51,9 +49,7 @@
/// Shuffling elements of one vector:
///
/// ```
-/// # #[macro_use]
-/// # extern crate packed_simd;
-/// # use packed_simd::*;
+/// # use packed_simd_2::*;
/// # fn main() {
/// // Shuffle allows reordering the elements of a vector:
/// let x = i32x4::new(1, 2, 3, 4);
@@ -79,20 +75,18 @@ macro_rules! shuffle {
($vec0:expr, $vec1:expr, [$l0:expr, $l1:expr]) => {{
#[allow(unused_unsafe)]
unsafe {
- $crate::Simd($crate::__shuffle_vector2(
+ $crate::Simd($crate::__shuffle_vector2::<{[$l0, $l1]}, _, _>(
$vec0.0,
$vec1.0,
- [$l0, $l1],
))
}
}};
($vec0:expr, $vec1:expr, [$l0:expr, $l1:expr, $l2:expr, $l3:expr]) => {{
#[allow(unused_unsafe)]
unsafe {
- $crate::Simd($crate::__shuffle_vector4(
+ $crate::Simd($crate::__shuffle_vector4::<{[$l0, $l1, $l2, $l3]}, _, _>(
$vec0.0,
$vec1.0,
- [$l0, $l1, $l2, $l3],
))
}
}};
@@ -101,10 +95,9 @@ macro_rules! shuffle {
$l4:expr, $l5:expr, $l6:expr, $l7:expr]) => {{
#[allow(unused_unsafe)]
unsafe {
- $crate::Simd($crate::__shuffle_vector8(
+ $crate::Simd($crate::__shuffle_vector8::<{[$l0, $l1, $l2, $l3, $l4, $l5, $l6, $l7]}, _, _>(
$vec0.0,
$vec1.0,
- [$l0, $l1, $l2, $l3, $l4, $l5, $l6, $l7],
))
}
}};
@@ -115,13 +108,14 @@ macro_rules! shuffle {
$l12:expr, $l13:expr, $l14:expr, $l15:expr]) => {{
#[allow(unused_unsafe)]
unsafe {
- $crate::Simd($crate::__shuffle_vector16(
- $vec0.0,
- $vec1.0,
+ $crate::Simd($crate::__shuffle_vector16::<{
[
$l0, $l1, $l2, $l3, $l4, $l5, $l6, $l7, $l8, $l9, $l10,
$l11, $l12, $l13, $l14, $l15,
- ],
+ ]
+ }, _, _>(
+ $vec0.0,
+ $vec1.0,
))
}
}};
@@ -136,15 +130,16 @@ macro_rules! shuffle {
$l28:expr, $l29:expr, $l30:expr, $l31:expr]) => {{
#[allow(unused_unsafe)]
unsafe {
- $crate::Simd($crate::__shuffle_vector32(
- $vec0.0,
- $vec1.0,
+ $crate::Simd($crate::__shuffle_vector32::<{
[
$l0, $l1, $l2, $l3, $l4, $l5, $l6, $l7, $l8, $l9, $l10,
$l11, $l12, $l13, $l14, $l15, $l16, $l17, $l18, $l19,
$l20, $l21, $l22, $l23, $l24, $l25, $l26, $l27, $l28,
$l29, $l30, $l31,
- ],
+ ]
+ }, _, _>(
+ $vec0.0,
+ $vec1.0,
))
}
}};
@@ -167,18 +162,17 @@ macro_rules! shuffle {
$l60:expr, $l61:expr, $l62:expr, $l63:expr]) => {{
#[allow(unused_unsafe)]
unsafe {
- $crate::Simd($crate::__shuffle_vector64(
+ $crate::Simd($crate::__shuffle_vector64::<{[
+ $l0, $l1, $l2, $l3, $l4, $l5, $l6, $l7, $l8, $l9, $l10,
+ $l11, $l12, $l13, $l14, $l15, $l16, $l17, $l18, $l19,
+ $l20, $l21, $l22, $l23, $l24, $l25, $l26, $l27, $l28,
+ $l29, $l30, $l31, $l32, $l33, $l34, $l35, $l36, $l37,
+ $l38, $l39, $l40, $l41, $l42, $l43, $l44, $l45, $l46,
+ $l47, $l48, $l49, $l50, $l51, $l52, $l53, $l54, $l55,
+ $l56, $l57, $l58, $l59, $l60, $l61, $l62, $l63,
+ ]}, _, _>(
$vec0.0,
$vec1.0,
- [
- $l0, $l1, $l2, $l3, $l4, $l5, $l6, $l7, $l8, $l9, $l10,
- $l11, $l12, $l13, $l14, $l15, $l16, $l17, $l18, $l19,
- $l20, $l21, $l22, $l23, $l24, $l25, $l26, $l27, $l28,
- $l29, $l30, $l31, $l32, $l33, $l34, $l35, $l36, $l37,
- $l38, $l39, $l40, $l41, $l42, $l43, $l44, $l45, $l46,
- $l47, $l48, $l49, $l50, $l51, $l52, $l53, $l54, $l55,
- $l56, $l57, $l58, $l59, $l60, $l61, $l62, $l63,
- ],
))
}
}};
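Editor's note (not part of the diff): with this change the `shuffle!` macro passes the index list to the `__shuffle_vectorN` wrappers as a const generic instead of a runtime array argument; call sites are unchanged. A minimal sketch, assuming the vendored packed_simd_2 crate on a nightly toolchain:

use packed_simd_2::*;

fn interleave_low(a: i32x4, b: i32x4) -> i32x4 {
    // Indices 0..=3 select lanes of `a`, 4..=7 select lanes of `b`.
    shuffle!(a, b, [0, 4, 1, 5])
}

fn main() {
    let a = i32x4::new(1, 2, 3, 4);
    let b = i32x4::new(10, 20, 30, 40);
    assert_eq!(interleave_low(a, b), i32x4::new(1, 10, 2, 20));
}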
diff --git a/vendor/packed_simd_2/src/api/slice/from_slice.rs b/vendor/packed_simd_2/src/api/slice/from_slice.rs
index 25082d1e6..50f3914f7 100644
--- a/vendor/packed_simd_2/src/api/slice/from_slice.rs
+++ b/vendor/packed_simd_2/src/api/slice/from_slice.rs
@@ -14,11 +14,7 @@ macro_rules! impl_slice_from_slice {
unsafe {
assert!(slice.len() >= $elem_count);
let target_ptr = slice.get_unchecked(0) as *const $elem_ty;
- assert_eq!(
- target_ptr
- .align_offset(crate::mem::align_of::<Self>()),
- 0
- );
+ assert_eq!(target_ptr.align_offset(crate::mem::align_of::<Self>()), 0);
Self::from_slice_aligned_unchecked(slice)
}
}
@@ -43,15 +39,10 @@ macro_rules! impl_slice_from_slice {
/// If `slice.len() < Self::lanes()` or `&slice[0]` is not aligned
/// to an `align_of::<Self>()` boundary, the behavior is undefined.
#[inline]
- pub unsafe fn from_slice_aligned_unchecked(
- slice: &[$elem_ty],
- ) -> Self {
+ pub unsafe fn from_slice_aligned_unchecked(slice: &[$elem_ty]) -> Self {
debug_assert!(slice.len() >= $elem_count);
let target_ptr = slice.get_unchecked(0) as *const $elem_ty;
- debug_assert_eq!(
- target_ptr.align_offset(crate::mem::align_of::<Self>()),
- 0
- );
+ debug_assert_eq!(target_ptr.align_offset(crate::mem::align_of::<Self>()), 0);
#[allow(clippy::cast_ptr_alignment)]
*(target_ptr as *const Self)
@@ -63,20 +54,13 @@ macro_rules! impl_slice_from_slice {
///
/// If `slice.len() < Self::lanes()` the behavior is undefined.
#[inline]
- pub unsafe fn from_slice_unaligned_unchecked(
- slice: &[$elem_ty],
- ) -> Self {
+ pub unsafe fn from_slice_unaligned_unchecked(slice: &[$elem_ty]) -> Self {
use crate::mem::size_of;
debug_assert!(slice.len() >= $elem_count);
- let target_ptr =
- slice.get_unchecked(0) as *const $elem_ty as *const u8;
+ let target_ptr = slice.get_unchecked(0) as *const $elem_ty as *const u8;
let mut x = Self::splat(0 as $elem_ty);
let self_ptr = &mut x as *mut Self as *mut u8;
- crate::ptr::copy_nonoverlapping(
- target_ptr,
- self_ptr,
- size_of::<Self>(),
- );
+ crate::ptr::copy_nonoverlapping(target_ptr, self_ptr, size_of::<Self>());
x
}
}
diff --git a/vendor/packed_simd_2/src/api/slice/write_to_slice.rs b/vendor/packed_simd_2/src/api/slice/write_to_slice.rs
index b634d98b9..dd04a2634 100644
--- a/vendor/packed_simd_2/src/api/slice/write_to_slice.rs
+++ b/vendor/packed_simd_2/src/api/slice/write_to_slice.rs
@@ -13,13 +13,8 @@ macro_rules! impl_slice_write_to_slice {
pub fn write_to_slice_aligned(self, slice: &mut [$elem_ty]) {
unsafe {
assert!(slice.len() >= $elem_count);
- let target_ptr =
- slice.get_unchecked_mut(0) as *mut $elem_ty;
- assert_eq!(
- target_ptr
- .align_offset(crate::mem::align_of::<Self>()),
- 0
- );
+ let target_ptr = slice.get_unchecked_mut(0) as *mut $elem_ty;
+ assert_eq!(target_ptr.align_offset(crate::mem::align_of::<Self>()), 0);
self.write_to_slice_aligned_unchecked(slice);
}
}
@@ -45,18 +40,13 @@ macro_rules! impl_slice_write_to_slice {
/// aligned to an `align_of::<Self>()` boundary, the behavior is
/// undefined.
#[inline]
- pub unsafe fn write_to_slice_aligned_unchecked(
- self, slice: &mut [$elem_ty],
- ) {
+ pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [$elem_ty]) {
debug_assert!(slice.len() >= $elem_count);
let target_ptr = slice.get_unchecked_mut(0) as *mut $elem_ty;
- debug_assert_eq!(
- target_ptr.align_offset(crate::mem::align_of::<Self>()),
- 0
- );
+ debug_assert_eq!(target_ptr.align_offset(crate::mem::align_of::<Self>()), 0);
- #[allow(clippy::cast_ptr_alignment)]
- #[allow(clippy::cast_ptr_alignment)]
+ #[allow(clippy::cast_ptr_alignment)]
+ #[allow(clippy::cast_ptr_alignment)]
#[allow(clippy::cast_ptr_alignment)]
#[allow(clippy::cast_ptr_alignment)]
*(target_ptr as *mut Self) = self;
@@ -68,18 +58,11 @@ macro_rules! impl_slice_write_to_slice {
///
/// If `slice.len() < Self::lanes()` the behavior is undefined.
#[inline]
- pub unsafe fn write_to_slice_unaligned_unchecked(
- self, slice: &mut [$elem_ty],
- ) {
+ pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [$elem_ty]) {
debug_assert!(slice.len() >= $elem_count);
- let target_ptr =
- slice.get_unchecked_mut(0) as *mut $elem_ty as *mut u8;
+ let target_ptr = slice.get_unchecked_mut(0) as *mut $elem_ty as *mut u8;
let self_ptr = &self as *const Self as *const u8;
- crate::ptr::copy_nonoverlapping(
- self_ptr,
- target_ptr,
- crate::mem::size_of::<Self>(),
- );
+ crate::ptr::copy_nonoverlapping(self_ptr, target_ptr, crate::mem::size_of::<Self>());
}
}
diff --git a/vendor/packed_simd_2/src/codegen.rs b/vendor/packed_simd_2/src/codegen.rs
index 9d1517e20..8a9e97148 100644
--- a/vendor/packed_simd_2/src/codegen.rs
+++ b/vendor/packed_simd_2/src/codegen.rs
@@ -1,19 +1,19 @@
//! Code-generation utilities
-crate mod bit_manip;
-crate mod llvm;
-crate mod math;
-crate mod reductions;
-crate mod shuffle;
-crate mod shuffle1_dyn;
-crate mod swap_bytes;
+pub(crate) mod bit_manip;
+pub(crate) mod llvm;
+pub(crate) mod math;
+pub(crate) mod reductions;
+pub(crate) mod shuffle;
+pub(crate) mod shuffle1_dyn;
+pub(crate) mod swap_bytes;
macro_rules! impl_simd_array {
([$elem_ty:ident; $elem_count:expr]:
$tuple_id:ident | $($elem_tys:ident),*) => {
#[derive(Copy, Clone)]
#[repr(simd)]
- pub struct $tuple_id($(crate $elem_tys),*);
+ pub struct $tuple_id($(pub(crate) $elem_tys),*);
//^^^^^^^ leaked through SimdArray
impl crate::sealed::Seal for [$elem_ty; $elem_count] {}
@@ -35,28 +35,28 @@ macro_rules! impl_simd_array {
}
}
-crate mod pointer_sized_int;
+pub(crate) mod pointer_sized_int;
-crate mod v16;
-crate use self::v16::*;
+pub(crate) mod v16;
+pub(crate) use self::v16::*;
-crate mod v32;
-crate use self::v32::*;
+pub(crate) mod v32;
+pub(crate) use self::v32::*;
-crate mod v64;
-crate use self::v64::*;
+pub(crate) mod v64;
+pub(crate) use self::v64::*;
-crate mod v128;
-crate use self::v128::*;
+pub(crate) mod v128;
+pub(crate) use self::v128::*;
-crate mod v256;
-crate use self::v256::*;
+pub(crate) mod v256;
+pub(crate) use self::v256::*;
-crate mod v512;
-crate use self::v512::*;
+pub(crate) mod v512;
+pub(crate) use self::v512::*;
-crate mod vSize;
-crate use self::vSize::*;
+pub(crate) mod vSize;
+pub(crate) use self::vSize::*;
-crate mod vPtr;
-crate use self::vPtr::*;
+pub(crate) mod vPtr;
+pub(crate) use self::vPtr::*;
diff --git a/vendor/packed_simd_2/src/codegen/bit_manip.rs b/vendor/packed_simd_2/src/codegen/bit_manip.rs
index 83c7d1987..32d8d717a 100644
--- a/vendor/packed_simd_2/src/codegen/bit_manip.rs
+++ b/vendor/packed_simd_2/src/codegen/bit_manip.rs
@@ -1,7 +1,7 @@
//! LLVM bit manipulation intrinsics.
#[rustfmt::skip]
-use crate::*;
+pub(crate) use crate::*;
#[allow(improper_ctypes, dead_code)]
extern "C" {
@@ -147,7 +147,7 @@ extern "C" {
fn ctpop_u128x4(x: u128x4) -> u128x4;
}
-crate trait BitManip {
+pub(crate) trait BitManip {
fn ctpop(self) -> Self;
fn ctlz(self) -> Self;
fn cttz(self) -> Self;
@@ -212,8 +212,7 @@ macro_rules! impl_bit_manip {
fn ctpop(self) -> Self {
let mut ones = self;
for i in 0..Self::lanes() {
- ones = ones
- .replace(i, self.extract(i).count_ones() as $scalar);
+ ones = ones.replace(i, self.extract(i).count_ones() as $scalar);
}
ones
}
@@ -222,10 +221,7 @@ macro_rules! impl_bit_manip {
fn ctlz(self) -> Self {
let mut lz = self;
for i in 0..Self::lanes() {
- lz = lz.replace(
- i,
- self.extract(i).leading_zeros() as $scalar,
- );
+ lz = lz.replace(i, self.extract(i).leading_zeros() as $scalar);
}
lz
}
@@ -234,10 +230,7 @@ macro_rules! impl_bit_manip {
fn cttz(self) -> Self {
let mut tz = self;
for i in 0..Self::lanes() {
- tz = tz.replace(
- i,
- self.extract(i).trailing_zeros() as $scalar,
- );
+ tz = tz.replace(i, self.extract(i).trailing_zeros() as $scalar);
}
tz
}
diff --git a/vendor/packed_simd_2/src/codegen/llvm.rs b/vendor/packed_simd_2/src/codegen/llvm.rs
index 93c6ce6b7..b4c09849b 100644
--- a/vendor/packed_simd_2/src/codegen/llvm.rs
+++ b/vendor/packed_simd_2/src/codegen/llvm.rs
@@ -7,101 +7,122 @@ use crate::sealed::Simd;
// Shuffle intrinsics: expanded in users' crates, therefore public.
extern "platform-intrinsic" {
- // FIXME: Passing this intrinsics an `idx` array with an index that is
- // out-of-bounds will produce a monomorphization-time error.
- // https://github.com/rust-lang-nursery/packed_simd/issues/21
- #[rustc_args_required_const(2)]
- pub fn simd_shuffle2<T, U>(x: T, y: T, idx: [u32; 2]) -> U
- where
- T: Simd,
- <T as Simd>::Element: Shuffle<[u32; 2], Output = U>;
-
- #[rustc_args_required_const(2)]
- pub fn simd_shuffle4<T, U>(x: T, y: T, idx: [u32; 4]) -> U
- where
- T: Simd,
- <T as Simd>::Element: Shuffle<[u32; 4], Output = U>;
-
- #[rustc_args_required_const(2)]
- pub fn simd_shuffle8<T, U>(x: T, y: T, idx: [u32; 8]) -> U
- where
- T: Simd,
- <T as Simd>::Element: Shuffle<[u32; 8], Output = U>;
-
- #[rustc_args_required_const(2)]
- pub fn simd_shuffle16<T, U>(x: T, y: T, idx: [u32; 16]) -> U
- where
- T: Simd,
- <T as Simd>::Element: Shuffle<[u32; 16], Output = U>;
-
- #[rustc_args_required_const(2)]
- pub fn simd_shuffle32<T, U>(x: T, y: T, idx: [u32; 32]) -> U
- where
- T: Simd,
- <T as Simd>::Element: Shuffle<[u32; 32], Output = U>;
-
- #[rustc_args_required_const(2)]
- pub fn simd_shuffle64<T, U>(x: T, y: T, idx: [u32; 64]) -> U
- where
- T: Simd,
- <T as Simd>::Element: Shuffle<[u32; 64], Output = U>;
+ pub fn simd_shuffle2<T, U>(x: T, y: T, idx: [u32; 2]) -> U;
+ pub fn simd_shuffle4<T, U>(x: T, y: T, idx: [u32; 4]) -> U;
+ pub fn simd_shuffle8<T, U>(x: T, y: T, idx: [u32; 8]) -> U;
+ pub fn simd_shuffle16<T, U>(x: T, y: T, idx: [u32; 16]) -> U;
+ pub fn simd_shuffle32<T, U>(x: T, y: T, idx: [u32; 32]) -> U;
+ pub fn simd_shuffle64<T, U>(x: T, y: T, idx: [u32; 64]) -> U;
}
-pub use self::simd_shuffle16 as __shuffle_vector16;
-pub use self::simd_shuffle2 as __shuffle_vector2;
-pub use self::simd_shuffle32 as __shuffle_vector32;
-pub use self::simd_shuffle4 as __shuffle_vector4;
-pub use self::simd_shuffle64 as __shuffle_vector64;
-pub use self::simd_shuffle8 as __shuffle_vector8;
+#[allow(clippy::missing_safety_doc)]
+#[inline]
+pub unsafe fn __shuffle_vector2<const IDX: [u32; 2], T, U>(x: T, y: T) -> U
+where
+ T: Simd,
+ <T as Simd>::Element: Shuffle<[u32; 2], Output = U>,
+{
+ simd_shuffle2(x, y, IDX)
+}
+
+#[allow(clippy::missing_safety_doc)]
+#[inline]
+pub unsafe fn __shuffle_vector4<const IDX: [u32; 4], T, U>(x: T, y: T) -> U
+where
+ T: Simd,
+ <T as Simd>::Element: Shuffle<[u32; 4], Output = U>,
+{
+ simd_shuffle4(x, y, IDX)
+}
+
+#[allow(clippy::missing_safety_doc)]
+#[inline]
+pub unsafe fn __shuffle_vector8<const IDX: [u32; 8], T, U>(x: T, y: T) -> U
+where
+ T: Simd,
+ <T as Simd>::Element: Shuffle<[u32; 8], Output = U>,
+{
+ simd_shuffle8(x, y, IDX)
+}
+
+#[allow(clippy::missing_safety_doc)]
+#[inline]
+pub unsafe fn __shuffle_vector16<const IDX: [u32; 16], T, U>(x: T, y: T) -> U
+where
+ T: Simd,
+ <T as Simd>::Element: Shuffle<[u32; 16], Output = U>,
+{
+ simd_shuffle16(x, y, IDX)
+}
+
+#[allow(clippy::missing_safety_doc)]
+#[inline]
+pub unsafe fn __shuffle_vector32<const IDX: [u32; 32], T, U>(x: T, y: T) -> U
+where
+ T: Simd,
+ <T as Simd>::Element: Shuffle<[u32; 32], Output = U>,
+{
+ simd_shuffle32(x, y, IDX)
+}
+
+#[allow(clippy::missing_safety_doc)]
+#[inline]
+pub unsafe fn __shuffle_vector64<const IDX: [u32; 64], T, U>(x: T, y: T) -> U
+where
+ T: Simd,
+ <T as Simd>::Element: Shuffle<[u32; 64], Output = U>,
+{
+ simd_shuffle64(x, y, IDX)
+}
extern "platform-intrinsic" {
- crate fn simd_eq<T, U>(x: T, y: T) -> U;
- crate fn simd_ne<T, U>(x: T, y: T) -> U;
- crate fn simd_lt<T, U>(x: T, y: T) -> U;
- crate fn simd_le<T, U>(x: T, y: T) -> U;
- crate fn simd_gt<T, U>(x: T, y: T) -> U;
- crate fn simd_ge<T, U>(x: T, y: T) -> U;
-
- crate fn simd_insert<T, U>(x: T, idx: u32, val: U) -> T;
- crate fn simd_extract<T, U>(x: T, idx: u32) -> U;
-
- crate fn simd_cast<T, U>(x: T) -> U;
-
- crate fn simd_add<T>(x: T, y: T) -> T;
- crate fn simd_sub<T>(x: T, y: T) -> T;
- crate fn simd_mul<T>(x: T, y: T) -> T;
- crate fn simd_div<T>(x: T, y: T) -> T;
- crate fn simd_rem<T>(x: T, y: T) -> T;
- crate fn simd_shl<T>(x: T, y: T) -> T;
- crate fn simd_shr<T>(x: T, y: T) -> T;
- crate fn simd_and<T>(x: T, y: T) -> T;
- crate fn simd_or<T>(x: T, y: T) -> T;
- crate fn simd_xor<T>(x: T, y: T) -> T;
-
- crate fn simd_reduce_add_unordered<T, U>(x: T) -> U;
- crate fn simd_reduce_mul_unordered<T, U>(x: T) -> U;
- crate fn simd_reduce_add_ordered<T, U>(x: T, acc: U) -> U;
- crate fn simd_reduce_mul_ordered<T, U>(x: T, acc: U) -> U;
- crate fn simd_reduce_min<T, U>(x: T) -> U;
- crate fn simd_reduce_max<T, U>(x: T) -> U;
- crate fn simd_reduce_min_nanless<T, U>(x: T) -> U;
- crate fn simd_reduce_max_nanless<T, U>(x: T) -> U;
- crate fn simd_reduce_and<T, U>(x: T) -> U;
- crate fn simd_reduce_or<T, U>(x: T) -> U;
- crate fn simd_reduce_xor<T, U>(x: T) -> U;
- crate fn simd_reduce_all<T>(x: T) -> bool;
- crate fn simd_reduce_any<T>(x: T) -> bool;
-
- crate fn simd_select<M, T>(m: M, a: T, b: T) -> T;
-
- crate fn simd_fmin<T>(a: T, b: T) -> T;
- crate fn simd_fmax<T>(a: T, b: T) -> T;
-
- crate fn simd_fsqrt<T>(a: T) -> T;
- crate fn simd_fma<T>(a: T, b: T, c: T) -> T;
-
- crate fn simd_gather<T, P, M>(value: T, pointers: P, mask: M) -> T;
- crate fn simd_scatter<T, P, M>(value: T, pointers: P, mask: M);
-
- crate fn simd_bitmask<T, U>(value: T) -> U;
+ pub(crate) fn simd_eq<T, U>(x: T, y: T) -> U;
+ pub(crate) fn simd_ne<T, U>(x: T, y: T) -> U;
+ pub(crate) fn simd_lt<T, U>(x: T, y: T) -> U;
+ pub(crate) fn simd_le<T, U>(x: T, y: T) -> U;
+ pub(crate) fn simd_gt<T, U>(x: T, y: T) -> U;
+ pub(crate) fn simd_ge<T, U>(x: T, y: T) -> U;
+
+ pub(crate) fn simd_insert<T, U>(x: T, idx: u32, val: U) -> T;
+ pub(crate) fn simd_extract<T, U>(x: T, idx: u32) -> U;
+
+ pub(crate) fn simd_cast<T, U>(x: T) -> U;
+
+ pub(crate) fn simd_add<T>(x: T, y: T) -> T;
+ pub(crate) fn simd_sub<T>(x: T, y: T) -> T;
+ pub(crate) fn simd_mul<T>(x: T, y: T) -> T;
+ pub(crate) fn simd_div<T>(x: T, y: T) -> T;
+ pub(crate) fn simd_rem<T>(x: T, y: T) -> T;
+ pub(crate) fn simd_shl<T>(x: T, y: T) -> T;
+ pub(crate) fn simd_shr<T>(x: T, y: T) -> T;
+ pub(crate) fn simd_and<T>(x: T, y: T) -> T;
+ pub(crate) fn simd_or<T>(x: T, y: T) -> T;
+ pub(crate) fn simd_xor<T>(x: T, y: T) -> T;
+
+ pub(crate) fn simd_reduce_add_unordered<T, U>(x: T) -> U;
+ pub(crate) fn simd_reduce_mul_unordered<T, U>(x: T) -> U;
+ pub(crate) fn simd_reduce_add_ordered<T, U>(x: T, acc: U) -> U;
+ pub(crate) fn simd_reduce_mul_ordered<T, U>(x: T, acc: U) -> U;
+ pub(crate) fn simd_reduce_min<T, U>(x: T) -> U;
+ pub(crate) fn simd_reduce_max<T, U>(x: T) -> U;
+ pub(crate) fn simd_reduce_min_nanless<T, U>(x: T) -> U;
+ pub(crate) fn simd_reduce_max_nanless<T, U>(x: T) -> U;
+ pub(crate) fn simd_reduce_and<T, U>(x: T) -> U;
+ pub(crate) fn simd_reduce_or<T, U>(x: T) -> U;
+ pub(crate) fn simd_reduce_xor<T, U>(x: T) -> U;
+ pub(crate) fn simd_reduce_all<T>(x: T) -> bool;
+ pub(crate) fn simd_reduce_any<T>(x: T) -> bool;
+
+ pub(crate) fn simd_select<M, T>(m: M, a: T, b: T) -> T;
+
+ pub(crate) fn simd_fmin<T>(a: T, b: T) -> T;
+ pub(crate) fn simd_fmax<T>(a: T, b: T) -> T;
+
+ pub(crate) fn simd_fsqrt<T>(a: T) -> T;
+ pub(crate) fn simd_fma<T>(a: T, b: T, c: T) -> T;
+
+ pub(crate) fn simd_gather<T, P, M>(value: T, pointers: P, mask: M) -> T;
+ pub(crate) fn simd_scatter<T, P, M>(value: T, pointers: P, mask: M);
+
+ pub(crate) fn simd_bitmask<T, U>(value: T) -> U;
}
diff --git a/vendor/packed_simd_2/src/codegen/math.rs b/vendor/packed_simd_2/src/codegen/math.rs
index f3997c7f1..9a0ea7a4e 100644
--- a/vendor/packed_simd_2/src/codegen/math.rs
+++ b/vendor/packed_simd_2/src/codegen/math.rs
@@ -1,3 +1,3 @@
//! Vertical math operations
-crate mod float;
+pub(crate) mod float;
diff --git a/vendor/packed_simd_2/src/codegen/math/float.rs b/vendor/packed_simd_2/src/codegen/math/float.rs
index 3743b4990..10d21831f 100644
--- a/vendor/packed_simd_2/src/codegen/math/float.rs
+++ b/vendor/packed_simd_2/src/codegen/math/float.rs
@@ -2,18 +2,18 @@
#![allow(clippy::useless_transmute)]
#[macro_use]
-crate mod macros;
-crate mod abs;
-crate mod cos;
-crate mod cos_pi;
-crate mod exp;
-crate mod ln;
-crate mod mul_add;
-crate mod mul_adde;
-crate mod powf;
-crate mod sin;
-crate mod sin_cos_pi;
-crate mod sin_pi;
-crate mod sqrt;
-crate mod sqrte;
-crate mod tanh;
+pub(crate) mod macros;
+pub(crate) mod abs;
+pub(crate) mod cos;
+pub(crate) mod cos_pi;
+pub(crate) mod exp;
+pub(crate) mod ln;
+pub(crate) mod mul_add;
+pub(crate) mod mul_adde;
+pub(crate) mod powf;
+pub(crate) mod sin;
+pub(crate) mod sin_cos_pi;
+pub(crate) mod sin_pi;
+pub(crate) mod sqrt;
+pub(crate) mod sqrte;
+pub(crate) mod tanh;
diff --git a/vendor/packed_simd_2/src/codegen/math/float/abs.rs b/vendor/packed_simd_2/src/codegen/math/float/abs.rs
index bc4421f61..34aacc25b 100644
--- a/vendor/packed_simd_2/src/codegen/math/float/abs.rs
+++ b/vendor/packed_simd_2/src/codegen/math/float/abs.rs
@@ -5,7 +5,7 @@
use crate::*;
-crate trait Abs {
+pub(crate) trait Abs {
fn abs(self) -> Self;
}
diff --git a/vendor/packed_simd_2/src/codegen/math/float/cos.rs b/vendor/packed_simd_2/src/codegen/math/float/cos.rs
index 50f6c16da..dec390cb7 100644
--- a/vendor/packed_simd_2/src/codegen/math/float/cos.rs
+++ b/vendor/packed_simd_2/src/codegen/math/float/cos.rs
@@ -5,7 +5,7 @@
use crate::*;
-crate trait Cos {
+pub(crate) trait Cos {
fn cos(self) -> Self;
}
diff --git a/vendor/packed_simd_2/src/codegen/math/float/cos_pi.rs b/vendor/packed_simd_2/src/codegen/math/float/cos_pi.rs
index ebff5fd1c..e283280ee 100644
--- a/vendor/packed_simd_2/src/codegen/math/float/cos_pi.rs
+++ b/vendor/packed_simd_2/src/codegen/math/float/cos_pi.rs
@@ -5,7 +5,7 @@
use crate::*;
-crate trait CosPi {
+pub(crate) trait CosPi {
fn cos_pi(self) -> Self;
}
diff --git a/vendor/packed_simd_2/src/codegen/math/float/exp.rs b/vendor/packed_simd_2/src/codegen/math/float/exp.rs
index 00d10e9fa..a7b20580e 100644
--- a/vendor/packed_simd_2/src/codegen/math/float/exp.rs
+++ b/vendor/packed_simd_2/src/codegen/math/float/exp.rs
@@ -5,7 +5,7 @@
use crate::*;
-crate trait Exp {
+pub(crate) trait Exp {
fn exp(self) -> Self;
}
diff --git a/vendor/packed_simd_2/src/codegen/math/float/ln.rs b/vendor/packed_simd_2/src/codegen/math/float/ln.rs
index 88a5a6c6c..a5e38cb40 100644
--- a/vendor/packed_simd_2/src/codegen/math/float/ln.rs
+++ b/vendor/packed_simd_2/src/codegen/math/float/ln.rs
@@ -5,7 +5,7 @@
use crate::*;
-crate trait Ln {
+pub(crate) trait Ln {
fn ln(self) -> Self;
}
diff --git a/vendor/packed_simd_2/src/codegen/math/float/macros.rs b/vendor/packed_simd_2/src/codegen/math/float/macros.rs
index 02d0ca3f5..8daee1afe 100644
--- a/vendor/packed_simd_2/src/codegen/math/float/macros.rs
+++ b/vendor/packed_simd_2/src/codegen/math/float/macros.rs
@@ -1,7 +1,6 @@
//! Utility macros
#![allow(unused)]
-
macro_rules! impl_unary_ {
// implementation mapping 1:1
(vec | $trait_id:ident, $trait_method:ident, $vec_id:ident,
@@ -64,10 +63,8 @@ macro_rules! impl_unary_ {
let mut halves = U { vec: self }.halves;
- *halves.get_unchecked_mut(0) =
- transmute($fun(transmute(*halves.get_unchecked(0))));
- *halves.get_unchecked_mut(1) =
- transmute($fun(transmute(*halves.get_unchecked(1))));
+ *halves.get_unchecked_mut(0) = transmute($fun(transmute(*halves.get_unchecked(0))));
+ *halves.get_unchecked_mut(1) = transmute($fun(transmute(*halves.get_unchecked(1))));
U { halves }.vec
}
@@ -89,14 +86,10 @@ macro_rules! impl_unary_ {
let mut quarters = U { vec: self }.quarters;
- *quarters.get_unchecked_mut(0) =
- transmute($fun(transmute(*quarters.get_unchecked(0))));
- *quarters.get_unchecked_mut(1) =
- transmute($fun(transmute(*quarters.get_unchecked(1))));
- *quarters.get_unchecked_mut(2) =
- transmute($fun(transmute(*quarters.get_unchecked(2))));
- *quarters.get_unchecked_mut(3) =
- transmute($fun(transmute(*quarters.get_unchecked(3))));
+ *quarters.get_unchecked_mut(0) = transmute($fun(transmute(*quarters.get_unchecked(0))));
+ *quarters.get_unchecked_mut(1) = transmute($fun(transmute(*quarters.get_unchecked(1))));
+ *quarters.get_unchecked_mut(2) = transmute($fun(transmute(*quarters.get_unchecked(2))));
+ *quarters.get_unchecked_mut(3) = transmute($fun(transmute(*quarters.get_unchecked(3))));
U { quarters }.vec
}
@@ -137,43 +130,19 @@ macro_rules! gen_unary_impl_table {
impl_unary_!(gen | $trait_id, $trait_method, $vid, $fun);
};
($vid:ident[$sid:ident; $sc:expr]: $fun:ident) => {
- impl_unary_!(
- scalar | $trait_id,
- $trait_method,
- $vid,
- [$sid; $sc],
- $fun
- );
+ impl_unary_!(scalar | $trait_id, $trait_method, $vid, [$sid; $sc], $fun);
};
($vid:ident[s]: $fun:ident) => {
impl_unary_!(scalar | $trait_id, $trait_method, $vid, $fun);
};
($vid:ident[h => $vid_h:ident]: $fun:ident) => {
- impl_unary_!(
- halves | $trait_id,
- $trait_method,
- $vid,
- $vid_h,
- $fun
- );
+ impl_unary_!(halves | $trait_id, $trait_method, $vid, $vid_h, $fun);
};
($vid:ident[q => $vid_q:ident]: $fun:ident) => {
- impl_unary_!(
- quarter | $trait_id,
- $trait_method,
- $vid,
- $vid_q,
- $fun
- );
+ impl_unary_!(quarter | $trait_id, $trait_method, $vid, $vid_q, $fun);
};
($vid:ident[t => $vid_t:ident]: $fun:ident) => {
- impl_unary_!(
- twice | $trait_id,
- $trait_method,
- $vid,
- $vid_t,
- $fun
- );
+ impl_unary_!(twice | $trait_id, $trait_method, $vid, $vid_t, $fun);
};
}
};
@@ -188,11 +157,7 @@ macro_rules! impl_tertiary_ {
fn $trait_method(self, y: Self, z: Self) -> Self {
unsafe {
use crate::mem::transmute;
- transmute($fun(
- transmute(self),
- transmute(y),
- transmute(z),
- ))
+ transmute($fun(transmute(self), transmute(y), transmute(z)))
}
}
}
@@ -314,11 +279,8 @@ macro_rules! impl_tertiary_ {
let x_twice = U { vec: [self, uninitialized()] }.twice;
let y_twice = U { vec: [y, uninitialized()] }.twice;
let z_twice = U { vec: [z, uninitialized()] }.twice;
- let twice: $vect_id = transmute($fun(
- transmute(x_twice),
- transmute(y_twice),
- transmute(z_twice),
- ));
+ let twice: $vect_id =
+ transmute($fun(transmute(x_twice), transmute(y_twice), transmute(z_twice)));
*(U { twice }.vec.get_unchecked(0))
}
@@ -334,43 +296,19 @@ macro_rules! gen_tertiary_impl_table {
impl_tertiary_!(vec | $trait_id, $trait_method, $vid, $fun);
};
($vid:ident[$sid:ident; $sc:expr]: $fun:ident) => {
- impl_tertiary_!(
- scalar | $trait_id,
- $trait_method,
- $vid,
- [$sid; $sc],
- $fun
- );
+ impl_tertiary_!(scalar | $trait_id, $trait_method, $vid, [$sid; $sc], $fun);
};
($vid:ident[s]: $fun:ident) => {
impl_tertiary_!(scalar | $trait_id, $trait_method, $vid, $fun);
};
($vid:ident[h => $vid_h:ident]: $fun:ident) => {
- impl_tertiary_!(
- halves | $trait_id,
- $trait_method,
- $vid,
- $vid_h,
- $fun
- );
+ impl_tertiary_!(halves | $trait_id, $trait_method, $vid, $vid_h, $fun);
};
($vid:ident[q => $vid_q:ident]: $fun:ident) => {
- impl_tertiary_!(
- quarter | $trait_id,
- $trait_method,
- $vid,
- $vid_q,
- $fun
- );
+ impl_tertiary_!(quarter | $trait_id, $trait_method, $vid, $vid_q, $fun);
};
($vid:ident[t => $vid_t:ident]: $fun:ident) => {
- impl_tertiary_!(
- twice | $trait_id,
- $trait_method,
- $vid,
- $vid_t,
- $fun
- );
+ impl_tertiary_!(twice | $trait_id, $trait_method, $vid, $vid_t, $fun);
};
}
};
@@ -497,10 +435,7 @@ macro_rules! impl_binary_ {
let x_twice = U { vec: [self, uninitialized()] }.twice;
let y_twice = U { vec: [y, uninitialized()] }.twice;
- let twice: $vect_id = transmute($fun(
- transmute(x_twice),
- transmute(y_twice),
- ));
+ let twice: $vect_id = transmute($fun(transmute(x_twice), transmute(y_twice)));
*(U { twice }.vec.get_unchecked(0))
}
@@ -516,43 +451,19 @@ macro_rules! gen_binary_impl_table {
impl_binary_!(vec | $trait_id, $trait_method, $vid, $fun);
};
($vid:ident[$sid:ident; $sc:expr]: $fun:ident) => {
- impl_binary_!(
- scalar | $trait_id,
- $trait_method,
- $vid,
- [$sid; $sc],
- $fun
- );
+ impl_binary_!(scalar | $trait_id, $trait_method, $vid, [$sid; $sc], $fun);
};
($vid:ident[s]: $fun:ident) => {
impl_binary_!(scalar | $trait_id, $trait_method, $vid, $fun);
};
($vid:ident[h => $vid_h:ident]: $fun:ident) => {
- impl_binary_!(
- halves | $trait_id,
- $trait_method,
- $vid,
- $vid_h,
- $fun
- );
+ impl_binary_!(halves | $trait_id, $trait_method, $vid, $vid_h, $fun);
};
($vid:ident[q => $vid_q:ident]: $fun:ident) => {
- impl_binary_!(
- quarter | $trait_id,
- $trait_method,
- $vid,
- $vid_q,
- $fun
- );
+ impl_binary_!(quarter | $trait_id, $trait_method, $vid, $vid_q, $fun);
};
($vid:ident[t => $vid_t:ident]: $fun:ident) => {
- impl_binary_!(
- twice | $trait_id,
- $trait_method,
- $vid,
- $vid_t,
- $fun
- );
+ impl_binary_!(twice | $trait_id, $trait_method, $vid, $vid_t, $fun);
};
}
};
diff --git a/vendor/packed_simd_2/src/codegen/math/float/mul_add.rs b/vendor/packed_simd_2/src/codegen/math/float/mul_add.rs
index f48a57dc4..d37f30fa8 100644
--- a/vendor/packed_simd_2/src/codegen/math/float/mul_add.rs
+++ b/vendor/packed_simd_2/src/codegen/math/float/mul_add.rs
@@ -4,7 +4,7 @@ use crate::*;
// FIXME: 64-bit 1 element mul_add
-crate trait MulAdd {
+pub(crate) trait MulAdd {
fn mul_add(self, y: Self, z: Self) -> Self;
}
diff --git a/vendor/packed_simd_2/src/codegen/math/float/mul_adde.rs b/vendor/packed_simd_2/src/codegen/math/float/mul_adde.rs
index 8c41fb131..c0baeacec 100644
--- a/vendor/packed_simd_2/src/codegen/math/float/mul_adde.rs
+++ b/vendor/packed_simd_2/src/codegen/math/float/mul_adde.rs
@@ -3,7 +3,7 @@ use crate::*;
// FIXME: 64-bit 1 element mul_adde
-crate trait MulAddE {
+pub(crate) trait MulAddE {
fn mul_adde(self, y: Self, z: Self) -> Self;
}
@@ -38,13 +38,7 @@ macro_rules! impl_mul_adde {
#[cfg(not(target_arch = "s390x"))]
{
use crate::mem::transmute;
- unsafe {
- transmute($fn(
- transmute(self),
- transmute(y),
- transmute(z),
- ))
- }
+ unsafe { transmute($fn(transmute(self), transmute(y), transmute(z))) }
}
#[cfg(target_arch = "s390x")]
{
diff --git a/vendor/packed_simd_2/src/codegen/math/float/powf.rs b/vendor/packed_simd_2/src/codegen/math/float/powf.rs
index bc15067d7..89ca52e96 100644
--- a/vendor/packed_simd_2/src/codegen/math/float/powf.rs
+++ b/vendor/packed_simd_2/src/codegen/math/float/powf.rs
@@ -5,7 +5,7 @@
use crate::*;
-crate trait Powf {
+pub(crate) trait Powf {
fn powf(self, x: Self) -> Self;
}
diff --git a/vendor/packed_simd_2/src/codegen/math/float/sin.rs b/vendor/packed_simd_2/src/codegen/math/float/sin.rs
index 7b014d07d..d88141590 100644
--- a/vendor/packed_simd_2/src/codegen/math/float/sin.rs
+++ b/vendor/packed_simd_2/src/codegen/math/float/sin.rs
@@ -5,7 +5,7 @@
use crate::*;
-crate trait Sin {
+pub(crate) trait Sin {
fn sin(self) -> Self;
}
diff --git a/vendor/packed_simd_2/src/codegen/math/float/sin_cos_pi.rs b/vendor/packed_simd_2/src/codegen/math/float/sin_cos_pi.rs
index 0f1249ec8..b283d1111 100644
--- a/vendor/packed_simd_2/src/codegen/math/float/sin_cos_pi.rs
+++ b/vendor/packed_simd_2/src/codegen/math/float/sin_cos_pi.rs
@@ -5,7 +5,7 @@
use crate::*;
-crate trait SinCosPi: Sized {
+pub(crate) trait SinCosPi: Sized {
type Output;
fn sin_cos_pi(self) -> Self::Output;
}
@@ -85,17 +85,14 @@ macro_rules! impl_unary_t {
let halves = U { vec: self }.halves;
- let res_0: ($vid_h, $vid_h) =
- transmute($fun(transmute(*halves.get_unchecked(0))));
- let res_1: ($vid_h, $vid_h) =
- transmute($fun(transmute(*halves.get_unchecked(1))));
+ let res_0: ($vid_h, $vid_h) = transmute($fun(transmute(*halves.get_unchecked(0))));
+ let res_1: ($vid_h, $vid_h) = transmute($fun(transmute(*halves.get_unchecked(1))));
union R {
result: ($vid, $vid),
halves: ([$vid_h; 2], [$vid_h; 2]),
}
- R { halves: ([res_0.0, res_1.0], [res_0.1, res_1.1]) }
- .result
+ R { halves: ([res_0.0, res_1.0], [res_0.1, res_1.1]) }.result
}
}
}
@@ -114,14 +111,10 @@ macro_rules! impl_unary_t {
let quarters = U { vec: self }.quarters;
- let res_0: ($vid_q, $vid_q) =
- transmute($fun(transmute(*quarters.get_unchecked(0))));
- let res_1: ($vid_q, $vid_q) =
- transmute($fun(transmute(*quarters.get_unchecked(1))));
- let res_2: ($vid_q, $vid_q) =
- transmute($fun(transmute(*quarters.get_unchecked(2))));
- let res_3: ($vid_q, $vid_q) =
- transmute($fun(transmute(*quarters.get_unchecked(3))));
+ let res_0: ($vid_q, $vid_q) = transmute($fun(transmute(*quarters.get_unchecked(0))));
+ let res_1: ($vid_q, $vid_q) = transmute($fun(transmute(*quarters.get_unchecked(1))));
+ let res_2: ($vid_q, $vid_q) = transmute($fun(transmute(*quarters.get_unchecked(2))));
+ let res_3: ($vid_q, $vid_q) = transmute($fun(transmute(*quarters.get_unchecked(3))));
union R {
result: ($vid, $vid),
diff --git a/vendor/packed_simd_2/src/codegen/math/float/sin_pi.rs b/vendor/packed_simd_2/src/codegen/math/float/sin_pi.rs
index 72df98c93..0c8f6bb12 100644
--- a/vendor/packed_simd_2/src/codegen/math/float/sin_pi.rs
+++ b/vendor/packed_simd_2/src/codegen/math/float/sin_pi.rs
@@ -5,7 +5,7 @@
use crate::*;
-crate trait SinPi {
+pub(crate) trait SinPi {
fn sin_pi(self) -> Self;
}
diff --git a/vendor/packed_simd_2/src/codegen/math/float/sqrt.rs b/vendor/packed_simd_2/src/codegen/math/float/sqrt.rs
index 7ce31df62..67bb0a2a9 100644
--- a/vendor/packed_simd_2/src/codegen/math/float/sqrt.rs
+++ b/vendor/packed_simd_2/src/codegen/math/float/sqrt.rs
@@ -5,7 +5,7 @@
use crate::*;
-crate trait Sqrt {
+pub(crate) trait Sqrt {
fn sqrt(self) -> Self;
}
diff --git a/vendor/packed_simd_2/src/codegen/math/float/sqrte.rs b/vendor/packed_simd_2/src/codegen/math/float/sqrte.rs
index c1e379c34..58a1de1f4 100644
--- a/vendor/packed_simd_2/src/codegen/math/float/sqrte.rs
+++ b/vendor/packed_simd_2/src/codegen/math/float/sqrte.rs
@@ -6,7 +6,7 @@
use crate::llvm::simd_fsqrt;
use crate::*;
-crate trait Sqrte {
+pub(crate) trait Sqrte {
fn sqrte(self) -> Self;
}
diff --git a/vendor/packed_simd_2/src/codegen/math/float/tanh.rs b/vendor/packed_simd_2/src/codegen/math/float/tanh.rs
index 5220c7d10..2c0dd3dc3 100644
--- a/vendor/packed_simd_2/src/codegen/math/float/tanh.rs
+++ b/vendor/packed_simd_2/src/codegen/math/float/tanh.rs
@@ -5,12 +5,11 @@
use crate::*;
-crate trait Tanh {
+pub(crate) trait Tanh {
fn tanh(self) -> Self;
}
macro_rules! define_tanh {
-
($name:ident, $basetype:ty, $simdtype:ty, $lanes:expr, $trait:path) => {
fn $name(x: $simdtype) -> $simdtype {
use core::intrinsics::transmute;
@@ -31,8 +30,9 @@ macro_rules! define_tanh {
};
}
-// llvm does not seem to expose the hyperbolic versions of trigonometric functions;
-// we thus call the classical rust versions on all of them (which stem from cmath).
+// llvm does not seem to expose the hyperbolic versions of trigonometric
+// functions; we thus call the classical rust versions on all of them (which
+// stem from cmath).
define_tanh!(f32 => tanh_v2f32, f32x2, 2);
define_tanh!(f32 => tanh_v4f32, f32x4, 4);
define_tanh!(f32 => tanh_v8f32, f32x8, 8);
diff --git a/vendor/packed_simd_2/src/codegen/pointer_sized_int.rs b/vendor/packed_simd_2/src/codegen/pointer_sized_int.rs
index 39f493d3b..55cbc297a 100644
--- a/vendor/packed_simd_2/src/codegen/pointer_sized_int.rs
+++ b/vendor/packed_simd_2/src/codegen/pointer_sized_int.rs
@@ -4,24 +4,24 @@ use cfg_if::cfg_if;
cfg_if! {
if #[cfg(target_pointer_width = "8")] {
- crate type isize_ = i8;
- crate type usize_ = u8;
+ pub(crate) type isize_ = i8;
+ pub(crate) type usize_ = u8;
} else if #[cfg(target_pointer_width = "16")] {
- crate type isize_ = i16;
- crate type usize_ = u16;
+ pub(crate) type isize_ = i16;
+ pub(crate) type usize_ = u16;
} else if #[cfg(target_pointer_width = "32")] {
- crate type isize_ = i32;
- crate type usize_ = u32;
+ pub(crate) type isize_ = i32;
+ pub(crate) type usize_ = u32;
} else if #[cfg(target_pointer_width = "64")] {
- crate type isize_ = i64;
- crate type usize_ = u64;
+ pub(crate) type isize_ = i64;
+ pub(crate) type usize_ = u64;
} else if #[cfg(target_pointer_width = "64")] {
- crate type isize_ = i64;
- crate type usize_ = u64;
+ pub(crate) type isize_ = i64;
+ pub(crate) type usize_ = u64;
} else if #[cfg(target_pointer_width = "128")] {
- crate type isize_ = i128;
- crate type usize_ = u128;
+ pub(crate) type isize_ = i128;
+ pub(crate) type usize_ = u128;
} else {
compile_error!("unsupported target_pointer_width");
}
diff --git a/vendor/packed_simd_2/src/codegen/reductions.rs b/vendor/packed_simd_2/src/codegen/reductions.rs
index 7be4f5fab..302ca6d88 100644
--- a/vendor/packed_simd_2/src/codegen/reductions.rs
+++ b/vendor/packed_simd_2/src/codegen/reductions.rs
@@ -1 +1 @@
-crate mod mask;
+pub(crate) mod mask;
diff --git a/vendor/packed_simd_2/src/codegen/reductions/mask.rs b/vendor/packed_simd_2/src/codegen/reductions/mask.rs
index 97260c6d4..a78bcc563 100644
--- a/vendor/packed_simd_2/src/codegen/reductions/mask.rs
+++ b/vendor/packed_simd_2/src/codegen/reductions/mask.rs
@@ -1,17 +1,17 @@
//! Code generation workaround for `all()` mask horizontal reduction.
//!
-//! Works arround [LLVM bug 36702].
+//! Works around [LLVM bug 36702].
//!
//! [LLVM bug 36702]: https://bugs.llvm.org/show_bug.cgi?id=36702
#![allow(unused_macros)]
use crate::*;
-crate trait All: crate::marker::Sized {
+pub(crate) trait All: crate::marker::Sized {
unsafe fn all(self) -> bool;
}
-crate trait Any: crate::marker::Sized {
+pub(crate) trait Any: crate::marker::Sized {
unsafe fn any(self) -> bool;
}
diff --git a/vendor/packed_simd_2/src/codegen/reductions/mask/aarch64.rs b/vendor/packed_simd_2/src/codegen/reductions/mask/aarch64.rs
index e9586eace..b2db52c89 100644
--- a/vendor/packed_simd_2/src/codegen/reductions/mask/aarch64.rs
+++ b/vendor/packed_simd_2/src/codegen/reductions/mask/aarch64.rs
@@ -19,7 +19,7 @@ macro_rules! aarch64_128_neon_impl {
$vmax(crate::mem::transmute(self)) != 0
}
}
- }
+ };
}
/// 64-bit wide vectors
@@ -35,9 +35,7 @@ macro_rules! aarch64_64_neon_impl {
halves: ($id, $id),
vec: $vec128,
}
- U {
- halves: (self, self),
- }.vec.all()
+ U { halves: (self, self) }.vec.all()
}
}
impl Any for $id {
@@ -48,9 +46,7 @@ macro_rules! aarch64_64_neon_impl {
halves: ($id, $id),
vec: $vec128,
}
- U {
- halves: (self, self),
- }.vec.any()
+ U { halves: (self, self) }.vec.any()
}
}
};
@@ -59,13 +55,27 @@ macro_rules! aarch64_64_neon_impl {
/// Mask reduction implementation for `aarch64` targets
macro_rules! impl_mask_reductions {
// 64-bit wide masks
- (m8x8) => { aarch64_64_neon_impl!(m8x8, m8x16); };
- (m16x4) => { aarch64_64_neon_impl!(m16x4, m16x8); };
- (m32x2) => { aarch64_64_neon_impl!(m32x2, m32x4); };
+ (m8x8) => {
+ aarch64_64_neon_impl!(m8x8, m8x16);
+ };
+ (m16x4) => {
+ aarch64_64_neon_impl!(m16x4, m16x8);
+ };
+ (m32x2) => {
+ aarch64_64_neon_impl!(m32x2, m32x4);
+ };
// 128-bit wide masks
- (m8x16) => { aarch64_128_neon_impl!(m8x16, vminvq_u8, vmaxvq_u8); };
- (m16x8) => { aarch64_128_neon_impl!(m16x8, vminvq_u16, vmaxvq_u16); };
- (m32x4) => { aarch64_128_neon_impl!(m32x4, vminvq_u32, vmaxvq_u32); };
+ (m8x16) => {
+ aarch64_128_neon_impl!(m8x16, vminvq_u8, vmaxvq_u8);
+ };
+ (m16x8) => {
+ aarch64_128_neon_impl!(m16x8, vminvq_u16, vmaxvq_u16);
+ };
+ (m32x4) => {
+ aarch64_128_neon_impl!(m32x4, vminvq_u32, vmaxvq_u32);
+ };
// Fallback to LLVM's default code-generation:
- ($id:ident) => { fallback_impl!($id); };
+ ($id:ident) => {
+ fallback_impl!($id);
+ };
}
diff --git a/vendor/packed_simd_2/src/codegen/reductions/mask/arm.rs b/vendor/packed_simd_2/src/codegen/reductions/mask/arm.rs
index 1987af7a9..41c3cbc58 100644
--- a/vendor/packed_simd_2/src/codegen/reductions/mask/arm.rs
+++ b/vendor/packed_simd_2/src/codegen/reductions/mask/arm.rs
@@ -15,10 +15,7 @@ macro_rules! arm_128_v7_neon_impl {
vec: $id,
}
let halves = U { vec: self }.halves;
- let h: $half = transmute($vpmin(
- transmute(halves.0),
- transmute(halves.1),
- ));
+ let h: $half = transmute($vpmin(transmute(halves.0), transmute(halves.1)));
h.all()
}
}
@@ -33,10 +30,7 @@ macro_rules! arm_128_v7_neon_impl {
vec: $id,
}
let halves = U { vec: self }.halves;
- let h: $half = transmute($vpmax(
- transmute(halves.0),
- transmute(halves.1),
- ));
+ let h: $half = transmute($vpmax(transmute(halves.0), transmute(halves.1)));
h.any()
}
}
@@ -46,9 +40,17 @@ macro_rules! arm_128_v7_neon_impl {
/// Mask reduction implementation for `arm` targets
macro_rules! impl_mask_reductions {
// 128-bit wide masks
- (m8x16) => { arm_128_v7_neon_impl!(m8x16, m8x8, vpmin_u8, vpmax_u8); };
- (m16x8) => { arm_128_v7_neon_impl!(m16x8, m16x4, vpmin_u16, vpmax_u16); };
- (m32x4) => { arm_128_v7_neon_impl!(m32x4, m32x2, vpmin_u32, vpmax_u32); };
+ (m8x16) => {
+ arm_128_v7_neon_impl!(m8x16, m8x8, vpmin_u8, vpmax_u8);
+ };
+ (m16x8) => {
+ arm_128_v7_neon_impl!(m16x8, m16x4, vpmin_u16, vpmax_u16);
+ };
+ (m32x4) => {
+ arm_128_v7_neon_impl!(m32x4, m32x2, vpmin_u32, vpmax_u32);
+ };
// Fallback to LLVM's default code-generation:
- ($id:ident) => { fallback_impl!($id); };
+ ($id:ident) => {
+ fallback_impl!($id);
+ };
}
diff --git a/vendor/packed_simd_2/src/codegen/reductions/mask/fallback.rs b/vendor/packed_simd_2/src/codegen/reductions/mask/fallback.rs
index 25e5c813a..4c377a687 100644
--- a/vendor/packed_simd_2/src/codegen/reductions/mask/fallback.rs
+++ b/vendor/packed_simd_2/src/codegen/reductions/mask/fallback.rs
@@ -2,5 +2,7 @@
/// Default mask reduction implementation
macro_rules! impl_mask_reductions {
- ($id:ident) => { fallback_impl!($id); };
+ ($id:ident) => {
+ fallback_impl!($id);
+ };
}
diff --git a/vendor/packed_simd_2/src/codegen/reductions/mask/x86.rs b/vendor/packed_simd_2/src/codegen/reductions/mask/x86.rs
index bcfb1a6e1..4bf509806 100644
--- a/vendor/packed_simd_2/src/codegen/reductions/mask/x86.rs
+++ b/vendor/packed_simd_2/src/codegen/reductions/mask/x86.rs
@@ -114,17 +114,17 @@ macro_rules! x86_m64x4_impl {
/// Fallback implementation.
macro_rules! x86_intr_impl {
($id:ident) => {
- impl All for $id {
- #[inline]
- unsafe fn all(self) -> bool {
- use crate::llvm::simd_reduce_all;
- simd_reduce_all(self.0)
+ impl All for $id {
+ #[inline]
+ unsafe fn all(self) -> bool {
+ use crate::llvm::simd_reduce_all;
+ simd_reduce_all(self.0)
+ }
}
- }
impl Any for $id {
#[inline]
unsafe fn any(self) -> bool {
- use crate::llvm::simd_reduce_any;
+ use crate::llvm::simd_reduce_any;
simd_reduce_any(self.0)
}
}
@@ -134,21 +134,47 @@ macro_rules! x86_intr_impl {
/// Mask reduction implementation for `x86` and `x86_64` targets
macro_rules! impl_mask_reductions {
// 64-bit wide masks
- (m8x8) => { x86_m8x8_impl!(m8x8); };
- (m16x4) => { x86_m8x8_impl!(m16x4); };
- (m32x2) => { x86_m8x8_impl!(m32x2); };
+ (m8x8) => {
+ x86_m8x8_impl!(m8x8);
+ };
+ (m16x4) => {
+ x86_m8x8_impl!(m16x4);
+ };
+ (m32x2) => {
+ x86_m8x8_impl!(m32x2);
+ };
// 128-bit wide masks
- (m8x16) => { x86_m8x16_impl!(m8x16); };
- (m16x8) => { x86_m8x16_impl!(m16x8); };
- (m32x4) => { x86_m32x4_impl!(m32x4); };
- (m64x2) => { x86_m64x2_impl!(m64x2); };
- (m128x1) => { x86_intr_impl!(m128x1); };
+ (m8x16) => {
+ x86_m8x16_impl!(m8x16);
+ };
+ (m16x8) => {
+ x86_m8x16_impl!(m16x8);
+ };
+ (m32x4) => {
+ x86_m32x4_impl!(m32x4);
+ };
+ (m64x2) => {
+ x86_m64x2_impl!(m64x2);
+ };
+ (m128x1) => {
+ x86_intr_impl!(m128x1);
+ };
// 256-bit wide masks:
- (m8x32) => { x86_m8x32_impl!(m8x32, m8x16); };
- (m16x16) => { x86_m8x32_impl!(m16x16, m16x8); };
- (m32x8) => { x86_m32x8_impl!(m32x8, m32x4); };
- (m64x4) => { x86_m64x4_impl!(m64x4, m64x2); };
- (m128x2) => { x86_intr_impl!(m128x2); };
+ (m8x32) => {
+ x86_m8x32_impl!(m8x32, m8x16);
+ };
+ (m16x16) => {
+ x86_m8x32_impl!(m16x16, m16x8);
+ };
+ (m32x8) => {
+ x86_m32x8_impl!(m32x8, m32x4);
+ };
+ (m64x4) => {
+ x86_m64x4_impl!(m64x4, m64x2);
+ };
+ (m128x2) => {
+ x86_intr_impl!(m128x2);
+ };
(msizex2) => {
cfg_if! {
if #[cfg(target_pointer_width = "64")] {
@@ -184,5 +210,7 @@ macro_rules! impl_mask_reductions {
};
// Fallback to LLVM's default code-generation:
- ($id:ident) => { fallback_impl!($id); };
+ ($id:ident) => {
+ fallback_impl!($id);
+ };
}
diff --git a/vendor/packed_simd_2/src/codegen/reductions/mask/x86/avx.rs b/vendor/packed_simd_2/src/codegen/reductions/mask/x86/avx.rs
index d18736fb0..61f352d22 100644
--- a/vendor/packed_simd_2/src/codegen/reductions/mask/x86/avx.rs
+++ b/vendor/packed_simd_2/src/codegen/reductions/mask/x86/avx.rs
@@ -13,10 +13,7 @@ macro_rules! x86_m8x32_avx_impl {
use crate::arch::x86::_mm256_testc_si256;
#[cfg(target_arch = "x86_64")]
use crate::arch::x86_64::_mm256_testc_si256;
- _mm256_testc_si256(
- crate::mem::transmute(self),
- crate::mem::transmute($id::splat(true)),
- ) != 0
+ _mm256_testc_si256(crate::mem::transmute(self), crate::mem::transmute($id::splat(true))) != 0
}
}
impl Any for $id {
@@ -27,10 +24,7 @@ macro_rules! x86_m8x32_avx_impl {
use crate::arch::x86::_mm256_testz_si256;
#[cfg(target_arch = "x86_64")]
use crate::arch::x86_64::_mm256_testz_si256;
- _mm256_testz_si256(
- crate::mem::transmute(self),
- crate::mem::transmute(self),
- ) == 0
+ _mm256_testz_si256(crate::mem::transmute(self), crate::mem::transmute(self)) == 0
}
}
};
diff --git a/vendor/packed_simd_2/src/codegen/reductions/mask/x86/sse.rs b/vendor/packed_simd_2/src/codegen/reductions/mask/x86/sse.rs
index eb1ef7fac..e0c9aee92 100644
--- a/vendor/packed_simd_2/src/codegen/reductions/mask/x86/sse.rs
+++ b/vendor/packed_simd_2/src/codegen/reductions/mask/x86/sse.rs
@@ -16,8 +16,7 @@ macro_rules! x86_m32x4_sse_impl {
// most significant bit of each lane of `a`. If all
// bits are set, then all 4 lanes of the mask are
// true.
- _mm_movemask_ps(crate::mem::transmute(self))
- == 0b_1111_i32
+ _mm_movemask_ps(crate::mem::transmute(self)) == 0b_1111_i32
}
}
impl Any for $id {
diff --git a/vendor/packed_simd_2/src/codegen/reductions/mask/x86/sse2.rs b/vendor/packed_simd_2/src/codegen/reductions/mask/x86/sse2.rs
index a99c606f5..bbb52fa47 100644
--- a/vendor/packed_simd_2/src/codegen/reductions/mask/x86/sse2.rs
+++ b/vendor/packed_simd_2/src/codegen/reductions/mask/x86/sse2.rs
@@ -16,8 +16,7 @@ macro_rules! x86_m64x2_sse2_impl {
// most significant bit of each lane of `a`. If all
// bits are set, then all 2 lanes of the mask are
// true.
- _mm_movemask_pd(crate::mem::transmute(self))
- == 0b_11_i32
+ _mm_movemask_pd(crate::mem::transmute(self)) == 0b_11_i32
}
}
impl Any for $id {
@@ -50,8 +49,7 @@ macro_rules! x86_m8x16_sse2_impl {
// most significant bit of each byte of `a`. If all
// bits are set, then all 16 lanes of the mask are
// true.
- _mm_movemask_epi8(crate::mem::transmute(self))
- == i32::from(u16::max_value())
+ _mm_movemask_epi8(crate::mem::transmute(self)) == i32::from(u16::max_value())
}
}
impl Any for $id {
diff --git a/vendor/packed_simd_2/src/codegen/shuffle.rs b/vendor/packed_simd_2/src/codegen/shuffle.rs
index d92c9ee22..d3acd48f5 100644
--- a/vendor/packed_simd_2/src/codegen/shuffle.rs
+++ b/vendor/packed_simd_2/src/codegen/shuffle.rs
@@ -2,7 +2,7 @@
//! lanes and vector element types.
use crate::masks::*;
-use crate::sealed::{Shuffle, Seal};
+use crate::sealed::{Seal, Shuffle};
macro_rules! impl_shuffle {
($array:ty, $base:ty, $out:ty) => {
@@ -10,7 +10,7 @@ macro_rules! impl_shuffle {
impl Shuffle<$array> for $base {
type Output = $out;
}
- }
+ };
}
impl_shuffle! { [u32; 2], i8, crate::codegen::i8x2 }
diff --git a/vendor/packed_simd_2/src/codegen/shuffle1_dyn.rs b/vendor/packed_simd_2/src/codegen/shuffle1_dyn.rs
index 8d9577b26..19d457a45 100644
--- a/vendor/packed_simd_2/src/codegen/shuffle1_dyn.rs
+++ b/vendor/packed_simd_2/src/codegen/shuffle1_dyn.rs
@@ -16,8 +16,7 @@ macro_rules! impl_fallback {
fn shuffle1_dyn(self, indices: Self::Indices) -> Self {
let mut result = Self::splat(0);
for i in 0..$id::lanes() {
- result = result
- .replace(i, self.extract(indices.extract(i) as usize));
+ result = result.replace(i, self.extract(indices.extract(i) as usize));
}
result
}
@@ -31,7 +30,7 @@ macro_rules! impl_shuffle1_dyn {
if #[cfg(all(
any(
all(target_arch = "aarch64", target_feature = "neon"),
- all(target_arch = "arm", target_feature = "v7",
+ all(target_arch = "doesnotexist", target_feature = "v7",
target_feature = "neon")
),
any(feature = "core_arch", libcore_neon)
@@ -43,7 +42,7 @@ macro_rules! impl_shuffle1_dyn {
fn shuffle1_dyn(self, indices: Self::Indices) -> Self {
#[cfg(target_arch = "aarch64")]
use crate::arch::aarch64::vtbl1_u8;
- #[cfg(target_arch = "arm")]
+ #[cfg(target_arch = "doesnotexist")]
use crate::arch::arm::vtbl1_u8;
// This is safe because the binary is compiled with
@@ -104,7 +103,7 @@ macro_rules! impl_shuffle1_dyn {
}
}
}
- } else if #[cfg(all(target_arch = "arm", target_feature = "v7",
+ } else if #[cfg(all(target_arch = "doesnotexist", target_feature = "v7",
target_feature = "neon",
any(feature = "core_arch", libcore_neon)))] {
impl Shuffle1Dyn for u8x16 {
@@ -150,16 +149,12 @@ macro_rules! impl_shuffle1_dyn {
#[inline]
fn shuffle1_dyn(self, indices: Self::Indices) -> Self {
let indices: u8x8 = (indices * 2).cast();
- let indices: u8x16 = shuffle!(
- indices, [0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7]
- );
- let v = u8x16::new(
- 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1
- );
+ let indices: u8x16 = shuffle!(indices, [0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7]);
+ let v = u8x16::new(0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1);
let indices = indices + v;
unsafe {
- let s: u8x16 =crate::mem::transmute(self);
- crate::mem::transmute(s.shuffle1_dyn(indices))
+ let s: u8x16 = crate::mem::transmute(self);
+ crate::mem::transmute(s.shuffle1_dyn(indices))
}
}
}
@@ -268,7 +263,9 @@ macro_rules! impl_shuffle1_dyn {
}
}
};
- ($id:ident) => { impl_fallback!($id); }
+ ($id:ident) => {
+ impl_fallback!($id);
+ };
}
impl_shuffle1_dyn!(u8x2);
diff --git a/vendor/packed_simd_2/src/codegen/swap_bytes.rs b/vendor/packed_simd_2/src/codegen/swap_bytes.rs
index b435fb5da..9cf34a3e0 100644
--- a/vendor/packed_simd_2/src/codegen/swap_bytes.rs
+++ b/vendor/packed_simd_2/src/codegen/swap_bytes.rs
@@ -5,7 +5,7 @@
use crate::*;
-crate trait SwapBytes {
+pub(crate) trait SwapBytes {
fn swap_bytes(self) -> Self;
}
@@ -15,7 +15,7 @@ macro_rules! impl_swap_bytes {
impl SwapBytes for $id {
#[inline]
fn swap_bytes(self) -> Self {
- unsafe { shuffle!(self, [1, 0]) }
+ shuffle!(self, [1, 0])
}
}
)+
@@ -119,52 +119,12 @@ macro_rules! impl_swap_bytes {
impl_swap_bytes!(v16: u8x2, i8x2,);
impl_swap_bytes!(v32: u8x4, i8x4, u16x2, i16x2,);
// FIXME: 64-bit single element vector
-impl_swap_bytes!(
- v64: u8x8,
- i8x8,
- u16x4,
- i16x4,
- u32x2,
- i32x2, /* u64x1, i64x1, */
-);
+impl_swap_bytes!(v64: u8x8, i8x8, u16x4, i16x4, u32x2, i32x2 /* u64x1, i64x1, */,);
-impl_swap_bytes!(
- v128: u8x16,
- i8x16,
- u16x8,
- i16x8,
- u32x4,
- i32x4,
- u64x2,
- i64x2,
- u128x1,
- i128x1,
-);
-impl_swap_bytes!(
- v256: u8x32,
- i8x32,
- u16x16,
- i16x16,
- u32x8,
- i32x8,
- u64x4,
- i64x4,
- u128x2,
- i128x2,
-);
+impl_swap_bytes!(v128: u8x16, i8x16, u16x8, i16x8, u32x4, i32x4, u64x2, i64x2, u128x1, i128x1,);
+impl_swap_bytes!(v256: u8x32, i8x32, u16x16, i16x16, u32x8, i32x8, u64x4, i64x4, u128x2, i128x2,);
-impl_swap_bytes!(
- v512: u8x64,
- i8x64,
- u16x32,
- i16x32,
- u32x16,
- i32x16,
- u64x8,
- i64x8,
- u128x4,
- i128x4,
-);
+impl_swap_bytes!(v512: u8x64, i8x64, u16x32, i16x32, u32x16, i32x16, u64x8, i64x8, u128x4, i128x4,);
cfg_if! {
if #[cfg(target_pointer_width = "8")] {
diff --git a/vendor/packed_simd_2/src/codegen/vPtr.rs b/vendor/packed_simd_2/src/codegen/vPtr.rs
index cf4765538..abd3aa877 100644
--- a/vendor/packed_simd_2/src/codegen/vPtr.rs
+++ b/vendor/packed_simd_2/src/codegen/vPtr.rs
@@ -5,7 +5,7 @@ macro_rules! impl_simd_ptr {
| $($tys:ty),*) => {
#[derive(Copy, Clone)]
#[repr(simd)]
- pub struct $tuple_id<$ty>($(crate $tys),*);
+ pub struct $tuple_id<$ty>($(pub(crate) $tys),*);
//^^^^^^^ leaked through SimdArray
impl<$ty> crate::sealed::Seal for [$ptr_ty; $elem_count] {}
diff --git a/vendor/packed_simd_2/src/codegen/vSize.rs b/vendor/packed_simd_2/src/codegen/vSize.rs
index 3911b2134..d5db03991 100644
--- a/vendor/packed_simd_2/src/codegen/vSize.rs
+++ b/vendor/packed_simd_2/src/codegen/vSize.rs
@@ -11,33 +11,6 @@ impl_simd_array!([isize; 4]: isizex4 | isize_, isize_, isize_, isize_);
impl_simd_array!([usize; 4]: usizex4 | usize_, usize_, usize_, usize_);
impl_simd_array!([msize; 4]: msizex4 | isize_, isize_, isize_, isize_);
-impl_simd_array!(
- [isize; 8]: isizex8 | isize_,
- isize_,
- isize_,
- isize_,
- isize_,
- isize_,
- isize_,
- isize_
-);
-impl_simd_array!(
- [usize; 8]: usizex8 | usize_,
- usize_,
- usize_,
- usize_,
- usize_,
- usize_,
- usize_,
- usize_
-);
-impl_simd_array!(
- [msize; 8]: msizex8 | isize_,
- isize_,
- isize_,
- isize_,
- isize_,
- isize_,
- isize_,
- isize_
-);
+impl_simd_array!([isize; 8]: isizex8 | isize_, isize_, isize_, isize_, isize_, isize_, isize_, isize_);
+impl_simd_array!([usize; 8]: usizex8 | usize_, usize_, usize_, usize_, usize_, usize_, usize_, usize_);
+impl_simd_array!([msize; 8]: msizex8 | isize_, isize_, isize_, isize_, isize_, isize_, isize_, isize_);
diff --git a/vendor/packed_simd_2/src/lib.rs b/vendor/packed_simd_2/src/lib.rs
index 4d12c9cd9..277cc818b 100644
--- a/vendor/packed_simd_2/src/lib.rs
+++ b/vendor/packed_simd_2/src/lib.rs
@@ -13,8 +13,8 @@
//! - [Vector types](#vector-types)
//! - [Conditional operations](#conditional-operations)
//! - [Conversions](#conversions)
-//! - [Performance
-//! guide](https://rust-lang-nursery.github.io/packed_simd/perf-guide/)
+//! - [Hardware Features](#hardware-features)
+//! - [Performance guide](https://rust-lang-nursery.github.io/packed_simd/perf-guide/)
//!
//! ## Introduction
//!
@@ -26,7 +26,7 @@
//! are applied to each vector lane in isolation of the others:
//!
//! ```
-//! # use packed_simd::*;
+//! # use packed_simd_2::*;
//! let a = i32x4::new(1, 2, 3, 4);
//! let b = i32x4::new(5, 6, 7, 8);
//! assert_eq!(a + b, i32x4::new(6, 8, 10, 12));
@@ -35,7 +35,7 @@
//! Many "horizontal" operations are also provided:
//!
//! ```
-//! # use packed_simd::*;
+//! # use packed_simd_2::*;
//! # let a = i32x4::new(1, 2, 3, 4);
//! assert_eq!(a.wrapping_sum(), 10);
//! ```
@@ -47,9 +47,9 @@
//! and performing a single horizontal operation at the end:
//!
//! ```
-//! # use packed_simd::*;
+//! # use packed_simd_2::*;
//! fn reduce(x: &[i32]) -> i32 {
-//! assert!(x.len() % 4 == 0);
+//! assert_eq!(x.len() % 4, 0);
//! let mut sum = i32x4::splat(0); // [0, 0, 0, 0]
//! for i in (0..x.len()).step_by(4) {
//! sum += i32x4::from_slice_unaligned(&x[i..]);
@@ -79,7 +79,7 @@
//! ## Basic operations
//!
//! ```
-//! # use packed_simd::*;
+//! # use packed_simd_2::*;
//! // Sets all elements to `0`:
//! let a = i32x4::splat(0);
//!
@@ -107,7 +107,7 @@
//! to be performed:
//!
//! ```
-//! # use packed_simd::*;
+//! # use packed_simd_2::*;
//! let a = i32x4::new(1, 1, 2, 2);
//!
//! // Add `1` to the first two lanes of the vector.
@@ -134,13 +134,13 @@
//! > of lanes as the mask. The example shows this by using [`m16x4`] instead
//! > of [`m32x4`]. It is _typically_ more performant to use a mask element
//! > width equal to the element width of the vectors being operated upon.
-//! > This is, however, not true for 512-bit wide vectors when targetting
+//! > This is, however, not true for 512-bit wide vectors when targeting
//! > AVX-512, where the most efficient masks use only 1-bit per element.
//!
//! All vertical comparison operations returns masks:
//!
//! ```
-//! # use packed_simd::*;
+//! # use packed_simd_2::*;
//! let a = i32x4::new(1, 1, 3, 3);
//! let b = i32x4::new(2, 2, 0, 0);
//!
@@ -168,11 +168,11 @@
//! u8x8 = m8x8::splat(true).into_bits();` is provided because all `m8x8` bit
//! patterns are valid `u8x8` bit patterns. However, the opposite is not
//! true, not all `u8x8` bit patterns are valid `m8x8` bit-patterns, so this
-//! operation cannot be peformed safely using `x.into_bits()`; one needs to
+//! operation cannot be performed safely using `x.into_bits()`; one needs to
//! use `unsafe { crate::mem::transmute(x) }` for that, making sure that the
//! value in the `u8x8` is a valid bit-pattern of `m8x8`.
//!
-//! * **numeric casts** (`as`): are peformed using [`FromCast`]/[`Cast`]
+//! * **numeric casts** (`as`): are performed using [`FromCast`]/[`Cast`]
//! (`x.cast()`), just like `as`:
//!
//! * casting integer vectors whose lane types have the same size (e.g.
@@ -198,26 +198,36 @@
//!
//! Numeric casts are not very "precise": sometimes lossy, sometimes value
//! preserving, etc.
+//!
+//! ## Hardware Features
+//!
+//! This crate can use different hardware features based on your configured
+//! `RUSTFLAGS`. For example, with no configured `RUSTFLAGS`, `u64x8` on
+//! x86_64 will use SSE2 operations like `PCMPEQD`. If you configure
+//! `RUSTFLAGS='-C target-feature=+avx2,+avx'` on supported x86_64 hardware
+//! the same `u64x8` may use wider AVX2 operations like `VPCMPEQQ`. It is
+//! important for performance and for hardware support requirements that
+//! you choose an appropriate set of `target-feature` and `target-cpu`
+//! options during builds. For more information, see the [Performance
+//! guide](https://rust-lang-nursery.github.io/packed_simd/perf-guide/)
#![feature(
+ adt_const_params,
repr_simd,
rustc_attrs,
- const_fn,
platform_intrinsics,
stdsimd,
- aarch64_target_feature,
arm_target_feature,
link_llvm_intrinsics,
core_intrinsics,
stmt_expr_attributes,
- crate_visibility_modifier,
custom_inner_attributes,
- llvm_asm
)]
#![allow(non_camel_case_types, non_snake_case,
// FIXME: these types are unsound in C FFI already
// See https://github.com/rust-lang/rust/issues/53346
improper_ctypes_definitions,
+ incomplete_features,
clippy::cast_possible_truncation,
clippy::cast_lossless,
clippy::cast_possible_wrap,
@@ -228,6 +238,7 @@
// See https://github.com/rust-lang/rust-clippy/issues/3410
clippy::use_self,
clippy::wrong_self_convention,
+ clippy::from_over_into,
)]
#![cfg_attr(test, feature(hashmap_internals))]
#![deny(rust_2018_idioms, clippy::missing_inline_in_public_items)]
@@ -250,9 +261,8 @@ use wasm_bindgen_test::*;
#[allow(unused_imports)]
use core::{
- /* arch (handled above), */ cmp, f32, f64, fmt, hash, hint, i128,
- i16, i32, i64, i8, intrinsics, isize, iter, marker, mem, ops, ptr, slice,
- u128, u16, u32, u64, u8, usize,
+ /* arch (handled above), */ cmp, f32, f64, fmt, hash, hint, i128, i16, i32, i64, i8, intrinsics,
+ isize, iter, marker, mem, ops, ptr, slice, u128, u16, u32, u64, u8, usize,
};
#[macro_use]
@@ -262,14 +272,14 @@ mod api;
mod codegen;
mod sealed;
-pub use crate::sealed::{Simd as SimdVector, Shuffle, SimdArray, Mask};
+pub use crate::sealed::{Mask, Shuffle, Simd as SimdVector, SimdArray};
/// Packed SIMD vector type.
///
/// # Examples
///
/// ```
-/// # use packed_simd::Simd;
+/// # use packed_simd_2::Simd;
/// let v = Simd::<[i32; 4]>::new(0, 1, 2, 3);
/// assert_eq!(v.extract(2), 2);
/// ```
@@ -328,10 +338,10 @@ pub use self::api::into_bits::*;
// Re-export the shuffle intrinsics required by the `shuffle!` macro.
#[doc(hidden)]
pub use self::codegen::llvm::{
- __shuffle_vector16, __shuffle_vector2, __shuffle_vector32,
- __shuffle_vector4, __shuffle_vector64, __shuffle_vector8,
+ __shuffle_vector16, __shuffle_vector2, __shuffle_vector32, __shuffle_vector4, __shuffle_vector64,
+ __shuffle_vector8,
};
-crate mod llvm {
- crate use crate::codegen::llvm::*;
+pub(crate) mod llvm {
+ pub(crate) use crate::codegen::llvm::*;
}
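
The "Hardware Features" section added to the lib.rs docs in the hunk above explains that the instructions emitted for a type such as u64x8 depend on the `-C target-feature` flags passed through RUSTFLAGS. A minimal sketch of what that means in practice (illustrative only, not part of the patch; the function name `any_lane_equal` is made up for this example): the source stays identical across feature levels, and only the build flags decide whether the comparison lowers to SSE2-class or AVX2-class instructions.

    // Illustrative sketch, not part of the diff. Build once with default
    // RUSTFLAGS (SSE2 baseline on x86_64) and once with
    //   RUSTFLAGS='-C target-feature=+avx2,+avx' cargo build --release
    // then compare the generated code (e.g. with cargo-asm or objdump).
    use packed_simd_2::u64x8;

    /// Returns true if any lane of `a` equals the corresponding lane of `b`.
    fn any_lane_equal(a: u64x8, b: u64x8) -> bool {
        // Vertical (lane-wise) comparison yields a mask vector;
        // `any()` reduces it to a single bool.
        a.eq(b).any()
    }

    fn main() {
        let a = u64x8::new(0, 1, 2, 3, 4, 5, 6, 7);
        let b = u64x8::splat(3);
        assert!(any_lane_equal(a, b));
    }
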
diff --git a/vendor/packed_simd_2/src/masks.rs b/vendor/packed_simd_2/src/masks.rs
index aeb36d232..04534eab2 100644
--- a/vendor/packed_simd_2/src/masks.rs
+++ b/vendor/packed_simd_2/src/masks.rs
@@ -54,9 +54,7 @@ macro_rules! impl_mask_ty {
impl PartialOrd<$id> for $id {
#[inline]
- fn partial_cmp(
- &self, other: &Self,
- ) -> Option<crate::cmp::Ordering> {
+ fn partial_cmp(&self, other: &Self) -> Option<crate::cmp::Ordering> {
use crate::cmp::Ordering;
if self == other {
Some(Ordering::Equal)
@@ -107,9 +105,7 @@ macro_rules! impl_mask_ty {
impl crate::fmt::Debug for $id {
#[inline]
- fn fmt(
- &self, fmtter: &mut crate::fmt::Formatter<'_>,
- ) -> Result<(), crate::fmt::Error> {
+ fn fmt(&self, fmtter: &mut crate::fmt::Formatter<'_>) -> Result<(), crate::fmt::Error> {
write!(fmtter, "{}({})", stringify!($id), self.0 != 0)
}
}
diff --git a/vendor/packed_simd_2/src/testing.rs b/vendor/packed_simd_2/src/testing.rs
index fcbcf9e2a..6320b2805 100644
--- a/vendor/packed_simd_2/src/testing.rs
+++ b/vendor/packed_simd_2/src/testing.rs
@@ -5,4 +5,4 @@ mod macros;
#[cfg(test)]
#[macro_use]
-crate mod utils;
+pub(crate) mod utils;
diff --git a/vendor/packed_simd_2/src/testing/macros.rs b/vendor/packed_simd_2/src/testing/macros.rs
index 6008634c7..7bc4268b9 100644
--- a/vendor/packed_simd_2/src/testing/macros.rs
+++ b/vendor/packed_simd_2/src/testing/macros.rs
@@ -3,26 +3,26 @@
macro_rules! test_if {
($cfg_tt:tt: $it:item) => {
#[cfg(any(
- // Test everything if:
- //
- // * tests are enabled,
- // * no features about exclusively testing
- // specific vector classes are enabled
- all(test, not(any(
- test_v16,
- test_v32,
- test_v64,
- test_v128,
- test_v256,
- test_v512,
- test_none, // disables all tests
- ))),
- // Test if:
- //
- // * tests are enabled
- // * a particular cfg token tree returns true
- all(test, $cfg_tt),
- ))]
+ // Test everything if:
+ //
+ // * tests are enabled,
+ // * no features about exclusively testing
+ // specific vector classes are enabled
+ all(test, not(any(
+ test_v16,
+ test_v32,
+ test_v64,
+ test_v128,
+ test_v256,
+ test_v512,
+ test_none, // disables all tests
+ ))),
+ // Test if:
+ //
+ // * tests are enabled
+ // * a particular cfg token tree returns true
+ all(test, $cfg_tt),
+ ))]
$it
};
}
diff --git a/vendor/packed_simd_2/src/testing/utils.rs b/vendor/packed_simd_2/src/testing/utils.rs
index 21f27aae5..7d8f39573 100644
--- a/vendor/packed_simd_2/src/testing/utils.rs
+++ b/vendor/packed_simd_2/src/testing/utils.rs
@@ -7,16 +7,15 @@
use crate::{cmp::PartialOrd, fmt::Debug, LexicographicallyOrdered};
/// Tests PartialOrd for `a` and `b` where `a < b` is true.
-pub fn test_lt<T>(
- a: LexicographicallyOrdered<T>, b: LexicographicallyOrdered<T>,
-) where
+pub fn test_lt<T>(a: LexicographicallyOrdered<T>, b: LexicographicallyOrdered<T>)
+where
LexicographicallyOrdered<T>: Debug + PartialOrd,
{
assert!(a < b, "{:?}, {:?}", a, b);
assert!(b > a, "{:?}, {:?}", a, b);
assert!(!(a == b), "{:?}, {:?}", a, b);
- assert!(a != b, "{:?}, {:?}", a, b);
+ assert_ne!(a, b, "{:?}, {:?}", a, b);
assert!(a <= b, "{:?}, {:?}", a, b);
assert!(b >= a, "{:?}, {:?}", a, b);
@@ -37,9 +36,8 @@ pub fn test_lt<T>(
}
/// Tests PartialOrd for `a` and `b` where `a <= b` is true.
-pub fn test_le<T>(
- a: LexicographicallyOrdered<T>, b: LexicographicallyOrdered<T>,
-) where
+pub fn test_le<T>(a: LexicographicallyOrdered<T>, b: LexicographicallyOrdered<T>)
+where
LexicographicallyOrdered<T>: Debug + PartialOrd,
{
assert!(a <= b, "{:?}, {:?}", a, b);
@@ -54,14 +52,15 @@ pub fn test_le<T>(
assert!(!(a != b), "{:?}, {:?}", a, b);
} else {
- assert!(a != b, "{:?}, {:?}", a, b);
+ assert_ne!(a, b, "{:?}, {:?}", a, b);
test_lt(a, b);
}
}
/// Test PartialOrd::partial_cmp for `a` and `b` returning `Ordering`
pub fn test_cmp<T>(
- a: LexicographicallyOrdered<T>, b: LexicographicallyOrdered<T>,
+ a: LexicographicallyOrdered<T>,
+ b: LexicographicallyOrdered<T>,
o: Option<crate::cmp::Ordering>,
) where
LexicographicallyOrdered<T>: PartialOrd + Debug,
@@ -72,18 +71,8 @@ pub fn test_cmp<T>(
let mut arr_a: [T::Element; 64] = [Default::default(); 64];
let mut arr_b: [T::Element; 64] = [Default::default(); 64];
- unsafe {
- crate::ptr::write_unaligned(
- arr_a.as_mut_ptr() as *mut LexicographicallyOrdered<T>,
- a,
- )
- }
- unsafe {
- crate::ptr::write_unaligned(
- arr_b.as_mut_ptr() as *mut LexicographicallyOrdered<T>,
- b,
- )
- }
+ unsafe { crate::ptr::write_unaligned(arr_a.as_mut_ptr() as *mut LexicographicallyOrdered<T>, a) }
+ unsafe { crate::ptr::write_unaligned(arr_b.as_mut_ptr() as *mut LexicographicallyOrdered<T>, b) }
let expected = arr_a[0..T::LANES].partial_cmp(&arr_b[0..T::LANES]);
let result = a.partial_cmp(&b);
assert_eq!(expected, result, "{:?}, {:?}", a, b);
@@ -134,8 +123,7 @@ macro_rules! ptr_vals {
// all bits cleared
let clear: <$id as sealed::Simd>::Element = crate::mem::zeroed();
// all bits set
- let set: <$id as sealed::Simd>::Element =
- crate::mem::transmute(-1_isize);
+ let set: <$id as sealed::Simd>::Element = crate::mem::transmute(-1_isize);
(clear, set)
}
};