Diffstat (limited to 'vendor/packed_simd_2/src/api')
 vendor/packed_simd_2/src/api.rs                                |    4
 vendor/packed_simd_2/src/api/cast/v128.rs                      |  297
 vendor/packed_simd_2/src/api/cast/v16.rs                       |   63
 vendor/packed_simd_2/src/api/cast/v256.rs                      |  297
 vendor/packed_simd_2/src/api/cast/v32.rs                       |  126
 vendor/packed_simd_2/src/api/cast/v512.rs                      |  205
 vendor/packed_simd_2/src/api/cast/v64.rs                       |  201
 vendor/packed_simd_2/src/api/cmp/partial_eq.rs                 |    4
 vendor/packed_simd_2/src/api/cmp/partial_ord.rs                |    8
 vendor/packed_simd_2/src/api/fmt/binary.rs                     |    4
 vendor/packed_simd_2/src/api/fmt/debug.rs                      |    4
 vendor/packed_simd_2/src/api/fmt/lower_hex.rs                  |    4
 vendor/packed_simd_2/src/api/fmt/octal.rs                      |    4
 vendor/packed_simd_2/src/api/fmt/upper_hex.rs                  |    4
 vendor/packed_simd_2/src/api/into_bits.rs                      |    4
 vendor/packed_simd_2/src/api/into_bits/arch_specific.rs        |  272
 vendor/packed_simd_2/src/api/into_bits/macros.rs               |    2
 vendor/packed_simd_2/src/api/into_bits/v128.rs                 |  232
 vendor/packed_simd_2/src/api/into_bits/v256.rs                 |  231
 vendor/packed_simd_2/src/api/into_bits/v512.rs                 |  231
 vendor/packed_simd_2/src/api/math/float/consts.rs              |   36
 vendor/packed_simd_2/src/api/ops/scalar_shifts.rs              |    7
 vendor/packed_simd_2/src/api/ops/vector_rotates.rs             |    2
 vendor/packed_simd_2/src/api/ops/vector_shifts.rs              |    7
 vendor/packed_simd_2/src/api/ptr/gather_scatter.rs             |   21
 vendor/packed_simd_2/src/api/reductions/float_arithmetic.rs    |    8
 vendor/packed_simd_2/src/api/reductions/integer_arithmetic.rs  |    8
 vendor/packed_simd_2/src/api/reductions/min_max.rs             |   65
 vendor/packed_simd_2/src/api/select.rs                         |    4
 vendor/packed_simd_2/src/api/shuffle.rs                        |   54
 vendor/packed_simd_2/src/api/slice/from_slice.rs               |   28
 vendor/packed_simd_2/src/api/slice/write_to_slice.rs           |   35
32 files changed, 2018 insertions, 454 deletions
diff --git a/vendor/packed_simd_2/src/api.rs b/vendor/packed_simd_2/src/api.rs
index 953685925..262fc4ee6 100644
--- a/vendor/packed_simd_2/src/api.rs
+++ b/vendor/packed_simd_2/src/api.rs
@@ -2,7 +2,7 @@
#[macro_use]
mod bitmask;
-crate mod cast;
+pub(crate) mod cast;
#[macro_use]
mod cmp;
#[macro_use]
@@ -37,7 +37,7 @@ mod swap_bytes;
mod bit_manip;
#[cfg(feature = "into_bits")]
-crate mod into_bits;
+pub(crate) mod into_bits;
macro_rules! impl_i {
([$elem_ty:ident; $elem_n:expr]: $tuple_id:ident, $mask_ty:ident
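
Note: the only semantic change in api.rs is the visibility spelling. `crate mod …` used the unstable `crate_visibility_modifier` feature, which never stabilized, so the vendored source is rewritten to the stable equivalent `pub(crate)`. A minimal sketch of the equivalence (the names below are illustrative, not taken from the crate):

    mod api {
        // `pub(crate)` restricts an item to the defining crate,
        // which is exactly what the unstable `crate` modifier meant.
        pub(crate) mod cast {
            pub(crate) fn lane_count() -> usize {
                4
            }
        }
    }

    fn main() {
        // Callable from anywhere inside this crate...
        assert_eq!(api::cast::lane_count(), 4);
        // ...but invisible to downstream crates that depend on it.
    }
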
diff --git a/vendor/packed_simd_2/src/api/cast/v128.rs b/vendor/packed_simd_2/src/api/cast/v128.rs
index ab47ddc00..2e10b97b7 100644
--- a/vendor/packed_simd_2/src/api/cast/v128.rs
+++ b/vendor/packed_simd_2/src/api/cast/v128.rs
@@ -3,74 +3,297 @@
use crate::*;
-impl_from_cast!(
- i8x16[test_v128]: u8x16, m8x16, i16x16, u16x16, m16x16, i32x16, u32x16, f32x16, m32x16
-);
-impl_from_cast!(
- u8x16[test_v128]: i8x16, m8x16, i16x16, u16x16, m16x16, i32x16, u32x16, f32x16, m32x16
-);
-impl_from_cast_mask!(
- m8x16[test_v128]: i8x16, u8x16, i16x16, u16x16, m16x16, i32x16, u32x16, f32x16, m32x16
-);
+impl_from_cast!(i8x16[test_v128]: u8x16, m8x16, i16x16, u16x16, m16x16, i32x16, u32x16, f32x16, m32x16);
+impl_from_cast!(u8x16[test_v128]: i8x16, m8x16, i16x16, u16x16, m16x16, i32x16, u32x16, f32x16, m32x16);
+impl_from_cast_mask!(m8x16[test_v128]: i8x16, u8x16, i16x16, u16x16, m16x16, i32x16, u32x16, f32x16, m32x16);
impl_from_cast!(
- i16x8[test_v128]: i8x8, u8x8, m8x8, u16x8, m16x8, i32x8, u32x8, f32x8, m32x8,
- i64x8, u64x8, f64x8, m64x8, isizex8, usizex8, msizex8
+ i16x8[test_v128]: i8x8,
+ u8x8,
+ m8x8,
+ u16x8,
+ m16x8,
+ i32x8,
+ u32x8,
+ f32x8,
+ m32x8,
+ i64x8,
+ u64x8,
+ f64x8,
+ m64x8,
+ isizex8,
+ usizex8,
+ msizex8
);
impl_from_cast!(
- u16x8[test_v128]: i8x8, u8x8, m8x8, i16x8, m16x8, i32x8, u32x8, f32x8, m32x8,
- i64x8, u64x8, f64x8, m64x8, isizex8, usizex8, msizex8
+ u16x8[test_v128]: i8x8,
+ u8x8,
+ m8x8,
+ i16x8,
+ m16x8,
+ i32x8,
+ u32x8,
+ f32x8,
+ m32x8,
+ i64x8,
+ u64x8,
+ f64x8,
+ m64x8,
+ isizex8,
+ usizex8,
+ msizex8
);
impl_from_cast_mask!(
- m16x8[test_v128]: i8x8, u8x8, m8x8, i16x8, u16x8, i32x8, u32x8, f32x8, m32x8,
- i64x8, u64x8, f64x8, m64x8, isizex8, usizex8, msizex8
+ m16x8[test_v128]: i8x8,
+ u8x8,
+ m8x8,
+ i16x8,
+ u16x8,
+ i32x8,
+ u32x8,
+ f32x8,
+ m32x8,
+ i64x8,
+ u64x8,
+ f64x8,
+ m64x8,
+ isizex8,
+ usizex8,
+ msizex8
);
impl_from_cast!(
- i32x4[test_v128]: i8x4, u8x4, m8x4, i16x4, u16x4, m16x4, u32x4, f32x4, m32x4,
- i64x4, u64x4, f64x4, m64x4, i128x4, u128x4, m128x4, isizex4, usizex4, msizex4
+ i32x4[test_v128]: i8x4,
+ u8x4,
+ m8x4,
+ i16x4,
+ u16x4,
+ m16x4,
+ u32x4,
+ f32x4,
+ m32x4,
+ i64x4,
+ u64x4,
+ f64x4,
+ m64x4,
+ i128x4,
+ u128x4,
+ m128x4,
+ isizex4,
+ usizex4,
+ msizex4
);
impl_from_cast!(
- u32x4[test_v128]: i8x4, u8x4, m8x4, i16x4, u16x4, m16x4, i32x4, f32x4, m32x4,
- i64x4, u64x4, f64x4, m64x4, i128x4, u128x4, m128x4, isizex4, usizex4, msizex4
+ u32x4[test_v128]: i8x4,
+ u8x4,
+ m8x4,
+ i16x4,
+ u16x4,
+ m16x4,
+ i32x4,
+ f32x4,
+ m32x4,
+ i64x4,
+ u64x4,
+ f64x4,
+ m64x4,
+ i128x4,
+ u128x4,
+ m128x4,
+ isizex4,
+ usizex4,
+ msizex4
);
impl_from_cast!(
- f32x4[test_v128]: i8x4, u8x4, m8x4, i16x4, u16x4, m16x4, i32x4, u32x4, m32x4,
- i64x4, u64x4, f64x4, m64x4, i128x4, u128x4, m128x4, isizex4, usizex4, msizex4
+ f32x4[test_v128]: i8x4,
+ u8x4,
+ m8x4,
+ i16x4,
+ u16x4,
+ m16x4,
+ i32x4,
+ u32x4,
+ m32x4,
+ i64x4,
+ u64x4,
+ f64x4,
+ m64x4,
+ i128x4,
+ u128x4,
+ m128x4,
+ isizex4,
+ usizex4,
+ msizex4
);
impl_from_cast_mask!(
- m32x4[test_v128]: i8x4, u8x4, m8x4, i16x4, u16x4, m16x4, i32x4, u32x4, f32x4,
- i64x4, u64x4, f64x4, m64x4, i128x4, u128x4, m128x4, isizex4, usizex4, msizex4
+ m32x4[test_v128]: i8x4,
+ u8x4,
+ m8x4,
+ i16x4,
+ u16x4,
+ m16x4,
+ i32x4,
+ u32x4,
+ f32x4,
+ i64x4,
+ u64x4,
+ f64x4,
+ m64x4,
+ i128x4,
+ u128x4,
+ m128x4,
+ isizex4,
+ usizex4,
+ msizex4
);
impl_from_cast!(
- i64x2[test_v128]: i8x2, u8x2, m8x2, i16x2, u16x2, m16x2, i32x2, u32x2, f32x2, m32x2,
- u64x2, f64x2, m64x2, i128x2, u128x2, m128x2, isizex2, usizex2, msizex2
+ i64x2[test_v128]: i8x2,
+ u8x2,
+ m8x2,
+ i16x2,
+ u16x2,
+ m16x2,
+ i32x2,
+ u32x2,
+ f32x2,
+ m32x2,
+ u64x2,
+ f64x2,
+ m64x2,
+ i128x2,
+ u128x2,
+ m128x2,
+ isizex2,
+ usizex2,
+ msizex2
);
impl_from_cast!(
- u64x2[test_v128]: i8x2, u8x2, m8x2, i16x2, u16x2, m16x2, i32x2, u32x2, f32x2, m32x2,
- i64x2, f64x2, m64x2, i128x2, u128x2, m128x2, isizex2, usizex2, msizex2
+ u64x2[test_v128]: i8x2,
+ u8x2,
+ m8x2,
+ i16x2,
+ u16x2,
+ m16x2,
+ i32x2,
+ u32x2,
+ f32x2,
+ m32x2,
+ i64x2,
+ f64x2,
+ m64x2,
+ i128x2,
+ u128x2,
+ m128x2,
+ isizex2,
+ usizex2,
+ msizex2
);
impl_from_cast!(
- f64x2[test_v128]: i8x2, u8x2, m8x2, i16x2, u16x2, m16x2, i32x2, u32x2, f32x2, m32x2,
- i64x2, u64x2, m64x2, i128x2, u128x2, m128x2, isizex2, usizex2, msizex2
+ f64x2[test_v128]: i8x2,
+ u8x2,
+ m8x2,
+ i16x2,
+ u16x2,
+ m16x2,
+ i32x2,
+ u32x2,
+ f32x2,
+ m32x2,
+ i64x2,
+ u64x2,
+ m64x2,
+ i128x2,
+ u128x2,
+ m128x2,
+ isizex2,
+ usizex2,
+ msizex2
);
impl_from_cast_mask!(
- m64x2[test_v128]: i8x2, u8x2, m8x2, i16x2, u16x2, m16x2, i32x2, u32x2, f32x2, m32x2,
- i64x2, u64x2, f64x2, i128x2, u128x2, m128x2, isizex2, usizex2, msizex2
+ m64x2[test_v128]: i8x2,
+ u8x2,
+ m8x2,
+ i16x2,
+ u16x2,
+ m16x2,
+ i32x2,
+ u32x2,
+ f32x2,
+ m32x2,
+ i64x2,
+ u64x2,
+ f64x2,
+ i128x2,
+ u128x2,
+ m128x2,
+ isizex2,
+ usizex2,
+ msizex2
);
impl_from_cast!(
- isizex2[test_v128]: i8x2, u8x2, m8x2, i16x2, u16x2, m16x2, i32x2, u32x2, f32x2, m32x2,
- i64x2, u64x2, f64x2, m64x2, i128x2, u128x2, m128x2, usizex2, msizex2
+ isizex2[test_v128]: i8x2,
+ u8x2,
+ m8x2,
+ i16x2,
+ u16x2,
+ m16x2,
+ i32x2,
+ u32x2,
+ f32x2,
+ m32x2,
+ i64x2,
+ u64x2,
+ f64x2,
+ m64x2,
+ i128x2,
+ u128x2,
+ m128x2,
+ usizex2,
+ msizex2
);
impl_from_cast!(
- usizex2[test_v128]: i8x2, u8x2, m8x2, i16x2, u16x2, m16x2, i32x2, u32x2, f32x2, m32x2,
- i64x2, u64x2, f64x2, m64x2, i128x2, u128x2, m128x2, isizex2, msizex2
+ usizex2[test_v128]: i8x2,
+ u8x2,
+ m8x2,
+ i16x2,
+ u16x2,
+ m16x2,
+ i32x2,
+ u32x2,
+ f32x2,
+ m32x2,
+ i64x2,
+ u64x2,
+ f64x2,
+ m64x2,
+ i128x2,
+ u128x2,
+ m128x2,
+ isizex2,
+ msizex2
);
impl_from_cast_mask!(
- msizex2[test_v128]: i8x2, u8x2, m8x2, i16x2, u16x2, m16x2, i32x2, u32x2, f32x2, m32x2,
- i64x2, u64x2, f64x2, m64x2, i128x2, u128x2, m128x2, isizex2, usizex2
+ msizex2[test_v128]: i8x2,
+ u8x2,
+ m8x2,
+ i16x2,
+ u16x2,
+ m16x2,
+ i32x2,
+ u32x2,
+ f32x2,
+ m32x2,
+ i64x2,
+ u64x2,
+ f64x2,
+ m64x2,
+ i128x2,
+ u128x2,
+ m128x2,
+ isizex2,
+ usizex2
);
// FIXME[test_v128]: 64-bit single element vectors into_cast impls
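
Note: every hunk in this file (and in the sibling cast/v*.rs files below) is a rustfmt reflow of the same macro invocations; the generated API is untouched. For orientation, `impl_from_cast!`/`impl_from_cast_mask!` produce `FromCast` impls that convert lane-wise, like scalar `as`, between vectors with the same lane count. A usage sketch, assuming the crate's public `FromCast`/`Cast` traits:

    use packed_simd_2::{f32x4, i32x4, Cast, FromCast};

    fn main() {
        let x = i32x4::new(1, -2, 3, -4);
        // Each i32 lane is converted to f32, as `as` would per lane.
        let y = f32x4::from_cast(x);
        assert_eq!(y, f32x4::new(1.0, -2.0, 3.0, -4.0));

        // `Cast` is the mirror trait; the target type is inferred.
        let z: i32x4 = y.cast();
        assert_eq!(z, x);
    }
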
diff --git a/vendor/packed_simd_2/src/api/cast/v16.rs b/vendor/packed_simd_2/src/api/cast/v16.rs
index cf974bb08..896febacb 100644
--- a/vendor/packed_simd_2/src/api/cast/v16.rs
+++ b/vendor/packed_simd_2/src/api/cast/v16.rs
@@ -4,14 +4,65 @@
use crate::*;
impl_from_cast!(
- i8x2[test_v16]: u8x2, m8x2, i16x2, u16x2, m16x2, i32x2, u32x2, f32x2, m32x2,
- i64x2, u64x2, f64x2, m64x2, i128x2, u128x2, m128x2, isizex2, usizex2, msizex2
+ i8x2[test_v16]: u8x2,
+ m8x2,
+ i16x2,
+ u16x2,
+ m16x2,
+ i32x2,
+ u32x2,
+ f32x2,
+ m32x2,
+ i64x2,
+ u64x2,
+ f64x2,
+ m64x2,
+ i128x2,
+ u128x2,
+ m128x2,
+ isizex2,
+ usizex2,
+ msizex2
);
impl_from_cast!(
- u8x2[test_v16]: i8x2, m8x2, i16x2, u16x2, m16x2, i32x2, u32x2, f32x2, m32x2,
- i64x2, u64x2, f64x2, m64x2, i128x2, u128x2, m128x2, isizex2, usizex2, msizex2
+ u8x2[test_v16]: i8x2,
+ m8x2,
+ i16x2,
+ u16x2,
+ m16x2,
+ i32x2,
+ u32x2,
+ f32x2,
+ m32x2,
+ i64x2,
+ u64x2,
+ f64x2,
+ m64x2,
+ i128x2,
+ u128x2,
+ m128x2,
+ isizex2,
+ usizex2,
+ msizex2
);
impl_from_cast_mask!(
- m8x2[test_v16]: i8x2, u8x2, i16x2, u16x2, m16x2, i32x2, u32x2, f32x2, m32x2,
- i64x2, u64x2, f64x2, m64x2, i128x2, u128x2, m128x2, isizex2, usizex2, msizex2
+ m8x2[test_v16]: i8x2,
+ u8x2,
+ i16x2,
+ u16x2,
+ m16x2,
+ i32x2,
+ u32x2,
+ f32x2,
+ m32x2,
+ i64x2,
+ u64x2,
+ f64x2,
+ m64x2,
+ i128x2,
+ u128x2,
+ m128x2,
+ isizex2,
+ usizex2,
+ msizex2
);
diff --git a/vendor/packed_simd_2/src/api/cast/v256.rs b/vendor/packed_simd_2/src/api/cast/v256.rs
index 9389dcb4c..fe0c835e3 100644
--- a/vendor/packed_simd_2/src/api/cast/v256.rs
+++ b/vendor/packed_simd_2/src/api/cast/v256.rs
@@ -7,75 +7,292 @@ impl_from_cast!(i8x32[test_v256]: u8x32, m8x32, i16x32, u16x32, m16x32);
impl_from_cast!(u8x32[test_v256]: i8x32, m8x32, i16x32, u16x32, m16x32);
impl_from_cast_mask!(m8x32[test_v256]: i8x32, u8x32, i16x32, u16x32, m16x32);
-impl_from_cast!(
- i16x16[test_v256]: i8x16, u8x16, m8x16, u16x16, m16x16,
- i32x16, u32x16, f32x16, m32x16
-);
-impl_from_cast!(
- u16x16[test_v256]: i8x16, u8x16, m8x16, i16x16, m16x16,
- i32x16, u32x16, f32x16, m32x16
-);
-impl_from_cast_mask!(
- m16x16[test_v256]: i8x16, u8x16, m8x16, i16x16, u16x16,
- i32x16, u32x16, f32x16, m32x16
-);
+impl_from_cast!(i16x16[test_v256]: i8x16, u8x16, m8x16, u16x16, m16x16, i32x16, u32x16, f32x16, m32x16);
+impl_from_cast!(u16x16[test_v256]: i8x16, u8x16, m8x16, i16x16, m16x16, i32x16, u32x16, f32x16, m32x16);
+impl_from_cast_mask!(m16x16[test_v256]: i8x16, u8x16, m8x16, i16x16, u16x16, i32x16, u32x16, f32x16, m32x16);
impl_from_cast!(
- i32x8[test_v256]: i8x8, u8x8, m8x8, i16x8, u16x8, m16x8, u32x8, f32x8, m32x8,
- i64x8, u64x8, f64x8, m64x8, isizex8, usizex8, msizex8
+ i32x8[test_v256]: i8x8,
+ u8x8,
+ m8x8,
+ i16x8,
+ u16x8,
+ m16x8,
+ u32x8,
+ f32x8,
+ m32x8,
+ i64x8,
+ u64x8,
+ f64x8,
+ m64x8,
+ isizex8,
+ usizex8,
+ msizex8
);
impl_from_cast!(
- u32x8[test_v256]: i8x8, u8x8, m8x8, i16x8, u16x8, m16x8, i32x8, f32x8, m32x8,
- i64x8, u64x8, f64x8, m64x8, isizex8, usizex8, msizex8
+ u32x8[test_v256]: i8x8,
+ u8x8,
+ m8x8,
+ i16x8,
+ u16x8,
+ m16x8,
+ i32x8,
+ f32x8,
+ m32x8,
+ i64x8,
+ u64x8,
+ f64x8,
+ m64x8,
+ isizex8,
+ usizex8,
+ msizex8
);
impl_from_cast!(
- f32x8[test_v256]: i8x8, u8x8, m8x8, i16x8, u16x8, m16x8, i32x8, u32x8, m32x8,
- i64x8, u64x8, f64x8, m64x8, isizex8, usizex8, msizex8
+ f32x8[test_v256]: i8x8,
+ u8x8,
+ m8x8,
+ i16x8,
+ u16x8,
+ m16x8,
+ i32x8,
+ u32x8,
+ m32x8,
+ i64x8,
+ u64x8,
+ f64x8,
+ m64x8,
+ isizex8,
+ usizex8,
+ msizex8
);
impl_from_cast_mask!(
- m32x8[test_v256]: i8x8, u8x8, m8x8, i16x8, u16x8, m16x8, i32x8, u32x8, f32x8,
- i64x8, u64x8, f64x8, m64x8, isizex8, usizex8, msizex8
+ m32x8[test_v256]: i8x8,
+ u8x8,
+ m8x8,
+ i16x8,
+ u16x8,
+ m16x8,
+ i32x8,
+ u32x8,
+ f32x8,
+ i64x8,
+ u64x8,
+ f64x8,
+ m64x8,
+ isizex8,
+ usizex8,
+ msizex8
);
impl_from_cast!(
- i64x4[test_v256]: i8x4, u8x4, m8x4, i16x4, u16x4, m16x4, i32x4, u32x4, f32x4, m32x4,
- u64x4, f64x4, m64x4, i128x4, u128x4, m128x4, isizex4, usizex4, msizex4
+ i64x4[test_v256]: i8x4,
+ u8x4,
+ m8x4,
+ i16x4,
+ u16x4,
+ m16x4,
+ i32x4,
+ u32x4,
+ f32x4,
+ m32x4,
+ u64x4,
+ f64x4,
+ m64x4,
+ i128x4,
+ u128x4,
+ m128x4,
+ isizex4,
+ usizex4,
+ msizex4
);
impl_from_cast!(
- u64x4[test_v256]: i8x4, u8x4, m8x4, i16x4, u16x4, m16x4, i32x4, u32x4, f32x4, m32x4,
- i64x4, f64x4, m64x4, i128x4, u128x4, m128x4, isizex4, usizex4, msizex4
+ u64x4[test_v256]: i8x4,
+ u8x4,
+ m8x4,
+ i16x4,
+ u16x4,
+ m16x4,
+ i32x4,
+ u32x4,
+ f32x4,
+ m32x4,
+ i64x4,
+ f64x4,
+ m64x4,
+ i128x4,
+ u128x4,
+ m128x4,
+ isizex4,
+ usizex4,
+ msizex4
);
impl_from_cast!(
- f64x4[test_v256]: i8x4, u8x4, m8x4, i16x4, u16x4, m16x4, i32x4, u32x4, f32x4, m32x4,
- i64x4, u64x4, m64x4, i128x4, u128x4, m128x4, isizex4, usizex4, msizex4
+ f64x4[test_v256]: i8x4,
+ u8x4,
+ m8x4,
+ i16x4,
+ u16x4,
+ m16x4,
+ i32x4,
+ u32x4,
+ f32x4,
+ m32x4,
+ i64x4,
+ u64x4,
+ m64x4,
+ i128x4,
+ u128x4,
+ m128x4,
+ isizex4,
+ usizex4,
+ msizex4
);
impl_from_cast_mask!(
- m64x4[test_v256]: i8x4, u8x4, m8x4, i16x4, u16x4, m16x4, i32x4, u32x4, f32x4, m32x4,
- i64x4, u64x4, f64x4, i128x4, u128x4, m128x4, isizex4, usizex4, msizex4
+ m64x4[test_v256]: i8x4,
+ u8x4,
+ m8x4,
+ i16x4,
+ u16x4,
+ m16x4,
+ i32x4,
+ u32x4,
+ f32x4,
+ m32x4,
+ i64x4,
+ u64x4,
+ f64x4,
+ i128x4,
+ u128x4,
+ m128x4,
+ isizex4,
+ usizex4,
+ msizex4
);
impl_from_cast!(
- i128x2[test_v256]: i8x2, u8x2, m8x2, i16x2, u16x2, m16x2, i32x2, u32x2, f32x2, m32x2,
- i64x2, u64x2, f64x2, m64x2, u128x2, m128x2, isizex2, usizex2, msizex2
+ i128x2[test_v256]: i8x2,
+ u8x2,
+ m8x2,
+ i16x2,
+ u16x2,
+ m16x2,
+ i32x2,
+ u32x2,
+ f32x2,
+ m32x2,
+ i64x2,
+ u64x2,
+ f64x2,
+ m64x2,
+ u128x2,
+ m128x2,
+ isizex2,
+ usizex2,
+ msizex2
);
impl_from_cast!(
- u128x2[test_v256]: i8x2, u8x2, m8x2, i16x2, u16x2, m16x2, i32x2, u32x2, f32x2, m32x2,
- i64x2, u64x2, f64x2, m64x2, i128x2, m128x2, isizex2, usizex2, msizex2
+ u128x2[test_v256]: i8x2,
+ u8x2,
+ m8x2,
+ i16x2,
+ u16x2,
+ m16x2,
+ i32x2,
+ u32x2,
+ f32x2,
+ m32x2,
+ i64x2,
+ u64x2,
+ f64x2,
+ m64x2,
+ i128x2,
+ m128x2,
+ isizex2,
+ usizex2,
+ msizex2
);
impl_from_cast_mask!(
- m128x2[test_v256]: i8x2, u8x2, m8x2, i16x2, u16x2, m16x2, i32x2, u32x2, f32x2, m32x2,
- i64x2, u64x2, m64x2, f64x2, i128x2, u128x2, isizex2, usizex2, msizex2
+ m128x2[test_v256]: i8x2,
+ u8x2,
+ m8x2,
+ i16x2,
+ u16x2,
+ m16x2,
+ i32x2,
+ u32x2,
+ f32x2,
+ m32x2,
+ i64x2,
+ u64x2,
+ m64x2,
+ f64x2,
+ i128x2,
+ u128x2,
+ isizex2,
+ usizex2,
+ msizex2
);
impl_from_cast!(
- isizex4[test_v256]: i8x4, u8x4, m8x4, i16x4, u16x4, m16x4, i32x4, u32x4, f32x4, m32x4,
- i64x4, u64x4, f64x4, m64x4, i128x4, u128x4, m128x4, usizex4, msizex4
+ isizex4[test_v256]: i8x4,
+ u8x4,
+ m8x4,
+ i16x4,
+ u16x4,
+ m16x4,
+ i32x4,
+ u32x4,
+ f32x4,
+ m32x4,
+ i64x4,
+ u64x4,
+ f64x4,
+ m64x4,
+ i128x4,
+ u128x4,
+ m128x4,
+ usizex4,
+ msizex4
);
impl_from_cast!(
- usizex4[test_v256]: i8x4, u8x4, m8x4, i16x4, u16x4, m16x4, i32x4, u32x4, f32x4, m32x4,
- i64x4, u64x4, f64x4, m64x4, i128x4, u128x4, m128x4, isizex4, msizex4
+ usizex4[test_v256]: i8x4,
+ u8x4,
+ m8x4,
+ i16x4,
+ u16x4,
+ m16x4,
+ i32x4,
+ u32x4,
+ f32x4,
+ m32x4,
+ i64x4,
+ u64x4,
+ f64x4,
+ m64x4,
+ i128x4,
+ u128x4,
+ m128x4,
+ isizex4,
+ msizex4
);
impl_from_cast_mask!(
- msizex4[test_v256]: i8x4, u8x4, m8x4, i16x4, u16x4, m16x4, i32x4, u32x4, f32x4, m32x4,
- i64x4, u64x4, f64x4, m64x4, i128x4, u128x4, m128x4, isizex4, usizex4
+ msizex4[test_v256]: i8x4,
+ u8x4,
+ m8x4,
+ i16x4,
+ u16x4,
+ m16x4,
+ i32x4,
+ u32x4,
+ f32x4,
+ m32x4,
+ i64x4,
+ u64x4,
+ f64x4,
+ m64x4,
+ i128x4,
+ u128x4,
+ m128x4,
+ isizex4,
+ usizex4
);
diff --git a/vendor/packed_simd_2/src/api/cast/v32.rs b/vendor/packed_simd_2/src/api/cast/v32.rs
index 2b254ba0c..4ad1cbf74 100644
--- a/vendor/packed_simd_2/src/api/cast/v32.rs
+++ b/vendor/packed_simd_2/src/api/cast/v32.rs
@@ -4,27 +4,129 @@
use crate::*;
impl_from_cast!(
- i8x4[test_v32]: u8x4, m8x4, i16x4, u16x4, m16x4, i32x4, u32x4, f32x4, m32x4,
- i64x4, u64x4, f64x4, m64x4, i128x4, u128x4, m128x4, isizex4, usizex4, msizex4
+ i8x4[test_v32]: u8x4,
+ m8x4,
+ i16x4,
+ u16x4,
+ m16x4,
+ i32x4,
+ u32x4,
+ f32x4,
+ m32x4,
+ i64x4,
+ u64x4,
+ f64x4,
+ m64x4,
+ i128x4,
+ u128x4,
+ m128x4,
+ isizex4,
+ usizex4,
+ msizex4
);
impl_from_cast!(
- u8x4[test_v32]: i8x4, m8x4, i16x4, u16x4, m16x4, i32x4, u32x4, f32x4, m32x4,
- i64x4, u64x4, f64x4, m64x4, i128x4, u128x4, m128x4, isizex4, usizex4, msizex4
+ u8x4[test_v32]: i8x4,
+ m8x4,
+ i16x4,
+ u16x4,
+ m16x4,
+ i32x4,
+ u32x4,
+ f32x4,
+ m32x4,
+ i64x4,
+ u64x4,
+ f64x4,
+ m64x4,
+ i128x4,
+ u128x4,
+ m128x4,
+ isizex4,
+ usizex4,
+ msizex4
);
impl_from_cast_mask!(
- m8x4[test_v32]: i8x4, u8x4, i16x4, u16x4, m16x4, i32x4, u32x4, f32x4, m32x4,
- i64x4, u64x4, f64x4, m64x4, i128x4, u128x4, m128x4, isizex4, usizex4, msizex4
+ m8x4[test_v32]: i8x4,
+ u8x4,
+ i16x4,
+ u16x4,
+ m16x4,
+ i32x4,
+ u32x4,
+ f32x4,
+ m32x4,
+ i64x4,
+ u64x4,
+ f64x4,
+ m64x4,
+ i128x4,
+ u128x4,
+ m128x4,
+ isizex4,
+ usizex4,
+ msizex4
);
impl_from_cast!(
- i16x2[test_v32]: i8x2, u8x2, m8x2, u16x2, m16x2, i32x2, u32x2, f32x2, m32x2,
- i64x2, u64x2, f64x2, m64x2, i128x2, u128x2, m128x2, isizex2, usizex2, msizex2
+ i16x2[test_v32]: i8x2,
+ u8x2,
+ m8x2,
+ u16x2,
+ m16x2,
+ i32x2,
+ u32x2,
+ f32x2,
+ m32x2,
+ i64x2,
+ u64x2,
+ f64x2,
+ m64x2,
+ i128x2,
+ u128x2,
+ m128x2,
+ isizex2,
+ usizex2,
+ msizex2
);
impl_from_cast!(
- u16x2[test_v32]: i8x2, u8x2, m8x2, i16x2, m16x2, i32x2, u32x2, f32x2, m32x2,
- i64x2, u64x2, f64x2, m64x2, i128x2, u128x2, m128x2, isizex2, usizex2, msizex2
+ u16x2[test_v32]: i8x2,
+ u8x2,
+ m8x2,
+ i16x2,
+ m16x2,
+ i32x2,
+ u32x2,
+ f32x2,
+ m32x2,
+ i64x2,
+ u64x2,
+ f64x2,
+ m64x2,
+ i128x2,
+ u128x2,
+ m128x2,
+ isizex2,
+ usizex2,
+ msizex2
);
impl_from_cast_mask!(
- m16x2[test_v32]: i8x2, u8x2, m8x2, i16x2, u16x2, i32x2, u32x2, f32x2, m32x2,
- i64x2, u64x2, f64x2, m64x2, i128x2, u128x2, m128x2, isizex2, usizex2, msizex2
+ m16x2[test_v32]: i8x2,
+ u8x2,
+ m8x2,
+ i16x2,
+ u16x2,
+ i32x2,
+ u32x2,
+ f32x2,
+ m32x2,
+ i64x2,
+ u64x2,
+ f64x2,
+ m64x2,
+ i128x2,
+ u128x2,
+ m128x2,
+ isizex2,
+ usizex2,
+ msizex2
);
diff --git a/vendor/packed_simd_2/src/api/cast/v512.rs b/vendor/packed_simd_2/src/api/cast/v512.rs
index 5a10ab066..b64605045 100644
--- a/vendor/packed_simd_2/src/api/cast/v512.rs
+++ b/vendor/packed_simd_2/src/api/cast/v512.rs
@@ -11,58 +11,199 @@ impl_from_cast!(i16x32[test_v512]: i8x32, u8x32, m8x32, u16x32, m16x32);
impl_from_cast!(u16x32[test_v512]: i8x32, u8x32, m8x32, i16x32, m16x32);
impl_from_cast_mask!(m16x32[test_v512]: i8x32, u8x32, m8x32, i16x32, u16x32);
-impl_from_cast!(
- i32x16[test_v512]: i8x16, u8x16, m8x16, i16x16, u16x16, m16x16, u32x16, f32x16, m32x16
-);
-impl_from_cast!(
- u32x16[test_v512]: i8x16, u8x16, m8x16, i16x16, u16x16, m16x16, i32x16, f32x16, m32x16
-);
-impl_from_cast!(
- f32x16[test_v512]: i8x16, u8x16, m8x16, i16x16, u16x16, m16x16, i32x16, u32x16, m32x16
-);
-impl_from_cast_mask!(
- m32x16[test_v512]: i8x16, u8x16, m8x16, i16x16, u16x16, m16x16, i32x16, u32x16, f32x16
-);
+impl_from_cast!(i32x16[test_v512]: i8x16, u8x16, m8x16, i16x16, u16x16, m16x16, u32x16, f32x16, m32x16);
+impl_from_cast!(u32x16[test_v512]: i8x16, u8x16, m8x16, i16x16, u16x16, m16x16, i32x16, f32x16, m32x16);
+impl_from_cast!(f32x16[test_v512]: i8x16, u8x16, m8x16, i16x16, u16x16, m16x16, i32x16, u32x16, m32x16);
+impl_from_cast_mask!(m32x16[test_v512]: i8x16, u8x16, m8x16, i16x16, u16x16, m16x16, i32x16, u32x16, f32x16);
impl_from_cast!(
- i64x8[test_v512]: i8x8, u8x8, m8x8, i16x8, u16x8, m16x8, i32x8, u32x8, f32x8, m32x8,
- u64x8, f64x8, m64x8, isizex8, usizex8, msizex8
+ i64x8[test_v512]: i8x8,
+ u8x8,
+ m8x8,
+ i16x8,
+ u16x8,
+ m16x8,
+ i32x8,
+ u32x8,
+ f32x8,
+ m32x8,
+ u64x8,
+ f64x8,
+ m64x8,
+ isizex8,
+ usizex8,
+ msizex8
);
impl_from_cast!(
- u64x8[test_v512]: i8x8, u8x8, m8x8, i16x8, u16x8, m16x8, i32x8, u32x8, f32x8, m32x8,
- i64x8, f64x8, m64x8, isizex8, usizex8, msizex8
+ u64x8[test_v512]: i8x8,
+ u8x8,
+ m8x8,
+ i16x8,
+ u16x8,
+ m16x8,
+ i32x8,
+ u32x8,
+ f32x8,
+ m32x8,
+ i64x8,
+ f64x8,
+ m64x8,
+ isizex8,
+ usizex8,
+ msizex8
);
impl_from_cast!(
- f64x8[test_v512]: i8x8, u8x8, m8x8, i16x8, u16x8, m16x8, i32x8, u32x8, f32x8, m32x8,
- i64x8, u64x8, m64x8, isizex8, usizex8, msizex8
+ f64x8[test_v512]: i8x8,
+ u8x8,
+ m8x8,
+ i16x8,
+ u16x8,
+ m16x8,
+ i32x8,
+ u32x8,
+ f32x8,
+ m32x8,
+ i64x8,
+ u64x8,
+ m64x8,
+ isizex8,
+ usizex8,
+ msizex8
);
impl_from_cast_mask!(
- m64x8[test_v512]: i8x8, u8x8, m8x8, i16x8, u16x8, m16x8, i32x8, u32x8, f32x8, m32x8,
- i64x8, u64x8, f64x8, isizex8, usizex8, msizex8
+ m64x8[test_v512]: i8x8,
+ u8x8,
+ m8x8,
+ i16x8,
+ u16x8,
+ m16x8,
+ i32x8,
+ u32x8,
+ f32x8,
+ m32x8,
+ i64x8,
+ u64x8,
+ f64x8,
+ isizex8,
+ usizex8,
+ msizex8
);
impl_from_cast!(
- i128x4[test_v512]: i8x4, u8x4, m8x4, i16x4, u16x4, m16x4, i32x4, u32x4, f32x4, m32x4,
- i64x4, u64x4, f64x4, m64x4, u128x4, m128x4, isizex4, usizex4, msizex4
+ i128x4[test_v512]: i8x4,
+ u8x4,
+ m8x4,
+ i16x4,
+ u16x4,
+ m16x4,
+ i32x4,
+ u32x4,
+ f32x4,
+ m32x4,
+ i64x4,
+ u64x4,
+ f64x4,
+ m64x4,
+ u128x4,
+ m128x4,
+ isizex4,
+ usizex4,
+ msizex4
);
impl_from_cast!(
- u128x4[test_v512]: i8x4, u8x4, m8x4, i16x4, u16x4, m16x4, i32x4, u32x4, f32x4, m32x4,
- i64x4, u64x4, f64x4, m64x4, i128x4, m128x4, isizex4, usizex4, msizex4
+ u128x4[test_v512]: i8x4,
+ u8x4,
+ m8x4,
+ i16x4,
+ u16x4,
+ m16x4,
+ i32x4,
+ u32x4,
+ f32x4,
+ m32x4,
+ i64x4,
+ u64x4,
+ f64x4,
+ m64x4,
+ i128x4,
+ m128x4,
+ isizex4,
+ usizex4,
+ msizex4
);
impl_from_cast_mask!(
- m128x4[test_v512]: i8x4, u8x4, m8x4, i16x4, u16x4, m16x4, i32x4, u32x4, f32x4, m32x4,
- i64x4, u64x4, m64x4, f64x4, i128x4, u128x4, isizex4, usizex4, msizex4
+ m128x4[test_v512]: i8x4,
+ u8x4,
+ m8x4,
+ i16x4,
+ u16x4,
+ m16x4,
+ i32x4,
+ u32x4,
+ f32x4,
+ m32x4,
+ i64x4,
+ u64x4,
+ m64x4,
+ f64x4,
+ i128x4,
+ u128x4,
+ isizex4,
+ usizex4,
+ msizex4
);
impl_from_cast!(
- isizex8[test_v512]: i8x8, u8x8, m8x8, i16x8, u16x8, m16x8, i32x8, u32x8, f32x8, m32x8,
- i64x8, u64x8, f64x8, m64x8, usizex8, msizex8
+ isizex8[test_v512]: i8x8,
+ u8x8,
+ m8x8,
+ i16x8,
+ u16x8,
+ m16x8,
+ i32x8,
+ u32x8,
+ f32x8,
+ m32x8,
+ i64x8,
+ u64x8,
+ f64x8,
+ m64x8,
+ usizex8,
+ msizex8
);
impl_from_cast!(
- usizex8[test_v512]: i8x8, u8x8, m8x8, i16x8, u16x8, m16x8, i32x8, u32x8, f32x8, m32x8,
- i64x8, u64x8, f64x8, m64x8, isizex8, msizex8
+ usizex8[test_v512]: i8x8,
+ u8x8,
+ m8x8,
+ i16x8,
+ u16x8,
+ m16x8,
+ i32x8,
+ u32x8,
+ f32x8,
+ m32x8,
+ i64x8,
+ u64x8,
+ f64x8,
+ m64x8,
+ isizex8,
+ msizex8
);
impl_from_cast_mask!(
- msizex8[test_v512]: i8x8, u8x8, m8x8, i16x8, u16x8, m16x8, i32x8, u32x8, f32x8, m32x8,
- i64x8, u64x8, f64x8, m64x8, isizex8, usizex8
+ msizex8[test_v512]: i8x8,
+ u8x8,
+ m8x8,
+ i16x8,
+ u16x8,
+ m16x8,
+ i32x8,
+ u32x8,
+ f32x8,
+ m32x8,
+ i64x8,
+ u64x8,
+ f64x8,
+ m64x8,
+ isizex8,
+ usizex8
);
diff --git a/vendor/packed_simd_2/src/api/cast/v64.rs b/vendor/packed_simd_2/src/api/cast/v64.rs
index 192a4638a..b23d1a491 100644
--- a/vendor/packed_simd_2/src/api/cast/v64.rs
+++ b/vendor/packed_simd_2/src/api/cast/v64.rs
@@ -4,44 +4,205 @@
use crate::*;
impl_from_cast!(
- i8x8[test_v64]: u8x8, m8x8, i16x8, u16x8, m16x8, i32x8, u32x8, f32x8, m32x8,
- i64x8, u64x8, f64x8, m64x8, isizex8, usizex8, msizex8
+ i8x8[test_v64]: u8x8,
+ m8x8,
+ i16x8,
+ u16x8,
+ m16x8,
+ i32x8,
+ u32x8,
+ f32x8,
+ m32x8,
+ i64x8,
+ u64x8,
+ f64x8,
+ m64x8,
+ isizex8,
+ usizex8,
+ msizex8
);
impl_from_cast!(
- u8x8[test_v64]: i8x8, m8x8, i16x8, u16x8, m16x8, i32x8, u32x8, f32x8, m32x8,
- i64x8, u64x8, f64x8, m64x8, isizex8, usizex8, msizex8
+ u8x8[test_v64]: i8x8,
+ m8x8,
+ i16x8,
+ u16x8,
+ m16x8,
+ i32x8,
+ u32x8,
+ f32x8,
+ m32x8,
+ i64x8,
+ u64x8,
+ f64x8,
+ m64x8,
+ isizex8,
+ usizex8,
+ msizex8
);
impl_from_cast_mask!(
- m8x8[test_v64]: i8x8, u8x8, i16x8, u16x8, m16x8, i32x8, u32x8, f32x8, m32x8,
- i64x8, u64x8, f64x8, m64x8, isizex8, usizex8, msizex8
+ m8x8[test_v64]: i8x8,
+ u8x8,
+ i16x8,
+ u16x8,
+ m16x8,
+ i32x8,
+ u32x8,
+ f32x8,
+ m32x8,
+ i64x8,
+ u64x8,
+ f64x8,
+ m64x8,
+ isizex8,
+ usizex8,
+ msizex8
);
impl_from_cast!(
- i16x4[test_v64]: i8x4, u8x4, m8x4, u16x4, m16x4, i32x4, u32x4, f32x4, m32x4,
- i64x4, u64x4, f64x4, m64x4, i128x4, u128x4, m128x4, isizex4, usizex4, msizex4
+ i16x4[test_v64]: i8x4,
+ u8x4,
+ m8x4,
+ u16x4,
+ m16x4,
+ i32x4,
+ u32x4,
+ f32x4,
+ m32x4,
+ i64x4,
+ u64x4,
+ f64x4,
+ m64x4,
+ i128x4,
+ u128x4,
+ m128x4,
+ isizex4,
+ usizex4,
+ msizex4
);
impl_from_cast!(
- u16x4[test_v64]: i8x4, u8x4, m8x4, i16x4, m16x4, i32x4, u32x4, f32x4, m32x4,
- i64x4, u64x4, f64x4, m64x4, i128x4, u128x4, m128x4, isizex4, usizex4, msizex4
+ u16x4[test_v64]: i8x4,
+ u8x4,
+ m8x4,
+ i16x4,
+ m16x4,
+ i32x4,
+ u32x4,
+ f32x4,
+ m32x4,
+ i64x4,
+ u64x4,
+ f64x4,
+ m64x4,
+ i128x4,
+ u128x4,
+ m128x4,
+ isizex4,
+ usizex4,
+ msizex4
);
impl_from_cast_mask!(
- m16x4[test_v64]: i8x4, u8x4, m8x4, i16x4, u16x4, i32x4, u32x4, f32x4, m32x4,
- i64x4, u64x4, f64x4, m64x4, i128x4, u128x4, m128x4, isizex4, usizex4, msizex4
+ m16x4[test_v64]: i8x4,
+ u8x4,
+ m8x4,
+ i16x4,
+ u16x4,
+ i32x4,
+ u32x4,
+ f32x4,
+ m32x4,
+ i64x4,
+ u64x4,
+ f64x4,
+ m64x4,
+ i128x4,
+ u128x4,
+ m128x4,
+ isizex4,
+ usizex4,
+ msizex4
);
impl_from_cast!(
- i32x2[test_v64]: i8x2, u8x2, m8x2, i16x2, u16x2, m16x2, u32x2, f32x2, m32x2,
- i64x2, u64x2, f64x2, m64x2, i128x2, u128x2, m128x2, isizex2, usizex2, msizex2
+ i32x2[test_v64]: i8x2,
+ u8x2,
+ m8x2,
+ i16x2,
+ u16x2,
+ m16x2,
+ u32x2,
+ f32x2,
+ m32x2,
+ i64x2,
+ u64x2,
+ f64x2,
+ m64x2,
+ i128x2,
+ u128x2,
+ m128x2,
+ isizex2,
+ usizex2,
+ msizex2
);
impl_from_cast!(
- u32x2[test_v64]: i8x2, u8x2, m8x2, i16x2, u16x2, m16x2, i32x2, f32x2, m32x2,
- i64x2, u64x2, f64x2, m64x2, i128x2, u128x2, m128x2, isizex2, usizex2, msizex2
+ u32x2[test_v64]: i8x2,
+ u8x2,
+ m8x2,
+ i16x2,
+ u16x2,
+ m16x2,
+ i32x2,
+ f32x2,
+ m32x2,
+ i64x2,
+ u64x2,
+ f64x2,
+ m64x2,
+ i128x2,
+ u128x2,
+ m128x2,
+ isizex2,
+ usizex2,
+ msizex2
);
impl_from_cast!(
- f32x2[test_v64]: i8x2, u8x2, m8x2, i16x2, u16x2, m16x2, i32x2, u32x2, m32x2,
- i64x2, u64x2, f64x2, m64x2, i128x2, u128x2, m128x2, isizex2, usizex2, msizex2
+ f32x2[test_v64]: i8x2,
+ u8x2,
+ m8x2,
+ i16x2,
+ u16x2,
+ m16x2,
+ i32x2,
+ u32x2,
+ m32x2,
+ i64x2,
+ u64x2,
+ f64x2,
+ m64x2,
+ i128x2,
+ u128x2,
+ m128x2,
+ isizex2,
+ usizex2,
+ msizex2
);
impl_from_cast_mask!(
- m32x2[test_v64]: i8x2, u8x2, m8x2, i16x2, u16x2, m16x2, i32x2, u32x2, f32x2,
- i64x2, u64x2, f64x2, m64x2, i128x2, u128x2, m128x2, isizex2, usizex2, msizex2
+ m32x2[test_v64]: i8x2,
+ u8x2,
+ m8x2,
+ i16x2,
+ u16x2,
+ m16x2,
+ i32x2,
+ u32x2,
+ f32x2,
+ i64x2,
+ u64x2,
+ f64x2,
+ m64x2,
+ i128x2,
+ u128x2,
+ m128x2,
+ isizex2,
+ usizex2,
+ msizex2
);
diff --git a/vendor/packed_simd_2/src/api/cmp/partial_eq.rs b/vendor/packed_simd_2/src/api/cmp/partial_eq.rs
index 1712a0de5..d69dd4742 100644
--- a/vendor/packed_simd_2/src/api/cmp/partial_eq.rs
+++ b/vendor/packed_simd_2/src/api/cmp/partial_eq.rs
@@ -21,9 +21,7 @@ macro_rules! impl_cmp_partial_eq {
// FIXME: https://github.com/rust-lang-nursery/rust-clippy/issues/2892
#[allow(clippy::partialeq_ne_impl)]
- impl crate::cmp::PartialEq<LexicographicallyOrdered<$id>>
- for LexicographicallyOrdered<$id>
- {
+ impl crate::cmp::PartialEq<LexicographicallyOrdered<$id>> for LexicographicallyOrdered<$id> {
#[inline]
fn eq(&self, other: &Self) -> bool {
self.0 == other.0
diff --git a/vendor/packed_simd_2/src/api/cmp/partial_ord.rs b/vendor/packed_simd_2/src/api/cmp/partial_ord.rs
index a2292918b..76ed9ebe4 100644
--- a/vendor/packed_simd_2/src/api/cmp/partial_ord.rs
+++ b/vendor/packed_simd_2/src/api/cmp/partial_ord.rs
@@ -12,13 +12,9 @@ macro_rules! impl_cmp_partial_ord {
}
}
- impl crate::cmp::PartialOrd<LexicographicallyOrdered<$id>>
- for LexicographicallyOrdered<$id>
- {
+ impl crate::cmp::PartialOrd<LexicographicallyOrdered<$id>> for LexicographicallyOrdered<$id> {
#[inline]
- fn partial_cmp(
- &self, other: &Self,
- ) -> Option<crate::cmp::Ordering> {
+ fn partial_cmp(&self, other: &Self) -> Option<crate::cmp::Ordering> {
if PartialEq::eq(self, other) {
Some(crate::cmp::Ordering::Equal)
} else if PartialOrd::lt(self, other) {
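
Note: the reflowed `partial_cmp` implements a lexicographic order over lanes — the first unequal lane decides, as with slices. A hypothetical usage sketch (not part of this diff), assuming the crate's `partial_lex_ord` constructor for the wrapper type:

    use packed_simd_2::u32x4;

    fn main() {
        let a = u32x4::new(1, 2, 3, 4);
        let b = u32x4::new(1, 2, 4, 0);
        // Lanes 0 and 1 tie; lane 2 (3 < 4) settles the comparison,
        // so the trailing 4 vs 0 never matters.
        assert!(a.partial_lex_ord() < b.partial_lex_ord());
    }
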
diff --git a/vendor/packed_simd_2/src/api/fmt/binary.rs b/vendor/packed_simd_2/src/api/fmt/binary.rs
index b60769082..91c082555 100644
--- a/vendor/packed_simd_2/src/api/fmt/binary.rs
+++ b/vendor/packed_simd_2/src/api/fmt/binary.rs
@@ -4,9 +4,7 @@ macro_rules! impl_fmt_binary {
([$elem_ty:ident; $elem_count:expr]: $id:ident | $test_tt:tt) => {
impl crate::fmt::Binary for $id {
#[allow(clippy::missing_inline_in_public_items)]
- fn fmt(
- &self, f: &mut crate::fmt::Formatter<'_>,
- ) -> crate::fmt::Result {
+ fn fmt(&self, f: &mut crate::fmt::Formatter<'_>) -> crate::fmt::Result {
write!(f, "{}(", stringify!($id))?;
for i in 0..$elem_count {
if i > 0 {
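
Note: this and the four sibling fmt hunks below (debug, lower_hex, octal, upper_hex) are the same mechanical signature reflow; output is unchanged. Judging from the visible `write!(f, "{}(", stringify!($id))`, each lane is presumably printed with the element formatter inside a `TypeName(...)` wrapper, roughly:

    use packed_simd_2::u32x2;

    fn main() {
        // The macro writes "u32x2(", the lanes in binary separated
        // by ", ", then ")".
        assert_eq!(format!("{:b}", u32x2::new(1, 2)), "u32x2(1, 10)");
    }
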
diff --git a/vendor/packed_simd_2/src/api/fmt/debug.rs b/vendor/packed_simd_2/src/api/fmt/debug.rs
index ad0b8a59a..1e209b3bf 100644
--- a/vendor/packed_simd_2/src/api/fmt/debug.rs
+++ b/vendor/packed_simd_2/src/api/fmt/debug.rs
@@ -44,9 +44,7 @@ macro_rules! impl_fmt_debug {
([$elem_ty:ty; $elem_count:expr]: $id:ident | $test_tt:tt) => {
impl crate::fmt::Debug for $id {
#[allow(clippy::missing_inline_in_public_items)]
- fn fmt(
- &self, f: &mut crate::fmt::Formatter<'_>,
- ) -> crate::fmt::Result {
+ fn fmt(&self, f: &mut crate::fmt::Formatter<'_>) -> crate::fmt::Result {
write!(f, "{}(", stringify!($id))?;
for i in 0..$elem_count {
if i > 0 {
diff --git a/vendor/packed_simd_2/src/api/fmt/lower_hex.rs b/vendor/packed_simd_2/src/api/fmt/lower_hex.rs
index 5a7aa14b5..8f11d3119 100644
--- a/vendor/packed_simd_2/src/api/fmt/lower_hex.rs
+++ b/vendor/packed_simd_2/src/api/fmt/lower_hex.rs
@@ -4,9 +4,7 @@ macro_rules! impl_fmt_lower_hex {
([$elem_ty:ident; $elem_count:expr]: $id:ident | $test_tt:tt) => {
impl crate::fmt::LowerHex for $id {
#[allow(clippy::missing_inline_in_public_items)]
- fn fmt(
- &self, f: &mut crate::fmt::Formatter<'_>,
- ) -> crate::fmt::Result {
+ fn fmt(&self, f: &mut crate::fmt::Formatter<'_>) -> crate::fmt::Result {
write!(f, "{}(", stringify!($id))?;
for i in 0..$elem_count {
if i > 0 {
diff --git a/vendor/packed_simd_2/src/api/fmt/octal.rs b/vendor/packed_simd_2/src/api/fmt/octal.rs
index 83ac8abc7..e708e094c 100644
--- a/vendor/packed_simd_2/src/api/fmt/octal.rs
+++ b/vendor/packed_simd_2/src/api/fmt/octal.rs
@@ -4,9 +4,7 @@ macro_rules! impl_fmt_octal {
([$elem_ty:ident; $elem_count:expr]: $id:ident | $test_tt:tt) => {
impl crate::fmt::Octal for $id {
#[allow(clippy::missing_inline_in_public_items)]
- fn fmt(
- &self, f: &mut crate::fmt::Formatter<'_>,
- ) -> crate::fmt::Result {
+ fn fmt(&self, f: &mut crate::fmt::Formatter<'_>) -> crate::fmt::Result {
write!(f, "{}(", stringify!($id))?;
for i in 0..$elem_count {
if i > 0 {
diff --git a/vendor/packed_simd_2/src/api/fmt/upper_hex.rs b/vendor/packed_simd_2/src/api/fmt/upper_hex.rs
index aa88f673a..5ad455706 100644
--- a/vendor/packed_simd_2/src/api/fmt/upper_hex.rs
+++ b/vendor/packed_simd_2/src/api/fmt/upper_hex.rs
@@ -4,9 +4,7 @@ macro_rules! impl_fmt_upper_hex {
([$elem_ty:ident; $elem_count:expr]: $id:ident | $test_tt:tt) => {
impl crate::fmt::UpperHex for $id {
#[allow(clippy::missing_inline_in_public_items)]
- fn fmt(
- &self, f: &mut crate::fmt::Formatter<'_>,
- ) -> crate::fmt::Result {
+ fn fmt(&self, f: &mut crate::fmt::Formatter<'_>) -> crate::fmt::Result {
write!(f, "{}(", stringify!($id))?;
for i in 0..$elem_count {
if i > 0 {
diff --git a/vendor/packed_simd_2/src/api/into_bits.rs b/vendor/packed_simd_2/src/api/into_bits.rs
index f2cc1bae5..32b6d2ddc 100644
--- a/vendor/packed_simd_2/src/api/into_bits.rs
+++ b/vendor/packed_simd_2/src/api/into_bits.rs
@@ -19,9 +19,7 @@ where
{
#[inline]
fn into_bits(self) -> U {
- debug_assert!(
- crate::mem::size_of::<Self>() == crate::mem::size_of::<U>()
- );
+ debug_assert!(crate::mem::size_of::<Self>() == crate::mem::size_of::<U>());
U::from_bits(self)
}
}
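
Note: `into_bits` is a size-checked reinterpretation (effectively a transmute), not a numeric conversion — the `debug_assert!` reflowed above guards exactly that size equality. A sketch, assuming the `into_bits` cargo feature seen in the `#[cfg]` earlier in this diff is enabled:

    use packed_simd_2::{i8x16, u32x4, IntoBits};

    fn main() {
        let bytes = i8x16::splat(-1);          // sixteen 0xFF bytes
        let words: u32x4 = bytes.into_bits();  // same 128 bits, wider lanes
        assert_eq!(words, u32x4::splat(u32::MAX));
    }
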
diff --git a/vendor/packed_simd_2/src/api/into_bits/arch_specific.rs b/vendor/packed_simd_2/src/api/into_bits/arch_specific.rs
index fee614005..bfac91557 100644
--- a/vendor/packed_simd_2/src/api/into_bits/arch_specific.rs
+++ b/vendor/packed_simd_2/src/api/into_bits/arch_specific.rs
@@ -84,15 +84,48 @@ macro_rules! impl_arch {
// FIXME: 64-bit single element types
// FIXME: arm/aarch float16x4_t missing
impl_arch!(
- [arm["arm"]: int8x8_t, uint8x8_t, poly8x8_t, int16x4_t, uint16x4_t,
- poly16x4_t, int32x2_t, uint32x2_t, float32x2_t, int64x1_t,
- uint64x1_t],
- [aarch64["aarch64"]: int8x8_t, uint8x8_t, poly8x8_t, int16x4_t, uint16x4_t,
- poly16x4_t, int32x2_t, uint32x2_t, float32x2_t, int64x1_t, uint64x1_t,
- float64x1_t] |
- from: i8x8, u8x8, m8x8, i16x4, u16x4, m16x4, i32x2, u32x2, f32x2, m32x2 |
- into: i8x8, u8x8, i16x4, u16x4, i32x2, u32x2, f32x2 |
- test: test_v64
+ [
+ arm["arm"]: int8x8_t,
+ uint8x8_t,
+ poly8x8_t,
+ int16x4_t,
+ uint16x4_t,
+ poly16x4_t,
+ int32x2_t,
+ uint32x2_t,
+ float32x2_t,
+ int64x1_t,
+ uint64x1_t
+ ],
+ [
+ aarch64["aarch64"]: int8x8_t,
+ uint8x8_t,
+ poly8x8_t,
+ int16x4_t,
+ uint16x4_t,
+ poly16x4_t,
+ int32x2_t,
+ uint32x2_t,
+ float32x2_t,
+ int64x1_t,
+ uint64x1_t,
+ float64x1_t
+ ] | from: i8x8,
+ u8x8,
+ m8x8,
+ i16x4,
+ u16x4,
+ m16x4,
+ i32x2,
+ u32x2,
+ f32x2,
+ m32x2 | into: i8x8,
+ u8x8,
+ i16x4,
+ u16x4,
+ i32x2,
+ u32x2,
+ f32x2 | test: test_v64
);
////////////////////////////////////////////////////////////////////////////////
@@ -108,67 +141,169 @@ impl_arch!(
// FIXME: ppc64 vector_unsigned___int128 missing
impl_arch!(
[x86["x86"]: __m128, __m128i, __m128d],
- [x86_64["x86_64"]: __m128, __m128i, __m128d],
- [arm["arm"]: int8x16_t, uint8x16_t, poly8x16_t, int16x8_t, uint16x8_t,
- poly16x8_t, int32x4_t, uint32x4_t, float32x4_t, int64x2_t, uint64x2_t],
- [aarch64["aarch64"]: int8x16_t, uint8x16_t, poly8x16_t, int16x8_t,
- uint16x8_t, poly16x8_t, int32x4_t, uint32x4_t, float32x4_t, int64x2_t,
- uint64x2_t, float64x2_t],
- [powerpc["powerpc"]: vector_signed_char, vector_unsigned_char,
- vector_signed_short, vector_unsigned_short, vector_signed_int,
- vector_unsigned_int, vector_float],
- [powerpc64["powerpc64"]: vector_signed_char, vector_unsigned_char,
- vector_signed_short, vector_unsigned_short, vector_signed_int,
- vector_unsigned_int, vector_float, vector_signed_long,
- vector_unsigned_long, vector_double] |
- from: i8x16, u8x16, m8x16, i16x8, u16x8, m16x8, i32x4, u32x4, f32x4, m32x4,
- i64x2, u64x2, f64x2, m64x2, i128x1, u128x1, m128x1 |
- into: i8x16, u8x16, i16x8, u16x8, i32x4, u32x4, f32x4, i64x2, u64x2, f64x2,
- i128x1, u128x1 |
- test: test_v128
+ [x86_64["x86_64"]: __m128, __m128i, __m128d],
+ [
+ arm["arm"]: int8x16_t,
+ uint8x16_t,
+ poly8x16_t,
+ int16x8_t,
+ uint16x8_t,
+ poly16x8_t,
+ int32x4_t,
+ uint32x4_t,
+ float32x4_t,
+ int64x2_t,
+ uint64x2_t
+ ],
+ [
+ aarch64["aarch64"]: int8x16_t,
+ uint8x16_t,
+ poly8x16_t,
+ int16x8_t,
+ uint16x8_t,
+ poly16x8_t,
+ int32x4_t,
+ uint32x4_t,
+ float32x4_t,
+ int64x2_t,
+ uint64x2_t,
+ float64x2_t
+ ],
+ [
+ powerpc["powerpc"]: vector_signed_char,
+ vector_unsigned_char,
+ vector_signed_short,
+ vector_unsigned_short,
+ vector_signed_int,
+ vector_unsigned_int,
+ vector_float
+ ],
+ [
+ powerpc64["powerpc64"]: vector_signed_char,
+ vector_unsigned_char,
+ vector_signed_short,
+ vector_unsigned_short,
+ vector_signed_int,
+ vector_unsigned_int,
+ vector_float,
+ vector_signed_long,
+ vector_unsigned_long,
+ vector_double
+ ] | from: i8x16,
+ u8x16,
+ m8x16,
+ i16x8,
+ u16x8,
+ m16x8,
+ i32x4,
+ u32x4,
+ f32x4,
+ m32x4,
+ i64x2,
+ u64x2,
+ f64x2,
+ m64x2,
+ i128x1,
+ u128x1,
+ m128x1 | into: i8x16,
+ u8x16,
+ i16x8,
+ u16x8,
+ i32x4,
+ u32x4,
+ f32x4,
+ i64x2,
+ u64x2,
+ f64x2,
+ i128x1,
+ u128x1 | test: test_v128
);
impl_arch!(
[powerpc["powerpc"]: vector_bool_char],
- [powerpc64["powerpc64"]: vector_bool_char] |
- from: m8x16, m16x8, m32x4, m64x2, m128x1 |
- into: i8x16, u8x16, i16x8, u16x8, i32x4, u32x4, f32x4,
- i64x2, u64x2, f64x2, i128x1, u128x1,
+ [powerpc64["powerpc64"]: vector_bool_char] | from: m8x16,
+ m16x8,
+ m32x4,
+ m64x2,
+ m128x1 | into: i8x16,
+ u8x16,
+ i16x8,
+ u16x8,
+ i32x4,
+ u32x4,
+ f32x4,
+ i64x2,
+ u64x2,
+ f64x2,
+ i128x1,
+ u128x1,
// Masks:
- m8x16 |
- test: test_v128
+ m8x16 | test: test_v128
);
impl_arch!(
[powerpc["powerpc"]: vector_bool_short],
- [powerpc64["powerpc64"]: vector_bool_short] |
- from: m16x8, m32x4, m64x2, m128x1 |
- into: i8x16, u8x16, i16x8, u16x8, i32x4, u32x4, f32x4,
- i64x2, u64x2, f64x2, i128x1, u128x1,
+ [powerpc64["powerpc64"]: vector_bool_short] | from: m16x8,
+ m32x4,
+ m64x2,
+ m128x1 | into: i8x16,
+ u8x16,
+ i16x8,
+ u16x8,
+ i32x4,
+ u32x4,
+ f32x4,
+ i64x2,
+ u64x2,
+ f64x2,
+ i128x1,
+ u128x1,
// Masks:
- m8x16, m16x8 |
- test: test_v128
+ m8x16,
+ m16x8 | test: test_v128
);
impl_arch!(
[powerpc["powerpc"]: vector_bool_int],
- [powerpc64["powerpc64"]: vector_bool_int] |
- from: m32x4, m64x2, m128x1 |
- into: i8x16, u8x16, i16x8, u16x8, i32x4, u32x4, f32x4,
- i64x2, u64x2, f64x2, i128x1, u128x1,
+ [powerpc64["powerpc64"]: vector_bool_int] | from: m32x4,
+ m64x2,
+ m128x1 | into: i8x16,
+ u8x16,
+ i16x8,
+ u16x8,
+ i32x4,
+ u32x4,
+ f32x4,
+ i64x2,
+ u64x2,
+ f64x2,
+ i128x1,
+ u128x1,
// Masks:
- m8x16, m16x8, m32x4 |
- test: test_v128
+ m8x16,
+ m16x8,
+ m32x4 | test: test_v128
);
impl_arch!(
- [powerpc64["powerpc64"]: vector_bool_long] |
- from: m64x2, m128x1 |
- into: i8x16, u8x16, i16x8, u16x8, i32x4, u32x4, f32x4,
- i64x2, u64x2, f64x2, i128x1, u128x1,
+ [powerpc64["powerpc64"]: vector_bool_long] | from: m64x2,
+ m128x1 | into: i8x16,
+ u8x16,
+ i16x8,
+ u16x8,
+ i32x4,
+ u32x4,
+ f32x4,
+ i64x2,
+ u64x2,
+ f64x2,
+ i128x1,
+ u128x1,
// Masks:
- m8x16, m16x8, m32x4, m64x2 |
- test: test_v128
+ m8x16,
+ m16x8,
+ m32x4,
+ m64x2 | test: test_v128
);
////////////////////////////////////////////////////////////////////////////////
@@ -176,13 +311,34 @@ impl_arch!(
impl_arch!(
[x86["x86"]: __m256, __m256i, __m256d],
- [x86_64["x86_64"]: __m256, __m256i, __m256d] |
- from: i8x32, u8x32, m8x32, i16x16, u16x16, m16x16,
- i32x8, u32x8, f32x8, m32x8,
- i64x4, u64x4, f64x4, m64x4, i128x2, u128x2, m128x2 |
- into: i8x32, u8x32, i16x16, u16x16, i32x8, u32x8, f32x8,
- i64x4, u64x4, f64x4, i128x2, u128x2 |
- test: test_v256
+ [x86_64["x86_64"]: __m256, __m256i, __m256d] | from: i8x32,
+ u8x32,
+ m8x32,
+ i16x16,
+ u16x16,
+ m16x16,
+ i32x8,
+ u32x8,
+ f32x8,
+ m32x8,
+ i64x4,
+ u64x4,
+ f64x4,
+ m64x4,
+ i128x2,
+ u128x2,
+ m128x2 | into: i8x32,
+ u8x32,
+ i16x16,
+ u16x16,
+ i32x8,
+ u32x8,
+ f32x8,
+ i64x4,
+ u64x4,
+ f64x4,
+ i128x2,
+ u128x2 | test: test_v256
);
////////////////////////////////////////////////////////////////////////////////
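
Note: the `impl_arch!` reflows above keep wiring the same conversions between portable vectors and raw architecture types of equal width (`__m128i`, `int8x16_t`, the powerpc `vector_*` types). An illustrative round trip, compiled on x86_64 only:

    #[cfg(target_arch = "x86_64")]
    fn round_trip() {
        use core::arch::x86_64::__m128i;
        use packed_simd_2::{u8x16, IntoBits};

        let v = u8x16::splat(7);
        let raw: __m128i = v.into_bits();  // hand the bits to intrinsics
        let back: u8x16 = raw.into_bits(); // and reinterpret them back
        assert_eq!(back, v);
    }

    fn main() {
        #[cfg(target_arch = "x86_64")]
        round_trip();
    }
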
diff --git a/vendor/packed_simd_2/src/api/into_bits/macros.rs b/vendor/packed_simd_2/src/api/into_bits/macros.rs
index 8cec5b004..265ab34ae 100644
--- a/vendor/packed_simd_2/src/api/into_bits/macros.rs
+++ b/vendor/packed_simd_2/src/api/into_bits/macros.rs
@@ -24,7 +24,7 @@ macro_rules! impl_from_bits_ {
use crate::IntoBits;
assert_eq!(size_of::<$id>(),
size_of::<$from_ty>());
- // This is safe becasue we never create a reference to
+ // This is safe because we never create a reference to
// uninitialized memory:
let a: $from_ty = unsafe { zeroed() };
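
Note: besides the comment-typo fix ("becasue" → "because"), nothing changes here. The zeroed source is sound because the all-zero bit pattern is valid for every lane type the macro is instantiated with; for a concrete type the helper amounts to:

    use core::mem::zeroed;
    use packed_simd_2::f32x4;

    fn main() {
        // All-zero bits are a valid f32x4, so this never exposes
        // genuinely uninitialized memory.
        let a: f32x4 = unsafe { zeroed() };
        assert_eq!(a, f32x4::splat(0.0));
    }
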
diff --git a/vendor/packed_simd_2/src/api/into_bits/v128.rs b/vendor/packed_simd_2/src/api/into_bits/v128.rs
index e32cd7f9f..639c09c2c 100644
--- a/vendor/packed_simd_2/src/api/into_bits/v128.rs
+++ b/vendor/packed_simd_2/src/api/into_bits/v128.rs
@@ -4,25 +4,229 @@
#[allow(unused)] // wasm_bindgen_test
use crate::*;
-impl_from_bits!(i8x16[test_v128]: u8x16, m8x16, i16x8, u16x8, m16x8, i32x4, u32x4, f32x4, m32x4, i64x2, u64x2, f64x2, m64x2, i128x1, u128x1, m128x1);
-impl_from_bits!(u8x16[test_v128]: i8x16, m8x16, i16x8, u16x8, m16x8, i32x4, u32x4, f32x4, m32x4, i64x2, u64x2, f64x2, m64x2, i128x1, u128x1, m128x1);
+impl_from_bits!(
+ i8x16[test_v128]: u8x16,
+ m8x16,
+ i16x8,
+ u16x8,
+ m16x8,
+ i32x4,
+ u32x4,
+ f32x4,
+ m32x4,
+ i64x2,
+ u64x2,
+ f64x2,
+ m64x2,
+ i128x1,
+ u128x1,
+ m128x1
+);
+impl_from_bits!(
+ u8x16[test_v128]: i8x16,
+ m8x16,
+ i16x8,
+ u16x8,
+ m16x8,
+ i32x4,
+ u32x4,
+ f32x4,
+ m32x4,
+ i64x2,
+ u64x2,
+ f64x2,
+ m64x2,
+ i128x1,
+ u128x1,
+ m128x1
+);
impl_from_bits!(m8x16[test_v128]: m16x8, m32x4, m64x2, m128x1);
-impl_from_bits!(i16x8[test_v128]: i8x16, u8x16, m8x16, u16x8, m16x8, i32x4, u32x4, f32x4, m32x4, i64x2, u64x2, f64x2, m64x2, i128x1, u128x1, m128x1);
-impl_from_bits!(u16x8[test_v128]: i8x16, u8x16, m8x16, i16x8, m16x8, i32x4, u32x4, f32x4, m32x4, i64x2, u64x2, f64x2, m64x2, i128x1, u128x1, m128x1);
+impl_from_bits!(
+ i16x8[test_v128]: i8x16,
+ u8x16,
+ m8x16,
+ u16x8,
+ m16x8,
+ i32x4,
+ u32x4,
+ f32x4,
+ m32x4,
+ i64x2,
+ u64x2,
+ f64x2,
+ m64x2,
+ i128x1,
+ u128x1,
+ m128x1
+);
+impl_from_bits!(
+ u16x8[test_v128]: i8x16,
+ u8x16,
+ m8x16,
+ i16x8,
+ m16x8,
+ i32x4,
+ u32x4,
+ f32x4,
+ m32x4,
+ i64x2,
+ u64x2,
+ f64x2,
+ m64x2,
+ i128x1,
+ u128x1,
+ m128x1
+);
impl_from_bits!(m16x8[test_v128]: m32x4, m64x2, m128x1);
-impl_from_bits!(i32x4[test_v128]: i8x16, u8x16, m8x16, i16x8, u16x8, m16x8, u32x4, f32x4, m32x4, i64x2, u64x2, f64x2, m64x2, i128x1, u128x1, m128x1);
-impl_from_bits!(u32x4[test_v128]: i8x16, u8x16, m8x16, i16x8, u16x8, m16x8, i32x4, f32x4, m32x4, i64x2, u64x2, f64x2, m64x2, i128x1, u128x1, m128x1);
-impl_from_bits!(f32x4[test_v128]: i8x16, u8x16, m8x16, i16x8, u16x8, m16x8, i32x4, u32x4, m32x4, i64x2, u64x2, f64x2, m64x2, i128x1, u128x1, m128x1);
+impl_from_bits!(
+ i32x4[test_v128]: i8x16,
+ u8x16,
+ m8x16,
+ i16x8,
+ u16x8,
+ m16x8,
+ u32x4,
+ f32x4,
+ m32x4,
+ i64x2,
+ u64x2,
+ f64x2,
+ m64x2,
+ i128x1,
+ u128x1,
+ m128x1
+);
+impl_from_bits!(
+ u32x4[test_v128]: i8x16,
+ u8x16,
+ m8x16,
+ i16x8,
+ u16x8,
+ m16x8,
+ i32x4,
+ f32x4,
+ m32x4,
+ i64x2,
+ u64x2,
+ f64x2,
+ m64x2,
+ i128x1,
+ u128x1,
+ m128x1
+);
+impl_from_bits!(
+ f32x4[test_v128]: i8x16,
+ u8x16,
+ m8x16,
+ i16x8,
+ u16x8,
+ m16x8,
+ i32x4,
+ u32x4,
+ m32x4,
+ i64x2,
+ u64x2,
+ f64x2,
+ m64x2,
+ i128x1,
+ u128x1,
+ m128x1
+);
impl_from_bits!(m32x4[test_v128]: m64x2, m128x1);
-impl_from_bits!(i64x2[test_v128]: i8x16, u8x16, m8x16, i16x8, u16x8, m16x8, i32x4, u32x4, f32x4, m32x4, u64x2, f64x2, m64x2, i128x1, u128x1, m128x1);
-impl_from_bits!(u64x2[test_v128]: i8x16, u8x16, m8x16, i16x8, u16x8, m16x8, i32x4, u32x4, f32x4, m32x4, i64x2, f64x2, m64x2, i128x1, u128x1, m128x1);
-impl_from_bits!(f64x2[test_v128]: i8x16, u8x16, m8x16, i16x8, u16x8, m16x8, i32x4, u32x4, f32x4, m32x4, i64x2, u64x2, m64x2, i128x1, u128x1, m128x1);
+impl_from_bits!(
+ i64x2[test_v128]: i8x16,
+ u8x16,
+ m8x16,
+ i16x8,
+ u16x8,
+ m16x8,
+ i32x4,
+ u32x4,
+ f32x4,
+ m32x4,
+ u64x2,
+ f64x2,
+ m64x2,
+ i128x1,
+ u128x1,
+ m128x1
+);
+impl_from_bits!(
+ u64x2[test_v128]: i8x16,
+ u8x16,
+ m8x16,
+ i16x8,
+ u16x8,
+ m16x8,
+ i32x4,
+ u32x4,
+ f32x4,
+ m32x4,
+ i64x2,
+ f64x2,
+ m64x2,
+ i128x1,
+ u128x1,
+ m128x1
+);
+impl_from_bits!(
+ f64x2[test_v128]: i8x16,
+ u8x16,
+ m8x16,
+ i16x8,
+ u16x8,
+ m16x8,
+ i32x4,
+ u32x4,
+ f32x4,
+ m32x4,
+ i64x2,
+ u64x2,
+ m64x2,
+ i128x1,
+ u128x1,
+ m128x1
+);
impl_from_bits!(m64x2[test_v128]: m128x1);
-impl_from_bits!(i128x1[test_v128]: i8x16, u8x16, m8x16, i16x8, u16x8, m16x8, i32x4, u32x4, f32x4, m32x4, i64x2, u64x2, f64x2, m64x2, u128x1, m128x1);
-impl_from_bits!(u128x1[test_v128]: i8x16, u8x16, m8x16, i16x8, u16x8, m16x8, i32x4, u32x4, f32x4, m32x4, i64x2, u64x2, f64x2, m64x2, i128x1, m128x1);
-// note: m128x1 cannot be constructed from all the other masks bit patterns in here
-
+impl_from_bits!(
+ i128x1[test_v128]: i8x16,
+ u8x16,
+ m8x16,
+ i16x8,
+ u16x8,
+ m16x8,
+ i32x4,
+ u32x4,
+ f32x4,
+ m32x4,
+ i64x2,
+ u64x2,
+ f64x2,
+ m64x2,
+ u128x1,
+ m128x1
+);
+impl_from_bits!(
+ u128x1[test_v128]: i8x16,
+ u8x16,
+ m8x16,
+ i16x8,
+ u16x8,
+ m16x8,
+ i32x4,
+ u32x4,
+ f32x4,
+ m32x4,
+ i64x2,
+ u64x2,
+ f64x2,
+ m64x2,
+ i128x1,
+ m128x1
+);
+// note: m128x1 cannot be constructed from all the other masks bit patterns in
+// here
diff --git a/vendor/packed_simd_2/src/api/into_bits/v256.rs b/vendor/packed_simd_2/src/api/into_bits/v256.rs
index c4c373e0d..e432bbbc9 100644
--- a/vendor/packed_simd_2/src/api/into_bits/v256.rs
+++ b/vendor/packed_simd_2/src/api/into_bits/v256.rs
@@ -4,24 +4,229 @@
#[allow(unused)] // wasm_bindgen_test
use crate::*;
-impl_from_bits!(i8x32[test_v256]: u8x32, m8x32, i16x16, u16x16, m16x16, i32x8, u32x8, f32x8, m32x8, i64x4, u64x4, f64x4, m64x4, i128x2, u128x2, m128x2);
-impl_from_bits!(u8x32[test_v256]: i8x32, m8x32, i16x16, u16x16, m16x16, i32x8, u32x8, f32x8, m32x8, i64x4, u64x4, f64x4, m64x4, i128x2, u128x2, m128x2);
+impl_from_bits!(
+ i8x32[test_v256]: u8x32,
+ m8x32,
+ i16x16,
+ u16x16,
+ m16x16,
+ i32x8,
+ u32x8,
+ f32x8,
+ m32x8,
+ i64x4,
+ u64x4,
+ f64x4,
+ m64x4,
+ i128x2,
+ u128x2,
+ m128x2
+);
+impl_from_bits!(
+ u8x32[test_v256]: i8x32,
+ m8x32,
+ i16x16,
+ u16x16,
+ m16x16,
+ i32x8,
+ u32x8,
+ f32x8,
+ m32x8,
+ i64x4,
+ u64x4,
+ f64x4,
+ m64x4,
+ i128x2,
+ u128x2,
+ m128x2
+);
impl_from_bits!(m8x32[test_v256]: m16x16, m32x8, m64x4, m128x2);
-impl_from_bits!(i16x16[test_v256]: i8x32, u8x32, m8x32, u16x16, m16x16, i32x8, u32x8, f32x8, m32x8, i64x4, u64x4, f64x4, m64x4, i128x2, u128x2, m128x2);
-impl_from_bits!(u16x16[test_v256]: i8x32, u8x32, m8x32, i16x16, m16x16, i32x8, u32x8, f32x8, m32x8, i64x4, u64x4, f64x4, m64x4, i128x2, u128x2, m128x2);
+impl_from_bits!(
+ i16x16[test_v256]: i8x32,
+ u8x32,
+ m8x32,
+ u16x16,
+ m16x16,
+ i32x8,
+ u32x8,
+ f32x8,
+ m32x8,
+ i64x4,
+ u64x4,
+ f64x4,
+ m64x4,
+ i128x2,
+ u128x2,
+ m128x2
+);
+impl_from_bits!(
+ u16x16[test_v256]: i8x32,
+ u8x32,
+ m8x32,
+ i16x16,
+ m16x16,
+ i32x8,
+ u32x8,
+ f32x8,
+ m32x8,
+ i64x4,
+ u64x4,
+ f64x4,
+ m64x4,
+ i128x2,
+ u128x2,
+ m128x2
+);
impl_from_bits!(m16x16[test_v256]: m32x8, m64x4, m128x2);
-impl_from_bits!(i32x8[test_v256]: i8x32, u8x32, m8x32, i16x16, u16x16, m16x16, u32x8, f32x8, m32x8, i64x4, u64x4, f64x4, m64x4, i128x2, u128x2, m128x2);
-impl_from_bits!(u32x8[test_v256]: i8x32, u8x32, m8x32, i16x16, u16x16, m16x16, i32x8, f32x8, m32x8, i64x4, u64x4, f64x4, m64x4, i128x2, u128x2, m128x2);
-impl_from_bits!(f32x8[test_v256]: i8x32, u8x32, m8x32, i16x16, u16x16, m16x16, i32x8, u32x8, m32x8, i64x4, u64x4, f64x4, m64x4, i128x2, u128x2, m128x2);
+impl_from_bits!(
+ i32x8[test_v256]: i8x32,
+ u8x32,
+ m8x32,
+ i16x16,
+ u16x16,
+ m16x16,
+ u32x8,
+ f32x8,
+ m32x8,
+ i64x4,
+ u64x4,
+ f64x4,
+ m64x4,
+ i128x2,
+ u128x2,
+ m128x2
+);
+impl_from_bits!(
+ u32x8[test_v256]: i8x32,
+ u8x32,
+ m8x32,
+ i16x16,
+ u16x16,
+ m16x16,
+ i32x8,
+ f32x8,
+ m32x8,
+ i64x4,
+ u64x4,
+ f64x4,
+ m64x4,
+ i128x2,
+ u128x2,
+ m128x2
+);
+impl_from_bits!(
+ f32x8[test_v256]: i8x32,
+ u8x32,
+ m8x32,
+ i16x16,
+ u16x16,
+ m16x16,
+ i32x8,
+ u32x8,
+ m32x8,
+ i64x4,
+ u64x4,
+ f64x4,
+ m64x4,
+ i128x2,
+ u128x2,
+ m128x2
+);
impl_from_bits!(m32x8[test_v256]: m64x4, m128x2);
-impl_from_bits!(i64x4[test_v256]: i8x32, u8x32, m8x32, i16x16, u16x16, m16x16, i32x8, u32x8, f32x8, m32x8, u64x4, f64x4, m64x4, i128x2, u128x2, m128x2);
-impl_from_bits!(u64x4[test_v256]: i8x32, u8x32, m8x32, i16x16, u16x16, m16x16, i32x8, u32x8, f32x8, m32x8, i64x4, f64x4, m64x4, i128x2, u128x2, m128x2);
-impl_from_bits!(f64x4[test_v256]: i8x32, u8x32, m8x32, i16x16, u16x16, m16x16, i32x8, u32x8, f32x8, m32x8, i64x4, u64x4, m64x4, i128x2, u128x2, m128x2);
+impl_from_bits!(
+ i64x4[test_v256]: i8x32,
+ u8x32,
+ m8x32,
+ i16x16,
+ u16x16,
+ m16x16,
+ i32x8,
+ u32x8,
+ f32x8,
+ m32x8,
+ u64x4,
+ f64x4,
+ m64x4,
+ i128x2,
+ u128x2,
+ m128x2
+);
+impl_from_bits!(
+ u64x4[test_v256]: i8x32,
+ u8x32,
+ m8x32,
+ i16x16,
+ u16x16,
+ m16x16,
+ i32x8,
+ u32x8,
+ f32x8,
+ m32x8,
+ i64x4,
+ f64x4,
+ m64x4,
+ i128x2,
+ u128x2,
+ m128x2
+);
+impl_from_bits!(
+ f64x4[test_v256]: i8x32,
+ u8x32,
+ m8x32,
+ i16x16,
+ u16x16,
+ m16x16,
+ i32x8,
+ u32x8,
+ f32x8,
+ m32x8,
+ i64x4,
+ u64x4,
+ m64x4,
+ i128x2,
+ u128x2,
+ m128x2
+);
impl_from_bits!(m64x4[test_v256]: m128x2);
-impl_from_bits!(i128x2[test_v256]: i8x32, u8x32, m8x32, i16x16, u16x16, m16x16, i32x8, u32x8, f32x8, m32x8, i64x4, u64x4, f64x4, m64x4, u128x2, m128x2);
-impl_from_bits!(u128x2[test_v256]: i8x32, u8x32, m8x32, i16x16, u16x16, m16x16, i32x8, u32x8, f32x8, m32x8, i64x4, u64x4, f64x4, m64x4, i128x2, m128x2);
-// note: m128x2 cannot be constructed from all the other masks bit patterns in here
+impl_from_bits!(
+ i128x2[test_v256]: i8x32,
+ u8x32,
+ m8x32,
+ i16x16,
+ u16x16,
+ m16x16,
+ i32x8,
+ u32x8,
+ f32x8,
+ m32x8,
+ i64x4,
+ u64x4,
+ f64x4,
+ m64x4,
+ u128x2,
+ m128x2
+);
+impl_from_bits!(
+ u128x2[test_v256]: i8x32,
+ u8x32,
+ m8x32,
+ i16x16,
+ u16x16,
+ m16x16,
+ i32x8,
+ u32x8,
+ f32x8,
+ m32x8,
+ i64x4,
+ u64x4,
+ f64x4,
+ m64x4,
+ i128x2,
+ m128x2
+);
+// note: m128x2 cannot be constructed from all the other masks bit patterns in
+// here
diff --git a/vendor/packed_simd_2/src/api/into_bits/v512.rs b/vendor/packed_simd_2/src/api/into_bits/v512.rs
index 4a771962c..f6e9bb8bf 100644
--- a/vendor/packed_simd_2/src/api/into_bits/v512.rs
+++ b/vendor/packed_simd_2/src/api/into_bits/v512.rs
@@ -4,24 +4,229 @@
#[allow(unused)] // wasm_bindgen_test
use crate::*;
-impl_from_bits!(i8x64[test_v512]: u8x64, m8x64, i16x32, u16x32, m16x32, i32x16, u32x16, f32x16, m32x16, i64x8, u64x8, f64x8, m64x8, i128x4, u128x4, m128x4);
-impl_from_bits!(u8x64[test_v512]: i8x64, m8x64, i16x32, u16x32, m16x32, i32x16, u32x16, f32x16, m32x16, i64x8, u64x8, f64x8, m64x8, i128x4, u128x4, m128x4);
+impl_from_bits!(
+ i8x64[test_v512]: u8x64,
+ m8x64,
+ i16x32,
+ u16x32,
+ m16x32,
+ i32x16,
+ u32x16,
+ f32x16,
+ m32x16,
+ i64x8,
+ u64x8,
+ f64x8,
+ m64x8,
+ i128x4,
+ u128x4,
+ m128x4
+);
+impl_from_bits!(
+ u8x64[test_v512]: i8x64,
+ m8x64,
+ i16x32,
+ u16x32,
+ m16x32,
+ i32x16,
+ u32x16,
+ f32x16,
+ m32x16,
+ i64x8,
+ u64x8,
+ f64x8,
+ m64x8,
+ i128x4,
+ u128x4,
+ m128x4
+);
impl_from_bits!(m8x64[test_v512]: m16x32, m32x16, m64x8, m128x4);
-impl_from_bits!(i16x32[test_v512]: i8x64, u8x64, m8x64, u16x32, m16x32, i32x16, u32x16, f32x16, m32x16, i64x8, u64x8, f64x8, m64x8, i128x4, u128x4, m128x4);
-impl_from_bits!(u16x32[test_v512]: i8x64, u8x64, m8x64, i16x32, m16x32, i32x16, u32x16, f32x16, m32x16, i64x8, u64x8, f64x8, m64x8, i128x4, u128x4, m128x4);
+impl_from_bits!(
+ i16x32[test_v512]: i8x64,
+ u8x64,
+ m8x64,
+ u16x32,
+ m16x32,
+ i32x16,
+ u32x16,
+ f32x16,
+ m32x16,
+ i64x8,
+ u64x8,
+ f64x8,
+ m64x8,
+ i128x4,
+ u128x4,
+ m128x4
+);
+impl_from_bits!(
+ u16x32[test_v512]: i8x64,
+ u8x64,
+ m8x64,
+ i16x32,
+ m16x32,
+ i32x16,
+ u32x16,
+ f32x16,
+ m32x16,
+ i64x8,
+ u64x8,
+ f64x8,
+ m64x8,
+ i128x4,
+ u128x4,
+ m128x4
+);
impl_from_bits!(m16x32[test_v512]: m32x16, m64x8, m128x4);
-impl_from_bits!(i32x16[test_v512]: i8x64, u8x64, m8x64, i16x32, u16x32, m16x32, u32x16, f32x16, m32x16, i64x8, u64x8, f64x8, m64x8, i128x4, u128x4, m128x4);
-impl_from_bits!(u32x16[test_v512]: i8x64, u8x64, m8x64, i16x32, u16x32, m16x32, i32x16, f32x16, m32x16, i64x8, u64x8, f64x8, m64x8, i128x4, u128x4, m128x4);
-impl_from_bits!(f32x16[test_v512]: i8x64, u8x64, m8x64, i16x32, u16x32, m16x32, i32x16, u32x16, m32x16, i64x8, u64x8, f64x8, m64x8, i128x4, u128x4, m128x4);
+impl_from_bits!(
+ i32x16[test_v512]: i8x64,
+ u8x64,
+ m8x64,
+ i16x32,
+ u16x32,
+ m16x32,
+ u32x16,
+ f32x16,
+ m32x16,
+ i64x8,
+ u64x8,
+ f64x8,
+ m64x8,
+ i128x4,
+ u128x4,
+ m128x4
+);
+impl_from_bits!(
+ u32x16[test_v512]: i8x64,
+ u8x64,
+ m8x64,
+ i16x32,
+ u16x32,
+ m16x32,
+ i32x16,
+ f32x16,
+ m32x16,
+ i64x8,
+ u64x8,
+ f64x8,
+ m64x8,
+ i128x4,
+ u128x4,
+ m128x4
+);
+impl_from_bits!(
+ f32x16[test_v512]: i8x64,
+ u8x64,
+ m8x64,
+ i16x32,
+ u16x32,
+ m16x32,
+ i32x16,
+ u32x16,
+ m32x16,
+ i64x8,
+ u64x8,
+ f64x8,
+ m64x8,
+ i128x4,
+ u128x4,
+ m128x4
+);
impl_from_bits!(m32x16[test_v512]: m64x8, m128x4);
-impl_from_bits!(i64x8[test_v512]: i8x64, u8x64, m8x64, i16x32, u16x32, m16x32, i32x16, u32x16, f32x16, m32x16, u64x8, f64x8, m64x8, i128x4, u128x4, m128x4);
-impl_from_bits!(u64x8[test_v512]: i8x64, u8x64, m8x64, i16x32, u16x32, m16x32, i32x16, u32x16, f32x16, m32x16, i64x8, f64x8, m64x8, i128x4, u128x4, m128x4);
-impl_from_bits!(f64x8[test_v512]: i8x64, u8x64, m8x64, i16x32, u16x32, m16x32, i32x16, u32x16, f32x16, m32x16, i64x8, u64x8, m64x8, i128x4, u128x4, m128x4);
+impl_from_bits!(
+ i64x8[test_v512]: i8x64,
+ u8x64,
+ m8x64,
+ i16x32,
+ u16x32,
+ m16x32,
+ i32x16,
+ u32x16,
+ f32x16,
+ m32x16,
+ u64x8,
+ f64x8,
+ m64x8,
+ i128x4,
+ u128x4,
+ m128x4
+);
+impl_from_bits!(
+ u64x8[test_v512]: i8x64,
+ u8x64,
+ m8x64,
+ i16x32,
+ u16x32,
+ m16x32,
+ i32x16,
+ u32x16,
+ f32x16,
+ m32x16,
+ i64x8,
+ f64x8,
+ m64x8,
+ i128x4,
+ u128x4,
+ m128x4
+);
+impl_from_bits!(
+ f64x8[test_v512]: i8x64,
+ u8x64,
+ m8x64,
+ i16x32,
+ u16x32,
+ m16x32,
+ i32x16,
+ u32x16,
+ f32x16,
+ m32x16,
+ i64x8,
+ u64x8,
+ m64x8,
+ i128x4,
+ u128x4,
+ m128x4
+);
impl_from_bits!(m64x8[test_v512]: m128x4);
-impl_from_bits!(i128x4[test_v512]: i8x64, u8x64, m8x64, i16x32, u16x32, m16x32, i32x16, u32x16, f32x16, m32x16, i64x8, u64x8, f64x8, m64x8, u128x4, m128x4);
-impl_from_bits!(u128x4[test_v512]: i8x64, u8x64, m8x64, i16x32, u16x32, m16x32, i32x16, u32x16, f32x16, m32x16, i64x8, u64x8, f64x8, m64x8, i128x4, m128x4);
-// note: m128x4 cannot be constructed from all the other masks bit patterns in here
+impl_from_bits!(
+ i128x4[test_v512]: i8x64,
+ u8x64,
+ m8x64,
+ i16x32,
+ u16x32,
+ m16x32,
+ i32x16,
+ u32x16,
+ f32x16,
+ m32x16,
+ i64x8,
+ u64x8,
+ f64x8,
+ m64x8,
+ u128x4,
+ m128x4
+);
+impl_from_bits!(
+ u128x4[test_v512]: i8x64,
+ u8x64,
+ m8x64,
+ i16x32,
+ u16x32,
+ m16x32,
+ i32x16,
+ u32x16,
+ f32x16,
+ m32x16,
+ i64x8,
+ u64x8,
+ f64x8,
+ m64x8,
+ i128x4,
+ m128x4
+);
+// note: m128x4 cannot be constructed from all the other masks bit patterns in
+// here
diff --git a/vendor/packed_simd_2/src/api/math/float/consts.rs b/vendor/packed_simd_2/src/api/math/float/consts.rs
index 89f93a6d6..7f41acbf1 100644
--- a/vendor/packed_simd_2/src/api/math/float/consts.rs
+++ b/vendor/packed_simd_2/src/api/math/float/consts.rs
@@ -8,8 +8,7 @@ macro_rules! impl_float_consts {
pub const MIN: $id = $id::splat(core::$elem_ty::MIN);
/// Smallest positive normal value.
- pub const MIN_POSITIVE: $id =
- $id::splat(core::$elem_ty::MIN_POSITIVE);
+ pub const MIN_POSITIVE: $id = $id::splat(core::$elem_ty::MIN_POSITIVE);
/// Largest finite value.
pub const MAX: $id = $id::splat(core::$elem_ty::MAX);
@@ -21,50 +20,40 @@ macro_rules! impl_float_consts {
pub const INFINITY: $id = $id::splat(core::$elem_ty::INFINITY);
/// Negative infinity (-∞).
- pub const NEG_INFINITY: $id =
- $id::splat(core::$elem_ty::NEG_INFINITY);
+ pub const NEG_INFINITY: $id = $id::splat(core::$elem_ty::NEG_INFINITY);
/// Archimedes' constant (π)
pub const PI: $id = $id::splat(core::$elem_ty::consts::PI);
/// π/2
- pub const FRAC_PI_2: $id =
- $id::splat(core::$elem_ty::consts::FRAC_PI_2);
+ pub const FRAC_PI_2: $id = $id::splat(core::$elem_ty::consts::FRAC_PI_2);
/// π/3
- pub const FRAC_PI_3: $id =
- $id::splat(core::$elem_ty::consts::FRAC_PI_3);
+ pub const FRAC_PI_3: $id = $id::splat(core::$elem_ty::consts::FRAC_PI_3);
/// π/4
- pub const FRAC_PI_4: $id =
- $id::splat(core::$elem_ty::consts::FRAC_PI_4);
+ pub const FRAC_PI_4: $id = $id::splat(core::$elem_ty::consts::FRAC_PI_4);
/// π/6
- pub const FRAC_PI_6: $id =
- $id::splat(core::$elem_ty::consts::FRAC_PI_6);
+ pub const FRAC_PI_6: $id = $id::splat(core::$elem_ty::consts::FRAC_PI_6);
/// π/8
- pub const FRAC_PI_8: $id =
- $id::splat(core::$elem_ty::consts::FRAC_PI_8);
+ pub const FRAC_PI_8: $id = $id::splat(core::$elem_ty::consts::FRAC_PI_8);
/// 1/π
- pub const FRAC_1_PI: $id =
- $id::splat(core::$elem_ty::consts::FRAC_1_PI);
+ pub const FRAC_1_PI: $id = $id::splat(core::$elem_ty::consts::FRAC_1_PI);
/// 2/π
- pub const FRAC_2_PI: $id =
- $id::splat(core::$elem_ty::consts::FRAC_2_PI);
+ pub const FRAC_2_PI: $id = $id::splat(core::$elem_ty::consts::FRAC_2_PI);
/// 2/sqrt(π)
- pub const FRAC_2_SQRT_PI: $id =
- $id::splat(core::$elem_ty::consts::FRAC_2_SQRT_PI);
+ pub const FRAC_2_SQRT_PI: $id = $id::splat(core::$elem_ty::consts::FRAC_2_SQRT_PI);
/// sqrt(2)
pub const SQRT_2: $id = $id::splat(core::$elem_ty::consts::SQRT_2);
/// 1/sqrt(2)
- pub const FRAC_1_SQRT_2: $id =
- $id::splat(core::$elem_ty::consts::FRAC_1_SQRT_2);
+ pub const FRAC_1_SQRT_2: $id = $id::splat(core::$elem_ty::consts::FRAC_1_SQRT_2);
/// Euler's number (e)
pub const E: $id = $id::splat(core::$elem_ty::consts::E);
@@ -73,8 +62,7 @@ macro_rules! impl_float_consts {
pub const LOG2_E: $id = $id::splat(core::$elem_ty::consts::LOG2_E);
/// log<sub>10</sub>(e)
- pub const LOG10_E: $id =
- $id::splat(core::$elem_ty::consts::LOG10_E);
+ pub const LOG10_E: $id = $id::splat(core::$elem_ty::consts::LOG10_E);
/// ln(2)
pub const LN_2: $id = $id::splat(core::$elem_ty::consts::LN_2);
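
This hunk is formatting only: each constant is still the corresponding scalar constant from `core`, splatted across every lane. For example:

```
use packed_simd_2::*;

fn main() {
    // f32x4::PI holds core::f32::consts::PI in each of its four lanes.
    let pi = f32x4::PI;
    assert_eq!(pi.extract(2), core::f32::consts::PI);
}
```
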
diff --git a/vendor/packed_simd_2/src/api/ops/scalar_shifts.rs b/vendor/packed_simd_2/src/api/ops/scalar_shifts.rs
index 9c164ad56..4a7a09626 100644
--- a/vendor/packed_simd_2/src/api/ops/scalar_shifts.rs
+++ b/vendor/packed_simd_2/src/api/ops/scalar_shifts.rs
@@ -36,11 +36,10 @@ macro_rules! impl_ops_scalar_shifts {
use super::*;
#[cfg_attr(not(target_arch = "wasm32"), test)] #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
#[cfg_attr(any(target_arch = "s390x", target_arch = "sparc64"),
- allow(unreachable_code,
- unused_variables,
- unused_mut)
+ allow(unreachable_code, unused_variables)
)]
- // ^^^ FIXME: https://github.com/rust-lang/rust/issues/55344
+ #[cfg(not(target_arch = "aarch64"))]
+ //~^ FIXME: https://github.com/rust-lang/packed_simd/issues/317
fn ops_scalar_shifts() {
let z = $id::splat(0 as $elem_ty);
let o = $id::splat(1 as $elem_ty);
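
For reference, the operators exercised by this test shift every lane by the same scalar count; a minimal sketch:

```
use packed_simd_2::*;

fn main() {
    let v = i32x4::new(1, 2, 3, 4);
    // Each lane is shifted by the same scalar amount.
    assert_eq!(v << 1, i32x4::new(2, 4, 6, 8));
    assert_eq!(v >> 1, i32x4::new(0, 1, 1, 2));
}
```
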
diff --git a/vendor/packed_simd_2/src/api/ops/vector_rotates.rs b/vendor/packed_simd_2/src/api/ops/vector_rotates.rs
index 6c794ecf4..147fc2e37 100644
--- a/vendor/packed_simd_2/src/api/ops/vector_rotates.rs
+++ b/vendor/packed_simd_2/src/api/ops/vector_rotates.rs
@@ -47,6 +47,8 @@ macro_rules! impl_ops_vector_rotates {
pub mod [<$id _ops_vector_rotate>] {
use super::*;
#[cfg_attr(not(target_arch = "wasm32"), test)] #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+ #[cfg(not(target_arch = "aarch64"))]
+ //~^ FIXME: https://github.com/rust-lang/packed_simd/issues/317
fn rotate_ops() {
let z = $id::splat(0 as $elem_ty);
let o = $id::splat(1 as $elem_ty);
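
The rotates gated here take a vector of per-lane counts rather than a scalar; a minimal sketch:

```
use packed_simd_2::*;

fn main() {
    let x = u32x4::splat(0b0001);
    let n = u32x4::new(0, 1, 2, 3);
    // Each lane of `x` is rotated left by the count in the same lane of `n`.
    assert_eq!(x.rotate_left(n), u32x4::new(0b0001, 0b0010, 0b0100, 0b1000));
}
```
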
diff --git a/vendor/packed_simd_2/src/api/ops/vector_shifts.rs b/vendor/packed_simd_2/src/api/ops/vector_shifts.rs
index 22e1fbc0e..8bb5ac2fc 100644
--- a/vendor/packed_simd_2/src/api/ops/vector_shifts.rs
+++ b/vendor/packed_simd_2/src/api/ops/vector_shifts.rs
@@ -37,11 +37,10 @@ macro_rules! impl_ops_vector_shifts {
use super::*;
#[cfg_attr(not(target_arch = "wasm32"), test)] #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
#[cfg_attr(any(target_arch = "s390x", target_arch = "sparc64"),
- allow(unreachable_code,
- unused_variables,
- unused_mut)
+ allow(unreachable_code, unused_variables)
)]
- // ^^^ FIXME: https://github.com/rust-lang/rust/issues/55344
+ #[cfg(not(target_arch = "aarch64"))]
+ //~^ FIXME: https://github.com/rust-lang/packed_simd/issues/317
fn ops_vector_shifts() {
let z = $id::splat(0 as $elem_ty);
let o = $id::splat(1 as $elem_ty);
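
Unlike the scalar shifts above, these operators take a whole vector of shift counts; a minimal sketch:

```
use packed_simd_2::*;

fn main() {
    let v = u32x4::splat(1);
    let n = u32x4::new(0, 1, 2, 3);
    // Lane-wise shift: each lane uses the count from the same lane of `n`.
    assert_eq!(v << n, u32x4::new(1, 2, 4, 8));
}
```
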
diff --git a/vendor/packed_simd_2/src/api/ptr/gather_scatter.rs b/vendor/packed_simd_2/src/api/ptr/gather_scatter.rs
index 430435620..374482ac3 100644
--- a/vendor/packed_simd_2/src/api/ptr/gather_scatter.rs
+++ b/vendor/packed_simd_2/src/api/ptr/gather_scatter.rs
@@ -22,7 +22,8 @@ macro_rules! impl_ptr_read {
/// pointers must be aligned to `mem::align_of::<T>()`.
#[inline]
pub unsafe fn read<M>(
- self, mask: Simd<[M; $elem_count]>,
+ self,
+ mask: Simd<[M; $elem_count]>,
value: Simd<[T; $elem_count]>,
) -> Simd<[T; $elem_count]>
where
@@ -128,10 +129,8 @@ macro_rules! impl_ptr_write {
/// This method is unsafe because it dereferences raw pointers. The
/// pointers must be aligned to `mem::align_of::<T>()`.
#[inline]
- pub unsafe fn write<M>(
- self, mask: Simd<[M; $elem_count]>,
- value: Simd<[T; $elem_count]>,
- ) where
+ pub unsafe fn write<M>(self, mask: Simd<[M; $elem_count]>, value: Simd<[T; $elem_count]>)
+ where
M: sealed::Mask,
[M; $elem_count]: sealed::SimdArray,
{
@@ -147,8 +146,8 @@ macro_rules! impl_ptr_write {
use super::*;
#[test]
fn write() {
- // fourty_two = [42, 42, 42, ...]
- let fourty_two
+ // forty_two = [42, 42, 42, ...]
+ let forty_two
= Simd::<[i32; $elem_count]>::splat(42_i32);
// This test will write to this array
@@ -166,11 +165,11 @@ macro_rules! impl_ptr_write {
}
// ptr = [&arr[0], &arr[1], ...]
- // write `fourty_two` to all elements of `v`
+ // write `forty_two` to all elements of `v`
{
let backup = arr;
unsafe {
- ptr.write($mask_ty::splat(true), fourty_two)
+ ptr.write($mask_ty::splat(true), forty_two)
};
assert_eq!(arr, [42_i32; $elem_count]);
arr = backup; // arr = [0, 1, 2, ...]
@@ -196,7 +195,7 @@ macro_rules! impl_ptr_write {
}
let backup = arr;
- unsafe { ptr.write(mask, fourty_two) };
+ unsafe { ptr.write(mask, forty_two) };
assert_eq!(arr, r);
arr = backup; // arr = [0, 1, 2, 3, ...]
}
@@ -205,7 +204,7 @@ macro_rules! impl_ptr_write {
{
let backup = arr;
unsafe {
- ptr.write($mask_ty::splat(false), fourty_two)
+ ptr.write($mask_ty::splat(false), forty_two)
};
assert_eq!(arr, backup);
}
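
To make the masked-write semantics concrete, here is a sketch along the lines of the test above; the pointer-vector constructors (`null`, `replace`) are assumed to behave as in the crate's own tests:

```
use packed_simd_2::*;

fn main() {
    let mut arr = [0_i32; 4];
    // Build a vector of pointers, one to each element of `arr`.
    let mut ptr = mptrx4::<i32>::null();
    for i in 0..4 {
        ptr = ptr.replace(i, &mut arr[i] as *mut i32);
    }
    // Only the lanes whose mask lane is `true` are written.
    unsafe { ptr.write(m32x4::new(true, false, true, false), i32x4::splat(42)) };
    assert_eq!(arr, [42, 0, 42, 0]);
}
```
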
diff --git a/vendor/packed_simd_2/src/api/reductions/float_arithmetic.rs b/vendor/packed_simd_2/src/api/reductions/float_arithmetic.rs
index 4a47452e5..9dc8783db 100644
--- a/vendor/packed_simd_2/src/api/reductions/float_arithmetic.rs
+++ b/vendor/packed_simd_2/src/api/reductions/float_arithmetic.rs
@@ -144,8 +144,6 @@ macro_rules! impl_reduction_float_arithmetic {
#[cfg_attr(not(target_arch = "wasm32"), test)]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
#[allow(unreachable_code)]
- #[allow(unused_mut)]
- // ^^^ FIXME: https://github.com/rust-lang/rust/issues/55344
fn sum_nan() {
// FIXME: https://bugs.llvm.org/show_bug.cgi?id=36732
// https://github.com/rust-lang-nursery/packed_simd/issues/6
@@ -175,8 +173,6 @@ macro_rules! impl_reduction_float_arithmetic {
#[cfg_attr(not(target_arch = "wasm32"), test)]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
#[allow(unreachable_code)]
- #[allow(unused_mut)]
- // ^^^ FIXME: https://github.com/rust-lang/rust/issues/55344
fn product_nan() {
// FIXME: https://bugs.llvm.org/show_bug.cgi?id=36732
// https://github.com/rust-lang-nursery/packed_simd/issues/6
@@ -247,7 +243,7 @@ macro_rules! impl_reduction_float_arithmetic {
tree_bits - red_bits
} < 2,
"vector: {:?} | simd_reduction: {:?} | \
- tree_reduction: {} | scalar_reduction: {}",
+tree_reduction: {} | scalar_reduction: {}",
v,
simd_reduction,
tree_reduction,
@@ -303,7 +299,7 @@ macro_rules! impl_reduction_float_arithmetic {
tree_bits - red_bits
} < ulp_limit.try_into().unwrap(),
"vector: {:?} | simd_reduction: {:?} | \
- tree_reduction: {} | scalar_reduction: {}",
+tree_reduction: {} | scalar_reduction: {}",
v,
simd_reduction,
tree_reduction,
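
For reference, the reductions whose tests change here collapse a vector into a single scalar:

```
use packed_simd_2::*;

fn main() {
    let v = f32x4::new(1., 2., 3., 4.);
    // Horizontal reductions over all four lanes.
    assert_eq!(v.sum(), 10.);
    assert_eq!(v.product(), 24.);
}
```
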
diff --git a/vendor/packed_simd_2/src/api/reductions/integer_arithmetic.rs b/vendor/packed_simd_2/src/api/reductions/integer_arithmetic.rs
index 91dffad31..e99e6cb5d 100644
--- a/vendor/packed_simd_2/src/api/reductions/integer_arithmetic.rs
+++ b/vendor/packed_simd_2/src/api/reductions/integer_arithmetic.rs
@@ -18,9 +18,7 @@ macro_rules! impl_reduction_integer_arithmetic {
#[cfg(not(target_arch = "aarch64"))]
{
use crate::llvm::simd_reduce_add_ordered;
- let v: $ielem_ty = unsafe {
- simd_reduce_add_ordered(self.0, 0 as $ielem_ty)
- };
+ let v: $ielem_ty = unsafe { simd_reduce_add_ordered(self.0, 0 as $ielem_ty) };
v as $elem_ty
}
#[cfg(target_arch = "aarch64")]
@@ -49,9 +47,7 @@ macro_rules! impl_reduction_integer_arithmetic {
#[cfg(not(target_arch = "aarch64"))]
{
use crate::llvm::simd_reduce_mul_ordered;
- let v: $ielem_ty = unsafe {
- simd_reduce_mul_ordered(self.0, 1 as $ielem_ty)
- };
+ let v: $ielem_ty = unsafe { simd_reduce_mul_ordered(self.0, 1 as $ielem_ty) };
v as $elem_ty
}
#[cfg(target_arch = "aarch64")]
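
The integer counterparts reduce with wrapping semantics, matching the `simd_reduce_*_ordered` intrinsics used above:

```
use packed_simd_2::*;

fn main() {
    let v = i32x4::new(1, 2, 3, 4);
    // Horizontal wrapping reductions; overflow wraps instead of panicking.
    assert_eq!(v.wrapping_sum(), 10);
    assert_eq!(v.wrapping_product(), 24);
}
```
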
diff --git a/vendor/packed_simd_2/src/api/reductions/min_max.rs b/vendor/packed_simd_2/src/api/reductions/min_max.rs
index c4c1400a8..a3ce13a45 100644
--- a/vendor/packed_simd_2/src/api/reductions/min_max.rs
+++ b/vendor/packed_simd_2/src/api/reductions/min_max.rs
@@ -123,7 +123,7 @@ macro_rules! impl_reduction_min_max {
macro_rules! test_reduction_float_min_max {
([$elem_ty:ident; $elem_count:expr]: $id:ident | $test_tt:tt) => {
- test_if!{
+ test_if! {
$test_tt:
paste::item! {
// Comparisons use integer casts within mantissa^1 range.
@@ -160,20 +160,7 @@ macro_rules! test_reduction_float_min_max {
// targets:
if i == $id::lanes() - 1 &&
target_with_broken_last_lane_nan {
- // FIXME:
- // https://github.com/rust-lang-nursery/packed_simd/issues/5
- //
- // If there is a NaN, the result should always
- // the smallest element, but currently when the
- // last element is NaN the current
- // implementation incorrectly returns NaN.
- //
- // The targets mentioned above use different
- // codegen that produces the correct result.
- //
- // These asserts detect if this behavior changes
- assert!(v.min_element().is_nan(),
- // FIXME: ^^^ should be -3.
+ assert_eq!(v.min_element(), -3.,
"[A]: nan at {} => {} | {:?}",
i, v.min_element(), v);
@@ -181,14 +168,17 @@ macro_rules! test_reduction_float_min_max {
// up-to the `i-th` lane with `NaN`s, the result
// is still always `-3.` unless all elements of
// the vector are `NaN`s:
- //
- // This is also broken:
for j in 0..i {
v = v.replace(j, n);
- assert!(v.min_element().is_nan(),
- // FIXME: ^^^ should be -3.
+ if j == i - 1 {
+ assert!(v.min_element().is_nan(),
+ "[B]: nan at {} => {} | {:?}",
+ i, v.min_element(), v);
+ } else {
+ assert_eq!(v.min_element(), -3.,
"[B]: nan at {} => {} | {:?}",
i, v.min_element(), v);
+ }
}
// We are done here, since we were in the last
@@ -203,7 +193,7 @@ macro_rules! test_reduction_float_min_max {
if $id::lanes() == 1 {
assert!(v.min_element().is_nan(),
"[C]: all nans | v={:?} | min={} | \
- is_nan: {}",
+is_nan: {}",
v, v.min_element(),
v.min_element().is_nan()
);
@@ -235,7 +225,7 @@ macro_rules! test_reduction_float_min_max {
// "i - 1" does not overflow.
assert!(v.min_element().is_nan(),
"[E]: all nans | v={:?} | min={} | \
- is_nan: {}",
+is_nan: {}",
v, v.min_element(),
v.min_element().is_nan());
} else {
@@ -280,21 +270,7 @@ macro_rules! test_reduction_float_min_max {
// targets:
if i == $id::lanes() - 1 &&
target_with_broken_last_lane_nan {
- // FIXME:
- // https://github.com/rust-lang-nursery/packed_simd/issues/5
- //
- // If there is a NaN, the result should
- // always the largest element, but currently
- // when the last element is NaN the current
- // implementation incorrectly returns NaN.
- //
- // The targets mentioned above use different
- // codegen that produces the correct result.
- //
- // These asserts detect if this behavior
- // changes
- assert!(v.max_element().is_nan(),
- // FIXME: ^^^ should be -3.
+ assert_eq!(v.max_element(), -3.,
"[A]: nan at {} => {} | {:?}",
i, v.max_element(), v);
@@ -302,14 +278,17 @@ macro_rules! test_reduction_float_min_max {
// up-to the `i-th` lane with `NaN`s, the result
// is still always `-3.` unless all elements of
// the vector are `NaN`s:
- //
- // This is also broken:
for j in 0..i {
v = v.replace(j, n);
- assert!(v.max_element().is_nan(),
- // FIXME: ^^^ should be -3.
+ if j == i - 1 {
+ assert!(v.max_element().is_nan(),
+ "[B]: nan at {} => {} | {:?}",
+ i, v.max_element(), v);
+ } else {
+ assert_eq!(v.max_element(), -3.,
"[B]: nan at {} => {} | {:?}",
i, v.max_element(), v);
+ }
}
// We are done here, since we were in the last
@@ -324,7 +303,7 @@ macro_rules! test_reduction_float_min_max {
if $id::lanes() == 1 {
assert!(v.max_element().is_nan(),
"[C]: all nans | v={:?} | min={} | \
- is_nan: {}",
+is_nan: {}",
v, v.max_element(),
v.max_element().is_nan());
@@ -355,7 +334,7 @@ macro_rules! test_reduction_float_min_max {
// "i - 1" does not overflow.
assert!(v.max_element().is_nan(),
"[E]: all nans | v={:?} | max={} | \
- is_nan: {}",
+is_nan: {}",
v, v.max_element(),
v.max_element().is_nan());
} else {
@@ -377,5 +356,5 @@ macro_rules! test_reduction_float_min_max {
}
}
}
- }
+ };
}
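
The behavior these tests pin down: `min_element`/`max_element` reduce to the smallest/largest lane, with the platform-dependent NaN corner cases handled above. In the NaN-free case:

```
use packed_simd_2::*;

fn main() {
    let v = f32x4::new(3., -3., 7., 42.);
    assert_eq!(v.min_element(), -3.);
    assert_eq!(v.max_element(), 42.);
}
```
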
diff --git a/vendor/packed_simd_2/src/api/select.rs b/vendor/packed_simd_2/src/api/select.rs
index 24525df56..daf629472 100644
--- a/vendor/packed_simd_2/src/api/select.rs
+++ b/vendor/packed_simd_2/src/api/select.rs
@@ -12,9 +12,7 @@ macro_rules! impl_select {
#[inline]
pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T>
where
- T: sealed::SimdArray<
- NT = <[$elem_ty; $elem_count] as sealed::SimdArray>::NT,
- >,
+ T: sealed::SimdArray<NT = <[$elem_ty; $elem_count] as sealed::SimdArray>::NT>,
{
use crate::llvm::simd_select;
Simd(unsafe { simd_select(self.0, a.0, b.0) })
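
The tightened `NT` bound still allows selecting between any two vectors whose layout matches the mask's; for example:

```
use packed_simd_2::*;

fn main() {
    let mask = m32x4::new(true, false, true, false);
    let a = i32x4::new(1, 2, 3, 4);
    let b = i32x4::splat(0);
    // Lane-wise: take from `a` where the mask is true, else from `b`.
    assert_eq!(mask.select(a, b), i32x4::new(1, 0, 3, 0));
}
```
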
diff --git a/vendor/packed_simd_2/src/api/shuffle.rs b/vendor/packed_simd_2/src/api/shuffle.rs
index 13a7fae5f..fda29ccdd 100644
--- a/vendor/packed_simd_2/src/api/shuffle.rs
+++ b/vendor/packed_simd_2/src/api/shuffle.rs
@@ -27,9 +27,7 @@
/// Shuffling elements of two vectors:
///
/// ```
-/// # #[macro_use]
-/// # extern crate packed_simd;
-/// # use packed_simd::*;
+/// # use packed_simd_2::*;
/// # fn main() {
/// // Shuffle allows reordering the elements:
/// let x = i32x4::new(1, 2, 3, 4);
@@ -51,9 +49,7 @@
/// Shuffling elements of one vector:
///
/// ```
-/// # #[macro_use]
-/// # extern crate packed_simd;
-/// # use packed_simd::*;
+/// # use packed_simd_2::*;
/// # fn main() {
/// // Shuffle allows reordering the elements of a vector:
/// let x = i32x4::new(1, 2, 3, 4);
@@ -79,20 +75,18 @@ macro_rules! shuffle {
($vec0:expr, $vec1:expr, [$l0:expr, $l1:expr]) => {{
#[allow(unused_unsafe)]
unsafe {
- $crate::Simd($crate::__shuffle_vector2(
+ $crate::Simd($crate::__shuffle_vector2::<{[$l0, $l1]}, _, _>(
$vec0.0,
$vec1.0,
- [$l0, $l1],
))
}
}};
($vec0:expr, $vec1:expr, [$l0:expr, $l1:expr, $l2:expr, $l3:expr]) => {{
#[allow(unused_unsafe)]
unsafe {
- $crate::Simd($crate::__shuffle_vector4(
+ $crate::Simd($crate::__shuffle_vector4::<{[$l0, $l1, $l2, $l3]}, _, _>(
$vec0.0,
$vec1.0,
- [$l0, $l1, $l2, $l3],
))
}
}};
@@ -101,10 +95,9 @@ macro_rules! shuffle {
$l4:expr, $l5:expr, $l6:expr, $l7:expr]) => {{
#[allow(unused_unsafe)]
unsafe {
- $crate::Simd($crate::__shuffle_vector8(
+ $crate::Simd($crate::__shuffle_vector8::<{[$l0, $l1, $l2, $l3, $l4, $l5, $l6, $l7]}, _, _>(
$vec0.0,
$vec1.0,
- [$l0, $l1, $l2, $l3, $l4, $l5, $l6, $l7],
))
}
}};
@@ -115,13 +108,14 @@ macro_rules! shuffle {
$l12:expr, $l13:expr, $l14:expr, $l15:expr]) => {{
#[allow(unused_unsafe)]
unsafe {
- $crate::Simd($crate::__shuffle_vector16(
- $vec0.0,
- $vec1.0,
+ $crate::Simd($crate::__shuffle_vector16::<{
[
$l0, $l1, $l2, $l3, $l4, $l5, $l6, $l7, $l8, $l9, $l10,
$l11, $l12, $l13, $l14, $l15,
- ],
+ ]
+ }, _, _>(
+ $vec0.0,
+ $vec1.0,
))
}
}};
@@ -136,15 +130,16 @@ macro_rules! shuffle {
$l28:expr, $l29:expr, $l30:expr, $l31:expr]) => {{
#[allow(unused_unsafe)]
unsafe {
- $crate::Simd($crate::__shuffle_vector32(
- $vec0.0,
- $vec1.0,
+ $crate::Simd($crate::__shuffle_vector32::<{
[
$l0, $l1, $l2, $l3, $l4, $l5, $l6, $l7, $l8, $l9, $l10,
$l11, $l12, $l13, $l14, $l15, $l16, $l17, $l18, $l19,
$l20, $l21, $l22, $l23, $l24, $l25, $l26, $l27, $l28,
$l29, $l30, $l31,
- ],
+ ]
+ }, _, _>(
+ $vec0.0,
+ $vec1.0,
))
}
}};
@@ -167,18 +162,17 @@ macro_rules! shuffle {
$l60:expr, $l61:expr, $l62:expr, $l63:expr]) => {{
#[allow(unused_unsafe)]
unsafe {
- $crate::Simd($crate::__shuffle_vector64(
+ $crate::Simd($crate::__shuffle_vector64::<{[
+ $l0, $l1, $l2, $l3, $l4, $l5, $l6, $l7, $l8, $l9, $l10,
+ $l11, $l12, $l13, $l14, $l15, $l16, $l17, $l18, $l19,
+ $l20, $l21, $l22, $l23, $l24, $l25, $l26, $l27, $l28,
+ $l29, $l30, $l31, $l32, $l33, $l34, $l35, $l36, $l37,
+ $l38, $l39, $l40, $l41, $l42, $l43, $l44, $l45, $l46,
+ $l47, $l48, $l49, $l50, $l51, $l52, $l53, $l54, $l55,
+ $l56, $l57, $l58, $l59, $l60, $l61, $l62, $l63,
+ ]}, _, _>(
$vec0.0,
$vec1.0,
- [
- $l0, $l1, $l2, $l3, $l4, $l5, $l6, $l7, $l8, $l9, $l10,
- $l11, $l12, $l13, $l14, $l15, $l16, $l17, $l18, $l19,
- $l20, $l21, $l22, $l23, $l24, $l25, $l26, $l27, $l28,
- $l29, $l30, $l31, $l32, $l33, $l34, $l35, $l36, $l37,
- $l38, $l39, $l40, $l41, $l42, $l43, $l44, $l45, $l46,
- $l47, $l48, $l49, $l50, $l51, $l52, $l53, $l54, $l55,
- $l56, $l57, $l58, $l59, $l60, $l61, $l62, $l63,
- ],
))
}
}};
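
After this change the lane indices are passed as a const-generic argument to the `__shuffle_vector*` intrinsics, so they must be compile-time constants; the macro's surface syntax is unchanged:

```
use packed_simd_2::*;

fn main() {
    let x = i32x4::new(1, 2, 3, 4);
    let y = i32x4::new(5, 6, 7, 8);
    // Indices 0..4 select from `x`, indices 4..8 from `y`.
    let r: i32x4 = shuffle!(x, y, [0, 4, 1, 5]);
    assert_eq!(r, i32x4::new(1, 5, 2, 6));
}
```
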
diff --git a/vendor/packed_simd_2/src/api/slice/from_slice.rs b/vendor/packed_simd_2/src/api/slice/from_slice.rs
index 25082d1e6..50f3914f7 100644
--- a/vendor/packed_simd_2/src/api/slice/from_slice.rs
+++ b/vendor/packed_simd_2/src/api/slice/from_slice.rs
@@ -14,11 +14,7 @@ macro_rules! impl_slice_from_slice {
unsafe {
assert!(slice.len() >= $elem_count);
let target_ptr = slice.get_unchecked(0) as *const $elem_ty;
- assert_eq!(
- target_ptr
- .align_offset(crate::mem::align_of::<Self>()),
- 0
- );
+ assert_eq!(target_ptr.align_offset(crate::mem::align_of::<Self>()), 0);
Self::from_slice_aligned_unchecked(slice)
}
}
@@ -43,15 +39,10 @@ macro_rules! impl_slice_from_slice {
/// If `slice.len() < Self::lanes()` or `&slice[0]` is not aligned
/// to an `align_of::<Self>()` boundary, the behavior is undefined.
#[inline]
- pub unsafe fn from_slice_aligned_unchecked(
- slice: &[$elem_ty],
- ) -> Self {
+ pub unsafe fn from_slice_aligned_unchecked(slice: &[$elem_ty]) -> Self {
debug_assert!(slice.len() >= $elem_count);
let target_ptr = slice.get_unchecked(0) as *const $elem_ty;
- debug_assert_eq!(
- target_ptr.align_offset(crate::mem::align_of::<Self>()),
- 0
- );
+ debug_assert_eq!(target_ptr.align_offset(crate::mem::align_of::<Self>()), 0);
#[allow(clippy::cast_ptr_alignment)]
*(target_ptr as *const Self)
@@ -63,20 +54,13 @@ macro_rules! impl_slice_from_slice {
///
/// If `slice.len() < Self::lanes()` the behavior is undefined.
#[inline]
- pub unsafe fn from_slice_unaligned_unchecked(
- slice: &[$elem_ty],
- ) -> Self {
+ pub unsafe fn from_slice_unaligned_unchecked(slice: &[$elem_ty]) -> Self {
use crate::mem::size_of;
debug_assert!(slice.len() >= $elem_count);
- let target_ptr =
- slice.get_unchecked(0) as *const $elem_ty as *const u8;
+ let target_ptr = slice.get_unchecked(0) as *const $elem_ty as *const u8;
let mut x = Self::splat(0 as $elem_ty);
let self_ptr = &mut x as *mut Self as *mut u8;
- crate::ptr::copy_nonoverlapping(
- target_ptr,
- self_ptr,
- size_of::<Self>(),
- );
+ crate::ptr::copy_nonoverlapping(target_ptr, self_ptr, size_of::<Self>());
x
}
}
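
Of the constructors reformatted above, the safe unaligned variant is the usual entry point; a minimal sketch:

```
use packed_simd_2::*;

fn main() {
    let slice = [1.0_f32, 2., 3., 4., 5.];
    // Requires only slice.len() >= f32x4::lanes(); no alignment needed.
    let v = f32x4::from_slice_unaligned(&slice);
    assert_eq!(v, f32x4::new(1., 2., 3., 4.));
}
```
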
diff --git a/vendor/packed_simd_2/src/api/slice/write_to_slice.rs b/vendor/packed_simd_2/src/api/slice/write_to_slice.rs
index b634d98b9..dd04a2634 100644
--- a/vendor/packed_simd_2/src/api/slice/write_to_slice.rs
+++ b/vendor/packed_simd_2/src/api/slice/write_to_slice.rs
@@ -13,13 +13,8 @@ macro_rules! impl_slice_write_to_slice {
pub fn write_to_slice_aligned(self, slice: &mut [$elem_ty]) {
unsafe {
assert!(slice.len() >= $elem_count);
- let target_ptr =
- slice.get_unchecked_mut(0) as *mut $elem_ty;
- assert_eq!(
- target_ptr
- .align_offset(crate::mem::align_of::<Self>()),
- 0
- );
+ let target_ptr = slice.get_unchecked_mut(0) as *mut $elem_ty;
+ assert_eq!(target_ptr.align_offset(crate::mem::align_of::<Self>()), 0);
self.write_to_slice_aligned_unchecked(slice);
}
}
@@ -45,18 +40,13 @@ macro_rules! impl_slice_write_to_slice {
/// aligned to an `align_of::<Self>()` boundary, the behavior is
/// undefined.
#[inline]
- pub unsafe fn write_to_slice_aligned_unchecked(
- self, slice: &mut [$elem_ty],
- ) {
+ pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [$elem_ty]) {
debug_assert!(slice.len() >= $elem_count);
let target_ptr = slice.get_unchecked_mut(0) as *mut $elem_ty;
- debug_assert_eq!(
- target_ptr.align_offset(crate::mem::align_of::<Self>()),
- 0
- );
+ debug_assert_eq!(target_ptr.align_offset(crate::mem::align_of::<Self>()), 0);
- #[allow(clippy::cast_ptr_alignment)]
- #[allow(clippy::cast_ptr_alignment)]
+ #[allow(clippy::cast_ptr_alignment)]
+ #[allow(clippy::cast_ptr_alignment)]
#[allow(clippy::cast_ptr_alignment)]
#[allow(clippy::cast_ptr_alignment)]
*(target_ptr as *mut Self) = self;
@@ -68,18 +58,11 @@ macro_rules! impl_slice_write_to_slice {
///
/// If `slice.len() < Self::lanes()` the behavior is undefined.
#[inline]
- pub unsafe fn write_to_slice_unaligned_unchecked(
- self, slice: &mut [$elem_ty],
- ) {
+ pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [$elem_ty]) {
debug_assert!(slice.len() >= $elem_count);
- let target_ptr =
- slice.get_unchecked_mut(0) as *mut $elem_ty as *mut u8;
+ let target_ptr = slice.get_unchecked_mut(0) as *mut $elem_ty as *mut u8;
let self_ptr = &self as *const Self as *const u8;
- crate::ptr::copy_nonoverlapping(
- self_ptr,
- target_ptr,
- crate::mem::size_of::<Self>(),
- );
+ crate::ptr::copy_nonoverlapping(self_ptr, target_ptr, crate::mem::size_of::<Self>());
}
}
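
And the mirror image for stores; the safe unaligned variant asserts the length and needs no alignment:

```
use packed_simd_2::*;

fn main() {
    let v = i32x4::new(1, 2, 3, 4);
    let mut out = [0_i32; 4];
    // Safe unaligned store; panics if out.len() < i32x4::lanes().
    v.write_to_slice_unaligned(&mut out);
    assert_eq!(out, [1, 2, 3, 4]);
}
```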