summary | refs | log | tree | commit | diff | stats
path: root/vendor/packed_simd/src/api/into_bits
diff options
context:
space:
mode:
Diffstat (limited to 'vendor/packed_simd/src/api/into_bits')
-rw-r--r--  vendor/packed_simd/src/api/into_bits/arch_specific.rs  345
-rw-r--r--  vendor/packed_simd/src/api/into_bits/macros.rs          74
-rw-r--r--  vendor/packed_simd/src/api/into_bits/v128.rs           232
-rw-r--r--  vendor/packed_simd/src/api/into_bits/v16.rs              9
-rw-r--r--  vendor/packed_simd/src/api/into_bits/v256.rs           232
-rw-r--r--  vendor/packed_simd/src/api/into_bits/v32.rs             13
-rw-r--r--  vendor/packed_simd/src/api/into_bits/v512.rs           232
-rw-r--r--  vendor/packed_simd/src/api/into_bits/v64.rs             18
8 files changed, 1155 insertions, 0 deletions
diff --git a/vendor/packed_simd/src/api/into_bits/arch_specific.rs b/vendor/packed_simd/src/api/into_bits/arch_specific.rs
new file mode 100644
index 000000000..bfac91557
--- /dev/null
+++ b/vendor/packed_simd/src/api/into_bits/arch_specific.rs
@@ -0,0 +1,345 @@
+//! `FromBits` and `IntoBits` between portable vector types and the
+//! architecture-specific vector types.
+#[rustfmt::skip]
+
+// FIXME: MIPS FromBits/IntoBits
+
+#[allow(unused)]
+use crate::*;
+
+/// This macro implements FromBits for the portable and the architecture
+/// specific vector types.
+///
+/// The "leaf" case is at the bottom, and the most generic case is at the top.
+/// The generic case is split into smaller cases recursively.
+macro_rules! impl_arch {
+ ([$arch_head_i:ident[$arch_head_tt:tt]: $($arch_head_ty:ident),*],
+ $([$arch_tail_i:ident[$arch_tail_tt:tt]: $($arch_tail_ty:ident),*]),* |
+ from: $($from_ty:ident),* | into: $($into_ty:ident),* |
+ test: $test_tt:tt) => {
+ impl_arch!(
+ [$arch_head_i[$arch_head_tt]: $($arch_head_ty),*] |
+ from: $($from_ty),* |
+ into: $($into_ty),* |
+ test: $test_tt
+ );
+ impl_arch!(
+ $([$arch_tail_i[$arch_tail_tt]: $($arch_tail_ty),*]),* |
+ from: $($from_ty),* |
+ into: $($into_ty),* |
+ test: $test_tt
+ );
+ };
+ ([$arch:ident[$arch_tt:tt]: $($arch_ty:ident),*] |
+ from: $($from_ty:ident),* | into: $($into_ty:ident),* |
+ test: $test_tt:tt) => {
+ // note: if target is "arm", "+v7,+neon" must be enabled
+ // and the std library must be recompiled with them
+ #[cfg(any(
+ not(target_arch = "arm"),
+ all(target_feature = "v7", target_feature = "neon",
+ any(feature = "core_arch", libcore_neon)))
+ )]
+ // note: if target is "powerpc", "altivec" must be enabled
+ // and the std library must be recompiled with it
+ #[cfg(any(
+ not(target_arch = "powerpc"),
+ all(target_feature = "altivec", feature = "core_arch"),
+ ))]
+ #[cfg(target_arch = $arch_tt)]
+ use crate::arch::$arch::{
+ $($arch_ty),*
+ };
+
+ #[cfg(any(
+ not(target_arch = "arm"),
+ all(target_feature = "v7", target_feature = "neon",
+ any(feature = "core_arch", libcore_neon)))
+ )]
+ #[cfg(any(
+ not(target_arch = "powerpc"),
+ all(target_feature = "altivec", feature = "core_arch"),
+ ))]
+ #[cfg(target_arch = $arch_tt)]
+ impl_arch!($($arch_ty),* | $($from_ty),* | $($into_ty),* |
+ test: $test_tt);
+ };
+ ($arch_head:ident, $($arch_tail:ident),* | $($from_ty:ident),*
+ | $($into_ty:ident),* | test: $test_tt:tt) => {
+ impl_arch!($arch_head | $($from_ty),* | $($into_ty),* |
+ test: $test_tt);
+ impl_arch!($($arch_tail),* | $($from_ty),* | $($into_ty),* |
+ test: $test_tt);
+ };
+ ($arch_head:ident | $($from_ty:ident),* | $($into_ty:ident),* |
+ test: $test_tt:tt) => {
+ impl_from_bits!($arch_head[$test_tt]: $($from_ty),*);
+ impl_into_bits!($arch_head[$test_tt]: $($into_ty),*);
+ };
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Implementations for the 64-bit wide vector types:
+
+// FIXME: 64-bit single element types
+// FIXME: arm/aarch float16x4_t missing
+impl_arch!(
+ [
+ arm["arm"]: int8x8_t,
+ uint8x8_t,
+ poly8x8_t,
+ int16x4_t,
+ uint16x4_t,
+ poly16x4_t,
+ int32x2_t,
+ uint32x2_t,
+ float32x2_t,
+ int64x1_t,
+ uint64x1_t
+ ],
+ [
+ aarch64["aarch64"]: int8x8_t,
+ uint8x8_t,
+ poly8x8_t,
+ int16x4_t,
+ uint16x4_t,
+ poly16x4_t,
+ int32x2_t,
+ uint32x2_t,
+ float32x2_t,
+ int64x1_t,
+ uint64x1_t,
+ float64x1_t
+ ] | from: i8x8,
+ u8x8,
+ m8x8,
+ i16x4,
+ u16x4,
+ m16x4,
+ i32x2,
+ u32x2,
+ f32x2,
+ m32x2 | into: i8x8,
+ u8x8,
+ i16x4,
+ u16x4,
+ i32x2,
+ u32x2,
+ f32x2 | test: test_v64
+);
+
+////////////////////////////////////////////////////////////////////////////////
+// Implementations for the 128-bit wide vector types:
+
+// FIXME: arm/aarch float16x8_t missing
+// FIXME: ppc vector_pixel missing
+// FIXME: ppc64 vector_Float16 missing
+// FIXME: ppc64 vector_signed_long_long missing
+// FIXME: ppc64 vector_unsigned_long_long missing
+// FIXME: ppc64 vector_bool_long_long missing
+// FIXME: ppc64 vector_signed___int128 missing
+// FIXME: ppc64 vector_unsigned___int128 missing
+impl_arch!(
+ [x86["x86"]: __m128, __m128i, __m128d],
+ [x86_64["x86_64"]: __m128, __m128i, __m128d],
+ [
+ arm["arm"]: int8x16_t,
+ uint8x16_t,
+ poly8x16_t,
+ int16x8_t,
+ uint16x8_t,
+ poly16x8_t,
+ int32x4_t,
+ uint32x4_t,
+ float32x4_t,
+ int64x2_t,
+ uint64x2_t
+ ],
+ [
+ aarch64["aarch64"]: int8x16_t,
+ uint8x16_t,
+ poly8x16_t,
+ int16x8_t,
+ uint16x8_t,
+ poly16x8_t,
+ int32x4_t,
+ uint32x4_t,
+ float32x4_t,
+ int64x2_t,
+ uint64x2_t,
+ float64x2_t
+ ],
+ [
+ powerpc["powerpc"]: vector_signed_char,
+ vector_unsigned_char,
+ vector_signed_short,
+ vector_unsigned_short,
+ vector_signed_int,
+ vector_unsigned_int,
+ vector_float
+ ],
+ [
+ powerpc64["powerpc64"]: vector_signed_char,
+ vector_unsigned_char,
+ vector_signed_short,
+ vector_unsigned_short,
+ vector_signed_int,
+ vector_unsigned_int,
+ vector_float,
+ vector_signed_long,
+ vector_unsigned_long,
+ vector_double
+ ] | from: i8x16,
+ u8x16,
+ m8x16,
+ i16x8,
+ u16x8,
+ m16x8,
+ i32x4,
+ u32x4,
+ f32x4,
+ m32x4,
+ i64x2,
+ u64x2,
+ f64x2,
+ m64x2,
+ i128x1,
+ u128x1,
+ m128x1 | into: i8x16,
+ u8x16,
+ i16x8,
+ u16x8,
+ i32x4,
+ u32x4,
+ f32x4,
+ i64x2,
+ u64x2,
+ f64x2,
+ i128x1,
+ u128x1 | test: test_v128
+);
+
+impl_arch!(
+ [powerpc["powerpc"]: vector_bool_char],
+ [powerpc64["powerpc64"]: vector_bool_char] | from: m8x16,
+ m16x8,
+ m32x4,
+ m64x2,
+ m128x1 | into: i8x16,
+ u8x16,
+ i16x8,
+ u16x8,
+ i32x4,
+ u32x4,
+ f32x4,
+ i64x2,
+ u64x2,
+ f64x2,
+ i128x1,
+ u128x1,
+ // Masks:
+ m8x16 | test: test_v128
+);
+
+impl_arch!(
+ [powerpc["powerpc"]: vector_bool_short],
+ [powerpc64["powerpc64"]: vector_bool_short] | from: m16x8,
+ m32x4,
+ m64x2,
+ m128x1 | into: i8x16,
+ u8x16,
+ i16x8,
+ u16x8,
+ i32x4,
+ u32x4,
+ f32x4,
+ i64x2,
+ u64x2,
+ f64x2,
+ i128x1,
+ u128x1,
+ // Masks:
+ m8x16,
+ m16x8 | test: test_v128
+);
+
+impl_arch!(
+ [powerpc["powerpc"]: vector_bool_int],
+ [powerpc64["powerpc64"]: vector_bool_int] | from: m32x4,
+ m64x2,
+ m128x1 | into: i8x16,
+ u8x16,
+ i16x8,
+ u16x8,
+ i32x4,
+ u32x4,
+ f32x4,
+ i64x2,
+ u64x2,
+ f64x2,
+ i128x1,
+ u128x1,
+ // Masks:
+ m8x16,
+ m16x8,
+ m32x4 | test: test_v128
+);
+
+impl_arch!(
+ [powerpc64["powerpc64"]: vector_bool_long] | from: m64x2,
+ m128x1 | into: i8x16,
+ u8x16,
+ i16x8,
+ u16x8,
+ i32x4,
+ u32x4,
+ f32x4,
+ i64x2,
+ u64x2,
+ f64x2,
+ i128x1,
+ u128x1,
+ // Masks:
+ m8x16,
+ m16x8,
+ m32x4,
+ m64x2 | test: test_v128
+);
+
+////////////////////////////////////////////////////////////////////////////////
+// Implementations for the 256-bit wide vector types
+
+impl_arch!(
+ [x86["x86"]: __m256, __m256i, __m256d],
+ [x86_64["x86_64"]: __m256, __m256i, __m256d] | from: i8x32,
+ u8x32,
+ m8x32,
+ i16x16,
+ u16x16,
+ m16x16,
+ i32x8,
+ u32x8,
+ f32x8,
+ m32x8,
+ i64x4,
+ u64x4,
+ f64x4,
+ m64x4,
+ i128x2,
+ u128x2,
+ m128x2 | into: i8x32,
+ u8x32,
+ i16x16,
+ u16x16,
+ i32x8,
+ u32x8,
+ f32x8,
+ i64x4,
+ u64x4,
+ f64x4,
+ i128x2,
+ u128x2 | test: test_v256
+);
+
+////////////////////////////////////////////////////////////////////////////////
+// FIXME: Implementations for the 512-bit wide vector types
diff --git a/vendor/packed_simd/src/api/into_bits/macros.rs b/vendor/packed_simd/src/api/into_bits/macros.rs
new file mode 100644
index 000000000..265ab34ae
--- /dev/null
+++ b/vendor/packed_simd/src/api/into_bits/macros.rs
@@ -0,0 +1,74 @@
+//! Macros implementing `FromBits`
+
+macro_rules! impl_from_bits_ {
+ ($id:ident[$test_tt:tt]: $from_ty:ident) => {
+ impl crate::api::into_bits::FromBits<$from_ty> for $id {
+ #[inline]
+ fn from_bits(x: $from_ty) -> Self {
+ unsafe { crate::mem::transmute(x) }
+ }
+ }
+
+ test_if! {
+ $test_tt:
+ paste::item! {
+ pub mod [<$id _from_bits_ $from_ty>] {
+ use super::*;
+ #[cfg_attr(not(target_arch = "wasm32"), test)]
+ #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+ fn test() {
+ use crate::{
+ ptr::{read_unaligned},
+ mem::{size_of, zeroed}
+ };
+ use crate::IntoBits;
+ assert_eq!(size_of::<$id>(),
+ size_of::<$from_ty>());
+ // This is safe because we never create a reference to
+ // uninitialized memory:
+ let a: $from_ty = unsafe { zeroed() };
+
+ let b_0: $id = crate::FromBits::from_bits(a);
+ let b_1: $id = a.into_bits();
+
+ // Check that these are byte-wise equal, that is,
+ // that the bit patterns are identical:
+ for i in 0..size_of::<$id>() {
+ // This is safe because we only read initialized
+ // memory in bounds. Also, taking a reference to
+ // `b_i` is ok because the fields are initialized.
+ unsafe {
+ let b_0_v: u8 = read_unaligned(
+ (&b_0 as *const $id as *const u8)
+ .wrapping_add(i)
+ );
+ let b_1_v: u8 = read_unaligned(
+ (&b_1 as *const $id as *const u8)
+ .wrapping_add(i)
+ );
+ assert_eq!(b_0_v, b_1_v);
+ }
+ }
+ }
+ }
+ }
+ }
+ };
+}
+
+macro_rules! impl_from_bits {
+ ($id:ident[$test_tt:tt]: $($from_ty:ident),*) => {
+ $(
+ impl_from_bits_!($id[$test_tt]: $from_ty);
+ )*
+ }
+}
+
+#[allow(unused)]
+macro_rules! impl_into_bits {
+ ($id:ident[$test_tt:tt]: $($from_ty:ident),*) => {
+ $(
+ impl_from_bits_!($from_ty[$test_tt]: $id);
+ )*
+ }
+}
diff --git a/vendor/packed_simd/src/api/into_bits/v128.rs b/vendor/packed_simd/src/api/into_bits/v128.rs
new file mode 100644
index 000000000..639c09c2c
--- /dev/null
+++ b/vendor/packed_simd/src/api/into_bits/v128.rs
@@ -0,0 +1,232 @@
+//! `FromBits` and `IntoBits` implementations for portable 128-bit wide vectors
+#[rustfmt::skip]
+
+#[allow(unused)] // wasm_bindgen_test
+use crate::*;
+
+impl_from_bits!(
+ i8x16[test_v128]: u8x16,
+ m8x16,
+ i16x8,
+ u16x8,
+ m16x8,
+ i32x4,
+ u32x4,
+ f32x4,
+ m32x4,
+ i64x2,
+ u64x2,
+ f64x2,
+ m64x2,
+ i128x1,
+ u128x1,
+ m128x1
+);
+impl_from_bits!(
+ u8x16[test_v128]: i8x16,
+ m8x16,
+ i16x8,
+ u16x8,
+ m16x8,
+ i32x4,
+ u32x4,
+ f32x4,
+ m32x4,
+ i64x2,
+ u64x2,
+ f64x2,
+ m64x2,
+ i128x1,
+ u128x1,
+ m128x1
+);
+impl_from_bits!(m8x16[test_v128]: m16x8, m32x4, m64x2, m128x1);
+
+impl_from_bits!(
+ i16x8[test_v128]: i8x16,
+ u8x16,
+ m8x16,
+ u16x8,
+ m16x8,
+ i32x4,
+ u32x4,
+ f32x4,
+ m32x4,
+ i64x2,
+ u64x2,
+ f64x2,
+ m64x2,
+ i128x1,
+ u128x1,
+ m128x1
+);
+impl_from_bits!(
+ u16x8[test_v128]: i8x16,
+ u8x16,
+ m8x16,
+ i16x8,
+ m16x8,
+ i32x4,
+ u32x4,
+ f32x4,
+ m32x4,
+ i64x2,
+ u64x2,
+ f64x2,
+ m64x2,
+ i128x1,
+ u128x1,
+ m128x1
+);
+impl_from_bits!(m16x8[test_v128]: m32x4, m64x2, m128x1);
+
+impl_from_bits!(
+ i32x4[test_v128]: i8x16,
+ u8x16,
+ m8x16,
+ i16x8,
+ u16x8,
+ m16x8,
+ u32x4,
+ f32x4,
+ m32x4,
+ i64x2,
+ u64x2,
+ f64x2,
+ m64x2,
+ i128x1,
+ u128x1,
+ m128x1
+);
+impl_from_bits!(
+ u32x4[test_v128]: i8x16,
+ u8x16,
+ m8x16,
+ i16x8,
+ u16x8,
+ m16x8,
+ i32x4,
+ f32x4,
+ m32x4,
+ i64x2,
+ u64x2,
+ f64x2,
+ m64x2,
+ i128x1,
+ u128x1,
+ m128x1
+);
+impl_from_bits!(
+ f32x4[test_v128]: i8x16,
+ u8x16,
+ m8x16,
+ i16x8,
+ u16x8,
+ m16x8,
+ i32x4,
+ u32x4,
+ m32x4,
+ i64x2,
+ u64x2,
+ f64x2,
+ m64x2,
+ i128x1,
+ u128x1,
+ m128x1
+);
+impl_from_bits!(m32x4[test_v128]: m64x2, m128x1);
+
+impl_from_bits!(
+ i64x2[test_v128]: i8x16,
+ u8x16,
+ m8x16,
+ i16x8,
+ u16x8,
+ m16x8,
+ i32x4,
+ u32x4,
+ f32x4,
+ m32x4,
+ u64x2,
+ f64x2,
+ m64x2,
+ i128x1,
+ u128x1,
+ m128x1
+);
+impl_from_bits!(
+ u64x2[test_v128]: i8x16,
+ u8x16,
+ m8x16,
+ i16x8,
+ u16x8,
+ m16x8,
+ i32x4,
+ u32x4,
+ f32x4,
+ m32x4,
+ i64x2,
+ f64x2,
+ m64x2,
+ i128x1,
+ u128x1,
+ m128x1
+);
+impl_from_bits!(
+ f64x2[test_v128]: i8x16,
+ u8x16,
+ m8x16,
+ i16x8,
+ u16x8,
+ m16x8,
+ i32x4,
+ u32x4,
+ f32x4,
+ m32x4,
+ i64x2,
+ u64x2,
+ m64x2,
+ i128x1,
+ u128x1,
+ m128x1
+);
+impl_from_bits!(m64x2[test_v128]: m128x1);
+
+impl_from_bits!(
+ i128x1[test_v128]: i8x16,
+ u8x16,
+ m8x16,
+ i16x8,
+ u16x8,
+ m16x8,
+ i32x4,
+ u32x4,
+ f32x4,
+ m32x4,
+ i64x2,
+ u64x2,
+ f64x2,
+ m64x2,
+ u128x1,
+ m128x1
+);
+impl_from_bits!(
+ u128x1[test_v128]: i8x16,
+ u8x16,
+ m8x16,
+ i16x8,
+ u16x8,
+ m16x8,
+ i32x4,
+ u32x4,
+ f32x4,
+ m32x4,
+ i64x2,
+ u64x2,
+ f64x2,
+ m64x2,
+ i128x1,
+ m128x1
+);
+// note: m128x1 cannot be constructed from all the other masks bit patterns in
+// here
diff --git a/vendor/packed_simd/src/api/into_bits/v16.rs b/vendor/packed_simd/src/api/into_bits/v16.rs
new file mode 100644
index 000000000..e44d0e7f9
--- /dev/null
+++ b/vendor/packed_simd/src/api/into_bits/v16.rs
@@ -0,0 +1,9 @@
+//! `FromBits` and `IntoBits` implementations for portable 16-bit wide vectors
+#[rustfmt::skip]
+
+#[allow(unused)] // wasm_bindgen_test
+use crate::*;
+
+impl_from_bits!(i8x2[test_v16]: u8x2, m8x2);
+impl_from_bits!(u8x2[test_v16]: i8x2, m8x2);
+// note: m8x2 cannot be constructed from all i8x2 or u8x2 bit patterns
diff --git a/vendor/packed_simd/src/api/into_bits/v256.rs b/vendor/packed_simd/src/api/into_bits/v256.rs
new file mode 100644
index 000000000..e432bbbc9
--- /dev/null
+++ b/vendor/packed_simd/src/api/into_bits/v256.rs
@@ -0,0 +1,232 @@
+//! `FromBits` and `IntoBits` implementations for portable 256-bit wide vectors
+#[rustfmt::skip]
+
+#[allow(unused)] // wasm_bindgen_test
+use crate::*;
+
+impl_from_bits!(
+ i8x32[test_v256]: u8x32,
+ m8x32,
+ i16x16,
+ u16x16,
+ m16x16,
+ i32x8,
+ u32x8,
+ f32x8,
+ m32x8,
+ i64x4,
+ u64x4,
+ f64x4,
+ m64x4,
+ i128x2,
+ u128x2,
+ m128x2
+);
+impl_from_bits!(
+ u8x32[test_v256]: i8x32,
+ m8x32,
+ i16x16,
+ u16x16,
+ m16x16,
+ i32x8,
+ u32x8,
+ f32x8,
+ m32x8,
+ i64x4,
+ u64x4,
+ f64x4,
+ m64x4,
+ i128x2,
+ u128x2,
+ m128x2
+);
+impl_from_bits!(m8x32[test_v256]: m16x16, m32x8, m64x4, m128x2);
+
+impl_from_bits!(
+ i16x16[test_v256]: i8x32,
+ u8x32,
+ m8x32,
+ u16x16,
+ m16x16,
+ i32x8,
+ u32x8,
+ f32x8,
+ m32x8,
+ i64x4,
+ u64x4,
+ f64x4,
+ m64x4,
+ i128x2,
+ u128x2,
+ m128x2
+);
+impl_from_bits!(
+ u16x16[test_v256]: i8x32,
+ u8x32,
+ m8x32,
+ i16x16,
+ m16x16,
+ i32x8,
+ u32x8,
+ f32x8,
+ m32x8,
+ i64x4,
+ u64x4,
+ f64x4,
+ m64x4,
+ i128x2,
+ u128x2,
+ m128x2
+);
+impl_from_bits!(m16x16[test_v256]: m32x8, m64x4, m128x2);
+
+impl_from_bits!(
+ i32x8[test_v256]: i8x32,
+ u8x32,
+ m8x32,
+ i16x16,
+ u16x16,
+ m16x16,
+ u32x8,
+ f32x8,
+ m32x8,
+ i64x4,
+ u64x4,
+ f64x4,
+ m64x4,
+ i128x2,
+ u128x2,
+ m128x2
+);
+impl_from_bits!(
+ u32x8[test_v256]: i8x32,
+ u8x32,
+ m8x32,
+ i16x16,
+ u16x16,
+ m16x16,
+ i32x8,
+ f32x8,
+ m32x8,
+ i64x4,
+ u64x4,
+ f64x4,
+ m64x4,
+ i128x2,
+ u128x2,
+ m128x2
+);
+impl_from_bits!(
+ f32x8[test_v256]: i8x32,
+ u8x32,
+ m8x32,
+ i16x16,
+ u16x16,
+ m16x16,
+ i32x8,
+ u32x8,
+ m32x8,
+ i64x4,
+ u64x4,
+ f64x4,
+ m64x4,
+ i128x2,
+ u128x2,
+ m128x2
+);
+impl_from_bits!(m32x8[test_v256]: m64x4, m128x2);
+
+impl_from_bits!(
+ i64x4[test_v256]: i8x32,
+ u8x32,
+ m8x32,
+ i16x16,
+ u16x16,
+ m16x16,
+ i32x8,
+ u32x8,
+ f32x8,
+ m32x8,
+ u64x4,
+ f64x4,
+ m64x4,
+ i128x2,
+ u128x2,
+ m128x2
+);
+impl_from_bits!(
+ u64x4[test_v256]: i8x32,
+ u8x32,
+ m8x32,
+ i16x16,
+ u16x16,
+ m16x16,
+ i32x8,
+ u32x8,
+ f32x8,
+ m32x8,
+ i64x4,
+ f64x4,
+ m64x4,
+ i128x2,
+ u128x2,
+ m128x2
+);
+impl_from_bits!(
+ f64x4[test_v256]: i8x32,
+ u8x32,
+ m8x32,
+ i16x16,
+ u16x16,
+ m16x16,
+ i32x8,
+ u32x8,
+ f32x8,
+ m32x8,
+ i64x4,
+ u64x4,
+ m64x4,
+ i128x2,
+ u128x2,
+ m128x2
+);
+impl_from_bits!(m64x4[test_v256]: m128x2);
+
+impl_from_bits!(
+ i128x2[test_v256]: i8x32,
+ u8x32,
+ m8x32,
+ i16x16,
+ u16x16,
+ m16x16,
+ i32x8,
+ u32x8,
+ f32x8,
+ m32x8,
+ i64x4,
+ u64x4,
+ f64x4,
+ m64x4,
+ u128x2,
+ m128x2
+);
+impl_from_bits!(
+ u128x2[test_v256]: i8x32,
+ u8x32,
+ m8x32,
+ i16x16,
+ u16x16,
+ m16x16,
+ i32x8,
+ u32x8,
+ f32x8,
+ m32x8,
+ i64x4,
+ u64x4,
+ f64x4,
+ m64x4,
+ i128x2,
+ m128x2
+);
+// note: m128x2 cannot be constructed from all the other masks bit patterns in
+// here
diff --git a/vendor/packed_simd/src/api/into_bits/v32.rs b/vendor/packed_simd/src/api/into_bits/v32.rs
new file mode 100644
index 000000000..5dba38a17
--- /dev/null
+++ b/vendor/packed_simd/src/api/into_bits/v32.rs
@@ -0,0 +1,13 @@
+//! `FromBits` and `IntoBits` implementations for portable 32-bit wide vectors
+#[rustfmt::skip]
+
+#[allow(unused)] // wasm_bindgen_test
+use crate::*;
+
+impl_from_bits!(i8x4[test_v32]: u8x4, m8x4, i16x2, u16x2, m16x2);
+impl_from_bits!(u8x4[test_v32]: i8x4, m8x4, i16x2, u16x2, m16x2);
+impl_from_bits!(m8x4[test_v32]: m16x2);
+
+impl_from_bits!(i16x2[test_v32]: i8x4, u8x4, m8x4, u16x2, m16x2);
+impl_from_bits!(u16x2[test_v32]: i8x4, u8x4, m8x4, i16x2, m16x2);
+// note: m16x2 cannot be constructed from all m8x4 bit patterns
diff --git a/vendor/packed_simd/src/api/into_bits/v512.rs b/vendor/packed_simd/src/api/into_bits/v512.rs
new file mode 100644
index 000000000..f6e9bb8bf
--- /dev/null
+++ b/vendor/packed_simd/src/api/into_bits/v512.rs
@@ -0,0 +1,232 @@
+//! `FromBits` and `IntoBits` implementations for portable 512-bit wide vectors
+#[rustfmt::skip]
+
+#[allow(unused)] // wasm_bindgen_test
+use crate::*;
+
+impl_from_bits!(
+ i8x64[test_v512]: u8x64,
+ m8x64,
+ i16x32,
+ u16x32,
+ m16x32,
+ i32x16,
+ u32x16,
+ f32x16,
+ m32x16,
+ i64x8,
+ u64x8,
+ f64x8,
+ m64x8,
+ i128x4,
+ u128x4,
+ m128x4
+);
+impl_from_bits!(
+ u8x64[test_v512]: i8x64,
+ m8x64,
+ i16x32,
+ u16x32,
+ m16x32,
+ i32x16,
+ u32x16,
+ f32x16,
+ m32x16,
+ i64x8,
+ u64x8,
+ f64x8,
+ m64x8,
+ i128x4,
+ u128x4,
+ m128x4
+);
+impl_from_bits!(m8x64[test_v512]: m16x32, m32x16, m64x8, m128x4);
+
+impl_from_bits!(
+ i16x32[test_v512]: i8x64,
+ u8x64,
+ m8x64,
+ u16x32,
+ m16x32,
+ i32x16,
+ u32x16,
+ f32x16,
+ m32x16,
+ i64x8,
+ u64x8,
+ f64x8,
+ m64x8,
+ i128x4,
+ u128x4,
+ m128x4
+);
+impl_from_bits!(
+ u16x32[test_v512]: i8x64,
+ u8x64,
+ m8x64,
+ i16x32,
+ m16x32,
+ i32x16,
+ u32x16,
+ f32x16,
+ m32x16,
+ i64x8,
+ u64x8,
+ f64x8,
+ m64x8,
+ i128x4,
+ u128x4,
+ m128x4
+);
+impl_from_bits!(m16x32[test_v512]: m32x16, m64x8, m128x4);
+
+impl_from_bits!(
+ i32x16[test_v512]: i8x64,
+ u8x64,
+ m8x64,
+ i16x32,
+ u16x32,
+ m16x32,
+ u32x16,
+ f32x16,
+ m32x16,
+ i64x8,
+ u64x8,
+ f64x8,
+ m64x8,
+ i128x4,
+ u128x4,
+ m128x4
+);
+impl_from_bits!(
+ u32x16[test_v512]: i8x64,
+ u8x64,
+ m8x64,
+ i16x32,
+ u16x32,
+ m16x32,
+ i32x16,
+ f32x16,
+ m32x16,
+ i64x8,
+ u64x8,
+ f64x8,
+ m64x8,
+ i128x4,
+ u128x4,
+ m128x4
+);
+impl_from_bits!(
+ f32x16[test_v512]: i8x64,
+ u8x64,
+ m8x64,
+ i16x32,
+ u16x32,
+ m16x32,
+ i32x16,
+ u32x16,
+ m32x16,
+ i64x8,
+ u64x8,
+ f64x8,
+ m64x8,
+ i128x4,
+ u128x4,
+ m128x4
+);
+impl_from_bits!(m32x16[test_v512]: m64x8, m128x4);
+
+impl_from_bits!(
+ i64x8[test_v512]: i8x64,
+ u8x64,
+ m8x64,
+ i16x32,
+ u16x32,
+ m16x32,
+ i32x16,
+ u32x16,
+ f32x16,
+ m32x16,
+ u64x8,
+ f64x8,
+ m64x8,
+ i128x4,
+ u128x4,
+ m128x4
+);
+impl_from_bits!(
+ u64x8[test_v512]: i8x64,
+ u8x64,
+ m8x64,
+ i16x32,
+ u16x32,
+ m16x32,
+ i32x16,
+ u32x16,
+ f32x16,
+ m32x16,
+ i64x8,
+ f64x8,
+ m64x8,
+ i128x4,
+ u128x4,
+ m128x4
+);
+impl_from_bits!(
+ f64x8[test_v512]: i8x64,
+ u8x64,
+ m8x64,
+ i16x32,
+ u16x32,
+ m16x32,
+ i32x16,
+ u32x16,
+ f32x16,
+ m32x16,
+ i64x8,
+ u64x8,
+ m64x8,
+ i128x4,
+ u128x4,
+ m128x4
+);
+impl_from_bits!(m64x8[test_v512]: m128x4);
+
+impl_from_bits!(
+ i128x4[test_v512]: i8x64,
+ u8x64,
+ m8x64,
+ i16x32,
+ u16x32,
+ m16x32,
+ i32x16,
+ u32x16,
+ f32x16,
+ m32x16,
+ i64x8,
+ u64x8,
+ f64x8,
+ m64x8,
+ u128x4,
+ m128x4
+);
+impl_from_bits!(
+ u128x4[test_v512]: i8x64,
+ u8x64,
+ m8x64,
+ i16x32,
+ u16x32,
+ m16x32,
+ i32x16,
+ u32x16,
+ f32x16,
+ m32x16,
+ i64x8,
+ u64x8,
+ f64x8,
+ m64x8,
+ i128x4,
+ m128x4
+);
+// note: m128x4 cannot be constructed from all the other masks bit patterns in
+// here
diff --git a/vendor/packed_simd/src/api/into_bits/v64.rs b/vendor/packed_simd/src/api/into_bits/v64.rs
new file mode 100644
index 000000000..5b065f1bd
--- /dev/null
+++ b/vendor/packed_simd/src/api/into_bits/v64.rs
@@ -0,0 +1,18 @@
+//! `FromBits` and `IntoBits` implementations for portable 64-bit wide vectors
+#[rustfmt::skip]
+
+#[allow(unused)] // wasm_bindgen_test
+use crate::*;
+
+impl_from_bits!(i8x8[test_v64]: u8x8, m8x8, i16x4, u16x4, m16x4, i32x2, u32x2, f32x2, m32x2);
+impl_from_bits!(u8x8[test_v64]: i8x8, m8x8, i16x4, u16x4, m16x4, i32x2, u32x2, f32x2, m32x2);
+impl_from_bits!(m8x8[test_v64]: m16x4, m32x2);
+
+impl_from_bits!(i16x4[test_v64]: i8x8, u8x8, m8x8, u16x4, m16x4, i32x2, u32x2, f32x2, m32x2);
+impl_from_bits!(u16x4[test_v64]: i8x8, u8x8, m8x8, i16x4, m16x4, i32x2, u32x2, f32x2, m32x2);
+impl_from_bits!(m16x4[test_v64]: m32x2);
+
+impl_from_bits!(i32x2[test_v64]: i8x8, u8x8, m8x8, i16x4, u16x4, m16x4, u32x2, f32x2, m32x2);
+impl_from_bits!(u32x2[test_v64]: i8x8, u8x8, m8x8, i16x4, u16x4, m16x4, i32x2, f32x2, m32x2);
+impl_from_bits!(f32x2[test_v64]: i8x8, u8x8, m8x8, i16x4, u16x4, m16x4, i32x2, u32x2, m32x2);
+// note: m32x2 cannot be constructed from all m16x4 or m8x8 bit patterns