author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 02:49:42 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 02:49:42 +0000
commit     837b550238aa671a591ccf282dddeab29cadb206 (patch)
tree       914b6b8862bace72bd3245ca184d374b08d8a672  /library/stdarch/crates
parent     Adding debian version 1.70.0+dfsg2-1. (diff)
Merging upstream version 1.71.1+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'library/stdarch/crates')
-rw-r--r--  library/stdarch/crates/core_arch/avx512f.md |    2
-rw-r--r--  library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs |   42
-rw-r--r--  library/stdarch/crates/core_arch/src/arm_shared/crypto.rs |   98
-rw-r--r--  library/stdarch/crates/core_arch/src/arm_shared/neon/mod.rs |  928
-rw-r--r--  library/stdarch/crates/core_arch/src/powerpc/altivec.rs | 2148
-rw-r--r--  library/stdarch/crates/core_arch/src/powerpc/macros.rs |   88
-rw-r--r--  library/stdarch/crates/core_arch/src/powerpc/mod.rs |    5
-rw-r--r--  library/stdarch/crates/core_arch/src/powerpc/vsx.rs |   27
-rw-r--r--  library/stdarch/crates/core_arch/src/wasm32/simd128.rs |    4
-rw-r--r--  library/stdarch/crates/core_arch/src/x86/avx.rs |   14
-rw-r--r--  library/stdarch/crates/core_arch/src/x86/avx2.rs |    1
-rw-r--r--  library/stdarch/crates/core_arch/src/x86/avx512bw.rs |   12
-rw-r--r--  library/stdarch/crates/core_arch/src/x86/avx512f.rs |   38
-rw-r--r--  library/stdarch/crates/core_arch/src/x86/sse.rs |    6
-rw-r--r--  library/stdarch/crates/core_arch/src/x86/sse2.rs |   10
-rw-r--r--  library/stdarch/crates/core_arch/src/x86_64/avx512f.rs |   14
-rw-r--r--  library/stdarch/crates/std_detect/README.md |    9
-rw-r--r--  library/stdarch/crates/std_detect/src/detect/arch/arm.rs |    2
-rw-r--r--  library/stdarch/crates/std_detect/src/detect/os/freebsd/arm.rs |    7
-rw-r--r--  library/stdarch/crates/std_detect/src/detect/os/linux/arm.rs |   16
-rw-r--r--  library/stdarch/crates/std_detect/src/detect/os/linux/auxvec.rs |   39
-rw-r--r--  library/stdarch/crates/std_detect/tests/cpu-detection.rs |    1
-rw-r--r--  library/stdarch/crates/stdarch-gen/neon.spec |   42
-rw-r--r--  library/stdarch/crates/stdarch-test/src/disassembly.rs |    2
24 files changed, 2805 insertions, 750 deletions
diff --git a/library/stdarch/crates/core_arch/avx512f.md b/library/stdarch/crates/core_arch/avx512f.md
index 6cb6e6564..2435583bf 100644
--- a/library/stdarch/crates/core_arch/avx512f.md
+++ b/library/stdarch/crates/core_arch/avx512f.md
@@ -1519,7 +1519,7 @@
* [x] [`_mm512_mask_i32scatter_epi64`]
* [_] [`_mm_i32scatter_epi64`]//need i1
* [_] [`_mm_mask_i32scatter_epi64`] //need i1
- * [_] [`_mm256_i32scatter_epi64`] //need i1
+ * [x] [`_mm256_i32scatter_epi64`]
* [_] [`_mm256_mask_i32scatter_epi64`] //need i1
* [x] [`_mm512_i32scatter_ps`]
* [x] [`_mm512_mask_i32scatter_ps`]
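
For context, the checklist entry flipped above means `_mm256_i32scatter_epi64` is now implemented without the `//need i1` caveat. A minimal sketch of how the scatter behaves, assuming the stdarch signature `_mm256_i32scatter_epi64::<SCALE>(base: *mut u8, vindex: __m128i, a: __m256i)` and a CPU with avx512f+avx512vl:

    #[cfg(all(target_arch = "x86_64", target_feature = "avx512f", target_feature = "avx512vl"))]
    unsafe fn scatter_demo() {
        use core::arch::x86_64::*;
        let mut dst = [0i64; 8];
        // vindex lanes (0, 2, 4, 6) pick the destination slots; SCALE = 8 bytes per i64.
        let vindex = _mm_set_epi32(6, 4, 2, 0);
        let src = _mm256_set_epi64x(40, 30, 20, 10);
        _mm256_i32scatter_epi64::<8>(dst.as_mut_ptr() as *mut u8, vindex, src);
        assert_eq!(dst, [10, 0, 20, 0, 30, 0, 40, 0]);
    }
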
diff --git a/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs b/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs
index cb5413fa3..da7fdf8b1 100644
--- a/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs
+++ b/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs
@@ -1349,7 +1349,7 @@ pub unsafe fn vcgeq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s8)
#[inline]
#[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(cmge))]
+#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcgez_s8(a: int8x8_t) -> uint8x8_t {
let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
@@ -1361,7 +1361,7 @@ pub unsafe fn vcgez_s8(a: int8x8_t) -> uint8x8_t {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s8)
#[inline]
#[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(cmge))]
+#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcgezq_s8(a: int8x16_t) -> uint8x16_t {
let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
@@ -1373,7 +1373,7 @@ pub unsafe fn vcgezq_s8(a: int8x16_t) -> uint8x16_t {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s16)
#[inline]
#[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(cmge))]
+#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcgez_s16(a: int16x4_t) -> uint16x4_t {
let b: i16x4 = i16x4::new(0, 0, 0, 0);
@@ -1385,7 +1385,7 @@ pub unsafe fn vcgez_s16(a: int16x4_t) -> uint16x4_t {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s16)
#[inline]
#[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(cmge))]
+#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcgezq_s16(a: int16x8_t) -> uint16x8_t {
let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
@@ -1397,7 +1397,7 @@ pub unsafe fn vcgezq_s16(a: int16x8_t) -> uint16x8_t {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s32)
#[inline]
#[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(cmge))]
+#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcgez_s32(a: int32x2_t) -> uint32x2_t {
let b: i32x2 = i32x2::new(0, 0);
@@ -1409,7 +1409,7 @@ pub unsafe fn vcgez_s32(a: int32x2_t) -> uint32x2_t {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s32)
#[inline]
#[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(cmge))]
+#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcgezq_s32(a: int32x4_t) -> uint32x4_t {
let b: i32x4 = i32x4::new(0, 0, 0, 0);
@@ -1421,7 +1421,7 @@ pub unsafe fn vcgezq_s32(a: int32x4_t) -> uint32x4_t {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s64)
#[inline]
#[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(cmge))]
+#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcgez_s64(a: int64x1_t) -> uint64x1_t {
let b: i64x1 = i64x1::new(0);
@@ -1433,7 +1433,7 @@ pub unsafe fn vcgez_s64(a: int64x1_t) -> uint64x1_t {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s64)
#[inline]
#[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(cmge))]
+#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcgezq_s64(a: int64x2_t) -> uint64x2_t {
let b: i64x2 = i64x2::new(0, 0);
@@ -1493,7 +1493,7 @@ pub unsafe fn vcgezq_f64(a: float64x2_t) -> uint64x2_t {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezd_s64)
#[inline]
#[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(eor))]
+#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcgezd_s64(a: i64) -> u64 {
transmute(vcgez_s64(transmute(a)))
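
The hunks above only retarget the disassembly assertions (cmge -> cmgt for the vector forms, eor -> nop for the scalar vcgezd_s64); the compare-greater-than-or-equal-to-zero semantics are unchanged: each output lane is all-ones where the corresponding input lane is >= 0. A small semantics-level check, as a sketch for aarch64:

    #[cfg(target_arch = "aarch64")]
    unsafe fn vcgez_demo() {
        use core::arch::aarch64::*;
        let a = vld1_s8([-2i8, -1, 0, 1, 2, 3, -4, 5].as_ptr());
        let r = vcgez_s8(a);
        let mut out = [0u8; 8];
        vst1_u8(out.as_mut_ptr(), r);
        // Lanes where a >= 0 become 0xff; negative lanes become 0.
        assert_eq!(out, [0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xff]);
    }
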
@@ -11595,7 +11595,7 @@ pub unsafe fn vqdmlal_high_laneq_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_s16)
#[inline]
#[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sqdmull))]
+#[cfg_attr(test, assert_instr(sqdmlal))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmlalh_s16(a: i32, b: i16, c: i16) -> i32 {
let x: int32x4_t = vqdmull_s16(vdup_n_s16(b), vdup_n_s16(c));
@@ -11607,11 +11607,11 @@ pub unsafe fn vqdmlalh_s16(a: i32, b: i16, c: i16) -> i32 {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_s32)
#[inline]
#[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sqdmull))]
+#[cfg_attr(test, assert_instr(sqdmlal))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmlals_s32(a: i64, b: i32, c: i32) -> i64 {
- let x: int64x2_t = vqdmull_s32(vdup_n_s32(b), vdup_n_s32(c));
- vqaddd_s64(a, simd_extract(x, 0))
+ let x: i64 = vqaddd_s64(a, vqdmulls_s32(b, c));
+ x as i64
}
/// Signed saturating doubling multiply-add long
@@ -11645,7 +11645,7 @@ pub unsafe fn vqdmlalh_laneq_s16<const LANE: i32>(a: i32, b: i16, c: int16x8_t)
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sqdmull, LANE = 0))]
+#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmlals_lane_s32<const LANE: i32>(a: i64, b: i32, c: int32x2_t) -> i64 {
@@ -11658,7 +11658,7 @@ pub unsafe fn vqdmlals_lane_s32<const LANE: i32>(a: i64, b: i32, c: int32x2_t) -
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_laneq_s32)
#[inline]
#[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sqdmull, LANE = 0))]
+#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmlals_laneq_s32<const LANE: i32>(a: i64, b: i32, c: int32x4_t) -> i64 {
@@ -11793,7 +11793,7 @@ pub unsafe fn vqdmlsl_high_laneq_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_s16)
#[inline]
#[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sqdmull))]
+#[cfg_attr(test, assert_instr(sqdmlsl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmlslh_s16(a: i32, b: i16, c: i16) -> i32 {
let x: int32x4_t = vqdmull_s16(vdup_n_s16(b), vdup_n_s16(c));
@@ -11805,11 +11805,11 @@ pub unsafe fn vqdmlslh_s16(a: i32, b: i16, c: i16) -> i32 {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_s32)
#[inline]
#[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sqdmull))]
+#[cfg_attr(test, assert_instr(sqdmlsl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmlsls_s32(a: i64, b: i32, c: i32) -> i64 {
- let x: int64x2_t = vqdmull_s32(vdup_n_s32(b), vdup_n_s32(c));
- vqsubd_s64(a, simd_extract(x, 0))
+ let x: i64 = vqsubd_s64(a, vqdmulls_s32(b, c));
+ x as i64
}
/// Signed saturating doubling multiply-subtract long
@@ -11843,7 +11843,7 @@ pub unsafe fn vqdmlslh_laneq_s16<const LANE: i32>(a: i32, b: i16, c: int16x8_t)
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sqdmull, LANE = 0))]
+#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmlsls_lane_s32<const LANE: i32>(a: i64, b: i32, c: int32x2_t) -> i64 {
@@ -11856,7 +11856,7 @@ pub unsafe fn vqdmlsls_lane_s32<const LANE: i32>(a: i64, b: i32, c: int32x2_t) -
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_laneq_s32)
#[inline]
#[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sqdmull, LANE = 0))]
+#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmlsls_laneq_s32<const LANE: i32>(a: i64, b: i32, c: int32x4_t) -> i64 {
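
The new bodies of vqdmlals_s32 and vqdmlsls_s32 above route through the scalar vqdmulls_s32 plus a saturating add/subtract, which plausibly lets the compiler select the fused sqdmlal/sqdmlsl instructions the updated assertions expect. The arithmetic itself is still a saturating a +/- 2*b*c; a quick sketch for aarch64:

    #[cfg(target_arch = "aarch64")]
    unsafe fn qdmlal_demo() {
        use core::arch::aarch64::*;
        // vqdmlals_s32(a, b, c) computes a saturating a + (2 * b * c).
        assert_eq!(vqdmlals_s32(100, 3, 4), 124);
        // Both the doubling multiply and the accumulate saturate at the i64 limits.
        assert_eq!(vqdmlals_s32(i64::MAX, 1, 1), i64::MAX);
        // vqdmlsls_s32 is the subtracting counterpart: a - (2 * b * c).
        assert_eq!(vqdmlsls_s32(100, 3, 4), 76);
    }
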
diff --git a/library/stdarch/crates/core_arch/src/arm_shared/crypto.rs b/library/stdarch/crates/core_arch/src/arm_shared/crypto.rs
index 060091136..193f109bf 100644
--- a/library/stdarch/crates/core_arch/src/arm_shared/crypto.rs
+++ b/library/stdarch/crates/core_arch/src/arm_shared/crypto.rs
@@ -55,8 +55,8 @@ use stdarch_test::assert_instr;
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaeseq_u8)
#[inline]
-#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "aes"))]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "crypto,v8"))]
+#[target_feature(enable = "aes")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(test, assert_instr(aese))]
pub unsafe fn vaeseq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t {
vaeseq_u8_(data, key)
@@ -66,8 +66,8 @@ pub unsafe fn vaeseq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t {
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaesdq_u8)
#[inline]
-#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "aes"))]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "crypto,v8"))]
+#[target_feature(enable = "aes")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(test, assert_instr(aesd))]
pub unsafe fn vaesdq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t {
vaesdq_u8_(data, key)
@@ -77,8 +77,8 @@ pub unsafe fn vaesdq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t {
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaesmcq_u8)
#[inline]
-#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "aes"))]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "crypto,v8"))]
+#[target_feature(enable = "aes")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(test, assert_instr(aesmc))]
pub unsafe fn vaesmcq_u8(data: uint8x16_t) -> uint8x16_t {
vaesmcq_u8_(data)
@@ -88,8 +88,8 @@ pub unsafe fn vaesmcq_u8(data: uint8x16_t) -> uint8x16_t {
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaesimcq_u8)
#[inline]
-#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "aes"))]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "crypto,v8"))]
+#[target_feature(enable = "aes")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(test, assert_instr(aesimc))]
pub unsafe fn vaesimcq_u8(data: uint8x16_t) -> uint8x16_t {
vaesimcq_u8_(data)
@@ -99,8 +99,8 @@ pub unsafe fn vaesimcq_u8(data: uint8x16_t) -> uint8x16_t {
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1h_u32)
#[inline]
-#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "sha2"))]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "crypto,v8"))]
+#[target_feature(enable = "sha2")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(test, assert_instr(sha1h))]
pub unsafe fn vsha1h_u32(hash_e: u32) -> u32 {
vsha1h_u32_(hash_e)
@@ -110,8 +110,8 @@ pub unsafe fn vsha1h_u32(hash_e: u32) -> u32 {
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1cq_u32)
#[inline]
-#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "sha2"))]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "crypto,v8"))]
+#[target_feature(enable = "sha2")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(test, assert_instr(sha1c))]
pub unsafe fn vsha1cq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t {
vsha1cq_u32_(hash_abcd, hash_e, wk)
@@ -121,8 +121,8 @@ pub unsafe fn vsha1cq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) ->
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1mq_u32)
#[inline]
-#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "sha2"))]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "crypto,v8"))]
+#[target_feature(enable = "sha2")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(test, assert_instr(sha1m))]
pub unsafe fn vsha1mq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t {
vsha1mq_u32_(hash_abcd, hash_e, wk)
@@ -132,8 +132,8 @@ pub unsafe fn vsha1mq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) ->
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1pq_u32)
#[inline]
-#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "sha2"))]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "crypto,v8"))]
+#[target_feature(enable = "sha2")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(test, assert_instr(sha1p))]
pub unsafe fn vsha1pq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t {
vsha1pq_u32_(hash_abcd, hash_e, wk)
@@ -143,8 +143,8 @@ pub unsafe fn vsha1pq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) ->
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1su0q_u32)
#[inline]
-#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "sha2"))]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "crypto,v8"))]
+#[target_feature(enable = "sha2")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(test, assert_instr(sha1su0))]
pub unsafe fn vsha1su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t, w8_11: uint32x4_t) -> uint32x4_t {
vsha1su0q_u32_(w0_3, w4_7, w8_11)
@@ -154,8 +154,8 @@ pub unsafe fn vsha1su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t, w8_11: uint32x4_
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1su1q_u32)
#[inline]
-#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "sha2"))]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "crypto,v8"))]
+#[target_feature(enable = "sha2")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(test, assert_instr(sha1su1))]
pub unsafe fn vsha1su1q_u32(tw0_3: uint32x4_t, w12_15: uint32x4_t) -> uint32x4_t {
vsha1su1q_u32_(tw0_3, w12_15)
@@ -165,8 +165,8 @@ pub unsafe fn vsha1su1q_u32(tw0_3: uint32x4_t, w12_15: uint32x4_t) -> uint32x4_t
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha256hq_u32)
#[inline]
-#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "sha2"))]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "crypto,v8"))]
+#[target_feature(enable = "sha2")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(test, assert_instr(sha256h))]
pub unsafe fn vsha256hq_u32(
hash_abcd: uint32x4_t,
@@ -180,8 +180,8 @@ pub unsafe fn vsha256hq_u32(
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha256h2q_u32)
#[inline]
-#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "sha2"))]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "crypto,v8"))]
+#[target_feature(enable = "sha2")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(test, assert_instr(sha256h2))]
pub unsafe fn vsha256h2q_u32(
hash_efgh: uint32x4_t,
@@ -195,8 +195,8 @@ pub unsafe fn vsha256h2q_u32(
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha256su0q_u32)
#[inline]
-#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "sha2"))]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "crypto,v8"))]
+#[target_feature(enable = "sha2")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(test, assert_instr(sha256su0))]
pub unsafe fn vsha256su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t) -> uint32x4_t {
vsha256su0q_u32_(w0_3, w4_7)
@@ -206,8 +206,8 @@ pub unsafe fn vsha256su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t) -> uint32x4_t
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha256su1q_u32)
#[inline]
-#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "sha2"))]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "crypto,v8"))]
+#[target_feature(enable = "sha2")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(test, assert_instr(sha256su1))]
pub unsafe fn vsha256su1q_u32(
tw0_3: uint32x4_t,
@@ -224,8 +224,7 @@ mod tests {
use std::mem;
use stdarch_test::simd_test;
- #[cfg_attr(target_arch = "arm", simd_test(enable = "crypto"))]
- #[cfg_attr(not(target_arch = "arm"), simd_test(enable = "aes"))]
+ #[simd_test(enable = "aes")]
unsafe fn test_vaeseq_u8() {
let data = mem::transmute(u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8));
let key = mem::transmute(u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7));
@@ -238,8 +237,7 @@ mod tests {
);
}
- #[cfg_attr(target_arch = "arm", simd_test(enable = "crypto"))]
- #[cfg_attr(not(target_arch = "arm"), simd_test(enable = "aes"))]
+ #[simd_test(enable = "aes")]
unsafe fn test_vaesdq_u8() {
let data = mem::transmute(u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8));
let key = mem::transmute(u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7));
@@ -250,8 +248,7 @@ mod tests {
);
}
- #[cfg_attr(target_arch = "arm", simd_test(enable = "crypto"))]
- #[cfg_attr(not(target_arch = "arm"), simd_test(enable = "aes"))]
+ #[simd_test(enable = "aes")]
unsafe fn test_vaesmcq_u8() {
let data = mem::transmute(u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8));
let r: u8x16 = mem::transmute(vaesmcq_u8(data));
@@ -261,8 +258,7 @@ mod tests {
);
}
- #[cfg_attr(target_arch = "arm", simd_test(enable = "crypto"))]
- #[cfg_attr(not(target_arch = "arm"), simd_test(enable = "aes"))]
+ #[simd_test(enable = "aes")]
unsafe fn test_vaesimcq_u8() {
let data = mem::transmute(u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8));
let r: u8x16 = mem::transmute(vaesimcq_u8(data));
@@ -272,15 +268,13 @@ mod tests {
);
}
- #[cfg_attr(target_arch = "arm", simd_test(enable = "crypto"))]
- #[cfg_attr(not(target_arch = "arm"), simd_test(enable = "sha2"))]
+ #[simd_test(enable = "sha2")]
unsafe fn test_vsha1h_u32() {
assert_eq!(vsha1h_u32(0x1234), 0x048d);
assert_eq!(vsha1h_u32(0x5678), 0x159e);
}
- #[cfg_attr(target_arch = "arm", simd_test(enable = "crypto"))]
- #[cfg_attr(not(target_arch = "arm"), simd_test(enable = "sha2"))]
+ #[simd_test(enable = "sha2")]
unsafe fn test_vsha1su0q_u32() {
let r: u32x4 = mem::transmute(vsha1su0q_u32(
mem::transmute(u32x4::new(0x1234_u32, 0x5678_u32, 0x9abc_u32, 0xdef0_u32)),
@@ -290,8 +284,7 @@ mod tests {
assert_eq!(r, u32x4::new(0x9abc, 0xdef0, 0x1234, 0x5678));
}
- #[cfg_attr(target_arch = "arm", simd_test(enable = "crypto"))]
- #[cfg_attr(not(target_arch = "arm"), simd_test(enable = "sha2"))]
+ #[simd_test(enable = "sha2")]
unsafe fn test_vsha1su1q_u32() {
let r: u32x4 = mem::transmute(vsha1su1q_u32(
mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)),
@@ -303,8 +296,7 @@ mod tests {
);
}
- #[cfg_attr(target_arch = "arm", simd_test(enable = "crypto"))]
- #[cfg_attr(not(target_arch = "arm"), simd_test(enable = "sha2"))]
+ #[simd_test(enable = "sha2")]
unsafe fn test_vsha1cq_u32() {
let r: u32x4 = mem::transmute(vsha1cq_u32(
mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)),
@@ -317,8 +309,7 @@ mod tests {
);
}
- #[cfg_attr(target_arch = "arm", simd_test(enable = "crypto"))]
- #[cfg_attr(not(target_arch = "arm"), simd_test(enable = "sha2"))]
+ #[simd_test(enable = "sha2")]
unsafe fn test_vsha1pq_u32() {
let r: u32x4 = mem::transmute(vsha1pq_u32(
mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)),
@@ -331,8 +322,7 @@ mod tests {
);
}
- #[cfg_attr(target_arch = "arm", simd_test(enable = "crypto"))]
- #[cfg_attr(not(target_arch = "arm"), simd_test(enable = "sha2"))]
+ #[simd_test(enable = "sha2")]
unsafe fn test_vsha1mq_u32() {
let r: u32x4 = mem::transmute(vsha1mq_u32(
mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)),
@@ -345,8 +335,7 @@ mod tests {
);
}
- #[cfg_attr(target_arch = "arm", simd_test(enable = "crypto"))]
- #[cfg_attr(not(target_arch = "arm"), simd_test(enable = "sha2"))]
+ #[simd_test(enable = "sha2")]
unsafe fn test_vsha256hq_u32() {
let r: u32x4 = mem::transmute(vsha256hq_u32(
mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)),
@@ -359,8 +348,7 @@ mod tests {
);
}
- #[cfg_attr(target_arch = "arm", simd_test(enable = "crypto"))]
- #[cfg_attr(not(target_arch = "arm"), simd_test(enable = "sha2"))]
+ #[simd_test(enable = "sha2")]
unsafe fn test_vsha256h2q_u32() {
let r: u32x4 = mem::transmute(vsha256h2q_u32(
mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)),
@@ -373,8 +361,7 @@ mod tests {
);
}
- #[cfg_attr(target_arch = "arm", simd_test(enable = "crypto"))]
- #[cfg_attr(not(target_arch = "arm"), simd_test(enable = "sha2"))]
+ #[simd_test(enable = "sha2")]
unsafe fn test_vsha256su0q_u32() {
let r: u32x4 = mem::transmute(vsha256su0q_u32(
mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)),
@@ -386,8 +373,7 @@ mod tests {
);
}
- #[cfg_attr(target_arch = "arm", simd_test(enable = "crypto"))]
- #[cfg_attr(not(target_arch = "arm"), simd_test(enable = "sha2"))]
+ #[simd_test(enable = "sha2")]
unsafe fn test_vsha256su1q_u32() {
let r: u32x4 = mem::transmute(vsha256su1q_u32(
mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)),
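
After this change the crypto intrinsics are gated on the specific aes/sha2 target features on both arm and aarch64 (arm additionally requiring v8) rather than arm using the umbrella crypto feature, and the tests follow suit. A minimal caller sketch pairing one of these intrinsics with runtime feature detection on aarch64 (the aes_round wrapper is a hypothetical example, not part of this crate):

    #[cfg(target_arch = "aarch64")]
    fn aes_round(data: [u8; 16], key: [u8; 16]) -> Option<[u8; 16]> {
        if std::arch::is_aarch64_feature_detected!("aes") {
            // SAFETY: the `aes` feature was verified at runtime just above.
            unsafe {
                use core::arch::aarch64::*;
                let r = vaeseq_u8(vld1q_u8(data.as_ptr()), vld1q_u8(key.as_ptr()));
                let mut out = [0u8; 16];
                vst1q_u8(out.as_mut_ptr(), r);
                Some(out)
            }
        } else {
            None
        }
    }
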
diff --git a/library/stdarch/crates/core_arch/src/arm_shared/neon/mod.rs b/library/stdarch/crates/core_arch/src/arm_shared/neon/mod.rs
index 923265966..8a8f4febf 100644
--- a/library/stdarch/crates/core_arch/src/arm_shared/neon/mod.rs
+++ b/library/stdarch/crates/core_arch/src/arm_shared/neon/mod.rs
@@ -109,7 +109,7 @@ types! {
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct int8x8x2_t(pub int8x8_t, pub int8x8_t);
@@ -117,7 +117,7 @@ pub struct int8x8x2_t(pub int8x8_t, pub int8x8_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct int8x8x3_t(pub int8x8_t, pub int8x8_t, pub int8x8_t);
@@ -125,7 +125,7 @@ pub struct int8x8x3_t(pub int8x8_t, pub int8x8_t, pub int8x8_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct int8x8x4_t(pub int8x8_t, pub int8x8_t, pub int8x8_t, pub int8x8_t);
@@ -134,7 +134,7 @@ pub struct int8x8x4_t(pub int8x8_t, pub int8x8_t, pub int8x8_t, pub int8x8_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct int8x16x2_t(pub int8x16_t, pub int8x16_t);
@@ -142,7 +142,7 @@ pub struct int8x16x2_t(pub int8x16_t, pub int8x16_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct int8x16x3_t(pub int8x16_t, pub int8x16_t, pub int8x16_t);
@@ -150,7 +150,7 @@ pub struct int8x16x3_t(pub int8x16_t, pub int8x16_t, pub int8x16_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct int8x16x4_t(pub int8x16_t, pub int8x16_t, pub int8x16_t, pub int8x16_t);
@@ -159,7 +159,7 @@ pub struct int8x16x4_t(pub int8x16_t, pub int8x16_t, pub int8x16_t, pub int8x16_
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct uint8x8x2_t(pub uint8x8_t, pub uint8x8_t);
@@ -167,7 +167,7 @@ pub struct uint8x8x2_t(pub uint8x8_t, pub uint8x8_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct uint8x8x3_t(pub uint8x8_t, pub uint8x8_t, pub uint8x8_t);
@@ -175,7 +175,7 @@ pub struct uint8x8x3_t(pub uint8x8_t, pub uint8x8_t, pub uint8x8_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct uint8x8x4_t(pub uint8x8_t, pub uint8x8_t, pub uint8x8_t, pub uint8x8_t);
@@ -184,7 +184,7 @@ pub struct uint8x8x4_t(pub uint8x8_t, pub uint8x8_t, pub uint8x8_t, pub uint8x8_
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct uint8x16x2_t(pub uint8x16_t, pub uint8x16_t);
@@ -192,7 +192,7 @@ pub struct uint8x16x2_t(pub uint8x16_t, pub uint8x16_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct uint8x16x3_t(pub uint8x16_t, pub uint8x16_t, pub uint8x16_t);
@@ -200,7 +200,7 @@ pub struct uint8x16x3_t(pub uint8x16_t, pub uint8x16_t, pub uint8x16_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct uint8x16x4_t(
@@ -214,7 +214,7 @@ pub struct uint8x16x4_t(
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct poly8x8x2_t(pub poly8x8_t, pub poly8x8_t);
@@ -222,7 +222,7 @@ pub struct poly8x8x2_t(pub poly8x8_t, pub poly8x8_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct poly8x8x3_t(pub poly8x8_t, pub poly8x8_t, pub poly8x8_t);
@@ -230,7 +230,7 @@ pub struct poly8x8x3_t(pub poly8x8_t, pub poly8x8_t, pub poly8x8_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct poly8x8x4_t(pub poly8x8_t, pub poly8x8_t, pub poly8x8_t, pub poly8x8_t);
@@ -239,7 +239,7 @@ pub struct poly8x8x4_t(pub poly8x8_t, pub poly8x8_t, pub poly8x8_t, pub poly8x8_
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct poly8x16x2_t(pub poly8x16_t, pub poly8x16_t);
@@ -247,7 +247,7 @@ pub struct poly8x16x2_t(pub poly8x16_t, pub poly8x16_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct poly8x16x3_t(pub poly8x16_t, pub poly8x16_t, pub poly8x16_t);
@@ -255,7 +255,7 @@ pub struct poly8x16x3_t(pub poly8x16_t, pub poly8x16_t, pub poly8x16_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct poly8x16x4_t(
@@ -269,7 +269,7 @@ pub struct poly8x16x4_t(
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct int16x4x2_t(pub int16x4_t, pub int16x4_t);
@@ -277,7 +277,7 @@ pub struct int16x4x2_t(pub int16x4_t, pub int16x4_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct int16x4x3_t(pub int16x4_t, pub int16x4_t, pub int16x4_t);
@@ -285,7 +285,7 @@ pub struct int16x4x3_t(pub int16x4_t, pub int16x4_t, pub int16x4_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct int16x4x4_t(pub int16x4_t, pub int16x4_t, pub int16x4_t, pub int16x4_t);
@@ -294,7 +294,7 @@ pub struct int16x4x4_t(pub int16x4_t, pub int16x4_t, pub int16x4_t, pub int16x4_
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct int16x8x2_t(pub int16x8_t, pub int16x8_t);
@@ -302,7 +302,7 @@ pub struct int16x8x2_t(pub int16x8_t, pub int16x8_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct int16x8x3_t(pub int16x8_t, pub int16x8_t, pub int16x8_t);
@@ -310,7 +310,7 @@ pub struct int16x8x3_t(pub int16x8_t, pub int16x8_t, pub int16x8_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct int16x8x4_t(pub int16x8_t, pub int16x8_t, pub int16x8_t, pub int16x8_t);
@@ -319,7 +319,7 @@ pub struct int16x8x4_t(pub int16x8_t, pub int16x8_t, pub int16x8_t, pub int16x8_
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct uint16x4x2_t(pub uint16x4_t, pub uint16x4_t);
@@ -327,7 +327,7 @@ pub struct uint16x4x2_t(pub uint16x4_t, pub uint16x4_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct uint16x4x3_t(pub uint16x4_t, pub uint16x4_t, pub uint16x4_t);
@@ -335,7 +335,7 @@ pub struct uint16x4x3_t(pub uint16x4_t, pub uint16x4_t, pub uint16x4_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct uint16x4x4_t(
@@ -349,7 +349,7 @@ pub struct uint16x4x4_t(
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct uint16x8x2_t(pub uint16x8_t, pub uint16x8_t);
@@ -357,7 +357,7 @@ pub struct uint16x8x2_t(pub uint16x8_t, pub uint16x8_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct uint16x8x3_t(pub uint16x8_t, pub uint16x8_t, pub uint16x8_t);
@@ -365,7 +365,7 @@ pub struct uint16x8x3_t(pub uint16x8_t, pub uint16x8_t, pub uint16x8_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct uint16x8x4_t(
@@ -379,7 +379,7 @@ pub struct uint16x8x4_t(
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct poly16x4x2_t(pub poly16x4_t, pub poly16x4_t);
@@ -387,7 +387,7 @@ pub struct poly16x4x2_t(pub poly16x4_t, pub poly16x4_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct poly16x4x3_t(pub poly16x4_t, pub poly16x4_t, pub poly16x4_t);
@@ -395,7 +395,7 @@ pub struct poly16x4x3_t(pub poly16x4_t, pub poly16x4_t, pub poly16x4_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct poly16x4x4_t(
@@ -409,7 +409,7 @@ pub struct poly16x4x4_t(
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct poly16x8x2_t(pub poly16x8_t, pub poly16x8_t);
@@ -417,7 +417,7 @@ pub struct poly16x8x2_t(pub poly16x8_t, pub poly16x8_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct poly16x8x3_t(pub poly16x8_t, pub poly16x8_t, pub poly16x8_t);
@@ -425,7 +425,7 @@ pub struct poly16x8x3_t(pub poly16x8_t, pub poly16x8_t, pub poly16x8_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct poly16x8x4_t(
@@ -439,7 +439,7 @@ pub struct poly16x8x4_t(
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct int32x2x2_t(pub int32x2_t, pub int32x2_t);
@@ -447,7 +447,7 @@ pub struct int32x2x2_t(pub int32x2_t, pub int32x2_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct int32x2x3_t(pub int32x2_t, pub int32x2_t, pub int32x2_t);
@@ -455,7 +455,7 @@ pub struct int32x2x3_t(pub int32x2_t, pub int32x2_t, pub int32x2_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct int32x2x4_t(pub int32x2_t, pub int32x2_t, pub int32x2_t, pub int32x2_t);
@@ -464,7 +464,7 @@ pub struct int32x2x4_t(pub int32x2_t, pub int32x2_t, pub int32x2_t, pub int32x2_
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct int32x4x2_t(pub int32x4_t, pub int32x4_t);
@@ -472,7 +472,7 @@ pub struct int32x4x2_t(pub int32x4_t, pub int32x4_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct int32x4x3_t(pub int32x4_t, pub int32x4_t, pub int32x4_t);
@@ -480,7 +480,7 @@ pub struct int32x4x3_t(pub int32x4_t, pub int32x4_t, pub int32x4_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct int32x4x4_t(pub int32x4_t, pub int32x4_t, pub int32x4_t, pub int32x4_t);
@@ -489,7 +489,7 @@ pub struct int32x4x4_t(pub int32x4_t, pub int32x4_t, pub int32x4_t, pub int32x4_
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct uint32x2x2_t(pub uint32x2_t, pub uint32x2_t);
@@ -497,7 +497,7 @@ pub struct uint32x2x2_t(pub uint32x2_t, pub uint32x2_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct uint32x2x3_t(pub uint32x2_t, pub uint32x2_t, pub uint32x2_t);
@@ -505,7 +505,7 @@ pub struct uint32x2x3_t(pub uint32x2_t, pub uint32x2_t, pub uint32x2_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct uint32x2x4_t(
@@ -519,7 +519,7 @@ pub struct uint32x2x4_t(
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct uint32x4x2_t(pub uint32x4_t, pub uint32x4_t);
@@ -527,7 +527,7 @@ pub struct uint32x4x2_t(pub uint32x4_t, pub uint32x4_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct uint32x4x3_t(pub uint32x4_t, pub uint32x4_t, pub uint32x4_t);
@@ -535,7 +535,7 @@ pub struct uint32x4x3_t(pub uint32x4_t, pub uint32x4_t, pub uint32x4_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct uint32x4x4_t(
@@ -549,7 +549,7 @@ pub struct uint32x4x4_t(
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct float32x2x2_t(pub float32x2_t, pub float32x2_t);
@@ -557,7 +557,7 @@ pub struct float32x2x2_t(pub float32x2_t, pub float32x2_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct float32x2x3_t(pub float32x2_t, pub float32x2_t, pub float32x2_t);
@@ -565,7 +565,7 @@ pub struct float32x2x3_t(pub float32x2_t, pub float32x2_t, pub float32x2_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct float32x2x4_t(
@@ -579,7 +579,7 @@ pub struct float32x2x4_t(
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct float32x4x2_t(pub float32x4_t, pub float32x4_t);
@@ -587,7 +587,7 @@ pub struct float32x4x2_t(pub float32x4_t, pub float32x4_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct float32x4x3_t(pub float32x4_t, pub float32x4_t, pub float32x4_t);
@@ -595,7 +595,7 @@ pub struct float32x4x3_t(pub float32x4_t, pub float32x4_t, pub float32x4_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct float32x4x4_t(
@@ -609,7 +609,7 @@ pub struct float32x4x4_t(
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct int64x1x2_t(pub int64x1_t, pub int64x1_t);
@@ -617,7 +617,7 @@ pub struct int64x1x2_t(pub int64x1_t, pub int64x1_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct int64x1x3_t(pub int64x1_t, pub int64x1_t, pub int64x1_t);
@@ -625,7 +625,7 @@ pub struct int64x1x3_t(pub int64x1_t, pub int64x1_t, pub int64x1_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct int64x1x4_t(pub int64x1_t, pub int64x1_t, pub int64x1_t, pub int64x1_t);
@@ -634,7 +634,7 @@ pub struct int64x1x4_t(pub int64x1_t, pub int64x1_t, pub int64x1_t, pub int64x1_
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct int64x2x2_t(pub int64x2_t, pub int64x2_t);
@@ -642,7 +642,7 @@ pub struct int64x2x2_t(pub int64x2_t, pub int64x2_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct int64x2x3_t(pub int64x2_t, pub int64x2_t, pub int64x2_t);
@@ -650,7 +650,7 @@ pub struct int64x2x3_t(pub int64x2_t, pub int64x2_t, pub int64x2_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct int64x2x4_t(pub int64x2_t, pub int64x2_t, pub int64x2_t, pub int64x2_t);
@@ -659,7 +659,7 @@ pub struct int64x2x4_t(pub int64x2_t, pub int64x2_t, pub int64x2_t, pub int64x2_
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct uint64x1x2_t(pub uint64x1_t, pub uint64x1_t);
@@ -667,7 +667,7 @@ pub struct uint64x1x2_t(pub uint64x1_t, pub uint64x1_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct uint64x1x3_t(pub uint64x1_t, pub uint64x1_t, pub uint64x1_t);
@@ -675,7 +675,7 @@ pub struct uint64x1x3_t(pub uint64x1_t, pub uint64x1_t, pub uint64x1_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct uint64x1x4_t(
@@ -689,7 +689,7 @@ pub struct uint64x1x4_t(
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct uint64x2x2_t(pub uint64x2_t, pub uint64x2_t);
@@ -697,7 +697,7 @@ pub struct uint64x2x2_t(pub uint64x2_t, pub uint64x2_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct uint64x2x3_t(pub uint64x2_t, pub uint64x2_t, pub uint64x2_t);
@@ -705,7 +705,7 @@ pub struct uint64x2x3_t(pub uint64x2_t, pub uint64x2_t, pub uint64x2_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct uint64x2x4_t(
@@ -719,7 +719,7 @@ pub struct uint64x2x4_t(
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct poly64x1x2_t(pub poly64x1_t, pub poly64x1_t);
@@ -727,7 +727,7 @@ pub struct poly64x1x2_t(pub poly64x1_t, pub poly64x1_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct poly64x1x3_t(pub poly64x1_t, pub poly64x1_t, pub poly64x1_t);
@@ -735,7 +735,7 @@ pub struct poly64x1x3_t(pub poly64x1_t, pub poly64x1_t, pub poly64x1_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct poly64x1x4_t(
@@ -749,7 +749,7 @@ pub struct poly64x1x4_t(
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct poly64x2x2_t(pub poly64x2_t, pub poly64x2_t);
@@ -757,7 +757,7 @@ pub struct poly64x2x2_t(pub poly64x2_t, pub poly64x2_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct poly64x2x3_t(pub poly64x2_t, pub poly64x2_t, pub poly64x2_t);
@@ -765,7 +765,7 @@ pub struct poly64x2x3_t(pub poly64x2_t, pub poly64x2_t, pub poly64x2_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct poly64x2x4_t(
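
The long run of hunks above widens the stability gate on the NEON tuple types from target_arch = "aarch64" to not(target_arch = "arm"), presumably so the attribute also applies in configurations other than aarch64 proper; the types themselves stay plain #[repr(C)] tuples of vector fields with public members, so they can be built and destructured directly. A small sketch:

    #[cfg(target_arch = "aarch64")]
    unsafe fn pair_demo() {
        use core::arch::aarch64::*;
        // int8x8x2_t is just two public int8x8_t fields.
        let pair = int8x8x2_t(vdup_n_s8(1), vdup_n_s8(2));
        let int8x8x2_t(lo, hi) = pair;
        assert_eq!(vget_lane_s8::<0>(lo), 1);
        assert_eq!(vget_lane_s8::<0>(hi), 2);
    }
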
@@ -971,7 +971,7 @@ extern "unadjusted" {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", LANE = 7))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 7))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1_lane_s8<const LANE: i32>(ptr: *const i8, src: int8x8_t) -> int8x8_t {
@@ -987,7 +987,7 @@ pub unsafe fn vld1_lane_s8<const LANE: i32>(ptr: *const i8, src: int8x8_t) -> in
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", LANE = 15))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 15))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1q_lane_s8<const LANE: i32>(ptr: *const i8, src: int8x16_t) -> int8x16_t {
@@ -1003,7 +1003,7 @@ pub unsafe fn vld1q_lane_s8<const LANE: i32>(ptr: *const i8, src: int8x16_t) ->
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", LANE = 3))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 3))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1_lane_s16<const LANE: i32>(ptr: *const i16, src: int16x4_t) -> int16x4_t {
@@ -1019,7 +1019,7 @@ pub unsafe fn vld1_lane_s16<const LANE: i32>(ptr: *const i16, src: int16x4_t) ->
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", LANE = 7))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 7))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1q_lane_s16<const LANE: i32>(ptr: *const i16, src: int16x8_t) -> int16x8_t {
@@ -1035,7 +1035,7 @@ pub unsafe fn vld1q_lane_s16<const LANE: i32>(ptr: *const i16, src: int16x8_t) -
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 1))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1_lane_s32<const LANE: i32>(ptr: *const i32, src: int32x2_t) -> int32x2_t {
@@ -1051,7 +1051,7 @@ pub unsafe fn vld1_lane_s32<const LANE: i32>(ptr: *const i32, src: int32x2_t) ->
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", LANE = 3))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 3))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1q_lane_s32<const LANE: i32>(ptr: *const i32, src: int32x4_t) -> int32x4_t {
@@ -1067,7 +1067,7 @@ pub unsafe fn vld1q_lane_s32<const LANE: i32>(ptr: *const i32, src: int32x4_t) -
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr", LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ldr, LANE = 0))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1_lane_s64<const LANE: i32>(ptr: *const i64, src: int64x1_t) -> int64x1_t {
@@ -1083,7 +1083,7 @@ pub unsafe fn vld1_lane_s64<const LANE: i32>(ptr: *const i64, src: int64x1_t) ->
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr", LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 1))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1q_lane_s64<const LANE: i32>(ptr: *const i64, src: int64x2_t) -> int64x2_t {
@@ -1099,7 +1099,7 @@ pub unsafe fn vld1q_lane_s64<const LANE: i32>(ptr: *const i64, src: int64x2_t) -
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", LANE = 7))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 7))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1_lane_u8<const LANE: i32>(ptr: *const u8, src: uint8x8_t) -> uint8x8_t {
@@ -1115,7 +1115,7 @@ pub unsafe fn vld1_lane_u8<const LANE: i32>(ptr: *const u8, src: uint8x8_t) -> u
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", LANE = 15))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 15))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1q_lane_u8<const LANE: i32>(ptr: *const u8, src: uint8x16_t) -> uint8x16_t {
@@ -1131,7 +1131,7 @@ pub unsafe fn vld1q_lane_u8<const LANE: i32>(ptr: *const u8, src: uint8x16_t) ->
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", LANE = 3))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 3))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1_lane_u16<const LANE: i32>(ptr: *const u16, src: uint16x4_t) -> uint16x4_t {
@@ -1147,7 +1147,7 @@ pub unsafe fn vld1_lane_u16<const LANE: i32>(ptr: *const u16, src: uint16x4_t) -
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", LANE = 7))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 7))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1q_lane_u16<const LANE: i32>(ptr: *const u16, src: uint16x8_t) -> uint16x8_t {
@@ -1163,7 +1163,7 @@ pub unsafe fn vld1q_lane_u16<const LANE: i32>(ptr: *const u16, src: uint16x8_t)
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 1))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1_lane_u32<const LANE: i32>(ptr: *const u32, src: uint32x2_t) -> uint32x2_t {
@@ -1179,7 +1179,7 @@ pub unsafe fn vld1_lane_u32<const LANE: i32>(ptr: *const u32, src: uint32x2_t) -
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", LANE = 3))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 3))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1q_lane_u32<const LANE: i32>(ptr: *const u32, src: uint32x4_t) -> uint32x4_t {
@@ -1195,7 +1195,7 @@ pub unsafe fn vld1q_lane_u32<const LANE: i32>(ptr: *const u32, src: uint32x4_t)
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr", LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ldr, LANE = 0))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1_lane_u64<const LANE: i32>(ptr: *const u64, src: uint64x1_t) -> uint64x1_t {
@@ -1211,7 +1211,7 @@ pub unsafe fn vld1_lane_u64<const LANE: i32>(ptr: *const u64, src: uint64x1_t) -
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr", LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 1))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1q_lane_u64<const LANE: i32>(ptr: *const u64, src: uint64x2_t) -> uint64x2_t {
@@ -1227,7 +1227,7 @@ pub unsafe fn vld1q_lane_u64<const LANE: i32>(ptr: *const u64, src: uint64x2_t)
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", LANE = 7))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 7))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1_lane_p8<const LANE: i32>(ptr: *const p8, src: poly8x8_t) -> poly8x8_t {
@@ -1243,7 +1243,7 @@ pub unsafe fn vld1_lane_p8<const LANE: i32>(ptr: *const p8, src: poly8x8_t) -> p
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", LANE = 15))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 15))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1q_lane_p8<const LANE: i32>(ptr: *const p8, src: poly8x16_t) -> poly8x16_t {
@@ -1259,7 +1259,7 @@ pub unsafe fn vld1q_lane_p8<const LANE: i32>(ptr: *const p8, src: poly8x16_t) ->
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", LANE = 3))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 3))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1_lane_p16<const LANE: i32>(ptr: *const p16, src: poly16x4_t) -> poly16x4_t {
@@ -1275,7 +1275,7 @@ pub unsafe fn vld1_lane_p16<const LANE: i32>(ptr: *const p16, src: poly16x4_t) -
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", LANE = 7))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 7))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1q_lane_p16<const LANE: i32>(ptr: *const p16, src: poly16x8_t) -> poly16x8_t {
@@ -1293,7 +1293,7 @@ pub unsafe fn vld1q_lane_p16<const LANE: i32>(ptr: *const p16, src: poly16x8_t)
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr", LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ldr, LANE = 0))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1_lane_p64<const LANE: i32>(ptr: *const p64, src: poly64x1_t) -> poly64x1_t {
@@ -1311,7 +1311,7 @@ pub unsafe fn vld1_lane_p64<const LANE: i32>(ptr: *const p64, src: poly64x1_t) -
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr", LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 1))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1q_lane_p64<const LANE: i32>(ptr: *const p64, src: poly64x2_t) -> poly64x2_t {
@@ -1327,7 +1327,7 @@ pub unsafe fn vld1q_lane_p64<const LANE: i32>(ptr: *const p64, src: poly64x2_t)
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 1))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1_lane_f32<const LANE: i32>(ptr: *const f32, src: float32x2_t) -> float32x2_t {
@@ -1343,7 +1343,7 @@ pub unsafe fn vld1_lane_f32<const LANE: i32>(ptr: *const f32, src: float32x2_t)
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", LANE = 3))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 3))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1q_lane_f32<const LANE: i32>(ptr: *const f32, src: float32x4_t) -> float32x4_t {
@@ -1358,7 +1358,7 @@ pub unsafe fn vld1q_lane_f32<const LANE: i32>(ptr: *const f32, src: float32x4_t)
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1_dup_s8(ptr: *const i8) -> int8x8_t {
@@ -1373,7 +1373,7 @@ pub unsafe fn vld1_dup_s8(ptr: *const i8) -> int8x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1q_dup_s8(ptr: *const i8) -> int8x16_t {
@@ -1388,7 +1388,7 @@ pub unsafe fn vld1q_dup_s8(ptr: *const i8) -> int8x16_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1_dup_s16(ptr: *const i16) -> int16x4_t {
@@ -1403,7 +1403,7 @@ pub unsafe fn vld1_dup_s16(ptr: *const i16) -> int16x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1q_dup_s16(ptr: *const i16) -> int16x8_t {
@@ -1418,7 +1418,7 @@ pub unsafe fn vld1q_dup_s16(ptr: *const i16) -> int16x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1_dup_s32(ptr: *const i32) -> int32x2_t {
@@ -1433,7 +1433,7 @@ pub unsafe fn vld1_dup_s32(ptr: *const i32) -> int32x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1q_dup_s32(ptr: *const i32) -> int32x4_t {
@@ -1448,7 +1448,7 @@ pub unsafe fn vld1q_dup_s32(ptr: *const i32) -> int32x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ldr))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1_dup_s64(ptr: *const i64) -> int64x1_t {
@@ -1469,7 +1469,7 @@ pub unsafe fn vld1_dup_s64(ptr: *const i64) -> int64x1_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1q_dup_s64(ptr: *const i64) -> int64x2_t {
@@ -1484,7 +1484,7 @@ pub unsafe fn vld1q_dup_s64(ptr: *const i64) -> int64x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1_dup_u8(ptr: *const u8) -> uint8x8_t {
@@ -1499,7 +1499,7 @@ pub unsafe fn vld1_dup_u8(ptr: *const u8) -> uint8x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1q_dup_u8(ptr: *const u8) -> uint8x16_t {
@@ -1514,7 +1514,7 @@ pub unsafe fn vld1q_dup_u8(ptr: *const u8) -> uint8x16_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1_dup_u16(ptr: *const u16) -> uint16x4_t {
@@ -1529,7 +1529,7 @@ pub unsafe fn vld1_dup_u16(ptr: *const u16) -> uint16x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1q_dup_u16(ptr: *const u16) -> uint16x8_t {
@@ -1544,7 +1544,7 @@ pub unsafe fn vld1q_dup_u16(ptr: *const u16) -> uint16x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1_dup_u32(ptr: *const u32) -> uint32x2_t {
@@ -1559,7 +1559,7 @@ pub unsafe fn vld1_dup_u32(ptr: *const u32) -> uint32x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1q_dup_u32(ptr: *const u32) -> uint32x4_t {
@@ -1574,7 +1574,7 @@ pub unsafe fn vld1q_dup_u32(ptr: *const u32) -> uint32x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ldr))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1_dup_u64(ptr: *const u64) -> uint64x1_t {
@@ -1595,7 +1595,7 @@ pub unsafe fn vld1_dup_u64(ptr: *const u64) -> uint64x1_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1q_dup_u64(ptr: *const u64) -> uint64x2_t {
@@ -1610,7 +1610,7 @@ pub unsafe fn vld1q_dup_u64(ptr: *const u64) -> uint64x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1_dup_p8(ptr: *const p8) -> poly8x8_t {
@@ -1625,7 +1625,7 @@ pub unsafe fn vld1_dup_p8(ptr: *const p8) -> poly8x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1q_dup_p8(ptr: *const p8) -> poly8x16_t {
@@ -1640,7 +1640,7 @@ pub unsafe fn vld1q_dup_p8(ptr: *const p8) -> poly8x16_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1_dup_p16(ptr: *const p16) -> poly16x4_t {
@@ -1655,7 +1655,7 @@ pub unsafe fn vld1_dup_p16(ptr: *const p16) -> poly16x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1q_dup_p16(ptr: *const p16) -> poly16x8_t {
@@ -1670,7 +1670,7 @@ pub unsafe fn vld1q_dup_p16(ptr: *const p16) -> poly16x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1_dup_f32(ptr: *const f32) -> float32x2_t {
@@ -1687,7 +1687,7 @@ pub unsafe fn vld1_dup_f32(ptr: *const f32) -> float32x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ldr))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1_dup_p64(ptr: *const p64) -> poly64x1_t {
@@ -1710,7 +1710,7 @@ pub unsafe fn vld1_dup_p64(ptr: *const p64) -> poly64x1_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1q_dup_p64(ptr: *const p64) -> poly64x2_t {
@@ -1725,7 +1725,7 @@ pub unsafe fn vld1q_dup_p64(ptr: *const p64) -> poly64x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1q_dup_f32(ptr: *const f32) -> float32x4_t {
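
The `vld1_dup`/`vld1q_dup` family wraps the load-and-replicate instruction (`ld1r`). A usage sketch, assuming an AArch64 target where NEON is mandatory; `broadcast` is an illustrative name, not from this patch:

#[cfg(target_arch = "aarch64")]
fn broadcast() -> [u32; 2] {
    use core::arch::aarch64::{vld1_dup_u32, vst1_u32};
    let x = 42u32;
    let mut out = [0u32; 2];
    // Safety: NEON is mandatory on AArch64, and `&x` is a valid,
    // aligned pointer to a single u32.
    unsafe {
        let v = vld1_dup_u32(&x); // ld1r: both lanes become 42
        vst1_u32(out.as_mut_ptr(), v);
    }
    out // [42, 42]
}
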
@@ -1740,7 +1740,7 @@ pub unsafe fn vld1q_dup_f32(ptr: *const f32) -> float32x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.s8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr("saba"))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaba_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t {
@@ -1752,7 +1752,7 @@ pub unsafe fn vaba_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.s16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr("saba"))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaba_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
@@ -1764,7 +1764,7 @@ pub unsafe fn vaba_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.s32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr("saba"))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaba_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
@@ -1776,7 +1776,7 @@ pub unsafe fn vaba_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.u8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr("uaba"))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaba_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t {
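
`vaba` computes `a + |b - c|` per lane: absolute difference, then accumulate. A sketch under the same AArch64 assumption; `abs_diff_acc` is an illustrative name:

#[cfg(target_arch = "aarch64")]
fn abs_diff_acc() -> [u8; 8] {
    use core::arch::aarch64::{vaba_u8, vdup_n_u8, vst1_u8};
    let mut out = [0u8; 8];
    unsafe {
        // Each lane: 10 + |3 - 7| = 14.
        let acc = vaba_u8(vdup_n_u8(10), vdup_n_u8(3), vdup_n_u8(7));
        vst1_u8(out.as_mut_ptr(), acc);
    }
    out // [14; 8]
}
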
@@ -1788,7 +1788,7 @@ pub unsafe fn vaba_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.u16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr("uaba"))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaba_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t {
@@ -1800,7 +1800,7 @@ pub unsafe fn vaba_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.u32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr("uaba"))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaba_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t {
@@ -1813,7 +1813,7 @@ pub unsafe fn vaba_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.s8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr("saba"))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vabaq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t {
@@ -1825,7 +1825,7 @@ pub unsafe fn vabaq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.s16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr("saba"))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vabaq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
@@ -1837,7 +1837,7 @@ pub unsafe fn vabaq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.s32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr("saba"))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vabaq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
@@ -1849,7 +1849,7 @@ pub unsafe fn vabaq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.u8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr("uaba"))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vabaq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t {
@@ -1861,7 +1861,7 @@ pub unsafe fn vabaq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.u16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr("uaba"))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vabaq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t {
@@ -1873,7 +1873,7 @@ pub unsafe fn vabaq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.u32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr("uaba"))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vabaq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
@@ -1887,7 +1887,7 @@ pub unsafe fn vabaq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(abs))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vabs_s8(a: int8x8_t) -> int8x8_t {
@@ -1900,7 +1900,7 @@ pub unsafe fn vabs_s8(a: int8x8_t) -> int8x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(abs))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vabs_s16(a: int16x4_t) -> int16x4_t {
@@ -1913,7 +1913,7 @@ pub unsafe fn vabs_s16(a: int16x4_t) -> int16x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(abs))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vabs_s32(a: int32x2_t) -> int32x2_t {
@@ -1926,7 +1926,7 @@ pub unsafe fn vabs_s32(a: int32x2_t) -> int32x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(abs))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vabsq_s8(a: int8x16_t) -> int8x16_t {
@@ -1939,7 +1939,7 @@ pub unsafe fn vabsq_s8(a: int8x16_t) -> int8x16_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(abs))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vabsq_s16(a: int16x8_t) -> int16x8_t {
@@ -1952,7 +1952,7 @@ pub unsafe fn vabsq_s16(a: int16x8_t) -> int16x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(abs))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vabsq_s32(a: int32x4_t) -> int32x4_t {
@@ -1966,7 +1966,7 @@ pub unsafe fn vabsq_s32(a: int32x4_t) -> int32x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addp))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
@@ -1979,7 +1979,7 @@ pub unsafe fn vpadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addp))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
@@ -1992,7 +1992,7 @@ pub unsafe fn vpadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addp))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
@@ -2005,7 +2005,7 @@ pub unsafe fn vpadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addp))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
@@ -2018,7 +2018,7 @@ pub unsafe fn vpadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addp))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
@@ -2031,7 +2031,7 @@ pub unsafe fn vpadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addp))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
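
`vpadd` adds adjacent lane pairs across the concatenation of its two operands (`addp` on AArch64). A sketch; `pairwise` is an illustrative name:

#[cfg(target_arch = "aarch64")]
fn pairwise() -> [i16; 4] {
    use core::arch::aarch64::{vld1_s16, vpadd_s16, vst1_s16};
    let a = [1i16, 2, 3, 4];
    let b = [10i16, 20, 30, 40];
    let mut out = [0i16; 4];
    unsafe {
        // Adjacent pairs of a, then of b: [1+2, 3+4, 10+20, 30+40].
        let sum = vpadd_s16(vld1_s16(a.as_ptr()), vld1_s16(b.as_ptr()));
        vst1_s16(out.as_mut_ptr(), sum);
    }
    out // [3, 7, 30, 70]
}
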
@@ -2045,7 +2045,7 @@ pub unsafe fn vpadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(add))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
@@ -2059,7 +2059,7 @@ pub unsafe fn vadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(add))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
@@ -2073,7 +2073,7 @@ pub unsafe fn vaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(add))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
@@ -2087,7 +2087,7 @@ pub unsafe fn vadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(add))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
@@ -2101,7 +2101,7 @@ pub unsafe fn vaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(add))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
@@ -2115,7 +2115,7 @@ pub unsafe fn vadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(add))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
@@ -2129,7 +2129,7 @@ pub unsafe fn vaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(add))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
@@ -2143,7 +2143,7 @@ pub unsafe fn vaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(add))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
@@ -2157,7 +2157,7 @@ pub unsafe fn vadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(add))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
@@ -2171,7 +2171,7 @@ pub unsafe fn vaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(add))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
@@ -2185,7 +2185,7 @@ pub unsafe fn vadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(add))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
@@ -2199,7 +2199,7 @@ pub unsafe fn vaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(add))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
@@ -2213,7 +2213,7 @@ pub unsafe fn vadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(add))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
@@ -2227,7 +2227,7 @@ pub unsafe fn vaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(add))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
@@ -2241,7 +2241,7 @@ pub unsafe fn vaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fadd))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vadd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
@@ -2255,7 +2255,7 @@ pub unsafe fn vadd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fadd))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
@@ -2269,7 +2269,7 @@ pub unsafe fn vaddq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddl))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddl_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t {
@@ -2285,7 +2285,7 @@ pub unsafe fn vaddl_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddl))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddl_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t {
@@ -2301,7 +2301,7 @@ pub unsafe fn vaddl_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddl))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddl_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t {
@@ -2317,7 +2317,7 @@ pub unsafe fn vaddl_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddl))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddl_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t {
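
The `vaddl` family widens each lane before adding, so the sums cannot wrap. A sketch; `widening_sum` is an illustrative name:

#[cfg(target_arch = "aarch64")]
fn widening_sum() -> [u16; 8] {
    use core::arch::aarch64::{vaddl_u8, vdup_n_u8, vst1q_u16};
    let mut out = [0u16; 8];
    unsafe {
        // 200 + 100 = 300 fits in the widened u16 lanes; a plain
        // 8-bit add would have wrapped to 44.
        let wide = vaddl_u8(vdup_n_u8(200), vdup_n_u8(100));
        vst1q_u16(out.as_mut_ptr(), wide);
    }
    out // [300; 8]
}
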
@@ -2333,7 +2333,7 @@ pub unsafe fn vaddl_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddl))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddl_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t {
@@ -2349,7 +2349,7 @@ pub unsafe fn vaddl_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddl))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddl_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t {
@@ -2365,7 +2365,7 @@ pub unsafe fn vaddl_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddl2))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t {
@@ -2383,7 +2383,7 @@ pub unsafe fn vaddl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddl2))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
@@ -2401,7 +2401,7 @@ pub unsafe fn vaddl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddl2))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
@@ -2419,7 +2419,7 @@ pub unsafe fn vaddl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddl2))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t {
@@ -2437,7 +2437,7 @@ pub unsafe fn vaddl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddl2))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
@@ -2455,7 +2455,7 @@ pub unsafe fn vaddl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddl2))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
@@ -2473,7 +2473,7 @@ pub unsafe fn vaddl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddw))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddw_s8(a: int16x8_t, b: int8x8_t) -> int16x8_t {
@@ -2488,7 +2488,7 @@ pub unsafe fn vaddw_s8(a: int16x8_t, b: int8x8_t) -> int16x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddw))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddw_s16(a: int32x4_t, b: int16x4_t) -> int32x4_t {
@@ -2503,7 +2503,7 @@ pub unsafe fn vaddw_s16(a: int32x4_t, b: int16x4_t) -> int32x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddw))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddw_s32(a: int64x2_t, b: int32x2_t) -> int64x2_t {
@@ -2518,7 +2518,7 @@ pub unsafe fn vaddw_s32(a: int64x2_t, b: int32x2_t) -> int64x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddw))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddw_u8(a: uint16x8_t, b: uint8x8_t) -> uint16x8_t {
@@ -2533,7 +2533,7 @@ pub unsafe fn vaddw_u8(a: uint16x8_t, b: uint8x8_t) -> uint16x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddw))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddw_u16(a: uint32x4_t, b: uint16x4_t) -> uint32x4_t {
@@ -2548,7 +2548,7 @@ pub unsafe fn vaddw_u16(a: uint32x4_t, b: uint16x4_t) -> uint32x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddw))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddw_u32(a: uint64x2_t, b: uint32x2_t) -> uint64x2_t {
@@ -2563,7 +2563,7 @@ pub unsafe fn vaddw_u32(a: uint64x2_t, b: uint32x2_t) -> uint64x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddw2))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddw_high_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t {
@@ -2579,7 +2579,7 @@ pub unsafe fn vaddw_high_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddw2))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddw_high_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t {
@@ -2595,7 +2595,7 @@ pub unsafe fn vaddw_high_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddw2))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddw_high_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t {
@@ -2611,7 +2611,7 @@ pub unsafe fn vaddw_high_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddw2))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddw_high_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t {
@@ -2627,7 +2627,7 @@ pub unsafe fn vaddw_high_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddw2))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddw_high_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t {
@@ -2643,7 +2643,7 @@ pub unsafe fn vaddw_high_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddw2))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddw_high_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t {
@@ -2659,7 +2659,7 @@ pub unsafe fn vaddw_high_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addhn))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t {
@@ -2673,7 +2673,7 @@ pub unsafe fn vaddhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addhn))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t {
@@ -2687,7 +2687,7 @@ pub unsafe fn vaddhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addhn))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t {
@@ -2701,7 +2701,7 @@ pub unsafe fn vaddhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addhn))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t {
@@ -2715,7 +2715,7 @@ pub unsafe fn vaddhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addhn))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t {
@@ -2729,7 +2729,7 @@ pub unsafe fn vaddhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addhn))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t {
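
`vaddhn` adds and keeps only the high half of each lane, narrowing the result; the `_high` variants that follow pack that narrow half into the upper lanes of an existing vector. A sketch; `high_half_sum` is an illustrative name:

#[cfg(target_arch = "aarch64")]
fn high_half_sum() -> [u8; 8] {
    use core::arch::aarch64::{vaddhn_u16, vdupq_n_u16, vst1_u8};
    let mut out = [0u8; 8];
    unsafe {
        // 0x0180 + 0x0180 = 0x0300; the high byte of each sum is 0x03.
        let hi = vaddhn_u16(vdupq_n_u16(0x0180), vdupq_n_u16(0x0180));
        vst1_u8(out.as_mut_ptr(), hi);
    }
    out // [3; 8]
}
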
@@ -2743,7 +2743,7 @@ pub unsafe fn vaddhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addhn2))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddhn_high_s16(r: int8x8_t, a: int16x8_t, b: int16x8_t) -> int8x16_t {
@@ -2758,7 +2758,7 @@ pub unsafe fn vaddhn_high_s16(r: int8x8_t, a: int16x8_t, b: int16x8_t) -> int8x1
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addhn2))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddhn_high_s32(r: int16x4_t, a: int32x4_t, b: int32x4_t) -> int16x8_t {
@@ -2773,7 +2773,7 @@ pub unsafe fn vaddhn_high_s32(r: int16x4_t, a: int32x4_t, b: int32x4_t) -> int16
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addhn2))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddhn_high_s64(r: int32x2_t, a: int64x2_t, b: int64x2_t) -> int32x4_t {
@@ -2788,7 +2788,7 @@ pub unsafe fn vaddhn_high_s64(r: int32x2_t, a: int64x2_t, b: int64x2_t) -> int32
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addhn2))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddhn_high_u16(r: uint8x8_t, a: uint16x8_t, b: uint16x8_t) -> uint8x16_t {
@@ -2803,7 +2803,7 @@ pub unsafe fn vaddhn_high_u16(r: uint8x8_t, a: uint16x8_t, b: uint16x8_t) -> uin
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addhn2))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddhn_high_u32(r: uint16x4_t, a: uint32x4_t, b: uint32x4_t) -> uint16x8_t {
@@ -2818,7 +2818,7 @@ pub unsafe fn vaddhn_high_u32(r: uint16x4_t, a: uint32x4_t, b: uint32x4_t) -> ui
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addhn2))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddhn_high_u64(r: uint32x2_t, a: uint64x2_t, b: uint64x2_t) -> uint32x4_t {
@@ -2833,7 +2833,7 @@ pub unsafe fn vaddhn_high_u64(r: uint32x2_t, a: uint64x2_t, b: uint64x2_t) -> ui
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vraddhn.i16))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(raddhn))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vraddhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t {
@@ -2847,7 +2847,7 @@ pub unsafe fn vraddhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vraddhn.i32))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(raddhn))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vraddhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t {
@@ -2861,7 +2861,7 @@ pub unsafe fn vraddhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vraddhn.i64))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(raddhn))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vraddhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t {
@@ -2875,7 +2875,7 @@ pub unsafe fn vraddhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vraddhn.i16))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(raddhn))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vraddhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t {
@@ -2889,7 +2889,7 @@ pub unsafe fn vraddhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vraddhn.i32))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(raddhn))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vraddhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t {
@@ -2903,7 +2903,7 @@ pub unsafe fn vraddhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vraddhn.i64))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(raddhn))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vraddhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t {
@@ -2917,7 +2917,7 @@ pub unsafe fn vraddhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vraddhn.i16))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(raddhn2))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vraddhn_high_s16(r: int8x8_t, a: int16x8_t, b: int16x8_t) -> int8x16_t {
@@ -2932,7 +2932,7 @@ pub unsafe fn vraddhn_high_s16(r: int8x8_t, a: int16x8_t, b: int16x8_t) -> int8x
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vraddhn.i32))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(raddhn2))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vraddhn_high_s32(r: int16x4_t, a: int32x4_t, b: int32x4_t) -> int16x8_t {
@@ -2947,7 +2947,7 @@ pub unsafe fn vraddhn_high_s32(r: int16x4_t, a: int32x4_t, b: int32x4_t) -> int1
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vraddhn.i64))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(raddhn2))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vraddhn_high_s64(r: int32x2_t, a: int64x2_t, b: int64x2_t) -> int32x4_t {
@@ -2962,7 +2962,7 @@ pub unsafe fn vraddhn_high_s64(r: int32x2_t, a: int64x2_t, b: int64x2_t) -> int3
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vraddhn.i16))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(raddhn2))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vraddhn_high_u16(r: uint8x8_t, a: uint16x8_t, b: uint16x8_t) -> uint8x16_t {
@@ -2977,7 +2977,7 @@ pub unsafe fn vraddhn_high_u16(r: uint8x8_t, a: uint16x8_t, b: uint16x8_t) -> ui
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vraddhn.i32))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(raddhn2))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vraddhn_high_u32(r: uint16x4_t, a: uint32x4_t, b: uint32x4_t) -> uint16x8_t {
@@ -2992,7 +2992,7 @@ pub unsafe fn vraddhn_high_u32(r: uint16x4_t, a: uint32x4_t, b: uint32x4_t) -> u
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vraddhn.i64))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(raddhn2))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vraddhn_high_u64(r: uint32x2_t, a: uint64x2_t, b: uint64x2_t) -> uint32x4_t {
@@ -3007,7 +3007,7 @@ pub unsafe fn vraddhn_high_u64(r: uint32x2_t, a: uint64x2_t, b: uint64x2_t) -> u
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpaddl.s8))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddlp))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpaddl_s8(a: int8x8_t) -> int16x4_t {
@@ -3021,7 +3021,7 @@ pub unsafe fn vpaddl_s8(a: int8x8_t) -> int16x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpaddl.s16))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddlp))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpaddl_s16(a: int16x4_t) -> int32x2_t {
@@ -3035,7 +3035,7 @@ pub unsafe fn vpaddl_s16(a: int16x4_t) -> int32x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpaddl.s32))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddlp))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpaddl_s32(a: int32x2_t) -> int64x1_t {
@@ -3049,7 +3049,7 @@ pub unsafe fn vpaddl_s32(a: int32x2_t) -> int64x1_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpaddl.s8))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddlp))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpaddlq_s8(a: int8x16_t) -> int16x8_t {
@@ -3063,7 +3063,7 @@ pub unsafe fn vpaddlq_s8(a: int8x16_t) -> int16x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpaddl.s16))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddlp))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpaddlq_s16(a: int16x8_t) -> int32x4_t {
@@ -3077,7 +3077,7 @@ pub unsafe fn vpaddlq_s16(a: int16x8_t) -> int32x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpaddl.s32))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddlp))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpaddlq_s32(a: int32x4_t) -> int64x2_t {
@@ -3091,7 +3091,7 @@ pub unsafe fn vpaddlq_s32(a: int32x4_t) -> int64x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpaddl.u8))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddlp))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpaddl_u8(a: uint8x8_t) -> uint16x4_t {
@@ -3105,7 +3105,7 @@ pub unsafe fn vpaddl_u8(a: uint8x8_t) -> uint16x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpaddl.u16))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddlp))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpaddl_u16(a: uint16x4_t) -> uint32x2_t {
@@ -3119,7 +3119,7 @@ pub unsafe fn vpaddl_u16(a: uint16x4_t) -> uint32x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpaddl.u32))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddlp))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpaddl_u32(a: uint32x2_t) -> uint64x1_t {
@@ -3133,7 +3133,7 @@ pub unsafe fn vpaddl_u32(a: uint32x2_t) -> uint64x1_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpaddl.u8))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddlp))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpaddlq_u8(a: uint8x16_t) -> uint16x8_t {
@@ -3147,7 +3147,7 @@ pub unsafe fn vpaddlq_u8(a: uint8x16_t) -> uint16x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpaddl.u16))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddlp))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpaddlq_u16(a: uint16x8_t) -> uint32x4_t {
@@ -3161,7 +3161,7 @@ pub unsafe fn vpaddlq_u16(a: uint16x8_t) -> uint32x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpaddl.u32))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddlp))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpaddlq_u32(a: uint32x4_t) -> uint64x2_t {
@@ -3175,7 +3175,7 @@ pub unsafe fn vpaddlq_u32(a: uint32x4_t) -> uint64x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(xtn))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmovn_s16(a: int16x8_t) -> int8x8_t {
@@ -3189,7 +3189,7 @@ pub unsafe fn vmovn_s16(a: int16x8_t) -> int8x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(xtn))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmovn_s32(a: int32x4_t) -> int16x4_t {
@@ -3203,7 +3203,7 @@ pub unsafe fn vmovn_s32(a: int32x4_t) -> int16x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(xtn))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmovn_s64(a: int64x2_t) -> int32x2_t {
@@ -3217,7 +3217,7 @@ pub unsafe fn vmovn_s64(a: int64x2_t) -> int32x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(xtn))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmovn_u16(a: uint16x8_t) -> uint8x8_t {
@@ -3231,7 +3231,7 @@ pub unsafe fn vmovn_u16(a: uint16x8_t) -> uint8x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(xtn))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmovn_u32(a: uint32x4_t) -> uint16x4_t {
@@ -3245,7 +3245,7 @@ pub unsafe fn vmovn_u32(a: uint32x4_t) -> uint16x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(xtn))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmovn_u64(a: uint64x2_t) -> uint32x2_t {
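
`vmovn` truncates each lane to half its width (`xtn`), and the `vmovl` hunks below perform the matching sign- or zero-extension back up. A round-trip sketch; `narrow_then_widen` is an illustrative name:

#[cfg(target_arch = "aarch64")]
fn narrow_then_widen() -> [u16; 8] {
    use core::arch::aarch64::{vdupq_n_u16, vmovl_u8, vmovn_u16, vst1q_u16};
    let mut out = [0u16; 8];
    unsafe {
        // vmovn keeps the low byte of each u16 lane: 0x0102 -> 0x02;
        // vmovl zero-extends it back to u16.
        let narrowed = vmovn_u16(vdupq_n_u16(0x0102));
        vst1q_u16(out.as_mut_ptr(), vmovl_u8(narrowed));
    }
    out // [2; 8]
}
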
@@ -3259,7 +3259,7 @@ pub unsafe fn vmovn_u64(a: uint64x2_t) -> uint32x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sxtl))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmovl_s8(a: int8x8_t) -> int16x8_t {
@@ -3273,7 +3273,7 @@ pub unsafe fn vmovl_s8(a: int8x8_t) -> int16x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sxtl))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmovl_s16(a: int16x4_t) -> int32x4_t {
@@ -3287,7 +3287,7 @@ pub unsafe fn vmovl_s16(a: int16x4_t) -> int32x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sxtl))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmovl_s32(a: int32x2_t) -> int64x2_t {
@@ -3301,7 +3301,7 @@ pub unsafe fn vmovl_s32(a: int32x2_t) -> int64x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uxtl))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmovl_u8(a: uint8x8_t) -> uint16x8_t {
@@ -3315,7 +3315,7 @@ pub unsafe fn vmovl_u8(a: uint8x8_t) -> uint16x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uxtl))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmovl_u16(a: uint16x4_t) -> uint32x4_t {
@@ -3329,7 +3329,7 @@ pub unsafe fn vmovl_u16(a: uint16x4_t) -> uint32x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uxtl))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmovl_u32(a: uint32x2_t) -> uint64x2_t {
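
The vmovl_* intrinsics are the inverse direction: each lane is widened to twice its width, sign-extending for signed inputs (sxtl) and zero-extending for unsigned ones (uxtl). A minimal sketch under the same assumptions:

    fn main() {
        #[cfg(target_arch = "aarch64")]
        unsafe {
            use core::arch::aarch64::*;
            // Signed widening sign-extends: -2 as an i8 is still -2 as an i16.
            let narrow = vdup_n_s8(-2);
            let wide = vmovl_s8(narrow); // int8x8_t -> int16x8_t
            assert_eq!(vgetq_lane_s16::<0>(wide), -2);
        }
    }
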
@@ -3343,7 +3343,7 @@ pub unsafe fn vmovl_u32(a: uint32x2_t) -> uint64x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mvn))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmvn_s8(a: int8x8_t) -> int8x8_t {
@@ -3358,7 +3358,7 @@ pub unsafe fn vmvn_s8(a: int8x8_t) -> int8x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mvn))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmvnq_s8(a: int8x16_t) -> int8x16_t {
@@ -3375,7 +3375,7 @@ pub unsafe fn vmvnq_s8(a: int8x16_t) -> int8x16_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mvn))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmvn_s16(a: int16x4_t) -> int16x4_t {
@@ -3390,7 +3390,7 @@ pub unsafe fn vmvn_s16(a: int16x4_t) -> int16x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mvn))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmvnq_s16(a: int16x8_t) -> int16x8_t {
@@ -3405,7 +3405,7 @@ pub unsafe fn vmvnq_s16(a: int16x8_t) -> int16x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mvn))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmvn_s32(a: int32x2_t) -> int32x2_t {
@@ -3420,7 +3420,7 @@ pub unsafe fn vmvn_s32(a: int32x2_t) -> int32x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mvn))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmvnq_s32(a: int32x4_t) -> int32x4_t {
@@ -3435,7 +3435,7 @@ pub unsafe fn vmvnq_s32(a: int32x4_t) -> int32x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mvn))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmvn_u8(a: uint8x8_t) -> uint8x8_t {
@@ -3450,7 +3450,7 @@ pub unsafe fn vmvn_u8(a: uint8x8_t) -> uint8x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mvn))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmvnq_u8(a: uint8x16_t) -> uint8x16_t {
@@ -3467,7 +3467,7 @@ pub unsafe fn vmvnq_u8(a: uint8x16_t) -> uint8x16_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mvn))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmvn_u16(a: uint16x4_t) -> uint16x4_t {
@@ -3482,7 +3482,7 @@ pub unsafe fn vmvn_u16(a: uint16x4_t) -> uint16x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mvn))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmvnq_u16(a: uint16x8_t) -> uint16x8_t {
@@ -3499,7 +3499,7 @@ pub unsafe fn vmvnq_u16(a: uint16x8_t) -> uint16x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mvn))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmvn_u32(a: uint32x2_t) -> uint32x2_t {
@@ -3514,7 +3514,7 @@ pub unsafe fn vmvn_u32(a: uint32x2_t) -> uint32x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mvn))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmvnq_u32(a: uint32x4_t) -> uint32x4_t {
@@ -3529,7 +3529,7 @@ pub unsafe fn vmvnq_u32(a: uint32x4_t) -> uint32x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mvn))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmvn_p8(a: poly8x8_t) -> poly8x8_t {
@@ -3544,7 +3544,7 @@ pub unsafe fn vmvn_p8(a: poly8x8_t) -> poly8x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mvn))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmvnq_p8(a: poly8x16_t) -> poly8x16_t {
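
vmvn_*/vmvnq_* compute a lanewise bitwise NOT (mvn). A minimal sketch, illustrative values as before:

    fn main() {
        #[cfg(target_arch = "aarch64")]
        unsafe {
            use core::arch::aarch64::*;
            let a = vdup_n_u8(0b0000_1111);
            let r = vmvn_u8(a); // every bit flipped
            assert_eq!(vget_lane_u8::<0>(r), 0b1111_0000);
        }
    }
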
@@ -3561,7 +3561,7 @@ pub unsafe fn vmvnq_p8(a: poly8x16_t) -> poly8x16_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bic))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbic_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
@@ -3576,7 +3576,7 @@ pub unsafe fn vbic_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bic))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbicq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
@@ -3593,7 +3593,7 @@ pub unsafe fn vbicq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bic))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbic_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
@@ -3608,7 +3608,7 @@ pub unsafe fn vbic_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bic))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbicq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
@@ -3623,7 +3623,7 @@ pub unsafe fn vbicq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bic))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbic_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
@@ -3638,7 +3638,7 @@ pub unsafe fn vbic_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bic))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbicq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
@@ -3653,7 +3653,7 @@ pub unsafe fn vbicq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bic))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbic_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t {
@@ -3668,7 +3668,7 @@ pub unsafe fn vbic_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bic))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbicq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
@@ -3683,7 +3683,7 @@ pub unsafe fn vbicq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bic))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbic_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
@@ -3698,7 +3698,7 @@ pub unsafe fn vbic_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bic))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbicq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
@@ -3715,7 +3715,7 @@ pub unsafe fn vbicq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bic))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbic_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
@@ -3730,7 +3730,7 @@ pub unsafe fn vbic_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bic))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbicq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
@@ -3745,7 +3745,7 @@ pub unsafe fn vbicq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bic))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbic_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
@@ -3760,7 +3760,7 @@ pub unsafe fn vbic_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bic))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbicq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
@@ -3775,7 +3775,7 @@ pub unsafe fn vbicq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bic))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbic_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
@@ -3790,7 +3790,7 @@ pub unsafe fn vbic_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bic))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbicq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
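
vbic is "bit clear": per lane it computes a AND NOT b, so every bit that is set in b is cleared in a. A minimal sketch under the same assumptions:

    fn main() {
        #[cfg(target_arch = "aarch64")]
        unsafe {
            use core::arch::aarch64::*;
            let a = vdup_n_u8(0xFF);
            let b = vdup_n_u8(0x0F);
            let r = vbic_u8(a, b); // 0xFF & !0x0F = 0xF0
            assert_eq!(vget_lane_u8::<0>(r), 0xF0);
        }
    }
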
@@ -3809,7 +3809,7 @@ pub unsafe fn vbicq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbsl_s8(a: uint8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t {
@@ -3827,7 +3827,7 @@ pub unsafe fn vbsl_s8(a: uint8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbsl_s16(a: uint16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
@@ -3845,7 +3845,7 @@ pub unsafe fn vbsl_s16(a: uint16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbsl_s32(a: uint32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
@@ -3863,7 +3863,7 @@ pub unsafe fn vbsl_s32(a: uint32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbsl_s64(a: uint64x1_t, b: int64x1_t, c: int64x1_t) -> int64x1_t {
@@ -3881,7 +3881,7 @@ pub unsafe fn vbsl_s64(a: uint64x1_t, b: int64x1_t, c: int64x1_t) -> int64x1_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbsl_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t {
@@ -3899,7 +3899,7 @@ pub unsafe fn vbsl_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbsl_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t {
@@ -3917,7 +3917,7 @@ pub unsafe fn vbsl_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbsl_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t {
@@ -3935,7 +3935,7 @@ pub unsafe fn vbsl_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbsl_u64(a: uint64x1_t, b: uint64x1_t, c: uint64x1_t) -> uint64x1_t {
@@ -3953,7 +3953,7 @@ pub unsafe fn vbsl_u64(a: uint64x1_t, b: uint64x1_t, c: uint64x1_t) -> uint64x1_
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbsl_f32(a: uint32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
@@ -3971,7 +3971,7 @@ pub unsafe fn vbsl_f32(a: uint32x2_t, b: float32x2_t, c: float32x2_t) -> float32
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbsl_p8(a: uint8x8_t, b: poly8x8_t, c: poly8x8_t) -> poly8x8_t {
@@ -3989,7 +3989,7 @@ pub unsafe fn vbsl_p8(a: uint8x8_t, b: poly8x8_t, c: poly8x8_t) -> poly8x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbsl_p16(a: uint16x4_t, b: poly16x4_t, c: poly16x4_t) -> poly16x4_t {
@@ -4007,7 +4007,7 @@ pub unsafe fn vbsl_p16(a: uint16x4_t, b: poly16x4_t, c: poly16x4_t) -> poly16x4_
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbslq_s8(a: uint8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t {
@@ -4027,7 +4027,7 @@ pub unsafe fn vbslq_s8(a: uint8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbslq_s16(a: uint16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
@@ -4045,7 +4045,7 @@ pub unsafe fn vbslq_s16(a: uint16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbslq_s32(a: uint32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
@@ -4063,7 +4063,7 @@ pub unsafe fn vbslq_s32(a: uint32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbslq_s64(a: uint64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t {
@@ -4081,7 +4081,7 @@ pub unsafe fn vbslq_s64(a: uint64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbslq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t {
@@ -4101,7 +4101,7 @@ pub unsafe fn vbslq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbslq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t {
@@ -4119,7 +4119,7 @@ pub unsafe fn vbslq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbslq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
@@ -4137,7 +4137,7 @@ pub unsafe fn vbslq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbslq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
@@ -4155,7 +4155,7 @@ pub unsafe fn vbslq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbslq_p8(a: uint8x16_t, b: poly8x16_t, c: poly8x16_t) -> poly8x16_t {
@@ -4175,7 +4175,7 @@ pub unsafe fn vbslq_p8(a: uint8x16_t, b: poly8x16_t, c: poly8x16_t) -> poly8x16_
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbslq_p16(a: uint16x8_t, b: poly16x8_t, c: poly16x8_t) -> poly16x8_t {
@@ -4193,7 +4193,7 @@ pub unsafe fn vbslq_p16(a: uint16x8_t, b: poly16x8_t, c: poly16x8_t) -> poly16x8
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbslq_f32(a: uint32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
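
vbsl is a bitwise select: for each bit position, the first argument acts as a mask that picks the bit from b where the mask bit is 1 and from c where it is 0. That is also why the mask parameter is an unsigned vector even in the signed, float, and poly variants above. A minimal sketch:

    fn main() {
        #[cfg(target_arch = "aarch64")]
        unsafe {
            use core::arch::aarch64::*;
            let mask = vdup_n_u8(0xF0);
            let b = vdup_n_u8(0xAA);
            let c = vdup_n_u8(0x55);
            // (0xF0 & 0xAA) | (!0xF0 & 0x55) = 0xA0 | 0x05 = 0xA5
            let r = vbsl_u8(mask, b, c);
            assert_eq!(vget_lane_u8::<0>(r), 0xA5);
        }
    }
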
@@ -4211,7 +4211,7 @@ pub unsafe fn vbslq_f32(a: uint32x4_t, b: float32x4_t, c: float32x4_t) -> float3
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orn))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vorn_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
@@ -4226,7 +4226,7 @@ pub unsafe fn vorn_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orn))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vornq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
@@ -4243,7 +4243,7 @@ pub unsafe fn vornq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orn))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vorn_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
@@ -4258,7 +4258,7 @@ pub unsafe fn vorn_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orn))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vornq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
@@ -4273,7 +4273,7 @@ pub unsafe fn vornq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orn))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vorn_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
@@ -4288,7 +4288,7 @@ pub unsafe fn vorn_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orn))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vornq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
@@ -4303,7 +4303,7 @@ pub unsafe fn vornq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orn))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vorn_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t {
@@ -4318,7 +4318,7 @@ pub unsafe fn vorn_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orn))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vornq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
@@ -4333,7 +4333,7 @@ pub unsafe fn vornq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orn))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vorn_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
@@ -4348,7 +4348,7 @@ pub unsafe fn vorn_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orn))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vornq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
@@ -4365,7 +4365,7 @@ pub unsafe fn vornq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orn))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vorn_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
@@ -4380,7 +4380,7 @@ pub unsafe fn vorn_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orn))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vornq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
@@ -4395,7 +4395,7 @@ pub unsafe fn vornq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orn))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vorn_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
@@ -4410,7 +4410,7 @@ pub unsafe fn vorn_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orn))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vornq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
@@ -4425,7 +4425,7 @@ pub unsafe fn vornq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orn))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vorn_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
@@ -4440,7 +4440,7 @@ pub unsafe fn vorn_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orn))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vornq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
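
vorn is the OR counterpart of vbic: per lane it computes a OR NOT b (orn). A minimal sketch:

    fn main() {
        #[cfg(target_arch = "aarch64")]
        unsafe {
            use core::arch::aarch64::*;
            let a = vdup_n_u8(0x00);
            let b = vdup_n_u8(0x0F);
            let r = vorn_u8(a, b); // 0x00 | !0x0F = 0xF0
            assert_eq!(vget_lane_u8::<0>(r), 0xF0);
        }
    }
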
@@ -4455,7 +4455,7 @@ pub unsafe fn vornq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sminp))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
@@ -4469,7 +4469,7 @@ pub unsafe fn vpmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sminp))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
@@ -4483,7 +4483,7 @@ pub unsafe fn vpmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sminp))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
@@ -4497,7 +4497,7 @@ pub unsafe fn vpmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uminp))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpmin_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
@@ -4511,7 +4511,7 @@ pub unsafe fn vpmin_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uminp))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpmin_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
@@ -4525,7 +4525,7 @@ pub unsafe fn vpmin_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uminp))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpmin_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
@@ -4539,7 +4539,7 @@ pub unsafe fn vpmin_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fminp))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
@@ -4553,7 +4553,7 @@ pub unsafe fn vpmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smaxp))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
@@ -4567,7 +4567,7 @@ pub unsafe fn vpmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smaxp))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
@@ -4581,7 +4581,7 @@ pub unsafe fn vpmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smaxp))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
@@ -4595,7 +4595,7 @@ pub unsafe fn vpmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umaxp))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpmax_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
@@ -4609,7 +4609,7 @@ pub unsafe fn vpmax_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umaxp))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpmax_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
@@ -4623,7 +4623,7 @@ pub unsafe fn vpmax_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umaxp))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpmax_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
@@ -4637,7 +4637,7 @@ pub unsafe fn vpmax_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmaxp))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
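
vpmin/vpmax reduce adjacent pairs rather than matching lanes: the result holds the min (or max) of lanes (0,1), (2,3), ... of a, followed by the same pairs of b. A minimal sketch, using vld1_u8 (from the same module) to load distinct lanes:

    fn main() {
        #[cfg(target_arch = "aarch64")]
        unsafe {
            use core::arch::aarch64::*;
            let data: [u8; 8] = [1, 9, 2, 8, 3, 7, 4, 6];
            let a = vld1_u8(data.as_ptr());
            let lo = vpmin_u8(a, a); // [1, 2, 3, 4, 1, 2, 3, 4]
            let hi = vpmax_u8(a, a); // [9, 8, 7, 6, 9, 8, 7, 6]
            assert_eq!(vget_lane_u8::<0>(lo), 1);
            assert_eq!(vget_lane_u8::<0>(hi), 9);
        }
    }
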
@@ -4651,7 +4651,7 @@ pub unsafe fn vpmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(test, assert_instr(nop, IMM5 = 1))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vgetq_lane_u64<const IMM5: i32>(v: uint64x2_t) -> u64 {
@@ -4666,7 +4666,7 @@ pub unsafe fn vgetq_lane_u64<const IMM5: i32>(v: uint64x2_t) -> u64 {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(test, assert_instr(nop, IMM5 = 0))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_lane_u64<const IMM5: i32>(v: uint64x1_t) -> u64 {
@@ -4681,7 +4681,7 @@ pub unsafe fn vget_lane_u64<const IMM5: i32>(v: uint64x1_t) -> u64 {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(test, assert_instr(nop, IMM5 = 2))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_lane_u16<const IMM5: i32>(v: uint16x4_t) -> u16 {
@@ -4696,7 +4696,7 @@ pub unsafe fn vget_lane_u16<const IMM5: i32>(v: uint16x4_t) -> u16 {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(test, assert_instr(nop, IMM5 = 2))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_lane_s16<const IMM5: i32>(v: int16x4_t) -> i16 {
@@ -4711,7 +4711,7 @@ pub unsafe fn vget_lane_s16<const IMM5: i32>(v: int16x4_t) -> i16 {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(test, assert_instr(nop, IMM5 = 2))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_lane_p16<const IMM5: i32>(v: poly16x4_t) -> p16 {
@@ -4726,7 +4726,7 @@ pub unsafe fn vget_lane_p16<const IMM5: i32>(v: poly16x4_t) -> p16 {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(test, assert_instr(nop, IMM5 = 1))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_lane_u32<const IMM5: i32>(v: uint32x2_t) -> u32 {
@@ -4741,7 +4741,7 @@ pub unsafe fn vget_lane_u32<const IMM5: i32>(v: uint32x2_t) -> u32 {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(test, assert_instr(nop, IMM5 = 1))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_lane_s32<const IMM5: i32>(v: int32x2_t) -> i32 {
@@ -4756,7 +4756,7 @@ pub unsafe fn vget_lane_s32<const IMM5: i32>(v: int32x2_t) -> i32 {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(test, assert_instr(nop, IMM5 = 1))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_lane_f32<const IMM5: i32>(v: float32x2_t) -> f32 {
@@ -4771,7 +4771,7 @@ pub unsafe fn vget_lane_f32<const IMM5: i32>(v: float32x2_t) -> f32 {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(test, assert_instr(nop, IMM5 = 1))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vgetq_lane_f32<const IMM5: i32>(v: float32x4_t) -> f32 {
@@ -4786,7 +4786,7 @@ pub unsafe fn vgetq_lane_f32<const IMM5: i32>(v: float32x4_t) -> f32 {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(test, assert_instr(nop, IMM5 = 0))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_lane_p64<const IMM5: i32>(v: poly64x1_t) -> p64 {
@@ -4801,7 +4801,7 @@ pub unsafe fn vget_lane_p64<const IMM5: i32>(v: poly64x1_t) -> p64 {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(test, assert_instr(nop, IMM5 = 0))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vgetq_lane_p64<const IMM5: i32>(v: poly64x2_t) -> p64 {
@@ -4816,7 +4816,7 @@ pub unsafe fn vgetq_lane_p64<const IMM5: i32>(v: poly64x2_t) -> p64 {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(test, assert_instr(nop, IMM5 = 0))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_lane_s64<const IMM5: i32>(v: int64x1_t) -> i64 {
@@ -4831,7 +4831,7 @@ pub unsafe fn vget_lane_s64<const IMM5: i32>(v: int64x1_t) -> i64 {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(test, assert_instr(nop, IMM5 = 0))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vgetq_lane_s64<const IMM5: i32>(v: int64x2_t) -> i64 {
@@ -4846,7 +4846,7 @@ pub unsafe fn vgetq_lane_s64<const IMM5: i32>(v: int64x2_t) -> i64 {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(test, assert_instr(nop, IMM5 = 2))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vgetq_lane_u16<const IMM5: i32>(v: uint16x8_t) -> u16 {
@@ -4861,7 +4861,7 @@ pub unsafe fn vgetq_lane_u16<const IMM5: i32>(v: uint16x8_t) -> u16 {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(test, assert_instr(nop, IMM5 = 2))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vgetq_lane_u32<const IMM5: i32>(v: uint32x4_t) -> u32 {
@@ -4876,7 +4876,7 @@ pub unsafe fn vgetq_lane_u32<const IMM5: i32>(v: uint32x4_t) -> u32 {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(test, assert_instr(nop, IMM5 = 2))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vgetq_lane_s16<const IMM5: i32>(v: int16x8_t) -> i16 {
@@ -4891,7 +4891,7 @@ pub unsafe fn vgetq_lane_s16<const IMM5: i32>(v: int16x8_t) -> i16 {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(test, assert_instr(nop, IMM5 = 2))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vgetq_lane_p16<const IMM5: i32>(v: poly16x8_t) -> p16 {
@@ -4906,7 +4906,7 @@ pub unsafe fn vgetq_lane_p16<const IMM5: i32>(v: poly16x8_t) -> p16 {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(test, assert_instr(nop, IMM5 = 2))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vgetq_lane_s32<const IMM5: i32>(v: int32x4_t) -> i32 {
@@ -4921,7 +4921,7 @@ pub unsafe fn vgetq_lane_s32<const IMM5: i32>(v: int32x4_t) -> i32 {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(test, assert_instr(nop, IMM5 = 2))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_lane_u8<const IMM5: i32>(v: uint8x8_t) -> u8 {
@@ -4936,7 +4936,7 @@ pub unsafe fn vget_lane_u8<const IMM5: i32>(v: uint8x8_t) -> u8 {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(test, assert_instr(nop, IMM5 = 2))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_lane_s8<const IMM5: i32>(v: int8x8_t) -> i8 {
@@ -4951,7 +4951,7 @@ pub unsafe fn vget_lane_s8<const IMM5: i32>(v: int8x8_t) -> i8 {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(test, assert_instr(nop, IMM5 = 2))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_lane_p8<const IMM5: i32>(v: poly8x8_t) -> p8 {
@@ -4966,7 +4966,7 @@ pub unsafe fn vget_lane_p8<const IMM5: i32>(v: poly8x8_t) -> p8 {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(test, assert_instr(nop, IMM5 = 2))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vgetq_lane_u8<const IMM5: i32>(v: uint8x16_t) -> u8 {
@@ -4981,7 +4981,7 @@ pub unsafe fn vgetq_lane_u8<const IMM5: i32>(v: uint8x16_t) -> u8 {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(test, assert_instr(nop, IMM5 = 2))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vgetq_lane_s8<const IMM5: i32>(v: int8x16_t) -> i8 {
@@ -4996,7 +4996,7 @@ pub unsafe fn vgetq_lane_s8<const IMM5: i32>(v: int8x16_t) -> i8 {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(test, assert_instr(nop, IMM5 = 2))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vgetq_lane_p8<const IMM5: i32>(v: poly8x16_t) -> p8 {
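
The vget_lane/vgetq_lane accessors above extract a single scalar lane; the lane index is a const generic checked at compile time, and rustc_legacy_const_generics keeps the older call form, with the index as a trailing argument, working. A minimal sketch (vset_lane_u16, assumed from the same module, plants a recognizable value first):

    fn main() {
        #[cfg(target_arch = "aarch64")]
        unsafe {
            use core::arch::aarch64::*;
            let v = vset_lane_u16::<2>(42, vdup_n_u16(0));
            assert_eq!(vget_lane_u16::<2>(v), 42);
        }
    }
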
@@ -5011,7 +5011,7 @@ pub unsafe fn vgetq_lane_p8<const IMM5: i32>(v: poly8x16_t) -> p8 {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_high_s8(a: int8x16_t) -> int8x8_t {
@@ -5025,7 +5025,7 @@ pub unsafe fn vget_high_s8(a: int8x16_t) -> int8x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_high_s16(a: int16x8_t) -> int16x4_t {
@@ -5039,7 +5039,7 @@ pub unsafe fn vget_high_s16(a: int16x8_t) -> int16x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_high_s32(a: int32x4_t) -> int32x2_t {
@@ -5053,7 +5053,7 @@ pub unsafe fn vget_high_s32(a: int32x4_t) -> int32x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_high_s64(a: int64x2_t) -> int64x1_t {
@@ -5067,7 +5067,7 @@ pub unsafe fn vget_high_s64(a: int64x2_t) -> int64x1_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_high_u8(a: uint8x16_t) -> uint8x8_t {
@@ -5081,7 +5081,7 @@ pub unsafe fn vget_high_u8(a: uint8x16_t) -> uint8x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_high_u16(a: uint16x8_t) -> uint16x4_t {
@@ -5095,7 +5095,7 @@ pub unsafe fn vget_high_u16(a: uint16x8_t) -> uint16x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_high_u32(a: uint32x4_t) -> uint32x2_t {
@@ -5109,7 +5109,7 @@ pub unsafe fn vget_high_u32(a: uint32x4_t) -> uint32x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_high_u64(a: uint64x2_t) -> uint64x1_t {
@@ -5123,7 +5123,7 @@ pub unsafe fn vget_high_u64(a: uint64x2_t) -> uint64x1_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_high_p8(a: poly8x16_t) -> poly8x8_t {
@@ -5137,7 +5137,7 @@ pub unsafe fn vget_high_p8(a: poly8x16_t) -> poly8x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_high_p16(a: poly16x8_t) -> poly16x4_t {
@@ -5151,7 +5151,7 @@ pub unsafe fn vget_high_p16(a: poly16x8_t) -> poly16x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_high_f32(a: float32x4_t) -> float32x2_t {
@@ -5164,7 +5164,7 @@ pub unsafe fn vget_high_f32(a: float32x4_t) -> float32x2_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(test, assert_instr(nop))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "vget_low_s8", since = "1.60.0")
)]
pub unsafe fn vget_low_s8(a: int8x16_t) -> int8x8_t {
@@ -5177,7 +5177,7 @@ pub unsafe fn vget_low_s8(a: int8x16_t) -> int8x8_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(test, assert_instr(nop))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_low_s16(a: int16x8_t) -> int16x4_t {
@@ -5190,7 +5190,7 @@ pub unsafe fn vget_low_s16(a: int16x8_t) -> int16x4_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(test, assert_instr(nop))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_low_s32(a: int32x4_t) -> int32x2_t {
@@ -5203,7 +5203,7 @@ pub unsafe fn vget_low_s32(a: int32x4_t) -> int32x2_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(test, assert_instr(nop))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_low_s64(a: int64x2_t) -> int64x1_t {
@@ -5216,7 +5216,7 @@ pub unsafe fn vget_low_s64(a: int64x2_t) -> int64x1_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(test, assert_instr(nop))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_low_u8(a: uint8x16_t) -> uint8x8_t {
@@ -5229,7 +5229,7 @@ pub unsafe fn vget_low_u8(a: uint8x16_t) -> uint8x8_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(test, assert_instr(nop))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_low_u16(a: uint16x8_t) -> uint16x4_t {
@@ -5242,7 +5242,7 @@ pub unsafe fn vget_low_u16(a: uint16x8_t) -> uint16x4_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(test, assert_instr(nop))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_low_u32(a: uint32x4_t) -> uint32x2_t {
@@ -5255,7 +5255,7 @@ pub unsafe fn vget_low_u32(a: uint32x4_t) -> uint32x2_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(test, assert_instr(nop))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_low_u64(a: uint64x2_t) -> uint64x1_t {
@@ -5268,7 +5268,7 @@ pub unsafe fn vget_low_u64(a: uint64x2_t) -> uint64x1_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(test, assert_instr(nop))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_low_p8(a: poly8x16_t) -> poly8x8_t {
@@ -5281,7 +5281,7 @@ pub unsafe fn vget_low_p8(a: poly8x16_t) -> poly8x8_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(test, assert_instr(nop))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_low_p16(a: poly16x8_t) -> poly16x4_t {
@@ -5294,7 +5294,7 @@ pub unsafe fn vget_low_p16(a: poly16x8_t) -> poly16x4_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(test, assert_instr(nop))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_low_f32(a: float32x4_t) -> float32x2_t {
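
vget_high_*/vget_low_* split a 128-bit vector into its upper and lower 64-bit halves; per the assert_instr attributes above, the high half costs an ext (vmov on Arm) while the low half is a plain nop. A minimal sketch:

    fn main() {
        #[cfg(target_arch = "aarch64")]
        unsafe {
            use core::arch::aarch64::*;
            // Low half all 1s, high half all 2s.
            let q = vcombine_u8(vdup_n_u8(1), vdup_n_u8(2));
            assert_eq!(vget_lane_u8::<0>(vget_low_u8(q)), 1);
            assert_eq!(vget_lane_u8::<0>(vget_high_u8(q)), 2);
        }
    }
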
@@ -5308,7 +5308,7 @@ pub unsafe fn vget_low_f32(a: float32x4_t) -> float32x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vdupq_n_s8(value: i8) -> int8x16_t {
@@ -5325,7 +5325,7 @@ pub unsafe fn vdupq_n_s8(value: i8) -> int8x16_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vdupq_n_s16(value: i16) -> int16x8_t {
@@ -5339,7 +5339,7 @@ pub unsafe fn vdupq_n_s16(value: i16) -> int16x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vdupq_n_s32(value: i32) -> int32x4_t {
@@ -5353,7 +5353,7 @@ pub unsafe fn vdupq_n_s32(value: i32) -> int32x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vdupq_n_s64(value: i64) -> int64x2_t {
@@ -5367,7 +5367,7 @@ pub unsafe fn vdupq_n_s64(value: i64) -> int64x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vdupq_n_u8(value: u8) -> uint8x16_t {
@@ -5384,7 +5384,7 @@ pub unsafe fn vdupq_n_u8(value: u8) -> uint8x16_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vdupq_n_u16(value: u16) -> uint16x8_t {
@@ -5398,7 +5398,7 @@ pub unsafe fn vdupq_n_u16(value: u16) -> uint16x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vdupq_n_u32(value: u32) -> uint32x4_t {
@@ -5412,7 +5412,7 @@ pub unsafe fn vdupq_n_u32(value: u32) -> uint32x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vdupq_n_u64(value: u64) -> uint64x2_t {
@@ -5426,7 +5426,7 @@ pub unsafe fn vdupq_n_u64(value: u64) -> uint64x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vdupq_n_p8(value: p8) -> poly8x16_t {
@@ -5443,7 +5443,7 @@ pub unsafe fn vdupq_n_p8(value: p8) -> poly8x16_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vdupq_n_p16(value: p16) -> poly16x8_t {
@@ -5457,7 +5457,7 @@ pub unsafe fn vdupq_n_p16(value: p16) -> poly16x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vdupq_n_f32(value: f32) -> float32x4_t {
@@ -5474,7 +5474,7 @@ pub unsafe fn vdupq_n_f32(value: f32) -> float32x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
unsafe fn vdupq_n_f32_vfp4(value: f32) -> float32x4_t {
@@ -5488,7 +5488,7 @@ unsafe fn vdupq_n_f32_vfp4(value: f32) -> float32x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vdup_n_s8(value: i8) -> int8x8_t {
@@ -5502,7 +5502,7 @@ pub unsafe fn vdup_n_s8(value: i8) -> int8x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vdup_n_s16(value: i16) -> int16x4_t {
@@ -5516,7 +5516,7 @@ pub unsafe fn vdup_n_s16(value: i16) -> int16x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vdup_n_s32(value: i32) -> int32x2_t {
@@ -5530,7 +5530,7 @@ pub unsafe fn vdup_n_s32(value: i32) -> int32x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmov))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vdup_n_s64(value: i64) -> int64x1_t {
@@ -5544,7 +5544,7 @@ pub unsafe fn vdup_n_s64(value: i64) -> int64x1_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vdup_n_u8(value: u8) -> uint8x8_t {
@@ -5558,7 +5558,7 @@ pub unsafe fn vdup_n_u8(value: u8) -> uint8x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vdup_n_u16(value: u16) -> uint16x4_t {
@@ -5572,7 +5572,7 @@ pub unsafe fn vdup_n_u16(value: u16) -> uint16x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vdup_n_u32(value: u32) -> uint32x2_t {
@@ -5586,7 +5586,7 @@ pub unsafe fn vdup_n_u32(value: u32) -> uint32x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmov))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vdup_n_u64(value: u64) -> uint64x1_t {
@@ -5600,7 +5600,7 @@ pub unsafe fn vdup_n_u64(value: u64) -> uint64x1_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vdup_n_p8(value: p8) -> poly8x8_t {
@@ -5614,7 +5614,7 @@ pub unsafe fn vdup_n_p8(value: p8) -> poly8x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vdup_n_p16(value: p16) -> poly16x4_t {
@@ -5628,7 +5628,7 @@ pub unsafe fn vdup_n_p16(value: p16) -> poly16x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vdup_n_f32(value: f32) -> float32x2_t {
@@ -5645,7 +5645,7 @@ pub unsafe fn vdup_n_f32(value: f32) -> float32x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
unsafe fn vdup_n_f32_vfp4(value: f32) -> float32x2_t {
@@ -5659,7 +5659,7 @@ unsafe fn vdup_n_f32_vfp4(value: f32) -> float32x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vldrq_p128(a: *const p128) -> p128 {
@@ -5673,7 +5673,7 @@ pub unsafe fn vldrq_p128(a: *const p128) -> p128 {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vstrq_p128(a: *mut p128, b: p128) {
@@ -5687,7 +5687,7 @@ pub unsafe fn vstrq_p128(a: *mut p128, b: p128) {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmov_n_s8(value: i8) -> int8x8_t {
@@ -5701,7 +5701,7 @@ pub unsafe fn vmov_n_s8(value: i8) -> int8x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmov_n_s16(value: i16) -> int16x4_t {
@@ -5715,7 +5715,7 @@ pub unsafe fn vmov_n_s16(value: i16) -> int16x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmov_n_s32(value: i32) -> int32x2_t {
@@ -5729,7 +5729,7 @@ pub unsafe fn vmov_n_s32(value: i32) -> int32x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmov))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmov_n_s64(value: i64) -> int64x1_t {
@@ -5743,7 +5743,7 @@ pub unsafe fn vmov_n_s64(value: i64) -> int64x1_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmov_n_u8(value: u8) -> uint8x8_t {
@@ -5757,7 +5757,7 @@ pub unsafe fn vmov_n_u8(value: u8) -> uint8x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmov_n_u16(value: u16) -> uint16x4_t {
@@ -5771,7 +5771,7 @@ pub unsafe fn vmov_n_u16(value: u16) -> uint16x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmov_n_u32(value: u32) -> uint32x2_t {
@@ -5785,7 +5785,7 @@ pub unsafe fn vmov_n_u32(value: u32) -> uint32x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmov))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmov_n_u64(value: u64) -> uint64x1_t {
@@ -5799,7 +5799,7 @@ pub unsafe fn vmov_n_u64(value: u64) -> uint64x1_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmov_n_p8(value: p8) -> poly8x8_t {
@@ -5813,7 +5813,7 @@ pub unsafe fn vmov_n_p8(value: p8) -> poly8x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmov_n_p16(value: p16) -> poly16x4_t {
@@ -5827,7 +5827,7 @@ pub unsafe fn vmov_n_p16(value: p16) -> poly16x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmov_n_f32(value: f32) -> float32x2_t {
@@ -5841,7 +5841,7 @@ pub unsafe fn vmov_n_f32(value: f32) -> float32x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmovq_n_s8(value: i8) -> int8x16_t {
@@ -5855,7 +5855,7 @@ pub unsafe fn vmovq_n_s8(value: i8) -> int8x16_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmovq_n_s16(value: i16) -> int16x8_t {
@@ -5869,7 +5869,7 @@ pub unsafe fn vmovq_n_s16(value: i16) -> int16x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmovq_n_s32(value: i32) -> int32x4_t {
@@ -5883,7 +5883,7 @@ pub unsafe fn vmovq_n_s32(value: i32) -> int32x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmovq_n_s64(value: i64) -> int64x2_t {
@@ -5897,7 +5897,7 @@ pub unsafe fn vmovq_n_s64(value: i64) -> int64x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmovq_n_u8(value: u8) -> uint8x16_t {
@@ -5911,7 +5911,7 @@ pub unsafe fn vmovq_n_u8(value: u8) -> uint8x16_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmovq_n_u16(value: u16) -> uint16x8_t {
@@ -5925,7 +5925,7 @@ pub unsafe fn vmovq_n_u16(value: u16) -> uint16x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmovq_n_u32(value: u32) -> uint32x4_t {
@@ -5939,7 +5939,7 @@ pub unsafe fn vmovq_n_u32(value: u32) -> uint32x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmovq_n_u64(value: u64) -> uint64x2_t {
@@ -5953,7 +5953,7 @@ pub unsafe fn vmovq_n_u64(value: u64) -> uint64x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmovq_n_p8(value: p8) -> poly8x16_t {
@@ -5967,7 +5967,7 @@ pub unsafe fn vmovq_n_p8(value: p8) -> poly8x16_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmovq_n_p16(value: p16) -> poly16x8_t {
@@ -5981,7 +5981,7 @@ pub unsafe fn vmovq_n_p16(value: p16) -> poly16x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmovq_n_f32(value: f32) -> float32x4_t {
@@ -5996,7 +5996,7 @@ pub unsafe fn vmovq_n_f32(value: f32) -> float32x4_t {
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr("nop", N = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vext_s64<const N: i32>(a: int64x1_t, _b: int64x1_t) -> int64x1_t {
@@ -6012,7 +6012,7 @@ pub unsafe fn vext_s64<const N: i32>(a: int64x1_t, _b: int64x1_t) -> int64x1_t {
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr("nop", N = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vext_u64<const N: i32>(a: uint64x1_t, _b: uint64x1_t) -> uint64x1_t {
@@ -6027,7 +6027,7 @@ pub unsafe fn vext_u64<const N: i32>(a: uint64x1_t, _b: uint64x1_t) -> uint64x1_
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cnt))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vcnt_s8(a: int8x8_t) -> int8x8_t {
@@ -6040,7 +6040,7 @@ pub unsafe fn vcnt_s8(a: int8x8_t) -> int8x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cnt))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vcntq_s8(a: int8x16_t) -> int8x16_t {
@@ -6053,7 +6053,7 @@ pub unsafe fn vcntq_s8(a: int8x16_t) -> int8x16_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cnt))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vcnt_u8(a: uint8x8_t) -> uint8x8_t {
@@ -6066,7 +6066,7 @@ pub unsafe fn vcnt_u8(a: uint8x8_t) -> uint8x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cnt))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vcntq_u8(a: uint8x16_t) -> uint8x16_t {
@@ -6079,7 +6079,7 @@ pub unsafe fn vcntq_u8(a: uint8x16_t) -> uint8x16_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cnt))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vcnt_p8(a: poly8x8_t) -> poly8x8_t {
@@ -6092,7 +6092,7 @@ pub unsafe fn vcnt_p8(a: poly8x8_t) -> poly8x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cnt))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vcntq_p8(a: poly8x16_t) -> poly8x16_t {
@@ -6106,7 +6106,7 @@ pub unsafe fn vcntq_p8(a: poly8x16_t) -> poly8x16_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev16.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev16))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev16_s8(a: int8x8_t) -> int8x8_t {
@@ -6120,7 +6120,7 @@ pub unsafe fn vrev16_s8(a: int8x8_t) -> int8x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev16.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev16))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev16q_s8(a: int8x16_t) -> int8x16_t {
@@ -6134,7 +6134,7 @@ pub unsafe fn vrev16q_s8(a: int8x16_t) -> int8x16_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev16.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev16))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev16_u8(a: uint8x8_t) -> uint8x8_t {
@@ -6148,7 +6148,7 @@ pub unsafe fn vrev16_u8(a: uint8x8_t) -> uint8x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev16.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev16))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev16q_u8(a: uint8x16_t) -> uint8x16_t {
@@ -6162,7 +6162,7 @@ pub unsafe fn vrev16q_u8(a: uint8x16_t) -> uint8x16_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev16.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev16))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev16_p8(a: poly8x8_t) -> poly8x8_t {
@@ -6176,7 +6176,7 @@ pub unsafe fn vrev16_p8(a: poly8x8_t) -> poly8x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev16.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev16))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev16q_p8(a: poly8x16_t) -> poly8x16_t {
@@ -6190,7 +6190,7 @@ pub unsafe fn vrev16q_p8(a: poly8x16_t) -> poly8x16_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev32))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev32_s8(a: int8x8_t) -> int8x8_t {
@@ -6204,7 +6204,7 @@ pub unsafe fn vrev32_s8(a: int8x8_t) -> int8x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev32))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev32q_s8(a: int8x16_t) -> int8x16_t {
@@ -6218,7 +6218,7 @@ pub unsafe fn vrev32q_s8(a: int8x16_t) -> int8x16_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev32))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev32_u8(a: uint8x8_t) -> uint8x8_t {
@@ -6232,7 +6232,7 @@ pub unsafe fn vrev32_u8(a: uint8x8_t) -> uint8x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev32))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev32q_u8(a: uint8x16_t) -> uint8x16_t {
@@ -6246,7 +6246,7 @@ pub unsafe fn vrev32q_u8(a: uint8x16_t) -> uint8x16_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev32))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev32_s16(a: int16x4_t) -> int16x4_t {
@@ -6260,7 +6260,7 @@ pub unsafe fn vrev32_s16(a: int16x4_t) -> int16x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev32))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev32q_s16(a: int16x8_t) -> int16x8_t {
@@ -6274,7 +6274,7 @@ pub unsafe fn vrev32q_s16(a: int16x8_t) -> int16x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev32))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev32_p16(a: poly16x4_t) -> poly16x4_t {
@@ -6288,7 +6288,7 @@ pub unsafe fn vrev32_p16(a: poly16x4_t) -> poly16x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev32))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev32q_p16(a: poly16x8_t) -> poly16x8_t {
@@ -6302,7 +6302,7 @@ pub unsafe fn vrev32q_p16(a: poly16x8_t) -> poly16x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev32))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev32_u16(a: uint16x4_t) -> uint16x4_t {
@@ -6316,7 +6316,7 @@ pub unsafe fn vrev32_u16(a: uint16x4_t) -> uint16x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev32))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev32q_u16(a: uint16x8_t) -> uint16x8_t {
@@ -6330,7 +6330,7 @@ pub unsafe fn vrev32q_u16(a: uint16x8_t) -> uint16x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev32))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev32_p8(a: poly8x8_t) -> poly8x8_t {
@@ -6344,7 +6344,7 @@ pub unsafe fn vrev32_p8(a: poly8x8_t) -> poly8x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev32))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev32q_p8(a: poly8x16_t) -> poly8x16_t {
@@ -6358,7 +6358,7 @@ pub unsafe fn vrev32q_p8(a: poly8x16_t) -> poly8x16_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev64_s8(a: int8x8_t) -> int8x8_t {
@@ -6372,7 +6372,7 @@ pub unsafe fn vrev64_s8(a: int8x8_t) -> int8x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev64q_s8(a: int8x16_t) -> int8x16_t {
@@ -6386,7 +6386,7 @@ pub unsafe fn vrev64q_s8(a: int8x16_t) -> int8x16_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev64_s16(a: int16x4_t) -> int16x4_t {
@@ -6400,7 +6400,7 @@ pub unsafe fn vrev64_s16(a: int16x4_t) -> int16x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev64q_s16(a: int16x8_t) -> int16x8_t {
@@ -6414,7 +6414,7 @@ pub unsafe fn vrev64q_s16(a: int16x8_t) -> int16x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev64_s32(a: int32x2_t) -> int32x2_t {
@@ -6428,7 +6428,7 @@ pub unsafe fn vrev64_s32(a: int32x2_t) -> int32x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev64q_s32(a: int32x4_t) -> int32x4_t {
@@ -6442,7 +6442,7 @@ pub unsafe fn vrev64q_s32(a: int32x4_t) -> int32x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev64_u8(a: uint8x8_t) -> uint8x8_t {
@@ -6456,7 +6456,7 @@ pub unsafe fn vrev64_u8(a: uint8x8_t) -> uint8x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev64q_u8(a: uint8x16_t) -> uint8x16_t {
@@ -6470,7 +6470,7 @@ pub unsafe fn vrev64q_u8(a: uint8x16_t) -> uint8x16_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev64_u16(a: uint16x4_t) -> uint16x4_t {
@@ -6484,7 +6484,7 @@ pub unsafe fn vrev64_u16(a: uint16x4_t) -> uint16x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev64q_u16(a: uint16x8_t) -> uint16x8_t {
@@ -6498,7 +6498,7 @@ pub unsafe fn vrev64q_u16(a: uint16x8_t) -> uint16x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev64_u32(a: uint32x2_t) -> uint32x2_t {
@@ -6512,7 +6512,7 @@ pub unsafe fn vrev64_u32(a: uint32x2_t) -> uint32x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev64q_u32(a: uint32x4_t) -> uint32x4_t {
@@ -6526,7 +6526,7 @@ pub unsafe fn vrev64q_u32(a: uint32x4_t) -> uint32x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev64_f32(a: float32x2_t) -> float32x2_t {
@@ -6540,7 +6540,7 @@ pub unsafe fn vrev64_f32(a: float32x2_t) -> float32x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev64q_f32(a: float32x4_t) -> float32x4_t {
@@ -6554,7 +6554,7 @@ pub unsafe fn vrev64q_f32(a: float32x4_t) -> float32x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev64_p8(a: poly8x8_t) -> poly8x8_t {
@@ -6568,7 +6568,7 @@ pub unsafe fn vrev64_p8(a: poly8x8_t) -> poly8x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev64q_p8(a: poly8x16_t) -> poly8x16_t {
@@ -6582,7 +6582,7 @@ pub unsafe fn vrev64q_p8(a: poly8x16_t) -> poly8x16_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev64_p16(a: poly16x4_t) -> poly16x4_t {
@@ -6596,7 +6596,7 @@ pub unsafe fn vrev64_p16(a: poly16x4_t) -> poly16x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev64q_p16(a: poly16x8_t) -> poly16x8_t {
@@ -6610,7 +6610,7 @@ pub unsafe fn vrev64q_p16(a: poly16x8_t) -> poly16x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadal.s8))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sadalp))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpadal_s8(a: int16x4_t, b: int8x8_t) -> int16x4_t {
@@ -6631,7 +6631,7 @@ pub unsafe fn vpadal_s8(a: int16x4_t, b: int8x8_t) -> int16x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadal.s16))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sadalp))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpadal_s16(a: int32x2_t, b: int16x4_t) -> int32x2_t {
@@ -6652,7 +6652,7 @@ pub unsafe fn vpadal_s16(a: int32x2_t, b: int16x4_t) -> int32x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadal.s32))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sadalp))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpadal_s32(a: int64x1_t, b: int32x2_t) -> int64x1_t {
@@ -6673,7 +6673,7 @@ pub unsafe fn vpadal_s32(a: int64x1_t, b: int32x2_t) -> int64x1_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadal.s8))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sadalp))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpadalq_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t {
@@ -6694,7 +6694,7 @@ pub unsafe fn vpadalq_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadal.s16))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sadalp))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpadalq_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t {
@@ -6715,7 +6715,7 @@ pub unsafe fn vpadalq_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadal.s32))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sadalp))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpadalq_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t {
@@ -6736,7 +6736,7 @@ pub unsafe fn vpadalq_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadal.u8))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uadalp))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpadal_u8(a: uint16x4_t, b: uint8x8_t) -> uint16x4_t {
@@ -6757,7 +6757,7 @@ pub unsafe fn vpadal_u8(a: uint16x4_t, b: uint8x8_t) -> uint16x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadal.u16))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uadalp))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpadal_u16(a: uint32x2_t, b: uint16x4_t) -> uint32x2_t {
@@ -6778,7 +6778,7 @@ pub unsafe fn vpadal_u16(a: uint32x2_t, b: uint16x4_t) -> uint32x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadal.u32))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uadalp))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpadal_u32(a: uint64x1_t, b: uint32x2_t) -> uint64x1_t {
@@ -6799,7 +6799,7 @@ pub unsafe fn vpadal_u32(a: uint64x1_t, b: uint32x2_t) -> uint64x1_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadal.u8))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uadalp))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpadalq_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t {
@@ -6820,7 +6820,7 @@ pub unsafe fn vpadalq_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadal.u16))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uadalp))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpadalq_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t {
@@ -6841,7 +6841,7 @@ pub unsafe fn vpadalq_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadal.u32))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uadalp))]
#[cfg_attr(
- target_arch = "aarch64",
+ not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpadalq_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t {
diff --git a/library/stdarch/crates/core_arch/src/powerpc/altivec.rs b/library/stdarch/crates/core_arch/src/powerpc/altivec.rs
index 70344c88b..419fd4fee 100644
--- a/library/stdarch/crates/core_arch/src/powerpc/altivec.rs
+++ b/library/stdarch/crates/core_arch/src/powerpc/altivec.rs
@@ -53,6 +53,17 @@ types! {
extern "C" {
#[link_name = "llvm.ppc.altivec.lvx"]
fn lvx(p: *const i8) -> vector_unsigned_int;
+
+ #[link_name = "llvm.ppc.altivec.lvebx"]
+ fn lvebx(p: *const i8) -> vector_signed_char;
+ #[link_name = "llvm.ppc.altivec.lvehx"]
+ fn lvehx(p: *const i8) -> vector_signed_short;
+ #[link_name = "llvm.ppc.altivec.lvewx"]
+ fn lvewx(p: *const i8) -> vector_signed_int;
+
+ #[link_name = "llvm.ppc.altivec.lvxl"]
+ fn lvxl(p: *const i8) -> vector_unsigned_int;
+
#[link_name = "llvm.ppc.altivec.vperm"]
fn vperm(
a: vector_signed_int,
@@ -244,6 +255,46 @@ extern "C" {
#[link_name = "llvm.floor.v4f32"]
fn vfloor(a: vector_float) -> vector_float;
+
+ #[link_name = "llvm.ppc.altivec.vcmpequb.p"]
+ fn vcmpequb_p(cr: i32, a: vector_unsigned_char, b: vector_unsigned_char) -> i32;
+ #[link_name = "llvm.ppc.altivec.vcmpequh.p"]
+ fn vcmpequh_p(cr: i32, a: vector_unsigned_short, b: vector_unsigned_short) -> i32;
+ #[link_name = "llvm.ppc.altivec.vcmpequw.p"]
+ fn vcmpequw_p(cr: i32, a: vector_unsigned_int, b: vector_unsigned_int) -> i32;
+
+ #[link_name = "llvm.ppc.altivec.vcmpeqfp.p"]
+ fn vcmpeqfp_p(cr: i32, a: vector_float, b: vector_float) -> i32;
+
+ #[link_name = "llvm.ppc.altivec.vcmpgtub.p"]
+ fn vcmpgtub_p(cr: i32, a: vector_unsigned_char, b: vector_unsigned_char) -> i32;
+ #[link_name = "llvm.ppc.altivec.vcmpgtuh.p"]
+ fn vcmpgtuh_p(cr: i32, a: vector_unsigned_short, b: vector_unsigned_short) -> i32;
+ #[link_name = "llvm.ppc.altivec.vcmpgtuw.p"]
+ fn vcmpgtuw_p(cr: i32, a: vector_unsigned_int, b: vector_unsigned_int) -> i32;
+ #[link_name = "llvm.ppc.altivec.vcmpgtsb.p"]
+ fn vcmpgtsb_p(cr: i32, a: vector_signed_char, b: vector_signed_char) -> i32;
+ #[link_name = "llvm.ppc.altivec.vcmpgtsh.p"]
+ fn vcmpgtsh_p(cr: i32, a: vector_signed_short, b: vector_signed_short) -> i32;
+ #[link_name = "llvm.ppc.altivec.vcmpgtsw.p"]
+ fn vcmpgtsw_p(cr: i32, a: vector_signed_int, b: vector_signed_int) -> i32;
+
+ #[link_name = "llvm.ppc.altivec.vcmpgefp.p"]
+ fn vcmpgefp_p(cr: i32, a: vector_float, b: vector_float) -> i32;
+ #[link_name = "llvm.ppc.altivec.vcmpgtfp.p"]
+ fn vcmpgtfp_p(cr: i32, a: vector_float, b: vector_float) -> i32;
+ #[link_name = "llvm.ppc.altivec.vcmpbfp.p"]
+ fn vcmpbfp_p(cr: i32, a: vector_float, b: vector_float) -> i32;
+
+ #[link_name = "llvm.ppc.altivec.vcfsx"]
+ fn vcfsx(a: vector_signed_int, b: i32) -> vector_float;
+ #[link_name = "llvm.ppc.altivec.vcfux"]
+ fn vcfux(a: vector_unsigned_int, b: i32) -> vector_float;
+
+ #[link_name = "llvm.ppc.altivec.vctsxs"]
+ fn vctsxs(a: vector_float, b: i32) -> vector_signed_int;
+ #[link_name = "llvm.ppc.altivec.vctuxs"]
+ fn vctuxs(a: vector_float, b: i32) -> vector_unsigned_int;
}
macro_rules! s_t_l {
@@ -337,96 +388,6 @@ impl_neg! { f32x4 : 0f32 }
mod sealed {
use super::*;
- macro_rules! test_impl {
- ($fun:ident ($($v:ident : $ty:ty),*) -> $r:ty [$call:ident, $instr:ident]) => {
- #[inline]
- #[target_feature(enable = "altivec")]
- #[cfg_attr(test, assert_instr($instr))]
- pub unsafe fn $fun ($($v : $ty),*) -> $r {
- $call ($($v),*)
- }
- };
- ($fun:ident ($($v:ident : $ty:ty),*) -> $r:ty [$call:ident, $instr_altivec:ident / $instr_vsx:ident]) => {
- #[inline]
- #[target_feature(enable = "altivec")]
- #[cfg_attr(all(test, not(target_feature="vsx")), assert_instr($instr_altivec))]
- #[cfg_attr(all(test, target_feature="vsx"), assert_instr($instr_vsx))]
- pub unsafe fn $fun ($($v : $ty),*) -> $r {
- $call ($($v),*)
- }
- }
-
- }
-
- #[allow(unknown_lints, unused_macro_rules)]
- macro_rules! impl_vec_trait {
- ([$Trait:ident $m:ident] $fun:ident ($a:ty)) => {
- impl $Trait for $a {
- #[inline]
- #[target_feature(enable = "altivec")]
- unsafe fn $m(self) -> Self {
- $fun(transmute(self))
- }
- }
- };
- ([$Trait:ident $m:ident] $fun:ident ($a:ty) -> $r:ty) => {
- impl $Trait for $a {
- type Result = $r;
- #[inline]
- #[target_feature(enable = "altivec")]
- unsafe fn $m(self) -> Self::Result {
- $fun(transmute(self))
- }
- }
- };
- ([$Trait:ident $m:ident] 1 ($ub:ident, $sb:ident, $uh:ident, $sh:ident, $uw:ident, $sw:ident, $sf: ident)) => {
- impl_vec_trait!{ [$Trait $m] $ub (vector_unsigned_char) -> vector_unsigned_char }
- impl_vec_trait!{ [$Trait $m] $sb (vector_signed_char) -> vector_signed_char }
- impl_vec_trait!{ [$Trait $m] $uh (vector_unsigned_short) -> vector_unsigned_short }
- impl_vec_trait!{ [$Trait $m] $sh (vector_signed_short) -> vector_signed_short }
- impl_vec_trait!{ [$Trait $m] $uw (vector_unsigned_int) -> vector_unsigned_int }
- impl_vec_trait!{ [$Trait $m] $sw (vector_signed_int) -> vector_signed_int }
- impl_vec_trait!{ [$Trait $m] $sf (vector_float) -> vector_float }
- };
- ([$Trait:ident $m:ident] $fun:ident ($a:ty, $b:ty) -> $r:ty) => {
- impl $Trait<$b> for $a {
- type Result = $r;
- #[inline]
- #[target_feature(enable = "altivec")]
- unsafe fn $m(self, b: $b) -> Self::Result {
- $fun(transmute(self), transmute(b))
- }
- }
- };
- ([$Trait:ident $m:ident] $fun:ident ($a:ty, ~$b:ty) -> $r:ty) => {
- impl_vec_trait!{ [$Trait $m] $fun ($a, $a) -> $r }
- impl_vec_trait!{ [$Trait $m] $fun ($a, $b) -> $r }
- impl_vec_trait!{ [$Trait $m] $fun ($b, $a) -> $r }
- };
- ([$Trait:ident $m:ident] ~($ub:ident, $sb:ident, $uh:ident, $sh:ident, $uw:ident, $sw:ident)) => {
- impl_vec_trait!{ [$Trait $m] $ub (vector_unsigned_char, ~vector_bool_char) -> vector_unsigned_char }
- impl_vec_trait!{ [$Trait $m] $sb (vector_signed_char, ~vector_bool_char) -> vector_signed_char }
- impl_vec_trait!{ [$Trait $m] $uh (vector_unsigned_short, ~vector_bool_short) -> vector_unsigned_short }
- impl_vec_trait!{ [$Trait $m] $sh (vector_signed_short, ~vector_bool_short) -> vector_signed_short }
- impl_vec_trait!{ [$Trait $m] $uw (vector_unsigned_int, ~vector_bool_int) -> vector_unsigned_int }
- impl_vec_trait!{ [$Trait $m] $sw (vector_signed_int, ~vector_bool_int) -> vector_signed_int }
- };
- ([$Trait:ident $m:ident] ~($fn:ident)) => {
- impl_vec_trait!{ [$Trait $m] ~($fn, $fn, $fn, $fn, $fn, $fn) }
- };
- ([$Trait:ident $m:ident] 2 ($ub:ident, $sb:ident, $uh:ident, $sh:ident, $uw:ident, $sw:ident)) => {
- impl_vec_trait!{ [$Trait $m] $ub (vector_unsigned_char, vector_unsigned_char) -> vector_unsigned_char }
- impl_vec_trait!{ [$Trait $m] $sb (vector_signed_char, vector_signed_char) -> vector_signed_char }
- impl_vec_trait!{ [$Trait $m] $uh (vector_unsigned_short, vector_unsigned_short) -> vector_unsigned_short }
- impl_vec_trait!{ [$Trait $m] $sh (vector_signed_short, vector_signed_short) -> vector_signed_short }
- impl_vec_trait!{ [$Trait $m] $uw (vector_unsigned_int, vector_unsigned_int) -> vector_unsigned_int }
- impl_vec_trait!{ [$Trait $m] $sw (vector_signed_int, vector_signed_int) -> vector_signed_int }
- };
- ([$Trait:ident $m:ident] 2 ($fn:ident)) => {
- impl_vec_trait!{ [$Trait $m] ($fn, $fn, $fn, $fn, $fn, $fn) }
- }
- }
-
macro_rules! impl_vec_cmp {
([$Trait:ident $m:ident] ($b:ident, $h:ident, $w:ident)) => {
impl_vec_cmp! { [$Trait $m] ($b, $b, $h, $h, $w, $w) }
@@ -441,50 +402,107 @@ mod sealed {
}
}
- #[inline(always)]
- unsafe fn load(off: i32, p: *const i8) -> u32x4 {
- let addr = p.offset(off as isize);
- transmute(lvx(addr))
+ macro_rules! impl_vec_any_all {
+ ([$Trait:ident $m:ident] ($b:ident, $h:ident, $w:ident)) => {
+ impl_vec_any_all! { [$Trait $m] ($b, $b, $h, $h, $w, $w) }
+ };
+ ([$Trait:ident $m:ident] ($ub:ident, $sb:ident, $uh:ident, $sh:ident, $uw:ident, $sw:ident)) => {
+ impl_vec_trait!{ [$Trait $m] $ub (vector_unsigned_char, vector_unsigned_char) -> bool }
+ impl_vec_trait!{ [$Trait $m] $sb (vector_signed_char, vector_signed_char) -> bool }
+ impl_vec_trait!{ [$Trait $m] $uh (vector_unsigned_short, vector_unsigned_short) -> bool }
+ impl_vec_trait!{ [$Trait $m] $sh (vector_signed_short, vector_signed_short) -> bool }
+ impl_vec_trait!{ [$Trait $m] $uw (vector_unsigned_int, vector_unsigned_int) -> bool }
+ impl_vec_trait!{ [$Trait $m] $sw (vector_signed_int, vector_signed_int) -> bool }
+ }
}
pub trait VectorLd {
type Result;
- unsafe fn vec_ld(self, off: i32) -> Self::Result;
+ unsafe fn vec_ld(self, off: isize) -> Self::Result;
+ unsafe fn vec_ldl(self, off: isize) -> Self::Result;
}
macro_rules! impl_vec_ld {
- ($fun:ident $ty:ident [$instr:ident]) => {
+ ($fun:ident $fun_lru:ident $ty:ident) => {
#[inline]
#[target_feature(enable = "altivec")]
-            #[cfg_attr(test, assert_instr($instr))]
-            pub unsafe fn $fun(off: i32, p: *const $ty) -> t_t_l!($ty) {
-                transmute(load(off, p as *const i8))
+            #[cfg_attr(test, assert_instr(lvx))]
+            pub unsafe fn $fun(off: isize, p: *const $ty) -> t_t_l!($ty) {
+                let addr = (p as *const i8).offset(off);
+                transmute(lvx(addr))
+            }
+
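+            // Sketch of the distinction, per the PowerPC ISA: lvxl performs
+            // the same 16-byte aligned load as lvx, but additionally marks
+            // the fetched cache line as least-recently-used, hinting that
+            // the loaded data will not be touched again soon.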
+ #[inline]
+ #[target_feature(enable = "altivec")]
+ #[cfg_attr(test, assert_instr(lvxl))]
+            pub unsafe fn $fun_lru(off: isize, p: *const $ty) -> t_t_l!($ty) {
+                let addr = (p as *const i8).offset(off);
+ transmute(lvxl(addr))
}
impl VectorLd for *const $ty {
type Result = t_t_l!($ty);
#[inline]
#[target_feature(enable = "altivec")]
- unsafe fn vec_ld(self, off: i32) -> Self::Result {
+            unsafe fn vec_ld(self, off: isize) -> Self::Result {
                 $fun(off, self)
             }
+            #[inline]
+            #[target_feature(enable = "altivec")]
+            unsafe fn vec_ldl(self, off: isize) -> Self::Result {
+                $fun_lru(off, self)
+            }
}
};
- ($fun:ident $ty:ident) => {
- impl_vec_ld! { $fun $ty [lvx] }
+ }
+
+ impl_vec_ld! { vec_ld_u8 vec_ldl_u8 u8 }
+ impl_vec_ld! { vec_ld_i8 vec_ldl_i8 i8 }
+
+ impl_vec_ld! { vec_ld_u16 vec_ldl_u16 u16 }
+ impl_vec_ld! { vec_ld_i16 vec_ldl_i16 i16 }
+
+ impl_vec_ld! { vec_ld_u32 vec_ldl_u32 u32 }
+ impl_vec_ld! { vec_ld_i32 vec_ldl_i32 i32 }
+
+ impl_vec_ld! { vec_ld_f32 vec_ldl_f32 f32 }
+
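+    // vec_lde loads a single element (lvebx: byte, lvehx: halfword,
+    // lvewx: word) into the lane of the result that corresponds to the
+    // effective address; per the ISA the remaining lanes are unspecified.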
+ pub trait VectorLde {
+ type Result;
+ unsafe fn vec_lde(self, a: isize) -> Self::Result;
+ }
+
+ macro_rules! impl_vec_lde {
+ ($fun:ident $instr:ident $ty:ident) => {
+ #[inline]
+ #[target_feature(enable = "altivec")]
+ #[cfg_attr(test, assert_instr($instr))]
+ pub unsafe fn $fun(a: isize, b: *const $ty) -> t_t_l!($ty) {
+ let addr = (b as *const i8).offset(a);
+ transmute($instr(addr))
+ }
+
+ impl VectorLde for *const $ty {
+ type Result = t_t_l!($ty);
+ #[inline]
+ #[target_feature(enable = "altivec")]
+ unsafe fn vec_lde(self, a: isize) -> Self::Result {
+ $fun(a, self)
+ }
+ }
};
}
- impl_vec_ld! { vec_ld_u8 u8 }
- impl_vec_ld! { vec_ld_i8 i8 }
+ impl_vec_lde! { vec_lde_u8 lvebx u8 }
+ impl_vec_lde! { vec_lde_i8 lvebx i8 }
- impl_vec_ld! { vec_ld_u16 u16 }
- impl_vec_ld! { vec_ld_i16 i16 }
+ impl_vec_lde! { vec_lde_u16 lvehx u16 }
+ impl_vec_lde! { vec_lde_i16 lvehx i16 }
- impl_vec_ld! { vec_ld_u32 u32 }
- impl_vec_ld! { vec_ld_i32 i32 }
+ impl_vec_lde! { vec_lde_u32 lvewx u32 }
+ impl_vec_lde! { vec_lde_i32 lvewx i32 }
- impl_vec_ld! { vec_ld_f32 f32 }
+ impl_vec_lde! { vec_lde_f32 lvewx f32 }
test_impl! { vec_floor(a: vector_float) -> vector_float [ vfloor, vrfim / xvrspim ] }
@@ -520,6 +538,460 @@ mod sealed {
test_impl! { vec_vcmpbfp(a: vector_float, b: vector_float) -> vector_signed_int [vcmpbfp, vcmpbfp] }
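+    // Note on the predicate helpers below: the first argument of the LLVM
+    // vcmp*_p intrinsics selects which CR6 bit the dot-form instruction
+    // sets, matching clang's altivec.h encoding: 0 = __CR6_EQ (all lanes
+    // compared false), 1 = __CR6_EQ_REV (at least one lane true),
+    // 2 = __CR6_LT (all lanes true), 3 = __CR6_LT_REV (at least one lane
+    // false).
+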
+ #[inline]
+ #[target_feature(enable = "altivec")]
+ #[cfg_attr(test, assert_instr(vcmpequb.))]
+ unsafe fn vcmpequb_all(a: vector_unsigned_char, b: vector_unsigned_char) -> bool {
+ vcmpequb_p(2, a, b) != 0
+ }
+
+ #[inline]
+ #[target_feature(enable = "altivec")]
+ #[cfg_attr(test, assert_instr(vcmpequb.))]
+ unsafe fn vcmpequb_any(a: vector_unsigned_char, b: vector_unsigned_char) -> bool {
+ vcmpequb_p(1, a, b) != 0
+ }
+
+ #[inline]
+ #[target_feature(enable = "altivec")]
+ #[cfg_attr(test, assert_instr(vcmpequh.))]
+ unsafe fn vcmpequh_all(a: vector_unsigned_short, b: vector_unsigned_short) -> bool {
+ vcmpequh_p(2, a, b) != 0
+ }
+
+ #[inline]
+ #[target_feature(enable = "altivec")]
+ #[cfg_attr(test, assert_instr(vcmpequh.))]
+ unsafe fn vcmpequh_any(a: vector_unsigned_short, b: vector_unsigned_short) -> bool {
+ vcmpequh_p(1, a, b) != 0
+ }
+
+ #[inline]
+ #[target_feature(enable = "altivec")]
+ #[cfg_attr(test, assert_instr(vcmpequw.))]
+ unsafe fn vcmpequw_all(a: vector_unsigned_int, b: vector_unsigned_int) -> bool {
+ vcmpequw_p(2, a, b) != 0
+ }
+
+ #[inline]
+ #[target_feature(enable = "altivec")]
+ #[cfg_attr(test, assert_instr(vcmpequw.))]
+ unsafe fn vcmpequw_any(a: vector_unsigned_int, b: vector_unsigned_int) -> bool {
+ vcmpequw_p(1, a, b) != 0
+ }
+
+ pub trait VectorAllEq<Other> {
+ type Result;
+ unsafe fn vec_all_eq(self, b: Other) -> Self::Result;
+ }
+
+ impl_vec_any_all! { [VectorAllEq vec_all_eq] (vcmpequb_all, vcmpequh_all, vcmpequw_all) }
+
+ // TODO: vsx encoding
+ #[inline]
+ #[target_feature(enable = "altivec")]
+ #[cfg_attr(test, assert_instr(vcmpeqfp.))]
+ unsafe fn vcmpeqfp_all(a: vector_float, b: vector_float) -> bool {
+ vcmpeqfp_p(2, a, b) != 0
+ }
+
+ impl VectorAllEq<vector_float> for vector_float {
+ type Result = bool;
+ #[inline]
+ unsafe fn vec_all_eq(self, b: vector_float) -> Self::Result {
+ vcmpeqfp_all(self, b)
+ }
+ }
+
+ pub trait VectorAnyEq<Other> {
+ type Result;
+ unsafe fn vec_any_eq(self, b: Other) -> Self::Result;
+ }
+
+ impl_vec_any_all! { [VectorAnyEq vec_any_eq] (vcmpequb_any, vcmpequh_any, vcmpequw_any) }
+
+ #[inline]
+ #[target_feature(enable = "altivec")]
+ #[cfg_attr(test, assert_instr(vcmpeqfp.))]
+ unsafe fn vcmpeqfp_any(a: vector_float, b: vector_float) -> bool {
+ vcmpeqfp_p(1, a, b) != 0
+ }
+
+ impl VectorAnyEq<vector_float> for vector_float {
+ type Result = bool;
+ #[inline]
+ unsafe fn vec_any_eq(self, b: vector_float) -> Self::Result {
+ vcmpeqfp_any(self, b)
+ }
+ }
+
+ // All/Any GreaterEqual
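+    // AltiVec has no vcmpge* instruction for integers, so a >= b is tested
+    // through the reversed comparison b > a: "all a >= b" holds when b > a
+    // is all-false (predicate 0), "any a >= b" when it is not all-true
+    // (predicate 3).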
+
+ #[inline]
+ #[target_feature(enable = "altivec")]
+ #[cfg_attr(test, assert_instr(vcmpgtsb.))]
+ unsafe fn vcmpgesb_all(a: vector_signed_char, b: vector_signed_char) -> bool {
+ vcmpgtsb_p(0, b, a) != 0
+ }
+
+ #[inline]
+ #[target_feature(enable = "altivec")]
+ #[cfg_attr(test, assert_instr(vcmpgtsb.))]
+ unsafe fn vcmpgesb_any(a: vector_signed_char, b: vector_signed_char) -> bool {
+ vcmpgtsb_p(3, b, a) != 0
+ }
+
+ #[inline]
+ #[target_feature(enable = "altivec")]
+ #[cfg_attr(test, assert_instr(vcmpgtsh.))]
+ unsafe fn vcmpgesh_all(a: vector_signed_short, b: vector_signed_short) -> bool {
+ vcmpgtsh_p(0, b, a) != 0
+ }
+
+ #[inline]
+ #[target_feature(enable = "altivec")]
+ #[cfg_attr(test, assert_instr(vcmpgtsh.))]
+ unsafe fn vcmpgesh_any(a: vector_signed_short, b: vector_signed_short) -> bool {
+ vcmpgtsh_p(3, b, a) != 0
+ }
+
+ #[inline]
+ #[target_feature(enable = "altivec")]
+ #[cfg_attr(test, assert_instr(vcmpgtsw.))]
+ unsafe fn vcmpgesw_all(a: vector_signed_int, b: vector_signed_int) -> bool {
+ vcmpgtsw_p(0, b, a) != 0
+ }
+
+ #[inline]
+ #[target_feature(enable = "altivec")]
+ #[cfg_attr(test, assert_instr(vcmpgtsw.))]
+ unsafe fn vcmpgesw_any(a: vector_signed_int, b: vector_signed_int) -> bool {
+ vcmpgtsw_p(3, b, a) != 0
+ }
+
+ #[inline]
+ #[target_feature(enable = "altivec")]
+ #[cfg_attr(test, assert_instr(vcmpgtub.))]
+ unsafe fn vcmpgeub_all(a: vector_unsigned_char, b: vector_unsigned_char) -> bool {
+ vcmpgtub_p(0, b, a) != 0
+ }
+
+ #[inline]
+ #[target_feature(enable = "altivec")]
+ #[cfg_attr(test, assert_instr(vcmpgtub.))]
+ unsafe fn vcmpgeub_any(a: vector_unsigned_char, b: vector_unsigned_char) -> bool {
+ vcmpgtub_p(3, b, a) != 0
+ }
+
+ #[inline]
+ #[target_feature(enable = "altivec")]
+ #[cfg_attr(test, assert_instr(vcmpgtuh.))]
+ unsafe fn vcmpgeuh_all(a: vector_unsigned_short, b: vector_unsigned_short) -> bool {
+ vcmpgtuh_p(0, b, a) != 0
+ }
+
+ #[inline]
+ #[target_feature(enable = "altivec")]
+ #[cfg_attr(test, assert_instr(vcmpgtuh.))]
+ unsafe fn vcmpgeuh_any(a: vector_unsigned_short, b: vector_unsigned_short) -> bool {
+ vcmpgtuh_p(3, b, a) != 0
+ }
+
+ #[inline]
+ #[target_feature(enable = "altivec")]
+ #[cfg_attr(test, assert_instr(vcmpgtuw.))]
+ unsafe fn vcmpgeuw_all(a: vector_unsigned_int, b: vector_unsigned_int) -> bool {
+ vcmpgtuw_p(0, b, a) != 0
+ }
+
+ #[inline]
+ #[target_feature(enable = "altivec")]
+ #[cfg_attr(test, assert_instr(vcmpgtuw.))]
+ unsafe fn vcmpgeuw_any(a: vector_unsigned_int, b: vector_unsigned_int) -> bool {
+ vcmpgtuw_p(3, b, a) != 0
+ }
+
+ pub trait VectorAllGe<Other> {
+ type Result;
+ unsafe fn vec_all_ge(self, b: Other) -> Self::Result;
+ }
+
+ impl_vec_any_all! { [VectorAllGe vec_all_ge] (
+ vcmpgeub_all, vcmpgesb_all,
+ vcmpgeuh_all, vcmpgesh_all,
+ vcmpgeuw_all, vcmpgesw_all
+ ) }
+
+ // TODO: vsx encoding
+ #[inline]
+ #[target_feature(enable = "altivec")]
+ #[cfg_attr(test, assert_instr(vcmpgefp.))]
+ unsafe fn vcmpgefp_all(a: vector_float, b: vector_float) -> bool {
+ vcmpgefp_p(2, a, b) != 0
+ }
+
+ impl VectorAllGe<vector_float> for vector_float {
+ type Result = bool;
+ #[inline]
+ unsafe fn vec_all_ge(self, b: vector_float) -> Self::Result {
+ vcmpgefp_all(self, b)
+ }
+ }
+
+ pub trait VectorAnyGe<Other> {
+ type Result;
+ unsafe fn vec_any_ge(self, b: Other) -> Self::Result;
+ }
+
+ impl_vec_any_all! { [VectorAnyGe vec_any_ge] (
+ vcmpgeub_any, vcmpgesb_any,
+ vcmpgeuh_any, vcmpgesh_any,
+ vcmpgeuw_any, vcmpgesw_any
+ ) }
+
+ #[inline]
+ #[target_feature(enable = "altivec")]
+ #[cfg_attr(test, assert_instr(vcmpgefp.))]
+ unsafe fn vcmpgefp_any(a: vector_float, b: vector_float) -> bool {
+ vcmpgefp_p(1, a, b) != 0
+ }
+
+ impl VectorAnyGe<vector_float> for vector_float {
+ type Result = bool;
+ #[inline]
+ unsafe fn vec_any_ge(self, b: vector_float) -> Self::Result {
+ vcmpgefp_any(self, b)
+ }
+ }
+
+ // All/Any Greater Than
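+    // These use the vcmpgt* predicates directly: 2 requires every lane to
+    // compare true, 1 requires at least one lane to.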
+
+ #[inline]
+ #[target_feature(enable = "altivec")]
+ #[cfg_attr(test, assert_instr(vcmpgtsb.))]
+ unsafe fn vcmpgtsb_all(a: vector_signed_char, b: vector_signed_char) -> bool {
+ vcmpgtsb_p(2, a, b) != 0
+ }
+
+ #[inline]
+ #[target_feature(enable = "altivec")]
+ #[cfg_attr(test, assert_instr(vcmpgtsb.))]
+ unsafe fn vcmpgtsb_any(a: vector_signed_char, b: vector_signed_char) -> bool {
+ vcmpgtsb_p(1, a, b) != 0
+ }
+
+ #[inline]
+ #[target_feature(enable = "altivec")]
+ #[cfg_attr(test, assert_instr(vcmpgtsh.))]
+ unsafe fn vcmpgtsh_all(a: vector_signed_short, b: vector_signed_short) -> bool {
+ vcmpgtsh_p(2, a, b) != 0
+ }
+
+ #[inline]
+ #[target_feature(enable = "altivec")]
+ #[cfg_attr(test, assert_instr(vcmpgtsh.))]
+ unsafe fn vcmpgtsh_any(a: vector_signed_short, b: vector_signed_short) -> bool {
+ vcmpgtsh_p(1, a, b) != 0
+ }
+
+ #[inline]
+ #[target_feature(enable = "altivec")]
+ #[cfg_attr(test, assert_instr(vcmpgtsw.))]
+ unsafe fn vcmpgtsw_all(a: vector_signed_int, b: vector_signed_int) -> bool {
+ vcmpgtsw_p(2, a, b) != 0
+ }
+
+ #[inline]
+ #[target_feature(enable = "altivec")]
+ #[cfg_attr(test, assert_instr(vcmpgtsw.))]
+ unsafe fn vcmpgtsw_any(a: vector_signed_int, b: vector_signed_int) -> bool {
+ vcmpgtsw_p(1, a, b) != 0
+ }
+
+ #[inline]
+ #[target_feature(enable = "altivec")]
+ #[cfg_attr(test, assert_instr(vcmpgtub.))]
+ unsafe fn vcmpgtub_all(a: vector_unsigned_char, b: vector_unsigned_char) -> bool {
+ vcmpgtub_p(2, a, b) != 0
+ }
+
+ #[inline]
+ #[target_feature(enable = "altivec")]
+ #[cfg_attr(test, assert_instr(vcmpgtub.))]
+ unsafe fn vcmpgtub_any(a: vector_unsigned_char, b: vector_unsigned_char) -> bool {
+ vcmpgtub_p(1, a, b) != 0
+ }
+
+ #[inline]
+ #[target_feature(enable = "altivec")]
+ #[cfg_attr(test, assert_instr(vcmpgtuh.))]
+ unsafe fn vcmpgtuh_all(a: vector_unsigned_short, b: vector_unsigned_short) -> bool {
+ vcmpgtuh_p(2, a, b) != 0
+ }
+
+ #[inline]
+ #[target_feature(enable = "altivec")]
+ #[cfg_attr(test, assert_instr(vcmpgtuh.))]
+ unsafe fn vcmpgtuh_any(a: vector_unsigned_short, b: vector_unsigned_short) -> bool {
+ vcmpgtuh_p(1, a, b) != 0
+ }
+
+ #[inline]
+ #[target_feature(enable = "altivec")]
+ #[cfg_attr(test, assert_instr(vcmpgtuw.))]
+ unsafe fn vcmpgtuw_all(a: vector_unsigned_int, b: vector_unsigned_int) -> bool {
+ vcmpgtuw_p(2, a, b) != 0
+ }
+
+ #[inline]
+ #[target_feature(enable = "altivec")]
+ #[cfg_attr(test, assert_instr(vcmpgtuw.))]
+ unsafe fn vcmpgtuw_any(a: vector_unsigned_int, b: vector_unsigned_int) -> bool {
+ vcmpgtuw_p(1, a, b) != 0
+ }
+
+ pub trait VectorAllGt<Other> {
+ type Result;
+ unsafe fn vec_all_gt(self, b: Other) -> Self::Result;
+ }
+
+ impl_vec_any_all! { [VectorAllGt vec_all_gt] (
+ vcmpgtub_all, vcmpgtsb_all,
+ vcmpgtuh_all, vcmpgtsh_all,
+ vcmpgtuw_all, vcmpgtsw_all
+ ) }
+
+ // TODO: vsx encoding
+ #[inline]
+ #[target_feature(enable = "altivec")]
+ #[cfg_attr(test, assert_instr(vcmpgtfp.))]
+ unsafe fn vcmpgtfp_all(a: vector_float, b: vector_float) -> bool {
+ vcmpgtfp_p(2, a, b) != 0
+ }
+
+ impl VectorAllGt<vector_float> for vector_float {
+ type Result = bool;
+ #[inline]
+ unsafe fn vec_all_gt(self, b: vector_float) -> Self::Result {
+ vcmpgtfp_all(self, b)
+ }
+ }
+
+ pub trait VectorAnyGt<Other> {
+ type Result;
+ unsafe fn vec_any_gt(self, b: Other) -> Self::Result;
+ }
+
+ impl_vec_any_all! { [VectorAnyGt vec_any_gt] (
+ vcmpgtub_any, vcmpgtsb_any,
+ vcmpgtuh_any, vcmpgtsh_any,
+ vcmpgtuw_any, vcmpgtsw_any
+ ) }
+
+ #[inline]
+ #[target_feature(enable = "altivec")]
+ #[cfg_attr(test, assert_instr(vcmpgtfp.))]
+ unsafe fn vcmpgtfp_any(a: vector_float, b: vector_float) -> bool {
+ vcmpgtfp_p(1, a, b) != 0
+ }
+
+ impl VectorAnyGt<vector_float> for vector_float {
+ type Result = bool;
+ #[inline]
+ unsafe fn vec_any_gt(self, b: vector_float) -> Self::Result {
+ vcmpgtfp_any(self, b)
+ }
+ }
+
+ // All/Any Elements Not Equal
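+ // AltiVec has no dedicated "not equal" compare; these reuse the equal
+ // compare with predicate 0 ("no lane equal") for the `all` forms and
+ // predicate 3 ("some lane unequal") for the `any` forms.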
+
+ #[inline]
+ #[target_feature(enable = "altivec")]
+ #[cfg_attr(test, assert_instr(vcmpequb.))]
+ unsafe fn vcmpneub_all(a: vector_unsigned_char, b: vector_unsigned_char) -> bool {
+ vcmpequb_p(0, a, b) != 0
+ }
+
+ #[inline]
+ #[target_feature(enable = "altivec")]
+ #[cfg_attr(test, assert_instr(vcmpequb.))]
+ unsafe fn vcmpneub_any(a: vector_unsigned_char, b: vector_unsigned_char) -> bool {
+ vcmpequb_p(3, a, b) != 0
+ }
+
+ #[inline]
+ #[target_feature(enable = "altivec")]
+ #[cfg_attr(test, assert_instr(vcmpequh.))]
+ unsafe fn vcmpneuh_all(a: vector_unsigned_short, b: vector_unsigned_short) -> bool {
+ vcmpequh_p(0, a, b) != 0
+ }
+
+ #[inline]
+ #[target_feature(enable = "altivec")]
+ #[cfg_attr(test, assert_instr(vcmpequh.))]
+ unsafe fn vcmpneuh_any(a: vector_unsigned_short, b: vector_unsigned_short) -> bool {
+ vcmpequh_p(3, a, b) != 0
+ }
+
+ #[inline]
+ #[target_feature(enable = "altivec")]
+ #[cfg_attr(test, assert_instr(vcmpequw.))]
+ unsafe fn vcmpneuw_all(a: vector_unsigned_int, b: vector_unsigned_int) -> bool {
+ vcmpequw_p(0, a, b) != 0
+ }
+
+ #[inline]
+ #[target_feature(enable = "altivec")]
+ #[cfg_attr(test, assert_instr(vcmpequw.))]
+ unsafe fn vcmpneuw_any(a: vector_unsigned_int, b: vector_unsigned_int) -> bool {
+ vcmpequw_p(3, a, b) != 0
+ }
+
+ pub trait VectorAllNe<Other> {
+ type Result;
+ unsafe fn vec_all_ne(self, b: Other) -> Self::Result;
+ }
+
+ impl_vec_any_all! { [VectorAllNe vec_all_ne] (vcmpneub_all, vcmpneuh_all, vcmpneuw_all) }
+
+ // TODO: vsx encoding
+ #[inline]
+ #[target_feature(enable = "altivec")]
+ #[cfg_attr(test, assert_instr(vcmpeqfp.))]
+ unsafe fn vcmpnefp_all(a: vector_float, b: vector_float) -> bool {
+ vcmpeqfp_p(0, a, b) != 0
+ }
+
+ impl VectorAllNe<vector_float> for vector_float {
+ type Result = bool;
+ #[inline]
+ unsafe fn vec_all_ne(self, b: vector_float) -> Self::Result {
+ vcmpnefp_all(self, b)
+ }
+ }
+
+ pub trait VectorAnyNe<Other> {
+ type Result;
+ unsafe fn vec_any_ne(self, b: Other) -> Self::Result;
+ }
+
+ impl_vec_any_all! { [VectorAnyNe vec_any_ne] (vcmpneub_any, vcmpneuh_any, vcmpneuw_any) }
+
+ #[inline]
+ #[target_feature(enable = "altivec")]
+ #[cfg_attr(test, assert_instr(vcmpeqfp.))]
+ unsafe fn vcmpnefp_any(a: vector_float, b: vector_float) -> bool {
+ vcmpeqfp_p(3, a, b) != 0
+ }
+
+ impl VectorAnyNe<vector_float> for vector_float {
+ type Result = bool;
+ #[inline]
+ unsafe fn vec_any_ne(self, b: vector_float) -> Self::Result {
+ vcmpnefp_any(self, b)
+ }
+ }
+
test_impl! { vec_vceil(a: vector_float) -> vector_float [vceil, vrfip / xvrspip ] }
test_impl! { vec_vavgsb(a: vector_signed_char, b: vector_signed_char) -> vector_signed_char [ vavgsb, vavgsb ] }
@@ -540,8 +1012,10 @@ mod sealed {
#[target_feature(enable = "altivec")]
#[cfg_attr(all(test, not(target_feature = "vsx")), assert_instr(vandc))]
#[cfg_attr(all(test, target_feature = "vsx"), assert_instr(xxlandc))]
- unsafe fn andc(a: u8x16, b: u8x16) -> u8x16 {
- simd_and(simd_xor(u8x16::splat(0xff), b), a)
+ unsafe fn andc(a: vector_signed_char, b: vector_signed_char) -> vector_signed_char {
+ let a = transmute(a);
+ let b = transmute(b);
+ transmute(simd_and(simd_xor(u8x16::splat(0xff), b), a))
}
pub trait VectorAndc<Other> {
@@ -1423,8 +1897,15 @@ mod sealed {
#[inline]
#[target_feature(enable = "altivec")]
#[cfg_attr(test, assert_instr(vmladduhm))]
- unsafe fn mladd(a: i16x8, b: i16x8, c: i16x8) -> i16x8 {
- simd_add(simd_mul(a, b), c)
+ unsafe fn mladd(
+ a: vector_signed_short,
+ b: vector_signed_short,
+ c: vector_signed_short,
+ ) -> vector_signed_short {
+ let a: i16x8 = transmute(a);
+ let b: i16x8 = transmute(b);
+ let c: i16x8 = transmute(c);
+ transmute(simd_add(simd_mul(a, b), c))
}
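+ // `vmladduhm` computes `(a * b + c)` modulo 2^16 in every halfword lane,
+ // which is why this single implementation serves all of the signed/unsigned
+ // input combinations instantiated below.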
macro_rules! vector_mladd {
@@ -1434,9 +1915,9 @@ mod sealed {
#[inline]
#[target_feature(enable = "altivec")]
unsafe fn vec_mladd(self, b: $bc, c: $bc) -> Self::Result {
- let a: i16x8 = transmute(self);
- let b: i16x8 = transmute(b);
- let c: i16x8 = transmute(c);
+ let a = transmute(self);
+ let b = transmute(b);
+ let c = transmute(c);
transmute(mladd(a, b, c))
}
@@ -1448,18 +1929,111 @@ mod sealed {
vector_mladd! { vector_unsigned_short, vector_signed_short, vector_signed_short }
vector_mladd! { vector_signed_short, vector_unsigned_short, vector_signed_short }
vector_mladd! { vector_signed_short, vector_signed_short, vector_signed_short }
+
+ pub trait VectorOr<Other> {
+ type Result;
+ unsafe fn vec_or(self, b: Other) -> Self::Result;
+ }
+
+ impl_vec_trait! { [VectorOr vec_or] ~(simd_or) }
+
+ pub trait VectorXor<Other> {
+ type Result;
+ unsafe fn vec_xor(self, b: Other) -> Self::Result;
+ }
+
+ impl_vec_trait! { [VectorXor vec_xor] ~(simd_xor) }
+
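+ // NOR is composed from the primitives above: OR the operands, then XOR the
+ // result with an all-ones vector to complement it.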
+ macro_rules! vector_vnor {
+ ($fun:ident $ty:ident) => {
+ #[inline]
+ #[target_feature(enable = "altivec")]
+ #[cfg_attr(all(test, not(target_feature = "vsx")), assert_instr(vnor))]
+ #[cfg_attr(all(test, target_feature = "vsx"), assert_instr(xxlnor))]
+ pub unsafe fn $fun(a: t_t_l!($ty), b: t_t_l!($ty)) -> t_t_l!($ty) {
+ let o = vec_splats(!0 as $ty);
+ vec_xor(vec_or(a, b), o)
+ }
+ };
+ }
+
+ vector_vnor! { vec_vnorsb i8 }
+ vector_vnor! { vec_vnorsh i16 }
+ vector_vnor! { vec_vnorsw i32 }
+ vector_vnor! { vec_vnorub u8 }
+ vector_vnor! { vec_vnoruh u16 }
+ vector_vnor! { vec_vnoruw u32 }
+
+ pub trait VectorNor<Other> {
+ type Result;
+ unsafe fn vec_nor(self, b: Other) -> Self::Result;
+ }
+
+ impl_vec_trait! { [VectorNor vec_nor] 2 (vec_vnorub, vec_vnorsb, vec_vnoruh, vec_vnorsh, vec_vnoruw, vec_vnorsw) }
+
+ #[inline]
+ #[target_feature(enable = "altivec")]
+ #[cfg_attr(test, assert_instr(vcfsx, IMM5 = 1))]
+ unsafe fn vec_ctf_i32<const IMM5: i32>(a: vector_signed_int) -> vector_float {
+ static_assert_uimm_bits!(IMM5, 5);
+ vcfsx(a, IMM5)
+ }
+
+ #[inline]
+ #[target_feature(enable = "altivec")]
+ #[cfg_attr(test, assert_instr(vcfux, IMM5 = 1))]
+ unsafe fn vec_ctf_u32<const IMM5: i32>(a: vector_unsigned_int) -> vector_float {
+ static_assert_uimm_bits!(IMM5, 5);
+ vcfux(a, IMM5)
+ }
+
+ pub trait VectorCtf {
+ unsafe fn vec_ctf<const IMM5: i32>(self) -> vector_float;
+ }
+
+ impl VectorCtf for vector_signed_int {
+ unsafe fn vec_ctf<const IMM5: i32>(self) -> vector_float {
+ vec_ctf_i32::<IMM5>(self)
+ }
+ }
+
+ impl VectorCtf for vector_unsigned_int {
+ unsafe fn vec_ctf<const IMM5: i32>(self) -> vector_float {
+ vec_ctf_u32::<IMM5>(self)
+ }
+ }
}
-/// Vector ld.
+/// Vector Load Indexed.
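+///
+/// A minimal usage sketch (hypothetical buffer; the underlying `lvx` load
+/// truncates the effective address `p + off` down to a 16-byte boundary, so
+/// the source should be 16-byte aligned):
+///
+/// ```ignore
+/// let data: [u8; 16] = [42; 16];
+/// // loads the aligned 16-byte block containing `data`
+/// let v = vec_ld(0, data.as_ptr());
+/// ```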
#[inline]
#[target_feature(enable = "altivec")]
-pub unsafe fn vec_ld<T>(off: i32, p: T) -> <T as sealed::VectorLd>::Result
+pub unsafe fn vec_ld<T>(off: isize, p: T) -> <T as sealed::VectorLd>::Result
where
T: sealed::VectorLd,
{
p.vec_ld(off)
}
+/// Vector Load Indexed Least Recently Used.
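+///
+/// Like [`vec_ld`], but performed with `lvxl`, which marks the fetched cache
+/// line as least recently used (a transient-use hint).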
+#[inline]
+#[target_feature(enable = "altivec")]
+pub unsafe fn vec_ldl<T>(off: isize, p: T) -> <T as sealed::VectorLd>::Result
+where
+ T: sealed::VectorLd,
+{
+ p.vec_ldl(off)
+}
+
+/// Vector Load Element Indexed.
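+///
+/// Loads a single element into the lane that its address occupies within the
+/// enclosing 16-byte block, as exercised by the `test_vec_lde_*` tests below.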
+#[inline]
+#[target_feature(enable = "altivec")]
+pub unsafe fn vec_lde<T>(off: isize, p: T) -> <T as sealed::VectorLde>::Result
+where
+ T: sealed::VectorLde,
+{
+ p.vec_lde(off)
+}
+
/// Vector floor.
#[inline]
#[target_feature(enable = "altivec")]
@@ -1562,6 +2136,36 @@ where
a.vec_and(b)
}
+/// Vector or.
+#[inline]
+#[target_feature(enable = "altivec")]
+pub unsafe fn vec_or<T, U>(a: T, b: U) -> <T as sealed::VectorOr<U>>::Result
+where
+ T: sealed::VectorOr<U>,
+{
+ a.vec_or(b)
+}
+
+/// Vector nor.
+#[inline]
+#[target_feature(enable = "altivec")]
+pub unsafe fn vec_nor<T, U>(a: T, b: U) -> <T as sealed::VectorNor<U>>::Result
+where
+ T: sealed::VectorNor<U>,
+{
+ a.vec_nor(b)
+}
+
+/// Vector xor.
+#[inline]
+#[target_feature(enable = "altivec")]
+pub unsafe fn vec_xor<T, U>(a: T, b: U) -> <T as sealed::VectorXor<U>>::Result
+where
+ T: sealed::VectorXor<U>,
+{
+ a.vec_xor(b)
+}
+
/// Vector adds.
#[inline]
#[target_feature(enable = "altivec")]
@@ -1659,6 +2263,36 @@ where
a.vec_add(b)
}
+/// Vector Convert to Floating-Point
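+///
+/// Each integer element is converted to `f32` and the result is scaled by
+/// `2^-IMM5`; e.g. `vec_ctf::<1, _>(v)` yields half of each converted value
+/// (see the `vec_ctf_*` tests below).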
+#[inline]
+#[target_feature(enable = "altivec")]
+pub unsafe fn vec_ctf<const IMM5: i32, T>(a: T) -> vector_float
+where
+ T: sealed::VectorCtf,
+{
+ a.vec_ctf::<IMM5>()
+}
+
+/// Vector Convert to Signed Integer
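+///
+/// The input is scaled by `2^IMM5` before the saturating conversion; e.g.
+/// `vec_cts::<1>(v)` doubles each value (see `test_vec_cts` below).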
+#[inline]
+#[target_feature(enable = "altivec")]
+#[cfg_attr(test, assert_instr(vctsxs, IMM5 = 1))]
+pub unsafe fn vec_cts<const IMM5: i32>(a: vector_float) -> vector_signed_int {
+ static_assert_uimm_bits!(IMM5, 5);
+
+ vctsxs(a, IMM5)
+}
+
+/// Vector Convert to Unsigned Integer
+#[inline]
+#[target_feature(enable = "altivec")]
+#[cfg_attr(test, assert_instr(vctuxs, IMM5 = 1))]
+pub unsafe fn vec_ctu<const IMM5: i32>(a: vector_float) -> vector_unsigned_int {
+ static_assert_uimm_bits!(IMM5, 5);
+
+ vctuxs(a, IMM5)
+}
+
/// Endian-biased intrinsics
#[cfg(target_endian = "little")]
mod endian {
@@ -1796,6 +2430,238 @@ where
a.vec_sum4s(b)
}
+/// Vector All Elements Equal
+#[inline]
+#[target_feature(enable = "altivec")]
+pub unsafe fn vec_all_eq<T, U>(a: T, b: U) -> <T as sealed::VectorAllEq<U>>::Result
+where
+ T: sealed::VectorAllEq<U>,
+{
+ a.vec_all_eq(b)
+}
+
+/// Vector Any Element Equal
+#[inline]
+#[target_feature(enable = "altivec")]
+pub unsafe fn vec_any_eq<T, U>(a: T, b: U) -> <T as sealed::VectorAnyEq<U>>::Result
+where
+ T: sealed::VectorAnyEq<U>,
+{
+ a.vec_any_eq(b)
+}
+
+/// Vector All Elements Greater or Equal
+#[inline]
+#[target_feature(enable = "altivec")]
+pub unsafe fn vec_all_ge<T, U>(a: T, b: U) -> <T as sealed::VectorAllGe<U>>::Result
+where
+ T: sealed::VectorAllGe<U>,
+{
+ a.vec_all_ge(b)
+}
+
+/// Vector Any Element Greater or Equal
+#[inline]
+#[target_feature(enable = "altivec")]
+pub unsafe fn vec_any_ge<T, U>(a: T, b: U) -> <T as sealed::VectorAnyGe<U>>::Result
+where
+ T: sealed::VectorAnyGe<U>,
+{
+ a.vec_any_ge(b)
+}
+
+/// Vector All Elements Greater Than
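+///
+/// A hypothetical sketch using `vec_splats` (defined in this module) to build
+/// the operands:
+///
+/// ```ignore
+/// let a = vec_splats(2i32);
+/// let b = vec_splats(1i32);
+/// // every lane of `a` exceeds the matching lane of `b`
+/// assert!(vec_all_gt(a, b));
+/// ```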
+#[inline]
+#[target_feature(enable = "altivec")]
+pub unsafe fn vec_all_gt<T, U>(a: T, b: U) -> <T as sealed::VectorAllGt<U>>::Result
+where
+ T: sealed::VectorAllGt<U>,
+{
+ a.vec_all_gt(b)
+}
+
+/// Vector Any Element Greater Than
+#[inline]
+#[target_feature(enable = "altivec")]
+pub unsafe fn vec_any_gt<T, U>(a: T, b: U) -> <T as sealed::VectorAnyGt<U>>::Result
+where
+ T: sealed::VectorAnyGt<U>,
+{
+ a.vec_any_gt(b)
+}
+
+/// Vector All In
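+///
+/// Returns `true` when every element of `a` lies within the bounds `[-b, b]`
+/// established by the `vcmpbfp` bounds compare.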
+#[inline]
+#[target_feature(enable = "altivec")]
+#[cfg_attr(test, assert_instr("vcmpbfp."))]
+pub unsafe fn vec_all_in(a: vector_float, b: vector_float) -> bool {
+ vcmpbfp_p(0, a, b) != 0
+}
+
+/// Vector All Elements Less Than or Equal
+#[inline]
+#[target_feature(enable = "altivec")]
+pub unsafe fn vec_all_le<T, U>(a: U, b: T) -> <T as sealed::VectorAllGe<U>>::Result
+where
+ T: sealed::VectorAllGe<U>,
+{
+ b.vec_all_ge(a)
+}
+
+/// Vector Any Element Less Than or Equal
+#[inline]
+#[target_feature(enable = "altivec")]
+pub unsafe fn vec_any_le<T, U>(a: U, b: T) -> <T as sealed::VectorAnyGe<U>>::Result
+where
+ T: sealed::VectorAnyGe<U>,
+{
+ b.vec_any_ge(a)
+}
+
+/// Vector All Elements Less Than
+#[inline]
+#[target_feature(enable = "altivec")]
+pub unsafe fn vec_all_lt<T, U>(a: U, b: T) -> <T as sealed::VectorAllGt<U>>::Result
+where
+ T: sealed::VectorAllGt<U>,
+{
+ b.vec_all_gt(a)
+}
+
+/// Vector Any Element Less Than
+#[inline]
+#[target_feature(enable = "altivec")]
+pub unsafe fn vec_any_lt<T, U>(a: U, b: T) -> <T as sealed::VectorAnyGt<U>>::Result
+where
+ T: sealed::VectorAnyGt<U>,
+{
+ b.vec_any_gt(a)
+}
+
+/// All Elements Not a Number
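+///
+/// Implemented as a self-comparison: NaN is the only value that does not
+/// compare equal to itself, so "no lane equal" means every lane is NaN.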
+#[inline]
+#[target_feature(enable = "altivec")]
+#[cfg_attr(test, assert_instr("vcmpeqfp."))]
+pub unsafe fn vec_all_nan(a: vector_float) -> bool {
+ vcmpeqfp_p(0, a, a) != 0
+}
+
+/// Any Elements Not a Number
+#[inline]
+#[target_feature(enable = "altivec")]
+#[cfg_attr(test, assert_instr("vcmpeqfp."))]
+pub unsafe fn vec_any_nan(a: vector_float) -> bool {
+ vcmpeqfp_p(3, a, a) != 0
+}
+
+/// Vector All Elements Not Equal
+#[inline]
+#[target_feature(enable = "altivec")]
+pub unsafe fn vec_all_ne<T, U>(a: T, b: U) -> <T as sealed::VectorAllNe<U>>::Result
+where
+ T: sealed::VectorAllNe<U>,
+{
+ a.vec_all_ne(b)
+}
+
+/// Vector Any Elements Not Equal
+#[inline]
+#[target_feature(enable = "altivec")]
+pub unsafe fn vec_any_ne<T, U>(a: T, b: U) -> <T as sealed::VectorAnyNe<U>>::Result
+where
+ T: sealed::VectorAnyNe<U>,
+{
+ a.vec_any_ne(b)
+}
+
+/// All Elements Not Greater Than or Equal
+#[inline]
+#[target_feature(enable = "altivec")]
+#[cfg_attr(test, assert_instr("vcmpgefp."))]
+pub unsafe fn vec_all_nge(a: vector_float, b: vector_float) -> bool {
+ vcmpgefp_p(0, a, b) != 0
+}
+
+/// All Elements Not Greater Than
+#[inline]
+#[target_feature(enable = "altivec")]
+#[cfg_attr(test, assert_instr("vcmpgtfp."))]
+pub unsafe fn vec_all_ngt(a: vector_float, b: vector_float) -> bool {
+ vcmpgtfp_p(0, a, b) != 0
+}
+
+/// All Elements Not Less Than or Equal
+#[inline]
+#[target_feature(enable = "altivec")]
+#[cfg_attr(test, assert_instr("vcmpgefp."))]
+pub unsafe fn vec_all_nle(a: vector_float, b: vector_float) -> bool {
+ vcmpgefp_p(0, b, a) != 0
+}
+
+/// All Elements Not Less Than
+#[inline]
+#[target_feature(enable = "altivec")]
+#[cfg_attr(test, assert_instr("vcmpgtfp."))]
+pub unsafe fn vec_all_nlt(a: vector_float, b: vector_float) -> bool {
+ vcmpgtfp_p(0, b, a) != 0
+}
+
+/// All Elements Numeric
+#[inline]
+#[target_feature(enable = "altivec")]
+#[cfg_attr(test, assert_instr("vcmpgefp."))]
+pub unsafe fn vec_all_numeric(a: vector_float) -> bool {
+ vcmpgefp_p(2, a, a) != 0
+}
+
+/// Any Elements Not Greater Than or Equal
+#[inline]
+#[target_feature(enable = "altivec")]
+#[cfg_attr(test, assert_instr("vcmpgefp."))]
+pub unsafe fn vec_any_nge(a: vector_float, b: vector_float) -> bool {
+ vcmpgefp_p(3, a, b) != 0
+}
+
+/// Any Elements Not Greater Than
+#[inline]
+#[target_feature(enable = "altivec")]
+#[cfg_attr(test, assert_instr("vcmpgtfp."))]
+pub unsafe fn vec_any_ngt(a: vector_float, b: vector_float) -> bool {
+ vcmpgtfp_p(3, a, b) != 0
+}
+
+/// Any Elements Not Less Than or Equal
+#[inline]
+#[target_feature(enable = "altivec")]
+#[cfg_attr(test, assert_instr("vcmpgefp."))]
+pub unsafe fn vec_any_nle(a: vector_float, b: vector_float) -> bool {
+ vcmpgefp_p(3, b, a) != 0
+}
+
+/// Any Elements Not Less Than
+#[inline]
+#[target_feature(enable = "altivec")]
+#[cfg_attr(test, assert_instr("vcmpgtfp."))]
+pub unsafe fn vec_any_nlt(a: vector_float, b: vector_float) -> bool {
+ vcmpgtfp_p(3, b, a) != 0
+}
+
+/// Any Elements Numeric
+#[inline]
+#[target_feature(enable = "altivec")]
+#[cfg_attr(test, assert_instr("vcmpgefp."))]
+pub unsafe fn vec_any_numeric(a: vector_float) -> bool {
+ vcmpgefp_p(1, a, a) != 0
+}
+
+/// Any Element Out of Bounds
+#[inline]
+#[target_feature(enable = "altivec")]
+#[cfg_attr(test, assert_instr("vcmpeqfp."))]
+pub unsafe fn vec_any_out(a: vector_float) -> bool {
+ vcmpeqfp_p(1, a, a) != 0
+}
+
#[cfg(target_endian = "big")]
mod endian {
use super::*;
@@ -1865,8 +2731,18 @@ mod tests {
let r : $ty_out = transmute($fn(a, b));
assert_eq!(d, r);
}
+ };
+ { $name: ident, $fn:ident, $ty: ident -> $ty_out: ident, [$($a:expr),+], [$($b:expr),+], $d:expr } => {
+ #[simd_test(enable = "altivec")]
+ unsafe fn $name() {
+ let a: s_t_l!($ty) = transmute($ty::new($($a),+));
+ let b: s_t_l!($ty) = transmute($ty::new($($b),+));
+
+ let r : $ty_out = transmute($fn(a, b));
+ assert_eq!($d, r);
+ }
}
- }
+ }
macro_rules! test_vec_1 {
{ $name: ident, $fn:ident, f32x4, [$($a:expr),+], ~[$($d:expr),+] } => {
@@ -1920,6 +2796,60 @@ mod tests {
}
}
+ #[simd_test(enable = "altivec")]
+ unsafe fn test_vec_ldl() {
+ let pat = [
+ u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15),
+ u8x16::new(
+ 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ ),
+ ];
+
+ for off in 0..16 {
+ let v: u8x16 = transmute(vec_ldl(0, (pat.as_ptr() as *const u8).offset(off)));
+ assert_eq!(
+ v,
+ u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15)
+ );
+ }
+ for off in 16..32 {
+ let v: u8x16 = transmute(vec_ldl(0, (pat.as_ptr() as *const u8).offset(off)));
+ assert_eq!(
+ v,
+ u8x16::new(16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31)
+ );
+ }
+ }
+
+ #[simd_test(enable = "altivec")]
+ unsafe fn test_vec_lde_u8() {
+ let pat = [u8x16::new(
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ )];
+ for off in 0..16 {
+ let v: u8x16 = transmute(vec_lde(off, pat.as_ptr() as *const u8));
+ assert_eq!(off as u8, v.extract(off as _));
+ }
+ }
+
+ #[simd_test(enable = "altivec")]
+ unsafe fn test_vec_lde_u16() {
+ let pat = [u16x8::new(0, 1, 2, 3, 4, 5, 6, 7)];
+ for off in 0..8 {
+ let v: u16x8 = transmute(vec_lde(off * 2, pat.as_ptr() as *const u8));
+ assert_eq!(off as u16, v.extract(off as _));
+ }
+ }
+
+ #[simd_test(enable = "altivec")]
+ unsafe fn test_vec_lde_u32() {
+ let pat = [u32x4::new(0, 1, 2, 3)];
+ for off in 0..4 {
+ let v: u32x4 = transmute(vec_lde(off * 4, pat.as_ptr() as *const u8));
+ assert_eq!(off as u32, v.extract(off as _));
+ }
+ }
+
test_vec_1! { test_vec_floor, vec_floor, f32x4,
[1.1, 1.9, -0.5, -0.9],
[1.0, 1.0, -1.0, -1.0]
@@ -2008,6 +2938,882 @@ mod tests {
[false, true, true, false]
}
+ test_vec_2! { test_vec_all_eq_i8_false, vec_all_eq, i8x16 -> bool,
+ [1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, -1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ false
+ }
+
+ test_vec_2! { test_vec_all_eq_u8_false, vec_all_eq, u8x16 -> bool,
+ [1, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 255, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ false
+ }
+
+ test_vec_2! { test_vec_all_eq_i16_false, vec_all_eq, i16x8 -> bool,
+ [1, -1, 0, 0, 0, 0, 0, 0],
+ [0, 0, -1, 1, 0, 0, 0, 0],
+ false
+ }
+
+ test_vec_2! { test_vec_all_eq_u16_false, vec_all_eq, u16x8 -> bool,
+ [1, 255, 0, 0, 0, 0, 0, 0],
+ [0, 0, 255, 1, 0, 0, 0, 0],
+ false
+ }
+
+ test_vec_2! { test_vec_all_eq_i32_false, vec_all_eq, i32x4 -> bool,
+ [1, -1, 0, 0],
+ [0, -1, 0, 1],
+ false
+ }
+
+ test_vec_2! { test_vec_all_eq_u32_false, vec_all_eq, u32x4 -> bool,
+ [1, 255, 0, 0],
+ [0, 255, 0, 1],
+ false
+ }
+
+ test_vec_2! { test_vec_all_eq_i8_true, vec_all_eq, i8x16 -> bool,
+ [0, 0, -1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, -1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ true
+ }
+
+ test_vec_2! { test_vec_all_eq_u8_true, vec_all_eq, u8x16 -> bool,
+ [1, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [1, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ true
+ }
+
+ test_vec_2! { test_vec_all_eq_i16_true, vec_all_eq, i16x8 -> bool,
+ [1, -1, 1, 0, 0, 0, 0, 0],
+ [1, -1, 1, 0, 0, 0, 0, 0],
+ true
+ }
+
+ test_vec_2! { test_vec_all_eq_u16_true, vec_all_eq, u16x8 -> bool,
+ [1, 255, 1, 0, 0, 0, 0, 0],
+ [1, 255, 1, 0, 0, 0, 0, 0],
+ true
+ }
+
+ test_vec_2! { test_vec_all_eq_i32_true, vec_all_eq, i32x4 -> bool,
+ [1, -1, 0, 1],
+ [1, -1, 0, 1],
+ true
+ }
+
+ test_vec_2! { test_vec_all_eq_u32_true, vec_all_eq, u32x4 -> bool,
+ [1, 255, 0, 1],
+ [1, 255, 0, 1],
+ true
+ }
+
+ test_vec_2! { test_vec_any_eq_i8_false, vec_any_eq, i8x16 -> bool,
+ [1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, -1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
+ false
+ }
+
+ test_vec_2! { test_vec_any_eq_u8_false, vec_any_eq, u8x16 -> bool,
+ [1, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 255, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
+ false
+ }
+
+ test_vec_2! { test_vec_any_eq_i16_false, vec_any_eq, i16x8 -> bool,
+ [1, -1, 0, 0, 0, 0, 0, 0],
+ [0, 0, -1, 1, 1, 1, 1, 1],
+ false
+ }
+
+ test_vec_2! { test_vec_any_eq_u16_false, vec_any_eq, u16x8 -> bool,
+ [1, 255, 0, 0, 0, 0, 0, 0],
+ [0, 0, 255, 1, 1, 1, 1, 1],
+ false
+ }
+
+ test_vec_2! { test_vec_any_eq_i32_false, vec_any_eq, i32x4 -> bool,
+ [1, -1, 0, 0],
+ [0, -2, 1, 1],
+ false
+ }
+
+ test_vec_2! { test_vec_any_eq_u32_false, vec_any_eq, u32x4 -> bool,
+ [1, 2, 1, 0],
+ [0, 255, 0, 1],
+ false
+ }
+
+ test_vec_2! { test_vec_any_eq_i8_true, vec_any_eq, i8x16 -> bool,
+ [1, 0, -1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, -1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ true
+ }
+
+ test_vec_2! { test_vec_any_eq_u8_true, vec_any_eq, u8x16 -> bool,
+ [0, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [1, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ true
+ }
+
+ test_vec_2! { test_vec_any_eq_i16_true, vec_any_eq, i16x8 -> bool,
+ [0, -1, 1, 0, 0, 0, 0, 0],
+ [1, -1, 1, 0, 0, 0, 0, 0],
+ true
+ }
+
+ test_vec_2! { test_vec_any_eq_u16_true, vec_any_eq, u16x8 -> bool,
+ [0, 255, 1, 0, 0, 0, 0, 0],
+ [1, 255, 1, 0, 0, 0, 0, 0],
+ true
+ }
+
+ test_vec_2! { test_vec_any_eq_i32_true, vec_any_eq, i32x4 -> bool,
+ [0, -1, 0, 1],
+ [1, -1, 0, 1],
+ true
+ }
+
+ test_vec_2! { test_vec_any_eq_u32_true, vec_any_eq, u32x4 -> bool,
+ [0, 255, 0, 1],
+ [1, 255, 0, 1],
+ true
+ }
+
+ test_vec_2! { test_vec_all_ge_i8_false, vec_all_ge, i8x16 -> bool,
+ [1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, -1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ false
+ }
+
+ test_vec_2! { test_vec_all_ge_u8_false, vec_all_ge, u8x16 -> bool,
+ [1, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 255, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ false
+ }
+
+ test_vec_2! { test_vec_all_ge_i16_false, vec_all_ge, i16x8 -> bool,
+ [1, -1, 0, 0, 0, 0, 0, 0],
+ [0, 0, -1, 1, 0, 0, 0, 0],
+ false
+ }
+
+ test_vec_2! { test_vec_all_ge_u16_false, vec_all_ge, u16x8 -> bool,
+ [1, 255, 0, 0, 0, 0, 0, 0],
+ [0, 0, 255, 1, 0, 0, 0, 0],
+ false
+ }
+
+ test_vec_2! { test_vec_all_ge_i32_false, vec_all_ge, i32x4 -> bool,
+ [1, -1, 0, 0],
+ [0, -1, 0, 1],
+ false
+ }
+
+ test_vec_2! { test_vec_all_ge_u32_false, vec_all_ge, u32x4 -> bool,
+ [1, 255, 0, 0],
+ [0, 255, 1, 1],
+ false
+ }
+
+ test_vec_2! { test_vec_all_ge_i8_true, vec_all_ge, i8x16 -> bool,
+ [0, 0, -1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, -1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ true
+ }
+
+ test_vec_2! { test_vec_all_ge_u8_true, vec_all_ge, u8x16 -> bool,
+ [1, 255, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [1, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ true
+ }
+
+ test_vec_2! { test_vec_all_ge_i16_true, vec_all_ge, i16x8 -> bool,
+ [1, -1, 42, 0, 0, 0, 0, 0],
+ [1, -5, 2, 0, 0, 0, 0, 0],
+ true
+ }
+
+ test_vec_2! { test_vec_all_ge_u16_true, vec_all_ge, u16x8 -> bool,
+ [42, 255, 1, 0, 0, 0, 0, 0],
+ [2, 255, 1, 0, 0, 0, 0, 0],
+ true
+ }
+
+ test_vec_2! { test_vec_all_ge_i32_true, vec_all_ge, i32x4 -> bool,
+ [1, -1, 0, 1],
+ [0, -1, 0, 1],
+ true
+ }
+
+ test_vec_2! { test_vec_all_ge_u32_true, vec_all_ge, u32x4 -> bool,
+ [1, 255, 0, 1],
+ [1, 254, 0, 0],
+ true
+ }
+
+ test_vec_2! { test_vec_any_ge_i8_false, vec_any_ge, i8x16 -> bool,
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
+ false
+ }
+
+ test_vec_2! { test_vec_any_ge_u8_false, vec_any_ge, u8x16 -> bool,
+ [1, 254, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [42, 255, 255, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
+ false
+ }
+
+ test_vec_2! { test_vec_any_ge_i16_false, vec_any_ge, i16x8 -> bool,
+ [1, -1, -2, 0, 0, 0, 0, 0],
+ [2, 0, -1, 1, 1, 1, 1, 1],
+ false
+ }
+
+ test_vec_2! { test_vec_any_ge_u16_false, vec_any_ge, u16x8 -> bool,
+ [1, 2, 0, 0, 0, 0, 0, 0],
+ [2, 42, 255, 1, 1, 1, 1, 1],
+ false
+ }
+
+ test_vec_2! { test_vec_any_ge_i32_false, vec_any_ge, i32x4 -> bool,
+ [1, -1, 0, 0],
+ [2, 0, 1, 1],
+ false
+ }
+
+ test_vec_2! { test_vec_any_ge_u32_false, vec_any_ge, u32x4 -> bool,
+ [1, 2, 1, 0],
+ [4, 255, 4, 1],
+ false
+ }
+
+ test_vec_2! { test_vec_any_ge_i8_true, vec_any_ge, i8x16 -> bool,
+ [1, 0, -1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, -1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ true
+ }
+
+ test_vec_2! { test_vec_any_ge_u8_true, vec_any_ge, u8x16 -> bool,
+ [0, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [1, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ true
+ }
+
+ test_vec_2! { test_vec_any_ge_i16_true, vec_any_ge, i16x8 -> bool,
+ [0, -1, 1, 0, 0, 0, 0, 0],
+ [1, -1, 1, 0, 0, 0, 0, 0],
+ true
+ }
+
+ test_vec_2! { test_vec_any_ge_u16_true, vec_any_ge, u16x8 -> bool,
+ [0, 255, 1, 0, 0, 0, 0, 0],
+ [1, 255, 1, 0, 0, 0, 0, 0],
+ true
+ }
+
+ test_vec_2! { test_vec_any_ge_i32_true, vec_any_ge, i32x4 -> bool,
+ [0, -1, 0, 1],
+ [1, -1, 0, 1],
+ true
+ }
+
+ test_vec_2! { test_vec_any_ge_u32_true, vec_any_ge, u32x4 -> bool,
+ [0, 255, 0, 1],
+ [1, 255, 0, 1],
+ true
+ }
+
+ test_vec_2! { test_vec_all_gt_i8_false, vec_all_gt, i8x16 -> bool,
+ [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, -1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ false
+ }
+
+ test_vec_2! { test_vec_all_gt_u8_false, vec_all_gt, u8x16 -> bool,
+ [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 255, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ false
+ }
+
+ test_vec_2! { test_vec_all_gt_i16_false, vec_all_gt, i16x8 -> bool,
+ [1, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, -1, 1, 0, 0, 0, 0],
+ false
+ }
+
+ test_vec_2! { test_vec_all_gt_u16_false, vec_all_gt, u16x8 -> bool,
+ [1, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 255, 1, 0, 0, 0, 0],
+ false
+ }
+
+ test_vec_2! { test_vec_all_gt_i32_false, vec_all_gt, i32x4 -> bool,
+ [1, -1, 0, 0],
+ [0, -1, 0, 1],
+ false
+ }
+
+ test_vec_2! { test_vec_all_gt_u32_false, vec_all_gt, u32x4 -> bool,
+ [1, 255, 0, 0],
+ [0, 255, 1, 1],
+ false
+ }
+
+ test_vec_2! { test_vec_all_gt_i8_true, vec_all_gt, i8x16 -> bool,
+ [2, 1, -1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, -2, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+ true
+ }
+
+ test_vec_2! { test_vec_all_gt_u8_true, vec_all_gt, u8x16 -> bool,
+ [1, 255, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
+ [0, 254, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ true
+ }
+
+ test_vec_2! { test_vec_all_gt_i16_true, vec_all_gt, i16x8 -> bool,
+ [1, -1, 42, 1, 1, 1, 1, 1],
+ [0, -5, 2, 0, 0, 0, 0, 0],
+ true
+ }
+
+ test_vec_2! { test_vec_all_gt_u16_true, vec_all_gt, u16x8 -> bool,
+ [42, 255, 1, 1, 1, 1, 1, 1],
+ [2, 254, 0, 0, 0, 0, 0, 0],
+ true
+ }
+
+ test_vec_2! { test_vec_all_gt_i32_true, vec_all_gt, i32x4 -> bool,
+ [1, -1, 1, 1],
+ [0, -2, 0, 0],
+ true
+ }
+
+ test_vec_2! { test_vec_all_gt_u32_true, vec_all_gt, u32x4 -> bool,
+ [1, 255, 1, 1],
+ [0, 254, 0, 0],
+ true
+ }
+
+ test_vec_2! { test_vec_any_gt_i8_false, vec_any_gt, i8x16 -> bool,
+ [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
+ false
+ }
+
+ test_vec_2! { test_vec_any_gt_u8_false, vec_any_gt, u8x16 -> bool,
+ [1, 254, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [42, 255, 255, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
+ false
+ }
+
+ test_vec_2! { test_vec_any_gt_i16_false, vec_any_gt, i16x8 -> bool,
+ [1, -1, -2, 0, 0, 0, 0, 0],
+ [2, 0, -1, 1, 1, 1, 1, 1],
+ false
+ }
+
+ test_vec_2! { test_vec_any_gt_u16_false, vec_any_gt, u16x8 -> bool,
+ [1, 2, 0, 0, 0, 0, 0, 0],
+ [2, 42, 255, 1, 1, 1, 1, 1],
+ false
+ }
+
+ test_vec_2! { test_vec_any_gt_i32_false, vec_any_gt, i32x4 -> bool,
+ [1, -1, 0, 0],
+ [2, 0, 1, 1],
+ false
+ }
+
+ test_vec_2! { test_vec_any_gt_u32_false, vec_any_gt, u32x4 -> bool,
+ [1, 2, 1, 0],
+ [4, 255, 4, 1],
+ false
+ }
+
+ test_vec_2! { test_vec_any_gt_i8_true, vec_any_gt, i8x16 -> bool,
+ [1, 0, -1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, -1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ true
+ }
+
+ test_vec_2! { test_vec_any_gt_u8_true, vec_any_gt, u8x16 -> bool,
+ [1, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ true
+ }
+
+ test_vec_2! { test_vec_any_gt_i16_true, vec_any_gt, i16x8 -> bool,
+ [1, -1, 1, 0, 0, 0, 0, 0],
+ [0, -1, 1, 0, 0, 0, 0, 0],
+ true
+ }
+
+ test_vec_2! { test_vec_any_gt_u16_true, vec_any_gt, u16x8 -> bool,
+ [1, 255, 1, 0, 0, 0, 0, 0],
+ [0, 255, 1, 0, 0, 0, 0, 0],
+ true
+ }
+
+ test_vec_2! { test_vec_any_gt_i32_true, vec_any_gt, i32x4 -> bool,
+ [1, -1, 0, 1],
+ [0, -1, 0, 1],
+ true
+ }
+
+ test_vec_2! { test_vec_any_gt_u32_true, vec_any_gt, u32x4 -> bool,
+ [1, 255, 0, 1],
+ [0, 255, 0, 1],
+ true
+ }
+
+ test_vec_2! { test_vec_all_in_true, vec_all_in, f32x4 -> bool,
+ [0.0, -0.1, 0.0, 0.0],
+ [0.1, 0.2, 0.0, 0.0],
+ true
+ }
+
+ test_vec_2! { test_vec_all_in_false, vec_all_in, f32x4 -> bool,
+ [0.5, 0.4, -0.5, 0.8],
+ [0.1, 0.4, -0.5, 0.8],
+ false
+ }
+
+ test_vec_2! { test_vec_all_le_i8_false, vec_all_le, i8x16 -> bool,
+ [0, 0, -1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ false
+ }
+
+ test_vec_2! { test_vec_all_le_u8_false, vec_all_le, u8x16 -> bool,
+ [0, 0, 255, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [1, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ false
+ }
+
+ test_vec_2! { test_vec_all_le_i16_false, vec_all_le, i16x8 -> bool,
+ [0, 0, -1, 1, 0, 0, 0, 0],
+ [1, -1, 0, 0, 0, 0, 0, 0],
+ false
+ }
+
+ test_vec_2! { test_vec_all_le_u16_false, vec_all_le, u16x8 -> bool,
+ [0, 0, 255, 1, 0, 0, 0, 0],
+ [1, 255, 0, 0, 0, 0, 0, 0],
+ false
+ }
+
+ test_vec_2! { test_vec_all_le_i32_false, vec_all_le, i32x4 -> bool,
+ [0, -1, 0, 1],
+ [1, -1, 0, 0],
+ false
+ }
+
+ test_vec_2! { test_vec_all_le_u32_false, vec_all_le, u32x4 -> bool,
+ [0, 255, 1, 1],
+ [1, 255, 0, 0],
+ false
+ }
+
+ test_vec_2! { test_vec_all_le_i8_true, vec_all_le, i8x16 -> bool,
+ [0, 0, -1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, -1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
+ true
+ }
+
+ test_vec_2! { test_vec_all_le_u8_true, vec_all_le, u8x16 -> bool,
+ [1, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [1, 255, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ true
+ }
+
+ test_vec_2! { test_vec_all_le_i16_true, vec_all_le, i16x8 -> bool,
+ [1, -5, 2, 0, 0, 0, 0, 0],
+ [1, -1, 42, 0, 0, 0, 0, 0],
+ true
+ }
+
+ test_vec_2! { test_vec_all_le_u16_true, vec_all_le, u16x8 -> bool,
+ [2, 255, 1, 0, 0, 0, 0, 0],
+ [42, 255, 1, 0, 0, 0, 0, 0],
+ true
+ }
+
+ test_vec_2! { test_vec_all_le_i32_true, vec_all_le, i32x4 -> bool,
+ [0, -1, 0, 1],
+ [1, -1, 0, 1],
+ true
+ }
+
+ test_vec_2! { test_vec_all_le_u32_true, vec_all_le, u32x4 -> bool,
+ [1, 254, 0, 0],
+ [1, 255, 0, 1],
+ true
+ }
+
+ test_vec_2! { test_vec_any_le_i8_false, vec_any_le, i8x16 -> bool,
+ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ false
+ }
+
+ test_vec_2! { test_vec_any_le_u8_false, vec_any_le, u8x16 -> bool,
+ [42, 255, 255, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
+ [1, 254, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ false
+ }
+
+ test_vec_2! { test_vec_any_le_i16_false, vec_any_le, i16x8 -> bool,
+ [2, 0, -1, 1, 1, 1, 1, 1],
+ [1, -1, -2, 0, 0, 0, 0, 0],
+ false
+ }
+
+ test_vec_2! { test_vec_any_le_u16_false, vec_any_le, u16x8 -> bool,
+ [2, 42, 255, 1, 1, 1, 1, 1],
+ [1, 2, 0, 0, 0, 0, 0, 0],
+ false
+ }
+
+ test_vec_2! { test_vec_any_le_i32_false, vec_any_le, i32x4 -> bool,
+ [2, 0, 1, 1],
+ [1, -1, 0, 0],
+ false
+ }
+
+ test_vec_2! { test_vec_any_le_u32_false, vec_any_le, u32x4 -> bool,
+ [4, 255, 4, 1],
+ [1, 2, 1, 0],
+ false
+ }
+
+ test_vec_2! { test_vec_any_le_i8_true, vec_any_le, i8x16 -> bool,
+ [0, 0, -1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [1, 0, -1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ true
+ }
+
+ test_vec_2! { test_vec_any_le_u8_true, vec_any_le, u8x16 -> bool,
+ [1, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ true
+ }
+
+ test_vec_2! { test_vec_any_le_i16_true, vec_any_le, i16x8 -> bool,
+ [1, -1, 1, 0, 0, 0, 0, 0],
+ [0, -1, 1, 0, 0, 0, 0, 0],
+ true
+ }
+
+ test_vec_2! { test_vec_any_le_u16_true, vec_any_le, u16x8 -> bool,
+ [1, 255, 1, 0, 0, 0, 0, 0],
+ [0, 255, 1, 0, 0, 0, 0, 0],
+ true
+ }
+
+ test_vec_2! { test_vec_any_le_i32_true, vec_any_le, i32x4 -> bool,
+ [1, -1, 0, 1],
+ [0, -1, 0, 1],
+ true
+ }
+
+ test_vec_2! { test_vec_any_le_u32_true, vec_any_le, u32x4 -> bool,
+ [1, 255, 0, 1],
+ [0, 255, 0, 1],
+ true
+ }
+
+ test_vec_2! { test_vec_all_lt_i8_false, vec_all_lt, i8x16 -> bool,
+ [0, 0, -1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ false
+ }
+
+ test_vec_2! { test_vec_all_lt_u8_false, vec_all_lt, u8x16 -> bool,
+ [0, 0, 255, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ false
+ }
+
+ test_vec_2! { test_vec_all_lt_i16_false, vec_all_lt, i16x8 -> bool,
+ [0, 0, -1, 1, 0, 0, 0, 0],
+ [1, 0, 0, 0, 0, 0, 0, 0],
+ false
+ }
+
+ test_vec_2! { test_vec_all_lt_u16_false, vec_all_lt, u16x8 -> bool,
+ [0, 0, 255, 1, 0, 0, 0, 0],
+ [1, 0, 0, 0, 0, 0, 0, 0],
+ false
+ }
+
+ test_vec_2! { test_vec_all_lt_i32_false, vec_all_lt, i32x4 -> bool,
+ [0, -1, 0, 1],
+ [1, -1, 0, 0],
+ false
+ }
+
+ test_vec_2! { test_vec_all_lt_u32_false, vec_all_lt, u32x4 -> bool,
+ [0, 255, 1, 1],
+ [1, 255, 0, 0],
+ false
+ }
+
+ test_vec_2! { test_vec_all_lt_i8_true, vec_all_lt, i8x16 -> bool,
+ [0, 0, -2, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+ [2, 1, -1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
+ true
+ }
+
+ test_vec_2! { test_vec_all_lt_u8_true, vec_all_lt, u8x16 -> bool,
+ [0, 254, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [1, 255, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
+ true
+ }
+
+ test_vec_2! { test_vec_all_lt_i16_true, vec_all_lt, i16x8 -> bool,
+ [0, -5, 2, 0, 0, 0, 0, 0],
+ [1, -1, 42, 1, 1, 1, 1, 1],
+ true
+ }
+
+ test_vec_2! { test_vec_all_lt_u16_true, vec_all_lt, u16x8 -> bool,
+ [2, 254, 0, 0, 0, 0, 0, 0],
+ [42, 255, 1, 1, 1, 1, 1, 1],
+ true
+ }
+
+ test_vec_2! { test_vec_all_lt_i32_true, vec_all_lt, i32x4 -> bool,
+ [0, -2, 0, 0],
+ [1, -1, 1, 1],
+ true
+ }
+
+ test_vec_2! { test_vec_all_lt_u32_true, vec_all_lt, u32x4 -> bool,
+ [0, 254, 0, 0],
+ [1, 255, 1, 1],
+ true
+ }
+
+ test_vec_2! { test_vec_any_lt_i8_false, vec_any_lt, i8x16 -> bool,
+ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
+ [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ false
+ }
+
+ test_vec_2! { test_vec_any_lt_u8_false, vec_any_lt, u8x16 -> bool,
+ [42, 255, 255, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
+ [1, 254, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ false
+ }
+
+ test_vec_2! { test_vec_any_lt_i16_false, vec_any_lt, i16x8 -> bool,
+ [2, 0, -1, 1, 1, 1, 1, 1],
+ [1, -1, -2, 0, 0, 0, 0, 0],
+ false
+ }
+
+ test_vec_2! { test_vec_any_lt_u16_false, vec_any_lt, u16x8 -> bool,
+ [2, 42, 255, 1, 1, 1, 1, 1],
+ [1, 2, 0, 0, 0, 0, 0, 0],
+ false
+ }
+
+ test_vec_2! { test_vec_any_lt_i32_false, vec_any_lt, i32x4 -> bool,
+ [2, 0, 1, 1],
+ [1, -1, 0, 0],
+ false
+ }
+
+ test_vec_2! { test_vec_any_lt_u32_false, vec_any_lt, u32x4 -> bool,
+ [4, 255, 4, 1],
+ [1, 2, 1, 0],
+ false
+ }
+
+ test_vec_2! { test_vec_any_lt_i8_true, vec_any_lt, i8x16 -> bool,
+ [0, 0, -1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [1, 0, -1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ true
+ }
+
+ test_vec_2! { test_vec_any_lt_u8_true, vec_any_lt, u8x16 -> bool,
+ [0, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [1, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ true
+ }
+
+ test_vec_2! { test_vec_any_lt_i16_true, vec_any_lt, i16x8 -> bool,
+ [0, -1, 1, 0, 0, 0, 0, 0],
+ [1, -1, 1, 0, 0, 0, 0, 0],
+ true
+ }
+
+ test_vec_2! { test_vec_any_lt_u16_true, vec_any_lt, u16x8 -> bool,
+ [0, 255, 1, 0, 0, 0, 0, 0],
+ [1, 255, 1, 0, 0, 0, 0, 0],
+ true
+ }
+
+ test_vec_2! { test_vec_any_lt_i32_true, vec_any_lt, i32x4 -> bool,
+ [0, -1, 0, 1],
+ [1, -1, 0, 1],
+ true
+ }
+
+ test_vec_2! { test_vec_any_lt_u32_true, vec_any_lt, u32x4 -> bool,
+ [0, 255, 0, 1],
+ [1, 255, 0, 1],
+ true
+ }
+
+ test_vec_2! { test_vec_all_ne_i8_false, vec_all_ne, i8x16 -> bool,
+ [1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, -1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ false
+ }
+
+ test_vec_2! { test_vec_all_ne_u8_false, vec_all_ne, u8x16 -> bool,
+ [1, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 255, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ false
+ }
+
+ test_vec_2! { test_vec_all_ne_i16_false, vec_all_ne, i16x8 -> bool,
+ [1, -1, 0, 0, 0, 0, 0, 0],
+ [0, -1, 1, 0, 0, 0, 0, 0],
+ false
+ }
+
+ test_vec_2! { test_vec_all_ne_u16_false, vec_all_ne, u16x8 -> bool,
+ [1, 255, 0, 0, 0, 0, 0, 0],
+ [0, 255, 0, 1, 0, 0, 0, 0],
+ false
+ }
+
+ test_vec_2! { test_vec_all_ne_i32_false, vec_all_ne, i32x4 -> bool,
+ [1, -1, 0, 0],
+ [0, -1, 0, 1],
+ false
+ }
+
+ test_vec_2! { test_vec_all_ne_u32_false, vec_all_ne, u32x4 -> bool,
+ [1, 255, 0, 0],
+ [0, 255, 0, 1],
+ false
+ }
+
+ test_vec_2! { test_vec_all_ne_i8_true, vec_all_ne, i8x16 -> bool,
+ [0, -1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
+ [1, 0, -1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ true
+ }
+
+ test_vec_2! { test_vec_all_ne_u8_true, vec_all_ne, u8x16 -> bool,
+ [0, 254, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
+ [1, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ true
+ }
+
+ test_vec_2! { test_vec_all_ne_i16_true, vec_all_ne, i16x8 -> bool,
+ [2, -2, 0, 1, 1, 1, 1, 1],
+ [1, -1, 1, 0, 0, 0, 0, 0],
+ true
+ }
+
+ test_vec_2! { test_vec_all_ne_u16_true, vec_all_ne, u16x8 -> bool,
+ [0, 254, 1, 1, 0, 0, 1, 0],
+ [1, 255, 0, 0, 1, 1, 0, 1],
+ true
+ }
+
+ test_vec_2! { test_vec_all_ne_i32_true, vec_all_ne, i32x4 -> bool,
+ [0, -2, 0, 0],
+ [1, -1, 1, 1],
+ true
+ }
+
+ test_vec_2! { test_vec_all_ne_u32_true, vec_all_ne, u32x4 -> bool,
+ [1, 255, 0, 0],
+ [0, 254, 1, 1],
+ true
+ }
+
+ test_vec_2! { test_vec_any_ne_i8_false, vec_any_ne, i8x16 -> bool,
+ [1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ false
+ }
+
+ test_vec_2! { test_vec_any_ne_u8_false, vec_any_ne, u8x16 -> bool,
+ [1, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [1, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ false
+ }
+
+ test_vec_2! { test_vec_any_ne_i16_false, vec_any_ne, i16x8 -> bool,
+ [1, -1, 0, 0, 0, 0, 0, 0],
+ [1, -1, 0, 0, 0, 0, 0, 0],
+ false
+ }
+
+ test_vec_2! { test_vec_any_ne_u16_false, vec_any_ne, u16x8 -> bool,
+ [1, 255, 1, 1, 1, 1, 1, 0],
+ [1, 255, 1, 1, 1, 1, 1, 0],
+ false
+ }
+
+ test_vec_2! { test_vec_any_ne_i32_false, vec_any_ne, i32x4 -> bool,
+ [0, -1, 1, 1],
+ [0, -1, 1, 1],
+ false
+ }
+
+ test_vec_2! { test_vec_any_ne_u32_false, vec_any_ne, u32x4 -> bool,
+ [1, 2, 1, 255],
+ [1, 2, 1, 255],
+ false
+ }
+
+ test_vec_2! { test_vec_any_ne_i8_true, vec_any_ne, i8x16 -> bool,
+ [1, 0, -1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, -1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ true
+ }
+
+ test_vec_2! { test_vec_any_ne_u8_true, vec_any_ne, u8x16 -> bool,
+ [0, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [1, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ true
+ }
+
+ test_vec_2! { test_vec_any_ne_i16_true, vec_any_ne, i16x8 -> bool,
+ [0, -1, 1, 0, 0, 0, 0, 0],
+ [1, -1, 1, 0, 0, 0, 0, 0],
+ true
+ }
+
+ test_vec_2! { test_vec_any_ne_u16_true, vec_any_ne, u16x8 -> bool,
+ [0, 255, 1, 0, 0, 0, 0, 0],
+ [1, 255, 1, 0, 0, 0, 0, 0],
+ true
+ }
+
+ test_vec_2! { test_vec_any_ne_i32_true, vec_any_ne, i32x4 -> bool,
+ [0, -1, 0, 1],
+ [1, -1, 0, 1],
+ true
+ }
+
+ test_vec_2! { test_vec_any_ne_u32_true, vec_any_ne, u32x4 -> bool,
+ [0, 255, 0, 1],
+ [1, 255, 0, 1],
+ true
+ }
+
#[simd_test(enable = "altivec")]
unsafe fn test_vec_cmpb() {
let a: vector_float = transmute(f32x4::new(0.1, 0.5, 0.6, 0.9));
@@ -2776,4 +4582,108 @@ mod tests {
let z = vec_add(x, y);
assert_eq!(i32x4::splat(5), transmute(z));
}
+
+ #[simd_test(enable = "altivec")]
+ unsafe fn test_vec_ctf_u32() {
+ let v: vector_unsigned_int = transmute(u32x4::new(u32::MIN, u32::MAX, u32::MAX, 42));
+ let v2 = vec_ctf::<1, _>(v);
+ let r2: vector_float = transmute(f32x4::new(0.0, 2147483600.0, 2147483600.0, 21.0));
+ let v4 = vec_ctf::<2, _>(v);
+ let r4: vector_float = transmute(f32x4::new(0.0, 1073741800.0, 1073741800.0, 10.5));
+ let v8 = vec_ctf::<3, _>(v);
+ let r8: vector_float = transmute(f32x4::new(0.0, 536870900.0, 536870900.0, 5.25));
+
+ let check = |a, b| {
+ let r = transmute(vec_cmple(
+ vec_abs(vec_sub(a, b)),
+ vec_splats(std::f32::EPSILON),
+ ));
+ let e = m32x4::new(true, true, true, true);
+ assert_eq!(e, r);
+ };
+
+ check(v2, r2);
+ check(v4, r4);
+ check(v8, r8);
+ }
+
+ #[simd_test(enable = "altivec")]
+ unsafe fn test_vec_ctu() {
+ let v = u32x4::new(u32::MIN, u32::MAX, u32::MAX, 42);
+ let v2: u32x4 = transmute(vec_ctu::<1>(transmute(f32x4::new(
+ 0.0,
+ 2147483600.0,
+ 2147483600.0,
+ 21.0,
+ ))));
+ let v4: u32x4 = transmute(vec_ctu::<2>(transmute(f32x4::new(
+ 0.0,
+ 1073741800.0,
+ 1073741800.0,
+ 10.5,
+ ))));
+ let v8: u32x4 = transmute(vec_ctu::<3>(transmute(f32x4::new(
+ 0.0,
+ 536870900.0,
+ 536870900.0,
+ 5.25,
+ ))));
+
+ assert_eq!(v2, v);
+ assert_eq!(v4, v);
+ assert_eq!(v8, v);
+ }
+
+ #[simd_test(enable = "altivec")]
+ unsafe fn test_vec_ctf_i32() {
+ let v: vector_signed_int = transmute(i32x4::new(i32::MIN, i32::MAX, i32::MAX - 42, 42));
+ let v2 = vec_ctf::<1, _>(v);
+ let r2: vector_float =
+ transmute(f32x4::new(-1073741800.0, 1073741800.0, 1073741800.0, 21.0));
+ let v4 = vec_ctf::<2, _>(v);
+ let r4: vector_float = transmute(f32x4::new(-536870900.0, 536870900.0, 536870900.0, 10.5));
+ let v8 = vec_ctf::<3, _>(v);
+ let r8: vector_float = transmute(f32x4::new(-268435460.0, 268435460.0, 268435460.0, 5.25));
+
+ let check = |a, b| {
+ let r = transmute(vec_cmple(
+ vec_abs(vec_sub(a, b)),
+ vec_splats(std::f32::EPSILON),
+ ));
+ let e = m32x4::new(true, true, true, true);
+ assert_eq!(e, r);
+ };
+
+ check(v2, r2);
+ check(v4, r4);
+ check(v8, r8);
+ }
+
+ #[simd_test(enable = "altivec")]
+ unsafe fn test_vec_cts() {
+ let v = i32x4::new(i32::MIN, i32::MAX, i32::MAX, 42);
+ let v2: i32x4 = transmute(vec_cts::<1>(transmute(f32x4::new(
+ -1073741800.0,
+ 1073741800.0,
+ 1073741800.0,
+ 21.0,
+ ))));
+ let v4: i32x4 = transmute(vec_cts::<2>(transmute(f32x4::new(
+ -536870900.0,
+ 536870900.0,
+ 536870900.0,
+ 10.5,
+ ))));
+ let v8: i32x4 = transmute(vec_cts::<3>(transmute(f32x4::new(
+ -268435460.0,
+ 268435460.0,
+ 268435460.0,
+ 5.25,
+ ))));
+
+ assert_eq!(v2, v);
+ assert_eq!(v4, v);
+ assert_eq!(v8, v);
+ }
}
diff --git a/library/stdarch/crates/core_arch/src/powerpc/macros.rs b/library/stdarch/crates/core_arch/src/powerpc/macros.rs
new file mode 100644
index 000000000..3b61dd5e7
--- /dev/null
+++ b/library/stdarch/crates/core_arch/src/powerpc/macros.rs
@@ -0,0 +1,88 @@
+macro_rules! test_impl {
+ ($fun:ident ($($v:ident : $ty:ty),*) -> $r:ty [$call:ident, $instr:ident]) => {
+ #[inline]
+ #[target_feature(enable = "altivec")]
+ #[cfg_attr(test, assert_instr($instr))]
+ pub unsafe fn $fun ($($v : $ty),*) -> $r {
+ $call ($($v),*)
+ }
+ };
+ ($fun:ident ($($v:ident : $ty:ty),*) -> $r:ty [$call:ident, $instr_altivec:ident / $instr_vsx:ident]) => {
+ #[inline]
+ #[target_feature(enable = "altivec")]
+ #[cfg_attr(all(test, not(target_feature="vsx")), assert_instr($instr_altivec))]
+ #[cfg_attr(all(test, target_feature="vsx"), assert_instr($instr_vsx))]
+ pub unsafe fn $fun ($($v : $ty),*) -> $r {
+ $call ($($v),*)
+ }
+ }
+}
+
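+// A quick map of the `impl_vec_trait!` arms below: the `1` arms implement a
+// unary operation over all seven element types (including float), the `2`
+// arms a binary operation over the six integer types, and the `~` arms
+// additionally accept the matching `vector_bool_*` type on either side.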
+#[allow(unknown_lints, unused_macro_rules)]
+macro_rules! impl_vec_trait {
+ ([$Trait:ident $m:ident] $fun:ident ($a:ty)) => {
+ impl $Trait for $a {
+ #[inline]
+ #[target_feature(enable = "altivec")]
+ unsafe fn $m(self) -> Self {
+ $fun(transmute(self))
+ }
+ }
+ };
+ ([$Trait:ident $m:ident] $fun:ident ($a:ty) -> $r:ty) => {
+ impl $Trait for $a {
+ type Result = $r;
+ #[inline]
+ #[target_feature(enable = "altivec")]
+ unsafe fn $m(self) -> Self::Result {
+ $fun(transmute(self))
+ }
+ }
+ };
+ ([$Trait:ident $m:ident] 1 ($ub:ident, $sb:ident, $uh:ident, $sh:ident, $uw:ident, $sw:ident, $sf: ident)) => {
+ impl_vec_trait!{ [$Trait $m] $ub (vector_unsigned_char) -> vector_unsigned_char }
+ impl_vec_trait!{ [$Trait $m] $sb (vector_signed_char) -> vector_signed_char }
+ impl_vec_trait!{ [$Trait $m] $uh (vector_unsigned_short) -> vector_unsigned_short }
+ impl_vec_trait!{ [$Trait $m] $sh (vector_signed_short) -> vector_signed_short }
+ impl_vec_trait!{ [$Trait $m] $uw (vector_unsigned_int) -> vector_unsigned_int }
+ impl_vec_trait!{ [$Trait $m] $sw (vector_signed_int) -> vector_signed_int }
+ impl_vec_trait!{ [$Trait $m] $sf (vector_float) -> vector_float }
+ };
+ ([$Trait:ident $m:ident] $fun:ident ($a:ty, $b:ty) -> $r:ty) => {
+ impl $Trait<$b> for $a {
+ type Result = $r;
+ #[inline]
+ #[target_feature(enable = "altivec")]
+ unsafe fn $m(self, b: $b) -> Self::Result {
+ $fun(transmute(self), transmute(b))
+ }
+ }
+ };
+ ([$Trait:ident $m:ident] $fun:ident ($a:ty, ~$b:ty) -> $r:ty) => {
+ impl_vec_trait!{ [$Trait $m] $fun ($a, $a) -> $r }
+ impl_vec_trait!{ [$Trait $m] $fun ($a, $b) -> $r }
+ impl_vec_trait!{ [$Trait $m] $fun ($b, $a) -> $r }
+ };
+ ([$Trait:ident $m:ident] ~($ub:ident, $sb:ident, $uh:ident, $sh:ident, $uw:ident, $sw:ident)) => {
+ impl_vec_trait!{ [$Trait $m] $ub (vector_unsigned_char, ~vector_bool_char) -> vector_unsigned_char }
+ impl_vec_trait!{ [$Trait $m] $sb (vector_signed_char, ~vector_bool_char) -> vector_signed_char }
+ impl_vec_trait!{ [$Trait $m] $uh (vector_unsigned_short, ~vector_bool_short) -> vector_unsigned_short }
+ impl_vec_trait!{ [$Trait $m] $sh (vector_signed_short, ~vector_bool_short) -> vector_signed_short }
+ impl_vec_trait!{ [$Trait $m] $uw (vector_unsigned_int, ~vector_bool_int) -> vector_unsigned_int }
+ impl_vec_trait!{ [$Trait $m] $sw (vector_signed_int, ~vector_bool_int) -> vector_signed_int }
+ };
+ ([$Trait:ident $m:ident] ~($fn:ident)) => {
+ impl_vec_trait!{ [$Trait $m] ~($fn, $fn, $fn, $fn, $fn, $fn) }
+ };
+ ([$Trait:ident $m:ident] 2 ($ub:ident, $sb:ident, $uh:ident, $sh:ident, $uw:ident, $sw:ident)) => {
+ impl_vec_trait!{ [$Trait $m] $ub (vector_unsigned_char, vector_unsigned_char) -> vector_unsigned_char }
+ impl_vec_trait!{ [$Trait $m] $sb (vector_signed_char, vector_signed_char) -> vector_signed_char }
+ impl_vec_trait!{ [$Trait $m] $uh (vector_unsigned_short, vector_unsigned_short) -> vector_unsigned_short }
+ impl_vec_trait!{ [$Trait $m] $sh (vector_signed_short, vector_signed_short) -> vector_signed_short }
+ impl_vec_trait!{ [$Trait $m] $uw (vector_unsigned_int, vector_unsigned_int) -> vector_unsigned_int }
+ impl_vec_trait!{ [$Trait $m] $sw (vector_signed_int, vector_signed_int) -> vector_signed_int }
+ };
+ ([$Trait:ident $m:ident] 2 ($fn:ident)) => {
+ impl_vec_trait!{ [$Trait $m] 2 ($fn, $fn, $fn, $fn, $fn, $fn) }
+ }
+}
diff --git a/library/stdarch/crates/core_arch/src/powerpc/mod.rs b/library/stdarch/crates/core_arch/src/powerpc/mod.rs
index 9765d11d1..753f84b0b 100644
--- a/library/stdarch/crates/core_arch/src/powerpc/mod.rs
+++ b/library/stdarch/crates/core_arch/src/powerpc/mod.rs
@@ -1,8 +1,9 @@
//! PowerPC intrinsics
-#[cfg(target_feature = "altivec")]
+#[macro_use]
+mod macros;
+
mod altivec;
-#[cfg(target_feature = "altivec")]
pub use self::altivec::*;
mod vsx;
diff --git a/library/stdarch/crates/core_arch/src/powerpc/vsx.rs b/library/stdarch/crates/core_arch/src/powerpc/vsx.rs
index 283a7e5ce..f2ebc23b2 100644
--- a/library/stdarch/crates/core_arch/src/powerpc/vsx.rs
+++ b/library/stdarch/crates/core_arch/src/powerpc/vsx.rs
@@ -13,7 +13,7 @@ use crate::core_arch::simd_llvm::*;
#[cfg(test)]
use stdarch_test::assert_instr;
-use crate::mem;
+use crate::mem::transmute;
types! {
// pub struct vector_Float16 = f16x8;
@@ -45,13 +45,16 @@ mod sealed {
#[target_feature(enable = "vsx")]
#[cfg_attr(all(test, target_endian = "little"), assert_instr(xxmrgld, dm = 0x0))]
#[cfg_attr(all(test, target_endian = "big"), assert_instr(xxspltd, dm = 0x0))]
- unsafe fn xxpermdi(a: i64x2, b: i64x2, dm: u8) -> i64x2 {
- match dm & 0b11 {
+ unsafe fn xxpermdi(a: vector_signed_long, b: vector_signed_long, dm: u8) -> vector_signed_long {
+ let a: i64x2 = transmute(a);
+ let b: i64x2 = transmute(b);
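+ // `dm` picks one doubleword from each input: bit 0 selects a's half,
+ // bit 1 selects b's half.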
+ let r: i64x2 = match dm & 0b11 {
0 => simd_shuffle!(a, b, [0b00, 0b10]),
1 => simd_shuffle!(a, b, [0b01, 0b10]),
2 => simd_shuffle!(a, b, [0b00, 0b11]),
_ => simd_shuffle!(a, b, [0b01, 0b11]),
- }
+ };
+ transmute(r)
}
macro_rules! vec_xxpermdi {
@@ -60,7 +63,7 @@ mod sealed {
#[inline]
#[target_feature(enable = "vsx")]
unsafe fn vec_xxpermdi(self, b: Self, dm: u8) -> Self {
- mem::transmute(xxpermdi(mem::transmute(self), mem::transmute(b), dm))
+ transmute(xxpermdi(transmute(self), transmute(b), dm))
}
}
}
@@ -92,21 +95,21 @@ mod tests {
#[cfg(target_arch = "powerpc64")]
use crate::core_arch::arch::powerpc64::*;
- use super::mem;
use crate::core_arch::simd::*;
+ use crate::mem::transmute;
use stdarch_test::simd_test;
macro_rules! test_vec_xxpermdi {
{$name:ident, $shorttype:ident, $longtype:ident, [$($a:expr),+], [$($b:expr),+], [$($c:expr),+], [$($d:expr),+]} => {
#[simd_test(enable = "vsx")]
unsafe fn $name() {
- let a: $longtype = mem::transmute($shorttype::new($($a),+, $($b),+));
- let b = mem::transmute($shorttype::new($($c),+, $($d),+));
+ let a: $longtype = transmute($shorttype::new($($a),+, $($b),+));
+ let b = transmute($shorttype::new($($c),+, $($d),+));
- assert_eq!($shorttype::new($($a),+, $($c),+), mem::transmute(vec_xxpermdi::<_, 0>(a, b)));
- assert_eq!($shorttype::new($($b),+, $($c),+), mem::transmute(vec_xxpermdi::<_, 1>(a, b)));
- assert_eq!($shorttype::new($($a),+, $($d),+), mem::transmute(vec_xxpermdi::<_, 2>(a, b)));
- assert_eq!($shorttype::new($($b),+, $($d),+), mem::transmute(vec_xxpermdi::<_, 3>(a, b)));
+ assert_eq!($shorttype::new($($a),+, $($c),+), transmute(vec_xxpermdi::<_, 0>(a, b)));
+ assert_eq!($shorttype::new($($b),+, $($c),+), transmute(vec_xxpermdi::<_, 1>(a, b)));
+ assert_eq!($shorttype::new($($a),+, $($d),+), transmute(vec_xxpermdi::<_, 2>(a, b)));
+ assert_eq!($shorttype::new($($b),+, $($d),+), transmute(vec_xxpermdi::<_, 3>(a, b)));
}
}
}
diff --git a/library/stdarch/crates/core_arch/src/wasm32/simd128.rs b/library/stdarch/crates/core_arch/src/wasm32/simd128.rs
index 5220fa74f..e974d9e56 100644
--- a/library/stdarch/crates/core_arch/src/wasm32/simd128.rs
+++ b/library/stdarch/crates/core_arch/src/wasm32/simd128.rs
@@ -4719,7 +4719,7 @@ pub mod tests {
let v2_v128: v128 = mem::transmute(v2);
let v3_v128 = super::$f(v1_v128, v2_v128);
let mut v3 = [$($vec1)*];
- drop(v3);
+ let _ignore = v3;
v3 = mem::transmute(v3_v128);
for (i, actual) in v3.iter().enumerate() {
@@ -4746,7 +4746,7 @@ pub mod tests {
let v1_v128: v128 = mem::transmute(v1);
let v2_v128 = super::$f(v1_v128);
let mut v2 = [$($vec1)*];
- drop(v2);
+ let _ignore = v2;
v2 = mem::transmute(v2_v128);
for (i, actual) in v2.iter().enumerate() {
diff --git a/library/stdarch/crates/core_arch/src/x86/avx.rs b/library/stdarch/crates/core_arch/src/x86/avx.rs
index a77005c0e..fafee5c0b 100644
--- a/library/stdarch/crates/core_arch/src/x86/avx.rs
+++ b/library/stdarch/crates/core_arch/src/x86/avx.rs
@@ -2436,8 +2436,6 @@ pub unsafe fn _mm256_set1_ps(a: f32) -> __m256 {
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_set1_epi8)
#[inline]
#[target_feature(enable = "avx")]
-#[cfg_attr(test, assert_instr(vpshufb))]
-#[cfg_attr(test, assert_instr(vinsertf128))]
// This intrinsic has no corresponding instruction.
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_set1_epi8(a: i8) -> __m256i {
@@ -2689,7 +2687,9 @@ pub unsafe fn _mm256_zextpd128_pd256(a: __m128d) -> __m256d {
simd_shuffle!(a, _mm_setzero_pd(), [0, 1, 2, 3])
}
-/// Returns vector of type `__m256` with undefined elements.
+/// Returns vector of type `__m256` with indeterminate elements.
+/// Despite being "undefined", this is some valid value and not equivalent to [`mem::MaybeUninit`].
+/// In practice, this is equivalent to [`mem::zeroed`].
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_undefined_ps)
#[inline]
@@ -2700,7 +2700,9 @@ pub unsafe fn _mm256_undefined_ps() -> __m256 {
_mm256_set1_ps(0.0)
}
-/// Returns vector of type `__m256d` with undefined elements.
+/// Returns vector of type `__m256d` with indeterminate elements.
+/// Despite being "undefined", this is some valid value and not equivalent to [`mem::MaybeUninit`].
+/// In practice, this is equivalent to [`mem::zeroed`].
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_undefined_pd)
#[inline]
@@ -2711,7 +2713,9 @@ pub unsafe fn _mm256_undefined_pd() -> __m256d {
_mm256_set1_pd(0.0)
}
-/// Returns vector of type __m256i with undefined elements.
+/// Returns vector of type `__m256i` with indeterminate elements.
+/// Despite being "undefined", this is some valid value and not equivalent to [`mem::MaybeUninit`].
+/// In practice, this is equivalent to [`mem::zeroed`].
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_undefined_si256)
#[inline]
diff --git a/library/stdarch/crates/core_arch/src/x86/avx2.rs b/library/stdarch/crates/core_arch/src/x86/avx2.rs
index 5262628e1..cdf84b382 100644
--- a/library/stdarch/crates/core_arch/src/x86/avx2.rs
+++ b/library/stdarch/crates/core_arch/src/x86/avx2.rs
@@ -3592,7 +3592,6 @@ pub unsafe fn _mm256_cvtsd_f64(a: __m256d) -> f64 {
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cvtsi256_si32)
#[inline]
#[target_feature(enable = "avx2")]
-//#[cfg_attr(test, assert_instr(movd))] FIXME
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_cvtsi256_si32(a: __m256i) -> i32 {
simd_extract(a.as_i32x8(), 0)
diff --git a/library/stdarch/crates/core_arch/src/x86/avx512bw.rs b/library/stdarch/crates/core_arch/src/x86/avx512bw.rs
index 0ef919617..bc1e7ddfb 100644
--- a/library/stdarch/crates/core_arch/src/x86/avx512bw.rs
+++ b/library/stdarch/crates/core_arch/src/x86/avx512bw.rs
@@ -7386,7 +7386,7 @@ pub unsafe fn _mm_maskz_set1_epi16(k: __mmask8, a: i16) -> __m128i {
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_set1_epi8&expand=4970)
#[inline]
#[target_feature(enable = "avx512bw")]
-#[cfg_attr(test, assert_instr(vpbroadcastb))]
+#[cfg_attr(test, assert_instr(vpbroadcast))]
pub unsafe fn _mm512_mask_set1_epi8(src: __m512i, k: __mmask64, a: i8) -> __m512i {
let r = _mm512_set1_epi8(a).as_i8x64();
transmute(simd_select_bitmask(k, r, src.as_i8x64()))
@@ -7397,7 +7397,7 @@ pub unsafe fn _mm512_mask_set1_epi8(src: __m512i, k: __mmask64, a: i8) -> __m512
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_set1_epi8&expand=4971)
#[inline]
#[target_feature(enable = "avx512bw")]
-#[cfg_attr(test, assert_instr(vpbroadcastb))]
+#[cfg_attr(test, assert_instr(vpbroadcast))]
pub unsafe fn _mm512_maskz_set1_epi8(k: __mmask64, a: i8) -> __m512i {
let r = _mm512_set1_epi8(a).as_i8x64();
let zero = _mm512_setzero_si512().as_i8x64();
@@ -7409,7 +7409,7 @@ pub unsafe fn _mm512_maskz_set1_epi8(k: __mmask64, a: i8) -> __m512i {
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_set1_epi8&expand=4967)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
-#[cfg_attr(test, assert_instr(vpbroadcastb))]
+#[cfg_attr(test, assert_instr(vpbroadcast))]
pub unsafe fn _mm256_mask_set1_epi8(src: __m256i, k: __mmask32, a: i8) -> __m256i {
let r = _mm256_set1_epi8(a).as_i8x32();
transmute(simd_select_bitmask(k, r, src.as_i8x32()))
@@ -7420,7 +7420,7 @@ pub unsafe fn _mm256_mask_set1_epi8(src: __m256i, k: __mmask32, a: i8) -> __m256
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_set1_epi8&expand=4968)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
-#[cfg_attr(test, assert_instr(vpbroadcastb))]
+#[cfg_attr(test, assert_instr(vpbroadcast))]
pub unsafe fn _mm256_maskz_set1_epi8(k: __mmask32, a: i8) -> __m256i {
let r = _mm256_set1_epi8(a).as_i8x32();
let zero = _mm256_setzero_si256().as_i8x32();
@@ -7432,7 +7432,7 @@ pub unsafe fn _mm256_maskz_set1_epi8(k: __mmask32, a: i8) -> __m256i {
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_set1_epi8&expand=4964)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
-#[cfg_attr(test, assert_instr(vpbroadcastb))]
+#[cfg_attr(test, assert_instr(vpbroadcast))]
pub unsafe fn _mm_mask_set1_epi8(src: __m128i, k: __mmask16, a: i8) -> __m128i {
let r = _mm_set1_epi8(a).as_i8x16();
transmute(simd_select_bitmask(k, r, src.as_i8x16()))
@@ -7443,7 +7443,7 @@ pub unsafe fn _mm_mask_set1_epi8(src: __m128i, k: __mmask16, a: i8) -> __m128i {
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_set1_epi8&expand=4965)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
-#[cfg_attr(test, assert_instr(vpbroadcastb))]
+#[cfg_attr(test, assert_instr(vpbroadcast))]
pub unsafe fn _mm_maskz_set1_epi8(k: __mmask16, a: i8) -> __m128i {
let r = _mm_set1_epi8(a).as_i8x16();
let zero = _mm_setzero_si128().as_i8x16();
diff --git a/library/stdarch/crates/core_arch/src/x86/avx512f.rs b/library/stdarch/crates/core_arch/src/x86/avx512f.rs
index e0014f7ed..9baa7eeca 100644
--- a/library/stdarch/crates/core_arch/src/x86/avx512f.rs
+++ b/library/stdarch/crates/core_arch/src/x86/avx512f.rs
@@ -15757,6 +15757,26 @@ pub unsafe fn _mm512_mask_i32scatter_epi64<const SCALE: i32>(
vpscatterdq(slice, mask, offsets, src, SCALE);
}
+/// Scatter 64-bit integers from a into memory using 32-bit indices. 64-bit elements are stored at addresses starting at base_addr and offset by each 32-bit element in vindex (each index is scaled by the factor in scale). scale should be 1, 2, 4 or 8.
+///
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_i32scatter_epi64&expand=4099)
+#[inline]
+#[target_feature(enable = "avx512f,avx512vl")]
+#[cfg_attr(test, assert_instr(vpscatterdq, SCALE = 1))]
+#[rustc_legacy_const_generics(3)]
+pub unsafe fn _mm256_i32scatter_epi64<const SCALE: i32>(
+ slice: *mut u8,
+ offsets: __m128i,
+ src: __m256i,
+) {
+ static_assert_imm8_scale!(SCALE);
+ let src = src.as_i64x4();
+ let neg_one = -1;
+ let slice = slice as *mut i8;
+ let offsets = offsets.as_i32x4();
+ vpscatterdq256(slice, neg_one, offsets, src, SCALE);
+}
+
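As a reading aid, here is a scalar model of what the new scatter does (a sketch, not the intrinsic's implementation): each 64-bit element of `src` is written `SCALE * vindex[i]` bytes past the base pointer.

```rust
// Hypothetical scalar equivalent of _mm256_i32scatter_epi64::<SCALE>
// (unmasked, so all four elements are stored).
unsafe fn scatter_model(base: *mut u8, vindex: [i32; 4], src: [i64; 4], scale: isize) {
    for i in 0..4 {
        // Each 32-bit index is scaled to a byte offset.
        let addr = base.offset(vindex[i] as isize * scale) as *mut i64;
        addr.write_unaligned(src[i]);
    }
}
```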
/// Scatter 64-bit integers from a into memory using 64-bit indices. 64-bit elements are stored at addresses starting at base_addr and offset by each 64-bit element in vindex (each index is scaled by the factor in scale). scale should be 1, 2, 4 or 8.
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_i64scatter_epi64&expand=3116)
@@ -29616,7 +29636,9 @@ pub unsafe fn _mm512_mask_reduce_or_epi64(k: __mmask8, a: __m512i) -> i64 {
))
}
-/// Returns vector of type `__m512d` with undefined elements.
+/// Returns vector of type `__m512d` with indeterminate elements.
+/// Despite being "undefined", this is some valid value and not equivalent to [`mem::MaybeUninit`].
+/// In practice, this is equivalent to [`mem::zeroed`].
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_undefined_pd)
#[inline]
@@ -29626,7 +29648,9 @@ pub unsafe fn _mm512_undefined_pd() -> __m512d {
_mm512_set1_pd(0.0)
}
-/// Returns vector of type `__m512` with undefined elements.
+/// Returns vector of type `__m512` with indeterminate elements.
+/// Despite being "undefined", this is some valid value and not equivalent to [`mem::MaybeUninit`].
+/// In practice, this is equivalent to [`mem::zeroed`].
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_undefined_ps)
#[inline]
@@ -29636,7 +29660,9 @@ pub unsafe fn _mm512_undefined_ps() -> __m512 {
_mm512_set1_ps(0.0)
}
-/// Return vector of type __m512i with undefined elements.
+/// Return vector of type __m512i with indeterminate elements.
+/// Despite being "undefined", this is some valid value and not equivalent to [`mem::MaybeUninit`].
+/// In practice, this is equivalent to [`mem::zeroed`].
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_undefined_epi32&expand=5995)
#[inline]
@@ -29646,7 +29672,9 @@ pub unsafe fn _mm512_undefined_epi32() -> __m512i {
_mm512_set1_epi32(0)
}
-/// Return vector of type __m512 with undefined elements.
+/// Return vector of type __m512 with indeterminate elements.
+/// Despite being "undefined", this is some valid value and not equivalent to [`mem::MaybeUninit`].
+/// In practice, this is equivalent to [`mem::zeroed`].
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_undefined&expand=5994)
#[inline]
@@ -38299,6 +38327,8 @@ extern "C" {
fn vscatterqps(slice: *mut i8, mask: i8, offsets: i64x8, src: f32x8, scale: i32);
#[link_name = "llvm.x86.avx512.scatter.dpq.512"]
fn vpscatterdq(slice: *mut i8, mask: i8, offsets: i32x8, src: i64x8, scale: i32);
+ #[link_name = "llvm.x86.avx512.scattersiv4.di"]
+ fn vpscatterdq256(slice: *mut i8, mask: i8, offsets: i32x4, src: i64x4, scale: i32);
#[link_name = "llvm.x86.avx512.scatter.dpi.512"]
fn vpscatterdd(slice: *mut i8, mask: i16, offsets: i32x16, src: i32x16, scale: i32);
diff --git a/library/stdarch/crates/core_arch/src/x86/sse.rs b/library/stdarch/crates/core_arch/src/x86/sse.rs
index 7e4b352df..3d4471ba3 100644
--- a/library/stdarch/crates/core_arch/src/x86/sse.rs
+++ b/library/stdarch/crates/core_arch/src/x86/sse.rs
@@ -1374,7 +1374,7 @@ pub unsafe fn _mm_sfence() {
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_getcsr() -> u32 {
let mut result = 0_i32;
- stmxcsr((&mut result) as *mut _ as *mut i8);
+ stmxcsr(&mut result as *mut _ as *mut i8);
result as u32
}
@@ -1754,7 +1754,9 @@ pub unsafe fn _mm_prefetch<const STRATEGY: i32>(p: *const i8) {
prefetch(p, (STRATEGY >> 2) & 1, STRATEGY & 3, 1);
}
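The bit-twiddling above decodes the prefetch hint; a quick worked example, assuming the SSE module's constant `_MM_HINT_ET0 == 7`:

```rust
fn main() {
    // _MM_HINT_ET0 == 7 (assumed): bit 2 = write intent, bits 0..2 = locality.
    let strategy = 7;
    let rw = (strategy >> 2) & 1; // 1 => prefetch with intent to write
    let locality = strategy & 3;  // 3 => T0, keep in all cache levels
    assert_eq!((rw, locality), (1, 3));
}
```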
-/// Returns vector of type __m128 with undefined elements.
+/// Returns vector of type __m128 with indeterminate elements.
+/// Despite being "undefined", this is some valid value and not equivalent to [`mem::MaybeUninit`].
+/// In practice, this is equivalent to [`mem::zeroed`].
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_undefined_ps)
#[inline]
diff --git a/library/stdarch/crates/core_arch/src/x86/sse2.rs b/library/stdarch/crates/core_arch/src/x86/sse2.rs
index e118ac05f..f4fdb5046 100644
--- a/library/stdarch/crates/core_arch/src/x86/sse2.rs
+++ b/library/stdarch/crates/core_arch/src/x86/sse2.rs
@@ -949,7 +949,6 @@ pub unsafe fn _mm_cvtps_epi32(a: __m128) -> __m128i {
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsi32_si128)
#[inline]
#[target_feature(enable = "sse2")]
-#[cfg_attr(all(test, target_arch = "x86_64"), assert_instr(movd))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_cvtsi32_si128(a: i32) -> __m128i {
transmute(i32x4::new(a, 0, 0, 0))
@@ -960,7 +959,6 @@ pub unsafe fn _mm_cvtsi32_si128(a: i32) -> __m128i {
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsi128_si32)
#[inline]
#[target_feature(enable = "sse2")]
-#[cfg_attr(all(test, not(target_os = "windows")), assert_instr(movd))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_cvtsi128_si32(a: __m128i) -> i32 {
simd_extract(a.as_i32x4(), 0)
@@ -2739,7 +2737,9 @@ pub unsafe fn _mm_castsi128_ps(a: __m128i) -> __m128 {
transmute(a)
}
-/// Returns vector of type __m128d with undefined elements.
+/// Returns vector of type __m128d with indeterminate elements.
+/// Despite being "undefined", this is some valid value and not equivalent to [`mem::MaybeUninit`].
+/// In practice, this is equivalent to [`mem::zeroed`].
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_undefined_pd)
#[inline]
@@ -2749,7 +2749,9 @@ pub unsafe fn _mm_undefined_pd() -> __m128d {
__m128d(0.0, 0.0)
}
-/// Returns vector of type __m128i with undefined elements.
+/// Returns vector of type __m128i with indeterminate elements.
+/// Despite being "undefined", this is some valid value and not equivalent to [`mem::MaybeUninit`].
+/// In practice, this is equivalent to [`mem::zeroed`].
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_undefined_si128)
#[inline]
diff --git a/library/stdarch/crates/core_arch/src/x86_64/avx512f.rs b/library/stdarch/crates/core_arch/src/x86_64/avx512f.rs
index d414effa7..68f332767 100644
--- a/library/stdarch/crates/core_arch/src/x86_64/avx512f.rs
+++ b/library/stdarch/crates/core_arch/src/x86_64/avx512f.rs
@@ -7551,6 +7551,20 @@ mod tests {
assert_eq!(&arr[..], &expected[..],);
}
+ #[simd_test(enable = "avx512f,avx512vl")]
+ unsafe fn test_mm256_i32scatter_epi64() {
+ let mut arr = [0i64; 64];
+ let index = _mm_setr_epi32(0, 16, 32, 48);
+ let src = _mm256_setr_epi64x(1, 2, 3, 4);
+ // With a scale of 8, each 32-bit index addresses one whole i64 element,
+ // so indices 0, 16, 32, 48 select arr[0], arr[16], arr[32], arr[48].
+ _mm256_i32scatter_epi64::<8>(arr.as_mut_ptr() as *mut u8, index, src);
+ let mut expected = [0i64; 64];
+ for i in 0..4 {
+ expected[i * 16] = (i + 1) as i64;
+ }
+ assert_eq!(&arr[..], &expected[..],);
+ }
+
#[simd_test(enable = "avx512f")]
unsafe fn test_mm512_i64scatter_epi64() {
let mut arr = [0i64; 128];
diff --git a/library/stdarch/crates/std_detect/README.md b/library/stdarch/crates/std_detect/README.md
index 71f474d65..521177104 100644
--- a/library/stdarch/crates/std_detect/README.md
+++ b/library/stdarch/crates/std_detect/README.md
@@ -30,14 +30,19 @@ run-time feature detection. When this feature is disabled, `std_detect` assumes
that [`getauxval`] is linked to the binary. If that is not the case the behavior
is undefined.
- Note: This feature is ignored on `*-linux-gnu*` targets, since all `*-linux-gnu*` targets ([since Rust 1.64](https://blog.rust-lang.org/2022/08/01/Increasing-glibc-kernel-requirements.html)) have glibc requirements higher than [glibc 2.16 that added `getauxval`](https://sourceware.org/legacy-ml/libc-announce/2012/msg00000.html), and we can safely assume [`getauxval`] is linked to the binary.
+ Note: This feature is ignored on `*-linux-gnu*` and `*-android*` targets
+ because we can safely assume `getauxval` is linked to the binary.
+ * `*-linux-gnu*` targets ([since Rust 1.64](https://blog.rust-lang.org/2022/08/01/Increasing-glibc-kernel-requirements.html))
+ have glibc requirements higher than [glibc 2.16 that added `getauxval`](https://sourceware.org/legacy-ml/libc-announce/2012/msg00000.html).
+ * `*-android*` targets ([since Rust 1.68](https://blog.rust-lang.org/2023/01/09/android-ndk-update-r25.html))
+ have the minimum supported API level higher than [Android 4.3 (API level 18) that added `getauxval`](https://github.com/aosp-mirror/platform_bionic/blob/d3ebc2f7c49a9893b114124d4a6b315f3a328764/libc/include/sys/auxv.h#L49).
* `std_detect_file_io` (enabled by default, requires `std`): Enable to perform run-time feature
detection using file APIs (e.g. `/proc/cpuinfo`, etc.) if other more performant
methods fail. This feature requires `libstd` as a dependency, preventing the
crate from working on applications in which `std` is not available.
-[`getauxval`]: http://man7.org/linux/man-pages/man3/getauxval.3.html
+[`getauxval`]: https://man7.org/linux/man-pages/man3/getauxval.3.html
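For context, the detection data gathered via `getauxval` is typically consumed through the feature-detection macros; a minimal sketch of such a consumer (not part of this patch):

```rust
fn main() {
    // On stable Rust this macro is reachable through std on x86/x86_64.
    if is_x86_feature_detected!("avx2") {
        println!("AVX2 available at run time");
    }
}
```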
# Platform support
diff --git a/library/stdarch/crates/std_detect/src/detect/arch/arm.rs b/library/stdarch/crates/std_detect/src/detect/arch/arm.rs
index 897dc314c..a7dea27fb 100644
--- a/library/stdarch/crates/std_detect/src/detect/arch/arm.rs
+++ b/library/stdarch/crates/std_detect/src/detect/arch/arm.rs
@@ -17,8 +17,6 @@ features! {
/// Polynomial Multiply
@FEATURE: #[unstable(feature = "stdsimd", issue = "27731")] crc: "crc";
/// CRC32 (Cyclic Redundancy Check)
- @FEATURE: #[unstable(feature = "stdsimd", issue = "27731")] crypto: "crypto";
- /// Crypto: AES + PMULL + SHA1 + SHA256. Prefer using the individual features where possible.
@FEATURE: #[unstable(feature = "stdsimd", issue = "27731")] aes: "aes";
/// FEAT_AES (AES instructions)
@FEATURE: #[unstable(feature = "stdsimd", issue = "27731")] sha2: "sha2";
diff --git a/library/stdarch/crates/std_detect/src/detect/os/freebsd/arm.rs b/library/stdarch/crates/std_detect/src/detect/os/freebsd/arm.rs
index 97ede1d26..d904eaebd 100644
--- a/library/stdarch/crates/std_detect/src/detect/os/freebsd/arm.rs
+++ b/library/stdarch/crates/std_detect/src/detect/os/freebsd/arm.rs
@@ -23,16 +23,13 @@ pub(crate) fn detect_features() -> cache::Initializer {
if let Ok(auxv) = auxvec::auxv() {
enable_feature(&mut value, Feature::neon, auxv.hwcap & HWCAP_NEON != 0);
- let pmull = auxv.hwcap2 & HWCAP2_PMULL != 0;
- enable_feature(&mut value, Feature::pmull, pmull);
+ enable_feature(&mut value, Feature::pmull, auxv.hwcap2 & HWCAP2_PMULL != 0);
enable_feature(&mut value, Feature::crc, auxv.hwcap2 & HWCAP2_CRC32 != 0);
- let aes = auxv.hwcap2 & HWCAP2_AES != 0;
- enable_feature(&mut value, Feature::aes, aes);
+ enable_feature(&mut value, Feature::aes, auxv.hwcap2 & HWCAP2_AES != 0);
// SHA2 requires SHA1 & SHA2 features
let sha1 = auxv.hwcap2 & HWCAP2_SHA1 != 0;
let sha2 = auxv.hwcap2 & HWCAP2_SHA2 != 0;
enable_feature(&mut value, Feature::sha2, sha1 && sha2);
- enable_feature(&mut value, Feature::crypto, aes && pmull && sha1 && sha2);
return value;
}
value
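For readers of this hunk, `enable_feature` is just a conditional bit-set on the feature cache. A standalone model of that behavior, with the crate-internal `cache::Initializer` replaced by a plain `u64` bitset for the sake of the sketch:

```rust
// Hypothetical model: set one bit per feature whose HWCAP test passed.
fn enable_feature(bits: &mut u64, feature_index: u32, enable: bool) {
    if enable {
        *bits |= 1 << feature_index;
    }
}
```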
diff --git a/library/stdarch/crates/std_detect/src/detect/os/linux/arm.rs b/library/stdarch/crates/std_detect/src/detect/os/linux/arm.rs
index 7383e487f..7601cf0a8 100644
--- a/library/stdarch/crates/std_detect/src/detect/os/linux/arm.rs
+++ b/library/stdarch/crates/std_detect/src/detect/os/linux/arm.rs
@@ -20,14 +20,6 @@ pub(crate) fn detect_features() -> cache::Initializer {
enable_feature(&mut value, Feature::neon, bit::test(auxv.hwcap, 12));
enable_feature(&mut value, Feature::pmull, bit::test(auxv.hwcap2, 1));
enable_feature(&mut value, Feature::crc, bit::test(auxv.hwcap2, 4));
- enable_feature(
- &mut value,
- Feature::crypto,
- bit::test(auxv.hwcap2, 0)
- && bit::test(auxv.hwcap2, 1)
- && bit::test(auxv.hwcap2, 2)
- && bit::test(auxv.hwcap2, 3),
- );
enable_feature(&mut value, Feature::aes, bit::test(auxv.hwcap2, 0));
// SHA2 requires SHA1 & SHA2 features
enable_feature(
@@ -47,14 +39,6 @@ pub(crate) fn detect_features() -> cache::Initializer {
);
enable_feature(&mut value, Feature::pmull, c.field("Features").has("pmull"));
enable_feature(&mut value, Feature::crc, c.field("Features").has("crc32"));
- enable_feature(
- &mut value,
- Feature::crypto,
- c.field("Features").has("aes")
- && c.field("Features").has("pmull")
- && c.field("Features").has("sha1")
- && c.field("Features").has("sha2"),
- );
enable_feature(&mut value, Feature::aes, c.field("Features").has("aes"));
enable_feature(
&mut value,
diff --git a/library/stdarch/crates/std_detect/src/detect/os/linux/auxvec.rs b/library/stdarch/crates/std_detect/src/detect/os/linux/auxvec.rs
index 11d9c103e..8bc0b30c3 100644
--- a/library/stdarch/crates/std_detect/src/detect/os/linux/auxvec.rs
+++ b/library/stdarch/crates/std_detect/src/detect/os/linux/auxvec.rs
@@ -54,10 +54,13 @@ pub(crate) struct AuxVec {
/// error, cpuinfo still can (and will) be used to try to perform run-time
/// feature detection on some platforms.
///
-/// Note: The `std_detect_dlsym_getauxval` cargo feature is ignored on `*-linux-gnu*` targets,
-/// since [all `*-linux-gnu*` targets ([since Rust 1.64](https://blog.rust-lang.org/2022/08/01/Increasing-glibc-kernel-requirements.html))
-/// have glibc requirements higher than [glibc 2.16 that added `getauxval`](https://sourceware.org/legacy-ml/libc-announce/2012/msg00000.html),
-/// and we can safely assume [`getauxval`] is linked to the binary.
+/// Note: The `std_detect_dlsym_getauxval` cargo feature is ignored on
+/// `*-linux-gnu*` and `*-android*` targets because we can safely assume `getauxval`
+/// is linked to the binary.
+/// - `*-linux-gnu*` targets ([since Rust 1.64](https://blog.rust-lang.org/2022/08/01/Increasing-glibc-kernel-requirements.html))
+/// have glibc requirements higher than [glibc 2.16 that added `getauxval`](https://sourceware.org/legacy-ml/libc-announce/2012/msg00000.html).
+/// - `*-android*` targets ([since Rust 1.68](https://blog.rust-lang.org/2023/01/09/android-ndk-update-r25.html))
+/// have the minimum supported API level higher than [Android 4.3 (API level 18) that added `getauxval`](https://github.com/aosp-mirror/platform_bionic/blob/d3ebc2f7c49a9893b114124d4a6b315f3a328764/libc/include/sys/auxv.h#L49).
///
/// For more information about when `getauxval` is available check the great
/// [`auxv` crate documentation][auxv_docs].
@@ -67,7 +70,9 @@ pub(crate) struct AuxVec {
pub(crate) fn auxv() -> Result<AuxVec, ()> {
#[cfg(all(
feature = "std_detect_dlsym_getauxval",
- not(all(target_os = "linux", target_env = "gnu"))
+ not(all(target_os = "linux", target_env = "gnu")),
+ // TODO: libc crate currently doesn't provide getauxval on 32-bit Android.
+ not(all(target_os = "android", target_pointer_width = "64")),
))]
{
// Try to call a dynamically-linked getauxval function.
@@ -105,13 +110,17 @@ pub(crate) fn auxv() -> Result<AuxVec, ()> {
}
}
}
- drop(hwcap);
+
+ // Intentionally not used
+ let _ = hwcap;
}
}
#[cfg(any(
not(feature = "std_detect_dlsym_getauxval"),
- all(target_os = "linux", target_env = "gnu")
+ all(target_os = "linux", target_env = "gnu"),
+ // TODO: libc crate currently doesn't provide getauxval on 32-bit Android.
+ all(target_os = "android", target_pointer_width = "64"),
))]
{
// Targets with only AT_HWCAP:
@@ -189,13 +198,12 @@ fn getauxval(key: usize) -> Result<usize, ()> {
pub(super) fn auxv_from_file(file: &str) -> Result<AuxVec, ()> {
let file = super::read_file(file)?;
- // See <https://github.com/torvalds/linux/blob/v3.19/include/uapi/linux/auxvec.h>.
+ // See <https://github.com/torvalds/linux/blob/v5.15/include/uapi/linux/auxvec.h>.
//
- // The auxiliary vector contains at most 32 (key,value) fields: from
- // `AT_EXECFN = 31` to `AT_NULL = 0`. That is, a buffer of
- // 2*32 `usize` elements is enough to read the whole vector.
- let mut buf = [0_usize; 64];
- let len = core::mem::size_of_val(&buf).max(file.len());
+ // The auxiliary vector contains at most 34 (key,value) fields: from
+ // `AT_MINSIGSTKSZ` to `AT_NULL`, but the count may grow in future kernels.
+ let len = file.len();
+ let mut buf = alloc::vec![0_usize; 1 + len / core::mem::size_of::<usize>()];
unsafe {
core::ptr::copy_nonoverlapping(file.as_ptr(), buf.as_mut_ptr() as *mut u8, len);
}
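The buffer filled above is then scanned as (key, value) pairs. A standalone sketch of that scan, using the Linux `AT_*` constants (their values stated here as an assumption):

```rust
const AT_NULL: usize = 0;   // terminator
const AT_HWCAP: usize = 16; // hardware-capability bitmask

// Walk the pairs until AT_NULL, returning the AT_HWCAP value if present.
fn find_hwcap(buf: &[usize]) -> Option<usize> {
    buf.chunks_exact(2)
        .take_while(|pair| pair[0] != AT_NULL)
        .find(|pair| pair[0] == AT_HWCAP)
        .map(|pair| pair[1])
}
```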
@@ -206,7 +214,7 @@ pub(super) fn auxv_from_file(file: &str) -> Result<AuxVec, ()> {
/// Tries to interpret the `buffer` as an auxiliary vector. If that fails, this
/// function returns `Err`.
#[cfg(feature = "std_detect_file_io")]
-fn auxv_from_buf(buf: &[usize; 64]) -> Result<AuxVec, ()> {
+fn auxv_from_buf(buf: &[usize]) -> Result<AuxVec, ()> {
// Targets with only AT_HWCAP:
#[cfg(any(
target_arch = "riscv32",
@@ -247,7 +255,8 @@ fn auxv_from_buf(buf: &[usize; 64]) -> Result<AuxVec, ()> {
return Ok(AuxVec { hwcap, hwcap2 });
}
}
- drop(buf);
+ // Suppress the unused-variable warning
+ let _ = buf;
Err(())
}
diff --git a/library/stdarch/crates/std_detect/tests/cpu-detection.rs b/library/stdarch/crates/std_detect/tests/cpu-detection.rs
index eb3a3e409..38bdb5bbd 100644
--- a/library/stdarch/crates/std_detect/tests/cpu-detection.rs
+++ b/library/stdarch/crates/std_detect/tests/cpu-detection.rs
@@ -28,7 +28,6 @@ fn arm_linux_or_freebsd() {
println!("neon: {}", is_arm_feature_detected!("neon"));
println!("pmull: {}", is_arm_feature_detected!("pmull"));
println!("crc: {}", is_arm_feature_detected!("crc"));
- println!("crypto: {}", is_arm_feature_detected!("crypto"));
println!("aes: {}", is_arm_feature_detected!("aes"));
println!("sha2: {}", is_arm_feature_detected!("sha2"));
}
diff --git a/library/stdarch/crates/stdarch-gen/neon.spec b/library/stdarch/crates/stdarch-gen/neon.spec
index f2c1e200d..06090e669 100644
--- a/library/stdarch/crates/stdarch-gen/neon.spec
+++ b/library/stdarch/crates/stdarch-gen/neon.spec
@@ -492,7 +492,7 @@ generate int64x1_t:uint64x1_t, int64x2_t:uint64x2_t
arm = vcgt.s
generate int8x8_t:uint8x8_t, int8x16_t:uint8x16_t, int16x4_t:uint16x4_t, int16x8_t:uint16x8_t, int32x2_t:uint32x2_t, int32x4_t:uint32x4_t
-/// Compare unsigned highe
+/// Compare unsigned greater than
name = vcgt
fn = simd_gt
a = 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16
@@ -732,7 +732,7 @@ a = MIN, -1, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0
fixed = 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
validate FALSE, FALSE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE
-aarch64 = cmge
+aarch64 = cmgt
generate int8x8_t:uint8x8_t, int8x16_t:uint8x16_t, int16x4_t:uint16x4_t, int16x8_t:uint16x8_t, int32x2_t:uint32x2_t, int32x4_t:uint32x4_t, int64x1_t:uint64x1_t, int64x2_t:uint64x2_t
/// Floating-point compare greater than or equal to zero
@@ -751,7 +751,7 @@ multi_fn = transmute, {vcgez-in_ntt-noext, {transmute, a}}
a = -1
validate 0
-aarch64 = eor
+aarch64 = nop
generate i64:u64
/// Floating-point compare greater than or equal to zero
@@ -5138,8 +5138,20 @@ b = 1
c = 2
validate 5
-aarch64 = sqdmull
-generate i32:i16:i16:i32, i64:i32:i32:i64
+aarch64 = sqdmlal
+generate i32:i16:i16:i32
+
+/// Signed saturating doubling multiply-add long
+name = vqdmlal
+multi_fn = vqadd-out-noext, x:out_t, a, {vqdmulls-in_ntt-noext, b, c}
+multi_fn = x as out_t
+a = 1
+b = 1
+c = 2
+validate 5
+
+aarch64 = sqdmlal
+generate i64:i32:i32:i64
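Per the `multi_fn` lines, the split entry above should make the generator emit a scalar function shaped roughly like this (a sketch reconstructed from the spec, not the generated file):

```rust
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn vqdmlals_s32_sketch(a: i64, b: i32, c: i32) -> i64 {
    use core::arch::aarch64::{vqaddd_s64, vqdmulls_s32};
    // multi_fn step 1: x = vqadd(a, vqdmull(b, c))
    let x: i64 = vqaddd_s64(a, vqdmulls_s32(b, c));
    // multi_fn step 2: cast to the output type
    x as i64
}
```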
/// Signed saturating doubling multiply-add long
name = vqdmlalh_lane
@@ -5156,7 +5168,7 @@ validate 5
aarch64 = sqdmlal
generate i32:i16:int16x4_t:i32, i32:i16:int16x8_t:i32
name = vqdmlals_lane
-aarch64 = sqdmull
+aarch64 = sqdmlal
generate i64:i32:int32x2_t:i64, i64:i32:int32x4_t:i64
/// Signed saturating doubling multiply-subtract long
@@ -5250,8 +5262,20 @@ b = 1
c = 2
validate 6
-aarch64 = sqdmull
-generate i32:i16:i16:i32, i64:i32:i32:i64
+aarch64 = sqdmlsl
+generate i32:i16:i16:i32
+
+/// Signed saturating doubling multiply-subtract long
+name = vqdmlsl
+multi_fn = vqsub-out-noext, x:out_t, a, {vqdmulls-in_ntt-noext, b, c}
+multi_fn = x as out_t
+a = 10
+b = 1
+c = 2
+validate 6
+
+aarch64 = sqdmlsl
+generate i64:i32:i32:i64
/// Signed saturating doubling multiply-subtract long
name = vqdmlslh_lane
@@ -5268,7 +5292,7 @@ validate 6
aarch64 = sqdmlsl
generate i32:i16:int16x4_t:i32, i32:i16:int16x8_t:i32
name = vqdmlsls_lane
-aarch64 = sqdmull
+aarch64 = sqdmlsl
generate i64:i32:int32x2_t:i64, i64:i32:int32x4_t:i64
/// Signed saturating doubling multiply returning high half
diff --git a/library/stdarch/crates/stdarch-test/src/disassembly.rs b/library/stdarch/crates/stdarch-test/src/disassembly.rs
index 8e4d57d4e..5d7a27e8a 100644
--- a/library/stdarch/crates/stdarch-test/src/disassembly.rs
+++ b/library/stdarch/crates/stdarch-test/src/disassembly.rs
@@ -148,7 +148,7 @@ fn parse(output: &str) -> HashSet<Function> {
instruction
.split_whitespace()
.skip(1)
- .skip_while(|s| *s == "lock") // skip x86-specific prefix
+ .skip_while(|s| *s == "lock" || *s == "{evex}") // skip x86-specific prefix
.map(std::string::ToString::to_string)
.collect::<Vec<String>>()
};
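The effect of the widened `skip_while` in isolation (a self-contained sketch; the real parser additionally drops one leading token before this step):

```rust
fn main() {
    let tokens: Vec<String> = "{evex} vpaddb %xmm0, %xmm1, %xmm2"
        .split_whitespace()
        .skip_while(|s| *s == "lock" || *s == "{evex}")
        .map(ToString::to_string)
        .collect();
    // The mnemonic is now first, with the {evex} pseudo-prefix stripped.
    assert_eq!(tokens[0], "vpaddb");
}
```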