From 2e00214b3efbdfeefaa0fe9e8b8fd519de7adc35 Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Wed, 17 Apr 2024 14:19:50 +0200 Subject: Merging upstream version 1.69.0+dfsg1. Signed-off-by: Daniel Baumann --- library/stdarch/.github/workflows/main.yml | 12 - library/stdarch/ci/android-install-ndk.sh | 38 - library/stdarch/ci/android-install-sdk.sh | 60 - library/stdarch/ci/android-sysimage.sh | 56 - .../ci/docker/aarch64-linux-android/Dockerfile | 46 - .../ci/docker/arm-linux-androideabi/Dockerfile | 46 - .../ci/docker/x86_64-linux-android/Dockerfile | 29 - library/stdarch/ci/runtest-android.rs | 45 - .../stdarch/crates/assert-instr-macro/Cargo.toml | 2 +- .../stdarch/crates/assert-instr-macro/src/lib.rs | 8 +- library/stdarch/crates/core_arch/Cargo.toml | 2 +- .../crates/core_arch/src/aarch64/neon/mod.rs | 174 +- .../core_arch/src/arm_shared/neon/generated.rs | 3090 ++++++++++---------- .../crates/core_arch/src/arm_shared/neon/mod.rs | 257 +- library/stdarch/crates/core_arch/src/mips/msa.rs | 4 +- library/stdarch/crates/core_arch/src/mod.rs | 2 +- library/stdarch/crates/core_arch/src/x86/avx.rs | 2 +- library/stdarch/crates/core_arch/src/x86/avx2.rs | 76 +- .../stdarch/crates/core_arch/src/x86/avx512bf16.rs | 2 +- .../crates/core_arch/src/x86/avx512bitalg.rs | 12 +- .../stdarch/crates/core_arch/src/x86/avx512bw.rs | 28 +- .../stdarch/crates/core_arch/src/x86/avx512f.rs | 2 +- .../stdarch/crates/core_arch/src/x86/avx512gfni.rs | 1492 ---------- .../stdarch/crates/core_arch/src/x86/avx512vaes.rs | 332 --- .../crates/core_arch/src/x86/avx512vpclmulqdq.rs | 258 -- library/stdarch/crates/core_arch/src/x86/gfni.rs | 1492 ++++++++++ library/stdarch/crates/core_arch/src/x86/mod.rs | 12 +- library/stdarch/crates/core_arch/src/x86/sse.rs | 5 +- library/stdarch/crates/core_arch/src/x86/sse2.rs | 32 +- library/stdarch/crates/core_arch/src/x86/sse41.rs | 48 +- library/stdarch/crates/core_arch/src/x86/sse42.rs | 8 +- library/stdarch/crates/core_arch/src/x86/vaes.rs | 332 +++ .../stdarch/crates/core_arch/src/x86/vpclmulqdq.rs | 258 ++ .../crates/core_arch/tests/cpu-detection.rs | 21 +- library/stdarch/crates/intrinsic-test/Cargo.toml | 4 +- .../stdarch/crates/intrinsic-test/missing_arm.txt | 11 - .../crates/intrinsic-test/src/acle_csv_parser.rs | 6 +- .../stdarch/crates/intrinsic-test/src/intrinsic.rs | 2 +- library/stdarch/crates/intrinsic-test/src/main.rs | 22 +- library/stdarch/crates/intrinsic-test/src/types.rs | 14 +- .../stdarch/crates/intrinsic-test/src/values.rs | 2 +- library/stdarch/crates/simd-test-macro/Cargo.toml | 2 +- library/stdarch/crates/simd-test-macro/src/lib.rs | 4 +- library/stdarch/crates/std_detect/Cargo.toml | 2 +- .../crates/std_detect/src/detect/arch/x86.rs | 15 +- .../stdarch/crates/std_detect/src/detect/cache.rs | 2 +- .../stdarch/crates/std_detect/src/detect/macros.rs | 12 +- .../stdarch/crates/std_detect/src/detect/mod.rs | 2 +- .../std_detect/src/detect/os/linux/aarch64.rs | 6 +- .../std_detect/src/detect/os/linux/auxvec.rs | 8 +- .../crates/std_detect/src/detect/os/linux/riscv.rs | 146 +- .../stdarch/crates/std_detect/src/detect/os/x86.rs | 6 +- .../crates/std_detect/tests/cpu-detection.rs | 11 +- .../crates/std_detect/tests/x86-specific.rs | 9 +- library/stdarch/crates/stdarch-gen/Cargo.toml | 2 +- library/stdarch/crates/stdarch-gen/src/main.rs | 322 +- library/stdarch/crates/stdarch-test/Cargo.toml | 2 +- .../stdarch/crates/stdarch-test/src/disassembly.rs | 8 +- library/stdarch/crates/stdarch-test/src/lib.rs | 10 +- library/stdarch/crates/stdarch-verify/Cargo.toml | 
2 +- library/stdarch/crates/stdarch-verify/src/lib.rs | 14 +- library/stdarch/crates/stdarch-verify/tests/arm.rs | 6 +- .../stdarch/crates/stdarch-verify/tests/mips.rs | 8 +- .../crates/stdarch-verify/tests/x86-intel.rs | 20 +- library/stdarch/examples/Cargo.toml | 2 +- library/stdarch/examples/connect5.rs | 2 +- library/stdarch/examples/hex.rs | 2 +- library/stdarch/triagebot.toml | 3 + 68 files changed, 4343 insertions(+), 4629 deletions(-) delete mode 100644 library/stdarch/ci/android-install-ndk.sh delete mode 100644 library/stdarch/ci/android-install-sdk.sh delete mode 100644 library/stdarch/ci/android-sysimage.sh delete mode 100644 library/stdarch/ci/docker/aarch64-linux-android/Dockerfile delete mode 100644 library/stdarch/ci/docker/arm-linux-androideabi/Dockerfile delete mode 100644 library/stdarch/ci/docker/x86_64-linux-android/Dockerfile delete mode 100644 library/stdarch/ci/runtest-android.rs delete mode 100644 library/stdarch/crates/core_arch/src/x86/avx512gfni.rs delete mode 100644 library/stdarch/crates/core_arch/src/x86/avx512vaes.rs delete mode 100644 library/stdarch/crates/core_arch/src/x86/avx512vpclmulqdq.rs create mode 100644 library/stdarch/crates/core_arch/src/x86/gfni.rs create mode 100644 library/stdarch/crates/core_arch/src/x86/vaes.rs create mode 100644 library/stdarch/crates/core_arch/src/x86/vpclmulqdq.rs (limited to 'library/stdarch') diff --git a/library/stdarch/.github/workflows/main.yml b/library/stdarch/.github/workflows/main.yml index fd8713ff8..7d4085334 100644 --- a/library/stdarch/.github/workflows/main.yml +++ b/library/stdarch/.github/workflows/main.yml @@ -80,10 +80,7 @@ jobs: - s390x-unknown-linux-gnu - wasm32-wasi - i586-unknown-linux-gnu - - x86_64-linux-android - - arm-linux-androideabi - mipsel-unknown-linux-musl - - aarch64-linux-android - nvptx64-nvidia-cuda - thumbv6m-none-eabi - thumbv7m-none-eabi @@ -146,18 +143,9 @@ jobs: os: windows-latest - target: i586-unknown-linux-gnu os: ubuntu-latest - - target: x86_64-linux-android - os: ubuntu-latest - disable_assert_instr: 1 - - target: arm-linux-androideabi - os: ubuntu-latest - disable_assert_instr: 1 - target: mipsel-unknown-linux-musl os: ubuntu-latest norun: 1 - - target: aarch64-linux-android - os: ubuntu-latest - disable_assert_instr: 1 - target: nvptx64-nvidia-cuda os: ubuntu-latest - target: thumbv6m-none-eabi diff --git a/library/stdarch/ci/android-install-ndk.sh b/library/stdarch/ci/android-install-ndk.sh deleted file mode 100644 index 944a8389a..000000000 --- a/library/stdarch/ci/android-install-ndk.sh +++ /dev/null @@ -1,38 +0,0 @@ -#!/usr/bin/env sh -# Copyright 2016 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. 
- -set -ex - -curl --retry 5 -O \ - https://dl.google.com/android/repository/android-ndk-r15b-linux-x86_64.zip -unzip -q android-ndk-r15b-linux-x86_64.zip - -case "${1}" in - aarch64) - arch=arm64 - ;; - - i686) - arch=x86 - ;; - - *) - arch="${1}" - ;; -esac; - -android-ndk-r15b/build/tools/make_standalone_toolchain.py \ - --unified-headers \ - --install-dir "/android/ndk-${1}" \ - --arch "${arch}" \ - --api 24 - -rm -rf ./android-ndk-r15b-linux-x86_64.zip ./android-ndk-r15b diff --git a/library/stdarch/ci/android-install-sdk.sh b/library/stdarch/ci/android-install-sdk.sh deleted file mode 100644 index 3383dcb7f..000000000 --- a/library/stdarch/ci/android-install-sdk.sh +++ /dev/null @@ -1,60 +0,0 @@ -#!/usr/bin/env sh -# Copyright 2016 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. - -set -ex - -# Prep the SDK and emulator -# -# Note that the update process requires that we accept a bunch of licenses, and -# we can't just pipe `yes` into it for some reason, so we take the same strategy -# located in https://github.com/appunite/docker by just wrapping it in a script -# which apparently magically accepts the licenses. - -mkdir sdk -curl --retry 5 https://dl.google.com/android/repository/sdk-tools-linux-4333796.zip -O -unzip -d sdk sdk-tools-linux-4333796.zip - -case "$1" in - arm | armv7) - abi=armeabi-v7a - ;; - - aarch64) - abi=arm64-v8a - ;; - - i686) - abi=x86 - ;; - - x86_64) - abi=x86_64 - ;; - - *) - echo "invalid arch: $1" - exit 1 - ;; -esac; - -# --no_https avoids -# javax.net.ssl.SSLHandshakeException: sun.security.validator.ValidatorException: No trusted certificate found -yes | ./sdk/tools/bin/sdkmanager --licenses --no_https -yes | ./sdk/tools/bin/sdkmanager --no_https \ - "emulator" \ - "platform-tools" \ - "platforms;android-24" \ - "system-images;android-24;default;$abi" - -echo "no" | - ./sdk/tools/bin/avdmanager create avd \ - --name "${1}" \ - --package "system-images;android-24;default;$abi" diff --git a/library/stdarch/ci/android-sysimage.sh b/library/stdarch/ci/android-sysimage.sh deleted file mode 100644 index 31a6762cb..000000000 --- a/library/stdarch/ci/android-sysimage.sh +++ /dev/null @@ -1,56 +0,0 @@ -#!/usr/bin/env bash - -# Copyright 2017 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. - -set -ex - -URL=https://dl.google.com/android/repository/sys-img/android - -main() { - local arch="${1}" - local name="${2}" - local dest=/system - local td - td="$(mktemp -d)" - - apt-get install --no-install-recommends e2tools - - pushd "$td" - curl --retry 5 -O "${URL}/${name}" - unzip -q "${name}" - - local system - system=$(find . 
-name system.img) - mkdir -p $dest/{bin,lib,lib64} - - # Extract android linker and libraries to /system - # This allows android executables to be run directly (or with qemu) - if [ "${arch}" = "x86_64" ] || [ "${arch}" = "arm64" ]; then - e2cp -p "${system}:/bin/linker64" "${dest}/bin/" - e2cp -p "${system}:/lib64/libdl.so" "${dest}/lib64/" - e2cp -p "${system}:/lib64/libc.so" "${dest}/lib64/" - e2cp -p "${system}:/lib64/libm.so" "${dest}/lib64/" - else - e2cp -p "${system}:/bin/linker" "${dest}/bin/" - e2cp -p "${system}:/lib/libdl.so" "${dest}/lib/" - e2cp -p "${system}:/lib/libc.so" "${dest}/lib/" - e2cp -p "${system}:/lib/libm.so" "${dest}/lib/" - fi - - # clean up - apt-get purge --auto-remove -y e2tools - - popd - - rm -rf "${td}" -} - -main "${@}" diff --git a/library/stdarch/ci/docker/aarch64-linux-android/Dockerfile b/library/stdarch/ci/docker/aarch64-linux-android/Dockerfile deleted file mode 100644 index 6cf9b5061..000000000 --- a/library/stdarch/ci/docker/aarch64-linux-android/Dockerfile +++ /dev/null @@ -1,46 +0,0 @@ -FROM ubuntu:22.04 - -RUN apt-get update && \ - apt-get install -y --no-install-recommends \ - file \ - make \ - curl \ - ca-certificates \ - python-is-python3 \ - unzip \ - expect \ - openjdk-8-jre \ - libstdc++6-i386-cross \ - libpulse0 \ - gcc \ - libc6-dev - -WORKDIR /android/ -COPY android* /android/ - -ENV ANDROID_ARCH=aarch64 -ENV PATH=$PATH:/android/ndk-$ANDROID_ARCH/bin:/android/sdk/tools:/android/sdk/platform-tools - -RUN sh /android/android-install-ndk.sh $ANDROID_ARCH -RUN sh /android/android-install-sdk.sh $ANDROID_ARCH -RUN mv /root/.android /tmp -RUN chmod 777 -R /tmp/.android -RUN chmod 755 /android/sdk/tools/* /android/sdk/emulator/qemu/linux-x86_64/* - -ENV PATH=$PATH:/rust/bin \ - CARGO_TARGET_AARCH64_LINUX_ANDROID_LINKER=aarch64-linux-android-gcc \ - CARGO_TARGET_AARCH64_LINUX_ANDROID_RUNNER=/tmp/runtest \ - OBJDUMP=aarch64-linux-android-objdump \ - HOME=/tmp - -ADD runtest-android.rs /tmp/runtest.rs -ENTRYPOINT [ \ - "bash", \ - "-c", \ - # set SHELL so android can detect a 64bits system, see - # http://stackoverflow.com/a/41789144 - "SHELL=/bin/dash /android/sdk/emulator/emulator @aarch64 -no-window & \ - rustc /tmp/runtest.rs -o /tmp/runtest && \ - exec \"$@\"", \ - "--" \ -] diff --git a/library/stdarch/ci/docker/arm-linux-androideabi/Dockerfile b/library/stdarch/ci/docker/arm-linux-androideabi/Dockerfile deleted file mode 100644 index fb1a0cecf..000000000 --- a/library/stdarch/ci/docker/arm-linux-androideabi/Dockerfile +++ /dev/null @@ -1,46 +0,0 @@ -FROM ubuntu:22.04 - -RUN apt-get update && \ - apt-get install -y --no-install-recommends \ - file \ - make \ - curl \ - ca-certificates \ - python-is-python3 \ - unzip \ - expect \ - openjdk-8-jre \ - libstdc++6-i386-cross \ - libpulse0 \ - gcc \ - libc6-dev - -WORKDIR /android/ -COPY android* /android/ - -ENV ANDROID_ARCH=arm -ENV PATH=$PATH:/android/ndk-$ANDROID_ARCH/bin:/android/sdk/tools:/android/sdk/platform-tools - -RUN sh /android/android-install-ndk.sh $ANDROID_ARCH -RUN sh /android/android-install-sdk.sh $ANDROID_ARCH -RUN mv /root/.android /tmp -RUN chmod 777 -R /tmp/.android -RUN chmod 755 /android/sdk/tools/* /android/sdk/emulator/qemu/linux-x86_64/* - -ENV PATH=$PATH:/rust/bin \ - CARGO_TARGET_ARM_LINUX_ANDROIDEABI_LINKER=arm-linux-androideabi-gcc \ - CARGO_TARGET_ARM_LINUX_ANDROIDEABI_RUNNER=/tmp/runtest \ - OBJDUMP=arm-linux-androideabi-objdump \ - HOME=/tmp - -ADD runtest-android.rs /tmp/runtest.rs -ENTRYPOINT [ \ - "bash", \ - "-c", \ - # set SHELL so android can detect a 
64bits system, see - # http://stackoverflow.com/a/41789144 - "SHELL=/bin/dash /android/sdk/emulator/emulator @arm -no-window & \ - rustc /tmp/runtest.rs -o /tmp/runtest && \ - exec \"$@\"", \ - "--" \ -] diff --git a/library/stdarch/ci/docker/x86_64-linux-android/Dockerfile b/library/stdarch/ci/docker/x86_64-linux-android/Dockerfile deleted file mode 100644 index 82119be74..000000000 --- a/library/stdarch/ci/docker/x86_64-linux-android/Dockerfile +++ /dev/null @@ -1,29 +0,0 @@ -FROM ubuntu:22.04 - -RUN apt-get update && \ - apt-get install -y --no-install-recommends \ - ca-certificates \ - curl \ - gcc \ - libc-dev \ - python-is-python3 \ - unzip \ - file \ - make - -WORKDIR /android/ -ENV ANDROID_ARCH=x86_64 -COPY android-install-ndk.sh /android/ -RUN sh /android/android-install-ndk.sh $ANDROID_ARCH - -# We do not run x86_64-linux-android tests on an android emulator. -# See ci/android-sysimage.sh for information about how tests are run. -COPY android-sysimage.sh /android/ -RUN bash /android/android-sysimage.sh x86_64 x86_64-24_r07.zip - -ENV PATH=$PATH:/rust/bin:/android/ndk-$ANDROID_ARCH/bin \ - CARGO_TARGET_X86_64_LINUX_ANDROID_LINKER=x86_64-linux-android-gcc \ - CC_x86_64_linux_android=x86_64-linux-android-gcc \ - CXX_x86_64_linux_android=x86_64-linux-android-g++ \ - OBJDUMP=x86_64-linux-android-objdump \ - HOME=/tmp diff --git a/library/stdarch/ci/runtest-android.rs b/library/stdarch/ci/runtest-android.rs deleted file mode 100644 index ed1cd80c8..000000000 --- a/library/stdarch/ci/runtest-android.rs +++ /dev/null @@ -1,45 +0,0 @@ -use std::env; -use std::process::Command; -use std::path::{Path, PathBuf}; - -fn main() { - let args = env::args_os() - .skip(1) - .filter(|arg| arg != "--quiet") - .collect::>(); - assert_eq!(args.len(), 1); - let test = PathBuf::from(&args[0]); - let dst = Path::new("/data/local/tmp").join(test.file_name().unwrap()); - - let status = Command::new("adb") - .arg("wait-for-device") - .status() - .expect("failed to run: adb wait-for-device"); - assert!(status.success()); - - let status = Command::new("adb") - .arg("push") - .arg(&test) - .arg(&dst) - .status() - .expect("failed to run: adb pushr"); - assert!(status.success()); - - let output = Command::new("adb") - .arg("shell") - .arg(&dst) - .output() - .expect("failed to run: adb shell"); - assert!(status.success()); - - println!("status: {}\nstdout ---\n{}\nstderr ---\n{}", - output.status, - String::from_utf8_lossy(&output.stdout), - String::from_utf8_lossy(&output.stderr)); - - let stdout = String::from_utf8_lossy(&output.stdout); - let mut lines = stdout.lines().filter(|l| l.starts_with("test result")); - if !lines.all(|l| l.contains("test result: ok") && l.contains("0 failed")) { - panic!("failed to find successful test run"); - } -} diff --git a/library/stdarch/crates/assert-instr-macro/Cargo.toml b/library/stdarch/crates/assert-instr-macro/Cargo.toml index 3d9b32067..4ad654e69 100644 --- a/library/stdarch/crates/assert-instr-macro/Cargo.toml +++ b/library/stdarch/crates/assert-instr-macro/Cargo.toml @@ -2,7 +2,7 @@ name = "assert-instr-macro" version = "0.1.0" authors = ["Alex Crichton "] -edition = "2018" +edition = "2021" [lib] proc-macro = true diff --git a/library/stdarch/crates/assert-instr-macro/src/lib.rs b/library/stdarch/crates/assert-instr-macro/src/lib.rs index 9fa411df3..99e37c910 100644 --- a/library/stdarch/crates/assert-instr-macro/src/lib.rs +++ b/library/stdarch/crates/assert-instr-macro/src/lib.rs @@ -56,14 +56,14 @@ pub fn assert_instr( .replace('/', "_") .replace(':', "_") 
.replace(char::is_whitespace, ""); - let assert_name = syn::Ident::new(&format!("assert_{}_{}", name, instr_str), name.span()); + let assert_name = syn::Ident::new(&format!("assert_{name}_{instr_str}"), name.span()); // These name has to be unique enough for us to find it in the disassembly later on: let shim_name = syn::Ident::new( - &format!("stdarch_test_shim_{}_{}", name, instr_str), + &format!("stdarch_test_shim_{name}_{instr_str}"), name.span(), ); let shim_name_ptr = syn::Ident::new( - &format!("stdarch_test_shim_{}_{}_ptr", name, instr_str).to_ascii_uppercase(), + &format!("stdarch_test_shim_{name}_{instr_str}_ptr").to_ascii_uppercase(), name.span(), ); let mut inputs = Vec::new(); @@ -131,7 +131,7 @@ pub fn assert_instr( } else { syn::LitStr::new("C", proc_macro2::Span::call_site()) }; - let shim_name_str = format!("{}{}", shim_name, assert_name); + let shim_name_str = format!("{shim_name}{assert_name}"); let to_test = if disable_dedup_guard { quote! { #attrs diff --git a/library/stdarch/crates/core_arch/Cargo.toml b/library/stdarch/crates/core_arch/Cargo.toml index e2b332af2..a1bb168ee 100644 --- a/library/stdarch/crates/core_arch/Cargo.toml +++ b/library/stdarch/crates/core_arch/Cargo.toml @@ -13,7 +13,7 @@ readme = "README.md" keywords = ["core", "simd", "arch", "intrinsics"] categories = ["hardware-support", "no-std"] license = "MIT OR Apache-2.0" -edition = "2018" +edition = "2021" [badges] is-it-maintained-issue-resolution = { repository = "rust-lang/stdarch" } diff --git a/library/stdarch/crates/core_arch/src/aarch64/neon/mod.rs b/library/stdarch/crates/core_arch/src/aarch64/neon/mod.rs index 9d9946b4f..7ff26ac21 100644 --- a/library/stdarch/crates/core_arch/src/aarch64/neon/mod.rs +++ b/library/stdarch/crates/core_arch/src/aarch64/neon/mod.rs @@ -162,7 +162,7 @@ extern "unadjusted" { #[link_name = "llvm.aarch64.neon.smaxv.i8.v8i8"] fn vmaxv_s8_(a: int8x8_t) -> i8; - #[link_name = "llvm.aarch64.neon.smaxv.i8.6i8"] + #[link_name = "llvm.aarch64.neon.smaxv.i8.v16i8"] fn vmaxvq_s8_(a: int8x16_t) -> i8; #[link_name = "llvm.aarch64.neon.smaxv.i16.v4i16"] fn vmaxv_s16_(a: int16x4_t) -> i16; @@ -175,7 +175,7 @@ extern "unadjusted" { #[link_name = "llvm.aarch64.neon.umaxv.i8.v8i8"] fn vmaxv_u8_(a: uint8x8_t) -> u8; - #[link_name = "llvm.aarch64.neon.umaxv.i8.6i8"] + #[link_name = "llvm.aarch64.neon.umaxv.i8.v16i8"] fn vmaxvq_u8_(a: uint8x16_t) -> u8; #[link_name = "llvm.aarch64.neon.umaxv.i16.v4i16"] fn vmaxv_u16_(a: uint16x4_t) -> u16; @@ -195,7 +195,7 @@ extern "unadjusted" { #[link_name = "llvm.aarch64.neon.sminv.i8.v8i8"] fn vminv_s8_(a: int8x8_t) -> i8; - #[link_name = "llvm.aarch64.neon.sminv.i8.6i8"] + #[link_name = "llvm.aarch64.neon.sminv.i8.v16i8"] fn vminvq_s8_(a: int8x16_t) -> i8; #[link_name = "llvm.aarch64.neon.sminv.i16.v4i16"] fn vminv_s16_(a: int16x4_t) -> i16; @@ -208,7 +208,7 @@ extern "unadjusted" { #[link_name = "llvm.aarch64.neon.uminv.i8.v8i8"] fn vminv_u8_(a: uint8x8_t) -> u8; - #[link_name = "llvm.aarch64.neon.uminv.i8.6i8"] + #[link_name = "llvm.aarch64.neon.uminv.i8.v16i8"] fn vminvq_u8_(a: uint8x16_t) -> u8; #[link_name = "llvm.aarch64.neon.uminv.i16.v4i16"] fn vminv_u16_(a: uint16x4_t) -> u16; @@ -1964,94 +1964,6 @@ pub unsafe fn vext_f64(a: float64x1_t, _b: float64x1_t) -> float64 static_assert!(N : i32 where N == 0); a } -/// Vector combine -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcombine_s8(low: int8x8_t, high: int8x8_t) -> int8x16_t 
{ - simd_shuffle16!( - low, - high, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], - ) -} - -/// Vector combine -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcombine_s16(low: int16x4_t, high: int16x4_t) -> int16x8_t { - simd_shuffle8!(low, high, [0, 1, 2, 3, 4, 5, 6, 7]) -} - -/// Vector combine -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcombine_s32(low: int32x2_t, high: int32x2_t) -> int32x4_t { - simd_shuffle4!(low, high, [0, 1, 2, 3]) -} - -/// Vector combine -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcombine_s64(low: int64x1_t, high: int64x1_t) -> int64x2_t { - simd_shuffle2!(low, high, [0, 1]) -} - -/// Vector combine -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcombine_u8(low: uint8x8_t, high: uint8x8_t) -> uint8x16_t { - simd_shuffle16!( - low, - high, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], - ) -} - -/// Vector combine -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcombine_u16(low: uint16x4_t, high: uint16x4_t) -> uint16x8_t { - simd_shuffle8!(low, high, [0, 1, 2, 3, 4, 5, 6, 7]) -} - -/// Vector combine -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcombine_u32(low: uint32x2_t, high: uint32x2_t) -> uint32x4_t { - simd_shuffle4!(low, high, [0, 1, 2, 3]) -} - -/// Vector combine -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcombine_u64(low: uint64x1_t, high: uint64x1_t) -> uint64x2_t { - simd_shuffle2!(low, high, [0, 1]) -} - -/// Vector combine -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcombine_p64(low: poly64x1_t, high: poly64x1_t) -> poly64x2_t { - simd_shuffle2!(low, high, [0, 1]) -} /// Duplicate vector element to vector or scalar #[inline] @@ -2183,47 +2095,6 @@ pub unsafe fn vgetq_lane_f64(v: float64x2_t) -> f64 { simd_extract(v, IMM5 as u32) } -/* FIXME: 16-bit float -/// Vector combine -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov))] -pub unsafe fn vcombine_f16 ( low: float16x4_t, high: float16x4_t) -> float16x8_t { - simd_shuffle8!(low, high, [0, 1, 2, 3, 4, 5, 6, 7]) -} -*/ - -/// Vector combine -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcombine_f32(low: float32x2_t, high: float32x2_t) -> float32x4_t { - simd_shuffle4!(low, high, [0, 1, 2, 3]) -} - -/// Vector combine -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcombine_p8(low: poly8x8_t, high: poly8x8_t) -> poly8x16_t { - simd_shuffle16!( - low, - high, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], - ) -} - -/// Vector 
combine -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcombine_p16(low: poly16x4_t, high: poly16x4_t) -> poly16x8_t { - simd_shuffle8!(low, high, [0, 1, 2, 3, 4, 5, 6, 7]) -} - /// Vector combine #[inline] #[target_feature(enable = "neon")] @@ -4478,43 +4349,6 @@ mod tests { assert_eq!(r, e); } - macro_rules! test_vcombine { - ($test_id:ident => $fn_id:ident ([$($a:expr),*], [$($b:expr),*])) => { - #[allow(unused_assignments)] - #[simd_test(enable = "neon")] - unsafe fn $test_id() { - let a = [$($a),*]; - let b = [$($b),*]; - let e = [$($a),* $(, $b)*]; - let c = $fn_id(transmute(a), transmute(b)); - let mut d = e; - d = transmute(c); - assert_eq!(d, e); - } - } - } - - test_vcombine!(test_vcombine_s8 => vcombine_s8([3_i8, -4, 5, -6, 7, 8, 9, 10], [13_i8, -14, 15, -16, 17, 18, 19, 110])); - test_vcombine!(test_vcombine_u8 => vcombine_u8([3_u8, 4, 5, 6, 7, 8, 9, 10], [13_u8, 14, 15, 16, 17, 18, 19, 110])); - test_vcombine!(test_vcombine_p8 => vcombine_p8([3_u8, 4, 5, 6, 7, 8, 9, 10], [13_u8, 14, 15, 16, 17, 18, 19, 110])); - - test_vcombine!(test_vcombine_s16 => vcombine_s16([3_i16, -4, 5, -6], [13_i16, -14, 15, -16])); - test_vcombine!(test_vcombine_u16 => vcombine_u16([3_u16, 4, 5, 6], [13_u16, 14, 15, 16])); - test_vcombine!(test_vcombine_p16 => vcombine_p16([3_u16, 4, 5, 6], [13_u16, 14, 15, 16])); - // FIXME: 16-bit floats - // test_vcombine!(test_vcombine_f16 => vcombine_f16([3_f16, 4., 5., 6.], - // [13_f16, 14., 15., 16.])); - - test_vcombine!(test_vcombine_s32 => vcombine_s32([3_i32, -4], [13_i32, -14])); - test_vcombine!(test_vcombine_u32 => vcombine_u32([3_u32, 4], [13_u32, 14])); - // note: poly32x4 does not exist, and neither does vcombine_p32 - test_vcombine!(test_vcombine_f32 => vcombine_f32([3_f32, -4.], [13_f32, -14.])); - - test_vcombine!(test_vcombine_s64 => vcombine_s64([-3_i64], [13_i64])); - test_vcombine!(test_vcombine_u64 => vcombine_u64([3_u64], [13_u64])); - test_vcombine!(test_vcombine_p64 => vcombine_p64([3_u64], [13_u64])); - test_vcombine!(test_vcombine_f64 => vcombine_f64([-3_f64], [13_f64])); - #[simd_test(enable = "neon")] unsafe fn test_vdup_n_f64() { let a: f64 = 3.3; diff --git a/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs b/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs index ac2709744..fe473c51e 100644 --- a/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs +++ b/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs @@ -17,7 +17,7 @@ use stdarch_test::assert_instr; #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vand_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { simd_and(a, b) } @@ -30,7 +30,7 @@ pub unsafe fn vand_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub 
unsafe fn vandq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { simd_and(a, b) } @@ -43,7 +43,7 @@ pub unsafe fn vandq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vand_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { simd_and(a, b) } @@ -56,7 +56,7 @@ pub unsafe fn vand_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vandq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { simd_and(a, b) } @@ -69,7 +69,7 @@ pub unsafe fn vandq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vand_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { simd_and(a, b) } @@ -82,7 +82,7 @@ pub unsafe fn vand_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vandq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { simd_and(a, b) } @@ -95,7 +95,7 @@ pub unsafe fn vandq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vand_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { simd_and(a, b) } @@ -108,7 +108,7 @@ pub unsafe fn vand_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vandq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { simd_and(a, b) } @@ -121,7 +121,7 @@ pub unsafe fn vandq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, 
target_arch = "arm"), assert_instr(vand))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vand_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { simd_and(a, b) } @@ -134,7 +134,7 @@ pub unsafe fn vand_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vandq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { simd_and(a, b) } @@ -147,7 +147,7 @@ pub unsafe fn vandq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vand_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { simd_and(a, b) } @@ -160,7 +160,7 @@ pub unsafe fn vand_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vandq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { simd_and(a, b) } @@ -173,7 +173,7 @@ pub unsafe fn vandq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vand_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { simd_and(a, b) } @@ -186,7 +186,7 @@ pub unsafe fn vand_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vandq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { simd_and(a, b) } @@ -199,7 +199,7 @@ pub unsafe fn vandq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] 
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vand_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { simd_and(a, b) } @@ -212,7 +212,7 @@ pub unsafe fn vand_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vandq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { simd_and(a, b) } @@ -225,7 +225,7 @@ pub unsafe fn vandq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vorr_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { simd_or(a, b) } @@ -238,7 +238,7 @@ pub unsafe fn vorr_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vorrq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { simd_or(a, b) } @@ -251,7 +251,7 @@ pub unsafe fn vorrq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vorr_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { simd_or(a, b) } @@ -264,7 +264,7 @@ pub unsafe fn vorr_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vorrq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { simd_or(a, b) } @@ -277,7 +277,7 @@ pub unsafe fn vorrq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vorr_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { simd_or(a, b) } @@ -290,7 +290,7 @@ pub unsafe fn vorr_s32(a: int32x2_t, b: 
int32x2_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vorrq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { simd_or(a, b) } @@ -303,7 +303,7 @@ pub unsafe fn vorrq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vorr_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { simd_or(a, b) } @@ -316,7 +316,7 @@ pub unsafe fn vorr_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vorrq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { simd_or(a, b) } @@ -329,7 +329,7 @@ pub unsafe fn vorrq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vorr_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { simd_or(a, b) } @@ -342,7 +342,7 @@ pub unsafe fn vorr_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vorrq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { simd_or(a, b) } @@ -355,7 +355,7 @@ pub unsafe fn vorrq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vorr_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { simd_or(a, b) } @@ -368,7 +368,7 @@ pub unsafe fn vorr_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))] 
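These hunks all make the same change: the stability marker moves from cfg_attr(target_arch = "aarch64", ...) to cfg_attr(not(target_arch = "arm"), ...), so the attribute is applied on every non-arm target instead of on aarch64 only. A small sketch of how cfg_attr gates an attribute, assuming nothing from stdarch; must_use stands in for stable(...), which only the standard library itself may use:

// The attribute inside cfg_attr is attached only when the predicate holds.
#[cfg_attr(not(target_arch = "arm"), must_use)]
fn gated() -> u32 {
    42
}

fn main() {
    // A bare `gated();` statement would warn on non-arm targets, where the
    // must_use attribute is compiled in; binding the result keeps this quiet.
    let _value = gated();
}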
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vorrq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { simd_or(a, b) } @@ -381,7 +381,7 @@ pub unsafe fn vorrq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vorr_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { simd_or(a, b) } @@ -394,7 +394,7 @@ pub unsafe fn vorr_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vorrq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { simd_or(a, b) } @@ -407,7 +407,7 @@ pub unsafe fn vorrq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vorr_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { simd_or(a, b) } @@ -420,7 +420,7 @@ pub unsafe fn vorr_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vorrq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { simd_or(a, b) } @@ -433,7 +433,7 @@ pub unsafe fn vorrq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn veor_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { simd_xor(a, b) } @@ -446,7 +446,7 @@ pub unsafe fn veor_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn veorq_s8(a: int8x16_t, b: int8x16_t) 
-> int8x16_t { simd_xor(a, b) } @@ -459,7 +459,7 @@ pub unsafe fn veorq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn veor_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { simd_xor(a, b) } @@ -472,7 +472,7 @@ pub unsafe fn veor_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn veorq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { simd_xor(a, b) } @@ -485,7 +485,7 @@ pub unsafe fn veorq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn veor_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { simd_xor(a, b) } @@ -498,7 +498,7 @@ pub unsafe fn veor_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn veorq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { simd_xor(a, b) } @@ -511,7 +511,7 @@ pub unsafe fn veorq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn veor_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { simd_xor(a, b) } @@ -524,7 +524,7 @@ pub unsafe fn veor_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn veorq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { simd_xor(a, b) } @@ -537,7 +537,7 @@ pub unsafe fn veorq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), 
assert_instr(veor))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn veor_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { simd_xor(a, b) } @@ -550,7 +550,7 @@ pub unsafe fn veor_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn veorq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { simd_xor(a, b) } @@ -563,7 +563,7 @@ pub unsafe fn veorq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn veor_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { simd_xor(a, b) } @@ -576,7 +576,7 @@ pub unsafe fn veor_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn veorq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { simd_xor(a, b) } @@ -589,7 +589,7 @@ pub unsafe fn veorq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn veor_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { simd_xor(a, b) } @@ -602,7 +602,7 @@ pub unsafe fn veor_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn veorq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { simd_xor(a, b) } @@ -615,7 +615,7 @@ pub unsafe fn veorq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), 
stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn veor_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { simd_xor(a, b) } @@ -628,7 +628,7 @@ pub unsafe fn veor_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn veorq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { simd_xor(a, b) } @@ -641,7 +641,7 @@ pub unsafe fn veorq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sabd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vabd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -660,7 +660,7 @@ vabd_s8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sabd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vabdq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -679,7 +679,7 @@ vabdq_s8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sabd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vabd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -698,7 +698,7 @@ vabd_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sabd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vabdq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -717,7 +717,7 @@ vabdq_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sabd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vabd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -736,7 +736,7 @@ vabd_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] 
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sabd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vabdq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -755,7 +755,7 @@ vabdq_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uabd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vabd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -774,7 +774,7 @@ vabd_u8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uabd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vabdq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -793,7 +793,7 @@ vabdq_u8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uabd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vabd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -812,7 +812,7 @@ vabd_u16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uabd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vabdq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -831,7 +831,7 @@ vabdq_u16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uabd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vabd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -850,7 +850,7 @@ vabd_u32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uabd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", 
since = "1.59.0"))] pub unsafe fn vabdq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -869,7 +869,7 @@ vabdq_u32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fabd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vabd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -888,7 +888,7 @@ vabd_f32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fabd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vabdq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -907,7 +907,7 @@ vabdq_f32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uabdl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vabdl_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { simd_cast(vabd_u8(a, b)) } @@ -920,7 +920,7 @@ pub unsafe fn vabdl_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uabdl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vabdl_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { simd_cast(vabd_u16(a, b)) } @@ -933,7 +933,7 @@ pub unsafe fn vabdl_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uabdl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vabdl_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { simd_cast(vabd_u32(a, b)) } @@ -946,7 +946,7 @@ pub unsafe fn vabdl_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sabdl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vabdl_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t { let c: uint8x8_t = simd_cast(vabd_s8(a, b)); simd_cast(c) @@ -960,7 +960,7 @@ pub unsafe fn vabdl_s8(a: 
int8x8_t, b: int8x8_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sabdl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vabdl_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { let c: uint16x4_t = simd_cast(vabd_s16(a, b)); simd_cast(c) @@ -974,7 +974,7 @@ pub unsafe fn vabdl_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sabdl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vabdl_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { let c: uint32x2_t = simd_cast(vabd_s32(a, b)); simd_cast(c) @@ -988,7 +988,7 @@ pub unsafe fn vabdl_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmeq))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vceq_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { simd_eq(a, b) } @@ -1001,7 +1001,7 @@ pub unsafe fn vceq_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmeq))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vceqq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { simd_eq(a, b) } @@ -1014,7 +1014,7 @@ pub unsafe fn vceqq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmeq))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vceq_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { simd_eq(a, b) } @@ -1027,7 +1027,7 @@ pub unsafe fn vceq_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmeq))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vceqq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { simd_eq(a, b) } @@ -1040,7 +1040,7 @@ pub unsafe fn vceqq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable 
= "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmeq))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vceq_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { simd_eq(a, b) } @@ -1053,7 +1053,7 @@ pub unsafe fn vceq_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmeq))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vceqq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { simd_eq(a, b) } @@ -1066,7 +1066,7 @@ pub unsafe fn vceqq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmeq))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vceq_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { simd_eq(a, b) } @@ -1079,7 +1079,7 @@ pub unsafe fn vceq_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmeq))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vceqq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { simd_eq(a, b) } @@ -1092,7 +1092,7 @@ pub unsafe fn vceqq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmeq))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vceq_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { simd_eq(a, b) } @@ -1105,7 +1105,7 @@ pub unsafe fn vceq_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmeq))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vceqq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { simd_eq(a, b) } @@ -1118,7 +1118,7 @@ pub unsafe fn vceqq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmeq))] -#[cfg_attr(target_arch = "aarch64", 
stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vceq_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { simd_eq(a, b) } @@ -1131,7 +1131,7 @@ pub unsafe fn vceq_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmeq))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vceqq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { simd_eq(a, b) } @@ -1144,7 +1144,7 @@ pub unsafe fn vceqq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmeq))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vceq_p8(a: poly8x8_t, b: poly8x8_t) -> uint8x8_t { simd_eq(a, b) } @@ -1157,7 +1157,7 @@ pub unsafe fn vceq_p8(a: poly8x8_t, b: poly8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmeq))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vceqq_p8(a: poly8x16_t, b: poly8x16_t) -> uint8x16_t { simd_eq(a, b) } @@ -1170,7 +1170,7 @@ pub unsafe fn vceqq_p8(a: poly8x16_t, b: poly8x16_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcmeq))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vceq_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { simd_eq(a, b) } @@ -1183,7 +1183,7 @@ pub unsafe fn vceq_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcmeq))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vceqq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { simd_eq(a, b) } @@ -1196,7 +1196,7 @@ pub unsafe fn vceqq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn 
vtst_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { let c: int8x8_t = simd_and(a, b); let d: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0); @@ -1211,7 +1211,7 @@ pub unsafe fn vtst_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtstq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { let c: int8x16_t = simd_and(a, b); let d: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); @@ -1226,7 +1226,7 @@ pub unsafe fn vtstq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtst_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { let c: int16x4_t = simd_and(a, b); let d: i16x4 = i16x4::new(0, 0, 0, 0); @@ -1241,7 +1241,7 @@ pub unsafe fn vtst_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtstq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { let c: int16x8_t = simd_and(a, b); let d: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0); @@ -1256,7 +1256,7 @@ pub unsafe fn vtstq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtst_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { let c: int32x2_t = simd_and(a, b); let d: i32x2 = i32x2::new(0, 0); @@ -1271,7 +1271,7 @@ pub unsafe fn vtst_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtstq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { let c: int32x4_t = simd_and(a, b); let d: i32x4 = i32x4::new(0, 0, 0, 0); @@ -1286,7 +1286,7 @@ pub unsafe fn vtstq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))] -#[cfg_attr(target_arch 
= "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtst_p8(a: poly8x8_t, b: poly8x8_t) -> uint8x8_t { let c: poly8x8_t = simd_and(a, b); let d: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0); @@ -1301,7 +1301,7 @@ pub unsafe fn vtst_p8(a: poly8x8_t, b: poly8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtstq_p8(a: poly8x16_t, b: poly8x16_t) -> uint8x16_t { let c: poly8x16_t = simd_and(a, b); let d: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); @@ -1316,7 +1316,7 @@ pub unsafe fn vtstq_p8(a: poly8x16_t, b: poly8x16_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtst_p16(a: poly16x4_t, b: poly16x4_t) -> uint16x4_t { let c: poly16x4_t = simd_and(a, b); let d: i16x4 = i16x4::new(0, 0, 0, 0); @@ -1331,7 +1331,7 @@ pub unsafe fn vtst_p16(a: poly16x4_t, b: poly16x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtstq_p16(a: poly16x8_t, b: poly16x8_t) -> uint16x8_t { let c: poly16x8_t = simd_and(a, b); let d: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0); @@ -1346,7 +1346,7 @@ pub unsafe fn vtstq_p16(a: poly16x8_t, b: poly16x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtst_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { let c: uint8x8_t = simd_and(a, b); let d: u8x8 = u8x8::new(0, 0, 0, 0, 0, 0, 0, 0); @@ -1361,7 +1361,7 @@ pub unsafe fn vtst_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtstq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { let c: uint8x16_t = simd_and(a, b); let d: u8x16 = u8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); @@ -1376,7 +1376,7 @@ pub unsafe fn vtstq_u8(a: uint8x16_t, b: 
uint8x16_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtst_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { let c: uint16x4_t = simd_and(a, b); let d: u16x4 = u16x4::new(0, 0, 0, 0); @@ -1391,7 +1391,7 @@ pub unsafe fn vtst_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtstq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { let c: uint16x8_t = simd_and(a, b); let d: u16x8 = u16x8::new(0, 0, 0, 0, 0, 0, 0, 0); @@ -1406,7 +1406,7 @@ pub unsafe fn vtstq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtst_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { let c: uint32x2_t = simd_and(a, b); let d: u32x2 = u32x2::new(0, 0); @@ -1421,7 +1421,7 @@ pub unsafe fn vtst_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtstq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { let c: uint32x4_t = simd_and(a, b); let d: u32x4 = u32x4::new(0, 0, 0, 0); @@ -1436,7 +1436,7 @@ pub unsafe fn vtstq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fabs))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vabs_f32(a: float32x2_t) -> float32x2_t { simd_fabs(a) } @@ -1449,7 +1449,7 @@ pub unsafe fn vabs_f32(a: float32x2_t) -> float32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fabs))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vabsq_f32(a: float32x4_t) -> float32x4_t { simd_fabs(a) } @@ -1462,7 +1462,7 @@ pub unsafe fn vabsq_f32(a: 
float32x4_t) -> float32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmgt))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcgt_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { simd_gt(a, b) } @@ -1475,7 +1475,7 @@ pub unsafe fn vcgt_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmgt))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcgtq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { simd_gt(a, b) } @@ -1488,7 +1488,7 @@ pub unsafe fn vcgtq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmgt))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcgt_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { simd_gt(a, b) } @@ -1501,7 +1501,7 @@ pub unsafe fn vcgt_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmgt))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcgtq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { simd_gt(a, b) } @@ -1514,7 +1514,7 @@ pub unsafe fn vcgtq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmgt))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcgt_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { simd_gt(a, b) } @@ -1527,7 +1527,7 @@ pub unsafe fn vcgt_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmgt))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcgtq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { simd_gt(a, b) } @@ -1540,7 +1540,7 @@ pub unsafe fn vcgtq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u8"))] #[cfg_attr(all(test, 
target_arch = "aarch64"), assert_instr(cmhi))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcgt_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { simd_gt(a, b) } @@ -1553,7 +1553,7 @@ pub unsafe fn vcgt_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhi))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcgtq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { simd_gt(a, b) } @@ -1566,7 +1566,7 @@ pub unsafe fn vcgtq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhi))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcgt_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { simd_gt(a, b) } @@ -1579,7 +1579,7 @@ pub unsafe fn vcgt_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhi))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcgtq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { simd_gt(a, b) } @@ -1592,7 +1592,7 @@ pub unsafe fn vcgtq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhi))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcgt_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { simd_gt(a, b) } @@ -1605,7 +1605,7 @@ pub unsafe fn vcgt_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhi))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcgtq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { simd_gt(a, b) } @@ -1618,7 +1618,7 @@ pub unsafe fn vcgtq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcmgt))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch 
= "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcgt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { simd_gt(a, b) } @@ -1631,7 +1631,7 @@ pub unsafe fn vcgt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcmgt))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcgtq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { simd_gt(a, b) } @@ -1644,7 +1644,7 @@ pub unsafe fn vcgtq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmgt))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vclt_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { simd_lt(a, b) } @@ -1657,7 +1657,7 @@ pub unsafe fn vclt_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmgt))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcltq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { simd_lt(a, b) } @@ -1670,7 +1670,7 @@ pub unsafe fn vcltq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmgt))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vclt_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { simd_lt(a, b) } @@ -1683,7 +1683,7 @@ pub unsafe fn vclt_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmgt))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcltq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { simd_lt(a, b) } @@ -1696,7 +1696,7 @@ pub unsafe fn vcltq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmgt))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vclt_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { simd_lt(a, b) } @@ -1709,7 +1709,7 @@ pub 
unsafe fn vclt_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmgt))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcltq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { simd_lt(a, b) } @@ -1722,7 +1722,7 @@ pub unsafe fn vcltq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhi))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vclt_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { simd_lt(a, b) } @@ -1735,7 +1735,7 @@ pub unsafe fn vclt_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhi))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcltq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { simd_lt(a, b) } @@ -1748,7 +1748,7 @@ pub unsafe fn vcltq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhi))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vclt_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { simd_lt(a, b) } @@ -1761,7 +1761,7 @@ pub unsafe fn vclt_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhi))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcltq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { simd_lt(a, b) } @@ -1774,7 +1774,7 @@ pub unsafe fn vcltq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhi))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vclt_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { simd_lt(a, b) } @@ -1787,7 +1787,7 @@ pub unsafe fn vclt_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), 
assert_instr("vcgt.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhi))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcltq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { simd_lt(a, b) } @@ -1800,7 +1800,7 @@ pub unsafe fn vcltq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcmgt))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vclt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { simd_lt(a, b) } @@ -1813,7 +1813,7 @@ pub unsafe fn vclt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcmgt))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcltq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { simd_lt(a, b) } @@ -1826,7 +1826,7 @@ pub unsafe fn vcltq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmge))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcle_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { simd_le(a, b) } @@ -1839,7 +1839,7 @@ pub unsafe fn vcle_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmge))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcleq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { simd_le(a, b) } @@ -1852,7 +1852,7 @@ pub unsafe fn vcleq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmge))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcle_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { simd_le(a, b) } @@ -1865,7 +1865,7 @@ pub unsafe fn vcle_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmge))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", 
since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcleq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { simd_le(a, b) } @@ -1878,7 +1878,7 @@ pub unsafe fn vcleq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmge))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcle_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { simd_le(a, b) } @@ -1891,7 +1891,7 @@ pub unsafe fn vcle_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmge))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcleq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { simd_le(a, b) } @@ -1904,7 +1904,7 @@ pub unsafe fn vcleq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhs))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcle_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { simd_le(a, b) } @@ -1917,7 +1917,7 @@ pub unsafe fn vcle_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhs))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcleq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { simd_le(a, b) } @@ -1930,7 +1930,7 @@ pub unsafe fn vcleq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhs))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcle_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { simd_le(a, b) } @@ -1943,7 +1943,7 @@ pub unsafe fn vcle_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhs))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcleq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t 
{ simd_le(a, b) } @@ -1956,7 +1956,7 @@ pub unsafe fn vcleq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhs))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcle_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { simd_le(a, b) } @@ -1969,7 +1969,7 @@ pub unsafe fn vcle_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhs))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcleq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { simd_le(a, b) } @@ -1982,7 +1982,7 @@ pub unsafe fn vcleq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcmge))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcle_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { simd_le(a, b) } @@ -1995,7 +1995,7 @@ pub unsafe fn vcle_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcmge))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcleq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { simd_le(a, b) } @@ -2008,7 +2008,7 @@ pub unsafe fn vcleq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmge))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcge_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { simd_ge(a, b) } @@ -2021,7 +2021,7 @@ pub unsafe fn vcge_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmge))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcgeq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { simd_ge(a, b) } @@ -2034,7 +2034,7 @@ pub unsafe fn vcgeq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = 
"v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmge))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcge_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { simd_ge(a, b) } @@ -2047,7 +2047,7 @@ pub unsafe fn vcge_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmge))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcgeq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { simd_ge(a, b) } @@ -2060,7 +2060,7 @@ pub unsafe fn vcgeq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmge))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcge_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { simd_ge(a, b) } @@ -2073,7 +2073,7 @@ pub unsafe fn vcge_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmge))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcgeq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { simd_ge(a, b) } @@ -2086,7 +2086,7 @@ pub unsafe fn vcgeq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhs))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcge_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { simd_ge(a, b) } @@ -2099,7 +2099,7 @@ pub unsafe fn vcge_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhs))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcgeq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { simd_ge(a, b) } @@ -2112,7 +2112,7 @@ pub unsafe fn vcgeq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhs))] -#[cfg_attr(target_arch = "aarch64", 
stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcge_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { simd_ge(a, b) } @@ -2125,7 +2125,7 @@ pub unsafe fn vcge_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhs))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcgeq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { simd_ge(a, b) } @@ -2138,7 +2138,7 @@ pub unsafe fn vcgeq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhs))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcge_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { simd_ge(a, b) } @@ -2151,7 +2151,7 @@ pub unsafe fn vcge_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhs))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcgeq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { simd_ge(a, b) } @@ -2164,7 +2164,7 @@ pub unsafe fn vcgeq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcmge))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcge_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { simd_ge(a, b) } @@ -2177,7 +2177,7 @@ pub unsafe fn vcge_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcmge))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcgeq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { simd_ge(a, b) } @@ -2190,7 +2190,7 @@ pub unsafe fn vcgeq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cls))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] 
pub unsafe fn vcls_s8(a: int8x8_t) -> int8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -2209,7 +2209,7 @@ vcls_s8_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cls))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vclsq_s8(a: int8x16_t) -> int8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -2228,7 +2228,7 @@ vclsq_s8_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cls))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcls_s16(a: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -2247,7 +2247,7 @@ vcls_s16_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cls))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vclsq_s16(a: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -2266,7 +2266,7 @@ vclsq_s16_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cls))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcls_s32(a: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -2285,7 +2285,7 @@ vcls_s32_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cls))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vclsq_s32(a: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -2304,7 +2304,7 @@ vclsq_s32_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cls))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcls_u8(a: uint8x8_t) -> int8x8_t { transmute(vcls_s8(transmute(a))) } @@ -2317,7 +2317,7 @@ pub unsafe fn vcls_u8(a: uint8x8_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cls))] -#[cfg_attr(target_arch = "aarch64", stable(feature = 
"neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vclsq_u8(a: uint8x16_t) -> int8x16_t { transmute(vclsq_s8(transmute(a))) } @@ -2330,7 +2330,7 @@ pub unsafe fn vclsq_u8(a: uint8x16_t) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cls))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcls_u16(a: uint16x4_t) -> int16x4_t { transmute(vcls_s16(transmute(a))) } @@ -2343,7 +2343,7 @@ pub unsafe fn vcls_u16(a: uint16x4_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cls))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vclsq_u16(a: uint16x8_t) -> int16x8_t { transmute(vclsq_s16(transmute(a))) } @@ -2356,7 +2356,7 @@ pub unsafe fn vclsq_u16(a: uint16x8_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cls))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcls_u32(a: uint32x2_t) -> int32x2_t { transmute(vcls_s32(transmute(a))) } @@ -2369,7 +2369,7 @@ pub unsafe fn vcls_u32(a: uint32x2_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cls))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vclsq_u32(a: uint32x4_t) -> int32x4_t { transmute(vclsq_s32(transmute(a))) } @@ -2382,7 +2382,7 @@ pub unsafe fn vclsq_u32(a: uint32x4_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(clz))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vclz_s8(a: int8x8_t) -> int8x8_t { vclz_s8_(a) } @@ -2395,7 +2395,7 @@ pub unsafe fn vclz_s8(a: int8x8_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(clz))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vclzq_s8(a: int8x16_t) -> int8x16_t { vclzq_s8_(a) } @@ -2408,7 +2408,7 @@ pub unsafe fn vclzq_s8(a: int8x16_t) -> int8x16_t { 
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(clz))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vclz_s16(a: int16x4_t) -> int16x4_t { vclz_s16_(a) } @@ -2421,7 +2421,7 @@ pub unsafe fn vclz_s16(a: int16x4_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(clz))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vclzq_s16(a: int16x8_t) -> int16x8_t { vclzq_s16_(a) } @@ -2434,7 +2434,7 @@ pub unsafe fn vclzq_s16(a: int16x8_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(clz))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vclz_s32(a: int32x2_t) -> int32x2_t { vclz_s32_(a) } @@ -2447,7 +2447,7 @@ pub unsafe fn vclz_s32(a: int32x2_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(clz))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vclzq_s32(a: int32x4_t) -> int32x4_t { vclzq_s32_(a) } @@ -2460,7 +2460,7 @@ pub unsafe fn vclzq_s32(a: int32x4_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(clz))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vclz_u8(a: uint8x8_t) -> uint8x8_t { transmute(vclz_s8_(transmute(a))) } @@ -2473,7 +2473,7 @@ pub unsafe fn vclz_u8(a: uint8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(clz))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vclzq_u8(a: uint8x16_t) -> uint8x16_t { transmute(vclzq_s8_(transmute(a))) } @@ -2486,7 +2486,7 @@ pub unsafe fn vclzq_u8(a: uint8x16_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(clz))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), 
stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vclz_u16(a: uint16x4_t) -> uint16x4_t { transmute(vclz_s16_(transmute(a))) } @@ -2499,7 +2499,7 @@ pub unsafe fn vclz_u16(a: uint16x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(clz))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vclzq_u16(a: uint16x8_t) -> uint16x8_t { transmute(vclzq_s16_(transmute(a))) } @@ -2512,7 +2512,7 @@ pub unsafe fn vclzq_u16(a: uint16x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(clz))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vclz_u32(a: uint32x2_t) -> uint32x2_t { transmute(vclz_s32_(transmute(a))) } @@ -2525,7 +2525,7 @@ pub unsafe fn vclz_u32(a: uint32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(clz))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vclzq_u32(a: uint32x4_t) -> uint32x4_t { transmute(vclzq_s32_(transmute(a))) } @@ -2538,7 +2538,7 @@ pub unsafe fn vclzq_u32(a: uint32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacgt.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(facgt))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcagt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -2557,7 +2557,7 @@ vcagt_f32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacgt.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(facgt))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcagtq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -2576,7 +2576,7 @@ vcagtq_f32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacge.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(facge))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcage_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -2595,7 +2595,7 @@ vcage_f32_(a, b) 
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacge.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(facge))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcageq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -2614,7 +2614,7 @@ vcageq_f32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacgt.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(facgt))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcalt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { vcagt_f32(b, a) } @@ -2627,7 +2627,7 @@ pub unsafe fn vcalt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacgt.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(facgt))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcaltq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { vcagtq_f32(b, a) } @@ -2640,7 +2640,7 @@ pub unsafe fn vcaltq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacge.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(facge))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcale_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { vcage_f32(b, a) } @@ -2653,7 +2653,7 @@ pub unsafe fn vcale_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacge.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(facge))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcaleq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { vcageq_f32(b, a) } @@ -2666,7 +2666,7 @@ pub unsafe fn vcaleq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcreate_s8(a: u64) -> int8x8_t { transmute(a) } @@ -2679,7 +2679,7 @@ pub unsafe fn vcreate_s8(a: u64) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), 
assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcreate_s16(a: u64) -> int16x4_t { transmute(a) } @@ -2692,7 +2692,7 @@ pub unsafe fn vcreate_s16(a: u64) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcreate_s32(a: u64) -> int32x2_t { transmute(a) } @@ -2705,7 +2705,7 @@ pub unsafe fn vcreate_s32(a: u64) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcreate_s64(a: u64) -> int64x1_t { transmute(a) } @@ -2718,7 +2718,7 @@ pub unsafe fn vcreate_s64(a: u64) -> int64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcreate_u8(a: u64) -> uint8x8_t { transmute(a) } @@ -2731,7 +2731,7 @@ pub unsafe fn vcreate_u8(a: u64) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcreate_u16(a: u64) -> uint16x4_t { transmute(a) } @@ -2744,7 +2744,7 @@ pub unsafe fn vcreate_u16(a: u64) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcreate_u32(a: u64) -> uint32x2_t { transmute(a) } @@ -2757,7 +2757,7 @@ pub unsafe fn vcreate_u32(a: u64) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcreate_u64(a: u64) -> uint64x1_t { transmute(a) } @@ -2770,7 +2770,7 @@ pub unsafe fn vcreate_u64(a: u64) -> uint64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), 
assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcreate_p8(a: u64) -> poly8x8_t { transmute(a) } @@ -2783,7 +2783,7 @@ pub unsafe fn vcreate_p8(a: u64) -> poly8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcreate_p16(a: u64) -> poly16x4_t { transmute(a) } @@ -2796,7 +2796,7 @@ pub unsafe fn vcreate_p16(a: u64) -> poly16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcreate_p64(a: u64) -> poly64x1_t { transmute(a) } @@ -2809,7 +2809,7 @@ pub unsafe fn vcreate_p64(a: u64) -> poly64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcreate_f32(a: u64) -> float32x2_t { transmute(a) } @@ -2822,7 +2822,7 @@ pub unsafe fn vcreate_f32(a: u64) -> float32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(scvtf))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcvt_f32_s32(a: int32x2_t) -> float32x2_t { simd_cast(a) } @@ -2835,7 +2835,7 @@ pub unsafe fn vcvt_f32_s32(a: int32x2_t) -> float32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(scvtf))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcvtq_f32_s32(a: int32x4_t) -> float32x4_t { simd_cast(a) } @@ -2848,7 +2848,7 @@ pub unsafe fn vcvtq_f32_s32(a: int32x4_t) -> float32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ucvtf))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcvt_f32_u32(a: uint32x2_t) -> float32x2_t { simd_cast(a) } @@ -2861,7 +2861,7 @@ pub unsafe fn vcvt_f32_u32(a: 
uint32x2_t) -> float32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ucvtf))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcvtq_f32_u32(a: uint32x4_t) -> float32x4_t { simd_cast(a) } @@ -3170,7 +3170,7 @@ vcvtq_n_u32_f32_(a, N) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcvtzs))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcvt_s32_f32(a: float32x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -3189,7 +3189,7 @@ vcvt_s32_f32_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcvtzs))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcvtq_s32_f32(a: float32x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -3208,7 +3208,7 @@ vcvtq_s32_f32_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcvtzu))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcvt_u32_f32(a: float32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -3227,7 +3227,7 @@ vcvt_u32_f32_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcvtzu))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcvtq_u32_f32(a: float32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -3247,7 +3247,7 @@ vcvtq_u32_f32_(a) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 4))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdup_lane_s8(a: int8x8_t) -> int8x8_t { static_assert_imm3!(N); simd_shuffle8!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32]) @@ -3262,7 +3262,7 @@ pub unsafe fn vdup_lane_s8(a: int8x8_t) -> int8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 8))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = 
"1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdupq_laneq_s8(a: int8x16_t) -> int8x16_t { static_assert_imm4!(N); simd_shuffle16!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32]) @@ -3277,7 +3277,7 @@ pub unsafe fn vdupq_laneq_s8(a: int8x16_t) -> int8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdup_lane_s16(a: int16x4_t) -> int16x4_t { static_assert_imm2!(N); simd_shuffle4!(a, a, [N as u32, N as u32, N as u32, N as u32]) @@ -3292,7 +3292,7 @@ pub unsafe fn vdup_lane_s16(a: int16x4_t) -> int16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 4))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdupq_laneq_s16(a: int16x8_t) -> int16x8_t { static_assert_imm3!(N); simd_shuffle8!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32]) @@ -3307,7 +3307,7 @@ pub unsafe fn vdupq_laneq_s16(a: int16x8_t) -> int16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 1))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdup_lane_s32(a: int32x2_t) -> int32x2_t { static_assert_imm1!(N); simd_shuffle2!(a, a, [N as u32, N as u32]) @@ -3322,7 +3322,7 @@ pub unsafe fn vdup_lane_s32(a: int32x2_t) -> int32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdupq_laneq_s32(a: int32x4_t) -> int32x4_t { static_assert_imm2!(N); simd_shuffle4!(a, a, [N as u32, N as u32, N as u32, N as u32]) @@ -3337,7 +3337,7 @@ pub unsafe fn vdupq_laneq_s32(a: int32x4_t) -> int32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 8))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdup_laneq_s8(a: int8x16_t) -> int8x8_t { static_assert_imm4!(N); simd_shuffle8!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32]) @@ -3352,7 +3352,7 @@ pub unsafe fn vdup_laneq_s8(a: int8x16_t) -> int8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", 
N = 4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 4))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdup_laneq_s16(a: int16x8_t) -> int16x4_t { static_assert_imm3!(N); simd_shuffle4!(a, a, [N as u32, N as u32, N as u32, N as u32]) @@ -3367,7 +3367,7 @@ pub unsafe fn vdup_laneq_s16(a: int16x8_t) -> int16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdup_laneq_s32(a: int32x4_t) -> int32x2_t { static_assert_imm2!(N); simd_shuffle2!(a, a, [N as u32, N as u32]) @@ -3382,7 +3382,7 @@ pub unsafe fn vdup_laneq_s32(a: int32x4_t) -> int32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 4))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdupq_lane_s8(a: int8x8_t) -> int8x16_t { static_assert_imm3!(N); simd_shuffle16!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32]) @@ -3397,7 +3397,7 @@ pub unsafe fn vdupq_lane_s8(a: int8x8_t) -> int8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdupq_lane_s16(a: int16x4_t) -> int16x8_t { static_assert_imm2!(N); simd_shuffle8!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32]) @@ -3412,7 +3412,7 @@ pub unsafe fn vdupq_lane_s16(a: int16x4_t) -> int16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 1))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdupq_lane_s32(a: int32x2_t) -> int32x4_t { static_assert_imm1!(N); simd_shuffle4!(a, a, [N as u32, N as u32, N as u32, N as u32]) @@ -3427,7 +3427,7 @@ pub unsafe fn vdupq_lane_s32(a: int32x2_t) -> int32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 4))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdup_lane_u8(a: uint8x8_t) -> uint8x8_t { static_assert_imm3!(N); simd_shuffle8!(a, a, [N as u32, N as u32, N as u32, N 
as u32, N as u32, N as u32, N as u32, N as u32]) @@ -3442,7 +3442,7 @@ pub unsafe fn vdup_lane_u8(a: uint8x8_t) -> uint8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 8))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdupq_laneq_u8(a: uint8x16_t) -> uint8x16_t { static_assert_imm4!(N); simd_shuffle16!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32]) @@ -3457,7 +3457,7 @@ pub unsafe fn vdupq_laneq_u8(a: uint8x16_t) -> uint8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdup_lane_u16(a: uint16x4_t) -> uint16x4_t { static_assert_imm2!(N); simd_shuffle4!(a, a, [N as u32, N as u32, N as u32, N as u32]) @@ -3472,7 +3472,7 @@ pub unsafe fn vdup_lane_u16(a: uint16x4_t) -> uint16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 4))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdupq_laneq_u16(a: uint16x8_t) -> uint16x8_t { static_assert_imm3!(N); simd_shuffle8!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32]) @@ -3487,7 +3487,7 @@ pub unsafe fn vdupq_laneq_u16(a: uint16x8_t) -> uint16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 1))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdup_lane_u32(a: uint32x2_t) -> uint32x2_t { static_assert_imm1!(N); simd_shuffle2!(a, a, [N as u32, N as u32]) @@ -3502,7 +3502,7 @@ pub unsafe fn vdup_lane_u32(a: uint32x2_t) -> uint32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdupq_laneq_u32(a: uint32x4_t) -> uint32x4_t { static_assert_imm2!(N); simd_shuffle4!(a, a, [N as u32, N as u32, N as u32, N as u32]) @@ -3517,7 +3517,7 @@ pub unsafe fn vdupq_laneq_u32(a: uint32x4_t) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 8))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] 
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdup_laneq_u8(a: uint8x16_t) -> uint8x8_t { static_assert_imm4!(N); simd_shuffle8!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32]) @@ -3532,7 +3532,7 @@ pub unsafe fn vdup_laneq_u8(a: uint8x16_t) -> uint8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 4))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdup_laneq_u16(a: uint16x8_t) -> uint16x4_t { static_assert_imm3!(N); simd_shuffle4!(a, a, [N as u32, N as u32, N as u32, N as u32]) @@ -3547,7 +3547,7 @@ pub unsafe fn vdup_laneq_u16(a: uint16x8_t) -> uint16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdup_laneq_u32(a: uint32x4_t) -> uint32x2_t { static_assert_imm2!(N); simd_shuffle2!(a, a, [N as u32, N as u32]) @@ -3562,7 +3562,7 @@ pub unsafe fn vdup_laneq_u32(a: uint32x4_t) -> uint32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 4))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdupq_lane_u8(a: uint8x8_t) -> uint8x16_t { static_assert_imm3!(N); simd_shuffle16!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32]) @@ -3577,7 +3577,7 @@ pub unsafe fn vdupq_lane_u8(a: uint8x8_t) -> uint8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdupq_lane_u16(a: uint16x4_t) -> uint16x8_t { static_assert_imm2!(N); simd_shuffle8!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32]) @@ -3592,7 +3592,7 @@ pub unsafe fn vdupq_lane_u16(a: uint16x4_t) -> uint16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 1))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdupq_lane_u32(a: uint32x2_t) -> uint32x4_t { static_assert_imm1!(N); simd_shuffle4!(a, a, [N as u32, N as u32, N as u32, N as u32]) @@ -3607,7 +3607,7 @@ pub unsafe fn vdupq_lane_u32(a: uint32x2_t) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "arm"), 
assert_instr("vdup.8", N = 4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 4))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdup_lane_p8(a: poly8x8_t) -> poly8x8_t { static_assert_imm3!(N); simd_shuffle8!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32]) @@ -3622,7 +3622,7 @@ pub unsafe fn vdup_lane_p8(a: poly8x8_t) -> poly8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 8))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdupq_laneq_p8(a: poly8x16_t) -> poly8x16_t { static_assert_imm4!(N); simd_shuffle16!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32]) @@ -3637,7 +3637,7 @@ pub unsafe fn vdupq_laneq_p8(a: poly8x16_t) -> poly8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdup_lane_p16(a: poly16x4_t) -> poly16x4_t { static_assert_imm2!(N); simd_shuffle4!(a, a, [N as u32, N as u32, N as u32, N as u32]) @@ -3652,7 +3652,7 @@ pub unsafe fn vdup_lane_p16(a: poly16x4_t) -> poly16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 4))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdupq_laneq_p16(a: poly16x8_t) -> poly16x8_t { static_assert_imm3!(N); simd_shuffle8!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32]) @@ -3667,7 +3667,7 @@ pub unsafe fn vdupq_laneq_p16(a: poly16x8_t) -> poly16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 8))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdup_laneq_p8(a: poly8x16_t) -> poly8x8_t { static_assert_imm4!(N); simd_shuffle8!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32]) @@ -3682,7 +3682,7 @@ pub unsafe fn vdup_laneq_p8(a: poly8x16_t) -> poly8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 4))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] 
pub unsafe fn vdup_laneq_p16(a: poly16x8_t) -> poly16x4_t { static_assert_imm3!(N); simd_shuffle4!(a, a, [N as u32, N as u32, N as u32, N as u32]) @@ -3697,7 +3697,7 @@ pub unsafe fn vdup_laneq_p16(a: poly16x8_t) -> poly16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 4))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdupq_lane_p8(a: poly8x8_t) -> poly8x16_t { static_assert_imm3!(N); simd_shuffle16!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32]) @@ -3712,7 +3712,7 @@ pub unsafe fn vdupq_lane_p8(a: poly8x8_t) -> poly8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdupq_lane_p16(a: poly16x4_t) -> poly16x8_t { static_assert_imm2!(N); simd_shuffle8!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32]) @@ -3727,7 +3727,7 @@ pub unsafe fn vdupq_lane_p16(a: poly16x4_t) -> poly16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 1))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdupq_laneq_s64(a: int64x2_t) -> int64x2_t { static_assert_imm1!(N); simd_shuffle2!(a, a, [N as u32, N as u32]) @@ -3742,7 +3742,7 @@ pub unsafe fn vdupq_laneq_s64(a: int64x2_t) -> int64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 0))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdupq_lane_s64(a: int64x1_t) -> int64x2_t { static_assert!(N : i32 where N == 0); simd_shuffle2!(a, a, [N as u32, N as u32]) @@ -3757,7 +3757,7 @@ pub unsafe fn vdupq_lane_s64(a: int64x1_t) -> int64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 1))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdupq_laneq_u64(a: uint64x2_t) -> uint64x2_t { static_assert_imm1!(N); simd_shuffle2!(a, a, [N as u32, N as u32]) @@ -3772,7 +3772,7 @@ pub unsafe fn vdupq_laneq_u64(a: uint64x2_t) -> uint64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 0))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = 
"neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdupq_lane_u64(a: uint64x1_t) -> uint64x2_t { static_assert!(N : i32 where N == 0); simd_shuffle2!(a, a, [N as u32, N as u32]) @@ -3787,7 +3787,7 @@ pub unsafe fn vdupq_lane_u64(a: uint64x1_t) -> uint64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 1))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdup_lane_f32(a: float32x2_t) -> float32x2_t { static_assert_imm1!(N); simd_shuffle2!(a, a, [N as u32, N as u32]) @@ -3802,7 +3802,7 @@ pub unsafe fn vdup_lane_f32(a: float32x2_t) -> float32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdupq_laneq_f32(a: float32x4_t) -> float32x4_t { static_assert_imm2!(N); simd_shuffle4!(a, a, [N as u32, N as u32, N as u32, N as u32]) @@ -3817,7 +3817,7 @@ pub unsafe fn vdupq_laneq_f32(a: float32x4_t) -> float32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdup_laneq_f32(a: float32x4_t) -> float32x2_t { static_assert_imm2!(N); simd_shuffle2!(a, a, [N as u32, N as u32]) @@ -3832,7 +3832,7 @@ pub unsafe fn vdup_laneq_f32(a: float32x4_t) -> float32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 1))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdupq_lane_f32(a: float32x2_t) -> float32x4_t { static_assert_imm1!(N); simd_shuffle4!(a, a, [N as u32, N as u32, N as u32, N as u32]) @@ -3847,7 +3847,7 @@ pub unsafe fn vdupq_lane_f32(a: float32x2_t) -> float32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, N = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, N = 0))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdup_lane_s64(a: int64x1_t) -> int64x1_t { static_assert!(N : i32 where N == 0); a @@ -3862,7 +3862,7 @@ pub unsafe fn vdup_lane_s64(a: int64x1_t) -> int64x1_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, N = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, N = 0))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] 
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdup_lane_u64(a: uint64x1_t) -> uint64x1_t { static_assert!(N : i32 where N == 0); a @@ -3877,7 +3877,7 @@ pub unsafe fn vdup_lane_u64(a: uint64x1_t) -> uint64x1_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, N = 1))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdup_laneq_s64(a: int64x2_t) -> int64x1_t { static_assert_imm1!(N); transmute::(simd_extract(a, N as u32)) @@ -3892,7 +3892,7 @@ pub unsafe fn vdup_laneq_s64(a: int64x2_t) -> int64x1_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, N = 1))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdup_laneq_u64(a: uint64x2_t) -> uint64x1_t { static_assert_imm1!(N); transmute::(simd_extract(a, N as u32)) @@ -3907,7 +3907,7 @@ pub unsafe fn vdup_laneq_u64(a: uint64x2_t) -> uint64x1_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 7))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vext_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { static_assert_imm3!(N); match N & 0b111 { @@ -3932,7 +3932,7 @@ pub unsafe fn vext_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 15))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 15))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vextq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { static_assert_imm4!(N); match N & 0b1111 { @@ -3965,7 +3965,7 @@ pub unsafe fn vextq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 3))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vext_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { static_assert_imm2!(N); match N & 0b11 { @@ -3986,7 +3986,7 @@ pub unsafe fn vext_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 7))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vextq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { 
static_assert_imm3!(N); match N & 0b111 { @@ -4011,7 +4011,7 @@ pub unsafe fn vextq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 1))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vext_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { static_assert_imm1!(N); match N & 0b1 { @@ -4030,7 +4030,7 @@ pub unsafe fn vext_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 3))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vextq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { static_assert_imm2!(N); match N & 0b11 { @@ -4051,7 +4051,7 @@ pub unsafe fn vextq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 7))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vext_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { static_assert_imm3!(N); match N & 0b111 { @@ -4076,7 +4076,7 @@ pub unsafe fn vext_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 15))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 15))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vextq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { static_assert_imm4!(N); match N & 0b1111 { @@ -4109,7 +4109,7 @@ pub unsafe fn vextq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 3))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vext_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { static_assert_imm2!(N); match N & 0b11 { @@ -4130,7 +4130,7 @@ pub unsafe fn vext_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 7))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vextq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { static_assert_imm3!(N); match N & 0b111 { @@ -4155,7 +4155,7 @@ pub unsafe fn vextq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_ 
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 1))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vext_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { static_assert_imm1!(N); match N & 0b1 { @@ -4174,7 +4174,7 @@ pub unsafe fn vext_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 3))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vextq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { static_assert_imm2!(N); match N & 0b11 { @@ -4195,7 +4195,7 @@ pub unsafe fn vextq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_ #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 7))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vext_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { static_assert_imm3!(N); match N & 0b111 { @@ -4220,7 +4220,7 @@ pub unsafe fn vext_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 15))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 15))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vextq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { static_assert_imm4!(N); match N & 0b1111 { @@ -4253,7 +4253,7 @@ pub unsafe fn vextq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 3))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vext_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { static_assert_imm2!(N); match N & 0b11 { @@ -4274,7 +4274,7 @@ pub unsafe fn vext_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 7))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vextq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { static_assert_imm3!(N); match N & 0b111 { @@ -4299,7 +4299,7 @@ pub unsafe fn vextq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_ #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 
1))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vextq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { static_assert_imm1!(N); match N & 0b1 { @@ -4318,7 +4318,7 @@ pub unsafe fn vextq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 1))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vextq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { static_assert_imm1!(N); match N & 0b1 { @@ -4337,7 +4337,7 @@ pub unsafe fn vextq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_ #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 1))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vext_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { static_assert_imm1!(N); match N & 0b1 { @@ -4356,7 +4356,7 @@ pub unsafe fn vext_f32(a: float32x2_t, b: float32x2_t) -> float32x #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 3))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vextq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { static_assert_imm2!(N); match N & 0b11 { @@ -4376,7 +4376,7 @@ pub unsafe fn vextq_f32(a: float32x4_t, b: float32x4_t) -> float32 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmla_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { simd_add(a, simd_mul(b, c)) } @@ -4389,7 +4389,7 @@ pub unsafe fn vmla_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlaq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { simd_add(a, simd_mul(b, c)) } @@ -4402,7 +4402,7 @@ pub unsafe fn vmlaq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))] -#[cfg_attr(target_arch = 
"aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmla_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { simd_add(a, simd_mul(b, c)) } @@ -4415,7 +4415,7 @@ pub unsafe fn vmla_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlaq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { simd_add(a, simd_mul(b, c)) } @@ -4428,7 +4428,7 @@ pub unsafe fn vmlaq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmla_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { simd_add(a, simd_mul(b, c)) } @@ -4441,7 +4441,7 @@ pub unsafe fn vmla_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlaq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { simd_add(a, simd_mul(b, c)) } @@ -4454,7 +4454,7 @@ pub unsafe fn vmlaq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmla_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { simd_add(a, simd_mul(b, c)) } @@ -4467,7 +4467,7 @@ pub unsafe fn vmla_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlaq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { simd_add(a, simd_mul(b, c)) } @@ -4480,7 +4480,7 @@ pub unsafe fn vmlaq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_ #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] #[cfg_attr(all(test, target_arch = 
"aarch64"), assert_instr(mla))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmla_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t { simd_add(a, simd_mul(b, c)) } @@ -4493,7 +4493,7 @@ pub unsafe fn vmla_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_ #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlaq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { simd_add(a, simd_mul(b, c)) } @@ -4506,7 +4506,7 @@ pub unsafe fn vmlaq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmla_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t { simd_add(a, simd_mul(b, c)) } @@ -4519,7 +4519,7 @@ pub unsafe fn vmla_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_ #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlaq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { simd_add(a, simd_mul(b, c)) } @@ -4532,7 +4532,7 @@ pub unsafe fn vmlaq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { simd_add(a, simd_mul(b, c)) } @@ -4545,7 +4545,7 @@ pub unsafe fn vmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float3 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { simd_add(a, simd_mul(b, c)) } @@ -4558,7 +4558,7 @@ pub unsafe fn vmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] 
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmla_n_s16(a: int16x4_t, b: int16x4_t, c: i16) -> int16x4_t { vmla_s16(a, b, vdup_n_s16(c)) } @@ -4571,7 +4571,7 @@ pub unsafe fn vmla_n_s16(a: int16x4_t, b: int16x4_t, c: i16) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlaq_n_s16(a: int16x8_t, b: int16x8_t, c: i16) -> int16x8_t { vmlaq_s16(a, b, vdupq_n_s16(c)) } @@ -4584,7 +4584,7 @@ pub unsafe fn vmlaq_n_s16(a: int16x8_t, b: int16x8_t, c: i16) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmla_n_s32(a: int32x2_t, b: int32x2_t, c: i32) -> int32x2_t { vmla_s32(a, b, vdup_n_s32(c)) } @@ -4597,7 +4597,7 @@ pub unsafe fn vmla_n_s32(a: int32x2_t, b: int32x2_t, c: i32) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlaq_n_s32(a: int32x4_t, b: int32x4_t, c: i32) -> int32x4_t { vmlaq_s32(a, b, vdupq_n_s32(c)) } @@ -4610,7 +4610,7 @@ pub unsafe fn vmlaq_n_s32(a: int32x4_t, b: int32x4_t, c: i32) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmla_n_u16(a: uint16x4_t, b: uint16x4_t, c: u16) -> uint16x4_t { vmla_u16(a, b, vdup_n_u16(c)) } @@ -4623,7 +4623,7 @@ pub unsafe fn vmla_n_u16(a: uint16x4_t, b: uint16x4_t, c: u16) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlaq_n_u16(a: uint16x8_t, b: uint16x8_t, c: u16) -> uint16x8_t { vmlaq_u16(a, b, vdupq_n_u16(c)) } @@ -4636,7 +4636,7 @@ pub unsafe fn vmlaq_n_u16(a: uint16x8_t, b: uint16x8_t, c: u16) -> uint16x8_t { #[cfg_attr(target_arch = 
"arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmla_n_u32(a: uint32x2_t, b: uint32x2_t, c: u32) -> uint32x2_t { vmla_u32(a, b, vdup_n_u32(c)) } @@ -4649,7 +4649,7 @@ pub unsafe fn vmla_n_u32(a: uint32x2_t, b: uint32x2_t, c: u32) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlaq_n_u32(a: uint32x4_t, b: uint32x4_t, c: u32) -> uint32x4_t { vmlaq_u32(a, b, vdupq_n_u32(c)) } @@ -4662,7 +4662,7 @@ pub unsafe fn vmlaq_n_u32(a: uint32x4_t, b: uint32x4_t, c: u32) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmla_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { vmla_f32(a, b, vdup_n_f32(c)) } @@ -4675,7 +4675,7 @@ pub unsafe fn vmla_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlaq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t { vmlaq_f32(a, b, vdupq_n_f32(c)) } @@ -4689,7 +4689,7 @@ pub unsafe fn vmlaq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmla_lane_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { static_assert_imm2!(LANE); vmla_s16(a, b, simd_shuffle4!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4704,7 +4704,7 @@ pub unsafe fn vmla_lane_s16(a: int16x4_t, b: int16x4_t, c: int1 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmla_laneq_s16(a: int16x4_t, b: int16x4_t, c: int16x8_t) -> int16x4_t { static_assert_imm3!(LANE); 
vmla_s16(a, b, simd_shuffle4!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4719,7 +4719,7 @@ pub unsafe fn vmla_laneq_s16(a: int16x4_t, b: int16x4_t, c: int #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlaq_lane_s16(a: int16x8_t, b: int16x8_t, c: int16x4_t) -> int16x8_t { static_assert_imm2!(LANE); vmlaq_s16(a, b, simd_shuffle8!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4734,7 +4734,7 @@ pub unsafe fn vmlaq_lane_s16(a: int16x8_t, b: int16x8_t, c: int #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlaq_laneq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { static_assert_imm3!(LANE); vmlaq_s16(a, b, simd_shuffle8!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4749,7 +4749,7 @@ pub unsafe fn vmlaq_laneq_s16(a: int16x8_t, b: int16x8_t, c: in #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmla_lane_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { static_assert_imm1!(LANE); vmla_s32(a, b, simd_shuffle2!(c, c, [LANE as u32, LANE as u32])) @@ -4764,7 +4764,7 @@ pub unsafe fn vmla_lane_s32(a: int32x2_t, b: int32x2_t, c: int3 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmla_laneq_s32(a: int32x2_t, b: int32x2_t, c: int32x4_t) -> int32x2_t { static_assert_imm2!(LANE); vmla_s32(a, b, simd_shuffle2!(c, c, [LANE as u32, LANE as u32])) @@ -4779,7 +4779,7 @@ pub unsafe fn vmla_laneq_s32(a: int32x2_t, b: int32x2_t, c: int #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlaq_lane_s32(a: int32x4_t, b: int32x4_t, c: int32x2_t) -> int32x4_t { static_assert_imm1!(LANE); vmlaq_s32(a, b, simd_shuffle4!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4794,7 +4794,7 @@ pub unsafe fn vmlaq_lane_s32(a: int32x4_t, b: 
int32x4_t, c: int #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlaq_laneq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { static_assert_imm2!(LANE); vmlaq_s32(a, b, simd_shuffle4!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4809,7 +4809,7 @@ pub unsafe fn vmlaq_laneq_s32(a: int32x4_t, b: int32x4_t, c: in #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmla_lane_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t { static_assert_imm2!(LANE); vmla_u16(a, b, simd_shuffle4!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4824,7 +4824,7 @@ pub unsafe fn vmla_lane_u16(a: uint16x4_t, b: uint16x4_t, c: ui #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmla_laneq_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x8_t) -> uint16x4_t { static_assert_imm3!(LANE); vmla_u16(a, b, simd_shuffle4!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4839,7 +4839,7 @@ pub unsafe fn vmla_laneq_u16(a: uint16x4_t, b: uint16x4_t, c: u #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlaq_lane_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x4_t) -> uint16x8_t { static_assert_imm2!(LANE); vmlaq_u16(a, b, simd_shuffle8!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4854,7 +4854,7 @@ pub unsafe fn vmlaq_lane_u16(a: uint16x8_t, b: uint16x8_t, c: u #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlaq_laneq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { static_assert_imm3!(LANE); vmlaq_u16(a, b, simd_shuffle8!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4869,7 +4869,7 @@ pub unsafe fn vmlaq_laneq_u16(a: uint16x8_t, b: uint16x8_t, c: #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE 
= 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmla_lane_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t { static_assert_imm1!(LANE); vmla_u32(a, b, simd_shuffle2!(c, c, [LANE as u32, LANE as u32])) @@ -4884,7 +4884,7 @@ pub unsafe fn vmla_lane_u32(a: uint32x2_t, b: uint32x2_t, c: ui #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmla_laneq_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x4_t) -> uint32x2_t { static_assert_imm2!(LANE); vmla_u32(a, b, simd_shuffle2!(c, c, [LANE as u32, LANE as u32])) @@ -4899,7 +4899,7 @@ pub unsafe fn vmla_laneq_u32(a: uint32x2_t, b: uint32x2_t, c: u #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlaq_lane_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x2_t) -> uint32x4_t { static_assert_imm1!(LANE); vmlaq_u32(a, b, simd_shuffle4!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4914,7 +4914,7 @@ pub unsafe fn vmlaq_lane_u32(a: uint32x4_t, b: uint32x4_t, c: u #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlaq_laneq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { static_assert_imm2!(LANE); vmlaq_u32(a, b, simd_shuffle4!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4929,7 +4929,7 @@ pub unsafe fn vmlaq_laneq_u32(a: uint32x4_t, b: uint32x4_t, c: #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmla_lane_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { static_assert_imm1!(LANE); vmla_f32(a, b, simd_shuffle2!(c, c, [LANE as u32, LANE as u32])) @@ -4944,7 +4944,7 @@ pub unsafe fn vmla_lane_f32(a: float32x2_t, b: float32x2_t, c: #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = 
"neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmla_laneq_f32(a: float32x2_t, b: float32x2_t, c: float32x4_t) -> float32x2_t { static_assert_imm2!(LANE); vmla_f32(a, b, simd_shuffle2!(c, c, [LANE as u32, LANE as u32])) @@ -4959,7 +4959,7 @@ pub unsafe fn vmla_laneq_f32(a: float32x2_t, b: float32x2_t, c: #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlaq_lane_f32(a: float32x4_t, b: float32x4_t, c: float32x2_t) -> float32x4_t { static_assert_imm1!(LANE); vmlaq_f32(a, b, simd_shuffle4!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4974,7 +4974,7 @@ pub unsafe fn vmlaq_lane_f32(a: float32x4_t, b: float32x4_t, c: #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlaq_laneq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { static_assert_imm2!(LANE); vmlaq_f32(a, b, simd_shuffle4!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4988,7 +4988,7 @@ pub unsafe fn vmlaq_laneq_f32(a: float32x4_t, b: float32x4_t, c #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlal))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlal_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t { simd_add(a, vmull_s8(b, c)) } @@ -5001,7 +5001,7 @@ pub unsafe fn vmlal_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlal))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlal_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { simd_add(a, vmull_s16(b, c)) } @@ -5014,7 +5014,7 @@ pub unsafe fn vmlal_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlal))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { simd_add(a, vmull_s32(b, c)) } @@ -5027,7 +5027,7 @@ pub unsafe fn vmlal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] 
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlal))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlal_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t { simd_add(a, vmull_u8(b, c)) } @@ -5040,7 +5040,7 @@ pub unsafe fn vmlal_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlal))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlal_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t { simd_add(a, vmull_u16(b, c)) } @@ -5053,7 +5053,7 @@ pub unsafe fn vmlal_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlal))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlal_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t { simd_add(a, vmull_u32(b, c)) } @@ -5066,7 +5066,7 @@ pub unsafe fn vmlal_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlal))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlal_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { vmlal_s16(a, b, vdup_n_s16(c)) } @@ -5079,7 +5079,7 @@ pub unsafe fn vmlal_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlal))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlal_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { vmlal_s32(a, b, vdup_n_s32(c)) } @@ -5092,7 +5092,7 @@ pub unsafe fn vmlal_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlal))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlal_n_u16(a: uint32x4_t, b: uint16x4_t, c: u16) -> uint32x4_t { vmlal_u16(a, b, vdup_n_u16(c)) } @@ -5105,7 +5105,7 @@ pub unsafe fn vmlal_n_u16(a: uint32x4_t, b: uint16x4_t, c: u16) -> 
uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlal))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlal_n_u32(a: uint64x2_t, b: uint32x2_t, c: u32) -> uint64x2_t { vmlal_u32(a, b, vdup_n_u32(c)) } @@ -5119,7 +5119,7 @@ pub unsafe fn vmlal_n_u32(a: uint64x2_t, b: uint32x2_t, c: u32) -> uint64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s16", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlal, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlal_lane_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { static_assert_imm2!(LANE); vmlal_s16(a, b, simd_shuffle4!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -5134,7 +5134,7 @@ pub unsafe fn vmlal_lane_s16(a: int32x4_t, b: int16x4_t, c: int #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s16", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlal, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlal_laneq_s16(a: int32x4_t, b: int16x4_t, c: int16x8_t) -> int32x4_t { static_assert_imm3!(LANE); vmlal_s16(a, b, simd_shuffle4!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -5149,7 +5149,7 @@ pub unsafe fn vmlal_laneq_s16(a: int32x4_t, b: int16x4_t, c: in #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlal, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlal_lane_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { static_assert_imm1!(LANE); vmlal_s32(a, b, simd_shuffle2!(c, c, [LANE as u32, LANE as u32])) @@ -5164,7 +5164,7 @@ pub unsafe fn vmlal_lane_s32(a: int64x2_t, b: int32x2_t, c: int #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlal, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlal_laneq_s32(a: int64x2_t, b: int32x2_t, c: int32x4_t) -> int64x2_t { static_assert_imm2!(LANE); vmlal_s32(a, b, simd_shuffle2!(c, c, [LANE as u32, LANE as u32])) @@ -5179,7 +5179,7 @@ pub unsafe fn vmlal_laneq_s32(a: int64x2_t, b: int32x2_t, c: in #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u16", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlal, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] 
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlal_lane_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t { static_assert_imm2!(LANE); vmlal_u16(a, b, simd_shuffle4!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -5194,7 +5194,7 @@ pub unsafe fn vmlal_lane_u16(a: uint32x4_t, b: uint16x4_t, c: u #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u16", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlal, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlal_laneq_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x8_t) -> uint32x4_t { static_assert_imm3!(LANE); vmlal_u16(a, b, simd_shuffle4!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -5209,7 +5209,7 @@ pub unsafe fn vmlal_laneq_u16(a: uint32x4_t, b: uint16x4_t, c: #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlal, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlal_lane_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t { static_assert_imm1!(LANE); vmlal_u32(a, b, simd_shuffle2!(c, c, [LANE as u32, LANE as u32])) @@ -5224,7 +5224,7 @@ pub unsafe fn vmlal_lane_u32(a: uint64x2_t, b: uint32x2_t, c: u #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlal, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlal_laneq_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x4_t) -> uint64x2_t { static_assert_imm2!(LANE); vmlal_u32(a, b, simd_shuffle2!(c, c, [LANE as u32, LANE as u32])) @@ -5238,7 +5238,7 @@ pub unsafe fn vmlal_laneq_u32(a: uint64x2_t, b: uint32x2_t, c: #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { simd_sub(a, simd_mul(b, c)) } @@ -5251,7 +5251,7 @@ pub unsafe fn vmls_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { simd_sub(a, simd_mul(b, c)) } @@ -5264,7 +5264,7 @@ pub unsafe fn vmlsq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { 
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { simd_sub(a, simd_mul(b, c)) } @@ -5277,7 +5277,7 @@ pub unsafe fn vmls_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { simd_sub(a, simd_mul(b, c)) } @@ -5290,7 +5290,7 @@ pub unsafe fn vmlsq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { simd_sub(a, simd_mul(b, c)) } @@ -5303,7 +5303,7 @@ pub unsafe fn vmls_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { simd_sub(a, simd_mul(b, c)) } @@ -5316,7 +5316,7 @@ pub unsafe fn vmlsq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { simd_sub(a, simd_mul(b, c)) } @@ -5329,7 +5329,7 @@ pub unsafe fn vmls_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { simd_sub(a, simd_mul(b, c)) } @@ -5342,7 +5342,7 @@ pub unsafe fn vmlsq_u8(a: 
uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_ #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t { simd_sub(a, simd_mul(b, c)) } @@ -5355,7 +5355,7 @@ pub unsafe fn vmls_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_ #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { simd_sub(a, simd_mul(b, c)) } @@ -5368,7 +5368,7 @@ pub unsafe fn vmlsq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t { simd_sub(a, simd_mul(b, c)) } @@ -5381,7 +5381,7 @@ pub unsafe fn vmls_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_ #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { simd_sub(a, simd_mul(b, c)) } @@ -5394,7 +5394,7 @@ pub unsafe fn vmlsq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { simd_sub(a, simd_mul(b, c)) } @@ -5407,7 +5407,7 @@ pub unsafe fn vmls_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float3 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> 
float32x4_t { simd_sub(a, simd_mul(b, c)) } @@ -5420,7 +5420,7 @@ pub unsafe fn vmlsq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_n_s16(a: int16x4_t, b: int16x4_t, c: i16) -> int16x4_t { vmls_s16(a, b, vdup_n_s16(c)) } @@ -5433,7 +5433,7 @@ pub unsafe fn vmls_n_s16(a: int16x4_t, b: int16x4_t, c: i16) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_n_s16(a: int16x8_t, b: int16x8_t, c: i16) -> int16x8_t { vmlsq_s16(a, b, vdupq_n_s16(c)) } @@ -5446,7 +5446,7 @@ pub unsafe fn vmlsq_n_s16(a: int16x8_t, b: int16x8_t, c: i16) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_n_s32(a: int32x2_t, b: int32x2_t, c: i32) -> int32x2_t { vmls_s32(a, b, vdup_n_s32(c)) } @@ -5459,7 +5459,7 @@ pub unsafe fn vmls_n_s32(a: int32x2_t, b: int32x2_t, c: i32) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_n_s32(a: int32x4_t, b: int32x4_t, c: i32) -> int32x4_t { vmlsq_s32(a, b, vdupq_n_s32(c)) } @@ -5472,7 +5472,7 @@ pub unsafe fn vmlsq_n_s32(a: int32x4_t, b: int32x4_t, c: i32) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_n_u16(a: uint16x4_t, b: uint16x4_t, c: u16) -> uint16x4_t { vmls_u16(a, b, vdup_n_u16(c)) } @@ -5485,7 +5485,7 @@ pub unsafe fn vmls_n_u16(a: uint16x4_t, b: uint16x4_t, c: u16) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_n_u16(a: 
uint16x8_t, b: uint16x8_t, c: u16) -> uint16x8_t { vmlsq_u16(a, b, vdupq_n_u16(c)) } @@ -5498,7 +5498,7 @@ pub unsafe fn vmlsq_n_u16(a: uint16x8_t, b: uint16x8_t, c: u16) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_n_u32(a: uint32x2_t, b: uint32x2_t, c: u32) -> uint32x2_t { vmls_u32(a, b, vdup_n_u32(c)) } @@ -5511,7 +5511,7 @@ pub unsafe fn vmls_n_u32(a: uint32x2_t, b: uint32x2_t, c: u32) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_n_u32(a: uint32x4_t, b: uint32x4_t, c: u32) -> uint32x4_t { vmlsq_u32(a, b, vdupq_n_u32(c)) } @@ -5524,7 +5524,7 @@ pub unsafe fn vmlsq_n_u32(a: uint32x4_t, b: uint32x4_t, c: u32) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { vmls_f32(a, b, vdup_n_f32(c)) } @@ -5537,7 +5537,7 @@ pub unsafe fn vmls_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t { vmlsq_f32(a, b, vdupq_n_f32(c)) } @@ -5551,7 +5551,7 @@ pub unsafe fn vmlsq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_lane_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { static_assert_imm2!(LANE); vmls_s16(a, b, simd_shuffle4!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -5566,7 +5566,7 @@ pub unsafe fn vmls_lane_s16(a: int16x4_t, b: int16x4_t, c: int1 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = 
"1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_laneq_s16(a: int16x4_t, b: int16x4_t, c: int16x8_t) -> int16x4_t { static_assert_imm3!(LANE); vmls_s16(a, b, simd_shuffle4!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -5581,7 +5581,7 @@ pub unsafe fn vmls_laneq_s16(a: int16x4_t, b: int16x4_t, c: int #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_lane_s16(a: int16x8_t, b: int16x8_t, c: int16x4_t) -> int16x8_t { static_assert_imm2!(LANE); vmlsq_s16(a, b, simd_shuffle8!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -5596,7 +5596,7 @@ pub unsafe fn vmlsq_lane_s16(a: int16x8_t, b: int16x8_t, c: int #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_laneq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { static_assert_imm3!(LANE); vmlsq_s16(a, b, simd_shuffle8!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -5611,7 +5611,7 @@ pub unsafe fn vmlsq_laneq_s16(a: int16x8_t, b: int16x8_t, c: in #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_lane_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { static_assert_imm1!(LANE); vmls_s32(a, b, simd_shuffle2!(c, c, [LANE as u32, LANE as u32])) @@ -5626,7 +5626,7 @@ pub unsafe fn vmls_lane_s32(a: int32x2_t, b: int32x2_t, c: int3 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_laneq_s32(a: int32x2_t, b: int32x2_t, c: int32x4_t) -> int32x2_t { static_assert_imm2!(LANE); vmls_s32(a, b, simd_shuffle2!(c, c, [LANE as u32, LANE as u32])) @@ -5641,7 +5641,7 @@ pub unsafe fn vmls_laneq_s32(a: int32x2_t, b: int32x2_t, c: int #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_lane_s32(a: int32x4_t, b: 
int32x4_t, c: int32x2_t) -> int32x4_t { static_assert_imm1!(LANE); vmlsq_s32(a, b, simd_shuffle4!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -5656,7 +5656,7 @@ pub unsafe fn vmlsq_lane_s32(a: int32x4_t, b: int32x4_t, c: int #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_laneq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { static_assert_imm2!(LANE); vmlsq_s32(a, b, simd_shuffle4!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -5671,7 +5671,7 @@ pub unsafe fn vmlsq_laneq_s32(a: int32x4_t, b: int32x4_t, c: in #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_lane_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t { static_assert_imm2!(LANE); vmls_u16(a, b, simd_shuffle4!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -5686,7 +5686,7 @@ pub unsafe fn vmls_lane_u16(a: uint16x4_t, b: uint16x4_t, c: ui #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_laneq_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x8_t) -> uint16x4_t { static_assert_imm3!(LANE); vmls_u16(a, b, simd_shuffle4!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -5701,7 +5701,7 @@ pub unsafe fn vmls_laneq_u16(a: uint16x4_t, b: uint16x4_t, c: u #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_lane_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x4_t) -> uint16x8_t { static_assert_imm2!(LANE); vmlsq_u16(a, b, simd_shuffle8!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -5716,7 +5716,7 @@ pub unsafe fn vmlsq_lane_u16(a: uint16x8_t, b: uint16x8_t, c: u #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_laneq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { static_assert_imm3!(LANE); vmlsq_u16(a, b, simd_shuffle8!(c, c, [LANE as u32, LANE as u32, LANE as 
u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -5731,7 +5731,7 @@ pub unsafe fn vmlsq_laneq_u16(a: uint16x8_t, b: uint16x8_t, c: #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_lane_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t { static_assert_imm1!(LANE); vmls_u32(a, b, simd_shuffle2!(c, c, [LANE as u32, LANE as u32])) @@ -5746,7 +5746,7 @@ pub unsafe fn vmls_lane_u32(a: uint32x2_t, b: uint32x2_t, c: ui #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_laneq_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x4_t) -> uint32x2_t { static_assert_imm2!(LANE); vmls_u32(a, b, simd_shuffle2!(c, c, [LANE as u32, LANE as u32])) @@ -5761,7 +5761,7 @@ pub unsafe fn vmls_laneq_u32(a: uint32x2_t, b: uint32x2_t, c: u #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_lane_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x2_t) -> uint32x4_t { static_assert_imm1!(LANE); vmlsq_u32(a, b, simd_shuffle4!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -5776,7 +5776,7 @@ pub unsafe fn vmlsq_lane_u32(a: uint32x4_t, b: uint32x4_t, c: u #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_laneq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { static_assert_imm2!(LANE); vmlsq_u32(a, b, simd_shuffle4!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -5791,7 +5791,7 @@ pub unsafe fn vmlsq_laneq_u32(a: uint32x4_t, b: uint32x4_t, c: #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_lane_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { static_assert_imm1!(LANE); vmls_f32(a, b, simd_shuffle2!(c, c, [LANE as u32, LANE as u32])) @@ -5806,7 +5806,7 @@ pub unsafe fn vmls_lane_f32(a: float32x2_t, b: float32x2_t, c: #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32", LANE = 1))] #[cfg_attr(all(test, 
target_arch = "aarch64"), assert_instr(fmul, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_laneq_f32(a: float32x2_t, b: float32x2_t, c: float32x4_t) -> float32x2_t { static_assert_imm2!(LANE); vmls_f32(a, b, simd_shuffle2!(c, c, [LANE as u32, LANE as u32])) @@ -5821,7 +5821,7 @@ pub unsafe fn vmls_laneq_f32(a: float32x2_t, b: float32x2_t, c: #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_lane_f32(a: float32x4_t, b: float32x4_t, c: float32x2_t) -> float32x4_t { static_assert_imm1!(LANE); vmlsq_f32(a, b, simd_shuffle4!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -5836,7 +5836,7 @@ pub unsafe fn vmlsq_lane_f32(a: float32x4_t, b: float32x4_t, c: #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_laneq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { static_assert_imm2!(LANE); vmlsq_f32(a, b, simd_shuffle4!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -5850,7 +5850,7 @@ pub unsafe fn vmlsq_laneq_f32(a: float32x4_t, b: float32x4_t, c #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlsl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsl_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t { simd_sub(a, vmull_s8(b, c)) } @@ -5863,7 +5863,7 @@ pub unsafe fn vmlsl_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlsl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsl_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { simd_sub(a, vmull_s16(b, c)) } @@ -5876,7 +5876,7 @@ pub unsafe fn vmlsl_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlsl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsl_s32(a: int64x2_t, b: int32x2_t, 
c: int32x2_t) -> int64x2_t { simd_sub(a, vmull_s32(b, c)) } @@ -5889,7 +5889,7 @@ pub unsafe fn vmlsl_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlsl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsl_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t { simd_sub(a, vmull_u8(b, c)) } @@ -5902,7 +5902,7 @@ pub unsafe fn vmlsl_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlsl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsl_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t { simd_sub(a, vmull_u16(b, c)) } @@ -5915,7 +5915,7 @@ pub unsafe fn vmlsl_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlsl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsl_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t { simd_sub(a, vmull_u32(b, c)) } @@ -5928,7 +5928,7 @@ pub unsafe fn vmlsl_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlsl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsl_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { vmlsl_s16(a, b, vdup_n_s16(c)) } @@ -5941,7 +5941,7 @@ pub unsafe fn vmlsl_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlsl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsl_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { vmlsl_s32(a, b, vdup_n_s32(c)) } @@ -5954,7 +5954,7 @@ pub unsafe fn vmlsl_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlsl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = 
"neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsl_n_u16(a: uint32x4_t, b: uint16x4_t, c: u16) -> uint32x4_t { vmlsl_u16(a, b, vdup_n_u16(c)) } @@ -5967,7 +5967,7 @@ pub unsafe fn vmlsl_n_u16(a: uint32x4_t, b: uint16x4_t, c: u16) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlsl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsl_n_u32(a: uint64x2_t, b: uint32x2_t, c: u32) -> uint64x2_t { vmlsl_u32(a, b, vdup_n_u32(c)) } @@ -5981,7 +5981,7 @@ pub unsafe fn vmlsl_n_u32(a: uint64x2_t, b: uint32x2_t, c: u32) -> uint64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s16", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlsl, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsl_lane_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { static_assert_imm2!(LANE); vmlsl_s16(a, b, simd_shuffle4!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -5996,7 +5996,7 @@ pub unsafe fn vmlsl_lane_s16(a: int32x4_t, b: int16x4_t, c: int #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s16", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlsl, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsl_laneq_s16(a: int32x4_t, b: int16x4_t, c: int16x8_t) -> int32x4_t { static_assert_imm3!(LANE); vmlsl_s16(a, b, simd_shuffle4!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -6011,7 +6011,7 @@ pub unsafe fn vmlsl_laneq_s16(a: int32x4_t, b: int16x4_t, c: in #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlsl, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsl_lane_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { static_assert_imm1!(LANE); vmlsl_s32(a, b, simd_shuffle2!(c, c, [LANE as u32, LANE as u32])) @@ -6026,7 +6026,7 @@ pub unsafe fn vmlsl_lane_s32(a: int64x2_t, b: int32x2_t, c: int #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlsl, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsl_laneq_s32(a: int64x2_t, b: int32x2_t, c: int32x4_t) -> int64x2_t { static_assert_imm2!(LANE); vmlsl_s32(a, b, simd_shuffle2!(c, c, [LANE as u32, LANE as u32])) @@ -6041,7 +6041,7 @@ pub unsafe fn vmlsl_laneq_s32(a: int64x2_t, b: int32x2_t, c: in #[cfg_attr(all(test, target_arch = "arm"), 
assert_instr("vmlsl.u16", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlsl, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsl_lane_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t { static_assert_imm2!(LANE); vmlsl_u16(a, b, simd_shuffle4!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -6056,7 +6056,7 @@ pub unsafe fn vmlsl_lane_u16(a: uint32x4_t, b: uint16x4_t, c: u #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u16", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlsl, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsl_laneq_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x8_t) -> uint32x4_t { static_assert_imm3!(LANE); vmlsl_u16(a, b, simd_shuffle4!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -6071,7 +6071,7 @@ pub unsafe fn vmlsl_laneq_u16(a: uint32x4_t, b: uint16x4_t, c: #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlsl, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsl_lane_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t { static_assert_imm1!(LANE); vmlsl_u32(a, b, simd_shuffle2!(c, c, [LANE as u32, LANE as u32])) @@ -6086,7 +6086,7 @@ pub unsafe fn vmlsl_lane_u32(a: uint64x2_t, b: uint32x2_t, c: u #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlsl, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsl_laneq_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x4_t) -> uint64x2_t { static_assert_imm2!(LANE); vmlsl_u32(a, b, simd_shuffle2!(c, c, [LANE as u32, LANE as u32])) @@ -6100,7 +6100,7 @@ pub unsafe fn vmlsl_laneq_u32(a: uint64x2_t, b: uint32x2_t, c: #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(neg))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vneg_s8(a: int8x8_t) -> int8x8_t { simd_neg(a) } @@ -6113,7 +6113,7 @@ pub unsafe fn vneg_s8(a: int8x8_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(neg))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vnegq_s8(a: int8x16_t) 
-> int8x16_t { simd_neg(a) } @@ -6126,7 +6126,7 @@ pub unsafe fn vnegq_s8(a: int8x16_t) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(neg))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vneg_s16(a: int16x4_t) -> int16x4_t { simd_neg(a) } @@ -6139,7 +6139,7 @@ pub unsafe fn vneg_s16(a: int16x4_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(neg))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vnegq_s16(a: int16x8_t) -> int16x8_t { simd_neg(a) } @@ -6152,7 +6152,7 @@ pub unsafe fn vnegq_s16(a: int16x8_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(neg))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vneg_s32(a: int32x2_t) -> int32x2_t { simd_neg(a) } @@ -6165,7 +6165,7 @@ pub unsafe fn vneg_s32(a: int32x2_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(neg))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vnegq_s32(a: int32x4_t) -> int32x4_t { simd_neg(a) } @@ -6178,7 +6178,7 @@ pub unsafe fn vnegq_s32(a: int32x4_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fneg))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vneg_f32(a: float32x2_t) -> float32x2_t { simd_neg(a) } @@ -6191,7 +6191,7 @@ pub unsafe fn vneg_f32(a: float32x2_t) -> float32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fneg))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vnegq_f32(a: float32x4_t) -> float32x4_t { simd_neg(a) } @@ -6204,7 +6204,7 @@ pub unsafe fn vnegq_f32(a: float32x4_t) -> float32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqneg))] -#[cfg_attr(target_arch = "aarch64", stable(feature = 
"neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqneg_s8(a: int8x8_t) -> int8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6223,7 +6223,7 @@ vqneg_s8_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqneg))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqnegq_s8(a: int8x16_t) -> int8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6242,7 +6242,7 @@ vqnegq_s8_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqneg))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqneg_s16(a: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6261,7 +6261,7 @@ vqneg_s16_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqneg))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqnegq_s16(a: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6280,7 +6280,7 @@ vqnegq_s16_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqneg))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqneg_s32(a: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6299,7 +6299,7 @@ vqneg_s32_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqneg))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqnegq_s32(a: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6318,7 +6318,7 @@ vqnegq_s32_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqsub))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6337,7 +6337,7 @@ vqsub_u8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = 
"arm"), assert_instr("vqsub.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqsub))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6356,7 +6356,7 @@ vqsubq_u8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqsub))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6375,7 +6375,7 @@ vqsub_u16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqsub))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6394,7 +6394,7 @@ vqsubq_u16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqsub))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6413,7 +6413,7 @@ vqsub_u32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqsub))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6432,7 +6432,7 @@ vqsubq_u32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u64"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqsub))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqsub_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6451,7 +6451,7 @@ vqsub_u64_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u64"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqsub))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = 
"neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqsubq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6470,7 +6470,7 @@ vqsubq_u64_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqsub))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6489,7 +6489,7 @@ vqsub_s8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqsub))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6508,7 +6508,7 @@ vqsubq_s8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqsub))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6527,7 +6527,7 @@ vqsub_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqsub))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6546,7 +6546,7 @@ vqsubq_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqsub))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6565,7 +6565,7 @@ vqsub_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqsub))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6584,7 +6584,7 @@ vqsubq_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, 
target_arch = "arm"), assert_instr("vqsub.s64"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqsub))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqsub_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6603,7 +6603,7 @@ vqsub_s64_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s64"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqsub))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqsubq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6622,7 +6622,7 @@ vqsubq_s64_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uhadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vhadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6641,7 +6641,7 @@ vhadd_u8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uhadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vhaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6660,7 +6660,7 @@ vhaddq_u8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uhadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vhadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6679,7 +6679,7 @@ vhadd_u16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uhadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vhaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6698,7 +6698,7 @@ vhaddq_u16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uhadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = 
"neon_intrinsics", since = "1.59.0"))] pub unsafe fn vhadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6717,7 +6717,7 @@ vhadd_u32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uhadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vhaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6736,7 +6736,7 @@ vhaddq_u32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6755,7 +6755,7 @@ vhadd_s8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6774,7 +6774,7 @@ vhaddq_s8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6793,7 +6793,7 @@ vhadd_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6812,7 +6812,7 @@ vhaddq_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6831,7 +6831,7 @@ vhadd_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, 
target_arch = "arm"), assert_instr("vhadd.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6850,7 +6850,7 @@ vhaddq_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urhadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrhadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6869,7 +6869,7 @@ vrhadd_u8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urhadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrhaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6888,7 +6888,7 @@ vrhaddq_u8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urhadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrhadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6907,7 +6907,7 @@ vrhadd_u16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urhadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrhaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6926,7 +6926,7 @@ vrhaddq_u16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urhadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrhadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6945,7 +6945,7 @@ vrhadd_u32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urhadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), 
stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrhaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6964,7 +6964,7 @@ vrhaddq_u32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srhadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6983,7 +6983,7 @@ vrhadd_s8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srhadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7002,7 +7002,7 @@ vrhaddq_s8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srhadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7021,7 +7021,7 @@ vrhadd_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srhadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7040,7 +7040,7 @@ vrhaddq_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srhadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7059,7 +7059,7 @@ vrhadd_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srhadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7078,7 +7078,7 @@ vrhaddq_s32_(a, b) #[cfg_attr(target_arch = "arm", 
target_feature(enable = "fp-armv8,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrintn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(frintn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrndn_f32(a: float32x2_t) -> float32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7097,7 +7097,7 @@ vrndn_f32_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrintn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(frintn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrndnq_f32(a: float32x4_t) -> float32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7116,7 +7116,7 @@ vrndnq_f32_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7135,7 +7135,7 @@ vqadd_u8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7154,7 +7154,7 @@ vqaddq_u8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7173,7 +7173,7 @@ vqadd_u16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7192,7 +7192,7 @@ vqaddq_u16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] 
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7211,7 +7211,7 @@ vqadd_u32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7230,7 +7230,7 @@ vqaddq_u32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u64"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqadd_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7249,7 +7249,7 @@ vqadd_u64_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u64"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7268,7 +7268,7 @@ vqaddq_u64_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7287,7 +7287,7 @@ vqadd_s8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7306,7 +7306,7 @@ vqaddq_s8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7325,7 +7325,7 @@ vqadd_s16_(a, b) #[cfg_attr(target_arch = 
"arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7344,7 +7344,7 @@ vqaddq_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7363,7 +7363,7 @@ vqadd_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7382,7 +7382,7 @@ vqaddq_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s64"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqadd_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7401,7 +7401,7 @@ vqadd_s64_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s64"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7420,7 +7420,7 @@ vqaddq_s64_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_s8_x2(a: *const i8) -> int8x8x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7439,7 +7439,7 @@ vld1_s8_x2_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), 
stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_s16_x2(a: *const i16) -> int16x4x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7458,7 +7458,7 @@ vld1_s16_x2_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_s32_x2(a: *const i32) -> int32x2x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7477,7 +7477,7 @@ vld1_s32_x2_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_s64_x2(a: *const i64) -> int64x1x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7496,7 +7496,7 @@ vld1_s64_x2_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_s8_x2(a: *const i8) -> int8x16x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7515,7 +7515,7 @@ vld1q_s8_x2_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_s16_x2(a: *const i16) -> int16x8x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7534,7 +7534,7 @@ vld1q_s16_x2_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_s32_x2(a: *const i32) -> int32x4x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7553,7 +7553,7 @@ vld1q_s32_x2_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_s64_x2(a: *const i64) -> int64x2x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7572,7 +7572,7 @@ vld1q_s64_x2_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), 
assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_s8_x3(a: *const i8) -> int8x8x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7591,7 +7591,7 @@ vld1_s8_x3_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_s16_x3(a: *const i16) -> int16x4x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7610,7 +7610,7 @@ vld1_s16_x3_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_s32_x3(a: *const i32) -> int32x2x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7629,7 +7629,7 @@ vld1_s32_x3_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_s64_x3(a: *const i64) -> int64x1x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7648,7 +7648,7 @@ vld1_s64_x3_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_s8_x3(a: *const i8) -> int8x16x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7667,7 +7667,7 @@ vld1q_s8_x3_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_s16_x3(a: *const i16) -> int16x8x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7686,7 +7686,7 @@ vld1q_s16_x3_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_s32_x3(a: *const i32) -> int32x4x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7705,7 +7705,7 @@ vld1q_s32_x3_(a) #[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_s64_x3(a: *const i64) -> int64x2x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7724,7 +7724,7 @@ vld1q_s64_x3_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_s8_x4(a: *const i8) -> int8x8x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7743,7 +7743,7 @@ vld1_s8_x4_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_s16_x4(a: *const i16) -> int16x4x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7762,7 +7762,7 @@ vld1_s16_x4_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_s32_x4(a: *const i32) -> int32x2x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7781,7 +7781,7 @@ vld1_s32_x4_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_s64_x4(a: *const i64) -> int64x1x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7800,7 +7800,7 @@ vld1_s64_x4_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_s8_x4(a: *const i8) -> int8x16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7819,7 +7819,7 @@ vld1q_s8_x4_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_s16_x4(a: *const i16) -> 
int16x8x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7838,7 +7838,7 @@ vld1q_s16_x4_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_s32_x4(a: *const i32) -> int32x4x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7857,7 +7857,7 @@ vld1q_s32_x4_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_s64_x4(a: *const i64) -> int64x2x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7876,7 +7876,7 @@ vld1q_s64_x4_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_u8_x2(a: *const u8) -> uint8x8x2_t { transmute(vld1_s8_x2(transmute(a))) } @@ -7889,7 +7889,7 @@ pub unsafe fn vld1_u8_x2(a: *const u8) -> uint8x8x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_u16_x2(a: *const u16) -> uint16x4x2_t { transmute(vld1_s16_x2(transmute(a))) } @@ -7902,7 +7902,7 @@ pub unsafe fn vld1_u16_x2(a: *const u16) -> uint16x4x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_u32_x2(a: *const u32) -> uint32x2x2_t { transmute(vld1_s32_x2(transmute(a))) } @@ -7915,7 +7915,7 @@ pub unsafe fn vld1_u32_x2(a: *const u32) -> uint32x2x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_u64_x2(a: *const u64) -> uint64x1x2_t { transmute(vld1_s64_x2(transmute(a))) } @@ -7928,7 +7928,7 @@ pub unsafe fn vld1_u64_x2(a: *const u64) -> uint64x1x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch 
= "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_u8_x2(a: *const u8) -> uint8x16x2_t { transmute(vld1q_s8_x2(transmute(a))) } @@ -7941,7 +7941,7 @@ pub unsafe fn vld1q_u8_x2(a: *const u8) -> uint8x16x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_u16_x2(a: *const u16) -> uint16x8x2_t { transmute(vld1q_s16_x2(transmute(a))) } @@ -7954,7 +7954,7 @@ pub unsafe fn vld1q_u16_x2(a: *const u16) -> uint16x8x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_u32_x2(a: *const u32) -> uint32x4x2_t { transmute(vld1q_s32_x2(transmute(a))) } @@ -7967,7 +7967,7 @@ pub unsafe fn vld1q_u32_x2(a: *const u32) -> uint32x4x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_u64_x2(a: *const u64) -> uint64x2x2_t { transmute(vld1q_s64_x2(transmute(a))) } @@ -7980,7 +7980,7 @@ pub unsafe fn vld1q_u64_x2(a: *const u64) -> uint64x2x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_u8_x3(a: *const u8) -> uint8x8x3_t { transmute(vld1_s8_x3(transmute(a))) } @@ -7993,7 +7993,7 @@ pub unsafe fn vld1_u8_x3(a: *const u8) -> uint8x8x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_u16_x3(a: *const u16) -> uint16x4x3_t { transmute(vld1_s16_x3(transmute(a))) } @@ -8006,7 +8006,7 @@ pub unsafe fn vld1_u16_x3(a: *const u16) -> uint16x4x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = 
"neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_u32_x3(a: *const u32) -> uint32x2x3_t { transmute(vld1_s32_x3(transmute(a))) } @@ -8019,7 +8019,7 @@ pub unsafe fn vld1_u32_x3(a: *const u32) -> uint32x2x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_u64_x3(a: *const u64) -> uint64x1x3_t { transmute(vld1_s64_x3(transmute(a))) } @@ -8032,7 +8032,7 @@ pub unsafe fn vld1_u64_x3(a: *const u64) -> uint64x1x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_u8_x3(a: *const u8) -> uint8x16x3_t { transmute(vld1q_s8_x3(transmute(a))) } @@ -8045,7 +8045,7 @@ pub unsafe fn vld1q_u8_x3(a: *const u8) -> uint8x16x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_u16_x3(a: *const u16) -> uint16x8x3_t { transmute(vld1q_s16_x3(transmute(a))) } @@ -8058,7 +8058,7 @@ pub unsafe fn vld1q_u16_x3(a: *const u16) -> uint16x8x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_u32_x3(a: *const u32) -> uint32x4x3_t { transmute(vld1q_s32_x3(transmute(a))) } @@ -8071,7 +8071,7 @@ pub unsafe fn vld1q_u32_x3(a: *const u32) -> uint32x4x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_u64_x3(a: *const u64) -> uint64x2x3_t { transmute(vld1q_s64_x3(transmute(a))) } @@ -8084,7 +8084,7 @@ pub unsafe fn vld1q_u64_x3(a: *const u64) -> uint64x2x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_u8_x4(a: *const u8) -> uint8x8x4_t { transmute(vld1_s8_x4(transmute(a))) } @@ -8097,7 +8097,7 @@ pub unsafe fn 
vld1_u8_x4(a: *const u8) -> uint8x8x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_u16_x4(a: *const u16) -> uint16x4x4_t { transmute(vld1_s16_x4(transmute(a))) } @@ -8110,7 +8110,7 @@ pub unsafe fn vld1_u16_x4(a: *const u16) -> uint16x4x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_u32_x4(a: *const u32) -> uint32x2x4_t { transmute(vld1_s32_x4(transmute(a))) } @@ -8123,7 +8123,7 @@ pub unsafe fn vld1_u32_x4(a: *const u32) -> uint32x2x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_u64_x4(a: *const u64) -> uint64x1x4_t { transmute(vld1_s64_x4(transmute(a))) } @@ -8136,7 +8136,7 @@ pub unsafe fn vld1_u64_x4(a: *const u64) -> uint64x1x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_u8_x4(a: *const u8) -> uint8x16x4_t { transmute(vld1q_s8_x4(transmute(a))) } @@ -8149,7 +8149,7 @@ pub unsafe fn vld1q_u8_x4(a: *const u8) -> uint8x16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_u16_x4(a: *const u16) -> uint16x8x4_t { transmute(vld1q_s16_x4(transmute(a))) } @@ -8162,7 +8162,7 @@ pub unsafe fn vld1q_u16_x4(a: *const u16) -> uint16x8x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_u32_x4(a: *const u32) -> uint32x4x4_t { transmute(vld1q_s32_x4(transmute(a))) } @@ -8175,7 +8175,7 @@ pub unsafe fn vld1q_u32_x4(a: *const u32) -> uint32x4x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] 
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_u64_x4(a: *const u64) -> uint64x2x4_t { transmute(vld1q_s64_x4(transmute(a))) } @@ -8188,7 +8188,7 @@ pub unsafe fn vld1q_u64_x4(a: *const u64) -> uint64x2x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_p8_x2(a: *const p8) -> poly8x8x2_t { transmute(vld1_s8_x2(transmute(a))) } @@ -8201,7 +8201,7 @@ pub unsafe fn vld1_p8_x2(a: *const p8) -> poly8x8x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_p8_x3(a: *const p8) -> poly8x8x3_t { transmute(vld1_s8_x3(transmute(a))) } @@ -8214,7 +8214,7 @@ pub unsafe fn vld1_p8_x3(a: *const p8) -> poly8x8x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_p8_x4(a: *const p8) -> poly8x8x4_t { transmute(vld1_s8_x4(transmute(a))) } @@ -8227,7 +8227,7 @@ pub unsafe fn vld1_p8_x4(a: *const p8) -> poly8x8x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_p8_x2(a: *const p8) -> poly8x16x2_t { transmute(vld1q_s8_x2(transmute(a))) } @@ -8240,7 +8240,7 @@ pub unsafe fn vld1q_p8_x2(a: *const p8) -> poly8x16x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_p8_x3(a: *const p8) -> poly8x16x3_t { transmute(vld1q_s8_x3(transmute(a))) } @@ -8253,7 +8253,7 @@ pub unsafe fn vld1q_p8_x3(a: *const p8) -> poly8x16x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), 
stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_p8_x4(a: *const p8) -> poly8x16x4_t { transmute(vld1q_s8_x4(transmute(a))) } @@ -8266,7 +8266,7 @@ pub unsafe fn vld1q_p8_x4(a: *const p8) -> poly8x16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_p16_x2(a: *const p16) -> poly16x4x2_t { transmute(vld1_s16_x2(transmute(a))) } @@ -8279,7 +8279,7 @@ pub unsafe fn vld1_p16_x2(a: *const p16) -> poly16x4x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_p16_x3(a: *const p16) -> poly16x4x3_t { transmute(vld1_s16_x3(transmute(a))) } @@ -8292,7 +8292,7 @@ pub unsafe fn vld1_p16_x3(a: *const p16) -> poly16x4x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_p16_x4(a: *const p16) -> poly16x4x4_t { transmute(vld1_s16_x4(transmute(a))) } @@ -8305,7 +8305,7 @@ pub unsafe fn vld1_p16_x4(a: *const p16) -> poly16x4x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_p16_x2(a: *const p16) -> poly16x8x2_t { transmute(vld1q_s16_x2(transmute(a))) } @@ -8318,7 +8318,7 @@ pub unsafe fn vld1q_p16_x2(a: *const p16) -> poly16x8x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_p16_x3(a: *const p16) -> poly16x8x3_t { transmute(vld1q_s16_x3(transmute(a))) } @@ -8331,7 +8331,7 @@ pub unsafe fn vld1q_p16_x3(a: *const p16) -> poly16x8x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_p16_x4(a: *const p16) -> poly16x8x4_t { transmute(vld1q_s16_x4(transmute(a))) } @@ -8344,7 +8344,7 @@ pub 
unsafe fn vld1q_p16_x4(a: *const p16) -> poly16x8x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_p64_x2(a: *const p64) -> poly64x1x2_t { transmute(vld1_s64_x2(transmute(a))) } @@ -8357,7 +8357,7 @@ pub unsafe fn vld1_p64_x2(a: *const p64) -> poly64x1x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_p64_x3(a: *const p64) -> poly64x1x3_t { transmute(vld1_s64_x3(transmute(a))) } @@ -8370,7 +8370,7 @@ pub unsafe fn vld1_p64_x3(a: *const p64) -> poly64x1x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_p64_x4(a: *const p64) -> poly64x1x4_t { transmute(vld1_s64_x4(transmute(a))) } @@ -8383,7 +8383,7 @@ pub unsafe fn vld1_p64_x4(a: *const p64) -> poly64x1x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_p64_x2(a: *const p64) -> poly64x2x2_t { transmute(vld1q_s64_x2(transmute(a))) } @@ -8396,7 +8396,7 @@ pub unsafe fn vld1q_p64_x2(a: *const p64) -> poly64x2x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_p64_x3(a: *const p64) -> poly64x2x3_t { transmute(vld1q_s64_x3(transmute(a))) } @@ -8409,7 +8409,7 @@ pub unsafe fn vld1q_p64_x3(a: *const p64) -> poly64x2x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_p64_x4(a: *const p64) -> poly64x2x4_t { transmute(vld1q_s64_x4(transmute(a))) } @@ -8422,7 +8422,7 @@ pub unsafe fn vld1q_p64_x4(a: *const p64) -> poly64x2x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = 
"arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_f32_x2(a: *const f32) -> float32x2x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8441,7 +8441,7 @@ vld1_f32_x2_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_f32_x2(a: *const f32) -> float32x4x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8460,7 +8460,7 @@ vld1q_f32_x2_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_f32_x3(a: *const f32) -> float32x2x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8479,7 +8479,7 @@ vld1_f32_x3_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_f32_x3(a: *const f32) -> float32x4x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8498,7 +8498,7 @@ vld1q_f32_x3_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_f32_x4(a: *const f32) -> float32x2x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8517,7 +8517,7 @@ vld1_f32_x4_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_f32_x4(a: *const f32) -> float32x4x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8767,7 +8767,7 @@ vld2_s64_(a as _) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld2_u8(a: *const u8) -> uint8x8x2_t { transmute(vld2_s8(transmute(a))) } @@ 
-8780,7 +8780,7 @@ pub unsafe fn vld2_u8(a: *const u8) -> uint8x8x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld2_u16(a: *const u16) -> uint16x4x2_t { transmute(vld2_s16(transmute(a))) } @@ -8793,7 +8793,7 @@ pub unsafe fn vld2_u16(a: *const u16) -> uint16x4x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld2_u32(a: *const u32) -> uint32x2x2_t { transmute(vld2_s32(transmute(a))) } @@ -8806,7 +8806,7 @@ pub unsafe fn vld2_u32(a: *const u32) -> uint32x2x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld2q_u8(a: *const u8) -> uint8x16x2_t { transmute(vld2q_s8(transmute(a))) } @@ -8819,7 +8819,7 @@ pub unsafe fn vld2q_u8(a: *const u8) -> uint8x16x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld2q_u16(a: *const u16) -> uint16x8x2_t { transmute(vld2q_s16(transmute(a))) } @@ -8832,7 +8832,7 @@ pub unsafe fn vld2q_u16(a: *const u16) -> uint16x8x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld2q_u32(a: *const u32) -> uint32x4x2_t { transmute(vld2q_s32(transmute(a))) } @@ -8845,7 +8845,7 @@ pub unsafe fn vld2q_u32(a: *const u32) -> uint32x4x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld2_p8(a: *const p8) -> poly8x8x2_t { transmute(vld2_s8(transmute(a))) } @@ -8858,7 +8858,7 @@ pub unsafe fn vld2_p8(a: *const p8) -> poly8x8x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr(all(test, target_arch = 
"aarch64"), assert_instr(ld2))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld2_p16(a: *const p16) -> poly16x4x2_t { transmute(vld2_s16(transmute(a))) } @@ -8871,7 +8871,7 @@ pub unsafe fn vld2_p16(a: *const p16) -> poly16x4x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld2q_p8(a: *const p8) -> poly8x16x2_t { transmute(vld2q_s8(transmute(a))) } @@ -8884,7 +8884,7 @@ pub unsafe fn vld2q_p8(a: *const p8) -> poly8x16x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld2q_p16(a: *const p16) -> poly16x8x2_t { transmute(vld2q_s16(transmute(a))) } @@ -8897,7 +8897,7 @@ pub unsafe fn vld2q_p16(a: *const p16) -> poly16x8x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld2_u64(a: *const u64) -> uint64x1x2_t { transmute(vld2_s64(transmute(a))) } @@ -8910,7 +8910,7 @@ pub unsafe fn vld2_u64(a: *const u64) -> uint64x1x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld2_p64(a: *const p64) -> poly64x1x2_t { transmute(vld2_s64(transmute(a))) } @@ -9220,7 +9220,7 @@ vld2_dup_s64_(a as _) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2r))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld2_dup_u8(a: *const u8) -> uint8x8x2_t { transmute(vld2_dup_s8(transmute(a))) } @@ -9233,7 +9233,7 @@ pub unsafe fn vld2_dup_u8(a: *const u8) -> uint8x8x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2r))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld2_dup_u16(a: *const u16) -> 
uint16x4x2_t {
     transmute(vld2_dup_s16(transmute(a)))
 }
@@ -9246,7 +9246,7 @@ pub unsafe fn vld2_dup_u16(a: *const u16) -> uint16x4x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2r))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld2_dup_u32(a: *const u32) -> uint32x2x2_t {
     transmute(vld2_dup_s32(transmute(a)))
 }
@@ -9259,7 +9259,7 @@ pub unsafe fn vld2_dup_u32(a: *const u32) -> uint32x2x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2r))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld2q_dup_u8(a: *const u8) -> uint8x16x2_t {
     transmute(vld2q_dup_s8(transmute(a)))
 }
@@ -9272,7 +9272,7 @@ pub unsafe fn vld2q_dup_u8(a: *const u8) -> uint8x16x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2r))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld2q_dup_u16(a: *const u16) -> uint16x8x2_t {
     transmute(vld2q_dup_s16(transmute(a)))
 }
@@ -9285,7 +9285,7 @@ pub unsafe fn vld2q_dup_u16(a: *const u16) -> uint16x8x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2r))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld2q_dup_u32(a: *const u32) -> uint32x4x2_t {
     transmute(vld2q_dup_s32(transmute(a)))
 }
@@ -9298,7 +9298,7 @@ pub unsafe fn vld2q_dup_u32(a: *const u32) -> uint32x4x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2r))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld2_dup_p8(a: *const p8) -> poly8x8x2_t {
     transmute(vld2_dup_s8(transmute(a)))
 }
@@ -9311,7 +9311,7 @@ pub unsafe fn vld2_dup_p8(a: *const p8) -> poly8x8x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2r))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld2_dup_p16(a: *const p16) -> poly16x4x2_t {
     transmute(vld2_dup_s16(transmute(a)))
 }
@@ -9324,7 +9324,7 @@ pub unsafe fn vld2_dup_p16(a: *const p16) -> poly16x4x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2r))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld2q_dup_p8(a: *const p8) -> poly8x16x2_t {
     transmute(vld2q_dup_s8(transmute(a)))
 }
@@ -9337,7 +9337,7 @@ pub unsafe fn vld2q_dup_p8(a: *const p8) -> poly8x16x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2r))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld2q_dup_p16(a: *const p16) -> poly16x8x2_t {
     transmute(vld2q_dup_s16(transmute(a)))
 }
@@ -9350,7 +9350,7 @@ pub unsafe fn vld2q_dup_p16(a: *const p16) -> poly16x8x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2r))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld2_dup_u64(a: *const u64) -> uint64x1x2_t {
     transmute(vld2_dup_s64(transmute(a)))
 }
@@ -9363,7 +9363,7 @@ pub unsafe fn vld2_dup_u64(a: *const u64) -> uint64x1x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2r))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld2_dup_p64(a: *const p64) -> poly64x1x2_t {
     transmute(vld2_dup_s64(transmute(a)))
 }
@@ -9628,7 +9628,7 @@ vld2q_lane_s32_(b.0, b.1, LANE as i64, a as _)
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld2_lane_u8<const LANE: i32>(a: *const u8, b: uint8x8x2_t) -> uint8x8x2_t {
     static_assert_imm3!(LANE);
     transmute(vld2_lane_s8::<LANE>(transmute(a), transmute(b)))
@@ -9643,7 +9643,7 @@ pub unsafe fn vld2_lane_u8<const LANE: i32>(a: *const u8, b: uint8x8x2_t) -> uin
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld2_lane_u16<const LANE: i32>(a: *const u16, b: uint16x4x2_t) -> uint16x4x2_t {
     static_assert_imm2!(LANE);
     transmute(vld2_lane_s16::<LANE>(transmute(a), transmute(b)))
@@ -9658,7 +9658,7 @@ pub unsafe fn vld2_lane_u16<const LANE: i32>(a: *const u16, b: uint16x4x2_t) ->
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld2_lane_u32<const LANE: i32>(a: *const u32, b: uint32x2x2_t) -> uint32x2x2_t {
     static_assert_imm1!(LANE);
     transmute(vld2_lane_s32::<LANE>(transmute(a), transmute(b)))
@@ -9673,7 +9673,7 @@ pub unsafe fn vld2_lane_u32<const LANE: i32>(a: *const u32, b: uint32x2x2_t) ->
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld2q_lane_u16<const LANE: i32>(a: *const u16, b: uint16x8x2_t) -> uint16x8x2_t {
     static_assert_imm3!(LANE);
     transmute(vld2q_lane_s16::<LANE>(transmute(a), transmute(b)))
@@ -9688,7 +9688,7 @@ pub unsafe fn vld2q_lane_u16<const LANE: i32>(a: *const u16, b: uint16x8x2_t) ->
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld2q_lane_u32<const LANE: i32>(a: *const u32, b: uint32x4x2_t) -> uint32x4x2_t {
     static_assert_imm2!(LANE);
     transmute(vld2q_lane_s32::<LANE>(transmute(a), transmute(b)))
@@ -9703,7 +9703,7 @@ pub unsafe fn vld2q_lane_u32<const LANE: i32>(a: *const u32, b: uint32x4x2_t) ->
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld2_lane_p8<const LANE: i32>(a: *const p8, b: poly8x8x2_t) -> poly8x8x2_t {
     static_assert_imm3!(LANE);
     transmute(vld2_lane_s8::<LANE>(transmute(a), transmute(b)))
@@ -9718,7 +9718,7 @@ pub unsafe fn vld2_lane_p8<const LANE: i32>(a: *const p8, b: poly8x8x2_t) -> pol
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld2_lane_p16<const LANE: i32>(a: *const p16, b: poly16x4x2_t) -> poly16x4x2_t {
     static_assert_imm2!(LANE);
     transmute(vld2_lane_s16::<LANE>(transmute(a), transmute(b)))
@@ -9733,7 +9733,7 @@ pub unsafe fn vld2_lane_p16<const LANE: i32>(a: *const p16, b: poly16x4x2_t) ->
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld2q_lane_p16<const LANE: i32>(a: *const p16, b: poly16x8x2_t) -> poly16x8x2_t {
     static_assert_imm3!(LANE);
     transmute(vld2q_lane_s16::<LANE>(transmute(a), transmute(b)))
@@ -10052,7 +10052,7 @@
vld3_s64_(a as _) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3_u8(a: *const u8) -> uint8x8x3_t { transmute(vld3_s8(transmute(a))) } @@ -10065,7 +10065,7 @@ pub unsafe fn vld3_u8(a: *const u8) -> uint8x8x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3_u16(a: *const u16) -> uint16x4x3_t { transmute(vld3_s16(transmute(a))) } @@ -10078,7 +10078,7 @@ pub unsafe fn vld3_u16(a: *const u16) -> uint16x4x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3_u32(a: *const u32) -> uint32x2x3_t { transmute(vld3_s32(transmute(a))) } @@ -10091,7 +10091,7 @@ pub unsafe fn vld3_u32(a: *const u32) -> uint32x2x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3q_u8(a: *const u8) -> uint8x16x3_t { transmute(vld3q_s8(transmute(a))) } @@ -10104,7 +10104,7 @@ pub unsafe fn vld3q_u8(a: *const u8) -> uint8x16x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3q_u16(a: *const u16) -> uint16x8x3_t { transmute(vld3q_s16(transmute(a))) } @@ -10117,7 +10117,7 @@ pub unsafe fn vld3q_u16(a: *const u16) -> uint16x8x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3q_u32(a: *const u32) -> uint32x4x3_t { transmute(vld3q_s32(transmute(a))) } @@ -10130,7 +10130,7 @@ pub unsafe fn vld3q_u32(a: *const u32) -> uint32x4x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3))] 
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3_p8(a: *const p8) -> poly8x8x3_t { transmute(vld3_s8(transmute(a))) } @@ -10143,7 +10143,7 @@ pub unsafe fn vld3_p8(a: *const p8) -> poly8x8x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3_p16(a: *const p16) -> poly16x4x3_t { transmute(vld3_s16(transmute(a))) } @@ -10156,7 +10156,7 @@ pub unsafe fn vld3_p16(a: *const p16) -> poly16x4x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3q_p8(a: *const p8) -> poly8x16x3_t { transmute(vld3q_s8(transmute(a))) } @@ -10169,7 +10169,7 @@ pub unsafe fn vld3q_p8(a: *const p8) -> poly8x16x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3q_p16(a: *const p16) -> poly16x8x3_t { transmute(vld3q_s16(transmute(a))) } @@ -10182,7 +10182,7 @@ pub unsafe fn vld3q_p16(a: *const p16) -> poly16x8x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3_u64(a: *const u64) -> uint64x1x3_t { transmute(vld3_s64(transmute(a))) } @@ -10195,7 +10195,7 @@ pub unsafe fn vld3_u64(a: *const u64) -> uint64x1x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3_p64(a: *const p64) -> poly64x1x3_t { transmute(vld3_s64(transmute(a))) } @@ -10505,7 +10505,7 @@ vld3_dup_s64_(a as _) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3r))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3_dup_u8(a: *const u8) -> uint8x8x3_t { 
     transmute(vld3_dup_s8(transmute(a)))
 }
@@ -10518,7 +10518,7 @@ pub unsafe fn vld3_dup_u8(a: *const u8) -> uint8x8x3_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3r))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld3_dup_u16(a: *const u16) -> uint16x4x3_t {
     transmute(vld3_dup_s16(transmute(a)))
 }
@@ -10531,7 +10531,7 @@ pub unsafe fn vld3_dup_u16(a: *const u16) -> uint16x4x3_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3r))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld3_dup_u32(a: *const u32) -> uint32x2x3_t {
     transmute(vld3_dup_s32(transmute(a)))
 }
@@ -10544,7 +10544,7 @@ pub unsafe fn vld3_dup_u32(a: *const u32) -> uint32x2x3_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3r))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld3q_dup_u8(a: *const u8) -> uint8x16x3_t {
     transmute(vld3q_dup_s8(transmute(a)))
 }
@@ -10557,7 +10557,7 @@ pub unsafe fn vld3q_dup_u8(a: *const u8) -> uint8x16x3_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3r))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld3q_dup_u16(a: *const u16) -> uint16x8x3_t {
     transmute(vld3q_dup_s16(transmute(a)))
 }
@@ -10570,7 +10570,7 @@ pub unsafe fn vld3q_dup_u16(a: *const u16) -> uint16x8x3_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3r))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld3q_dup_u32(a: *const u32) -> uint32x4x3_t {
     transmute(vld3q_dup_s32(transmute(a)))
 }
@@ -10583,7 +10583,7 @@ pub unsafe fn vld3q_dup_u32(a: *const u32) -> uint32x4x3_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3r))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld3_dup_p8(a: *const p8) -> poly8x8x3_t {
     transmute(vld3_dup_s8(transmute(a)))
 }
@@ -10596,7 +10596,7 @@ pub unsafe fn vld3_dup_p8(a: *const p8) -> poly8x8x3_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3r))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld3_dup_p16(a: *const p16) -> poly16x4x3_t {
     transmute(vld3_dup_s16(transmute(a)))
 }
@@ -10609,7 +10609,7 @@ pub unsafe fn vld3_dup_p16(a: *const p16) -> poly16x4x3_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3r))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld3q_dup_p8(a: *const p8) -> poly8x16x3_t {
     transmute(vld3q_dup_s8(transmute(a)))
 }
@@ -10622,7 +10622,7 @@ pub unsafe fn vld3q_dup_p8(a: *const p8) -> poly8x16x3_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3r))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld3q_dup_p16(a: *const p16) -> poly16x8x3_t {
     transmute(vld3q_dup_s16(transmute(a)))
 }
@@ -10635,7 +10635,7 @@ pub unsafe fn vld3q_dup_p16(a: *const p16) -> poly16x8x3_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3r))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld3_dup_u64(a: *const u64) -> uint64x1x3_t {
     transmute(vld3_dup_s64(transmute(a)))
 }
@@ -10648,7 +10648,7 @@ pub unsafe fn vld3_dup_u64(a: *const u64) -> uint64x1x3_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3r))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld3_dup_p64(a: *const p64) -> poly64x1x3_t {
     transmute(vld3_dup_s64(transmute(a)))
 }
@@ -10913,7 +10913,7 @@ vld3q_lane_s32_(b.0, b.1, b.2, LANE as i64, a as _)
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld3_lane_u8<const LANE: i32>(a: *const u8, b: uint8x8x3_t) -> uint8x8x3_t {
     static_assert_imm3!(LANE);
     transmute(vld3_lane_s8::<LANE>(transmute(a), transmute(b)))
@@ -10928,7 +10928,7 @@ pub unsafe fn vld3_lane_u8<const LANE: i32>(a: *const u8, b: uint8x8x3_t) -> uin
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3,
LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3_lane_u16(a: *const u16, b: uint16x4x3_t) -> uint16x4x3_t { static_assert_imm2!(LANE); transmute(vld3_lane_s16::(transmute(a), transmute(b))) @@ -10943,7 +10943,7 @@ pub unsafe fn vld3_lane_u16(a: *const u16, b: uint16x4x3_t) -> #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3_lane_u32(a: *const u32, b: uint32x2x3_t) -> uint32x2x3_t { static_assert_imm1!(LANE); transmute(vld3_lane_s32::(transmute(a), transmute(b))) @@ -10958,7 +10958,7 @@ pub unsafe fn vld3_lane_u32(a: *const u32, b: uint32x2x3_t) -> #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3q_lane_u16(a: *const u16, b: uint16x8x3_t) -> uint16x8x3_t { static_assert_imm3!(LANE); transmute(vld3q_lane_s16::(transmute(a), transmute(b))) @@ -10973,7 +10973,7 @@ pub unsafe fn vld3q_lane_u16(a: *const u16, b: uint16x8x3_t) -> #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3q_lane_u32(a: *const u32, b: uint32x4x3_t) -> uint32x4x3_t { static_assert_imm2!(LANE); transmute(vld3q_lane_s32::(transmute(a), transmute(b))) @@ -10988,7 +10988,7 @@ pub unsafe fn vld3q_lane_u32(a: *const u32, b: uint32x4x3_t) -> #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3_lane_p8(a: *const p8, b: poly8x8x3_t) -> poly8x8x3_t { static_assert_imm3!(LANE); transmute(vld3_lane_s8::(transmute(a), transmute(b))) @@ -11003,7 +11003,7 @@ pub unsafe fn vld3_lane_p8(a: *const p8, b: poly8x8x3_t) -> pol #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3_lane_p16(a: *const p16, b: poly16x4x3_t) -> poly16x4x3_t { static_assert_imm2!(LANE); transmute(vld3_lane_s16::(transmute(a), transmute(b))) @@ -11018,7 +11018,7 @@ pub unsafe fn vld3_lane_p16(a: *const 
p16, b: poly16x4x3_t) -> #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3q_lane_p16(a: *const p16, b: poly16x8x3_t) -> poly16x8x3_t { static_assert_imm3!(LANE); transmute(vld3q_lane_s16::(transmute(a), transmute(b))) @@ -11337,7 +11337,7 @@ vld4_s64_(a as _) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld4_u8(a: *const u8) -> uint8x8x4_t { transmute(vld4_s8(transmute(a))) } @@ -11350,7 +11350,7 @@ pub unsafe fn vld4_u8(a: *const u8) -> uint8x8x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld4_u16(a: *const u16) -> uint16x4x4_t { transmute(vld4_s16(transmute(a))) } @@ -11363,7 +11363,7 @@ pub unsafe fn vld4_u16(a: *const u16) -> uint16x4x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld4_u32(a: *const u32) -> uint32x2x4_t { transmute(vld4_s32(transmute(a))) } @@ -11376,7 +11376,7 @@ pub unsafe fn vld4_u32(a: *const u32) -> uint32x2x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld4q_u8(a: *const u8) -> uint8x16x4_t { transmute(vld4q_s8(transmute(a))) } @@ -11389,7 +11389,7 @@ pub unsafe fn vld4q_u8(a: *const u8) -> uint8x16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld4q_u16(a: *const u16) -> uint16x8x4_t { transmute(vld4q_s16(transmute(a))) } @@ -11402,7 +11402,7 @@ pub unsafe fn vld4q_u16(a: *const u16) -> uint16x8x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr(all(test, target_arch = "aarch64"), 
assert_instr(ld4))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld4q_u32(a: *const u32) -> uint32x4x4_t { transmute(vld4q_s32(transmute(a))) } @@ -11415,7 +11415,7 @@ pub unsafe fn vld4q_u32(a: *const u32) -> uint32x4x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld4_p8(a: *const p8) -> poly8x8x4_t { transmute(vld4_s8(transmute(a))) } @@ -11428,7 +11428,7 @@ pub unsafe fn vld4_p8(a: *const p8) -> poly8x8x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld4_p16(a: *const p16) -> poly16x4x4_t { transmute(vld4_s16(transmute(a))) } @@ -11441,7 +11441,7 @@ pub unsafe fn vld4_p16(a: *const p16) -> poly16x4x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld4q_p8(a: *const p8) -> poly8x16x4_t { transmute(vld4q_s8(transmute(a))) } @@ -11454,7 +11454,7 @@ pub unsafe fn vld4q_p8(a: *const p8) -> poly8x16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld4q_p16(a: *const p16) -> poly16x8x4_t { transmute(vld4q_s16(transmute(a))) } @@ -11467,7 +11467,7 @@ pub unsafe fn vld4q_p16(a: *const p16) -> poly16x8x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld4_u64(a: *const u64) -> uint64x1x4_t { transmute(vld4_s64(transmute(a))) } @@ -11480,7 +11480,7 @@ pub unsafe fn vld4_u64(a: *const u64) -> uint64x1x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn 
vld4_p64(a: *const p64) -> poly64x1x4_t { transmute(vld4_s64(transmute(a))) } @@ -11790,7 +11790,7 @@ vld4_dup_s64_(a as _) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4r))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld4_dup_u8(a: *const u8) -> uint8x8x4_t { transmute(vld4_dup_s8(transmute(a))) } @@ -11803,7 +11803,7 @@ pub unsafe fn vld4_dup_u8(a: *const u8) -> uint8x8x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4r))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld4_dup_u16(a: *const u16) -> uint16x4x4_t { transmute(vld4_dup_s16(transmute(a))) } @@ -11816,7 +11816,7 @@ pub unsafe fn vld4_dup_u16(a: *const u16) -> uint16x4x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4r))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld4_dup_u32(a: *const u32) -> uint32x2x4_t { transmute(vld4_dup_s32(transmute(a))) } @@ -11829,7 +11829,7 @@ pub unsafe fn vld4_dup_u32(a: *const u32) -> uint32x2x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4r))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld4q_dup_u8(a: *const u8) -> uint8x16x4_t { transmute(vld4q_dup_s8(transmute(a))) } @@ -11842,7 +11842,7 @@ pub unsafe fn vld4q_dup_u8(a: *const u8) -> uint8x16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4r))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld4q_dup_u16(a: *const u16) -> uint16x8x4_t { transmute(vld4q_dup_s16(transmute(a))) } @@ -11855,7 +11855,7 @@ pub unsafe fn vld4q_dup_u16(a: *const u16) -> uint16x8x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4r))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld4q_dup_u32(a: *const u32) -> uint32x4x4_t { transmute(vld4q_dup_s32(transmute(a))) } @@ -11868,7 +11868,7 @@ pub unsafe fn vld4q_dup_u32(a: *const u32) -> uint32x4x4_t { #[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4r))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld4_dup_p8(a: *const p8) -> poly8x8x4_t { transmute(vld4_dup_s8(transmute(a))) } @@ -11881,7 +11881,7 @@ pub unsafe fn vld4_dup_p8(a: *const p8) -> poly8x8x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4r))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld4_dup_p16(a: *const p16) -> poly16x4x4_t { transmute(vld4_dup_s16(transmute(a))) } @@ -11894,7 +11894,7 @@ pub unsafe fn vld4_dup_p16(a: *const p16) -> poly16x4x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4r))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld4q_dup_p8(a: *const p8) -> poly8x16x4_t { transmute(vld4q_dup_s8(transmute(a))) } @@ -11907,7 +11907,7 @@ pub unsafe fn vld4q_dup_p8(a: *const p8) -> poly8x16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4r))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld4q_dup_p16(a: *const p16) -> poly16x8x4_t { transmute(vld4q_dup_s16(transmute(a))) } @@ -11920,7 +11920,7 @@ pub unsafe fn vld4q_dup_p16(a: *const p16) -> poly16x8x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4r))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld4_dup_u64(a: *const u64) -> uint64x1x4_t { transmute(vld4_dup_s64(transmute(a))) } @@ -11933,7 +11933,7 @@ pub unsafe fn vld4_dup_u64(a: *const u64) -> uint64x1x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4r))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld4_dup_p64(a: *const p64) -> poly64x1x4_t { transmute(vld4_dup_s64(transmute(a))) } @@ -12198,7 +12198,7 @@ vld4q_lane_s32_(b.0, b.1, b.2, b.3, LANE as i64, a as _) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4, LANE = 0))] #[rustc_legacy_const_generics(2)] 
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld4_lane_u8(a: *const u8, b: uint8x8x4_t) -> uint8x8x4_t { static_assert_imm3!(LANE); transmute(vld4_lane_s8::(transmute(a), transmute(b))) @@ -12213,7 +12213,7 @@ pub unsafe fn vld4_lane_u8(a: *const u8, b: uint8x8x4_t) -> uin #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld4_lane_u16(a: *const u16, b: uint16x4x4_t) -> uint16x4x4_t { static_assert_imm2!(LANE); transmute(vld4_lane_s16::(transmute(a), transmute(b))) @@ -12228,7 +12228,7 @@ pub unsafe fn vld4_lane_u16(a: *const u16, b: uint16x4x4_t) -> #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld4_lane_u32(a: *const u32, b: uint32x2x4_t) -> uint32x2x4_t { static_assert_imm1!(LANE); transmute(vld4_lane_s32::(transmute(a), transmute(b))) @@ -12243,7 +12243,7 @@ pub unsafe fn vld4_lane_u32(a: *const u32, b: uint32x2x4_t) -> #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld4q_lane_u16(a: *const u16, b: uint16x8x4_t) -> uint16x8x4_t { static_assert_imm3!(LANE); transmute(vld4q_lane_s16::(transmute(a), transmute(b))) @@ -12258,7 +12258,7 @@ pub unsafe fn vld4q_lane_u16(a: *const u16, b: uint16x8x4_t) -> #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld4q_lane_u32(a: *const u32, b: uint32x4x4_t) -> uint32x4x4_t { static_assert_imm2!(LANE); transmute(vld4q_lane_s32::(transmute(a), transmute(b))) @@ -12273,7 +12273,7 @@ pub unsafe fn vld4q_lane_u32(a: *const u32, b: uint32x4x4_t) -> #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld4_lane_p8(a: *const p8, b: poly8x8x4_t) -> poly8x8x4_t { static_assert_imm3!(LANE); transmute(vld4_lane_s8::(transmute(a), transmute(b))) @@ -12288,7 +12288,7 @@ pub unsafe fn vld4_lane_p8(a: *const p8, b: poly8x8x4_t) -> pol #[cfg_attr(all(test, 
target_arch = "arm"), assert_instr(vld4, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld4_lane_p16(a: *const p16, b: poly16x4x4_t) -> poly16x4x4_t { static_assert_imm2!(LANE); transmute(vld4_lane_s16::(transmute(a), transmute(b))) @@ -12303,7 +12303,7 @@ pub unsafe fn vld4_lane_p16(a: *const p16, b: poly16x4x4_t) -> #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld4q_lane_p16(a: *const p16, b: poly16x8x4_t) -> poly16x8x4_t { static_assert_imm3!(LANE); transmute(vld4q_lane_s16::(transmute(a), transmute(b))) @@ -12392,7 +12392,7 @@ vld4q_lane_f32_(b.0, b.1, b.2, b.3, LANE as i64, a as _) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_lane_s8(a: *mut i8, b: int8x8_t) { static_assert_imm3!(LANE); *a = simd_extract(b, LANE as u32); @@ -12407,7 +12407,7 @@ pub unsafe fn vst1_lane_s8(a: *mut i8, b: int8x8_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_lane_s16(a: *mut i16, b: int16x4_t) { static_assert_imm2!(LANE); *a = simd_extract(b, LANE as u32); @@ -12422,7 +12422,7 @@ pub unsafe fn vst1_lane_s16(a: *mut i16, b: int16x4_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_lane_s32(a: *mut i32, b: int32x2_t) { static_assert_imm1!(LANE); *a = simd_extract(b, LANE as u32); @@ -12437,7 +12437,7 @@ pub unsafe fn vst1_lane_s32(a: *mut i32, b: int32x2_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_lane_s64(a: *mut i64, b: int64x1_t) { static_assert!(LANE : i32 where LANE == 0); *a = simd_extract(b, LANE as u32); @@ -12452,7 +12452,7 @@ pub unsafe fn vst1_lane_s64(a: *mut i64, b: int64x1_t) { #[cfg_attr(all(test, target_arch = "arm"), 
assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_lane_s8(a: *mut i8, b: int8x16_t) { static_assert_imm4!(LANE); *a = simd_extract(b, LANE as u32); @@ -12467,7 +12467,7 @@ pub unsafe fn vst1q_lane_s8(a: *mut i8, b: int8x16_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_lane_s16(a: *mut i16, b: int16x8_t) { static_assert_imm3!(LANE); *a = simd_extract(b, LANE as u32); @@ -12482,7 +12482,7 @@ pub unsafe fn vst1q_lane_s16(a: *mut i16, b: int16x8_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_lane_s32(a: *mut i32, b: int32x4_t) { static_assert_imm2!(LANE); *a = simd_extract(b, LANE as u32); @@ -12497,7 +12497,7 @@ pub unsafe fn vst1q_lane_s32(a: *mut i32, b: int32x4_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_lane_s64(a: *mut i64, b: int64x2_t) { static_assert_imm1!(LANE); *a = simd_extract(b, LANE as u32); @@ -12512,7 +12512,7 @@ pub unsafe fn vst1q_lane_s64(a: *mut i64, b: int64x2_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_lane_u8(a: *mut u8, b: uint8x8_t) { static_assert_imm3!(LANE); *a = simd_extract(b, LANE as u32); @@ -12527,7 +12527,7 @@ pub unsafe fn vst1_lane_u8(a: *mut u8, b: uint8x8_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_lane_u16(a: *mut u16, b: uint16x4_t) { static_assert_imm2!(LANE); *a = simd_extract(b, LANE as u32); @@ -12542,7 +12542,7 @@ pub unsafe fn vst1_lane_u16(a: *mut u16, b: uint16x4_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] 
#[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_lane_u32(a: *mut u32, b: uint32x2_t) { static_assert_imm1!(LANE); *a = simd_extract(b, LANE as u32); @@ -12557,7 +12557,7 @@ pub unsafe fn vst1_lane_u32(a: *mut u32, b: uint32x2_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_lane_u64(a: *mut u64, b: uint64x1_t) { static_assert!(LANE : i32 where LANE == 0); *a = simd_extract(b, LANE as u32); @@ -12572,7 +12572,7 @@ pub unsafe fn vst1_lane_u64(a: *mut u64, b: uint64x1_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_lane_u8(a: *mut u8, b: uint8x16_t) { static_assert_imm4!(LANE); *a = simd_extract(b, LANE as u32); @@ -12587,7 +12587,7 @@ pub unsafe fn vst1q_lane_u8(a: *mut u8, b: uint8x16_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_lane_u16(a: *mut u16, b: uint16x8_t) { static_assert_imm3!(LANE); *a = simd_extract(b, LANE as u32); @@ -12602,7 +12602,7 @@ pub unsafe fn vst1q_lane_u16(a: *mut u16, b: uint16x8_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_lane_u32(a: *mut u32, b: uint32x4_t) { static_assert_imm2!(LANE); *a = simd_extract(b, LANE as u32); @@ -12617,7 +12617,7 @@ pub unsafe fn vst1q_lane_u32(a: *mut u32, b: uint32x4_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_lane_u64(a: *mut u64, b: uint64x2_t) { static_assert_imm1!(LANE); *a = simd_extract(b, LANE as u32); @@ -12632,7 +12632,7 @@ pub unsafe fn vst1q_lane_u64(a: *mut u64, b: uint64x2_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = 
"neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_lane_p8(a: *mut p8, b: poly8x8_t) { static_assert_imm3!(LANE); *a = simd_extract(b, LANE as u32); @@ -12647,7 +12647,7 @@ pub unsafe fn vst1_lane_p8(a: *mut p8, b: poly8x8_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_lane_p16(a: *mut p16, b: poly16x4_t) { static_assert_imm2!(LANE); *a = simd_extract(b, LANE as u32); @@ -12662,7 +12662,7 @@ pub unsafe fn vst1_lane_p16(a: *mut p16, b: poly16x4_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_lane_p8(a: *mut p8, b: poly8x16_t) { static_assert_imm4!(LANE); *a = simd_extract(b, LANE as u32); @@ -12677,7 +12677,7 @@ pub unsafe fn vst1q_lane_p8(a: *mut p8, b: poly8x16_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_lane_p16(a: *mut p16, b: poly16x8_t) { static_assert_imm3!(LANE); *a = simd_extract(b, LANE as u32); @@ -12692,7 +12692,7 @@ pub unsafe fn vst1q_lane_p16(a: *mut p16, b: poly16x8_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_lane_p64(a: *mut p64, b: poly64x1_t) { static_assert!(LANE : i32 where LANE == 0); *a = simd_extract(b, LANE as u32); @@ -12707,7 +12707,7 @@ pub unsafe fn vst1_lane_p64(a: *mut p64, b: poly64x1_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_lane_p64(a: *mut p64, b: poly64x2_t) { static_assert_imm1!(LANE); *a = simd_extract(b, LANE as u32); @@ -12722,7 +12722,7 @@ pub unsafe fn vst1q_lane_p64(a: *mut p64, b: poly64x2_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = 
"neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_lane_f32(a: *mut f32, b: float32x2_t) { static_assert_imm1!(LANE); *a = simd_extract(b, LANE as u32); @@ -12737,7 +12737,7 @@ pub unsafe fn vst1_lane_f32(a: *mut f32, b: float32x2_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_lane_f32(a: *mut f32, b: float32x4_t) { static_assert_imm2!(LANE); *a = simd_extract(b, LANE as u32); @@ -13543,7 +13543,7 @@ vst1q_s64_x4_(b.0, b.1, b.2, b.3, a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_u8_x2(a: *mut u8, b: uint8x8x2_t) { vst1_s8_x2(transmute(a), transmute(b)) } @@ -13556,7 +13556,7 @@ pub unsafe fn vst1_u8_x2(a: *mut u8, b: uint8x8x2_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_u16_x2(a: *mut u16, b: uint16x4x2_t) { vst1_s16_x2(transmute(a), transmute(b)) } @@ -13569,7 +13569,7 @@ pub unsafe fn vst1_u16_x2(a: *mut u16, b: uint16x4x2_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_u32_x2(a: *mut u32, b: uint32x2x2_t) { vst1_s32_x2(transmute(a), transmute(b)) } @@ -13582,7 +13582,7 @@ pub unsafe fn vst1_u32_x2(a: *mut u32, b: uint32x2x2_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_u64_x2(a: *mut u64, b: uint64x1x2_t) { vst1_s64_x2(transmute(a), transmute(b)) } @@ -13595,7 +13595,7 @@ pub unsafe fn vst1_u64_x2(a: *mut u64, b: uint64x1x2_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_u8_x2(a: *mut u8, b: uint8x16x2_t) { vst1q_s8_x2(transmute(a), transmute(b)) } @@ -13608,7 +13608,7 @@ pub 
unsafe fn vst1q_u8_x2(a: *mut u8, b: uint8x16x2_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_u16_x2(a: *mut u16, b: uint16x8x2_t) { vst1q_s16_x2(transmute(a), transmute(b)) } @@ -13621,7 +13621,7 @@ pub unsafe fn vst1q_u16_x2(a: *mut u16, b: uint16x8x2_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_u32_x2(a: *mut u32, b: uint32x4x2_t) { vst1q_s32_x2(transmute(a), transmute(b)) } @@ -13634,7 +13634,7 @@ pub unsafe fn vst1q_u32_x2(a: *mut u32, b: uint32x4x2_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_u64_x2(a: *mut u64, b: uint64x2x2_t) { vst1q_s64_x2(transmute(a), transmute(b)) } @@ -13647,7 +13647,7 @@ pub unsafe fn vst1q_u64_x2(a: *mut u64, b: uint64x2x2_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_u8_x3(a: *mut u8, b: uint8x8x3_t) { vst1_s8_x3(transmute(a), transmute(b)) } @@ -13660,7 +13660,7 @@ pub unsafe fn vst1_u8_x3(a: *mut u8, b: uint8x8x3_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_u16_x3(a: *mut u16, b: uint16x4x3_t) { vst1_s16_x3(transmute(a), transmute(b)) } @@ -13673,7 +13673,7 @@ pub unsafe fn vst1_u16_x3(a: *mut u16, b: uint16x4x3_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_u32_x3(a: *mut u32, b: uint32x2x3_t) { vst1_s32_x3(transmute(a), transmute(b)) } @@ -13686,7 +13686,7 @@ pub unsafe fn vst1_u32_x3(a: *mut u32, b: uint32x2x3_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), 
assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_u64_x3(a: *mut u64, b: uint64x1x3_t) { vst1_s64_x3(transmute(a), transmute(b)) } @@ -13699,7 +13699,7 @@ pub unsafe fn vst1_u64_x3(a: *mut u64, b: uint64x1x3_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_u8_x3(a: *mut u8, b: uint8x16x3_t) { vst1q_s8_x3(transmute(a), transmute(b)) } @@ -13712,7 +13712,7 @@ pub unsafe fn vst1q_u8_x3(a: *mut u8, b: uint8x16x3_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_u16_x3(a: *mut u16, b: uint16x8x3_t) { vst1q_s16_x3(transmute(a), transmute(b)) } @@ -13725,7 +13725,7 @@ pub unsafe fn vst1q_u16_x3(a: *mut u16, b: uint16x8x3_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_u32_x3(a: *mut u32, b: uint32x4x3_t) { vst1q_s32_x3(transmute(a), transmute(b)) } @@ -13738,7 +13738,7 @@ pub unsafe fn vst1q_u32_x3(a: *mut u32, b: uint32x4x3_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_u64_x3(a: *mut u64, b: uint64x2x3_t) { vst1q_s64_x3(transmute(a), transmute(b)) } @@ -13751,7 +13751,7 @@ pub unsafe fn vst1q_u64_x3(a: *mut u64, b: uint64x2x3_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_u8_x4(a: *mut u8, b: uint8x8x4_t) { vst1_s8_x4(transmute(a), transmute(b)) } @@ -13764,7 +13764,7 @@ pub unsafe fn vst1_u8_x4(a: *mut u8, b: uint8x8x4_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = 
"1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_u16_x4(a: *mut u16, b: uint16x4x4_t) { vst1_s16_x4(transmute(a), transmute(b)) } @@ -13777,7 +13777,7 @@ pub unsafe fn vst1_u16_x4(a: *mut u16, b: uint16x4x4_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_u32_x4(a: *mut u32, b: uint32x2x4_t) { vst1_s32_x4(transmute(a), transmute(b)) } @@ -13790,7 +13790,7 @@ pub unsafe fn vst1_u32_x4(a: *mut u32, b: uint32x2x4_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_u64_x4(a: *mut u64, b: uint64x1x4_t) { vst1_s64_x4(transmute(a), transmute(b)) } @@ -13803,7 +13803,7 @@ pub unsafe fn vst1_u64_x4(a: *mut u64, b: uint64x1x4_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_u8_x4(a: *mut u8, b: uint8x16x4_t) { vst1q_s8_x4(transmute(a), transmute(b)) } @@ -13816,7 +13816,7 @@ pub unsafe fn vst1q_u8_x4(a: *mut u8, b: uint8x16x4_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_u16_x4(a: *mut u16, b: uint16x8x4_t) { vst1q_s16_x4(transmute(a), transmute(b)) } @@ -13829,7 +13829,7 @@ pub unsafe fn vst1q_u16_x4(a: *mut u16, b: uint16x8x4_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_u32_x4(a: *mut u32, b: uint32x4x4_t) { vst1q_s32_x4(transmute(a), transmute(b)) } @@ -13842,7 +13842,7 @@ pub unsafe fn vst1q_u32_x4(a: *mut u32, b: uint32x4x4_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_u64_x4(a: *mut u64, b: uint64x2x4_t) { 
vst1q_s64_x4(transmute(a), transmute(b)) } @@ -13855,7 +13855,7 @@ pub unsafe fn vst1q_u64_x4(a: *mut u64, b: uint64x2x4_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_p8_x2(a: *mut p8, b: poly8x8x2_t) { vst1_s8_x2(transmute(a), transmute(b)) } @@ -13868,7 +13868,7 @@ pub unsafe fn vst1_p8_x2(a: *mut p8, b: poly8x8x2_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_p8_x3(a: *mut p8, b: poly8x8x3_t) { vst1_s8_x3(transmute(a), transmute(b)) } @@ -13881,7 +13881,7 @@ pub unsafe fn vst1_p8_x3(a: *mut p8, b: poly8x8x3_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_p8_x4(a: *mut p8, b: poly8x8x4_t) { vst1_s8_x4(transmute(a), transmute(b)) } @@ -13894,7 +13894,7 @@ pub unsafe fn vst1_p8_x4(a: *mut p8, b: poly8x8x4_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_p8_x2(a: *mut p8, b: poly8x16x2_t) { vst1q_s8_x2(transmute(a), transmute(b)) } @@ -13907,7 +13907,7 @@ pub unsafe fn vst1q_p8_x2(a: *mut p8, b: poly8x16x2_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_p8_x3(a: *mut p8, b: poly8x16x3_t) { vst1q_s8_x3(transmute(a), transmute(b)) } @@ -13920,7 +13920,7 @@ pub unsafe fn vst1q_p8_x3(a: *mut p8, b: poly8x16x3_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_p8_x4(a: *mut p8, b: poly8x16x4_t) { vst1q_s8_x4(transmute(a), transmute(b)) } @@ -13933,7 +13933,7 @@ pub unsafe fn vst1q_p8_x4(a: *mut p8, b: poly8x16x4_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] 
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_p16_x2(a: *mut p16, b: poly16x4x2_t) { vst1_s16_x2(transmute(a), transmute(b)) } @@ -13946,7 +13946,7 @@ pub unsafe fn vst1_p16_x2(a: *mut p16, b: poly16x4x2_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_p16_x3(a: *mut p16, b: poly16x4x3_t) { vst1_s16_x3(transmute(a), transmute(b)) } @@ -13959,7 +13959,7 @@ pub unsafe fn vst1_p16_x3(a: *mut p16, b: poly16x4x3_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_p16_x4(a: *mut p16, b: poly16x4x4_t) { vst1_s16_x4(transmute(a), transmute(b)) } @@ -13972,7 +13972,7 @@ pub unsafe fn vst1_p16_x4(a: *mut p16, b: poly16x4x4_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_p16_x2(a: *mut p16, b: poly16x8x2_t) { vst1q_s16_x2(transmute(a), transmute(b)) } @@ -13985,7 +13985,7 @@ pub unsafe fn vst1q_p16_x2(a: *mut p16, b: poly16x8x2_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_p16_x3(a: *mut p16, b: poly16x8x3_t) { vst1q_s16_x3(transmute(a), transmute(b)) } @@ -13998,7 +13998,7 @@ pub unsafe fn vst1q_p16_x3(a: *mut p16, b: poly16x8x3_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_p16_x4(a: *mut p16, b: poly16x8x4_t) { vst1q_s16_x4(transmute(a), transmute(b)) } @@ -14011,7 +14011,7 @@ pub unsafe fn vst1q_p16_x4(a: *mut p16, b: poly16x8x4_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] -#[cfg_attr(target_arch = 
"aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_p64_x2(a: *mut p64, b: poly64x1x2_t) { vst1_s64_x2(transmute(a), transmute(b)) } @@ -14024,7 +14024,7 @@ pub unsafe fn vst1_p64_x2(a: *mut p64, b: poly64x1x2_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_p64_x3(a: *mut p64, b: poly64x1x3_t) { vst1_s64_x3(transmute(a), transmute(b)) } @@ -14037,7 +14037,7 @@ pub unsafe fn vst1_p64_x3(a: *mut p64, b: poly64x1x3_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_p64_x4(a: *mut p64, b: poly64x1x4_t) { vst1_s64_x4(transmute(a), transmute(b)) } @@ -14050,7 +14050,7 @@ pub unsafe fn vst1_p64_x4(a: *mut p64, b: poly64x1x4_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_p64_x2(a: *mut p64, b: poly64x2x2_t) { vst1q_s64_x2(transmute(a), transmute(b)) } @@ -14063,7 +14063,7 @@ pub unsafe fn vst1q_p64_x2(a: *mut p64, b: poly64x2x2_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_p64_x3(a: *mut p64, b: poly64x2x3_t) { vst1q_s64_x3(transmute(a), transmute(b)) } @@ -14076,7 +14076,7 @@ pub unsafe fn vst1q_p64_x3(a: *mut p64, b: poly64x2x3_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_p64_x4(a: *mut p64, b: poly64x2x4_t) { vst1q_s64_x4(transmute(a), transmute(b)) } @@ -14518,7 +14518,7 @@ vst2_s64_(b.0, b.1, a as _) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn 
vst2_u8(a: *mut u8, b: uint8x8x2_t) { transmute(vst2_s8(transmute(a), transmute(b))) } @@ -14531,7 +14531,7 @@ pub unsafe fn vst2_u8(a: *mut u8, b: uint8x8x2_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst2_u16(a: *mut u16, b: uint16x4x2_t) { transmute(vst2_s16(transmute(a), transmute(b))) } @@ -14544,7 +14544,7 @@ pub unsafe fn vst2_u16(a: *mut u16, b: uint16x4x2_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst2_u32(a: *mut u32, b: uint32x2x2_t) { transmute(vst2_s32(transmute(a), transmute(b))) } @@ -14557,7 +14557,7 @@ pub unsafe fn vst2_u32(a: *mut u32, b: uint32x2x2_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst2q_u8(a: *mut u8, b: uint8x16x2_t) { transmute(vst2q_s8(transmute(a), transmute(b))) } @@ -14570,7 +14570,7 @@ pub unsafe fn vst2q_u8(a: *mut u8, b: uint8x16x2_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst2q_u16(a: *mut u16, b: uint16x8x2_t) { transmute(vst2q_s16(transmute(a), transmute(b))) } @@ -14583,7 +14583,7 @@ pub unsafe fn vst2q_u16(a: *mut u16, b: uint16x8x2_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst2q_u32(a: *mut u32, b: uint32x4x2_t) { transmute(vst2q_s32(transmute(a), transmute(b))) } @@ -14596,7 +14596,7 @@ pub unsafe fn vst2q_u32(a: *mut u32, b: uint32x4x2_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst2_p8(a: *mut p8, b: poly8x8x2_t) { transmute(vst2_s8(transmute(a), transmute(b))) } @@ -14609,7 +14609,7 @@ pub unsafe fn vst2_p8(a: *mut p8, b: poly8x8x2_t) 
{ #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst2_p16(a: *mut p16, b: poly16x4x2_t) { transmute(vst2_s16(transmute(a), transmute(b))) } @@ -14622,7 +14622,7 @@ pub unsafe fn vst2_p16(a: *mut p16, b: poly16x4x2_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst2q_p8(a: *mut p8, b: poly8x16x2_t) { transmute(vst2q_s8(transmute(a), transmute(b))) } @@ -14635,7 +14635,7 @@ pub unsafe fn vst2q_p8(a: *mut p8, b: poly8x16x2_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst2q_p16(a: *mut p16, b: poly16x8x2_t) { transmute(vst2q_s16(transmute(a), transmute(b))) } @@ -14648,7 +14648,7 @@ pub unsafe fn vst2q_p16(a: *mut p16, b: poly16x8x2_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst2_u64(a: *mut u64, b: uint64x1x2_t) { transmute(vst2_s64(transmute(a), transmute(b))) } @@ -14661,7 +14661,7 @@ pub unsafe fn vst2_u64(a: *mut u64, b: uint64x1x2_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst2_p64(a: *mut p64, b: poly64x1x2_t) { transmute(vst2_s64(transmute(a), transmute(b))) } @@ -14926,7 +14926,7 @@ vst2q_lane_s32_(b.0, b.1, LANE as i64, a as _) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst2_lane_u8(a: *mut u8, b: uint8x8x2_t) { static_assert_imm3!(LANE); transmute(vst2_lane_s8::(transmute(a), transmute(b))) @@ -14941,7 +14941,7 @@ pub unsafe fn vst2_lane_u8(a: *mut u8, b: uint8x8x2_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), 
assert_instr(st2, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst2_lane_u16(a: *mut u16, b: uint16x4x2_t) { static_assert_imm2!(LANE); transmute(vst2_lane_s16::(transmute(a), transmute(b))) @@ -14956,7 +14956,7 @@ pub unsafe fn vst2_lane_u16(a: *mut u16, b: uint16x4x2_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst2_lane_u32(a: *mut u32, b: uint32x2x2_t) { static_assert_imm1!(LANE); transmute(vst2_lane_s32::(transmute(a), transmute(b))) @@ -14971,7 +14971,7 @@ pub unsafe fn vst2_lane_u32(a: *mut u32, b: uint32x2x2_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst2q_lane_u16(a: *mut u16, b: uint16x8x2_t) { static_assert_imm3!(LANE); transmute(vst2q_lane_s16::(transmute(a), transmute(b))) @@ -14986,7 +14986,7 @@ pub unsafe fn vst2q_lane_u16(a: *mut u16, b: uint16x8x2_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst2q_lane_u32(a: *mut u32, b: uint32x4x2_t) { static_assert_imm2!(LANE); transmute(vst2q_lane_s32::(transmute(a), transmute(b))) @@ -15001,7 +15001,7 @@ pub unsafe fn vst2q_lane_u32(a: *mut u32, b: uint32x4x2_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst2_lane_p8(a: *mut p8, b: poly8x8x2_t) { static_assert_imm3!(LANE); transmute(vst2_lane_s8::(transmute(a), transmute(b))) @@ -15016,7 +15016,7 @@ pub unsafe fn vst2_lane_p8(a: *mut p8, b: poly8x8x2_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst2_lane_p16(a: *mut p16, b: poly16x4x2_t) { static_assert_imm2!(LANE); transmute(vst2_lane_s16::(transmute(a), transmute(b))) @@ -15031,7 +15031,7 @@ pub unsafe fn vst2_lane_p16(a: *mut p16, b: poly16x4x2_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] 
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst2q_lane_p16(a: *mut p16, b: poly16x8x2_t) { static_assert_imm3!(LANE); transmute(vst2q_lane_s16::(transmute(a), transmute(b))) @@ -15350,7 +15350,7 @@ vst3_s64_(b.0, b.1, b.2, a as _) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst3_u8(a: *mut u8, b: uint8x8x3_t) { transmute(vst3_s8(transmute(a), transmute(b))) } @@ -15363,7 +15363,7 @@ pub unsafe fn vst3_u8(a: *mut u8, b: uint8x8x3_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst3_u16(a: *mut u16, b: uint16x4x3_t) { transmute(vst3_s16(transmute(a), transmute(b))) } @@ -15376,7 +15376,7 @@ pub unsafe fn vst3_u16(a: *mut u16, b: uint16x4x3_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst3_u32(a: *mut u32, b: uint32x2x3_t) { transmute(vst3_s32(transmute(a), transmute(b))) } @@ -15389,7 +15389,7 @@ pub unsafe fn vst3_u32(a: *mut u32, b: uint32x2x3_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst3q_u8(a: *mut u8, b: uint8x16x3_t) { transmute(vst3q_s8(transmute(a), transmute(b))) } @@ -15402,7 +15402,7 @@ pub unsafe fn vst3q_u8(a: *mut u8, b: uint8x16x3_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst3q_u16(a: *mut u16, b: uint16x8x3_t) { transmute(vst3q_s16(transmute(a), transmute(b))) } @@ -15415,7 +15415,7 @@ pub unsafe fn vst3q_u16(a: *mut u16, b: uint16x8x3_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3))] -#[cfg_attr(target_arch = "aarch64", 
stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst3q_u32(a: *mut u32, b: uint32x4x3_t) { transmute(vst3q_s32(transmute(a), transmute(b))) } @@ -15428,7 +15428,7 @@ pub unsafe fn vst3q_u32(a: *mut u32, b: uint32x4x3_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst3_p8(a: *mut p8, b: poly8x8x3_t) { transmute(vst3_s8(transmute(a), transmute(b))) } @@ -15441,7 +15441,7 @@ pub unsafe fn vst3_p8(a: *mut p8, b: poly8x8x3_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst3_p16(a: *mut p16, b: poly16x4x3_t) { transmute(vst3_s16(transmute(a), transmute(b))) } @@ -15454,7 +15454,7 @@ pub unsafe fn vst3_p16(a: *mut p16, b: poly16x4x3_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst3q_p8(a: *mut p8, b: poly8x16x3_t) { transmute(vst3q_s8(transmute(a), transmute(b))) } @@ -15467,7 +15467,7 @@ pub unsafe fn vst3q_p8(a: *mut p8, b: poly8x16x3_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst3q_p16(a: *mut p16, b: poly16x8x3_t) { transmute(vst3q_s16(transmute(a), transmute(b))) } @@ -15480,7 +15480,7 @@ pub unsafe fn vst3q_p16(a: *mut p16, b: poly16x8x3_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst3_u64(a: *mut u64, b: uint64x1x3_t) { transmute(vst3_s64(transmute(a), transmute(b))) } @@ -15493,7 +15493,7 @@ pub unsafe fn vst3_u64(a: *mut u64, b: uint64x1x3_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub 
unsafe fn vst3_p64(a: *mut p64, b: poly64x1x3_t) { transmute(vst3_s64(transmute(a), transmute(b))) } @@ -15758,7 +15758,7 @@ vst3q_lane_s32_(b.0, b.1, b.2, LANE as i64, a as _) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst3_lane_u8(a: *mut u8, b: uint8x8x3_t) { static_assert_imm3!(LANE); transmute(vst3_lane_s8::(transmute(a), transmute(b))) @@ -15773,7 +15773,7 @@ pub unsafe fn vst3_lane_u8(a: *mut u8, b: uint8x8x3_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst3_lane_u16(a: *mut u16, b: uint16x4x3_t) { static_assert_imm2!(LANE); transmute(vst3_lane_s16::(transmute(a), transmute(b))) @@ -15788,7 +15788,7 @@ pub unsafe fn vst3_lane_u16(a: *mut u16, b: uint16x4x3_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst3_lane_u32(a: *mut u32, b: uint32x2x3_t) { static_assert_imm1!(LANE); transmute(vst3_lane_s32::(transmute(a), transmute(b))) @@ -15803,7 +15803,7 @@ pub unsafe fn vst3_lane_u32(a: *mut u32, b: uint32x2x3_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst3q_lane_u16(a: *mut u16, b: uint16x8x3_t) { static_assert_imm3!(LANE); transmute(vst3q_lane_s16::(transmute(a), transmute(b))) @@ -15818,7 +15818,7 @@ pub unsafe fn vst3q_lane_u16(a: *mut u16, b: uint16x8x3_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst3q_lane_u32(a: *mut u32, b: uint32x4x3_t) { static_assert_imm2!(LANE); transmute(vst3q_lane_s32::(transmute(a), transmute(b))) @@ -15833,7 +15833,7 @@ pub unsafe fn vst3q_lane_u32(a: *mut u32, b: uint32x4x3_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn 
vst3_lane_p8(a: *mut p8, b: poly8x8x3_t) { static_assert_imm3!(LANE); transmute(vst3_lane_s8::(transmute(a), transmute(b))) @@ -15848,7 +15848,7 @@ pub unsafe fn vst3_lane_p8(a: *mut p8, b: poly8x8x3_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst3_lane_p16(a: *mut p16, b: poly16x4x3_t) { static_assert_imm2!(LANE); transmute(vst3_lane_s16::(transmute(a), transmute(b))) @@ -15863,7 +15863,7 @@ pub unsafe fn vst3_lane_p16(a: *mut p16, b: poly16x4x3_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst3q_lane_p16(a: *mut p16, b: poly16x8x3_t) { static_assert_imm3!(LANE); transmute(vst3q_lane_s16::(transmute(a), transmute(b))) @@ -16182,7 +16182,7 @@ vst4_s64_(b.0, b.1, b.2, b.3, a as _) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst4_u8(a: *mut u8, b: uint8x8x4_t) { transmute(vst4_s8(transmute(a), transmute(b))) } @@ -16195,7 +16195,7 @@ pub unsafe fn vst4_u8(a: *mut u8, b: uint8x8x4_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst4_u16(a: *mut u16, b: uint16x4x4_t) { transmute(vst4_s16(transmute(a), transmute(b))) } @@ -16208,7 +16208,7 @@ pub unsafe fn vst4_u16(a: *mut u16, b: uint16x4x4_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst4_u32(a: *mut u32, b: uint32x2x4_t) { transmute(vst4_s32(transmute(a), transmute(b))) } @@ -16221,7 +16221,7 @@ pub unsafe fn vst4_u32(a: *mut u32, b: uint32x2x4_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst4q_u8(a: *mut u8, b: uint8x16x4_t) { transmute(vst4q_s8(transmute(a), 
transmute(b))) } @@ -16234,7 +16234,7 @@ pub unsafe fn vst4q_u8(a: *mut u8, b: uint8x16x4_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst4q_u16(a: *mut u16, b: uint16x8x4_t) { transmute(vst4q_s16(transmute(a), transmute(b))) } @@ -16247,7 +16247,7 @@ pub unsafe fn vst4q_u16(a: *mut u16, b: uint16x8x4_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst4q_u32(a: *mut u32, b: uint32x4x4_t) { transmute(vst4q_s32(transmute(a), transmute(b))) } @@ -16260,7 +16260,7 @@ pub unsafe fn vst4q_u32(a: *mut u32, b: uint32x4x4_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst4_p8(a: *mut p8, b: poly8x8x4_t) { transmute(vst4_s8(transmute(a), transmute(b))) } @@ -16273,7 +16273,7 @@ pub unsafe fn vst4_p8(a: *mut p8, b: poly8x8x4_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst4_p16(a: *mut p16, b: poly16x4x4_t) { transmute(vst4_s16(transmute(a), transmute(b))) } @@ -16286,7 +16286,7 @@ pub unsafe fn vst4_p16(a: *mut p16, b: poly16x4x4_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst4q_p8(a: *mut p8, b: poly8x16x4_t) { transmute(vst4q_s8(transmute(a), transmute(b))) } @@ -16299,7 +16299,7 @@ pub unsafe fn vst4q_p8(a: *mut p8, b: poly8x16x4_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst4q_p16(a: *mut p16, b: poly16x8x4_t) { transmute(vst4q_s16(transmute(a), transmute(b))) } @@ -16312,7 +16312,7 @@ pub unsafe fn vst4q_p16(a: *mut p16, b: poly16x8x4_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] 
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst4_u64(a: *mut u64, b: uint64x1x4_t) { transmute(vst4_s64(transmute(a), transmute(b))) } @@ -16325,7 +16325,7 @@ pub unsafe fn vst4_u64(a: *mut u64, b: uint64x1x4_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst4_p64(a: *mut p64, b: poly64x1x4_t) { transmute(vst4_s64(transmute(a), transmute(b))) } @@ -16590,7 +16590,7 @@ vst4q_lane_s32_(b.0, b.1, b.2, b.3, LANE as i64, a as _) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst4_lane_u8(a: *mut u8, b: uint8x8x4_t) { static_assert_imm3!(LANE); transmute(vst4_lane_s8::(transmute(a), transmute(b))) @@ -16605,7 +16605,7 @@ pub unsafe fn vst4_lane_u8(a: *mut u8, b: uint8x8x4_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst4_lane_u16(a: *mut u16, b: uint16x4x4_t) { static_assert_imm2!(LANE); transmute(vst4_lane_s16::(transmute(a), transmute(b))) @@ -16620,7 +16620,7 @@ pub unsafe fn vst4_lane_u16(a: *mut u16, b: uint16x4x4_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst4_lane_u32(a: *mut u32, b: uint32x2x4_t) { static_assert_imm1!(LANE); transmute(vst4_lane_s32::(transmute(a), transmute(b))) @@ -16635,7 +16635,7 @@ pub unsafe fn vst4_lane_u32(a: *mut u32, b: uint32x2x4_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst4q_lane_u16(a: *mut u16, b: uint16x8x4_t) { static_assert_imm3!(LANE); transmute(vst4q_lane_s16::(transmute(a), transmute(b))) @@ -16650,7 +16650,7 @@ pub unsafe fn vst4q_lane_u16(a: *mut u16, b: uint16x8x4_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] #[cfg_attr(all(test, 
target_arch = "aarch64"), assert_instr(st4, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst4q_lane_u32(a: *mut u32, b: uint32x4x4_t) { static_assert_imm2!(LANE); transmute(vst4q_lane_s32::(transmute(a), transmute(b))) @@ -16665,7 +16665,7 @@ pub unsafe fn vst4q_lane_u32(a: *mut u32, b: uint32x4x4_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst4_lane_p8(a: *mut p8, b: poly8x8x4_t) { static_assert_imm3!(LANE); transmute(vst4_lane_s8::(transmute(a), transmute(b))) @@ -16680,7 +16680,7 @@ pub unsafe fn vst4_lane_p8(a: *mut p8, b: poly8x8x4_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst4_lane_p16(a: *mut p16, b: poly16x4x4_t) { static_assert_imm2!(LANE); transmute(vst4_lane_s16::(transmute(a), transmute(b))) @@ -16695,7 +16695,7 @@ pub unsafe fn vst4_lane_p16(a: *mut p16, b: poly16x4x4_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst4q_lane_p16(a: *mut p16, b: poly16x8x4_t) { static_assert_imm3!(LANE); transmute(vst4q_lane_s16::(transmute(a), transmute(b))) @@ -16783,7 +16783,7 @@ vst4q_lane_f32_(b.0, b.1, b.2, b.3, LANE as i64, a as _) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmul_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { simd_mul(a, b) } @@ -16796,7 +16796,7 @@ pub unsafe fn vmul_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmulq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { simd_mul(a, b) } @@ -16809,7 +16809,7 @@ pub unsafe fn vmulq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i16"))] 
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmul_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { simd_mul(a, b) } @@ -16822,7 +16822,7 @@ pub unsafe fn vmul_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmulq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { simd_mul(a, b) } @@ -16835,7 +16835,7 @@ pub unsafe fn vmulq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmul_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { simd_mul(a, b) } @@ -16848,7 +16848,7 @@ pub unsafe fn vmul_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmulq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { simd_mul(a, b) } @@ -16861,7 +16861,7 @@ pub unsafe fn vmulq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmul_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { simd_mul(a, b) } @@ -16874,7 +16874,7 @@ pub unsafe fn vmul_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmulq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { simd_mul(a, b) } @@ -16887,7 +16887,7 @@ pub unsafe fn vmulq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] 
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmul_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { simd_mul(a, b) } @@ -16900,7 +16900,7 @@ pub unsafe fn vmul_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmulq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { simd_mul(a, b) } @@ -16913,7 +16913,7 @@ pub unsafe fn vmulq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmul_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { simd_mul(a, b) } @@ -16926,7 +16926,7 @@ pub unsafe fn vmul_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmulq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { simd_mul(a, b) } @@ -16939,7 +16939,7 @@ pub unsafe fn vmulq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(pmul))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmul_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -16958,7 +16958,7 @@ vmul_p8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(pmul))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmulq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -16977,7 +16977,7 @@ vmulq_p8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmul_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { simd_mul(a, b) } @@ -16990,7 +16990,7 @@ 
pub unsafe fn vmul_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmulq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { simd_mul(a, b) } @@ -17003,7 +17003,7 @@ pub unsafe fn vmulq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmul_n_s16(a: int16x4_t, b: i16) -> int16x4_t { simd_mul(a, vdup_n_s16(b)) } @@ -17016,7 +17016,7 @@ pub unsafe fn vmul_n_s16(a: int16x4_t, b: i16) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmulq_n_s16(a: int16x8_t, b: i16) -> int16x8_t { simd_mul(a, vdupq_n_s16(b)) } @@ -17029,7 +17029,7 @@ pub unsafe fn vmulq_n_s16(a: int16x8_t, b: i16) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmul_n_s32(a: int32x2_t, b: i32) -> int32x2_t { simd_mul(a, vdup_n_s32(b)) } @@ -17042,7 +17042,7 @@ pub unsafe fn vmul_n_s32(a: int32x2_t, b: i32) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmulq_n_s32(a: int32x4_t, b: i32) -> int32x4_t { simd_mul(a, vdupq_n_s32(b)) } @@ -17055,7 +17055,7 @@ pub unsafe fn vmulq_n_s32(a: int32x4_t, b: i32) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmul_n_u16(a: uint16x4_t, b: u16) -> uint16x4_t { simd_mul(a, vdup_n_u16(b)) } @@ -17068,7 +17068,7 @@ pub unsafe fn vmul_n_u16(a: uint16x4_t, b: u16) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = 
"arm"), assert_instr(vmul))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmulq_n_u16(a: uint16x8_t, b: u16) -> uint16x8_t { simd_mul(a, vdupq_n_u16(b)) } @@ -17081,7 +17081,7 @@ pub unsafe fn vmulq_n_u16(a: uint16x8_t, b: u16) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmul_n_u32(a: uint32x2_t, b: u32) -> uint32x2_t { simd_mul(a, vdup_n_u32(b)) } @@ -17094,7 +17094,7 @@ pub unsafe fn vmul_n_u32(a: uint32x2_t, b: u32) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmulq_n_u32(a: uint32x4_t, b: u32) -> uint32x4_t { simd_mul(a, vdupq_n_u32(b)) } @@ -17107,7 +17107,7 @@ pub unsafe fn vmulq_n_u32(a: uint32x4_t, b: u32) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmul_n_f32(a: float32x2_t, b: f32) -> float32x2_t { simd_mul(a, vdup_n_f32(b)) } @@ -17120,7 +17120,7 @@ pub unsafe fn vmul_n_f32(a: float32x2_t, b: f32) -> float32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmulq_n_f32(a: float32x4_t, b: f32) -> float32x4_t { simd_mul(a, vdupq_n_f32(b)) } @@ -17134,7 +17134,7 @@ pub unsafe fn vmulq_n_f32(a: float32x4_t, b: f32) -> float32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmul_lane_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { static_assert_imm2!(LANE); simd_mul(a, simd_shuffle4!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -17149,7 +17149,7 @@ pub unsafe fn vmul_lane_s16(a: int16x4_t, b: int16x4_t) -> int1 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))] 
#[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmul_laneq_s16(a: int16x4_t, b: int16x8_t) -> int16x4_t { static_assert_imm3!(LANE); simd_mul(a, simd_shuffle4!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -17164,7 +17164,7 @@ pub unsafe fn vmul_laneq_s16(a: int16x4_t, b: int16x8_t) -> int #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmulq_lane_s16(a: int16x8_t, b: int16x4_t) -> int16x8_t { static_assert_imm2!(LANE); simd_mul(a, simd_shuffle8!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -17179,7 +17179,7 @@ pub unsafe fn vmulq_lane_s16(a: int16x8_t, b: int16x4_t) -> int #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmulq_laneq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { static_assert_imm3!(LANE); simd_mul(a, simd_shuffle8!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -17194,7 +17194,7 @@ pub unsafe fn vmulq_laneq_s16(a: int16x8_t, b: int16x8_t) -> in #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmul_lane_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { static_assert_imm1!(LANE); simd_mul(a, simd_shuffle2!(b, b, [LANE as u32, LANE as u32])) @@ -17209,7 +17209,7 @@ pub unsafe fn vmul_lane_s32(a: int32x2_t, b: int32x2_t) -> int3 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmul_laneq_s32(a: int32x2_t, b: int32x4_t) -> int32x2_t { static_assert_imm2!(LANE); simd_mul(a, simd_shuffle2!(b, b, [LANE as u32, LANE as u32])) @@ -17224,7 +17224,7 @@ pub unsafe fn vmul_laneq_s32(a: int32x2_t, b: int32x4_t) -> int #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmulq_lane_s32(a: int32x4_t, b: 
int32x2_t) -> int32x4_t { static_assert_imm1!(LANE); simd_mul(a, simd_shuffle4!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -17239,7 +17239,7 @@ pub unsafe fn vmulq_lane_s32(a: int32x4_t, b: int32x2_t) -> int #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmulq_laneq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { static_assert_imm2!(LANE); simd_mul(a, simd_shuffle4!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -17254,7 +17254,7 @@ pub unsafe fn vmulq_laneq_s32(a: int32x4_t, b: int32x4_t) -> in #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmul_lane_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { static_assert_imm2!(LANE); simd_mul(a, simd_shuffle4!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -17269,7 +17269,7 @@ pub unsafe fn vmul_lane_u16(a: uint16x4_t, b: uint16x4_t) -> ui #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmul_laneq_u16(a: uint16x4_t, b: uint16x8_t) -> uint16x4_t { static_assert_imm3!(LANE); simd_mul(a, simd_shuffle4!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -17284,7 +17284,7 @@ pub unsafe fn vmul_laneq_u16(a: uint16x4_t, b: uint16x8_t) -> u #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmulq_lane_u16(a: uint16x8_t, b: uint16x4_t) -> uint16x8_t { static_assert_imm2!(LANE); simd_mul(a, simd_shuffle8!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -17299,7 +17299,7 @@ pub unsafe fn vmulq_lane_u16(a: uint16x8_t, b: uint16x4_t) -> u #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmulq_laneq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { static_assert_imm3!(LANE); simd_mul(a, simd_shuffle8!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -17314,7 +17314,7 @@ pub unsafe fn vmulq_laneq_u16(a: 
uint16x8_t, b: uint16x8_t) -> #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmul_lane_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { static_assert_imm1!(LANE); simd_mul(a, simd_shuffle2!(b, b, [LANE as u32, LANE as u32])) @@ -17329,7 +17329,7 @@ pub unsafe fn vmul_lane_u32(a: uint32x2_t, b: uint32x2_t) -> ui #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmul_laneq_u32(a: uint32x2_t, b: uint32x4_t) -> uint32x2_t { static_assert_imm2!(LANE); simd_mul(a, simd_shuffle2!(b, b, [LANE as u32, LANE as u32])) @@ -17344,7 +17344,7 @@ pub unsafe fn vmul_laneq_u32(a: uint32x2_t, b: uint32x4_t) -> u #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmulq_lane_u32(a: uint32x4_t, b: uint32x2_t) -> uint32x4_t { static_assert_imm1!(LANE); simd_mul(a, simd_shuffle4!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -17359,7 +17359,7 @@ pub unsafe fn vmulq_lane_u32(a: uint32x4_t, b: uint32x2_t) -> u #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmulq_laneq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { static_assert_imm2!(LANE); simd_mul(a, simd_shuffle4!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -17374,7 +17374,7 @@ pub unsafe fn vmulq_laneq_u32(a: uint32x4_t, b: uint32x4_t) -> #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmul_lane_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { static_assert_imm1!(LANE); simd_mul(a, simd_shuffle2!(b, b, [LANE as u32, LANE as u32])) @@ -17389,7 +17389,7 @@ pub unsafe fn vmul_lane_f32(a: float32x2_t, b: float32x2_t) -> #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = 
"neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmul_laneq_f32(a: float32x2_t, b: float32x4_t) -> float32x2_t { static_assert_imm2!(LANE); simd_mul(a, simd_shuffle2!(b, b, [LANE as u32, LANE as u32])) @@ -17404,7 +17404,7 @@ pub unsafe fn vmul_laneq_f32(a: float32x2_t, b: float32x4_t) -> #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmulq_lane_f32(a: float32x4_t, b: float32x2_t) -> float32x4_t { static_assert_imm1!(LANE); simd_mul(a, simd_shuffle4!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -17419,7 +17419,7 @@ pub unsafe fn vmulq_lane_f32(a: float32x4_t, b: float32x2_t) -> #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmulq_laneq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { static_assert_imm2!(LANE); simd_mul(a, simd_shuffle4!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -17433,7 +17433,7 @@ pub unsafe fn vmulq_laneq_f32(a: float32x4_t, b: float32x4_t) - #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smull))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmull_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -17452,7 +17452,7 @@ vmull_s8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smull))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmull_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -17471,7 +17471,7 @@ vmull_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smull))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmull_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -17490,7 +17490,7 @@ vmull_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umull))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] 
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmull_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -17509,7 +17509,7 @@ vmull_u8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umull))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmull_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -17528,7 +17528,7 @@ vmull_u16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umull))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmull_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -17547,7 +17547,7 @@ vmull_u32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.p8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(pmull))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmull_p8(a: poly8x8_t, b: poly8x8_t) -> poly16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -17566,7 +17566,7 @@ vmull_p8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smull))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmull_n_s16(a: int16x4_t, b: i16) -> int32x4_t { vmull_s16(a, vdup_n_s16(b)) } @@ -17579,7 +17579,7 @@ pub unsafe fn vmull_n_s16(a: int16x4_t, b: i16) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smull))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmull_n_s32(a: int32x2_t, b: i32) -> int64x2_t { vmull_s32(a, vdup_n_s32(b)) } @@ -17592,7 +17592,7 @@ pub unsafe fn vmull_n_s32(a: int32x2_t, b: i32) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umull))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmull_n_u16(a: uint16x4_t, b: u16) -> uint32x4_t { vmull_u16(a, vdup_n_u16(b)) } @@ -17605,7 +17605,7 @@ pub unsafe fn 
vmull_n_u16(a: uint16x4_t, b: u16) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umull))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmull_n_u32(a: uint32x2_t, b: u32) -> uint64x2_t { vmull_u32(a, vdup_n_u32(b)) } @@ -17619,7 +17619,7 @@ pub unsafe fn vmull_n_u32(a: uint32x2_t, b: u32) -> uint64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smull, LANE = 1))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmull_lane_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { static_assert_imm2!(LANE); vmull_s16(a, simd_shuffle4!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -17634,7 +17634,7 @@ pub unsafe fn vmull_lane_s16(a: int16x4_t, b: int16x4_t) -> int #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smull, LANE = 1))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmull_laneq_s16(a: int16x4_t, b: int16x8_t) -> int32x4_t { static_assert_imm3!(LANE); vmull_s16(a, simd_shuffle4!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -17649,7 +17649,7 @@ pub unsafe fn vmull_laneq_s16(a: int16x4_t, b: int16x8_t) -> in #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smull, LANE = 1))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmull_lane_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { static_assert_imm1!(LANE); vmull_s32(a, simd_shuffle2!(b, b, [LANE as u32, LANE as u32])) @@ -17664,7 +17664,7 @@ pub unsafe fn vmull_lane_s32(a: int32x2_t, b: int32x2_t) -> int #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smull, LANE = 1))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmull_laneq_s32(a: int32x2_t, b: int32x4_t) -> int64x2_t { static_assert_imm2!(LANE); vmull_s32(a, simd_shuffle2!(b, b, [LANE as u32, LANE as u32])) @@ -17679,7 +17679,7 @@ pub unsafe fn vmull_laneq_s32(a: int32x2_t, b: int32x4_t) -> in #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umull, LANE = 1))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub 
unsafe fn vmull_lane_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { static_assert_imm2!(LANE); vmull_u16(a, simd_shuffle4!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -17694,7 +17694,7 @@ pub unsafe fn vmull_lane_u16(a: uint16x4_t, b: uint16x4_t) -> u #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umull, LANE = 1))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmull_laneq_u16(a: uint16x4_t, b: uint16x8_t) -> uint32x4_t { static_assert_imm3!(LANE); vmull_u16(a, simd_shuffle4!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -17709,7 +17709,7 @@ pub unsafe fn vmull_laneq_u16(a: uint16x4_t, b: uint16x8_t) -> #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umull, LANE = 1))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmull_lane_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { static_assert_imm1!(LANE); vmull_u32(a, simd_shuffle2!(b, b, [LANE as u32, LANE as u32])) @@ -17724,7 +17724,7 @@ pub unsafe fn vmull_lane_u32(a: uint32x2_t, b: uint32x2_t) -> u #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umull, LANE = 1))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmull_laneq_u32(a: uint32x2_t, b: uint32x4_t) -> uint64x2_t { static_assert_imm2!(LANE); vmull_u32(a, simd_shuffle2!(b, b, [LANE as u32, LANE as u32])) @@ -17738,7 +17738,7 @@ pub unsafe fn vmull_laneq_u32(a: uint32x2_t, b: uint32x4_t) -> #[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmla))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vfma_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -17757,7 +17757,7 @@ vfma_f32_(b, c, a) #[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmla))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vfmaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -17776,7 +17776,7 @@ vfmaq_f32_(b, c, a) #[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmla))] -#[cfg_attr(target_arch = "aarch64", 
stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vfma_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { vfma_f32(a, b, vdup_n_f32_vfp4(c)) } @@ -17789,7 +17789,7 @@ pub unsafe fn vfma_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t #[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmla))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vfmaq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t { vfmaq_f32(a, b, vdupq_n_f32_vfp4(c)) } @@ -17802,7 +17802,7 @@ pub unsafe fn vfmaq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t #[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfms))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmls))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vfms_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { let b: float32x2_t = simd_neg(b); vfma_f32(a, b, c) @@ -17816,7 +17816,7 @@ pub unsafe fn vfms_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float3 #[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfms))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmls))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vfmsq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { let b: float32x4_t = simd_neg(b); vfmaq_f32(a, b, c) @@ -17830,7 +17830,7 @@ pub unsafe fn vfmsq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float #[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfms))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmls))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vfms_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { vfms_f32(a, b, vdup_n_f32_vfp4(c)) } @@ -17843,7 +17843,7 @@ pub unsafe fn vfms_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t #[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfms))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmls))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vfmsq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t { vfmsq_f32(a, b, vdupq_n_f32_vfp4(c)) } @@ -17856,7 +17856,7 @@ pub unsafe fn vfmsq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, 
target_arch = "arm"), assert_instr("vsub.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { simd_sub(a, b) } @@ -17869,7 +17869,7 @@ pub unsafe fn vsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { simd_sub(a, b) } @@ -17882,7 +17882,7 @@ pub unsafe fn vsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { simd_sub(a, b) } @@ -17895,7 +17895,7 @@ pub unsafe fn vsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { simd_sub(a, b) } @@ -17908,7 +17908,7 @@ pub unsafe fn vsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { simd_sub(a, b) } @@ -17921,7 +17921,7 @@ pub unsafe fn vsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { simd_sub(a, b) } @@ -17934,7 +17934,7 @@ pub unsafe fn vsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since 
= "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { simd_sub(a, b) } @@ -17947,7 +17947,7 @@ pub unsafe fn vsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { simd_sub(a, b) } @@ -17960,7 +17960,7 @@ pub unsafe fn vsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { simd_sub(a, b) } @@ -17973,7 +17973,7 @@ pub unsafe fn vsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { simd_sub(a, b) } @@ -17986,7 +17986,7 @@ pub unsafe fn vsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { simd_sub(a, b) } @@ -17999,7 +17999,7 @@ pub unsafe fn vsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { simd_sub(a, b) } @@ -18012,7 +18012,7 @@ pub unsafe fn vsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i64"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsub_s64(a: int64x1_t, b: 
int64x1_t) -> int64x1_t { simd_sub(a, b) } @@ -18025,7 +18025,7 @@ pub unsafe fn vsub_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i64"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { simd_sub(a, b) } @@ -18038,7 +18038,7 @@ pub unsafe fn vsubq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i64"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsub_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { simd_sub(a, b) } @@ -18051,7 +18051,7 @@ pub unsafe fn vsub_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i64"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { simd_sub(a, b) } @@ -18064,7 +18064,7 @@ pub unsafe fn vsubq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fsub))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsub_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { simd_sub(a, b) } @@ -18077,7 +18077,7 @@ pub unsafe fn vsub_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fsub))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { simd_sub(a, b) } @@ -18090,7 +18090,7 @@ pub unsafe fn vsubq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vadd_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { simd_xor(a, b) } @@ -18103,7 +18103,7 @@ pub unsafe fn vadd_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { 
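// The surrounding `vadd_p*` intrinsics add polynomials over GF(2), where addition is
// carry-less and therefore identical to bitwise XOR — which is why their bodies are
// `simd_xor(a, b)` (or `a ^ b` for the 128-bit `p128` case) and the tests only assert a
// `nop`. A small sketch of the same identity on a single lane value; the helper name is
// illustrative and not part of the patch:
//
// fn poly_add_u8(a: u8, b: u8) -> u8 {
//     // (x^3 + x) + (x + 1) = x^3 + 1 over GF(2): equal coefficients cancel in pairs,
//     // which is exactly what XOR does bit by bit.
//     a ^ b
// }
// assert_eq!(poly_add_u8(0b0000_1010, 0b0000_0011), 0b0000_1001);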
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vadd_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { simd_xor(a, b) } @@ -18116,7 +18116,7 @@ pub unsafe fn vadd_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vaddq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { simd_xor(a, b) } @@ -18129,7 +18129,7 @@ pub unsafe fn vaddq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vaddq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { simd_xor(a, b) } @@ -18142,7 +18142,7 @@ pub unsafe fn vaddq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vadd_p64(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t { simd_xor(a, b) } @@ -18155,7 +18155,7 @@ pub unsafe fn vadd_p64(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vaddq_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { simd_xor(a, b) } @@ -18168,7 +18168,7 @@ pub unsafe fn vaddq_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vaddq_p128(a: p128, b: p128) -> p128 { a ^ b } @@ -18181,7 +18181,7 @@ pub unsafe fn vaddq_p128(a: p128, b: p128) -> p128 { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(subhn))] -#[cfg_attr(target_arch = 
"aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { let c: i16x8 = i16x8::new(8, 8, 8, 8, 8, 8, 8, 8); simd_cast(simd_shr(simd_sub(a, b), transmute(c))) @@ -18195,7 +18195,7 @@ pub unsafe fn vsubhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(subhn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { let c: i32x4 = i32x4::new(16, 16, 16, 16); simd_cast(simd_shr(simd_sub(a, b), transmute(c))) @@ -18209,7 +18209,7 @@ pub unsafe fn vsubhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(subhn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { let c: i64x2 = i64x2::new(32, 32); simd_cast(simd_shr(simd_sub(a, b), transmute(c))) @@ -18223,7 +18223,7 @@ pub unsafe fn vsubhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(subhn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { let c: u16x8 = u16x8::new(8, 8, 8, 8, 8, 8, 8, 8); simd_cast(simd_shr(simd_sub(a, b), transmute(c))) @@ -18237,7 +18237,7 @@ pub unsafe fn vsubhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(subhn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { let c: u32x4 = u32x4::new(16, 16, 16, 16); simd_cast(simd_shr(simd_sub(a, b), transmute(c))) @@ -18251,7 +18251,7 @@ pub unsafe fn vsubhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(subhn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { let c: u64x2 = u64x2::new(32, 32); simd_cast(simd_shr(simd_sub(a, b), transmute(c))) @@ -18265,7 +18265,7 @@ 
pub unsafe fn vsubhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(subhn2))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x16_t { let d: int8x8_t = vsubhn_s16(b, c); simd_shuffle16!(a, d, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) @@ -18279,7 +18279,7 @@ pub unsafe fn vsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x1 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(subhn2))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16x8_t { let d: int16x4_t = vsubhn_s32(b, c); simd_shuffle8!(a, d, [0, 1, 2, 3, 4, 5, 6, 7]) @@ -18293,7 +18293,7 @@ pub unsafe fn vsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(subhn2))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32x4_t { let d: int32x2_t = vsubhn_s64(b, c); simd_shuffle4!(a, d, [0, 1, 2, 3]) @@ -18307,7 +18307,7 @@ pub unsafe fn vsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(subhn2))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uint8x16_t { let d: uint8x8_t = vsubhn_u16(b, c); simd_shuffle16!(a, d, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) @@ -18321,7 +18321,7 @@ pub unsafe fn vsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uin #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(subhn2))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> uint16x8_t { let d: uint16x4_t = vsubhn_u32(b, c); simd_shuffle8!(a, d, [0, 1, 2, 3, 4, 5, 6, 7]) @@ -18335,7 +18335,7 @@ pub unsafe fn vsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> ui #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), 
assert_instr(vsubhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(subhn2))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> uint32x4_t { let d: uint32x2_t = vsubhn_u64(b, c); simd_shuffle4!(a, d, [0, 1, 2, 3]) @@ -18349,7 +18349,7 @@ pub unsafe fn vsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> ui #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uhsub))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vhsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -18368,7 +18368,7 @@ vhsub_u8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uhsub))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vhsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -18387,7 +18387,7 @@ vhsubq_u8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uhsub))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vhsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -18406,7 +18406,7 @@ vhsub_u16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uhsub))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vhsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -18425,7 +18425,7 @@ vhsubq_u16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uhsub))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vhsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -18444,7 +18444,7 @@ vhsub_u32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uhsub))] -#[cfg_attr(target_arch = "aarch64", stable(feature = 
"neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vhsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -18463,7 +18463,7 @@ vhsubq_u32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shsub))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vhsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -18482,7 +18482,7 @@ vhsub_s8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shsub))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vhsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -18501,7 +18501,7 @@ vhsubq_s8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shsub))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vhsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -18520,7 +18520,7 @@ vhsub_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shsub))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vhsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -18539,7 +18539,7 @@ vhsubq_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shsub))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vhsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -18558,7 +18558,7 @@ vhsub_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shsub))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vhsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -18577,7 +18577,7 @@ 
vhsubq_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ssubw))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubw_s8(a: int16x8_t, b: int8x8_t) -> int16x8_t { simd_sub(a, simd_cast(b)) } @@ -18590,7 +18590,7 @@ pub unsafe fn vsubw_s8(a: int16x8_t, b: int8x8_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ssubw))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubw_s16(a: int32x4_t, b: int16x4_t) -> int32x4_t { simd_sub(a, simd_cast(b)) } @@ -18603,7 +18603,7 @@ pub unsafe fn vsubw_s16(a: int32x4_t, b: int16x4_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ssubw))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubw_s32(a: int64x2_t, b: int32x2_t) -> int64x2_t { simd_sub(a, simd_cast(b)) } @@ -18616,7 +18616,7 @@ pub unsafe fn vsubw_s32(a: int64x2_t, b: int32x2_t) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usubw))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubw_u8(a: uint16x8_t, b: uint8x8_t) -> uint16x8_t { simd_sub(a, simd_cast(b)) } @@ -18629,7 +18629,7 @@ pub unsafe fn vsubw_u8(a: uint16x8_t, b: uint8x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usubw))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubw_u16(a: uint32x4_t, b: uint16x4_t) -> uint32x4_t { simd_sub(a, simd_cast(b)) } @@ -18642,7 +18642,7 @@ pub unsafe fn vsubw_u16(a: uint32x4_t, b: uint16x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usubw))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubw_u32(a: uint64x2_t, b: uint32x2_t) -> uint64x2_t { simd_sub(a, simd_cast(b)) } @@ -18655,7 +18655,7 @@ pub unsafe fn vsubw_u32(a: uint64x2_t, b: uint32x2_t) -> uint64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = 
"arm"), assert_instr(vsubl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ssubl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubl_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t { let c: int16x8_t = simd_cast(a); let d: int16x8_t = simd_cast(b); @@ -18670,7 +18670,7 @@ pub unsafe fn vsubl_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ssubl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubl_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { let c: int32x4_t = simd_cast(a); let d: int32x4_t = simd_cast(b); @@ -18685,7 +18685,7 @@ pub unsafe fn vsubl_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ssubl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubl_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { let c: int64x2_t = simd_cast(a); let d: int64x2_t = simd_cast(b); @@ -18700,7 +18700,7 @@ pub unsafe fn vsubl_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usubl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubl_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { let c: uint16x8_t = simd_cast(a); let d: uint16x8_t = simd_cast(b); @@ -18715,7 +18715,7 @@ pub unsafe fn vsubl_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usubl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubl_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { let c: uint32x4_t = simd_cast(a); let d: uint32x4_t = simd_cast(b); @@ -18730,7 +18730,7 @@ pub unsafe fn vsubl_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usubl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubl_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { let c: uint64x2_t = simd_cast(a); let d: uint64x2_t = simd_cast(b); @@ -18745,7 +18745,7 @@ pub unsafe fn vsubl_u32(a: uint32x2_t, b: uint32x2_t) -> 
uint64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smax))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -18764,7 +18764,7 @@ vmax_s8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smax))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -18783,7 +18783,7 @@ vmaxq_s8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smax))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -18802,7 +18802,7 @@ vmax_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smax))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -18821,7 +18821,7 @@ vmaxq_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smax))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -18840,7 +18840,7 @@ vmax_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smax))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -18859,7 +18859,7 @@ vmaxq_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umax))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = 
"arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmax_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -18878,7 +18878,7 @@ vmax_u8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umax))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -18897,7 +18897,7 @@ vmaxq_u8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umax))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmax_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -18916,7 +18916,7 @@ vmax_u16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umax))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -18935,7 +18935,7 @@ vmaxq_u16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umax))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmax_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -18954,7 +18954,7 @@ vmax_u32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umax))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -18973,7 +18973,7 @@ vmaxq_u32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmax))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -18992,7 +18992,7 @@ vmax_f32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, 
target_arch = "arm"), assert_instr(vmax))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmax))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19011,7 +19011,7 @@ vmaxq_f32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmaxnm))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmaxnm))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19030,7 +19030,7 @@ vmaxnm_f32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmaxnm))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmaxnm))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19049,7 +19049,7 @@ vmaxnmq_f32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smin))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19068,7 +19068,7 @@ vmin_s8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smin))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19087,7 +19087,7 @@ vminq_s8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smin))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19106,7 +19106,7 @@ vmin_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smin))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", 
since = "1.59.0"))] pub unsafe fn vminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19125,7 +19125,7 @@ vminq_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smin))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19144,7 +19144,7 @@ vmin_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smin))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19163,7 +19163,7 @@ vminq_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umin))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmin_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19182,7 +19182,7 @@ vmin_u8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umin))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19201,7 +19201,7 @@ vminq_u8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umin))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmin_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19220,7 +19220,7 @@ vmin_u16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umin))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19239,7 +19239,7 @@ vminq_u16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] 
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umin))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmin_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19258,7 +19258,7 @@ vmin_u32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umin))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19277,7 +19277,7 @@ vminq_u32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmin))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19296,7 +19296,7 @@ vmin_f32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmin))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19315,7 +19315,7 @@ vminq_f32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vminnm))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fminnm))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19334,7 +19334,7 @@ vminnm_f32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vminnm))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fminnm))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19353,7 +19353,7 @@ vminnmq_f32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(faddp))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub 
unsafe fn vpadd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19372,7 +19372,7 @@ vpadd_f32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmull))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqdmull_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19391,7 +19391,7 @@ vqdmull_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmull))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqdmull_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19410,7 +19410,7 @@ vqdmull_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmull))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqdmull_n_s16(a: int16x4_t, b: i16) -> int32x4_t { vqdmull_s16(a, vdup_n_s16(b)) } @@ -19423,7 +19423,7 @@ pub unsafe fn vqdmull_n_s16(a: int16x4_t, b: i16) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmull))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqdmull_n_s32(a: int32x2_t, b: i32) -> int64x2_t { vqdmull_s32(a, vdup_n_s32(b)) } @@ -19437,7 +19437,7 @@ pub unsafe fn vqdmull_n_s32(a: int32x2_t, b: i32) -> int64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmull, N = 2))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqdmull_lane_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { static_assert_imm2!(N); let b: int16x4_t = simd_shuffle4!(b, b, [N as u32, N as u32, N as u32, N as u32]); @@ -19453,7 +19453,7 @@ pub unsafe fn vqdmull_lane_s16(a: int16x4_t, b: int16x4_t) -> int3 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull, N = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmull, N = 1))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqdmull_lane_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { static_assert_imm1!(N); let b: int32x2_t = 
simd_shuffle2!(b, b, [N as u32, N as u32]); @@ -19468,7 +19468,7 @@ pub unsafe fn vqdmull_lane_s32(a: int32x2_t, b: int32x2_t) -> int6 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmlal))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqdmlal_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { vqaddq_s32(a, vqdmull_s16(b, c)) } @@ -19481,7 +19481,7 @@ pub unsafe fn vqdmlal_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmlal))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqdmlal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { vqaddq_s64(a, vqdmull_s32(b, c)) } @@ -19494,7 +19494,7 @@ pub unsafe fn vqdmlal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmlal))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqdmlal_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { vqaddq_s32(a, vqdmull_n_s16(b, c)) } @@ -19507,7 +19507,7 @@ pub unsafe fn vqdmlal_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmlal))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqdmlal_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { vqaddq_s64(a, vqdmull_n_s32(b, c)) } @@ -19521,7 +19521,7 @@ pub unsafe fn vqdmlal_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmlal, N = 2))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqdmlal_lane_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { static_assert_imm2!(N); vqaddq_s32(a, vqdmull_lane_s16::(b, c)) @@ -19536,7 +19536,7 @@ pub unsafe fn vqdmlal_lane_s16(a: int32x4_t, b: int16x4_t, c: int1 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal, N = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmlal, N = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = 
"1.59.0"))] pub unsafe fn vqdmlal_lane_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { static_assert_imm1!(N); vqaddq_s64(a, vqdmull_lane_s32::(b, c)) @@ -19550,7 +19550,7 @@ pub unsafe fn vqdmlal_lane_s32(a: int64x2_t, b: int32x2_t, c: int3 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmlsl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqdmlsl_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { vqsubq_s32(a, vqdmull_s16(b, c)) } @@ -19563,7 +19563,7 @@ pub unsafe fn vqdmlsl_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmlsl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqdmlsl_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { vqsubq_s64(a, vqdmull_s32(b, c)) } @@ -19576,7 +19576,7 @@ pub unsafe fn vqdmlsl_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmlsl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqdmlsl_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { vqsubq_s32(a, vqdmull_n_s16(b, c)) } @@ -19589,7 +19589,7 @@ pub unsafe fn vqdmlsl_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmlsl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqdmlsl_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { vqsubq_s64(a, vqdmull_n_s32(b, c)) } @@ -19603,7 +19603,7 @@ pub unsafe fn vqdmlsl_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmlsl, N = 2))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqdmlsl_lane_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { static_assert_imm2!(N); vqsubq_s32(a, vqdmull_lane_s16::(b, c)) @@ -19618,7 +19618,7 @@ pub unsafe fn vqdmlsl_lane_s16(a: int32x4_t, b: int16x4_t, c: int1 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl, N = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmlsl, N = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = 
"neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqdmlsl_lane_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { static_assert_imm1!(N); vqsubq_s64(a, vqdmull_lane_s32::(b, c)) @@ -19632,7 +19632,7 @@ pub unsafe fn vqdmlsl_lane_s32(a: int64x2_t, b: int32x2_t, c: int3 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmulh))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqdmulh_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19651,7 +19651,7 @@ vqdmulh_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmulh))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqdmulhq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19670,7 +19670,7 @@ vqdmulhq_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmulh))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqdmulh_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19689,7 +19689,7 @@ vqdmulh_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmulh))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqdmulhq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19708,7 +19708,7 @@ vqdmulhq_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmulh))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqdmulh_n_s16(a: int16x4_t, b: i16) -> int16x4_t { let b: int16x4_t = vdup_n_s16(b); vqdmulh_s16(a, b) @@ -19722,7 +19722,7 @@ pub unsafe fn vqdmulh_n_s16(a: int16x4_t, b: i16) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmulh))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = 
"1.59.0"))] pub unsafe fn vqdmulh_n_s32(a: int32x2_t, b: i32) -> int32x2_t { let b: int32x2_t = vdup_n_s32(b); vqdmulh_s32(a, b) @@ -19736,7 +19736,7 @@ pub unsafe fn vqdmulh_n_s32(a: int32x2_t, b: i32) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmulh))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqdmulhq_n_s16(a: int16x8_t, b: i16) -> int16x8_t { let b: int16x8_t = vdupq_n_s16(b); vqdmulhq_s16(a, b) @@ -19750,7 +19750,7 @@ pub unsafe fn vqdmulhq_n_s16(a: int16x8_t, b: i16) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmulh))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqdmulhq_n_s32(a: int32x4_t, b: i32) -> int32x4_t { let b: int32x4_t = vdupq_n_s32(b); vqdmulhq_s32(a, b) @@ -19765,7 +19765,7 @@ pub unsafe fn vqdmulhq_n_s32(a: int32x4_t, b: i32) -> int32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmulh, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqdmulhq_laneq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { static_assert_imm3!(LANE); vqdmulhq_s16(a, vdupq_n_s16(simd_extract(b, LANE as u32))) @@ -19780,7 +19780,7 @@ pub unsafe fn vqdmulhq_laneq_s16(a: int16x8_t, b: int16x8_t) -> #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmulh, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqdmulh_laneq_s16(a: int16x4_t, b: int16x8_t) -> int16x4_t { static_assert_imm3!(LANE); vqdmulh_s16(a, vdup_n_s16(simd_extract(b, LANE as u32))) @@ -19795,7 +19795,7 @@ pub unsafe fn vqdmulh_laneq_s16(a: int16x4_t, b: int16x8_t) -> #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmulh, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqdmulhq_laneq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { static_assert_imm2!(LANE); vqdmulhq_s32(a, vdupq_n_s32(simd_extract(b, LANE as u32))) @@ -19810,7 +19810,7 @@ pub unsafe fn vqdmulhq_laneq_s32(a: int32x4_t, b: int32x4_t) -> #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmulh, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = 
"neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqdmulh_laneq_s32(a: int32x2_t, b: int32x4_t) -> int32x2_t { static_assert_imm2!(LANE); vqdmulh_s32(a, vdup_n_s32(simd_extract(b, LANE as u32))) @@ -19824,7 +19824,7 @@ pub unsafe fn vqdmulh_laneq_s32(a: int32x2_t, b: int32x4_t) -> #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqxtn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqmovn_s16(a: int16x8_t) -> int8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19843,7 +19843,7 @@ vqmovn_s16_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqxtn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqmovn_s32(a: int32x4_t) -> int16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19862,7 +19862,7 @@ vqmovn_s32_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqxtn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqmovn_s64(a: int64x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19881,7 +19881,7 @@ vqmovn_s64_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqxtn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqmovn_u16(a: uint16x8_t) -> uint8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19900,7 +19900,7 @@ vqmovn_u16_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqxtn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqmovn_u32(a: uint32x4_t) -> uint16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19919,7 +19919,7 @@ vqmovn_u32_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqxtn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqmovn_u64(a: uint64x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19938,7 +19938,7 @@ vqmovn_u64_(a) 
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovun))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqxtun))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqmovun_s16(a: int16x8_t) -> uint8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19957,7 +19957,7 @@ vqmovun_s16_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovun))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqxtun))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqmovun_s32(a: int32x4_t) -> uint16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19976,7 +19976,7 @@ vqmovun_s32_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovun))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqxtun))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqmovun_s64(a: int64x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19995,7 +19995,7 @@ vqmovun_s64_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrdmulh_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -20014,7 +20014,7 @@ vqrdmulh_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrdmulhq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -20033,7 +20033,7 @@ vqrdmulhq_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrdmulh_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -20052,7 +20052,7 @@ vqrdmulh_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] 
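vqmovun* is the signed-to-unsigned flavour of the same narrowing: negative lanes clamp to zero and over-range lanes clamp to the unsigned maximum. Another hypothetical sketch under the same assumptions:

    #[cfg(target_arch = "aarch64")]
    fn saturating_narrow_unsigned_demo() {
        use core::arch::aarch64::*;
        unsafe {
            let narrowed: uint8x8_t = vqmovun_s16(vdupq_n_s16(-5));
            assert_eq!(vget_lane_u8::<0>(narrowed), 0); // negative clamps to 0
        }
    }
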
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrdmulhq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -20071,7 +20071,7 @@ vqrdmulhq_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrdmulh_n_s16(a: int16x4_t, b: i16) -> int16x4_t { vqrdmulh_s16(a, vdup_n_s16(b)) } @@ -20084,7 +20084,7 @@ pub unsafe fn vqrdmulh_n_s16(a: int16x4_t, b: i16) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrdmulhq_n_s16(a: int16x8_t, b: i16) -> int16x8_t { vqrdmulhq_s16(a, vdupq_n_s16(b)) } @@ -20097,7 +20097,7 @@ pub unsafe fn vqrdmulhq_n_s16(a: int16x8_t, b: i16) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrdmulh_n_s32(a: int32x2_t, b: i32) -> int32x2_t { vqrdmulh_s32(a, vdup_n_s32(b)) } @@ -20110,7 +20110,7 @@ pub unsafe fn vqrdmulh_n_s32(a: int32x2_t, b: i32) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrdmulhq_n_s32(a: int32x4_t, b: i32) -> int32x4_t { vqrdmulhq_s32(a, vdupq_n_s32(b)) } @@ -20124,7 +20124,7 @@ pub unsafe fn vqrdmulhq_n_s32(a: int32x4_t, b: i32) -> int32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh, LANE = 1))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrdmulh_lane_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { static_assert_imm2!(LANE); let b: int16x4_t = simd_shuffle4!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); @@ -20140,7 +20140,7 @@ pub unsafe fn vqrdmulh_lane_s16(a: int16x4_t, b: int16x4_t) -> #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh, LANE = 1))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] 
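For the vqrdmulh* hunks in this stretch, a rough scalar model of one i16 lane may help: the operands are multiplied, doubled, a rounding constant is added, and the high half is taken with saturation. This is a sketch of my reading of the operation, not code taken from the patch:

    // Hypothetical scalar model of one lane of vqrdmulh_s16.
    fn qrdmulh_s16_lane(a: i16, b: i16) -> i16 {
        let p = 2 * (a as i64) * (b as i64) + (1 << 15); // double, then round
        (p >> 16).clamp(i16::MIN as i64, i16::MAX as i64) as i16
    }
    // The one input pair that actually saturates is i16::MIN * i16::MIN:
    // qrdmulh_s16_lane(i16::MIN, i16::MIN) == i16::MAX

The non-rounding vqdmulh* variants earlier in the file omit the (1 << 15) rounding term but otherwise follow the same pattern.
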
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrdmulh_laneq_s16(a: int16x4_t, b: int16x8_t) -> int16x4_t { static_assert_imm3!(LANE); let b: int16x4_t = simd_shuffle4!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); @@ -20156,7 +20156,7 @@ pub unsafe fn vqrdmulh_laneq_s16(a: int16x4_t, b: int16x8_t) -> #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh, LANE = 1))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrdmulhq_lane_s16(a: int16x8_t, b: int16x4_t) -> int16x8_t { static_assert_imm2!(LANE); let b: int16x8_t = simd_shuffle8!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]); @@ -20172,7 +20172,7 @@ pub unsafe fn vqrdmulhq_lane_s16(a: int16x8_t, b: int16x4_t) -> #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh, LANE = 1))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrdmulhq_laneq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { static_assert_imm3!(LANE); let b: int16x8_t = simd_shuffle8!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]); @@ -20188,7 +20188,7 @@ pub unsafe fn vqrdmulhq_laneq_s16(a: int16x8_t, b: int16x8_t) - #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh, LANE = 1))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrdmulh_lane_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { static_assert_imm1!(LANE); let b: int32x2_t = simd_shuffle2!(b, b, [LANE as u32, LANE as u32]); @@ -20204,7 +20204,7 @@ pub unsafe fn vqrdmulh_lane_s32(a: int32x2_t, b: int32x2_t) -> #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh, LANE = 1))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrdmulh_laneq_s32(a: int32x2_t, b: int32x4_t) -> int32x2_t { static_assert_imm2!(LANE); let b: int32x2_t = simd_shuffle2!(b, b, [LANE as u32, LANE as u32]); @@ -20220,7 +20220,7 @@ pub unsafe fn vqrdmulh_laneq_s32(a: int32x2_t, b: int32x4_t) -> #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh, LANE = 1))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrdmulhq_lane_s32(a: int32x4_t, b: int32x2_t) -> int32x4_t 
{ static_assert_imm1!(LANE); let b: int32x4_t = simd_shuffle4!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); @@ -20236,7 +20236,7 @@ pub unsafe fn vqrdmulhq_lane_s32(a: int32x4_t, b: int32x2_t) -> #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh, LANE = 1))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrdmulhq_laneq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { static_assert_imm2!(LANE); let b: int32x4_t = simd_shuffle4!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); @@ -20251,7 +20251,7 @@ pub unsafe fn vqrdmulhq_laneq_s32(a: int32x4_t, b: int32x4_t) - #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -20270,7 +20270,7 @@ vqrshl_s8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -20289,7 +20289,7 @@ vqrshlq_s8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -20308,7 +20308,7 @@ vqrshl_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -20327,7 +20327,7 @@ vqrshlq_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern 
"unadjusted" { @@ -20346,7 +20346,7 @@ vqrshl_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -20365,7 +20365,7 @@ vqrshlq_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -20384,7 +20384,7 @@ vqrshl_s64_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -20403,7 +20403,7 @@ vqrshlq_s64_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqrshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -20422,7 +20422,7 @@ vqrshl_u8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqrshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -20441,7 +20441,7 @@ vqrshlq_u8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqrshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -20460,7 +20460,7 @@ vqrshl_u16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqrshl))] 
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -20479,7 +20479,7 @@ vqrshlq_u16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqrshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -20498,7 +20498,7 @@ vqrshl_u32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqrshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -20517,7 +20517,7 @@ vqrshlq_u32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqrshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -20536,7 +20536,7 @@ vqrshl_u64_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqrshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -20888,7 +20888,7 @@ vqrshrun_n_s64_(a, N) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -20907,7 +20907,7 @@ vqshl_s8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { 
#[allow(improper_ctypes)] extern "unadjusted" { @@ -20926,7 +20926,7 @@ vqshlq_s8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -20945,7 +20945,7 @@ vqshl_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -20964,7 +20964,7 @@ vqshlq_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -20983,7 +20983,7 @@ vqshl_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -21002,7 +21002,7 @@ vqshlq_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -21021,7 +21021,7 @@ vqshl_s64_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -21040,7 +21040,7 @@ vqshlq_s64_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl))] 
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -21059,7 +21059,7 @@ vqshl_u8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -21078,7 +21078,7 @@ vqshlq_u8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -21097,7 +21097,7 @@ vqshl_u16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -21116,7 +21116,7 @@ vqshlq_u16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -21135,7 +21135,7 @@ vqshl_u32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -21154,7 +21154,7 @@ vqshlq_u32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { #[allow(improper_ctypes)] extern 
"unadjusted" { @@ -21173,7 +21173,7 @@ vqshl_u64_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -21193,7 +21193,7 @@ vqshlq_u64_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshl_n_s8(a: int8x8_t) -> int8x8_t { static_assert_imm3!(N); vqshl_s8(a, vdup_n_s8(N as _)) @@ -21208,7 +21208,7 @@ pub unsafe fn vqshl_n_s8(a: int8x8_t) -> int8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshlq_n_s8(a: int8x16_t) -> int8x16_t { static_assert_imm3!(N); vqshlq_s8(a, vdupq_n_s8(N as _)) @@ -21223,7 +21223,7 @@ pub unsafe fn vqshlq_n_s8(a: int8x16_t) -> int8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshl_n_s16(a: int16x4_t) -> int16x4_t { static_assert_imm4!(N); vqshl_s16(a, vdup_n_s16(N as _)) @@ -21238,7 +21238,7 @@ pub unsafe fn vqshl_n_s16(a: int16x4_t) -> int16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshlq_n_s16(a: int16x8_t) -> int16x8_t { static_assert_imm4!(N); vqshlq_s16(a, vdupq_n_s16(N as _)) @@ -21253,7 +21253,7 @@ pub unsafe fn vqshlq_n_s16(a: int16x8_t) -> int16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshl_n_s32(a: int32x2_t) -> int32x2_t { static_assert_imm5!(N); vqshl_s32(a, vdup_n_s32(N as _)) @@ -21268,7 +21268,7 @@ pub unsafe fn vqshl_n_s32(a: int32x2_t) -> int32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl, N = 2))] 
#[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshlq_n_s32(a: int32x4_t) -> int32x4_t { static_assert_imm5!(N); vqshlq_s32(a, vdupq_n_s32(N as _)) @@ -21283,7 +21283,7 @@ pub unsafe fn vqshlq_n_s32(a: int32x4_t) -> int32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshl_n_s64(a: int64x1_t) -> int64x1_t { static_assert_imm6!(N); vqshl_s64(a, vdup_n_s64(N as _)) @@ -21298,7 +21298,7 @@ pub unsafe fn vqshl_n_s64(a: int64x1_t) -> int64x1_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshlq_n_s64(a: int64x2_t) -> int64x2_t { static_assert_imm6!(N); vqshlq_s64(a, vdupq_n_s64(N as _)) @@ -21313,7 +21313,7 @@ pub unsafe fn vqshlq_n_s64(a: int64x2_t) -> int64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshl_n_u8(a: uint8x8_t) -> uint8x8_t { static_assert_imm3!(N); vqshl_u8(a, vdup_n_s8(N as _)) @@ -21328,7 +21328,7 @@ pub unsafe fn vqshl_n_u8(a: uint8x8_t) -> uint8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshlq_n_u8(a: uint8x16_t) -> uint8x16_t { static_assert_imm3!(N); vqshlq_u8(a, vdupq_n_s8(N as _)) @@ -21343,7 +21343,7 @@ pub unsafe fn vqshlq_n_u8(a: uint8x16_t) -> uint8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshl_n_u16(a: uint16x4_t) -> uint16x4_t { static_assert_imm4!(N); vqshl_u16(a, vdup_n_s16(N as _)) @@ -21358,7 +21358,7 @@ pub unsafe fn vqshl_n_u16(a: uint16x4_t) -> uint16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = 
"neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshlq_n_u16(a: uint16x8_t) -> uint16x8_t { static_assert_imm4!(N); vqshlq_u16(a, vdupq_n_s16(N as _)) @@ -21373,7 +21373,7 @@ pub unsafe fn vqshlq_n_u16(a: uint16x8_t) -> uint16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshl_n_u32(a: uint32x2_t) -> uint32x2_t { static_assert_imm5!(N); vqshl_u32(a, vdup_n_s32(N as _)) @@ -21388,7 +21388,7 @@ pub unsafe fn vqshl_n_u32(a: uint32x2_t) -> uint32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshlq_n_u32(a: uint32x4_t) -> uint32x4_t { static_assert_imm5!(N); vqshlq_u32(a, vdupq_n_s32(N as _)) @@ -21403,7 +21403,7 @@ pub unsafe fn vqshlq_n_u32(a: uint32x4_t) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshl_n_u64(a: uint64x1_t) -> uint64x1_t { static_assert_imm6!(N); vqshl_u64(a, vdup_n_s64(N as _)) @@ -21418,7 +21418,7 @@ pub unsafe fn vqshl_n_u64(a: uint64x1_t) -> uint64x1_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshlq_n_u64(a: uint64x2_t) -> uint64x2_t { static_assert_imm6!(N); vqshlq_u64(a, vdupq_n_s64(N as _)) @@ -22061,7 +22061,7 @@ vqshrun_n_s64_(a, N) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrte))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(frsqrte))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsqrte_f32(a: float32x2_t) -> float32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -22080,7 +22080,7 @@ vrsqrte_f32_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrte))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(frsqrte))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsqrteq_f32(a: float32x4_t) -> float32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -22099,7 +22099,7 @@ vrsqrteq_f32_(a) 
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrte))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ursqrte))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsqrte_u32(a: uint32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -22118,7 +22118,7 @@ vrsqrte_u32_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrte))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ursqrte))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsqrteq_u32(a: uint32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -22137,7 +22137,7 @@ vrsqrteq_u32_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrts))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(frsqrts))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsqrts_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -22156,7 +22156,7 @@ vrsqrts_f32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrts))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(frsqrts))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsqrtsq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -22175,7 +22175,7 @@ vrsqrtsq_f32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(frecpe))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrecpe_f32(a: float32x2_t) -> float32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -22194,7 +22194,7 @@ vrecpe_f32_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(frecpe))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrecpeq_f32(a: float32x4_t) -> float32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -22213,7 +22213,7 @@ vrecpeq_f32_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urecpe))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] 
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrecpe_u32(a: uint32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -22232,7 +22232,7 @@ vrecpe_u32_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urecpe))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrecpeq_u32(a: uint32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -22251,7 +22251,7 @@ vrecpeq_u32_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecps))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(frecps))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrecps_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -22270,7 +22270,7 @@ vrecps_f32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecps))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(frecps))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrecpsq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -22289,7 +22289,7 @@ vrecpsq_f32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s8_u8(a: uint8x8_t) -> int8x8_t { transmute(a) } @@ -22302,7 +22302,7 @@ pub unsafe fn vreinterpret_s8_u8(a: uint8x8_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s8_p8(a: poly8x8_t) -> int8x8_t { transmute(a) } @@ -22315,7 +22315,7 @@ pub unsafe fn vreinterpret_s8_p8(a: poly8x8_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s16_p16(a: poly16x4_t) -> int16x4_t { transmute(a) } @@ -22328,7 +22328,7 @@ pub unsafe fn vreinterpret_s16_p16(a: poly16x4_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s16_u16(a: uint16x4_t) -> int16x4_t { transmute(a) } @@ -22341,7 +22341,7 @@ pub unsafe fn vreinterpret_s16_u16(a: uint16x4_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s32_u32(a: uint32x2_t) -> int32x2_t { transmute(a) } @@ -22354,7 +22354,7 @@ pub unsafe fn vreinterpret_s32_u32(a: uint32x2_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s64_u64(a: uint64x1_t) -> int64x1_t { transmute(a) } @@ -22367,7 +22367,7 @@ pub unsafe fn vreinterpret_s64_u64(a: uint64x1_t) -> int64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s8_u8(a: uint8x16_t) -> int8x16_t { transmute(a) } @@ -22380,7 +22380,7 @@ pub unsafe fn vreinterpretq_s8_u8(a: uint8x16_t) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s8_p8(a: poly8x16_t) -> int8x16_t { transmute(a) } @@ -22393,7 +22393,7 @@ pub unsafe fn vreinterpretq_s8_p8(a: poly8x16_t) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s16_p16(a: poly16x8_t) -> int16x8_t { transmute(a) } @@ -22406,7 +22406,7 @@ pub unsafe fn vreinterpretq_s16_p16(a: poly16x8_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] 
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s16_u16(a: uint16x8_t) -> int16x8_t { transmute(a) } @@ -22419,7 +22419,7 @@ pub unsafe fn vreinterpretq_s16_u16(a: uint16x8_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s32_u32(a: uint32x4_t) -> int32x4_t { transmute(a) } @@ -22432,7 +22432,7 @@ pub unsafe fn vreinterpretq_s32_u32(a: uint32x4_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s64_u64(a: uint64x2_t) -> int64x2_t { transmute(a) } @@ -22445,7 +22445,7 @@ pub unsafe fn vreinterpretq_s64_u64(a: uint64x2_t) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u8_p8(a: poly8x8_t) -> uint8x8_t { transmute(a) } @@ -22458,7 +22458,7 @@ pub unsafe fn vreinterpret_u8_p8(a: poly8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u8_s8(a: int8x8_t) -> uint8x8_t { transmute(a) } @@ -22471,7 +22471,7 @@ pub unsafe fn vreinterpret_u8_s8(a: int8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u16_p16(a: poly16x4_t) -> uint16x4_t { transmute(a) } @@ -22484,7 +22484,7 @@ pub unsafe fn vreinterpret_u16_p16(a: poly16x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u16_s16(a: int16x4_t) -> uint16x4_t { transmute(a) } @@ -22497,7 +22497,7 @@ pub unsafe fn vreinterpret_u16_s16(a: int16x4_t) -> uint16x4_t 
{ #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u32_s32(a: int32x2_t) -> uint32x2_t { transmute(a) } @@ -22510,7 +22510,7 @@ pub unsafe fn vreinterpret_u32_s32(a: int32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u64_s64(a: int64x1_t) -> uint64x1_t { transmute(a) } @@ -22523,7 +22523,7 @@ pub unsafe fn vreinterpret_u64_s64(a: int64x1_t) -> uint64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u8_p8(a: poly8x16_t) -> uint8x16_t { transmute(a) } @@ -22536,7 +22536,7 @@ pub unsafe fn vreinterpretq_u8_p8(a: poly8x16_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u8_s8(a: int8x16_t) -> uint8x16_t { transmute(a) } @@ -22549,7 +22549,7 @@ pub unsafe fn vreinterpretq_u8_s8(a: int8x16_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u16_p16(a: poly16x8_t) -> uint16x8_t { transmute(a) } @@ -22562,7 +22562,7 @@ pub unsafe fn vreinterpretq_u16_p16(a: poly16x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u16_s16(a: int16x8_t) -> uint16x8_t { transmute(a) } @@ -22575,7 +22575,7 @@ pub unsafe fn vreinterpretq_u16_s16(a: int16x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = 
"neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u32_s32(a: int32x4_t) -> uint32x4_t { transmute(a) } @@ -22588,7 +22588,7 @@ pub unsafe fn vreinterpretq_u32_s32(a: int32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u64_s64(a: int64x2_t) -> uint64x2_t { transmute(a) } @@ -22601,7 +22601,7 @@ pub unsafe fn vreinterpretq_u64_s64(a: int64x2_t) -> uint64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p8_s8(a: int8x8_t) -> poly8x8_t { transmute(a) } @@ -22614,7 +22614,7 @@ pub unsafe fn vreinterpret_p8_s8(a: int8x8_t) -> poly8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p8_u8(a: uint8x8_t) -> poly8x8_t { transmute(a) } @@ -22627,7 +22627,7 @@ pub unsafe fn vreinterpret_p8_u8(a: uint8x8_t) -> poly8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p16_s16(a: int16x4_t) -> poly16x4_t { transmute(a) } @@ -22640,7 +22640,7 @@ pub unsafe fn vreinterpret_p16_s16(a: int16x4_t) -> poly16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p16_u16(a: uint16x4_t) -> poly16x4_t { transmute(a) } @@ -22653,7 +22653,7 @@ pub unsafe fn vreinterpret_p16_u16(a: uint16x4_t) -> poly16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p8_s8(a: int8x16_t) -> poly8x16_t { transmute(a) } @@ -22666,7 +22666,7 @@ pub unsafe fn 
vreinterpretq_p8_s8(a: int8x16_t) -> poly8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p8_u8(a: uint8x16_t) -> poly8x16_t { transmute(a) } @@ -22679,7 +22679,7 @@ pub unsafe fn vreinterpretq_p8_u8(a: uint8x16_t) -> poly8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p16_s16(a: int16x8_t) -> poly16x8_t { transmute(a) } @@ -22692,7 +22692,7 @@ pub unsafe fn vreinterpretq_p16_s16(a: int16x8_t) -> poly16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p16_u16(a: uint16x8_t) -> poly16x8_t { transmute(a) } @@ -22705,7 +22705,7 @@ pub unsafe fn vreinterpretq_p16_u16(a: uint16x8_t) -> poly16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s8_s16(a: int16x4_t) -> int8x8_t { transmute(a) } @@ -22718,7 +22718,7 @@ pub unsafe fn vreinterpret_s8_s16(a: int16x4_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s8_u16(a: uint16x4_t) -> int8x8_t { transmute(a) } @@ -22731,7 +22731,7 @@ pub unsafe fn vreinterpret_s8_u16(a: uint16x4_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s8_p16(a: poly16x4_t) -> int8x8_t { transmute(a) } @@ -22744,7 +22744,7 @@ pub unsafe fn vreinterpret_s8_p16(a: poly16x4_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] 
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s16_s32(a: int32x2_t) -> int16x4_t { transmute(a) } @@ -22757,7 +22757,7 @@ pub unsafe fn vreinterpret_s16_s32(a: int32x2_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s16_u32(a: uint32x2_t) -> int16x4_t { transmute(a) } @@ -22770,7 +22770,7 @@ pub unsafe fn vreinterpret_s16_u32(a: uint32x2_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s32_s64(a: int64x1_t) -> int32x2_t { transmute(a) } @@ -22783,7 +22783,7 @@ pub unsafe fn vreinterpret_s32_s64(a: int64x1_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s32_u64(a: uint64x1_t) -> int32x2_t { transmute(a) } @@ -22796,7 +22796,7 @@ pub unsafe fn vreinterpret_s32_u64(a: uint64x1_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s8_s16(a: int16x8_t) -> int8x16_t { transmute(a) } @@ -22809,7 +22809,7 @@ pub unsafe fn vreinterpretq_s8_s16(a: int16x8_t) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s8_u16(a: uint16x8_t) -> int8x16_t { transmute(a) } @@ -22822,7 +22822,7 @@ pub unsafe fn vreinterpretq_s8_u16(a: uint16x8_t) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s8_p16(a: poly16x8_t) -> int8x16_t { 
transmute(a) } @@ -22835,7 +22835,7 @@ pub unsafe fn vreinterpretq_s8_p16(a: poly16x8_t) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s16_s32(a: int32x4_t) -> int16x8_t { transmute(a) } @@ -22848,7 +22848,7 @@ pub unsafe fn vreinterpretq_s16_s32(a: int32x4_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s16_u32(a: uint32x4_t) -> int16x8_t { transmute(a) } @@ -22861,7 +22861,7 @@ pub unsafe fn vreinterpretq_s16_u32(a: uint32x4_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s32_s64(a: int64x2_t) -> int32x4_t { transmute(a) } @@ -22874,7 +22874,7 @@ pub unsafe fn vreinterpretq_s32_s64(a: int64x2_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s32_u64(a: uint64x2_t) -> int32x4_t { transmute(a) } @@ -22887,7 +22887,7 @@ pub unsafe fn vreinterpretq_s32_u64(a: uint64x2_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u8_p16(a: poly16x4_t) -> uint8x8_t { transmute(a) } @@ -22900,7 +22900,7 @@ pub unsafe fn vreinterpret_u8_p16(a: poly16x4_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u8_s16(a: int16x4_t) -> uint8x8_t { transmute(a) } @@ -22913,7 +22913,7 @@ pub unsafe fn vreinterpret_u8_s16(a: int16x4_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, 
target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u8_u16(a: uint16x4_t) -> uint8x8_t { transmute(a) } @@ -22926,7 +22926,7 @@ pub unsafe fn vreinterpret_u8_u16(a: uint16x4_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u16_s32(a: int32x2_t) -> uint16x4_t { transmute(a) } @@ -22939,7 +22939,7 @@ pub unsafe fn vreinterpret_u16_s32(a: int32x2_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u16_u32(a: uint32x2_t) -> uint16x4_t { transmute(a) } @@ -22952,7 +22952,7 @@ pub unsafe fn vreinterpret_u16_u32(a: uint32x2_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u32_s64(a: int64x1_t) -> uint32x2_t { transmute(a) } @@ -22965,7 +22965,7 @@ pub unsafe fn vreinterpret_u32_s64(a: int64x1_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u32_u64(a: uint64x1_t) -> uint32x2_t { transmute(a) } @@ -22978,7 +22978,7 @@ pub unsafe fn vreinterpret_u32_u64(a: uint64x1_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u8_p16(a: poly16x8_t) -> uint8x16_t { transmute(a) } @@ -22991,7 +22991,7 @@ pub unsafe fn vreinterpretq_u8_p16(a: poly16x8_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn 
vreinterpretq_u8_s16(a: int16x8_t) -> uint8x16_t { transmute(a) } @@ -23004,7 +23004,7 @@ pub unsafe fn vreinterpretq_u8_s16(a: int16x8_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u8_u16(a: uint16x8_t) -> uint8x16_t { transmute(a) } @@ -23017,7 +23017,7 @@ pub unsafe fn vreinterpretq_u8_u16(a: uint16x8_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u16_s32(a: int32x4_t) -> uint16x8_t { transmute(a) } @@ -23030,7 +23030,7 @@ pub unsafe fn vreinterpretq_u16_s32(a: int32x4_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u16_u32(a: uint32x4_t) -> uint16x8_t { transmute(a) } @@ -23043,7 +23043,7 @@ pub unsafe fn vreinterpretq_u16_u32(a: uint32x4_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u32_s64(a: int64x2_t) -> uint32x4_t { transmute(a) } @@ -23056,7 +23056,7 @@ pub unsafe fn vreinterpretq_u32_s64(a: int64x2_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u32_u64(a: uint64x2_t) -> uint32x4_t { transmute(a) } @@ -23069,7 +23069,7 @@ pub unsafe fn vreinterpretq_u32_u64(a: uint64x2_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p8_p16(a: poly16x4_t) -> poly8x8_t { transmute(a) } @@ -23082,7 +23082,7 @@ pub unsafe fn vreinterpret_p8_p16(a: poly16x4_t) -> poly8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, 
target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p8_s16(a: int16x4_t) -> poly8x8_t { transmute(a) } @@ -23095,7 +23095,7 @@ pub unsafe fn vreinterpret_p8_s16(a: int16x4_t) -> poly8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p8_u16(a: uint16x4_t) -> poly8x8_t { transmute(a) } @@ -23108,7 +23108,7 @@ pub unsafe fn vreinterpret_p8_u16(a: uint16x4_t) -> poly8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p16_s32(a: int32x2_t) -> poly16x4_t { transmute(a) } @@ -23121,7 +23121,7 @@ pub unsafe fn vreinterpret_p16_s32(a: int32x2_t) -> poly16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p16_u32(a: uint32x2_t) -> poly16x4_t { transmute(a) } @@ -23134,7 +23134,7 @@ pub unsafe fn vreinterpret_p16_u32(a: uint32x2_t) -> poly16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p8_p16(a: poly16x8_t) -> poly8x16_t { transmute(a) } @@ -23147,7 +23147,7 @@ pub unsafe fn vreinterpretq_p8_p16(a: poly16x8_t) -> poly8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p8_s16(a: int16x8_t) -> poly8x16_t { transmute(a) } @@ -23160,7 +23160,7 @@ pub unsafe fn vreinterpretq_p8_s16(a: int16x8_t) -> poly8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = 
"neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p8_u16(a: uint16x8_t) -> poly8x16_t { transmute(a) } @@ -23173,7 +23173,7 @@ pub unsafe fn vreinterpretq_p8_u16(a: uint16x8_t) -> poly8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p16_s32(a: int32x4_t) -> poly16x8_t { transmute(a) } @@ -23186,7 +23186,7 @@ pub unsafe fn vreinterpretq_p16_s32(a: int32x4_t) -> poly16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p16_u32(a: uint32x4_t) -> poly16x8_t { transmute(a) } @@ -23199,7 +23199,7 @@ pub unsafe fn vreinterpretq_p16_u32(a: uint32x4_t) -> poly16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s32_p64(a: poly64x1_t) -> int32x2_t { transmute(a) } @@ -23212,7 +23212,7 @@ pub unsafe fn vreinterpret_s32_p64(a: poly64x1_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u32_p64(a: poly64x1_t) -> uint32x2_t { transmute(a) } @@ -23225,7 +23225,7 @@ pub unsafe fn vreinterpret_u32_p64(a: poly64x1_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s32_p64(a: poly64x2_t) -> int32x4_t { transmute(a) } @@ -23238,7 +23238,7 @@ pub unsafe fn vreinterpretq_s32_p64(a: poly64x2_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u32_p64(a: poly64x2_t) -> uint32x4_t { transmute(a) } @@ -23251,7 +23251,7 @@ pub unsafe fn vreinterpretq_u32_p64(a: poly64x2_t) -> uint32x4_t { 
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s64_p128(a: p128) -> int64x2_t { transmute(a) } @@ -23264,7 +23264,7 @@ pub unsafe fn vreinterpretq_s64_p128(a: p128) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u64_p128(a: p128) -> uint64x2_t { transmute(a) } @@ -23277,7 +23277,7 @@ pub unsafe fn vreinterpretq_u64_p128(a: p128) -> uint64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p64_p128(a: p128) -> poly64x2_t { transmute(a) } @@ -23290,7 +23290,7 @@ pub unsafe fn vreinterpretq_p64_p128(a: p128) -> poly64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s16_p8(a: poly8x8_t) -> int16x4_t { transmute(a) } @@ -23303,7 +23303,7 @@ pub unsafe fn vreinterpret_s16_p8(a: poly8x8_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s16_s8(a: int8x8_t) -> int16x4_t { transmute(a) } @@ -23316,7 +23316,7 @@ pub unsafe fn vreinterpret_s16_s8(a: int8x8_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s16_u8(a: uint8x8_t) -> int16x4_t { transmute(a) } @@ -23329,7 +23329,7 @@ pub unsafe fn vreinterpret_s16_u8(a: uint8x8_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = 
"1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s32_p16(a: poly16x4_t) -> int32x2_t { transmute(a) } @@ -23342,7 +23342,7 @@ pub unsafe fn vreinterpret_s32_p16(a: poly16x4_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s32_s16(a: int16x4_t) -> int32x2_t { transmute(a) } @@ -23355,7 +23355,7 @@ pub unsafe fn vreinterpret_s32_s16(a: int16x4_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s32_u16(a: uint16x4_t) -> int32x2_t { transmute(a) } @@ -23368,7 +23368,7 @@ pub unsafe fn vreinterpret_s32_u16(a: uint16x4_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s64_s32(a: int32x2_t) -> int64x1_t { transmute(a) } @@ -23381,7 +23381,7 @@ pub unsafe fn vreinterpret_s64_s32(a: int32x2_t) -> int64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s64_u32(a: uint32x2_t) -> int64x1_t { transmute(a) } @@ -23394,7 +23394,7 @@ pub unsafe fn vreinterpret_s64_u32(a: uint32x2_t) -> int64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s16_p8(a: poly8x16_t) -> int16x8_t { transmute(a) } @@ -23407,7 +23407,7 @@ pub unsafe fn vreinterpretq_s16_p8(a: poly8x16_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s16_s8(a: int8x16_t) -> int16x8_t { transmute(a) } @@ -23420,7 +23420,7 @@ pub unsafe fn vreinterpretq_s16_s8(a: int8x16_t) 
-> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s16_u8(a: uint8x16_t) -> int16x8_t { transmute(a) } @@ -23433,7 +23433,7 @@ pub unsafe fn vreinterpretq_s16_u8(a: uint8x16_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s32_p16(a: poly16x8_t) -> int32x4_t { transmute(a) } @@ -23446,7 +23446,7 @@ pub unsafe fn vreinterpretq_s32_p16(a: poly16x8_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s32_s16(a: int16x8_t) -> int32x4_t { transmute(a) } @@ -23459,7 +23459,7 @@ pub unsafe fn vreinterpretq_s32_s16(a: int16x8_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s32_u16(a: uint16x8_t) -> int32x4_t { transmute(a) } @@ -23472,7 +23472,7 @@ pub unsafe fn vreinterpretq_s32_u16(a: uint16x8_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s64_s32(a: int32x4_t) -> int64x2_t { transmute(a) } @@ -23485,7 +23485,7 @@ pub unsafe fn vreinterpretq_s64_s32(a: int32x4_t) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s64_u32(a: uint32x4_t) -> int64x2_t { transmute(a) } @@ -23498,7 +23498,7 @@ pub unsafe fn vreinterpretq_s64_u32(a: uint32x4_t) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", 
stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u16_p8(a: poly8x8_t) -> uint16x4_t { transmute(a) } @@ -23511,7 +23511,7 @@ pub unsafe fn vreinterpret_u16_p8(a: poly8x8_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u16_s8(a: int8x8_t) -> uint16x4_t { transmute(a) } @@ -23524,7 +23524,7 @@ pub unsafe fn vreinterpret_u16_s8(a: int8x8_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u16_u8(a: uint8x8_t) -> uint16x4_t { transmute(a) } @@ -23537,7 +23537,7 @@ pub unsafe fn vreinterpret_u16_u8(a: uint8x8_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u32_p16(a: poly16x4_t) -> uint32x2_t { transmute(a) } @@ -23550,7 +23550,7 @@ pub unsafe fn vreinterpret_u32_p16(a: poly16x4_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u32_s16(a: int16x4_t) -> uint32x2_t { transmute(a) } @@ -23563,7 +23563,7 @@ pub unsafe fn vreinterpret_u32_s16(a: int16x4_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u32_u16(a: uint16x4_t) -> uint32x2_t { transmute(a) } @@ -23576,7 +23576,7 @@ pub unsafe fn vreinterpret_u32_u16(a: uint16x4_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u64_s32(a: int32x2_t) -> uint64x1_t { transmute(a) } @@ -23589,7 +23589,7 @@ pub 
unsafe fn vreinterpret_u64_s32(a: int32x2_t) -> uint64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u64_u32(a: uint32x2_t) -> uint64x1_t { transmute(a) } @@ -23602,7 +23602,7 @@ pub unsafe fn vreinterpret_u64_u32(a: uint32x2_t) -> uint64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u16_p8(a: poly8x16_t) -> uint16x8_t { transmute(a) } @@ -23615,7 +23615,7 @@ pub unsafe fn vreinterpretq_u16_p8(a: poly8x16_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u16_s8(a: int8x16_t) -> uint16x8_t { transmute(a) } @@ -23628,7 +23628,7 @@ pub unsafe fn vreinterpretq_u16_s8(a: int8x16_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u16_u8(a: uint8x16_t) -> uint16x8_t { transmute(a) } @@ -23641,7 +23641,7 @@ pub unsafe fn vreinterpretq_u16_u8(a: uint8x16_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u32_p16(a: poly16x8_t) -> uint32x4_t { transmute(a) } @@ -23654,7 +23654,7 @@ pub unsafe fn vreinterpretq_u32_p16(a: poly16x8_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u32_s16(a: int16x8_t) -> uint32x4_t { transmute(a) } @@ -23667,7 +23667,7 @@ pub unsafe fn vreinterpretq_u32_s16(a: int16x8_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), 
assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u32_u16(a: uint16x8_t) -> uint32x4_t { transmute(a) } @@ -23680,7 +23680,7 @@ pub unsafe fn vreinterpretq_u32_u16(a: uint16x8_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u64_s32(a: int32x4_t) -> uint64x2_t { transmute(a) } @@ -23693,7 +23693,7 @@ pub unsafe fn vreinterpretq_u64_s32(a: int32x4_t) -> uint64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u64_u32(a: uint32x4_t) -> uint64x2_t { transmute(a) } @@ -23706,7 +23706,7 @@ pub unsafe fn vreinterpretq_u64_u32(a: uint32x4_t) -> uint64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p16_p8(a: poly8x8_t) -> poly16x4_t { transmute(a) } @@ -23719,7 +23719,7 @@ pub unsafe fn vreinterpret_p16_p8(a: poly8x8_t) -> poly16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p16_s8(a: int8x8_t) -> poly16x4_t { transmute(a) } @@ -23732,7 +23732,7 @@ pub unsafe fn vreinterpret_p16_s8(a: int8x8_t) -> poly16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p16_u8(a: uint8x8_t) -> poly16x4_t { transmute(a) } @@ -23745,7 +23745,7 @@ pub unsafe fn vreinterpret_p16_u8(a: uint8x8_t) -> poly16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p16_p8(a: 
poly8x16_t) -> poly16x8_t { transmute(a) } @@ -23758,7 +23758,7 @@ pub unsafe fn vreinterpretq_p16_p8(a: poly8x16_t) -> poly16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p16_s8(a: int8x16_t) -> poly16x8_t { transmute(a) } @@ -23771,7 +23771,7 @@ pub unsafe fn vreinterpretq_p16_s8(a: int8x16_t) -> poly16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p16_u8(a: uint8x16_t) -> poly16x8_t { transmute(a) } @@ -23784,7 +23784,7 @@ pub unsafe fn vreinterpretq_p16_u8(a: uint8x16_t) -> poly16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p64_s32(a: int32x2_t) -> poly64x1_t { transmute(a) } @@ -23797,7 +23797,7 @@ pub unsafe fn vreinterpret_p64_s32(a: int32x2_t) -> poly64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p64_u32(a: uint32x2_t) -> poly64x1_t { transmute(a) } @@ -23810,7 +23810,7 @@ pub unsafe fn vreinterpret_p64_u32(a: uint32x2_t) -> poly64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p64_s32(a: int32x4_t) -> poly64x2_t { transmute(a) } @@ -23823,7 +23823,7 @@ pub unsafe fn vreinterpretq_p64_s32(a: int32x4_t) -> poly64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p64_u32(a: uint32x4_t) -> poly64x2_t { transmute(a) } @@ -23836,7 +23836,7 @@ pub unsafe fn vreinterpretq_p64_u32(a: uint32x4_t) -> poly64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, 
target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p128_s64(a: int64x2_t) -> p128 { transmute(a) } @@ -23849,7 +23849,7 @@ pub unsafe fn vreinterpretq_p128_s64(a: int64x2_t) -> p128 { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p128_u64(a: uint64x2_t) -> p128 { transmute(a) } @@ -23862,7 +23862,7 @@ pub unsafe fn vreinterpretq_p128_u64(a: uint64x2_t) -> p128 { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p128_p64(a: poly64x2_t) -> p128 { transmute(a) } @@ -23875,7 +23875,7 @@ pub unsafe fn vreinterpretq_p128_p64(a: poly64x2_t) -> p128 { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s8_s32(a: int32x2_t) -> int8x8_t { transmute(a) } @@ -23888,7 +23888,7 @@ pub unsafe fn vreinterpret_s8_s32(a: int32x2_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s8_u32(a: uint32x2_t) -> int8x8_t { transmute(a) } @@ -23901,7 +23901,7 @@ pub unsafe fn vreinterpret_s8_u32(a: uint32x2_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s16_s64(a: int64x1_t) -> int16x4_t { transmute(a) } @@ -23914,7 +23914,7 @@ pub unsafe fn vreinterpret_s16_s64(a: int64x1_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", 
since = "1.59.0"))] pub unsafe fn vreinterpret_s16_u64(a: uint64x1_t) -> int16x4_t { transmute(a) } @@ -23927,7 +23927,7 @@ pub unsafe fn vreinterpret_s16_u64(a: uint64x1_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s8_s32(a: int32x4_t) -> int8x16_t { transmute(a) } @@ -23940,7 +23940,7 @@ pub unsafe fn vreinterpretq_s8_s32(a: int32x4_t) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s8_u32(a: uint32x4_t) -> int8x16_t { transmute(a) } @@ -23953,7 +23953,7 @@ pub unsafe fn vreinterpretq_s8_u32(a: uint32x4_t) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s16_s64(a: int64x2_t) -> int16x8_t { transmute(a) } @@ -23966,7 +23966,7 @@ pub unsafe fn vreinterpretq_s16_s64(a: int64x2_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s16_u64(a: uint64x2_t) -> int16x8_t { transmute(a) } @@ -23979,7 +23979,7 @@ pub unsafe fn vreinterpretq_s16_u64(a: uint64x2_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u8_s32(a: int32x2_t) -> uint8x8_t { transmute(a) } @@ -23992,7 +23992,7 @@ pub unsafe fn vreinterpret_u8_s32(a: int32x2_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u8_u32(a: uint32x2_t) -> uint8x8_t { transmute(a) } @@ -24005,7 +24005,7 @@ pub unsafe fn vreinterpret_u8_u32(a: uint32x2_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] 
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u16_s64(a: int64x1_t) -> uint16x4_t { transmute(a) } @@ -24018,7 +24018,7 @@ pub unsafe fn vreinterpret_u16_s64(a: int64x1_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u16_u64(a: uint64x1_t) -> uint16x4_t { transmute(a) } @@ -24031,7 +24031,7 @@ pub unsafe fn vreinterpret_u16_u64(a: uint64x1_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u8_s32(a: int32x4_t) -> uint8x16_t { transmute(a) } @@ -24044,7 +24044,7 @@ pub unsafe fn vreinterpretq_u8_s32(a: int32x4_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u8_u32(a: uint32x4_t) -> uint8x16_t { transmute(a) } @@ -24057,7 +24057,7 @@ pub unsafe fn vreinterpretq_u8_u32(a: uint32x4_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u16_s64(a: int64x2_t) -> uint16x8_t { transmute(a) } @@ -24070,7 +24070,7 @@ pub unsafe fn vreinterpretq_u16_s64(a: int64x2_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u16_u64(a: uint64x2_t) -> uint16x8_t { transmute(a) } @@ -24083,7 +24083,7 @@ pub unsafe fn vreinterpretq_u16_u64(a: uint64x2_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] 
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p8_s32(a: int32x2_t) -> poly8x8_t { transmute(a) } @@ -24096,7 +24096,7 @@ pub unsafe fn vreinterpret_p8_s32(a: int32x2_t) -> poly8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p8_u32(a: uint32x2_t) -> poly8x8_t { transmute(a) } @@ -24109,7 +24109,7 @@ pub unsafe fn vreinterpret_p8_u32(a: uint32x2_t) -> poly8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p16_s64(a: int64x1_t) -> poly16x4_t { transmute(a) } @@ -24122,7 +24122,7 @@ pub unsafe fn vreinterpret_p16_s64(a: int64x1_t) -> poly16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p16_u64(a: uint64x1_t) -> poly16x4_t { transmute(a) } @@ -24135,7 +24135,7 @@ pub unsafe fn vreinterpret_p16_u64(a: uint64x1_t) -> poly16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p8_s32(a: int32x4_t) -> poly8x16_t { transmute(a) } @@ -24148,7 +24148,7 @@ pub unsafe fn vreinterpretq_p8_s32(a: int32x4_t) -> poly8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p8_u32(a: uint32x4_t) -> poly8x16_t { transmute(a) } @@ -24161,7 +24161,7 @@ pub unsafe fn vreinterpretq_p8_u32(a: uint32x4_t) -> poly8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p16_s64(a: int64x2_t) -> poly16x8_t { transmute(a) } @@ -24174,7 +24174,7 @@ pub unsafe fn vreinterpretq_p16_s64(a: int64x2_t) -> 
poly16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p16_u64(a: uint64x2_t) -> poly16x8_t { transmute(a) } @@ -24187,7 +24187,7 @@ pub unsafe fn vreinterpretq_p16_u64(a: uint64x2_t) -> poly16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s16_p64(a: poly64x1_t) -> int16x4_t { transmute(a) } @@ -24200,7 +24200,7 @@ pub unsafe fn vreinterpret_s16_p64(a: poly64x1_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u16_p64(a: poly64x1_t) -> uint16x4_t { transmute(a) } @@ -24213,7 +24213,7 @@ pub unsafe fn vreinterpret_u16_p64(a: poly64x1_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p16_p64(a: poly64x1_t) -> poly16x4_t { transmute(a) } @@ -24226,7 +24226,7 @@ pub unsafe fn vreinterpret_p16_p64(a: poly64x1_t) -> poly16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s16_p64(a: poly64x2_t) -> int16x8_t { transmute(a) } @@ -24239,7 +24239,7 @@ pub unsafe fn vreinterpretq_s16_p64(a: poly64x2_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u16_p64(a: poly64x2_t) -> uint16x8_t { transmute(a) } @@ -24252,7 +24252,7 @@ pub unsafe fn vreinterpretq_u16_p64(a: poly64x2_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] 
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p16_p64(a: poly64x2_t) -> poly16x8_t { transmute(a) } @@ -24265,7 +24265,7 @@ pub unsafe fn vreinterpretq_p16_p64(a: poly64x2_t) -> poly16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s32_p128(a: p128) -> int32x4_t { transmute(a) } @@ -24278,7 +24278,7 @@ pub unsafe fn vreinterpretq_s32_p128(a: p128) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u32_p128(a: p128) -> uint32x4_t { transmute(a) } @@ -24291,7 +24291,7 @@ pub unsafe fn vreinterpretq_u32_p128(a: p128) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s32_p8(a: poly8x8_t) -> int32x2_t { transmute(a) } @@ -24304,7 +24304,7 @@ pub unsafe fn vreinterpret_s32_p8(a: poly8x8_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s32_s8(a: int8x8_t) -> int32x2_t { transmute(a) } @@ -24317,7 +24317,7 @@ pub unsafe fn vreinterpret_s32_s8(a: int8x8_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s32_u8(a: uint8x8_t) -> int32x2_t { transmute(a) } @@ -24330,7 +24330,7 @@ pub unsafe fn vreinterpret_s32_u8(a: uint8x8_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s64_p16(a: poly16x4_t) -> int64x1_t { transmute(a) } @@ 
-24343,7 +24343,7 @@ pub unsafe fn vreinterpret_s64_p16(a: poly16x4_t) -> int64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s64_s16(a: int16x4_t) -> int64x1_t { transmute(a) } @@ -24356,7 +24356,7 @@ pub unsafe fn vreinterpret_s64_s16(a: int16x4_t) -> int64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s64_u16(a: uint16x4_t) -> int64x1_t { transmute(a) } @@ -24369,7 +24369,7 @@ pub unsafe fn vreinterpret_s64_u16(a: uint16x4_t) -> int64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s32_p8(a: poly8x16_t) -> int32x4_t { transmute(a) } @@ -24382,7 +24382,7 @@ pub unsafe fn vreinterpretq_s32_p8(a: poly8x16_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s32_s8(a: int8x16_t) -> int32x4_t { transmute(a) } @@ -24395,7 +24395,7 @@ pub unsafe fn vreinterpretq_s32_s8(a: int8x16_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s32_u8(a: uint8x16_t) -> int32x4_t { transmute(a) } @@ -24408,7 +24408,7 @@ pub unsafe fn vreinterpretq_s32_u8(a: uint8x16_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s64_p16(a: poly16x8_t) -> int64x2_t { transmute(a) } @@ -24421,7 +24421,7 @@ pub unsafe fn vreinterpretq_s64_p16(a: poly16x8_t) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = 
"aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s64_s16(a: int16x8_t) -> int64x2_t { transmute(a) } @@ -24434,7 +24434,7 @@ pub unsafe fn vreinterpretq_s64_s16(a: int16x8_t) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s64_u16(a: uint16x8_t) -> int64x2_t { transmute(a) } @@ -24447,7 +24447,7 @@ pub unsafe fn vreinterpretq_s64_u16(a: uint16x8_t) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u32_p8(a: poly8x8_t) -> uint32x2_t { transmute(a) } @@ -24460,7 +24460,7 @@ pub unsafe fn vreinterpret_u32_p8(a: poly8x8_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u32_s8(a: int8x8_t) -> uint32x2_t { transmute(a) } @@ -24473,7 +24473,7 @@ pub unsafe fn vreinterpret_u32_s8(a: int8x8_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u32_u8(a: uint8x8_t) -> uint32x2_t { transmute(a) } @@ -24486,7 +24486,7 @@ pub unsafe fn vreinterpret_u32_u8(a: uint8x8_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u64_p16(a: poly16x4_t) -> uint64x1_t { transmute(a) } @@ -24499,7 +24499,7 @@ pub unsafe fn vreinterpret_u64_p16(a: poly16x4_t) -> uint64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u64_s16(a: 
int16x4_t) -> uint64x1_t { transmute(a) } @@ -24512,7 +24512,7 @@ pub unsafe fn vreinterpret_u64_s16(a: int16x4_t) -> uint64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u64_u16(a: uint16x4_t) -> uint64x1_t { transmute(a) } @@ -24525,7 +24525,7 @@ pub unsafe fn vreinterpret_u64_u16(a: uint16x4_t) -> uint64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u32_p8(a: poly8x16_t) -> uint32x4_t { transmute(a) } @@ -24538,7 +24538,7 @@ pub unsafe fn vreinterpretq_u32_p8(a: poly8x16_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u32_s8(a: int8x16_t) -> uint32x4_t { transmute(a) } @@ -24551,7 +24551,7 @@ pub unsafe fn vreinterpretq_u32_s8(a: int8x16_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u32_u8(a: uint8x16_t) -> uint32x4_t { transmute(a) } @@ -24564,7 +24564,7 @@ pub unsafe fn vreinterpretq_u32_u8(a: uint8x16_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u64_p16(a: poly16x8_t) -> uint64x2_t { transmute(a) } @@ -24577,7 +24577,7 @@ pub unsafe fn vreinterpretq_u64_p16(a: poly16x8_t) -> uint64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u64_s16(a: int16x8_t) -> uint64x2_t { transmute(a) } @@ -24590,7 +24590,7 @@ pub unsafe fn vreinterpretq_u64_s16(a: int16x8_t) -> uint64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), 
assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u64_u16(a: uint16x8_t) -> uint64x2_t { transmute(a) } @@ -24603,7 +24603,7 @@ pub unsafe fn vreinterpretq_u64_u16(a: uint16x8_t) -> uint64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p64_p16(a: poly16x4_t) -> poly64x1_t { transmute(a) } @@ -24616,7 +24616,7 @@ pub unsafe fn vreinterpret_p64_p16(a: poly16x4_t) -> poly64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p64_s16(a: int16x4_t) -> poly64x1_t { transmute(a) } @@ -24629,7 +24629,7 @@ pub unsafe fn vreinterpret_p64_s16(a: int16x4_t) -> poly64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p64_u16(a: uint16x4_t) -> poly64x1_t { transmute(a) } @@ -24642,7 +24642,7 @@ pub unsafe fn vreinterpret_p64_u16(a: uint16x4_t) -> poly64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p64_p16(a: poly16x8_t) -> poly64x2_t { transmute(a) } @@ -24655,7 +24655,7 @@ pub unsafe fn vreinterpretq_p64_p16(a: poly16x8_t) -> poly64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p64_s16(a: int16x8_t) -> poly64x2_t { transmute(a) } @@ -24668,7 +24668,7 @@ pub unsafe fn vreinterpretq_p64_s16(a: int16x8_t) -> poly64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = 
"arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p64_u16(a: uint16x8_t) -> poly64x2_t { transmute(a) } @@ -24681,7 +24681,7 @@ pub unsafe fn vreinterpretq_p64_u16(a: uint16x8_t) -> poly64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p128_s32(a: int32x4_t) -> p128 { transmute(a) } @@ -24694,7 +24694,7 @@ pub unsafe fn vreinterpretq_p128_s32(a: int32x4_t) -> p128 { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p128_u32(a: uint32x4_t) -> p128 { transmute(a) } @@ -24707,7 +24707,7 @@ pub unsafe fn vreinterpretq_p128_u32(a: uint32x4_t) -> p128 { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s8_s64(a: int64x1_t) -> int8x8_t { transmute(a) } @@ -24720,7 +24720,7 @@ pub unsafe fn vreinterpret_s8_s64(a: int64x1_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s8_u64(a: uint64x1_t) -> int8x8_t { transmute(a) } @@ -24733,7 +24733,7 @@ pub unsafe fn vreinterpret_s8_u64(a: uint64x1_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u8_s64(a: int64x1_t) -> uint8x8_t { transmute(a) } @@ -24746,7 +24746,7 @@ pub unsafe fn vreinterpret_u8_s64(a: int64x1_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u8_u64(a: uint64x1_t) -> uint8x8_t { transmute(a) } @@ -24759,7 +24759,7 @@ pub unsafe fn vreinterpret_u8_u64(a: uint64x1_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p8_s64(a: int64x1_t) -> poly8x8_t { transmute(a) } @@ -24772,7 +24772,7 @@ pub unsafe fn vreinterpret_p8_s64(a: int64x1_t) -> poly8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p8_u64(a: uint64x1_t) -> poly8x8_t { transmute(a) } @@ -24785,7 +24785,7 @@ pub unsafe fn vreinterpret_p8_u64(a: uint64x1_t) -> poly8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s8_s64(a: int64x2_t) -> int8x16_t { transmute(a) } @@ -24798,7 +24798,7 @@ pub unsafe fn vreinterpretq_s8_s64(a: int64x2_t) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s8_u64(a: uint64x2_t) -> int8x16_t { transmute(a) } @@ -24811,7 +24811,7 @@ pub unsafe fn vreinterpretq_s8_u64(a: uint64x2_t) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u8_s64(a: int64x2_t) -> uint8x16_t { transmute(a) } @@ -24824,7 +24824,7 @@ pub unsafe fn vreinterpretq_u8_s64(a: int64x2_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u8_u64(a: uint64x2_t) -> uint8x16_t { transmute(a) } @@ -24837,7 +24837,7 @@ pub unsafe fn vreinterpretq_u8_u64(a: uint64x2_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] 
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p8_s64(a: int64x2_t) -> poly8x16_t { transmute(a) } @@ -24850,7 +24850,7 @@ pub unsafe fn vreinterpretq_p8_s64(a: int64x2_t) -> poly8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p8_u64(a: uint64x2_t) -> poly8x16_t { transmute(a) } @@ -24863,7 +24863,7 @@ pub unsafe fn vreinterpretq_p8_u64(a: uint64x2_t) -> poly8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s8_p64(a: poly64x1_t) -> int8x8_t { transmute(a) } @@ -24876,7 +24876,7 @@ pub unsafe fn vreinterpret_s8_p64(a: poly64x1_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u8_p64(a: poly64x1_t) -> uint8x8_t { transmute(a) } @@ -24889,7 +24889,7 @@ pub unsafe fn vreinterpret_u8_p64(a: poly64x1_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p8_p64(a: poly64x1_t) -> poly8x8_t { transmute(a) } @@ -24902,7 +24902,7 @@ pub unsafe fn vreinterpret_p8_p64(a: poly64x1_t) -> poly8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s8_p64(a: poly64x2_t) -> int8x16_t { transmute(a) } @@ -24915,7 +24915,7 @@ pub unsafe fn vreinterpretq_s8_p64(a: poly64x2_t) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u8_p64(a: poly64x2_t) -> uint8x16_t { transmute(a) } @@ -24928,7 +24928,7 @@ pub unsafe fn vreinterpretq_u8_p64(a: 
poly64x2_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p8_p64(a: poly64x2_t) -> poly8x16_t { transmute(a) } @@ -24941,7 +24941,7 @@ pub unsafe fn vreinterpretq_p8_p64(a: poly64x2_t) -> poly8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s16_p128(a: p128) -> int16x8_t { transmute(a) } @@ -24954,7 +24954,7 @@ pub unsafe fn vreinterpretq_s16_p128(a: p128) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u16_p128(a: p128) -> uint16x8_t { transmute(a) } @@ -24967,7 +24967,7 @@ pub unsafe fn vreinterpretq_u16_p128(a: p128) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p16_p128(a: p128) -> poly16x8_t { transmute(a) } @@ -24980,7 +24980,7 @@ pub unsafe fn vreinterpretq_p16_p128(a: p128) -> poly16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s64_p8(a: poly8x8_t) -> int64x1_t { transmute(a) } @@ -24993,7 +24993,7 @@ pub unsafe fn vreinterpret_s64_p8(a: poly8x8_t) -> int64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s64_s8(a: int8x8_t) -> int64x1_t { transmute(a) } @@ -25006,7 +25006,7 @@ pub unsafe fn vreinterpret_s64_s8(a: int8x8_t) -> int64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", 
stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s64_u8(a: uint8x8_t) -> int64x1_t { transmute(a) } @@ -25019,7 +25019,7 @@ pub unsafe fn vreinterpret_s64_u8(a: uint8x8_t) -> int64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u64_p8(a: poly8x8_t) -> uint64x1_t { transmute(a) } @@ -25032,7 +25032,7 @@ pub unsafe fn vreinterpret_u64_p8(a: poly8x8_t) -> uint64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u64_s8(a: int8x8_t) -> uint64x1_t { transmute(a) } @@ -25045,7 +25045,7 @@ pub unsafe fn vreinterpret_u64_s8(a: int8x8_t) -> uint64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u64_u8(a: uint8x8_t) -> uint64x1_t { transmute(a) } @@ -25058,7 +25058,7 @@ pub unsafe fn vreinterpret_u64_u8(a: uint8x8_t) -> uint64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s64_p8(a: poly8x16_t) -> int64x2_t { transmute(a) } @@ -25071,7 +25071,7 @@ pub unsafe fn vreinterpretq_s64_p8(a: poly8x16_t) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s64_s8(a: int8x16_t) -> int64x2_t { transmute(a) } @@ -25084,7 +25084,7 @@ pub unsafe fn vreinterpretq_s64_s8(a: int8x16_t) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s64_u8(a: uint8x16_t) -> int64x2_t { transmute(a) } @@ -25097,7 +25097,7 @@ pub unsafe fn 
vreinterpretq_s64_u8(a: uint8x16_t) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u64_p8(a: poly8x16_t) -> uint64x2_t { transmute(a) } @@ -25110,7 +25110,7 @@ pub unsafe fn vreinterpretq_u64_p8(a: poly8x16_t) -> uint64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u64_s8(a: int8x16_t) -> uint64x2_t { transmute(a) } @@ -25123,7 +25123,7 @@ pub unsafe fn vreinterpretq_u64_s8(a: int8x16_t) -> uint64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u64_u8(a: uint8x16_t) -> uint64x2_t { transmute(a) } @@ -25136,7 +25136,7 @@ pub unsafe fn vreinterpretq_u64_u8(a: uint8x16_t) -> uint64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p64_p8(a: poly8x8_t) -> poly64x1_t { transmute(a) } @@ -25149,7 +25149,7 @@ pub unsafe fn vreinterpret_p64_p8(a: poly8x8_t) -> poly64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p64_s8(a: int8x8_t) -> poly64x1_t { transmute(a) } @@ -25162,7 +25162,7 @@ pub unsafe fn vreinterpret_p64_s8(a: int8x8_t) -> poly64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p64_u8(a: uint8x8_t) -> poly64x1_t { transmute(a) } @@ -25175,7 +25175,7 @@ pub unsafe fn vreinterpret_p64_u8(a: uint8x8_t) -> poly64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), 
assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p64_p8(a: poly8x16_t) -> poly64x2_t { transmute(a) } @@ -25188,7 +25188,7 @@ pub unsafe fn vreinterpretq_p64_p8(a: poly8x16_t) -> poly64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p64_s8(a: int8x16_t) -> poly64x2_t { transmute(a) } @@ -25201,7 +25201,7 @@ pub unsafe fn vreinterpretq_p64_s8(a: int8x16_t) -> poly64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p64_u8(a: uint8x16_t) -> poly64x2_t { transmute(a) } @@ -25214,7 +25214,7 @@ pub unsafe fn vreinterpretq_p64_u8(a: uint8x16_t) -> poly64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p128_s16(a: int16x8_t) -> p128 { transmute(a) } @@ -25227,7 +25227,7 @@ pub unsafe fn vreinterpretq_p128_s16(a: int16x8_t) -> p128 { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p128_u16(a: uint16x8_t) -> p128 { transmute(a) } @@ -25240,7 +25240,7 @@ pub unsafe fn vreinterpretq_p128_u16(a: uint16x8_t) -> p128 { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p128_p16(a: poly16x8_t) -> p128 { transmute(a) } @@ -25253,7 +25253,7 @@ pub unsafe fn vreinterpretq_p128_p16(a: poly16x8_t) -> p128 { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p128_s8(a: 
int8x16_t) -> p128 { transmute(a) } @@ -25266,7 +25266,7 @@ pub unsafe fn vreinterpretq_p128_s8(a: int8x16_t) -> p128 { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p128_u8(a: uint8x16_t) -> p128 { transmute(a) } @@ -25279,7 +25279,7 @@ pub unsafe fn vreinterpretq_p128_u8(a: uint8x16_t) -> p128 { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p128_p8(a: poly8x16_t) -> p128 { transmute(a) } @@ -25292,7 +25292,7 @@ pub unsafe fn vreinterpretq_p128_p8(a: poly8x16_t) -> p128 { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s8_p128(a: p128) -> int8x16_t { transmute(a) } @@ -25305,7 +25305,7 @@ pub unsafe fn vreinterpretq_s8_p128(a: p128) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u8_p128(a: p128) -> uint8x16_t { transmute(a) } @@ -25318,7 +25318,7 @@ pub unsafe fn vreinterpretq_u8_p128(a: p128) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p8_p128(a: p128) -> poly8x16_t { transmute(a) } @@ -25331,7 +25331,7 @@ pub unsafe fn vreinterpretq_p8_p128(a: p128) -> poly8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s8_f32(a: float32x2_t) -> int8x8_t { transmute(a) } @@ -25344,7 +25344,7 @@ pub unsafe fn vreinterpret_s8_f32(a: float32x2_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, 
target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s16_f32(a: float32x2_t) -> int16x4_t { transmute(a) } @@ -25357,7 +25357,7 @@ pub unsafe fn vreinterpret_s16_f32(a: float32x2_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s32_f32(a: float32x2_t) -> int32x2_t { transmute(a) } @@ -25370,7 +25370,7 @@ pub unsafe fn vreinterpret_s32_f32(a: float32x2_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s64_f32(a: float32x2_t) -> int64x1_t { transmute(a) } @@ -25383,7 +25383,7 @@ pub unsafe fn vreinterpret_s64_f32(a: float32x2_t) -> int64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s8_f32(a: float32x4_t) -> int8x16_t { transmute(a) } @@ -25396,7 +25396,7 @@ pub unsafe fn vreinterpretq_s8_f32(a: float32x4_t) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s16_f32(a: float32x4_t) -> int16x8_t { transmute(a) } @@ -25409,7 +25409,7 @@ pub unsafe fn vreinterpretq_s16_f32(a: float32x4_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s32_f32(a: float32x4_t) -> int32x4_t { transmute(a) } @@ -25422,7 +25422,7 @@ pub unsafe fn vreinterpretq_s32_f32(a: float32x4_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub 
unsafe fn vreinterpretq_s64_f32(a: float32x4_t) -> int64x2_t { transmute(a) } @@ -25435,7 +25435,7 @@ pub unsafe fn vreinterpretq_s64_f32(a: float32x4_t) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u8_f32(a: float32x2_t) -> uint8x8_t { transmute(a) } @@ -25448,7 +25448,7 @@ pub unsafe fn vreinterpret_u8_f32(a: float32x2_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u16_f32(a: float32x2_t) -> uint16x4_t { transmute(a) } @@ -25461,7 +25461,7 @@ pub unsafe fn vreinterpret_u16_f32(a: float32x2_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u32_f32(a: float32x2_t) -> uint32x2_t { transmute(a) } @@ -25474,7 +25474,7 @@ pub unsafe fn vreinterpret_u32_f32(a: float32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u64_f32(a: float32x2_t) -> uint64x1_t { transmute(a) } @@ -25487,7 +25487,7 @@ pub unsafe fn vreinterpret_u64_f32(a: float32x2_t) -> uint64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u8_f32(a: float32x4_t) -> uint8x16_t { transmute(a) } @@ -25500,7 +25500,7 @@ pub unsafe fn vreinterpretq_u8_f32(a: float32x4_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u16_f32(a: float32x4_t) -> uint16x8_t { transmute(a) } @@ -25513,7 +25513,7 @@ pub unsafe fn vreinterpretq_u16_f32(a: float32x4_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] 
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u32_f32(a: float32x4_t) -> uint32x4_t { transmute(a) } @@ -25526,7 +25526,7 @@ pub unsafe fn vreinterpretq_u32_f32(a: float32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u64_f32(a: float32x4_t) -> uint64x2_t { transmute(a) } @@ -25539,7 +25539,7 @@ pub unsafe fn vreinterpretq_u64_f32(a: float32x4_t) -> uint64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p8_f32(a: float32x2_t) -> poly8x8_t { transmute(a) } @@ -25552,7 +25552,7 @@ pub unsafe fn vreinterpret_p8_f32(a: float32x2_t) -> poly8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p16_f32(a: float32x2_t) -> poly16x4_t { transmute(a) } @@ -25565,7 +25565,7 @@ pub unsafe fn vreinterpret_p16_f32(a: float32x2_t) -> poly16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p8_f32(a: float32x4_t) -> poly8x16_t { transmute(a) } @@ -25578,7 +25578,7 @@ pub unsafe fn vreinterpretq_p8_f32(a: float32x4_t) -> poly8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p16_f32(a: float32x4_t) -> poly16x8_t { transmute(a) } @@ -25591,7 +25591,7 @@ pub unsafe fn vreinterpretq_p16_f32(a: float32x4_t) -> poly16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] 
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p128_f32(a: float32x4_t) -> p128 { transmute(a) } @@ -25604,7 +25604,7 @@ pub unsafe fn vreinterpretq_p128_f32(a: float32x4_t) -> p128 { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_f32_s8(a: int8x8_t) -> float32x2_t { transmute(a) } @@ -25617,7 +25617,7 @@ pub unsafe fn vreinterpret_f32_s8(a: int8x8_t) -> float32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_f32_s16(a: int16x4_t) -> float32x2_t { transmute(a) } @@ -25630,7 +25630,7 @@ pub unsafe fn vreinterpret_f32_s16(a: int16x4_t) -> float32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_f32_s32(a: int32x2_t) -> float32x2_t { transmute(a) } @@ -25643,7 +25643,7 @@ pub unsafe fn vreinterpret_f32_s32(a: int32x2_t) -> float32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_f32_s64(a: int64x1_t) -> float32x2_t { transmute(a) } @@ -25656,7 +25656,7 @@ pub unsafe fn vreinterpret_f32_s64(a: int64x1_t) -> float32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_f32_s8(a: int8x16_t) -> float32x4_t { transmute(a) } @@ -25669,7 +25669,7 @@ pub unsafe fn vreinterpretq_f32_s8(a: int8x16_t) -> float32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_f32_s16(a: int16x8_t) -> float32x4_t { transmute(a) } @@ -25682,7 +25682,7 @@ pub unsafe fn vreinterpretq_f32_s16(a: int16x8_t) -> 
float32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_f32_s32(a: int32x4_t) -> float32x4_t { transmute(a) } @@ -25695,7 +25695,7 @@ pub unsafe fn vreinterpretq_f32_s32(a: int32x4_t) -> float32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_f32_s64(a: int64x2_t) -> float32x4_t { transmute(a) } @@ -25708,7 +25708,7 @@ pub unsafe fn vreinterpretq_f32_s64(a: int64x2_t) -> float32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_f32_u8(a: uint8x8_t) -> float32x2_t { transmute(a) } @@ -25721,7 +25721,7 @@ pub unsafe fn vreinterpret_f32_u8(a: uint8x8_t) -> float32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_f32_u16(a: uint16x4_t) -> float32x2_t { transmute(a) } @@ -25734,7 +25734,7 @@ pub unsafe fn vreinterpret_f32_u16(a: uint16x4_t) -> float32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_f32_u32(a: uint32x2_t) -> float32x2_t { transmute(a) } @@ -25747,7 +25747,7 @@ pub unsafe fn vreinterpret_f32_u32(a: uint32x2_t) -> float32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_f32_u64(a: uint64x1_t) -> float32x2_t { transmute(a) } @@ -25760,7 +25760,7 @@ pub unsafe fn vreinterpret_f32_u64(a: uint64x1_t) -> float32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = 
"aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_f32_u8(a: uint8x16_t) -> float32x4_t { transmute(a) } @@ -25773,7 +25773,7 @@ pub unsafe fn vreinterpretq_f32_u8(a: uint8x16_t) -> float32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_f32_u16(a: uint16x8_t) -> float32x4_t { transmute(a) } @@ -25786,7 +25786,7 @@ pub unsafe fn vreinterpretq_f32_u16(a: uint16x8_t) -> float32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_f32_u32(a: uint32x4_t) -> float32x4_t { transmute(a) } @@ -25799,7 +25799,7 @@ pub unsafe fn vreinterpretq_f32_u32(a: uint32x4_t) -> float32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_f32_u64(a: uint64x2_t) -> float32x4_t { transmute(a) } @@ -25812,7 +25812,7 @@ pub unsafe fn vreinterpretq_f32_u64(a: uint64x2_t) -> float32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_f32_p8(a: poly8x8_t) -> float32x2_t { transmute(a) } @@ -25825,7 +25825,7 @@ pub unsafe fn vreinterpret_f32_p8(a: poly8x8_t) -> float32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_f32_p16(a: poly16x4_t) -> float32x2_t { transmute(a) } @@ -25838,7 +25838,7 @@ pub unsafe fn vreinterpret_f32_p16(a: poly16x4_t) -> float32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_f32_p8(a: poly8x16_t) -> float32x4_t { 
transmute(a) } @@ -25851,7 +25851,7 @@ pub unsafe fn vreinterpretq_f32_p8(a: poly8x16_t) -> float32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_f32_p16(a: poly16x8_t) -> float32x4_t { transmute(a) } @@ -25864,7 +25864,7 @@ pub unsafe fn vreinterpretq_f32_p16(a: poly16x8_t) -> float32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_f32_p128(a: p128) -> float32x4_t { transmute(a) } @@ -25877,7 +25877,7 @@ pub unsafe fn vreinterpretq_f32_p128(a: p128) -> float32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -25896,7 +25896,7 @@ vrshl_s8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -25915,7 +25915,7 @@ vrshlq_s8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -25934,7 +25934,7 @@ vrshl_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -25953,7 +25953,7 @@ vrshlq_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), 
assert_instr(srshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -25972,7 +25972,7 @@ vrshl_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -25991,7 +25991,7 @@ vrshlq_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -26010,7 +26010,7 @@ vrshl_s64_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -26029,7 +26029,7 @@ vrshlq_s64_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -26048,7 +26048,7 @@ vrshl_u8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -26067,7 +26067,7 @@ vrshlq_u8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { #[allow(improper_ctypes)] 
extern "unadjusted" { @@ -26086,7 +26086,7 @@ vrshl_u16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -26105,7 +26105,7 @@ vrshlq_u16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -26124,7 +26124,7 @@ vrshl_u32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -26143,7 +26143,7 @@ vrshlq_u32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -26162,7 +26162,7 @@ vrshl_u64_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -26182,7 +26182,7 @@ vrshlq_u64_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshr, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshr_n_s8(a: int8x8_t) -> int8x8_t { static_assert!(N : i32 where N >= 1 && N <= 8); vrshl_s8(a, vdup_n_s8((-N) as _)) @@ -26197,7 +26197,7 @@ pub unsafe fn vrshr_n_s8(a: int8x8_t) -> int8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshr, N = 2))] #[rustc_legacy_const_generics(1)] 
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshrq_n_s8(a: int8x16_t) -> int8x16_t { static_assert!(N : i32 where N >= 1 && N <= 8); vrshlq_s8(a, vdupq_n_s8((-N) as _)) @@ -26212,7 +26212,7 @@ pub unsafe fn vrshrq_n_s8(a: int8x16_t) -> int8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshr, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshr_n_s16(a: int16x4_t) -> int16x4_t { static_assert!(N : i32 where N >= 1 && N <= 16); vrshl_s16(a, vdup_n_s16((-N) as _)) @@ -26227,7 +26227,7 @@ pub unsafe fn vrshr_n_s16(a: int16x4_t) -> int16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshr, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshrq_n_s16(a: int16x8_t) -> int16x8_t { static_assert!(N : i32 where N >= 1 && N <= 16); vrshlq_s16(a, vdupq_n_s16((-N) as _)) @@ -26242,7 +26242,7 @@ pub unsafe fn vrshrq_n_s16(a: int16x8_t) -> int16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshr, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshr_n_s32(a: int32x2_t) -> int32x2_t { static_assert!(N : i32 where N >= 1 && N <= 32); vrshl_s32(a, vdup_n_s32((-N) as _)) @@ -26257,7 +26257,7 @@ pub unsafe fn vrshr_n_s32(a: int32x2_t) -> int32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshr, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshrq_n_s32(a: int32x4_t) -> int32x4_t { static_assert!(N : i32 where N >= 1 && N <= 32); vrshlq_s32(a, vdupq_n_s32((-N) as _)) @@ -26272,7 +26272,7 @@ pub unsafe fn vrshrq_n_s32(a: int32x4_t) -> int32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshr, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshr_n_s64(a: int64x1_t) -> int64x1_t { static_assert!(N : i32 where N >= 1 && N <= 64); vrshl_s64(a, vdup_n_s64((-N) as _)) @@ -26287,7 +26287,7 @@ pub unsafe fn vrshr_n_s64(a: int64x1_t) -> int64x1_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshr, N = 2))] #[rustc_legacy_const_generics(1)] 
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshrq_n_s64(a: int64x2_t) -> int64x2_t { static_assert!(N : i32 where N >= 1 && N <= 64); vrshlq_s64(a, vdupq_n_s64((-N) as _)) @@ -26302,7 +26302,7 @@ pub unsafe fn vrshrq_n_s64(a: int64x2_t) -> int64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshr, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshr_n_u8(a: uint8x8_t) -> uint8x8_t { static_assert!(N : i32 where N >= 1 && N <= 8); vrshl_u8(a, vdup_n_s8((-N) as _)) @@ -26317,7 +26317,7 @@ pub unsafe fn vrshr_n_u8(a: uint8x8_t) -> uint8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshr, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshrq_n_u8(a: uint8x16_t) -> uint8x16_t { static_assert!(N : i32 where N >= 1 && N <= 8); vrshlq_u8(a, vdupq_n_s8((-N) as _)) @@ -26332,7 +26332,7 @@ pub unsafe fn vrshrq_n_u8(a: uint8x16_t) -> uint8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshr, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshr_n_u16(a: uint16x4_t) -> uint16x4_t { static_assert!(N : i32 where N >= 1 && N <= 16); vrshl_u16(a, vdup_n_s16((-N) as _)) @@ -26347,7 +26347,7 @@ pub unsafe fn vrshr_n_u16(a: uint16x4_t) -> uint16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshr, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshrq_n_u16(a: uint16x8_t) -> uint16x8_t { static_assert!(N : i32 where N >= 1 && N <= 16); vrshlq_u16(a, vdupq_n_s16((-N) as _)) @@ -26362,7 +26362,7 @@ pub unsafe fn vrshrq_n_u16(a: uint16x8_t) -> uint16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshr, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshr_n_u32(a: uint32x2_t) -> uint32x2_t { static_assert!(N : i32 where N >= 1 && N <= 32); vrshl_u32(a, vdup_n_s32((-N) as _)) @@ -26377,7 +26377,7 @@ pub unsafe fn vrshr_n_u32(a: uint32x2_t) -> uint32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshr, N = 2))] #[rustc_legacy_const_generics(1)] 
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshrq_n_u32(a: uint32x4_t) -> uint32x4_t { static_assert!(N : i32 where N >= 1 && N <= 32); vrshlq_u32(a, vdupq_n_s32((-N) as _)) @@ -26392,7 +26392,7 @@ pub unsafe fn vrshrq_n_u32(a: uint32x4_t) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshr, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshr_n_u64(a: uint64x1_t) -> uint64x1_t { static_assert!(N : i32 where N >= 1 && N <= 64); vrshl_u64(a, vdup_n_s64((-N) as _)) @@ -26407,7 +26407,7 @@ pub unsafe fn vrshr_n_u64(a: uint64x1_t) -> uint64x1_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshr, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshrq_n_u64(a: uint64x2_t) -> uint64x2_t { static_assert!(N : i32 where N >= 1 && N <= 64); vrshlq_u64(a, vdupq_n_s64((-N) as _)) @@ -26533,7 +26533,7 @@ vrshrn_n_s64_(a, N) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshrn, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rshrn, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshrn_n_u16(a: uint16x8_t) -> uint8x8_t { static_assert!(N : i32 where N >= 1 && N <= 8); transmute(vrshrn_n_s16::(transmute(a))) @@ -26548,7 +26548,7 @@ pub unsafe fn vrshrn_n_u16(a: uint16x8_t) -> uint8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshrn, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rshrn, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshrn_n_u32(a: uint32x4_t) -> uint16x4_t { static_assert!(N : i32 where N >= 1 && N <= 16); transmute(vrshrn_n_s32::(transmute(a))) @@ -26563,7 +26563,7 @@ pub unsafe fn vrshrn_n_u32(a: uint32x4_t) -> uint16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshrn, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rshrn, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshrn_n_u64(a: uint64x2_t) -> uint32x2_t { static_assert!(N : i32 where N >= 1 && N <= 32); transmute(vrshrn_n_s64::(transmute(a))) @@ -26578,7 +26578,7 @@ pub unsafe fn vrshrn_n_u64(a: uint64x2_t) -> uint32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srsra, N = 2))] #[rustc_legacy_const_generics(2)] 
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsra_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { static_assert!(N : i32 where N >= 1 && N <= 8); simd_add(a, vrshr_n_s8::(b)) @@ -26593,7 +26593,7 @@ pub unsafe fn vrsra_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srsra, N = 2))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsraq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { static_assert!(N : i32 where N >= 1 && N <= 8); simd_add(a, vrshrq_n_s8::(b)) @@ -26608,7 +26608,7 @@ pub unsafe fn vrsraq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srsra, N = 2))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsra_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { static_assert!(N : i32 where N >= 1 && N <= 16); simd_add(a, vrshr_n_s16::(b)) @@ -26623,7 +26623,7 @@ pub unsafe fn vrsra_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srsra, N = 2))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsraq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { static_assert!(N : i32 where N >= 1 && N <= 16); simd_add(a, vrshrq_n_s16::(b)) @@ -26638,7 +26638,7 @@ pub unsafe fn vrsraq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_ #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srsra, N = 2))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsra_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { static_assert!(N : i32 where N >= 1 && N <= 32); simd_add(a, vrshr_n_s32::(b)) @@ -26653,7 +26653,7 @@ pub unsafe fn vrsra_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srsra, N = 2))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsraq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { static_assert!(N : i32 where N >= 1 && N <= 32); simd_add(a, vrshrq_n_s32::(b)) @@ -26668,7 +26668,7 @@ pub unsafe fn vrsraq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_ #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] #[cfg_attr(all(test, 
target_arch = "aarch64"), assert_instr(srsra, N = 2))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsra_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { static_assert!(N : i32 where N >= 1 && N <= 64); simd_add(a, vrshr_n_s64::(b)) @@ -26683,7 +26683,7 @@ pub unsafe fn vrsra_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srsra, N = 2))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsraq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { static_assert!(N : i32 where N >= 1 && N <= 64); simd_add(a, vrshrq_n_s64::(b)) @@ -26698,7 +26698,7 @@ pub unsafe fn vrsraq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_ #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ursra, N = 2))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsra_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { static_assert!(N : i32 where N >= 1 && N <= 8); simd_add(a, vrshr_n_u8::(b)) @@ -26713,7 +26713,7 @@ pub unsafe fn vrsra_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ursra, N = 2))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsraq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { static_assert!(N : i32 where N >= 1 && N <= 8); simd_add(a, vrshrq_n_u8::(b)) @@ -26728,7 +26728,7 @@ pub unsafe fn vrsraq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x1 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ursra, N = 2))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsra_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { static_assert!(N : i32 where N >= 1 && N <= 16); simd_add(a, vrshr_n_u16::(b)) @@ -26743,7 +26743,7 @@ pub unsafe fn vrsra_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ursra, N = 2))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsraq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { static_assert!(N : i32 where N >= 1 && N <= 16); simd_add(a, vrshrq_n_u16::(b)) @@ -26758,7 +26758,7 @@ pub unsafe fn vrsraq_n_u16(a: uint16x8_t, b: uint16x8_t) 
-> uint16 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ursra, N = 2))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsra_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { static_assert!(N : i32 where N >= 1 && N <= 32); simd_add(a, vrshr_n_u32::(b)) @@ -26773,7 +26773,7 @@ pub unsafe fn vrsra_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ursra, N = 2))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsraq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { static_assert!(N : i32 where N >= 1 && N <= 32); simd_add(a, vrshrq_n_u32::(b)) @@ -26788,7 +26788,7 @@ pub unsafe fn vrsraq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ursra, N = 2))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsra_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { static_assert!(N : i32 where N >= 1 && N <= 64); simd_add(a, vrshr_n_u64::(b)) @@ -26803,7 +26803,7 @@ pub unsafe fn vrsra_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ursra, N = 2))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsraq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { static_assert!(N : i32 where N >= 1 && N <= 64); simd_add(a, vrshrq_n_u64::(b)) @@ -26817,7 +26817,7 @@ pub unsafe fn vrsraq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rsubhn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsubhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -26836,7 +26836,7 @@ vrsubhn_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rsubhn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsubhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -26855,7 +26855,7 @@ vrsubhn_s32_(a, b) 
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rsubhn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsubhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -26874,7 +26874,7 @@ vrsubhn_s64_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rsubhn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsubhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { transmute(vrsubhn_s16(transmute(a), transmute(b))) } @@ -26887,7 +26887,7 @@ pub unsafe fn vrsubhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rsubhn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsubhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { transmute(vrsubhn_s32(transmute(a), transmute(b))) } @@ -26900,7 +26900,7 @@ pub unsafe fn vrsubhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rsubhn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsubhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { transmute(vrsubhn_s64(transmute(a), transmute(b))) } @@ -26914,7 +26914,7 @@ pub unsafe fn vrsubhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vset_lane_s8(a: i8, b: int8x8_t) -> int8x8_t { static_assert_imm3!(LANE); simd_insert(b, LANE as u32, a) @@ -26929,7 +26929,7 @@ pub unsafe fn vset_lane_s8(a: i8, b: int8x8_t) -> int8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vset_lane_s16(a: i16, b: int16x4_t) -> int16x4_t { static_assert_imm2!(LANE); simd_insert(b, LANE as u32, a) @@ -26944,7 +26944,7 @@ pub unsafe fn vset_lane_s16(a: i16, b: int16x4_t) -> int16x4_t #[cfg_attr(all(test, 
target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vset_lane_s32(a: i32, b: int32x2_t) -> int32x2_t { static_assert_imm1!(LANE); simd_insert(b, LANE as u32, a) @@ -26959,7 +26959,7 @@ pub unsafe fn vset_lane_s32(a: i32, b: int32x2_t) -> int32x2_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vset_lane_s64(a: i64, b: int64x1_t) -> int64x1_t { static_assert!(LANE : i32 where LANE == 0); simd_insert(b, LANE as u32, a) @@ -26974,7 +26974,7 @@ pub unsafe fn vset_lane_s64(a: i64, b: int64x1_t) -> int64x1_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vset_lane_u8(a: u8, b: uint8x8_t) -> uint8x8_t { static_assert_imm3!(LANE); simd_insert(b, LANE as u32, a) @@ -26989,7 +26989,7 @@ pub unsafe fn vset_lane_u8(a: u8, b: uint8x8_t) -> uint8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vset_lane_u16(a: u16, b: uint16x4_t) -> uint16x4_t { static_assert_imm2!(LANE); simd_insert(b, LANE as u32, a) @@ -27004,7 +27004,7 @@ pub unsafe fn vset_lane_u16(a: u16, b: uint16x4_t) -> uint16x4_ #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vset_lane_u32(a: u32, b: uint32x2_t) -> uint32x2_t { static_assert_imm1!(LANE); simd_insert(b, LANE as u32, a) @@ -27019,7 +27019,7 @@ pub unsafe fn vset_lane_u32(a: u32, b: uint32x2_t) -> uint32x2_ #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vset_lane_u64(a: u64, b: uint64x1_t) -> uint64x1_t { static_assert!(LANE : i32 where LANE == 0); simd_insert(b, LANE as u32, a) @@ -27034,7 +27034,7 @@ pub unsafe fn vset_lane_u64(a: u64, b: uint64x1_t) -> uint64x1_ #[cfg_attr(all(test, target_arch = "arm"), 
assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vset_lane_p8(a: p8, b: poly8x8_t) -> poly8x8_t { static_assert_imm3!(LANE); simd_insert(b, LANE as u32, a) @@ -27049,7 +27049,7 @@ pub unsafe fn vset_lane_p8(a: p8, b: poly8x8_t) -> poly8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vset_lane_p16(a: p16, b: poly16x4_t) -> poly16x4_t { static_assert_imm2!(LANE); simd_insert(b, LANE as u32, a) @@ -27064,7 +27064,7 @@ pub unsafe fn vset_lane_p16(a: p16, b: poly16x4_t) -> poly16x4_ #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vset_lane_p64(a: p64, b: poly64x1_t) -> poly64x1_t { static_assert!(LANE : i32 where LANE == 0); simd_insert(b, LANE as u32, a) @@ -27079,7 +27079,7 @@ pub unsafe fn vset_lane_p64(a: p64, b: poly64x1_t) -> poly64x1_ #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsetq_lane_s8(a: i8, b: int8x16_t) -> int8x16_t { static_assert_imm4!(LANE); simd_insert(b, LANE as u32, a) @@ -27094,7 +27094,7 @@ pub unsafe fn vsetq_lane_s8(a: i8, b: int8x16_t) -> int8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsetq_lane_s16(a: i16, b: int16x8_t) -> int16x8_t { static_assert_imm3!(LANE); simd_insert(b, LANE as u32, a) @@ -27109,7 +27109,7 @@ pub unsafe fn vsetq_lane_s16(a: i16, b: int16x8_t) -> int16x8_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsetq_lane_s32(a: i32, b: int32x4_t) -> int32x4_t { static_assert_imm2!(LANE); simd_insert(b, LANE as u32, a) @@ -27124,7 +27124,7 @@ pub unsafe fn vsetq_lane_s32(a: i32, b: int32x4_t) -> int32x4_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] 
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsetq_lane_s64(a: i64, b: int64x2_t) -> int64x2_t { static_assert_imm1!(LANE); simd_insert(b, LANE as u32, a) @@ -27139,7 +27139,7 @@ pub unsafe fn vsetq_lane_s64(a: i64, b: int64x2_t) -> int64x2_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsetq_lane_u8(a: u8, b: uint8x16_t) -> uint8x16_t { static_assert_imm4!(LANE); simd_insert(b, LANE as u32, a) @@ -27154,7 +27154,7 @@ pub unsafe fn vsetq_lane_u8(a: u8, b: uint8x16_t) -> uint8x16_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsetq_lane_u16(a: u16, b: uint16x8_t) -> uint16x8_t { static_assert_imm3!(LANE); simd_insert(b, LANE as u32, a) @@ -27169,7 +27169,7 @@ pub unsafe fn vsetq_lane_u16(a: u16, b: uint16x8_t) -> uint16x8 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsetq_lane_u32(a: u32, b: uint32x4_t) -> uint32x4_t { static_assert_imm2!(LANE); simd_insert(b, LANE as u32, a) @@ -27184,7 +27184,7 @@ pub unsafe fn vsetq_lane_u32(a: u32, b: uint32x4_t) -> uint32x4 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsetq_lane_u64(a: u64, b: uint64x2_t) -> uint64x2_t { static_assert_imm1!(LANE); simd_insert(b, LANE as u32, a) @@ -27199,7 +27199,7 @@ pub unsafe fn vsetq_lane_u64(a: u64, b: uint64x2_t) -> uint64x2 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsetq_lane_p8(a: p8, b: poly8x16_t) -> poly8x16_t { static_assert_imm4!(LANE); simd_insert(b, LANE as u32, a) @@ -27214,7 +27214,7 @@ pub unsafe fn vsetq_lane_p8(a: p8, b: poly8x16_t) -> poly8x16_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), 
assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsetq_lane_p16(a: p16, b: poly16x8_t) -> poly16x8_t { static_assert_imm3!(LANE); simd_insert(b, LANE as u32, a) @@ -27229,7 +27229,7 @@ pub unsafe fn vsetq_lane_p16(a: p16, b: poly16x8_t) -> poly16x8 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsetq_lane_p64(a: p64, b: poly64x2_t) -> poly64x2_t { static_assert_imm1!(LANE); simd_insert(b, LANE as u32, a) @@ -27244,7 +27244,7 @@ pub unsafe fn vsetq_lane_p64(a: p64, b: poly64x2_t) -> poly64x2 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vset_lane_f32(a: f32, b: float32x2_t) -> float32x2_t { static_assert_imm1!(LANE); simd_insert(b, LANE as u32, a) @@ -27259,7 +27259,7 @@ pub unsafe fn vset_lane_f32(a: f32, b: float32x2_t) -> float32x #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsetq_lane_f32(a: f32, b: float32x4_t) -> float32x4_t { static_assert_imm2!(LANE); simd_insert(b, LANE as u32, a) @@ -27273,7 +27273,7 @@ pub unsafe fn vsetq_lane_f32(a: f32, b: float32x4_t) -> float32 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -27292,7 +27292,7 @@ vshl_s8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -27311,7 +27311,7 @@ vshlq_s8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = 
"neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -27330,7 +27330,7 @@ vshl_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -27349,7 +27349,7 @@ vshlq_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -27368,7 +27368,7 @@ vshl_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -27387,7 +27387,7 @@ vshlq_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -27406,7 +27406,7 @@ vshl_s64_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -27425,7 +27425,7 @@ vshlq_s64_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -27444,7 +27444,7 @@ vshl_u8_(a, b) #[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -27463,7 +27463,7 @@ vshlq_u8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -27482,7 +27482,7 @@ vshl_u16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -27501,7 +27501,7 @@ vshlq_u16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -27520,7 +27520,7 @@ vshl_u32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -27539,7 +27539,7 @@ vshlq_u32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -27558,7 +27558,7 @@ vshl_u64_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = 
"neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -27578,7 +27578,7 @@ vshlq_u64_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shl, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshl_n_s8(a: int8x8_t) -> int8x8_t { static_assert_imm3!(N); simd_shl(a, vdup_n_s8(N as _)) @@ -27593,7 +27593,7 @@ pub unsafe fn vshl_n_s8(a: int8x8_t) -> int8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shl, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshlq_n_s8(a: int8x16_t) -> int8x16_t { static_assert_imm3!(N); simd_shl(a, vdupq_n_s8(N as _)) @@ -27608,7 +27608,7 @@ pub unsafe fn vshlq_n_s8(a: int8x16_t) -> int8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shl, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshl_n_s16(a: int16x4_t) -> int16x4_t { static_assert_imm4!(N); simd_shl(a, vdup_n_s16(N as _)) @@ -27623,7 +27623,7 @@ pub unsafe fn vshl_n_s16(a: int16x4_t) -> int16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shl, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshlq_n_s16(a: int16x8_t) -> int16x8_t { static_assert_imm4!(N); simd_shl(a, vdupq_n_s16(N as _)) @@ -27638,7 +27638,7 @@ pub unsafe fn vshlq_n_s16(a: int16x8_t) -> int16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shl, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshl_n_s32(a: int32x2_t) -> int32x2_t { static_assert_imm5!(N); simd_shl(a, vdup_n_s32(N as _)) @@ -27653,7 +27653,7 @@ pub unsafe fn vshl_n_s32(a: int32x2_t) -> int32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shl, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshlq_n_s32(a: int32x4_t) -> int32x4_t { static_assert_imm5!(N); simd_shl(a, vdupq_n_s32(N as _)) @@ -27668,7 +27668,7 @@ pub unsafe fn vshlq_n_s32(a: int32x4_t) -> int32x4_t { #[cfg_attr(all(test, target_arch = 
"arm"), assert_instr(vshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shl, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshl_n_u8(a: uint8x8_t) -> uint8x8_t { static_assert_imm3!(N); simd_shl(a, vdup_n_u8(N as _)) @@ -27683,7 +27683,7 @@ pub unsafe fn vshl_n_u8(a: uint8x8_t) -> uint8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shl, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshlq_n_u8(a: uint8x16_t) -> uint8x16_t { static_assert_imm3!(N); simd_shl(a, vdupq_n_u8(N as _)) @@ -27698,7 +27698,7 @@ pub unsafe fn vshlq_n_u8(a: uint8x16_t) -> uint8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shl, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshl_n_u16(a: uint16x4_t) -> uint16x4_t { static_assert_imm4!(N); simd_shl(a, vdup_n_u16(N as _)) @@ -27713,7 +27713,7 @@ pub unsafe fn vshl_n_u16(a: uint16x4_t) -> uint16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shl, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshlq_n_u16(a: uint16x8_t) -> uint16x8_t { static_assert_imm4!(N); simd_shl(a, vdupq_n_u16(N as _)) @@ -27728,7 +27728,7 @@ pub unsafe fn vshlq_n_u16(a: uint16x8_t) -> uint16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shl, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshl_n_u32(a: uint32x2_t) -> uint32x2_t { static_assert_imm5!(N); simd_shl(a, vdup_n_u32(N as _)) @@ -27743,7 +27743,7 @@ pub unsafe fn vshl_n_u32(a: uint32x2_t) -> uint32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shl, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshlq_n_u32(a: uint32x4_t) -> uint32x4_t { static_assert_imm5!(N); simd_shl(a, vdupq_n_u32(N as _)) @@ -27758,7 +27758,7 @@ pub unsafe fn vshlq_n_u32(a: uint32x4_t) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shl, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = 
"neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshl_n_s64(a: int64x1_t) -> int64x1_t { static_assert_imm6!(N); simd_shl(a, vdup_n_s64(N as _)) @@ -27773,7 +27773,7 @@ pub unsafe fn vshl_n_s64(a: int64x1_t) -> int64x1_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shl, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshlq_n_s64(a: int64x2_t) -> int64x2_t { static_assert_imm6!(N); simd_shl(a, vdupq_n_s64(N as _)) @@ -27788,7 +27788,7 @@ pub unsafe fn vshlq_n_s64(a: int64x2_t) -> int64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shl, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshl_n_u64(a: uint64x1_t) -> uint64x1_t { static_assert_imm6!(N); simd_shl(a, vdup_n_u64(N as _)) @@ -27803,7 +27803,7 @@ pub unsafe fn vshl_n_u64(a: uint64x1_t) -> uint64x1_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shl, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshlq_n_u64(a: uint64x2_t) -> uint64x2_t { static_assert_imm6!(N); simd_shl(a, vdupq_n_u64(N as _)) @@ -27818,7 +27818,7 @@ pub unsafe fn vshlq_n_u64(a: uint64x2_t) -> uint64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.s8", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshll, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshll_n_s8(a: int8x8_t) -> int16x8_t { static_assert!(N : i32 where N >= 0 && N <= 8); simd_shl(simd_cast(a), vdupq_n_s16(N as _)) @@ -27833,7 +27833,7 @@ pub unsafe fn vshll_n_s8(a: int8x8_t) -> int16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.s16", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshll, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshll_n_s16(a: int16x4_t) -> int32x4_t { static_assert!(N : i32 where N >= 0 && N <= 16); simd_shl(simd_cast(a), vdupq_n_s32(N as _)) @@ -27848,7 +27848,7 @@ pub unsafe fn vshll_n_s16(a: int16x4_t) -> int32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.s32", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshll, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", 
since = "1.59.0"))] pub unsafe fn vshll_n_s32(a: int32x2_t) -> int64x2_t { static_assert!(N : i32 where N >= 0 && N <= 32); simd_shl(simd_cast(a), vdupq_n_s64(N as _)) @@ -27863,7 +27863,7 @@ pub unsafe fn vshll_n_s32(a: int32x2_t) -> int64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.u8", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushll, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshll_n_u8(a: uint8x8_t) -> uint16x8_t { static_assert!(N : i32 where N >= 0 && N <= 8); simd_shl(simd_cast(a), vdupq_n_u16(N as _)) @@ -27878,7 +27878,7 @@ pub unsafe fn vshll_n_u8(a: uint8x8_t) -> uint16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.u16", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushll, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshll_n_u16(a: uint16x4_t) -> uint32x4_t { static_assert!(N : i32 where N >= 0 && N <= 16); simd_shl(simd_cast(a), vdupq_n_u32(N as _)) @@ -27893,7 +27893,7 @@ pub unsafe fn vshll_n_u16(a: uint16x4_t) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.u32", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushll, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshll_n_u32(a: uint32x2_t) -> uint64x2_t { static_assert!(N : i32 where N >= 0 && N <= 32); simd_shl(simd_cast(a), vdupq_n_u64(N as _)) @@ -27908,7 +27908,7 @@ pub unsafe fn vshll_n_u32(a: uint32x2_t) -> uint64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s8", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshr, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshr_n_s8(a: int8x8_t) -> int8x8_t { static_assert!(N : i32 where N >= 1 && N <= 8); let n: i32 = if N == 8 { 7 } else { N }; @@ -27924,7 +27924,7 @@ pub unsafe fn vshr_n_s8(a: int8x8_t) -> int8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s8", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshr, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshrq_n_s8(a: int8x16_t) -> int8x16_t { static_assert!(N : i32 where N >= 1 && N <= 8); let n: i32 = if N == 8 { 7 } else { N }; @@ -27940,7 +27940,7 @@ pub unsafe fn vshrq_n_s8(a: int8x16_t) -> int8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s16", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshr, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = 
"arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshr_n_s16(a: int16x4_t) -> int16x4_t { static_assert!(N : i32 where N >= 1 && N <= 16); let n: i32 = if N == 16 { 15 } else { N }; @@ -27956,7 +27956,7 @@ pub unsafe fn vshr_n_s16(a: int16x4_t) -> int16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s16", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshr, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshrq_n_s16(a: int16x8_t) -> int16x8_t { static_assert!(N : i32 where N >= 1 && N <= 16); let n: i32 = if N == 16 { 15 } else { N }; @@ -27972,7 +27972,7 @@ pub unsafe fn vshrq_n_s16(a: int16x8_t) -> int16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s32", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshr, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshr_n_s32(a: int32x2_t) -> int32x2_t { static_assert!(N : i32 where N >= 1 && N <= 32); let n: i32 = if N == 32 { 31 } else { N }; @@ -27988,7 +27988,7 @@ pub unsafe fn vshr_n_s32(a: int32x2_t) -> int32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s32", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshr, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshrq_n_s32(a: int32x4_t) -> int32x4_t { static_assert!(N : i32 where N >= 1 && N <= 32); let n: i32 = if N == 32 { 31 } else { N }; @@ -28004,7 +28004,7 @@ pub unsafe fn vshrq_n_s32(a: int32x4_t) -> int32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s64", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshr, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshr_n_s64(a: int64x1_t) -> int64x1_t { static_assert!(N : i32 where N >= 1 && N <= 64); let n: i32 = if N == 64 { 63 } else { N }; @@ -28020,7 +28020,7 @@ pub unsafe fn vshr_n_s64(a: int64x1_t) -> int64x1_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s64", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshr, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshrq_n_s64(a: int64x2_t) -> int64x2_t { static_assert!(N : i32 where N >= 1 && N <= 64); let n: i32 = if N == 64 { 63 } else { N }; @@ -28036,7 +28036,7 @@ pub unsafe fn vshrq_n_s64(a: int64x2_t) -> int64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u8", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushr, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = 
"1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshr_n_u8(a: uint8x8_t) -> uint8x8_t { static_assert!(N : i32 where N >= 1 && N <= 8); let n: i32 = if N == 8 { return vdup_n_u8(0); } else { N }; @@ -28052,7 +28052,7 @@ pub unsafe fn vshr_n_u8(a: uint8x8_t) -> uint8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u8", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushr, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshrq_n_u8(a: uint8x16_t) -> uint8x16_t { static_assert!(N : i32 where N >= 1 && N <= 8); let n: i32 = if N == 8 { return vdupq_n_u8(0); } else { N }; @@ -28068,7 +28068,7 @@ pub unsafe fn vshrq_n_u8(a: uint8x16_t) -> uint8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u16", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushr, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshr_n_u16(a: uint16x4_t) -> uint16x4_t { static_assert!(N : i32 where N >= 1 && N <= 16); let n: i32 = if N == 16 { return vdup_n_u16(0); } else { N }; @@ -28084,7 +28084,7 @@ pub unsafe fn vshr_n_u16(a: uint16x4_t) -> uint16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u16", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushr, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshrq_n_u16(a: uint16x8_t) -> uint16x8_t { static_assert!(N : i32 where N >= 1 && N <= 16); let n: i32 = if N == 16 { return vdupq_n_u16(0); } else { N }; @@ -28100,7 +28100,7 @@ pub unsafe fn vshrq_n_u16(a: uint16x8_t) -> uint16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u32", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushr, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshr_n_u32(a: uint32x2_t) -> uint32x2_t { static_assert!(N : i32 where N >= 1 && N <= 32); let n: i32 = if N == 32 { return vdup_n_u32(0); } else { N }; @@ -28116,7 +28116,7 @@ pub unsafe fn vshr_n_u32(a: uint32x2_t) -> uint32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u32", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushr, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshrq_n_u32(a: uint32x4_t) -> uint32x4_t { static_assert!(N : i32 where N >= 1 && N <= 32); let n: i32 = if N == 32 { return vdupq_n_u32(0); } else { N }; @@ -28132,7 +28132,7 @@ pub unsafe fn vshrq_n_u32(a: uint32x4_t) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u64", N = 2))] #[cfg_attr(all(test, 
target_arch = "aarch64"), assert_instr(ushr, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshr_n_u64(a: uint64x1_t) -> uint64x1_t { static_assert!(N : i32 where N >= 1 && N <= 64); let n: i32 = if N == 64 { return vdup_n_u64(0); } else { N }; @@ -28148,7 +28148,7 @@ pub unsafe fn vshr_n_u64(a: uint64x1_t) -> uint64x1_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u64", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushr, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshrq_n_u64(a: uint64x2_t) -> uint64x2_t { static_assert!(N : i32 where N >= 1 && N <= 64); let n: i32 = if N == 64 { return vdupq_n_u64(0); } else { N }; @@ -28164,7 +28164,7 @@ pub unsafe fn vshrq_n_u64(a: uint64x2_t) -> uint64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i16", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shrn, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshrn_n_s16(a: int16x8_t) -> int8x8_t { static_assert!(N : i32 where N >= 1 && N <= 8); simd_cast(simd_shr(a, vdupq_n_s16(N as _))) @@ -28179,7 +28179,7 @@ pub unsafe fn vshrn_n_s16(a: int16x8_t) -> int8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i32", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shrn, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshrn_n_s32(a: int32x4_t) -> int16x4_t { static_assert!(N : i32 where N >= 1 && N <= 16); simd_cast(simd_shr(a, vdupq_n_s32(N as _))) @@ -28194,7 +28194,7 @@ pub unsafe fn vshrn_n_s32(a: int32x4_t) -> int16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i64", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shrn, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshrn_n_s64(a: int64x2_t) -> int32x2_t { static_assert!(N : i32 where N >= 1 && N <= 32); simd_cast(simd_shr(a, vdupq_n_s64(N as _))) @@ -28209,7 +28209,7 @@ pub unsafe fn vshrn_n_s64(a: int64x2_t) -> int32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i16", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shrn, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshrn_n_u16(a: uint16x8_t) -> uint8x8_t { static_assert!(N : i32 where N >= 1 && N <= 8); simd_cast(simd_shr(a, vdupq_n_u16(N as _))) @@ -28224,7 +28224,7 @@ pub unsafe fn vshrn_n_u16(a: uint16x8_t) -> uint8x8_t { 
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i32", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shrn, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshrn_n_u32(a: uint32x4_t) -> uint16x4_t { static_assert!(N : i32 where N >= 1 && N <= 16); simd_cast(simd_shr(a, vdupq_n_u32(N as _))) @@ -28239,7 +28239,7 @@ pub unsafe fn vshrn_n_u32(a: uint32x4_t) -> uint16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i64", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shrn, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshrn_n_u64(a: uint64x2_t) -> uint32x2_t { static_assert!(N : i32 where N >= 1 && N <= 32); simd_cast(simd_shr(a, vdupq_n_u64(N as _))) @@ -28254,7 +28254,7 @@ pub unsafe fn vshrn_n_u64(a: uint64x2_t) -> uint32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ssra, N = 2))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsra_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { static_assert!(N : i32 where N >= 1 && N <= 8); simd_add(a, vshr_n_s8::(b)) @@ -28269,7 +28269,7 @@ pub unsafe fn vsra_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ssra, N = 2))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsraq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { static_assert!(N : i32 where N >= 1 && N <= 8); simd_add(a, vshrq_n_s8::(b)) @@ -28284,7 +28284,7 @@ pub unsafe fn vsraq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ssra, N = 2))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsra_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { static_assert!(N : i32 where N >= 1 && N <= 16); simd_add(a, vshr_n_s16::(b)) @@ -28299,7 +28299,7 @@ pub unsafe fn vsra_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ssra, N = 2))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsraq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { static_assert!(N : i32 where N >= 1 && N <= 16); simd_add(a, vshrq_n_s16::(b)) @@ -28314,7 +28314,7 @@ pub 
unsafe fn vsraq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ssra, N = 2))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsra_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { static_assert!(N : i32 where N >= 1 && N <= 32); simd_add(a, vshr_n_s32::(b)) @@ -28329,7 +28329,7 @@ pub unsafe fn vsra_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ssra, N = 2))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsraq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { static_assert!(N : i32 where N >= 1 && N <= 32); simd_add(a, vshrq_n_s32::(b)) @@ -28344,7 +28344,7 @@ pub unsafe fn vsraq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ssra, N = 2))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsra_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { static_assert!(N : i32 where N >= 1 && N <= 64); simd_add(a, vshr_n_s64::(b)) @@ -28359,7 +28359,7 @@ pub unsafe fn vsra_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ssra, N = 2))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsraq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { static_assert!(N : i32 where N >= 1 && N <= 64); simd_add(a, vshrq_n_s64::(b)) @@ -28374,7 +28374,7 @@ pub unsafe fn vsraq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usra, N = 2))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsra_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { static_assert!(N : i32 where N >= 1 && N <= 8); simd_add(a, vshr_n_u8::(b)) @@ -28389,7 +28389,7 @@ pub unsafe fn vsra_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usra, N = 2))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsraq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { static_assert!(N : i32 where N >= 
1 && N <= 8); simd_add(a, vshrq_n_u8::(b)) @@ -28404,7 +28404,7 @@ pub unsafe fn vsraq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usra, N = 2))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsra_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { static_assert!(N : i32 where N >= 1 && N <= 16); simd_add(a, vshr_n_u16::(b)) @@ -28419,7 +28419,7 @@ pub unsafe fn vsra_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usra, N = 2))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsraq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { static_assert!(N : i32 where N >= 1 && N <= 16); simd_add(a, vshrq_n_u16::(b)) @@ -28434,7 +28434,7 @@ pub unsafe fn vsraq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usra, N = 2))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsra_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { static_assert!(N : i32 where N >= 1 && N <= 32); simd_add(a, vshr_n_u32::(b)) @@ -28449,7 +28449,7 @@ pub unsafe fn vsra_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usra, N = 2))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsraq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { static_assert!(N : i32 where N >= 1 && N <= 32); simd_add(a, vshrq_n_u32::(b)) @@ -28464,7 +28464,7 @@ pub unsafe fn vsraq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usra, N = 2))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsra_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { static_assert!(N : i32 where N >= 1 && N <= 64); simd_add(a, vshr_n_u64::(b)) @@ -28479,7 +28479,7 @@ pub unsafe fn vsra_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usra, N = 2))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn 
vsraq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { static_assert!(N : i32 where N >= 1 && N <= 64); simd_add(a, vshrq_n_u64::(b)) @@ -28493,7 +28493,7 @@ pub unsafe fn vsraq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtrn_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { let a1: int8x8_t = simd_shuffle8!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); let b1: int8x8_t = simd_shuffle8!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); @@ -28508,7 +28508,7 @@ pub unsafe fn vtrn_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtrn_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { let a1: int16x4_t = simd_shuffle4!(a, b, [0, 4, 2, 6]); let b1: int16x4_t = simd_shuffle4!(a, b, [1, 5, 3, 7]); @@ -28523,7 +28523,7 @@ pub unsafe fn vtrn_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtrnq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { let a1: int8x16_t = simd_shuffle16!(a, b, [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]); let b1: int8x16_t = simd_shuffle16!(a, b, [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31]); @@ -28538,7 +28538,7 @@ pub unsafe fn vtrnq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtrnq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { let a1: int16x8_t = simd_shuffle8!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); let b1: int16x8_t = simd_shuffle8!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); @@ -28553,7 +28553,7 @@ pub unsafe fn vtrnq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtrnq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { let a1: int32x4_t = simd_shuffle4!(a, b, [0, 4, 2, 6]); let b1: int32x4_t = simd_shuffle4!(a, b, [1, 5, 3, 7]); 
@@ -28568,7 +28568,7 @@ pub unsafe fn vtrnq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtrn_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { let a1: uint8x8_t = simd_shuffle8!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); let b1: uint8x8_t = simd_shuffle8!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); @@ -28583,7 +28583,7 @@ pub unsafe fn vtrn_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtrn_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { let a1: uint16x4_t = simd_shuffle4!(a, b, [0, 4, 2, 6]); let b1: uint16x4_t = simd_shuffle4!(a, b, [1, 5, 3, 7]); @@ -28598,7 +28598,7 @@ pub unsafe fn vtrn_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtrnq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { let a1: uint8x16_t = simd_shuffle16!(a, b, [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]); let b1: uint8x16_t = simd_shuffle16!(a, b, [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31]); @@ -28613,7 +28613,7 @@ pub unsafe fn vtrnq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtrnq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { let a1: uint16x8_t = simd_shuffle8!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); let b1: uint16x8_t = simd_shuffle8!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); @@ -28628,7 +28628,7 @@ pub unsafe fn vtrnq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtrnq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { let a1: uint32x4_t = simd_shuffle4!(a, b, [0, 4, 2, 6]); let b1: uint32x4_t = simd_shuffle4!(a, b, [1, 5, 3, 7]); @@ -28643,7 +28643,7 @@ pub unsafe fn vtrnq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { 
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtrn_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { let a1: poly8x8_t = simd_shuffle8!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); let b1: poly8x8_t = simd_shuffle8!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); @@ -28658,7 +28658,7 @@ pub unsafe fn vtrn_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtrn_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { let a1: poly16x4_t = simd_shuffle4!(a, b, [0, 4, 2, 6]); let b1: poly16x4_t = simd_shuffle4!(a, b, [1, 5, 3, 7]); @@ -28673,7 +28673,7 @@ pub unsafe fn vtrn_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtrnq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { let a1: poly8x16_t = simd_shuffle16!(a, b, [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]); let b1: poly8x16_t = simd_shuffle16!(a, b, [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31]); @@ -28688,7 +28688,7 @@ pub unsafe fn vtrnq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtrnq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t { let a1: poly16x8_t = simd_shuffle8!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); let b1: poly16x8_t = simd_shuffle8!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); @@ -28703,7 +28703,7 @@ pub unsafe fn vtrnq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtrn_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t { let a1: int32x2_t = simd_shuffle2!(a, b, [0, 2]); let b1: int32x2_t = simd_shuffle2!(a, b, [1, 3]); @@ -28718,7 +28718,7 @@ pub unsafe fn vtrn_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), 
assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtrn_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { let a1: uint32x2_t = simd_shuffle2!(a, b, [0, 2]); let b1: uint32x2_t = simd_shuffle2!(a, b, [1, 3]); @@ -28733,7 +28733,7 @@ pub unsafe fn vtrn_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtrn_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t { let a1: float32x2_t = simd_shuffle2!(a, b, [0, 2]); let b1: float32x2_t = simd_shuffle2!(a, b, [1, 3]); @@ -28748,7 +28748,7 @@ pub unsafe fn vtrn_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtrnq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t { let a1: float32x4_t = simd_shuffle4!(a, b, [0, 4, 2, 6]); let b1: float32x4_t = simd_shuffle4!(a, b, [1, 5, 3, 7]); @@ -28763,7 +28763,7 @@ pub unsafe fn vtrnq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vzip_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { let a0: int8x8_t = simd_shuffle8!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); let b0: int8x8_t = simd_shuffle8!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); @@ -28778,7 +28778,7 @@ pub unsafe fn vzip_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vzip_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { let a0: int16x4_t = simd_shuffle4!(a, b, [0, 4, 1, 5]); let b0: int16x4_t = simd_shuffle4!(a, b, [2, 6, 3, 7]); @@ -28793,7 +28793,7 @@ pub unsafe fn vzip_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = 
"neon_intrinsics", since = "1.59.0"))] pub unsafe fn vzip_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { let a0: uint8x8_t = simd_shuffle8!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); let b0: uint8x8_t = simd_shuffle8!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); @@ -28808,7 +28808,7 @@ pub unsafe fn vzip_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vzip_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { let a0: uint16x4_t = simd_shuffle4!(a, b, [0, 4, 1, 5]); let b0: uint16x4_t = simd_shuffle4!(a, b, [2, 6, 3, 7]); @@ -28823,7 +28823,7 @@ pub unsafe fn vzip_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vzip_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { let a0: poly8x8_t = simd_shuffle8!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); let b0: poly8x8_t = simd_shuffle8!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); @@ -28838,7 +28838,7 @@ pub unsafe fn vzip_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vzip_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { let a0: poly16x4_t = simd_shuffle4!(a, b, [0, 4, 1, 5]); let b0: poly16x4_t = simd_shuffle4!(a, b, [2, 6, 3, 7]); @@ -28853,7 +28853,7 @@ pub unsafe fn vzip_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vzip_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t { let a0: int32x2_t = simd_shuffle2!(a, b, [0, 2]); let b0: int32x2_t = simd_shuffle2!(a, b, [1, 3]); @@ -28868,7 +28868,7 @@ pub unsafe fn vzip_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vzip_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { let a0: uint32x2_t = simd_shuffle2!(a, b, [0, 2]); let b0: uint32x2_t = simd_shuffle2!(a, b, [1, 3]); @@ -28883,7 
+28883,7 @@ pub unsafe fn vzip_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vzipq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { let a0: int8x16_t = simd_shuffle16!(a, b, [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23]); let b0: int8x16_t = simd_shuffle16!(a, b, [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31]); @@ -28898,7 +28898,7 @@ pub unsafe fn vzipq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vzipq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { let a0: int16x8_t = simd_shuffle8!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); let b0: int16x8_t = simd_shuffle8!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); @@ -28913,7 +28913,7 @@ pub unsafe fn vzipq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vzipq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { let a0: int32x4_t = simd_shuffle4!(a, b, [0, 4, 1, 5]); let b0: int32x4_t = simd_shuffle4!(a, b, [2, 6, 3, 7]); @@ -28928,7 +28928,7 @@ pub unsafe fn vzipq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vzipq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { let a0: uint8x16_t = simd_shuffle16!(a, b, [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23]); let b0: uint8x16_t = simd_shuffle16!(a, b, [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31]); @@ -28943,7 +28943,7 @@ pub unsafe fn vzipq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vzipq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { let a0: uint16x8_t = simd_shuffle8!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); let b0: uint16x8_t = simd_shuffle8!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); @@ -28958,7 +28958,7 @@ 
pub unsafe fn vzipq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vzipq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { let a0: uint32x4_t = simd_shuffle4!(a, b, [0, 4, 1, 5]); let b0: uint32x4_t = simd_shuffle4!(a, b, [2, 6, 3, 7]); @@ -28973,7 +28973,7 @@ pub unsafe fn vzipq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vzipq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { let a0: poly8x16_t = simd_shuffle16!(a, b, [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23]); let b0: poly8x16_t = simd_shuffle16!(a, b, [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31]); @@ -28988,7 +28988,7 @@ pub unsafe fn vzipq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vzipq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t { let a0: poly16x8_t = simd_shuffle8!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); let b0: poly16x8_t = simd_shuffle8!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); @@ -29003,7 +29003,7 @@ pub unsafe fn vzipq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vzip_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t { let a0: float32x2_t = simd_shuffle2!(a, b, [0, 2]); let b0: float32x2_t = simd_shuffle2!(a, b, [1, 3]); @@ -29018,7 +29018,7 @@ pub unsafe fn vzip_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vzipq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t { let a0: float32x4_t = simd_shuffle4!(a, b, [0, 4, 1, 5]); let b0: float32x4_t = simd_shuffle4!(a, b, [2, 6, 3, 7]); @@ -29033,7 +29033,7 @@ pub unsafe fn vzipq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t { #[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vuzp_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { let a0: int8x8_t = simd_shuffle8!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); let b0: int8x8_t = simd_shuffle8!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); @@ -29048,7 +29048,7 @@ pub unsafe fn vuzp_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vuzp_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { let a0: int16x4_t = simd_shuffle4!(a, b, [0, 2, 4, 6]); let b0: int16x4_t = simd_shuffle4!(a, b, [1, 3, 5, 7]); @@ -29063,7 +29063,7 @@ pub unsafe fn vuzp_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vuzpq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { let a0: int8x16_t = simd_shuffle16!(a, b, [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]); let b0: int8x16_t = simd_shuffle16!(a, b, [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31]); @@ -29078,7 +29078,7 @@ pub unsafe fn vuzpq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vuzpq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { let a0: int16x8_t = simd_shuffle8!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); let b0: int16x8_t = simd_shuffle8!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); @@ -29093,7 +29093,7 @@ pub unsafe fn vuzpq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vuzpq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { let a0: int32x4_t = simd_shuffle4!(a, b, [0, 2, 4, 6]); let b0: int32x4_t = simd_shuffle4!(a, b, [1, 3, 5, 7]); @@ -29108,7 +29108,7 @@ pub unsafe fn vuzpq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr(all(test, target_arch = 
"aarch64"), assert_instr(uzp))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vuzp_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { let a0: uint8x8_t = simd_shuffle8!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); let b0: uint8x8_t = simd_shuffle8!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); @@ -29123,7 +29123,7 @@ pub unsafe fn vuzp_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vuzp_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { let a0: uint16x4_t = simd_shuffle4!(a, b, [0, 2, 4, 6]); let b0: uint16x4_t = simd_shuffle4!(a, b, [1, 3, 5, 7]); @@ -29138,7 +29138,7 @@ pub unsafe fn vuzp_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vuzpq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { let a0: uint8x16_t = simd_shuffle16!(a, b, [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]); let b0: uint8x16_t = simd_shuffle16!(a, b, [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31]); @@ -29153,7 +29153,7 @@ pub unsafe fn vuzpq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vuzpq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { let a0: uint16x8_t = simd_shuffle8!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); let b0: uint16x8_t = simd_shuffle8!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); @@ -29168,7 +29168,7 @@ pub unsafe fn vuzpq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vuzpq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { let a0: uint32x4_t = simd_shuffle4!(a, b, [0, 2, 4, 6]); let b0: uint32x4_t = simd_shuffle4!(a, b, [1, 3, 5, 7]); @@ -29183,7 +29183,7 @@ pub unsafe fn vuzpq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))] -#[cfg_attr(target_arch = "aarch64", stable(feature = 
"neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vuzp_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { let a0: poly8x8_t = simd_shuffle8!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); let b0: poly8x8_t = simd_shuffle8!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); @@ -29198,7 +29198,7 @@ pub unsafe fn vuzp_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vuzp_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { let a0: poly16x4_t = simd_shuffle4!(a, b, [0, 2, 4, 6]); let b0: poly16x4_t = simd_shuffle4!(a, b, [1, 3, 5, 7]); @@ -29213,7 +29213,7 @@ pub unsafe fn vuzp_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vuzpq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { let a0: poly8x16_t = simd_shuffle16!(a, b, [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]); let b0: poly8x16_t = simd_shuffle16!(a, b, [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31]); @@ -29228,7 +29228,7 @@ pub unsafe fn vuzpq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vuzpq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t { let a0: poly16x8_t = simd_shuffle8!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); let b0: poly16x8_t = simd_shuffle8!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); @@ -29243,7 +29243,7 @@ pub unsafe fn vuzpq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vuzp_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t { let a0: int32x2_t = simd_shuffle2!(a, b, [0, 2]); let b0: int32x2_t = simd_shuffle2!(a, b, [1, 3]); @@ -29258,7 +29258,7 @@ pub unsafe fn vuzp_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", 
since = "1.59.0"))] pub unsafe fn vuzp_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { let a0: uint32x2_t = simd_shuffle2!(a, b, [0, 2]); let b0: uint32x2_t = simd_shuffle2!(a, b, [1, 3]); @@ -29273,7 +29273,7 @@ pub unsafe fn vuzp_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vuzp_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t { let a0: float32x2_t = simd_shuffle2!(a, b, [0, 2]); let b0: float32x2_t = simd_shuffle2!(a, b, [1, 3]); @@ -29288,7 +29288,7 @@ pub unsafe fn vuzp_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vuzpq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t { let a0: float32x4_t = simd_shuffle4!(a, b, [0, 2, 4, 6]); let b0: float32x4_t = simd_shuffle4!(a, b, [1, 3, 5, 7]); @@ -29303,7 +29303,7 @@ pub unsafe fn vuzpq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabal.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uabal))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vabal_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t { let d: uint8x8_t = vabd_u8(b, c); simd_add(a, simd_cast(d)) @@ -29317,7 +29317,7 @@ pub unsafe fn vabal_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabal.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uabal))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vabal_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t { let d: uint16x4_t = vabd_u16(b, c); simd_add(a, simd_cast(d)) @@ -29331,7 +29331,7 @@ pub unsafe fn vabal_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabal.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uabal))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vabal_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t { let d: uint32x2_t = vabd_u32(b, c); simd_add(a, simd_cast(d)) @@ -29345,7 +29345,7 @@ pub unsafe fn vabal_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2 
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabal.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sabal))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vabal_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t { let d: int8x8_t = vabd_s8(b, c); let e: uint8x8_t = simd_cast(d); @@ -29360,7 +29360,7 @@ pub unsafe fn vabal_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabal.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sabal))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vabal_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { let d: int16x4_t = vabd_s16(b, c); let e: uint16x4_t = simd_cast(d); @@ -29375,7 +29375,7 @@ pub unsafe fn vabal_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabal.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sabal))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vabal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { let d: int32x2_t = vabd_s32(b, c); let e: uint32x2_t = simd_cast(d); @@ -29390,7 +29390,7 @@ pub unsafe fn vabal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqabs))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqabs_s8(a: int8x8_t) -> int8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -29409,7 +29409,7 @@ vqabs_s8_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqabs))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqabsq_s8(a: int8x16_t) -> int8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -29428,7 +29428,7 @@ vqabsq_s8_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqabs))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqabs_s16(a: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -29447,7 +29447,7 @@ vqabs_s16_(a) #[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqabs))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqabsq_s16(a: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -29466,7 +29466,7 @@ vqabsq_s16_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqabs))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqabs_s32(a: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -29485,7 +29485,7 @@ vqabs_s32_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqabs))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqabsq_s32(a: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { diff --git a/library/stdarch/crates/core_arch/src/arm_shared/neon/mod.rs b/library/stdarch/crates/core_arch/src/arm_shared/neon/mod.rs index 0559aea83..31e924b84 100644 --- a/library/stdarch/crates/core_arch/src/arm_shared/neon/mod.rs +++ b/library/stdarch/crates/core_arch/src/arm_shared/neon/mod.rs @@ -18,90 +18,90 @@ pub(crate) type p128 = u128; types! { /// ARM-specific 64-bit wide vector of eight packed `i8`. - #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub struct int8x8_t(pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8); /// ARM-specific 64-bit wide vector of eight packed `u8`. - #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub struct uint8x8_t(pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8); /// ARM-specific 64-bit wide polynomial vector of eight packed `p8`. - #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub struct poly8x8_t(pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8); /// ARM-specific 64-bit wide vector of four packed `i16`. - #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub struct int16x4_t(pub(crate) i16, pub(crate) i16, pub(crate) i16, pub(crate) i16); /// ARM-specific 64-bit wide vector of four packed `u16`. 
- #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub struct uint16x4_t(pub(crate) u16, pub(crate) u16, pub(crate) u16, pub(crate) u16); // FIXME: ARM-specific 64-bit wide vector of four packed `f16`. // pub struct float16x4_t(f16, f16, f16, f16); /// ARM-specific 64-bit wide vector of four packed `p16`. - #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub struct poly16x4_t(pub(crate) p16, pub(crate) p16, pub(crate) p16, pub(crate) p16); /// ARM-specific 64-bit wide vector of two packed `i32`. - #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub struct int32x2_t(pub(crate) i32, pub(crate) i32); /// ARM-specific 64-bit wide vector of two packed `u32`. - #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub struct uint32x2_t(pub(crate) u32, pub(crate) u32); /// ARM-specific 64-bit wide vector of two packed `f32`. - #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub struct float32x2_t(pub(crate) f32, pub(crate) f32); /// ARM-specific 64-bit wide vector of one packed `i64`. - #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub struct int64x1_t(pub(crate) i64); /// ARM-specific 64-bit wide vector of one packed `u64`. - #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub struct uint64x1_t(pub(crate) u64); /// ARM-specific 64-bit wide vector of one packed `p64`. - #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub struct poly64x1_t(pub(crate) p64); /// ARM-specific 128-bit wide vector of sixteen packed `i8`. - #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub struct int8x16_t( pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8 , pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8 , pub(crate) i8, pub(crate) i8, ); /// ARM-specific 128-bit wide vector of sixteen packed `u8`. 
- #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub struct uint8x16_t( pub(crate) u8, pub(crate) u8 , pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8 , pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8 , pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8 , pub(crate) u8, pub(crate) u8, ); /// ARM-specific 128-bit wide vector of sixteen packed `p8`. - #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub struct poly8x16_t( pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, ); /// ARM-specific 128-bit wide vector of eight packed `i16`. - #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub struct int16x8_t(pub(crate) i16, pub(crate) i16, pub(crate) i16, pub(crate) i16, pub(crate) i16, pub(crate) i16, pub(crate) i16, pub(crate) i16); /// ARM-specific 128-bit wide vector of eight packed `u16`. - #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub struct uint16x8_t(pub(crate) u16, pub(crate) u16, pub(crate) u16, pub(crate) u16, pub(crate) u16, pub(crate) u16, pub(crate) u16, pub(crate) u16); // FIXME: ARM-specific 128-bit wide vector of eight packed `f16`. // pub struct float16x8_t(f16, f16, f16, f16, f16, f16, f16); /// ARM-specific 128-bit wide vector of eight packed `p16`. - #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub struct poly16x8_t(pub(crate) p16, pub(crate) p16, pub(crate) p16, pub(crate) p16, pub(crate) p16, pub(crate) p16, pub(crate) p16, pub(crate) p16); /// ARM-specific 128-bit wide vector of four packed `i32`. - #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub struct int32x4_t(pub(crate) i32, pub(crate) i32, pub(crate) i32, pub(crate) i32); /// ARM-specific 128-bit wide vector of four packed `u32`. - #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub struct uint32x4_t(pub(crate) u32, pub(crate) u32, pub(crate) u32, pub(crate) u32); /// ARM-specific 128-bit wide vector of four packed `f32`. - #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub struct float32x4_t(pub(crate) f32, pub(crate) f32, pub(crate) f32, pub(crate) f32); /// ARM-specific 128-bit wide vector of two packed `i64`. 
- #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub struct int64x2_t(pub(crate) i64, pub(crate) i64); /// ARM-specific 128-bit wide vector of two packed `u64`. - #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub struct uint64x2_t(pub(crate) u64, pub(crate) u64); /// ARM-specific 128-bit wide vector of two packed `p64`. - #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub struct poly64x2_t(pub(crate) p64, pub(crate) p64); } @@ -6915,6 +6915,177 @@ pub unsafe fn vusmmlaq_s32(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4 vusmmlaq_s32_(a, b, c) } +/* FIXME: 16-bit float +/// Vector combine +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vcombine_f16 ( low: float16x4_t, high: float16x4_t) -> float16x8_t { + simd_shuffle8!(low, high, [0, 1, 2, 3, 4, 5, 6, 7]) +} +*/ + +/// Vector combine +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcombine_f32(low: float32x2_t, high: float32x2_t) -> float32x4_t { + simd_shuffle4!(low, high, [0, 1, 2, 3]) +} + +/// Vector combine +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcombine_p8(low: poly8x8_t, high: poly8x8_t) -> poly8x16_t { + simd_shuffle16!( + low, + high, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], + ) +} + +/// Vector combine +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcombine_p16(low: poly16x4_t, high: poly16x4_t) -> poly16x8_t { + simd_shuffle8!(low, high, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +/// Vector combine +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(test, assert_instr(nop))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +pub unsafe fn vcombine_s8(low: int8x8_t, high: int8x8_t) -> int8x16_t { + simd_shuffle16!( + low, + high, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], + ) +} + +/// Vector combine +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(test, assert_instr(nop))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +pub unsafe fn vcombine_s16(low: int16x4_t, high: int16x4_t) -> int16x8_t { + simd_shuffle8!(low, high, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +/// Vector combine +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(test, assert_instr(nop))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = 
"1.59.0") +)] +pub unsafe fn vcombine_s32(low: int32x2_t, high: int32x2_t) -> int32x4_t { + simd_shuffle4!(low, high, [0, 1, 2, 3]) +} + +/// Vector combine +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(test, assert_instr(nop))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +pub unsafe fn vcombine_s64(low: int64x1_t, high: int64x1_t) -> int64x2_t { + simd_shuffle2!(low, high, [0, 1]) +} + +/// Vector combine +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(test, assert_instr(nop))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +pub unsafe fn vcombine_u8(low: uint8x8_t, high: uint8x8_t) -> uint8x16_t { + simd_shuffle16!( + low, + high, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], + ) +} + +/// Vector combine +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(test, assert_instr(nop))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +pub unsafe fn vcombine_u16(low: uint16x4_t, high: uint16x4_t) -> uint16x8_t { + simd_shuffle8!(low, high, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +/// Vector combine +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mov))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +pub unsafe fn vcombine_u32(low: uint32x2_t, high: uint32x2_t) -> uint32x4_t { + simd_shuffle4!(low, high, [0, 1, 2, 3]) +} + +/// Vector combine +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(test, assert_instr(nop))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +pub unsafe fn vcombine_u64(low: uint64x1_t, high: uint64x1_t) -> uint64x2_t { + simd_shuffle2!(low, high, [0, 1]) +} + +/// Vector combine +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(test, assert_instr(nop))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +pub unsafe fn vcombine_p64(low: poly64x1_t, high: poly64x1_t) -> poly64x2_t { + simd_shuffle2!(low, high, [0, 1]) +} + #[cfg(test)] mod tests { use super::*; @@ -12488,6 +12659,44 @@ mod tests { let r: i32x4 = transmute(vusmmlaq_s32(transmute(a), transmute(b), transmute(c))); assert_eq!(r, e); } + + macro_rules! 
test_vcombine { + ($test_id:ident => $fn_id:ident ([$($a:expr),*], [$($b:expr),*])) => { + #[allow(unused_assignments)] + #[simd_test(enable = "neon")] + unsafe fn $test_id() { + let a = [$($a),*]; + let b = [$($b),*]; + let e = [$($a),* $(, $b)*]; + let c = $fn_id(transmute(a), transmute(b)); + let mut d = e; + d = transmute(c); + assert_eq!(d, e); + } + } + } + + test_vcombine!(test_vcombine_s8 => vcombine_s8([3_i8, -4, 5, -6, 7, 8, 9, 10], [13_i8, -14, 15, -16, 17, 18, 19, 110])); + test_vcombine!(test_vcombine_u8 => vcombine_u8([3_u8, 4, 5, 6, 7, 8, 9, 10], [13_u8, 14, 15, 16, 17, 18, 19, 110])); + test_vcombine!(test_vcombine_p8 => vcombine_p8([3_u8, 4, 5, 6, 7, 8, 9, 10], [13_u8, 14, 15, 16, 17, 18, 19, 110])); + + test_vcombine!(test_vcombine_s16 => vcombine_s16([3_i16, -4, 5, -6], [13_i16, -14, 15, -16])); + test_vcombine!(test_vcombine_u16 => vcombine_u16([3_u16, 4, 5, 6], [13_u16, 14, 15, 16])); + test_vcombine!(test_vcombine_p16 => vcombine_p16([3_u16, 4, 5, 6], [13_u16, 14, 15, 16])); + // FIXME: 16-bit floats + // test_vcombine!(test_vcombine_f16 => vcombine_f16([3_f16, 4., 5., 6.], + // [13_f16, 14., 15., 16.])); + + test_vcombine!(test_vcombine_s32 => vcombine_s32([3_i32, -4], [13_i32, -14])); + test_vcombine!(test_vcombine_u32 => vcombine_u32([3_u32, 4], [13_u32, 14])); + // note: poly32x4 does not exist, and neither does vcombine_p32 + test_vcombine!(test_vcombine_f32 => vcombine_f32([3_f32, -4.], [13_f32, -14.])); + + test_vcombine!(test_vcombine_s64 => vcombine_s64([-3_i64], [13_i64])); + test_vcombine!(test_vcombine_u64 => vcombine_u64([3_u64], [13_u64])); + test_vcombine!(test_vcombine_p64 => vcombine_p64([3_u64], [13_u64])); + #[cfg(target_arch = "aarch64")] + test_vcombine!(test_vcombine_f64 => vcombine_f64([-3_f64], [13_f64])); } #[cfg(all(test, target_arch = "arm", target_endian = "little"))] diff --git a/library/stdarch/crates/core_arch/src/mips/msa.rs b/library/stdarch/crates/core_arch/src/mips/msa.rs index 85ed30d18..cded73a54 100644 --- a/library/stdarch/crates/core_arch/src/mips/msa.rs +++ b/library/stdarch/crates/core_arch/src/mips/msa.rs @@ -2208,7 +2208,7 @@ pub unsafe fn __msa_bmnz_v(a: v16u8, b: v16u8, c: v16u8) -> v16u8 { /// Immediate Bit Move If Not Zero /// /// Copy to destination vector `a` (sixteen unsigned 8-bit integer numbers) all bits from source vector -/// `b` (sixteen unsigned 8-bit integer numbers) for which the corresponding bits from from immediate `imm8` +/// `b` (sixteen unsigned 8-bit integer numbers) for which the corresponding bits from immediate `imm8` /// are 1 and leaves unchanged all destination bits for which the corresponding target bits are 0. /// #[inline] @@ -2237,7 +2237,7 @@ pub unsafe fn __msa_bmz_v(a: v16u8, b: v16u8, c: v16u8) -> v16u8 { /// Immediate Bit Move If Zero /// /// Copy to destination vector `a` (sixteen unsigned 8-bit integer numbers) all bits from source vector -/// `b` (sixteen unsigned 8-bit integer numbers) for which the corresponding bits from from immediate `imm8` +/// `b` (sixteen unsigned 8-bit integer numbers) for which the corresponding bits from immediate `imm8` /// are 0 and leaves unchanged all destination bits for which the corresponding immediate bits are 1. /// #[inline] diff --git a/library/stdarch/crates/core_arch/src/mod.rs b/library/stdarch/crates/core_arch/src/mod.rs index 2f7af22cb..231d89e33 100644 --- a/library/stdarch/crates/core_arch/src/mod.rs +++ b/library/stdarch/crates/core_arch/src/mod.rs @@ -155,7 +155,7 @@ pub mod arch { /// which support SIMD, or it will not have SIMD at all. 
For compatibility /// the standard library itself does not use any SIMD internally. /// Determining how best to ship your WebAssembly binary with SIMD is - /// largely left up to you as it can can be pretty nuanced depending on + /// largely left up to you as it can be pretty nuanced depending on /// your situation. /// /// [condsections]: https://github.com/webassembly/conditional-sections diff --git a/library/stdarch/crates/core_arch/src/x86/avx.rs b/library/stdarch/crates/core_arch/src/x86/avx.rs index ad9e68db6..f8e83a35b 100644 --- a/library/stdarch/crates/core_arch/src/x86/avx.rs +++ b/library/stdarch/crates/core_arch/src/x86/avx.rs @@ -2450,7 +2450,7 @@ pub unsafe fn _mm256_set1_epi8(a: i8) -> __m256i { ) } -/// Broadcasts 16-bit integer `a` to all all elements of returned vector. +/// Broadcasts 16-bit integer `a` to all elements of returned vector. /// This intrinsic may generate the `vpbroadcastw`. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_set1_epi16) diff --git a/library/stdarch/crates/core_arch/src/x86/avx2.rs b/library/stdarch/crates/core_arch/src/x86/avx2.rs index 16add3dbb..8638b3136 100644 --- a/library/stdarch/crates/core_arch/src/x86/avx2.rs +++ b/library/stdarch/crates/core_arch/src/x86/avx2.rs @@ -1857,7 +1857,9 @@ pub unsafe fn _mm256_maskstore_epi64(mem_addr: *mut i64, mask: __m256i, a: __m25 #[cfg_attr(test, assert_instr(vpmaxsw))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_max_epi16(a: __m256i, b: __m256i) -> __m256i { - transmute(pmaxsw(a.as_i16x16(), b.as_i16x16())) + let a = a.as_i16x16(); + let b = b.as_i16x16(); + transmute(simd_select::(simd_gt(a, b), a, b)) } /// Compares packed 32-bit integers in `a` and `b`, and returns the packed @@ -1869,7 +1871,9 @@ pub unsafe fn _mm256_max_epi16(a: __m256i, b: __m256i) -> __m256i { #[cfg_attr(test, assert_instr(vpmaxsd))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_max_epi32(a: __m256i, b: __m256i) -> __m256i { - transmute(pmaxsd(a.as_i32x8(), b.as_i32x8())) + let a = a.as_i32x8(); + let b = b.as_i32x8(); + transmute(simd_select::(simd_gt(a, b), a, b)) } /// Compares packed 8-bit integers in `a` and `b`, and returns the packed @@ -1881,7 +1885,9 @@ pub unsafe fn _mm256_max_epi32(a: __m256i, b: __m256i) -> __m256i { #[cfg_attr(test, assert_instr(vpmaxsb))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_max_epi8(a: __m256i, b: __m256i) -> __m256i { - transmute(pmaxsb(a.as_i8x32(), b.as_i8x32())) + let a = a.as_i8x32(); + let b = b.as_i8x32(); + transmute(simd_select::(simd_gt(a, b), a, b)) } /// Compares packed unsigned 16-bit integers in `a` and `b`, and returns @@ -1893,7 +1899,9 @@ pub unsafe fn _mm256_max_epi8(a: __m256i, b: __m256i) -> __m256i { #[cfg_attr(test, assert_instr(vpmaxuw))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_max_epu16(a: __m256i, b: __m256i) -> __m256i { - transmute(pmaxuw(a.as_u16x16(), b.as_u16x16())) + let a = a.as_u16x16(); + let b = b.as_u16x16(); + transmute(simd_select::(simd_gt(a, b), a, b)) } /// Compares packed unsigned 32-bit integers in `a` and `b`, and returns @@ -1905,7 +1913,9 @@ pub unsafe fn _mm256_max_epu16(a: __m256i, b: __m256i) -> __m256i { #[cfg_attr(test, assert_instr(vpmaxud))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_max_epu32(a: __m256i, b: __m256i) -> __m256i { - transmute(pmaxud(a.as_u32x8(), b.as_u32x8())) + let a = a.as_u32x8(); + let b = b.as_u32x8(); + 
transmute(simd_select::(simd_gt(a, b), a, b)) } /// Compares packed unsigned 8-bit integers in `a` and `b`, and returns @@ -1917,7 +1927,9 @@ pub unsafe fn _mm256_max_epu32(a: __m256i, b: __m256i) -> __m256i { #[cfg_attr(test, assert_instr(vpmaxub))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_max_epu8(a: __m256i, b: __m256i) -> __m256i { - transmute(pmaxub(a.as_u8x32(), b.as_u8x32())) + let a = a.as_u8x32(); + let b = b.as_u8x32(); + transmute(simd_select::(simd_gt(a, b), a, b)) } /// Compares packed 16-bit integers in `a` and `b`, and returns the packed @@ -1929,7 +1941,9 @@ pub unsafe fn _mm256_max_epu8(a: __m256i, b: __m256i) -> __m256i { #[cfg_attr(test, assert_instr(vpminsw))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_min_epi16(a: __m256i, b: __m256i) -> __m256i { - transmute(pminsw(a.as_i16x16(), b.as_i16x16())) + let a = a.as_i16x16(); + let b = b.as_i16x16(); + transmute(simd_select::(simd_lt(a, b), a, b)) } /// Compares packed 32-bit integers in `a` and `b`, and returns the packed @@ -1941,7 +1955,9 @@ pub unsafe fn _mm256_min_epi16(a: __m256i, b: __m256i) -> __m256i { #[cfg_attr(test, assert_instr(vpminsd))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_min_epi32(a: __m256i, b: __m256i) -> __m256i { - transmute(pminsd(a.as_i32x8(), b.as_i32x8())) + let a = a.as_i32x8(); + let b = b.as_i32x8(); + transmute(simd_select::(simd_lt(a, b), a, b)) } /// Compares packed 8-bit integers in `a` and `b`, and returns the packed @@ -1953,7 +1969,9 @@ pub unsafe fn _mm256_min_epi32(a: __m256i, b: __m256i) -> __m256i { #[cfg_attr(test, assert_instr(vpminsb))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_min_epi8(a: __m256i, b: __m256i) -> __m256i { - transmute(pminsb(a.as_i8x32(), b.as_i8x32())) + let a = a.as_i8x32(); + let b = b.as_i8x32(); + transmute(simd_select::(simd_lt(a, b), a, b)) } /// Compares packed unsigned 16-bit integers in `a` and `b`, and returns @@ -1965,7 +1983,9 @@ pub unsafe fn _mm256_min_epi8(a: __m256i, b: __m256i) -> __m256i { #[cfg_attr(test, assert_instr(vpminuw))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_min_epu16(a: __m256i, b: __m256i) -> __m256i { - transmute(pminuw(a.as_u16x16(), b.as_u16x16())) + let a = a.as_u16x16(); + let b = b.as_u16x16(); + transmute(simd_select::(simd_lt(a, b), a, b)) } /// Compares packed unsigned 32-bit integers in `a` and `b`, and returns @@ -1977,7 +1997,9 @@ pub unsafe fn _mm256_min_epu16(a: __m256i, b: __m256i) -> __m256i { #[cfg_attr(test, assert_instr(vpminud))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_min_epu32(a: __m256i, b: __m256i) -> __m256i { - transmute(pminud(a.as_u32x8(), b.as_u32x8())) + let a = a.as_u32x8(); + let b = b.as_u32x8(); + transmute(simd_select::(simd_lt(a, b), a, b)) } /// Compares packed unsigned 8-bit integers in `a` and `b`, and returns @@ -1989,7 +2011,9 @@ pub unsafe fn _mm256_min_epu32(a: __m256i, b: __m256i) -> __m256i { #[cfg_attr(test, assert_instr(vpminub))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_min_epu8(a: __m256i, b: __m256i) -> __m256i { - transmute(pminub(a.as_u8x32(), b.as_u8x32())) + let a = a.as_u8x32(); + let b = b.as_u8x32(); + transmute(simd_select::(simd_lt(a, b), a, b)) } /// Creates mask from the most significant bit of each 8-bit element in `a`, @@ -2001,7 +2025,9 @@ pub unsafe fn _mm256_min_epu8(a: __m256i, b: __m256i) -> __m256i { #[cfg_attr(test, assert_instr(vpmovmskb))] 
#[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_movemask_epi8(a: __m256i) -> i32 { - simd_bitmask::<_, u32>(a.as_i8x32()) as i32 + let z = i8x32::splat(0); + let m: i8x32 = simd_lt(a.as_i8x32(), z); + simd_bitmask::<_, u32>(m) as i32 } /// Computes the sum of absolute differences (SADs) of quadruplets of unsigned @@ -3618,30 +3644,6 @@ extern "C" { fn maskstoreq(mem_addr: *mut i8, mask: i64x2, a: i64x2); #[link_name = "llvm.x86.avx2.maskstore.q.256"] fn maskstoreq256(mem_addr: *mut i8, mask: i64x4, a: i64x4); - #[link_name = "llvm.x86.avx2.pmaxs.w"] - fn pmaxsw(a: i16x16, b: i16x16) -> i16x16; - #[link_name = "llvm.x86.avx2.pmaxs.d"] - fn pmaxsd(a: i32x8, b: i32x8) -> i32x8; - #[link_name = "llvm.x86.avx2.pmaxs.b"] - fn pmaxsb(a: i8x32, b: i8x32) -> i8x32; - #[link_name = "llvm.x86.avx2.pmaxu.w"] - fn pmaxuw(a: u16x16, b: u16x16) -> u16x16; - #[link_name = "llvm.x86.avx2.pmaxu.d"] - fn pmaxud(a: u32x8, b: u32x8) -> u32x8; - #[link_name = "llvm.x86.avx2.pmaxu.b"] - fn pmaxub(a: u8x32, b: u8x32) -> u8x32; - #[link_name = "llvm.x86.avx2.pmins.w"] - fn pminsw(a: i16x16, b: i16x16) -> i16x16; - #[link_name = "llvm.x86.avx2.pmins.d"] - fn pminsd(a: i32x8, b: i32x8) -> i32x8; - #[link_name = "llvm.x86.avx2.pmins.b"] - fn pminsb(a: i8x32, b: i8x32) -> i8x32; - #[link_name = "llvm.x86.avx2.pminu.w"] - fn pminuw(a: u16x16, b: u16x16) -> u16x16; - #[link_name = "llvm.x86.avx2.pminu.d"] - fn pminud(a: u32x8, b: u32x8) -> u32x8; - #[link_name = "llvm.x86.avx2.pminu.b"] - fn pminub(a: u8x32, b: u8x32) -> u8x32; #[link_name = "llvm.x86.avx2.mpsadbw"] fn mpsadbw(a: u8x32, b: u8x32, imm8: i32) -> u16x16; #[link_name = "llvm.x86.avx2.pmulhu.w"] diff --git a/library/stdarch/crates/core_arch/src/x86/avx512bf16.rs b/library/stdarch/crates/core_arch/src/x86/avx512bf16.rs index e9977e018..b21ededab 100644 --- a/library/stdarch/crates/core_arch/src/x86/avx512bf16.rs +++ b/library/stdarch/crates/core_arch/src/x86/avx512bf16.rs @@ -80,7 +80,7 @@ pub unsafe fn _mm256_cvtne2ps_pbh(a: __m256, b: __m256) -> __m256bh { } /// Convert packed single-precision (32-bit) floating-point elements in two vectors a and b -/// to packed BF16 (16-bit) floating-point elements and and store the results in single vector +/// to packed BF16 (16-bit) floating-point elements and store the results in single vector /// dst using writemask k (elements are copied from src when the corresponding mask bit is not set). /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#expand=1769,1651,1654&avx512techs=AVX512_BF16&text=_mm256_mask_cvtne2ps_pbh) #[inline] diff --git a/library/stdarch/crates/core_arch/src/x86/avx512bitalg.rs b/library/stdarch/crates/core_arch/src/x86/avx512bitalg.rs index 3c9df3912..1099ee2cb 100644 --- a/library/stdarch/crates/core_arch/src/x86/avx512bitalg.rs +++ b/library/stdarch/crates/core_arch/src/x86/avx512bitalg.rs @@ -303,7 +303,7 @@ pub unsafe fn _mm_mask_popcnt_epi8(src: __m128i, k: __mmask16, a: __m128i) -> __ } /// Considers the input `b` as packed 64-bit integers and `c` as packed 8-bit integers. -/// Then groups 8 8-bit values from `c`as indices into the the bits of the corresponding 64-bit integer. +/// Then groups 8 8-bit values from `c`as indices into the bits of the corresponding 64-bit integer. /// It then selects these bits and packs them into the output. 
/// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_bitshuffle_epi64_mask) @@ -315,7 +315,7 @@ pub unsafe fn _mm512_bitshuffle_epi64_mask(b: __m512i, c: __m512i) -> __mmask64 } /// Considers the input `b` as packed 64-bit integers and `c` as packed 8-bit integers. -/// Then groups 8 8-bit values from `c`as indices into the the bits of the corresponding 64-bit integer. +/// Then groups 8 8-bit values from `c`as indices into the bits of the corresponding 64-bit integer. /// It then selects these bits and packs them into the output. /// /// Uses the writemask in k - elements are zeroed in the result if the corresponding mask bit is not set. @@ -330,7 +330,7 @@ pub unsafe fn _mm512_mask_bitshuffle_epi64_mask(k: __mmask64, b: __m512i, c: __m } /// Considers the input `b` as packed 64-bit integers and `c` as packed 8-bit integers. -/// Then groups 8 8-bit values from `c`as indices into the the bits of the corresponding 64-bit integer. +/// Then groups 8 8-bit values from `c`as indices into the bits of the corresponding 64-bit integer. /// It then selects these bits and packs them into the output. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_bitshuffle_epi64_mask) @@ -342,7 +342,7 @@ pub unsafe fn _mm256_bitshuffle_epi64_mask(b: __m256i, c: __m256i) -> __mmask32 } /// Considers the input `b` as packed 64-bit integers and `c` as packed 8-bit integers. -/// Then groups 8 8-bit values from `c`as indices into the the bits of the corresponding 64-bit integer. +/// Then groups 8 8-bit values from `c`as indices into the bits of the corresponding 64-bit integer. /// It then selects these bits and packs them into the output. /// /// Uses the writemask in k - elements are zeroed in the result if the corresponding mask bit is not set. @@ -357,7 +357,7 @@ pub unsafe fn _mm256_mask_bitshuffle_epi64_mask(k: __mmask32, b: __m256i, c: __m } /// Considers the input `b` as packed 64-bit integers and `c` as packed 8-bit integers. -/// Then groups 8 8-bit values from `c`as indices into the the bits of the corresponding 64-bit integer. +/// Then groups 8 8-bit values from `c`as indices into the bits of the corresponding 64-bit integer. /// It then selects these bits and packs them into the output. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_bitshuffle_epi64_mask) @@ -369,7 +369,7 @@ pub unsafe fn _mm_bitshuffle_epi64_mask(b: __m128i, c: __m128i) -> __mmask16 { } /// Considers the input `b` as packed 64-bit integers and `c` as packed 8-bit integers. -/// Then groups 8 8-bit values from `c`as indices into the the bits of the corresponding 64-bit integer. +/// Then groups 8 8-bit values from `c`as indices into the bits of the corresponding 64-bit integer. /// It then selects these bits and packs them into the output. /// /// Uses the writemask in k - elements are zeroed in the result if the corresponding mask bit is not set. diff --git a/library/stdarch/crates/core_arch/src/x86/avx512bw.rs b/library/stdarch/crates/core_arch/src/x86/avx512bw.rs index 49d78ed60..fbf71dfc4 100644 --- a/library/stdarch/crates/core_arch/src/x86/avx512bw.rs +++ b/library/stdarch/crates/core_arch/src/x86/avx512bw.rs @@ -7450,7 +7450,7 @@ pub unsafe fn _mm_maskz_set1_epi8(k: __mmask16, a: i8) -> __m128i { transmute(simd_select_bitmask(k, r, zero)) } -/// Shuffle 16-bit integers in the low 64 bits of 128-bit lanes of a using the control in imm8. 
Store the results in the low 64 bits of 128-bit lanes of dst, with the high 64 bits of 128-bit lanes being copied from from a to dst. +/// Shuffle 16-bit integers in the low 64 bits of 128-bit lanes of a using the control in imm8. Store the results in the low 64 bits of 128-bit lanes of dst, with the high 64 bits of 128-bit lanes being copied from a to dst. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_shufflelo_epi16&expand=5221) #[inline] @@ -7501,7 +7501,7 @@ pub unsafe fn _mm512_shufflelo_epi16(a: __m512i) -> __m512i { transmute(r) } -/// Shuffle 16-bit integers in the low 64 bits of 128-bit lanes of a using the control in imm8. Store the results in the low 64 bits of 128-bit lanes of dst, with the high 64 bits of 128-bit lanes being copied from from a to dst, using writemask k (elements are copied from src when the corresponding mask bit is not set). +/// Shuffle 16-bit integers in the low 64 bits of 128-bit lanes of a using the control in imm8. Store the results in the low 64 bits of 128-bit lanes of dst, with the high 64 bits of 128-bit lanes being copied from a to dst, using writemask k (elements are copied from src when the corresponding mask bit is not set). /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_shufflelo_epi16&expand=5219) #[inline] @@ -7518,7 +7518,7 @@ pub unsafe fn _mm512_mask_shufflelo_epi16( transmute(simd_select_bitmask(k, r.as_i16x32(), src.as_i16x32())) } -/// Shuffle 16-bit integers in the low 64 bits of 128-bit lanes of a using the control in imm8. Store the results in the low 64 bits of 128-bit lanes of dst, with the high 64 bits of 128-bit lanes being copied from from a to dst, using zeromask k (elements are zeroed out when the corresponding mask bit is not set). +/// Shuffle 16-bit integers in the low 64 bits of 128-bit lanes of a using the control in imm8. Store the results in the low 64 bits of 128-bit lanes of dst, with the high 64 bits of 128-bit lanes being copied from a to dst, using zeromask k (elements are zeroed out when the corresponding mask bit is not set). /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_shufflelo_epi16&expand=5220) #[inline] @@ -7532,7 +7532,7 @@ pub unsafe fn _mm512_maskz_shufflelo_epi16(k: __mmask32, a: __m transmute(simd_select_bitmask(k, r.as_i16x32(), zero)) } -/// Shuffle 16-bit integers in the low 64 bits of 128-bit lanes of a using the control in imm8. Store the results in the low 64 bits of 128-bit lanes of dst, with the high 64 bits of 128-bit lanes being copied from from a to dst, using writemask k (elements are copied from src when the corresponding mask bit is not set). +/// Shuffle 16-bit integers in the low 64 bits of 128-bit lanes of a using the control in imm8. Store the results in the low 64 bits of 128-bit lanes of dst, with the high 64 bits of 128-bit lanes being copied from a to dst, using writemask k (elements are copied from src when the corresponding mask bit is not set). /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_shufflelo_epi16&expand=5216) #[inline] @@ -7549,7 +7549,7 @@ pub unsafe fn _mm256_mask_shufflelo_epi16( transmute(simd_select_bitmask(k, shuffle.as_i16x16(), src.as_i16x16())) } -/// Shuffle 16-bit integers in the low 64 bits of 128-bit lanes of a using the control in imm8. 
Store the results in the low 64 bits of 128-bit lanes of dst, with the high 64 bits of 128-bit lanes being copied from from a to dst, using writemask k (elements are copied from src when the corresponding mask bit is not set). +/// Shuffle 16-bit integers in the low 64 bits of 128-bit lanes of a using the control in imm8. Store the results in the low 64 bits of 128-bit lanes of dst, with the high 64 bits of 128-bit lanes being copied from a to dst, using writemask k (elements are copied from src when the corresponding mask bit is not set). /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_shufflelo_epi16&expand=5217) #[inline] @@ -7563,7 +7563,7 @@ pub unsafe fn _mm256_maskz_shufflelo_epi16(k: __mmask16, a: __m transmute(simd_select_bitmask(k, shuffle.as_i16x16(), zero)) } -/// Shuffle 16-bit integers in the low 64 bits of 128-bit lanes of a using the control in imm8. Store the results in the low 64 bits of 128-bit lanes of dst, with the high 64 bits of 128-bit lanes being copied from from a to dst, using writemask k (elements are copied from src when the corresponding mask bit is not set). +/// Shuffle 16-bit integers in the low 64 bits of 128-bit lanes of a using the control in imm8. Store the results in the low 64 bits of 128-bit lanes of dst, with the high 64 bits of 128-bit lanes being copied from a to dst, using writemask k (elements are copied from src when the corresponding mask bit is not set). /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_shufflelo_epi16&expand=5213) #[inline] @@ -7580,7 +7580,7 @@ pub unsafe fn _mm_mask_shufflelo_epi16( transmute(simd_select_bitmask(k, shuffle.as_i16x8(), src.as_i16x8())) } -/// Shuffle 16-bit integers in the low 64 bits of 128-bit lanes of a using the control in imm8. Store the results in the low 64 bits of 128-bit lanes of dst, with the high 64 bits of 128-bit lanes being copied from from a to dst, using writemask k (elements are copied from src when the corresponding mask bit is not set). +/// Shuffle 16-bit integers in the low 64 bits of 128-bit lanes of a using the control in imm8. Store the results in the low 64 bits of 128-bit lanes of dst, with the high 64 bits of 128-bit lanes being copied from a to dst, using writemask k (elements are copied from src when the corresponding mask bit is not set). /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_shufflelo_epi16&expand=5214) #[inline] @@ -7594,7 +7594,7 @@ pub unsafe fn _mm_maskz_shufflelo_epi16(k: __mmask8, a: __m128i transmute(simd_select_bitmask(k, shuffle.as_i16x8(), zero)) } -/// Shuffle 16-bit integers in the high 64 bits of 128-bit lanes of a using the control in imm8. Store the results in the high 64 bits of 128-bit lanes of dst, with the low 64 bits of 128-bit lanes being copied from from a to dst. +/// Shuffle 16-bit integers in the high 64 bits of 128-bit lanes of a using the control in imm8. Store the results in the high 64 bits of 128-bit lanes of dst, with the low 64 bits of 128-bit lanes being copied from a to dst. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_shufflehi_epi16&expand=5212) #[inline] @@ -7645,7 +7645,7 @@ pub unsafe fn _mm512_shufflehi_epi16(a: __m512i) -> __m512i { transmute(r) } -/// Shuffle 16-bit integers in the high 64 bits of 128-bit lanes of a using the control in imm8. 
Store the results in the high 64 bits of 128-bit lanes of dst, with the low 64 bits of 128-bit lanes being copied from from a to dst, using writemask k (elements are copied from src when the corresponding mask bit is not set). +/// Shuffle 16-bit integers in the high 64 bits of 128-bit lanes of a using the control in imm8. Store the results in the high 64 bits of 128-bit lanes of dst, with the low 64 bits of 128-bit lanes being copied from a to dst, using writemask k (elements are copied from src when the corresponding mask bit is not set). /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_shufflehi_epi16&expand=5210) #[inline] @@ -7662,7 +7662,7 @@ pub unsafe fn _mm512_mask_shufflehi_epi16( transmute(simd_select_bitmask(k, r.as_i16x32(), src.as_i16x32())) } -/// Shuffle 16-bit integers in the high 64 bits of 128-bit lanes of a using the control in imm8. Store the results in the high 64 bits of 128-bit lanes of dst, with the low 64 bits of 128-bit lanes being copied from from a to dst, using zeromask k (elements are zeroed out when the corresponding mask bit is not set). +/// Shuffle 16-bit integers in the high 64 bits of 128-bit lanes of a using the control in imm8. Store the results in the high 64 bits of 128-bit lanes of dst, with the low 64 bits of 128-bit lanes being copied from a to dst, using zeromask k (elements are zeroed out when the corresponding mask bit is not set). /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_shufflehi_epi16&expand=5211) #[inline] @@ -7676,7 +7676,7 @@ pub unsafe fn _mm512_maskz_shufflehi_epi16(k: __mmask32, a: __m transmute(simd_select_bitmask(k, r.as_i16x32(), zero)) } -/// Shuffle 16-bit integers in the high 64 bits of 128-bit lanes of a using the control in imm8. Store the results in the high 64 bits of 128-bit lanes of dst, with the low 64 bits of 128-bit lanes being copied from from a to dst, using writemask k (elements are copied from src when the corresponding mask bit is not set). +/// Shuffle 16-bit integers in the high 64 bits of 128-bit lanes of a using the control in imm8. Store the results in the high 64 bits of 128-bit lanes of dst, with the low 64 bits of 128-bit lanes being copied from a to dst, using writemask k (elements are copied from src when the corresponding mask bit is not set). /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_shufflehi_epi16&expand=5207) #[inline] @@ -7693,7 +7693,7 @@ pub unsafe fn _mm256_mask_shufflehi_epi16( transmute(simd_select_bitmask(k, shuffle.as_i16x16(), src.as_i16x16())) } -/// Shuffle 16-bit integers in the high 64 bits of 128-bit lanes of a using the control in imm8. Store the results in the high 64 bits of 128-bit lanes of dst, with the low 64 bits of 128-bit lanes being copied from from a to dst, using zeromask k (elements are zeroed out when the corresponding mask bit is not set). +/// Shuffle 16-bit integers in the high 64 bits of 128-bit lanes of a using the control in imm8. Store the results in the high 64 bits of 128-bit lanes of dst, with the low 64 bits of 128-bit lanes being copied from a to dst, using zeromask k (elements are zeroed out when the corresponding mask bit is not set). 
/// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_shufflehi_epi16&expand=5208) #[inline] @@ -7707,7 +7707,7 @@ pub unsafe fn _mm256_maskz_shufflehi_epi16(k: __mmask16, a: __m transmute(simd_select_bitmask(k, shuffle.as_i16x16(), zero)) } -/// Shuffle 16-bit integers in the high 64 bits of 128-bit lanes of a using the control in imm8. Store the results in the high 64 bits of 128-bit lanes of dst, with the low 64 bits of 128-bit lanes being copied from from a to dst, using writemask k (elements are copied from src when the corresponding mask bit is not set). +/// Shuffle 16-bit integers in the high 64 bits of 128-bit lanes of a using the control in imm8. Store the results in the high 64 bits of 128-bit lanes of dst, with the low 64 bits of 128-bit lanes being copied from a to dst, using writemask k (elements are copied from src when the corresponding mask bit is not set). /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_shufflehi_epi16&expand=5204) #[inline] @@ -7724,7 +7724,7 @@ pub unsafe fn _mm_mask_shufflehi_epi16( transmute(simd_select_bitmask(k, shuffle.as_i16x8(), src.as_i16x8())) } -/// Shuffle 16-bit integers in the high 64 bits of 128-bit lanes of a using the control in imm8. Store the results in the high 64 bits of 128-bit lanes of dst, with the low 64 bits of 128-bit lanes being copied from from a to dst, using zeromask k (elements are zeroed out when the corresponding mask bit is not set). +/// Shuffle 16-bit integers in the high 64 bits of 128-bit lanes of a using the control in imm8. Store the results in the high 64 bits of 128-bit lanes of dst, with the low 64 bits of 128-bit lanes being copied from a to dst, using zeromask k (elements are zeroed out when the corresponding mask bit is not set). /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_shufflehi_epi16&expand=5205) #[inline] diff --git a/library/stdarch/crates/core_arch/src/x86/avx512f.rs b/library/stdarch/crates/core_arch/src/x86/avx512f.rs index f70a28466..0ddb51283 100644 --- a/library/stdarch/crates/core_arch/src/x86/avx512f.rs +++ b/library/stdarch/crates/core_arch/src/x86/avx512f.rs @@ -26268,7 +26268,7 @@ pub unsafe fn _mm512_set1_epi8(a: i8) -> __m512i { transmute(i8x64::splat(a)) } -/// Broadcast the low packed 16-bit integer from a to all all elements of dst. +/// Broadcast the low packed 16-bit integer from a to all elements of dst. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_set1_epi16&expand=4944) #[inline] diff --git a/library/stdarch/crates/core_arch/src/x86/avx512gfni.rs b/library/stdarch/crates/core_arch/src/x86/avx512gfni.rs deleted file mode 100644 index 66fd1c2e1..000000000 --- a/library/stdarch/crates/core_arch/src/x86/avx512gfni.rs +++ /dev/null @@ -1,1492 +0,0 @@ -//! Galois Field New Instructions (GFNI) -//! -//! The intrinsics here correspond to those in the `immintrin.h` C header. -//! -//! The reference is [Intel 64 and IA-32 Architectures Software Developer's -//! Manual Volume 2: Instruction Set Reference, A-Z][intel64_ref]. -//! -//! 
[intel64_ref]: http://www.intel.de/content/dam/www/public/us/en/documents/manuals/64-ia-32-architectures-software-developer-instruction-set-reference-manual-325383.pdf - -use crate::core_arch::simd::i8x16; -use crate::core_arch::simd::i8x32; -use crate::core_arch::simd::i8x64; -use crate::core_arch::simd_llvm::simd_select_bitmask; -use crate::core_arch::x86::__m128i; -use crate::core_arch::x86::__m256i; -use crate::core_arch::x86::__m512i; -use crate::core_arch::x86::__mmask16; -use crate::core_arch::x86::__mmask32; -use crate::core_arch::x86::__mmask64; -use crate::core_arch::x86::_mm256_setzero_si256; -use crate::core_arch::x86::_mm512_setzero_si512; -use crate::core_arch::x86::_mm_setzero_si128; -use crate::core_arch::x86::m128iExt; -use crate::core_arch::x86::m256iExt; -use crate::core_arch::x86::m512iExt; -use crate::mem::transmute; - -#[cfg(test)] -use stdarch_test::assert_instr; - -#[allow(improper_ctypes)] -extern "C" { - #[link_name = "llvm.x86.vgf2p8affineinvqb.512"] - fn vgf2p8affineinvqb_512(x: i8x64, a: i8x64, imm8: u8) -> i8x64; - #[link_name = "llvm.x86.vgf2p8affineinvqb.256"] - fn vgf2p8affineinvqb_256(x: i8x32, a: i8x32, imm8: u8) -> i8x32; - #[link_name = "llvm.x86.vgf2p8affineinvqb.128"] - fn vgf2p8affineinvqb_128(x: i8x16, a: i8x16, imm8: u8) -> i8x16; - #[link_name = "llvm.x86.vgf2p8affineqb.512"] - fn vgf2p8affineqb_512(x: i8x64, a: i8x64, imm8: u8) -> i8x64; - #[link_name = "llvm.x86.vgf2p8affineqb.256"] - fn vgf2p8affineqb_256(x: i8x32, a: i8x32, imm8: u8) -> i8x32; - #[link_name = "llvm.x86.vgf2p8affineqb.128"] - fn vgf2p8affineqb_128(x: i8x16, a: i8x16, imm8: u8) -> i8x16; - #[link_name = "llvm.x86.vgf2p8mulb.512"] - fn vgf2p8mulb_512(a: i8x64, b: i8x64) -> i8x64; - #[link_name = "llvm.x86.vgf2p8mulb.256"] - fn vgf2p8mulb_256(a: i8x32, b: i8x32) -> i8x32; - #[link_name = "llvm.x86.vgf2p8mulb.128"] - fn vgf2p8mulb_128(a: i8x16, b: i8x16) -> i8x16; -} - -// LLVM requires AVX512BW for a lot of these instructions, see -// https://github.com/llvm/llvm-project/blob/release/9.x/clang/include/clang/Basic/BuiltinsX86.def#L457 -// however our tests also require the target feature list to match Intel's -// which *doesn't* require AVX512BW but only AVX512F, so we added the redundant AVX512F -// requirement (for now) -// also see -// https://github.com/llvm/llvm-project/blob/release/9.x/clang/lib/Headers/gfniintrin.h -// for forcing GFNI, BW and optionally VL extension - -/// Performs a multiplication in GF(2^8) on the packed bytes. -/// The field is in polynomial representation with the reduction polynomial -/// x^8 + x^4 + x^3 + x + 1. -/// -/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_gf2p8mul_epi8) -#[inline] -#[target_feature(enable = "avx512gfni,avx512bw,avx512f")] -#[cfg_attr(test, assert_instr(vgf2p8mulb))] -pub unsafe fn _mm512_gf2p8mul_epi8(a: __m512i, b: __m512i) -> __m512i { - transmute(vgf2p8mulb_512(a.as_i8x64(), b.as_i8x64())) -} - -/// Performs a multiplication in GF(2^8) on the packed bytes. -/// The field is in polynomial representation with the reduction polynomial -/// x^8 + x^4 + x^3 + x + 1. -/// -/// Uses the writemask in k - elements are copied from src if the corresponding mask bit is not set. -/// Otherwise the computation result is written into the result. 
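The GF(2^8) multiplication described in the doc comments above can be modelled per byte in plain Rust. The sketch below is not part of this patch (the name `gf2p8_mul_byte` is illustrative); it mirrors the `mulbyte` reference helper used further down in the deleted tests: a carry-less multiply followed by reduction modulo x^8 + x^4 + x^3 + x + 1 (0x11b).

// Scalar model of one byte of `_mm512_gf2p8mul_epi8` (illustrative only).
fn gf2p8_mul_byte(left: u8, right: u8) -> u8 {
    const REDUCTION_POLYNOMIAL: u16 = 0x11b; // x^8 + x^4 + x^3 + x + 1
    let (left, right) = (left as u16, right as u16);
    let mut product: u16 = 0;
    // Carry-less multiply: XOR in a shifted copy of `right` for every set bit of `left`.
    for i in 0..8 {
        if (left >> i) & 1 != 0 {
            product ^= right << i;
        }
    }
    // Reduce bits 14..=8 back into the low byte.
    for i in (8..=14).rev() {
        if (product >> i) & 1 != 0 {
            product ^= REDUCTION_POLYNOMIAL << (i - 8);
        }
    }
    product as u8
}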
-/// -/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_gf2p8mul_epi8) -#[inline] -#[target_feature(enable = "avx512gfni,avx512bw,avx512f")] -#[cfg_attr(test, assert_instr(vgf2p8mulb))] -pub unsafe fn _mm512_mask_gf2p8mul_epi8( - src: __m512i, - k: __mmask64, - a: __m512i, - b: __m512i, -) -> __m512i { - transmute(simd_select_bitmask( - k, - vgf2p8mulb_512(a.as_i8x64(), b.as_i8x64()), - src.as_i8x64(), - )) -} - -/// Performs a multiplication in GF(2^8) on the packed bytes. -/// The field is in polynomial representation with the reduction polynomial -/// x^8 + x^4 + x^3 + x + 1. -/// -/// Uses the writemask in k - elements are zeroed in the result if the corresponding mask bit is not set. -/// Otherwise the computation result is written into the result. -/// -/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_gf2p8mul_epi8) -#[inline] -#[target_feature(enable = "avx512gfni,avx512bw,avx512f")] -#[cfg_attr(test, assert_instr(vgf2p8mulb))] -pub unsafe fn _mm512_maskz_gf2p8mul_epi8(k: __mmask64, a: __m512i, b: __m512i) -> __m512i { - let zero = _mm512_setzero_si512().as_i8x64(); - transmute(simd_select_bitmask( - k, - vgf2p8mulb_512(a.as_i8x64(), b.as_i8x64()), - zero, - )) -} - -/// Performs a multiplication in GF(2^8) on the packed bytes. -/// The field is in polynomial representation with the reduction polynomial -/// x^8 + x^4 + x^3 + x + 1. -/// -/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_gf2p8mul_epi8) -#[inline] -#[target_feature(enable = "avx512gfni,avx512bw,avx512vl")] -#[cfg_attr(test, assert_instr(vgf2p8mulb))] -pub unsafe fn _mm256_gf2p8mul_epi8(a: __m256i, b: __m256i) -> __m256i { - transmute(vgf2p8mulb_256(a.as_i8x32(), b.as_i8x32())) -} - -/// Performs a multiplication in GF(2^8) on the packed bytes. -/// The field is in polynomial representation with the reduction polynomial -/// x^8 + x^4 + x^3 + x + 1. -/// -/// Uses the writemask in k - elements are copied from src if the corresponding mask bit is not set. -/// Otherwise the computation result is written into the result. -/// -/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_gf2p8mul_epi8) -#[inline] -#[target_feature(enable = "avx512gfni,avx512bw,avx512vl")] -#[cfg_attr(test, assert_instr(vgf2p8mulb))] -pub unsafe fn _mm256_mask_gf2p8mul_epi8( - src: __m256i, - k: __mmask32, - a: __m256i, - b: __m256i, -) -> __m256i { - transmute(simd_select_bitmask( - k, - vgf2p8mulb_256(a.as_i8x32(), b.as_i8x32()), - src.as_i8x32(), - )) -} - -/// Performs a multiplication in GF(2^8) on the packed bytes. -/// The field is in polynomial representation with the reduction polynomial -/// x^8 + x^4 + x^3 + x + 1. -/// -/// Uses the writemask in k - elements are zeroed in the result if the corresponding mask bit is not set. -/// Otherwise the computation result is written into the result. 
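All of the `_mask_`/`_maskz_` wrappers above follow the same pattern: compute the full-width result, then pick per element between that result and either `src` (writemask) or zero (zeromask). A scalar sketch of that selection, with illustrative names and the byte granularity these intrinsics use; this is a model of `simd_select_bitmask`, not its implementation.

// Scalar model of the per-element selection in the mask/maskz wrappers (illustrative only).
fn select_bytes(k: u64, computed: &[u8; 64], fallback: &[u8; 64]) -> [u8; 64] {
    let mut out = [0u8; 64];
    for i in 0..64 {
        // Bit i of the mask chooses the computed element; otherwise the fallback.
        out[i] = if (k >> i) & 1 != 0 { computed[i] } else { fallback[i] };
    }
    out
}
// Writemask variants use `fallback = src`; zeromask variants use an all-zero fallback.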
-/// -/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_gf2p8mul_epi8) -#[inline] -#[target_feature(enable = "avx512gfni,avx512bw,avx512vl")] -#[cfg_attr(test, assert_instr(vgf2p8mulb))] -pub unsafe fn _mm256_maskz_gf2p8mul_epi8(k: __mmask32, a: __m256i, b: __m256i) -> __m256i { - let zero = _mm256_setzero_si256().as_i8x32(); - transmute(simd_select_bitmask( - k, - vgf2p8mulb_256(a.as_i8x32(), b.as_i8x32()), - zero, - )) -} - -/// Performs a multiplication in GF(2^8) on the packed bytes. -/// The field is in polynomial representation with the reduction polynomial -/// x^8 + x^4 + x^3 + x + 1. -/// -/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_gf2p8mul_epi8) -#[inline] -#[target_feature(enable = "avx512gfni,avx512bw,avx512vl")] -#[cfg_attr(test, assert_instr(vgf2p8mulb))] -pub unsafe fn _mm_gf2p8mul_epi8(a: __m128i, b: __m128i) -> __m128i { - transmute(vgf2p8mulb_128(a.as_i8x16(), b.as_i8x16())) -} - -/// Performs a multiplication in GF(2^8) on the packed bytes. -/// The field is in polynomial representation with the reduction polynomial -/// x^8 + x^4 + x^3 + x + 1. -/// -/// Uses the writemask in k - elements are copied from src if the corresponding mask bit is not set. -/// Otherwise the computation result is written into the result. -/// -/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_gf2p8mul_epi8) -#[inline] -#[target_feature(enable = "avx512gfni,avx512bw,avx512vl")] -#[cfg_attr(test, assert_instr(vgf2p8mulb))] -pub unsafe fn _mm_mask_gf2p8mul_epi8( - src: __m128i, - k: __mmask16, - a: __m128i, - b: __m128i, -) -> __m128i { - transmute(simd_select_bitmask( - k, - vgf2p8mulb_128(a.as_i8x16(), b.as_i8x16()), - src.as_i8x16(), - )) -} - -/// Performs a multiplication in GF(2^8) on the packed bytes. -/// The field is in polynomial representation with the reduction polynomial -/// x^8 + x^4 + x^3 + x + 1. -/// -/// Uses the writemask in k - elements are zeroed in the result if the corresponding mask bit is not set. -/// Otherwise the computation result is written into the result. -/// -/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_gf2p8mul_epi8) -#[inline] -#[target_feature(enable = "avx512gfni,avx512bw,avx512vl")] -#[cfg_attr(test, assert_instr(vgf2p8mulb))] -pub unsafe fn _mm_maskz_gf2p8mul_epi8(k: __mmask16, a: __m128i, b: __m128i) -> __m128i { - let zero = _mm_setzero_si128().as_i8x16(); - transmute(simd_select_bitmask( - k, - vgf2p8mulb_128(a.as_i8x16(), b.as_i8x16()), - zero, - )) -} - -/// Performs an affine transformation on the packed bytes in x. -/// That is computes a*x+b over the Galois Field 2^8 for each packed byte with a being a 8x8 bit matrix -/// and b being a constant 8-bit immediate value. -/// Each pack of 8 bytes in x is paired with the 64-bit word at the same position in a. 
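The affine transform described above can also be modelled per byte: each 8-byte group of `x` shares one 64-bit matrix word from `a`, output bit `7 - row` is the parity of `x AND` the matrix's `row`-th byte (taken little-endian), and the immediate `b` is XORed on at the end. The sketch below uses illustrative names and is not part of this patch; it mirrors the `mat_vec_multiply_affine` helper in the deleted tests.

// Scalar model of one byte of `_mm512_gf2p8affine_epi64_epi8` (illustrative only).
fn gf2p8_affine_byte(matrix: u64, x: u8, b: u8) -> u8 {
    let rows = matrix.to_le_bytes();
    let mut acc = 0u8;
    for row in 0..8 {
        // Parity of the AND of the input byte with this matrix row.
        let parity = ((x & rows[row]).count_ones() & 1) as u8;
        acc |= parity << (7 - row);
    }
    acc ^ b
}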
-/// -/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_gf2p8affine_epi8) -#[inline] -#[target_feature(enable = "avx512gfni,avx512bw,avx512f")] -#[cfg_attr(test, assert_instr(vgf2p8affineqb, B = 0))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn _mm512_gf2p8affine_epi64_epi8(x: __m512i, a: __m512i) -> __m512i { - static_assert_imm8!(B); - let b = B as u8; - let x = x.as_i8x64(); - let a = a.as_i8x64(); - let r = vgf2p8affineqb_512(x, a, b); - transmute(r) -} - -/// Performs an affine transformation on the packed bytes in x. -/// That is computes a*x+b over the Galois Field 2^8 for each packed byte with a being a 8x8 bit matrix -/// and b being a constant 8-bit immediate value. -/// Each pack of 8 bytes in x is paired with the 64-bit word at the same position in a. -/// -/// Uses the writemask in k - elements are zeroed in the result if the corresponding mask bit is not set. -/// Otherwise the computation result is written into the result. -/// -/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_gf2p8affine_epi8) -#[inline] -#[target_feature(enable = "avx512gfni,avx512bw,avx512f")] -#[cfg_attr(test, assert_instr(vgf2p8affineqb, B = 0))] -#[rustc_legacy_const_generics(3)] -pub unsafe fn _mm512_maskz_gf2p8affine_epi64_epi8( - k: __mmask64, - x: __m512i, - a: __m512i, -) -> __m512i { - static_assert_imm8!(B); - let b = B as u8; - let zero = _mm512_setzero_si512().as_i8x64(); - let x = x.as_i8x64(); - let a = a.as_i8x64(); - let r = vgf2p8affineqb_512(x, a, b); - transmute(simd_select_bitmask(k, r, zero)) -} - -/// Performs an affine transformation on the packed bytes in x. -/// That is computes a*x+b over the Galois Field 2^8 for each packed byte with a being a 8x8 bit matrix -/// and b being a constant 8-bit immediate value. -/// Each pack of 8 bytes in x is paired with the 64-bit word at the same position in a. -/// -/// Uses the writemask in k - elements are copied from src if the corresponding mask bit is not set. -/// Otherwise the computation result is written into the result. -/// -/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_gf2p8affine_epi8) -#[inline] -#[target_feature(enable = "avx512gfni,avx512bw,avx512f")] -#[cfg_attr(test, assert_instr(vgf2p8affineqb, B = 0))] -#[rustc_legacy_const_generics(4)] -pub unsafe fn _mm512_mask_gf2p8affine_epi64_epi8( - src: __m512i, - k: __mmask64, - x: __m512i, - a: __m512i, -) -> __m512i { - static_assert_imm8!(B); - let b = B as u8; - let x = x.as_i8x64(); - let a = a.as_i8x64(); - let r = vgf2p8affineqb_512(x, a, b); - transmute(simd_select_bitmask(k, r, src.as_i8x64())) -} - -/// Performs an affine transformation on the packed bytes in x. -/// That is computes a*x+b over the Galois Field 2^8 for each packed byte with a being a 8x8 bit matrix -/// and b being a constant 8-bit immediate value. -/// Each pack of 8 bytes in x is paired with the 64-bit word at the same position in a. 
-/// -/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_gf2p8affine_epi8) -#[inline] -#[target_feature(enable = "avx512gfni,avx512bw,avx512vl")] -#[cfg_attr(test, assert_instr(vgf2p8affineqb, B = 0))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn _mm256_gf2p8affine_epi64_epi8(x: __m256i, a: __m256i) -> __m256i { - static_assert_imm8!(B); - let b = B as u8; - let x = x.as_i8x32(); - let a = a.as_i8x32(); - let r = vgf2p8affineqb_256(x, a, b); - transmute(r) -} - -/// Performs an affine transformation on the packed bytes in x. -/// That is computes a*x+b over the Galois Field 2^8 for each packed byte with a being a 8x8 bit matrix -/// and b being a constant 8-bit immediate value. -/// Each pack of 8 bytes in x is paired with the 64-bit word at the same position in a. -/// -/// Uses the writemask in k - elements are zeroed in the result if the corresponding mask bit is not set. -/// Otherwise the computation result is written into the result. -/// -/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_gf2p8affine_epi8) -#[inline] -#[target_feature(enable = "avx512gfni,avx512bw,avx512vl")] -#[cfg_attr(test, assert_instr(vgf2p8affineqb, B = 0))] -#[rustc_legacy_const_generics(3)] -pub unsafe fn _mm256_maskz_gf2p8affine_epi64_epi8( - k: __mmask32, - x: __m256i, - a: __m256i, -) -> __m256i { - static_assert_imm8!(B); - let b = B as u8; - let zero = _mm256_setzero_si256().as_i8x32(); - let x = x.as_i8x32(); - let a = a.as_i8x32(); - let r = vgf2p8affineqb_256(x, a, b); - transmute(simd_select_bitmask(k, r, zero)) -} - -/// Performs an affine transformation on the packed bytes in x. -/// That is computes a*x+b over the Galois Field 2^8 for each packed byte with a being a 8x8 bit matrix -/// and b being a constant 8-bit immediate value. -/// Each pack of 8 bytes in x is paired with the 64-bit word at the same position in a. -/// -/// Uses the writemask in k - elements are copied from src if the corresponding mask bit is not set. -/// Otherwise the computation result is written into the result. -/// -/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_gf2p8affine_epi8) -#[inline] -#[target_feature(enable = "avx512gfni,avx512bw,avx512vl")] -#[cfg_attr(test, assert_instr(vgf2p8affineqb, B = 0))] -#[rustc_legacy_const_generics(4)] -pub unsafe fn _mm256_mask_gf2p8affine_epi64_epi8( - src: __m256i, - k: __mmask32, - x: __m256i, - a: __m256i, -) -> __m256i { - static_assert_imm8!(B); - let b = B as u8; - let x = x.as_i8x32(); - let a = a.as_i8x32(); - let r = vgf2p8affineqb_256(x, a, b); - transmute(simd_select_bitmask(k, r, src.as_i8x32())) -} - -/// Performs an affine transformation on the packed bytes in x. -/// That is computes a*x+b over the Galois Field 2^8 for each packed byte with a being a 8x8 bit matrix -/// and b being a constant 8-bit immediate value. -/// Each pack of 8 bytes in x is paired with the 64-bit word at the same position in a. 
-/// -/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_gf2p8affine_epi8) -#[inline] -#[target_feature(enable = "avx512gfni,avx512bw,avx512vl")] -#[cfg_attr(test, assert_instr(vgf2p8affineqb, B = 0))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn _mm_gf2p8affine_epi64_epi8(x: __m128i, a: __m128i) -> __m128i { - static_assert_imm8!(B); - let b = B as u8; - let x = x.as_i8x16(); - let a = a.as_i8x16(); - let r = vgf2p8affineqb_128(x, a, b); - transmute(r) -} - -/// Performs an affine transformation on the packed bytes in x. -/// That is computes a*x+b over the Galois Field 2^8 for each packed byte with a being a 8x8 bit matrix -/// and b being a constant 8-bit immediate value. -/// Each pack of 8 bytes in x is paired with the 64-bit word at the same position in a. -/// -/// Uses the writemask in k - elements are zeroed in the result if the corresponding mask bit is not set. -/// Otherwise the computation result is written into the result. -/// -/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_gf2p8affine_epi8) -#[inline] -#[target_feature(enable = "avx512gfni,avx512bw,avx512vl")] -#[cfg_attr(test, assert_instr(vgf2p8affineqb, B = 0))] -#[rustc_legacy_const_generics(3)] -pub unsafe fn _mm_maskz_gf2p8affine_epi64_epi8( - k: __mmask16, - x: __m128i, - a: __m128i, -) -> __m128i { - static_assert_imm8!(B); - let b = B as u8; - let zero = _mm_setzero_si128().as_i8x16(); - let x = x.as_i8x16(); - let a = a.as_i8x16(); - let r = vgf2p8affineqb_128(x, a, b); - transmute(simd_select_bitmask(k, r, zero)) -} - -/// Performs an affine transformation on the packed bytes in x. -/// That is computes a*x+b over the Galois Field 2^8 for each packed byte with a being a 8x8 bit matrix -/// and b being a constant 8-bit immediate value. -/// Each pack of 8 bytes in x is paired with the 64-bit word at the same position in a. -/// -/// Uses the writemask in k - elements are copied from src if the corresponding mask bit is not set. -/// Otherwise the computation result is written into the result. -/// -/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_gf2p8affine_epi8) -#[inline] -#[target_feature(enable = "avx512gfni,avx512bw,avx512vl")] -#[cfg_attr(test, assert_instr(vgf2p8affineqb, B = 0))] -#[rustc_legacy_const_generics(4)] -pub unsafe fn _mm_mask_gf2p8affine_epi64_epi8( - src: __m128i, - k: __mmask16, - x: __m128i, - a: __m128i, -) -> __m128i { - static_assert_imm8!(B); - let b = B as u8; - let x = x.as_i8x16(); - let a = a.as_i8x16(); - let r = vgf2p8affineqb_128(x, a, b); - transmute(simd_select_bitmask(k, r, src.as_i8x16())) -} - -/// Performs an affine transformation on the inverted packed bytes in x. -/// That is computes a*inv(x)+b over the Galois Field 2^8 for each packed byte with a being a 8x8 bit matrix -/// and b being a constant 8-bit immediate value. -/// The inverse of a byte is defined with respect to the reduction polynomial x^8+x^4+x^3+x+1. -/// The inverse of 0 is 0. -/// Each pack of 8 bytes in x is paired with the 64-bit word at the same position in a. 
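One concrete use of the inverse-affine form described above is the AES S-box, which the deleted tests further down verify byte-for-byte: with the matrix 0xF1_E3_C7_8F_1F_3E_7C_F8 and the constant 0x63, the intrinsic computes the S-box of every input byte. A usage sketch follows; the function name is illustrative, and it assumes the crate's own `__m512i` and intrinsic items are in scope, as in the deleted tests.

// Maps every byte of `input` through the AES S-box; mirrors the deleted
// test_mm512_gf2p8affineinv_epi64_epi8 test (illustrative only).
#[target_feature(enable = "avx512gfni,avx512bw")]
unsafe fn aes_sbox_bytes(input: __m512i) -> __m512i {
    // Written via u64 so the top-bit-set pattern does not overflow an i64 literal.
    let sbox_matrix = _mm512_set1_epi64(0xF1_E3_C7_8F_1F_3E_7C_F8_u64 as i64);
    const CONSTANT_BYTE: i32 = 0x63;
    _mm512_gf2p8affineinv_epi64_epi8::<CONSTANT_BYTE>(input, sbox_matrix)
}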
-/// -/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_gf2p8affineinv_epi64_epi8) -#[inline] -#[target_feature(enable = "avx512gfni,avx512bw,avx512f")] -#[cfg_attr(test, assert_instr(vgf2p8affineinvqb, B = 0))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn _mm512_gf2p8affineinv_epi64_epi8(x: __m512i, a: __m512i) -> __m512i { - static_assert_imm8!(B); - let b = B as u8; - let x = x.as_i8x64(); - let a = a.as_i8x64(); - let r = vgf2p8affineinvqb_512(x, a, b); - transmute(r) -} - -/// Performs an affine transformation on the inverted packed bytes in x. -/// That is computes a*inv(x)+b over the Galois Field 2^8 for each packed byte with a being a 8x8 bit matrix -/// and b being a constant 8-bit immediate value. -/// The inverse of a byte is defined with respect to the reduction polynomial x^8+x^4+x^3+x+1. -/// The inverse of 0 is 0. -/// Each pack of 8 bytes in x is paired with the 64-bit word at the same position in a. -/// -/// Uses the writemask in k - elements are zeroed in the result if the corresponding mask bit is not set. -/// Otherwise the computation result is written into the result. -/// -/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_gf2p8affineinv_epi64_epi8) -#[inline] -#[target_feature(enable = "avx512gfni,avx512bw,avx512f")] -#[cfg_attr(test, assert_instr(vgf2p8affineinvqb, B = 0))] -#[rustc_legacy_const_generics(3)] -pub unsafe fn _mm512_maskz_gf2p8affineinv_epi64_epi8( - k: __mmask64, - x: __m512i, - a: __m512i, -) -> __m512i { - static_assert_imm8!(B); - let b = B as u8; - let zero = _mm512_setzero_si512().as_i8x64(); - let x = x.as_i8x64(); - let a = a.as_i8x64(); - let r = vgf2p8affineinvqb_512(x, a, b); - transmute(simd_select_bitmask(k, r, zero)) -} - -/// Performs an affine transformation on the inverted packed bytes in x. -/// That is computes a*inv(x)+b over the Galois Field 2^8 for each packed byte with a being a 8x8 bit matrix -/// and b being a constant 8-bit immediate value. -/// The inverse of a byte is defined with respect to the reduction polynomial x^8+x^4+x^3+x+1. -/// The inverse of 0 is 0. -/// Each pack of 8 bytes in x is paired with the 64-bit word at the same position in a. -/// -/// Uses the writemask in k - elements are copied from src if the corresponding mask bit is not set. -/// Otherwise the computation result is written into the result. -/// -/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_gf2p8affineinv_epi64_epi8) -#[inline] -#[target_feature(enable = "avx512gfni,avx512bw,avx512f")] -#[cfg_attr(test, assert_instr(vgf2p8affineinvqb, B = 0))] -#[rustc_legacy_const_generics(4)] -pub unsafe fn _mm512_mask_gf2p8affineinv_epi64_epi8( - src: __m512i, - k: __mmask64, - x: __m512i, - a: __m512i, -) -> __m512i { - static_assert_imm8!(B); - let b = B as u8; - let x = x.as_i8x64(); - let a = a.as_i8x64(); - let r = vgf2p8affineinvqb_512(x, a, b); - transmute(simd_select_bitmask(k, r, src.as_i8x64())) -} - -/// Performs an affine transformation on the inverted packed bytes in x. -/// That is computes a*inv(x)+b over the Galois Field 2^8 for each packed byte with a being a 8x8 bit matrix -/// and b being a constant 8-bit immediate value. -/// The inverse of a byte is defined with respect to the reduction polynomial x^8+x^4+x^3+x+1. -/// The inverse of 0 is 0. -/// Each pack of 8 bytes in x is paired with the 64-bit word at the same position in a. 
-/// -/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_gf2p8affineinv_epi64_epi8) -#[inline] -#[target_feature(enable = "avx512gfni,avx512bw,avx512vl")] -#[cfg_attr(test, assert_instr(vgf2p8affineinvqb, B = 0))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn _mm256_gf2p8affineinv_epi64_epi8(x: __m256i, a: __m256i) -> __m256i { - static_assert_imm8!(B); - let b = B as u8; - let x = x.as_i8x32(); - let a = a.as_i8x32(); - let r = vgf2p8affineinvqb_256(x, a, b); - transmute(r) -} - -/// Performs an affine transformation on the inverted packed bytes in x. -/// That is computes a*inv(x)+b over the Galois Field 2^8 for each packed byte with a being a 8x8 bit matrix -/// and b being a constant 8-bit immediate value. -/// The inverse of a byte is defined with respect to the reduction polynomial x^8+x^4+x^3+x+1. -/// The inverse of 0 is 0. -/// Each pack of 8 bytes in x is paired with the 64-bit word at the same position in a. -/// -/// Uses the writemask in k - elements are zeroed in the result if the corresponding mask bit is not set. -/// Otherwise the computation result is written into the result. -/// -/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_gf2p8affineinv_epi64_epi8) -#[inline] -#[target_feature(enable = "avx512gfni,avx512bw,avx512vl")] -#[cfg_attr(test, assert_instr(vgf2p8affineinvqb, B = 0))] -#[rustc_legacy_const_generics(3)] -pub unsafe fn _mm256_maskz_gf2p8affineinv_epi64_epi8( - k: __mmask32, - x: __m256i, - a: __m256i, -) -> __m256i { - static_assert_imm8!(B); - let b = B as u8; - let zero = _mm256_setzero_si256().as_i8x32(); - let x = x.as_i8x32(); - let a = a.as_i8x32(); - let r = vgf2p8affineinvqb_256(x, a, b); - transmute(simd_select_bitmask(k, r, zero)) -} - -/// Performs an affine transformation on the inverted packed bytes in x. -/// That is computes a*inv(x)+b over the Galois Field 2^8 for each packed byte with a being a 8x8 bit matrix -/// and b being a constant 8-bit immediate value. -/// The inverse of a byte is defined with respect to the reduction polynomial x^8+x^4+x^3+x+1. -/// The inverse of 0 is 0. -/// Each pack of 8 bytes in x is paired with the 64-bit word at the same position in a. -/// -/// Uses the writemask in k - elements are copied from src if the corresponding mask bit is not set. -/// Otherwise the computation result is written into the result. -/// -/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_gf2p8affineinv_epi64_epi8) -#[inline] -#[target_feature(enable = "avx512gfni,avx512bw,avx512vl")] -#[cfg_attr(test, assert_instr(vgf2p8affineinvqb, B = 0))] -#[rustc_legacy_const_generics(4)] -pub unsafe fn _mm256_mask_gf2p8affineinv_epi64_epi8( - src: __m256i, - k: __mmask32, - x: __m256i, - a: __m256i, -) -> __m256i { - static_assert_imm8!(B); - let b = B as u8; - let x = x.as_i8x32(); - let a = a.as_i8x32(); - let r = vgf2p8affineinvqb_256(x, a, b); - transmute(simd_select_bitmask(k, r, src.as_i8x32())) -} - -/// Performs an affine transformation on the inverted packed bytes in x. -/// That is computes a*inv(x)+b over the Galois Field 2^8 for each packed byte with a being a 8x8 bit matrix -/// and b being a constant 8-bit immediate value. -/// The inverse of a byte is defined with respect to the reduction polynomial x^8+x^4+x^3+x+1. -/// The inverse of 0 is 0. -/// Each pack of 8 bytes in x is paired with the 64-bit word at the same position in a. 
-/// -/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_gf2p8affineinv_epi64_epi8) -#[inline] -#[target_feature(enable = "avx512gfni,avx512bw,avx512vl")] -#[cfg_attr(test, assert_instr(vgf2p8affineinvqb, B = 0))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn _mm_gf2p8affineinv_epi64_epi8(x: __m128i, a: __m128i) -> __m128i { - static_assert_imm8!(B); - let b = B as u8; - let x = x.as_i8x16(); - let a = a.as_i8x16(); - let r = vgf2p8affineinvqb_128(x, a, b); - transmute(r) -} - -/// Performs an affine transformation on the inverted packed bytes in x. -/// That is computes a*inv(x)+b over the Galois Field 2^8 for each packed byte with a being a 8x8 bit matrix -/// and b being a constant 8-bit immediate value. -/// The inverse of a byte is defined with respect to the reduction polynomial x^8+x^4+x^3+x+1. -/// The inverse of 0 is 0. -/// Each pack of 8 bytes in x is paired with the 64-bit word at the same position in a. -/// -/// Uses the writemask in k - elements are zeroed in the result if the corresponding mask bit is not set. -/// Otherwise the computation result is written into the result. -/// -/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_gf2p8affineinv_epi64_epi8) -#[inline] -#[target_feature(enable = "avx512gfni,avx512bw,avx512vl")] -#[cfg_attr(test, assert_instr(vgf2p8affineinvqb, B = 0))] -#[rustc_legacy_const_generics(3)] -pub unsafe fn _mm_maskz_gf2p8affineinv_epi64_epi8( - k: __mmask16, - x: __m128i, - a: __m128i, -) -> __m128i { - static_assert_imm8!(B); - let b = B as u8; - let zero = _mm_setzero_si128().as_i8x16(); - let x = x.as_i8x16(); - let a = a.as_i8x16(); - let r = vgf2p8affineinvqb_128(x, a, b); - transmute(simd_select_bitmask(k, r, zero)) -} - -/// Performs an affine transformation on the inverted packed bytes in x. -/// That is computes a*inv(x)+b over the Galois Field 2^8 for each packed byte with a being a 8x8 bit matrix -/// and b being a constant 8-bit immediate value. -/// The inverse of a byte is defined with respect to the reduction polynomial x^8+x^4+x^3+x+1. -/// The inverse of 0 is 0. -/// Each pack of 8 bytes in x is paired with the 64-bit word at the same position in a. -/// -/// Uses the writemask in k - elements are copied from src if the corresponding mask bit is not set. -/// Otherwise the computation result is written into the result. -/// -/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_gf2p8affineinv_epi64_epi8) -#[inline] -#[target_feature(enable = "avx512gfni,avx512bw,avx512vl")] -#[cfg_attr(test, assert_instr(vgf2p8affineinvqb, B = 0))] -#[rustc_legacy_const_generics(4)] -pub unsafe fn _mm_mask_gf2p8affineinv_epi64_epi8( - src: __m128i, - k: __mmask16, - x: __m128i, - a: __m128i, -) -> __m128i { - static_assert_imm8!(B); - let b = B as u8; - let x = x.as_i8x16(); - let a = a.as_i8x16(); - let r = vgf2p8affineinvqb_128(x, a, b); - transmute(simd_select_bitmask(k, r, src.as_i8x16())) -} - -#[cfg(test)] -mod tests { - // The constants in the tests below are just bit patterns. They should not - // be interpreted as integers; signedness does not make sense for them, but - // __mXXXi happens to be defined in terms of signed integers. 
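A small illustration of the point made in that comment (the value is the AES S-box matrix these tests use later): with the top bit set, the constant only fits `i64` as a bit pattern, which is why the module enables `overflowing_literals`; the `u64`-cast spelling below is the allow-free equivalent and is not part of this patch.

// Same bit pattern, no `#![allow(overflowing_literals)]` needed (illustrative only).
const AES_S_BOX_MATRIX: i64 = 0xF1_E3_C7_8F_1F_3E_7C_F8_u64 as i64;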
- #![allow(overflowing_literals)] - - use core::hint::black_box; - use core::intrinsics::size_of; - use stdarch_test::simd_test; - - use crate::core_arch::x86::*; - - fn mulbyte(left: u8, right: u8) -> u8 { - // this implementation follows the description in - // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_gf2p8mul_epi8 - const REDUCTION_POLYNOMIAL: u16 = 0x11b; - let left: u16 = left.into(); - let right: u16 = right.into(); - let mut carryless_product: u16 = 0; - - // Carryless multiplication - for i in 0..8 { - if ((left >> i) & 0x01) != 0 { - carryless_product ^= right << i; - } - } - - // reduction, adding in "0" where appropriate to clear out high bits - // note that REDUCTION_POLYNOMIAL is zero in this context - for i in (8..=14).rev() { - if ((carryless_product >> i) & 0x01) != 0 { - carryless_product ^= REDUCTION_POLYNOMIAL << (i - 8); - } - } - - carryless_product as u8 - } - - const NUM_TEST_WORDS_512: usize = 4; - const NUM_TEST_WORDS_256: usize = NUM_TEST_WORDS_512 * 2; - const NUM_TEST_WORDS_128: usize = NUM_TEST_WORDS_256 * 2; - const NUM_TEST_ENTRIES: usize = NUM_TEST_WORDS_512 * 64; - const NUM_TEST_WORDS_64: usize = NUM_TEST_WORDS_128 * 2; - const NUM_BYTES: usize = 256; - const NUM_BYTES_WORDS_128: usize = NUM_BYTES / 16; - const NUM_BYTES_WORDS_256: usize = NUM_BYTES_WORDS_128 / 2; - const NUM_BYTES_WORDS_512: usize = NUM_BYTES_WORDS_256 / 2; - - fn parity(input: u8) -> u8 { - let mut accumulator = 0; - for i in 0..8 { - accumulator ^= (input >> i) & 0x01; - } - accumulator - } - - fn mat_vec_multiply_affine(matrix: u64, x: u8, b: u8) -> u8 { - // this implementation follows the description in - // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_gf2p8affine_epi64_epi8 - let mut accumulator = 0; - - for bit in 0..8 { - accumulator |= parity(x & matrix.to_le_bytes()[bit]) << (7 - bit); - } - - accumulator ^ b - } - - fn generate_affine_mul_test_data( - immediate: u8, - ) -> ( - [u64; NUM_TEST_WORDS_64], - [u8; NUM_TEST_ENTRIES], - [u8; NUM_TEST_ENTRIES], - ) { - let mut left: [u64; NUM_TEST_WORDS_64] = [0; NUM_TEST_WORDS_64]; - let mut right: [u8; NUM_TEST_ENTRIES] = [0; NUM_TEST_ENTRIES]; - let mut result: [u8; NUM_TEST_ENTRIES] = [0; NUM_TEST_ENTRIES]; - - for i in 0..NUM_TEST_WORDS_64 { - left[i] = (i as u64) * 103 * 101; - for j in 0..8 { - let j64 = j as u64; - right[i * 8 + j] = ((left[i] + j64) % 256) as u8; - result[i * 8 + j] = mat_vec_multiply_affine(left[i], right[i * 8 + j], immediate); - } - } - - (left, right, result) - } - - fn generate_inv_tests_data() -> ([u8; NUM_BYTES], [u8; NUM_BYTES]) { - let mut input: [u8; NUM_BYTES] = [0; NUM_BYTES]; - let mut result: [u8; NUM_BYTES] = [0; NUM_BYTES]; - - for i in 0..NUM_BYTES { - input[i] = (i % 256) as u8; - result[i] = if i == 0 { 0 } else { 1 }; - } - - (input, result) - } - - const AES_S_BOX: [u8; NUM_BYTES] = [ - 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, - 0x76, 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, - 0x72, 0xc0, 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, - 0xd8, 0x31, 0x15, 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, - 0xeb, 0x27, 0xb2, 0x75, 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, - 0xb3, 0x29, 0xe3, 0x2f, 0x84, 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, - 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf, 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, - 0xf9, 0x02, 
0x7f, 0x50, 0x3c, 0x9f, 0xa8, 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, - 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2, 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, - 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73, 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, - 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb, 0xe0, 0x32, 0x3a, 0x0a, 0x49, - 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79, 0xe7, 0xc8, 0x37, 0x6d, - 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08, 0xba, 0x78, 0x25, - 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a, 0x70, 0x3e, - 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e, 0xe1, - 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf, - 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, - 0x16, - ]; - - fn generate_byte_mul_test_data() -> ( - [u8; NUM_TEST_ENTRIES], - [u8; NUM_TEST_ENTRIES], - [u8; NUM_TEST_ENTRIES], - ) { - let mut left: [u8; NUM_TEST_ENTRIES] = [0; NUM_TEST_ENTRIES]; - let mut right: [u8; NUM_TEST_ENTRIES] = [0; NUM_TEST_ENTRIES]; - let mut result: [u8; NUM_TEST_ENTRIES] = [0; NUM_TEST_ENTRIES]; - - for i in 0..NUM_TEST_ENTRIES { - left[i] = (i % 256) as u8; - right[i] = left[i].wrapping_mul(101); - result[i] = mulbyte(left[i], right[i]); - } - - (left, right, result) - } - - #[target_feature(enable = "sse2")] - unsafe fn load_m128i_word(data: &[T], word_index: usize) -> __m128i { - let byte_offset = word_index * 16 / size_of::(); - let pointer = data.as_ptr().add(byte_offset) as *const __m128i; - _mm_loadu_si128(black_box(pointer)) - } - - #[target_feature(enable = "avx")] - unsafe fn load_m256i_word(data: &[T], word_index: usize) -> __m256i { - let byte_offset = word_index * 32 / size_of::(); - let pointer = data.as_ptr().add(byte_offset) as *const __m256i; - _mm256_loadu_si256(black_box(pointer)) - } - - #[target_feature(enable = "avx512f")] - unsafe fn load_m512i_word(data: &[T], word_index: usize) -> __m512i { - let byte_offset = word_index * 64 / size_of::(); - let pointer = data.as_ptr().add(byte_offset) as *const i32; - _mm512_loadu_si512(black_box(pointer)) - } - - #[simd_test(enable = "avx512gfni,avx512bw")] - unsafe fn test_mm512_gf2p8mul_epi8() { - let (left, right, expected) = generate_byte_mul_test_data(); - - for i in 0..NUM_TEST_WORDS_512 { - let left = load_m512i_word(&left, i); - let right = load_m512i_word(&right, i); - let expected = load_m512i_word(&expected, i); - let result = _mm512_gf2p8mul_epi8(left, right); - assert_eq_m512i(result, expected); - } - } - - #[simd_test(enable = "avx512gfni,avx512bw")] - unsafe fn test_mm512_maskz_gf2p8mul_epi8() { - let (left, right, _expected) = generate_byte_mul_test_data(); - - for i in 0..NUM_TEST_WORDS_512 { - let left = load_m512i_word(&left, i); - let right = load_m512i_word(&right, i); - let result_zero = _mm512_maskz_gf2p8mul_epi8(0, left, right); - assert_eq_m512i(result_zero, _mm512_setzero_si512()); - let mask_bytes: __mmask64 = 0x0F_0F_0F_0F_FF_FF_00_00; - let mask_words: __mmask16 = 0b01_01_01_01_11_11_00_00; - let expected_result = _mm512_gf2p8mul_epi8(left, right); - let result_masked = _mm512_maskz_gf2p8mul_epi8(mask_bytes, left, right); - let expected_masked = - _mm512_mask_blend_epi32(mask_words, _mm512_setzero_si512(), expected_result); - assert_eq_m512i(result_masked, expected_masked); - } - } - - #[simd_test(enable = "avx512gfni,avx512bw")] - unsafe fn test_mm512_mask_gf2p8mul_epi8() { 
- let (left, right, _expected) = generate_byte_mul_test_data(); - - for i in 0..NUM_TEST_WORDS_512 { - let left = load_m512i_word(&left, i); - let right = load_m512i_word(&right, i); - let result_left = _mm512_mask_gf2p8mul_epi8(left, 0, left, right); - assert_eq_m512i(result_left, left); - let mask_bytes: __mmask64 = 0x0F_0F_0F_0F_FF_FF_00_00; - let mask_words: __mmask16 = 0b01_01_01_01_11_11_00_00; - let expected_result = _mm512_gf2p8mul_epi8(left, right); - let result_masked = _mm512_mask_gf2p8mul_epi8(left, mask_bytes, left, right); - let expected_masked = _mm512_mask_blend_epi32(mask_words, left, expected_result); - assert_eq_m512i(result_masked, expected_masked); - } - } - - #[simd_test(enable = "avx512gfni,avx512bw,avx512vl")] - unsafe fn test_mm256_gf2p8mul_epi8() { - let (left, right, expected) = generate_byte_mul_test_data(); - - for i in 0..NUM_TEST_WORDS_256 { - let left = load_m256i_word(&left, i); - let right = load_m256i_word(&right, i); - let expected = load_m256i_word(&expected, i); - let result = _mm256_gf2p8mul_epi8(left, right); - assert_eq_m256i(result, expected); - } - } - - #[simd_test(enable = "avx512gfni,avx512bw,avx512vl")] - unsafe fn test_mm256_maskz_gf2p8mul_epi8() { - let (left, right, _expected) = generate_byte_mul_test_data(); - - for i in 0..NUM_TEST_WORDS_256 { - let left = load_m256i_word(&left, i); - let right = load_m256i_word(&right, i); - let result_zero = _mm256_maskz_gf2p8mul_epi8(0, left, right); - assert_eq_m256i(result_zero, _mm256_setzero_si256()); - let mask_bytes: __mmask32 = 0x0F_F0_FF_00; - const MASK_WORDS: i32 = 0b01_10_11_00; - let expected_result = _mm256_gf2p8mul_epi8(left, right); - let result_masked = _mm256_maskz_gf2p8mul_epi8(mask_bytes, left, right); - let expected_masked = - _mm256_blend_epi32::(_mm256_setzero_si256(), expected_result); - assert_eq_m256i(result_masked, expected_masked); - } - } - - #[simd_test(enable = "avx512gfni,avx512bw,avx512vl")] - unsafe fn test_mm256_mask_gf2p8mul_epi8() { - let (left, right, _expected) = generate_byte_mul_test_data(); - - for i in 0..NUM_TEST_WORDS_256 { - let left = load_m256i_word(&left, i); - let right = load_m256i_word(&right, i); - let result_left = _mm256_mask_gf2p8mul_epi8(left, 0, left, right); - assert_eq_m256i(result_left, left); - let mask_bytes: __mmask32 = 0x0F_F0_FF_00; - const MASK_WORDS: i32 = 0b01_10_11_00; - let expected_result = _mm256_gf2p8mul_epi8(left, right); - let result_masked = _mm256_mask_gf2p8mul_epi8(left, mask_bytes, left, right); - let expected_masked = _mm256_blend_epi32::(left, expected_result); - assert_eq_m256i(result_masked, expected_masked); - } - } - - #[simd_test(enable = "avx512gfni,avx512bw,avx512vl")] - unsafe fn test_mm_gf2p8mul_epi8() { - let (left, right, expected) = generate_byte_mul_test_data(); - - for i in 0..NUM_TEST_WORDS_128 { - let left = load_m128i_word(&left, i); - let right = load_m128i_word(&right, i); - let expected = load_m128i_word(&expected, i); - let result = _mm_gf2p8mul_epi8(left, right); - assert_eq_m128i(result, expected); - } - } - - #[simd_test(enable = "avx512gfni,avx512bw,avx512vl")] - unsafe fn test_mm_maskz_gf2p8mul_epi8() { - let (left, right, _expected) = generate_byte_mul_test_data(); - - for i in 0..NUM_TEST_WORDS_128 { - let left = load_m128i_word(&left, i); - let right = load_m128i_word(&right, i); - let result_zero = _mm_maskz_gf2p8mul_epi8(0, left, right); - assert_eq_m128i(result_zero, _mm_setzero_si128()); - let mask_bytes: __mmask16 = 0x0F_F0; - const MASK_WORDS: i32 = 0b01_10; - let expected_result = 
_mm_gf2p8mul_epi8(left, right); - let result_masked = _mm_maskz_gf2p8mul_epi8(mask_bytes, left, right); - let expected_masked = - _mm_blend_epi32::(_mm_setzero_si128(), expected_result); - assert_eq_m128i(result_masked, expected_masked); - } - } - - #[simd_test(enable = "avx512gfni,avx512bw,avx512vl")] - unsafe fn test_mm_mask_gf2p8mul_epi8() { - let (left, right, _expected) = generate_byte_mul_test_data(); - - for i in 0..NUM_TEST_WORDS_128 { - let left = load_m128i_word(&left, i); - let right = load_m128i_word(&right, i); - let result_left = _mm_mask_gf2p8mul_epi8(left, 0, left, right); - assert_eq_m128i(result_left, left); - let mask_bytes: __mmask16 = 0x0F_F0; - const MASK_WORDS: i32 = 0b01_10; - let expected_result = _mm_gf2p8mul_epi8(left, right); - let result_masked = _mm_mask_gf2p8mul_epi8(left, mask_bytes, left, right); - let expected_masked = _mm_blend_epi32::(left, expected_result); - assert_eq_m128i(result_masked, expected_masked); - } - } - - #[simd_test(enable = "avx512gfni,avx512bw")] - unsafe fn test_mm512_gf2p8affine_epi64_epi8() { - let identity: i64 = 0x01_02_04_08_10_20_40_80; - const IDENTITY_BYTE: i32 = 0; - let constant: i64 = 0; - const CONSTANT_BYTE: i32 = 0x63; - let identity = _mm512_set1_epi64(identity); - let constant = _mm512_set1_epi64(constant); - let constant_reference = _mm512_set1_epi8(CONSTANT_BYTE as i8); - - let (bytes, more_bytes, _) = generate_byte_mul_test_data(); - let (matrices, vectors, references) = generate_affine_mul_test_data(IDENTITY_BYTE as u8); - - for i in 0..NUM_TEST_WORDS_512 { - let data = load_m512i_word(&bytes, i); - let result = _mm512_gf2p8affine_epi64_epi8::(data, identity); - assert_eq_m512i(result, data); - let result = _mm512_gf2p8affine_epi64_epi8::(data, constant); - assert_eq_m512i(result, constant_reference); - let data = load_m512i_word(&more_bytes, i); - let result = _mm512_gf2p8affine_epi64_epi8::(data, identity); - assert_eq_m512i(result, data); - let result = _mm512_gf2p8affine_epi64_epi8::(data, constant); - assert_eq_m512i(result, constant_reference); - - let matrix = load_m512i_word(&matrices, i); - let vector = load_m512i_word(&vectors, i); - let reference = load_m512i_word(&references, i); - - let result = _mm512_gf2p8affine_epi64_epi8::(vector, matrix); - assert_eq_m512i(result, reference); - } - } - - #[simd_test(enable = "avx512gfni,avx512bw")] - unsafe fn test_mm512_maskz_gf2p8affine_epi64_epi8() { - const CONSTANT_BYTE: i32 = 0x63; - let (matrices, vectors, _expected) = generate_affine_mul_test_data(CONSTANT_BYTE as u8); - - for i in 0..NUM_TEST_WORDS_512 { - let matrix = load_m512i_word(&matrices, i); - let vector = load_m512i_word(&vectors, i); - let result_zero = - _mm512_maskz_gf2p8affine_epi64_epi8::(0, vector, matrix); - assert_eq_m512i(result_zero, _mm512_setzero_si512()); - let mask_bytes: __mmask64 = 0x0F_0F_0F_0F_FF_FF_00_00; - let mask_words: __mmask16 = 0b01_01_01_01_11_11_00_00; - let expected_result = _mm512_gf2p8affine_epi64_epi8::(vector, matrix); - let result_masked = - _mm512_maskz_gf2p8affine_epi64_epi8::(mask_bytes, vector, matrix); - let expected_masked = - _mm512_mask_blend_epi32(mask_words, _mm512_setzero_si512(), expected_result); - assert_eq_m512i(result_masked, expected_masked); - } - } - - #[simd_test(enable = "avx512gfni,avx512bw")] - unsafe fn test_mm512_mask_gf2p8affine_epi64_epi8() { - const CONSTANT_BYTE: i32 = 0x63; - let (matrices, vectors, _expected) = generate_affine_mul_test_data(CONSTANT_BYTE as u8); - - for i in 0..NUM_TEST_WORDS_512 { - let left = 
load_m512i_word(&vectors, i); - let right = load_m512i_word(&matrices, i); - let result_left = - _mm512_mask_gf2p8affine_epi64_epi8::(left, 0, left, right); - assert_eq_m512i(result_left, left); - let mask_bytes: __mmask64 = 0x0F_0F_0F_0F_FF_FF_00_00; - let mask_words: __mmask16 = 0b01_01_01_01_11_11_00_00; - let expected_result = _mm512_gf2p8affine_epi64_epi8::(left, right); - let result_masked = - _mm512_mask_gf2p8affine_epi64_epi8::(left, mask_bytes, left, right); - let expected_masked = _mm512_mask_blend_epi32(mask_words, left, expected_result); - assert_eq_m512i(result_masked, expected_masked); - } - } - - #[simd_test(enable = "avx512gfni,avx512bw,avx512vl")] - unsafe fn test_mm256_gf2p8affine_epi64_epi8() { - let identity: i64 = 0x01_02_04_08_10_20_40_80; - const IDENTITY_BYTE: i32 = 0; - let constant: i64 = 0; - const CONSTANT_BYTE: i32 = 0x63; - let identity = _mm256_set1_epi64x(identity); - let constant = _mm256_set1_epi64x(constant); - let constant_reference = _mm256_set1_epi8(CONSTANT_BYTE as i8); - - let (bytes, more_bytes, _) = generate_byte_mul_test_data(); - let (matrices, vectors, references) = generate_affine_mul_test_data(IDENTITY_BYTE as u8); - - for i in 0..NUM_TEST_WORDS_256 { - let data = load_m256i_word(&bytes, i); - let result = _mm256_gf2p8affine_epi64_epi8::(data, identity); - assert_eq_m256i(result, data); - let result = _mm256_gf2p8affine_epi64_epi8::(data, constant); - assert_eq_m256i(result, constant_reference); - let data = load_m256i_word(&more_bytes, i); - let result = _mm256_gf2p8affine_epi64_epi8::(data, identity); - assert_eq_m256i(result, data); - let result = _mm256_gf2p8affine_epi64_epi8::(data, constant); - assert_eq_m256i(result, constant_reference); - - let matrix = load_m256i_word(&matrices, i); - let vector = load_m256i_word(&vectors, i); - let reference = load_m256i_word(&references, i); - - let result = _mm256_gf2p8affine_epi64_epi8::(vector, matrix); - assert_eq_m256i(result, reference); - } - } - - #[simd_test(enable = "avx512gfni,avx512bw,avx512vl")] - unsafe fn test_mm256_maskz_gf2p8affine_epi64_epi8() { - const CONSTANT_BYTE: i32 = 0x63; - let (matrices, vectors, _expected) = generate_affine_mul_test_data(CONSTANT_BYTE as u8); - - for i in 0..NUM_TEST_WORDS_256 { - let matrix = load_m256i_word(&matrices, i); - let vector = load_m256i_word(&vectors, i); - let result_zero = - _mm256_maskz_gf2p8affine_epi64_epi8::(0, vector, matrix); - assert_eq_m256i(result_zero, _mm256_setzero_si256()); - let mask_bytes: __mmask32 = 0xFF_0F_F0_00; - const MASK_WORDS: i32 = 0b11_01_10_00; - let expected_result = _mm256_gf2p8affine_epi64_epi8::(vector, matrix); - let result_masked = - _mm256_maskz_gf2p8affine_epi64_epi8::(mask_bytes, vector, matrix); - let expected_masked = - _mm256_blend_epi32::(_mm256_setzero_si256(), expected_result); - assert_eq_m256i(result_masked, expected_masked); - } - } - - #[simd_test(enable = "avx512gfni,avx512bw,avx512vl")] - unsafe fn test_mm256_mask_gf2p8affine_epi64_epi8() { - const CONSTANT_BYTE: i32 = 0x63; - let (matrices, vectors, _expected) = generate_affine_mul_test_data(CONSTANT_BYTE as u8); - - for i in 0..NUM_TEST_WORDS_256 { - let left = load_m256i_word(&vectors, i); - let right = load_m256i_word(&matrices, i); - let result_left = - _mm256_mask_gf2p8affine_epi64_epi8::(left, 0, left, right); - assert_eq_m256i(result_left, left); - let mask_bytes: __mmask32 = 0xFF_0F_F0_00; - const MASK_WORDS: i32 = 0b11_01_10_00; - let expected_result = _mm256_gf2p8affine_epi64_epi8::(left, right); - let result_masked = - 
_mm256_mask_gf2p8affine_epi64_epi8::(left, mask_bytes, left, right); - let expected_masked = _mm256_blend_epi32::(left, expected_result); - assert_eq_m256i(result_masked, expected_masked); - } - } - - #[simd_test(enable = "avx512gfni,avx512bw,avx512vl")] - unsafe fn test_mm_gf2p8affine_epi64_epi8() { - let identity: i64 = 0x01_02_04_08_10_20_40_80; - const IDENTITY_BYTE: i32 = 0; - let constant: i64 = 0; - const CONSTANT_BYTE: i32 = 0x63; - let identity = _mm_set1_epi64x(identity); - let constant = _mm_set1_epi64x(constant); - let constant_reference = _mm_set1_epi8(CONSTANT_BYTE as i8); - - let (bytes, more_bytes, _) = generate_byte_mul_test_data(); - let (matrices, vectors, references) = generate_affine_mul_test_data(IDENTITY_BYTE as u8); - - for i in 0..NUM_TEST_WORDS_128 { - let data = load_m128i_word(&bytes, i); - let result = _mm_gf2p8affine_epi64_epi8::(data, identity); - assert_eq_m128i(result, data); - let result = _mm_gf2p8affine_epi64_epi8::(data, constant); - assert_eq_m128i(result, constant_reference); - let data = load_m128i_word(&more_bytes, i); - let result = _mm_gf2p8affine_epi64_epi8::(data, identity); - assert_eq_m128i(result, data); - let result = _mm_gf2p8affine_epi64_epi8::(data, constant); - assert_eq_m128i(result, constant_reference); - - let matrix = load_m128i_word(&matrices, i); - let vector = load_m128i_word(&vectors, i); - let reference = load_m128i_word(&references, i); - - let result = _mm_gf2p8affine_epi64_epi8::(vector, matrix); - assert_eq_m128i(result, reference); - } - } - - #[simd_test(enable = "avx512gfni,avx512bw,avx512vl")] - unsafe fn test_mm_maskz_gf2p8affine_epi64_epi8() { - const CONSTANT_BYTE: i32 = 0x63; - let (matrices, vectors, _expected) = generate_affine_mul_test_data(CONSTANT_BYTE as u8); - - for i in 0..NUM_TEST_WORDS_128 { - let matrix = load_m128i_word(&matrices, i); - let vector = load_m128i_word(&vectors, i); - let result_zero = _mm_maskz_gf2p8affine_epi64_epi8::(0, vector, matrix); - assert_eq_m128i(result_zero, _mm_setzero_si128()); - let mask_bytes: __mmask16 = 0x0F_F0; - const MASK_WORDS: i32 = 0b01_10; - let expected_result = _mm_gf2p8affine_epi64_epi8::(vector, matrix); - let result_masked = - _mm_maskz_gf2p8affine_epi64_epi8::(mask_bytes, vector, matrix); - let expected_masked = - _mm_blend_epi32::(_mm_setzero_si128(), expected_result); - assert_eq_m128i(result_masked, expected_masked); - } - } - - #[simd_test(enable = "avx512gfni,avx512bw,avx512vl")] - unsafe fn test_mm_mask_gf2p8affine_epi64_epi8() { - const CONSTANT_BYTE: i32 = 0x63; - let (matrices, vectors, _expected) = generate_affine_mul_test_data(CONSTANT_BYTE as u8); - - for i in 0..NUM_TEST_WORDS_128 { - let left = load_m128i_word(&vectors, i); - let right = load_m128i_word(&matrices, i); - let result_left = - _mm_mask_gf2p8affine_epi64_epi8::(left, 0, left, right); - assert_eq_m128i(result_left, left); - let mask_bytes: __mmask16 = 0x0F_F0; - const MASK_WORDS: i32 = 0b01_10; - let expected_result = _mm_gf2p8affine_epi64_epi8::(left, right); - let result_masked = - _mm_mask_gf2p8affine_epi64_epi8::(left, mask_bytes, left, right); - let expected_masked = _mm_blend_epi32::(left, expected_result); - assert_eq_m128i(result_masked, expected_masked); - } - } - - #[simd_test(enable = "avx512gfni,avx512bw")] - unsafe fn test_mm512_gf2p8affineinv_epi64_epi8() { - let identity: i64 = 0x01_02_04_08_10_20_40_80; - const IDENTITY_BYTE: i32 = 0; - const CONSTANT_BYTE: i32 = 0x63; - let identity = _mm512_set1_epi64(identity); - - // validate inversion - let (inputs, results) = 
generate_inv_tests_data(); - - for i in 0..NUM_BYTES_WORDS_512 { - let input = load_m512i_word(&inputs, i); - let reference = load_m512i_word(&results, i); - let result = _mm512_gf2p8affineinv_epi64_epi8::(input, identity); - let remultiplied = _mm512_gf2p8mul_epi8(result, input); - assert_eq_m512i(remultiplied, reference); - } - - // validate subsequent affine operation - let (matrices, vectors, _affine_expected) = - generate_affine_mul_test_data(CONSTANT_BYTE as u8); - - for i in 0..NUM_TEST_WORDS_512 { - let vector = load_m512i_word(&vectors, i); - let matrix = load_m512i_word(&matrices, i); - - let inv_vec = _mm512_gf2p8affineinv_epi64_epi8::(vector, identity); - let reference = _mm512_gf2p8affine_epi64_epi8::(inv_vec, matrix); - let result = _mm512_gf2p8affineinv_epi64_epi8::(vector, matrix); - assert_eq_m512i(result, reference); - } - - // validate everything by virtue of checking against the AES SBox - const AES_S_BOX_MATRIX: i64 = 0xF1_E3_C7_8F_1F_3E_7C_F8; - let sbox_matrix = _mm512_set1_epi64(AES_S_BOX_MATRIX); - - for i in 0..NUM_BYTES_WORDS_512 { - let reference = load_m512i_word(&AES_S_BOX, i); - let input = load_m512i_word(&inputs, i); - let result = _mm512_gf2p8affineinv_epi64_epi8::(input, sbox_matrix); - assert_eq_m512i(result, reference); - } - } - - #[simd_test(enable = "avx512gfni,avx512bw")] - unsafe fn test_mm512_maskz_gf2p8affineinv_epi64_epi8() { - const CONSTANT_BYTE: i32 = 0x63; - let (matrices, vectors, _expected) = generate_affine_mul_test_data(CONSTANT_BYTE as u8); - - for i in 0..NUM_TEST_WORDS_512 { - let matrix = load_m512i_word(&matrices, i); - let vector = load_m512i_word(&vectors, i); - let result_zero = - _mm512_maskz_gf2p8affineinv_epi64_epi8::(0, vector, matrix); - assert_eq_m512i(result_zero, _mm512_setzero_si512()); - let mask_bytes: __mmask64 = 0x0F_0F_0F_0F_FF_FF_00_00; - let mask_words: __mmask16 = 0b01_01_01_01_11_11_00_00; - let expected_result = _mm512_gf2p8affineinv_epi64_epi8::(vector, matrix); - let result_masked = - _mm512_maskz_gf2p8affineinv_epi64_epi8::(mask_bytes, vector, matrix); - let expected_masked = - _mm512_mask_blend_epi32(mask_words, _mm512_setzero_si512(), expected_result); - assert_eq_m512i(result_masked, expected_masked); - } - } - - #[simd_test(enable = "avx512gfni,avx512bw")] - unsafe fn test_mm512_mask_gf2p8affineinv_epi64_epi8() { - const CONSTANT_BYTE: i32 = 0x63; - let (matrices, vectors, _expected) = generate_affine_mul_test_data(CONSTANT_BYTE as u8); - - for i in 0..NUM_TEST_WORDS_512 { - let left = load_m512i_word(&vectors, i); - let right = load_m512i_word(&matrices, i); - let result_left = - _mm512_mask_gf2p8affineinv_epi64_epi8::(left, 0, left, right); - assert_eq_m512i(result_left, left); - let mask_bytes: __mmask64 = 0x0F_0F_0F_0F_FF_FF_00_00; - let mask_words: __mmask16 = 0b01_01_01_01_11_11_00_00; - let expected_result = _mm512_gf2p8affineinv_epi64_epi8::(left, right); - let result_masked = _mm512_mask_gf2p8affineinv_epi64_epi8::( - left, mask_bytes, left, right, - ); - let expected_masked = _mm512_mask_blend_epi32(mask_words, left, expected_result); - assert_eq_m512i(result_masked, expected_masked); - } - } - - #[simd_test(enable = "avx512gfni,avx512bw,avx512vl")] - unsafe fn test_mm256_gf2p8affineinv_epi64_epi8() { - let identity: i64 = 0x01_02_04_08_10_20_40_80; - const IDENTITY_BYTE: i32 = 0; - const CONSTANT_BYTE: i32 = 0x63; - let identity = _mm256_set1_epi64x(identity); - - // validate inversion - let (inputs, results) = generate_inv_tests_data(); - - for i in 0..NUM_BYTES_WORDS_256 { - let input = 
load_m256i_word(&inputs, i); - let reference = load_m256i_word(&results, i); - let result = _mm256_gf2p8affineinv_epi64_epi8::(input, identity); - let remultiplied = _mm256_gf2p8mul_epi8(result, input); - assert_eq_m256i(remultiplied, reference); - } - - // validate subsequent affine operation - let (matrices, vectors, _affine_expected) = - generate_affine_mul_test_data(CONSTANT_BYTE as u8); - - for i in 0..NUM_TEST_WORDS_256 { - let vector = load_m256i_word(&vectors, i); - let matrix = load_m256i_word(&matrices, i); - - let inv_vec = _mm256_gf2p8affineinv_epi64_epi8::(vector, identity); - let reference = _mm256_gf2p8affine_epi64_epi8::(inv_vec, matrix); - let result = _mm256_gf2p8affineinv_epi64_epi8::(vector, matrix); - assert_eq_m256i(result, reference); - } - - // validate everything by virtue of checking against the AES SBox - const AES_S_BOX_MATRIX: i64 = 0xF1_E3_C7_8F_1F_3E_7C_F8; - let sbox_matrix = _mm256_set1_epi64x(AES_S_BOX_MATRIX); - - for i in 0..NUM_BYTES_WORDS_256 { - let reference = load_m256i_word(&AES_S_BOX, i); - let input = load_m256i_word(&inputs, i); - let result = _mm256_gf2p8affineinv_epi64_epi8::(input, sbox_matrix); - assert_eq_m256i(result, reference); - } - } - - #[simd_test(enable = "avx512gfni,avx512bw,avx512vl")] - unsafe fn test_mm256_maskz_gf2p8affineinv_epi64_epi8() { - const CONSTANT_BYTE: i32 = 0x63; - let (matrices, vectors, _expected) = generate_affine_mul_test_data(CONSTANT_BYTE as u8); - - for i in 0..NUM_TEST_WORDS_256 { - let matrix = load_m256i_word(&matrices, i); - let vector = load_m256i_word(&vectors, i); - let result_zero = - _mm256_maskz_gf2p8affineinv_epi64_epi8::(0, vector, matrix); - assert_eq_m256i(result_zero, _mm256_setzero_si256()); - let mask_bytes: __mmask32 = 0xFF_0F_F0_00; - const MASK_WORDS: i32 = 0b11_01_10_00; - let expected_result = _mm256_gf2p8affineinv_epi64_epi8::(vector, matrix); - let result_masked = - _mm256_maskz_gf2p8affineinv_epi64_epi8::(mask_bytes, vector, matrix); - let expected_masked = - _mm256_blend_epi32::(_mm256_setzero_si256(), expected_result); - assert_eq_m256i(result_masked, expected_masked); - } - } - - #[simd_test(enable = "avx512gfni,avx512bw,avx512vl")] - unsafe fn test_mm256_mask_gf2p8affineinv_epi64_epi8() { - const CONSTANT_BYTE: i32 = 0x63; - let (matrices, vectors, _expected) = generate_affine_mul_test_data(CONSTANT_BYTE as u8); - - for i in 0..NUM_TEST_WORDS_256 { - let left = load_m256i_word(&vectors, i); - let right = load_m256i_word(&matrices, i); - let result_left = - _mm256_mask_gf2p8affineinv_epi64_epi8::(left, 0, left, right); - assert_eq_m256i(result_left, left); - let mask_bytes: __mmask32 = 0xFF_0F_F0_00; - const MASK_WORDS: i32 = 0b11_01_10_00; - let expected_result = _mm256_gf2p8affineinv_epi64_epi8::(left, right); - let result_masked = _mm256_mask_gf2p8affineinv_epi64_epi8::( - left, mask_bytes, left, right, - ); - let expected_masked = _mm256_blend_epi32::(left, expected_result); - assert_eq_m256i(result_masked, expected_masked); - } - } - - #[simd_test(enable = "avx512gfni,avx512bw,avx512vl")] - unsafe fn test_mm_gf2p8affineinv_epi64_epi8() { - let identity: i64 = 0x01_02_04_08_10_20_40_80; - const IDENTITY_BYTE: i32 = 0; - const CONSTANT_BYTE: i32 = 0x63; - let identity = _mm_set1_epi64x(identity); - - // validate inversion - let (inputs, results) = generate_inv_tests_data(); - - for i in 0..NUM_BYTES_WORDS_128 { - let input = load_m128i_word(&inputs, i); - let reference = load_m128i_word(&results, i); - let result = _mm_gf2p8affineinv_epi64_epi8::(input, identity); - let 
remultiplied = _mm_gf2p8mul_epi8(result, input); - assert_eq_m128i(remultiplied, reference); - } - - // validate subsequent affine operation - let (matrices, vectors, _affine_expected) = - generate_affine_mul_test_data(CONSTANT_BYTE as u8); - - for i in 0..NUM_TEST_WORDS_128 { - let vector = load_m128i_word(&vectors, i); - let matrix = load_m128i_word(&matrices, i); - - let inv_vec = _mm_gf2p8affineinv_epi64_epi8::(vector, identity); - let reference = _mm_gf2p8affine_epi64_epi8::(inv_vec, matrix); - let result = _mm_gf2p8affineinv_epi64_epi8::(vector, matrix); - assert_eq_m128i(result, reference); - } - - // validate everything by virtue of checking against the AES SBox - const AES_S_BOX_MATRIX: i64 = 0xF1_E3_C7_8F_1F_3E_7C_F8; - let sbox_matrix = _mm_set1_epi64x(AES_S_BOX_MATRIX); - - for i in 0..NUM_BYTES_WORDS_128 { - let reference = load_m128i_word(&AES_S_BOX, i); - let input = load_m128i_word(&inputs, i); - let result = _mm_gf2p8affineinv_epi64_epi8::(input, sbox_matrix); - assert_eq_m128i(result, reference); - } - } - - #[simd_test(enable = "avx512gfni,avx512bw,avx512vl")] - unsafe fn test_mm_maskz_gf2p8affineinv_epi64_epi8() { - const CONSTANT_BYTE: i32 = 0x63; - let (matrices, vectors, _expected) = generate_affine_mul_test_data(CONSTANT_BYTE as u8); - - for i in 0..NUM_TEST_WORDS_128 { - let matrix = load_m128i_word(&matrices, i); - let vector = load_m128i_word(&vectors, i); - let result_zero = - _mm_maskz_gf2p8affineinv_epi64_epi8::(0, vector, matrix); - assert_eq_m128i(result_zero, _mm_setzero_si128()); - let mask_bytes: __mmask16 = 0x0F_F0; - const MASK_WORDS: i32 = 0b01_10; - let expected_result = _mm_gf2p8affineinv_epi64_epi8::(vector, matrix); - let result_masked = - _mm_maskz_gf2p8affineinv_epi64_epi8::(mask_bytes, vector, matrix); - let expected_masked = - _mm_blend_epi32::(_mm_setzero_si128(), expected_result); - assert_eq_m128i(result_masked, expected_masked); - } - } - - #[simd_test(enable = "avx512gfni,avx512bw,avx512vl")] - unsafe fn test_mm_mask_gf2p8affineinv_epi64_epi8() { - const CONSTANT_BYTE: i32 = 0x63; - let (matrices, vectors, _expected) = generate_affine_mul_test_data(CONSTANT_BYTE as u8); - - for i in 0..NUM_TEST_WORDS_128 { - let left = load_m128i_word(&vectors, i); - let right = load_m128i_word(&matrices, i); - let result_left = - _mm_mask_gf2p8affineinv_epi64_epi8::(left, 0, left, right); - assert_eq_m128i(result_left, left); - let mask_bytes: __mmask16 = 0x0F_F0; - const MASK_WORDS: i32 = 0b01_10; - let expected_result = _mm_gf2p8affineinv_epi64_epi8::(left, right); - let result_masked = - _mm_mask_gf2p8affineinv_epi64_epi8::(left, mask_bytes, left, right); - let expected_masked = _mm_blend_epi32::(left, expected_result); - assert_eq_m128i(result_masked, expected_masked); - } - } -} diff --git a/library/stdarch/crates/core_arch/src/x86/avx512vaes.rs b/library/stdarch/crates/core_arch/src/x86/avx512vaes.rs deleted file mode 100644 index 676de312b..000000000 --- a/library/stdarch/crates/core_arch/src/x86/avx512vaes.rs +++ /dev/null @@ -1,332 +0,0 @@ -//! Vectorized AES Instructions (VAES) -//! -//! The intrinsics here correspond to those in the `immintrin.h` C header. -//! -//! The reference is [Intel 64 and IA-32 Architectures Software Developer's -//! Manual Volume 2: Instruction Set Reference, A-Z][intel64_ref]. -//! -//! 
[intel64_ref]: http://www.intel.de/content/dam/www/public/us/en/documents/manuals/64-ia-32-architectures-software-developer-instruction-set-reference-manual-325383.pdf - -use crate::core_arch::x86::__m256i; -use crate::core_arch::x86::__m512i; - -#[cfg(test)] -use stdarch_test::assert_instr; - -#[allow(improper_ctypes)] -extern "C" { - #[link_name = "llvm.x86.aesni.aesenc.256"] - fn aesenc_256(a: __m256i, round_key: __m256i) -> __m256i; - #[link_name = "llvm.x86.aesni.aesenclast.256"] - fn aesenclast_256(a: __m256i, round_key: __m256i) -> __m256i; - #[link_name = "llvm.x86.aesni.aesdec.256"] - fn aesdec_256(a: __m256i, round_key: __m256i) -> __m256i; - #[link_name = "llvm.x86.aesni.aesdeclast.256"] - fn aesdeclast_256(a: __m256i, round_key: __m256i) -> __m256i; - #[link_name = "llvm.x86.aesni.aesenc.512"] - fn aesenc_512(a: __m512i, round_key: __m512i) -> __m512i; - #[link_name = "llvm.x86.aesni.aesenclast.512"] - fn aesenclast_512(a: __m512i, round_key: __m512i) -> __m512i; - #[link_name = "llvm.x86.aesni.aesdec.512"] - fn aesdec_512(a: __m512i, round_key: __m512i) -> __m512i; - #[link_name = "llvm.x86.aesni.aesdeclast.512"] - fn aesdeclast_512(a: __m512i, round_key: __m512i) -> __m512i; -} - -/// Performs one round of an AES encryption flow on each 128-bit word (state) in `a` using -/// the corresponding 128-bit word (key) in `round_key`. -/// -/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_aesenc_epi128) -#[inline] -#[target_feature(enable = "avx512vaes,avx512vl")] -#[cfg_attr(test, assert_instr(vaesenc))] -pub unsafe fn _mm256_aesenc_epi128(a: __m256i, round_key: __m256i) -> __m256i { - aesenc_256(a, round_key) -} - -/// Performs the last round of an AES encryption flow on each 128-bit word (state) in `a` using -/// the corresponding 128-bit word (key) in `round_key`. -/// -/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_aesenclast_epi128) -#[inline] -#[target_feature(enable = "avx512vaes,avx512vl")] -#[cfg_attr(test, assert_instr(vaesenclast))] -pub unsafe fn _mm256_aesenclast_epi128(a: __m256i, round_key: __m256i) -> __m256i { - aesenclast_256(a, round_key) -} - -/// Performs one round of an AES decryption flow on each 128-bit word (state) in `a` using -/// the corresponding 128-bit word (key) in `round_key`. -/// -/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_aesdec_epi128) -#[inline] -#[target_feature(enable = "avx512vaes,avx512vl")] -#[cfg_attr(test, assert_instr(vaesdec))] -pub unsafe fn _mm256_aesdec_epi128(a: __m256i, round_key: __m256i) -> __m256i { - aesdec_256(a, round_key) -} - -/// Performs the last round of an AES decryption flow on each 128-bit word (state) in `a` using -/// the corresponding 128-bit word (key) in `round_key`. -/// -/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_aesdeclast_epi128) -#[inline] -#[target_feature(enable = "avx512vaes,avx512vl")] -#[cfg_attr(test, assert_instr(vaesdeclast))] -pub unsafe fn _mm256_aesdeclast_epi128(a: __m256i, round_key: __m256i) -> __m256i { - aesdeclast_256(a, round_key) -} - -/// Performs one round of an AES encryption flow on each 128-bit word (state) in `a` using -/// the corresponding 128-bit word (key) in `round_key`. 
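// [Editor's illustrative sketch, not part of the patch] The VAES intrinsics
// documented above run one AES round independently in every 128-bit lane, so a
// wide encryption simply chains them the same way a scalar AES-NI flow would.
// This assumes the usual `core::arch::x86_64` glob import and reuses the
// pre-rename `avx512vaes` feature string this file itself uses; the round keys
// are placeholders.
#[target_feature(enable = "avx512vaes,avx512f")]
unsafe fn aes_two_rounds_wide(state: __m512i, rk: __m512i, rk_last: __m512i) -> __m512i {
    // One full round (SubBytes, ShiftRows, MixColumns, AddRoundKey) per 128-bit lane.
    let mid = _mm512_aesenc_epi128(state, rk);
    // Final round per lane: the same, but without MixColumns.
    _mm512_aesenclast_epi128(mid, rk_last)
}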
-/// -/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_aesenc_epi128) -#[inline] -#[target_feature(enable = "avx512vaes,avx512f")] -#[cfg_attr(test, assert_instr(vaesenc))] -pub unsafe fn _mm512_aesenc_epi128(a: __m512i, round_key: __m512i) -> __m512i { - aesenc_512(a, round_key) -} - -/// Performs the last round of an AES encryption flow on each 128-bit word (state) in `a` using -/// the corresponding 128-bit word (key) in `round_key`. -/// -/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_aesenclast_epi128) -#[inline] -#[target_feature(enable = "avx512vaes,avx512f")] -#[cfg_attr(test, assert_instr(vaesenclast))] -pub unsafe fn _mm512_aesenclast_epi128(a: __m512i, round_key: __m512i) -> __m512i { - aesenclast_512(a, round_key) -} - -/// Performs one round of an AES decryption flow on each 128-bit word (state) in `a` using -/// the corresponding 128-bit word (key) in `round_key`. -/// -/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_aesdec_epi128) -#[inline] -#[target_feature(enable = "avx512vaes,avx512f")] -#[cfg_attr(test, assert_instr(vaesdec))] -pub unsafe fn _mm512_aesdec_epi128(a: __m512i, round_key: __m512i) -> __m512i { - aesdec_512(a, round_key) -} - -/// Performs the last round of an AES decryption flow on each 128-bit word (state) in `a` using -/// the corresponding 128-bit word (key) in `round_key`. -/// -/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_aesdeclast_epi128) -#[inline] -#[target_feature(enable = "avx512vaes,avx512f")] -#[cfg_attr(test, assert_instr(vaesdeclast))] -pub unsafe fn _mm512_aesdeclast_epi128(a: __m512i, round_key: __m512i) -> __m512i { - aesdeclast_512(a, round_key) -} - -#[cfg(test)] -mod tests { - // The constants in the tests below are just bit patterns. They should not - // be interpreted as integers; signedness does not make sense for them, but - // __mXXXi happens to be defined in terms of signed integers. 
- #![allow(overflowing_literals)] - - use stdarch_test::simd_test; - - use crate::core_arch::x86::*; - - // the first parts of these tests are straight ports from the AES-NI tests - // the second parts directly compare the two, for inputs that are different across lanes - // and "more random" than the standard test vectors - // ideally we'd be using quickcheck here instead - - #[target_feature(enable = "avx2")] - unsafe fn helper_for_256_avx512vaes( - linear: unsafe fn(__m128i, __m128i) -> __m128i, - vectorized: unsafe fn(__m256i, __m256i) -> __m256i, - ) { - let a = _mm256_set_epi64x( - 0xDCB4DB3657BF0B7D, - 0x18DB0601068EDD9F, - 0xB76B908233200DC5, - 0xE478235FA8E22D5E, - ); - let k = _mm256_set_epi64x( - 0x672F6F105A94CEA7, - 0x8298B8FFCA5F829C, - 0xA3927047B3FB61D8, - 0x978093862CDE7187, - ); - let mut a_decomp = [_mm_setzero_si128(); 2]; - a_decomp[0] = _mm256_extracti128_si256::<0>(a); - a_decomp[1] = _mm256_extracti128_si256::<1>(a); - let mut k_decomp = [_mm_setzero_si128(); 2]; - k_decomp[0] = _mm256_extracti128_si256::<0>(k); - k_decomp[1] = _mm256_extracti128_si256::<1>(k); - let r = vectorized(a, k); - let mut e_decomp = [_mm_setzero_si128(); 2]; - for i in 0..2 { - e_decomp[i] = linear(a_decomp[i], k_decomp[i]); - } - assert_eq_m128i(_mm256_extracti128_si256::<0>(r), e_decomp[0]); - assert_eq_m128i(_mm256_extracti128_si256::<1>(r), e_decomp[1]); - } - - #[target_feature(enable = "sse2")] - unsafe fn setup_state_key(broadcast: unsafe fn(__m128i) -> T) -> (T, T) { - // Constants taken from https://msdn.microsoft.com/en-us/library/cc664949.aspx. - let a = _mm_set_epi64x(0x0123456789abcdef, 0x8899aabbccddeeff); - let k = _mm_set_epi64x(0x1133557799bbddff, 0x0022446688aaccee); - (broadcast(a), broadcast(k)) - } - - #[target_feature(enable = "avx2")] - unsafe fn setup_state_key_256() -> (__m256i, __m256i) { - setup_state_key(_mm256_broadcastsi128_si256) - } - - #[target_feature(enable = "avx512f")] - unsafe fn setup_state_key_512() -> (__m512i, __m512i) { - setup_state_key(_mm512_broadcast_i32x4) - } - - #[simd_test(enable = "avx512vaes,avx512vl")] - unsafe fn test_mm256_aesdec_epi128() { - // Constants taken from https://msdn.microsoft.com/en-us/library/cc664949.aspx. - let (a, k) = setup_state_key_256(); - let e = _mm_set_epi64x(0x044e4f5176fec48f, 0xb57ecfa381da39ee); - let e = _mm256_broadcastsi128_si256(e); - let r = _mm256_aesdec_epi128(a, k); - assert_eq_m256i(r, e); - - helper_for_256_avx512vaes(_mm_aesdec_si128, _mm256_aesdec_epi128); - } - - #[simd_test(enable = "avx512vaes,avx512vl")] - unsafe fn test_mm256_aesdeclast_epi128() { - // Constants taken from https://msdn.microsoft.com/en-us/library/cc714178.aspx. - let (a, k) = setup_state_key_256(); - let e = _mm_set_epi64x(0x36cad57d9072bf9e, 0xf210dd981fa4a493); - let e = _mm256_broadcastsi128_si256(e); - let r = _mm256_aesdeclast_epi128(a, k); - assert_eq_m256i(r, e); - - helper_for_256_avx512vaes(_mm_aesdeclast_si128, _mm256_aesdeclast_epi128); - } - - #[simd_test(enable = "avx512vaes,avx512vl")] - unsafe fn test_mm256_aesenc_epi128() { - // Constants taken from https://msdn.microsoft.com/en-us/library/cc664810.aspx. 
- // they are repeated appropriately - let (a, k) = setup_state_key_256(); - let e = _mm_set_epi64x(0x16ab0e57dfc442ed, 0x28e4ee1884504333); - let e = _mm256_broadcastsi128_si256(e); - let r = _mm256_aesenc_epi128(a, k); - assert_eq_m256i(r, e); - - helper_for_256_avx512vaes(_mm_aesenc_si128, _mm256_aesenc_epi128); - } - - #[simd_test(enable = "avx512vaes,avx512vl")] - unsafe fn test_mm256_aesenclast_epi128() { - // Constants taken from https://msdn.microsoft.com/en-us/library/cc714136.aspx. - let (a, k) = setup_state_key_256(); - let e = _mm_set_epi64x(0xb6dd7df25d7ab320, 0x4b04f98cf4c860f8); - let e = _mm256_broadcastsi128_si256(e); - let r = _mm256_aesenclast_epi128(a, k); - assert_eq_m256i(r, e); - - helper_for_256_avx512vaes(_mm_aesenclast_si128, _mm256_aesenclast_epi128); - } - - #[target_feature(enable = "avx512f")] - unsafe fn helper_for_512_avx512vaes( - linear: unsafe fn(__m128i, __m128i) -> __m128i, - vectorized: unsafe fn(__m512i, __m512i) -> __m512i, - ) { - let a = _mm512_set_epi64( - 0xDCB4DB3657BF0B7D, - 0x18DB0601068EDD9F, - 0xB76B908233200DC5, - 0xE478235FA8E22D5E, - 0xAB05CFFA2621154C, - 0x1171B47A186174C9, - 0x8C6B6C0E7595CEC9, - 0xBE3E7D4934E961BD, - ); - let k = _mm512_set_epi64( - 0x672F6F105A94CEA7, - 0x8298B8FFCA5F829C, - 0xA3927047B3FB61D8, - 0x978093862CDE7187, - 0xB1927AB22F31D0EC, - 0xA9A5DA619BE4D7AF, - 0xCA2590F56884FDC6, - 0x19BE9F660038BDB5, - ); - let mut a_decomp = [_mm_setzero_si128(); 4]; - a_decomp[0] = _mm512_extracti32x4_epi32::<0>(a); - a_decomp[1] = _mm512_extracti32x4_epi32::<1>(a); - a_decomp[2] = _mm512_extracti32x4_epi32::<2>(a); - a_decomp[3] = _mm512_extracti32x4_epi32::<3>(a); - let mut k_decomp = [_mm_setzero_si128(); 4]; - k_decomp[0] = _mm512_extracti32x4_epi32::<0>(k); - k_decomp[1] = _mm512_extracti32x4_epi32::<1>(k); - k_decomp[2] = _mm512_extracti32x4_epi32::<2>(k); - k_decomp[3] = _mm512_extracti32x4_epi32::<3>(k); - let r = vectorized(a, k); - let mut e_decomp = [_mm_setzero_si128(); 4]; - for i in 0..4 { - e_decomp[i] = linear(a_decomp[i], k_decomp[i]); - } - assert_eq_m128i(_mm512_extracti32x4_epi32::<0>(r), e_decomp[0]); - assert_eq_m128i(_mm512_extracti32x4_epi32::<1>(r), e_decomp[1]); - assert_eq_m128i(_mm512_extracti32x4_epi32::<2>(r), e_decomp[2]); - assert_eq_m128i(_mm512_extracti32x4_epi32::<3>(r), e_decomp[3]); - } - - #[simd_test(enable = "avx512vaes,avx512f")] - unsafe fn test_mm512_aesdec_epi128() { - // Constants taken from https://msdn.microsoft.com/en-us/library/cc664949.aspx. - let (a, k) = setup_state_key_512(); - let e = _mm_set_epi64x(0x044e4f5176fec48f, 0xb57ecfa381da39ee); - let e = _mm512_broadcast_i32x4(e); - let r = _mm512_aesdec_epi128(a, k); - assert_eq_m512i(r, e); - - helper_for_512_avx512vaes(_mm_aesdec_si128, _mm512_aesdec_epi128); - } - - #[simd_test(enable = "avx512vaes,avx512f")] - unsafe fn test_mm512_aesdeclast_epi128() { - // Constants taken from https://msdn.microsoft.com/en-us/library/cc714178.aspx. - let (a, k) = setup_state_key_512(); - let e = _mm_set_epi64x(0x36cad57d9072bf9e, 0xf210dd981fa4a493); - let e = _mm512_broadcast_i32x4(e); - let r = _mm512_aesdeclast_epi128(a, k); - assert_eq_m512i(r, e); - - helper_for_512_avx512vaes(_mm_aesdeclast_si128, _mm512_aesdeclast_epi128); - } - - #[simd_test(enable = "avx512vaes,avx512f")] - unsafe fn test_mm512_aesenc_epi128() { - // Constants taken from https://msdn.microsoft.com/en-us/library/cc664810.aspx. 
- let (a, k) = setup_state_key_512(); - let e = _mm_set_epi64x(0x16ab0e57dfc442ed, 0x28e4ee1884504333); - let e = _mm512_broadcast_i32x4(e); - let r = _mm512_aesenc_epi128(a, k); - assert_eq_m512i(r, e); - - helper_for_512_avx512vaes(_mm_aesenc_si128, _mm512_aesenc_epi128); - } - - #[simd_test(enable = "avx512vaes,avx512f")] - unsafe fn test_mm512_aesenclast_epi128() { - // Constants taken from https://msdn.microsoft.com/en-us/library/cc714136.aspx. - let (a, k) = setup_state_key_512(); - let e = _mm_set_epi64x(0xb6dd7df25d7ab320, 0x4b04f98cf4c860f8); - let e = _mm512_broadcast_i32x4(e); - let r = _mm512_aesenclast_epi128(a, k); - assert_eq_m512i(r, e); - - helper_for_512_avx512vaes(_mm_aesenclast_si128, _mm512_aesenclast_epi128); - } -} diff --git a/library/stdarch/crates/core_arch/src/x86/avx512vpclmulqdq.rs b/library/stdarch/crates/core_arch/src/x86/avx512vpclmulqdq.rs deleted file mode 100644 index 9bfeb903a..000000000 --- a/library/stdarch/crates/core_arch/src/x86/avx512vpclmulqdq.rs +++ /dev/null @@ -1,258 +0,0 @@ -//! Vectorized Carry-less Multiplication (VCLMUL) -//! -//! The reference is [Intel 64 and IA-32 Architectures Software Developer's -//! Manual Volume 2: Instruction Set Reference, A-Z][intel64_ref] (p. 4-241). -//! -//! [intel64_ref]: http://www.intel.com/content/dam/www/public/us/en/documents/manuals/64-ia-32-architectures-software-developer-instruction-set-reference-manual-325383.pdf - -use crate::core_arch::x86::__m256i; -use crate::core_arch::x86::__m512i; - -#[cfg(test)] -use stdarch_test::assert_instr; - -#[allow(improper_ctypes)] -extern "C" { - #[link_name = "llvm.x86.pclmulqdq.256"] - fn pclmulqdq_256(a: __m256i, round_key: __m256i, imm8: u8) -> __m256i; - #[link_name = "llvm.x86.pclmulqdq.512"] - fn pclmulqdq_512(a: __m512i, round_key: __m512i, imm8: u8) -> __m512i; -} - -// for some odd reason on x86_64 we generate the correct long name instructions -// but on i686 we generate the short name + imm8 -// so we need to special-case on that... - -/// Performs a carry-less multiplication of two 64-bit polynomials over the -/// finite field GF(2^k) - in each of the 4 128-bit lanes. -/// -/// The immediate byte is used for determining which halves of each lane `a` and `b` -/// should be used. Immediate bits other than 0 and 4 are ignored. -/// All lanes share immediate byte. -/// -/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_clmulepi64_epi128) -#[inline] -#[target_feature(enable = "avx512vpclmulqdq,avx512f")] -// technically according to Intel's documentation we don't need avx512f here, however LLVM gets confused otherwise -#[cfg_attr(test, assert_instr(vpclmul, IMM8 = 0))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn _mm512_clmulepi64_epi128(a: __m512i, b: __m512i) -> __m512i { - static_assert_imm8!(IMM8); - pclmulqdq_512(a, b, IMM8 as u8) -} - -/// Performs a carry-less multiplication of two 64-bit polynomials over the -/// finite field GF(2^k) - in each of the 2 128-bit lanes. -/// -/// The immediate byte is used for determining which halves of each lane `a` and `b` -/// should be used. Immediate bits other than 0 and 4 are ignored. -/// All lanes share immediate byte. 
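// [Editor's illustrative sketch, not part of the patch] A scalar model of one
// lane of VPCLMULQDQ: carry-less (XOR-based) multiplication of two 64-bit
// polynomials yielding a 128-bit product. In the intrinsic, IMM8 bit 0 picks
// the low or high qword of each lane of `a`, bit 4 does the same for `b`, and
// every lane shares that choice.
fn clmul_u64(a: u64, b: u64) -> u128 {
    let mut product: u128 = 0;
    for i in 0..64 {
        // XOR in a shifted copy of `a` for every set bit of `b`; no carries propagate.
        if (b >> i) & 1 == 1 {
            product ^= (a as u128) << i;
        }
    }
    product
}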
-/// -/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_clmulepi64_epi128) -#[inline] -#[target_feature(enable = "avx512vpclmulqdq,avx512vl")] -#[cfg_attr(test, assert_instr(vpclmul, IMM8 = 0))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn _mm256_clmulepi64_epi128(a: __m256i, b: __m256i) -> __m256i { - static_assert_imm8!(IMM8); - pclmulqdq_256(a, b, IMM8 as u8) -} - -#[cfg(test)] -mod tests { - // The constants in the tests below are just bit patterns. They should not - // be interpreted as integers; signedness does not make sense for them, but - // __mXXXi happens to be defined in terms of signed integers. - #![allow(overflowing_literals)] - - use stdarch_test::simd_test; - - use crate::core_arch::x86::*; - - macro_rules! verify_kat_pclmul { - ($broadcast:ident, $clmul:ident, $assert:ident) => { - // Constants taken from https://software.intel.com/sites/default/files/managed/72/cc/clmul-wp-rev-2.02-2014-04-20.pdf - let a = _mm_set_epi64x(0x7b5b546573745665, 0x63746f725d53475d); - let a = $broadcast(a); - let b = _mm_set_epi64x(0x4869285368617929, 0x5b477565726f6e5d); - let b = $broadcast(b); - let r00 = _mm_set_epi64x(0x1d4d84c85c3440c0, 0x929633d5d36f0451); - let r00 = $broadcast(r00); - let r01 = _mm_set_epi64x(0x1bd17c8d556ab5a1, 0x7fa540ac2a281315); - let r01 = $broadcast(r01); - let r10 = _mm_set_epi64x(0x1a2bf6db3a30862f, 0xbabf262df4b7d5c9); - let r10 = $broadcast(r10); - let r11 = _mm_set_epi64x(0x1d1e1f2c592e7c45, 0xd66ee03e410fd4ed); - let r11 = $broadcast(r11); - - $assert($clmul::<0x00>(a, b), r00); - $assert($clmul::<0x10>(a, b), r01); - $assert($clmul::<0x01>(a, b), r10); - $assert($clmul::<0x11>(a, b), r11); - - let a0 = _mm_set_epi64x(0x0000000000000000, 0x8000000000000000); - let a0 = $broadcast(a0); - let r = _mm_set_epi64x(0x4000000000000000, 0x0000000000000000); - let r = $broadcast(r); - $assert($clmul::<0x00>(a0, a0), r); - } - } - - macro_rules! unroll { - ($target:ident[4] = $op:ident::<4>($source:ident);) => { - $target[3] = $op::<3>($source); - $target[2] = $op::<2>($source); - unroll! {$target[2] = $op::<2>($source);} - }; - ($target:ident[2] = $op:ident::<2>($source:ident);) => { - $target[1] = $op::<1>($source); - $target[0] = $op::<0>($source); - }; - (assert_eq_m128i($op:ident::<4>($vec_res:ident),$lin_res:ident[4]);) => { - assert_eq_m128i($op::<3>($vec_res), $lin_res[3]); - assert_eq_m128i($op::<2>($vec_res), $lin_res[2]); - unroll! {assert_eq_m128i($op::<2>($vec_res),$lin_res[2]);} - }; - (assert_eq_m128i($op:ident::<2>($vec_res:ident),$lin_res:ident[2]);) => { - assert_eq_m128i($op::<1>($vec_res), $lin_res[1]); - assert_eq_m128i($op::<0>($vec_res), $lin_res[0]); - }; - } - - // this function tests one of the possible 4 instances - // with different inputs across lanes - #[target_feature(enable = "avx512vpclmulqdq,avx512f")] - unsafe fn verify_512_helper( - linear: unsafe fn(__m128i, __m128i) -> __m128i, - vectorized: unsafe fn(__m512i, __m512i) -> __m512i, - ) { - let a = _mm512_set_epi64( - 0xDCB4DB3657BF0B7D, - 0x18DB0601068EDD9F, - 0xB76B908233200DC5, - 0xE478235FA8E22D5E, - 0xAB05CFFA2621154C, - 0x1171B47A186174C9, - 0x8C6B6C0E7595CEC9, - 0xBE3E7D4934E961BD, - ); - let b = _mm512_set_epi64( - 0x672F6F105A94CEA7, - 0x8298B8FFCA5F829C, - 0xA3927047B3FB61D8, - 0x978093862CDE7187, - 0xB1927AB22F31D0EC, - 0xA9A5DA619BE4D7AF, - 0xCA2590F56884FDC6, - 0x19BE9F660038BDB5, - ); - - let mut a_decomp = [_mm_setzero_si128(); 4]; - unroll! 
{a_decomp[4] = _mm512_extracti32x4_epi32::<4>(a);} - let mut b_decomp = [_mm_setzero_si128(); 4]; - unroll! {b_decomp[4] = _mm512_extracti32x4_epi32::<4>(b);} - - let r = vectorized(a, b); - let mut e_decomp = [_mm_setzero_si128(); 4]; - for i in 0..4 { - e_decomp[i] = linear(a_decomp[i], b_decomp[i]); - } - unroll! {assert_eq_m128i(_mm512_extracti32x4_epi32::<4>(r),e_decomp[4]);} - } - - // this function tests one of the possible 4 instances - // with different inputs across lanes for the VL version - #[target_feature(enable = "avx512vpclmulqdq,avx512vl")] - unsafe fn verify_256_helper( - linear: unsafe fn(__m128i, __m128i) -> __m128i, - vectorized: unsafe fn(__m256i, __m256i) -> __m256i, - ) { - let a = _mm512_set_epi64( - 0xDCB4DB3657BF0B7D, - 0x18DB0601068EDD9F, - 0xB76B908233200DC5, - 0xE478235FA8E22D5E, - 0xAB05CFFA2621154C, - 0x1171B47A186174C9, - 0x8C6B6C0E7595CEC9, - 0xBE3E7D4934E961BD, - ); - let b = _mm512_set_epi64( - 0x672F6F105A94CEA7, - 0x8298B8FFCA5F829C, - 0xA3927047B3FB61D8, - 0x978093862CDE7187, - 0xB1927AB22F31D0EC, - 0xA9A5DA619BE4D7AF, - 0xCA2590F56884FDC6, - 0x19BE9F660038BDB5, - ); - - let mut a_decomp = [_mm_setzero_si128(); 2]; - unroll! {a_decomp[2] = _mm512_extracti32x4_epi32::<2>(a);} - let mut b_decomp = [_mm_setzero_si128(); 2]; - unroll! {b_decomp[2] = _mm512_extracti32x4_epi32::<2>(b);} - - let r = vectorized( - _mm512_extracti64x4_epi64::<0>(a), - _mm512_extracti64x4_epi64::<0>(b), - ); - let mut e_decomp = [_mm_setzero_si128(); 2]; - for i in 0..2 { - e_decomp[i] = linear(a_decomp[i], b_decomp[i]); - } - unroll! {assert_eq_m128i(_mm256_extracti128_si256::<2>(r),e_decomp[2]);} - } - - #[simd_test(enable = "avx512vpclmulqdq,avx512f")] - unsafe fn test_mm512_clmulepi64_epi128() { - verify_kat_pclmul!( - _mm512_broadcast_i32x4, - _mm512_clmulepi64_epi128, - assert_eq_m512i - ); - - verify_512_helper( - |a, b| _mm_clmulepi64_si128::<0x00>(a, b), - |a, b| _mm512_clmulepi64_epi128::<0x00>(a, b), - ); - verify_512_helper( - |a, b| _mm_clmulepi64_si128::<0x01>(a, b), - |a, b| _mm512_clmulepi64_epi128::<0x01>(a, b), - ); - verify_512_helper( - |a, b| _mm_clmulepi64_si128::<0x10>(a, b), - |a, b| _mm512_clmulepi64_epi128::<0x10>(a, b), - ); - verify_512_helper( - |a, b| _mm_clmulepi64_si128::<0x11>(a, b), - |a, b| _mm512_clmulepi64_epi128::<0x11>(a, b), - ); - } - - #[simd_test(enable = "avx512vpclmulqdq,avx512vl")] - unsafe fn test_mm256_clmulepi64_epi128() { - verify_kat_pclmul!( - _mm256_broadcastsi128_si256, - _mm256_clmulepi64_epi128, - assert_eq_m256i - ); - - verify_256_helper( - |a, b| _mm_clmulepi64_si128::<0x00>(a, b), - |a, b| _mm256_clmulepi64_epi128::<0x00>(a, b), - ); - verify_256_helper( - |a, b| _mm_clmulepi64_si128::<0x01>(a, b), - |a, b| _mm256_clmulepi64_epi128::<0x01>(a, b), - ); - verify_256_helper( - |a, b| _mm_clmulepi64_si128::<0x10>(a, b), - |a, b| _mm256_clmulepi64_epi128::<0x10>(a, b), - ); - verify_256_helper( - |a, b| _mm_clmulepi64_si128::<0x11>(a, b), - |a, b| _mm256_clmulepi64_epi128::<0x11>(a, b), - ); - } -} diff --git a/library/stdarch/crates/core_arch/src/x86/gfni.rs b/library/stdarch/crates/core_arch/src/x86/gfni.rs new file mode 100644 index 000000000..679b2548a --- /dev/null +++ b/library/stdarch/crates/core_arch/src/x86/gfni.rs @@ -0,0 +1,1492 @@ +//! Galois Field New Instructions (GFNI) +//! +//! The intrinsics here correspond to those in the `immintrin.h` C header. +//! +//! The reference is [Intel 64 and IA-32 Architectures Software Developer's +//! Manual Volume 2: Instruction Set Reference, A-Z][intel64_ref]. +//! +//! 
[intel64_ref]: http://www.intel.de/content/dam/www/public/us/en/documents/manuals/64-ia-32-architectures-software-developer-instruction-set-reference-manual-325383.pdf + +use crate::core_arch::simd::i8x16; +use crate::core_arch::simd::i8x32; +use crate::core_arch::simd::i8x64; +use crate::core_arch::simd_llvm::simd_select_bitmask; +use crate::core_arch::x86::__m128i; +use crate::core_arch::x86::__m256i; +use crate::core_arch::x86::__m512i; +use crate::core_arch::x86::__mmask16; +use crate::core_arch::x86::__mmask32; +use crate::core_arch::x86::__mmask64; +use crate::core_arch::x86::_mm256_setzero_si256; +use crate::core_arch::x86::_mm512_setzero_si512; +use crate::core_arch::x86::_mm_setzero_si128; +use crate::core_arch::x86::m128iExt; +use crate::core_arch::x86::m256iExt; +use crate::core_arch::x86::m512iExt; +use crate::mem::transmute; + +#[cfg(test)] +use stdarch_test::assert_instr; + +#[allow(improper_ctypes)] +extern "C" { + #[link_name = "llvm.x86.vgf2p8affineinvqb.512"] + fn vgf2p8affineinvqb_512(x: i8x64, a: i8x64, imm8: u8) -> i8x64; + #[link_name = "llvm.x86.vgf2p8affineinvqb.256"] + fn vgf2p8affineinvqb_256(x: i8x32, a: i8x32, imm8: u8) -> i8x32; + #[link_name = "llvm.x86.vgf2p8affineinvqb.128"] + fn vgf2p8affineinvqb_128(x: i8x16, a: i8x16, imm8: u8) -> i8x16; + #[link_name = "llvm.x86.vgf2p8affineqb.512"] + fn vgf2p8affineqb_512(x: i8x64, a: i8x64, imm8: u8) -> i8x64; + #[link_name = "llvm.x86.vgf2p8affineqb.256"] + fn vgf2p8affineqb_256(x: i8x32, a: i8x32, imm8: u8) -> i8x32; + #[link_name = "llvm.x86.vgf2p8affineqb.128"] + fn vgf2p8affineqb_128(x: i8x16, a: i8x16, imm8: u8) -> i8x16; + #[link_name = "llvm.x86.vgf2p8mulb.512"] + fn vgf2p8mulb_512(a: i8x64, b: i8x64) -> i8x64; + #[link_name = "llvm.x86.vgf2p8mulb.256"] + fn vgf2p8mulb_256(a: i8x32, b: i8x32) -> i8x32; + #[link_name = "llvm.x86.vgf2p8mulb.128"] + fn vgf2p8mulb_128(a: i8x16, b: i8x16) -> i8x16; +} + +// LLVM requires AVX512BW for a lot of these instructions, see +// https://github.com/llvm/llvm-project/blob/release/9.x/clang/include/clang/Basic/BuiltinsX86.def#L457 +// however our tests also require the target feature list to match Intel's +// which *doesn't* require AVX512BW but only AVX512F, so we added the redundant AVX512F +// requirement (for now) +// also see +// https://github.com/llvm/llvm-project/blob/release/9.x/clang/lib/Headers/gfniintrin.h +// for forcing GFNI, BW and optionally VL extension + +/// Performs a multiplication in GF(2^8) on the packed bytes. +/// The field is in polynomial representation with the reduction polynomial +/// x^8 + x^4 + x^3 + x + 1. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_gf2p8mul_epi8) +#[inline] +#[target_feature(enable = "gfni,avx512bw,avx512f")] +#[cfg_attr(test, assert_instr(vgf2p8mulb))] +pub unsafe fn _mm512_gf2p8mul_epi8(a: __m512i, b: __m512i) -> __m512i { + transmute(vgf2p8mulb_512(a.as_i8x64(), b.as_i8x64())) +} + +/// Performs a multiplication in GF(2^8) on the packed bytes. +/// The field is in polynomial representation with the reduction polynomial +/// x^8 + x^4 + x^3 + x + 1. +/// +/// Uses the writemask in k - elements are copied from src if the corresponding mask bit is not set. +/// Otherwise the computation result is written into the result. 
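// [Editor's illustrative sketch, not part of the patch] A scalar model of what
// vgf2p8mulb computes for a single byte pair: carry-less multiplication
// reduced modulo x^8 + x^4 + x^3 + x + 1 (0x11B). It is equivalent to the
// `mulbyte` reference helper in the tests further down in this file.
fn gf2p8_mul(a: u8, b: u8) -> u8 {
    let (mut a, mut b, mut acc) = (a as u16, b, 0u16);
    for _ in 0..8 {
        // XOR in `a` whenever the low bit of `b` is set (carry-less multiply).
        if b & 1 == 1 {
            acc ^= a;
        }
        b >>= 1;
        a <<= 1;
        // Reduce as soon as `a` overflows 8 bits.
        if a & 0x100 != 0 {
            a ^= 0x11B;
        }
    }
    acc as u8
}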
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_gf2p8mul_epi8) +#[inline] +#[target_feature(enable = "gfni,avx512bw,avx512f")] +#[cfg_attr(test, assert_instr(vgf2p8mulb))] +pub unsafe fn _mm512_mask_gf2p8mul_epi8( + src: __m512i, + k: __mmask64, + a: __m512i, + b: __m512i, +) -> __m512i { + transmute(simd_select_bitmask( + k, + vgf2p8mulb_512(a.as_i8x64(), b.as_i8x64()), + src.as_i8x64(), + )) +} + +/// Performs a multiplication in GF(2^8) on the packed bytes. +/// The field is in polynomial representation with the reduction polynomial +/// x^8 + x^4 + x^3 + x + 1. +/// +/// Uses the writemask in k - elements are zeroed in the result if the corresponding mask bit is not set. +/// Otherwise the computation result is written into the result. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_gf2p8mul_epi8) +#[inline] +#[target_feature(enable = "gfni,avx512bw,avx512f")] +#[cfg_attr(test, assert_instr(vgf2p8mulb))] +pub unsafe fn _mm512_maskz_gf2p8mul_epi8(k: __mmask64, a: __m512i, b: __m512i) -> __m512i { + let zero = _mm512_setzero_si512().as_i8x64(); + transmute(simd_select_bitmask( + k, + vgf2p8mulb_512(a.as_i8x64(), b.as_i8x64()), + zero, + )) +} + +/// Performs a multiplication in GF(2^8) on the packed bytes. +/// The field is in polynomial representation with the reduction polynomial +/// x^8 + x^4 + x^3 + x + 1. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_gf2p8mul_epi8) +#[inline] +#[target_feature(enable = "gfni,avx")] +#[cfg_attr(test, assert_instr(vgf2p8mulb))] +pub unsafe fn _mm256_gf2p8mul_epi8(a: __m256i, b: __m256i) -> __m256i { + transmute(vgf2p8mulb_256(a.as_i8x32(), b.as_i8x32())) +} + +/// Performs a multiplication in GF(2^8) on the packed bytes. +/// The field is in polynomial representation with the reduction polynomial +/// x^8 + x^4 + x^3 + x + 1. +/// +/// Uses the writemask in k - elements are copied from src if the corresponding mask bit is not set. +/// Otherwise the computation result is written into the result. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_gf2p8mul_epi8) +#[inline] +#[target_feature(enable = "gfni,avx512bw,avx512vl")] +#[cfg_attr(test, assert_instr(vgf2p8mulb))] +pub unsafe fn _mm256_mask_gf2p8mul_epi8( + src: __m256i, + k: __mmask32, + a: __m256i, + b: __m256i, +) -> __m256i { + transmute(simd_select_bitmask( + k, + vgf2p8mulb_256(a.as_i8x32(), b.as_i8x32()), + src.as_i8x32(), + )) +} + +/// Performs a multiplication in GF(2^8) on the packed bytes. +/// The field is in polynomial representation with the reduction polynomial +/// x^8 + x^4 + x^3 + x + 1. +/// +/// Uses the writemask in k - elements are zeroed in the result if the corresponding mask bit is not set. +/// Otherwise the computation result is written into the result. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_gf2p8mul_epi8) +#[inline] +#[target_feature(enable = "gfni,avx512bw,avx512vl")] +#[cfg_attr(test, assert_instr(vgf2p8mulb))] +pub unsafe fn _mm256_maskz_gf2p8mul_epi8(k: __mmask32, a: __m256i, b: __m256i) -> __m256i { + let zero = _mm256_setzero_si256().as_i8x32(); + transmute(simd_select_bitmask( + k, + vgf2p8mulb_256(a.as_i8x32(), b.as_i8x32()), + zero, + )) +} + +/// Performs a multiplication in GF(2^8) on the packed bytes. 
+/// The field is in polynomial representation with the reduction polynomial +/// x^8 + x^4 + x^3 + x + 1. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_gf2p8mul_epi8) +#[inline] +#[target_feature(enable = "gfni")] +#[cfg_attr(test, assert_instr(gf2p8mulb))] +pub unsafe fn _mm_gf2p8mul_epi8(a: __m128i, b: __m128i) -> __m128i { + transmute(vgf2p8mulb_128(a.as_i8x16(), b.as_i8x16())) +} + +/// Performs a multiplication in GF(2^8) on the packed bytes. +/// The field is in polynomial representation with the reduction polynomial +/// x^8 + x^4 + x^3 + x + 1. +/// +/// Uses the writemask in k - elements are copied from src if the corresponding mask bit is not set. +/// Otherwise the computation result is written into the result. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_gf2p8mul_epi8) +#[inline] +#[target_feature(enable = "gfni,avx512bw,avx512vl")] +#[cfg_attr(test, assert_instr(vgf2p8mulb))] +pub unsafe fn _mm_mask_gf2p8mul_epi8( + src: __m128i, + k: __mmask16, + a: __m128i, + b: __m128i, +) -> __m128i { + transmute(simd_select_bitmask( + k, + vgf2p8mulb_128(a.as_i8x16(), b.as_i8x16()), + src.as_i8x16(), + )) +} + +/// Performs a multiplication in GF(2^8) on the packed bytes. +/// The field is in polynomial representation with the reduction polynomial +/// x^8 + x^4 + x^3 + x + 1. +/// +/// Uses the writemask in k - elements are zeroed in the result if the corresponding mask bit is not set. +/// Otherwise the computation result is written into the result. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_gf2p8mul_epi8) +#[inline] +#[target_feature(enable = "gfni,avx512bw,avx512vl")] +#[cfg_attr(test, assert_instr(vgf2p8mulb))] +pub unsafe fn _mm_maskz_gf2p8mul_epi8(k: __mmask16, a: __m128i, b: __m128i) -> __m128i { + let zero = _mm_setzero_si128().as_i8x16(); + transmute(simd_select_bitmask( + k, + vgf2p8mulb_128(a.as_i8x16(), b.as_i8x16()), + zero, + )) +} + +/// Performs an affine transformation on the packed bytes in x. +/// That is computes a*x+b over the Galois Field 2^8 for each packed byte with a being a 8x8 bit matrix +/// and b being a constant 8-bit immediate value. +/// Each pack of 8 bytes in x is paired with the 64-bit word at the same position in a. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_gf2p8affine_epi8) +#[inline] +#[target_feature(enable = "gfni,avx512bw,avx512f")] +#[cfg_attr(test, assert_instr(vgf2p8affineqb, B = 0))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn _mm512_gf2p8affine_epi64_epi8(x: __m512i, a: __m512i) -> __m512i { + static_assert_imm8!(B); + let b = B as u8; + let x = x.as_i8x64(); + let a = a.as_i8x64(); + let r = vgf2p8affineqb_512(x, a, b); + transmute(r) +} + +/// Performs an affine transformation on the packed bytes in x. +/// That is computes a*x+b over the Galois Field 2^8 for each packed byte with a being a 8x8 bit matrix +/// and b being a constant 8-bit immediate value. +/// Each pack of 8 bytes in x is paired with the 64-bit word at the same position in a. +/// +/// Uses the writemask in k - elements are zeroed in the result if the corresponding mask bit is not set. +/// Otherwise the computation result is written into the result. 
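// [Editor's illustrative sketch, not part of the patch] The mask/maskz variants
// above all follow the same per-byte selection rule, shown here as a scalar
// model over a 512-bit register: bit i of the writemask chooses between the
// computed byte and the fallback (the `src` byte for mask, zero for maskz).
fn apply_writemask64(k: u64, computed: &[u8; 64], fallback: &[u8; 64]) -> [u8; 64] {
    let mut out = [0u8; 64];
    for i in 0..64 {
        out[i] = if (k >> i) & 1 == 1 { computed[i] } else { fallback[i] };
    }
    out
}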
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_gf2p8affine_epi8) +#[inline] +#[target_feature(enable = "gfni,avx512bw,avx512f")] +#[cfg_attr(test, assert_instr(vgf2p8affineqb, B = 0))] +#[rustc_legacy_const_generics(3)] +pub unsafe fn _mm512_maskz_gf2p8affine_epi64_epi8( + k: __mmask64, + x: __m512i, + a: __m512i, +) -> __m512i { + static_assert_imm8!(B); + let b = B as u8; + let zero = _mm512_setzero_si512().as_i8x64(); + let x = x.as_i8x64(); + let a = a.as_i8x64(); + let r = vgf2p8affineqb_512(x, a, b); + transmute(simd_select_bitmask(k, r, zero)) +} + +/// Performs an affine transformation on the packed bytes in x. +/// That is computes a*x+b over the Galois Field 2^8 for each packed byte with a being a 8x8 bit matrix +/// and b being a constant 8-bit immediate value. +/// Each pack of 8 bytes in x is paired with the 64-bit word at the same position in a. +/// +/// Uses the writemask in k - elements are copied from src if the corresponding mask bit is not set. +/// Otherwise the computation result is written into the result. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_gf2p8affine_epi8) +#[inline] +#[target_feature(enable = "gfni,avx512bw,avx512f")] +#[cfg_attr(test, assert_instr(vgf2p8affineqb, B = 0))] +#[rustc_legacy_const_generics(4)] +pub unsafe fn _mm512_mask_gf2p8affine_epi64_epi8( + src: __m512i, + k: __mmask64, + x: __m512i, + a: __m512i, +) -> __m512i { + static_assert_imm8!(B); + let b = B as u8; + let x = x.as_i8x64(); + let a = a.as_i8x64(); + let r = vgf2p8affineqb_512(x, a, b); + transmute(simd_select_bitmask(k, r, src.as_i8x64())) +} + +/// Performs an affine transformation on the packed bytes in x. +/// That is computes a*x+b over the Galois Field 2^8 for each packed byte with a being a 8x8 bit matrix +/// and b being a constant 8-bit immediate value. +/// Each pack of 8 bytes in x is paired with the 64-bit word at the same position in a. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_gf2p8affine_epi8) +#[inline] +#[target_feature(enable = "gfni,avx")] +#[cfg_attr(test, assert_instr(vgf2p8affineqb, B = 0))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn _mm256_gf2p8affine_epi64_epi8(x: __m256i, a: __m256i) -> __m256i { + static_assert_imm8!(B); + let b = B as u8; + let x = x.as_i8x32(); + let a = a.as_i8x32(); + let r = vgf2p8affineqb_256(x, a, b); + transmute(r) +} + +/// Performs an affine transformation on the packed bytes in x. +/// That is computes a*x+b over the Galois Field 2^8 for each packed byte with a being a 8x8 bit matrix +/// and b being a constant 8-bit immediate value. +/// Each pack of 8 bytes in x is paired with the 64-bit word at the same position in a. +/// +/// Uses the writemask in k - elements are zeroed in the result if the corresponding mask bit is not set. +/// Otherwise the computation result is written into the result. 
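// [Editor's illustrative sketch, not part of the patch] A scalar model of the
// affine step for one byte: each output bit is the parity (GF(2) dot product)
// of `x` with one row of the 8x8 bit matrix packed into a 64-bit word, with the
// immediate `b` XORed on top. It mirrors the `mat_vec_multiply_affine`
// reference helper used by the tests in this file.
fn gf2p8_affine_byte(matrix: u64, x: u8, b: u8) -> u8 {
    let rows = matrix.to_le_bytes();
    let mut out = 0u8;
    for bit in 0..8 {
        // Row `bit` (stored little-endian) produces result bit 7 - bit.
        let parity = (rows[bit] & x).count_ones() as u8 & 1;
        out |= parity << (7 - bit);
    }
    out ^ b
}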
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_gf2p8affine_epi8) +#[inline] +#[target_feature(enable = "gfni,avx512bw,avx512vl")] +#[cfg_attr(test, assert_instr(vgf2p8affineqb, B = 0))] +#[rustc_legacy_const_generics(3)] +pub unsafe fn _mm256_maskz_gf2p8affine_epi64_epi8( + k: __mmask32, + x: __m256i, + a: __m256i, +) -> __m256i { + static_assert_imm8!(B); + let b = B as u8; + let zero = _mm256_setzero_si256().as_i8x32(); + let x = x.as_i8x32(); + let a = a.as_i8x32(); + let r = vgf2p8affineqb_256(x, a, b); + transmute(simd_select_bitmask(k, r, zero)) +} + +/// Performs an affine transformation on the packed bytes in x. +/// That is computes a*x+b over the Galois Field 2^8 for each packed byte with a being a 8x8 bit matrix +/// and b being a constant 8-bit immediate value. +/// Each pack of 8 bytes in x is paired with the 64-bit word at the same position in a. +/// +/// Uses the writemask in k - elements are copied from src if the corresponding mask bit is not set. +/// Otherwise the computation result is written into the result. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_gf2p8affine_epi8) +#[inline] +#[target_feature(enable = "gfni,avx512bw,avx512vl")] +#[cfg_attr(test, assert_instr(vgf2p8affineqb, B = 0))] +#[rustc_legacy_const_generics(4)] +pub unsafe fn _mm256_mask_gf2p8affine_epi64_epi8( + src: __m256i, + k: __mmask32, + x: __m256i, + a: __m256i, +) -> __m256i { + static_assert_imm8!(B); + let b = B as u8; + let x = x.as_i8x32(); + let a = a.as_i8x32(); + let r = vgf2p8affineqb_256(x, a, b); + transmute(simd_select_bitmask(k, r, src.as_i8x32())) +} + +/// Performs an affine transformation on the packed bytes in x. +/// That is computes a*x+b over the Galois Field 2^8 for each packed byte with a being a 8x8 bit matrix +/// and b being a constant 8-bit immediate value. +/// Each pack of 8 bytes in x is paired with the 64-bit word at the same position in a. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_gf2p8affine_epi8) +#[inline] +#[target_feature(enable = "gfni")] +#[cfg_attr(test, assert_instr(gf2p8affineqb, B = 0))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn _mm_gf2p8affine_epi64_epi8(x: __m128i, a: __m128i) -> __m128i { + static_assert_imm8!(B); + let b = B as u8; + let x = x.as_i8x16(); + let a = a.as_i8x16(); + let r = vgf2p8affineqb_128(x, a, b); + transmute(r) +} + +/// Performs an affine transformation on the packed bytes in x. +/// That is computes a*x+b over the Galois Field 2^8 for each packed byte with a being a 8x8 bit matrix +/// and b being a constant 8-bit immediate value. +/// Each pack of 8 bytes in x is paired with the 64-bit word at the same position in a. +/// +/// Uses the writemask in k - elements are zeroed in the result if the corresponding mask bit is not set. +/// Otherwise the computation result is written into the result. 
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_gf2p8affine_epi8) +#[inline] +#[target_feature(enable = "gfni,avx512bw,avx512vl")] +#[cfg_attr(test, assert_instr(vgf2p8affineqb, B = 0))] +#[rustc_legacy_const_generics(3)] +pub unsafe fn _mm_maskz_gf2p8affine_epi64_epi8( + k: __mmask16, + x: __m128i, + a: __m128i, +) -> __m128i { + static_assert_imm8!(B); + let b = B as u8; + let zero = _mm_setzero_si128().as_i8x16(); + let x = x.as_i8x16(); + let a = a.as_i8x16(); + let r = vgf2p8affineqb_128(x, a, b); + transmute(simd_select_bitmask(k, r, zero)) +} + +/// Performs an affine transformation on the packed bytes in x. +/// That is computes a*x+b over the Galois Field 2^8 for each packed byte with a being a 8x8 bit matrix +/// and b being a constant 8-bit immediate value. +/// Each pack of 8 bytes in x is paired with the 64-bit word at the same position in a. +/// +/// Uses the writemask in k - elements are copied from src if the corresponding mask bit is not set. +/// Otherwise the computation result is written into the result. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_gf2p8affine_epi8) +#[inline] +#[target_feature(enable = "gfni,avx512bw,avx512vl")] +#[cfg_attr(test, assert_instr(vgf2p8affineqb, B = 0))] +#[rustc_legacy_const_generics(4)] +pub unsafe fn _mm_mask_gf2p8affine_epi64_epi8( + src: __m128i, + k: __mmask16, + x: __m128i, + a: __m128i, +) -> __m128i { + static_assert_imm8!(B); + let b = B as u8; + let x = x.as_i8x16(); + let a = a.as_i8x16(); + let r = vgf2p8affineqb_128(x, a, b); + transmute(simd_select_bitmask(k, r, src.as_i8x16())) +} + +/// Performs an affine transformation on the inverted packed bytes in x. +/// That is computes a*inv(x)+b over the Galois Field 2^8 for each packed byte with a being a 8x8 bit matrix +/// and b being a constant 8-bit immediate value. +/// The inverse of a byte is defined with respect to the reduction polynomial x^8+x^4+x^3+x+1. +/// The inverse of 0 is 0. +/// Each pack of 8 bytes in x is paired with the 64-bit word at the same position in a. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_gf2p8affineinv_epi64_epi8) +#[inline] +#[target_feature(enable = "gfni,avx512bw,avx512f")] +#[cfg_attr(test, assert_instr(vgf2p8affineinvqb, B = 0))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn _mm512_gf2p8affineinv_epi64_epi8(x: __m512i, a: __m512i) -> __m512i { + static_assert_imm8!(B); + let b = B as u8; + let x = x.as_i8x64(); + let a = a.as_i8x64(); + let r = vgf2p8affineinvqb_512(x, a, b); + transmute(r) +} + +/// Performs an affine transformation on the inverted packed bytes in x. +/// That is computes a*inv(x)+b over the Galois Field 2^8 for each packed byte with a being a 8x8 bit matrix +/// and b being a constant 8-bit immediate value. +/// The inverse of a byte is defined with respect to the reduction polynomial x^8+x^4+x^3+x+1. +/// The inverse of 0 is 0. +/// Each pack of 8 bytes in x is paired with the 64-bit word at the same position in a. +/// +/// Uses the writemask in k - elements are zeroed in the result if the corresponding mask bit is not set. +/// Otherwise the computation result is written into the result. 
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_gf2p8affineinv_epi64_epi8) +#[inline] +#[target_feature(enable = "gfni,avx512bw,avx512f")] +#[cfg_attr(test, assert_instr(vgf2p8affineinvqb, B = 0))] +#[rustc_legacy_const_generics(3)] +pub unsafe fn _mm512_maskz_gf2p8affineinv_epi64_epi8( + k: __mmask64, + x: __m512i, + a: __m512i, +) -> __m512i { + static_assert_imm8!(B); + let b = B as u8; + let zero = _mm512_setzero_si512().as_i8x64(); + let x = x.as_i8x64(); + let a = a.as_i8x64(); + let r = vgf2p8affineinvqb_512(x, a, b); + transmute(simd_select_bitmask(k, r, zero)) +} + +/// Performs an affine transformation on the inverted packed bytes in x. +/// That is computes a*inv(x)+b over the Galois Field 2^8 for each packed byte with a being a 8x8 bit matrix +/// and b being a constant 8-bit immediate value. +/// The inverse of a byte is defined with respect to the reduction polynomial x^8+x^4+x^3+x+1. +/// The inverse of 0 is 0. +/// Each pack of 8 bytes in x is paired with the 64-bit word at the same position in a. +/// +/// Uses the writemask in k - elements are copied from src if the corresponding mask bit is not set. +/// Otherwise the computation result is written into the result. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_gf2p8affineinv_epi64_epi8) +#[inline] +#[target_feature(enable = "gfni,avx512bw,avx512f")] +#[cfg_attr(test, assert_instr(vgf2p8affineinvqb, B = 0))] +#[rustc_legacy_const_generics(4)] +pub unsafe fn _mm512_mask_gf2p8affineinv_epi64_epi8( + src: __m512i, + k: __mmask64, + x: __m512i, + a: __m512i, +) -> __m512i { + static_assert_imm8!(B); + let b = B as u8; + let x = x.as_i8x64(); + let a = a.as_i8x64(); + let r = vgf2p8affineinvqb_512(x, a, b); + transmute(simd_select_bitmask(k, r, src.as_i8x64())) +} + +/// Performs an affine transformation on the inverted packed bytes in x. +/// That is computes a*inv(x)+b over the Galois Field 2^8 for each packed byte with a being a 8x8 bit matrix +/// and b being a constant 8-bit immediate value. +/// The inverse of a byte is defined with respect to the reduction polynomial x^8+x^4+x^3+x+1. +/// The inverse of 0 is 0. +/// Each pack of 8 bytes in x is paired with the 64-bit word at the same position in a. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_gf2p8affineinv_epi64_epi8) +#[inline] +#[target_feature(enable = "gfni,avx")] +#[cfg_attr(test, assert_instr(vgf2p8affineinvqb, B = 0))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn _mm256_gf2p8affineinv_epi64_epi8(x: __m256i, a: __m256i) -> __m256i { + static_assert_imm8!(B); + let b = B as u8; + let x = x.as_i8x32(); + let a = a.as_i8x32(); + let r = vgf2p8affineinvqb_256(x, a, b); + transmute(r) +} + +/// Performs an affine transformation on the inverted packed bytes in x. +/// That is computes a*inv(x)+b over the Galois Field 2^8 for each packed byte with a being a 8x8 bit matrix +/// and b being a constant 8-bit immediate value. +/// The inverse of a byte is defined with respect to the reduction polynomial x^8+x^4+x^3+x+1. +/// The inverse of 0 is 0. +/// Each pack of 8 bytes in x is paired with the 64-bit word at the same position in a. +/// +/// Uses the writemask in k - elements are zeroed in the result if the corresponding mask bit is not set. +/// Otherwise the computation result is written into the result. 
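// [Editor's illustrative sketch, not part of the patch] The "affineinv" forms
// first invert each byte in GF(2^8) (reduction polynomial x^8+x^4+x^3+x+1, with
// inv(0) defined as 0) and then apply the same affine step as above. A brute
// force scalar model of the inversion, which is cheap since the field has only
// 256 elements:
fn gf2p8_inv(x: u8) -> u8 {
    // Local GF(2^8) multiply, identical to the gf2p8_mul sketch earlier.
    fn mul(a: u8, b: u8) -> u8 {
        let (mut a, mut b, mut acc) = (a as u16, b, 0u16);
        for _ in 0..8 {
            if b & 1 == 1 {
                acc ^= a;
            }
            b >>= 1;
            a <<= 1;
            if a & 0x100 != 0 {
                a ^= 0x11B;
            }
        }
        acc as u8
    }
    if x == 0 {
        return 0;
    }
    // Search for the multiplicative inverse; it always exists for nonzero x.
    (1..=255u8).find(|&c| mul(x, c) == 1).unwrap()
}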
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_gf2p8affineinv_epi64_epi8) +#[inline] +#[target_feature(enable = "gfni,avx512bw,avx512vl")] +#[cfg_attr(test, assert_instr(vgf2p8affineinvqb, B = 0))] +#[rustc_legacy_const_generics(3)] +pub unsafe fn _mm256_maskz_gf2p8affineinv_epi64_epi8( + k: __mmask32, + x: __m256i, + a: __m256i, +) -> __m256i { + static_assert_imm8!(B); + let b = B as u8; + let zero = _mm256_setzero_si256().as_i8x32(); + let x = x.as_i8x32(); + let a = a.as_i8x32(); + let r = vgf2p8affineinvqb_256(x, a, b); + transmute(simd_select_bitmask(k, r, zero)) +} + +/// Performs an affine transformation on the inverted packed bytes in x. +/// That is computes a*inv(x)+b over the Galois Field 2^8 for each packed byte with a being a 8x8 bit matrix +/// and b being a constant 8-bit immediate value. +/// The inverse of a byte is defined with respect to the reduction polynomial x^8+x^4+x^3+x+1. +/// The inverse of 0 is 0. +/// Each pack of 8 bytes in x is paired with the 64-bit word at the same position in a. +/// +/// Uses the writemask in k - elements are copied from src if the corresponding mask bit is not set. +/// Otherwise the computation result is written into the result. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_gf2p8affineinv_epi64_epi8) +#[inline] +#[target_feature(enable = "gfni,avx512bw,avx512vl")] +#[cfg_attr(test, assert_instr(vgf2p8affineinvqb, B = 0))] +#[rustc_legacy_const_generics(4)] +pub unsafe fn _mm256_mask_gf2p8affineinv_epi64_epi8( + src: __m256i, + k: __mmask32, + x: __m256i, + a: __m256i, +) -> __m256i { + static_assert_imm8!(B); + let b = B as u8; + let x = x.as_i8x32(); + let a = a.as_i8x32(); + let r = vgf2p8affineinvqb_256(x, a, b); + transmute(simd_select_bitmask(k, r, src.as_i8x32())) +} + +/// Performs an affine transformation on the inverted packed bytes in x. +/// That is computes a*inv(x)+b over the Galois Field 2^8 for each packed byte with a being a 8x8 bit matrix +/// and b being a constant 8-bit immediate value. +/// The inverse of a byte is defined with respect to the reduction polynomial x^8+x^4+x^3+x+1. +/// The inverse of 0 is 0. +/// Each pack of 8 bytes in x is paired with the 64-bit word at the same position in a. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_gf2p8affineinv_epi64_epi8) +#[inline] +#[target_feature(enable = "gfni")] +#[cfg_attr(test, assert_instr(gf2p8affineinvqb, B = 0))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn _mm_gf2p8affineinv_epi64_epi8(x: __m128i, a: __m128i) -> __m128i { + static_assert_imm8!(B); + let b = B as u8; + let x = x.as_i8x16(); + let a = a.as_i8x16(); + let r = vgf2p8affineinvqb_128(x, a, b); + transmute(r) +} + +/// Performs an affine transformation on the inverted packed bytes in x. +/// That is computes a*inv(x)+b over the Galois Field 2^8 for each packed byte with a being a 8x8 bit matrix +/// and b being a constant 8-bit immediate value. +/// The inverse of a byte is defined with respect to the reduction polynomial x^8+x^4+x^3+x+1. +/// The inverse of 0 is 0. +/// Each pack of 8 bytes in x is paired with the 64-bit word at the same position in a. +/// +/// Uses the writemask in k - elements are zeroed in the result if the corresponding mask bit is not set. +/// Otherwise the computation result is written into the result. 
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_gf2p8affineinv_epi64_epi8) +#[inline] +#[target_feature(enable = "gfni,avx512bw,avx512vl")] +#[cfg_attr(test, assert_instr(vgf2p8affineinvqb, B = 0))] +#[rustc_legacy_const_generics(3)] +pub unsafe fn _mm_maskz_gf2p8affineinv_epi64_epi8( + k: __mmask16, + x: __m128i, + a: __m128i, +) -> __m128i { + static_assert_imm8!(B); + let b = B as u8; + let zero = _mm_setzero_si128().as_i8x16(); + let x = x.as_i8x16(); + let a = a.as_i8x16(); + let r = vgf2p8affineinvqb_128(x, a, b); + transmute(simd_select_bitmask(k, r, zero)) +} + +/// Performs an affine transformation on the inverted packed bytes in x. +/// That is computes a*inv(x)+b over the Galois Field 2^8 for each packed byte with a being a 8x8 bit matrix +/// and b being a constant 8-bit immediate value. +/// The inverse of a byte is defined with respect to the reduction polynomial x^8+x^4+x^3+x+1. +/// The inverse of 0 is 0. +/// Each pack of 8 bytes in x is paired with the 64-bit word at the same position in a. +/// +/// Uses the writemask in k - elements are copied from src if the corresponding mask bit is not set. +/// Otherwise the computation result is written into the result. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_gf2p8affineinv_epi64_epi8) +#[inline] +#[target_feature(enable = "gfni,avx512bw,avx512vl")] +#[cfg_attr(test, assert_instr(vgf2p8affineinvqb, B = 0))] +#[rustc_legacy_const_generics(4)] +pub unsafe fn _mm_mask_gf2p8affineinv_epi64_epi8( + src: __m128i, + k: __mmask16, + x: __m128i, + a: __m128i, +) -> __m128i { + static_assert_imm8!(B); + let b = B as u8; + let x = x.as_i8x16(); + let a = a.as_i8x16(); + let r = vgf2p8affineinvqb_128(x, a, b); + transmute(simd_select_bitmask(k, r, src.as_i8x16())) +} + +#[cfg(test)] +mod tests { + // The constants in the tests below are just bit patterns. They should not + // be interpreted as integers; signedness does not make sense for them, but + // __mXXXi happens to be defined in terms of signed integers. 
+ #![allow(overflowing_literals)] + + use core::hint::black_box; + use core::intrinsics::size_of; + use stdarch_test::simd_test; + + use crate::core_arch::x86::*; + + fn mulbyte(left: u8, right: u8) -> u8 { + // this implementation follows the description in + // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_gf2p8mul_epi8 + const REDUCTION_POLYNOMIAL: u16 = 0x11b; + let left: u16 = left.into(); + let right: u16 = right.into(); + let mut carryless_product: u16 = 0; + + // Carryless multiplication + for i in 0..8 { + if ((left >> i) & 0x01) != 0 { + carryless_product ^= right << i; + } + } + + // reduction, adding in "0" where appropriate to clear out high bits + // note that REDUCTION_POLYNOMIAL is zero in this context + for i in (8..=14).rev() { + if ((carryless_product >> i) & 0x01) != 0 { + carryless_product ^= REDUCTION_POLYNOMIAL << (i - 8); + } + } + + carryless_product as u8 + } + + const NUM_TEST_WORDS_512: usize = 4; + const NUM_TEST_WORDS_256: usize = NUM_TEST_WORDS_512 * 2; + const NUM_TEST_WORDS_128: usize = NUM_TEST_WORDS_256 * 2; + const NUM_TEST_ENTRIES: usize = NUM_TEST_WORDS_512 * 64; + const NUM_TEST_WORDS_64: usize = NUM_TEST_WORDS_128 * 2; + const NUM_BYTES: usize = 256; + const NUM_BYTES_WORDS_128: usize = NUM_BYTES / 16; + const NUM_BYTES_WORDS_256: usize = NUM_BYTES_WORDS_128 / 2; + const NUM_BYTES_WORDS_512: usize = NUM_BYTES_WORDS_256 / 2; + + fn parity(input: u8) -> u8 { + let mut accumulator = 0; + for i in 0..8 { + accumulator ^= (input >> i) & 0x01; + } + accumulator + } + + fn mat_vec_multiply_affine(matrix: u64, x: u8, b: u8) -> u8 { + // this implementation follows the description in + // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_gf2p8affine_epi64_epi8 + let mut accumulator = 0; + + for bit in 0..8 { + accumulator |= parity(x & matrix.to_le_bytes()[bit]) << (7 - bit); + } + + accumulator ^ b + } + + fn generate_affine_mul_test_data( + immediate: u8, + ) -> ( + [u64; NUM_TEST_WORDS_64], + [u8; NUM_TEST_ENTRIES], + [u8; NUM_TEST_ENTRIES], + ) { + let mut left: [u64; NUM_TEST_WORDS_64] = [0; NUM_TEST_WORDS_64]; + let mut right: [u8; NUM_TEST_ENTRIES] = [0; NUM_TEST_ENTRIES]; + let mut result: [u8; NUM_TEST_ENTRIES] = [0; NUM_TEST_ENTRIES]; + + for i in 0..NUM_TEST_WORDS_64 { + left[i] = (i as u64) * 103 * 101; + for j in 0..8 { + let j64 = j as u64; + right[i * 8 + j] = ((left[i] + j64) % 256) as u8; + result[i * 8 + j] = mat_vec_multiply_affine(left[i], right[i * 8 + j], immediate); + } + } + + (left, right, result) + } + + fn generate_inv_tests_data() -> ([u8; NUM_BYTES], [u8; NUM_BYTES]) { + let mut input: [u8; NUM_BYTES] = [0; NUM_BYTES]; + let mut result: [u8; NUM_BYTES] = [0; NUM_BYTES]; + + for i in 0..NUM_BYTES { + input[i] = (i % 256) as u8; + result[i] = if i == 0 { 0 } else { 1 }; + } + + (input, result) + } + + const AES_S_BOX: [u8; NUM_BYTES] = [ + 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, + 0x76, 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, + 0x72, 0xc0, 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, + 0xd8, 0x31, 0x15, 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, + 0xeb, 0x27, 0xb2, 0x75, 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, + 0xb3, 0x29, 0xe3, 0x2f, 0x84, 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, + 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf, 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, + 0xf9, 0x02, 
0x7f, 0x50, 0x3c, 0x9f, 0xa8, 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, + 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2, 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, + 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73, 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, + 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb, 0xe0, 0x32, 0x3a, 0x0a, 0x49, + 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79, 0xe7, 0xc8, 0x37, 0x6d, + 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08, 0xba, 0x78, 0x25, + 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a, 0x70, 0x3e, + 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e, 0xe1, + 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf, + 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, + 0x16, + ]; + + fn generate_byte_mul_test_data() -> ( + [u8; NUM_TEST_ENTRIES], + [u8; NUM_TEST_ENTRIES], + [u8; NUM_TEST_ENTRIES], + ) { + let mut left: [u8; NUM_TEST_ENTRIES] = [0; NUM_TEST_ENTRIES]; + let mut right: [u8; NUM_TEST_ENTRIES] = [0; NUM_TEST_ENTRIES]; + let mut result: [u8; NUM_TEST_ENTRIES] = [0; NUM_TEST_ENTRIES]; + + for i in 0..NUM_TEST_ENTRIES { + left[i] = (i % 256) as u8; + right[i] = left[i].wrapping_mul(101); + result[i] = mulbyte(left[i], right[i]); + } + + (left, right, result) + } + + #[target_feature(enable = "sse2")] + unsafe fn load_m128i_word(data: &[T], word_index: usize) -> __m128i { + let byte_offset = word_index * 16 / size_of::(); + let pointer = data.as_ptr().add(byte_offset) as *const __m128i; + _mm_loadu_si128(black_box(pointer)) + } + + #[target_feature(enable = "avx")] + unsafe fn load_m256i_word(data: &[T], word_index: usize) -> __m256i { + let byte_offset = word_index * 32 / size_of::(); + let pointer = data.as_ptr().add(byte_offset) as *const __m256i; + _mm256_loadu_si256(black_box(pointer)) + } + + #[target_feature(enable = "avx512f")] + unsafe fn load_m512i_word(data: &[T], word_index: usize) -> __m512i { + let byte_offset = word_index * 64 / size_of::(); + let pointer = data.as_ptr().add(byte_offset) as *const i32; + _mm512_loadu_si512(black_box(pointer)) + } + + #[simd_test(enable = "gfni,avx512bw")] + unsafe fn test_mm512_gf2p8mul_epi8() { + let (left, right, expected) = generate_byte_mul_test_data(); + + for i in 0..NUM_TEST_WORDS_512 { + let left = load_m512i_word(&left, i); + let right = load_m512i_word(&right, i); + let expected = load_m512i_word(&expected, i); + let result = _mm512_gf2p8mul_epi8(left, right); + assert_eq_m512i(result, expected); + } + } + + #[simd_test(enable = "gfni,avx512bw")] + unsafe fn test_mm512_maskz_gf2p8mul_epi8() { + let (left, right, _expected) = generate_byte_mul_test_data(); + + for i in 0..NUM_TEST_WORDS_512 { + let left = load_m512i_word(&left, i); + let right = load_m512i_word(&right, i); + let result_zero = _mm512_maskz_gf2p8mul_epi8(0, left, right); + assert_eq_m512i(result_zero, _mm512_setzero_si512()); + let mask_bytes: __mmask64 = 0x0F_0F_0F_0F_FF_FF_00_00; + let mask_words: __mmask16 = 0b01_01_01_01_11_11_00_00; + let expected_result = _mm512_gf2p8mul_epi8(left, right); + let result_masked = _mm512_maskz_gf2p8mul_epi8(mask_bytes, left, right); + let expected_masked = + _mm512_mask_blend_epi32(mask_words, _mm512_setzero_si512(), expected_result); + assert_eq_m512i(result_masked, expected_masked); + } + } + + #[simd_test(enable = "gfni,avx512bw")] + unsafe fn test_mm512_mask_gf2p8mul_epi8() { + let (left, 
right, _expected) = generate_byte_mul_test_data(); + + for i in 0..NUM_TEST_WORDS_512 { + let left = load_m512i_word(&left, i); + let right = load_m512i_word(&right, i); + let result_left = _mm512_mask_gf2p8mul_epi8(left, 0, left, right); + assert_eq_m512i(result_left, left); + let mask_bytes: __mmask64 = 0x0F_0F_0F_0F_FF_FF_00_00; + let mask_words: __mmask16 = 0b01_01_01_01_11_11_00_00; + let expected_result = _mm512_gf2p8mul_epi8(left, right); + let result_masked = _mm512_mask_gf2p8mul_epi8(left, mask_bytes, left, right); + let expected_masked = _mm512_mask_blend_epi32(mask_words, left, expected_result); + assert_eq_m512i(result_masked, expected_masked); + } + } + + #[simd_test(enable = "gfni,avx512bw,avx512vl")] + unsafe fn test_mm256_gf2p8mul_epi8() { + let (left, right, expected) = generate_byte_mul_test_data(); + + for i in 0..NUM_TEST_WORDS_256 { + let left = load_m256i_word(&left, i); + let right = load_m256i_word(&right, i); + let expected = load_m256i_word(&expected, i); + let result = _mm256_gf2p8mul_epi8(left, right); + assert_eq_m256i(result, expected); + } + } + + #[simd_test(enable = "gfni,avx512bw,avx512vl")] + unsafe fn test_mm256_maskz_gf2p8mul_epi8() { + let (left, right, _expected) = generate_byte_mul_test_data(); + + for i in 0..NUM_TEST_WORDS_256 { + let left = load_m256i_word(&left, i); + let right = load_m256i_word(&right, i); + let result_zero = _mm256_maskz_gf2p8mul_epi8(0, left, right); + assert_eq_m256i(result_zero, _mm256_setzero_si256()); + let mask_bytes: __mmask32 = 0x0F_F0_FF_00; + const MASK_WORDS: i32 = 0b01_10_11_00; + let expected_result = _mm256_gf2p8mul_epi8(left, right); + let result_masked = _mm256_maskz_gf2p8mul_epi8(mask_bytes, left, right); + let expected_masked = + _mm256_blend_epi32::(_mm256_setzero_si256(), expected_result); + assert_eq_m256i(result_masked, expected_masked); + } + } + + #[simd_test(enable = "gfni,avx512bw,avx512vl")] + unsafe fn test_mm256_mask_gf2p8mul_epi8() { + let (left, right, _expected) = generate_byte_mul_test_data(); + + for i in 0..NUM_TEST_WORDS_256 { + let left = load_m256i_word(&left, i); + let right = load_m256i_word(&right, i); + let result_left = _mm256_mask_gf2p8mul_epi8(left, 0, left, right); + assert_eq_m256i(result_left, left); + let mask_bytes: __mmask32 = 0x0F_F0_FF_00; + const MASK_WORDS: i32 = 0b01_10_11_00; + let expected_result = _mm256_gf2p8mul_epi8(left, right); + let result_masked = _mm256_mask_gf2p8mul_epi8(left, mask_bytes, left, right); + let expected_masked = _mm256_blend_epi32::(left, expected_result); + assert_eq_m256i(result_masked, expected_masked); + } + } + + #[simd_test(enable = "gfni,avx512bw,avx512vl")] + unsafe fn test_mm_gf2p8mul_epi8() { + let (left, right, expected) = generate_byte_mul_test_data(); + + for i in 0..NUM_TEST_WORDS_128 { + let left = load_m128i_word(&left, i); + let right = load_m128i_word(&right, i); + let expected = load_m128i_word(&expected, i); + let result = _mm_gf2p8mul_epi8(left, right); + assert_eq_m128i(result, expected); + } + } + + #[simd_test(enable = "gfni,avx512bw,avx512vl")] + unsafe fn test_mm_maskz_gf2p8mul_epi8() { + let (left, right, _expected) = generate_byte_mul_test_data(); + + for i in 0..NUM_TEST_WORDS_128 { + let left = load_m128i_word(&left, i); + let right = load_m128i_word(&right, i); + let result_zero = _mm_maskz_gf2p8mul_epi8(0, left, right); + assert_eq_m128i(result_zero, _mm_setzero_si128()); + let mask_bytes: __mmask16 = 0x0F_F0; + const MASK_WORDS: i32 = 0b01_10; + let expected_result = _mm_gf2p8mul_epi8(left, right); + let 
result_masked = _mm_maskz_gf2p8mul_epi8(mask_bytes, left, right); + let expected_masked = + _mm_blend_epi32::(_mm_setzero_si128(), expected_result); + assert_eq_m128i(result_masked, expected_masked); + } + } + + #[simd_test(enable = "gfni,avx512bw,avx512vl")] + unsafe fn test_mm_mask_gf2p8mul_epi8() { + let (left, right, _expected) = generate_byte_mul_test_data(); + + for i in 0..NUM_TEST_WORDS_128 { + let left = load_m128i_word(&left, i); + let right = load_m128i_word(&right, i); + let result_left = _mm_mask_gf2p8mul_epi8(left, 0, left, right); + assert_eq_m128i(result_left, left); + let mask_bytes: __mmask16 = 0x0F_F0; + const MASK_WORDS: i32 = 0b01_10; + let expected_result = _mm_gf2p8mul_epi8(left, right); + let result_masked = _mm_mask_gf2p8mul_epi8(left, mask_bytes, left, right); + let expected_masked = _mm_blend_epi32::(left, expected_result); + assert_eq_m128i(result_masked, expected_masked); + } + } + + #[simd_test(enable = "gfni,avx512bw")] + unsafe fn test_mm512_gf2p8affine_epi64_epi8() { + let identity: i64 = 0x01_02_04_08_10_20_40_80; + const IDENTITY_BYTE: i32 = 0; + let constant: i64 = 0; + const CONSTANT_BYTE: i32 = 0x63; + let identity = _mm512_set1_epi64(identity); + let constant = _mm512_set1_epi64(constant); + let constant_reference = _mm512_set1_epi8(CONSTANT_BYTE as i8); + + let (bytes, more_bytes, _) = generate_byte_mul_test_data(); + let (matrices, vectors, references) = generate_affine_mul_test_data(IDENTITY_BYTE as u8); + + for i in 0..NUM_TEST_WORDS_512 { + let data = load_m512i_word(&bytes, i); + let result = _mm512_gf2p8affine_epi64_epi8::(data, identity); + assert_eq_m512i(result, data); + let result = _mm512_gf2p8affine_epi64_epi8::(data, constant); + assert_eq_m512i(result, constant_reference); + let data = load_m512i_word(&more_bytes, i); + let result = _mm512_gf2p8affine_epi64_epi8::(data, identity); + assert_eq_m512i(result, data); + let result = _mm512_gf2p8affine_epi64_epi8::(data, constant); + assert_eq_m512i(result, constant_reference); + + let matrix = load_m512i_word(&matrices, i); + let vector = load_m512i_word(&vectors, i); + let reference = load_m512i_word(&references, i); + + let result = _mm512_gf2p8affine_epi64_epi8::(vector, matrix); + assert_eq_m512i(result, reference); + } + } + + #[simd_test(enable = "gfni,avx512bw")] + unsafe fn test_mm512_maskz_gf2p8affine_epi64_epi8() { + const CONSTANT_BYTE: i32 = 0x63; + let (matrices, vectors, _expected) = generate_affine_mul_test_data(CONSTANT_BYTE as u8); + + for i in 0..NUM_TEST_WORDS_512 { + let matrix = load_m512i_word(&matrices, i); + let vector = load_m512i_word(&vectors, i); + let result_zero = + _mm512_maskz_gf2p8affine_epi64_epi8::(0, vector, matrix); + assert_eq_m512i(result_zero, _mm512_setzero_si512()); + let mask_bytes: __mmask64 = 0x0F_0F_0F_0F_FF_FF_00_00; + let mask_words: __mmask16 = 0b01_01_01_01_11_11_00_00; + let expected_result = _mm512_gf2p8affine_epi64_epi8::(vector, matrix); + let result_masked = + _mm512_maskz_gf2p8affine_epi64_epi8::(mask_bytes, vector, matrix); + let expected_masked = + _mm512_mask_blend_epi32(mask_words, _mm512_setzero_si512(), expected_result); + assert_eq_m512i(result_masked, expected_masked); + } + } + + #[simd_test(enable = "gfni,avx512bw")] + unsafe fn test_mm512_mask_gf2p8affine_epi64_epi8() { + const CONSTANT_BYTE: i32 = 0x63; + let (matrices, vectors, _expected) = generate_affine_mul_test_data(CONSTANT_BYTE as u8); + + for i in 0..NUM_TEST_WORDS_512 { + let left = load_m512i_word(&vectors, i); + let right = load_m512i_word(&matrices, i); + let 
result_left = + _mm512_mask_gf2p8affine_epi64_epi8::(left, 0, left, right); + assert_eq_m512i(result_left, left); + let mask_bytes: __mmask64 = 0x0F_0F_0F_0F_FF_FF_00_00; + let mask_words: __mmask16 = 0b01_01_01_01_11_11_00_00; + let expected_result = _mm512_gf2p8affine_epi64_epi8::(left, right); + let result_masked = + _mm512_mask_gf2p8affine_epi64_epi8::(left, mask_bytes, left, right); + let expected_masked = _mm512_mask_blend_epi32(mask_words, left, expected_result); + assert_eq_m512i(result_masked, expected_masked); + } + } + + #[simd_test(enable = "gfni,avx512bw,avx512vl")] + unsafe fn test_mm256_gf2p8affine_epi64_epi8() { + let identity: i64 = 0x01_02_04_08_10_20_40_80; + const IDENTITY_BYTE: i32 = 0; + let constant: i64 = 0; + const CONSTANT_BYTE: i32 = 0x63; + let identity = _mm256_set1_epi64x(identity); + let constant = _mm256_set1_epi64x(constant); + let constant_reference = _mm256_set1_epi8(CONSTANT_BYTE as i8); + + let (bytes, more_bytes, _) = generate_byte_mul_test_data(); + let (matrices, vectors, references) = generate_affine_mul_test_data(IDENTITY_BYTE as u8); + + for i in 0..NUM_TEST_WORDS_256 { + let data = load_m256i_word(&bytes, i); + let result = _mm256_gf2p8affine_epi64_epi8::(data, identity); + assert_eq_m256i(result, data); + let result = _mm256_gf2p8affine_epi64_epi8::(data, constant); + assert_eq_m256i(result, constant_reference); + let data = load_m256i_word(&more_bytes, i); + let result = _mm256_gf2p8affine_epi64_epi8::(data, identity); + assert_eq_m256i(result, data); + let result = _mm256_gf2p8affine_epi64_epi8::(data, constant); + assert_eq_m256i(result, constant_reference); + + let matrix = load_m256i_word(&matrices, i); + let vector = load_m256i_word(&vectors, i); + let reference = load_m256i_word(&references, i); + + let result = _mm256_gf2p8affine_epi64_epi8::(vector, matrix); + assert_eq_m256i(result, reference); + } + } + + #[simd_test(enable = "gfni,avx512bw,avx512vl")] + unsafe fn test_mm256_maskz_gf2p8affine_epi64_epi8() { + const CONSTANT_BYTE: i32 = 0x63; + let (matrices, vectors, _expected) = generate_affine_mul_test_data(CONSTANT_BYTE as u8); + + for i in 0..NUM_TEST_WORDS_256 { + let matrix = load_m256i_word(&matrices, i); + let vector = load_m256i_word(&vectors, i); + let result_zero = + _mm256_maskz_gf2p8affine_epi64_epi8::(0, vector, matrix); + assert_eq_m256i(result_zero, _mm256_setzero_si256()); + let mask_bytes: __mmask32 = 0xFF_0F_F0_00; + const MASK_WORDS: i32 = 0b11_01_10_00; + let expected_result = _mm256_gf2p8affine_epi64_epi8::(vector, matrix); + let result_masked = + _mm256_maskz_gf2p8affine_epi64_epi8::(mask_bytes, vector, matrix); + let expected_masked = + _mm256_blend_epi32::(_mm256_setzero_si256(), expected_result); + assert_eq_m256i(result_masked, expected_masked); + } + } + + #[simd_test(enable = "gfni,avx512bw,avx512vl")] + unsafe fn test_mm256_mask_gf2p8affine_epi64_epi8() { + const CONSTANT_BYTE: i32 = 0x63; + let (matrices, vectors, _expected) = generate_affine_mul_test_data(CONSTANT_BYTE as u8); + + for i in 0..NUM_TEST_WORDS_256 { + let left = load_m256i_word(&vectors, i); + let right = load_m256i_word(&matrices, i); + let result_left = + _mm256_mask_gf2p8affine_epi64_epi8::(left, 0, left, right); + assert_eq_m256i(result_left, left); + let mask_bytes: __mmask32 = 0xFF_0F_F0_00; + const MASK_WORDS: i32 = 0b11_01_10_00; + let expected_result = _mm256_gf2p8affine_epi64_epi8::(left, right); + let result_masked = + _mm256_mask_gf2p8affine_epi64_epi8::(left, mask_bytes, left, right); + let expected_masked = 
_mm256_blend_epi32::(left, expected_result); + assert_eq_m256i(result_masked, expected_masked); + } + } + + #[simd_test(enable = "gfni,avx512bw,avx512vl")] + unsafe fn test_mm_gf2p8affine_epi64_epi8() { + let identity: i64 = 0x01_02_04_08_10_20_40_80; + const IDENTITY_BYTE: i32 = 0; + let constant: i64 = 0; + const CONSTANT_BYTE: i32 = 0x63; + let identity = _mm_set1_epi64x(identity); + let constant = _mm_set1_epi64x(constant); + let constant_reference = _mm_set1_epi8(CONSTANT_BYTE as i8); + + let (bytes, more_bytes, _) = generate_byte_mul_test_data(); + let (matrices, vectors, references) = generate_affine_mul_test_data(IDENTITY_BYTE as u8); + + for i in 0..NUM_TEST_WORDS_128 { + let data = load_m128i_word(&bytes, i); + let result = _mm_gf2p8affine_epi64_epi8::(data, identity); + assert_eq_m128i(result, data); + let result = _mm_gf2p8affine_epi64_epi8::(data, constant); + assert_eq_m128i(result, constant_reference); + let data = load_m128i_word(&more_bytes, i); + let result = _mm_gf2p8affine_epi64_epi8::(data, identity); + assert_eq_m128i(result, data); + let result = _mm_gf2p8affine_epi64_epi8::(data, constant); + assert_eq_m128i(result, constant_reference); + + let matrix = load_m128i_word(&matrices, i); + let vector = load_m128i_word(&vectors, i); + let reference = load_m128i_word(&references, i); + + let result = _mm_gf2p8affine_epi64_epi8::(vector, matrix); + assert_eq_m128i(result, reference); + } + } + + #[simd_test(enable = "gfni,avx512bw,avx512vl")] + unsafe fn test_mm_maskz_gf2p8affine_epi64_epi8() { + const CONSTANT_BYTE: i32 = 0x63; + let (matrices, vectors, _expected) = generate_affine_mul_test_data(CONSTANT_BYTE as u8); + + for i in 0..NUM_TEST_WORDS_128 { + let matrix = load_m128i_word(&matrices, i); + let vector = load_m128i_word(&vectors, i); + let result_zero = _mm_maskz_gf2p8affine_epi64_epi8::(0, vector, matrix); + assert_eq_m128i(result_zero, _mm_setzero_si128()); + let mask_bytes: __mmask16 = 0x0F_F0; + const MASK_WORDS: i32 = 0b01_10; + let expected_result = _mm_gf2p8affine_epi64_epi8::(vector, matrix); + let result_masked = + _mm_maskz_gf2p8affine_epi64_epi8::(mask_bytes, vector, matrix); + let expected_masked = + _mm_blend_epi32::(_mm_setzero_si128(), expected_result); + assert_eq_m128i(result_masked, expected_masked); + } + } + + #[simd_test(enable = "gfni,avx512bw,avx512vl")] + unsafe fn test_mm_mask_gf2p8affine_epi64_epi8() { + const CONSTANT_BYTE: i32 = 0x63; + let (matrices, vectors, _expected) = generate_affine_mul_test_data(CONSTANT_BYTE as u8); + + for i in 0..NUM_TEST_WORDS_128 { + let left = load_m128i_word(&vectors, i); + let right = load_m128i_word(&matrices, i); + let result_left = + _mm_mask_gf2p8affine_epi64_epi8::(left, 0, left, right); + assert_eq_m128i(result_left, left); + let mask_bytes: __mmask16 = 0x0F_F0; + const MASK_WORDS: i32 = 0b01_10; + let expected_result = _mm_gf2p8affine_epi64_epi8::(left, right); + let result_masked = + _mm_mask_gf2p8affine_epi64_epi8::(left, mask_bytes, left, right); + let expected_masked = _mm_blend_epi32::(left, expected_result); + assert_eq_m128i(result_masked, expected_masked); + } + } + + #[simd_test(enable = "gfni,avx512bw")] + unsafe fn test_mm512_gf2p8affineinv_epi64_epi8() { + let identity: i64 = 0x01_02_04_08_10_20_40_80; + const IDENTITY_BYTE: i32 = 0; + const CONSTANT_BYTE: i32 = 0x63; + let identity = _mm512_set1_epi64(identity); + + // validate inversion + let (inputs, results) = generate_inv_tests_data(); + + for i in 0..NUM_BYTES_WORDS_512 { + let input = load_m512i_word(&inputs, i); + let reference 
= load_m512i_word(&results, i); + let result = _mm512_gf2p8affineinv_epi64_epi8::(input, identity); + let remultiplied = _mm512_gf2p8mul_epi8(result, input); + assert_eq_m512i(remultiplied, reference); + } + + // validate subsequent affine operation + let (matrices, vectors, _affine_expected) = + generate_affine_mul_test_data(CONSTANT_BYTE as u8); + + for i in 0..NUM_TEST_WORDS_512 { + let vector = load_m512i_word(&vectors, i); + let matrix = load_m512i_word(&matrices, i); + + let inv_vec = _mm512_gf2p8affineinv_epi64_epi8::(vector, identity); + let reference = _mm512_gf2p8affine_epi64_epi8::(inv_vec, matrix); + let result = _mm512_gf2p8affineinv_epi64_epi8::(vector, matrix); + assert_eq_m512i(result, reference); + } + + // validate everything by virtue of checking against the AES SBox + const AES_S_BOX_MATRIX: i64 = 0xF1_E3_C7_8F_1F_3E_7C_F8; + let sbox_matrix = _mm512_set1_epi64(AES_S_BOX_MATRIX); + + for i in 0..NUM_BYTES_WORDS_512 { + let reference = load_m512i_word(&AES_S_BOX, i); + let input = load_m512i_word(&inputs, i); + let result = _mm512_gf2p8affineinv_epi64_epi8::(input, sbox_matrix); + assert_eq_m512i(result, reference); + } + } + + #[simd_test(enable = "gfni,avx512bw")] + unsafe fn test_mm512_maskz_gf2p8affineinv_epi64_epi8() { + const CONSTANT_BYTE: i32 = 0x63; + let (matrices, vectors, _expected) = generate_affine_mul_test_data(CONSTANT_BYTE as u8); + + for i in 0..NUM_TEST_WORDS_512 { + let matrix = load_m512i_word(&matrices, i); + let vector = load_m512i_word(&vectors, i); + let result_zero = + _mm512_maskz_gf2p8affineinv_epi64_epi8::(0, vector, matrix); + assert_eq_m512i(result_zero, _mm512_setzero_si512()); + let mask_bytes: __mmask64 = 0x0F_0F_0F_0F_FF_FF_00_00; + let mask_words: __mmask16 = 0b01_01_01_01_11_11_00_00; + let expected_result = _mm512_gf2p8affineinv_epi64_epi8::(vector, matrix); + let result_masked = + _mm512_maskz_gf2p8affineinv_epi64_epi8::(mask_bytes, vector, matrix); + let expected_masked = + _mm512_mask_blend_epi32(mask_words, _mm512_setzero_si512(), expected_result); + assert_eq_m512i(result_masked, expected_masked); + } + } + + #[simd_test(enable = "gfni,avx512bw")] + unsafe fn test_mm512_mask_gf2p8affineinv_epi64_epi8() { + const CONSTANT_BYTE: i32 = 0x63; + let (matrices, vectors, _expected) = generate_affine_mul_test_data(CONSTANT_BYTE as u8); + + for i in 0..NUM_TEST_WORDS_512 { + let left = load_m512i_word(&vectors, i); + let right = load_m512i_word(&matrices, i); + let result_left = + _mm512_mask_gf2p8affineinv_epi64_epi8::(left, 0, left, right); + assert_eq_m512i(result_left, left); + let mask_bytes: __mmask64 = 0x0F_0F_0F_0F_FF_FF_00_00; + let mask_words: __mmask16 = 0b01_01_01_01_11_11_00_00; + let expected_result = _mm512_gf2p8affineinv_epi64_epi8::(left, right); + let result_masked = _mm512_mask_gf2p8affineinv_epi64_epi8::( + left, mask_bytes, left, right, + ); + let expected_masked = _mm512_mask_blend_epi32(mask_words, left, expected_result); + assert_eq_m512i(result_masked, expected_masked); + } + } + + #[simd_test(enable = "gfni,avx512bw,avx512vl")] + unsafe fn test_mm256_gf2p8affineinv_epi64_epi8() { + let identity: i64 = 0x01_02_04_08_10_20_40_80; + const IDENTITY_BYTE: i32 = 0; + const CONSTANT_BYTE: i32 = 0x63; + let identity = _mm256_set1_epi64x(identity); + + // validate inversion + let (inputs, results) = generate_inv_tests_data(); + + for i in 0..NUM_BYTES_WORDS_256 { + let input = load_m256i_word(&inputs, i); + let reference = load_m256i_word(&results, i); + let result = _mm256_gf2p8affineinv_epi64_epi8::(input, identity); + 
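// With the identity matrix and a zero constant byte, the affine-inverse
// intrinsic returns inv(x) for each byte; multiplying that back by the input
// must yield 1 for every non-zero byte and 0 for byte zero, which is exactly
// the pattern produced by generate_inv_tests_data().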
let remultiplied = _mm256_gf2p8mul_epi8(result, input); + assert_eq_m256i(remultiplied, reference); + } + + // validate subsequent affine operation + let (matrices, vectors, _affine_expected) = + generate_affine_mul_test_data(CONSTANT_BYTE as u8); + + for i in 0..NUM_TEST_WORDS_256 { + let vector = load_m256i_word(&vectors, i); + let matrix = load_m256i_word(&matrices, i); + + let inv_vec = _mm256_gf2p8affineinv_epi64_epi8::(vector, identity); + let reference = _mm256_gf2p8affine_epi64_epi8::(inv_vec, matrix); + let result = _mm256_gf2p8affineinv_epi64_epi8::(vector, matrix); + assert_eq_m256i(result, reference); + } + + // validate everything by virtue of checking against the AES SBox + const AES_S_BOX_MATRIX: i64 = 0xF1_E3_C7_8F_1F_3E_7C_F8; + let sbox_matrix = _mm256_set1_epi64x(AES_S_BOX_MATRIX); + + for i in 0..NUM_BYTES_WORDS_256 { + let reference = load_m256i_word(&AES_S_BOX, i); + let input = load_m256i_word(&inputs, i); + let result = _mm256_gf2p8affineinv_epi64_epi8::(input, sbox_matrix); + assert_eq_m256i(result, reference); + } + } + + #[simd_test(enable = "gfni,avx512bw,avx512vl")] + unsafe fn test_mm256_maskz_gf2p8affineinv_epi64_epi8() { + const CONSTANT_BYTE: i32 = 0x63; + let (matrices, vectors, _expected) = generate_affine_mul_test_data(CONSTANT_BYTE as u8); + + for i in 0..NUM_TEST_WORDS_256 { + let matrix = load_m256i_word(&matrices, i); + let vector = load_m256i_word(&vectors, i); + let result_zero = + _mm256_maskz_gf2p8affineinv_epi64_epi8::(0, vector, matrix); + assert_eq_m256i(result_zero, _mm256_setzero_si256()); + let mask_bytes: __mmask32 = 0xFF_0F_F0_00; + const MASK_WORDS: i32 = 0b11_01_10_00; + let expected_result = _mm256_gf2p8affineinv_epi64_epi8::(vector, matrix); + let result_masked = + _mm256_maskz_gf2p8affineinv_epi64_epi8::(mask_bytes, vector, matrix); + let expected_masked = + _mm256_blend_epi32::(_mm256_setzero_si256(), expected_result); + assert_eq_m256i(result_masked, expected_masked); + } + } + + #[simd_test(enable = "gfni,avx512bw,avx512vl")] + unsafe fn test_mm256_mask_gf2p8affineinv_epi64_epi8() { + const CONSTANT_BYTE: i32 = 0x63; + let (matrices, vectors, _expected) = generate_affine_mul_test_data(CONSTANT_BYTE as u8); + + for i in 0..NUM_TEST_WORDS_256 { + let left = load_m256i_word(&vectors, i); + let right = load_m256i_word(&matrices, i); + let result_left = + _mm256_mask_gf2p8affineinv_epi64_epi8::(left, 0, left, right); + assert_eq_m256i(result_left, left); + let mask_bytes: __mmask32 = 0xFF_0F_F0_00; + const MASK_WORDS: i32 = 0b11_01_10_00; + let expected_result = _mm256_gf2p8affineinv_epi64_epi8::(left, right); + let result_masked = _mm256_mask_gf2p8affineinv_epi64_epi8::( + left, mask_bytes, left, right, + ); + let expected_masked = _mm256_blend_epi32::(left, expected_result); + assert_eq_m256i(result_masked, expected_masked); + } + } + + #[simd_test(enable = "gfni,avx512bw,avx512vl")] + unsafe fn test_mm_gf2p8affineinv_epi64_epi8() { + let identity: i64 = 0x01_02_04_08_10_20_40_80; + const IDENTITY_BYTE: i32 = 0; + const CONSTANT_BYTE: i32 = 0x63; + let identity = _mm_set1_epi64x(identity); + + // validate inversion + let (inputs, results) = generate_inv_tests_data(); + + for i in 0..NUM_BYTES_WORDS_128 { + let input = load_m128i_word(&inputs, i); + let reference = load_m128i_word(&results, i); + let result = _mm_gf2p8affineinv_epi64_epi8::(input, identity); + let remultiplied = _mm_gf2p8mul_epi8(result, input); + assert_eq_m128i(remultiplied, reference); + } + + // validate subsequent affine operation + let (matrices, vectors, 
_affine_expected) = + generate_affine_mul_test_data(CONSTANT_BYTE as u8); + + for i in 0..NUM_TEST_WORDS_128 { + let vector = load_m128i_word(&vectors, i); + let matrix = load_m128i_word(&matrices, i); + + let inv_vec = _mm_gf2p8affineinv_epi64_epi8::(vector, identity); + let reference = _mm_gf2p8affine_epi64_epi8::(inv_vec, matrix); + let result = _mm_gf2p8affineinv_epi64_epi8::(vector, matrix); + assert_eq_m128i(result, reference); + } + + // validate everything by virtue of checking against the AES SBox + const AES_S_BOX_MATRIX: i64 = 0xF1_E3_C7_8F_1F_3E_7C_F8; + let sbox_matrix = _mm_set1_epi64x(AES_S_BOX_MATRIX); + + for i in 0..NUM_BYTES_WORDS_128 { + let reference = load_m128i_word(&AES_S_BOX, i); + let input = load_m128i_word(&inputs, i); + let result = _mm_gf2p8affineinv_epi64_epi8::(input, sbox_matrix); + assert_eq_m128i(result, reference); + } + } + + #[simd_test(enable = "gfni,avx512bw,avx512vl")] + unsafe fn test_mm_maskz_gf2p8affineinv_epi64_epi8() { + const CONSTANT_BYTE: i32 = 0x63; + let (matrices, vectors, _expected) = generate_affine_mul_test_data(CONSTANT_BYTE as u8); + + for i in 0..NUM_TEST_WORDS_128 { + let matrix = load_m128i_word(&matrices, i); + let vector = load_m128i_word(&vectors, i); + let result_zero = + _mm_maskz_gf2p8affineinv_epi64_epi8::(0, vector, matrix); + assert_eq_m128i(result_zero, _mm_setzero_si128()); + let mask_bytes: __mmask16 = 0x0F_F0; + const MASK_WORDS: i32 = 0b01_10; + let expected_result = _mm_gf2p8affineinv_epi64_epi8::(vector, matrix); + let result_masked = + _mm_maskz_gf2p8affineinv_epi64_epi8::(mask_bytes, vector, matrix); + let expected_masked = + _mm_blend_epi32::(_mm_setzero_si128(), expected_result); + assert_eq_m128i(result_masked, expected_masked); + } + } + + #[simd_test(enable = "gfni,avx512bw,avx512vl")] + unsafe fn test_mm_mask_gf2p8affineinv_epi64_epi8() { + const CONSTANT_BYTE: i32 = 0x63; + let (matrices, vectors, _expected) = generate_affine_mul_test_data(CONSTANT_BYTE as u8); + + for i in 0..NUM_TEST_WORDS_128 { + let left = load_m128i_word(&vectors, i); + let right = load_m128i_word(&matrices, i); + let result_left = + _mm_mask_gf2p8affineinv_epi64_epi8::(left, 0, left, right); + assert_eq_m128i(result_left, left); + let mask_bytes: __mmask16 = 0x0F_F0; + const MASK_WORDS: i32 = 0b01_10; + let expected_result = _mm_gf2p8affineinv_epi64_epi8::(left, right); + let result_masked = + _mm_mask_gf2p8affineinv_epi64_epi8::(left, mask_bytes, left, right); + let expected_masked = _mm_blend_epi32::(left, expected_result); + assert_eq_m128i(result_masked, expected_masked); + } + } +} diff --git a/library/stdarch/crates/core_arch/src/x86/mod.rs b/library/stdarch/crates/core_arch/src/x86/mod.rs index 6b50e95b2..37045e40e 100644 --- a/library/stdarch/crates/core_arch/src/x86/mod.rs +++ b/library/stdarch/crates/core_arch/src/x86/mod.rs @@ -835,17 +835,17 @@ pub use self::avx512vnni::*; mod avx512bitalg; pub use self::avx512bitalg::*; -mod avx512gfni; -pub use self::avx512gfni::*; +mod gfni; +pub use self::gfni::*; mod avx512vpopcntdq; pub use self::avx512vpopcntdq::*; -mod avx512vaes; -pub use self::avx512vaes::*; +mod vaes; +pub use self::vaes::*; -mod avx512vpclmulqdq; -pub use self::avx512vpclmulqdq::*; +mod vpclmulqdq; +pub use self::vpclmulqdq::*; mod bt; pub use self::bt::*; diff --git a/library/stdarch/crates/core_arch/src/x86/sse.rs b/library/stdarch/crates/core_arch/src/x86/sse.rs index 03c3a14a5..f21288970 100644 --- a/library/stdarch/crates/core_arch/src/x86/sse.rs +++ b/library/stdarch/crates/core_arch/src/x86/sse.rs @@ 
-1080,10 +1080,7 @@ pub unsafe fn _mm_movelh_ps(a: __m128, b: __m128) -> __m128 { /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_movemask_ps) #[inline] #[target_feature(enable = "sse")] -// FIXME: LLVM9 trunk has the following bug: -// https://github.com/rust-lang/stdarch/issues/794 -// so we only temporarily test this on i686 and x86_64 but not on i586: -#[cfg_attr(all(test, target_feature = "sse2"), assert_instr(movmskps))] +#[cfg_attr(test, assert_instr(movmskps))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_movemask_ps(a: __m128) -> i32 { movmskps(a) diff --git a/library/stdarch/crates/core_arch/src/x86/sse2.rs b/library/stdarch/crates/core_arch/src/x86/sse2.rs index 3e79b3539..cde4bc316 100644 --- a/library/stdarch/crates/core_arch/src/x86/sse2.rs +++ b/library/stdarch/crates/core_arch/src/x86/sse2.rs @@ -203,7 +203,9 @@ pub unsafe fn _mm_madd_epi16(a: __m128i, b: __m128i) -> __m128i { #[cfg_attr(test, assert_instr(pmaxsw))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_max_epi16(a: __m128i, b: __m128i) -> __m128i { - transmute(pmaxsw(a.as_i16x8(), b.as_i16x8())) + let a = a.as_i16x8(); + let b = b.as_i16x8(); + transmute(simd_select::(simd_gt(a, b), a, b)) } /// Compares packed unsigned 8-bit integers in `a` and `b`, and returns the @@ -215,7 +217,9 @@ pub unsafe fn _mm_max_epi16(a: __m128i, b: __m128i) -> __m128i { #[cfg_attr(test, assert_instr(pmaxub))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_max_epu8(a: __m128i, b: __m128i) -> __m128i { - transmute(pmaxub(a.as_u8x16(), b.as_u8x16())) + let a = a.as_u8x16(); + let b = b.as_u8x16(); + transmute(simd_select::(simd_gt(a, b), a, b)) } /// Compares packed 16-bit integers in `a` and `b`, and returns the packed @@ -227,7 +231,9 @@ pub unsafe fn _mm_max_epu8(a: __m128i, b: __m128i) -> __m128i { #[cfg_attr(test, assert_instr(pminsw))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_min_epi16(a: __m128i, b: __m128i) -> __m128i { - transmute(pminsw(a.as_i16x8(), b.as_i16x8())) + let a = a.as_i16x8(); + let b = b.as_i16x8(); + transmute(simd_select::(simd_lt(a, b), a, b)) } /// Compares packed unsigned 8-bit integers in `a` and `b`, and returns the @@ -239,7 +245,9 @@ pub unsafe fn _mm_min_epi16(a: __m128i, b: __m128i) -> __m128i { #[cfg_attr(test, assert_instr(pminub))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_min_epu8(a: __m128i, b: __m128i) -> __m128i { - transmute(pminub(a.as_u8x16(), b.as_u8x16())) + let a = a.as_u8x16(); + let b = b.as_u8x16(); + transmute(simd_select::(simd_lt(a, b), a, b)) } /// Multiplies the packed 16-bit integers in `a` and `b`. @@ -1378,7 +1386,9 @@ pub unsafe fn _mm_insert_epi16(a: __m128i, i: i32) -> __m128i { #[cfg_attr(test, assert_instr(pmovmskb))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_movemask_epi8(a: __m128i) -> i32 { - simd_bitmask::<_, u16>(a.as_i8x16()) as u32 as i32 + let z = i8x16::splat(0); + let m: i8x16 = simd_lt(a.as_i8x16(), z); + simd_bitmask::<_, u16>(m) as u32 as i32 } /// Shuffles 32-bit integers in `a` using the control in `IMM8`. @@ -1409,7 +1419,7 @@ pub unsafe fn _mm_shuffle_epi32(a: __m128i) -> __m128i { /// `IMM8`. /// /// Put the results in the high 64 bits of the returned vector, with the low 64 -/// bits being copied from from `a`. +/// bits being copied from `a`. 
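/// For example, `IMM8 == 0b00_01_10_11` reverses the order of the four high
/// 16-bit lanes, while the low 64 bits pass through unchanged.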
/// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_shufflehi_epi16) #[inline] @@ -1441,7 +1451,7 @@ pub unsafe fn _mm_shufflehi_epi16(a: __m128i) -> __m128i { /// `IMM8`. /// /// Put the results in the low 64 bits of the returned vector, with the high 64 -/// bits being copied from from `a`. +/// bits being copied from `a`. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_shufflelo_epi16) #[inline] @@ -2796,14 +2806,6 @@ extern "C" { fn pavgw(a: u16x8, b: u16x8) -> u16x8; #[link_name = "llvm.x86.sse2.pmadd.wd"] fn pmaddwd(a: i16x8, b: i16x8) -> i32x4; - #[link_name = "llvm.x86.sse2.pmaxs.w"] - fn pmaxsw(a: i16x8, b: i16x8) -> i16x8; - #[link_name = "llvm.x86.sse2.pmaxu.b"] - fn pmaxub(a: u8x16, b: u8x16) -> u8x16; - #[link_name = "llvm.x86.sse2.pmins.w"] - fn pminsw(a: i16x8, b: i16x8) -> i16x8; - #[link_name = "llvm.x86.sse2.pminu.b"] - fn pminub(a: u8x16, b: u8x16) -> u8x16; #[link_name = "llvm.x86.sse2.pmulh.w"] fn pmulhw(a: i16x8, b: i16x8) -> i16x8; #[link_name = "llvm.x86.sse2.pmulhu.w"] diff --git a/library/stdarch/crates/core_arch/src/x86/sse41.rs b/library/stdarch/crates/core_arch/src/x86/sse41.rs index 7c59f2702..3162ad7d9 100644 --- a/library/stdarch/crates/core_arch/src/x86/sse41.rs +++ b/library/stdarch/crates/core_arch/src/x86/sse41.rs @@ -281,7 +281,9 @@ pub unsafe fn _mm_insert_epi32(a: __m128i, i: i32) -> __m128i { #[cfg_attr(test, assert_instr(pmaxsb))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_max_epi8(a: __m128i, b: __m128i) -> __m128i { - transmute(pmaxsb(a.as_i8x16(), b.as_i8x16())) + let a = a.as_i8x16(); + let b = b.as_i8x16(); + transmute(simd_select::(simd_gt(a, b), a, b)) } /// Compares packed unsigned 16-bit integers in `a` and `b`, and returns packed @@ -293,7 +295,9 @@ pub unsafe fn _mm_max_epi8(a: __m128i, b: __m128i) -> __m128i { #[cfg_attr(test, assert_instr(pmaxuw))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_max_epu16(a: __m128i, b: __m128i) -> __m128i { - transmute(pmaxuw(a.as_u16x8(), b.as_u16x8())) + let a = a.as_u16x8(); + let b = b.as_u16x8(); + transmute(simd_select::(simd_gt(a, b), a, b)) } /// Compares packed 32-bit integers in `a` and `b`, and returns packed maximum @@ -305,7 +309,9 @@ pub unsafe fn _mm_max_epu16(a: __m128i, b: __m128i) -> __m128i { #[cfg_attr(test, assert_instr(pmaxsd))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_max_epi32(a: __m128i, b: __m128i) -> __m128i { - transmute(pmaxsd(a.as_i32x4(), b.as_i32x4())) + let a = a.as_i32x4(); + let b = b.as_i32x4(); + transmute(simd_select::(simd_gt(a, b), a, b)) } /// Compares packed unsigned 32-bit integers in `a` and `b`, and returns packed @@ -317,7 +323,9 @@ pub unsafe fn _mm_max_epi32(a: __m128i, b: __m128i) -> __m128i { #[cfg_attr(test, assert_instr(pmaxud))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_max_epu32(a: __m128i, b: __m128i) -> __m128i { - transmute(pmaxud(a.as_u32x4(), b.as_u32x4())) + let a = a.as_u32x4(); + let b = b.as_u32x4(); + transmute(simd_select::(simd_gt(a, b), a, b)) } /// Compares packed 8-bit integers in `a` and `b` and returns packed minimum @@ -329,7 +337,9 @@ pub unsafe fn _mm_max_epu32(a: __m128i, b: __m128i) -> __m128i { #[cfg_attr(test, assert_instr(pminsb))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_min_epi8(a: __m128i, b: __m128i) -> __m128i { - transmute(pminsb(a.as_i8x16(), b.as_i8x16())) + let a = a.as_i8x16(); + let b = 
b.as_i8x16(); + transmute(simd_select::(simd_lt(a, b), a, b)) } /// Compares packed unsigned 16-bit integers in `a` and `b`, and returns packed @@ -341,7 +351,9 @@ pub unsafe fn _mm_min_epi8(a: __m128i, b: __m128i) -> __m128i { #[cfg_attr(test, assert_instr(pminuw))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_min_epu16(a: __m128i, b: __m128i) -> __m128i { - transmute(pminuw(a.as_u16x8(), b.as_u16x8())) + let a = a.as_u16x8(); + let b = b.as_u16x8(); + transmute(simd_select::(simd_lt(a, b), a, b)) } /// Compares packed 32-bit integers in `a` and `b`, and returns packed minimum @@ -353,7 +365,9 @@ pub unsafe fn _mm_min_epu16(a: __m128i, b: __m128i) -> __m128i { #[cfg_attr(test, assert_instr(pminsd))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_min_epi32(a: __m128i, b: __m128i) -> __m128i { - transmute(pminsd(a.as_i32x4(), b.as_i32x4())) + let a = a.as_i32x4(); + let b = b.as_i32x4(); + transmute(simd_select::(simd_lt(a, b), a, b)) } /// Compares packed unsigned 32-bit integers in `a` and `b`, and returns packed @@ -365,7 +379,9 @@ pub unsafe fn _mm_min_epi32(a: __m128i, b: __m128i) -> __m128i { #[cfg_attr(test, assert_instr(pminud))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_min_epu32(a: __m128i, b: __m128i) -> __m128i { - transmute(pminud(a.as_u32x4(), b.as_u32x4())) + let a = a.as_u32x4(); + let b = b.as_u32x4(); + transmute(simd_select::(simd_lt(a, b), a, b)) } /// Converts packed 32-bit integers from `a` and `b` to packed 16-bit integers @@ -1122,22 +1138,6 @@ extern "C" { fn pblendw(a: i16x8, b: i16x8, imm8: u8) -> i16x8; #[link_name = "llvm.x86.sse41.insertps"] fn insertps(a: __m128, b: __m128, imm8: u8) -> __m128; - #[link_name = "llvm.x86.sse41.pmaxsb"] - fn pmaxsb(a: i8x16, b: i8x16) -> i8x16; - #[link_name = "llvm.x86.sse41.pmaxuw"] - fn pmaxuw(a: u16x8, b: u16x8) -> u16x8; - #[link_name = "llvm.x86.sse41.pmaxsd"] - fn pmaxsd(a: i32x4, b: i32x4) -> i32x4; - #[link_name = "llvm.x86.sse41.pmaxud"] - fn pmaxud(a: u32x4, b: u32x4) -> u32x4; - #[link_name = "llvm.x86.sse41.pminsb"] - fn pminsb(a: i8x16, b: i8x16) -> i8x16; - #[link_name = "llvm.x86.sse41.pminuw"] - fn pminuw(a: u16x8, b: u16x8) -> u16x8; - #[link_name = "llvm.x86.sse41.pminsd"] - fn pminsd(a: i32x4, b: i32x4) -> i32x4; - #[link_name = "llvm.x86.sse41.pminud"] - fn pminud(a: u32x4, b: u32x4) -> u32x4; #[link_name = "llvm.x86.sse41.packusdw"] fn packusdw(a: i32x4, b: i32x4) -> u16x8; #[link_name = "llvm.x86.sse41.dppd"] diff --git a/library/stdarch/crates/core_arch/src/x86/sse42.rs b/library/stdarch/crates/core_arch/src/x86/sse42.rs index f474b0671..4eb12480b 100644 --- a/library/stdarch/crates/core_arch/src/x86/sse42.rs +++ b/library/stdarch/crates/core_arch/src/x86/sse42.rs @@ -614,7 +614,7 @@ mod tests { use crate::core_arch::x86::*; use std::ptr; - // Currently one cannot `load` a &[u8] that is is less than 16 + // Currently one cannot `load` a &[u8] that is less than 16 // in length. This makes loading strings less than 16 in length // a bit difficult. 
Rather than `load` and mutate the __m128i, // it is easier to memcpy the given string to a local slice with @@ -623,11 +623,7 @@ mod tests { unsafe fn str_to_m128i(s: &[u8]) -> __m128i { assert!(s.len() <= 16); let slice = &mut [0u8; 16]; - ptr::copy_nonoverlapping( - s.get_unchecked(0) as *const u8 as *const u8, - slice.get_unchecked_mut(0) as *mut u8 as *mut u8, - s.len(), - ); + ptr::copy_nonoverlapping(s.as_ptr(), slice.as_mut_ptr(), s.len()); _mm_loadu_si128(slice.as_ptr() as *const _) } diff --git a/library/stdarch/crates/core_arch/src/x86/vaes.rs b/library/stdarch/crates/core_arch/src/x86/vaes.rs new file mode 100644 index 000000000..e09f8a113 --- /dev/null +++ b/library/stdarch/crates/core_arch/src/x86/vaes.rs @@ -0,0 +1,332 @@ +//! Vectorized AES Instructions (VAES) +//! +//! The intrinsics here correspond to those in the `immintrin.h` C header. +//! +//! The reference is [Intel 64 and IA-32 Architectures Software Developer's +//! Manual Volume 2: Instruction Set Reference, A-Z][intel64_ref]. +//! +//! [intel64_ref]: http://www.intel.de/content/dam/www/public/us/en/documents/manuals/64-ia-32-architectures-software-developer-instruction-set-reference-manual-325383.pdf + +use crate::core_arch::x86::__m256i; +use crate::core_arch::x86::__m512i; + +#[cfg(test)] +use stdarch_test::assert_instr; + +#[allow(improper_ctypes)] +extern "C" { + #[link_name = "llvm.x86.aesni.aesenc.256"] + fn aesenc_256(a: __m256i, round_key: __m256i) -> __m256i; + #[link_name = "llvm.x86.aesni.aesenclast.256"] + fn aesenclast_256(a: __m256i, round_key: __m256i) -> __m256i; + #[link_name = "llvm.x86.aesni.aesdec.256"] + fn aesdec_256(a: __m256i, round_key: __m256i) -> __m256i; + #[link_name = "llvm.x86.aesni.aesdeclast.256"] + fn aesdeclast_256(a: __m256i, round_key: __m256i) -> __m256i; + #[link_name = "llvm.x86.aesni.aesenc.512"] + fn aesenc_512(a: __m512i, round_key: __m512i) -> __m512i; + #[link_name = "llvm.x86.aesni.aesenclast.512"] + fn aesenclast_512(a: __m512i, round_key: __m512i) -> __m512i; + #[link_name = "llvm.x86.aesni.aesdec.512"] + fn aesdec_512(a: __m512i, round_key: __m512i) -> __m512i; + #[link_name = "llvm.x86.aesni.aesdeclast.512"] + fn aesdeclast_512(a: __m512i, round_key: __m512i) -> __m512i; +} + +/// Performs one round of an AES encryption flow on each 128-bit word (state) in `a` using +/// the corresponding 128-bit word (key) in `round_key`. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_aesenc_epi128) +#[inline] +#[target_feature(enable = "vaes")] +#[cfg_attr(test, assert_instr(vaesenc))] +pub unsafe fn _mm256_aesenc_epi128(a: __m256i, round_key: __m256i) -> __m256i { + aesenc_256(a, round_key) +} + +/// Performs the last round of an AES encryption flow on each 128-bit word (state) in `a` using +/// the corresponding 128-bit word (key) in `round_key`. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_aesenclast_epi128) +#[inline] +#[target_feature(enable = "vaes")] +#[cfg_attr(test, assert_instr(vaesenclast))] +pub unsafe fn _mm256_aesenclast_epi128(a: __m256i, round_key: __m256i) -> __m256i { + aesenclast_256(a, round_key) +} + +/// Performs one round of an AES decryption flow on each 128-bit word (state) in `a` using +/// the corresponding 128-bit word (key) in `round_key`. 
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_aesdec_epi128) +#[inline] +#[target_feature(enable = "vaes")] +#[cfg_attr(test, assert_instr(vaesdec))] +pub unsafe fn _mm256_aesdec_epi128(a: __m256i, round_key: __m256i) -> __m256i { + aesdec_256(a, round_key) +} + +/// Performs the last round of an AES decryption flow on each 128-bit word (state) in `a` using +/// the corresponding 128-bit word (key) in `round_key`. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_aesdeclast_epi128) +#[inline] +#[target_feature(enable = "vaes")] +#[cfg_attr(test, assert_instr(vaesdeclast))] +pub unsafe fn _mm256_aesdeclast_epi128(a: __m256i, round_key: __m256i) -> __m256i { + aesdeclast_256(a, round_key) +} + +/// Performs one round of an AES encryption flow on each 128-bit word (state) in `a` using +/// the corresponding 128-bit word (key) in `round_key`. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_aesenc_epi128) +#[inline] +#[target_feature(enable = "vaes,avx512f")] +#[cfg_attr(test, assert_instr(vaesenc))] +pub unsafe fn _mm512_aesenc_epi128(a: __m512i, round_key: __m512i) -> __m512i { + aesenc_512(a, round_key) +} + +/// Performs the last round of an AES encryption flow on each 128-bit word (state) in `a` using +/// the corresponding 128-bit word (key) in `round_key`. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_aesenclast_epi128) +#[inline] +#[target_feature(enable = "vaes,avx512f")] +#[cfg_attr(test, assert_instr(vaesenclast))] +pub unsafe fn _mm512_aesenclast_epi128(a: __m512i, round_key: __m512i) -> __m512i { + aesenclast_512(a, round_key) +} + +/// Performs one round of an AES decryption flow on each 128-bit word (state) in `a` using +/// the corresponding 128-bit word (key) in `round_key`. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_aesdec_epi128) +#[inline] +#[target_feature(enable = "vaes,avx512f")] +#[cfg_attr(test, assert_instr(vaesdec))] +pub unsafe fn _mm512_aesdec_epi128(a: __m512i, round_key: __m512i) -> __m512i { + aesdec_512(a, round_key) +} + +/// Performs the last round of an AES decryption flow on each 128-bit word (state) in `a` using +/// the corresponding 128-bit word (key) in `round_key`. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_aesdeclast_epi128) +#[inline] +#[target_feature(enable = "vaes,avx512f")] +#[cfg_attr(test, assert_instr(vaesdeclast))] +pub unsafe fn _mm512_aesdeclast_epi128(a: __m512i, round_key: __m512i) -> __m512i { + aesdeclast_512(a, round_key) +} + +#[cfg(test)] +mod tests { + // The constants in the tests below are just bit patterns. They should not + // be interpreted as integers; signedness does not make sense for them, but + // __mXXXi happens to be defined in terms of signed integers. 
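// Illustrative sketch (not part of the patched sources; the helper name is
// hypothetical): the wide VAES intrinsics act independently on each 128-bit
// lane, so a 256-bit encrypt round can be modeled by applying the scalar
// AES-NI intrinsic per lane. The helpers below test exactly this equivalence.
#[target_feature(enable = "avx2,aes")]
unsafe fn aesenc_256_reference(a: __m256i, round_key: __m256i) -> __m256i {
    let lo = _mm_aesenc_si128(
        _mm256_extracti128_si256::<0>(a),
        _mm256_extracti128_si256::<0>(round_key),
    );
    let hi = _mm_aesenc_si128(
        _mm256_extracti128_si256::<1>(a),
        _mm256_extracti128_si256::<1>(round_key),
    );
    // reassemble the two 128-bit lane results into one 256-bit vector (hi, lo)
    _mm256_set_m128i(hi, lo)
}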
+ #![allow(overflowing_literals)] + + use stdarch_test::simd_test; + + use crate::core_arch::x86::*; + + // the first parts of these tests are straight ports from the AES-NI tests + // the second parts directly compare the two, for inputs that are different across lanes + // and "more random" than the standard test vectors + // ideally we'd be using quickcheck here instead + + #[target_feature(enable = "avx2")] + unsafe fn helper_for_256_vaes( + linear: unsafe fn(__m128i, __m128i) -> __m128i, + vectorized: unsafe fn(__m256i, __m256i) -> __m256i, + ) { + let a = _mm256_set_epi64x( + 0xDCB4DB3657BF0B7D, + 0x18DB0601068EDD9F, + 0xB76B908233200DC5, + 0xE478235FA8E22D5E, + ); + let k = _mm256_set_epi64x( + 0x672F6F105A94CEA7, + 0x8298B8FFCA5F829C, + 0xA3927047B3FB61D8, + 0x978093862CDE7187, + ); + let mut a_decomp = [_mm_setzero_si128(); 2]; + a_decomp[0] = _mm256_extracti128_si256::<0>(a); + a_decomp[1] = _mm256_extracti128_si256::<1>(a); + let mut k_decomp = [_mm_setzero_si128(); 2]; + k_decomp[0] = _mm256_extracti128_si256::<0>(k); + k_decomp[1] = _mm256_extracti128_si256::<1>(k); + let r = vectorized(a, k); + let mut e_decomp = [_mm_setzero_si128(); 2]; + for i in 0..2 { + e_decomp[i] = linear(a_decomp[i], k_decomp[i]); + } + assert_eq_m128i(_mm256_extracti128_si256::<0>(r), e_decomp[0]); + assert_eq_m128i(_mm256_extracti128_si256::<1>(r), e_decomp[1]); + } + + #[target_feature(enable = "sse2")] + unsafe fn setup_state_key(broadcast: unsafe fn(__m128i) -> T) -> (T, T) { + // Constants taken from https://msdn.microsoft.com/en-us/library/cc664949.aspx. + let a = _mm_set_epi64x(0x0123456789abcdef, 0x8899aabbccddeeff); + let k = _mm_set_epi64x(0x1133557799bbddff, 0x0022446688aaccee); + (broadcast(a), broadcast(k)) + } + + #[target_feature(enable = "avx2")] + unsafe fn setup_state_key_256() -> (__m256i, __m256i) { + setup_state_key(_mm256_broadcastsi128_si256) + } + + #[target_feature(enable = "avx512f")] + unsafe fn setup_state_key_512() -> (__m512i, __m512i) { + setup_state_key(_mm512_broadcast_i32x4) + } + + #[simd_test(enable = "vaes,avx512vl")] + unsafe fn test_mm256_aesdec_epi128() { + // Constants taken from https://msdn.microsoft.com/en-us/library/cc664949.aspx. + let (a, k) = setup_state_key_256(); + let e = _mm_set_epi64x(0x044e4f5176fec48f, 0xb57ecfa381da39ee); + let e = _mm256_broadcastsi128_si256(e); + let r = _mm256_aesdec_epi128(a, k); + assert_eq_m256i(r, e); + + helper_for_256_vaes(_mm_aesdec_si128, _mm256_aesdec_epi128); + } + + #[simd_test(enable = "vaes,avx512vl")] + unsafe fn test_mm256_aesdeclast_epi128() { + // Constants taken from https://msdn.microsoft.com/en-us/library/cc714178.aspx. + let (a, k) = setup_state_key_256(); + let e = _mm_set_epi64x(0x36cad57d9072bf9e, 0xf210dd981fa4a493); + let e = _mm256_broadcastsi128_si256(e); + let r = _mm256_aesdeclast_epi128(a, k); + assert_eq_m256i(r, e); + + helper_for_256_vaes(_mm_aesdeclast_si128, _mm256_aesdeclast_epi128); + } + + #[simd_test(enable = "vaes,avx512vl")] + unsafe fn test_mm256_aesenc_epi128() { + // Constants taken from https://msdn.microsoft.com/en-us/library/cc664810.aspx. 
+ // they are repeated appropriately + let (a, k) = setup_state_key_256(); + let e = _mm_set_epi64x(0x16ab0e57dfc442ed, 0x28e4ee1884504333); + let e = _mm256_broadcastsi128_si256(e); + let r = _mm256_aesenc_epi128(a, k); + assert_eq_m256i(r, e); + + helper_for_256_vaes(_mm_aesenc_si128, _mm256_aesenc_epi128); + } + + #[simd_test(enable = "vaes,avx512vl")] + unsafe fn test_mm256_aesenclast_epi128() { + // Constants taken from https://msdn.microsoft.com/en-us/library/cc714136.aspx. + let (a, k) = setup_state_key_256(); + let e = _mm_set_epi64x(0xb6dd7df25d7ab320, 0x4b04f98cf4c860f8); + let e = _mm256_broadcastsi128_si256(e); + let r = _mm256_aesenclast_epi128(a, k); + assert_eq_m256i(r, e); + + helper_for_256_vaes(_mm_aesenclast_si128, _mm256_aesenclast_epi128); + } + + #[target_feature(enable = "avx512f")] + unsafe fn helper_for_512_vaes( + linear: unsafe fn(__m128i, __m128i) -> __m128i, + vectorized: unsafe fn(__m512i, __m512i) -> __m512i, + ) { + let a = _mm512_set_epi64( + 0xDCB4DB3657BF0B7D, + 0x18DB0601068EDD9F, + 0xB76B908233200DC5, + 0xE478235FA8E22D5E, + 0xAB05CFFA2621154C, + 0x1171B47A186174C9, + 0x8C6B6C0E7595CEC9, + 0xBE3E7D4934E961BD, + ); + let k = _mm512_set_epi64( + 0x672F6F105A94CEA7, + 0x8298B8FFCA5F829C, + 0xA3927047B3FB61D8, + 0x978093862CDE7187, + 0xB1927AB22F31D0EC, + 0xA9A5DA619BE4D7AF, + 0xCA2590F56884FDC6, + 0x19BE9F660038BDB5, + ); + let mut a_decomp = [_mm_setzero_si128(); 4]; + a_decomp[0] = _mm512_extracti32x4_epi32::<0>(a); + a_decomp[1] = _mm512_extracti32x4_epi32::<1>(a); + a_decomp[2] = _mm512_extracti32x4_epi32::<2>(a); + a_decomp[3] = _mm512_extracti32x4_epi32::<3>(a); + let mut k_decomp = [_mm_setzero_si128(); 4]; + k_decomp[0] = _mm512_extracti32x4_epi32::<0>(k); + k_decomp[1] = _mm512_extracti32x4_epi32::<1>(k); + k_decomp[2] = _mm512_extracti32x4_epi32::<2>(k); + k_decomp[3] = _mm512_extracti32x4_epi32::<3>(k); + let r = vectorized(a, k); + let mut e_decomp = [_mm_setzero_si128(); 4]; + for i in 0..4 { + e_decomp[i] = linear(a_decomp[i], k_decomp[i]); + } + assert_eq_m128i(_mm512_extracti32x4_epi32::<0>(r), e_decomp[0]); + assert_eq_m128i(_mm512_extracti32x4_epi32::<1>(r), e_decomp[1]); + assert_eq_m128i(_mm512_extracti32x4_epi32::<2>(r), e_decomp[2]); + assert_eq_m128i(_mm512_extracti32x4_epi32::<3>(r), e_decomp[3]); + } + + #[simd_test(enable = "vaes,avx512f")] + unsafe fn test_mm512_aesdec_epi128() { + // Constants taken from https://msdn.microsoft.com/en-us/library/cc664949.aspx. + let (a, k) = setup_state_key_512(); + let e = _mm_set_epi64x(0x044e4f5176fec48f, 0xb57ecfa381da39ee); + let e = _mm512_broadcast_i32x4(e); + let r = _mm512_aesdec_epi128(a, k); + assert_eq_m512i(r, e); + + helper_for_512_vaes(_mm_aesdec_si128, _mm512_aesdec_epi128); + } + + #[simd_test(enable = "vaes,avx512f")] + unsafe fn test_mm512_aesdeclast_epi128() { + // Constants taken from https://msdn.microsoft.com/en-us/library/cc714178.aspx. + let (a, k) = setup_state_key_512(); + let e = _mm_set_epi64x(0x36cad57d9072bf9e, 0xf210dd981fa4a493); + let e = _mm512_broadcast_i32x4(e); + let r = _mm512_aesdeclast_epi128(a, k); + assert_eq_m512i(r, e); + + helper_for_512_vaes(_mm_aesdeclast_si128, _mm512_aesdeclast_epi128); + } + + #[simd_test(enable = "vaes,avx512f")] + unsafe fn test_mm512_aesenc_epi128() { + // Constants taken from https://msdn.microsoft.com/en-us/library/cc664810.aspx. 
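// The 128-bit state/key pair below is broadcast into every 128-bit lane by
// setup_state_key_512(), so the expected output is the scalar AES-NI answer
// broadcast the same way; helper_for_512_vaes() then re-checks each lane
// against _mm_aesenc_si128 with inputs that differ across lanes.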
+ let (a, k) = setup_state_key_512(); + let e = _mm_set_epi64x(0x16ab0e57dfc442ed, 0x28e4ee1884504333); + let e = _mm512_broadcast_i32x4(e); + let r = _mm512_aesenc_epi128(a, k); + assert_eq_m512i(r, e); + + helper_for_512_vaes(_mm_aesenc_si128, _mm512_aesenc_epi128); + } + + #[simd_test(enable = "vaes,avx512f")] + unsafe fn test_mm512_aesenclast_epi128() { + // Constants taken from https://msdn.microsoft.com/en-us/library/cc714136.aspx. + let (a, k) = setup_state_key_512(); + let e = _mm_set_epi64x(0xb6dd7df25d7ab320, 0x4b04f98cf4c860f8); + let e = _mm512_broadcast_i32x4(e); + let r = _mm512_aesenclast_epi128(a, k); + assert_eq_m512i(r, e); + + helper_for_512_vaes(_mm_aesenclast_si128, _mm512_aesenclast_epi128); + } +} diff --git a/library/stdarch/crates/core_arch/src/x86/vpclmulqdq.rs b/library/stdarch/crates/core_arch/src/x86/vpclmulqdq.rs new file mode 100644 index 000000000..ea76708b8 --- /dev/null +++ b/library/stdarch/crates/core_arch/src/x86/vpclmulqdq.rs @@ -0,0 +1,258 @@ +//! Vectorized Carry-less Multiplication (VCLMUL) +//! +//! The reference is [Intel 64 and IA-32 Architectures Software Developer's +//! Manual Volume 2: Instruction Set Reference, A-Z][intel64_ref] (p. 4-241). +//! +//! [intel64_ref]: http://www.intel.com/content/dam/www/public/us/en/documents/manuals/64-ia-32-architectures-software-developer-instruction-set-reference-manual-325383.pdf + +use crate::core_arch::x86::__m256i; +use crate::core_arch::x86::__m512i; + +#[cfg(test)] +use stdarch_test::assert_instr; + +#[allow(improper_ctypes)] +extern "C" { + #[link_name = "llvm.x86.pclmulqdq.256"] + fn pclmulqdq_256(a: __m256i, round_key: __m256i, imm8: u8) -> __m256i; + #[link_name = "llvm.x86.pclmulqdq.512"] + fn pclmulqdq_512(a: __m512i, round_key: __m512i, imm8: u8) -> __m512i; +} + +// for some odd reason on x86_64 we generate the correct long name instructions +// but on i686 we generate the short name + imm8 +// so we need to special-case on that... + +/// Performs a carry-less multiplication of two 64-bit polynomials over the +/// finite field GF(2^k) - in each of the 4 128-bit lanes. +/// +/// The immediate byte is used for determining which halves of each lane `a` and `b` +/// should be used. Immediate bits other than 0 and 4 are ignored. +/// All lanes share immediate byte. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_clmulepi64_epi128) +#[inline] +#[target_feature(enable = "vpclmulqdq,avx512f")] +// technically according to Intel's documentation we don't need avx512f here, however LLVM gets confused otherwise +#[cfg_attr(test, assert_instr(vpclmul, IMM8 = 0))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn _mm512_clmulepi64_epi128(a: __m512i, b: __m512i) -> __m512i { + static_assert_imm8!(IMM8); + pclmulqdq_512(a, b, IMM8 as u8) +} + +/// Performs a carry-less multiplication of two 64-bit polynomials over the +/// finite field GF(2^k) - in each of the 2 128-bit lanes. +/// +/// The immediate byte is used for determining which halves of each lane `a` and `b` +/// should be used. Immediate bits other than 0 and 4 are ignored. +/// All lanes share immediate byte. 
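/// A scalar model of a single carry-less 64 x 64 -> 128-bit product (an
/// illustrative sketch, not the implementation used here):
///
/// ```
/// fn clmul64(a: u64, b: u64) -> u128 {
///     let mut acc: u128 = 0;
///     for i in 0..64 {
///         if (b >> i) & 1 == 1 {
///             // carry-less: XOR in shifted copies instead of adding them
///             acc ^= (a as u128) << i;
///         }
///     }
///     acc
/// }
/// ```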
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_clmulepi64_epi128) +#[inline] +#[target_feature(enable = "vpclmulqdq")] +#[cfg_attr(test, assert_instr(vpclmul, IMM8 = 0))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn _mm256_clmulepi64_epi128(a: __m256i, b: __m256i) -> __m256i { + static_assert_imm8!(IMM8); + pclmulqdq_256(a, b, IMM8 as u8) +} + +#[cfg(test)] +mod tests { + // The constants in the tests below are just bit patterns. They should not + // be interpreted as integers; signedness does not make sense for them, but + // __mXXXi happens to be defined in terms of signed integers. + #![allow(overflowing_literals)] + + use stdarch_test::simd_test; + + use crate::core_arch::x86::*; + + macro_rules! verify_kat_pclmul { + ($broadcast:ident, $clmul:ident, $assert:ident) => { + // Constants taken from https://software.intel.com/sites/default/files/managed/72/cc/clmul-wp-rev-2.02-2014-04-20.pdf + let a = _mm_set_epi64x(0x7b5b546573745665, 0x63746f725d53475d); + let a = $broadcast(a); + let b = _mm_set_epi64x(0x4869285368617929, 0x5b477565726f6e5d); + let b = $broadcast(b); + let r00 = _mm_set_epi64x(0x1d4d84c85c3440c0, 0x929633d5d36f0451); + let r00 = $broadcast(r00); + let r01 = _mm_set_epi64x(0x1bd17c8d556ab5a1, 0x7fa540ac2a281315); + let r01 = $broadcast(r01); + let r10 = _mm_set_epi64x(0x1a2bf6db3a30862f, 0xbabf262df4b7d5c9); + let r10 = $broadcast(r10); + let r11 = _mm_set_epi64x(0x1d1e1f2c592e7c45, 0xd66ee03e410fd4ed); + let r11 = $broadcast(r11); + + $assert($clmul::<0x00>(a, b), r00); + $assert($clmul::<0x10>(a, b), r01); + $assert($clmul::<0x01>(a, b), r10); + $assert($clmul::<0x11>(a, b), r11); + + let a0 = _mm_set_epi64x(0x0000000000000000, 0x8000000000000000); + let a0 = $broadcast(a0); + let r = _mm_set_epi64x(0x4000000000000000, 0x0000000000000000); + let r = $broadcast(r); + $assert($clmul::<0x00>(a0, a0), r); + } + } + + macro_rules! unroll { + ($target:ident[4] = $op:ident::<4>($source:ident);) => { + $target[3] = $op::<3>($source); + $target[2] = $op::<2>($source); + unroll! {$target[2] = $op::<2>($source);} + }; + ($target:ident[2] = $op:ident::<2>($source:ident);) => { + $target[1] = $op::<1>($source); + $target[0] = $op::<0>($source); + }; + (assert_eq_m128i($op:ident::<4>($vec_res:ident),$lin_res:ident[4]);) => { + assert_eq_m128i($op::<3>($vec_res), $lin_res[3]); + assert_eq_m128i($op::<2>($vec_res), $lin_res[2]); + unroll! {assert_eq_m128i($op::<2>($vec_res),$lin_res[2]);} + }; + (assert_eq_m128i($op:ident::<2>($vec_res:ident),$lin_res:ident[2]);) => { + assert_eq_m128i($op::<1>($vec_res), $lin_res[1]); + assert_eq_m128i($op::<0>($vec_res), $lin_res[0]); + }; + } + + // this function tests one of the possible 4 instances + // with different inputs across lanes + #[target_feature(enable = "vpclmulqdq,avx512f")] + unsafe fn verify_512_helper( + linear: unsafe fn(__m128i, __m128i) -> __m128i, + vectorized: unsafe fn(__m512i, __m512i) -> __m512i, + ) { + let a = _mm512_set_epi64( + 0xDCB4DB3657BF0B7D, + 0x18DB0601068EDD9F, + 0xB76B908233200DC5, + 0xE478235FA8E22D5E, + 0xAB05CFFA2621154C, + 0x1171B47A186174C9, + 0x8C6B6C0E7595CEC9, + 0xBE3E7D4934E961BD, + ); + let b = _mm512_set_epi64( + 0x672F6F105A94CEA7, + 0x8298B8FFCA5F829C, + 0xA3927047B3FB61D8, + 0x978093862CDE7187, + 0xB1927AB22F31D0EC, + 0xA9A5DA619BE4D7AF, + 0xCA2590F56884FDC6, + 0x19BE9F660038BDB5, + ); + + let mut a_decomp = [_mm_setzero_si128(); 4]; + unroll! 
{a_decomp[4] = _mm512_extracti32x4_epi32::<4>(a);} + let mut b_decomp = [_mm_setzero_si128(); 4]; + unroll! {b_decomp[4] = _mm512_extracti32x4_epi32::<4>(b);} + + let r = vectorized(a, b); + let mut e_decomp = [_mm_setzero_si128(); 4]; + for i in 0..4 { + e_decomp[i] = linear(a_decomp[i], b_decomp[i]); + } + unroll! {assert_eq_m128i(_mm512_extracti32x4_epi32::<4>(r),e_decomp[4]);} + } + + // this function tests one of the possible 4 instances + // with different inputs across lanes for the VL version + #[target_feature(enable = "vpclmulqdq,avx512vl")] + unsafe fn verify_256_helper( + linear: unsafe fn(__m128i, __m128i) -> __m128i, + vectorized: unsafe fn(__m256i, __m256i) -> __m256i, + ) { + let a = _mm512_set_epi64( + 0xDCB4DB3657BF0B7D, + 0x18DB0601068EDD9F, + 0xB76B908233200DC5, + 0xE478235FA8E22D5E, + 0xAB05CFFA2621154C, + 0x1171B47A186174C9, + 0x8C6B6C0E7595CEC9, + 0xBE3E7D4934E961BD, + ); + let b = _mm512_set_epi64( + 0x672F6F105A94CEA7, + 0x8298B8FFCA5F829C, + 0xA3927047B3FB61D8, + 0x978093862CDE7187, + 0xB1927AB22F31D0EC, + 0xA9A5DA619BE4D7AF, + 0xCA2590F56884FDC6, + 0x19BE9F660038BDB5, + ); + + let mut a_decomp = [_mm_setzero_si128(); 2]; + unroll! {a_decomp[2] = _mm512_extracti32x4_epi32::<2>(a);} + let mut b_decomp = [_mm_setzero_si128(); 2]; + unroll! {b_decomp[2] = _mm512_extracti32x4_epi32::<2>(b);} + + let r = vectorized( + _mm512_extracti64x4_epi64::<0>(a), + _mm512_extracti64x4_epi64::<0>(b), + ); + let mut e_decomp = [_mm_setzero_si128(); 2]; + for i in 0..2 { + e_decomp[i] = linear(a_decomp[i], b_decomp[i]); + } + unroll! {assert_eq_m128i(_mm256_extracti128_si256::<2>(r),e_decomp[2]);} + } + + #[simd_test(enable = "vpclmulqdq,avx512f")] + unsafe fn test_mm512_clmulepi64_epi128() { + verify_kat_pclmul!( + _mm512_broadcast_i32x4, + _mm512_clmulepi64_epi128, + assert_eq_m512i + ); + + verify_512_helper( + |a, b| _mm_clmulepi64_si128::<0x00>(a, b), + |a, b| _mm512_clmulepi64_epi128::<0x00>(a, b), + ); + verify_512_helper( + |a, b| _mm_clmulepi64_si128::<0x01>(a, b), + |a, b| _mm512_clmulepi64_epi128::<0x01>(a, b), + ); + verify_512_helper( + |a, b| _mm_clmulepi64_si128::<0x10>(a, b), + |a, b| _mm512_clmulepi64_epi128::<0x10>(a, b), + ); + verify_512_helper( + |a, b| _mm_clmulepi64_si128::<0x11>(a, b), + |a, b| _mm512_clmulepi64_epi128::<0x11>(a, b), + ); + } + + #[simd_test(enable = "vpclmulqdq,avx512vl")] + unsafe fn test_mm256_clmulepi64_epi128() { + verify_kat_pclmul!( + _mm256_broadcastsi128_si256, + _mm256_clmulepi64_epi128, + assert_eq_m256i + ); + + verify_256_helper( + |a, b| _mm_clmulepi64_si128::<0x00>(a, b), + |a, b| _mm256_clmulepi64_epi128::<0x00>(a, b), + ); + verify_256_helper( + |a, b| _mm_clmulepi64_si128::<0x01>(a, b), + |a, b| _mm256_clmulepi64_epi128::<0x01>(a, b), + ); + verify_256_helper( + |a, b| _mm_clmulepi64_si128::<0x10>(a, b), + |a, b| _mm256_clmulepi64_epi128::<0x10>(a, b), + ); + verify_256_helper( + |a, b| _mm_clmulepi64_si128::<0x11>(a, b), + |a, b| _mm256_clmulepi64_epi128::<0x11>(a, b), + ); + } +} diff --git a/library/stdarch/crates/core_arch/tests/cpu-detection.rs b/library/stdarch/crates/core_arch/tests/cpu-detection.rs index 61f5f0905..08caca738 100644 --- a/library/stdarch/crates/core_arch/tests/cpu-detection.rs +++ b/library/stdarch/crates/core_arch/tests/cpu-detection.rs @@ -31,12 +31,9 @@ fn x86_all() { is_x86_feature_detected!("avx512vpopcntdq") ); println!("avx512vbmi2 {:?}", is_x86_feature_detected!("avx512vbmi2")); - println!("avx512gfni {:?}", is_x86_feature_detected!("avx512gfni")); - println!("avx512vaes {:?}", 
is_x86_feature_detected!("avx512vaes")); - println!( - "avx512vpclmulqdq {:?}", - is_x86_feature_detected!("avx512vpclmulqdq") - ); + println!("gfni {:?}", is_x86_feature_detected!("gfni")); + println!("vaes {:?}", is_x86_feature_detected!("vaes")); + println!("vpclmulqdq {:?}", is_x86_feature_detected!("vpclmulqdq")); println!("avx512vnni {:?}", is_x86_feature_detected!("avx512vnni")); println!( "avx512bitalg {:?}", @@ -61,3 +58,15 @@ fn x86_all() { println!("xsaves: {:?}", is_x86_feature_detected!("xsaves")); println!("xsavec: {:?}", is_x86_feature_detected!("xsavec")); } + +#[test] +#[cfg(any(target_arch = "x86", target_arch = "x86_64"))] +#[allow(deprecated)] +fn x86_deprecated() { + println!("avx512gfni {:?}", is_x86_feature_detected!("avx512gfni")); + println!("avx512vaes {:?}", is_x86_feature_detected!("avx512vaes")); + println!( + "avx512vpclmulqdq {:?}", + is_x86_feature_detected!("avx512vpclmulqdq") + ); +} diff --git a/library/stdarch/crates/intrinsic-test/Cargo.toml b/library/stdarch/crates/intrinsic-test/Cargo.toml index 5fde23c9e..7efbab755 100644 --- a/library/stdarch/crates/intrinsic-test/Cargo.toml +++ b/library/stdarch/crates/intrinsic-test/Cargo.toml @@ -2,7 +2,7 @@ name = "intrinsic-test" version = "0.1.0" authors = ["Jamie Cunliffe "] -edition = "2018" +edition = "2021" [dependencies] lazy_static = "1.4.0" @@ -14,4 +14,4 @@ log = "0.4.11" pretty_env_logger = "0.4.0" rayon = "1.5.0" diff = "0.1.12" -itertools = "0.10.1" \ No newline at end of file +itertools = "0.10.1" diff --git a/library/stdarch/crates/intrinsic-test/missing_arm.txt b/library/stdarch/crates/intrinsic-test/missing_arm.txt index bbc8de584..3d7ead062 100644 --- a/library/stdarch/crates/intrinsic-test/missing_arm.txt +++ b/library/stdarch/crates/intrinsic-test/missing_arm.txt @@ -163,17 +163,6 @@ vcaddq_rot270_f32 vcaddq_rot90_f32 vcadd_rot270_f32 vcadd_rot90_f32 -vcombine_f32 -vcombine_p16 -vcombine_p8 -vcombine_s16 -vcombine_s32 -vcombine_s64 -vcombine_s8 -vcombine_u16 -vcombine_u32 -vcombine_u64 -vcombine_u8 vcvtaq_s32_f32 vcvtaq_u32_f32 vcvta_s32_f32 diff --git a/library/stdarch/crates/intrinsic-test/src/acle_csv_parser.rs b/library/stdarch/crates/intrinsic-test/src/acle_csv_parser.rs index d7b066485..7336c9e8b 100644 --- a/library/stdarch/crates/intrinsic-test/src/acle_csv_parser.rs +++ b/library/stdarch/crates/intrinsic-test/src/acle_csv_parser.rs @@ -59,8 +59,8 @@ impl Into for ACLEIntrinsicLine { let signature = self.intrinsic; let (ret_ty, remaining) = signature.split_once(' ').unwrap(); - let results = type_from_c(ret_ty) - .unwrap_or_else(|_| panic!("Failed to parse return type: {}", ret_ty)); + let results = + type_from_c(ret_ty).unwrap_or_else(|_| panic!("Failed to parse return type: {ret_ty}")); let (name, args) = remaining.split_once('(').unwrap(); let args = args.trim_end_matches(')'); @@ -177,7 +177,7 @@ fn from_c(pos: usize, s: &str) -> Argument { Argument { pos, name, - ty: type_from_c(s).unwrap_or_else(|_| panic!("Failed to parse type: {}", s)), + ty: type_from_c(s).unwrap_or_else(|_| panic!("Failed to parse type: {s}")), constraints: vec![], } } diff --git a/library/stdarch/crates/intrinsic-test/src/intrinsic.rs b/library/stdarch/crates/intrinsic-test/src/intrinsic.rs index e0645a36b..fb4eb4cb7 100644 --- a/library/stdarch/crates/intrinsic-test/src/intrinsic.rs +++ b/library/stdarch/crates/intrinsic-test/src/intrinsic.rs @@ -109,7 +109,7 @@ impl Intrinsic { pub fn generate_loop_rust(&self, additional: &str, passes: u32) -> String { let constraints = 
self.arguments.as_constraint_parameters_rust(); let constraints = if !constraints.is_empty() { - format!("::<{}>", constraints) + format!("::<{constraints}>") } else { constraints }; diff --git a/library/stdarch/crates/intrinsic-test/src/main.rs b/library/stdarch/crates/intrinsic-test/src/main.rs index 43f2df08b..dac934574 100644 --- a/library/stdarch/crates/intrinsic-test/src/main.rs +++ b/library/stdarch/crates/intrinsic-test/src/main.rs @@ -58,7 +58,7 @@ fn gen_code_c( pass = gen_code_c( intrinsic, constraints, - format!("{}-{}", name, i), + format!("{name}-{i}"), p64_armv7_workaround ) ) @@ -117,7 +117,7 @@ int main(int argc, char **argv) {{ }}"#, header_files = header_files .iter() - .map(|header| format!("#include <{}>", header)) + .map(|header| format!("#include <{header}>")) .collect::>() .join("\n"), arglists = intrinsic.arguments.gen_arglists_c(PASSES), @@ -148,7 +148,7 @@ fn gen_code_rust(intrinsic: &Intrinsic, constraints: &[&Argument], name: String) name = current.name, ty = current.ty.rust_type(), val = i, - pass = gen_code_rust(intrinsic, constraints, format!("{}-{}", name, i)) + pass = gen_code_rust(intrinsic, constraints, format!("{name}-{i}")) ) }) .collect() @@ -237,7 +237,7 @@ fn build_rust(intrinsics: &Vec, toolchain: &str, a32: bool) -> bool { intrinsics.iter().for_each(|i| { let rust_dir = format!(r#"rust_programs/{}"#, i.name); let _ = std::fs::create_dir_all(&rust_dir); - let rust_filename = format!(r#"{}/main.rs"#, rust_dir); + let rust_filename = format!(r#"{rust_dir}/main.rs"#); let mut file = File::create(&rust_filename).unwrap(); let c_code = generate_rust_program(&i, a32); @@ -355,7 +355,7 @@ fn main() { let filename = matches.value_of("INPUT").unwrap(); let toolchain = matches .value_of("TOOLCHAIN") - .map_or("".into(), |t| format!("+{}", t)); + .map_or("".into(), |t| format!("+{t}")); let cpp_compiler = matches.value_of("CPPCOMPILER").unwrap(); let c_runner = matches.value_of("RUNNER").unwrap_or(""); @@ -443,7 +443,7 @@ fn compare_outputs(intrinsics: &Vec, toolchain: &str, runner: &str, a let (c, rust) = match (c, rust) { (Ok(c), Ok(rust)) => (c, rust), - a => panic!("{:#?}", a), + a => panic!("{a:#?}"), }; if !c.status.success() { @@ -480,20 +480,20 @@ fn compare_outputs(intrinsics: &Vec, toolchain: &str, runner: &str, a intrinsics.iter().for_each(|reason| match reason { FailureReason::Difference(intrinsic, c, rust) => { - println!("Difference for intrinsic: {}", intrinsic); + println!("Difference for intrinsic: {intrinsic}"); let diff = diff::lines(c, rust); diff.iter().for_each(|diff| match diff { - diff::Result::Left(c) => println!("C: {}", c), - diff::Result::Right(rust) => println!("Rust: {}", rust), + diff::Result::Left(c) => println!("C: {c}"), + diff::Result::Right(rust) => println!("Rust: {rust}"), diff::Result::Both(_, _) => (), }); println!("****************************************************************"); } FailureReason::RunC(intrinsic) => { - println!("Failed to run C program for intrinsic {}", intrinsic) + println!("Failed to run C program for intrinsic {intrinsic}") } FailureReason::RunRust(intrinsic) => { - println!("Failed to run rust program for intrinsic {}", intrinsic) + println!("Failed to run rust program for intrinsic {intrinsic}") } }); println!("{} differences found", intrinsics.len()); diff --git a/library/stdarch/crates/intrinsic-test/src/types.rs b/library/stdarch/crates/intrinsic-test/src/types.rs index dd23586e7..7442ad5e6 100644 --- a/library/stdarch/crates/intrinsic-test/src/types.rs +++ 
b/library/stdarch/crates/intrinsic-test/src/types.rs @@ -25,7 +25,7 @@ impl FromStr for TypeKind { "poly" => Ok(Self::Poly), "uint" | "unsigned" => Ok(Self::UInt), "void" => Ok(Self::Void), - _ => Err(format!("Impossible to parse argument kind {}", s)), + _ => Err(format!("Impossible to parse argument kind {s}")), } } } @@ -199,14 +199,14 @@ impl IntrinsicType { simd_len: Some(simd_len), vec_len: None, .. - } => format!("{}{}x{}_t", kind.c_prefix(), bit_len, simd_len), + } => format!("{}{bit_len}x{simd_len}_t", kind.c_prefix()), IntrinsicType::Type { kind, bit_len: Some(bit_len), simd_len: Some(simd_len), vec_len: Some(vec_len), .. - } => format!("{}{}x{}x{}_t", kind.c_prefix(), bit_len, simd_len, vec_len), + } => format!("{}{bit_len}x{simd_len}x{vec_len}_t", kind.c_prefix()), _ => todo!("{:#?}", self), } } @@ -220,7 +220,7 @@ impl IntrinsicType { simd_len: Some(simd_len), vec_len: Some(_), .. - } => format!("{}{}x{}_t", kind.c_prefix(), bit_len, simd_len), + } => format!("{}{bit_len}x{simd_len}_t", kind.c_prefix()), _ => unreachable!("Shouldn't be called on this type"), } } @@ -234,21 +234,21 @@ impl IntrinsicType { simd_len: None, vec_len: None, .. - } => format!("{}{}", kind.rust_prefix(), bit_len), + } => format!("{}{bit_len}", kind.rust_prefix()), IntrinsicType::Type { kind, bit_len: Some(bit_len), simd_len: Some(simd_len), vec_len: None, .. - } => format!("{}{}x{}_t", kind.c_prefix(), bit_len, simd_len), + } => format!("{}{bit_len}x{simd_len}_t", kind.c_prefix()), IntrinsicType::Type { kind, bit_len: Some(bit_len), simd_len: Some(simd_len), vec_len: Some(vec_len), .. - } => format!("{}{}x{}x{}_t", kind.c_prefix(), bit_len, simd_len, vec_len), + } => format!("{}{bit_len}x{simd_len}x{vec_len}_t", kind.c_prefix()), _ => todo!("{:#?}", self), } } diff --git a/library/stdarch/crates/intrinsic-test/src/values.rs b/library/stdarch/crates/intrinsic-test/src/values.rs index 64b4d9fc9..68dd30d44 100644 --- a/library/stdarch/crates/intrinsic-test/src/values.rs +++ b/library/stdarch/crates/intrinsic-test/src/values.rs @@ -13,7 +13,7 @@ pub fn value_for_array(bits: u32, index: u32) -> String { } else if bits == 64 { format!("{:#X}", VALUES_64[index % VALUES_64.len()]) } else { - panic!("Unknown size: {}", bits); + panic!("Unknown size: {bits}"); } } diff --git a/library/stdarch/crates/simd-test-macro/Cargo.toml b/library/stdarch/crates/simd-test-macro/Cargo.toml index c3ecf981e..cd110c1d3 100644 --- a/library/stdarch/crates/simd-test-macro/Cargo.toml +++ b/library/stdarch/crates/simd-test-macro/Cargo.toml @@ -2,7 +2,7 @@ name = "simd-test-macro" version = "0.1.0" authors = ["Alex Crichton "] -edition = "2018" +edition = "2021" [lib] proc-macro = true diff --git a/library/stdarch/crates/simd-test-macro/src/lib.rs b/library/stdarch/crates/simd-test-macro/src/lib.rs index 9d81a4c5e..2a31dd745 100644 --- a/library/stdarch/crates/simd-test-macro/src/lib.rs +++ b/library/stdarch/crates/simd-test-macro/src/lib.rs @@ -59,7 +59,7 @@ pub fn simd_test( let macro_test = match target .split('-') .next() - .unwrap_or_else(|| panic!("target triple contained no \"-\": {}", target)) + .unwrap_or_else(|| panic!("target triple contained no \"-\": {target}")) { "i686" | "x86_64" | "i586" => "is_x86_feature_detected", "arm" | "armv7" => "is_arm_feature_detected", @@ -82,7 +82,7 @@ pub fn simd_test( force_test = true; "is_mips64_feature_detected" } - t => panic!("unknown target: {}", t), + t => panic!("unknown target: {t}"), }; let macro_test = Ident::new(macro_test, Span::call_site()); diff --git 
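The simd-test-macro hunk above chooses a runtime-detection macro from the first dash-separated component of the target triple. A standalone sketch limited to the arms visible in this hunk (detection_macro is an illustrative name; the real macro also covers the other architectures in its unchanged match arms):

fn detection_macro(target: &str) -> &'static str {
    match target
        .split('-')
        .next()
        .unwrap_or_else(|| panic!("target triple contained no \"-\": {target}"))
    {
        "i686" | "x86_64" | "i586" => "is_x86_feature_detected",
        "arm" | "armv7" => "is_arm_feature_detected",
        t => panic!("unknown target: {t}"),
    }
}

fn main() {
    assert_eq!(
        detection_macro("x86_64-unknown-linux-gnu"),
        "is_x86_feature_detected"
    );
}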
a/library/stdarch/crates/std_detect/Cargo.toml b/library/stdarch/crates/std_detect/Cargo.toml index 3a482564e..589a3900a 100644 --- a/library/stdarch/crates/std_detect/Cargo.toml +++ b/library/stdarch/crates/std_detect/Cargo.toml @@ -13,7 +13,7 @@ readme = "README.md" keywords = ["std", "run-time", "feature", "detection"] categories = ["hardware-support"] license = "MIT OR Apache-2.0" -edition = "2018" +edition = "2021" [badges] is-it-maintained-issue-resolution = { repository = "rust-lang/stdarch" } diff --git a/library/stdarch/crates/std_detect/src/detect/arch/x86.rs b/library/stdarch/crates/std_detect/src/detect/arch/x86.rs index 893e1a887..d0bf92d3e 100644 --- a/library/stdarch/crates/std_detect/src/detect/arch/x86.rs +++ b/library/stdarch/crates/std_detect/src/detect/arch/x86.rs @@ -68,9 +68,9 @@ features! { /// * `"avx512vbmi"` /// * `"avx512vpopcntdq"` /// * `"avx512vbmi2"` - /// * `"avx512gfni"` - /// * `"avx512vaes"` - /// * `"avx512vpclmulqdq"` + /// * `"gfni"` + /// * `"vaes"` + /// * `"vpclmulqdq"` /// * `"avx512vnni"` /// * `"avx512bitalg"` /// * `"avx512bf16"` @@ -95,6 +95,9 @@ features! { /// [docs]: https://software.intel.com/sites/landingpage/IntrinsicsGuide #[stable(feature = "simd_x86", since = "1.27.0")] @BIND_FEATURE_NAME: "abm"; "lzcnt"; // abm is a synonym for lzcnt + @BIND_FEATURE_NAME: "avx512gfni"; "gfni"; #[deprecated(since = "1.67.0", note = "the `avx512gfni` feature has been renamed to `gfni`")]; + @BIND_FEATURE_NAME: "avx512vaes"; "vaes"; #[deprecated(since = "1.67.0", note = "the `avx512vaes` feature has been renamed to `vaes`")]; + @BIND_FEATURE_NAME: "avx512vpclmulqdq"; "vpclmulqdq"; #[deprecated(since = "1.67.0", note = "the `avx512vpclmulqdq` feature has been renamed to `vpclmulqdq`")]; @FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] aes: "aes"; /// AES (Advanced Encryption Standard New Instructions AES-NI) @FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] pclmulqdq: "pclmulqdq"; @@ -150,11 +153,11 @@ features! { /// Quadword) @FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] avx512vbmi2: "avx512vbmi2"; /// AVX-512 VBMI2 (Additional byte, word, dword and qword capabilities) - @FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] avx512gfni: "avx512gfni"; + @FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] gfni: "gfni"; /// AVX-512 GFNI (Galois Field New Instruction) - @FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] avx512vaes: "avx512vaes"; + @FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] vaes: "vaes"; /// AVX-512 VAES (Vector AES instruction) - @FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] avx512vpclmulqdq: "avx512vpclmulqdq"; + @FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] vpclmulqdq: "vpclmulqdq"; /// AVX-512 VPCLMULQDQ (Vector PCLMULQDQ instructions) @FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] avx512vnni: "avx512vnni"; /// AVX-512 VNNI (Vector Neural Network Instructions) diff --git a/library/stdarch/crates/std_detect/src/detect/cache.rs b/library/stdarch/crates/std_detect/src/detect/cache.rs index d01a5ea24..a94c655c3 100644 --- a/library/stdarch/crates/std_detect/src/detect/cache.rs +++ b/library/stdarch/crates/std_detect/src/detect/cache.rs @@ -179,7 +179,7 @@ fn detect_and_initialize() -> Initializer { /// the bit is set, the feature is enabled, and otherwise it is disabled. 
/// /// If the feature `std_detect_env_override` is enabled looks for the env -/// variable `RUST_STD_DETECT_UNSTABLE` and uses its its content to disable +/// variable `RUST_STD_DETECT_UNSTABLE` and uses its content to disable /// Features that would had been otherwise detected. #[inline] pub(crate) fn test(bit: u32) -> bool { diff --git a/library/stdarch/crates/std_detect/src/detect/macros.rs b/library/stdarch/crates/std_detect/src/detect/macros.rs index a467f9db6..45feec79f 100644 --- a/library/stdarch/crates/std_detect/src/detect/macros.rs +++ b/library/stdarch/crates/std_detect/src/detect/macros.rs @@ -17,7 +17,7 @@ macro_rules! features { @CFG: $cfg:meta; @MACRO_NAME: $macro_name:ident; @MACRO_ATTRS: $(#[$macro_attrs:meta])* - $(@BIND_FEATURE_NAME: $bind_feature:tt; $feature_impl:tt; )* + $(@BIND_FEATURE_NAME: $bind_feature:tt; $feature_impl:tt; $(#[$deprecate_attr:meta];)?)* $(@NO_RUNTIME_DETECTION: $nort_feature:tt; )* $(@FEATURE: #[$stability_attr:meta] $feature:ident: $feature_lit:tt; $(implied by target_features: [$($target_feature_lit:tt),*];)? @@ -35,7 +35,15 @@ macro_rules! features { }; )* $( - ($bind_feature) => { $crate::$macro_name!($feature_impl) }; + ($bind_feature) => { + { + $( + #[$deprecate_attr] macro_rules! deprecated_feature { {} => {}; } + deprecated_feature! {}; + )? + $crate::$macro_name!($feature_impl) + } + }; )* $( ($nort_feature) => { diff --git a/library/stdarch/crates/std_detect/src/detect/mod.rs b/library/stdarch/crates/std_detect/src/detect/mod.rs index 2bca84ca1..9a135c90a 100644 --- a/library/stdarch/crates/std_detect/src/detect/mod.rs +++ b/library/stdarch/crates/std_detect/src/detect/mod.rs @@ -47,7 +47,7 @@ cfg_if! { // On x86/x86_64 no OS specific functionality is required. #[path = "os/x86.rs"] mod os; - } else if #[cfg(all(target_os = "linux", feature = "libc"))] { + } else if #[cfg(all(any(target_os = "linux", target_os = "android"), feature = "libc"))] { #[path = "os/linux/mod.rs"] mod os; } else if #[cfg(all(target_os = "freebsd", feature = "libc"))] { diff --git a/library/stdarch/crates/std_detect/src/detect/os/linux/aarch64.rs b/library/stdarch/crates/std_detect/src/detect/os/linux/aarch64.rs index 6c79ba86d..a75185d43 100644 --- a/library/stdarch/crates/std_detect/src/detect/os/linux/aarch64.rs +++ b/library/stdarch/crates/std_detect/src/detect/os/linux/aarch64.rs @@ -329,7 +329,7 @@ mod tests { env!("CARGO_MANIFEST_DIR"), "/src/detect/test_data/linux-empty-hwcap2-aarch64.auxv" ); - println!("file: {}", file); + println!("file: {file}"); let v = auxv_from_file(file).unwrap(); println!("HWCAP : 0x{:0x}", v.hwcap); println!("HWCAP2: 0x{:0x}", v.hwcap2); @@ -341,7 +341,7 @@ mod tests { env!("CARGO_MANIFEST_DIR"), "/src/detect/test_data/linux-no-hwcap2-aarch64.auxv" ); - println!("file: {}", file); + println!("file: {file}"); let v = auxv_from_file(file).unwrap(); println!("HWCAP : 0x{:0x}", v.hwcap); println!("HWCAP2: 0x{:0x}", v.hwcap2); @@ -353,7 +353,7 @@ mod tests { env!("CARGO_MANIFEST_DIR"), "/src/detect/test_data/linux-hwcap2-aarch64.auxv" ); - println!("file: {}", file); + println!("file: {file}"); let v = auxv_from_file(file).unwrap(); println!("HWCAP : 0x{:0x}", v.hwcap); println!("HWCAP2: 0x{:0x}", v.hwcap2); diff --git a/library/stdarch/crates/std_detect/src/detect/os/linux/auxvec.rs b/library/stdarch/crates/std_detect/src/detect/os/linux/auxvec.rs index c903903bd..d9e7b28ea 100644 --- a/library/stdarch/crates/std_detect/src/detect/os/linux/auxvec.rs +++ b/library/stdarch/crates/std_detect/src/detect/os/linux/auxvec.rs @@ -313,7 
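The macros.rs hunk above is what makes the renamed feature strings warn: when a deprecated alias is matched, the expansion defines a #[deprecated] macro and invokes it once, so the warning fires at the caller's use site before delegating to the new name. A self-contained sketch of that trick (check_feature and the `true` stand-in are illustrative, not the real detection plumbing):

macro_rules! check_feature {
    ("vaes") => {
        true // stand-in for the real runtime-detection expansion
    };
    ("avx512vaes") => {{
        #[deprecated(since = "1.67.0", note = "`avx512vaes` has been renamed to `vaes`")]
        macro_rules! deprecated_feature {
            {} => {};
        }
        // Using the freshly defined deprecated macro triggers the warning,
        // then the alias delegates to the new spelling.
        deprecated_feature! {};
        check_feature!("vaes")
    }};
}

fn main() {
    let _ = check_feature!("avx512vaes"); // warns: use of deprecated macro
}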
+313,7 @@ mod tests { #[test] fn linux_rpi3() { let file = concat!(env!("CARGO_MANIFEST_DIR"), "/src/detect/test_data/linux-rpi3.auxv"); - println!("file: {}", file); + println!("file: {file}"); let v = auxv_from_file(file).unwrap(); assert_eq!(v.hwcap, 4174038); assert_eq!(v.hwcap2, 16); @@ -322,7 +322,7 @@ mod tests { #[test] fn linux_macos_vb() { let file = concat!(env!("CARGO_MANIFEST_DIR"), "/src/detect/test_data/macos-virtualbox-linux-x86-4850HQ.auxv"); - println!("file: {}", file); + println!("file: {file}"); // The file contains HWCAP but not HWCAP2. In that case, we treat HWCAP2 as zero. let v = auxv_from_file(file).unwrap(); assert_eq!(v.hwcap, 126614527); @@ -332,7 +332,7 @@ mod tests { #[test] fn linux_artificial_aarch64() { let file = concat!(env!("CARGO_MANIFEST_DIR"), "/src/detect/test_data/linux-artificial-aarch64.auxv"); - println!("file: {}", file); + println!("file: {file}"); let v = auxv_from_file(file).unwrap(); assert_eq!(v.hwcap, 0x0123456789abcdef); assert_eq!(v.hwcap2, 0x02468ace13579bdf); @@ -340,7 +340,7 @@ mod tests { #[test] fn linux_no_hwcap2_aarch64() { let file = concat!(env!("CARGO_MANIFEST_DIR"), "/src/detect/test_data/linux-no-hwcap2-aarch64.auxv"); - println!("file: {}", file); + println!("file: {file}"); let v = auxv_from_file(file).unwrap(); // An absent HWCAP2 is treated as zero, and does not prevent acceptance of HWCAP. assert_ne!(v.hwcap, 0); diff --git a/library/stdarch/crates/std_detect/src/detect/os/linux/riscv.rs b/library/stdarch/crates/std_detect/src/detect/os/linux/riscv.rs index 1ec06959a..91a85d58e 100644 --- a/library/stdarch/crates/std_detect/src/detect/os/linux/riscv.rs +++ b/library/stdarch/crates/std_detect/src/detect/os/linux/riscv.rs @@ -1,73 +1,73 @@ -//! Run-time feature detection for RISC-V on Linux. - -use super::auxvec; -use crate::detect::{bit, cache, Feature}; - -/// Read list of supported features from the auxiliary vector. 
-pub(crate) fn detect_features() -> cache::Initializer { - let mut value = cache::Initializer::default(); - let enable_feature = |value: &mut cache::Initializer, feature, enable| { - if enable { - value.set(feature as u32); - } - }; - let enable_features = |value: &mut cache::Initializer, feature_slice: &[Feature], enable| { - if enable { - for feature in feature_slice { - value.set(*feature as u32); - } - } - }; - - // The values are part of the platform-specific [asm/hwcap.h][hwcap] - // - // [hwcap]: https://github.com/torvalds/linux/blob/master/arch/riscv/include/asm/hwcap.h - let auxv = auxvec::auxv().expect("read auxvec"); // should not fail on RISC-V platform - enable_feature( - &mut value, - Feature::a, - bit::test(auxv.hwcap, (b'a' - b'a').into()), - ); - enable_feature( - &mut value, - Feature::c, - bit::test(auxv.hwcap, (b'c' - b'a').into()), - ); - enable_features( - &mut value, - &[Feature::d, Feature::f, Feature::zicsr], - bit::test(auxv.hwcap, (b'd' - b'a').into()), - ); - enable_features( - &mut value, - &[Feature::f, Feature::zicsr], - bit::test(auxv.hwcap, (b'f' - b'a').into()), - ); - let has_i = bit::test(auxv.hwcap, (b'i' - b'a').into()); - // If future RV128I is supported, implement with `enable_feature` here - #[cfg(target_pointer_width = "64")] - enable_feature(&mut value, Feature::rv64i, has_i); - #[cfg(target_pointer_width = "32")] - enable_feature(&mut value, Feature::rv32i, has_i); - #[cfg(target_pointer_width = "32")] - enable_feature( - &mut value, - Feature::rv32e, - bit::test(auxv.hwcap, (b'e' - b'a').into()), - ); - enable_feature( - &mut value, - Feature::h, - bit::test(auxv.hwcap, (b'h' - b'a').into()), - ); - enable_feature( - &mut value, - Feature::m, - bit::test(auxv.hwcap, (b'm' - b'a').into()), - ); - // FIXME: Auxvec does not show supervisor feature support, but this mode may be useful - // to detect when Rust is used to write Linux kernel modules. - // These should be more than Auxvec way to detect supervisor features. - - value -} +//! Run-time feature detection for RISC-V on Linux. + +use super::auxvec; +use crate::detect::{bit, cache, Feature}; + +/// Read list of supported features from the auxiliary vector. 
+pub(crate) fn detect_features() -> cache::Initializer { + let mut value = cache::Initializer::default(); + let enable_feature = |value: &mut cache::Initializer, feature, enable| { + if enable { + value.set(feature as u32); + } + }; + let enable_features = |value: &mut cache::Initializer, feature_slice: &[Feature], enable| { + if enable { + for feature in feature_slice { + value.set(*feature as u32); + } + } + }; + + // The values are part of the platform-specific [asm/hwcap.h][hwcap] + // + // [hwcap]: https://github.com/torvalds/linux/blob/master/arch/riscv/include/asm/hwcap.h + let auxv = auxvec::auxv().expect("read auxvec"); // should not fail on RISC-V platform + enable_feature( + &mut value, + Feature::a, + bit::test(auxv.hwcap, (b'a' - b'a').into()), + ); + enable_feature( + &mut value, + Feature::c, + bit::test(auxv.hwcap, (b'c' - b'a').into()), + ); + enable_features( + &mut value, + &[Feature::d, Feature::f, Feature::zicsr], + bit::test(auxv.hwcap, (b'd' - b'a').into()), + ); + enable_features( + &mut value, + &[Feature::f, Feature::zicsr], + bit::test(auxv.hwcap, (b'f' - b'a').into()), + ); + let has_i = bit::test(auxv.hwcap, (b'i' - b'a').into()); + // If future RV128I is supported, implement with `enable_feature` here + #[cfg(target_pointer_width = "64")] + enable_feature(&mut value, Feature::rv64i, has_i); + #[cfg(target_pointer_width = "32")] + enable_feature(&mut value, Feature::rv32i, has_i); + #[cfg(target_pointer_width = "32")] + enable_feature( + &mut value, + Feature::rv32e, + bit::test(auxv.hwcap, (b'e' - b'a').into()), + ); + enable_feature( + &mut value, + Feature::h, + bit::test(auxv.hwcap, (b'h' - b'a').into()), + ); + enable_feature( + &mut value, + Feature::m, + bit::test(auxv.hwcap, (b'm' - b'a').into()), + ); + // FIXME: Auxvec does not show supervisor feature support, but this mode may be useful + // to detect when Rust is used to write Linux kernel modules. + // These should be more than Auxvec way to detect supervisor features. 
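The re-added riscv.rs above derives each single-letter extension from HWCAP, where extension letter X is advertised in bit (X - 'a') of the auxiliary-vector word. A minimal sketch of that mapping (hwcap_has_ext is an illustrative name; the real code goes through bit::test and the auxvec reader):

fn hwcap_has_ext(hwcap: usize, ext: u8) -> bool {
    // Single-letter RISC-V extensions occupy bit (letter - 'a') of HWCAP.
    let bit = u32::from(ext - b'a');
    hwcap & (1 << bit) != 0
}

fn main() {
    // An HWCAP word advertising I, M and C.
    let hwcap: usize = (1 << (b'i' - b'a')) | (1 << (b'm' - b'a')) | (1 << (b'c' - b'a'));
    assert!(hwcap_has_ext(hwcap, b'm'));  // M lives in bit 12
    assert!(!hwcap_has_ext(hwcap, b'd')); // D is not set in this example
}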
+ + value +} diff --git a/library/stdarch/crates/std_detect/src/detect/os/x86.rs b/library/stdarch/crates/std_detect/src/detect/os/x86.rs index ea5f595ec..08f48cd17 100644 --- a/library/stdarch/crates/std_detect/src/detect/os/x86.rs +++ b/library/stdarch/crates/std_detect/src/detect/os/x86.rs @@ -211,10 +211,10 @@ pub(crate) fn detect_features() -> cache::Initializer { enable(extended_features_ecx, 1, Feature::avx512vbmi); enable(extended_features_ecx, 5, Feature::avx512bf16); enable(extended_features_ecx, 6, Feature::avx512vbmi2); - enable(extended_features_ecx, 8, Feature::avx512gfni); + enable(extended_features_ecx, 8, Feature::gfni); enable(extended_features_ecx, 8, Feature::avx512vp2intersect); - enable(extended_features_ecx, 9, Feature::avx512vaes); - enable(extended_features_ecx, 10, Feature::avx512vpclmulqdq); + enable(extended_features_ecx, 9, Feature::vaes); + enable(extended_features_ecx, 10, Feature::vpclmulqdq); enable(extended_features_ecx, 11, Feature::avx512vnni); enable(extended_features_ecx, 12, Feature::avx512bitalg); enable(extended_features_ecx, 14, Feature::avx512vpopcntdq); diff --git a/library/stdarch/crates/std_detect/tests/cpu-detection.rs b/library/stdarch/crates/std_detect/tests/cpu-detection.rs index ca8bf28f4..02ad77a63 100644 --- a/library/stdarch/crates/std_detect/tests/cpu-detection.rs +++ b/library/stdarch/crates/std_detect/tests/cpu-detection.rs @@ -15,7 +15,7 @@ extern crate std_detect; #[test] fn all() { for (f, e) in std_detect::detect::features() { - println!("{}: {}", f, e); + println!("{f}: {e}"); } } @@ -132,12 +132,9 @@ fn x86_all() { is_x86_feature_detected!("avx512vpopcntdq") ); println!("avx512vbmi2 {:?}", is_x86_feature_detected!("avx512vbmi2")); - println!("avx512gfni {:?}", is_x86_feature_detected!("avx512gfni")); - println!("avx512vaes {:?}", is_x86_feature_detected!("avx512vaes")); - println!( - "avx512vpclmulqdq {:?}", - is_x86_feature_detected!("avx512vpclmulqdq") - ); + println!("gfni {:?}", is_x86_feature_detected!("gfni")); + println!("vaes {:?}", is_x86_feature_detected!("vaes")); + println!("vpclmulqdq {:?}", is_x86_feature_detected!("vpclmulqdq")); println!("avx512vnni {:?}", is_x86_feature_detected!("avx512vnni")); println!( "avx512bitalg {:?}", diff --git a/library/stdarch/crates/std_detect/tests/x86-specific.rs b/library/stdarch/crates/std_detect/tests/x86-specific.rs index 59e9a62fd..e481620c7 100644 --- a/library/stdarch/crates/std_detect/tests/x86-specific.rs +++ b/library/stdarch/crates/std_detect/tests/x86-specific.rs @@ -36,12 +36,9 @@ fn dump() { is_x86_feature_detected!("avx512vpopcntdq") ); println!("avx512vbmi2 {:?}", is_x86_feature_detected!("avx512vbmi2")); - println!("avx512gfni {:?}", is_x86_feature_detected!("avx512gfni")); - println!("avx512vaes {:?}", is_x86_feature_detected!("avx512vaes")); - println!( - "avx512vpclmulqdq {:?}", - is_x86_feature_detected!("avx512vpclmulqdq") - ); + println!("gfni {:?}", is_x86_feature_detected!("gfni")); + println!("vaes {:?}", is_x86_feature_detected!("vaes")); + println!("vpclmulqdq {:?}", is_x86_feature_detected!("vpclmulqdq")); println!("avx512vnni {:?}", is_x86_feature_detected!("avx512vnni")); println!( "avx512bitalg {:?}", diff --git a/library/stdarch/crates/stdarch-gen/Cargo.toml b/library/stdarch/crates/stdarch-gen/Cargo.toml index b339672f4..a5d19b199 100644 --- a/library/stdarch/crates/stdarch-gen/Cargo.toml +++ b/library/stdarch/crates/stdarch-gen/Cargo.toml @@ -2,7 +2,7 @@ name = "stdarch-gen" version = "0.1.0" authors = ["Heinz Gies "] -edition = "2018" +edition = 
"2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html diff --git a/library/stdarch/crates/stdarch-gen/src/main.rs b/library/stdarch/crates/stdarch-gen/src/main.rs index d2f865753..750e88091 100644 --- a/library/stdarch/crates/stdarch-gen/src/main.rs +++ b/library/stdarch/crates/stdarch-gen/src/main.rs @@ -59,7 +59,7 @@ fn type_len(t: &str) -> usize { "4_" => 4, "8_" => 8, "16" => 16, - _ => panic!("unknown type: {}", t), + _ => panic!("unknown type: {t}"), } } else if s.len() == 3 { s[1].parse::().unwrap() * type_sub_len(t) @@ -77,7 +77,7 @@ fn type_sub_len(t: &str) -> usize { "2_t" => 2, "3_t" => 3, "4_t" => 4, - _ => panic!("unknown type len: {}", t), + _ => panic!("unknown type len: {t}"), } } } @@ -92,7 +92,7 @@ fn type_bits(t: &str) -> usize { | "float32x4_t" | "f32" => 32, "int64x1_t" | "int64x2_t" | "uint64x1_t" | "uint64x2_t" | "poly64x1_t" | "poly64x2_t" | "i64" | "u64" | "float64x1_t" | "float64x2_t" | "f64" => 64, - _ => panic!("unknown type: {}", t), + _ => panic!("unknown type: {t}"), } } @@ -105,7 +105,7 @@ fn type_exp_len(t: &str, base_len: usize) -> usize { 4 => 2, 8 => 3, 16 => 4, - _ => panic!("unknown type: {}", t), + _ => panic!("unknown type: {t}"), } } @@ -118,7 +118,7 @@ fn type_bits_exp_len(t: &str) -> usize { "int32x2_t" | "int32x4_t" | "uint32x2_t" | "uint32x4_t" | "i32" | "u32" => 5, "int64x1_t" | "int64x2_t" | "uint64x1_t" | "uint64x2_t" | "poly64x1_t" | "poly64x2_t" | "i64" | "u64" => 6, - _ => panic!("unknown type: {}", t), + _ => panic!("unknown type: {t}"), } } @@ -243,7 +243,7 @@ fn type_to_suffix(t: &str) -> &str { "p8" => "b_p8", "p16" => "h_p16", "p128" => "q_p128", - _ => panic!("unknown type: {}", t), + _ => panic!("unknown type: {t}"), } } @@ -297,7 +297,7 @@ fn type_to_n_suffix(t: &str) -> &str { "u16" => "h_n_u16", "u32" => "s_n_u32", "u64" => "d_n_u64", - _ => panic!("unknown type: {}", t), + _ => panic!("unknown type: {t}"), } } @@ -325,7 +325,7 @@ fn type_to_noq_n_suffix(t: &str) -> &str { "u16" => "h_n_u16", "u32" => "s_n_u32", "u64" => "d_n_u64", - _ => panic!("unknown type: {}", t), + _ => panic!("unknown type: {t}"), } } @@ -354,7 +354,7 @@ fn type_to_rot_suffix(c_name: &str, suf: &str) -> String { if suf.starts_with("q") { format!("{}q_{}{}", ns[0], ns[1], &suf[1..]) } else { - format!("{}{}", c_name, suf) + format!("{c_name}{suf}") } } @@ -426,7 +426,7 @@ fn type_to_noq_suffix(t: &str) -> &str { "poly16x4_t" | "poly16x8_t" => "_p16", "poly64x1_t" | "poly64x2_t" | "p64" => "_p64", "p128" => "_p128", - _ => panic!("unknown type: {}", t), + _ => panic!("unknown type: {t}"), } } @@ -521,7 +521,7 @@ fn type_to_global_type(t: &str) -> &str { "p16" => "p16", "p64" => "p64", "p128" => "p128", - _ => panic!("unknown type: {}", t), + _ => panic!("unknown type: {t}"), } } @@ -530,7 +530,7 @@ fn type_to_sub_type(t: &str) -> String { match s.len() { 2 => String::from(t), 3 => format!("{}x{}_t", s[0], s[1]), - _ => panic!("unknown type: {}", t), + _ => panic!("unknown type: {t}"), } } @@ -547,9 +547,9 @@ fn type_to_native_type(t: &str) -> String { "uin" => format!("u{}", &s[0][4..]), "flo" => format!("f{}", &s[0][5..]), "pol" => format!("u{}", &s[0][4..]), - _ => panic!("unknown type: {}", t), + _ => panic!("unknown type: {t}"), }, - _ => panic!("unknown type: {}", t), + _ => panic!("unknown type: {t}"), } } @@ -566,7 +566,7 @@ fn native_type_to_type(t: &str) -> &str { "f16" => "float16x4_t", "f32" => "float32x2_t", "f64" => "float64x1_t", - _ => panic!("unknown type: {}", t), + _ => panic!("unknown type: 
{t}"), } } @@ -583,7 +583,7 @@ fn native_type_to_long_type(t: &str) -> &str { "f16" => "float16x8_t", "f32" => "float32x4_t", "f64" => "float64x2_t", - _ => panic!("unknown type: {}", t), + _ => panic!("unknown type: {t}"), } } @@ -601,7 +601,7 @@ fn type_to_half(t: &str) -> &str { "poly16x8_t" => "poly16x4_t", "float32x4_t" => "float32x2_t", "float64x2_t" => "float64x1_t", - _ => panic!("unknown half type for {}", t), + _ => panic!("unknown half type for {t}"), } } @@ -624,7 +624,7 @@ fn transpose1(x: usize) -> &'static str { 4 => "[0, 4, 2, 6]", 8 => "[0, 8, 2, 10, 4, 12, 6, 14]", 16 => "[0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]", - _ => panic!("unknown transpose order of len {}", x), + _ => panic!("unknown transpose order of len {x}"), } } @@ -634,7 +634,7 @@ fn transpose2(x: usize) -> &'static str { 4 => "[1, 5, 3, 7]", 8 => "[1, 9, 3, 11, 5, 13, 7, 15]", 16 => "[1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31]", - _ => panic!("unknown transpose order of len {}", x), + _ => panic!("unknown transpose order of len {x}"), } } @@ -644,7 +644,7 @@ fn zip1(x: usize) -> &'static str { 4 => "[0, 4, 1, 5]", 8 => "[0, 8, 1, 9, 2, 10, 3, 11]", 16 => "[0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23]", - _ => panic!("unknown zip order of len {}", x), + _ => panic!("unknown zip order of len {x}"), } } @@ -654,7 +654,7 @@ fn zip2(x: usize) -> &'static str { 4 => "[2, 6, 3, 7]", 8 => "[4, 12, 5, 13, 6, 14, 7, 15]", 16 => "[8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31]", - _ => panic!("unknown zip order of len {}", x), + _ => panic!("unknown zip order of len {x}"), } } @@ -664,7 +664,7 @@ fn unzip1(x: usize) -> &'static str { 4 => "[0, 2, 4, 6]", 8 => "[0, 2, 4, 6, 8, 10, 12, 14]", 16 => "[0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]", - _ => panic!("unknown unzip order of len {}", x), + _ => panic!("unknown unzip order of len {x}"), } } @@ -674,13 +674,13 @@ fn unzip2(x: usize) -> &'static str { 4 => "[1, 3, 5, 7]", 8 => "[1, 3, 5, 7, 9, 11, 13, 15]", 16 => "[1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31]", - _ => panic!("unknown unzip order of len {}", x), + _ => panic!("unknown unzip order of len {x}"), } } fn values(t: &str, vs: &[String]) -> String { if vs.len() == 1 && !t.contains('x') { - format!(": {} = {}", t, vs[0]) + format!(": {t} = {}", vs[0]) } else if vs.len() == 1 && type_to_global_type(t) == "f64" { format!(": {} = {}", type_to_global_type(t), vs[0]) } else { @@ -723,7 +723,7 @@ fn max_val(t: &str) -> &'static str { "i64" => "0x7F_FF_FF_FF_FF_FF_FF_FF", "f32" => "3.40282347e+38", "f64" => "1.7976931348623157e+308", - _ => panic!("No TRUE for type {}", t), + _ => panic!("No TRUE for type {t}"), } } @@ -739,7 +739,7 @@ fn min_val(t: &str) -> &'static str { "i64" => "-9223372036854775808", "f32" => "-3.40282347e+38", "f64" => "-1.7976931348623157e+308", - _ => panic!("No TRUE for type {}", t), + _ => panic!("No TRUE for type {t}"), } } @@ -749,7 +749,7 @@ fn true_val(t: &str) -> &'static str { "u16" => "0xFF_FF", "u32" => "0xFF_FF_FF_FF", "u64" => "0xFF_FF_FF_FF_FF_FF_FF_FF", - _ => panic!("No TRUE for type {}", t), + _ => panic!("No TRUE for type {t}"), } } @@ -763,7 +763,7 @@ fn ff_val(t: &str) -> &'static str { "i16" => "0xFF_FF", "i32" => "0xFF_FF_FF_FF", "i64" => "0xFF_FF_FF_FF_FF_FF_FF_FF", - _ => panic!("No TRUE for type {}", t), + _ => panic!("No TRUE for type {t}"), } } @@ -784,7 +784,7 @@ fn bits(t: &str) -> &'static str { "p8x" => "8", "p16" => "16", "p64" => "64", - _ => panic!("Unknown bits for type 
{}", t), + _ => panic!("Unknown bits for type {t}"), } } @@ -801,7 +801,7 @@ fn bits_minus_one(t: &str) -> &'static str { "p8x" => "7", "p16" => "15", "p64" => "63", - _ => panic!("Unknown bits for type {}", t), + _ => panic!("Unknown bits for type {t}"), } } @@ -818,7 +818,7 @@ fn half_bits(t: &str) -> &'static str { "p8x" => "4", "p16" => "8", "p64" => "32", - _ => panic!("Unknown bits for type {}", t), + _ => panic!("Unknown bits for type {t}"), } } @@ -852,7 +852,7 @@ fn type_len_str(t: &str) -> &'static str { "poly16x8_t" => "8", "poly64x1_t" => "1", "poly64x2_t" => "2", - _ => panic!("unknown type: {}", t), + _ => panic!("unknown type: {t}"), } } @@ -886,7 +886,7 @@ fn type_len_minus_one_str(t: &str) -> &'static str { "poly16x8_t" => "7", "poly64x1_t" => "0", "poly64x2_t" => "1", - _ => panic!("unknown type: {}", t), + _ => panic!("unknown type: {t}"), } } @@ -920,7 +920,7 @@ fn type_half_len_str(t: &str) -> &'static str { "poly16x8_t" => "4", "poly64x1_t" => "0", "poly64x2_t" => "1", - _ => panic!("unknown type: {}", t), + _ => panic!("unknown type: {t}"), } } @@ -954,12 +954,12 @@ fn type_to_ext(t: &str, v: bool, r: bool, pi8: bool) -> String { native ), _ if pi8 => format!(".p0i8"), - _ => format!(".p0{}", native), + _ => format!(".p0{native}"), }; let sub_type = match &native[0..1] { "i" | "f" => native, "u" => native.replace("u", "i"), - _ => panic!("unknown type: {}", t), + _ => panic!("unknown type: {t}"), }; let ext = format!( "v{}{}{}", @@ -1041,8 +1041,8 @@ fn gen_aarch64( fn_type: Fntype, ) -> (String, String) { let name = match suffix { - Normal => format!("{}{}", current_name, type_to_suffix(in_t[1])), - NoQ => format!("{}{}", current_name, type_to_noq_suffix(in_t[1])), + Normal => format!("{current_name}{}", type_to_suffix(in_t[1])), + NoQ => format!("{current_name}{}", type_to_noq_suffix(in_t[1])), Double => format!( "{}{}", current_name, @@ -1053,15 +1053,15 @@ fn gen_aarch64( current_name, type_to_noq_double_suffixes(out_t, in_t[1]) ), - NSuffix => format!("{}{}", current_name, type_to_n_suffix(in_t[1])), + NSuffix => format!("{current_name}{}", type_to_n_suffix(in_t[1])), DoubleN => format!( "{}{}", current_name, type_to_double_n_suffixes(out_t, in_t[1]) ), - NoQNSuffix => format!("{}{}", current_name, type_to_noq_n_suffix(in_t[1])), - OutSuffix => format!("{}{}", current_name, type_to_suffix(out_t)), - OutNSuffix => format!("{}{}", current_name, type_to_n_suffix(out_t)), + NoQNSuffix => format!("{current_name}{}", type_to_noq_n_suffix(in_t[1])), + OutSuffix => format!("{current_name}{}", type_to_suffix(out_t)), + OutNSuffix => format!("{current_name}{}", type_to_n_suffix(out_t)), OutNox => format!( "{}{}", current_name, @@ -1092,7 +1092,7 @@ fn gen_aarch64( current_name, type_to_lane_suffixes(out_t, in_t[1], false) ), - In2 => format!("{}{}", current_name, type_to_suffix(in_t[2])), + In2 => format!("{current_name}{}", type_to_suffix(in_t[2])), In2Lane => format!( "{}{}", current_name, @@ -1122,11 +1122,11 @@ fn gen_aarch64( }; let current_fn = if let Some(current_fn) = current_fn.clone() { if link_aarch64.is_some() { - panic!("[{}] Can't specify link and fn at the same time.", name) + panic!("[{name}] Can't specify link and fn at the same time.") } current_fn } else if link_aarch64.is_some() { - format!("{}_", name) + format!("{name}_") } else { if multi_fn.is_empty() { panic!( @@ -1174,8 +1174,8 @@ fn gen_aarch64( let sub = type_to_sub_type(in_t[1]); ( match type_sub_len(in_t[1]) { - 1 => format!("a: {}, n: i64, ptr: {}", sub, ptr_type), - 2 => format!("a: {}, b: 
{}, n: i64, ptr: {}", sub, sub, ptr_type), + 1 => format!("a: {sub}, n: i64, ptr: {ptr_type}"), + 2 => format!("a: {sub}, b: {sub}, n: i64, ptr: {ptr_type}"), 3 => format!( "a: {}, b: {}, c: {}, n: i64, ptr: {}", sub, sub, sub, ptr_type @@ -1187,7 +1187,7 @@ fn gen_aarch64( _ => panic!("unsupported type: {}", in_t[1]), }, if out_t != "void" { - format!(" -> {}", out_t) + format!(" -> {out_t}") } else { String::new() }, @@ -1200,7 +1200,7 @@ fn gen_aarch64( 3 => format!("a: {}, b: {}, c: {}, n: i32", in_t[0], in_t[1], in_t[2]), _ => unimplemented!("unknown para_num"), }, - format!(" -> {}", out_t), + format!(" -> {out_t}"), ) } } else if matches!(fn_type, Fntype::Store) { @@ -1211,23 +1211,20 @@ fn gen_aarch64( type_to_native_type(in_t[1]) }; let subs = match type_sub_len(in_t[1]) { - 1 => format!("a: {}", sub), - 2 => format!("a: {}, b: {}", sub, sub), - 3 => format!("a: {}, b: {}, c: {}", sub, sub, sub), - 4 => format!("a: {}, b: {}, c: {}, d: {}", sub, sub, sub, sub), + 1 => format!("a: {sub}"), + 2 => format!("a: {sub}, b: {sub}"), + 3 => format!("a: {sub}, b: {sub}, c: {sub}"), + 4 => format!("a: {sub}, b: {sub}, c: {sub}, d: {sub}"), _ => panic!("unsupported type: {}", in_t[1]), }; - (format!("{}, ptr: *mut {}", subs, ptr_type), String::new()) + (format!("{subs}, ptr: *mut {ptr_type}"), String::new()) } else if is_vldx(&name) { let ptr_type = if name.contains("dup") { type_to_native_type(out_t) } else { type_to_sub_type(out_t) }; - ( - format!("ptr: *const {}", ptr_type), - format!(" -> {}", out_t), - ) + (format!("ptr: *const {ptr_type}"), format!(" -> {out_t}")) } else { ( match para_num { @@ -1256,7 +1253,7 @@ fn gen_aarch64( assert_eq!(constns.len(), 2); format!(r#""#, constns[0], constns[1]) } else { - format!(r#""#, constn) + format!(r#""#) } } else { String::new() @@ -1314,7 +1311,7 @@ fn gen_aarch64( para_num + 1 ) } else { - format!("\n#[rustc_legacy_const_generics({})]", para_num) + format!("\n#[rustc_legacy_const_generics({para_num})]") } } else { String::new() @@ -1323,7 +1320,7 @@ fn gen_aarch64( let fn_output = if out_t == "void" { String::new() } else { - format!("-> {} ", out_t) + format!("-> {out_t} ") }; let fn_inputs = match para_num { 1 => format!("(a: {})", in_t[0]), @@ -1373,14 +1370,14 @@ fn gen_aarch64( } else if link_aarch64.is_some() && matches!(fn_type, Fntype::Store) { let cast = if is_vstx(&name) { " as _" } else { "" }; match type_sub_len(in_t[1]) { - 1 => format!(r#"{}{}(b, a{})"#, ext_c, current_fn, cast), - 2 => format!(r#"{}{}(b.0, b.1, a{})"#, ext_c, current_fn, cast), - 3 => format!(r#"{}{}(b.0, b.1, b.2, a{})"#, ext_c, current_fn, cast), - 4 => format!(r#"{}{}(b.0, b.1, b.2, b.3, a{})"#, ext_c, current_fn, cast), + 1 => format!(r#"{ext_c}{current_fn}(b, a{cast})"#), + 2 => format!(r#"{ext_c}{current_fn}(b.0, b.1, a{cast})"#), + 3 => format!(r#"{ext_c}{current_fn}(b.0, b.1, b.2, a{cast})"#), + 4 => format!(r#"{ext_c}{current_fn}(b.0, b.1, b.2, b.3, a{cast})"#), _ => panic!("unsupported type: {}", in_t[1]), } } else if link_aarch64.is_some() && is_vldx(&name) { - format!(r#"{}{}(a as _)"#, ext_c, current_fn,) + format!(r#"{ext_c}{current_fn}(a as _)"#,) } else { let trans: [&str; 2] = if link_t[3] != out_t { ["transmute(", ")"] @@ -1388,7 +1385,7 @@ fn gen_aarch64( ["", ""] }; match (multi_calls.len(), para_num, fixed.len()) { - (0, 1, 0) => format!(r#"{}{}{}(a){}"#, ext_c, trans[0], current_fn, trans[1]), + (0, 1, 0) => format!(r#"{ext_c}{}{current_fn}(a){}"#, trans[0], trans[1]), (0, 1, _) => { let fixed: Vec = 
fixed.iter().take(type_len(in_t[0])).cloned().collect(); @@ -1402,11 +1399,11 @@ fn gen_aarch64( trans[1], ) } - (0, 2, _) => format!(r#"{}{}{}(a, b){}"#, ext_c, trans[0], current_fn, trans[1],), - (0, 3, _) => format!(r#"{}{}(a, b, c)"#, ext_c, current_fn,), - (_, 1, _) => format!(r#"{}{}"#, ext_c, multi_calls,), - (_, 2, _) => format!(r#"{}{}"#, ext_c, multi_calls,), - (_, 3, _) => format!(r#"{}{}"#, ext_c, multi_calls,), + (0, 2, _) => format!(r#"{ext_c}{}{current_fn}(a, b){}"#, trans[0], trans[1],), + (0, 3, _) => format!(r#"{ext_c}{current_fn}(a, b, c)"#,), + (_, 1, _) => format!(r#"{ext_c}{multi_calls}"#,), + (_, 2, _) => format!(r#"{ext_c}{multi_calls}"#,), + (_, 3, _) => format!(r#"{ext_c}{multi_calls}"#,), (_, _, _) => String::new(), } } @@ -1768,8 +1765,8 @@ fn gen_arm( separate: bool, ) -> (String, String) { let name = match suffix { - Normal => format!("{}{}", current_name, type_to_suffix(in_t[1])), - NoQ => format!("{}{}", current_name, type_to_noq_suffix(in_t[1])), + Normal => format!("{current_name}{}", type_to_suffix(in_t[1])), + NoQ => format!("{current_name}{}", type_to_noq_suffix(in_t[1])), Double => format!( "{}{}", current_name, @@ -1780,15 +1777,15 @@ fn gen_arm( current_name, type_to_noq_double_suffixes(out_t, in_t[1]) ), - NSuffix => format!("{}{}", current_name, type_to_n_suffix(in_t[1])), + NSuffix => format!("{current_name}{}", type_to_n_suffix(in_t[1])), DoubleN => format!( "{}{}", current_name, type_to_double_n_suffixes(out_t, in_t[1]) ), - NoQNSuffix => format!("{}{}", current_name, type_to_noq_n_suffix(in_t[1])), - OutSuffix => format!("{}{}", current_name, type_to_suffix(out_t)), - OutNSuffix => format!("{}{}", current_name, type_to_n_suffix(out_t)), + NoQNSuffix => format!("{current_name}{}", type_to_noq_n_suffix(in_t[1])), + OutSuffix => format!("{current_name}{}", type_to_suffix(out_t)), + OutNSuffix => format!("{current_name}{}", type_to_n_suffix(out_t)), OutNox => format!( "{}{}", current_name, @@ -1819,7 +1816,7 @@ fn gen_arm( current_name, type_to_lane_suffixes(out_t, in_t[1], false) ), - In2 => format!("{}{}", current_name, type_to_suffix(in_t[2])), + In2 => format!("{current_name}{}", type_to_suffix(in_t[2])), In2Lane => format!( "{}{}", current_name, @@ -1873,7 +1870,7 @@ fn gen_arm( } current_fn } else if link_aarch64.is_some() || link_arm.is_some() { - format!("{}_", name) + format!("{name}_") } else { if multi_fn.is_empty() { panic!( @@ -1980,9 +1977,9 @@ fn gen_arm( }; let sub_type = type_to_sub_type(in_t[1]); let inputs = match type_sub_len(in_t[1]) { - 1 => format!("a: {}", sub_type), - 2 => format!("a: {}, b: {}", sub_type, sub_type,), - 3 => format!("a: {}, b: {}, c: {}", sub_type, sub_type, sub_type,), + 1 => format!("a: {sub_type}"), + 2 => format!("a: {sub_type}, b: {sub_type}",), + 3 => format!("a: {sub_type}, b: {sub_type}, c: {sub_type}",), 4 => format!( "a: {}, b: {}, c: {}, d: {}", sub_type, sub_type, sub_type, sub_type, @@ -1992,12 +1989,9 @@ fn gen_arm( let out = if out_t == "void" { String::new() } else { - format!(" -> {}", out_t) + format!(" -> {out_t}") }; - ( - format!("ptr: {}, {}, n: i32, size: i32", ptr_type, inputs), - out, - ) + (format!("ptr: {ptr_type}, {inputs}, n: i32, size: i32"), out) } else { let (_, const_type) = if const_arm.contains(":") { let consts: Vec<_> = @@ -2011,15 +2005,15 @@ fn gen_arm( }; ( match para_num { - 1 => format!("a: {}, n: {}", in_t[0], const_type), - 2 => format!("a: {}, b: {}, n: {}", in_t[0], in_t[1], const_type), + 1 => format!("a: {}, n: {const_type}", in_t[0]), + 2 => format!("a: {}, 
b: {}, n: {const_type}", in_t[0], in_t[1]), 3 => format!( - "a: {}, b: {}, c: {}, n: {}", - in_t[0], in_t[1], in_t[2], const_type + "a: {}, b: {}, c: {}, n: {const_type}", + in_t[0], in_t[1], in_t[2] ), _ => unimplemented!("unknown para_num"), }, - format!(" -> {}", out_t), + format!(" -> {out_t}"), ) } } else if out_t != link_arm_t[3] { @@ -2038,9 +2032,9 @@ fn gen_arm( } else if matches!(fn_type, Fntype::Store) { let sub_type = type_to_sub_type(in_t[1]); let inputs = match type_sub_len(in_t[1]) { - 1 => format!("a: {}", sub_type), - 2 => format!("a: {}, b: {}", sub_type, sub_type,), - 3 => format!("a: {}, b: {}, c: {}", sub_type, sub_type, sub_type,), + 1 => format!("a: {sub_type}"), + 2 => format!("a: {sub_type}, b: {sub_type}",), + 3 => format!("a: {sub_type}, b: {sub_type}, c: {sub_type}",), 4 => format!( "a: {}, b: {}, c: {}, d: {}", sub_type, sub_type, sub_type, sub_type, @@ -2053,14 +2047,11 @@ fn gen_arm( (type_to_native_type(in_t[1]), "") }; ( - format!("ptr: *mut {}, {}{}", ptr_type, inputs, size), + format!("ptr: *mut {ptr_type}, {inputs}{size}"), String::new(), ) } else if is_vldx(&name) { - ( - format!("ptr: *const i8, size: i32"), - format!(" -> {}", out_t), - ) + (format!("ptr: *const i8, size: i32"), format!(" -> {out_t}")) } else { (String::new(), String::new()) } @@ -2084,20 +2075,20 @@ fn gen_arm( }; let sub_type = type_to_sub_type(in_t[1]); let mut inputs = match type_sub_len(in_t[1]) { - 1 => format!("a: {}", sub_type,), - 2 => format!("a: {}, b: {}", sub_type, sub_type,), - 3 => format!("a: {}, b: {}, c: {}", sub_type, sub_type, sub_type,), + 1 => format!("a: {sub_type}",), + 2 => format!("a: {sub_type}, b: {sub_type}",), + 3 => format!("a: {sub_type}, b: {sub_type}, c: {sub_type}",), 4 => format!( "a: {}, b: {}, c: {}, d: {}", sub_type, sub_type, sub_type, sub_type, ), _ => panic!("unknown type: {}", in_t[1]), }; - inputs.push_str(&format!(", n: i64, ptr: {}", ptr_type)); + inputs.push_str(&format!(", n: i64, ptr: {ptr_type}")); let out = if out_t == "void" { String::new() } else { - format!(" -> {}", out_t) + format!(" -> {out_t}") }; (inputs, out) } else if const_aarch64.contains("dup-in_len-N as ttn") { @@ -2111,7 +2102,7 @@ fn gen_arm( ), _ => unimplemented!("unknown para_num"), }, - format!(" -> {}", out_t), + format!(" -> {out_t}"), ) } else { ( @@ -2121,7 +2112,7 @@ fn gen_arm( 3 => format!("a: {}, b: {}, c: {}, n: i32", in_t[0], in_t[1], in_t[2]), _ => unimplemented!("unknown para_num"), }, - format!(" -> {}", out_t), + format!(" -> {out_t}"), ) } } else if out_t != link_aarch64_t[3] { @@ -2140,9 +2131,9 @@ fn gen_arm( } else if matches!(fn_type, Fntype::Store) { let sub_type = type_to_sub_type(in_t[1]); let mut inputs = match type_sub_len(in_t[1]) { - 1 => format!("a: {}", sub_type,), - 2 => format!("a: {}, b: {}", sub_type, sub_type,), - 3 => format!("a: {}, b: {}, c: {}", sub_type, sub_type, sub_type,), + 1 => format!("a: {sub_type}",), + 2 => format!("a: {sub_type}, b: {sub_type}",), + 3 => format!("a: {sub_type}, b: {sub_type}, c: {sub_type}",), 4 => format!( "a: {}, b: {}, c: {}, d: {}", sub_type, sub_type, sub_type, sub_type, @@ -2154,7 +2145,7 @@ fn gen_arm( } else { type_to_native_type(in_t[1]) }; - inputs.push_str(&format!(", ptr: *mut {}", ptr_type)); + inputs.push_str(&format!(", ptr: *mut {ptr_type}")); (inputs, String::new()) } else if is_vldx(&name) { let ptr_type = if name.contains("dup") { @@ -2162,10 +2153,7 @@ fn gen_arm( } else { type_to_sub_type(out_t) }; - ( - format!("ptr: *const {}", ptr_type), - format!(" -> {}", out_t), - ) + 
(format!("ptr: *const {ptr_type}"), format!(" -> {out_t}")) } else { (String::new(), String::new()) } @@ -2181,7 +2169,7 @@ fn gen_arm( )); }; let const_declare = if let Some(constn) = constn { - format!(r#""#, constn) + format!(r#""#) } else { String::new() }; @@ -2216,7 +2204,7 @@ fn gen_arm( String::new() }; let const_legacy = if constn.is_some() { - format!("\n#[rustc_legacy_const_generics({})]", para_num) + format!("\n#[rustc_legacy_const_generics({para_num})]") } else { String::new() }; @@ -2224,7 +2212,7 @@ fn gen_arm( let fn_output = if out_t == "void" { String::new() } else { - format!("-> {} ", out_t) + format!("-> {out_t} ") }; let fn_inputs = match para_num { 1 => format!("(a: {})", in_t[0]), @@ -2274,15 +2262,15 @@ fn gen_arm( cnt }; match para_num { - 1 => format!("{}(a, {})", current_fn, cnt), - 2 => format!("{}(a, b, {})", current_fn, cnt), + 1 => format!("{current_fn}(a, {cnt})"), + 2 => format!("{current_fn}(a, b, {cnt})"), _ => String::new(), } } } else if out_t != link_arm_t[3] { match para_num { - 1 => format!("transmute({}(a))", current_fn,), - 2 => format!("transmute({}(transmute(a), transmute(b)))", current_fn,), + 1 => format!("transmute({current_fn}(a))",), + 2 => format!("transmute({current_fn}(transmute(a), transmute(b)))",), _ => String::new(), } } else if matches!(fn_type, Fntype::Store) { @@ -2295,10 +2283,10 @@ fn gen_arm( ("", String::new()) }; match type_sub_len(in_t[1]) { - 1 => format!("{}(a{}, b{})", current_fn, cast, size), - 2 => format!("{}(a{}, b.0, b.1{})", current_fn, cast, size), - 3 => format!("{}(a{}, b.0, b.1, b.2{})", current_fn, cast, size), - 4 => format!("{}(a{}, b.0, b.1, b.2, b.3{})", current_fn, cast, size), + 1 => format!("{current_fn}(a{cast}, b{size})"), + 2 => format!("{current_fn}(a{cast}, b.0, b.1{size})"), + 3 => format!("{current_fn}(a{cast}, b.0, b.1, b.2{size})"), + 4 => format!("{current_fn}(a{cast}, b.0, b.1, b.2, b.3{size})"), _ => String::new(), } } else if link_arm.is_some() && is_vldx(&name) { @@ -2345,31 +2333,31 @@ fn gen_arm( cnt.push_str(&const_aarch64); } cnt.push_str(")"); - format!("{}(a, {})", current_fn, cnt) + format!("{current_fn}(a, {cnt})") } else { match para_num { - 1 => format!("{}(a, {})", current_fn, const_aarch64), - 2 => format!("{}(a, b, {})", current_fn, const_aarch64), + 1 => format!("{current_fn}(a, {const_aarch64})"), + 2 => format!("{current_fn}(a, b, {const_aarch64})"), _ => String::new(), } } } else if out_t != link_aarch64_t[3] { match para_num { - 1 => format!("transmute({}(a))", current_fn,), - 2 => format!("transmute({}(a, b))", current_fn,), + 1 => format!("transmute({current_fn}(a))",), + 2 => format!("transmute({current_fn}(a, b))",), _ => String::new(), } } else if matches!(fn_type, Fntype::Store) { let cast = if is_vstx(&name) { " as _" } else { "" }; match type_sub_len(in_t[1]) { - 1 => format!("{}(b, a{})", current_fn, cast), - 2 => format!("{}(b.0, b.1, a{})", current_fn, cast), - 3 => format!("{}(b.0, b.1, b.2, a{})", current_fn, cast), - 4 => format!("{}(b.0, b.1, b.2, b.3, a{})", current_fn, cast), + 1 => format!("{current_fn}(b, a{cast})"), + 2 => format!("{current_fn}(b.0, b.1, a{cast})"), + 3 => format!("{current_fn}(b.0, b.1, b.2, a{cast})"), + 4 => format!("{current_fn}(b.0, b.1, b.2, b.3, a{cast})"), _ => String::new(), } } else if link_aarch64.is_some() && is_vldx(&name) { - format!("{}(a as _)", current_fn) + format!("{current_fn}(a as _)") } else { String::new() }; @@ -2421,7 +2409,7 @@ fn gen_arm( } else { let call = { let stmts = match (multi_calls.len(), para_num, 
fixed.len()) { - (0, 1, 0) => format!(r#"{}{}(a)"#, ext_c, current_fn,), + (0, 1, 0) => format!(r#"{ext_c}{current_fn}(a)"#,), (0, 1, _) => { let fixed: Vec = fixed.iter().take(type_len(in_t[0])).cloned().collect(); @@ -2433,11 +2421,11 @@ fn gen_arm( current_fn, ) } - (0, 2, _) => format!(r#"{}{}(a, b)"#, ext_c, current_fn,), - (0, 3, _) => format!(r#"{}{}(a, b, c)"#, ext_c, current_fn,), - (_, 1, _) => format!(r#"{}{}"#, ext_c, multi_calls,), - (_, 2, _) => format!(r#"{}{}"#, ext_c, multi_calls,), - (_, 3, _) => format!(r#"{}{}"#, ext_c, multi_calls,), + (0, 2, _) => format!(r#"{ext_c}{current_fn}(a, b)"#,), + (0, 3, _) => format!(r#"{ext_c}{current_fn}(a, b, c)"#,), + (_, 1, _) => format!(r#"{ext_c}{multi_calls}"#,), + (_, 2, _) => format!(r#"{ext_c}{multi_calls}"#,), + (_, 3, _) => format!(r#"{ext_c}{multi_calls}"#,), (_, _, _) => String::new(), }; if stmts != String::new() { @@ -2452,8 +2440,8 @@ fn gen_arm( } }; let stable_aarch64 = match target { - Default | ArmV7 | Vfp4 | FPArmV8 | AES => String::from("\n#[cfg_attr(target_arch = \"aarch64\", stable(feature = \"neon_intrinsics\", since = \"1.59.0\"))]"), - RDM => String::from("\n#[cfg_attr(target_arch = \"aarch64\", stable(feature = \"rdm_intrinsics\", since = \"1.62.0\"))]"), + Default | ArmV7 | Vfp4 | FPArmV8 | AES => String::from("\n#[cfg_attr(not(target_arch = \"arm\"), stable(feature = \"neon_intrinsics\", since = \"1.59.0\"))]"), + RDM => String::from("\n#[cfg_attr(not(target_arch = \"arm\"), stable(feature = \"rdm_intrinsics\", since = \"1.62.0\"))]"), _ => String::new(), }; let function_doc = create_doc_string(current_comment, &name); @@ -2536,9 +2524,9 @@ fn expand_intrinsic(intr: &str, t: &str) -> String { "poly64x1_t" => "i64x1", "poly64x2_t" => "i64x2", */ - _ => panic!("unknown type for extension: {}", t), + _ => panic!("unknown type for extension: {t}"), }; - format!(r#""{}{}""#, intr, ext) + format!(r#""{intr}{ext}""#) } else if intr.ends_with(".s") { let ext = match t { "int8x8_t" => "s8", @@ -2571,9 +2559,9 @@ fn expand_intrinsic(intr: &str, t: &str) -> String { "poly64x1_t" => "i64x1", "poly64x2_t" => "i64x2", */ - _ => panic!("unknown type for extension: {}", t), + _ => panic!("unknown type for extension: {t}"), }; - format!(r#""{}{}""#, &intr[..intr.len() - 1], ext) + format!(r#""{}{ext}""#, &intr[..intr.len() - 1]) } else if intr.ends_with(".l") { let ext = match t { "int8x8_t" => "8", @@ -2604,9 +2592,9 @@ fn expand_intrinsic(intr: &str, t: &str) -> String { "float64x2_t" => "64", "poly64x1_t" => "64", "poly64x2_t" => "64", - _ => panic!("unknown type for extension: {}", t), + _ => panic!("unknown type for extension: {t}"), }; - format!(r#""{}{}""#, &intr[..intr.len() - 1], ext) + format!(r#""{}{ext}""#, &intr[..intr.len() - 1]) } else { intr.to_string() } @@ -2655,7 +2643,7 @@ fn get_call( "halflen" => type_len(in_t[1]) / 2, _ => 0, }; - let mut s = format!("{} [", const_declare); + let mut s = format!("{const_declare} ["); for i in 0..len { if i != 0 { s.push_str(", "); @@ -2693,9 +2681,9 @@ fn get_call( if i != 0 || j != 0 { s.push_str(", "); } - s.push_str(&format!("{} * {} as u32", base_len, &fn_format[2])); + s.push_str(&format!("{base_len} * {} as u32", &fn_format[2])); if j != 0 { - s.push_str(&format!(" + {}", j)); + s.push_str(&format!(" + {j}")); } } } @@ -2709,7 +2697,7 @@ fn get_call( "in_ttn" => type_to_native_type(in_t[1]), _ => String::new(), }; - return format!("{} as {}", &fn_format[1], t); + return format!("{} as {t}", &fn_format[1]); } if fn_name.starts_with("ins") { let fn_format: Vec<_> = 
fn_name.split('-').map(|v| v.to_string()).collect(); @@ -2726,7 +2714,7 @@ fn get_call( "in0_len" => type_len(in_t[0]), _ => 0, }; - let mut s = format!("{} [", const_declare); + let mut s = format!("{const_declare} ["); for i in 0..len { if i != 0 { s.push_str(", "); } @@ -2760,7 +2748,7 @@ fn get_call( fn_format[2], fn_format[2] ); } else { - return format!(r#"static_assert_imm{}!({});"#, len, fn_format[2]); + return format!(r#"static_assert_imm{len}!({});"#, fn_format[2]); } } if fn_name.starts_with("static_assert") { @@ -2781,13 +2769,13 @@ fn get_call( }; if lim1 == lim2 { return format!( - r#"static_assert!({} : i32 where {} == {});"#, - fn_format[1], fn_format[1], lim1 + r#"static_assert!({} : i32 where {} == {lim1});"#, + fn_format[1], fn_format[1] ); } else { return format!( - r#"static_assert!({} : i32 where {} >= {} && {} <= {});"#, - fn_format[1], fn_format[1], lim1, fn_format[1], lim2 + r#"static_assert!({} : i32 where {} >= {lim1} && {} <= {lim2});"#, + fn_format[1], fn_format[1], fn_format[1] ); } } @@ -2945,7 +2933,7 @@ fn get_call( if fn_name == "fixed" { let (re_name, re_type) = re.unwrap(); let fixed: Vec<String> = fixed.iter().take(type_len(in_t[1])).cloned().collect(); - return format!(r#"let {}{};"#, re_name, values(&re_type, &fixed)); + return format!(r#"let {re_name}{};"#, values(&re_type, &fixed)); } if fn_name == "fixed-half-right" { let fixed: Vec<String> = fixed.iter().take(type_len(in_t[1])).cloned().collect(); @@ -3083,9 +3071,9 @@ fn get_call( re_name, re_type, fn_name, param_str ) } else if fn_name.starts_with("*") { - format!(r#"{} = {};"#, fn_name, param_str) + format!(r#"{fn_name} = {param_str};"#) } else { - format!(r#"{}({})"#, fn_name, param_str) + format!(r#"{fn_name}({param_str})"#) }; return fn_str; } @@ -3337,7 +3325,7 @@ mod test { in_t = [spec[0], spec[1], spec[2]]; out_t = spec[3]; } else { - panic!("Bad spec: {}", line) + panic!("Bad spec: {line}") } if b.len() == 0 { if matches!(fn_type, Fntype::Store) { @@ -3430,8 +3418,8 @@ mod test { .arg(&arm_out_path) .arg(&aarch64_out_path) .status() { - eprintln!("Could not format `{}`: {}", arm_out_path.to_str().unwrap(), e); - eprintln!("Could not format `{}`: {}", aarch64_out_path.to_str().unwrap(), e); + eprintln!("Could not format `{}`: {e}", arm_out_path.to_str().unwrap()); + eprintln!("Could not format `{}`: {e}", aarch64_out_path.to_str().unwrap()); }; */ Ok(()) diff --git a/library/stdarch/crates/stdarch-test/Cargo.toml b/library/stdarch/crates/stdarch-test/Cargo.toml index 012b4e959..23bddeda6 100644 --- a/library/stdarch/crates/stdarch-test/Cargo.toml +++ b/library/stdarch/crates/stdarch-test/Cargo.toml @@ -2,7 +2,7 @@ name = "stdarch-test" version = "0.1.0" authors = ["Alex Crichton <alex@alexcrichton.com>"] -edition = "2018" +edition = "2021" [dependencies] assert-instr-macro = { path = "../assert-instr-macro" } diff --git a/library/stdarch/crates/stdarch-test/src/disassembly.rs b/library/stdarch/crates/stdarch-test/src/disassembly.rs index 3ace6b20e..8e4d57d4e 100644 --- a/library/stdarch/crates/stdarch-test/src/disassembly.rs +++ b/library/stdarch/crates/stdarch-test/src/disassembly.rs @@ -81,7 +81,7 @@ pub(crate) fn disassemble_myself() -> HashSet<Function> { .args(add_args) .arg(&me) .output() - .unwrap_or_else(|_| panic!("failed to execute objdump. OBJDUMP={}", objdump)); + .unwrap_or_else(|_| panic!("failed to execute objdump. 
OBJDUMP={objdump}")); println!( "{}\n{}", output.status, @@ -103,7 +103,7 @@ fn parse(output: &str) -> HashSet<Function> { lines.clone().count() ); for line in output.lines().take(100) { - println!("{}", line); + println!("{line}"); } let mut functions = HashSet::new(); @@ -112,9 +112,9 @@ fn parse(output: &str) -> HashSet<Function> { if !header.ends_with(':') || !header.contains("stdarch_test_shim") { continue; } - eprintln!("header: {}", header); + eprintln!("header: {header}"); let symbol = normalize(header); - eprintln!("normalized symbol: {}", symbol); + eprintln!("normalized symbol: {symbol}"); let mut instructions = Vec::new(); while let Some(instruction) = lines.next() { if instruction.ends_with(':') { diff --git a/library/stdarch/crates/stdarch-test/src/lib.rs b/library/stdarch/crates/stdarch-test/src/lib.rs index eba17771c..e0cf46cb4 100644 --- a/library/stdarch/crates/stdarch-test/src/lib.rs +++ b/library/stdarch/crates/stdarch-test/src/lib.rs @@ -64,10 +64,10 @@ pub fn assert(shim_addr: usize, fnname: &str, expected: &str) { // Make sure that the shim is not removed black_box(shim_addr); - //eprintln!("shim name: {}", fnname); + //eprintln!("shim name: {fnname}"); let function = &DISASSEMBLY .get(&Function::new(fnname)) - .unwrap_or_else(|| panic!("function \"{}\" not found in the disassembly", fnname)); + .unwrap_or_else(|| panic!("function \"{fnname}\" not found in the disassembly")); //eprintln!(" function: {:?}", function); let mut instrs = &function.instrs[..]; @@ -165,9 +165,9 @@ pub fn assert(shim_addr: usize, fnname: &str, expected: &str) { // Help debug by printing out the found disassembly, and then panic as we // didn't find the instruction. - println!("disassembly for {}: ", fnname,); + println!("disassembly for {fnname}: ",); for (i, instr) in instrs.iter().enumerate() { - println!("\t{:2}: {}", i, instr); + println!("\t{i:2}: {instr}"); } if !found { @@ -194,7 +194,7 @@ pub fn assert_skip_test_ok(name: &str) { if env::var("STDARCH_TEST_EVERYTHING").is_err() { return; } - panic!("skipped test `{}` when it shouldn't be skipped", name); + panic!("skipped test `{name}` when it shouldn't be skipped"); } // See comment in `assert-instr-macro` crate for why this exists diff --git a/library/stdarch/crates/stdarch-verify/Cargo.toml b/library/stdarch/crates/stdarch-verify/Cargo.toml index 6362e3d57..56548fd08 100644 --- a/library/stdarch/crates/stdarch-verify/Cargo.toml +++ b/library/stdarch/crates/stdarch-verify/Cargo.toml @@ -2,7 +2,7 @@ name = "stdarch-verify" version = "0.1.0" authors = ["Alex Crichton <alex@alexcrichton.com>"] -edition = "2018" +edition = "2021" [dependencies] proc-macro2 = "1.0" diff --git a/library/stdarch/crates/stdarch-verify/src/lib.rs b/library/stdarch/crates/stdarch-verify/src/lib.rs index 22108d26a..9b66137ba 100644 --- a/library/stdarch/crates/stdarch-verify/src/lib.rs +++ b/library/stdarch/crates/stdarch-verify/src/lib.rs @@ -85,20 +85,20 @@ fn functions(input: TokenStream, dirs: &[&str]) -> TokenStream { .iter() .map(|&(ref f, path)| { let name = &f.sig.ident; - // println!("{}", name); + // println!("{name}"); let mut arguments = Vec::new(); let mut const_arguments = Vec::new(); for input in f.sig.inputs.iter() { let ty = match *input { syn::FnArg::Typed(ref c) => &c.ty, - _ => panic!("invalid argument on {}", name), + _ => panic!("invalid argument on {name}"), }; arguments.push(to_type(ty)); } for generic in f.sig.generics.params.iter() { let ty = match *generic { syn::GenericParam::Const(ref c) => &c.ty, - _ => panic!("invalid generic argument on {}", name), + _ => panic!("invalid 
generic argument on {name}"), }; const_arguments.push(to_type(ty)); } @@ -144,12 +144,12 @@ fn functions(input: TokenStream, dirs: &[&str]) -> TokenStream { // strip leading underscore from fn name when building a test // _mm_foo -> mm_foo such that the test name is test_mm_foo. - let test_name_string = format!("{}", name); + let test_name_string = format!("{name}"); let mut test_name_id = test_name_string.as_str(); while test_name_id.starts_with('_') { test_name_id = &test_name_id[1..]; } - let has_test = tests.contains(&format!("test_{}", test_name_id)); + let has_test = tests.contains(&format!("test_{test_name_id}")); quote! { Function { @@ -167,7 +167,7 @@ fn functions(input: TokenStream, dirs: &[&str]) -> TokenStream { .collect::<Vec<_>>(); let ret = quote! { #input: &[Function] = &[#(#functions),*]; }; - // println!("{}", ret); + // println!("{ret}"); ret.into() } @@ -336,7 +336,7 @@ fn to_type(t: &syn::Type) -> proc_macro2::TokenStream { "v4f32" => quote! { &v4f32 }, "v2f64" => quote! { &v2f64 }, - s => panic!("unsupported type: \"{}\"", s), + s => panic!("unsupported type: \"{s}\""), }, syn::Type::Ptr(syn::TypePtr { ref elem, diff --git a/library/stdarch/crates/stdarch-verify/tests/arm.rs b/library/stdarch/crates/stdarch-verify/tests/arm.rs index 6ce5ce05f..dd6720ef0 100644 --- a/library/stdarch/crates/stdarch-verify/tests/arm.rs +++ b/library/stdarch/crates/stdarch-verify/tests/arm.rs @@ -628,7 +628,7 @@ fn verify_all_signatures() { if let Err(e) = matches(rust, arm) { println!("failed to verify `{}`", rust.name); - println!(" * {}", e); + println!(" * {e}"); all_valid = false; } } @@ -801,7 +801,7 @@ fn parse_intrinsic(node: &Rc<Node>) -> Intrinsic { let instruction = match instruction { Some(s) => s.trim().to_lowercase(), - None => panic!("can't find instruction for `{}`", name), + None => panic!("can't find instruction for `{name}`"), }; Intrinsic { @@ -973,7 +973,7 @@ fn parse_ty_base(s: &str) -> &'static Type { "uint8x8x3_t" => &U8X8X3, "uint8x8x4_t" => &U8X8X4, - _ => panic!("failed to parse html type {:?}", s), + _ => panic!("failed to parse html type {s:?}"), } } diff --git a/library/stdarch/crates/stdarch-verify/tests/mips.rs b/library/stdarch/crates/stdarch-verify/tests/mips.rs index 1eb86dc29..365057b1d 100644 --- a/library/stdarch/crates/stdarch-verify/tests/mips.rs +++ b/library/stdarch/crates/stdarch-verify/tests/mips.rs @@ -125,7 +125,7 @@ impl<'a> From<&'a str> for MsaTy { "u64" => MsaTy::u64, "void" => MsaTy::Void, "void *" => MsaTy::MutVoidPtr, - v => panic!("unknown ty: \"{}\"", v), + v => panic!("unknown ty: \"{v}\""), } } } @@ -198,8 +198,8 @@ fn verify_all_signatures() { } use std::convert::TryFrom; - let intrinsic: MsaIntrinsic = TryFrom::try_from(line) - .unwrap_or_else(|_| panic!("failed to parse line: \"{}\"", line)); + let intrinsic: MsaIntrinsic = + TryFrom::try_from(line).unwrap_or_else(|_| panic!("failed to parse line: \"{line}\"")); assert!(!intrinsics.contains_key(&intrinsic.id)); intrinsics.insert(intrinsic.id.clone(), intrinsic); } @@ -253,7 +253,7 @@ fn verify_all_signatures() { if let Err(e) = matches(rust, mips) { println!("failed to verify `{}`", rust.name); - println!(" * {}", e); + println!(" * {e}"); all_valid = false; } } diff --git a/library/stdarch/crates/stdarch-verify/tests/x86-intel.rs b/library/stdarch/crates/stdarch-verify/tests/x86-intel.rs index 89494bfd2..cd9bd18ea 100644 --- a/library/stdarch/crates/stdarch-verify/tests/x86-intel.rs +++ b/library/stdarch/crates/stdarch-verify/tests/x86-intel.rs @@ -367,7 +367,7 @@ fn verify_all_signatures() 
{ } println!("failed to verify `{}`", rust.name); for error in errors { - println!(" * {}", error); + println!(" * {error}"); } all_valid = false; } @@ -403,18 +403,18 @@ fn verify_all_signatures() { if PRINT_MISSING_LISTS || PRINT_MISSING_LISTS_MARKDOWN { for (k, v) in missing { if PRINT_MISSING_LISTS_MARKDOWN { - println!("\n
<details><summary>{:?}</summary><p>\n", k); + println!("\n<details><summary>{k:?}</summary><p>\n"); for intel in v { let url = format!( "https://software.intel.com/sites/landingpage\ /IntrinsicsGuide/#text={}&expand=5236", intel.name ); - println!(" * [ ] [`{}`]({})", intel.name, url); + println!(" * [ ] [`{}`]({url})", intel.name); } println!("</p></details>\n"); } else { - println!("\n{:?}\n", k); + println!("\n{k:?}\n"); for intel in v { println!("\t{}", intel.name); } @@ -471,6 +471,18 @@ fn matches(rust: &Function, intel: &Intrinsic) -> Result<(), String> { continue; } + // Some CPUs support VAES/GFNI/VPCLMULQDQ without AVX512, even though + // the Intel documentation states that those instructions require + // AVX512VL. + if *cpuid == "AVX512VL" + && intel .cpuid .iter() .any(|x| matches!(&**x, "VAES" | "GFNI" | "VPCLMULQDQ")) + { + continue; + } + let cpuid = cpuid .chars() .flat_map(|c| c.to_lowercase()) diff --git a/library/stdarch/examples/Cargo.toml b/library/stdarch/examples/Cargo.toml index e2590ed9f..38f497fa6 100644 --- a/library/stdarch/examples/Cargo.toml +++ b/library/stdarch/examples/Cargo.toml @@ -7,7 +7,7 @@ authors = [ "Gonzalo Brito Gadeschi ", ] description = "Examples of the stdarch crate." -edition = "2018" +edition = "2021" default-run = "hex" [dependencies] diff --git a/library/stdarch/examples/connect5.rs b/library/stdarch/examples/connect5.rs index 1b3325785..09e7e48a7 100644 --- a/library/stdarch/examples/connect5.rs +++ b/library/stdarch/examples/connect5.rs @@ -1256,7 +1256,7 @@ fn main() { pos_disp(&test1); if pos_is_end(&test1) { - println!("Game over!!!!!! at Move {}", i); + println!("Game over!!!!!! at Move {i}"); count = i + 1; break; } diff --git a/library/stdarch/examples/hex.rs b/library/stdarch/examples/hex.rs index d982a71b9..a961793a0 100644 --- a/library/stdarch/examples/hex.rs +++ b/library/stdarch/examples/hex.rs @@ -40,7 +40,7 @@ fn main() { io::stdin().read_to_end(&mut input).unwrap(); let mut dst = vec![0; 2 * input.len()]; let s = hex_encode(&input, &mut dst).unwrap(); - println!("{}", s); + println!("{s}"); } fn hex_encode<'a>(src: &[u8], dst: &'a mut [u8]) -> Result<&'a str, usize> { diff --git a/library/stdarch/triagebot.toml b/library/stdarch/triagebot.toml index fa0824ac5..f946af7f6 100644 --- a/library/stdarch/triagebot.toml +++ b/library/stdarch/triagebot.toml @@ -1 +1,4 @@ [assign] + +[assign.owners] +"*" = ["@Amanieu"] -- cgit v1.2.3