Diffstat (limited to 'third_party/simde/simde/arm/neon/and.h')
-rw-r--r--  third_party/simde/simde/arm/neon/and.h  552
1 file changed, 552 insertions(+), 0 deletions(-)
diff --git a/third_party/simde/simde/arm/neon/and.h b/third_party/simde/simde/arm/neon/and.h
new file mode 100644
index 0000000000..381154228f
--- /dev/null
+++ b/third_party/simde/simde/arm/neon/and.h
@@ -0,0 +1,552 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Copyright:
+ * 2020 Evan Nemerson <evan@nemerson.com>
+ * 2020 Christopher Moore <moore@free.fr>
+ */
+
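+/* Portable implementation of the AArch32/AArch64 NEON bitwise-AND
+ * intrinsics (vand_* for 64-bit vectors, vandq_* for 128-bit vectors).
+ * Every function below follows the same pattern: use the native NEON
+ * intrinsic when available, otherwise fall back to another platform's
+ * SIMD equivalent (MMX, SSE2, AltiVec, WASM SIMD128), then to GCC-style
+ * vector extensions, and finally to a per-lane scalar loop. */
+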
+#if !defined(SIMDE_ARM_NEON_AND_H)
+#define SIMDE_ARM_NEON_AND_H
+
+#include "types.h"
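+
+/* Minimal usage sketch (an illustration, not part of this header: it
+ * assumes the rest of SIMDe's NEON API, e.g. simde_vdup_n_s8 from
+ * dup_n.h, is also included):
+ *
+ *   simde_int8x8_t a = simde_vdup_n_s8(0x0F);
+ *   simde_int8x8_t b = simde_vdup_n_s8(0x55);
+ *   simde_int8x8_t r = simde_vand_s8(a, b);  // every lane == 0x05
+ */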
+
+HEDLEY_DIAGNOSTIC_PUSH
+SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
+SIMDE_BEGIN_DECLS_
+
+SIMDE_FUNCTION_ATTRIBUTES
+simde_int8x8_t
+simde_vand_s8(simde_int8x8_t a, simde_int8x8_t b) {
+ #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
+ return vand_s8(a, b);
+ #else
+ simde_int8x8_private
+ r_,
+ a_ = simde_int8x8_to_private(a),
+ b_ = simde_int8x8_to_private(b);
+
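+    /* Fallback ladder for the 64-bit vand_* forms: a single MMX AND on
+     * x86, then GCC-style vector extensions, then a scalar loop. */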
+ #if defined(SIMDE_X86_MMX_NATIVE)
+ r_.m64 = _mm_and_si64(a_.m64, b_.m64);
+ #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
+ r_.values = a_.values & b_.values;
+ #else
+ SIMDE_VECTORIZE
+ for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
+ r_.values[i] = a_.values[i] & b_.values[i];
+ }
+ #endif
+
+ return simde_int8x8_from_private(r_);
+ #endif
+}
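+/* When native aliases are enabled, the unprefixed NEON name is redefined
+ * to point at the SIMDe implementation, so existing NEON code compiles
+ * unmodified on non-ARM targets. The same aliasing follows each function
+ * below. */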
+#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
+ #undef vand_s8
+ #define vand_s8(a, b) simde_vand_s8((a), (b))
+#endif
+
+SIMDE_FUNCTION_ATTRIBUTES
+simde_int16x4_t
+simde_vand_s16(simde_int16x4_t a, simde_int16x4_t b) {
+ #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
+ return vand_s16(a, b);
+ #else
+ simde_int16x4_private
+ r_,
+ a_ = simde_int16x4_to_private(a),
+ b_ = simde_int16x4_to_private(b);
+
+ #if defined(SIMDE_X86_MMX_NATIVE)
+ r_.m64 = _mm_and_si64(a_.m64, b_.m64);
+ #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
+ r_.values = a_.values & b_.values;
+ #else
+ SIMDE_VECTORIZE
+ for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
+ r_.values[i] = a_.values[i] & b_.values[i];
+ }
+ #endif
+
+ return simde_int16x4_from_private(r_);
+ #endif
+}
+#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
+ #undef vand_s16
+ #define vand_s16(a, b) simde_vand_s16((a), (b))
+#endif
+
+SIMDE_FUNCTION_ATTRIBUTES
+simde_int32x2_t
+simde_vand_s32(simde_int32x2_t a, simde_int32x2_t b) {
+ #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
+ return vand_s32(a, b);
+ #else
+ simde_int32x2_private
+ r_,
+ a_ = simde_int32x2_to_private(a),
+ b_ = simde_int32x2_to_private(b);
+
+ #if defined(SIMDE_X86_MMX_NATIVE)
+ r_.m64 = _mm_and_si64(a_.m64, b_.m64);
+ #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
+ r_.values = a_.values & b_.values;
+ #else
+ SIMDE_VECTORIZE
+ for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
+ r_.values[i] = a_.values[i] & b_.values[i];
+ }
+ #endif
+
+ return simde_int32x2_from_private(r_);
+ #endif
+}
+#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
+ #undef vand_s32
+ #define vand_s32(a, b) simde_vand_s32((a), (b))
+#endif
+
+SIMDE_FUNCTION_ATTRIBUTES
+simde_int64x1_t
+simde_vand_s64(simde_int64x1_t a, simde_int64x1_t b) {
+ #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
+ return vand_s64(a, b);
+ #else
+ simde_int64x1_private
+ r_,
+ a_ = simde_int64x1_to_private(a),
+ b_ = simde_int64x1_to_private(b);
+
+ #if defined(SIMDE_X86_MMX_NATIVE)
+ r_.m64 = _mm_and_si64(a_.m64, b_.m64);
+ #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
+ r_.values = a_.values & b_.values;
+ #else
+ SIMDE_VECTORIZE
+ for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
+ r_.values[i] = a_.values[i] & b_.values[i];
+ }
+ #endif
+
+ return simde_int64x1_from_private(r_);
+ #endif
+}
+#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
+ #undef vand_s64
+ #define vand_s64(a, b) simde_vand_s64((a), (b))
+#endif
+
+SIMDE_FUNCTION_ATTRIBUTES
+simde_uint8x8_t
+simde_vand_u8(simde_uint8x8_t a, simde_uint8x8_t b) {
+ #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
+ return vand_u8(a, b);
+ #else
+ simde_uint8x8_private
+ r_,
+ a_ = simde_uint8x8_to_private(a),
+ b_ = simde_uint8x8_to_private(b);
+
+ #if defined(SIMDE_X86_MMX_NATIVE)
+ r_.m64 = _mm_and_si64(a_.m64, b_.m64);
+ #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
+ r_.values = a_.values & b_.values;
+ #else
+ SIMDE_VECTORIZE
+ for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
+ r_.values[i] = a_.values[i] & b_.values[i];
+ }
+ #endif
+
+ return simde_uint8x8_from_private(r_);
+ #endif
+}
+#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
+ #undef vand_u8
+ #define vand_u8(a, b) simde_vand_u8((a), (b))
+#endif
+
+SIMDE_FUNCTION_ATTRIBUTES
+simde_uint16x4_t
+simde_vand_u16(simde_uint16x4_t a, simde_uint16x4_t b) {
+ #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
+ return vand_u16(a, b);
+ #else
+ simde_uint16x4_private
+ r_,
+ a_ = simde_uint16x4_to_private(a),
+ b_ = simde_uint16x4_to_private(b);
+
+ #if defined(SIMDE_X86_MMX_NATIVE)
+ r_.m64 = _mm_and_si64(a_.m64, b_.m64);
+ #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
+ r_.values = a_.values & b_.values;
+ #else
+ SIMDE_VECTORIZE
+ for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
+ r_.values[i] = a_.values[i] & b_.values[i];
+ }
+ #endif
+
+ return simde_uint16x4_from_private(r_);
+ #endif
+}
+#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
+ #undef vand_u16
+ #define vand_u16(a, b) simde_vand_u16((a), (b))
+#endif
+
+SIMDE_FUNCTION_ATTRIBUTES
+simde_uint32x2_t
+simde_vand_u32(simde_uint32x2_t a, simde_uint32x2_t b) {
+ #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
+ return vand_u32(a, b);
+ #else
+ simde_uint32x2_private
+ r_,
+ a_ = simde_uint32x2_to_private(a),
+ b_ = simde_uint32x2_to_private(b);
+
+ #if defined(SIMDE_X86_MMX_NATIVE)
+ r_.m64 = _mm_and_si64(a_.m64, b_.m64);
+ #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
+ r_.values = a_.values & b_.values;
+ #else
+ SIMDE_VECTORIZE
+ for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
+ r_.values[i] = a_.values[i] & b_.values[i];
+ }
+ #endif
+
+ return simde_uint32x2_from_private(r_);
+ #endif
+}
+#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
+ #undef vand_u32
+ #define vand_u32(a, b) simde_vand_u32((a), (b))
+#endif
+
+SIMDE_FUNCTION_ATTRIBUTES
+simde_uint64x1_t
+simde_vand_u64(simde_uint64x1_t a, simde_uint64x1_t b) {
+ #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
+ return vand_u64(a, b);
+ #else
+ simde_uint64x1_private
+ r_,
+ a_ = simde_uint64x1_to_private(a),
+ b_ = simde_uint64x1_to_private(b);
+
+ #if defined(SIMDE_X86_MMX_NATIVE)
+ r_.m64 = _mm_and_si64(a_.m64, b_.m64);
+ #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
+ r_.values = a_.values & b_.values;
+ #else
+ SIMDE_VECTORIZE
+ for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
+ r_.values[i] = a_.values[i] & b_.values[i];
+ }
+ #endif
+
+ return simde_uint64x1_from_private(r_);
+ #endif
+}
+#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
+ #undef vand_u64
+ #define vand_u64(a, b) simde_vand_u64((a), (b))
+#endif
+
+SIMDE_FUNCTION_ATTRIBUTES
+simde_int8x16_t
+simde_vandq_s8(simde_int8x16_t a, simde_int8x16_t b) {
+ #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
+ return vandq_s8(a, b);
+ #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
+ return vec_and(a, b);
+ #else
+ simde_int8x16_private
+ r_,
+ a_ = simde_int8x16_to_private(a),
+ b_ = simde_int8x16_to_private(b);
+
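+    /* Fallback ladder for the 128-bit vandq_* forms: SSE2 on x86, then
+     * WASM SIMD128, then GCC-style vector extensions, then a scalar
+     * loop. */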
+ #if defined(SIMDE_X86_SSE2_NATIVE)
+ r_.m128i = _mm_and_si128(a_.m128i, b_.m128i);
+ #elif defined(SIMDE_WASM_SIMD128_NATIVE)
+ r_.v128 = wasm_v128_and(a_.v128, b_.v128);
+ #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
+ r_.values = a_.values & b_.values;
+ #else
+ SIMDE_VECTORIZE
+ for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
+ r_.values[i] = a_.values[i] & b_.values[i];
+ }
+ #endif
+
+ return simde_int8x16_from_private(r_);
+ #endif
+}
+#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
+ #undef vandq_s8
+ #define vandq_s8(a, b) simde_vandq_s8((a), (b))
+#endif
+
+SIMDE_FUNCTION_ATTRIBUTES
+simde_int16x8_t
+simde_vandq_s16(simde_int16x8_t a, simde_int16x8_t b) {
+ #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
+ return vandq_s16(a, b);
+ #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
+ return vec_and(a, b);
+ #else
+ simde_int16x8_private
+ r_,
+ a_ = simde_int16x8_to_private(a),
+ b_ = simde_int16x8_to_private(b);
+
+ #if defined(SIMDE_X86_SSE2_NATIVE)
+ r_.m128i = _mm_and_si128(a_.m128i, b_.m128i);
+ #elif defined(SIMDE_WASM_SIMD128_NATIVE)
+ r_.v128 = wasm_v128_and(a_.v128, b_.v128);
+ #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
+ r_.values = a_.values & b_.values;
+ #else
+ SIMDE_VECTORIZE
+ for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
+ r_.values[i] = a_.values[i] & b_.values[i];
+ }
+ #endif
+
+ return simde_int16x8_from_private(r_);
+ #endif
+}
+#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
+ #undef vandq_s16
+ #define vandq_s16(a, b) simde_vandq_s16((a), (b))
+#endif
+
+SIMDE_FUNCTION_ATTRIBUTES
+simde_int32x4_t
+simde_vandq_s32(simde_int32x4_t a, simde_int32x4_t b) {
+ #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
+ return vandq_s32(a, b);
+ #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
+ return vec_and(a, b);
+ #else
+ simde_int32x4_private
+ r_,
+ a_ = simde_int32x4_to_private(a),
+ b_ = simde_int32x4_to_private(b);
+
+ #if defined(SIMDE_X86_SSE2_NATIVE)
+ r_.m128i = _mm_and_si128(a_.m128i, b_.m128i);
+ #elif defined(SIMDE_WASM_SIMD128_NATIVE)
+ r_.v128 = wasm_v128_and(a_.v128, b_.v128);
+ #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
+ r_.values = a_.values & b_.values;
+ #else
+ SIMDE_VECTORIZE
+ for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
+ r_.values[i] = a_.values[i] & b_.values[i];
+ }
+ #endif
+
+ return simde_int32x4_from_private(r_);
+ #endif
+}
+#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
+ #undef vandq_s32
+ #define vandq_s32(a, b) simde_vandq_s32((a), (b))
+#endif
+
+SIMDE_FUNCTION_ATTRIBUTES
+simde_int64x2_t
+simde_vandq_s64(simde_int64x2_t a, simde_int64x2_t b) {
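+  /* Unlike the narrower vandq_* variants, the 64-bit-lane forms (here
+   * and in simde_vandq_u64) gate AltiVec on POWER7 rather than POWER6,
+   * presumably because vec_and on 64-bit element vectors requires VSX. */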
+ #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
+ return vandq_s64(a, b);
+ #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
+ return vec_and(a, b);
+ #else
+ simde_int64x2_private
+ r_,
+ a_ = simde_int64x2_to_private(a),
+ b_ = simde_int64x2_to_private(b);
+
+ #if defined(SIMDE_X86_SSE2_NATIVE)
+ r_.m128i = _mm_and_si128(a_.m128i, b_.m128i);
+ #elif defined(SIMDE_WASM_SIMD128_NATIVE)
+ r_.v128 = wasm_v128_and(a_.v128, b_.v128);
+ #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
+ r_.values = a_.values & b_.values;
+ #else
+ SIMDE_VECTORIZE
+ for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
+ r_.values[i] = a_.values[i] & b_.values[i];
+ }
+ #endif
+
+ return simde_int64x2_from_private(r_);
+ #endif
+}
+#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
+ #undef vandq_s64
+ #define vandq_s64(a, b) simde_vandq_s64((a), (b))
+#endif
+
+SIMDE_FUNCTION_ATTRIBUTES
+simde_uint8x16_t
+simde_vandq_u8(simde_uint8x16_t a, simde_uint8x16_t b) {
+ #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
+ return vandq_u8(a, b);
+ #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
+ return vec_and(a, b);
+ #else
+ simde_uint8x16_private
+ r_,
+ a_ = simde_uint8x16_to_private(a),
+ b_ = simde_uint8x16_to_private(b);
+
+ #if defined(SIMDE_X86_SSE2_NATIVE)
+ r_.m128i = _mm_and_si128(a_.m128i, b_.m128i);
+ #elif defined(SIMDE_WASM_SIMD128_NATIVE)
+ r_.v128 = wasm_v128_and(a_.v128, b_.v128);
+ #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
+ r_.values = a_.values & b_.values;
+ #else
+ SIMDE_VECTORIZE
+ for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
+ r_.values[i] = a_.values[i] & b_.values[i];
+ }
+ #endif
+
+ return simde_uint8x16_from_private(r_);
+ #endif
+}
+#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
+ #undef vandq_u8
+ #define vandq_u8(a, b) simde_vandq_u8((a), (b))
+#endif
+
+SIMDE_FUNCTION_ATTRIBUTES
+simde_uint16x8_t
+simde_vandq_u16(simde_uint16x8_t a, simde_uint16x8_t b) {
+ #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
+ return vandq_u16(a, b);
+ #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
+ return vec_and(a, b);
+ #else
+ simde_uint16x8_private
+ r_,
+ a_ = simde_uint16x8_to_private(a),
+ b_ = simde_uint16x8_to_private(b);
+
+ #if defined(SIMDE_X86_SSE2_NATIVE)
+ r_.m128i = _mm_and_si128(a_.m128i, b_.m128i);
+ #elif defined(SIMDE_WASM_SIMD128_NATIVE)
+ r_.v128 = wasm_v128_and(a_.v128, b_.v128);
+ #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
+ r_.values = a_.values & b_.values;
+ #else
+ SIMDE_VECTORIZE
+ for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
+ r_.values[i] = a_.values[i] & b_.values[i];
+ }
+ #endif
+
+ return simde_uint16x8_from_private(r_);
+ #endif
+}
+#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
+ #undef vandq_u16
+ #define vandq_u16(a, b) simde_vandq_u16((a), (b))
+#endif
+
+SIMDE_FUNCTION_ATTRIBUTES
+simde_uint32x4_t
+simde_vandq_u32(simde_uint32x4_t a, simde_uint32x4_t b) {
+ #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
+ return vandq_u32(a, b);
+ #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
+ return vec_and(a, b);
+ #else
+ simde_uint32x4_private
+ r_,
+ a_ = simde_uint32x4_to_private(a),
+ b_ = simde_uint32x4_to_private(b);
+
+ #if defined(SIMDE_X86_SSE2_NATIVE)
+ r_.m128i = _mm_and_si128(a_.m128i, b_.m128i);
+ #elif defined(SIMDE_WASM_SIMD128_NATIVE)
+ r_.v128 = wasm_v128_and(a_.v128, b_.v128);
+ #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
+ r_.values = a_.values & b_.values;
+ #else
+ SIMDE_VECTORIZE
+ for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
+ r_.values[i] = a_.values[i] & b_.values[i];
+ }
+ #endif
+
+ return simde_uint32x4_from_private(r_);
+ #endif
+}
+#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
+ #undef vandq_u32
+ #define vandq_u32(a, b) simde_vandq_u32((a), (b))
+#endif
+
+SIMDE_FUNCTION_ATTRIBUTES
+simde_uint64x2_t
+simde_vandq_u64(simde_uint64x2_t a, simde_uint64x2_t b) {
+ #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
+ return vandq_u64(a, b);
+ #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
+ return vec_and(a, b);
+ #else
+ simde_uint64x2_private
+ r_,
+ a_ = simde_uint64x2_to_private(a),
+ b_ = simde_uint64x2_to_private(b);
+
+ #if defined(SIMDE_X86_SSE2_NATIVE)
+ r_.m128i = _mm_and_si128(a_.m128i, b_.m128i);
+ #elif defined(SIMDE_WASM_SIMD128_NATIVE)
+ r_.v128 = wasm_v128_and(a_.v128, b_.v128);
+ #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
+ r_.values = a_.values & b_.values;
+ #else
+ SIMDE_VECTORIZE
+ for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
+ r_.values[i] = a_.values[i] & b_.values[i];
+ }
+ #endif
+
+ return simde_uint64x2_from_private(r_);
+ #endif
+}
+#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
+ #undef vandq_u64
+ #define vandq_u64(a, b) simde_vandq_u64((a), (b))
+#endif
+
+SIMDE_END_DECLS_
+HEDLEY_DIAGNOSTIC_POP
+
+#endif /* !defined(SIMDE_ARM_NEON_AND_H) */