Diffstat (limited to 'comm/third_party/botan/src/lib/utils/simd/simd_32.h')
-rw-r--r--  comm/third_party/botan/src/lib/utils/simd/simd_32.h  | 621
1 file changed, 621 insertions(+), 0 deletions(-)
diff --git a/comm/third_party/botan/src/lib/utils/simd/simd_32.h b/comm/third_party/botan/src/lib/utils/simd/simd_32.h
new file mode 100644
index 0000000000..5cbc32a187
--- /dev/null
+++ b/comm/third_party/botan/src/lib/utils/simd/simd_32.h
@@ -0,0 +1,621 @@
+/*
+* Lightweight wrappers for SIMD operations
+* (C) 2009,2011,2016,2017,2019 Jack Lloyd
+*
+* Botan is released under the Simplified BSD License (see license.txt)
+*/
+
+#ifndef BOTAN_SIMD_32_H_
+#define BOTAN_SIMD_32_H_
+
+#include <botan/types.h>
+
+#if defined(BOTAN_TARGET_SUPPORTS_SSE2)
+ #include <emmintrin.h>
+ #define BOTAN_SIMD_USE_SSE2
+
+#elif defined(BOTAN_TARGET_SUPPORTS_ALTIVEC)
+ #include <botan/bswap.h>
+ #include <botan/loadstor.h>
+ #include <altivec.h>
+ #undef vector
+ #undef bool
+ #define BOTAN_SIMD_USE_ALTIVEC
+
+#elif defined(BOTAN_TARGET_SUPPORTS_NEON)
+ #include <botan/cpuid.h>
+ #include <arm_neon.h>
+ #define BOTAN_SIMD_USE_NEON
+
+#else
+ #error "No SIMD instruction set enabled"
+#endif
+
+#if defined(BOTAN_SIMD_USE_SSE2)
+ #define BOTAN_SIMD_ISA "sse2"
+ #define BOTAN_VPERM_ISA "ssse3"
+ #define BOTAN_CLMUL_ISA "pclmul"
+#elif defined(BOTAN_SIMD_USE_NEON)
+ #if defined(BOTAN_TARGET_ARCH_IS_ARM64)
+ #define BOTAN_SIMD_ISA "+simd"
+ #define BOTAN_CLMUL_ISA "+crypto"
+ #else
+ #define BOTAN_SIMD_ISA "fpu=neon"
+ #endif
+ #define BOTAN_VPERM_ISA BOTAN_SIMD_ISA
+#elif defined(BOTAN_SIMD_USE_ALTIVEC)
+ #define BOTAN_SIMD_ISA "altivec"
+ #define BOTAN_VPERM_ISA "altivec"
+ #define BOTAN_CLMUL_ISA "crypto"
+#endif
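+
+// These ISA strings are consumed by BOTAN_FUNC_ISA (on supporting compilers
+// this expands to a per-function target attribute), so individual functions
+// can be compiled for the required instruction set; see raw() below.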
+
+namespace Botan {
+
+#if defined(BOTAN_SIMD_USE_SSE2)
+ typedef __m128i native_simd_type;
+#elif defined(BOTAN_SIMD_USE_ALTIVEC)
+ typedef __vector unsigned int native_simd_type;
+#elif defined(BOTAN_SIMD_USE_NEON)
+ typedef uint32x4_t native_simd_type;
+#endif
+
+/**
+* 4x32 bit SIMD register
+*
+* This class is not a general-purpose SIMD type, and offers only the
+* operations needed to evaluate specific crypto primitives. For example,
+* it does not currently have equality operators of any kind.
+*
+* Implemented for SSE2, VMX (Altivec), and NEON.
+*/
+class SIMD_4x32 final
+ {
+ public:
+
+ SIMD_4x32& operator=(const SIMD_4x32& other) = default;
+ SIMD_4x32(const SIMD_4x32& other) = default;
+
+ SIMD_4x32& operator=(SIMD_4x32&& other) = default;
+ SIMD_4x32(SIMD_4x32&& other) = default;
+
+ /**
+ * Zero initialize SIMD register with 4 32-bit elements
+ */
+ SIMD_4x32() // zero initialized
+ {
+#if defined(BOTAN_SIMD_USE_SSE2)
+ m_simd = _mm_setzero_si128();
+#elif defined(BOTAN_SIMD_USE_ALTIVEC)
+ m_simd = vec_splat_u32(0);
+#elif defined(BOTAN_SIMD_USE_NEON)
+ m_simd = vdupq_n_u32(0);
+#endif
+ }
+
+ /**
+ * Load SIMD register with 4 32-bit elements
+ */
+ explicit SIMD_4x32(const uint32_t B[4])
+ {
+#if defined(BOTAN_SIMD_USE_SSE2)
+ m_simd = _mm_loadu_si128(reinterpret_cast<const __m128i*>(B));
+#elif defined(BOTAN_SIMD_USE_ALTIVEC)
+ __vector unsigned int val = { B[0], B[1], B[2], B[3]};
+ m_simd = val;
+#elif defined(BOTAN_SIMD_USE_NEON)
+ m_simd = vld1q_u32(B);
+#endif
+ }
+
+ /**
+ * Load SIMD register with 4 32-bit elements
+ */
+ SIMD_4x32(uint32_t B0, uint32_t B1, uint32_t B2, uint32_t B3)
+ {
+#if defined(BOTAN_SIMD_USE_SSE2)
+ m_simd = _mm_set_epi32(B3, B2, B1, B0);
+#elif defined(BOTAN_SIMD_USE_ALTIVEC)
+ __vector unsigned int val = {B0, B1, B2, B3};
+ m_simd = val;
+#elif defined(BOTAN_SIMD_USE_NEON)
+ // Better way to do this?
+ const uint32_t B[4] = { B0, B1, B2, B3 };
+ m_simd = vld1q_u32(B);
+#endif
+ }
+
+ /**
+ * Load SIMD register with one 32-bit element repeated
+ */
+ static SIMD_4x32 splat(uint32_t B)
+ {
+#if defined(BOTAN_SIMD_USE_SSE2)
+ return SIMD_4x32(_mm_set1_epi32(B));
+#elif defined(BOTAN_SIMD_USE_NEON)
+ return SIMD_4x32(vdupq_n_u32(B));
+#else
+ return SIMD_4x32(B, B, B, B);
+#endif
+ }
+
+ /**
+ * Load SIMD register with one 8-bit element repeated
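+ * (e.g. splat_u8(0xAB) sets each 32-bit lane to 0xABABABAB)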
+ */
+ static SIMD_4x32 splat_u8(uint8_t B)
+ {
+#if defined(BOTAN_SIMD_USE_SSE2)
+ return SIMD_4x32(_mm_set1_epi8(B));
+#elif defined(BOTAN_SIMD_USE_NEON)
+ return SIMD_4x32(vreinterpretq_u32_u8(vdupq_n_u8(B)));
+#else
+ const uint32_t B4 = make_uint32(B, B, B, B);
+ return SIMD_4x32(B4, B4, B4, B4);
+#endif
+ }
+
+ /**
+ * Load a SIMD register with little-endian convention
+ */
+ static SIMD_4x32 load_le(const void* in)
+ {
+#if defined(BOTAN_SIMD_USE_SSE2)
+ return SIMD_4x32(_mm_loadu_si128(reinterpret_cast<const __m128i*>(in)));
+#elif defined(BOTAN_SIMD_USE_ALTIVEC)
+ uint32_t R[4];
+ Botan::load_le(R, static_cast<const uint8_t*>(in), 4);
+ return SIMD_4x32(R);
+#elif defined(BOTAN_SIMD_USE_NEON)
+ SIMD_4x32 l(vld1q_u32(static_cast<const uint32_t*>(in)));
+ return CPUID::is_big_endian() ? l.bswap() : l;
+#endif
+ }
+
+ /**
+ * Load a SIMD register with big-endian convention
+ */
+ static SIMD_4x32 load_be(const void* in)
+ {
+#if defined(BOTAN_SIMD_USE_SSE2)
+ return load_le(in).bswap();
+
+#elif defined(BOTAN_SIMD_USE_ALTIVEC)
+ uint32_t R[4];
+ Botan::load_be(R, static_cast<const uint8_t*>(in), 4);
+ return SIMD_4x32(R);
+
+#elif defined(BOTAN_SIMD_USE_NEON)
+ SIMD_4x32 l(vld1q_u32(static_cast<const uint32_t*>(in)));
+ return CPUID::is_little_endian() ? l.bswap() : l;
+#endif
+ }
+
+ void store_le(uint32_t out[4]) const
+ {
+ this->store_le(reinterpret_cast<uint8_t*>(out));
+ }
+
+ void store_le(uint64_t out[2]) const
+ {
+ this->store_le(reinterpret_cast<uint8_t*>(out));
+ }
+
+ /**
+ * Store SIMD register with little-endian convention
+ */
+ void store_le(uint8_t out[]) const
+ {
+#if defined(BOTAN_SIMD_USE_SSE2)
+
+ _mm_storeu_si128(reinterpret_cast<__m128i*>(out), raw());
+
+#elif defined(BOTAN_SIMD_USE_ALTIVEC)
+
+ union {
+ __vector unsigned int V;
+ uint32_t R[4];
+ } vec;
+ vec.V = raw();
+ Botan::store_le(out, vec.R[0], vec.R[1], vec.R[2], vec.R[3]);
+
+#elif defined(BOTAN_SIMD_USE_NEON)
+ if(CPUID::is_little_endian())
+ {
+ vst1q_u8(out, vreinterpretq_u8_u32(m_simd));
+ }
+ else
+ {
+ vst1q_u8(out, vreinterpretq_u8_u32(bswap().m_simd));
+ }
+#endif
+ }
+
+ /**
+ * Store SIMD register with big-endian convention
+ */
+ void store_be(uint8_t out[]) const
+ {
+#if defined(BOTAN_SIMD_USE_SSE2)
+
+ bswap().store_le(out);
+
+#elif defined(BOTAN_SIMD_USE_ALTIVEC)
+
+ union {
+ __vector unsigned int V;
+ uint32_t R[4];
+ } vec;
+ vec.V = m_simd;
+ Botan::store_be(out, vec.R[0], vec.R[1], vec.R[2], vec.R[3]);
+
+#elif defined(BOTAN_SIMD_USE_NEON)
+ if(CPUID::is_little_endian())
+ {
+ vst1q_u8(out, vreinterpretq_u8_u32(bswap().m_simd));
+ }
+ else
+ {
+ vst1q_u8(out, vreinterpretq_u8_u32(m_simd));
+ }
+#endif
+ }
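+
+ /*
+ * A minimal usage sketch of the load/store interface (buf is a local
+ * example buffer): a little-endian round trip is byte-order stable on
+ * any host.
+ *
+ *   uint8_t buf[16] = { 0 };
+ *   SIMD_4x32 v = SIMD_4x32::load_le(buf);
+ *   v += SIMD_4x32::splat(1);  // each 32-bit lane is now 1
+ *   v.store_le(buf);           // buf now holds 01 00 00 00, repeated
+ */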
+
+ /*
+ * This is used for SHA-2/SHACAL2
+ * Return rotr(ROT1) ^ rotr(ROT2) ^ rotr(ROT3)
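+ * e.g. SHA-256's Sigma0(x) = rotr(x,2) ^ rotr(x,13) ^ rotr(x,22)
+ * is computed as x.rho<2,13,22>()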
+ */
+ template<size_t ROT1, size_t ROT2, size_t ROT3>
+ SIMD_4x32 rho() const
+ {
+ const SIMD_4x32 rot1 = this->rotr<ROT1>();
+ const SIMD_4x32 rot2 = this->rotr<ROT2>();
+ const SIMD_4x32 rot3 = this->rotr<ROT3>();
+ return (rot1 ^ rot2 ^ rot3);
+ }
+
+ /**
+ * Left rotation by a compile-time constant
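+ * (e.g. rotl<8>() maps 0x01020304 to 0x02030401 in each lane)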
+ */
+ template<size_t ROT>
+ SIMD_4x32 rotl() const
+ {
+ static_assert(ROT > 0 && ROT < 32, "Invalid rotation constant");
+
+#if defined(BOTAN_SIMD_USE_SSE2)
+
+ return SIMD_4x32(_mm_or_si128(_mm_slli_epi32(m_simd, static_cast<int>(ROT)),
+ _mm_srli_epi32(m_simd, static_cast<int>(32-ROT))));
+
+#elif defined(BOTAN_SIMD_USE_ALTIVEC)
+
+ const unsigned int r = static_cast<unsigned int>(ROT);
+ __vector unsigned int rot = {r, r, r, r};
+ return SIMD_4x32(vec_rl(m_simd, rot));
+
+#elif defined(BOTAN_SIMD_USE_NEON)
+
+#if defined(BOTAN_TARGET_ARCH_IS_ARM64)
+
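+      // Rotations by 8 and 16 have cheaper forms on AArch64: a byte
+      // table lookup (tbl) and a halfword reversal (rev32), respectively.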
+ BOTAN_IF_CONSTEXPR(ROT == 8)
+ {
+ const uint8_t maskb[16] = { 3,0,1,2, 7,4,5,6, 11,8,9,10, 15,12,13,14 };
+ const uint8x16_t mask = vld1q_u8(maskb);
+ return SIMD_4x32(vreinterpretq_u32_u8(vqtbl1q_u8(vreinterpretq_u8_u32(m_simd), mask)));
+ }
+ else BOTAN_IF_CONSTEXPR(ROT == 16)
+ {
+ return SIMD_4x32(vreinterpretq_u32_u16(vrev32q_u16(vreinterpretq_u16_u32(m_simd))));
+ }
+#endif
+ return SIMD_4x32(vorrq_u32(vshlq_n_u32(m_simd, static_cast<int>(ROT)),
+ vshrq_n_u32(m_simd, static_cast<int>(32-ROT))));
+#endif
+ }
+
+ /**
+ * Right rotation by a compile-time constant
+ */
+ template<size_t ROT>
+ SIMD_4x32 rotr() const
+ {
+ return this->rotl<32-ROT>();
+ }
+
+ /**
+ * Add elements of a SIMD vector
+ */
+ SIMD_4x32 operator+(const SIMD_4x32& other) const
+ {
+ SIMD_4x32 retval(*this);
+ retval += other;
+ return retval;
+ }
+
+ /**
+ * Subtract elements of a SIMD vector
+ */
+ SIMD_4x32 operator-(const SIMD_4x32& other) const
+ {
+ SIMD_4x32 retval(*this);
+ retval -= other;
+ return retval;
+ }
+
+ /**
+ * XOR elements of a SIMD vector
+ */
+ SIMD_4x32 operator^(const SIMD_4x32& other) const
+ {
+ SIMD_4x32 retval(*this);
+ retval ^= other;
+ return retval;
+ }
+
+ /**
+ * Binary OR elements of a SIMD vector
+ */
+ SIMD_4x32 operator|(const SIMD_4x32& other) const
+ {
+ SIMD_4x32 retval(*this);
+ retval |= other;
+ return retval;
+ }
+
+ /**
+ * Binary AND elements of a SIMD vector
+ */
+ SIMD_4x32 operator&(const SIMD_4x32& other) const
+ {
+ SIMD_4x32 retval(*this);
+ retval &= other;
+ return retval;
+ }
+
+ void operator+=(const SIMD_4x32& other)
+ {
+#if defined(BOTAN_SIMD_USE_SSE2)
+ m_simd = _mm_add_epi32(m_simd, other.m_simd);
+#elif defined(BOTAN_SIMD_USE_ALTIVEC)
+ m_simd = vec_add(m_simd, other.m_simd);
+#elif defined(BOTAN_SIMD_USE_NEON)
+ m_simd = vaddq_u32(m_simd, other.m_simd);
+#endif
+ }
+
+ void operator-=(const SIMD_4x32& other)
+ {
+#if defined(BOTAN_SIMD_USE_SSE2)
+ m_simd = _mm_sub_epi32(m_simd, other.m_simd);
+#elif defined(BOTAN_SIMD_USE_ALTIVEC)
+ m_simd = vec_sub(m_simd, other.m_simd);
+#elif defined(BOTAN_SIMD_USE_NEON)
+ m_simd = vsubq_u32(m_simd, other.m_simd);
+#endif
+ }
+
+ void operator^=(const SIMD_4x32& other)
+ {
+#if defined(BOTAN_SIMD_USE_SSE2)
+ m_simd = _mm_xor_si128(m_simd, other.m_simd);
+
+#elif defined(BOTAN_SIMD_USE_ALTIVEC)
+ m_simd = vec_xor(m_simd, other.m_simd);
+#elif defined(BOTAN_SIMD_USE_NEON)
+ m_simd = veorq_u32(m_simd, other.m_simd);
+#endif
+ }
+
+ void operator|=(const SIMD_4x32& other)
+ {
+#if defined(BOTAN_SIMD_USE_SSE2)
+ m_simd = _mm_or_si128(m_simd, other.m_simd);
+#elif defined(BOTAN_SIMD_USE_ALTIVEC)
+ m_simd = vec_or(m_simd, other.m_simd);
+#elif defined(BOTAN_SIMD_USE_NEON)
+ m_simd = vorrq_u32(m_simd, other.m_simd);
+#endif
+ }
+
+ void operator&=(const SIMD_4x32& other)
+ {
+#if defined(BOTAN_SIMD_USE_SSE2)
+ m_simd = _mm_and_si128(m_simd, other.m_simd);
+#elif defined(BOTAN_SIMD_USE_ALTIVEC)
+ m_simd = vec_and(m_simd, other.m_simd);
+#elif defined(BOTAN_SIMD_USE_NEON)
+ m_simd = vandq_u32(m_simd, other.m_simd);
+#endif
+ }
+
+
+ template<int SHIFT> SIMD_4x32 shl() const
+ {
+ static_assert(SHIFT > 0 && SHIFT <= 31, "Invalid shift count");
+
+#if defined(BOTAN_SIMD_USE_SSE2)
+ return SIMD_4x32(_mm_slli_epi32(m_simd, SHIFT));
+
+#elif defined(BOTAN_SIMD_USE_ALTIVEC)
+ const unsigned int s = static_cast<unsigned int>(SHIFT);
+ const __vector unsigned int shifts = {s, s, s, s};
+ return SIMD_4x32(vec_sl(m_simd, shifts));
+#elif defined(BOTAN_SIMD_USE_NEON)
+ return SIMD_4x32(vshlq_n_u32(m_simd, SHIFT));
+#endif
+ }
+
+ template<int SHIFT> SIMD_4x32 shr() const
+ {
+#if defined(BOTAN_SIMD_USE_SSE2)
+ return SIMD_4x32(_mm_srli_epi32(m_simd, SHIFT));
+
+#elif defined(BOTAN_SIMD_USE_ALTIVEC)
+ const unsigned int s = static_cast<unsigned int>(SHIFT);
+ const __vector unsigned int shifts = {s, s, s, s};
+ return SIMD_4x32(vec_sr(m_simd, shifts));
+#elif defined(BOTAN_SIMD_USE_NEON)
+ return SIMD_4x32(vshrq_n_u32(m_simd, SHIFT));
+#endif
+ }
+
+ SIMD_4x32 operator~() const
+ {
+#if defined(BOTAN_SIMD_USE_SSE2)
+ return SIMD_4x32(_mm_xor_si128(m_simd, _mm_set1_epi32(0xFFFFFFFF)));
+#elif defined(BOTAN_SIMD_USE_ALTIVEC)
+ return SIMD_4x32(vec_nor(m_simd, m_simd));
+#elif defined(BOTAN_SIMD_USE_NEON)
+ return SIMD_4x32(vmvnq_u32(m_simd));
+#endif
+ }
+
+ // (~reg) & other
+ SIMD_4x32 andc(const SIMD_4x32& other) const
+ {
+#if defined(BOTAN_SIMD_USE_SSE2)
+ return SIMD_4x32(_mm_andnot_si128(m_simd, other.m_simd));
+#elif defined(BOTAN_SIMD_USE_ALTIVEC)
+ /*
+ AltiVec does arg1 & ~arg2 rather than SSE's ~arg1 & arg2
+ so swap the arguments
+ */
+ return SIMD_4x32(vec_andc(other.m_simd, m_simd));
+#elif defined(BOTAN_SIMD_USE_NEON)
+ // NEON's vbic also computes arg1 & ~arg2, so swap the arguments here too
+ return SIMD_4x32(vbicq_u32(other.m_simd, m_simd));
+#endif
+ }
+
+ /**
+ * Return a copy of *this with each word byte-swapped
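+ * (e.g. 0x01020304 becomes 0x04030201 in each lane)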
+ */
+ SIMD_4x32 bswap() const
+ {
+#if defined(BOTAN_SIMD_USE_SSE2)
+
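+      // SSE2 lacks a byte shuffle, so first swap the two 16-bit halves of
+      // each 32-bit word, then swap the two bytes within each 16-bit word.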
+ __m128i T = m_simd;
+ T = _mm_shufflehi_epi16(T, _MM_SHUFFLE(2, 3, 0, 1));
+ T = _mm_shufflelo_epi16(T, _MM_SHUFFLE(2, 3, 0, 1));
+ return SIMD_4x32(_mm_or_si128(_mm_srli_epi16(T, 8), _mm_slli_epi16(T, 8)));
+
+#elif defined(BOTAN_SIMD_USE_ALTIVEC)
+
+ union {
+ __vector unsigned int V;
+ uint32_t R[4];
+ } vec;
+
+ vec.V = m_simd;
+ bswap_4(vec.R);
+ return SIMD_4x32(vec.R[0], vec.R[1], vec.R[2], vec.R[3]);
+
+#elif defined(BOTAN_SIMD_USE_NEON)
+ return SIMD_4x32(vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(m_simd))));
+#endif
+ }
+
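+ /**
+ * Shift the four 32-bit elements towards higher indices, zero-filling;
+ * e.g. shift_elems_left<1>() maps {B0,B1,B2,B3} to {0,B0,B1,B2}.
+ */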
+ template<size_t I>
+ SIMD_4x32 shift_elems_left() const
+ {
+ static_assert(I <= 3, "Invalid shift count");
+
+#if defined(BOTAN_SIMD_USE_SSE2)
+ return SIMD_4x32(_mm_slli_si128(raw(), 4*I));
+#elif defined(BOTAN_SIMD_USE_NEON)
+ return SIMD_4x32(vextq_u32(vdupq_n_u32(0), raw(), 4-I));
+#elif defined(BOTAN_SIMD_USE_ALTIVEC)
+ const __vector unsigned int zero = vec_splat_u32(0);
+
+ const __vector unsigned char shuf[3] = {
+ { 16, 17, 18, 19, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 },
+ { 16, 17, 18, 19, 20, 21, 22, 23, 0, 1, 2, 3, 4, 5, 6, 7 },
+ { 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 0, 1, 2, 3 },
+ };
+
+ return SIMD_4x32(vec_perm(raw(), zero, shuf[I-1]));
+#endif
+ }
+
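+ /**
+ * Shift the four 32-bit elements towards lower indices, zero-filling;
+ * e.g. shift_elems_right<1>() maps {B0,B1,B2,B3} to {B1,B2,B3,0}.
+ */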
+ template<size_t I>
+ SIMD_4x32 shift_elems_right() const
+ {
+ static_assert(I <= 3, "Invalid shift count");
+
+#if defined(BOTAN_SIMD_USE_SSE2)
+ return SIMD_4x32(_mm_srli_si128(raw(), 4*I));
+#elif defined(BOTAN_SIMD_USE_NEON)
+ return SIMD_4x32(vextq_u32(raw(), vdupq_n_u32(0), I));
+#elif defined(BOTAN_SIMD_USE_ALTIVEC)
+ const __vector unsigned int zero = vec_splat_u32(0);
+
+ const __vector unsigned char shuf[3] = {
+ { 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19 },
+ { 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 },
+ { 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27 },
+ };
+
+ return SIMD_4x32(vec_perm(raw(), zero, shuf[I-1]));
+#endif
+ }
+
+ /**
+ * 4x4 Transposition on SIMD registers
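+ * treating B0..B3 as the rows of a 4x4 matrix of 32-bit words; afterwards
+ * B0 = {B0[0], B1[0], B2[0], B3[0]}, B1 = {B0[1], B1[1], B2[1], B3[1]}, etc.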
+ */
+ static void transpose(SIMD_4x32& B0, SIMD_4x32& B1,
+ SIMD_4x32& B2, SIMD_4x32& B3)
+ {
+#if defined(BOTAN_SIMD_USE_SSE2)
+ const __m128i T0 = _mm_unpacklo_epi32(B0.m_simd, B1.m_simd);
+ const __m128i T1 = _mm_unpacklo_epi32(B2.m_simd, B3.m_simd);
+ const __m128i T2 = _mm_unpackhi_epi32(B0.m_simd, B1.m_simd);
+ const __m128i T3 = _mm_unpackhi_epi32(B2.m_simd, B3.m_simd);
+
+ B0.m_simd = _mm_unpacklo_epi64(T0, T1);
+ B1.m_simd = _mm_unpackhi_epi64(T0, T1);
+ B2.m_simd = _mm_unpacklo_epi64(T2, T3);
+ B3.m_simd = _mm_unpackhi_epi64(T2, T3);
+#elif defined(BOTAN_SIMD_USE_ALTIVEC)
+ const __vector unsigned int T0 = vec_mergeh(B0.m_simd, B2.m_simd);
+ const __vector unsigned int T1 = vec_mergeh(B1.m_simd, B3.m_simd);
+ const __vector unsigned int T2 = vec_mergel(B0.m_simd, B2.m_simd);
+ const __vector unsigned int T3 = vec_mergel(B1.m_simd, B3.m_simd);
+
+ B0.m_simd = vec_mergeh(T0, T1);
+ B1.m_simd = vec_mergel(T0, T1);
+ B2.m_simd = vec_mergeh(T2, T3);
+ B3.m_simd = vec_mergel(T2, T3);
+
+#elif defined(BOTAN_SIMD_USE_NEON) && defined(BOTAN_TARGET_ARCH_IS_ARM32)
+ const uint32x4x2_t T0 = vzipq_u32(B0.m_simd, B2.m_simd);
+ const uint32x4x2_t T1 = vzipq_u32(B1.m_simd, B3.m_simd);
+ const uint32x4x2_t O0 = vzipq_u32(T0.val[0], T1.val[0]);
+ const uint32x4x2_t O1 = vzipq_u32(T0.val[1], T1.val[1]);
+
+ B0.m_simd = O0.val[0];
+ B1.m_simd = O0.val[1];
+ B2.m_simd = O1.val[0];
+ B3.m_simd = O1.val[1];
+
+#elif defined(BOTAN_SIMD_USE_NEON) && defined(BOTAN_TARGET_ARCH_IS_ARM64)
+ const uint32x4_t T0 = vzip1q_u32(B0.m_simd, B2.m_simd);
+ const uint32x4_t T2 = vzip2q_u32(B0.m_simd, B2.m_simd);
+ const uint32x4_t T1 = vzip1q_u32(B1.m_simd, B3.m_simd);
+ const uint32x4_t T3 = vzip2q_u32(B1.m_simd, B3.m_simd);
+
+ B0.m_simd = vzip1q_u32(T0, T1);
+ B1.m_simd = vzip2q_u32(T0, T1);
+ B2.m_simd = vzip1q_u32(T2, T3);
+ B3.m_simd = vzip2q_u32(T2, T3);
+#endif
+ }
+
+ native_simd_type raw() const BOTAN_FUNC_ISA(BOTAN_SIMD_ISA) { return m_simd; }
+
+ explicit SIMD_4x32(native_simd_type x) : m_simd(x) {}
+ private:
+ native_simd_type m_simd;
+ };
+
+}
+
+#endif