author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 09:22:09 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 09:22:09 +0000
commit     43a97878ce14b72f0981164f87f2e35e14151312 (patch)
tree       620249daf56c0258faa40cbdcf9cfba06de2a846 /third_party/aom/av1/common/arm
parent     Initial commit. (diff)
Adding upstream version 110.0.1. (upstream/110.0.1, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'third_party/aom/av1/common/arm')
-rw-r--r--  third_party/aom/av1/common/arm/av1_inv_txfm_neon.c      3231
-rw-r--r--  third_party/aom/av1/common/arm/av1_inv_txfm_neon.h       154
-rw-r--r--  third_party/aom/av1/common/arm/av1_txfm_neon.c            28
-rw-r--r--  third_party/aom/av1/common/arm/blend_a64_hmask_neon.c    134
-rw-r--r--  third_party/aom/av1/common/arm/blend_a64_vmask_neon.c    141
-rw-r--r--  third_party/aom/av1/common/arm/cfl_neon.c                584
-rw-r--r--  third_party/aom/av1/common/arm/convolve_neon.c          1455
-rw-r--r--  third_party/aom/av1/common/arm/convolve_neon.h           228
-rw-r--r--  third_party/aom/av1/common/arm/jnt_convolve_neon.c      1740
-rw-r--r--  third_party/aom/av1/common/arm/mem_neon.h                494
-rw-r--r--  third_party/aom/av1/common/arm/reconinter_neon.c          86
-rw-r--r--  third_party/aom/av1/common/arm/selfguided_neon.c        1508
-rw-r--r--  third_party/aom/av1/common/arm/transpose_neon.h          537
-rw-r--r--  third_party/aom/av1/common/arm/warp_plane_neon.c         714
-rw-r--r--  third_party/aom/av1/common/arm/wiener_convolve_neon.c    530
15 files changed, 11564 insertions, 0 deletions
diff --git a/third_party/aom/av1/common/arm/av1_inv_txfm_neon.c b/third_party/aom/av1/common/arm/av1_inv_txfm_neon.c
new file mode 100644
index 0000000000..bad411743d
--- /dev/null
+++ b/third_party/aom/av1/common/arm/av1_inv_txfm_neon.c
@@ -0,0 +1,3231 @@
+/*
+ * Copyright (c) 2018, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <arm_neon.h>
+
+#include "config/aom_config.h"
+#include "config/aom_dsp_rtcd.h"
+#include "config/av1_rtcd.h"
+
+#include "av1/common/av1_inv_txfm1d.h"
+#include "av1/common/av1_inv_txfm1d_cfg.h"
+#include "av1/common/av1_txfm.h"
+#include "av1/common/enums.h"
+#include "av1/common/idct.h"
+#include "av1/common/arm/av1_inv_txfm_neon.h"
+#include "av1/common/arm/transpose_neon.h"
+
+// 1D itx types
+typedef enum ATTRIBUTE_PACKED {
+ IDCT_1D,
+ IADST_1D,
+ IFLIPADST_1D = IADST_1D,
+ IIDENTITY_1D,
+ ITX_TYPES_1D,
+} ITX_TYPE_1D;
+
+static const ITX_TYPE_1D vitx_1d_tab[TX_TYPES] = {
+ IDCT_1D, IADST_1D, IDCT_1D, IADST_1D,
+ IFLIPADST_1D, IDCT_1D, IFLIPADST_1D, IADST_1D,
+ IFLIPADST_1D, IIDENTITY_1D, IDCT_1D, IIDENTITY_1D,
+ IADST_1D, IIDENTITY_1D, IFLIPADST_1D, IIDENTITY_1D,
+};
+
+static const ITX_TYPE_1D hitx_1d_tab[TX_TYPES] = {
+ IDCT_1D, IDCT_1D, IADST_1D, IADST_1D,
+ IDCT_1D, IFLIPADST_1D, IFLIPADST_1D, IFLIPADST_1D,
+ IADST_1D, IIDENTITY_1D, IIDENTITY_1D, IDCT_1D,
+ IIDENTITY_1D, IADST_1D, IIDENTITY_1D, IFLIPADST_1D,
+};
+
+// 1D functions
+static const transform_1d_neon lowbd_txfm_all_1d_arr[TX_SIZES][ITX_TYPES_1D] = {
+ { av1_idct4_new, av1_iadst4_new, av1_iidentity4_c },
+ { av1_idct8_new, av1_iadst8_new, av1_iidentity8_c },
+ { av1_idct16_new, av1_iadst16_new, av1_iidentity16_c },
+ { av1_idct32_new, NULL, NULL },
+ { av1_idct64_new, NULL, NULL },
+};
+
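+// Reconstruction helpers (editorial note): widen the 8-bit prediction, add
+// the 16-bit residual rows (bottom-up when flipud is set) and narrow back
+// with unsigned saturation before storing.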
+static INLINE void lowbd_add_flip_buffer_8xn_neon(int16x8_t *in,
+ uint8_t *output, int stride,
+ int flipud,
+ const int height) {
+ int j = flipud ? (height - 1) : 0;
+ const int step = flipud ? -1 : 1;
+ int16x8_t temp_output;
+ for (int i = 0; i < height; ++i, j += step) {
+ temp_output = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(output)));
+ temp_output = vaddq_s16(temp_output, in[j]);
+ vst1_u8(output, vqmovun_s16(temp_output));
+ output += stride;
+ }
+}
+
+static INLINE uint8x16_t lowbd_get_recon_16x16_neon(const uint8x16_t pred,
+ int16x8_t res0,
+ int16x8_t res1) {
+ int16x8_t temp_output[2];
+ uint8x16_t temp_output_8q;
+ temp_output[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pred)));
+ temp_output[0] = vaddq_s16(temp_output[0], res0);
+ temp_output[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pred)));
+ temp_output[1] = vaddq_s16(temp_output[1], res1);
+ temp_output_8q =
+ vcombine_u8(vqmovun_s16(temp_output[0]), vqmovun_s16(temp_output[1]));
+ return temp_output_8q;
+}
+
+static INLINE void lowbd_add_flip_buffer_16xn_neon(int16x8_t *in,
+ uint8_t *output, int stride,
+ int flipud, int height) {
+ uint8x16_t temp_output_8q;
+ int j = flipud ? (height - 1) : 0;
+ const int step = flipud ? -1 : 1;
+ for (int i = 0; i < height; ++i, j += step) {
+ temp_output_8q = vld1q_u8(output + i * stride);
+ temp_output_8q =
+ lowbd_get_recon_16x16_neon(temp_output_8q, in[j], in[j + height]);
+ vst1q_u8((output + i * stride), temp_output_8q);
+ }
+}
+
+static INLINE void lowbd_inv_txfm2d_memset_neon(int16x8_t *a, int size,
+ int value) {
+ for (int i = 0; i < size; i++) {
+ a[i] = vdupq_n_s16((int16_t)value);
+ }
+}
+
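+// btf_16_lane_x_y_neon (editorial note): 16-bit butterfly rotation on eight
+// lanes using the cosine pair held in lanes x/y of c. Products are widened
+// to 32 bits, combined as t0 = in0*c[x] + in1*c[y] and
+// t1 = in0*c[y] - in1*c[x], then rounding-shifted back by INV_COS_BIT.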
+static INLINE void btf_16_lane_0_1_neon(const int16x8_t in0,
+ const int16x8_t in1, const int16x4_t c,
+ int16x8_t *t0, int16x8_t *t1) {
+ int32x4_t s0[2], s1[2];
+ int16x4_t v0[2], v1[2];
+
+ s0[0] = vmull_lane_s16(vget_low_s16(in0), c, 0);
+ s0[1] = vmull_lane_s16(vget_high_s16(in0), c, 0);
+ s1[0] = vmull_lane_s16(vget_low_s16(in0), c, 1);
+ s1[1] = vmull_lane_s16(vget_high_s16(in0), c, 1);
+
+ s0[0] = vmlal_lane_s16(s0[0], vget_low_s16(in1), c, 1);
+ s0[1] = vmlal_lane_s16(s0[1], vget_high_s16(in1), c, 1);
+ s1[0] = vmlsl_lane_s16(s1[0], vget_low_s16(in1), c, 0);
+ s1[1] = vmlsl_lane_s16(s1[1], vget_high_s16(in1), c, 0);
+
+ v0[0] = vrshrn_n_s32(s0[0], INV_COS_BIT);
+ v0[1] = vrshrn_n_s32(s0[1], INV_COS_BIT);
+ v1[0] = vrshrn_n_s32(s1[0], INV_COS_BIT);
+ v1[1] = vrshrn_n_s32(s1[1], INV_COS_BIT);
+
+ *t0 = vcombine_s16(v0[0], v0[1]);
+ *t1 = vcombine_s16(v1[0], v1[1]);
+}
+
+static INLINE void btf_16_lane_1_0_neon(const int16x8_t in0,
+ const int16x8_t in1, const int16x4_t c,
+ int16x8_t *t0, int16x8_t *t1) {
+ int32x4_t s0[2], s1[2];
+ int16x4_t v0[2], v1[2];
+
+ s0[0] = vmull_lane_s16(vget_low_s16(in0), c, 1);
+ s0[1] = vmull_lane_s16(vget_high_s16(in0), c, 1);
+ s1[0] = vmull_lane_s16(vget_low_s16(in0), c, 0);
+ s1[1] = vmull_lane_s16(vget_high_s16(in0), c, 0);
+
+ s0[0] = vmlal_lane_s16(s0[0], vget_low_s16(in1), c, 0);
+ s0[1] = vmlal_lane_s16(s0[1], vget_high_s16(in1), c, 0);
+ s1[0] = vmlsl_lane_s16(s1[0], vget_low_s16(in1), c, 1);
+ s1[1] = vmlsl_lane_s16(s1[1], vget_high_s16(in1), c, 1);
+
+ v0[0] = vrshrn_n_s32(s0[0], INV_COS_BIT);
+ v0[1] = vrshrn_n_s32(s0[1], INV_COS_BIT);
+ v1[0] = vrshrn_n_s32(s1[0], INV_COS_BIT);
+ v1[1] = vrshrn_n_s32(s1[1], INV_COS_BIT);
+
+ *t0 = vcombine_s16(v0[0], v0[1]);
+ *t1 = vcombine_s16(v1[0], v1[1]);
+}
+
+static INLINE void btf_16_lane_2_3_neon(const int16x8_t in0,
+ const int16x8_t in1, const int16x4_t c,
+ int16x8_t *t0, int16x8_t *t1) {
+ int32x4_t s0[2], s1[2];
+ int16x4_t v0[2], v1[2];
+
+ s0[0] = vmull_lane_s16(vget_low_s16(in0), c, 2);
+ s0[1] = vmull_lane_s16(vget_high_s16(in0), c, 2);
+ s1[0] = vmull_lane_s16(vget_low_s16(in0), c, 3);
+ s1[1] = vmull_lane_s16(vget_high_s16(in0), c, 3);
+
+ s0[0] = vmlal_lane_s16(s0[0], vget_low_s16(in1), c, 3);
+ s0[1] = vmlal_lane_s16(s0[1], vget_high_s16(in1), c, 3);
+ s1[0] = vmlsl_lane_s16(s1[0], vget_low_s16(in1), c, 2);
+ s1[1] = vmlsl_lane_s16(s1[1], vget_high_s16(in1), c, 2);
+
+ v0[0] = vrshrn_n_s32(s0[0], INV_COS_BIT);
+ v0[1] = vrshrn_n_s32(s0[1], INV_COS_BIT);
+ v1[0] = vrshrn_n_s32(s1[0], INV_COS_BIT);
+ v1[1] = vrshrn_n_s32(s1[1], INV_COS_BIT);
+
+ *t0 = vcombine_s16(v0[0], v0[1]);
+ *t1 = vcombine_s16(v1[0], v1[1]);
+}
+
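+// Single-input butterfly used by the low-coefficient paths (editorial note):
+// both outputs are in0 scaled by one cosine constant each and
+// rounding-shifted by INV_COS_BIT.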
+static INLINE void btf_16_neon(const int16x8_t in0, int16_t coef1,
+ int16_t coef2, int16x8_t *t0, int16x8_t *t1) {
+ int32x4_t s0_l, s0_h, s1_l, s1_h;
+ int16x4_t v0[2], v1[2];
+
+ s0_l = vmull_n_s16(vget_low_s16(in0), coef1);
+ s0_h = vmull_n_s16(vget_high_s16(in0), coef1);
+ s1_l = vmull_n_s16(vget_low_s16(in0), coef2);
+ s1_h = vmull_n_s16(vget_high_s16(in0), coef2);
+
+ v0[0] = vrshrn_n_s32(s0_l, INV_COS_BIT);
+ v0[1] = vrshrn_n_s32(s0_h, INV_COS_BIT);
+ v1[0] = vrshrn_n_s32(s1_l, INV_COS_BIT);
+ v1[1] = vrshrn_n_s32(s1_h, INV_COS_BIT);
+
+ *t0 = vcombine_s16(v0[0], v0[1]);
+ *t1 = vcombine_s16(v1[0], v1[1]);
+}
+
+static INLINE void btf_16_lane_3_2_neon(const int16x8_t in0,
+ const int16x8_t in1, const int16x4_t c,
+ int16x8_t *t0, int16x8_t *t1) {
+ int32x4_t s0[2], s1[2];
+ int16x4_t v0[2], v1[2];
+
+ s0[0] = vmull_lane_s16(vget_low_s16(in0), c, 3);
+ s0[1] = vmull_lane_s16(vget_high_s16(in0), c, 3);
+ s1[0] = vmull_lane_s16(vget_low_s16(in0), c, 2);
+ s1[1] = vmull_lane_s16(vget_high_s16(in0), c, 2);
+
+ s0[0] = vmlal_lane_s16(s0[0], vget_low_s16(in1), c, 2);
+ s0[1] = vmlal_lane_s16(s0[1], vget_high_s16(in1), c, 2);
+ s1[0] = vmlsl_lane_s16(s1[0], vget_low_s16(in1), c, 3);
+ s1[1] = vmlsl_lane_s16(s1[1], vget_high_s16(in1), c, 3);
+
+ v0[0] = vrshrn_n_s32(s0[0], INV_COS_BIT);
+ v0[1] = vrshrn_n_s32(s0[1], INV_COS_BIT);
+ v1[0] = vrshrn_n_s32(s1[0], INV_COS_BIT);
+ v1[1] = vrshrn_n_s32(s1[1], INV_COS_BIT);
+
+ *t0 = vcombine_s16(v0[0], v0[1]);
+ *t1 = vcombine_s16(v1[0], v1[1]);
+}
+
+static INLINE void btf_16_half_neon(int16x8_t *const x, const int16x4_t c) {
+ int32x4_t t0[2], t1[2];
+ int16x4_t v0[2], v1[2];
+
+ // Don't add/sub before multiply, which will overflow in iadst8.
+ const int32x4_t x0_lo = vmull_lane_s16(vget_low_s16(x[0]), c, 0);
+ const int32x4_t x0_hi = vmull_lane_s16(vget_high_s16(x[0]), c, 0);
+ const int32x4_t x1_lo = vmull_lane_s16(vget_low_s16(x[1]), c, 0);
+ const int32x4_t x1_hi = vmull_lane_s16(vget_high_s16(x[1]), c, 0);
+
+ t0[0] = vaddq_s32(x0_lo, x1_lo);
+ t0[1] = vaddq_s32(x0_hi, x1_hi);
+ t1[0] = vsubq_s32(x0_lo, x1_lo);
+ t1[1] = vsubq_s32(x0_hi, x1_hi);
+
+ v0[0] = vrshrn_n_s32(t0[0], INV_COS_BIT);
+ v0[1] = vrshrn_n_s32(t0[1], INV_COS_BIT);
+ v1[0] = vrshrn_n_s32(t1[0], INV_COS_BIT);
+ v1[1] = vrshrn_n_s32(t1[1], INV_COS_BIT);
+
+ x[0] = vcombine_s16(v0[0], v0[1]);
+ x[1] = vcombine_s16(v1[0], v1[1]);
+}
+
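+// Editorial note: gathers four cosine constants from the 32-bit cospi table
+// (the values fit in 16 bits) into one int16x4_t so the butterfly helpers
+// can select them by lane.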
+static INLINE int16x4_t create_s16x4_neon(int16_t *const c0, int16_t *const c1,
+ int16_t *const c2,
+ int16_t *const c3) {
+ int16x4_t val = vdup_n_s16((int16_t)0);
+ val = vld1_lane_s16(c0, val, 0);
+ val = vld1_lane_s16(c1, val, 1);
+ val = vld1_lane_s16(c2, val, 2);
+ val = vld1_lane_s16(c3, val, 3);
+ return val;
+}
+
+static INLINE void iadst8_new_neon(int16x8_t *const in, int16x8_t *out,
+ int8_t cos_bit, int bit) {
+ (void)bit;
+ const int32_t *cospi = cospi_arr(cos_bit);
+
+ const int16x4_t c0 =
+ create_s16x4_neon((int16_t *)(cospi + 4), (int16_t *)(cospi + 60),
+ (int16_t *)(cospi + 20), (int16_t *)(cospi + 44));
+ const int16x4_t c1 =
+ create_s16x4_neon((int16_t *)(cospi + 36), (int16_t *)(cospi + 28),
+ (int16_t *)(cospi + 52), (int16_t *)(cospi + 12));
+ const int16x4_t c2 =
+ create_s16x4_neon((int16_t *)(cospi + 32), (int16_t *)(cospi + 32),
+ (int16_t *)(cospi + 16), (int16_t *)(cospi + 48));
+
+ int16x8_t x[8];
+ int16x8_t s0, s1, s2, s3, s4, s5, s6, s7;
+
+ // Stage 1
+ x[0] = in[7];
+ x[1] = in[0];
+ x[2] = in[5];
+ x[3] = in[2];
+ x[4] = in[3];
+ x[5] = in[4];
+ x[6] = in[1];
+ x[7] = in[6];
+
+ // Stage 2
+ btf_16_lane_0_1_neon(x[0], x[1], c0, &s0, &s1);
+ btf_16_lane_2_3_neon(x[2], x[3], c0, &s2, &s3);
+ btf_16_lane_0_1_neon(x[4], x[5], c1, &s4, &s5);
+ btf_16_lane_2_3_neon(x[6], x[7], c1, &s6, &s7);
+
+ // Stage 3
+ x[0] = vqaddq_s16(s0, s4);
+ x[1] = vqaddq_s16(s1, s5);
+ x[2] = vqaddq_s16(s2, s6);
+ x[3] = vqaddq_s16(s3, s7);
+ x[4] = vqsubq_s16(s0, s4);
+ x[5] = vqsubq_s16(s1, s5);
+ x[6] = vqsubq_s16(s2, s6);
+ x[7] = vqsubq_s16(s3, s7);
+
+ // Stage 4
+ s0 = x[0];
+ s1 = x[1];
+ s2 = x[2];
+ s3 = x[3];
+ btf_16_lane_2_3_neon(x[4], x[5], c2, &s4, &s5);
+ btf_16_lane_3_2_neon(x[7], x[6], c2, &s7, &s6);
+
+ // Stage 5
+ x[0] = vqaddq_s16(s0, s2);
+ x[1] = vqaddq_s16(s1, s3);
+ x[2] = vqsubq_s16(s0, s2);
+ x[3] = vqsubq_s16(s1, s3);
+ x[4] = vqaddq_s16(s4, s6);
+ x[5] = vqaddq_s16(s5, s7);
+ x[6] = vqsubq_s16(s4, s6);
+ x[7] = vqsubq_s16(s5, s7);
+
+ // stage 6
+ btf_16_half_neon(x + 2, c2);
+ btf_16_half_neon(x + 6, c2);
+
+ // Stage 7
+ out[0] = x[0];
+ out[1] = vnegq_s16(x[4]);
+ out[2] = x[6];
+ out[3] = vnegq_s16(x[2]);
+ out[4] = x[3];
+ out[5] = vnegq_s16(x[7]);
+ out[6] = x[5];
+ out[7] = vnegq_s16(x[1]);
+}
+
+static INLINE void iadst8_low1_new_neon(int16x8_t *const in, int16x8_t *out,
+ int8_t cos_bit, int bit) {
+ (void)bit;
+ const int32_t *cospi = cospi_arr(cos_bit);
+ const int16x4_t c2 =
+ create_s16x4_neon((int16_t *)(cospi + 32), (int16_t *)(cospi + 32),
+ (int16_t *)(cospi + 16), (int16_t *)(cospi + 48));
+
+ int16x8_t x[8];
+ int16x8_t s0, s1, s4, s5;
+
+ // Stage 1
+ x[1] = in[0];
+
+ // Stage 2
+
+ btf_16_neon(x[1], cospi[60], -cospi[4], &s0, &s1);
+
+ // Stage 3
+ x[0] = s0;
+ x[1] = s1;
+ x[4] = s0;
+ x[5] = s1;
+
+ // Stage 4
+ s0 = x[0];
+ s1 = x[1];
+ btf_16_lane_2_3_neon(x[4], x[5], c2, &s4, &s5);
+
+ // Stage 5
+ x[0] = s0;
+ x[1] = s1;
+ x[2] = s0;
+ x[3] = s1;
+ x[4] = s4;
+ x[5] = s5;
+ x[6] = s4;
+ x[7] = s5;
+
+ // stage 6
+ btf_16_half_neon(x + 2, c2);
+ btf_16_half_neon(x + 6, c2);
+
+ // Stage 7
+ out[0] = x[0];
+ out[1] = vnegq_s16(x[4]);
+ out[2] = x[6];
+ out[3] = vnegq_s16(x[2]);
+ out[4] = x[3];
+ out[5] = vnegq_s16(x[7]);
+ out[6] = x[5];
+ out[7] = vnegq_s16(x[1]);
+}
+
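+// Editorial note: 8-point inverse DCT. Each int16x8_t carries eight
+// independent lanes, so one call performs the 1D transform on eight
+// rows/columns in parallel.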
+static INLINE void idct8_new_neon(int16x8_t *in, int16x8_t *out, int8_t cos_bit,
+ int bit) {
+ (void)bit;
+ const int32_t *cospi = cospi_arr(cos_bit);
+ int16x8_t step1[8], step2[8];
+ const int16x4_t c0 =
+ create_s16x4_neon((int16_t *)(cospi + 8), (int16_t *)(cospi + 56),
+ (int16_t *)(cospi + 40), (int16_t *)(cospi + 24));
+ const int16x4_t c2 =
+ create_s16x4_neon((int16_t *)(cospi + 32), (int16_t *)(cospi + 32),
+ (int16_t *)(cospi + 16), (int16_t *)(cospi + 48));
+
+ // stage 2
+ btf_16_lane_0_1_neon(in[1], in[7], c0, &step1[7], &step1[4]);
+ btf_16_lane_2_3_neon(in[5], in[3], c0, &step1[6], &step1[5]);
+
+ // stage 3
+ btf_16_lane_0_1_neon(in[0], in[4], c2, &step2[0], &step2[1]);
+ btf_16_lane_2_3_neon(in[2], in[6], c2, &step2[3], &step2[2]);
+ step2[4] = vqaddq_s16(step1[4], step1[5]);
+ step2[5] = vqsubq_s16(step1[4], step1[5]);
+ step2[6] = vqsubq_s16(step1[7], step1[6]);
+ step2[7] = vqaddq_s16(step1[7], step1[6]);
+
+ // stage 4
+ step1[0] = vqaddq_s16(step2[0], step2[3]);
+ step1[1] = vqaddq_s16(step2[1], step2[2]);
+ step1[2] = vqsubq_s16(step2[1], step2[2]);
+ step1[3] = vqsubq_s16(step2[0], step2[3]);
+ btf_16_lane_0_1_neon(step2[6], step2[5], c2, &step1[6], &step1[5]);
+
+ // stage 5
+ out[0] = vqaddq_s16(step1[0], step2[7]);
+ out[1] = vqaddq_s16(step1[1], step1[6]);
+ out[2] = vqaddq_s16(step1[2], step1[5]);
+ out[3] = vqaddq_s16(step1[3], step2[4]);
+ out[4] = vqsubq_s16(step1[3], step2[4]);
+ out[5] = vqsubq_s16(step1[2], step1[5]);
+ out[6] = vqsubq_s16(step1[1], step1[6]);
+ out[7] = vqsubq_s16(step1[0], step2[7]);
+}
+
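+// DC-only fast path (editorial note): with only in[0] non-zero, every
+// output reduces to in[0] * cospi[32], rounding-shifted by INV_COS_BIT.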
+static INLINE void idct8_low1_new_neon(int16x8_t *in, int16x8_t *out,
+ int8_t cos_bit, int bit) {
+ (void)bit;
+ const int32_t *cospi = cospi_arr(cos_bit);
+ int16x8_t step1;
+ int32x4_t t32[2];
+
+ // stage 1
+ // stage 2
+ // stage 3
+ t32[0] = vmull_n_s16(vget_low_s16(in[0]), (int16_t)cospi[32]);
+ t32[1] = vmull_n_s16(vget_high_s16(in[0]), (int16_t)cospi[32]);
+
+ step1 = vcombine_s16(vrshrn_n_s32(t32[0], INV_COS_BIT),
+ vrshrn_n_s32(t32[1], INV_COS_BIT));
+
+ // stage 4
+ // stage 5
+ out[0] = step1;
+ out[1] = step1;
+ out[2] = step1;
+ out[3] = step1;
+ out[4] = step1;
+ out[5] = step1;
+ out[6] = step1;
+ out[7] = step1;
+}
+
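+// Editorial note: rounding arithmetic shift right by 'bit' applied to every
+// vector, implemented as vrshlq_s16 with a negative shift count.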
+void av1_round_shift_array_16_neon(int16x8_t *arr, int size, int bit) {
+ assert(!(size % 4));
+ if (!bit) return;
+ const int16x8_t dup_bits_n_16x8 = vdupq_n_s16((int16_t)(-bit));
+ for (int i = 0; i < size; i++) {
+ arr[i] = vrshlq_s16(arr[i], dup_bits_n_16x8);
+ }
+}
+
+static INLINE void flip_buf_ud_neon(int16x8_t *input, int size) {
+ int16x8_t temp[8];
+ for (int i = 0; i < size; ++i) {
+ temp[i] = input[size - 1 - i];
+ }
+ for (int i = 0; i < size; ++i) {
+ input[i] = temp[i];
+ }
+}
+
+static INLINE void load_buffer_32bit_to_16bit_neon(const int32_t *input,
+ int16x8_t *const a,
+ int out_size) {
+ for (int i = 0; i < 8; ++i) {
+ a[i] = vcombine_s16(vmovn_s32(vld1q_s32(input)),
+ vmovn_s32(vld1q_s32(input + 4)));
+ input += out_size;
+ }
+}
+
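+// Editorial note: identity transforms only rescale the input: x2 for
+// 8-point, x2*sqrt(2) for 16-point and x4 for 32-point (see
+// identity16/identity32 below).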
+static INLINE void identity8_new_neon(int16x8_t *input, int16x8_t *output,
+ int8_t cos_bit, int bit) {
+ (void)bit;
+ (void)cos_bit;
+
+ output[0] = vmulq_n_s16(input[0], (int16_t)2);
+ output[1] = vmulq_n_s16(input[1], (int16_t)2);
+ output[2] = vmulq_n_s16(input[2], (int16_t)2);
+ output[3] = vmulq_n_s16(input[3], (int16_t)2);
+ output[4] = vmulq_n_s16(input[4], (int16_t)2);
+ output[5] = vmulq_n_s16(input[5], (int16_t)2);
+ output[6] = vmulq_n_s16(input[6], (int16_t)2);
+ output[7] = vmulq_n_s16(input[7], (int16_t)2);
+}
+
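+// Rectangular-block rescale (editorial note): multiply by NewInvSqrt2 and
+// rounding-shift by NewSqrt2Bits, i.e. scale the input by 1/sqrt(2).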
+static INLINE void round_shift_for_rect(int16x8_t *input, int16x8_t *output,
+ int size) {
+ int32x4_t out_low, out_high;
+ int16x4_t low, high;
+
+ for (int z = 0; z < size; ++z) {
+ out_low = vmull_n_s16(vget_low_s16(input[z]), (int16_t)NewInvSqrt2);
+ out_high = vmull_n_s16(vget_high_s16(input[z]), (int16_t)NewInvSqrt2);
+
+ low = vqrshrn_n_s32(out_low, (int32_t)NewSqrt2Bits);
+ high = vqrshrn_n_s32(out_high, (int32_t)NewSqrt2Bits);
+
+ output[z] = vcombine_s16(low, high);
+ }
+}
+
+static INLINE void identity16_new_neon(int16x8_t *input, int16x8_t *output,
+ int8_t cos_bit, int bit) {
+ (void)bit;
+ (void)cos_bit;
+
+ int32x4_t out_low, out_high;
+ int16x4_t low, high;
+ int16_t scale = (int16_t)(2 * NewSqrt2);
+
+ for (int z = 0; z < 16; ++z) {
+ out_low = vmull_n_s16(vget_low_s16(input[z]), scale);
+ out_high = vmull_n_s16(vget_high_s16(input[z]), scale);
+
+ low = vqrshrn_n_s32(out_low, (int32_t)NewSqrt2Bits);
+ high = vqrshrn_n_s32(out_high, (int32_t)NewSqrt2Bits);
+
+ output[z] = vcombine_s16(low, high);
+ }
+}
+
+static INLINE void identity32_new_neon(int16x8_t *input, int16x8_t *output,
+ int8_t cos_bit, int bit) {
+ (void)bit;
+ (void)cos_bit;
+
+ for (int z = 0; z < 32; ++z) {
+ output[z] = vmulq_n_s16(input[z], (int16_t)4);
+ }
+}
+
+static INLINE void idct16_low1_new_neon(int16x8_t *in, int16x8_t *out,
+ int8_t cos_bit, int bit) {
+ (void)bit;
+ const int32_t *cospi = cospi_arr(cos_bit);
+ int16x8_t step1;
+ int32x4_t t32[2];
+
+ // stage 4
+
+ t32[0] = vmull_n_s16(vget_low_s16(in[0]), cospi[32]);
+ t32[1] = vmull_n_s16(vget_high_s16(in[0]), cospi[32]);
+ step1 = vcombine_s16(vrshrn_n_s32(t32[0], INV_COS_BIT),
+ vrshrn_n_s32(t32[1], INV_COS_BIT));
+
+ // stage 6
+ // stage 7
+ out[0] = step1;
+ out[1] = step1;
+ out[2] = step1;
+ out[3] = step1;
+ out[4] = step1;
+ out[5] = step1;
+ out[6] = step1;
+ out[7] = step1;
+ out[8] = step1;
+ out[9] = step1;
+ out[10] = step1;
+ out[11] = step1;
+ out[12] = step1;
+ out[13] = step1;
+ out[14] = step1;
+ out[15] = step1;
+}
+
+static INLINE void idct16_new_neon(int16x8_t *in, int16x8_t *out,
+ int8_t cos_bit, int bit) {
+ (void)bit;
+ const int32_t *cospi = cospi_arr(cos_bit);
+ int16x8_t step1[16], step2[16];
+
+ const int16x4_t c0 =
+ create_s16x4_neon((int16_t *)(cospi + 4), (int16_t *)(cospi + 60),
+ (int16_t *)(cospi + 36), (int16_t *)(cospi + 28));
+ const int16x4_t c1 =
+ create_s16x4_neon((int16_t *)(cospi + 20), (int16_t *)(cospi + 44),
+ (int16_t *)(cospi + 52), (int16_t *)(cospi + 12));
+ const int16x4_t c2 =
+ create_s16x4_neon((int16_t *)(cospi + 8), (int16_t *)(cospi + 56),
+ (int16_t *)(cospi + 40), (int16_t *)(cospi + 24));
+ const int16x4_t c3 =
+ create_s16x4_neon((int16_t *)(cospi + 32), (int16_t *)(cospi + 32),
+ (int16_t *)(cospi + 16), (int16_t *)(cospi + 48));
+
+ // stage 2
+
+ btf_16_lane_0_1_neon(in[1], in[15], c0, &step2[15], &step2[8]);
+ btf_16_lane_2_3_neon(in[9], in[7], c0, &step2[14], &step2[9]);
+ btf_16_lane_0_1_neon(in[5], in[11], c1, &step2[13], &step2[10]);
+ btf_16_lane_2_3_neon(in[13], in[3], c1, &step2[12], &step2[11]);
+
+ step2[0] = in[0];
+ step2[1] = in[8];
+ step2[2] = in[4];
+ step2[3] = in[12];
+ step2[4] = in[2];
+ step2[5] = in[10];
+ step2[6] = in[6];
+ step2[7] = in[14];
+
+ // stage 3
+
+ btf_16_lane_0_1_neon(step2[4], step2[7], c2, &step1[7], &step1[4]);
+ btf_16_lane_2_3_neon(step2[5], step2[6], c2, &step1[6], &step1[5]);
+
+ step1[0] = step2[0];
+ step1[1] = step2[1];
+ step1[2] = step2[2];
+ step1[3] = step2[3];
+ step1[8] = vqaddq_s16(step2[8], step2[9]);
+ step1[9] = vqsubq_s16(step2[8], step2[9]);
+ step1[10] = vqsubq_s16(step2[11], step2[10]);
+ step1[11] = vqaddq_s16(step2[11], step2[10]);
+ step1[12] = vqaddq_s16(step2[12], step2[13]);
+ step1[13] = vqsubq_s16(step2[12], step2[13]);
+ step1[14] = vqsubq_s16(step2[15], step2[14]);
+ step1[15] = vqaddq_s16(step2[15], step2[14]);
+
+ // stage 4
+
+ btf_16_lane_0_1_neon(step1[0], step1[1], c3, &step2[0], &step2[1]);
+ btf_16_lane_2_3_neon(step1[2], step1[3], c3, &step2[3], &step2[2]);
+ btf_16_lane_2_3_neon(step1[14], step1[9], c3, &step2[14], &step2[9]);
+ btf_16_lane_3_2_neon(vnegq_s16(step1[10]), vnegq_s16(step1[13]), c3,
+ &step2[10], &step2[13]);
+
+ step2[4] = vqaddq_s16(step1[4], step1[5]);
+ step2[5] = vqsubq_s16(step1[4], step1[5]);
+ step2[6] = vqsubq_s16(step1[7], step1[6]);
+ step2[7] = vqaddq_s16(step1[7], step1[6]);
+ step2[8] = step1[8];
+ step2[11] = step1[11];
+ step2[12] = step1[12];
+ step2[15] = step1[15];
+
+ // stage 5
+
+ btf_16_lane_0_1_neon(step2[6], step2[5], c3, &step1[6], &step1[5]);
+
+ step1[0] = vqaddq_s16(step2[0], step2[3]);
+ step1[1] = vqaddq_s16(step2[1], step2[2]);
+ step1[2] = vqsubq_s16(step2[1], step2[2]);
+ step1[3] = vqsubq_s16(step2[0], step2[3]);
+ step1[4] = step2[4];
+ step1[7] = step2[7];
+ step1[8] = vqaddq_s16(step2[8], step2[11]);
+ step1[9] = vqaddq_s16(step2[9], step2[10]);
+ step1[10] = vqsubq_s16(step2[9], step2[10]);
+ step1[11] = vqsubq_s16(step2[8], step2[11]);
+ step1[12] = vqsubq_s16(step2[15], step2[12]);
+ step1[13] = vqsubq_s16(step2[14], step2[13]);
+ step1[14] = vqaddq_s16(step2[14], step2[13]);
+ step1[15] = vqaddq_s16(step2[15], step2[12]);
+
+ // stage 6
+
+ btf_16_lane_0_1_neon(step1[13], step1[10], c3, &step2[13], &step2[10]);
+ btf_16_lane_0_1_neon(step1[12], step1[11], c3, &step2[12], &step2[11]);
+
+ step2[0] = vqaddq_s16(step1[0], step1[7]);
+ step2[1] = vqaddq_s16(step1[1], step1[6]);
+ step2[2] = vqaddq_s16(step1[2], step1[5]);
+ step2[3] = vqaddq_s16(step1[3], step1[4]);
+ step2[4] = vqsubq_s16(step1[3], step1[4]);
+ step2[5] = vqsubq_s16(step1[2], step1[5]);
+ step2[6] = vqsubq_s16(step1[1], step1[6]);
+ step2[7] = vqsubq_s16(step1[0], step1[7]);
+ step2[8] = step1[8];
+ step2[9] = step1[9];
+ step2[14] = step1[14];
+ step2[15] = step1[15];
+
+ // stage 7
+ out[0] = vqaddq_s16(step2[0], step2[15]);
+ out[1] = vqaddq_s16(step2[1], step2[14]);
+ out[2] = vqaddq_s16(step2[2], step2[13]);
+ out[3] = vqaddq_s16(step2[3], step2[12]);
+ out[4] = vqaddq_s16(step2[4], step2[11]);
+ out[5] = vqaddq_s16(step2[5], step2[10]);
+ out[6] = vqaddq_s16(step2[6], step2[9]);
+ out[7] = vqaddq_s16(step2[7], step2[8]);
+ out[8] = vqsubq_s16(step2[7], step2[8]);
+ out[9] = vqsubq_s16(step2[6], step2[9]);
+ out[10] = vqsubq_s16(step2[5], step2[10]);
+ out[11] = vqsubq_s16(step2[4], step2[11]);
+ out[12] = vqsubq_s16(step2[3], step2[12]);
+ out[13] = vqsubq_s16(step2[2], step2[13]);
+ out[14] = vqsubq_s16(step2[1], step2[14]);
+ out[15] = vqsubq_s16(step2[0], step2[15]);
+}
+
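+// Editorial note: reduced 16-point IDCT for when only the first eight
+// coefficients can be non-zero; the early butterflies collapse to
+// single-input btf_16_neon multiplies.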
+static INLINE void idct16_low8_new_neon(int16x8_t *in, int16x8_t *out,
+ int8_t cos_bit, int bit) {
+ (void)bit;
+ const int32_t *cospi = cospi_arr(cos_bit);
+ int16x8_t step1[16], step2[16];
+ const int16x4_t c0 =
+ create_s16x4_neon((int16_t *)(cospi + 32), (int16_t *)(cospi + 32),
+ (int16_t *)(cospi + 16), (int16_t *)(cospi + 48));
+
+ // stage 1
+ // stage 2
+
+ step2[0] = in[0];
+ step2[2] = in[4];
+ step2[4] = in[2];
+ step2[6] = in[6];
+
+ btf_16_neon(in[1], cospi[60], cospi[4], &step2[8], &step2[15]);
+ btf_16_neon(in[7], -cospi[36], cospi[28], &step2[9], &step2[14]);
+ btf_16_neon(in[5], cospi[44], cospi[20], &step2[10], &step2[13]);
+ btf_16_neon(in[3], -cospi[52], cospi[12], &step2[11], &step2[12]);
+
+ // stage 3
+
+ btf_16_neon(step2[4], cospi[56], cospi[8], &step1[4], &step1[7]);
+ btf_16_neon(step2[6], -cospi[40], cospi[24], &step1[5], &step1[6]);
+
+ step1[0] = step2[0];
+ step1[2] = step2[2];
+ step1[8] = vqaddq_s16(step2[8], step2[9]);
+ step1[9] = vqsubq_s16(step2[8], step2[9]);
+ step1[10] = vqsubq_s16(step2[11], step2[10]);
+ step1[11] = vqaddq_s16(step2[11], step2[10]);
+ step1[12] = vqaddq_s16(step2[12], step2[13]);
+ step1[13] = vqsubq_s16(step2[12], step2[13]);
+ step1[14] = vqsubq_s16(step2[15], step2[14]);
+ step1[15] = vqaddq_s16(step2[15], step2[14]);
+
+ // stage 4
+
+ btf_16_neon(step1[0], cospi[32], cospi[32], &step2[0], &step2[1]);
+ btf_16_neon(step1[2], cospi[48], cospi[16], &step2[2], &step2[3]);
+ btf_16_lane_2_3_neon(step1[14], step1[9], c0, &step2[14], &step2[9]);
+ btf_16_lane_3_2_neon(vnegq_s16(step1[10]), vnegq_s16(step1[13]), c0,
+ &step2[10], &step2[13]);
+
+ step2[4] = vqaddq_s16(step1[4], step1[5]);
+ step2[5] = vqsubq_s16(step1[4], step1[5]);
+ step2[6] = vqsubq_s16(step1[7], step1[6]);
+ step2[7] = vqaddq_s16(step1[7], step1[6]);
+ step2[8] = step1[8];
+ step2[11] = step1[11];
+ step2[12] = step1[12];
+ step2[15] = step1[15];
+
+ // stage 5
+
+ btf_16_lane_0_1_neon(step2[6], step2[5], c0, &step1[6], &step1[5]);
+ step1[0] = vqaddq_s16(step2[0], step2[3]);
+ step1[1] = vqaddq_s16(step2[1], step2[2]);
+ step1[2] = vqsubq_s16(step2[1], step2[2]);
+ step1[3] = vqsubq_s16(step2[0], step2[3]);
+ step1[4] = step2[4];
+ step1[7] = step2[7];
+ step1[8] = vqaddq_s16(step2[8], step2[11]);
+ step1[9] = vqaddq_s16(step2[9], step2[10]);
+ step1[10] = vqsubq_s16(step2[9], step2[10]);
+ step1[11] = vqsubq_s16(step2[8], step2[11]);
+ step1[12] = vqsubq_s16(step2[15], step2[12]);
+ step1[13] = vqsubq_s16(step2[14], step2[13]);
+ step1[14] = vqaddq_s16(step2[14], step2[13]);
+ step1[15] = vqaddq_s16(step2[15], step2[12]);
+
+ // stage 6
+ btf_16_lane_0_1_neon(step1[13], step1[10], c0, &step2[13], &step2[10]);
+ btf_16_lane_0_1_neon(step1[12], step1[11], c0, &step2[12], &step2[11]);
+
+ step2[0] = vqaddq_s16(step1[0], step1[7]);
+ step2[1] = vqaddq_s16(step1[1], step1[6]);
+ step2[2] = vqaddq_s16(step1[2], step1[5]);
+ step2[3] = vqaddq_s16(step1[3], step1[4]);
+ step2[4] = vqsubq_s16(step1[3], step1[4]);
+ step2[5] = vqsubq_s16(step1[2], step1[5]);
+ step2[6] = vqsubq_s16(step1[1], step1[6]);
+ step2[7] = vqsubq_s16(step1[0], step1[7]);
+ step2[8] = step1[8];
+ step2[9] = step1[9];
+ step2[14] = step1[14];
+ step2[15] = step1[15];
+
+ // stage 7
+
+ out[0] = vqaddq_s16(step2[0], step2[15]);
+ out[1] = vqaddq_s16(step2[1], step2[14]);
+ out[2] = vqaddq_s16(step2[2], step2[13]);
+ out[3] = vqaddq_s16(step2[3], step2[12]);
+ out[4] = vqaddq_s16(step2[4], step2[11]);
+ out[5] = vqaddq_s16(step2[5], step2[10]);
+ out[6] = vqaddq_s16(step2[6], step2[9]);
+ out[7] = vqaddq_s16(step2[7], step2[8]);
+ out[8] = vqsubq_s16(step2[7], step2[8]);
+ out[9] = vqsubq_s16(step2[6], step2[9]);
+ out[10] = vqsubq_s16(step2[5], step2[10]);
+ out[11] = vqsubq_s16(step2[4], step2[11]);
+ out[12] = vqsubq_s16(step2[3], step2[12]);
+ out[13] = vqsubq_s16(step2[2], step2[13]);
+ out[14] = vqsubq_s16(step2[1], step2[14]);
+ out[15] = vqsubq_s16(step2[0], step2[15]);
+}
+
+static INLINE void iadst16_new_neon(int16x8_t *const in, int16x8_t *out,
+ int8_t cos_bit, int bit) {
+ (void)bit;
+ const int32_t *cospi = cospi_arr(cos_bit);
+
+ const int16x4_t c0 =
+ create_s16x4_neon((int16_t *)(cospi + 2), (int16_t *)(cospi + 62),
+ (int16_t *)(cospi + 10), (int16_t *)(cospi + 54));
+ const int16x4_t c1 =
+ create_s16x4_neon((int16_t *)(cospi + 18), (int16_t *)(cospi + 46),
+ (int16_t *)(cospi + 26), (int16_t *)(cospi + 38));
+ const int16x4_t c2 =
+ create_s16x4_neon((int16_t *)(cospi + 34), (int16_t *)(cospi + 30),
+ (int16_t *)(cospi + 42), (int16_t *)(cospi + 22));
+ const int16x4_t c3 =
+ create_s16x4_neon((int16_t *)(cospi + 50), (int16_t *)(cospi + 14),
+ (int16_t *)(cospi + 58), (int16_t *)(cospi + 6));
+ const int16x4_t c4 =
+ create_s16x4_neon((int16_t *)(cospi + 8), (int16_t *)(cospi + 56),
+ (int16_t *)(cospi + 40), (int16_t *)(cospi + 24));
+
+ const int16x4_t c =
+ create_s16x4_neon((int16_t *)(cospi + 32), (int16_t *)(cospi + 32),
+ (int16_t *)(cospi + 16), (int16_t *)(cospi + 48));
+
+ int16x8_t x[16];
+ int16x8_t t[14];
+ int16x8_t s0, s1, s2, s3, s4, s5, s6, s7;
+ int16x8_t s8, s9, s10, s11, s12, s13, s14, s15;
+
+ // Stage 1
+ x[0] = in[15];
+ x[1] = in[0];
+ x[2] = in[13];
+ x[3] = in[2];
+ x[4] = in[11];
+ x[5] = in[4];
+ x[6] = in[9];
+ x[7] = in[6];
+ x[8] = in[7];
+ x[9] = in[8];
+ x[10] = in[5];
+ x[11] = in[10];
+ x[12] = in[3];
+ x[13] = in[12];
+ x[14] = in[1];
+ x[15] = in[14];
+
+ // Stage 2
+ btf_16_lane_0_1_neon(x[0], x[1], c0, &s0, &s1);
+ btf_16_lane_2_3_neon(x[2], x[3], c0, &s2, &s3);
+ btf_16_lane_0_1_neon(x[4], x[5], c1, &s4, &s5);
+ btf_16_lane_2_3_neon(x[6], x[7], c1, &s6, &s7);
+ btf_16_lane_0_1_neon(x[8], x[9], c2, &s8, &s9);
+ btf_16_lane_2_3_neon(x[10], x[11], c2, &s10, &s11);
+ btf_16_lane_0_1_neon(x[12], x[13], c3, &s12, &s13);
+ btf_16_lane_2_3_neon(x[14], x[15], c3, &s14, &s15);
+
+ // Stage 3
+ x[0] = vqaddq_s16(s0, s8);
+ x[1] = vqaddq_s16(s1, s9);
+ x[2] = vqaddq_s16(s2, s10);
+ x[3] = vqaddq_s16(s3, s11);
+ x[4] = vqaddq_s16(s4, s12);
+ x[5] = vqaddq_s16(s5, s13);
+ x[6] = vqaddq_s16(s6, s14);
+ x[7] = vqaddq_s16(s7, s15);
+ x[8] = vqsubq_s16(s0, s8);
+ x[9] = vqsubq_s16(s1, s9);
+ x[10] = vqsubq_s16(s2, s10);
+ x[11] = vqsubq_s16(s3, s11);
+ x[12] = vqsubq_s16(s4, s12);
+ x[13] = vqsubq_s16(s5, s13);
+ x[14] = vqsubq_s16(s6, s14);
+ x[15] = vqsubq_s16(s7, s15);
+
+ // Stage 4
+ t[0] = x[0];
+ t[1] = x[1];
+ t[2] = x[2];
+ t[3] = x[3];
+ t[4] = x[4];
+ t[5] = x[5];
+ t[6] = x[6];
+ t[7] = x[7];
+ btf_16_lane_0_1_neon(x[8], x[9], c4, &s8, &s9);
+ btf_16_lane_2_3_neon(x[10], x[11], c4, &s10, &s11);
+ btf_16_lane_1_0_neon(x[13], x[12], c4, &s13, &s12);
+ btf_16_lane_3_2_neon(x[15], x[14], c4, &s15, &s14);
+
+ // Stage 5
+ x[0] = vqaddq_s16(t[0], t[4]);
+ x[1] = vqaddq_s16(t[1], t[5]);
+ x[2] = vqaddq_s16(t[2], t[6]);
+ x[3] = vqaddq_s16(t[3], t[7]);
+ x[4] = vqsubq_s16(t[0], t[4]);
+ x[5] = vqsubq_s16(t[1], t[5]);
+ x[6] = vqsubq_s16(t[2], t[6]);
+ x[7] = vqsubq_s16(t[3], t[7]);
+ x[8] = vqaddq_s16(s8, s12);
+ x[9] = vqaddq_s16(s9, s13);
+ x[10] = vqaddq_s16(s10, s14);
+ x[11] = vqaddq_s16(s11, s15);
+ x[12] = vqsubq_s16(s8, s12);
+ x[13] = vqsubq_s16(s9, s13);
+ x[14] = vqsubq_s16(s10, s14);
+ x[15] = vqsubq_s16(s11, s15);
+
+ // stage 6
+ t[0] = x[0];
+ t[1] = x[1];
+ t[2] = x[2];
+ t[3] = x[3];
+ btf_16_lane_2_3_neon(x[4], x[5], c, &s4, &s5);
+ btf_16_lane_3_2_neon(x[7], x[6], c, &s7, &s6);
+ t[8] = x[8];
+ t[9] = x[9];
+ t[10] = x[10];
+ t[11] = x[11];
+ btf_16_lane_2_3_neon(x[12], x[13], c, &s12, &s13);
+ btf_16_lane_3_2_neon(x[15], x[14], c, &s15, &s14);
+
+ // Stage 7
+ x[0] = vqaddq_s16(t[0], t[2]);
+ x[1] = vqaddq_s16(t[1], t[3]);
+ x[2] = vqsubq_s16(t[0], t[2]);
+ x[3] = vqsubq_s16(t[1], t[3]);
+ x[4] = vqaddq_s16(s4, s6);
+ x[5] = vqaddq_s16(s5, s7);
+ x[6] = vqsubq_s16(s4, s6);
+ x[7] = vqsubq_s16(s5, s7);
+ x[8] = vqaddq_s16(t[8], t[10]);
+ x[9] = vqaddq_s16(t[9], t[11]);
+ x[10] = vqsubq_s16(t[8], t[10]);
+ x[11] = vqsubq_s16(t[9], t[11]);
+ x[12] = vqaddq_s16(s12, s14);
+ x[13] = vqaddq_s16(s13, s15);
+ x[14] = vqsubq_s16(s12, s14);
+ x[15] = vqsubq_s16(s13, s15);
+
+ // Stage 8
+ btf_16_half_neon(x + 2, c);
+ btf_16_half_neon(x + 6, c);
+ btf_16_half_neon(x + 10, c);
+ btf_16_half_neon(x + 14, c);
+
+ // Stage 9
+ out[0] = x[0];
+ out[1] = vnegq_s16(x[8]);
+ out[2] = x[12];
+ out[3] = vnegq_s16(x[4]);
+ out[4] = x[6];
+ out[5] = vnegq_s16(x[14]);
+ out[6] = x[10];
+ out[7] = vnegq_s16(x[2]);
+ out[8] = x[3];
+ out[9] = vnegq_s16(x[11]);
+ out[10] = x[15];
+ out[11] = vnegq_s16(x[7]);
+ out[12] = x[5];
+ out[13] = vnegq_s16(x[13]);
+ out[14] = x[9];
+ out[15] = vnegq_s16(x[1]);
+}
+
+static INLINE void iadst16_low1_new_neon(int16x8_t *const in, int16x8_t *out,
+ int8_t cos_bit, int bit) {
+ (void)bit;
+ const int32_t *cospi = cospi_arr(cos_bit);
+ const int16x4_t c4 =
+ create_s16x4_neon((int16_t *)(cospi + 8), (int16_t *)(cospi + 56),
+ (int16_t *)(cospi + 40), (int16_t *)(cospi + 24));
+ const int16x4_t c =
+ create_s16x4_neon((int16_t *)(cospi + 32), (int16_t *)(cospi + 32),
+ (int16_t *)(cospi + 16), (int16_t *)(cospi + 48));
+
+ int16x8_t x[16];
+ int16x8_t t[10];
+ int16x8_t s0, s1, s4, s5;
+ int16x8_t s8, s9, s12, s13;
+
+ // Stage 1
+ x[1] = in[0];
+
+ // Stage 2
+ btf_16_neon(x[1], cospi[62], -cospi[2], &s0, &s1);
+
+ // Stage 3
+ x[0] = s0;
+ x[1] = s1;
+ x[8] = s0;
+ x[9] = s1;
+
+ // Stage 4
+ t[0] = x[0];
+ t[1] = x[1];
+ btf_16_lane_0_1_neon(x[8], x[9], c4, &s8, &s9);
+
+ // Stage 5
+ x[0] = t[0];
+ x[1] = t[1];
+ x[4] = t[0];
+ x[5] = t[1];
+ x[8] = s8;
+ x[9] = s9;
+ x[12] = s8;
+ x[13] = s9;
+
+ // stage 6
+ t[0] = x[0];
+ t[1] = x[1];
+ btf_16_lane_2_3_neon(x[4], x[5], c, &s4, &s5);
+ t[8] = x[8];
+ t[9] = x[9];
+ btf_16_lane_2_3_neon(x[12], x[13], c, &s12, &s13);
+
+ // Stage 7
+ x[0] = t[0];
+ x[1] = t[1];
+ x[2] = t[0];
+ x[3] = t[1];
+ x[4] = s4;
+ x[5] = s5;
+ x[6] = s4;
+ x[7] = s5;
+ x[8] = t[8];
+ x[9] = t[9];
+ x[10] = t[8];
+ x[11] = t[9];
+ x[12] = s12;
+ x[13] = s13;
+ x[14] = s12;
+ x[15] = s13;
+
+ // Stage 8
+ btf_16_half_neon(x + 2, c);
+ btf_16_half_neon(x + 6, c);
+ btf_16_half_neon(x + 10, c);
+ btf_16_half_neon(x + 14, c);
+
+ // Stage 9
+ out[0] = x[0];
+ out[1] = vnegq_s16(x[8]);
+ out[2] = x[12];
+ out[3] = vnegq_s16(x[4]);
+ out[4] = x[6];
+ out[5] = vnegq_s16(x[14]);
+ out[6] = x[10];
+ out[7] = vnegq_s16(x[2]);
+ out[8] = x[3];
+ out[9] = vnegq_s16(x[11]);
+ out[10] = x[15];
+ out[11] = vnegq_s16(x[7]);
+ out[12] = x[5];
+ out[13] = vnegq_s16(x[13]);
+ out[14] = x[9];
+ out[15] = vnegq_s16(x[1]);
+}
+
+static INLINE void iadst16_low8_new_neon(int16x8_t *const in, int16x8_t *out,
+ int8_t cos_bit, int bit) {
+ (void)bit;
+ const int32_t *cospi = cospi_arr(cos_bit);
+
+ const int16x4_t c4 =
+ create_s16x4_neon((int16_t *)(cospi + 8), (int16_t *)(cospi + 56),
+ (int16_t *)(cospi + 40), (int16_t *)(cospi + 24));
+ const int16x4_t c =
+ create_s16x4_neon((int16_t *)(cospi + 32), (int16_t *)(cospi + 32),
+ (int16_t *)(cospi + 16), (int16_t *)(cospi + 48));
+
+ int16x8_t x[16];
+ int16x8_t t[14];
+ int16x8_t s0, s1, s2, s3, s4, s5, s6, s7;
+ int16x8_t s8, s9, s10, s11, s12, s13, s14, s15;
+
+ // Stage 1
+ x[1] = in[0];
+ x[3] = in[2];
+ x[5] = in[4];
+ x[7] = in[6];
+ x[8] = in[7];
+ x[10] = in[5];
+ x[12] = in[3];
+ x[14] = in[1];
+
+ // Stage 2
+ btf_16_neon(x[1], cospi[62], -cospi[2], &s0, &s1);
+ btf_16_neon(x[3], cospi[54], -cospi[10], &s2, &s3);
+ btf_16_neon(x[5], cospi[46], -cospi[18], &s4, &s5);
+ btf_16_neon(x[7], cospi[38], -cospi[26], &s6, &s7);
+
+ btf_16_neon(x[8], cospi[34], cospi[30], &s8, &s9);
+ btf_16_neon(x[10], cospi[42], cospi[22], &s10, &s11);
+ btf_16_neon(x[12], cospi[50], cospi[14], &s12, &s13);
+ btf_16_neon(x[14], cospi[58], cospi[6], &s14, &s15);
+
+ // Stage 3
+ x[0] = vqaddq_s16(s0, s8);
+ x[1] = vqaddq_s16(s1, s9);
+ x[2] = vqaddq_s16(s2, s10);
+ x[3] = vqaddq_s16(s3, s11);
+ x[4] = vqaddq_s16(s4, s12);
+ x[5] = vqaddq_s16(s5, s13);
+ x[6] = vqaddq_s16(s6, s14);
+ x[7] = vqaddq_s16(s7, s15);
+ x[8] = vqsubq_s16(s0, s8);
+ x[9] = vqsubq_s16(s1, s9);
+ x[10] = vqsubq_s16(s2, s10);
+ x[11] = vqsubq_s16(s3, s11);
+ x[12] = vqsubq_s16(s4, s12);
+ x[13] = vqsubq_s16(s5, s13);
+ x[14] = vqsubq_s16(s6, s14);
+ x[15] = vqsubq_s16(s7, s15);
+
+ // Stage 4
+ t[0] = x[0];
+ t[1] = x[1];
+ t[2] = x[2];
+ t[3] = x[3];
+ t[4] = x[4];
+ t[5] = x[5];
+ t[6] = x[6];
+ t[7] = x[7];
+ btf_16_lane_0_1_neon(x[8], x[9], c4, &s8, &s9);
+ btf_16_lane_2_3_neon(x[10], x[11], c4, &s10, &s11);
+ btf_16_lane_1_0_neon(x[13], x[12], c4, &s13, &s12);
+ btf_16_lane_3_2_neon(x[15], x[14], c4, &s15, &s14);
+
+ // Stage 5
+ x[0] = vqaddq_s16(t[0], t[4]);
+ x[1] = vqaddq_s16(t[1], t[5]);
+ x[2] = vqaddq_s16(t[2], t[6]);
+ x[3] = vqaddq_s16(t[3], t[7]);
+ x[4] = vqsubq_s16(t[0], t[4]);
+ x[5] = vqsubq_s16(t[1], t[5]);
+ x[6] = vqsubq_s16(t[2], t[6]);
+ x[7] = vqsubq_s16(t[3], t[7]);
+ x[8] = vqaddq_s16(s8, s12);
+ x[9] = vqaddq_s16(s9, s13);
+ x[10] = vqaddq_s16(s10, s14);
+ x[11] = vqaddq_s16(s11, s15);
+ x[12] = vqsubq_s16(s8, s12);
+ x[13] = vqsubq_s16(s9, s13);
+ x[14] = vqsubq_s16(s10, s14);
+ x[15] = vqsubq_s16(s11, s15);
+
+ // stage 6
+ t[0] = x[0];
+ t[1] = x[1];
+ t[2] = x[2];
+ t[3] = x[3];
+ btf_16_lane_2_3_neon(x[4], x[5], c, &s4, &s5);
+ btf_16_lane_3_2_neon(x[7], x[6], c, &s7, &s6);
+ t[8] = x[8];
+ t[9] = x[9];
+ t[10] = x[10];
+ t[11] = x[11];
+ btf_16_lane_2_3_neon(x[12], x[13], c, &s12, &s13);
+ btf_16_lane_3_2_neon(x[15], x[14], c, &s15, &s14);
+
+ // Stage 7
+ x[0] = vqaddq_s16(t[0], t[2]);
+ x[1] = vqaddq_s16(t[1], t[3]);
+ x[2] = vqsubq_s16(t[0], t[2]);
+ x[3] = vqsubq_s16(t[1], t[3]);
+ x[4] = vqaddq_s16(s4, s6);
+ x[5] = vqaddq_s16(s5, s7);
+ x[6] = vqsubq_s16(s4, s6);
+ x[7] = vqsubq_s16(s5, s7);
+ x[8] = vqaddq_s16(t[8], t[10]);
+ x[9] = vqaddq_s16(t[9], t[11]);
+ x[10] = vqsubq_s16(t[8], t[10]);
+ x[11] = vqsubq_s16(t[9], t[11]);
+ x[12] = vqaddq_s16(s12, s14);
+ x[13] = vqaddq_s16(s13, s15);
+ x[14] = vqsubq_s16(s12, s14);
+ x[15] = vqsubq_s16(s13, s15);
+
+ // Stage 8
+ btf_16_half_neon(x + 2, c);
+ btf_16_half_neon(x + 6, c);
+ btf_16_half_neon(x + 10, c);
+ btf_16_half_neon(x + 14, c);
+
+ // Stage 9
+ out[0] = x[0];
+ out[1] = vnegq_s16(x[8]);
+ out[2] = x[12];
+ out[3] = vnegq_s16(x[4]);
+ out[4] = x[6];
+ out[5] = vnegq_s16(x[14]);
+ out[6] = x[10];
+ out[7] = vnegq_s16(x[2]);
+ out[8] = x[3];
+ out[9] = vnegq_s16(x[11]);
+ out[10] = x[15];
+ out[11] = vnegq_s16(x[7]);
+ out[12] = x[5];
+ out[13] = vnegq_s16(x[13]);
+ out[14] = x[9];
+ out[15] = vnegq_s16(x[1]);
+}
+
+static INLINE void idct32_new_neon(int16x8_t *in, int16x8_t *out,
+ int8_t cos_bit, int bit) {
+ (void)bit;
+ const int32_t *cospi = cospi_arr(cos_bit);
+ int16x8_t step1[32], step2[32];
+
+ const int16x4_t c0 =
+ create_s16x4_neon((int16_t *)(cospi + 2), (int16_t *)(cospi + 62),
+ (int16_t *)(cospi + 34), (int16_t *)(cospi + 30));
+ const int16x4_t c1 =
+ create_s16x4_neon((int16_t *)(cospi + 18), (int16_t *)(cospi + 46),
+ (int16_t *)(cospi + 50), (int16_t *)(cospi + 14));
+ const int16x4_t c2 =
+ create_s16x4_neon((int16_t *)(cospi + 10), (int16_t *)(cospi + 54),
+ (int16_t *)(cospi + 42), (int16_t *)(cospi + 22));
+ const int16x4_t c3 =
+ create_s16x4_neon((int16_t *)(cospi + 26), (int16_t *)(cospi + 38),
+ (int16_t *)(cospi + 58), (int16_t *)(cospi + 6));
+ const int16x4_t c4 =
+ create_s16x4_neon((int16_t *)(cospi + 4), (int16_t *)(cospi + 60),
+ (int16_t *)(cospi + 36), (int16_t *)(cospi + 28));
+ const int16x4_t c5 =
+ create_s16x4_neon((int16_t *)(cospi + 20), (int16_t *)(cospi + 44),
+ (int16_t *)(cospi + 52), (int16_t *)(cospi + 12));
+ const int16x4_t c6 =
+ create_s16x4_neon((int16_t *)(cospi + 8), (int16_t *)(cospi + 56),
+ (int16_t *)(cospi + 40), (int16_t *)(cospi + 24));
+ const int16x4_t c7 =
+ create_s16x4_neon((int16_t *)(cospi + 32), (int16_t *)(cospi + 32),
+ (int16_t *)(cospi + 16), (int16_t *)(cospi + 48));
+
+ // stage 2
+
+ btf_16_lane_0_1_neon(in[1], in[31], c0, &step2[31], &step2[16]);
+ btf_16_lane_2_3_neon(in[17], in[15], c0, &step2[30], &step2[17]);
+ btf_16_lane_0_1_neon(in[9], in[23], c1, &step2[29], &step2[18]);
+ btf_16_lane_2_3_neon(in[25], in[7], c1, &step2[28], &step2[19]);
+ btf_16_lane_0_1_neon(in[5], in[27], c2, &step2[27], &step2[20]);
+ btf_16_lane_2_3_neon(in[21], in[11], c2, &step2[26], &step2[21]);
+ btf_16_lane_0_1_neon(in[13], in[19], c3, &step2[25], &step2[22]);
+ btf_16_lane_2_3_neon(in[29], in[3], c3, &step2[24], &step2[23]);
+
+ step2[0] = in[0];
+ step2[1] = in[16];
+ step2[2] = in[8];
+ step2[3] = in[24];
+ step2[4] = in[4];
+ step2[5] = in[20];
+ step2[6] = in[12];
+ step2[7] = in[28];
+ step2[8] = in[2];
+ step2[9] = in[18];
+ step2[10] = in[10];
+ step2[11] = in[26];
+ step2[12] = in[6];
+ step2[13] = in[22];
+ step2[14] = in[14];
+ step2[15] = in[30];
+
+ // stage 3
+
+ btf_16_lane_0_1_neon(step2[8], step2[15], c4, &step1[15], &step1[8]);
+ btf_16_lane_2_3_neon(step2[9], step2[14], c4, &step1[14], &step1[9]);
+ btf_16_lane_0_1_neon(step2[10], step2[13], c5, &step1[13], &step1[10]);
+ btf_16_lane_2_3_neon(step2[11], step2[12], c5, &step1[12], &step1[11]);
+
+ step1[0] = step2[0];
+ step1[1] = step2[1];
+ step1[2] = step2[2];
+ step1[3] = step2[3];
+ step1[4] = step2[4];
+ step1[5] = step2[5];
+ step1[6] = step2[6];
+ step1[7] = step2[7];
+
+ step1[16] = vqaddq_s16(step2[16], step2[17]);
+ step1[17] = vqsubq_s16(step2[16], step2[17]);
+ step1[18] = vqsubq_s16(step2[19], step2[18]);
+ step1[19] = vqaddq_s16(step2[19], step2[18]);
+ step1[20] = vqaddq_s16(step2[20], step2[21]);
+ step1[21] = vqsubq_s16(step2[20], step2[21]);
+ step1[22] = vqsubq_s16(step2[23], step2[22]);
+ step1[23] = vqaddq_s16(step2[23], step2[22]);
+ step1[24] = vqaddq_s16(step2[24], step2[25]);
+ step1[25] = vqsubq_s16(step2[24], step2[25]);
+ step1[26] = vqsubq_s16(step2[27], step2[26]);
+ step1[27] = vqaddq_s16(step2[27], step2[26]);
+ step1[28] = vqaddq_s16(step2[28], step2[29]);
+ step1[29] = vqsubq_s16(step2[28], step2[29]);
+ step1[30] = vqsubq_s16(step2[31], step2[30]);
+ step1[31] = vqaddq_s16(step2[31], step2[30]);
+
+ // stage 4
+
+ btf_16_lane_0_1_neon(step1[4], step1[7], c6, &step2[7], &step2[4]);
+ btf_16_lane_2_3_neon(step1[5], step1[6], c6, &step2[6], &step2[5]);
+ btf_16_lane_0_1_neon(step1[30], step1[17], c6, &step2[30], &step2[17]);
+ btf_16_lane_1_0_neon(vnegq_s16(step1[18]), vnegq_s16(step1[29]), c6,
+ &step2[18], &step2[29]);
+ btf_16_lane_2_3_neon(step1[26], step1[21], c6, &step2[26], &step2[21]);
+ btf_16_lane_3_2_neon(vnegq_s16(step1[22]), vnegq_s16(step1[25]), c6,
+ &step2[22], &step2[25]);
+
+ step2[0] = step1[0];
+ step2[1] = step1[1];
+ step2[2] = step1[2];
+ step2[3] = step1[3];
+ step2[8] = vqaddq_s16(step1[8], step1[9]);
+ step2[9] = vqsubq_s16(step1[8], step1[9]);
+ step2[10] = vqsubq_s16(step1[11], step1[10]);
+ step2[11] = vqaddq_s16(step1[11], step1[10]);
+ step2[12] = vqaddq_s16(step1[12], step1[13]);
+ step2[13] = vqsubq_s16(step1[12], step1[13]);
+ step2[14] = vqsubq_s16(step1[15], step1[14]);
+ step2[15] = vqaddq_s16(step1[15], step1[14]);
+ step2[16] = step1[16];
+ step2[19] = step1[19];
+ step2[20] = step1[20];
+ step2[23] = step1[23];
+ step2[24] = step1[24];
+ step2[27] = step1[27];
+ step2[28] = step1[28];
+ step2[31] = step1[31];
+
+ // stage 5
+
+ btf_16_lane_0_1_neon(step2[0], step2[1], c7, &step1[0], &step1[1]);
+ btf_16_lane_2_3_neon(step2[2], step2[3], c7, &step1[3], &step1[2]);
+ btf_16_lane_2_3_neon(step2[14], step2[9], c7, &step1[14], &step1[9]);
+ btf_16_lane_3_2_neon(vnegq_s16(step2[10]), vnegq_s16(step2[13]), c7,
+ &step1[10], &step1[13]);
+
+ step1[4] = vqaddq_s16(step2[4], step2[5]);
+ step1[5] = vqsubq_s16(step2[4], step2[5]);
+ step1[6] = vqsubq_s16(step2[7], step2[6]);
+ step1[7] = vqaddq_s16(step2[7], step2[6]);
+ step1[8] = step2[8];
+ step1[11] = step2[11];
+ step1[12] = step2[12];
+ step1[15] = step2[15];
+ step1[16] = vqaddq_s16(step2[16], step2[19]);
+ step1[17] = vqaddq_s16(step2[17], step2[18]);
+ step1[18] = vqsubq_s16(step2[17], step2[18]);
+ step1[19] = vqsubq_s16(step2[16], step2[19]);
+ step1[20] = vqsubq_s16(step2[23], step2[20]);
+ step1[21] = vqsubq_s16(step2[22], step2[21]);
+ step1[22] = vqaddq_s16(step2[22], step2[21]);
+ step1[23] = vqaddq_s16(step2[23], step2[20]);
+ step1[24] = vqaddq_s16(step2[24], step2[27]);
+ step1[25] = vqaddq_s16(step2[25], step2[26]);
+ step1[26] = vqsubq_s16(step2[25], step2[26]);
+ step1[27] = vqsubq_s16(step2[24], step2[27]);
+ step1[28] = vqsubq_s16(step2[31], step2[28]);
+ step1[29] = vqsubq_s16(step2[30], step2[29]);
+ step1[30] = vqaddq_s16(step2[30], step2[29]);
+ step1[31] = vqaddq_s16(step2[31], step2[28]);
+
+ // stage 6
+
+ btf_16_lane_0_1_neon(step1[6], step1[5], c7, &step2[6], &step2[5]);
+ btf_16_lane_2_3_neon(step1[29], step1[18], c7, &step2[29], &step2[18]);
+ btf_16_lane_2_3_neon(step1[28], step1[19], c7, &step2[28], &step2[19]);
+ btf_16_lane_3_2_neon(vnegq_s16(step1[20]), vnegq_s16(step1[27]), c7,
+ &step2[20], &step2[27]);
+ btf_16_lane_3_2_neon(vnegq_s16(step1[21]), vnegq_s16(step1[26]), c7,
+ &step2[21], &step2[26]);
+
+ step2[0] = vqaddq_s16(step1[0], step1[3]);
+ step2[1] = vqaddq_s16(step1[1], step1[2]);
+ step2[2] = vqsubq_s16(step1[1], step1[2]);
+ step2[3] = vqsubq_s16(step1[0], step1[3]);
+ step2[4] = step1[4];
+ step2[7] = step1[7];
+ step2[8] = vqaddq_s16(step1[8], step1[11]);
+ step2[9] = vqaddq_s16(step1[9], step1[10]);
+ step2[10] = vqsubq_s16(step1[9], step1[10]);
+ step2[11] = vqsubq_s16(step1[8], step1[11]);
+ step2[12] = vqsubq_s16(step1[15], step1[12]);
+ step2[13] = vqsubq_s16(step1[14], step1[13]);
+ step2[14] = vqaddq_s16(step1[14], step1[13]);
+ step2[15] = vqaddq_s16(step1[15], step1[12]);
+ step2[16] = step1[16];
+ step2[17] = step1[17];
+ step2[22] = step1[22];
+ step2[23] = step1[23];
+ step2[24] = step1[24];
+ step2[25] = step1[25];
+ step2[30] = step1[30];
+ step2[31] = step1[31];
+
+ // stage 7
+
+ btf_16_lane_0_1_neon(step2[13], step2[10], c7, &step1[13], &step1[10]);
+ btf_16_lane_0_1_neon(step2[12], step2[11], c7, &step1[12], &step1[11]);
+
+ step1[0] = vqaddq_s16(step2[0], step2[7]);
+ step1[1] = vqaddq_s16(step2[1], step2[6]);
+ step1[2] = vqaddq_s16(step2[2], step2[5]);
+ step1[3] = vqaddq_s16(step2[3], step2[4]);
+ step1[4] = vqsubq_s16(step2[3], step2[4]);
+ step1[5] = vqsubq_s16(step2[2], step2[5]);
+ step1[6] = vqsubq_s16(step2[1], step2[6]);
+ step1[7] = vqsubq_s16(step2[0], step2[7]);
+ step1[8] = step2[8];
+ step1[9] = step2[9];
+ step1[14] = step2[14];
+ step1[15] = step2[15];
+ step1[16] = vqaddq_s16(step2[16], step2[23]);
+ step1[17] = vqaddq_s16(step2[17], step2[22]);
+ step1[18] = vqaddq_s16(step2[18], step2[21]);
+ step1[19] = vqaddq_s16(step2[19], step2[20]);
+ step1[20] = vqsubq_s16(step2[19], step2[20]);
+ step1[21] = vqsubq_s16(step2[18], step2[21]);
+ step1[22] = vqsubq_s16(step2[17], step2[22]);
+ step1[23] = vqsubq_s16(step2[16], step2[23]);
+ step1[24] = vqsubq_s16(step2[31], step2[24]);
+ step1[25] = vqsubq_s16(step2[30], step2[25]);
+ step1[26] = vqsubq_s16(step2[29], step2[26]);
+ step1[27] = vqsubq_s16(step2[28], step2[27]);
+ step1[28] = vqaddq_s16(step2[27], step2[28]);
+ step1[29] = vqaddq_s16(step2[26], step2[29]);
+ step1[30] = vqaddq_s16(step2[25], step2[30]);
+ step1[31] = vqaddq_s16(step2[24], step2[31]);
+
+ // stage 8
+
+ btf_16_lane_0_1_neon(step1[27], step1[20], c7, &step2[27], &step2[20]);
+ btf_16_lane_0_1_neon(step1[26], step1[21], c7, &step2[26], &step2[21]);
+ btf_16_lane_0_1_neon(step1[25], step1[22], c7, &step2[25], &step2[22]);
+ btf_16_lane_0_1_neon(step1[24], step1[23], c7, &step2[24], &step2[23]);
+
+ step2[0] = vqaddq_s16(step1[0], step1[15]);
+ step2[1] = vqaddq_s16(step1[1], step1[14]);
+ step2[2] = vqaddq_s16(step1[2], step1[13]);
+ step2[3] = vqaddq_s16(step1[3], step1[12]);
+ step2[4] = vqaddq_s16(step1[4], step1[11]);
+ step2[5] = vqaddq_s16(step1[5], step1[10]);
+ step2[6] = vqaddq_s16(step1[6], step1[9]);
+ step2[7] = vqaddq_s16(step1[7], step1[8]);
+ step2[8] = vqsubq_s16(step1[7], step1[8]);
+ step2[9] = vqsubq_s16(step1[6], step1[9]);
+ step2[10] = vqsubq_s16(step1[5], step1[10]);
+ step2[11] = vqsubq_s16(step1[4], step1[11]);
+ step2[12] = vqsubq_s16(step1[3], step1[12]);
+ step2[13] = vqsubq_s16(step1[2], step1[13]);
+ step2[14] = vqsubq_s16(step1[1], step1[14]);
+ step2[15] = vqsubq_s16(step1[0], step1[15]);
+ step2[16] = step1[16];
+ step2[17] = step1[17];
+ step2[18] = step1[18];
+ step2[19] = step1[19];
+ step2[28] = step1[28];
+ step2[29] = step1[29];
+ step2[30] = step1[30];
+ step2[31] = step1[31];
+
+ // stage 9
+
+ out[0] = vqaddq_s16(step2[0], step2[31]);
+ out[1] = vqaddq_s16(step2[1], step2[30]);
+ out[2] = vqaddq_s16(step2[2], step2[29]);
+ out[3] = vqaddq_s16(step2[3], step2[28]);
+ out[4] = vqaddq_s16(step2[4], step2[27]);
+ out[5] = vqaddq_s16(step2[5], step2[26]);
+ out[6] = vqaddq_s16(step2[6], step2[25]);
+ out[7] = vqaddq_s16(step2[7], step2[24]);
+ out[8] = vqaddq_s16(step2[8], step2[23]);
+ out[9] = vqaddq_s16(step2[9], step2[22]);
+ out[10] = vqaddq_s16(step2[10], step2[21]);
+ out[11] = vqaddq_s16(step2[11], step2[20]);
+ out[12] = vqaddq_s16(step2[12], step2[19]);
+ out[13] = vqaddq_s16(step2[13], step2[18]);
+ out[14] = vqaddq_s16(step2[14], step2[17]);
+ out[15] = vqaddq_s16(step2[15], step2[16]);
+ out[16] = vqsubq_s16(step2[15], step2[16]);
+ out[17] = vqsubq_s16(step2[14], step2[17]);
+ out[18] = vqsubq_s16(step2[13], step2[18]);
+ out[19] = vqsubq_s16(step2[12], step2[19]);
+ out[20] = vqsubq_s16(step2[11], step2[20]);
+ out[21] = vqsubq_s16(step2[10], step2[21]);
+ out[22] = vqsubq_s16(step2[9], step2[22]);
+ out[23] = vqsubq_s16(step2[8], step2[23]);
+ out[24] = vqsubq_s16(step2[7], step2[24]);
+ out[25] = vqsubq_s16(step2[6], step2[25]);
+ out[26] = vqsubq_s16(step2[5], step2[26]);
+ out[27] = vqsubq_s16(step2[4], step2[27]);
+ out[28] = vqsubq_s16(step2[3], step2[28]);
+ out[29] = vqsubq_s16(step2[2], step2[29]);
+ out[30] = vqsubq_s16(step2[1], step2[30]);
+ out[31] = vqsubq_s16(step2[0], step2[31]);
+}
+
+static INLINE void idct32_low1_new_neon(int16x8_t *in, int16x8_t *out,
+ int8_t cos_bit, int bit) {
+ (void)bit;
+ const int32_t *cospi = cospi_arr(cos_bit);
+ int16x8_t step1;
+ int32x4_t t32[2];
+
+ // stage 1
+ // stage 2
+ // stage 3
+ // stage 4
+ // stage 5
+
+ t32[0] = vmull_n_s16(vget_low_s16(in[0]), cospi[32]);
+ t32[1] = vmull_n_s16(vget_high_s16(in[0]), cospi[32]);
+ step1 = vcombine_s16(vrshrn_n_s32(t32[0], INV_COS_BIT),
+ vrshrn_n_s32(t32[1], INV_COS_BIT));
+
+ // stage 6
+ // stage 7
+ // stage 8
+ // stage 9
+
+ out[0] = step1;
+ out[1] = step1;
+ out[2] = step1;
+ out[3] = step1;
+ out[4] = step1;
+ out[5] = step1;
+ out[6] = step1;
+ out[7] = step1;
+ out[8] = step1;
+ out[9] = step1;
+ out[10] = step1;
+ out[11] = step1;
+ out[12] = step1;
+ out[13] = step1;
+ out[14] = step1;
+ out[15] = step1;
+ out[16] = step1;
+ out[17] = step1;
+ out[18] = step1;
+ out[19] = step1;
+ out[20] = step1;
+ out[21] = step1;
+ out[22] = step1;
+ out[23] = step1;
+ out[24] = step1;
+ out[25] = step1;
+ out[26] = step1;
+ out[27] = step1;
+ out[28] = step1;
+ out[29] = step1;
+ out[30] = step1;
+ out[31] = step1;
+}
+
+static INLINE void idct32_low8_new_neon(int16x8_t *in, int16x8_t *out,
+ int8_t cos_bit, int bit) {
+ (void)bit;
+ const int32_t *cospi = cospi_arr(cos_bit);
+ int16x8_t step1[32], step2[32];
+ int32x4_t t32[16];
+ const int16x4_t c0 =
+ create_s16x4_neon((int16_t *)(cospi + 8), (int16_t *)(cospi + 56),
+ (int16_t *)(cospi + 40), (int16_t *)(cospi + 24));
+ const int16x4_t c1 =
+ create_s16x4_neon((int16_t *)(cospi + 32), (int16_t *)(cospi + 32),
+ (int16_t *)(cospi + 16), (int16_t *)(cospi + 48));
+
+ // stage 1
+ // stage 2
+
+ step2[0] = in[0];
+ step2[4] = in[4];
+ step2[8] = in[2];
+ step2[12] = in[6];
+
+ btf_16_neon(in[1], cospi[62], cospi[2], &step2[16], &step2[31]);
+ btf_16_neon(in[7], -cospi[50], cospi[14], &step2[19], &step2[28]);
+ btf_16_neon(in[5], cospi[54], cospi[10], &step2[20], &step2[27]);
+ btf_16_neon(in[3], -cospi[58], cospi[6], &step2[23], &step2[24]);
+
+ // stage 3
+ step1[0] = step2[0];
+ step1[4] = step2[4];
+
+ btf_16_neon(step2[8], cospi[60], cospi[4], &step1[8], &step1[15]);
+ btf_16_neon(step2[12], -cospi[52], cospi[12], &step1[11], &step1[12]);
+
+ step1[16] = step2[16];
+ step1[17] = step2[16];
+ step1[18] = step2[19];
+ step1[19] = step2[19];
+ step1[20] = step2[20];
+ step1[21] = step2[20];
+ step1[22] = step2[23];
+ step1[23] = step2[23];
+ step1[24] = step2[24];
+ step1[25] = step2[24];
+ step1[26] = step2[27];
+ step1[27] = step2[27];
+ step1[28] = step2[28];
+ step1[29] = step2[28];
+ step1[30] = step2[31];
+ step1[31] = step2[31];
+
+ // stage 4
+
+ btf_16_neon(step1[4], cospi[56], cospi[8], &step2[4], &step2[7]);
+ btf_16_lane_0_1_neon(step1[30], step1[17], c0, &step2[30], &step2[17]);
+ btf_16_lane_1_0_neon(vnegq_s16(step1[18]), vnegq_s16(step1[29]), c0,
+ &step2[18], &step2[29]);
+ btf_16_lane_2_3_neon(step1[26], step1[21], c0, &step2[26], &step2[21]);
+ btf_16_lane_3_2_neon(vnegq_s16(step1[22]), vnegq_s16(step1[25]), c0,
+ &step2[22], &step2[25]);
+
+ step2[0] = step1[0];
+ step2[8] = step1[8];
+ step2[9] = step1[8];
+ step2[10] = step1[11];
+ step2[11] = step1[11];
+ step2[12] = step1[12];
+ step2[13] = step1[12];
+ step2[14] = step1[15];
+ step2[15] = step1[15];
+ step2[16] = step1[16];
+ step2[19] = step1[19];
+ step2[20] = step1[20];
+ step2[23] = step1[23];
+ step2[24] = step1[24];
+ step2[27] = step1[27];
+ step2[28] = step1[28];
+ step2[31] = step1[31];
+
+ // stage 5
+
+ t32[0] = vmull_n_s16(vget_low_s16(step2[0]), cospi[32]);
+ t32[1] = vmull_n_s16(vget_high_s16(step2[0]), cospi[32]);
+ step1[0] = vcombine_s16(vrshrn_n_s32(t32[0], INV_COS_BIT),
+ vrshrn_n_s32(t32[1], INV_COS_BIT));
+
+ btf_16_lane_2_3_neon(step2[14], step2[9], c1, &step1[14], &step1[9]);
+ btf_16_lane_3_2_neon(vnegq_s16(step2[10]), vnegq_s16(step2[13]), c1,
+ &step1[10], &step1[13]);
+
+ step1[4] = step2[4];
+ step1[5] = step2[4];
+ step1[6] = step2[7];
+ step1[7] = step2[7];
+ step1[8] = step2[8];
+ step1[11] = step2[11];
+ step1[12] = step2[12];
+ step1[15] = step2[15];
+ step1[16] = vqaddq_s16(step2[16], step2[19]);
+ step1[17] = vqaddq_s16(step2[17], step2[18]);
+ step1[18] = vqsubq_s16(step2[17], step2[18]);
+ step1[19] = vqsubq_s16(step2[16], step2[19]);
+ step1[20] = vqsubq_s16(step2[23], step2[20]);
+ step1[21] = vqsubq_s16(step2[22], step2[21]);
+ step1[22] = vqaddq_s16(step2[22], step2[21]);
+ step1[23] = vqaddq_s16(step2[23], step2[20]);
+ step1[24] = vqaddq_s16(step2[24], step2[27]);
+ step1[25] = vqaddq_s16(step2[25], step2[26]);
+ step1[26] = vqsubq_s16(step2[25], step2[26]);
+ step1[27] = vqsubq_s16(step2[24], step2[27]);
+ step1[28] = vqsubq_s16(step2[31], step2[28]);
+ step1[29] = vqsubq_s16(step2[30], step2[29]);
+ step1[30] = vqaddq_s16(step2[30], step2[29]);
+ step1[31] = vqaddq_s16(step2[31], step2[28]);
+
+ // stage 6
+
+ btf_16_lane_0_1_neon(step1[6], step1[5], c1, &step2[6], &step2[5]);
+ btf_16_lane_2_3_neon(step1[29], step1[18], c1, &step2[29], &step2[18]);
+ btf_16_lane_2_3_neon(step1[28], step1[19], c1, &step2[28], &step2[19]);
+ btf_16_lane_3_2_neon(vnegq_s16(step1[20]), vnegq_s16(step1[27]), c1,
+ &step2[20], &step2[27]);
+ btf_16_lane_3_2_neon(vnegq_s16(step1[21]), vnegq_s16(step1[26]), c1,
+ &step2[21], &step2[26]);
+
+ step2[0] = step1[0];
+ step2[1] = step1[0];
+ step2[2] = step1[0];
+ step2[3] = step1[0];
+ step2[4] = step1[4];
+ step2[7] = step1[7];
+ step2[8] = vqaddq_s16(step1[8], step1[11]);
+ step2[9] = vqaddq_s16(step1[9], step1[10]);
+ step2[10] = vqsubq_s16(step1[9], step1[10]);
+ step2[11] = vqsubq_s16(step1[8], step1[11]);
+ step2[12] = vqsubq_s16(step1[15], step1[12]);
+ step2[13] = vqsubq_s16(step1[14], step1[13]);
+ step2[14] = vqaddq_s16(step1[14], step1[13]);
+ step2[15] = vqaddq_s16(step1[15], step1[12]);
+ step2[16] = step1[16];
+ step2[17] = step1[17];
+ step2[22] = step1[22];
+ step2[23] = step1[23];
+ step2[24] = step1[24];
+ step2[25] = step1[25];
+ step2[30] = step1[30];
+ step2[31] = step1[31];
+
+ // stage 7
+
+ btf_16_lane_0_1_neon(step2[13], step2[10], c1, &step1[13], &step1[10]);
+ btf_16_lane_0_1_neon(step2[12], step2[11], c1, &step1[12], &step1[11]);
+
+ step1[0] = vqaddq_s16(step2[0], step2[7]);
+ step1[1] = vqaddq_s16(step2[1], step2[6]);
+ step1[2] = vqaddq_s16(step2[2], step2[5]);
+ step1[3] = vqaddq_s16(step2[3], step2[4]);
+ step1[4] = vqsubq_s16(step2[3], step2[4]);
+ step1[5] = vqsubq_s16(step2[2], step2[5]);
+ step1[6] = vqsubq_s16(step2[1], step2[6]);
+ step1[7] = vqsubq_s16(step2[0], step2[7]);
+ step1[8] = step2[8];
+ step1[9] = step2[9];
+ step1[14] = step2[14];
+ step1[15] = step2[15];
+ step1[16] = vqaddq_s16(step2[16], step2[23]);
+ step1[17] = vqaddq_s16(step2[17], step2[22]);
+ step1[18] = vqaddq_s16(step2[18], step2[21]);
+ step1[19] = vqaddq_s16(step2[19], step2[20]);
+ step1[20] = vqsubq_s16(step2[19], step2[20]);
+ step1[21] = vqsubq_s16(step2[18], step2[21]);
+ step1[22] = vqsubq_s16(step2[17], step2[22]);
+ step1[23] = vqsubq_s16(step2[16], step2[23]);
+ step1[24] = vqsubq_s16(step2[31], step2[24]);
+ step1[25] = vqsubq_s16(step2[30], step2[25]);
+ step1[26] = vqsubq_s16(step2[29], step2[26]);
+ step1[27] = vqsubq_s16(step2[28], step2[27]);
+ step1[28] = vqaddq_s16(step2[27], step2[28]);
+ step1[29] = vqaddq_s16(step2[26], step2[29]);
+ step1[30] = vqaddq_s16(step2[25], step2[30]);
+ step1[31] = vqaddq_s16(step2[24], step2[31]);
+
+ // stage 8
+
+ btf_16_lane_0_1_neon(step1[27], step1[20], c1, &step2[27], &step2[20]);
+ btf_16_lane_0_1_neon(step1[26], step1[21], c1, &step2[26], &step2[21]);
+ btf_16_lane_0_1_neon(step1[25], step1[22], c1, &step2[25], &step2[22]);
+ btf_16_lane_0_1_neon(step1[24], step1[23], c1, &step2[24], &step2[23]);
+
+ step2[0] = vqaddq_s16(step1[0], step1[15]);
+ step2[1] = vqaddq_s16(step1[1], step1[14]);
+ step2[2] = vqaddq_s16(step1[2], step1[13]);
+ step2[3] = vqaddq_s16(step1[3], step1[12]);
+ step2[4] = vqaddq_s16(step1[4], step1[11]);
+ step2[5] = vqaddq_s16(step1[5], step1[10]);
+ step2[6] = vqaddq_s16(step1[6], step1[9]);
+ step2[7] = vqaddq_s16(step1[7], step1[8]);
+ step2[8] = vqsubq_s16(step1[7], step1[8]);
+ step2[9] = vqsubq_s16(step1[6], step1[9]);
+ step2[10] = vqsubq_s16(step1[5], step1[10]);
+ step2[11] = vqsubq_s16(step1[4], step1[11]);
+ step2[12] = vqsubq_s16(step1[3], step1[12]);
+ step2[13] = vqsubq_s16(step1[2], step1[13]);
+ step2[14] = vqsubq_s16(step1[1], step1[14]);
+ step2[15] = vqsubq_s16(step1[0], step1[15]);
+ step2[16] = step1[16];
+ step2[17] = step1[17];
+ step2[18] = step1[18];
+ step2[19] = step1[19];
+ step2[28] = step1[28];
+ step2[29] = step1[29];
+ step2[30] = step1[30];
+ step2[31] = step1[31];
+
+ // stage 9
+
+ out[0] = vqaddq_s16(step2[0], step2[31]);
+ out[1] = vqaddq_s16(step2[1], step2[30]);
+ out[2] = vqaddq_s16(step2[2], step2[29]);
+ out[3] = vqaddq_s16(step2[3], step2[28]);
+ out[4] = vqaddq_s16(step2[4], step2[27]);
+ out[5] = vqaddq_s16(step2[5], step2[26]);
+ out[6] = vqaddq_s16(step2[6], step2[25]);
+ out[7] = vqaddq_s16(step2[7], step2[24]);
+ out[8] = vqaddq_s16(step2[8], step2[23]);
+ out[9] = vqaddq_s16(step2[9], step2[22]);
+ out[10] = vqaddq_s16(step2[10], step2[21]);
+ out[11] = vqaddq_s16(step2[11], step2[20]);
+ out[12] = vqaddq_s16(step2[12], step2[19]);
+ out[13] = vqaddq_s16(step2[13], step2[18]);
+ out[14] = vqaddq_s16(step2[14], step2[17]);
+ out[15] = vqaddq_s16(step2[15], step2[16]);
+ out[16] = vqsubq_s16(step2[15], step2[16]);
+ out[17] = vqsubq_s16(step2[14], step2[17]);
+ out[18] = vqsubq_s16(step2[13], step2[18]);
+ out[19] = vqsubq_s16(step2[12], step2[19]);
+ out[20] = vqsubq_s16(step2[11], step2[20]);
+ out[21] = vqsubq_s16(step2[10], step2[21]);
+ out[22] = vqsubq_s16(step2[9], step2[22]);
+ out[23] = vqsubq_s16(step2[8], step2[23]);
+ out[24] = vqsubq_s16(step2[7], step2[24]);
+ out[25] = vqsubq_s16(step2[6], step2[25]);
+ out[26] = vqsubq_s16(step2[5], step2[26]);
+ out[27] = vqsubq_s16(step2[4], step2[27]);
+ out[28] = vqsubq_s16(step2[3], step2[28]);
+ out[29] = vqsubq_s16(step2[2], step2[29]);
+ out[30] = vqsubq_s16(step2[1], step2[30]);
+ out[31] = vqsubq_s16(step2[0], step2[31]);
+}
+
+static INLINE void idct32_low16_new_neon(int16x8_t *in, int16x8_t *out,
+ int8_t cos_bit, int bit) {
+ (void)bit;
+ const int32_t *cospi = cospi_arr(cos_bit);
+ int16x8_t step1[32], step2[32];
+ int32x4_t t32[16];
+ const int16x4_t c0 =
+ create_s16x4_neon((int16_t *)(cospi + 8), (int16_t *)(cospi + 56),
+ (int16_t *)(cospi + 40), (int16_t *)(cospi + 24));
+ const int16x4_t c1 =
+ create_s16x4_neon((int16_t *)(cospi + 32), (int16_t *)(cospi + 32),
+ (int16_t *)(cospi + 16), (int16_t *)(cospi + 48));
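+  // c0 and c1 pack the cosine constants consumed by the btf_16_lane_*
+  // butterfly helpers in the stages below, which pick individual cosines out
+  // of these vectors by lane.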
+
+ // stage 1
+ // stage 2
+
+ btf_16_neon(in[1], cospi[62], cospi[2], &step2[16], &step2[31]);
+ btf_16_neon(in[15], -cospi[34], cospi[30], &step2[17], &step2[30]);
+ btf_16_neon(in[9], cospi[46], cospi[18], &step2[18], &step2[29]);
+ btf_16_neon(in[7], -cospi[50], cospi[14], &step2[19], &step2[28]);
+ btf_16_neon(in[5], cospi[54], cospi[10], &step2[20], &step2[27]);
+ btf_16_neon(in[11], -cospi[42], cospi[22], &step2[21], &step2[26]);
+ btf_16_neon(in[13], cospi[38], cospi[26], &step2[22], &step2[25]);
+ btf_16_neon(in[3], -cospi[58], cospi[6], &step2[23], &step2[24]);
+
+ step2[0] = in[0];
+ step2[2] = in[8];
+ step2[4] = in[4];
+ step2[6] = in[12];
+ step2[8] = in[2];
+ step2[10] = in[10];
+ step2[12] = in[6];
+ step2[14] = in[14];
+
+ // stage 3
+
+ btf_16_neon(step2[8], cospi[60], cospi[4], &step1[8], &step1[15]);
+ btf_16_neon(step2[14], -cospi[36], cospi[28], &step1[9], &step1[14]);
+ btf_16_neon(step2[10], cospi[44], cospi[20], &step1[10], &step1[13]);
+ btf_16_neon(step2[12], -cospi[52], cospi[12], &step1[11], &step1[12]);
+
+ step1[0] = step2[0];
+ step1[2] = step2[2];
+ step1[4] = step2[4];
+ step1[6] = step2[6];
+ step1[16] = vqaddq_s16(step2[16], step2[17]);
+ step1[17] = vqsubq_s16(step2[16], step2[17]);
+ step1[18] = vqsubq_s16(step2[19], step2[18]);
+ step1[19] = vqaddq_s16(step2[19], step2[18]);
+ step1[20] = vqaddq_s16(step2[20], step2[21]);
+ step1[21] = vqsubq_s16(step2[20], step2[21]);
+ step1[22] = vqsubq_s16(step2[23], step2[22]);
+ step1[23] = vqaddq_s16(step2[23], step2[22]);
+ step1[24] = vqaddq_s16(step2[24], step2[25]);
+ step1[25] = vqsubq_s16(step2[24], step2[25]);
+ step1[26] = vqsubq_s16(step2[27], step2[26]);
+ step1[27] = vqaddq_s16(step2[27], step2[26]);
+ step1[28] = vqaddq_s16(step2[28], step2[29]);
+ step1[29] = vqsubq_s16(step2[28], step2[29]);
+ step1[30] = vqsubq_s16(step2[31], step2[30]);
+ step1[31] = vqaddq_s16(step2[31], step2[30]);
+
+ // stage 4
+
+ btf_16_neon(step1[4], cospi[56], cospi[8], &step2[4], &step2[7]);
+ btf_16_neon(step1[6], -cospi[40], cospi[24], &step2[5], &step2[6]);
+ btf_16_lane_0_1_neon(step1[30], step1[17], c0, &step2[30], &step2[17]);
+ btf_16_lane_1_0_neon(vnegq_s16(step1[18]), vnegq_s16(step1[29]), c0,
+ &step2[18], &step2[29]);
+ btf_16_lane_2_3_neon(step1[26], step1[21], c0, &step2[26], &step2[21]);
+ btf_16_lane_3_2_neon(vnegq_s16(step1[22]), vnegq_s16(step1[25]), c0,
+ &step2[22], &step2[25]);
+
+ step2[0] = step1[0];
+ step2[2] = step1[2];
+ step2[8] = vqaddq_s16(step1[8], step1[9]);
+ step2[9] = vqsubq_s16(step1[8], step1[9]);
+ step2[10] = vqsubq_s16(step1[11], step1[10]);
+ step2[11] = vqaddq_s16(step1[11], step1[10]);
+ step2[12] = vqaddq_s16(step1[12], step1[13]);
+ step2[13] = vqsubq_s16(step1[12], step1[13]);
+ step2[14] = vqsubq_s16(step1[15], step1[14]);
+ step2[15] = vqaddq_s16(step1[15], step1[14]);
+ step2[16] = step1[16];
+ step2[19] = step1[19];
+ step2[20] = step1[20];
+ step2[23] = step1[23];
+ step2[24] = step1[24];
+ step2[27] = step1[27];
+ step2[28] = step1[28];
+ step2[31] = step1[31];
+
+ // stage 5
+
+ t32[0] = vmull_n_s16(vget_low_s16(step2[0]), cospi[32]);
+ t32[1] = vmull_n_s16(vget_high_s16(step2[0]), cospi[32]);
+
+ step1[0] = vcombine_s16(vrshrn_n_s32(t32[0], INV_COS_BIT),
+ vrshrn_n_s32(t32[1], INV_COS_BIT));
+
+ btf_16_neon(step2[2], cospi[48], cospi[16], &step1[2], &step1[3]);
+ btf_16_lane_2_3_neon(step2[14], step2[9], c1, &step1[14], &step1[9]);
+ btf_16_lane_3_2_neon(vnegq_s16(step2[10]), vnegq_s16(step2[13]), c1,
+ &step1[10], &step1[13]);
+
+ step1[4] = vqaddq_s16(step2[4], step2[5]);
+ step1[5] = vqsubq_s16(step2[4], step2[5]);
+ step1[6] = vqsubq_s16(step2[7], step2[6]);
+ step1[7] = vqaddq_s16(step2[7], step2[6]);
+ step1[8] = step2[8];
+ step1[11] = step2[11];
+ step1[12] = step2[12];
+ step1[15] = step2[15];
+ step1[16] = vqaddq_s16(step2[16], step2[19]);
+ step1[17] = vqaddq_s16(step2[17], step2[18]);
+ step1[18] = vqsubq_s16(step2[17], step2[18]);
+ step1[19] = vqsubq_s16(step2[16], step2[19]);
+ step1[20] = vqsubq_s16(step2[23], step2[20]);
+ step1[21] = vqsubq_s16(step2[22], step2[21]);
+ step1[22] = vqaddq_s16(step2[22], step2[21]);
+ step1[23] = vqaddq_s16(step2[23], step2[20]);
+ step1[24] = vqaddq_s16(step2[24], step2[27]);
+ step1[25] = vqaddq_s16(step2[25], step2[26]);
+ step1[26] = vqsubq_s16(step2[25], step2[26]);
+ step1[27] = vqsubq_s16(step2[24], step2[27]);
+ step1[28] = vqsubq_s16(step2[31], step2[28]);
+ step1[29] = vqsubq_s16(step2[30], step2[29]);
+ step1[30] = vqaddq_s16(step2[30], step2[29]);
+ step1[31] = vqaddq_s16(step2[31], step2[28]);
+
+ // stage 6
+
+ btf_16_lane_0_1_neon(step1[6], step1[5], c1, &step2[6], &step2[5]);
+ btf_16_lane_2_3_neon(step1[29], step1[18], c1, &step2[29], &step2[18]);
+ btf_16_lane_2_3_neon(step1[28], step1[19], c1, &step2[28], &step2[19]);
+ btf_16_lane_3_2_neon(vnegq_s16(step1[20]), vnegq_s16(step1[27]), c1,
+ &step2[20], &step2[27]);
+ btf_16_lane_3_2_neon(vnegq_s16(step1[21]), vnegq_s16(step1[26]), c1,
+ &step2[21], &step2[26]);
+
+ step2[0] = vqaddq_s16(step1[0], step1[3]);
+ step2[1] = vqaddq_s16(step1[0], step1[2]);
+ step2[2] = vqsubq_s16(step1[0], step1[2]);
+ step2[3] = vqsubq_s16(step1[0], step1[3]);
+ step2[4] = step1[4];
+ step2[7] = step1[7];
+ step2[8] = vqaddq_s16(step1[8], step1[11]);
+ step2[9] = vqaddq_s16(step1[9], step1[10]);
+ step2[10] = vqsubq_s16(step1[9], step1[10]);
+ step2[11] = vqsubq_s16(step1[8], step1[11]);
+ step2[12] = vqsubq_s16(step1[15], step1[12]);
+ step2[13] = vqsubq_s16(step1[14], step1[13]);
+ step2[14] = vqaddq_s16(step1[14], step1[13]);
+ step2[15] = vqaddq_s16(step1[15], step1[12]);
+ step2[16] = step1[16];
+ step2[17] = step1[17];
+ step2[22] = step1[22];
+ step2[23] = step1[23];
+ step2[24] = step1[24];
+ step2[25] = step1[25];
+ step2[30] = step1[30];
+ step2[31] = step1[31];
+
+ // stage 7
+
+ btf_16_lane_0_1_neon(step2[13], step2[10], c1, &step1[13], &step1[10]);
+ btf_16_lane_0_1_neon(step2[12], step2[11], c1, &step1[12], &step1[11]);
+
+ step1[0] = vqaddq_s16(step2[0], step2[7]);
+ step1[1] = vqaddq_s16(step2[1], step2[6]);
+ step1[2] = vqaddq_s16(step2[2], step2[5]);
+ step1[3] = vqaddq_s16(step2[3], step2[4]);
+ step1[4] = vqsubq_s16(step2[3], step2[4]);
+ step1[5] = vqsubq_s16(step2[2], step2[5]);
+ step1[6] = vqsubq_s16(step2[1], step2[6]);
+ step1[7] = vqsubq_s16(step2[0], step2[7]);
+ step1[8] = step2[8];
+ step1[9] = step2[9];
+ step1[14] = step2[14];
+ step1[15] = step2[15];
+ step1[16] = vqaddq_s16(step2[16], step2[23]);
+ step1[17] = vqaddq_s16(step2[17], step2[22]);
+ step1[18] = vqaddq_s16(step2[18], step2[21]);
+ step1[19] = vqaddq_s16(step2[19], step2[20]);
+ step1[20] = vqsubq_s16(step2[19], step2[20]);
+ step1[21] = vqsubq_s16(step2[18], step2[21]);
+ step1[22] = vqsubq_s16(step2[17], step2[22]);
+ step1[23] = vqsubq_s16(step2[16], step2[23]);
+ step1[24] = vqsubq_s16(step2[31], step2[24]);
+ step1[25] = vqsubq_s16(step2[30], step2[25]);
+ step1[26] = vqsubq_s16(step2[29], step2[26]);
+ step1[27] = vqsubq_s16(step2[28], step2[27]);
+ step1[28] = vqaddq_s16(step2[27], step2[28]);
+ step1[29] = vqaddq_s16(step2[26], step2[29]);
+ step1[30] = vqaddq_s16(step2[25], step2[30]);
+ step1[31] = vqaddq_s16(step2[24], step2[31]);
+
+ // stage 8
+
+ btf_16_lane_0_1_neon(step1[27], step1[20], c1, &step2[27], &step2[20]);
+ btf_16_lane_0_1_neon(step1[26], step1[21], c1, &step2[26], &step2[21]);
+ btf_16_lane_0_1_neon(step1[25], step1[22], c1, &step2[25], &step2[22]);
+ btf_16_lane_0_1_neon(step1[24], step1[23], c1, &step2[24], &step2[23]);
+
+ step2[0] = vqaddq_s16(step1[0], step1[15]);
+ step2[1] = vqaddq_s16(step1[1], step1[14]);
+ step2[2] = vqaddq_s16(step1[2], step1[13]);
+ step2[3] = vqaddq_s16(step1[3], step1[12]);
+ step2[4] = vqaddq_s16(step1[4], step1[11]);
+ step2[5] = vqaddq_s16(step1[5], step1[10]);
+ step2[6] = vqaddq_s16(step1[6], step1[9]);
+ step2[7] = vqaddq_s16(step1[7], step1[8]);
+ step2[8] = vqsubq_s16(step1[7], step1[8]);
+ step2[9] = vqsubq_s16(step1[6], step1[9]);
+ step2[10] = vqsubq_s16(step1[5], step1[10]);
+ step2[11] = vqsubq_s16(step1[4], step1[11]);
+ step2[12] = vqsubq_s16(step1[3], step1[12]);
+ step2[13] = vqsubq_s16(step1[2], step1[13]);
+ step2[14] = vqsubq_s16(step1[1], step1[14]);
+ step2[15] = vqsubq_s16(step1[0], step1[15]);
+ step2[16] = step1[16];
+ step2[17] = step1[17];
+ step2[18] = step1[18];
+ step2[19] = step1[19];
+ step2[28] = step1[28];
+ step2[29] = step1[29];
+ step2[30] = step1[30];
+ step2[31] = step1[31];
+
+ // stage 9
+
+ out[0] = vqaddq_s16(step2[0], step2[31]);
+ out[1] = vqaddq_s16(step2[1], step2[30]);
+ out[2] = vqaddq_s16(step2[2], step2[29]);
+ out[3] = vqaddq_s16(step2[3], step2[28]);
+ out[4] = vqaddq_s16(step2[4], step2[27]);
+ out[5] = vqaddq_s16(step2[5], step2[26]);
+ out[6] = vqaddq_s16(step2[6], step2[25]);
+ out[7] = vqaddq_s16(step2[7], step2[24]);
+ out[8] = vqaddq_s16(step2[8], step2[23]);
+ out[9] = vqaddq_s16(step2[9], step2[22]);
+ out[10] = vqaddq_s16(step2[10], step2[21]);
+ out[11] = vqaddq_s16(step2[11], step2[20]);
+ out[12] = vqaddq_s16(step2[12], step2[19]);
+ out[13] = vqaddq_s16(step2[13], step2[18]);
+ out[14] = vqaddq_s16(step2[14], step2[17]);
+ out[15] = vqaddq_s16(step2[15], step2[16]);
+ out[16] = vqsubq_s16(step2[15], step2[16]);
+ out[17] = vqsubq_s16(step2[14], step2[17]);
+ out[18] = vqsubq_s16(step2[13], step2[18]);
+ out[19] = vqsubq_s16(step2[12], step2[19]);
+ out[20] = vqsubq_s16(step2[11], step2[20]);
+ out[21] = vqsubq_s16(step2[10], step2[21]);
+ out[22] = vqsubq_s16(step2[9], step2[22]);
+ out[23] = vqsubq_s16(step2[8], step2[23]);
+ out[24] = vqsubq_s16(step2[7], step2[24]);
+ out[25] = vqsubq_s16(step2[6], step2[25]);
+ out[26] = vqsubq_s16(step2[5], step2[26]);
+ out[27] = vqsubq_s16(step2[4], step2[27]);
+ out[28] = vqsubq_s16(step2[3], step2[28]);
+ out[29] = vqsubq_s16(step2[2], step2[29]);
+ out[30] = vqsubq_s16(step2[1], step2[30]);
+ out[31] = vqsubq_s16(step2[0], step2[31]);
+}
+
+// 1D transform kernels for blocks whose eob lies at DC or within the
+// top-left 8x8, 16x16 or 32x32 corner.
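+// Indexed as [tx size][1D transform type][eob class]; the eob class is derived
+// from the eob position via lowbd_txfm_all_1d_zeros_idx.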
+static const transform_1d_neon
+ lowbd_txfm_all_1d_zeros_w8_arr[TX_SIZES][ITX_TYPES_1D][4] = {
+ {
+ { av1_idct4_new, av1_idct4_new, NULL, NULL },
+ { av1_iadst4_new, av1_iadst4_new, NULL, NULL },
+ { av1_iidentity4_c, av1_iidentity4_c, NULL, NULL },
+ },
+ { { av1_idct8_new, av1_idct8_new, NULL, NULL },
+ { av1_iadst8_new, av1_iadst8_new, NULL, NULL },
+ { av1_iidentity8_c, av1_iidentity8_c, NULL, NULL } },
+ {
+ { av1_idct16_new, av1_idct16_new, av1_idct16_new, NULL },
+ { av1_iadst16_new, av1_iadst16_new, av1_iadst16_new, NULL },
+ { av1_iidentity16_c, av1_iidentity16_c, av1_iidentity16_c, NULL },
+ },
+ { { av1_idct32_new, av1_idct32_new, av1_idct32_new, av1_idct32_new },
+ { NULL, NULL, NULL, NULL },
+ { av1_iidentity32_c, av1_iidentity32_c, av1_iidentity32_c,
+ av1_iidentity32_c } },
+ { { av1_idct64_new, av1_idct64_new, av1_idct64_new, av1_idct64_new },
+ { NULL, NULL, NULL, NULL },
+ { NULL, NULL, NULL, NULL } }
+ };
+
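+// NEON counterparts of the table above; each row provides kernels specialized
+// for progressively larger non-zero regions (DC only, first 8, first 16, full).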
+static const transform_neon
+ lowbd_txfm_all_1d_zeros_w_arr[TX_SIZES][ITX_TYPES_1D][4] = {
+ {
+ { NULL, NULL, NULL, NULL },
+ { NULL, NULL, NULL, NULL },
+ { NULL, NULL, NULL, NULL },
+ },
+ { { idct8_low1_new_neon, idct8_new_neon, NULL, NULL },
+ { iadst8_low1_new_neon, iadst8_new_neon, NULL, NULL },
+ { identity8_new_neon, identity8_new_neon, NULL, NULL } },
+ {
+ { idct16_low1_new_neon, idct16_low8_new_neon, idct16_new_neon, NULL },
+ { iadst16_low1_new_neon, iadst16_low8_new_neon, iadst16_new_neon,
+ NULL },
+ { identity16_new_neon, identity16_new_neon, identity16_new_neon,
+ NULL },
+ },
+ { { idct32_low1_new_neon, idct32_low8_new_neon, idct32_low16_new_neon,
+ idct32_new_neon },
+ { NULL, NULL, NULL, NULL },
+ { identity32_new_neon, identity32_new_neon, identity32_new_neon,
+ identity32_new_neon } },
+ { { NULL, NULL, NULL, NULL },
+ { NULL, NULL, NULL, NULL },
+ { NULL, NULL, NULL, NULL } }
+ };
+
+static INLINE void lowbd_inv_txfm2d_add_wxh_idtx_neon(
+ const int32_t *input, uint8_t *output, int stride, TX_TYPE tx_type,
+ TX_SIZE tx_size, int eob) {
+ DECLARE_ALIGNED(32, int, txfm_buf[32 * 32 + 32 + 32]);
+ int32_t *temp_in = txfm_buf;
+
+ int eobx, eoby;
+ get_eobx_eoby_scan_default(&eobx, &eoby, tx_size, eob);
+ const int8_t *shift = inv_txfm_shift_ls[tx_size];
+ const int txw_idx = get_txw_idx(tx_size);
+ const int txh_idx = get_txh_idx(tx_size);
+ const int cos_bit_col = inv_cos_bit_col[txw_idx][txh_idx];
+ const int cos_bit_row = inv_cos_bit_row[txw_idx][txh_idx];
+ const int txfm_size_col = tx_size_wide[tx_size];
+ const int txfm_size_row = tx_size_high[tx_size];
+ const int buf_size_nonzero_h_div8 = (eoby + 8) >> 3;
+
+ const int rect_type = get_rect_tx_log_ratio(txfm_size_col, txfm_size_row);
+ const int buf_offset = AOMMAX(txfm_size_row, txfm_size_col);
+
+ int32_t *temp_out = temp_in + buf_offset;
+ int32_t *buf = temp_out + buf_offset;
+ int32_t *buf_ptr = buf;
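+  // txfm_buf layout: temp_in and temp_out are scratch for a single 1D row or
+  // column, buf holds the full output of the row pass.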
+ const int8_t stage_range[MAX_TXFM_STAGE_NUM] = { 16 };
+ int r, bd = 8;
+
+ const int fun_idx_x = lowbd_txfm_all_1d_zeros_idx[eobx];
+ const int fun_idx_y = lowbd_txfm_all_1d_zeros_idx[eoby];
+ const transform_1d_neon row_txfm =
+ lowbd_txfm_all_1d_zeros_w8_arr[txw_idx][hitx_1d_tab[tx_type]][fun_idx_x];
+ const transform_1d_neon col_txfm =
+ lowbd_txfm_all_1d_zeros_w8_arr[txh_idx][vitx_1d_tab[tx_type]][fun_idx_y];
+
+ assert(col_txfm != NULL);
+ assert(row_txfm != NULL);
+
+ // row tx
+ int row_start = (buf_size_nonzero_h_div8 * 8);
+ for (int i = 0; i < row_start; i++) {
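+    // Rectangular blocks with a 2:1 aspect ratio pre-scale the input by
+    // 1/sqrt(2) (NewInvSqrt2) so the 2D transform keeps the same gain as the
+    // square case.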
+ if (abs(rect_type) == 1) {
+ for (int j = 0; j < txfm_size_col; j++)
+ temp_in[j] = round_shift((int64_t)input[j] * NewInvSqrt2, NewSqrt2Bits);
+ row_txfm(temp_in, buf_ptr, cos_bit_row, stage_range);
+ } else {
+ row_txfm(input, buf_ptr, cos_bit_row, stage_range);
+ }
+ av1_round_shift_array(buf_ptr, txfm_size_col, -shift[0]);
+ input += txfm_size_col;
+ buf_ptr += txfm_size_col;
+ }
+
+  // Zero the rows that were not processed by the row transform.
+ memset(buf_ptr, 0,
+ sizeof(int32_t) * txfm_size_col * (txfm_size_row - row_start));
+
+ // col tx
+ for (int c = 0; c < txfm_size_col; c++) {
+ for (r = 0; r < txfm_size_row; ++r) temp_in[r] = buf[r * txfm_size_col + c];
+
+ col_txfm(temp_in, temp_out, cos_bit_col, stage_range);
+ av1_round_shift_array(temp_out, txfm_size_row, -shift[1]);
+
+ for (r = 0; r < txfm_size_row; ++r) {
+ output[r * stride + c] =
+ highbd_clip_pixel_add(output[r * stride + c], temp_out[r], bd);
+ }
+ }
+}
+
+static INLINE void lowbd_inv_txfm2d_add_idtx_neon(const int32_t *input,
+ uint8_t *output, int stride,
+ TX_TYPE tx_type,
+ TX_SIZE tx_size, int eob) {
+ int16x8_t a[32 * 4];
+ int16x8_t b[32 * 4];
+ int eobx, eoby;
+ get_eobx_eoby_scan_default(&eobx, &eoby, tx_size, eob);
+ const int8_t *shift = inv_txfm_shift_ls[tx_size];
+ const int txw_idx = get_txw_idx(tx_size);
+ const int txh_idx = get_txh_idx(tx_size);
+ const int cos_bit_col = inv_cos_bit_col[txw_idx][txh_idx];
+ const int cos_bit_row = inv_cos_bit_row[txw_idx][txh_idx];
+ const int txfm_size_col = tx_size_wide[tx_size];
+ const int txfm_size_row = tx_size_high[tx_size];
+ lowbd_inv_txfm2d_memset_neon(&a[0], (txfm_size_col * (txfm_size_row) >> 3),
+ 0);
+ lowbd_inv_txfm2d_memset_neon(&b[0], (txfm_size_col * (txfm_size_row) >> 3),
+ 0);
+ const int buf_size_w_div8 = txfm_size_col >> 3;
+ const int rect_type = get_rect_tx_log_ratio(txfm_size_col, txfm_size_row);
+ const int buf_size_nonzero_h_div8 = (eoby + 8) >> 3;
+ const int buf_size_nonzero_w_div8 = (eobx + 8) >> 3;
+ const int fun_idx_x = lowbd_txfm_all_1d_zeros_idx[eobx];
+ const int fun_idx_y = lowbd_txfm_all_1d_zeros_idx[eoby];
+ const int32_t *input_1;
+ int temp_b = 0;
+ const transform_neon row_txfm =
+ lowbd_txfm_all_1d_zeros_w_arr[txw_idx][hitx_1d_tab[tx_type]][fun_idx_x];
+ const transform_neon col_txfm =
+ lowbd_txfm_all_1d_zeros_w_arr[txh_idx][vitx_1d_tab[tx_type]][fun_idx_y];
+
+ assert(col_txfm != NULL);
+ assert(row_txfm != NULL);
+
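+  // Row pass: each 8x8 tile of 32-bit coefficients is loaded as 16-bit vectors
+  // and transposed so one vector holds a single coefficient position across 8
+  // rows, letting the 1D transform work on 8 rows at once; the results are
+  // transposed into b[] for the column pass.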
+ for (int i = 0; i < buf_size_nonzero_h_div8; i++) {
+ input_1 = input;
+ for (int j = 0; j < buf_size_nonzero_w_div8; ++j) {
+ int k = j * 8 + i * txfm_size_col;
+ load_buffer_32bit_to_16bit_neon(input_1, &a[k], txfm_size_col);
+ transpose_s16_8x8q(&a[k], &a[k]);
+ input_1 += 8;
+ }
+ input += (txfm_size_col * 8);
+ if (abs(rect_type) == 1) {
+ int y = i * txfm_size_col;
+ round_shift_for_rect(&a[y], &a[y], txfm_size_col);
+ }
+ row_txfm(&a[i * txfm_size_col], &a[i * txfm_size_col], cos_bit_row, 0);
+ av1_round_shift_array_16_neon(&a[i * txfm_size_col], txfm_size_col,
+ -shift[0]);
+ for (int j = 0; j < buf_size_w_div8; ++j) {
+ int k = j * 8 + i * txfm_size_col;
+ transpose_s16_8x8q(&a[k], &b[temp_b + txfm_size_row * j]);
+ }
+ temp_b += 8;
+ }
+ for (int j = 0; j < buf_size_w_div8; ++j) {
+ col_txfm(&b[j * txfm_size_row], &b[j * txfm_size_row], cos_bit_col, 0);
+ av1_round_shift_array_16_neon(&b[j * txfm_size_row], txfm_size_row,
+ -shift[1]);
+ }
+ if (txfm_size_col >= 16) {
+ for (int i = 0; i < (txfm_size_col >> 4); i++) {
+ lowbd_add_flip_buffer_16xn_neon(
+ &b[i * txfm_size_row * 2], output + 16 * i, stride, 0, txfm_size_row);
+ }
+ } else if (txfm_size_col == 8) {
+ lowbd_add_flip_buffer_8xn_neon(b, output, stride, 0, txfm_size_row);
+ }
+}
+
+static INLINE void lowbd_inv_txfm2d_add_v_wxh_identity_neon(
+ const int32_t *input, uint8_t *output, int stride, TX_TYPE tx_type,
+ TX_SIZE tx_size, int eob) {
+ DECLARE_ALIGNED(32, int, txfm_buf[32 * 32 + 32 + 32]);
+ int32_t *temp_in = txfm_buf;
+
+ int eobx, eoby;
+ get_eobx_eoby_scan_v_identity(&eobx, &eoby, tx_size, eob);
+ const int8_t *shift = inv_txfm_shift_ls[tx_size];
+ const int txw_idx = get_txw_idx(tx_size);
+ const int txh_idx = get_txh_idx(tx_size);
+ const int cos_bit_col = inv_cos_bit_col[txw_idx][txh_idx];
+ const int cos_bit_row = inv_cos_bit_row[txw_idx][txh_idx];
+ const int txfm_size_col = tx_size_wide[tx_size];
+ const int txfm_size_row = tx_size_high[tx_size];
+ const int buf_size_nonzero_h_div8 = (eoby + 8) >> 3;
+
+ const int rect_type = get_rect_tx_log_ratio(txfm_size_col, txfm_size_row);
+ const int buf_offset = AOMMAX(txfm_size_row, txfm_size_col);
+
+ int32_t *temp_out = temp_in + buf_offset;
+ int32_t *buf = temp_out + buf_offset;
+ int32_t *buf_ptr = buf;
+ const int8_t stage_range[MAX_TXFM_STAGE_NUM] = { 16 };
+ int r, bd = 8;
+
+ const int fun_idx_x = lowbd_txfm_all_1d_zeros_idx[eobx];
+ const int fun_idx_y = lowbd_txfm_all_1d_zeros_idx[eoby];
+ const transform_1d_neon row_txfm =
+ lowbd_txfm_all_1d_zeros_w8_arr[txw_idx][hitx_1d_tab[tx_type]][fun_idx_x];
+ const transform_1d_neon col_txfm =
+ lowbd_txfm_all_1d_zeros_w8_arr[txh_idx][vitx_1d_tab[tx_type]][fun_idx_y];
+
+ assert(col_txfm != NULL);
+ assert(row_txfm != NULL);
+ int ud_flip, lr_flip;
+ get_flip_cfg(tx_type, &ud_flip, &lr_flip);
+
+ // row tx
+ int row_start = (buf_size_nonzero_h_div8 * 8);
+ for (int i = 0; i < row_start; i++) {
+ if (abs(rect_type) == 1) {
+ for (int j = 0; j < txfm_size_col; j++)
+ temp_in[j] = round_shift((int64_t)input[j] * NewInvSqrt2, NewSqrt2Bits);
+ row_txfm(temp_in, buf_ptr, cos_bit_row, stage_range);
+ } else {
+ row_txfm(input, buf_ptr, cos_bit_row, stage_range);
+ }
+ av1_round_shift_array(buf_ptr, txfm_size_col, -shift[0]);
+ input += txfm_size_col;
+ buf_ptr += txfm_size_col;
+ }
+  // Zero the rows that were not processed by the row transform.
+ memset(buf_ptr, 0,
+ sizeof(int32_t) * txfm_size_col * (txfm_size_row - row_start));
+
+ // col tx
+ for (int c = 0; c < txfm_size_col; c++) {
+ if (lr_flip == 0) {
+ for (r = 0; r < txfm_size_row; ++r)
+ temp_in[r] = buf[r * txfm_size_col + c];
+ } else {
+ // flip left right
+ for (r = 0; r < txfm_size_row; ++r)
+ temp_in[r] = buf[r * txfm_size_col + (txfm_size_col - c - 1)];
+ }
+ col_txfm(temp_in, temp_out, cos_bit_col, stage_range);
+ av1_round_shift_array(temp_out, txfm_size_row, -shift[1]);
+
+ if (ud_flip == 0) {
+ for (r = 0; r < txfm_size_row; ++r) {
+ output[r * stride + c] =
+ highbd_clip_pixel_add(output[r * stride + c], temp_out[r], bd);
+ }
+ } else {
+ // flip upside down
+ for (r = 0; r < txfm_size_row; ++r) {
+ output[r * stride + c] = highbd_clip_pixel_add(
+ output[r * stride + c], temp_out[txfm_size_row - r - 1], bd);
+ }
+ }
+ }
+}
+
+static INLINE void lowbd_inv_txfm2d_add_v_identity_neon(
+ const int32_t *input, uint8_t *output, int stride, TX_TYPE tx_type,
+ TX_SIZE tx_size, int eob) {
+ int16x8_t a[16 * 2];
+ int16x8_t b[16 * 2];
+ int eobx, eoby, ud_flip, lr_flip;
+ get_eobx_eoby_scan_v_identity(&eobx, &eoby, tx_size, eob);
+ const int8_t *shift = inv_txfm_shift_ls[tx_size];
+ const int txw_idx = get_txw_idx(tx_size);
+ const int txh_idx = get_txh_idx(tx_size);
+ const int cos_bit_col = inv_cos_bit_col[txw_idx][txh_idx];
+ const int cos_bit_row = inv_cos_bit_row[txw_idx][txh_idx];
+ const int txfm_size_col = tx_size_wide[tx_size];
+ const int txfm_size_row = tx_size_high[tx_size];
+ lowbd_inv_txfm2d_memset_neon(&b[0], (txfm_size_col * (txfm_size_row) >> 3),
+ 0);
+ const int rect_type = get_rect_tx_log_ratio(txfm_size_col, txfm_size_row);
+ const int buf_size_w_div8 = txfm_size_col >> 3;
+ const int buf_size_nonzero_h_div8 = (eoby + 8) >> 3;
+ const int buf_size_nonzero_w_div8 = (eobx + 8) >> 3;
+ const int fun_idx_x = lowbd_txfm_all_1d_zeros_idx[eobx];
+ const int fun_idx_y = lowbd_txfm_all_1d_zeros_idx[eoby];
+ const int32_t *input_1;
+ int temp_b = 0;
+ const transform_neon row_txfm =
+ lowbd_txfm_all_1d_zeros_w_arr[txw_idx][hitx_1d_tab[tx_type]][fun_idx_x];
+ const transform_neon col_txfm =
+ lowbd_txfm_all_1d_zeros_w_arr[txh_idx][vitx_1d_tab[tx_type]][fun_idx_y];
+
+ assert(col_txfm != NULL);
+ assert(row_txfm != NULL);
+
+ get_flip_cfg(tx_type, &ud_flip, &lr_flip);
+
+ for (int i = 0; i < buf_size_nonzero_h_div8; i++) {
+ input_1 = input;
+ for (int j = 0; j < buf_size_nonzero_w_div8; ++j) {
+ int k = j * 8 + i * txfm_size_col;
+ load_buffer_32bit_to_16bit_neon(input_1, &a[k], txfm_size_col);
+ transpose_s16_8x8q(&a[k], &a[k]);
+ input_1 += 8;
+ }
+ input += (txfm_size_col * 8);
+ if (abs(rect_type) == 1) {
+ int y = i * txfm_size_col;
+ round_shift_for_rect(&a[y], &a[y], txfm_size_col);
+ }
+ row_txfm(&a[i * txfm_size_col], &a[i * txfm_size_col], cos_bit_row, 0);
+ av1_round_shift_array_16_neon(&a[i * txfm_size_col], txfm_size_col,
+ -shift[0]);
+ if (lr_flip == 1) {
+ for (int j = 0; j < buf_size_w_div8; ++j) {
+ int k = j * 8 + i * txfm_size_col;
+ flip_buf_ud_neon(&a[k], 8);
+ transpose_s16_8x8q(
+ &a[k], &b[temp_b + txfm_size_row * (buf_size_w_div8 - 1 - j)]);
+ }
+ temp_b += 8;
+ } else {
+ for (int j = 0; j < buf_size_w_div8; ++j) {
+ int k = j * 8 + i * txfm_size_col;
+ transpose_s16_8x8q(&a[k], &b[temp_b + txfm_size_row * j]);
+ }
+ temp_b += 8;
+ }
+ }
+ for (int j = 0; j < buf_size_w_div8; ++j) {
+ col_txfm(&b[j * txfm_size_row], &b[j * txfm_size_row], cos_bit_col, 0);
+ av1_round_shift_array_16_neon(&b[j * txfm_size_row], txfm_size_row,
+ -shift[1]);
+ }
+ if (txfm_size_col >= 16) {
+ for (int i = 0; i < (txfm_size_col >> 4); i++) {
+ lowbd_add_flip_buffer_16xn_neon(
+ &b[i * txfm_size_row * 2], output + 16 * i, stride, 0, txfm_size_row);
+ }
+ } else if (txfm_size_col == 8) {
+ lowbd_add_flip_buffer_8xn_neon(b, output, stride, 0, txfm_size_row);
+ }
+}
+
+static INLINE void lowbd_inv_txfm2d_add_h_wxh_identity_neon(
+ const int32_t *input, uint8_t *output, int stride, TX_TYPE tx_type,
+ TX_SIZE tx_size, int eob) {
+ DECLARE_ALIGNED(32, int, txfm_buf[32 * 32 + 32 + 32]);
+ int32_t *temp_in = txfm_buf;
+
+ int eobx, eoby;
+ get_eobx_eoby_scan_h_identity(&eobx, &eoby, tx_size, eob);
+ const int8_t *shift = inv_txfm_shift_ls[tx_size];
+ const int txw_idx = get_txw_idx(tx_size);
+ const int txh_idx = get_txh_idx(tx_size);
+ const int cos_bit_col = inv_cos_bit_col[txw_idx][txh_idx];
+ const int cos_bit_row = inv_cos_bit_row[txw_idx][txh_idx];
+ const int txfm_size_col = tx_size_wide[tx_size];
+ const int txfm_size_row = tx_size_high[tx_size];
+ const int buf_size_nonzero_h_div8 = (eoby + 8) >> 3;
+
+ const int rect_type = get_rect_tx_log_ratio(txfm_size_col, txfm_size_row);
+ const int buf_offset = AOMMAX(txfm_size_row, txfm_size_col);
+
+ int32_t *temp_out = temp_in + buf_offset;
+ int32_t *buf = temp_out + buf_offset;
+ int32_t *buf_ptr = buf;
+ const int8_t stage_range[MAX_TXFM_STAGE_NUM] = { 16 };
+ int r, bd = 8;
+
+ const int fun_idx_x = lowbd_txfm_all_1d_zeros_idx[eobx];
+ const int fun_idx_y = lowbd_txfm_all_1d_zeros_idx[eoby];
+ const transform_1d_neon row_txfm =
+ lowbd_txfm_all_1d_zeros_w8_arr[txw_idx][hitx_1d_tab[tx_type]][fun_idx_x];
+ const transform_1d_neon col_txfm =
+ lowbd_txfm_all_1d_zeros_w8_arr[txh_idx][vitx_1d_tab[tx_type]][fun_idx_y];
+
+ assert(col_txfm != NULL);
+ assert(row_txfm != NULL);
+ int ud_flip, lr_flip;
+ get_flip_cfg(tx_type, &ud_flip, &lr_flip);
+
+ // row tx
+ int row_start = (buf_size_nonzero_h_div8 * 8);
+ for (int i = 0; i < row_start; i++) {
+ if (abs(rect_type) == 1) {
+ for (int j = 0; j < txfm_size_col; j++)
+ temp_in[j] = round_shift((int64_t)input[j] * NewInvSqrt2, NewSqrt2Bits);
+ row_txfm(temp_in, buf_ptr, cos_bit_row, stage_range);
+ } else {
+ row_txfm(input, buf_ptr, cos_bit_row, stage_range);
+ }
+ av1_round_shift_array(buf_ptr, txfm_size_col, -shift[0]);
+ input += txfm_size_col;
+ buf_ptr += txfm_size_col;
+ }
+  // Zero the rows that were not processed by the row transform.
+ memset(buf_ptr, 0,
+ sizeof(int32_t) * txfm_size_col * (txfm_size_row - row_start));
+
+ // col tx
+ for (int c = 0; c < txfm_size_col; c++) {
+ if (lr_flip == 0) {
+ for (r = 0; r < txfm_size_row; ++r)
+ temp_in[r] = buf[r * txfm_size_col + c];
+ } else {
+ // flip left right
+ for (r = 0; r < txfm_size_row; ++r)
+ temp_in[r] = buf[r * txfm_size_col + (txfm_size_col - c - 1)];
+ }
+ col_txfm(temp_in, temp_out, cos_bit_col, stage_range);
+ av1_round_shift_array(temp_out, txfm_size_row, -shift[1]);
+
+ if (ud_flip == 0) {
+ for (r = 0; r < txfm_size_row; ++r) {
+ output[r * stride + c] =
+ highbd_clip_pixel_add(output[r * stride + c], temp_out[r], bd);
+ }
+ } else {
+ // flip upside down
+ for (r = 0; r < txfm_size_row; ++r) {
+ output[r * stride + c] = highbd_clip_pixel_add(
+ output[r * stride + c], temp_out[txfm_size_row - r - 1], bd);
+ }
+ }
+ }
+}
+
+static INLINE void lowbd_inv_txfm2d_add_h_identity_neon(
+ const int32_t *input, uint8_t *output, int stride, TX_TYPE tx_type,
+ TX_SIZE tx_size, int eob) {
+ int16x8_t a[16 * 2];
+ int16x8_t b[16 * 2];
+ int eobx, eoby, ud_flip, lr_flip;
+ get_eobx_eoby_scan_h_identity(&eobx, &eoby, tx_size, eob);
+ const int8_t *shift = inv_txfm_shift_ls[tx_size];
+ const int txw_idx = get_txw_idx(tx_size);
+ const int txh_idx = get_txh_idx(tx_size);
+ const int cos_bit_col = inv_cos_bit_col[txw_idx][txh_idx];
+ const int cos_bit_row = inv_cos_bit_row[txw_idx][txh_idx];
+ const int txfm_size_col = tx_size_wide[tx_size];
+ const int txfm_size_row = tx_size_high[tx_size];
+ lowbd_inv_txfm2d_memset_neon(&a[0], (txfm_size_col * (txfm_size_row) >> 3),
+ 0);
+ const int buf_size_w_div8 = txfm_size_col >> 3;
+ const int rect_type = get_rect_tx_log_ratio(txfm_size_col, txfm_size_row);
+ const int buf_size_nonzero_h_div8 = (eoby + 8) >> 3;
+ const int buf_size_nonzero_w_div8 = (eobx + 8) >> 3;
+ const int fun_idx_x = lowbd_txfm_all_1d_zeros_idx[eobx];
+ const int fun_idx_y = lowbd_txfm_all_1d_zeros_idx[eoby];
+ const int32_t *input_1;
+ int temp_b = 0;
+ const transform_neon row_txfm =
+ lowbd_txfm_all_1d_zeros_w_arr[txw_idx][hitx_1d_tab[tx_type]][fun_idx_x];
+ const transform_neon col_txfm =
+ lowbd_txfm_all_1d_zeros_w_arr[txh_idx][vitx_1d_tab[tx_type]][fun_idx_y];
+
+ assert(col_txfm != NULL);
+ assert(row_txfm != NULL);
+
+ get_flip_cfg(tx_type, &ud_flip, &lr_flip);
+
+ for (int i = 0; i < buf_size_nonzero_h_div8; i++) {
+ input_1 = input;
+ for (int j = 0; j < buf_size_nonzero_w_div8; ++j) {
+ int k = j * 8 + i * txfm_size_col;
+ load_buffer_32bit_to_16bit_neon(input_1, &a[k], txfm_size_col);
+ transpose_s16_8x8q(&a[k], &a[k]);
+ input_1 += 8;
+ }
+ input += (txfm_size_col * 8);
+ if (abs(rect_type) == 1) {
+ int y = i * txfm_size_col;
+ round_shift_for_rect(&a[y], &a[y], txfm_size_col);
+ }
+ row_txfm(&a[i * txfm_size_col], &a[i * txfm_size_col], cos_bit_row, 0);
+ av1_round_shift_array_16_neon(&a[i * txfm_size_col], txfm_size_col,
+ -shift[0]);
+ for (int j = 0; j < buf_size_w_div8; ++j) {
+ int k = j * 8 + i * txfm_size_col;
+ transpose_s16_8x8q(&a[k], &b[temp_b + txfm_size_row * j]);
+ }
+ temp_b += 8;
+ }
+ for (int j = 0; j < buf_size_w_div8; ++j) {
+ col_txfm(&b[j * txfm_size_row], &b[j * txfm_size_row], cos_bit_col, 0);
+ av1_round_shift_array_16_neon(&b[j * txfm_size_row], txfm_size_row,
+ -shift[1]);
+ }
+ if (txfm_size_col >= 16) {
+ for (int i = 0; i < (txfm_size_col >> 4); i++) {
+ lowbd_add_flip_buffer_16xn_neon(&b[i * txfm_size_row * 2],
+ output + 16 * i, stride, ud_flip,
+ txfm_size_row);
+ }
+ } else if (txfm_size_col == 8) {
+ lowbd_add_flip_buffer_8xn_neon(b, output, stride, ud_flip, txfm_size_row);
+ }
+}
+
+static INLINE void lowbd_inv_txfm2d_add_4x4_neon(const int32_t *input,
+ uint8_t *output, int stride,
+ TX_TYPE tx_type,
+ TX_SIZE tx_size, int eob) {
+ (void)eob;
+ DECLARE_ALIGNED(32, int, txfm_buf[4 * 4 + 8 + 8]);
+ int32_t *temp_in = txfm_buf;
+
+ const int8_t *shift = inv_txfm_shift_ls[tx_size];
+ const int txw_idx = get_txw_idx(tx_size);
+ const int txh_idx = get_txh_idx(tx_size);
+ const int cos_bit_row = inv_cos_bit_row[txw_idx][txh_idx];
+ const int cos_bit_col = inv_cos_bit_col[txw_idx][txh_idx];
+ const int txfm_size_col = tx_size_wide[tx_size];
+ const int txfm_size_row = tx_size_high[tx_size];
+ const int buf_offset = AOMMAX(txfm_size_row, txfm_size_col);
+ int32_t *temp_out = temp_in + buf_offset;
+ int32_t *buf = temp_out + buf_offset;
+ int32_t *buf_ptr = buf;
+ const int8_t stage_range[MAX_TXFM_STAGE_NUM] = { 16 };
+ int r, bd = 8;
+ const transform_1d_neon row_txfm =
+ lowbd_txfm_all_1d_arr[txw_idx][hitx_1d_tab[tx_type]];
+ const transform_1d_neon col_txfm =
+ lowbd_txfm_all_1d_arr[txh_idx][vitx_1d_tab[tx_type]];
+
+ int ud_flip, lr_flip;
+ get_flip_cfg(tx_type, &ud_flip, &lr_flip);
+
+ for (int i = 0; i < txfm_size_row; i++) {
+ row_txfm(input, buf_ptr, cos_bit_row, stage_range);
+
+ input += txfm_size_col;
+ buf_ptr += txfm_size_col;
+ }
+
+ for (int c = 0; c < txfm_size_col; ++c) {
+ if (lr_flip == 0) {
+ for (r = 0; r < txfm_size_row; ++r)
+ temp_in[r] = buf[r * txfm_size_col + c];
+ } else {
+ // flip left right
+ for (r = 0; r < txfm_size_row; ++r)
+ temp_in[r] = buf[r * txfm_size_col + (txfm_size_col - c - 1)];
+ }
+ col_txfm(temp_in, temp_out, cos_bit_col, stage_range);
+ av1_round_shift_array(temp_out, txfm_size_row, -shift[1]);
+
+ if (ud_flip == 0) {
+ for (r = 0; r < txfm_size_row; ++r) {
+ output[r * stride + c] =
+ highbd_clip_pixel_add(output[r * stride + c], temp_out[r], bd);
+ }
+ } else {
+ // flip upside down
+ for (r = 0; r < txfm_size_row; ++r) {
+ output[r * stride + c] = highbd_clip_pixel_add(
+ output[r * stride + c], temp_out[txfm_size_row - r - 1], bd);
+ }
+ }
+ }
+}
+
+void lowbd_inv_txfm2d_add_4x8_neon(const int32_t *input, uint8_t *output,
+ int stride, TX_TYPE tx_type, TX_SIZE tx_size,
+ int eob) {
+ (void)eob;
+ DECLARE_ALIGNED(32, int, txfm_buf[4 * 8 + 8 + 8]);
+ int32_t *temp_in = txfm_buf;
+
+ const int8_t *shift = inv_txfm_shift_ls[tx_size];
+ const int txw_idx = get_txw_idx(tx_size);
+ const int txh_idx = get_txh_idx(tx_size);
+ const int cos_bit_row = inv_cos_bit_row[txw_idx][txh_idx];
+ const int cos_bit_col = inv_cos_bit_col[txw_idx][txh_idx];
+ const int txfm_size_col = tx_size_wide[tx_size];
+ const int txfm_size_row = tx_size_high[tx_size];
+ const int buf_offset = AOMMAX(txfm_size_row, txfm_size_col);
+ int32_t *temp_out = temp_in + buf_offset;
+ int32_t *buf = temp_out + buf_offset;
+ int32_t *buf_ptr = buf;
+ const int8_t stage_range[MAX_TXFM_STAGE_NUM] = { 16 };
+ int r, bd = 8;
+ const transform_1d_neon row_txfm =
+ lowbd_txfm_all_1d_arr[txw_idx][hitx_1d_tab[tx_type]];
+ const transform_1d_neon col_txfm =
+ lowbd_txfm_all_1d_arr[txh_idx][vitx_1d_tab[tx_type]];
+
+ int ud_flip, lr_flip;
+ get_flip_cfg(tx_type, &ud_flip, &lr_flip);
+
+ for (int i = 0; i < txfm_size_row; i++) {
+ for (int j = 0; j < txfm_size_col; j++)
+ temp_in[j] = round_shift((int64_t)input[j] * NewInvSqrt2, NewSqrt2Bits);
+
+ row_txfm(temp_in, buf_ptr, cos_bit_row, stage_range);
+ input += txfm_size_col;
+ buf_ptr += txfm_size_col;
+ }
+
+ for (int c = 0; c < txfm_size_col; ++c) {
+ if (lr_flip == 0) {
+ for (r = 0; r < txfm_size_row; ++r)
+ temp_in[r] = buf[r * txfm_size_col + c];
+ } else {
+ // flip left right
+ for (r = 0; r < txfm_size_row; ++r)
+ temp_in[r] = buf[r * txfm_size_col + (txfm_size_col - c - 1)];
+ }
+ col_txfm(temp_in, temp_out, cos_bit_col, stage_range);
+ av1_round_shift_array(temp_out, txfm_size_row, -shift[1]);
+
+ if (ud_flip == 0) {
+ for (r = 0; r < txfm_size_row; ++r) {
+ output[r * stride + c] =
+ highbd_clip_pixel_add(output[r * stride + c], temp_out[r], bd);
+ }
+ } else {
+ // flip upside down
+ for (r = 0; r < txfm_size_row; ++r) {
+ output[r * stride + c] = highbd_clip_pixel_add(
+ output[r * stride + c], temp_out[txfm_size_row - r - 1], bd);
+ }
+ }
+ }
+}
+
+void lowbd_inv_txfm2d_add_8x4_neon(const int32_t *input, uint8_t *output,
+ int stride, TX_TYPE tx_type, TX_SIZE tx_size,
+ int eob) {
+ (void)eob;
+ DECLARE_ALIGNED(32, int, txfm_buf[8 * 4 + 8 + 8]);
+ int32_t *temp_in = txfm_buf;
+
+ const int8_t *shift = inv_txfm_shift_ls[tx_size];
+ const int txw_idx = get_txw_idx(tx_size);
+ const int txh_idx = get_txh_idx(tx_size);
+ const int cos_bit_row = inv_cos_bit_row[txw_idx][txh_idx];
+ const int cos_bit_col = inv_cos_bit_col[txw_idx][txh_idx];
+ const int txfm_size_col = tx_size_wide[tx_size];
+ const int txfm_size_row = tx_size_high[tx_size];
+ const int buf_offset = AOMMAX(txfm_size_row, txfm_size_col);
+ int32_t *temp_out = temp_in + buf_offset;
+ int32_t *buf = temp_out + buf_offset;
+ int32_t *buf_ptr = buf;
+ const int8_t stage_range[MAX_TXFM_STAGE_NUM] = { 16 };
+ int r, bd = 8;
+ const transform_1d_neon row_txfm =
+ lowbd_txfm_all_1d_arr[txw_idx][hitx_1d_tab[tx_type]];
+ const transform_1d_neon col_txfm =
+ lowbd_txfm_all_1d_arr[txh_idx][vitx_1d_tab[tx_type]];
+
+ int ud_flip, lr_flip;
+ get_flip_cfg(tx_type, &ud_flip, &lr_flip);
+
+ for (int i = 0; i < txfm_size_row; i++) {
+ for (int j = 0; j < txfm_size_col; j++)
+ temp_in[j] = round_shift((int64_t)input[j] * NewInvSqrt2, NewSqrt2Bits);
+
+ row_txfm(temp_in, buf_ptr, cos_bit_row, stage_range);
+ input += txfm_size_col;
+ buf_ptr += txfm_size_col;
+ }
+
+ for (int c = 0; c < txfm_size_col; ++c) {
+ if (lr_flip == 0) {
+ for (r = 0; r < txfm_size_row; ++r)
+ temp_in[r] = buf[r * txfm_size_col + c];
+ } else {
+ // flip left right
+ for (r = 0; r < txfm_size_row; ++r)
+ temp_in[r] = buf[r * txfm_size_col + (txfm_size_col - c - 1)];
+ }
+ col_txfm(temp_in, temp_out, cos_bit_col, stage_range);
+ av1_round_shift_array(temp_out, txfm_size_row, -shift[1]);
+
+ if (ud_flip == 0) {
+ for (r = 0; r < txfm_size_row; ++r) {
+ output[r * stride + c] =
+ highbd_clip_pixel_add(output[r * stride + c], temp_out[r], bd);
+ }
+ } else {
+ // flip upside down
+ for (r = 0; r < txfm_size_row; ++r) {
+ output[r * stride + c] = highbd_clip_pixel_add(
+ output[r * stride + c], temp_out[txfm_size_row - r - 1], bd);
+ }
+ }
+ }
+}
+
+void lowbd_inv_txfm2d_add_4x16_neon(const int32_t *input, uint8_t *output,
+ int stride, TX_TYPE tx_type,
+ TX_SIZE tx_size, int eob) {
+ (void)eob;
+ DECLARE_ALIGNED(32, int, txfm_buf[4 * 16 + 16 + 16]);
+ int32_t *temp_in = txfm_buf;
+
+ const int8_t *shift = inv_txfm_shift_ls[tx_size];
+ const int txw_idx = get_txw_idx(tx_size);
+ const int txh_idx = get_txh_idx(tx_size);
+ const int cos_bit_row = inv_cos_bit_row[txw_idx][txh_idx];
+ const int cos_bit_col = inv_cos_bit_col[txw_idx][txh_idx];
+ const int txfm_size_col = tx_size_wide[tx_size];
+ const int txfm_size_row = tx_size_high[tx_size];
+ const int buf_offset = AOMMAX(txfm_size_row, txfm_size_col);
+ int32_t *temp_out = temp_in + buf_offset;
+ int32_t *buf = temp_out + buf_offset;
+ int32_t *buf_ptr = buf;
+ const int8_t stage_range[MAX_TXFM_STAGE_NUM] = { 16 };
+ int r, bd = 8;
+ const transform_1d_neon row_txfm =
+ lowbd_txfm_all_1d_arr[txw_idx][hitx_1d_tab[tx_type]];
+ const transform_1d_neon col_txfm =
+ lowbd_txfm_all_1d_arr[txh_idx][vitx_1d_tab[tx_type]];
+
+ int ud_flip, lr_flip;
+ get_flip_cfg(tx_type, &ud_flip, &lr_flip);
+
+ for (int i = 0; i < txfm_size_row; i++) {
+ row_txfm(input, buf_ptr, cos_bit_row, stage_range);
+ av1_round_shift_array(buf_ptr, txfm_size_col, -shift[0]);
+ input += txfm_size_col;
+ buf_ptr += txfm_size_col;
+ }
+
+ for (int c = 0; c < txfm_size_col; ++c) {
+ if (lr_flip == 0) {
+ for (r = 0; r < txfm_size_row; ++r)
+ temp_in[r] = buf[r * txfm_size_col + c];
+ } else {
+ // flip left right
+ for (r = 0; r < txfm_size_row; ++r)
+ temp_in[r] = buf[r * txfm_size_col + (txfm_size_col - c - 1)];
+ }
+ col_txfm(temp_in, temp_out, cos_bit_col, stage_range);
+ av1_round_shift_array(temp_out, txfm_size_row, -shift[1]);
+
+ if (ud_flip == 0) {
+ for (r = 0; r < txfm_size_row; ++r) {
+ output[r * stride + c] =
+ highbd_clip_pixel_add(output[r * stride + c], temp_out[r], bd);
+ }
+ } else {
+ // flip upside down
+ for (r = 0; r < txfm_size_row; ++r) {
+ output[r * stride + c] = highbd_clip_pixel_add(
+ output[r * stride + c], temp_out[txfm_size_row - r - 1], bd);
+ }
+ }
+ }
+}
+
+void lowbd_inv_txfm2d_add_16x4_neon(const int32_t *input, uint8_t *output,
+ int stride, TX_TYPE tx_type,
+ TX_SIZE tx_size, int eob) {
+ (void)eob;
+
+ DECLARE_ALIGNED(32, int, txfm_buf[16 * 4 + 16 + 16]);
+ int32_t *temp_in = txfm_buf;
+
+ const int8_t *shift = inv_txfm_shift_ls[tx_size];
+ const int txw_idx = get_txw_idx(tx_size);
+ const int txh_idx = get_txh_idx(tx_size);
+ const int cos_bit_row = inv_cos_bit_row[txw_idx][txh_idx];
+ const int cos_bit_col = inv_cos_bit_col[txw_idx][txh_idx];
+ const int txfm_size_col = tx_size_wide[tx_size];
+ const int txfm_size_row = tx_size_high[tx_size];
+ const int buf_offset = AOMMAX(txfm_size_row, txfm_size_col);
+ int32_t *temp_out = temp_in + buf_offset;
+ int32_t *buf = temp_out + buf_offset;
+ int32_t *buf_ptr = buf;
+ const int8_t stage_range[MAX_TXFM_STAGE_NUM] = { 16 };
+ int r, bd = 8;
+ const transform_1d_neon row_txfm =
+ lowbd_txfm_all_1d_arr[txw_idx][hitx_1d_tab[tx_type]];
+ const transform_1d_neon col_txfm =
+ lowbd_txfm_all_1d_arr[txh_idx][vitx_1d_tab[tx_type]];
+
+ int ud_flip, lr_flip;
+ get_flip_cfg(tx_type, &ud_flip, &lr_flip);
+
+ for (int i = 0; i < txfm_size_row; i++) {
+ row_txfm(input, buf_ptr, cos_bit_row, stage_range);
+ av1_round_shift_array(buf_ptr, txfm_size_col, -shift[0]);
+ input += txfm_size_col;
+ buf_ptr += txfm_size_col;
+ }
+
+ for (int c = 0; c < txfm_size_col; ++c) {
+ if (lr_flip == 0) {
+ for (r = 0; r < txfm_size_row; ++r)
+ temp_in[r] = buf[r * txfm_size_col + c];
+ } else {
+ // flip left right
+ for (r = 0; r < txfm_size_row; ++r)
+ temp_in[r] = buf[r * txfm_size_col + (txfm_size_col - c - 1)];
+ }
+ col_txfm(temp_in, temp_out, cos_bit_col, stage_range);
+ av1_round_shift_array(temp_out, txfm_size_row, -shift[1]);
+
+ if (ud_flip == 0) {
+ for (r = 0; r < txfm_size_row; ++r) {
+ output[r * stride + c] =
+ highbd_clip_pixel_add(output[r * stride + c], temp_out[r], bd);
+ }
+ } else {
+ // flip upside down
+ for (r = 0; r < txfm_size_row; ++r) {
+ output[r * stride + c] = highbd_clip_pixel_add(
+ output[r * stride + c], temp_out[txfm_size_row - r - 1], bd);
+ }
+ }
+ }
+}
+
+static INLINE void lowbd_inv_txfm2d_add_wxh_no_identity_neon(
+ const int32_t *input, uint8_t *output, int stride, TX_TYPE tx_type,
+ TX_SIZE tx_size, int eob) {
+ DECLARE_ALIGNED(32, int, txfm_buf[64 * 64 + 64 + 64]);
+ int32_t *temp_in = txfm_buf;
+
+ int eobx, eoby, ud_flip, lr_flip, row_start;
+ get_eobx_eoby_scan_default(&eobx, &eoby, tx_size, eob);
+ const int8_t *shift = inv_txfm_shift_ls[tx_size];
+ const int txw_idx = get_txw_idx(tx_size);
+ const int txh_idx = get_txh_idx(tx_size);
+ const int cos_bit_col = inv_cos_bit_col[txw_idx][txh_idx];
+ const int cos_bit_row = inv_cos_bit_row[txw_idx][txh_idx];
+ const int txfm_size_col = tx_size_wide[tx_size];
+ const int txfm_size_row = tx_size_high[tx_size];
+ const int buf_size_nonzero_h_div8 = (eoby + 8) >> 3;
+ const int rect_type = get_rect_tx_log_ratio(txfm_size_col, txfm_size_row);
+ const int buf_offset = AOMMAX(txfm_size_row, txfm_size_col);
+
+ int32_t *temp_out = temp_in + buf_offset;
+ int32_t *buf = temp_out + buf_offset;
+ int32_t *buf_ptr = buf;
+ const int8_t stage_range[MAX_TXFM_STAGE_NUM] = { 16 };
+ const int bd = 8;
+ int r;
+
+ const int fun_idx_x = lowbd_txfm_all_1d_zeros_idx[eobx];
+ const int fun_idx_y = lowbd_txfm_all_1d_zeros_idx[eoby];
+ const transform_1d_neon row_txfm =
+ lowbd_txfm_all_1d_zeros_w8_arr[txw_idx][hitx_1d_tab[tx_type]][fun_idx_x];
+ const transform_1d_neon col_txfm =
+ lowbd_txfm_all_1d_zeros_w8_arr[txh_idx][vitx_1d_tab[tx_type]][fun_idx_y];
+
+ assert(col_txfm != NULL);
+ assert(row_txfm != NULL);
+
+ get_flip_cfg(tx_type, &ud_flip, &lr_flip);
+ row_start = (buf_size_nonzero_h_div8 << 3);
+
+ for (int i = 0; i < row_start; i++) {
+ if (abs(rect_type) == 1) {
+ for (int j = 0; j < txfm_size_col; j++)
+ temp_in[j] = round_shift((int64_t)input[j] * NewInvSqrt2, NewSqrt2Bits);
+ row_txfm(temp_in, buf_ptr, cos_bit_row, stage_range);
+ } else {
+ row_txfm(input, buf_ptr, cos_bit_row, stage_range);
+ }
+ av1_round_shift_array(buf_ptr, txfm_size_col, -shift[0]);
+ input += txfm_size_col;
+ buf_ptr += txfm_size_col;
+ }
+
+  // Zero the rows that were not processed by the row transform.
+ memset(buf_ptr, 0,
+ sizeof(int32_t) * txfm_size_col * (txfm_size_row - row_start));
+
+ for (int c = 0; c < txfm_size_col; c++) {
+ if (lr_flip == 0) {
+ for (r = 0; r < txfm_size_row; ++r)
+ temp_in[r] = buf[r * txfm_size_col + c];
+ } else {
+ // flip left right
+ for (r = 0; r < txfm_size_row; ++r)
+ temp_in[r] = buf[r * txfm_size_col + (txfm_size_col - c - 1)];
+ }
+ col_txfm(temp_in, temp_out, cos_bit_col, stage_range);
+ av1_round_shift_array(temp_out, txfm_size_row, -shift[1]);
+
+ if (ud_flip == 0) {
+ for (r = 0; r < txfm_size_row; ++r) {
+ output[r * stride + c] =
+ highbd_clip_pixel_add(output[r * stride + c], temp_out[r], bd);
+ }
+ } else {
+ // flip upside down
+ for (r = 0; r < txfm_size_row; ++r) {
+ output[r * stride + c] = highbd_clip_pixel_add(
+ output[r * stride + c], temp_out[txfm_size_row - r - 1], bd);
+ }
+ }
+ }
+}
+
+static INLINE void lowbd_inv_txfm2d_add_no_identity_neon(
+ const int32_t *input, uint8_t *output, int stride, TX_TYPE tx_type,
+ TX_SIZE tx_size, int eob) {
+ int16x8_t a[64 * 8];
+ int16x8_t b[64 * 8];
+ int eobx, eoby, ud_flip, lr_flip;
+ get_eobx_eoby_scan_default(&eobx, &eoby, tx_size, eob);
+ const int8_t *shift = inv_txfm_shift_ls[tx_size];
+ const int txw_idx = get_txw_idx(tx_size);
+ const int txh_idx = get_txh_idx(tx_size);
+ const int cos_bit_col = inv_cos_bit_col[txw_idx][txh_idx];
+ const int cos_bit_row = inv_cos_bit_row[txw_idx][txh_idx];
+ const int txfm_size_col = tx_size_wide[tx_size];
+ const int txfm_size_row = tx_size_high[tx_size];
+ const int rect_type = get_rect_tx_log_ratio(txfm_size_col, txfm_size_row);
+ const int buf_size_w_div8 = txfm_size_col >> 3;
+ const int buf_size_nonzero_h_div8 = (eoby + 8) >> 3;
+ const int buf_size_nonzero_w_div8 = (eobx + 8) >> 3;
+ const int fun_idx_x = lowbd_txfm_all_1d_zeros_idx[eobx];
+ const int fun_idx_y = lowbd_txfm_all_1d_zeros_idx[eoby];
+ const int32_t *input_1;
+ int temp_b = 0;
+
+ const transform_neon row_txfm =
+ lowbd_txfm_all_1d_zeros_w_arr[txw_idx][hitx_1d_tab[tx_type]][fun_idx_x];
+ const transform_neon col_txfm =
+ lowbd_txfm_all_1d_zeros_w_arr[txh_idx][vitx_1d_tab[tx_type]][fun_idx_y];
+
+ assert(col_txfm != NULL);
+ assert(row_txfm != NULL);
+
+ get_flip_cfg(tx_type, &ud_flip, &lr_flip);
+
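+  // Same two-pass layout as the identity paths above; a left/right flip is
+  // folded into the row-pass transposes and an up/down flip is applied when
+  // the result is added to the destination.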
+ for (int i = 0; i < buf_size_nonzero_h_div8; i++) {
+ input_1 = input;
+ for (int j = 0; j < buf_size_nonzero_w_div8; ++j) {
+ int k = j * 8 + i * txfm_size_col;
+ load_buffer_32bit_to_16bit_neon(input_1, &a[k], txfm_size_col);
+ transpose_s16_8x8q(&a[k], &a[k]);
+ input_1 += 8;
+ }
+ input += (txfm_size_col * 8);
+ if (abs(rect_type) == 1) {
+ int y = i * txfm_size_col;
+ round_shift_for_rect(&a[y], &a[y], txfm_size_col);
+ }
+ row_txfm(&a[i * txfm_size_col], &a[i * txfm_size_col], cos_bit_row, 0);
+ av1_round_shift_array_16_neon(&a[i * txfm_size_col], txfm_size_col,
+ -shift[0]);
+ if (lr_flip == 1) {
+ for (int j = 0; j < buf_size_w_div8; ++j) {
+ int k = j * 8 + i * txfm_size_col;
+ flip_buf_ud_neon(&a[k], 8);
+ transpose_s16_8x8q(
+ &a[k], &b[temp_b + txfm_size_row * (buf_size_w_div8 - 1 - j)]);
+ }
+ temp_b += 8;
+ } else {
+ for (int j = 0; j < buf_size_w_div8; ++j) {
+ int k = j * 8 + i * txfm_size_col;
+ transpose_s16_8x8q(&a[k], &b[temp_b + txfm_size_row * j]);
+ }
+ temp_b += 8;
+ }
+ }
+ for (int j = 0; j < buf_size_w_div8; ++j) {
+ col_txfm(&b[j * txfm_size_row], &b[j * txfm_size_row], cos_bit_col, 0);
+ av1_round_shift_array_16_neon(&b[j * txfm_size_row], txfm_size_row,
+ -shift[1]);
+ }
+
+ if (txfm_size_col >= 16) {
+ for (int i = 0; i < (txfm_size_col >> 4); i++) {
+ lowbd_add_flip_buffer_16xn_neon(&b[i * txfm_size_row * 2],
+ output + 16 * i, stride, ud_flip,
+ txfm_size_row);
+ }
+ } else if (txfm_size_col == 8) {
+ lowbd_add_flip_buffer_8xn_neon(b, output, stride, ud_flip, txfm_size_row);
+ }
+}
+
+static INLINE void lowbd_inv_txfm2d_add_wxh_universe_neon(
+ const int32_t *input, uint8_t *output, int stride, TX_TYPE tx_type,
+ TX_SIZE tx_size, int eob) {
+ switch (tx_type) {
+ case IDTX:
+ lowbd_inv_txfm2d_add_wxh_idtx_neon(input, output, stride, tx_type,
+ tx_size, eob);
+ break;
+
+ case H_DCT:
+ case H_ADST:
+ case H_FLIPADST:
+ lowbd_inv_txfm2d_add_v_wxh_identity_neon(input, output, stride, tx_type,
+ tx_size, eob);
+ break;
+
+ case V_DCT:
+ case V_ADST:
+ case V_FLIPADST:
+ lowbd_inv_txfm2d_add_h_wxh_identity_neon(input, output, stride, tx_type,
+ tx_size, eob);
+ break;
+
+ default:
+ lowbd_inv_txfm2d_add_wxh_no_identity_neon(input, output, stride, tx_type,
+ tx_size, eob);
+ break;
+ }
+}
+
+static INLINE void lowbd_inv_txfm2d_add_universe_neon(
+ const int32_t *input, uint8_t *output, int stride, TX_TYPE tx_type,
+ TX_SIZE tx_size, int eob) {
+ switch (tx_type) {
+ case IDTX:
+ lowbd_inv_txfm2d_add_idtx_neon(input, output, stride, tx_type, tx_size,
+ eob);
+ break;
+
+ case H_DCT:
+ case H_ADST:
+ case H_FLIPADST:
+ lowbd_inv_txfm2d_add_v_identity_neon(input, output, stride, tx_type,
+ tx_size, eob);
+ break;
+
+ case V_DCT:
+ case V_ADST:
+ case V_FLIPADST:
+ lowbd_inv_txfm2d_add_h_identity_neon(input, output, stride, tx_type,
+ tx_size, eob);
+ break;
+
+ default:
+ lowbd_inv_txfm2d_add_no_identity_neon(input, output, stride, tx_type,
+ tx_size, eob);
+ break;
+ }
+}
+
+void av1_lowbd_inv_txfm2d_add_neon(const int32_t *input, uint8_t *output,
+ int stride, TX_TYPE tx_type, TX_SIZE tx_size,
+ int eob) {
+ int row;
+ switch (tx_size) {
+ case TX_4X4:
+ lowbd_inv_txfm2d_add_4x4_neon(input, output, stride, tx_type, tx_size,
+ eob);
+ break;
+
+ case TX_4X8:
+ lowbd_inv_txfm2d_add_4x8_neon(input, output, stride, tx_type, tx_size,
+ eob);
+ break;
+
+ case TX_8X4:
+ lowbd_inv_txfm2d_add_8x4_neon(input, output, stride, tx_type, tx_size,
+ eob);
+ break;
+
+ case TX_4X16:
+ lowbd_inv_txfm2d_add_4x16_neon(input, output, stride, tx_type, tx_size,
+ eob);
+ break;
+
+ case TX_16X4:
+ lowbd_inv_txfm2d_add_16x4_neon(input, output, stride, tx_type, tx_size,
+ eob);
+ break;
+
+ case TX_16X64: {
+ lowbd_inv_txfm2d_add_wxh_universe_neon(input, output, stride, tx_type,
+ tx_size, eob);
+ } break;
+
+ case TX_64X16: {
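+      // Only the left 32 columns of a 64-wide transform carry coefficients, so
+      // widen each coded row to 64 columns by zero-filling the right half.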
+ int32_t mod_input[64 * 16];
+ for (row = 0; row < 16; ++row) {
+ memcpy(mod_input + row * 64, input + row * 32, 32 * sizeof(*mod_input));
+ memset(mod_input + row * 64 + 32, 0, 32 * sizeof(*mod_input));
+ }
+ lowbd_inv_txfm2d_add_wxh_universe_neon(mod_input, output, stride, tx_type,
+ tx_size, eob);
+ } break;
+
+ case TX_32X64: {
+ lowbd_inv_txfm2d_add_wxh_universe_neon(input, output, stride, tx_type,
+ tx_size, eob);
+ } break;
+
+ case TX_64X32: {
+ int32_t mod_input[64 * 32];
+ for (row = 0; row < 32; ++row) {
+ memcpy(mod_input + row * 64, input + row * 32, 32 * sizeof(*mod_input));
+ memset(mod_input + row * 64 + 32, 0, 32 * sizeof(*mod_input));
+ }
+ lowbd_inv_txfm2d_add_wxh_universe_neon(mod_input, output, stride, tx_type,
+ tx_size, eob);
+ } break;
+
+ case TX_64X64: {
+ int32_t mod_input[64 * 64];
+ for (row = 0; row < 32; ++row) {
+ memcpy(mod_input + row * 64, input + row * 32, 32 * sizeof(*mod_input));
+ memset(mod_input + row * 64 + 32, 0, 32 * sizeof(*mod_input));
+ }
+ lowbd_inv_txfm2d_add_wxh_universe_neon(mod_input, output, stride, tx_type,
+ tx_size, eob);
+ } break;
+
+ default:
+ lowbd_inv_txfm2d_add_universe_neon(input, output, stride, tx_type,
+ tx_size, eob);
+ break;
+ }
+}
+
+void av1_inv_txfm_add_neon(const tran_low_t *dqcoeff, uint8_t *dst, int stride,
+ const TxfmParam *txfm_param) {
+ const TX_TYPE tx_type = txfm_param->tx_type;
+ if (!txfm_param->lossless) {
+ av1_lowbd_inv_txfm2d_add_neon(dqcoeff, dst, stride, tx_type,
+ txfm_param->tx_size, txfm_param->eob);
+ } else {
+ av1_inv_txfm_add_c(dqcoeff, dst, stride, txfm_param);
+ }
+}
diff --git a/third_party/aom/av1/common/arm/av1_inv_txfm_neon.h b/third_party/aom/av1/common/arm/av1_inv_txfm_neon.h
new file mode 100644
index 0000000000..9ec658291c
--- /dev/null
+++ b/third_party/aom/av1/common/arm/av1_inv_txfm_neon.h
@@ -0,0 +1,154 @@
+/*
+ * Copyright (c) 2018, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+#ifndef AOM_AV1_COMMON_ARM_AV1_INV_TXFM_NEON_H_
+#define AOM_AV1_COMMON_ARM_AV1_INV_TXFM_NEON_H_
+
+#include "config/aom_config.h"
+#include "config/av1_rtcd.h"
+
+#include "aom/aom_integer.h"
+#include "av1/common/enums.h"
+#include "av1/common/av1_inv_txfm1d.h"
+#include "av1/common/av1_inv_txfm1d_cfg.h"
+#include "av1/common/av1_txfm.h"
+
+typedef void (*transform_1d_neon)(const int32_t *input, int32_t *output,
+ const int8_t cos_bit,
+ const int8_t *stage_ptr);
+typedef void (*transform_neon)(int16x8_t *input, int16x8_t *output,
+ int8_t cos_bit, int bit);
+
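+// Each 16-bit entry packs a default eobx in the low byte and eoby in the high
+// byte, indexed by the row containing the last non-zero coefficient (see
+// get_eobx_eoby_scan_default below).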
+DECLARE_ALIGNED(16, static const int16_t, av1_eob_to_eobxy_8x8_default[8]) = {
+ 0x0707, 0x0707, 0x0707, 0x0707, 0x0707, 0x0707, 0x0707, 0x0707,
+};
+
+DECLARE_ALIGNED(16, static const int16_t,
+ av1_eob_to_eobxy_16x16_default[16]) = {
+ 0x0707, 0x0707, 0x0f0f, 0x0f0f, 0x0f0f, 0x0f0f, 0x0f0f, 0x0f0f,
+ 0x0f0f, 0x0f0f, 0x0f0f, 0x0f0f, 0x0f0f, 0x0f0f, 0x0f0f, 0x0f0f,
+};
+
+DECLARE_ALIGNED(16, static const int16_t,
+ av1_eob_to_eobxy_32x32_default[32]) = {
+ 0x0707, 0x0f0f, 0x0f0f, 0x0f0f, 0x1f1f, 0x1f1f, 0x1f1f, 0x1f1f,
+ 0x1f1f, 0x1f1f, 0x1f1f, 0x1f1f, 0x1f1f, 0x1f1f, 0x1f1f, 0x1f1f,
+ 0x1f1f, 0x1f1f, 0x1f1f, 0x1f1f, 0x1f1f, 0x1f1f, 0x1f1f, 0x1f1f,
+ 0x1f1f, 0x1f1f, 0x1f1f, 0x1f1f, 0x1f1f, 0x1f1f, 0x1f1f, 0x1f1f,
+};
+
+DECLARE_ALIGNED(16, static const int16_t, av1_eob_to_eobxy_8x16_default[16]) = {
+ 0x0707, 0x0707, 0x0707, 0x0707, 0x0707, 0x0f07, 0x0f07, 0x0f07,
+ 0x0f07, 0x0f07, 0x0f07, 0x0f07, 0x0f07, 0x0f07, 0x0f07, 0x0f07,
+};
+
+DECLARE_ALIGNED(16, static const int16_t, av1_eob_to_eobxy_16x8_default[8]) = {
+ 0x0707, 0x0707, 0x070f, 0x070f, 0x070f, 0x070f, 0x070f, 0x070f,
+};
+
+DECLARE_ALIGNED(16, static const int16_t,
+ av1_eob_to_eobxy_16x32_default[32]) = {
+ 0x0707, 0x0707, 0x0f0f, 0x0f0f, 0x0f0f, 0x0f0f, 0x0f0f, 0x0f0f,
+ 0x0f0f, 0x1f0f, 0x1f0f, 0x1f0f, 0x1f0f, 0x1f0f, 0x1f0f, 0x1f0f,
+ 0x1f0f, 0x1f0f, 0x1f0f, 0x1f0f, 0x1f0f, 0x1f0f, 0x1f0f, 0x1f0f,
+ 0x1f0f, 0x1f0f, 0x1f0f, 0x1f0f, 0x1f0f, 0x1f0f, 0x1f0f, 0x1f0f,
+};
+
+DECLARE_ALIGNED(16, static const int16_t,
+ av1_eob_to_eobxy_32x16_default[16]) = {
+ 0x0707, 0x0f0f, 0x0f0f, 0x0f0f, 0x0f1f, 0x0f1f, 0x0f1f, 0x0f1f,
+ 0x0f1f, 0x0f1f, 0x0f1f, 0x0f1f, 0x0f1f, 0x0f1f, 0x0f1f, 0x0f1f,
+};
+
+DECLARE_ALIGNED(16, static const int16_t, av1_eob_to_eobxy_8x32_default[32]) = {
+ 0x0707, 0x0707, 0x0707, 0x0707, 0x0707, 0x0f07, 0x0f07, 0x0f07,
+ 0x0f07, 0x0f07, 0x0f07, 0x0f07, 0x0f07, 0x1f07, 0x1f07, 0x1f07,
+ 0x1f07, 0x1f07, 0x1f07, 0x1f07, 0x1f07, 0x1f07, 0x1f07, 0x1f07,
+ 0x1f07, 0x1f07, 0x1f07, 0x1f07, 0x1f07, 0x1f07, 0x1f07, 0x1f07,
+};
+
+DECLARE_ALIGNED(16, static const int16_t, av1_eob_to_eobxy_32x8_default[8]) = {
+ 0x0707, 0x070f, 0x070f, 0x071f, 0x071f, 0x071f, 0x071f, 0x071f,
+};
+
+DECLARE_ALIGNED(16, static const int16_t *,
+ av1_eob_to_eobxy_default[TX_SIZES_ALL]) = {
+ NULL,
+ av1_eob_to_eobxy_8x8_default,
+ av1_eob_to_eobxy_16x16_default,
+ av1_eob_to_eobxy_32x32_default,
+ av1_eob_to_eobxy_32x32_default,
+ NULL,
+ NULL,
+ av1_eob_to_eobxy_8x16_default,
+ av1_eob_to_eobxy_16x8_default,
+ av1_eob_to_eobxy_16x32_default,
+ av1_eob_to_eobxy_32x16_default,
+ av1_eob_to_eobxy_32x32_default,
+ av1_eob_to_eobxy_32x32_default,
+ NULL,
+ NULL,
+ av1_eob_to_eobxy_8x32_default,
+ av1_eob_to_eobxy_32x8_default,
+ av1_eob_to_eobxy_16x32_default,
+ av1_eob_to_eobxy_32x16_default,
+};
+
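+// Maps an eob coordinate (0..31) to the kernel index used with the
+// *_all_1d_zeros tables: 0 -> DC only, 1 -> first 8, 2 -> first 16,
+// 3 -> first 32 coefficients.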
+static const int lowbd_txfm_all_1d_zeros_idx[32] = {
+ 0, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+};
+
+// Transform block width in log2 for eob (a size of 64 maps to 32).
+static const int tx_size_wide_log2_eob[TX_SIZES_ALL] = {
+ 2, 3, 4, 5, 5, 2, 3, 3, 4, 4, 5, 5, 5, 2, 4, 3, 5, 4, 5,
+};
+
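+// Maps an index to the last position of its bucket:
+// 0 -> 0, 1..7 -> 7, 8..15 -> 15, 16..31 -> 31.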
+static const int eob_fill[32] = {
+ 0, 7, 7, 7, 7, 7, 7, 7, 15, 15, 15, 15, 15, 15, 15, 15,
+ 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31,
+};
+
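+// Derives the default (eobx, eoby) pair from the scan eob. For example, for
+// TX_16X16 (tx_w_log2 = 4) and eob = 36, eob_row = (36 - 1) >> 4 = 2, the
+// table entry is 0x0f0f, and so eobx = eoby = 15.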
+static INLINE void get_eobx_eoby_scan_default(int *eobx, int *eoby,
+ TX_SIZE tx_size, int eob) {
+ if (eob == 1) {
+ *eobx = 0;
+ *eoby = 0;
+ return;
+ }
+
+ const int tx_w_log2 = tx_size_wide_log2_eob[tx_size];
+ const int eob_row = (eob - 1) >> tx_w_log2;
+ const int eobxy = av1_eob_to_eobxy_default[tx_size][eob_row];
+ *eobx = eobxy & 0xFF;
+ *eoby = eobxy >> 8;
+}
+
+static INLINE void get_eobx_eoby_scan_v_identity(int *eobx, int *eoby,
+ TX_SIZE tx_size, int eob) {
+ eob -= 1;
+ const int txfm_size_row = tx_size_high[tx_size];
+ const int eoby_max = AOMMIN(32, txfm_size_row) - 1;
+ *eobx = eob / (eoby_max + 1);
+ *eoby = (eob >= eoby_max) ? eoby_max : eob_fill[eob];
+}
+
+static INLINE void get_eobx_eoby_scan_h_identity(int *eobx, int *eoby,
+ TX_SIZE tx_size, int eob) {
+ eob -= 1;
+ const int txfm_size_col = tx_size_wide[tx_size];
+ const int eobx_max = AOMMIN(32, txfm_size_col) - 1;
+ *eobx = (eob >= eobx_max) ? eobx_max : eob_fill[eob];
+ const int temp_eoby = eob / (eobx_max + 1);
+ assert(temp_eoby < 32);
+ *eoby = eob_fill[temp_eoby];
+}
+
+#endif // AOM_AV1_COMMON_ARM_AV1_INV_TXFM_NEON_H_
diff --git a/third_party/aom/av1/common/arm/av1_txfm_neon.c b/third_party/aom/av1/common/arm/av1_txfm_neon.c
new file mode 100644
index 0000000000..de3c547248
--- /dev/null
+++ b/third_party/aom/av1/common/arm/av1_txfm_neon.c
@@ -0,0 +1,28 @@
+/*
+ *
+ * Copyright (c) 2018, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+#include <arm_neon.h>
+#include <assert.h>
+
+#include "aom_ports/mem.h"
+#include "av1/common/arm/mem_neon.h"
+
+void av1_round_shift_array_neon(int32_t *arr, int size, int bit) {
+ assert(!(size % 4));
+ if (!bit) return;
+ const int32x4_t dup_bits_n_32x4 = vdupq_n_s32((int32_t)(-bit));
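+  // vrshlq_s32 with a negative shift amount is a rounding arithmetic shift
+  // right, i.e. arr[i] = (arr[i] + (1 << (bit - 1))) >> bit.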
+ for (int i = 0; i < size; i += 4) {
+ int32x4_t tmp_q_s32 = vld1q_s32(arr);
+ tmp_q_s32 = vrshlq_s32(tmp_q_s32, dup_bits_n_32x4);
+ vst1q_s32(arr, tmp_q_s32);
+ arr += 4;
+ }
+}
diff --git a/third_party/aom/av1/common/arm/blend_a64_hmask_neon.c b/third_party/aom/av1/common/arm/blend_a64_hmask_neon.c
new file mode 100644
index 0000000000..7134f183e3
--- /dev/null
+++ b/third_party/aom/av1/common/arm/blend_a64_hmask_neon.c
@@ -0,0 +1,134 @@
+/*
+ *
+ * Copyright (c) 2018, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <arm_neon.h>
+#include <assert.h>
+
+#include "aom/aom_integer.h"
+#include "aom_dsp/blend.h"
+#include "aom_ports/mem.h"
+#include "av1/common/arm/mem_neon.h"
+#include "aom_dsp/aom_dsp_common.h"
+#include "config/aom_dsp_rtcd.h"
+
+void aom_blend_a64_hmask_neon(uint8_t *dst, uint32_t dst_stride,
+ const uint8_t *src0, uint32_t src0_stride,
+ const uint8_t *src1, uint32_t src1_stride,
+ const uint8_t *mask, int w, int h) {
+ assert(IMPLIES(src0 == dst, src0_stride == dst_stride));
+ assert(IMPLIES(src1 == dst, src1_stride == dst_stride));
+
+ assert(h >= 2);
+ assert(w >= 2);
+ assert(IS_POWER_OF_TWO(h));
+ assert(IS_POWER_OF_TWO(w));
+ uint8x8_t tmp0, tmp1;
+ uint8x16_t res_q;
+ uint16x8_t res, res_low, res_high;
+ uint32x2_t tmp0_32 = vdup_n_u32(0), tmp1_32 = vdup_n_u32(0);
+ uint16x4_t tmp0_16 = vdup_n_u16(0), tmp1_16 = vdup_n_u16(0);
+ const uint8x8_t vdup_64 = vdup_n_u8((uint8_t)64);
+
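+  // Each output is ROUND_POWER_OF_TWO(m * src0 + (64 - m) * src1,
+  // AOM_BLEND_A64_ROUND_BITS), where m is taken from the horizontal mask:
+  // one value per column, reused for every row.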
+ if (w >= 16) {
+ const uint8x16_t vdup_64_q = vdupq_n_u8((uint8_t)64);
+ for (int i = 0; i < h; ++i) {
+ for (int j = 0; j < w; j += 16) {
+ __builtin_prefetch(src0);
+ __builtin_prefetch(src1);
+ const uint8x16_t tmp0_q = vld1q_u8(src0);
+ const uint8x16_t tmp1_q = vld1q_u8(src1);
+ const uint8x16_t m_q = vld1q_u8(mask);
+ const uint8x16_t max_minus_m_q = vsubq_u8(vdup_64_q, m_q);
+ res_low = vmull_u8(vget_low_u8(m_q), vget_low_u8(tmp0_q));
+ res_low =
+ vmlal_u8(res_low, vget_low_u8(max_minus_m_q), vget_low_u8(tmp1_q));
+ res_high = vmull_u8(vget_high_u8(m_q), vget_high_u8(tmp0_q));
+ res_high = vmlal_u8(res_high, vget_high_u8(max_minus_m_q),
+ vget_high_u8(tmp1_q));
+ res_q = vcombine_u8(vrshrn_n_u16(res_low, AOM_BLEND_A64_ROUND_BITS),
+ vrshrn_n_u16(res_high, AOM_BLEND_A64_ROUND_BITS));
+ vst1q_u8(dst, res_q);
+ src0 += 16;
+ src1 += 16;
+ dst += 16;
+ mask += 16;
+ }
+ src0 += src0_stride - w;
+ src1 += src1_stride - w;
+ dst += dst_stride - w;
+ mask -= w;
+ }
+ } else if (w == 8) {
+ const uint8x8_t m = vld1_u8(mask);
+ const uint8x8_t max_minus_m = vsub_u8(vdup_64, m);
+ for (int i = 0; i < h; ++i) {
+ __builtin_prefetch(src0);
+ __builtin_prefetch(src1);
+ tmp0 = vld1_u8(src0);
+ tmp1 = vld1_u8(src1);
+ res = vmull_u8(m, tmp0);
+ res = vmlal_u8(res, max_minus_m, tmp1);
+ vst1_u8(dst, vrshrn_n_u16(res, AOM_BLEND_A64_ROUND_BITS));
+ src0 += src0_stride;
+ src1 += src1_stride;
+ dst += dst_stride;
+ }
+ } else if (w == 4) {
+ const uint8x8_t m = vreinterpret_u8_u32(vld1_dup_u32((uint32_t *)mask));
+ const uint8x8_t max_minus_m = vsub_u8(vdup_64, m);
+ for (int i = 0; i < h; i += 2) {
+ __builtin_prefetch(src0 + 0 * src0_stride);
+ __builtin_prefetch(src0 + 1 * src0_stride);
+ __builtin_prefetch(src1 + 0 * src1_stride);
+ __builtin_prefetch(src1 + 1 * src1_stride);
+ load_unaligned_u8_4x2(src0, src0_stride, &tmp0_32);
+ tmp0 = vreinterpret_u8_u32(tmp0_32);
+ load_unaligned_u8_4x2(src1, src1_stride, &tmp1_32);
+ tmp1 = vreinterpret_u8_u32(tmp1_32);
+ res = vmull_u8(m, tmp0);
+ res = vmlal_u8(res, max_minus_m, tmp1);
+ vst1_lane_u32(
+ (uint32_t *)(dst + (0 * dst_stride)),
+ vreinterpret_u32_u8(vrshrn_n_u16(res, AOM_BLEND_A64_ROUND_BITS)), 0);
+ vst1_lane_u32(
+ (uint32_t *)(dst + (1 * dst_stride)),
+ vreinterpret_u32_u8(vrshrn_n_u16(res, AOM_BLEND_A64_ROUND_BITS)), 1);
+ src0 += (2 * src0_stride);
+ src1 += (2 * src1_stride);
+ dst += (2 * dst_stride);
+ }
+ } else if (w == 2) {
+ const uint8x8_t m = vreinterpret_u8_u16(vld1_dup_u16((uint16_t *)mask));
+ const uint8x8_t max_minus_m = vsub_u8(vdup_64, m);
+ for (int i = 0; i < h; i += 2) {
+ __builtin_prefetch(src0 + 0 * src0_stride);
+ __builtin_prefetch(src0 + 1 * src0_stride);
+ __builtin_prefetch(src1 + 0 * src1_stride);
+ __builtin_prefetch(src1 + 1 * src1_stride);
+ load_unaligned_u8_2x2(src0, src0_stride, &tmp0_16);
+ tmp0 = vreinterpret_u8_u16(tmp0_16);
+ load_unaligned_u8_2x2(src1, src1_stride, &tmp1_16);
+ tmp1 = vreinterpret_u8_u16(tmp1_16);
+ res = vmull_u8(m, tmp0);
+ res = vmlal_u8(res, max_minus_m, tmp1);
+ vst1_lane_u16(
+ (uint16_t *)(dst + (0 * dst_stride)),
+ vreinterpret_u16_u8(vrshrn_n_u16(res, AOM_BLEND_A64_ROUND_BITS)), 0);
+ vst1_lane_u16(
+ (uint16_t *)(dst + (1 * dst_stride)),
+ vreinterpret_u16_u8(vrshrn_n_u16(res, AOM_BLEND_A64_ROUND_BITS)), 1);
+ src0 += (2 * src0_stride);
+ src1 += (2 * src1_stride);
+ dst += (2 * dst_stride);
+ }
+ }
+}
diff --git a/third_party/aom/av1/common/arm/blend_a64_vmask_neon.c b/third_party/aom/av1/common/arm/blend_a64_vmask_neon.c
new file mode 100644
index 0000000000..194e94c8c0
--- /dev/null
+++ b/third_party/aom/av1/common/arm/blend_a64_vmask_neon.c
@@ -0,0 +1,141 @@
+/*
+ *
+ * Copyright (c) 2018, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <arm_neon.h>
+#include <assert.h>
+
+#include "aom/aom_integer.h"
+#include "aom_dsp/blend.h"
+#include "aom_ports/mem.h"
+#include "av1/common/arm/mem_neon.h"
+#include "aom_dsp/aom_dsp_common.h"
+#include "config/aom_dsp_rtcd.h"
+
+void aom_blend_a64_vmask_neon(uint8_t *dst, uint32_t dst_stride,
+ const uint8_t *src0, uint32_t src0_stride,
+ const uint8_t *src1, uint32_t src1_stride,
+ const uint8_t *mask, int w, int h) {
+ uint8x8_t tmp0, tmp1;
+ uint8x16_t tmp0_q, tmp1_q, res_q;
+ uint16x8_t res, res_low, res_high;
+ uint32x2_t tmp0_32 = vdup_n_u32(0), tmp1_32 = vdup_n_u32(0);
+ uint16x4_t tmp0_16 = vdup_n_u16(0), tmp1_16 = vdup_n_u16(0);
+ assert(IMPLIES(src0 == dst, src0_stride == dst_stride));
+ assert(IMPLIES(src1 == dst, src1_stride == dst_stride));
+
+ assert(h >= 2);
+ assert(w >= 2);
+ assert(IS_POWER_OF_TWO(h));
+ assert(IS_POWER_OF_TWO(w));
+
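+  // Each output is ROUND_POWER_OF_TWO(m * src0 + (64 - m) * src1,
+  // AOM_BLEND_A64_ROUND_BITS), where m = mask[i] is constant across a row and
+  // varies down the rows.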
+ if (w >= 16) {
+ for (int i = 0; i < h; ++i) {
+ const uint8x8_t m = vdup_n_u8((uint8_t)mask[i]);
+ const uint8x8_t max_minus_m = vdup_n_u8(64 - (uint8_t)mask[i]);
+ for (int j = 0; j < w; j += 16) {
+ __builtin_prefetch(src0);
+ __builtin_prefetch(src1);
+ tmp0_q = vld1q_u8(src0);
+ tmp1_q = vld1q_u8(src1);
+ res_low = vmull_u8(m, vget_low_u8(tmp0_q));
+ res_low = vmlal_u8(res_low, max_minus_m, vget_low_u8(tmp1_q));
+ res_high = vmull_u8(m, vget_high_u8(tmp0_q));
+ res_high = vmlal_u8(res_high, max_minus_m, vget_high_u8(tmp1_q));
+ res_q = vcombine_u8(vrshrn_n_u16(res_low, AOM_BLEND_A64_ROUND_BITS),
+ vrshrn_n_u16(res_high, AOM_BLEND_A64_ROUND_BITS));
+ vst1q_u8(dst, res_q);
+ src0 += 16;
+ src1 += 16;
+ dst += 16;
+ }
+ src0 += src0_stride - w;
+ src1 += src1_stride - w;
+ dst += dst_stride - w;
+ }
+ } else if (w == 8) {
+ for (int i = 0; i < h; ++i) {
+ __builtin_prefetch(src0);
+ __builtin_prefetch(src1);
+ const uint8x8_t m = vdup_n_u8((uint8_t)mask[i]);
+ const uint8x8_t max_minus_m = vdup_n_u8(64 - (uint8_t)mask[i]);
+ tmp0 = vld1_u8(src0);
+ tmp1 = vld1_u8(src1);
+ res = vmull_u8(m, tmp0);
+ res = vmlal_u8(res, max_minus_m, tmp1);
+ vst1_u8(dst, vrshrn_n_u16(res, AOM_BLEND_A64_ROUND_BITS));
+ src0 += src0_stride;
+ src1 += src1_stride;
+ dst += dst_stride;
+ }
+ } else if (w == 4) {
+ for (int i = 0; i < h; i += 2) {
+ __builtin_prefetch(src0 + 0 * src0_stride);
+ __builtin_prefetch(src0 + 1 * src0_stride);
+ __builtin_prefetch(src1 + 0 * src1_stride);
+ __builtin_prefetch(src1 + 1 * src1_stride);
+ const uint16x4_t m1 = vdup_n_u16((uint16_t)mask[i]);
+ const uint16x4_t m2 = vdup_n_u16((uint16_t)mask[i + 1]);
+ const uint8x8_t m = vmovn_u16(vcombine_u16(m1, m2));
+ const uint16x4_t max_minus_m1 = vdup_n_u16(64 - (uint16_t)mask[i]);
+ const uint16x4_t max_minus_m2 = vdup_n_u16(64 - (uint16_t)mask[i + 1]);
+ const uint8x8_t max_minus_m =
+ vmovn_u16(vcombine_u16(max_minus_m1, max_minus_m2));
+ load_unaligned_u8_4x2(src0, src0_stride, &tmp0_32);
+ tmp0 = vreinterpret_u8_u32(tmp0_32);
+ load_unaligned_u8_4x2(src1, src1_stride, &tmp1_32);
+ tmp1 = vreinterpret_u8_u32(tmp1_32);
+ res = vmull_u8(m, tmp0);
+ res = vmlal_u8(res, max_minus_m, tmp1);
+ vst1_lane_u32(
+ (uint32_t *)(dst + (0 * dst_stride)),
+ vreinterpret_u32_u8(vrshrn_n_u16(res, AOM_BLEND_A64_ROUND_BITS)), 0);
+ vst1_lane_u32(
+ (uint32_t *)(dst + (1 * dst_stride)),
+ vreinterpret_u32_u8(vrshrn_n_u16(res, AOM_BLEND_A64_ROUND_BITS)), 1);
+ src0 += (2 * src0_stride);
+ src1 += (2 * src1_stride);
+ dst += (2 * dst_stride);
+ }
+ } else if (w == 2) {
+ for (int i = 0; i < h; i += 2) {
+ __builtin_prefetch(src0 + 0 * src0_stride);
+ __builtin_prefetch(src0 + 1 * src0_stride);
+ __builtin_prefetch(src1 + 0 * src1_stride);
+ __builtin_prefetch(src1 + 1 * src1_stride);
+ const uint8x8_t m1 = vdup_n_u8(mask[i]);
+ const uint8x8_t m2 = vdup_n_u8(mask[i + 1]);
+ const uint16x4x2_t m_trn =
+ vtrn_u16(vreinterpret_u16_u8(m1), vreinterpret_u16_u8(m2));
+ const uint8x8_t m = vreinterpret_u8_u16(m_trn.val[0]);
+ const uint8x8_t max_minus_m1 = vdup_n_u8(64 - mask[i]);
+ const uint8x8_t max_minus_m2 = vdup_n_u8(64 - mask[i + 1]);
+ const uint16x4x2_t max_minus_m_trn = vtrn_u16(
+ vreinterpret_u16_u8(max_minus_m1), vreinterpret_u16_u8(max_minus_m2));
+ const uint8x8_t max_minus_m = vreinterpret_u8_u16(max_minus_m_trn.val[0]);
+ load_unaligned_u8_2x2(src0, src0_stride, &tmp0_16);
+ tmp0 = vreinterpret_u8_u16(tmp0_16);
+ load_unaligned_u8_2x2(src1, src1_stride, &tmp1_16);
+ tmp1 = vreinterpret_u8_u16(tmp1_16);
+ res = vmull_u8(m, tmp0);
+ res = vmlal_u8(res, max_minus_m, tmp1);
+ vst1_lane_u16(
+ (uint16_t *)(dst + (0 * dst_stride)),
+ vreinterpret_u16_u8(vrshrn_n_u16(res, AOM_BLEND_A64_ROUND_BITS)), 0);
+ vst1_lane_u16(
+ (uint16_t *)(dst + (1 * dst_stride)),
+ vreinterpret_u16_u8(vrshrn_n_u16(res, AOM_BLEND_A64_ROUND_BITS)), 1);
+ src0 += (2 * src0_stride);
+ src1 += (2 * src1_stride);
+ dst += (2 * dst_stride);
+ }
+ }
+}
diff --git a/third_party/aom/av1/common/arm/cfl_neon.c b/third_party/aom/av1/common/arm/cfl_neon.c
new file mode 100644
index 0000000000..39025b5e54
--- /dev/null
+++ b/third_party/aom/av1/common/arm/cfl_neon.c
@@ -0,0 +1,584 @@
+/*
+ * Copyright (c) 2017, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+#include <arm_neon.h>
+
+#include "config/av1_rtcd.h"
+
+#include "av1/common/cfl.h"
+
+static INLINE void vldsubstq_s16(int16_t *dst, const uint16_t *src, int offset,
+ int16x8_t sub) {
+ vst1q_s16(dst + offset,
+ vsubq_s16(vreinterpretq_s16_u16(vld1q_u16(src + offset)), sub));
+}
+
+static INLINE uint16x8_t vldaddq_u16(const uint16_t *buf, size_t offset) {
+ return vaddq_u16(vld1q_u16(buf), vld1q_u16(buf + offset));
+}
+
+// Load half of a vector and duplicate it in the other half.
+static INLINE uint8x8_t vldh_dup_u8(const uint8_t *ptr) {
+ return vreinterpret_u8_u32(vld1_dup_u32((const uint32_t *)ptr));
+}
+
+// Store half of a vector.
+static INLINE void vsth_u16(uint16_t *ptr, uint16x4_t val) {
+ *((uint32_t *)ptr) = vreinterpret_u32_u16(val)[0];
+}
+
+// Store half of a vector.
+static INLINE void vsth_u8(uint8_t *ptr, uint8x8_t val) {
+ *((uint32_t *)ptr) = vreinterpret_u32_u8(val)[0];
+}
+
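+// The subsampling routines below store the subsampled luma scaled by 8 (Q3):
+// 4:2:0 sums a 2x2 block and shifts left by 1, 4:2:2 sums a horizontal pair
+// and shifts left by 2, and 4:4:4 shifts a single sample left by 3.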
+static void cfl_luma_subsampling_420_lbd_neon(const uint8_t *input,
+ int input_stride,
+ uint16_t *pred_buf_q3, int width,
+ int height) {
+ const uint16_t *end = pred_buf_q3 + (height >> 1) * CFL_BUF_LINE;
+ const int luma_stride = input_stride << 1;
+ do {
+ if (width == 4) {
+ const uint16x4_t top = vpaddl_u8(vldh_dup_u8(input));
+ const uint16x4_t sum = vpadal_u8(top, vldh_dup_u8(input + input_stride));
+ vsth_u16(pred_buf_q3, vshl_n_u16(sum, 1));
+ } else if (width == 8) {
+ const uint16x4_t top = vpaddl_u8(vld1_u8(input));
+ const uint16x4_t sum = vpadal_u8(top, vld1_u8(input + input_stride));
+ vst1_u16(pred_buf_q3, vshl_n_u16(sum, 1));
+ } else if (width == 16) {
+ const uint16x8_t top = vpaddlq_u8(vld1q_u8(input));
+ const uint16x8_t sum = vpadalq_u8(top, vld1q_u8(input + input_stride));
+ vst1q_u16(pred_buf_q3, vshlq_n_u16(sum, 1));
+ } else {
+ const uint8x8x4_t top = vld4_u8(input);
+ const uint8x8x4_t bot = vld4_u8(input + input_stride);
+ // equivalent to a vpaddlq_u8 (because vld4q interleaves)
+ const uint16x8_t top_0 = vaddl_u8(top.val[0], top.val[1]);
+ // equivalent to a vpaddlq_u8 (because vld4q interleaves)
+ const uint16x8_t bot_0 = vaddl_u8(bot.val[0], bot.val[1]);
+ // equivalent to a vpaddlq_u8 (because vld4q interleaves)
+ const uint16x8_t top_1 = vaddl_u8(top.val[2], top.val[3]);
+ // equivalent to a vpaddlq_u8 (because vld4q interleaves)
+ const uint16x8_t bot_1 = vaddl_u8(bot.val[2], bot.val[3]);
+ uint16x8x2_t sum;
+ sum.val[0] = vshlq_n_u16(vaddq_u16(top_0, bot_0), 1);
+ sum.val[1] = vshlq_n_u16(vaddq_u16(top_1, bot_1), 1);
+ vst2q_u16(pred_buf_q3, sum);
+ }
+ input += luma_stride;
+ } while ((pred_buf_q3 += CFL_BUF_LINE) < end);
+}
+
+static void cfl_luma_subsampling_422_lbd_neon(const uint8_t *input,
+ int input_stride,
+ uint16_t *pred_buf_q3, int width,
+ int height) {
+ const uint16_t *end = pred_buf_q3 + height * CFL_BUF_LINE;
+ do {
+ if (width == 4) {
+ const uint16x4_t top = vpaddl_u8(vldh_dup_u8(input));
+ vsth_u16(pred_buf_q3, vshl_n_u16(top, 2));
+ } else if (width == 8) {
+ const uint16x4_t top = vpaddl_u8(vld1_u8(input));
+ vst1_u16(pred_buf_q3, vshl_n_u16(top, 2));
+ } else if (width == 16) {
+ const uint16x8_t top = vpaddlq_u8(vld1q_u8(input));
+ vst1q_u16(pred_buf_q3, vshlq_n_u16(top, 2));
+ } else {
+ const uint8x8x4_t top = vld4_u8(input);
+ uint16x8x2_t sum;
+ // vaddl_u8 is equivalent to a vpaddlq_u8 (because vld4q interleaves)
+ sum.val[0] = vshlq_n_u16(vaddl_u8(top.val[0], top.val[1]), 2);
+ sum.val[1] = vshlq_n_u16(vaddl_u8(top.val[2], top.val[3]), 2);
+ vst2q_u16(pred_buf_q3, sum);
+ }
+ input += input_stride;
+ } while ((pred_buf_q3 += CFL_BUF_LINE) < end);
+}
+
+static void cfl_luma_subsampling_444_lbd_neon(const uint8_t *input,
+ int input_stride,
+ uint16_t *pred_buf_q3, int width,
+ int height) {
+ const uint16_t *end = pred_buf_q3 + height * CFL_BUF_LINE;
+ do {
+ if (width == 4) {
+ const uint16x8_t top = vshll_n_u8(vldh_dup_u8(input), 3);
+ vst1_u16(pred_buf_q3, vget_low_u16(top));
+ } else if (width == 8) {
+ const uint16x8_t top = vshll_n_u8(vld1_u8(input), 3);
+ vst1q_u16(pred_buf_q3, top);
+ } else {
+ const uint8x16_t top = vld1q_u8(input);
+ vst1q_u16(pred_buf_q3, vshll_n_u8(vget_low_u8(top), 3));
+ vst1q_u16(pred_buf_q3 + 8, vshll_n_u8(vget_high_u8(top), 3));
+ if (width == 32) {
+ const uint8x16_t next_top = vld1q_u8(input + 16);
+ vst1q_u16(pred_buf_q3 + 16, vshll_n_u8(vget_low_u8(next_top), 3));
+ vst1q_u16(pred_buf_q3 + 24, vshll_n_u8(vget_high_u8(next_top), 3));
+ }
+ }
+ input += input_stride;
+ } while ((pred_buf_q3 += CFL_BUF_LINE) < end);
+}
+
+#ifndef __aarch64__
+uint16x8_t vpaddq_u16(uint16x8_t a, uint16x8_t b) {
+ return vcombine_u16(vpadd_u16(vget_low_u16(a), vget_high_u16(a)),
+ vpadd_u16(vget_low_u16(b), vget_high_u16(b)));
+}
+#endif
+
+static void cfl_luma_subsampling_420_hbd_neon(const uint16_t *input,
+ int input_stride,
+ uint16_t *pred_buf_q3, int width,
+ int height) {
+ const uint16_t *end = pred_buf_q3 + (height >> 1) * CFL_BUF_LINE;
+ const int luma_stride = input_stride << 1;
+ do {
+ if (width == 4) {
+ const uint16x4_t top = vld1_u16(input);
+ const uint16x4_t bot = vld1_u16(input + input_stride);
+ const uint16x4_t sum = vadd_u16(top, bot);
+ const uint16x4_t hsum = vpadd_u16(sum, sum);
+ vsth_u16(pred_buf_q3, vshl_n_u16(hsum, 1));
+ } else if (width < 32) {
+ const uint16x8_t top = vld1q_u16(input);
+ const uint16x8_t bot = vld1q_u16(input + input_stride);
+ const uint16x8_t sum = vaddq_u16(top, bot);
+ if (width == 8) {
+ const uint16x4_t hsum = vget_low_u16(vpaddq_u16(sum, sum));
+ vst1_u16(pred_buf_q3, vshl_n_u16(hsum, 1));
+ } else {
+ const uint16x8_t top_1 = vld1q_u16(input + 8);
+ const uint16x8_t bot_1 = vld1q_u16(input + 8 + input_stride);
+ const uint16x8_t sum_1 = vaddq_u16(top_1, bot_1);
+ const uint16x8_t hsum = vpaddq_u16(sum, sum_1);
+ vst1q_u16(pred_buf_q3, vshlq_n_u16(hsum, 1));
+ }
+ } else {
+ const uint16x8x4_t top = vld4q_u16(input);
+ const uint16x8x4_t bot = vld4q_u16(input + input_stride);
+ // equivalent to a vpaddq_u16 (because vld4q interleaves)
+ const uint16x8_t top_0 = vaddq_u16(top.val[0], top.val[1]);
+ // equivalent to a vpaddq_u16 (because vld4q interleaves)
+ const uint16x8_t bot_0 = vaddq_u16(bot.val[0], bot.val[1]);
+ // equivalent to a vpaddq_u16 (because vld4q interleaves)
+ const uint16x8_t top_1 = vaddq_u16(top.val[2], top.val[3]);
+ // equivalent to a vpaddq_u16 (because vld4q interleaves)
+ const uint16x8_t bot_1 = vaddq_u16(bot.val[2], bot.val[3]);
+ uint16x8x2_t sum;
+ sum.val[0] = vshlq_n_u16(vaddq_u16(top_0, bot_0), 1);
+ sum.val[1] = vshlq_n_u16(vaddq_u16(top_1, bot_1), 1);
+ vst2q_u16(pred_buf_q3, sum);
+ }
+ input += luma_stride;
+ } while ((pred_buf_q3 += CFL_BUF_LINE) < end);
+}
+
+static void cfl_luma_subsampling_422_hbd_neon(const uint16_t *input,
+ int input_stride,
+ uint16_t *pred_buf_q3, int width,
+ int height) {
+ const uint16_t *end = pred_buf_q3 + height * CFL_BUF_LINE;
+ do {
+ if (width == 4) {
+ const uint16x4_t top = vld1_u16(input);
+ const uint16x4_t hsum = vpadd_u16(top, top);
+ vsth_u16(pred_buf_q3, vshl_n_u16(hsum, 2));
+ } else if (width == 8) {
+ const uint16x4x2_t top = vld2_u16(input);
+ // equivalent to a vpadd_u16 (because vld2 interleaves)
+ const uint16x4_t hsum = vadd_u16(top.val[0], top.val[1]);
+ vst1_u16(pred_buf_q3, vshl_n_u16(hsum, 2));
+ } else if (width == 16) {
+ const uint16x8x2_t top = vld2q_u16(input);
+ // equivalent to a vpaddq_u16 (because vld2q interleaves)
+ const uint16x8_t hsum = vaddq_u16(top.val[0], top.val[1]);
+ vst1q_u16(pred_buf_q3, vshlq_n_u16(hsum, 2));
+ } else {
+ const uint16x8x4_t top = vld4q_u16(input);
+ // equivalent to a vpaddq_u16 (because vld4q interleaves)
+ const uint16x8_t hsum_0 = vaddq_u16(top.val[0], top.val[1]);
+ // equivalent to a vpaddq_u16 (because vld4q interleaves)
+ const uint16x8_t hsum_1 = vaddq_u16(top.val[2], top.val[3]);
+ uint16x8x2_t result = { { vshlq_n_u16(hsum_0, 2),
+ vshlq_n_u16(hsum_1, 2) } };
+ vst2q_u16(pred_buf_q3, result);
+ }
+ input += input_stride;
+ } while ((pred_buf_q3 += CFL_BUF_LINE) < end);
+}
+
+static void cfl_luma_subsampling_444_hbd_neon(const uint16_t *input,
+ int input_stride,
+ uint16_t *pred_buf_q3, int width,
+ int height) {
+ const uint16_t *end = pred_buf_q3 + height * CFL_BUF_LINE;
+ do {
+ if (width == 4) {
+ const uint16x4_t top = vld1_u16(input);
+ vst1_u16(pred_buf_q3, vshl_n_u16(top, 3));
+ } else if (width == 8) {
+ const uint16x8_t top = vld1q_u16(input);
+ vst1q_u16(pred_buf_q3, vshlq_n_u16(top, 3));
+ } else if (width == 16) {
+ uint16x8x2_t top = vld2q_u16(input);
+ top.val[0] = vshlq_n_u16(top.val[0], 3);
+ top.val[1] = vshlq_n_u16(top.val[1], 3);
+ vst2q_u16(pred_buf_q3, top);
+ } else {
+ uint16x8x4_t top = vld4q_u16(input);
+ top.val[0] = vshlq_n_u16(top.val[0], 3);
+ top.val[1] = vshlq_n_u16(top.val[1], 3);
+ top.val[2] = vshlq_n_u16(top.val[2], 3);
+ top.val[3] = vshlq_n_u16(top.val[3], 3);
+ vst4q_u16(pred_buf_q3, top);
+ }
+ input += input_stride;
+ } while ((pred_buf_q3 += CFL_BUF_LINE) < end);
+}
+
+CFL_GET_SUBSAMPLE_FUNCTION(neon)
+
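+// Subtracts the block average (DC) from the Q3 luma prediction buffer,
+// leaving the signed AC contribution used by the CfL predictor.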
+static INLINE void subtract_average_neon(const uint16_t *src, int16_t *dst,
+ int width, int height,
+ int round_offset,
+ const int num_pel_log2) {
+ const uint16_t *const end = src + height * CFL_BUF_LINE;
+
+ // Round offset is not needed, because NEON will handle the rounding.
+ (void)round_offset;
+
+ // To optimize the use of the CPU pipeline, we process 4 rows per iteration
+ const int step = 4 * CFL_BUF_LINE;
+
+ // At this stage, the prediction buffer contains scaled reconstructed luma
+  // pixels, which are positive integers and only require 15 bits. By using
+ // unsigned integer for the sum, we can do one addition operation inside 16
+ // bits (8 lanes) before having to convert to 32 bits (4 lanes).
+ const uint16_t *sum_buf = src;
+ uint32x4_t sum_32x4 = { 0, 0, 0, 0 };
+ do {
+ // For all widths, we load, add and combine the data so it fits in 4 lanes.
+ if (width == 4) {
+ const uint16x4_t a0 =
+ vadd_u16(vld1_u16(sum_buf), vld1_u16(sum_buf + CFL_BUF_LINE));
+ const uint16x4_t a1 = vadd_u16(vld1_u16(sum_buf + 2 * CFL_BUF_LINE),
+ vld1_u16(sum_buf + 3 * CFL_BUF_LINE));
+ sum_32x4 = vaddq_u32(sum_32x4, vaddl_u16(a0, a1));
+ } else if (width == 8) {
+ const uint16x8_t a0 = vldaddq_u16(sum_buf, CFL_BUF_LINE);
+ const uint16x8_t a1 =
+ vldaddq_u16(sum_buf + 2 * CFL_BUF_LINE, CFL_BUF_LINE);
+ sum_32x4 = vpadalq_u16(sum_32x4, a0);
+ sum_32x4 = vpadalq_u16(sum_32x4, a1);
+ } else {
+ const uint16x8_t row0 = vldaddq_u16(sum_buf, 8);
+ const uint16x8_t row1 = vldaddq_u16(sum_buf + CFL_BUF_LINE, 8);
+ const uint16x8_t row2 = vldaddq_u16(sum_buf + 2 * CFL_BUF_LINE, 8);
+ const uint16x8_t row3 = vldaddq_u16(sum_buf + 3 * CFL_BUF_LINE, 8);
+ sum_32x4 = vpadalq_u16(sum_32x4, row0);
+ sum_32x4 = vpadalq_u16(sum_32x4, row1);
+ sum_32x4 = vpadalq_u16(sum_32x4, row2);
+ sum_32x4 = vpadalq_u16(sum_32x4, row3);
+
+ if (width == 32) {
+ const uint16x8_t row0_1 = vldaddq_u16(sum_buf + 16, 8);
+ const uint16x8_t row1_1 = vldaddq_u16(sum_buf + CFL_BUF_LINE + 16, 8);
+ const uint16x8_t row2_1 =
+ vldaddq_u16(sum_buf + 2 * CFL_BUF_LINE + 16, 8);
+ const uint16x8_t row3_1 =
+ vldaddq_u16(sum_buf + 3 * CFL_BUF_LINE + 16, 8);
+
+ sum_32x4 = vpadalq_u16(sum_32x4, row0_1);
+ sum_32x4 = vpadalq_u16(sum_32x4, row1_1);
+ sum_32x4 = vpadalq_u16(sum_32x4, row2_1);
+ sum_32x4 = vpadalq_u16(sum_32x4, row3_1);
+ }
+ }
+ sum_buf += step;
+ } while (sum_buf < end);
+
+ // Permute and add in such a way that each lane contains the block sum.
+ // [A+C+B+D, B+D+A+C, C+A+D+B, D+B+C+A]
+#ifdef __aarch64__
+ sum_32x4 = vpaddq_u32(sum_32x4, sum_32x4);
+ sum_32x4 = vpaddq_u32(sum_32x4, sum_32x4);
+#else
+ uint32x4_t flip =
+ vcombine_u32(vget_high_u32(sum_32x4), vget_low_u32(sum_32x4));
+ sum_32x4 = vaddq_u32(sum_32x4, flip);
+ sum_32x4 = vaddq_u32(sum_32x4, vrev64q_u32(sum_32x4));
+#endif
+
+ // Computing the average could be done using scalars, but getting off the NEON
+ // engine introduces latency, so we use vqrshrn.
+ int16x4_t avg_16x4;
+  // The shift amount of vqrshrn_n_u32 must be a compile-time constant, so
+  // each value of num_pel_log2 needs its own call; constant propagation makes
+  // for some ugly code.
+ switch (num_pel_log2) {
+ case 4: avg_16x4 = vreinterpret_s16_u16(vqrshrn_n_u32(sum_32x4, 4)); break;
+ case 5: avg_16x4 = vreinterpret_s16_u16(vqrshrn_n_u32(sum_32x4, 5)); break;
+ case 6: avg_16x4 = vreinterpret_s16_u16(vqrshrn_n_u32(sum_32x4, 6)); break;
+ case 7: avg_16x4 = vreinterpret_s16_u16(vqrshrn_n_u32(sum_32x4, 7)); break;
+ case 8: avg_16x4 = vreinterpret_s16_u16(vqrshrn_n_u32(sum_32x4, 8)); break;
+ case 9: avg_16x4 = vreinterpret_s16_u16(vqrshrn_n_u32(sum_32x4, 9)); break;
+ case 10:
+ avg_16x4 = vreinterpret_s16_u16(vqrshrn_n_u32(sum_32x4, 10));
+ break;
+ default: assert(0);
+ }
+
+ if (width == 4) {
+ do {
+ vst1_s16(dst, vsub_s16(vreinterpret_s16_u16(vld1_u16(src)), avg_16x4));
+ src += CFL_BUF_LINE;
+ dst += CFL_BUF_LINE;
+ } while (src < end);
+ } else {
+ const int16x8_t avg_16x8 = vcombine_s16(avg_16x4, avg_16x4);
+ do {
+ vldsubstq_s16(dst, src, 0, avg_16x8);
+ vldsubstq_s16(dst, src, CFL_BUF_LINE, avg_16x8);
+ vldsubstq_s16(dst, src, 2 * CFL_BUF_LINE, avg_16x8);
+ vldsubstq_s16(dst, src, 3 * CFL_BUF_LINE, avg_16x8);
+
+ if (width > 8) {
+ vldsubstq_s16(dst, src, 8, avg_16x8);
+ vldsubstq_s16(dst, src, 8 + CFL_BUF_LINE, avg_16x8);
+ vldsubstq_s16(dst, src, 8 + 2 * CFL_BUF_LINE, avg_16x8);
+ vldsubstq_s16(dst, src, 8 + 3 * CFL_BUF_LINE, avg_16x8);
+ }
+ if (width == 32) {
+ vldsubstq_s16(dst, src, 16, avg_16x8);
+ vldsubstq_s16(dst, src, 16 + CFL_BUF_LINE, avg_16x8);
+ vldsubstq_s16(dst, src, 16 + 2 * CFL_BUF_LINE, avg_16x8);
+ vldsubstq_s16(dst, src, 16 + 3 * CFL_BUF_LINE, avg_16x8);
+ vldsubstq_s16(dst, src, 24, avg_16x8);
+ vldsubstq_s16(dst, src, 24 + CFL_BUF_LINE, avg_16x8);
+ vldsubstq_s16(dst, src, 24 + 2 * CFL_BUF_LINE, avg_16x8);
+ vldsubstq_s16(dst, src, 24 + 3 * CFL_BUF_LINE, avg_16x8);
+ }
+ src += step;
+ dst += step;
+ } while (src < end);
+ }
+}
+
+CFL_SUB_AVG_FN(neon)
+
+// Saturating negate 16-bit integers in a when the corresponding signed 16-bit
+// integer in b is negative.
+// Notes:
+// * Negating INT16_MIN results in INT16_MIN. However, this cannot occur in
+// practice, as scaled_luma is the multiplication of two absolute values.
+// * In the Intel equivalent, elements in a are zeroed out when the
+// corresponding elements in b are zero. Because vsign is used twice in a
+// row, with b in the first call becoming a in the second call, there's no
+// impact from not zeroing out.
+static int16x4_t vsign_s16(int16x4_t a, int16x4_t b) {
+ const int16x4_t mask = vshr_n_s16(b, 15);
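+  // mask is 0 when b >= 0 and all ones when b < 0, so (a + mask) ^ mask
+  // returns a unchanged or its two's-complement negation, respectively.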
+ return veor_s16(vadd_s16(a, mask), mask);
+}
+
+// Saturating negate 16-bit integers in a when the corresponding signed 16-bit
+// integer in b is negative.
+// Notes:
+// * Negating INT16_MIN results in INT16_MIN. However, this cannot occur in
+// practice, as scaled_luma is the multiplication of two absolute values.
+// * In the Intel equivalent, elements in a are zeroed out when the
+// corresponding elements in b are zero. Because vsignq is used twice in a
+// row, with b in the first call becoming a in the second call, there's no
+// impact from not zeroing out.
+static int16x8_t vsignq_s16(int16x8_t a, int16x8_t b) {
+ const int16x8_t mask = vshrq_n_s16(b, 15);
+ return veorq_s16(vaddq_s16(a, mask), mask);
+}
+
+static INLINE int16x4_t predict_w4(const int16_t *pred_buf_q3,
+ int16x4_t alpha_sign, int abs_alpha_q12,
+ int16x4_t dc) {
+ const int16x4_t ac_q3 = vld1_s16(pred_buf_q3);
+ const int16x4_t ac_sign = veor_s16(alpha_sign, ac_q3);
+ int16x4_t scaled_luma = vqrdmulh_n_s16(vabs_s16(ac_q3), abs_alpha_q12);
+ return vadd_s16(vsign_s16(scaled_luma, ac_sign), dc);
+}
+
+static INLINE int16x8_t predict_w8(const int16_t *pred_buf_q3,
+ int16x8_t alpha_sign, int abs_alpha_q12,
+ int16x8_t dc) {
+ const int16x8_t ac_q3 = vld1q_s16(pred_buf_q3);
+ const int16x8_t ac_sign = veorq_s16(alpha_sign, ac_q3);
+ int16x8_t scaled_luma = vqrdmulhq_n_s16(vabsq_s16(ac_q3), abs_alpha_q12);
+ return vaddq_s16(vsignq_s16(scaled_luma, ac_sign), dc);
+}
+
+static INLINE int16x8x2_t predict_w16(const int16_t *pred_buf_q3,
+ int16x8_t alpha_sign, int abs_alpha_q12,
+ int16x8_t dc) {
+  // vld2q_s16 interleaves, which is not useful for prediction. vld1q_s16_x2
+  // does not interleave, but is not currently available in the compiler used
+ // by the AOM build system.
+ const int16x8x2_t ac_q3 = vld2q_s16(pred_buf_q3);
+ const int16x8_t ac_sign_0 = veorq_s16(alpha_sign, ac_q3.val[0]);
+ const int16x8_t ac_sign_1 = veorq_s16(alpha_sign, ac_q3.val[1]);
+ const int16x8_t scaled_luma_0 =
+ vqrdmulhq_n_s16(vabsq_s16(ac_q3.val[0]), abs_alpha_q12);
+ const int16x8_t scaled_luma_1 =
+ vqrdmulhq_n_s16(vabsq_s16(ac_q3.val[1]), abs_alpha_q12);
+ int16x8x2_t result;
+ result.val[0] = vaddq_s16(vsignq_s16(scaled_luma_0, ac_sign_0), dc);
+ result.val[1] = vaddq_s16(vsignq_s16(scaled_luma_1, ac_sign_1), dc);
+ return result;
+}
+
+static INLINE int16x8x4_t predict_w32(const int16_t *pred_buf_q3,
+ int16x8_t alpha_sign, int abs_alpha_q12,
+ int16x8_t dc) {
+  // vld4q_s16 interleaves, which is not useful for prediction. vld1q_s16_x4
+  // does not interleave, but is not currently available in the compiler used
+ // by the AOM build system.
+ const int16x8x4_t ac_q3 = vld4q_s16(pred_buf_q3);
+ const int16x8_t ac_sign_0 = veorq_s16(alpha_sign, ac_q3.val[0]);
+ const int16x8_t ac_sign_1 = veorq_s16(alpha_sign, ac_q3.val[1]);
+ const int16x8_t ac_sign_2 = veorq_s16(alpha_sign, ac_q3.val[2]);
+ const int16x8_t ac_sign_3 = veorq_s16(alpha_sign, ac_q3.val[3]);
+ const int16x8_t scaled_luma_0 =
+ vqrdmulhq_n_s16(vabsq_s16(ac_q3.val[0]), abs_alpha_q12);
+ const int16x8_t scaled_luma_1 =
+ vqrdmulhq_n_s16(vabsq_s16(ac_q3.val[1]), abs_alpha_q12);
+ const int16x8_t scaled_luma_2 =
+ vqrdmulhq_n_s16(vabsq_s16(ac_q3.val[2]), abs_alpha_q12);
+ const int16x8_t scaled_luma_3 =
+ vqrdmulhq_n_s16(vabsq_s16(ac_q3.val[3]), abs_alpha_q12);
+ int16x8x4_t result;
+ result.val[0] = vaddq_s16(vsignq_s16(scaled_luma_0, ac_sign_0), dc);
+ result.val[1] = vaddq_s16(vsignq_s16(scaled_luma_1, ac_sign_1), dc);
+ result.val[2] = vaddq_s16(vsignq_s16(scaled_luma_2, ac_sign_2), dc);
+ result.val[3] = vaddq_s16(vsignq_s16(scaled_luma_3, ac_sign_3), dc);
+ return result;
+}
+
+static INLINE void cfl_predict_lbd_neon(const int16_t *pred_buf_q3,
+ uint8_t *dst, int dst_stride,
+ int alpha_q3, int width, int height) {
+ const int16_t abs_alpha_q12 = abs(alpha_q3) << 9;
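+  // With alpha promoted to Q12, vqrdmulh (saturating (2 * a * b + (1 << 15))
+  // >> 16) produces the rounded (abs(alpha_q3) * abs(ac_q3)) >> 6 product; the
+  // sign is reapplied afterwards via vsign_s16 / vsignq_s16.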
+ const int16_t *const end = pred_buf_q3 + height * CFL_BUF_LINE;
+ if (width == 4) {
+ const int16x4_t alpha_sign = vdup_n_s16(alpha_q3);
+ const int16x4_t dc = vdup_n_s16(*dst);
+ do {
+ const int16x4_t pred =
+ predict_w4(pred_buf_q3, alpha_sign, abs_alpha_q12, dc);
+ vsth_u8(dst, vqmovun_s16(vcombine_s16(pred, pred)));
+ dst += dst_stride;
+ } while ((pred_buf_q3 += CFL_BUF_LINE) < end);
+ } else {
+ const int16x8_t alpha_sign = vdupq_n_s16(alpha_q3);
+ const int16x8_t dc = vdupq_n_s16(*dst);
+ do {
+ if (width == 8) {
+ vst1_u8(dst, vqmovun_s16(predict_w8(pred_buf_q3, alpha_sign,
+ abs_alpha_q12, dc)));
+ } else if (width == 16) {
+ const int16x8x2_t pred =
+ predict_w16(pred_buf_q3, alpha_sign, abs_alpha_q12, dc);
+ const uint8x8x2_t predun = { { vqmovun_s16(pred.val[0]),
+ vqmovun_s16(pred.val[1]) } };
+ vst2_u8(dst, predun);
+ } else {
+ const int16x8x4_t pred =
+ predict_w32(pred_buf_q3, alpha_sign, abs_alpha_q12, dc);
+ const uint8x8x4_t predun = {
+ { vqmovun_s16(pred.val[0]), vqmovun_s16(pred.val[1]),
+ vqmovun_s16(pred.val[2]), vqmovun_s16(pred.val[3]) }
+ };
+ vst4_u8(dst, predun);
+ }
+ dst += dst_stride;
+ } while ((pred_buf_q3 += CFL_BUF_LINE) < end);
+ }
+}
+
+CFL_PREDICT_FN(neon, lbd)
+
+static INLINE uint16x4_t clamp_s16(int16x4_t a, int16x4_t max) {
+ return vreinterpret_u16_s16(vmax_s16(vmin_s16(a, max), vdup_n_s16(0)));
+}
+
+static INLINE uint16x8_t clampq_s16(int16x8_t a, int16x8_t max) {
+ return vreinterpretq_u16_s16(vmaxq_s16(vminq_s16(a, max), vdupq_n_s16(0)));
+}
+
+static INLINE uint16x8x2_t clamp2q_s16(int16x8x2_t a, int16x8_t max) {
+ uint16x8x2_t result;
+ result.val[0] = vreinterpretq_u16_s16(
+ vmaxq_s16(vminq_s16(a.val[0], max), vdupq_n_s16(0)));
+ result.val[1] = vreinterpretq_u16_s16(
+ vmaxq_s16(vminq_s16(a.val[1], max), vdupq_n_s16(0)));
+ return result;
+}
+
+static INLINE uint16x8x4_t clamp4q_s16(int16x8x4_t a, int16x8_t max) {
+ uint16x8x4_t result;
+ result.val[0] = vreinterpretq_u16_s16(
+ vmaxq_s16(vminq_s16(a.val[0], max), vdupq_n_s16(0)));
+ result.val[1] = vreinterpretq_u16_s16(
+ vmaxq_s16(vminq_s16(a.val[1], max), vdupq_n_s16(0)));
+ result.val[2] = vreinterpretq_u16_s16(
+ vmaxq_s16(vminq_s16(a.val[2], max), vdupq_n_s16(0)));
+ result.val[3] = vreinterpretq_u16_s16(
+ vmaxq_s16(vminq_s16(a.val[3], max), vdupq_n_s16(0)));
+ return result;
+}
+
+static INLINE void cfl_predict_hbd_neon(const int16_t *pred_buf_q3,
+ uint16_t *dst, int dst_stride,
+ int alpha_q3, int bd, int width,
+ int height) {
+ const int max = (1 << bd) - 1;
+ const int16_t abs_alpha_q12 = abs(alpha_q3) << 9;
+ const int16_t *const end = pred_buf_q3 + height * CFL_BUF_LINE;
+ if (width == 4) {
+ const int16x4_t alpha_sign = vdup_n_s16(alpha_q3);
+ const int16x4_t dc = vdup_n_s16(*dst);
+ const int16x4_t max_16x4 = vdup_n_s16(max);
+ do {
+ const int16x4_t scaled_luma =
+ predict_w4(pred_buf_q3, alpha_sign, abs_alpha_q12, dc);
+ vst1_u16(dst, clamp_s16(scaled_luma, max_16x4));
+ dst += dst_stride;
+ } while ((pred_buf_q3 += CFL_BUF_LINE) < end);
+ } else {
+ const int16x8_t alpha_sign = vdupq_n_s16(alpha_q3);
+ const int16x8_t dc = vdupq_n_s16(*dst);
+ const int16x8_t max_16x8 = vdupq_n_s16(max);
+ do {
+ if (width == 8) {
+ const int16x8_t pred =
+ predict_w8(pred_buf_q3, alpha_sign, abs_alpha_q12, dc);
+ vst1q_u16(dst, clampq_s16(pred, max_16x8));
+ } else if (width == 16) {
+ const int16x8x2_t pred =
+ predict_w16(pred_buf_q3, alpha_sign, abs_alpha_q12, dc);
+ vst2q_u16(dst, clamp2q_s16(pred, max_16x8));
+ } else {
+ const int16x8x4_t pred =
+ predict_w32(pred_buf_q3, alpha_sign, abs_alpha_q12, dc);
+ vst4q_u16(dst, clamp4q_s16(pred, max_16x8));
+ }
+ dst += dst_stride;
+ } while ((pred_buf_q3 += CFL_BUF_LINE) < end);
+ }
+}
+
+CFL_PREDICT_FN(neon, hbd)
diff --git a/third_party/aom/av1/common/arm/convolve_neon.c b/third_party/aom/av1/common/arm/convolve_neon.c
new file mode 100644
index 0000000000..d0c4f8ff67
--- /dev/null
+++ b/third_party/aom/av1/common/arm/convolve_neon.c
@@ -0,0 +1,1455 @@
+/*
+ *
+ * Copyright (c) 2018, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <assert.h>
+#include <arm_neon.h>
+
+#include "config/av1_rtcd.h"
+
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_ports/mem.h"
+#include "av1/common/convolve.h"
+#include "av1/common/filter.h"
+#include "av1/common/arm/convolve_neon.h"
+#include "av1/common/arm/mem_neon.h"
+#include "av1/common/arm/transpose_neon.h"
+
+static INLINE int16x4_t convolve8_4x4(const int16x4_t s0, const int16x4_t s1,
+ const int16x4_t s2, const int16x4_t s3,
+ const int16x4_t s4, const int16x4_t s5,
+ const int16x4_t s6, const int16x4_t s7,
+ const int16_t *filter) {
+ int16x4_t sum;
+
+ sum = vmul_n_s16(s0, filter[0]);
+ sum = vmla_n_s16(sum, s1, filter[1]);
+ sum = vmla_n_s16(sum, s2, filter[2]);
+ sum = vmla_n_s16(sum, s5, filter[5]);
+ sum = vmla_n_s16(sum, s6, filter[6]);
+ sum = vmla_n_s16(sum, s7, filter[7]);
+  /* filter[3] can take a max value of 128, so 128 * 255 + sum can exceed
+   * 16 bits; use saturating adds for the filter[3] and filter[4] terms.
+ */
+ sum = vqadd_s16(sum, vmul_n_s16(s3, filter[3]));
+ sum = vqadd_s16(sum, vmul_n_s16(s4, filter[4]));
+
+ return sum;
+}
+
+static INLINE uint8x8_t convolve8_horiz_8x8(
+ const int16x8_t s0, const int16x8_t s1, const int16x8_t s2,
+ const int16x8_t s3, const int16x8_t s4, const int16x8_t s5,
+ const int16x8_t s6, const int16x8_t s7, const int16_t *filter,
+ const int16x8_t shift_round_0, const int16x8_t shift_by_bits) {
+ int16x8_t sum;
+
+ sum = vmulq_n_s16(s0, filter[0]);
+ sum = vmlaq_n_s16(sum, s1, filter[1]);
+ sum = vmlaq_n_s16(sum, s2, filter[2]);
+ sum = vmlaq_n_s16(sum, s5, filter[5]);
+ sum = vmlaq_n_s16(sum, s6, filter[6]);
+ sum = vmlaq_n_s16(sum, s7, filter[7]);
+  /* filter[3] can take a max value of 128, so 128 * 255 + sum can exceed
+   * 16 bits; use saturating adds for the filter[3] and filter[4] terms.
+ */
+ sum = vqaddq_s16(sum, vmulq_n_s16(s3, filter[3]));
+ sum = vqaddq_s16(sum, vmulq_n_s16(s4, filter[4]));
+
+ sum = vqrshlq_s16(sum, shift_round_0);
+ sum = vqrshlq_s16(sum, shift_by_bits);
+
+ return vqmovun_s16(sum);
+}
+
+#if !defined(__aarch64__)
+static INLINE uint8x8_t convolve8_horiz_4x1(
+ const int16x4_t s0, const int16x4_t s1, const int16x4_t s2,
+ const int16x4_t s3, const int16x4_t s4, const int16x4_t s5,
+ const int16x4_t s6, const int16x4_t s7, const int16_t *filter,
+ const int16x4_t shift_round_0, const int16x4_t shift_by_bits) {
+ int16x4_t sum;
+
+ sum = vmul_n_s16(s0, filter[0]);
+ sum = vmla_n_s16(sum, s1, filter[1]);
+ sum = vmla_n_s16(sum, s2, filter[2]);
+ sum = vmla_n_s16(sum, s5, filter[5]);
+ sum = vmla_n_s16(sum, s6, filter[6]);
+ sum = vmla_n_s16(sum, s7, filter[7]);
+  /* filter[3] can take a max value of 128, so 128 * 255 + sum can exceed
+   * 16 bits; use saturating adds for the filter[3] and filter[4] terms.
+ */
+ sum = vqadd_s16(sum, vmul_n_s16(s3, filter[3]));
+ sum = vqadd_s16(sum, vmul_n_s16(s4, filter[4]));
+
+ sum = vqrshl_s16(sum, shift_round_0);
+ sum = vqrshl_s16(sum, shift_by_bits);
+
+ return vqmovun_s16(vcombine_s16(sum, sum));
+}
+#endif  // !defined(__aarch64__)
+
+static INLINE uint8x8_t convolve8_vert_8x4(
+ const int16x8_t s0, const int16x8_t s1, const int16x8_t s2,
+ const int16x8_t s3, const int16x8_t s4, const int16x8_t s5,
+ const int16x8_t s6, const int16x8_t s7, const int16_t *filter) {
+ int16x8_t sum;
+
+ sum = vmulq_n_s16(s0, filter[0]);
+ sum = vmlaq_n_s16(sum, s1, filter[1]);
+ sum = vmlaq_n_s16(sum, s2, filter[2]);
+ sum = vmlaq_n_s16(sum, s5, filter[5]);
+ sum = vmlaq_n_s16(sum, s6, filter[6]);
+ sum = vmlaq_n_s16(sum, s7, filter[7]);
+  /* filter[3] can take a max value of 128, so 128 * 255 + sum can exceed
+   * 16 bits; use saturating adds for the filter[3] and filter[4] terms.
+ */
+ sum = vqaddq_s16(sum, vmulq_n_s16(s3, filter[3]));
+ sum = vqaddq_s16(sum, vmulq_n_s16(s4, filter[4]));
+
+ return vqrshrun_n_s16(sum, FILTER_BITS);
+}
+
+static INLINE uint16x4_t convolve8_vert_4x4_s32(
+ const int16x4_t s0, const int16x4_t s1, const int16x4_t s2,
+ const int16x4_t s3, const int16x4_t s4, const int16x4_t s5,
+ const int16x4_t s6, const int16x4_t s7, const int16_t *y_filter,
+ const int32x4_t round_shift_vec, const int32x4_t offset_const,
+ const int32x4_t sub_const_vec) {
+ int32x4_t sum0;
+ uint16x4_t res;
+ const int32x4_t zero = vdupq_n_s32(0);
+
+ sum0 = vmull_n_s16(s0, y_filter[0]);
+ sum0 = vmlal_n_s16(sum0, s1, y_filter[1]);
+ sum0 = vmlal_n_s16(sum0, s2, y_filter[2]);
+ sum0 = vmlal_n_s16(sum0, s3, y_filter[3]);
+ sum0 = vmlal_n_s16(sum0, s4, y_filter[4]);
+ sum0 = vmlal_n_s16(sum0, s5, y_filter[5]);
+ sum0 = vmlal_n_s16(sum0, s6, y_filter[6]);
+ sum0 = vmlal_n_s16(sum0, s7, y_filter[7]);
+
+ sum0 = vaddq_s32(sum0, offset_const);
+ sum0 = vqrshlq_s32(sum0, round_shift_vec);
+ sum0 = vsubq_s32(sum0, sub_const_vec);
+ sum0 = vmaxq_s32(sum0, zero);
+
+ res = vmovn_u32(vreinterpretq_u32_s32(sum0));
+
+ return res;
+}
+
+static INLINE uint8x8_t convolve8_vert_8x4_s32(
+ const int16x8_t s0, const int16x8_t s1, const int16x8_t s2,
+ const int16x8_t s3, const int16x8_t s4, const int16x8_t s5,
+ const int16x8_t s6, const int16x8_t s7, const int16_t *y_filter,
+ const int32x4_t round_shift_vec, const int32x4_t offset_const,
+ const int32x4_t sub_const_vec, const int16x8_t vec_round_bits) {
+ int32x4_t sum0, sum1;
+ uint16x8_t res;
+ const int32x4_t zero = vdupq_n_s32(0);
+
+ sum0 = vmull_n_s16(vget_low_s16(s0), y_filter[0]);
+ sum0 = vmlal_n_s16(sum0, vget_low_s16(s1), y_filter[1]);
+ sum0 = vmlal_n_s16(sum0, vget_low_s16(s2), y_filter[2]);
+ sum0 = vmlal_n_s16(sum0, vget_low_s16(s3), y_filter[3]);
+ sum0 = vmlal_n_s16(sum0, vget_low_s16(s4), y_filter[4]);
+ sum0 = vmlal_n_s16(sum0, vget_low_s16(s5), y_filter[5]);
+ sum0 = vmlal_n_s16(sum0, vget_low_s16(s6), y_filter[6]);
+ sum0 = vmlal_n_s16(sum0, vget_low_s16(s7), y_filter[7]);
+
+ sum1 = vmull_n_s16(vget_high_s16(s0), y_filter[0]);
+ sum1 = vmlal_n_s16(sum1, vget_high_s16(s1), y_filter[1]);
+ sum1 = vmlal_n_s16(sum1, vget_high_s16(s2), y_filter[2]);
+ sum1 = vmlal_n_s16(sum1, vget_high_s16(s3), y_filter[3]);
+ sum1 = vmlal_n_s16(sum1, vget_high_s16(s4), y_filter[4]);
+ sum1 = vmlal_n_s16(sum1, vget_high_s16(s5), y_filter[5]);
+ sum1 = vmlal_n_s16(sum1, vget_high_s16(s6), y_filter[6]);
+ sum1 = vmlal_n_s16(sum1, vget_high_s16(s7), y_filter[7]);
+
+ sum0 = vaddq_s32(sum0, offset_const);
+ sum1 = vaddq_s32(sum1, offset_const);
+ sum0 = vqrshlq_s32(sum0, round_shift_vec);
+ sum1 = vqrshlq_s32(sum1, round_shift_vec);
+ sum0 = vsubq_s32(sum0, sub_const_vec);
+ sum1 = vsubq_s32(sum1, sub_const_vec);
+ sum0 = vmaxq_s32(sum0, zero);
+ sum1 = vmaxq_s32(sum1, zero);
+ res = vcombine_u16(vqmovn_u32(vreinterpretq_u32_s32(sum0)),
+ vqmovn_u32(vreinterpretq_u32_s32(sum1)));
+
+ res = vqrshlq_u16(res, vec_round_bits);
+
+ return vqmovn_u16(res);
+}
+
+void av1_convolve_x_sr_neon(const uint8_t *src, int src_stride, uint8_t *dst,
+ int dst_stride, int w, int h,
+ const InterpFilterParams *filter_params_x,
+ const InterpFilterParams *filter_params_y,
+ const int subpel_x_q4, const int subpel_y_q4,
+ ConvolveParams *conv_params) {
+ const uint8_t horiz_offset = filter_params_x->taps / 2 - 1;
+ const int8_t bits = FILTER_BITS - conv_params->round_0;
+
+ (void)subpel_y_q4;
+ (void)conv_params;
+ (void)filter_params_y;
+
+ uint8x8_t t0;
+#if defined(__aarch64__)
+ uint8x8_t t1, t2, t3;
+#endif
+
+ assert(bits >= 0);
+ assert((FILTER_BITS - conv_params->round_1) >= 0 ||
+ ((conv_params->round_0 + conv_params->round_1) == 2 * FILTER_BITS));
+
+ const int16_t *x_filter = av1_get_interp_filter_subpel_kernel(
+ filter_params_x, subpel_x_q4 & SUBPEL_MASK);
+
+ const int16x8_t shift_round_0 = vdupq_n_s16(-conv_params->round_0);
+ const int16x8_t shift_by_bits = vdupq_n_s16(-bits);
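+  // The filter output is rounded in two stages: first by round_0 via
+  // shift_round_0, then by the remaining FILTER_BITS - round_0 bits via
+  // shift_by_bits.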
+
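+  // Step the source pointer back by (taps / 2 - 1) samples so the horizontal
+  // filter window covers the inputs around each output pixel.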
+ src -= horiz_offset;
+#if defined(__aarch64__)
+ if (h == 4) {
+ uint8x8_t d01, d23;
+ int16x4_t s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, d0, d1, d2, d3;
+ int16x8_t d01_temp, d23_temp;
+
+ __builtin_prefetch(src + 0 * src_stride);
+ __builtin_prefetch(src + 1 * src_stride);
+ __builtin_prefetch(src + 2 * src_stride);
+ __builtin_prefetch(src + 3 * src_stride);
+
+ load_u8_8x4(src, src_stride, &t0, &t1, &t2, &t3);
+ transpose_u8_8x4(&t0, &t1, &t2, &t3);
+
+ s0 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t0)));
+ s1 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t1)));
+ s2 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t2)));
+ s3 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t3)));
+ s4 = vget_high_s16(vreinterpretq_s16_u16(vmovl_u8(t0)));
+ s5 = vget_high_s16(vreinterpretq_s16_u16(vmovl_u8(t1)));
+ s6 = vget_high_s16(vreinterpretq_s16_u16(vmovl_u8(t2)));
+ __builtin_prefetch(dst + 0 * dst_stride);
+ __builtin_prefetch(dst + 1 * dst_stride);
+ __builtin_prefetch(dst + 2 * dst_stride);
+ __builtin_prefetch(dst + 3 * dst_stride);
+ src += 7;
+
+ do {
+ load_u8_8x4(src, src_stride, &t0, &t1, &t2, &t3);
+ transpose_u8_8x4(&t0, &t1, &t2, &t3);
+
+ s7 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t0)));
+ s8 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t1)));
+ s9 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t2)));
+ s10 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t3)));
+
+ d0 = convolve8_4x4(s0, s1, s2, s3, s4, s5, s6, s7, x_filter);
+
+ d1 = convolve8_4x4(s1, s2, s3, s4, s5, s6, s7, s8, x_filter);
+
+ d2 = convolve8_4x4(s2, s3, s4, s5, s6, s7, s8, s9, x_filter);
+
+ d3 = convolve8_4x4(s3, s4, s5, s6, s7, s8, s9, s10, x_filter);
+
+ d01_temp = vqrshlq_s16(vcombine_s16(d0, d1), shift_round_0);
+ d23_temp = vqrshlq_s16(vcombine_s16(d2, d3), shift_round_0);
+
+ d01_temp = vqrshlq_s16(d01_temp, shift_by_bits);
+ d23_temp = vqrshlq_s16(d23_temp, shift_by_bits);
+
+ d01 = vqmovun_s16(d01_temp);
+ d23 = vqmovun_s16(d23_temp);
+
+ transpose_u8_4x4(&d01, &d23);
+
+ if (w != 2) {
+ vst1_lane_u32((uint32_t *)(dst + 0 * dst_stride), // 00 01 02 03
+ vreinterpret_u32_u8(d01), 0);
+ vst1_lane_u32((uint32_t *)(dst + 1 * dst_stride), // 10 11 12 13
+ vreinterpret_u32_u8(d23), 0);
+ vst1_lane_u32((uint32_t *)(dst + 2 * dst_stride), // 20 21 22 23
+ vreinterpret_u32_u8(d01), 1);
+ vst1_lane_u32((uint32_t *)(dst + 3 * dst_stride), // 30 31 32 33
+ vreinterpret_u32_u8(d23), 1);
+ } else {
+ vst1_lane_u16((uint16_t *)(dst + 0 * dst_stride), // 00 01
+ vreinterpret_u16_u8(d01), 0);
+ vst1_lane_u16((uint16_t *)(dst + 1 * dst_stride), // 10 11
+ vreinterpret_u16_u8(d23), 0);
+ vst1_lane_u16((uint16_t *)(dst + 2 * dst_stride), // 20 21
+ vreinterpret_u16_u8(d01), 2);
+ vst1_lane_u16((uint16_t *)(dst + 3 * dst_stride), // 30 31
+ vreinterpret_u16_u8(d23), 2);
+ }
+
+ s0 = s4;
+ s1 = s5;
+ s2 = s6;
+ s3 = s7;
+ s4 = s8;
+ s5 = s9;
+ s6 = s10;
+ src += 4;
+ dst += 4;
+ w -= 4;
+ } while (w > 0);
+ } else {
+#endif
+ int width;
+ const uint8_t *s;
+ int16x8_t s0, s1, s2, s3, s4, s5, s6, s7;
+
+#if defined(__aarch64__)
+ int16x8_t s8, s9, s10;
+ uint8x8_t t4, t5, t6, t7;
+#endif
+
+ if (w <= 4) {
+#if defined(__aarch64__)
+ do {
+ load_u8_8x8(src, src_stride, &t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7);
+ transpose_u8_8x8(&t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7);
+ s0 = vreinterpretq_s16_u16(vmovl_u8(t0));
+ s1 = vreinterpretq_s16_u16(vmovl_u8(t1));
+ s2 = vreinterpretq_s16_u16(vmovl_u8(t2));
+ s3 = vreinterpretq_s16_u16(vmovl_u8(t3));
+ s4 = vreinterpretq_s16_u16(vmovl_u8(t4));
+ s5 = vreinterpretq_s16_u16(vmovl_u8(t5));
+ s6 = vreinterpretq_s16_u16(vmovl_u8(t6));
+
+ load_u8_8x8(src + 7, src_stride, &t0, &t1, &t2, &t3, &t4, &t5, &t6,
+ &t7);
+ src += 8 * src_stride;
+ __builtin_prefetch(dst + 0 * dst_stride);
+ __builtin_prefetch(dst + 1 * dst_stride);
+ __builtin_prefetch(dst + 2 * dst_stride);
+ __builtin_prefetch(dst + 3 * dst_stride);
+ __builtin_prefetch(dst + 4 * dst_stride);
+ __builtin_prefetch(dst + 5 * dst_stride);
+ __builtin_prefetch(dst + 6 * dst_stride);
+ __builtin_prefetch(dst + 7 * dst_stride);
+
+ transpose_u8_4x8(&t0, &t1, &t2, &t3, t4, t5, t6, t7);
+
+ s7 = vreinterpretq_s16_u16(vmovl_u8(t0));
+ s8 = vreinterpretq_s16_u16(vmovl_u8(t1));
+ s9 = vreinterpretq_s16_u16(vmovl_u8(t2));
+ s10 = vreinterpretq_s16_u16(vmovl_u8(t3));
+
+ __builtin_prefetch(src + 0 * src_stride);
+ __builtin_prefetch(src + 1 * src_stride);
+ __builtin_prefetch(src + 2 * src_stride);
+ __builtin_prefetch(src + 3 * src_stride);
+ __builtin_prefetch(src + 4 * src_stride);
+ __builtin_prefetch(src + 5 * src_stride);
+ __builtin_prefetch(src + 6 * src_stride);
+ __builtin_prefetch(src + 7 * src_stride);
+ t0 = convolve8_horiz_8x8(s0, s1, s2, s3, s4, s5, s6, s7, x_filter,
+ shift_round_0, shift_by_bits);
+ t1 = convolve8_horiz_8x8(s1, s2, s3, s4, s5, s6, s7, s8, x_filter,
+ shift_round_0, shift_by_bits);
+ t2 = convolve8_horiz_8x8(s2, s3, s4, s5, s6, s7, s8, s9, x_filter,
+ shift_round_0, shift_by_bits);
+ t3 = convolve8_horiz_8x8(s3, s4, s5, s6, s7, s8, s9, s10, x_filter,
+ shift_round_0, shift_by_bits);
+
+ transpose_u8_8x4(&t0, &t1, &t2, &t3);
+
+ if ((w == 4) && (h > 4)) {
+ vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(t0),
+ 0); // 00 01 02 03
+ dst += dst_stride;
+ vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(t1),
+ 0); // 10 11 12 13
+ dst += dst_stride;
+ vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(t2),
+ 0); // 20 21 22 23
+ dst += dst_stride;
+ vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(t3),
+ 0); // 30 31 32 33
+ dst += dst_stride;
+ vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(t0),
+ 1); // 40 41 42 43
+ dst += dst_stride;
+ vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(t1),
+ 1); // 50 51 52 53
+ dst += dst_stride;
+ vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(t2),
+ 1); // 60 61 62 63
+ dst += dst_stride;
+ vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(t3),
+ 1); // 70 71 72 73
+ dst += dst_stride;
+ } else if ((w == 4) && (h == 2)) {
+ vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(t0),
+ 0); // 00 01 02 03
+ dst += dst_stride;
+ vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(t1),
+ 0); // 10 11 12 13
+ dst += dst_stride;
+ } else if ((w == 2) && (h > 4)) {
+ vst1_lane_u16((uint16_t *)dst, vreinterpret_u16_u8(t0), 0); // 00 01
+ dst += dst_stride;
+ vst1_lane_u16((uint16_t *)dst, vreinterpret_u16_u8(t1), 0); // 10 11
+ dst += dst_stride;
+ vst1_lane_u16((uint16_t *)dst, vreinterpret_u16_u8(t2), 0); // 20 21
+ dst += dst_stride;
+ vst1_lane_u16((uint16_t *)dst, vreinterpret_u16_u8(t3), 0); // 30 31
+ dst += dst_stride;
+ vst1_lane_u16((uint16_t *)dst, vreinterpret_u16_u8(t0), 2); // 40 41
+ dst += dst_stride;
+ vst1_lane_u16((uint16_t *)dst, vreinterpret_u16_u8(t1), 2); // 50 51
+ dst += dst_stride;
+ vst1_lane_u16((uint16_t *)dst, vreinterpret_u16_u8(t2), 2); // 60 61
+ dst += dst_stride;
+ vst1_lane_u16((uint16_t *)dst, vreinterpret_u16_u8(t3), 2); // 70 71
+ dst += dst_stride;
+ } else if ((w == 2) && (h == 2)) {
+ vst1_lane_u16((uint16_t *)dst, vreinterpret_u16_u8(t0), 0); // 00 01
+ dst += dst_stride;
+ vst1_lane_u16((uint16_t *)dst, vreinterpret_u16_u8(t1), 0); // 10 11
+ dst += dst_stride;
+ }
+ h -= 8;
+ } while (h > 0);
+#else
+ int16x8_t tt0;
+ int16x4_t x0, x1, x2, x3, x4, x5, x6, x7;
+ const int16x4_t shift_round_0_low = vget_low_s16(shift_round_0);
+ const int16x4_t shift_by_bits_low = vget_low_s16(shift_by_bits);
+ do {
+ t0 = vld1_u8(src); // a0 a1 a2 a3 a4 a5 a6 a7
+ tt0 = vreinterpretq_s16_u16(vmovl_u8(t0));
+ x0 = vget_low_s16(tt0); // a0 a1 a2 a3
+ x4 = vget_high_s16(tt0); // a4 a5 a6 a7
+
+ t0 = vld1_u8(src + 8); // a8 a9 a10 a11 a12 a13 a14 a15
+ tt0 = vreinterpretq_s16_u16(vmovl_u8(t0));
+ x7 = vget_low_s16(tt0); // a8 a9 a10 a11
+
+ x1 = vext_s16(x0, x4, 1); // a1 a2 a3 a4
+ x2 = vext_s16(x0, x4, 2); // a2 a3 a4 a5
+ x3 = vext_s16(x0, x4, 3); // a3 a4 a5 a6
+ x5 = vext_s16(x4, x7, 1); // a5 a6 a7 a8
+ x6 = vext_s16(x4, x7, 2); // a6 a7 a8 a9
+ x7 = vext_s16(x4, x7, 3); // a7 a8 a9 a10
+
+ src += src_stride;
+
+ t0 = convolve8_horiz_4x1(x0, x1, x2, x3, x4, x5, x6, x7, x_filter,
+ shift_round_0_low, shift_by_bits_low);
+
+ if (w == 4) {
+ vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(t0),
+ 0); // 00 01 02 03
+ dst += dst_stride;
+ } else if (w == 2) {
+ vst1_lane_u16((uint16_t *)dst, vreinterpret_u16_u8(t0), 0); // 00 01
+ dst += dst_stride;
+ }
+ h -= 1;
+ } while (h > 0);
+#endif
+ } else {
+ uint8_t *d;
+ int16x8_t s11;
+#if defined(__aarch64__)
+ int16x8_t s12, s13, s14;
+ do {
+ __builtin_prefetch(src + 0 * src_stride);
+ __builtin_prefetch(src + 1 * src_stride);
+ __builtin_prefetch(src + 2 * src_stride);
+ __builtin_prefetch(src + 3 * src_stride);
+ __builtin_prefetch(src + 4 * src_stride);
+ __builtin_prefetch(src + 5 * src_stride);
+ __builtin_prefetch(src + 6 * src_stride);
+ __builtin_prefetch(src + 7 * src_stride);
+ load_u8_8x8(src, src_stride, &t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7);
+ transpose_u8_8x8(&t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7);
+ s0 = vreinterpretq_s16_u16(vmovl_u8(t0));
+ s1 = vreinterpretq_s16_u16(vmovl_u8(t1));
+ s2 = vreinterpretq_s16_u16(vmovl_u8(t2));
+ s3 = vreinterpretq_s16_u16(vmovl_u8(t3));
+ s4 = vreinterpretq_s16_u16(vmovl_u8(t4));
+ s5 = vreinterpretq_s16_u16(vmovl_u8(t5));
+ s6 = vreinterpretq_s16_u16(vmovl_u8(t6));
+
+ width = w;
+ s = src + 7;
+ d = dst;
+ __builtin_prefetch(dst + 0 * dst_stride);
+ __builtin_prefetch(dst + 1 * dst_stride);
+ __builtin_prefetch(dst + 2 * dst_stride);
+ __builtin_prefetch(dst + 3 * dst_stride);
+ __builtin_prefetch(dst + 4 * dst_stride);
+ __builtin_prefetch(dst + 5 * dst_stride);
+ __builtin_prefetch(dst + 6 * dst_stride);
+ __builtin_prefetch(dst + 7 * dst_stride);
+
+ do {
+ load_u8_8x8(s, src_stride, &t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7);
+ transpose_u8_8x8(&t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7);
+ s7 = vreinterpretq_s16_u16(vmovl_u8(t0));
+ s8 = vreinterpretq_s16_u16(vmovl_u8(t1));
+ s9 = vreinterpretq_s16_u16(vmovl_u8(t2));
+ s10 = vreinterpretq_s16_u16(vmovl_u8(t3));
+ s11 = vreinterpretq_s16_u16(vmovl_u8(t4));
+ s12 = vreinterpretq_s16_u16(vmovl_u8(t5));
+ s13 = vreinterpretq_s16_u16(vmovl_u8(t6));
+ s14 = vreinterpretq_s16_u16(vmovl_u8(t7));
+
+ t0 = convolve8_horiz_8x8(s0, s1, s2, s3, s4, s5, s6, s7, x_filter,
+ shift_round_0, shift_by_bits);
+
+ t1 = convolve8_horiz_8x8(s1, s2, s3, s4, s5, s6, s7, s8, x_filter,
+ shift_round_0, shift_by_bits);
+
+ t2 = convolve8_horiz_8x8(s2, s3, s4, s5, s6, s7, s8, s9, x_filter,
+ shift_round_0, shift_by_bits);
+
+ t3 = convolve8_horiz_8x8(s3, s4, s5, s6, s7, s8, s9, s10, x_filter,
+ shift_round_0, shift_by_bits);
+
+ t4 = convolve8_horiz_8x8(s4, s5, s6, s7, s8, s9, s10, s11, x_filter,
+ shift_round_0, shift_by_bits);
+
+ t5 = convolve8_horiz_8x8(s5, s6, s7, s8, s9, s10, s11, s12, x_filter,
+ shift_round_0, shift_by_bits);
+
+ t6 = convolve8_horiz_8x8(s6, s7, s8, s9, s10, s11, s12, s13, x_filter,
+ shift_round_0, shift_by_bits);
+
+ t7 = convolve8_horiz_8x8(s7, s8, s9, s10, s11, s12, s13, s14,
+ x_filter, shift_round_0, shift_by_bits);
+
+ transpose_u8_8x8(&t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7);
+ if (h != 2) {
+ store_u8_8x8(d, dst_stride, t0, t1, t2, t3, t4, t5, t6, t7);
+ } else {
+ store_row2_u8_8x8(d, dst_stride, t0, t1);
+ }
+ s0 = s8;
+ s1 = s9;
+ s2 = s10;
+ s3 = s11;
+ s4 = s12;
+ s5 = s13;
+ s6 = s14;
+ s += 8;
+ d += 8;
+ width -= 8;
+ } while (width > 0);
+ src += 8 * src_stride;
+ dst += 8 * dst_stride;
+ h -= 8;
+ } while (h > 0);
+#else
+ do {
+ t0 = vld1_u8(src); // a0 a1 a2 a3 a4 a5 a6 a7
+ s0 = vreinterpretq_s16_u16(vmovl_u8(t0));
+
+ width = w;
+ s = src + 8;
+ d = dst;
+ __builtin_prefetch(dst);
+
+ do {
+ t0 = vld1_u8(s); // a8 a9 a10 a11 a12 a13 a14 a15
+ s7 = vreinterpretq_s16_u16(vmovl_u8(t0));
+ s11 = s0;
+ s0 = s7;
+
+ s1 = vextq_s16(s11, s7, 1); // a1 a2 a3 a4 a5 a6 a7 a8
+ s2 = vextq_s16(s11, s7, 2); // a2 a3 a4 a5 a6 a7 a8 a9
+ s3 = vextq_s16(s11, s7, 3); // a3 a4 a5 a6 a7 a8 a9 a10
+ s4 = vextq_s16(s11, s7, 4); // a4 a5 a6 a7 a8 a9 a10 a11
+ s5 = vextq_s16(s11, s7, 5); // a5 a6 a7 a8 a9 a10 a11 a12
+ s6 = vextq_s16(s11, s7, 6); // a6 a7 a8 a9 a10 a11 a12 a13
+ s7 = vextq_s16(s11, s7, 7); // a7 a8 a9 a10 a11 a12 a13 a14
+
+ t0 = convolve8_horiz_8x8(s11, s1, s2, s3, s4, s5, s6, s7, x_filter,
+ shift_round_0, shift_by_bits);
+ vst1_u8(d, t0);
+
+ s += 8;
+ d += 8;
+ width -= 8;
+ } while (width > 0);
+ src += src_stride;
+ dst += dst_stride;
+ h -= 1;
+ } while (h > 0);
+#endif
+ }
+#if defined(__aarch64__)
+ }
+#endif
+}
+
+void av1_convolve_y_sr_neon(const uint8_t *src, int src_stride, uint8_t *dst,
+ int dst_stride, int w, int h,
+ const InterpFilterParams *filter_params_x,
+ const InterpFilterParams *filter_params_y,
+ const int subpel_x_q4, const int subpel_y_q4,
+ ConvolveParams *conv_params) {
+ const int vert_offset = filter_params_y->taps / 2 - 1;
+
+ src -= vert_offset * src_stride;
+
+ (void)filter_params_x;
+ (void)subpel_x_q4;
+ (void)conv_params;
+
+ assert(conv_params->round_0 <= FILTER_BITS);
+ assert(((conv_params->round_0 + conv_params->round_1) <= (FILTER_BITS + 1)) ||
+ ((conv_params->round_0 + conv_params->round_1) == (2 * FILTER_BITS)));
+
+ const int16_t *y_filter = av1_get_interp_filter_subpel_kernel(
+ filter_params_y, subpel_y_q4 & SUBPEL_MASK);
+
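+ // Narrow blocks (w <= 4): filter vertically on 4-wide vectors. The AArch64
+ // path below computes four output rows per loop iteration, the 32-bit Arm
+ // path one row.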
+ if (w <= 4) {
+ uint8x8_t d01;
+ int16x4_t s0, s1, s2, s3, s4, s5, s6, s7, d0;
+#if defined(__aarch64__)
+ uint8x8_t d23;
+ int16x4_t s8, s9, s10, d1, d2, d3;
+#endif
+ s0 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vld1_u8(src))));
+ src += src_stride;
+ s1 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vld1_u8(src))));
+ src += src_stride;
+ s2 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vld1_u8(src))));
+ src += src_stride;
+ s3 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vld1_u8(src))));
+ src += src_stride;
+ s4 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vld1_u8(src))));
+ src += src_stride;
+ s5 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vld1_u8(src))));
+ src += src_stride;
+ s6 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vld1_u8(src))));
+ src += src_stride;
+
+ do {
+ s7 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vld1_u8(src))));
+ src += src_stride;
+#if defined(__aarch64__)
+ s8 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vld1_u8(src))));
+ src += src_stride;
+ s9 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vld1_u8(src))));
+ src += src_stride;
+ s10 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vld1_u8(src))));
+ src += src_stride;
+
+ __builtin_prefetch(dst + 0 * dst_stride);
+ __builtin_prefetch(dst + 1 * dst_stride);
+ __builtin_prefetch(dst + 2 * dst_stride);
+ __builtin_prefetch(dst + 3 * dst_stride);
+ __builtin_prefetch(src + 0 * src_stride);
+ __builtin_prefetch(src + 1 * src_stride);
+ __builtin_prefetch(src + 2 * src_stride);
+ __builtin_prefetch(src + 3 * src_stride);
+ d0 = convolve8_4x4(s0, s1, s2, s3, s4, s5, s6, s7, y_filter);
+ d1 = convolve8_4x4(s1, s2, s3, s4, s5, s6, s7, s8, y_filter);
+ d2 = convolve8_4x4(s2, s3, s4, s5, s6, s7, s8, s9, y_filter);
+ d3 = convolve8_4x4(s3, s4, s5, s6, s7, s8, s9, s10, y_filter);
+
+ d01 = vqrshrun_n_s16(vcombine_s16(d0, d1), FILTER_BITS);
+ d23 = vqrshrun_n_s16(vcombine_s16(d2, d3), FILTER_BITS);
+ if ((w == 4) && (h != 2)) {
+ vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(d01),
+ 0); // 00 01 02 03
+ dst += dst_stride;
+ vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(d01),
+ 1); // 10 11 12 13
+ dst += dst_stride;
+ vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(d23),
+ 0); // 20 21 22 23
+ dst += dst_stride;
+ vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(d23),
+ 1); // 30 31 32 33
+ dst += dst_stride;
+ } else if ((w == 4) && (h == 2)) {
+ vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(d01),
+ 0); // 00 01 02 03
+ dst += dst_stride;
+ vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(d01),
+ 1); // 10 11 12 13
+ dst += dst_stride;
+ } else if ((w == 2) && (h != 2)) {
+ vst1_lane_u16((uint16_t *)dst, vreinterpret_u16_u8(d01), 0); // 00 01
+ dst += dst_stride;
+ vst1_lane_u16((uint16_t *)dst, vreinterpret_u16_u8(d01), 2); // 10 11
+ dst += dst_stride;
+ vst1_lane_u16((uint16_t *)dst, vreinterpret_u16_u8(d23), 0); // 20 21
+ dst += dst_stride;
+ vst1_lane_u16((uint16_t *)dst, vreinterpret_u16_u8(d23), 2); // 30 31
+ dst += dst_stride;
+ } else if ((w == 2) && (h == 2)) {
+ vst1_lane_u16((uint16_t *)dst, vreinterpret_u16_u8(d01), 0); // 00 01
+ dst += dst_stride;
+ vst1_lane_u16((uint16_t *)dst, vreinterpret_u16_u8(d01), 2); // 10 11
+ dst += dst_stride;
+ }
+ s0 = s4;
+ s1 = s5;
+ s2 = s6;
+ s3 = s7;
+ s4 = s8;
+ s5 = s9;
+ s6 = s10;
+ h -= 4;
+#else
+ __builtin_prefetch(dst + 0 * dst_stride);
+ __builtin_prefetch(src + 0 * src_stride);
+
+ d0 = convolve8_4x4(s0, s1, s2, s3, s4, s5, s6, s7, y_filter);
+
+ d01 = vqrshrun_n_s16(vcombine_s16(d0, d0), FILTER_BITS);
+
+ if (w == 4) {
+ vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(d01), 0);
+ dst += dst_stride;
+ } else if (w == 2) {
+ vst1_lane_u16((uint16_t *)dst, vreinterpret_u16_u8(d01), 0);
+ dst += dst_stride;
+ }
+ s0 = s1;
+ s1 = s2;
+ s2 = s3;
+ s3 = s4;
+ s4 = s5;
+ s5 = s6;
+ s6 = s7;
+ h -= 1;
+#endif
+ } while (h > 0);
+ } else {
+ int height;
+ const uint8_t *s;
+ uint8_t *d;
+ uint8x8_t t0;
+ int16x8_t s0, s1, s2, s3, s4, s5, s6, s7;
+#if defined(__aarch64__)
+ uint8x8_t t1, t2, t3;
+ int16x8_t s8, s9, s10;
+#endif
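+ // Wider blocks: process the block in 8-pixel-wide columns, again producing
+ // four rows per iteration on AArch64 and one row otherwise.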
+ do {
+ __builtin_prefetch(src + 0 * src_stride);
+ __builtin_prefetch(src + 1 * src_stride);
+ __builtin_prefetch(src + 2 * src_stride);
+ __builtin_prefetch(src + 3 * src_stride);
+ __builtin_prefetch(src + 4 * src_stride);
+ __builtin_prefetch(src + 5 * src_stride);
+ __builtin_prefetch(src + 6 * src_stride);
+ s = src;
+ s0 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(s)));
+ s += src_stride;
+ s1 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(s)));
+ s += src_stride;
+ s2 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(s)));
+ s += src_stride;
+ s3 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(s)));
+ s += src_stride;
+ s4 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(s)));
+ s += src_stride;
+ s5 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(s)));
+ s += src_stride;
+ s6 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(s)));
+ s += src_stride;
+ d = dst;
+ height = h;
+
+ do {
+ s7 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(s)));
+ s += src_stride;
+#if defined(__aarch64__)
+ s8 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(s)));
+ s += src_stride;
+ s9 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(s)));
+ s += src_stride;
+ s10 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(s)));
+ s += src_stride;
+
+ __builtin_prefetch(d + 0 * dst_stride);
+ __builtin_prefetch(d + 1 * dst_stride);
+ __builtin_prefetch(d + 2 * dst_stride);
+ __builtin_prefetch(d + 3 * dst_stride);
+ __builtin_prefetch(s + 0 * src_stride);
+ __builtin_prefetch(s + 1 * src_stride);
+ __builtin_prefetch(s + 2 * src_stride);
+ __builtin_prefetch(s + 3 * src_stride);
+ t0 = convolve8_vert_8x4(s0, s1, s2, s3, s4, s5, s6, s7, y_filter);
+ t1 = convolve8_vert_8x4(s1, s2, s3, s4, s5, s6, s7, s8, y_filter);
+ t2 = convolve8_vert_8x4(s2, s3, s4, s5, s6, s7, s8, s9, y_filter);
+ t3 = convolve8_vert_8x4(s3, s4, s5, s6, s7, s8, s9, s10, y_filter);
+ if (h != 2) {
+ vst1_u8(d, t0);
+ d += dst_stride;
+ vst1_u8(d, t1);
+ d += dst_stride;
+ vst1_u8(d, t2);
+ d += dst_stride;
+ vst1_u8(d, t3);
+ d += dst_stride;
+ } else {
+ vst1_u8(d, t0);
+ d += dst_stride;
+ vst1_u8(d, t1);
+ d += dst_stride;
+ }
+ s0 = s4;
+ s1 = s5;
+ s2 = s6;
+ s3 = s7;
+ s4 = s8;
+ s5 = s9;
+ s6 = s10;
+ height -= 4;
+#else
+ __builtin_prefetch(d);
+ __builtin_prefetch(s);
+
+ t0 = convolve8_vert_8x4(s0, s1, s2, s3, s4, s5, s6, s7, y_filter);
+
+ vst1_u8(d, t0);
+ d += dst_stride;
+
+ s0 = s1;
+ s1 = s2;
+ s2 = s3;
+ s3 = s4;
+ s4 = s5;
+ s5 = s6;
+ s6 = s7;
+ height -= 1;
+#endif
+ } while (height > 0);
+ src += 8;
+ dst += 8;
+ w -= 8;
+ } while (w > 0);
+ }
+}
+
+void av1_convolve_2d_sr_neon(const uint8_t *src, int src_stride, uint8_t *dst,
+ int dst_stride, int w, int h,
+ const InterpFilterParams *filter_params_x,
+ const InterpFilterParams *filter_params_y,
+ const int subpel_x_q4, const int subpel_y_q4,
+ ConvolveParams *conv_params) {
+ int im_dst_stride;
+ int width, height;
+ uint8x8_t t0;
+#if defined(__aarch64__)
+ uint8x8_t t1, t2, t3, t4, t5, t6, t7;
+#endif
+
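+ // Two-pass 2D filter: the horizontal pass writes 16-bit intermediate values
+ // into im_block (stride MAX_SB_SIZE), and the vertical pass then filters the
+ // intermediate block down to 8-bit output pixels.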
+ DECLARE_ALIGNED(16, int16_t,
+ im_block[(MAX_SB_SIZE + HORIZ_EXTRA_ROWS) * MAX_SB_SIZE]);
+
+ const int bd = 8;
+ const int im_h = h + filter_params_y->taps - 1;
+ const int im_stride = MAX_SB_SIZE;
+ const int vert_offset = filter_params_y->taps / 2 - 1;
+ const int horiz_offset = filter_params_x->taps / 2 - 1;
+
+ const uint8_t *src_ptr = src - vert_offset * src_stride - horiz_offset;
+ const uint8_t *s;
+ int16_t *dst_ptr;
+
+ dst_ptr = im_block;
+ im_dst_stride = im_stride;
+ height = im_h;
+ width = w;
+
+ const int16_t round_bits =
+ FILTER_BITS * 2 - conv_params->round_0 - conv_params->round_1;
+ const int16x8_t vec_round_bits = vdupq_n_s16(-round_bits);
+ const int offset_bits = bd + 2 * FILTER_BITS - conv_params->round_0;
+ const int16_t *x_filter = av1_get_interp_filter_subpel_kernel(
+ filter_params_x, subpel_x_q4 & SUBPEL_MASK);
+
+ int16_t x_filter_tmp[8];
+ int16x8_t filter_x_coef = vld1q_s16(x_filter);
+
+ // The filter coefficients are all even, so halve them (shift right by 1)
+ // to reduce the intermediate precision requirements.
+ filter_x_coef = vshrq_n_s16(filter_x_coef, 1);
+ vst1q_s16(&x_filter_tmp[0], filter_x_coef);
+
+ assert(conv_params->round_0 > 0);
+
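+ // Horizontal pass into im_block. Since the filter taps were halved above,
+ // the rounding offset (horiz_const) and the round_0 shift below are each
+ // reduced by one bit to compensate.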
+ if (w <= 4) {
+ int16x4_t s0, s1, s2, s3, s4, s5, s6, s7, d0;
+#if defined(__aarch64__)
+ int16x4_t s8, s9, s10, d1, d2, d3;
+#endif
+
+ const int16x4_t horiz_const = vdup_n_s16((1 << (bd + FILTER_BITS - 2)));
+ const int16x4_t shift_round_0 = vdup_n_s16(-(conv_params->round_0 - 1));
+
+ do {
+ s = src_ptr;
+
+#if defined(__aarch64__)
+ __builtin_prefetch(s + 0 * src_stride);
+ __builtin_prefetch(s + 1 * src_stride);
+ __builtin_prefetch(s + 2 * src_stride);
+ __builtin_prefetch(s + 3 * src_stride);
+
+ load_u8_8x4(s, src_stride, &t0, &t1, &t2, &t3);
+ transpose_u8_8x4(&t0, &t1, &t2, &t3);
+
+ s0 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t0)));
+ s1 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t1)));
+ s2 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t2)));
+ s3 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t3)));
+ s4 = vget_high_s16(vreinterpretq_s16_u16(vmovl_u8(t0)));
+ s5 = vget_high_s16(vreinterpretq_s16_u16(vmovl_u8(t1)));
+ s6 = vget_high_s16(vreinterpretq_s16_u16(vmovl_u8(t2)));
+
+ __builtin_prefetch(dst_ptr + 0 * im_dst_stride);
+ __builtin_prefetch(dst_ptr + 1 * im_dst_stride);
+ __builtin_prefetch(dst_ptr + 2 * im_dst_stride);
+ __builtin_prefetch(dst_ptr + 3 * im_dst_stride);
+ s += 7;
+
+ load_u8_8x4(s, src_stride, &t0, &t1, &t2, &t3);
+ transpose_u8_8x4(&t0, &t1, &t2, &t3);
+
+ s7 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t0)));
+ s8 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t1)));
+ s9 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t2)));
+ s10 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t3)));
+
+ d0 = convolve8_4x4_s16(s0, s1, s2, s3, s4, s5, s6, s7, x_filter_tmp,
+ horiz_const, shift_round_0);
+ d1 = convolve8_4x4_s16(s1, s2, s3, s4, s5, s6, s7, s8, x_filter_tmp,
+ horiz_const, shift_round_0);
+ d2 = convolve8_4x4_s16(s2, s3, s4, s5, s6, s7, s8, s9, x_filter_tmp,
+ horiz_const, shift_round_0);
+ d3 = convolve8_4x4_s16(s3, s4, s5, s6, s7, s8, s9, s10, x_filter_tmp,
+ horiz_const, shift_round_0);
+
+ transpose_s16_4x4d(&d0, &d1, &d2, &d3);
+ if (w == 4) {
+ vst1_s16((dst_ptr + 0 * im_dst_stride), d0);
+ vst1_s16((dst_ptr + 1 * im_dst_stride), d1);
+ vst1_s16((dst_ptr + 2 * im_dst_stride), d2);
+ vst1_s16((dst_ptr + 3 * im_dst_stride), d3);
+ } else if (w == 2) {
+ vst1_lane_u32((uint32_t *)(dst_ptr + 0 * im_dst_stride),
+ vreinterpret_u32_s16(d0), 0);
+ vst1_lane_u32((uint32_t *)(dst_ptr + 1 * im_dst_stride),
+ vreinterpret_u32_s16(d1), 0);
+ vst1_lane_u32((uint32_t *)(dst_ptr + 2 * im_dst_stride),
+ vreinterpret_u32_s16(d2), 0);
+ vst1_lane_u32((uint32_t *)(dst_ptr + 3 * im_dst_stride),
+ vreinterpret_u32_s16(d3), 0);
+ }
+ src_ptr += 4 * src_stride;
+ dst_ptr += 4 * im_dst_stride;
+ height -= 4;
+#else
+ int16x8_t tt0;
+
+ __builtin_prefetch(s);
+
+ t0 = vld1_u8(s); // a0 a1 a2 a3 a4 a5 a6 a7
+ tt0 = vreinterpretq_s16_u16(vmovl_u8(t0));
+ s0 = vget_low_s16(tt0);
+ s4 = vget_high_s16(tt0);
+
+ __builtin_prefetch(dst_ptr);
+ s += 8;
+
+ t0 = vld1_u8(s); // a8 a9 a10 a11 a12 a13 a14 a15
+ s7 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t0)));
+
+ s1 = vext_s16(s0, s4, 1); // a1 a2 a3 a4
+ s2 = vext_s16(s0, s4, 2); // a2 a3 a4 a5
+ s3 = vext_s16(s0, s4, 3); // a3 a4 a5 a6
+ s5 = vext_s16(s4, s7, 1); // a5 a6 a7 a8
+ s6 = vext_s16(s4, s7, 2); // a6 a7 a8 a9
+ s7 = vext_s16(s4, s7, 3); // a7 a8 a9 a10
+
+ d0 = convolve8_4x4_s16(s0, s1, s2, s3, s4, s5, s6, s7, x_filter_tmp,
+ horiz_const, shift_round_0);
+
+ if (w == 4) {
+ vst1_s16(dst_ptr, d0);
+ dst_ptr += im_dst_stride;
+ } else if (w == 2) {
+ vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_s16(d0), 0);
+ dst_ptr += im_dst_stride;
+ }
+
+ src_ptr += src_stride;
+ height -= 1;
+#endif
+ } while (height > 0);
+ } else {
+ int16_t *d_tmp;
+ int16x8_t s0, s1, s2, s3, s4, s5, s6, s7, res0;
+#if defined(__aarch64__)
+ int16x8_t s8, s9, s10, res1, res2, res3, res4, res5, res6, res7;
+ int16x8_t s11, s12, s13, s14;
+#endif
+
+ const int16x8_t horiz_const = vdupq_n_s16((1 << (bd + FILTER_BITS - 2)));
+ const int16x8_t shift_round_0 = vdupq_n_s16(-(conv_params->round_0 - 1));
+
+#if defined(__aarch64__)
+ do {
+ __builtin_prefetch(src_ptr + 0 * src_stride);
+ __builtin_prefetch(src_ptr + 1 * src_stride);
+ __builtin_prefetch(src_ptr + 2 * src_stride);
+ __builtin_prefetch(src_ptr + 3 * src_stride);
+ __builtin_prefetch(src_ptr + 4 * src_stride);
+ __builtin_prefetch(src_ptr + 5 * src_stride);
+ __builtin_prefetch(src_ptr + 6 * src_stride);
+ __builtin_prefetch(src_ptr + 7 * src_stride);
+
+ load_u8_8x8(src_ptr, src_stride, &t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7);
+
+ transpose_u8_8x8(&t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7);
+
+ s0 = vreinterpretq_s16_u16(vmovl_u8(t0));
+ s1 = vreinterpretq_s16_u16(vmovl_u8(t1));
+ s2 = vreinterpretq_s16_u16(vmovl_u8(t2));
+ s3 = vreinterpretq_s16_u16(vmovl_u8(t3));
+ s4 = vreinterpretq_s16_u16(vmovl_u8(t4));
+ s5 = vreinterpretq_s16_u16(vmovl_u8(t5));
+ s6 = vreinterpretq_s16_u16(vmovl_u8(t6));
+
+ width = w;
+ s = src_ptr + 7;
+ d_tmp = dst_ptr;
+
+ __builtin_prefetch(dst_ptr + 0 * im_dst_stride);
+ __builtin_prefetch(dst_ptr + 1 * im_dst_stride);
+ __builtin_prefetch(dst_ptr + 2 * im_dst_stride);
+ __builtin_prefetch(dst_ptr + 3 * im_dst_stride);
+ __builtin_prefetch(dst_ptr + 4 * im_dst_stride);
+ __builtin_prefetch(dst_ptr + 5 * im_dst_stride);
+ __builtin_prefetch(dst_ptr + 6 * im_dst_stride);
+ __builtin_prefetch(dst_ptr + 7 * im_dst_stride);
+
+ do {
+ load_u8_8x8(s, src_stride, &t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7);
+
+ transpose_u8_8x8(&t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7);
+
+ s7 = vreinterpretq_s16_u16(vmovl_u8(t0));
+ s8 = vreinterpretq_s16_u16(vmovl_u8(t1));
+ s9 = vreinterpretq_s16_u16(vmovl_u8(t2));
+ s10 = vreinterpretq_s16_u16(vmovl_u8(t3));
+ s11 = vreinterpretq_s16_u16(vmovl_u8(t4));
+ s12 = vreinterpretq_s16_u16(vmovl_u8(t5));
+ s13 = vreinterpretq_s16_u16(vmovl_u8(t6));
+ s14 = vreinterpretq_s16_u16(vmovl_u8(t7));
+
+ res0 = convolve8_8x8_s16(s0, s1, s2, s3, s4, s5, s6, s7, x_filter_tmp,
+ horiz_const, shift_round_0);
+ res1 = convolve8_8x8_s16(s1, s2, s3, s4, s5, s6, s7, s8, x_filter_tmp,
+ horiz_const, shift_round_0);
+ res2 = convolve8_8x8_s16(s2, s3, s4, s5, s6, s7, s8, s9, x_filter_tmp,
+ horiz_const, shift_round_0);
+ res3 = convolve8_8x8_s16(s3, s4, s5, s6, s7, s8, s9, s10, x_filter_tmp,
+ horiz_const, shift_round_0);
+ res4 = convolve8_8x8_s16(s4, s5, s6, s7, s8, s9, s10, s11, x_filter_tmp,
+ horiz_const, shift_round_0);
+ res5 = convolve8_8x8_s16(s5, s6, s7, s8, s9, s10, s11, s12,
+ x_filter_tmp, horiz_const, shift_round_0);
+ res6 = convolve8_8x8_s16(s6, s7, s8, s9, s10, s11, s12, s13,
+ x_filter_tmp, horiz_const, shift_round_0);
+ res7 = convolve8_8x8_s16(s7, s8, s9, s10, s11, s12, s13, s14,
+ x_filter_tmp, horiz_const, shift_round_0);
+
+ transpose_s16_8x8(&res0, &res1, &res2, &res3, &res4, &res5, &res6,
+ &res7);
+
+ store_s16_8x8(d_tmp, im_dst_stride, res0, res1, res2, res3, res4, res5,
+ res6, res7);
+
+ s0 = s8;
+ s1 = s9;
+ s2 = s10;
+ s3 = s11;
+ s4 = s12;
+ s5 = s13;
+ s6 = s14;
+ s += 8;
+ d_tmp += 8;
+ width -= 8;
+ } while (width > 0);
+ src_ptr += 8 * src_stride;
+ dst_ptr += 8 * im_dst_stride;
+ height -= 8;
+ } while (height > 0);
+#else
+ do {
+ t0 = vld1_u8(src_ptr);
+ s0 = vreinterpretq_s16_u16(vmovl_u8(t0)); // a0 a1 a2 a3 a4 a5 a6 a7
+
+ width = w;
+ s = src_ptr + 8;
+ d_tmp = dst_ptr;
+
+ __builtin_prefetch(dst_ptr);
+
+ do {
+ t0 = vld1_u8(s); // a8 a9 a10 a11 a12 a13 a14 a15
+ s7 = vreinterpretq_s16_u16(vmovl_u8(t0));
+ int16x8_t sum = s0;
+ s0 = s7;
+
+ s1 = vextq_s16(sum, s7, 1); // a1 a2 a3 a4 a5 a6 a7 a8
+ s2 = vextq_s16(sum, s7, 2); // a2 a3 a4 a5 a6 a7 a8 a9
+ s3 = vextq_s16(sum, s7, 3); // a3 a4 a5 a6 a7 a8 a9 a10
+ s4 = vextq_s16(sum, s7, 4); // a4 a5 a6 a7 a8 a9 a10 a11
+ s5 = vextq_s16(sum, s7, 5); // a5 a6 a7 a8 a9 a10 a11 a12
+ s6 = vextq_s16(sum, s7, 6); // a6 a7 a8 a9 a10 a11 a12 a13
+ s7 = vextq_s16(sum, s7, 7); // a7 a8 a9 a10 a11 a12 a13 a14
+
+ res0 = convolve8_8x8_s16(sum, s1, s2, s3, s4, s5, s6, s7, x_filter_tmp,
+ horiz_const, shift_round_0);
+
+ vst1q_s16(d_tmp, res0);
+
+ s += 8;
+ d_tmp += 8;
+ width -= 8;
+ } while (width > 0);
+ src_ptr += src_stride;
+ dst_ptr += im_dst_stride;
+ height -= 1;
+ } while (height > 0);
+#endif
+ }
+
+ // Vertical pass: filter the intermediate block down to the final 8-bit
+ // output.
+ {
+ uint8_t *dst_u8_ptr, *d_u8;
+ int16_t *v_src_ptr, *v_s;
+
+ const int32_t sub_const = (1 << (offset_bits - conv_params->round_1)) +
+ (1 << (offset_bits - conv_params->round_1 - 1));
+ const int16_t *y_filter = av1_get_interp_filter_subpel_kernel(
+ filter_params_y, subpel_y_q4 & SUBPEL_MASK);
+
+ const int32x4_t round_shift_vec = vdupq_n_s32(-(conv_params->round_1));
+ const int32x4_t offset_const = vdupq_n_s32(1 << offset_bits);
+ const int32x4_t sub_const_vec = vdupq_n_s32(sub_const);
+
+ src_stride = im_stride;
+ v_src_ptr = im_block;
+ dst_u8_ptr = dst;
+
+ height = h;
+ width = w;
+
+ if (width <= 4) {
+ int16x4_t s0, s1, s2, s3, s4, s5, s6, s7;
+ uint16x4_t d0;
+ uint16x8_t dd0;
+ uint8x8_t d01;
+
+#if defined(__aarch64__)
+ int16x4_t s8, s9, s10;
+ uint16x4_t d1, d2, d3;
+ uint16x8_t dd1;
+ uint8x8_t d23;
+#endif
+
+ d_u8 = dst_u8_ptr;
+ v_s = v_src_ptr;
+
+ __builtin_prefetch(v_s + 0 * im_stride);
+ __builtin_prefetch(v_s + 1 * im_stride);
+ __builtin_prefetch(v_s + 2 * im_stride);
+ __builtin_prefetch(v_s + 3 * im_stride);
+ __builtin_prefetch(v_s + 4 * im_stride);
+ __builtin_prefetch(v_s + 5 * im_stride);
+ __builtin_prefetch(v_s + 6 * im_stride);
+ __builtin_prefetch(v_s + 7 * im_stride);
+
+ load_s16_4x8(v_s, im_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6, &s7);
+ v_s += (7 * im_stride);
+
+ do {
+#if defined(__aarch64__)
+ load_s16_4x4(v_s, im_stride, &s7, &s8, &s9, &s10);
+ v_s += (im_stride << 2);
+
+ __builtin_prefetch(d_u8 + 0 * dst_stride);
+ __builtin_prefetch(d_u8 + 1 * dst_stride);
+ __builtin_prefetch(d_u8 + 2 * dst_stride);
+ __builtin_prefetch(d_u8 + 3 * dst_stride);
+
+ d0 = convolve8_vert_4x4_s32(s0, s1, s2, s3, s4, s5, s6, s7, y_filter,
+ round_shift_vec, offset_const,
+ sub_const_vec);
+ d1 = convolve8_vert_4x4_s32(s1, s2, s3, s4, s5, s6, s7, s8, y_filter,
+ round_shift_vec, offset_const,
+ sub_const_vec);
+ d2 = convolve8_vert_4x4_s32(s2, s3, s4, s5, s6, s7, s8, s9, y_filter,
+ round_shift_vec, offset_const,
+ sub_const_vec);
+ d3 = convolve8_vert_4x4_s32(s3, s4, s5, s6, s7, s8, s9, s10, y_filter,
+ round_shift_vec, offset_const,
+ sub_const_vec);
+
+ dd0 = vqrshlq_u16(vcombine_u16(d0, d1), vec_round_bits);
+ dd1 = vqrshlq_u16(vcombine_u16(d2, d3), vec_round_bits);
+
+ d01 = vqmovn_u16(dd0);
+ d23 = vqmovn_u16(dd1);
+
+ if ((w == 4) && (h != 2)) {
+ vst1_lane_u32((uint32_t *)d_u8, vreinterpret_u32_u8(d01),
+ 0); // 00 01 02 03
+ d_u8 += dst_stride;
+ vst1_lane_u32((uint32_t *)d_u8, vreinterpret_u32_u8(d01),
+ 1); // 10 11 12 13
+ d_u8 += dst_stride;
+ vst1_lane_u32((uint32_t *)d_u8, vreinterpret_u32_u8(d23),
+ 0); // 20 21 22 23
+ d_u8 += dst_stride;
+ vst1_lane_u32((uint32_t *)d_u8, vreinterpret_u32_u8(d23),
+ 1); // 30 31 32 33
+ d_u8 += dst_stride;
+ } else if ((w == 2) && (h != 2)) {
+ vst1_lane_u16((uint16_t *)d_u8, vreinterpret_u16_u8(d01),
+ 0); // 00 01
+ d_u8 += dst_stride;
+ vst1_lane_u16((uint16_t *)d_u8, vreinterpret_u16_u8(d01),
+ 2); // 10 11
+ d_u8 += dst_stride;
+ vst1_lane_u16((uint16_t *)d_u8, vreinterpret_u16_u8(d23),
+ 0); // 20 21
+ d_u8 += dst_stride;
+ vst1_lane_u16((uint16_t *)d_u8, vreinterpret_u16_u8(d23),
+ 2); // 30 31
+ d_u8 += dst_stride;
+ } else if ((w == 4) && (h == 2)) {
+ vst1_lane_u32((uint32_t *)d_u8, vreinterpret_u32_u8(d01),
+ 0); // 00 01 02 03
+ d_u8 += dst_stride;
+ vst1_lane_u32((uint32_t *)d_u8, vreinterpret_u32_u8(d01),
+ 1); // 10 11 12 13
+ d_u8 += dst_stride;
+ } else if ((w == 2) && (h == 2)) {
+ vst1_lane_u16((uint16_t *)d_u8, vreinterpret_u16_u8(d01),
+ 0); // 00 01
+ d_u8 += dst_stride;
+ vst1_lane_u16((uint16_t *)d_u8, vreinterpret_u16_u8(d01),
+ 2); // 10 11
+ d_u8 += dst_stride;
+ }
+
+ s0 = s4;
+ s1 = s5;
+ s2 = s6;
+ s3 = s7;
+ s4 = s8;
+ s5 = s9;
+ s6 = s10;
+ height -= 4;
+#else
+ s7 = vld1_s16(v_s);
+ v_s += im_stride;
+
+ __builtin_prefetch(d_u8 + 0 * dst_stride);
+
+ d0 = convolve8_vert_4x4_s32(s0, s1, s2, s3, s4, s5, s6, s7, y_filter,
+ round_shift_vec, offset_const,
+ sub_const_vec);
+
+ dd0 = vqrshlq_u16(vcombine_u16(d0, d0), vec_round_bits);
+ d01 = vqmovn_u16(dd0);
+
+ if (w == 4) {
+ vst1_lane_u32((uint32_t *)d_u8, vreinterpret_u32_u8(d01),
+ 0); // 00 01 02 03
+ d_u8 += dst_stride;
+
+ } else if (w == 2) {
+ vst1_lane_u16((uint16_t *)d_u8, vreinterpret_u16_u8(d01),
+ 0); // 00 01
+ d_u8 += dst_stride;
+ }
+
+ s0 = s1;
+ s1 = s2;
+ s2 = s3;
+ s3 = s4;
+ s4 = s5;
+ s5 = s6;
+ s6 = s7;
+ height -= 1;
+#endif
+ } while (height > 0);
+ } else {
+ // if width is a multiple of 8 & height is a multiple of 4
+ int16x8_t s0, s1, s2, s3, s4, s5, s6, s7;
+ uint8x8_t res0;
+#if defined(__aarch64__)
+ int16x8_t s8, s9, s10;
+ uint8x8_t res1, res2, res3;
+#endif
+
+ do {
+ __builtin_prefetch(v_src_ptr + 0 * im_stride);
+ __builtin_prefetch(v_src_ptr + 1 * im_stride);
+ __builtin_prefetch(v_src_ptr + 2 * im_stride);
+ __builtin_prefetch(v_src_ptr + 3 * im_stride);
+ __builtin_prefetch(v_src_ptr + 4 * im_stride);
+ __builtin_prefetch(v_src_ptr + 5 * im_stride);
+ __builtin_prefetch(v_src_ptr + 6 * im_stride);
+ __builtin_prefetch(v_src_ptr + 7 * im_stride);
+
+ v_s = v_src_ptr;
+ load_s16_8x8(v_s, im_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6, &s7);
+ v_s += (7 * im_stride);
+
+ d_u8 = dst_u8_ptr;
+ height = h;
+
+ do {
+#if defined(__aarch64__)
+ load_s16_8x4(v_s, im_stride, &s7, &s8, &s9, &s10);
+ v_s += (im_stride << 2);
+
+ __builtin_prefetch(d_u8 + 4 * dst_stride);
+ __builtin_prefetch(d_u8 + 5 * dst_stride);
+ __builtin_prefetch(d_u8 + 6 * dst_stride);
+ __builtin_prefetch(d_u8 + 7 * dst_stride);
+
+ res0 = convolve8_vert_8x4_s32(s0, s1, s2, s3, s4, s5, s6, s7,
+ y_filter, round_shift_vec, offset_const,
+ sub_const_vec, vec_round_bits);
+ res1 = convolve8_vert_8x4_s32(s1, s2, s3, s4, s5, s6, s7, s8,
+ y_filter, round_shift_vec, offset_const,
+ sub_const_vec, vec_round_bits);
+ res2 = convolve8_vert_8x4_s32(s2, s3, s4, s5, s6, s7, s8, s9,
+ y_filter, round_shift_vec, offset_const,
+ sub_const_vec, vec_round_bits);
+ res3 = convolve8_vert_8x4_s32(s3, s4, s5, s6, s7, s8, s9, s10,
+ y_filter, round_shift_vec, offset_const,
+ sub_const_vec, vec_round_bits);
+
+ if (h != 2) {
+ vst1_u8(d_u8, res0);
+ d_u8 += dst_stride;
+ vst1_u8(d_u8, res1);
+ d_u8 += dst_stride;
+ vst1_u8(d_u8, res2);
+ d_u8 += dst_stride;
+ vst1_u8(d_u8, res3);
+ d_u8 += dst_stride;
+ } else {
+ vst1_u8(d_u8, res0);
+ d_u8 += dst_stride;
+ vst1_u8(d_u8, res1);
+ d_u8 += dst_stride;
+ }
+ s0 = s4;
+ s1 = s5;
+ s2 = s6;
+ s3 = s7;
+ s4 = s8;
+ s5 = s9;
+ s6 = s10;
+ height -= 4;
+#else
+ s7 = vld1q_s16(v_s);
+ v_s += im_stride;
+
+ __builtin_prefetch(d_u8 + 0 * dst_stride);
+
+ res0 = convolve8_vert_8x4_s32(s0, s1, s2, s3, s4, s5, s6, s7,
+ y_filter, round_shift_vec, offset_const,
+ sub_const_vec, vec_round_bits);
+
+ vst1_u8(d_u8, res0);
+ d_u8 += dst_stride;
+
+ s0 = s1;
+ s1 = s2;
+ s2 = s3;
+ s3 = s4;
+ s4 = s5;
+ s5 = s6;
+ s6 = s7;
+ height -= 1;
+#endif
+ } while (height > 0);
+ v_src_ptr += 8;
+ dst_u8_ptr += 8;
+ w -= 8;
+ } while (w > 0);
+ }
+ }
+}
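+
+// 2D copy path (used when no sub-pel filtering is required): copies a w x h
+// block using the widest store that fits the block width.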
+void av1_convolve_2d_copy_sr_neon(const uint8_t *src, int src_stride,
+ uint8_t *dst, int dst_stride, int w, int h,
+ const InterpFilterParams *filter_params_x,
+ const InterpFilterParams *filter_params_y,
+ const int subpel_x_q4, const int subpel_y_q4,
+ ConvolveParams *conv_params) {
+ (void)filter_params_x;
+ (void)filter_params_y;
+ (void)subpel_x_q4;
+ (void)subpel_y_q4;
+ (void)conv_params;
+
+ const uint8_t *src1;
+ uint8_t *dst1;
+ int y;
+
+ if (!(w & 0x0F)) {
+ for (y = 0; y < h; ++y) {
+ src1 = src;
+ dst1 = dst;
+ for (int x = 0; x < (w >> 4); ++x) {
+ vst1q_u8(dst1, vld1q_u8(src1));
+ src1 += 16;
+ dst1 += 16;
+ }
+ src += src_stride;
+ dst += dst_stride;
+ }
+ } else if (!(w & 0x07)) {
+ for (y = 0; y < h; ++y) {
+ vst1_u8(dst, vld1_u8(src));
+ src += src_stride;
+ dst += dst_stride;
+ }
+ } else if (!(w & 0x03)) {
+ for (y = 0; y < h; ++y) {
+ vst1_lane_u32((uint32_t *)(dst), vreinterpret_u32_u8(vld1_u8(src)), 0);
+ src += src_stride;
+ dst += dst_stride;
+ }
+ } else if (!(w & 0x01)) {
+ for (y = 0; y < h; ++y) {
+ vst1_lane_u16((uint16_t *)(dst), vreinterpret_u16_u8(vld1_u8(src)), 0);
+ src += src_stride;
+ dst += dst_stride;
+ }
+ }
+}
diff --git a/third_party/aom/av1/common/arm/convolve_neon.h b/third_party/aom/av1/common/arm/convolve_neon.h
new file mode 100644
index 0000000000..f382984f27
--- /dev/null
+++ b/third_party/aom/av1/common/arm/convolve_neon.h
@@ -0,0 +1,228 @@
+/*
+ * Copyright (c) 2018, Alliance for Open Media. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef AOM_AV1_COMMON_ARM_CONVOLVE_NEON_H_
+#define AOM_AV1_COMMON_ARM_CONVOLVE_NEON_H_
+
+#include <arm_neon.h>
+
+#define HORIZ_EXTRA_ROWS ((SUBPEL_TAPS + 7) & ~0x07)
+
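+// Vertical pass of the symmetric 7-tap Wiener filter: mirror-image rows are
+// summed first so only four tap multiplies are needed, then the result is
+// rounded by round1_bits, clamped to be non-negative and narrowed to 8 bits.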
+static INLINE uint8x8_t wiener_convolve8_vert_4x8(
+ const int16x8_t s0, const int16x8_t s1, const int16x8_t s2,
+ const int16x8_t s3, const int16x8_t s4, const int16x8_t s5,
+ const int16x8_t s6, int16_t *filter_y, const int bd,
+ const int round1_bits) {
+ int16x8_t ss0, ss1, ss2;
+ int32x4_t sum0, sum1;
+ uint16x4_t tmp0, tmp1;
+ uint16x8_t tmp;
+ uint8x8_t res;
+
+ const int32_t round_const = (1 << (bd + round1_bits - 1));
+ const int32x4_t round_bits = vdupq_n_s32(-round1_bits);
+ const int32x4_t zero = vdupq_n_s32(0);
+ const int32x4_t round_vec = vdupq_n_s32(round_const);
+
+ ss0 = vaddq_s16(s0, s6);
+ ss1 = vaddq_s16(s1, s5);
+ ss2 = vaddq_s16(s2, s4);
+
+ sum0 = vmull_n_s16(vget_low_s16(ss0), filter_y[0]);
+ sum0 = vmlal_n_s16(sum0, vget_low_s16(ss1), filter_y[1]);
+ sum0 = vmlal_n_s16(sum0, vget_low_s16(ss2), filter_y[2]);
+ sum0 = vmlal_n_s16(sum0, vget_low_s16(s3), filter_y[3]);
+
+ sum1 = vmull_n_s16(vget_high_s16(ss0), filter_y[0]);
+ sum1 = vmlal_n_s16(sum1, vget_high_s16(ss1), filter_y[1]);
+ sum1 = vmlal_n_s16(sum1, vget_high_s16(ss2), filter_y[2]);
+ sum1 = vmlal_n_s16(sum1, vget_high_s16(s3), filter_y[3]);
+
+ sum0 = vsubq_s32(sum0, round_vec);
+ sum1 = vsubq_s32(sum1, round_vec);
+
+ /* right shift & rounding */
+ sum0 = vrshlq_s32(sum0, round_bits);
+ sum1 = vrshlq_s32(sum1, round_bits);
+
+ sum0 = vmaxq_s32(sum0, zero);
+ sum1 = vmaxq_s32(sum1, zero);
+
+ /* from int32x4_t to uint8x8_t */
+ tmp0 = vqmovn_u32(vreinterpretq_u32_s32(sum0));
+ tmp1 = vqmovn_u32(vreinterpretq_u32_s32(sum1));
+ tmp = vcombine_u16(tmp0, tmp1);
+ res = vqmovn_u16(tmp);
+
+ return res;
+}
+
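+// Horizontal Wiener filter producing 8 intermediate pixels. The products with
+// the three outer filter taps are accumulated in 16 bits and then widened; the
+// centre-tap product is formed directly in 32 bits (see the note below), after
+// which the sum is offset, rounded by round0_bits and clamped to the
+// intermediate range.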
+static INLINE uint16x8_t wiener_convolve8_horiz_8x8(
+ const int16x8_t s0, const int16x8_t s1, const int16x8_t s2,
+ const int16x8_t s3, int16_t *filter_x, const int bd,
+ const int round0_bits) {
+ int16x8_t sum;
+ uint16x8_t res;
+ int32x4_t sum_0, sum_1;
+ int32x4_t s3_0, s3_1;
+ const int32_t round_const_0 = (1 << (bd + FILTER_BITS - 1));
+ const int32_t round_const_1 = (1 << ((bd) + 1 + FILTER_BITS - round0_bits));
+
+ /* used for the right shift by conv_params->round_0 */
+ const int32x4_t round_bits = vdupq_n_s32(-round0_bits);
+
+ const int32x4_t round_vec_0 = vdupq_n_s32(round_const_0);
+ const int32x4_t round_vec_1 = vdupq_n_s32(round_const_1);
+
+ sum = vmulq_n_s16(s0, filter_x[0]);
+ sum = vmlaq_n_s16(sum, s1, filter_x[1]);
+ sum = vmlaq_n_s16(sum, s2, filter_x[2]);
+
+ /* widen sum from one 16x8 register to two 32x4 registers */
+ sum_0 = vmovl_s16(vget_low_s16(sum));
+ sum_1 = vmovl_s16(vget_high_s16(sum));
+
+ /* s[3] * filter_x[3]: the filter coefficient can be as large as 128, so the
+ * maximum possible value (128 * 128 * 255) exceeds 16 bits and must be
+ * accumulated in 32 bits.
+ */
+
+ s3_0 = vmull_n_s16(vget_low_s16(s3), filter_x[3]);
+ s3_1 = vmull_n_s16(vget_high_s16(s3), filter_x[3]);
+ sum_0 = vaddq_s32(sum_0, s3_0);
+ sum_1 = vaddq_s32(sum_1, s3_1);
+
+ /* Add the constant value */
+ sum_0 = vaddq_s32(sum_0, round_vec_0);
+ sum_1 = vaddq_s32(sum_1, round_vec_0);
+
+ /* right shift & rounding & saturating */
+ sum_0 = vqrshlq_s32(sum_0, round_bits);
+ sum_1 = vqrshlq_s32(sum_1, round_bits);
+
+ /* Clipping to max value */
+ sum_0 = vminq_s32(sum_0, round_vec_1);
+ sum_1 = vminq_s32(sum_1, round_vec_1);
+
+ res = vcombine_u16(vqmovun_s32(sum_0), vqmovun_s32(sum_1));
+ return res;
+}
+
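+// 4-wide variant of the horizontal Wiener filter: folds the symmetric tap
+// pairs, filters, then rounds and clamps the result to [0, round_vec_1].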
+static INLINE uint16x4_t wiener_convolve8_horiz_4x8(
+ const int16x4_t s0, const int16x4_t s1, const int16x4_t s2,
+ const int16x4_t s3, const int16x4_t s4, const int16x4_t s5,
+ const int16x4_t s6, int16_t *filter_x, const int bd,
+ const int round0_bits) {
+ uint16x4_t res;
+ int32x4_t sum_0, s3_0;
+ int16x4_t sum, temp0, temp1, temp2;
+
+ const int32_t round_const_0 = (1 << (bd + FILTER_BITS - 1));
+ const int32_t round_const_1 = (1 << ((bd) + 1 + FILTER_BITS - round0_bits));
+ const int32x4_t round_bits = vdupq_n_s32(-round0_bits);
+ const int32x4_t zero = vdupq_n_s32(0);
+ const int32x4_t round_vec_0 = vdupq_n_s32(round_const_0);
+ const int32x4_t round_vec_1 = vdupq_n_s32(round_const_1);
+
+ temp0 = vadd_s16(s0, s6);
+ temp1 = vadd_s16(s1, s5);
+ temp2 = vadd_s16(s2, s4);
+
+ sum = vmul_n_s16(temp0, filter_x[0]);
+ sum = vmla_n_s16(sum, temp1, filter_x[1]);
+ sum = vmla_n_s16(sum, temp2, filter_x[2]);
+ sum_0 = vmovl_s16(sum);
+
+ /* s[3] * filter_x[3]: the filter coefficient can be as large as 128, so the
+ * maximum possible value (128 * 128 * 255) exceeds 16 bits. Therefore,
+ * 32 bits are required to hold the result.
+ */
+ s3_0 = vmull_n_s16(s3, filter_x[3]);
+ sum_0 = vaddq_s32(sum_0, s3_0);
+
+ sum_0 = vaddq_s32(sum_0, round_vec_0);
+ sum_0 = vrshlq_s32(sum_0, round_bits);
+
+ sum_0 = vmaxq_s32(sum_0, zero);
+ sum_0 = vminq_s32(sum_0, round_vec_1);
+ res = vqmovun_s32(sum_0);
+ return res;
+}
+
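+// 8-tap filter on eight 16-bit samples with the whole accumulation kept in
+// 16 bits; horiz_const supplies the rounding offset and shift_round_0 the
+// (negated) shift, applied with a saturating rounding shift.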
+static INLINE int16x8_t
+convolve8_8x8_s16(const int16x8_t s0, const int16x8_t s1, const int16x8_t s2,
+ const int16x8_t s3, const int16x8_t s4, const int16x8_t s5,
+ const int16x8_t s6, const int16x8_t s7, const int16_t *filter,
+ const int16x8_t horiz_const, const int16x8_t shift_round_0) {
+ int16x8_t sum;
+ int16x8_t res;
+
+ sum = horiz_const;
+ sum = vmlaq_n_s16(sum, s0, filter[0]);
+ sum = vmlaq_n_s16(sum, s1, filter[1]);
+ sum = vmlaq_n_s16(sum, s2, filter[2]);
+ sum = vmlaq_n_s16(sum, s3, filter[3]);
+ sum = vmlaq_n_s16(sum, s4, filter[4]);
+ sum = vmlaq_n_s16(sum, s5, filter[5]);
+ sum = vmlaq_n_s16(sum, s6, filter[6]);
+ sum = vmlaq_n_s16(sum, s7, filter[7]);
+
+ res = vqrshlq_s16(sum, shift_round_0);
+
+ return res;
+}
+
+static INLINE int16x4_t
+convolve8_4x4_s16(const int16x4_t s0, const int16x4_t s1, const int16x4_t s2,
+ const int16x4_t s3, const int16x4_t s4, const int16x4_t s5,
+ const int16x4_t s6, const int16x4_t s7, const int16_t *filter,
+ const int16x4_t horiz_const, const int16x4_t shift_round_0) {
+ int16x4_t sum;
+ sum = horiz_const;
+ sum = vmla_n_s16(sum, s0, filter[0]);
+ sum = vmla_n_s16(sum, s1, filter[1]);
+ sum = vmla_n_s16(sum, s2, filter[2]);
+ sum = vmla_n_s16(sum, s3, filter[3]);
+ sum = vmla_n_s16(sum, s4, filter[4]);
+ sum = vmla_n_s16(sum, s5, filter[5]);
+ sum = vmla_n_s16(sum, s6, filter[6]);
+ sum = vmla_n_s16(sum, s7, filter[7]);
+
+ sum = vqrshl_s16(sum, shift_round_0);
+
+ return sum;
+}
+
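+// 8-tap filter on four samples with 32-bit accumulation: adds offset_const,
+// applies the rounding shift, clamps to be non-negative and narrows to
+// 16 bits.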
+static INLINE uint16x4_t convolve8_4x4_s32(
+ const int16x4_t s0, const int16x4_t s1, const int16x4_t s2,
+ const int16x4_t s3, const int16x4_t s4, const int16x4_t s5,
+ const int16x4_t s6, const int16x4_t s7, const int16_t *y_filter,
+ const int32x4_t round_shift_vec, const int32x4_t offset_const) {
+ int32x4_t sum0;
+ uint16x4_t res;
+ const int32x4_t zero = vdupq_n_s32(0);
+
+ sum0 = vmull_n_s16(s0, y_filter[0]);
+ sum0 = vmlal_n_s16(sum0, s1, y_filter[1]);
+ sum0 = vmlal_n_s16(sum0, s2, y_filter[2]);
+ sum0 = vmlal_n_s16(sum0, s3, y_filter[3]);
+ sum0 = vmlal_n_s16(sum0, s4, y_filter[4]);
+ sum0 = vmlal_n_s16(sum0, s5, y_filter[5]);
+ sum0 = vmlal_n_s16(sum0, s6, y_filter[6]);
+ sum0 = vmlal_n_s16(sum0, s7, y_filter[7]);
+
+ sum0 = vaddq_s32(sum0, offset_const);
+ sum0 = vqrshlq_s32(sum0, round_shift_vec);
+ sum0 = vmaxq_s32(sum0, zero);
+ res = vmovn_u32(vreinterpretq_u32_s32(sum0));
+
+ return res;
+}
+
+#endif // AOM_AV1_COMMON_ARM_CONVOLVE_NEON_H_
diff --git a/third_party/aom/av1/common/arm/jnt_convolve_neon.c b/third_party/aom/av1/common/arm/jnt_convolve_neon.c
new file mode 100644
index 0000000000..e5674ef7c2
--- /dev/null
+++ b/third_party/aom/av1/common/arm/jnt_convolve_neon.c
@@ -0,0 +1,1740 @@
+/*
+ * Copyright (c) 2018, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <arm_neon.h>
+#include <assert.h>
+
+#include "config/aom_config.h"
+#include "config/av1_rtcd.h"
+
+#include "aom_dsp/txfm_common.h"
+#include "aom_ports/mem.h"
+#include "av1/common/common.h"
+#include "av1/common/arm/convolve_neon.h"
+#include "av1/common/arm/mem_neon.h"
+#include "av1/common/arm/transpose_neon.h"
+
+#if !defined(__aarch64__)
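+// Single-row compound-average helpers, only needed on 32-bit Arm where the
+// convolution loops process one row at a time.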
+static INLINE void compute_avg_4x1(uint16x4_t res0, uint16x4_t d0,
+ const uint16_t fwd_offset,
+ const uint16_t bck_offset,
+ const int16x4_t sub_const_vec,
+ const int16_t round_bits,
+ const int use_jnt_comp_avg, uint8x8_t *t0) {
+ int16x4_t tmp0;
+ uint16x4_t tmp_u0;
+ uint32x4_t sum0;
+ int32x4_t dst0;
+ int16x8_t tmp4;
+
+ if (use_jnt_comp_avg) {
+ const int32x4_t round_bits_vec = vdupq_n_s32((int32_t)(-round_bits));
+
+ sum0 = vmull_n_u16(res0, fwd_offset);
+ sum0 = vmlal_n_u16(sum0, d0, bck_offset);
+
+ sum0 = vshrq_n_u32(sum0, DIST_PRECISION_BITS);
+
+ dst0 = vsubq_s32(vreinterpretq_s32_u32(sum0), vmovl_s16(sub_const_vec));
+
+ dst0 = vqrshlq_s32(dst0, round_bits_vec);
+
+ tmp0 = vqmovn_s32(dst0);
+ tmp4 = vcombine_s16(tmp0, tmp0);
+
+ *t0 = vqmovun_s16(tmp4);
+ } else {
+ const int16x4_t round_bits_vec = vdup_n_s16(-round_bits);
+ tmp_u0 = vhadd_u16(res0, d0);
+
+ tmp0 = vsub_s16(vreinterpret_s16_u16(tmp_u0), sub_const_vec);
+
+ tmp0 = vqrshl_s16(tmp0, round_bits_vec);
+
+ tmp4 = vcombine_s16(tmp0, tmp0);
+
+ *t0 = vqmovun_s16(tmp4);
+ }
+}
+
+static INLINE void compute_avg_8x1(uint16x8_t res0, uint16x8_t d0,
+ const uint16_t fwd_offset,
+ const uint16_t bck_offset,
+ const int16x4_t sub_const,
+ const int16_t round_bits,
+ const int use_jnt_comp_avg, uint8x8_t *t0) {
+ int16x4_t tmp0, tmp2;
+ int16x8_t f0;
+ uint32x4_t sum0, sum2;
+ int32x4_t dst0, dst2;
+
+ uint16x8_t tmp_u0;
+
+ if (use_jnt_comp_avg) {
+ const int32x4_t sub_const_vec = vmovl_s16(sub_const);
+ const int32x4_t round_bits_vec = vdupq_n_s32(-(int32_t)round_bits);
+
+ sum0 = vmull_n_u16(vget_low_u16(res0), fwd_offset);
+ sum0 = vmlal_n_u16(sum0, vget_low_u16(d0), bck_offset);
+ sum0 = vshrq_n_u32(sum0, DIST_PRECISION_BITS);
+
+ sum2 = vmull_n_u16(vget_high_u16(res0), fwd_offset);
+ sum2 = vmlal_n_u16(sum2, vget_high_u16(d0), bck_offset);
+ sum2 = vshrq_n_u32(sum2, DIST_PRECISION_BITS);
+
+ dst0 = vsubq_s32(vreinterpretq_s32_u32(sum0), sub_const_vec);
+ dst2 = vsubq_s32(vreinterpretq_s32_u32(sum2), sub_const_vec);
+
+ dst0 = vqrshlq_s32(dst0, round_bits_vec);
+ dst2 = vqrshlq_s32(dst2, round_bits_vec);
+
+ tmp0 = vqmovn_s32(dst0);
+ tmp2 = vqmovn_s32(dst2);
+
+ f0 = vcombine_s16(tmp0, tmp2);
+
+ *t0 = vqmovun_s16(f0);
+
+ } else {
+ const int16x8_t sub_const_vec = vcombine_s16(sub_const, sub_const);
+ const int16x8_t round_bits_vec = vdupq_n_s16(-round_bits);
+
+ tmp_u0 = vhaddq_u16(res0, d0);
+
+ f0 = vsubq_s16(vreinterpretq_s16_u16(tmp_u0), sub_const_vec);
+
+ f0 = vqrshlq_s16(f0, round_bits_vec);
+
+ *t0 = vqmovun_s16(f0);
+ }
+}
+#endif  // !defined(__aarch64__)
+
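+// Compound averaging of four 4-wide rows: blend the two references with the
+// distance weights when use_jnt_comp_avg is set (simple average otherwise),
+// remove the intermediate offset, round, clamp and narrow to 8 bits.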
+static INLINE void compute_avg_4x4(
+ uint16x4_t res0, uint16x4_t res1, uint16x4_t res2, uint16x4_t res3,
+ uint16x4_t d0, uint16x4_t d1, uint16x4_t d2, uint16x4_t d3,
+ const uint16_t fwd_offset, const uint16_t bck_offset,
+ const int16x4_t sub_const_vec, const int16_t round_bits,
+ const int use_jnt_comp_avg, uint8x8_t *t0, uint8x8_t *t1) {
+ int16x4_t tmp0, tmp1, tmp2, tmp3;
+ uint16x4_t tmp_u0, tmp_u1, tmp_u2, tmp_u3;
+ uint32x4_t sum0, sum1, sum2, sum3;
+
+ int32x4_t dst0, dst1, dst2, dst3;
+ int16x8_t tmp4, tmp5;
+ const int16x8_t zero = vdupq_n_s16(0);
+
+ if (use_jnt_comp_avg) {
+ const int32x4_t round_bits_vec = vdupq_n_s32((int32_t)(-round_bits));
+ const int32x4_t const_vec = vmovl_s16(sub_const_vec);
+
+ sum0 = vmull_n_u16(res0, fwd_offset);
+ sum0 = vmlal_n_u16(sum0, d0, bck_offset);
+ sum1 = vmull_n_u16(res1, fwd_offset);
+ sum1 = vmlal_n_u16(sum1, d1, bck_offset);
+ sum2 = vmull_n_u16(res2, fwd_offset);
+ sum2 = vmlal_n_u16(sum2, d2, bck_offset);
+ sum3 = vmull_n_u16(res3, fwd_offset);
+ sum3 = vmlal_n_u16(sum3, d3, bck_offset);
+
+ sum0 = vshrq_n_u32(sum0, DIST_PRECISION_BITS);
+ sum1 = vshrq_n_u32(sum1, DIST_PRECISION_BITS);
+ sum2 = vshrq_n_u32(sum2, DIST_PRECISION_BITS);
+ sum3 = vshrq_n_u32(sum3, DIST_PRECISION_BITS);
+
+ dst0 = vsubq_s32(vreinterpretq_s32_u32(sum0), const_vec);
+ dst1 = vsubq_s32(vreinterpretq_s32_u32(sum1), const_vec);
+ dst2 = vsubq_s32(vreinterpretq_s32_u32(sum2), const_vec);
+ dst3 = vsubq_s32(vreinterpretq_s32_u32(sum3), const_vec);
+
+ dst0 = vqrshlq_s32(dst0, round_bits_vec);
+ dst1 = vqrshlq_s32(dst1, round_bits_vec);
+ dst2 = vqrshlq_s32(dst2, round_bits_vec);
+ dst3 = vqrshlq_s32(dst3, round_bits_vec);
+
+ tmp0 = vqmovn_s32(dst0);
+ tmp1 = vqmovn_s32(dst1);
+ tmp2 = vqmovn_s32(dst2);
+ tmp3 = vqmovn_s32(dst3);
+ tmp4 = vcombine_s16(tmp0, tmp1);
+ tmp5 = vcombine_s16(tmp2, tmp3);
+ tmp4 = vmaxq_s16(tmp4, zero);
+ tmp5 = vmaxq_s16(tmp5, zero);
+
+ *t0 = vqmovn_u16(vreinterpretq_u16_s16(tmp4));
+ *t1 = vqmovn_u16(vreinterpretq_u16_s16(tmp5));
+ } else {
+ const int16x4_t round_bits_vec = vdup_n_s16(-round_bits);
+ tmp_u0 = vhadd_u16(res0, d0);
+ tmp_u1 = vhadd_u16(res1, d1);
+ tmp_u2 = vhadd_u16(res2, d2);
+ tmp_u3 = vhadd_u16(res3, d3);
+
+ tmp0 = vsub_s16(vreinterpret_s16_u16(tmp_u0), sub_const_vec);
+ tmp1 = vsub_s16(vreinterpret_s16_u16(tmp_u1), sub_const_vec);
+ tmp2 = vsub_s16(vreinterpret_s16_u16(tmp_u2), sub_const_vec);
+ tmp3 = vsub_s16(vreinterpret_s16_u16(tmp_u3), sub_const_vec);
+
+ tmp0 = vqrshl_s16(tmp0, round_bits_vec);
+ tmp1 = vqrshl_s16(tmp1, round_bits_vec);
+ tmp2 = vqrshl_s16(tmp2, round_bits_vec);
+ tmp3 = vqrshl_s16(tmp3, round_bits_vec);
+
+ tmp4 = vcombine_s16(tmp0, tmp1);
+ tmp5 = vcombine_s16(tmp2, tmp3);
+ tmp4 = vmaxq_s16(tmp4, zero);
+ tmp5 = vmaxq_s16(tmp5, zero);
+
+ *t0 = vqmovn_u16(vreinterpretq_u16_s16(tmp4));
+ *t1 = vqmovn_u16(vreinterpretq_u16_s16(tmp5));
+ }
+}
+
+static INLINE void compute_avg_8x4(
+ uint16x8_t res0, uint16x8_t res1, uint16x8_t res2, uint16x8_t res3,
+ uint16x8_t d0, uint16x8_t d1, uint16x8_t d2, uint16x8_t d3,
+ const uint16_t fwd_offset, const uint16_t bck_offset,
+ const int16x4_t sub_const, const int16_t round_bits,
+ const int use_jnt_comp_avg, uint8x8_t *t0, uint8x8_t *t1, uint8x8_t *t2,
+ uint8x8_t *t3) {
+ int16x4_t tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
+ int16x8_t f0, f1, f2, f3;
+ uint32x4_t sum0, sum1, sum2, sum3;
+ uint32x4_t sum4, sum5, sum6, sum7;
+ int32x4_t dst0, dst1, dst2, dst3;
+ int32x4_t dst4, dst5, dst6, dst7;
+ uint16x8_t tmp_u0, tmp_u1, tmp_u2, tmp_u3;
+ const int16x8_t zero = vdupq_n_s16(0);
+
+ if (use_jnt_comp_avg) {
+ const int32x4_t sub_const_vec = vmovl_s16(sub_const);
+ const int32x4_t round_bits_vec = vdupq_n_s32(-(int32_t)round_bits);
+
+ sum0 = vmull_n_u16(vget_low_u16(res0), fwd_offset);
+ sum0 = vmlal_n_u16(sum0, vget_low_u16(d0), bck_offset);
+ sum1 = vmull_n_u16(vget_low_u16(res1), fwd_offset);
+ sum1 = vmlal_n_u16(sum1, vget_low_u16(d1), bck_offset);
+ sum0 = vshrq_n_u32(sum0, DIST_PRECISION_BITS);
+ sum1 = vshrq_n_u32(sum1, DIST_PRECISION_BITS);
+
+ sum2 = vmull_n_u16(vget_high_u16(res0), fwd_offset);
+ sum2 = vmlal_n_u16(sum2, vget_high_u16(d0), bck_offset);
+ sum3 = vmull_n_u16(vget_high_u16(res1), fwd_offset);
+ sum3 = vmlal_n_u16(sum3, vget_high_u16(d1), bck_offset);
+ sum2 = vshrq_n_u32(sum2, DIST_PRECISION_BITS);
+ sum3 = vshrq_n_u32(sum3, DIST_PRECISION_BITS);
+
+ sum4 = vmull_n_u16(vget_low_u16(res2), fwd_offset);
+ sum4 = vmlal_n_u16(sum4, vget_low_u16(d2), bck_offset);
+ sum5 = vmull_n_u16(vget_low_u16(res3), fwd_offset);
+ sum5 = vmlal_n_u16(sum5, vget_low_u16(d3), bck_offset);
+ sum4 = vshrq_n_u32(sum4, DIST_PRECISION_BITS);
+ sum5 = vshrq_n_u32(sum5, DIST_PRECISION_BITS);
+
+ sum6 = vmull_n_u16(vget_high_u16(res2), fwd_offset);
+ sum6 = vmlal_n_u16(sum6, vget_high_u16(d2), bck_offset);
+ sum7 = vmull_n_u16(vget_high_u16(res3), fwd_offset);
+ sum7 = vmlal_n_u16(sum7, vget_high_u16(d3), bck_offset);
+ sum6 = vshrq_n_u32(sum6, DIST_PRECISION_BITS);
+ sum7 = vshrq_n_u32(sum7, DIST_PRECISION_BITS);
+
+ dst0 = vsubq_s32(vreinterpretq_s32_u32(sum0), sub_const_vec);
+ dst1 = vsubq_s32(vreinterpretq_s32_u32(sum1), sub_const_vec);
+ dst2 = vsubq_s32(vreinterpretq_s32_u32(sum2), sub_const_vec);
+ dst3 = vsubq_s32(vreinterpretq_s32_u32(sum3), sub_const_vec);
+ dst4 = vsubq_s32(vreinterpretq_s32_u32(sum4), sub_const_vec);
+ dst5 = vsubq_s32(vreinterpretq_s32_u32(sum5), sub_const_vec);
+ dst6 = vsubq_s32(vreinterpretq_s32_u32(sum6), sub_const_vec);
+ dst7 = vsubq_s32(vreinterpretq_s32_u32(sum7), sub_const_vec);
+
+ dst0 = vqrshlq_s32(dst0, round_bits_vec);
+ dst1 = vqrshlq_s32(dst1, round_bits_vec);
+ dst2 = vqrshlq_s32(dst2, round_bits_vec);
+ dst3 = vqrshlq_s32(dst3, round_bits_vec);
+ dst4 = vqrshlq_s32(dst4, round_bits_vec);
+ dst5 = vqrshlq_s32(dst5, round_bits_vec);
+ dst6 = vqrshlq_s32(dst6, round_bits_vec);
+ dst7 = vqrshlq_s32(dst7, round_bits_vec);
+
+ tmp0 = vqmovn_s32(dst0);
+ tmp1 = vqmovn_s32(dst1);
+ tmp2 = vqmovn_s32(dst2);
+ tmp3 = vqmovn_s32(dst3);
+ tmp4 = vqmovn_s32(dst4);
+ tmp5 = vqmovn_s32(dst5);
+ tmp6 = vqmovn_s32(dst6);
+ tmp7 = vqmovn_s32(dst7);
+
+ f0 = vcombine_s16(tmp0, tmp2);
+ f1 = vcombine_s16(tmp1, tmp3);
+ f2 = vcombine_s16(tmp4, tmp6);
+ f3 = vcombine_s16(tmp5, tmp7);
+
+ f0 = vmaxq_s16(f0, zero);
+ f1 = vmaxq_s16(f1, zero);
+ f2 = vmaxq_s16(f2, zero);
+ f3 = vmaxq_s16(f3, zero);
+
+ *t0 = vqmovn_u16(vreinterpretq_u16_s16(f0));
+ *t1 = vqmovn_u16(vreinterpretq_u16_s16(f1));
+ *t2 = vqmovn_u16(vreinterpretq_u16_s16(f2));
+ *t3 = vqmovn_u16(vreinterpretq_u16_s16(f3));
+
+ } else {
+ const int16x8_t sub_const_vec = vcombine_s16(sub_const, sub_const);
+ const int16x8_t round_bits_vec = vdupq_n_s16(-round_bits);
+
+ tmp_u0 = vhaddq_u16(res0, d0);
+ tmp_u1 = vhaddq_u16(res1, d1);
+ tmp_u2 = vhaddq_u16(res2, d2);
+ tmp_u3 = vhaddq_u16(res3, d3);
+
+ f0 = vsubq_s16(vreinterpretq_s16_u16(tmp_u0), sub_const_vec);
+ f1 = vsubq_s16(vreinterpretq_s16_u16(tmp_u1), sub_const_vec);
+ f2 = vsubq_s16(vreinterpretq_s16_u16(tmp_u2), sub_const_vec);
+ f3 = vsubq_s16(vreinterpretq_s16_u16(tmp_u3), sub_const_vec);
+
+ f0 = vqrshlq_s16(f0, round_bits_vec);
+ f1 = vqrshlq_s16(f1, round_bits_vec);
+ f2 = vqrshlq_s16(f2, round_bits_vec);
+ f3 = vqrshlq_s16(f3, round_bits_vec);
+
+ f0 = vmaxq_s16(f0, zero);
+ f1 = vmaxq_s16(f1, zero);
+ f2 = vmaxq_s16(f2, zero);
+ f3 = vmaxq_s16(f3, zero);
+
+ *t0 = vqmovn_u16(vreinterpretq_u16_s16(f0));
+ *t1 = vqmovn_u16(vreinterpretq_u16_s16(f1));
+ *t2 = vqmovn_u16(vreinterpretq_u16_s16(f2));
+ *t3 = vqmovn_u16(vreinterpretq_u16_s16(f3));
+ }
+}
+
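+// Horizontal pass of the compound 2D filter: writes 16-bit intermediate values
+// into im_block, handling blocks 4 pixels wide directly and wider blocks in
+// 8-pixel columns.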
+static INLINE void jnt_convolve_2d_horiz_neon(
+ const uint8_t *src, int src_stride, int16_t *im_block, const int im_stride,
+ int16_t *x_filter_tmp, const int im_h, int w, const int round_0) {
+ const int bd = 8;
+ const uint8_t *s;
+ int16_t *dst_ptr;
+ int dst_stride;
+ int width, height;
+
+ dst_ptr = im_block;
+ dst_stride = im_stride;
+ height = im_h;
+ width = w;
+
+ if (w == 4) {
+ int16x4_t s0, s1, s2, s3, s4, s5, s6, s7, d0;
+ int16x8_t tt0;
+ uint8x8_t t0;
+
+ const int16x4_t horiz_const = vdup_n_s16((1 << (bd + FILTER_BITS - 2)));
+ const int16x4_t shift_round_0 = vdup_n_s16(-(round_0));
+
+#if defined(__aarch64__)
+ int16x4_t s8, s9, s10, d1, d2, d3;
+ int16x8_t tt1, tt2, tt3;
+ uint8x8_t t1, t2, t3;
+#endif
+ do {
+ s = src;
+ __builtin_prefetch(s + 0 * src_stride);
+#if defined(__aarch64__)
+ __builtin_prefetch(s + 1 * src_stride);
+ __builtin_prefetch(s + 2 * src_stride);
+ __builtin_prefetch(s + 3 * src_stride);
+
+ load_u8_8x4(s, src_stride, &t0, &t1, &t2, &t3);
+ transpose_u8_8x4(&t0, &t1, &t2, &t3);
+ tt0 = vreinterpretq_s16_u16(vmovl_u8(t0));
+ tt1 = vreinterpretq_s16_u16(vmovl_u8(t1));
+ tt2 = vreinterpretq_s16_u16(vmovl_u8(t2));
+ tt3 = vreinterpretq_s16_u16(vmovl_u8(t3));
+ s0 = vget_low_s16(tt0);
+ s1 = vget_low_s16(tt1);
+ s2 = vget_low_s16(tt2);
+ s3 = vget_low_s16(tt3);
+ s4 = vget_high_s16(tt0);
+ s5 = vget_high_s16(tt1);
+ s6 = vget_high_s16(tt2);
+ __builtin_prefetch(dst_ptr + 0 * dst_stride);
+ __builtin_prefetch(dst_ptr + 1 * dst_stride);
+ __builtin_prefetch(dst_ptr + 2 * dst_stride);
+ __builtin_prefetch(dst_ptr + 3 * dst_stride);
+ s += 7;
+
+ load_u8_8x4(s, src_stride, &t0, &t1, &t2, &t3);
+ transpose_u8_8x4(&t0, &t1, &t2, &t3);
+ tt0 = vreinterpretq_s16_u16(vmovl_u8(t0));
+ tt1 = vreinterpretq_s16_u16(vmovl_u8(t1));
+ tt2 = vreinterpretq_s16_u16(vmovl_u8(t2));
+ tt3 = vreinterpretq_s16_u16(vmovl_u8(t3));
+ s7 = vget_low_s16(tt0);
+ s8 = vget_low_s16(tt1);
+ s9 = vget_low_s16(tt2);
+ s10 = vget_low_s16(tt3);
+
+ d0 = convolve8_4x4_s16(s0, s1, s2, s3, s4, s5, s6, s7, x_filter_tmp,
+ horiz_const, shift_round_0);
+ d1 = convolve8_4x4_s16(s1, s2, s3, s4, s5, s6, s7, s8, x_filter_tmp,
+ horiz_const, shift_round_0);
+ d2 = convolve8_4x4_s16(s2, s3, s4, s5, s6, s7, s8, s9, x_filter_tmp,
+ horiz_const, shift_round_0);
+ d3 = convolve8_4x4_s16(s3, s4, s5, s6, s7, s8, s9, s10, x_filter_tmp,
+ horiz_const, shift_round_0);
+
+ transpose_s16_4x4d(&d0, &d1, &d2, &d3);
+
+ vst1_s16((dst_ptr + 0 * dst_stride), d0);
+ vst1_s16((dst_ptr + 1 * dst_stride), d1);
+ vst1_s16((dst_ptr + 2 * dst_stride), d2);
+ vst1_s16((dst_ptr + 3 * dst_stride), d3);
+
+ src += 4 * src_stride;
+ dst_ptr += 4 * dst_stride;
+ height -= 4;
+#else
+ t0 = vld1_u8(s); // a0 a1 a2 a3 a4 a5 a6 a7
+ tt0 = vreinterpretq_s16_u16(vmovl_u8(t0)); // a0 a1 a2 a3 a4 a5 a6 a7
+ s0 = vget_low_s16(tt0); // a0 a1 a2 a3
+ s4 = vget_high_s16(tt0); // a4 a5 a6 a7
+ __builtin_prefetch(dst_ptr);
+ s += 8;
+ t0 = vld1_u8(s); // a8 a9 a10 a11
+
+ // a8 a9 a10 a11
+ s7 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t0)));
+
+ s1 = vext_s16(s0, s4, 1); // a1 a2 a3 a4
+ s2 = vext_s16(s0, s4, 2); // a2 a3 a4 a5
+ s3 = vext_s16(s0, s4, 3); // a3 a4 a5 a6
+ s5 = vext_s16(s4, s7, 1); // a5 a6 a7 a8
+ s6 = vext_s16(s4, s7, 2); // a6 a7 a8 a9
+ s7 = vext_s16(s4, s7, 3); // a7 a8 a9 a10
+
+ d0 = convolve8_4x4_s16(s0, s1, s2, s3, s4, s5, s6, s7, x_filter_tmp,
+ horiz_const, shift_round_0);
+
+ vst1_s16(dst_ptr, d0);
+
+ src += src_stride;
+ dst_ptr += dst_stride;
+ height -= 1;
+#endif
+ } while (height > 0);
+ } else {
+ int16_t *d_tmp;
+ int16x8_t s0, s1, s2, s3, s4, s5, s6, s7;
+ int16x8_t res0;
+ uint8x8_t t0;
+
+ const int16x8_t horiz_const = vdupq_n_s16((1 << (bd + FILTER_BITS - 2)));
+ const int16x8_t shift_round_0 = vdupq_n_s16(-(round_0));
+ do {
+#if defined(__aarch64__)
+ uint8x8_t t1, t2, t3, t4, t5, t6, t7;
+ int16x8_t s8, s9, s10, s11, s12, s13, s14;
+ int16x8_t res1, res2, res3, res4, res5, res6, res7;
+ __builtin_prefetch(src + 0 * src_stride);
+ __builtin_prefetch(src + 1 * src_stride);
+ __builtin_prefetch(src + 2 * src_stride);
+ __builtin_prefetch(src + 3 * src_stride);
+ __builtin_prefetch(src + 4 * src_stride);
+ __builtin_prefetch(src + 5 * src_stride);
+ __builtin_prefetch(src + 6 * src_stride);
+ __builtin_prefetch(src + 7 * src_stride);
+ load_u8_8x8(src, src_stride, &t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7);
+ transpose_u8_8x8(&t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7);
+ s0 = vreinterpretq_s16_u16(vmovl_u8(t0));
+ s1 = vreinterpretq_s16_u16(vmovl_u8(t1));
+ s2 = vreinterpretq_s16_u16(vmovl_u8(t2));
+ s3 = vreinterpretq_s16_u16(vmovl_u8(t3));
+ s4 = vreinterpretq_s16_u16(vmovl_u8(t4));
+ s5 = vreinterpretq_s16_u16(vmovl_u8(t5));
+ s6 = vreinterpretq_s16_u16(vmovl_u8(t6));
+
+ width = w;
+ s = src + 7;
+ d_tmp = dst_ptr;
+ __builtin_prefetch(dst_ptr + 0 * dst_stride);
+ __builtin_prefetch(dst_ptr + 1 * dst_stride);
+ __builtin_prefetch(dst_ptr + 2 * dst_stride);
+ __builtin_prefetch(dst_ptr + 3 * dst_stride);
+ __builtin_prefetch(dst_ptr + 4 * dst_stride);
+ __builtin_prefetch(dst_ptr + 5 * dst_stride);
+ __builtin_prefetch(dst_ptr + 6 * dst_stride);
+ __builtin_prefetch(dst_ptr + 7 * dst_stride);
+
+ do {
+ load_u8_8x8(s, src_stride, &t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7);
+ transpose_u8_8x8(&t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7);
+ s7 = vreinterpretq_s16_u16(vmovl_u8(t0));
+ s8 = vreinterpretq_s16_u16(vmovl_u8(t1));
+ s9 = vreinterpretq_s16_u16(vmovl_u8(t2));
+ s10 = vreinterpretq_s16_u16(vmovl_u8(t3));
+ s11 = vreinterpretq_s16_u16(vmovl_u8(t4));
+ s12 = vreinterpretq_s16_u16(vmovl_u8(t5));
+ s13 = vreinterpretq_s16_u16(vmovl_u8(t6));
+ s14 = vreinterpretq_s16_u16(vmovl_u8(t7));
+
+ res0 = convolve8_8x8_s16(s0, s1, s2, s3, s4, s5, s6, s7, x_filter_tmp,
+ horiz_const, shift_round_0);
+ res1 = convolve8_8x8_s16(s1, s2, s3, s4, s5, s6, s7, s8, x_filter_tmp,
+ horiz_const, shift_round_0);
+ res2 = convolve8_8x8_s16(s2, s3, s4, s5, s6, s7, s8, s9, x_filter_tmp,
+ horiz_const, shift_round_0);
+ res3 = convolve8_8x8_s16(s3, s4, s5, s6, s7, s8, s9, s10, x_filter_tmp,
+ horiz_const, shift_round_0);
+ res4 = convolve8_8x8_s16(s4, s5, s6, s7, s8, s9, s10, s11, x_filter_tmp,
+ horiz_const, shift_round_0);
+ res5 = convolve8_8x8_s16(s5, s6, s7, s8, s9, s10, s11, s12,
+ x_filter_tmp, horiz_const, shift_round_0);
+ res6 = convolve8_8x8_s16(s6, s7, s8, s9, s10, s11, s12, s13,
+ x_filter_tmp, horiz_const, shift_round_0);
+ res7 = convolve8_8x8_s16(s7, s8, s9, s10, s11, s12, s13, s14,
+ x_filter_tmp, horiz_const, shift_round_0);
+
+ transpose_s16_8x8(&res0, &res1, &res2, &res3, &res4, &res5, &res6,
+ &res7);
+
+ store_s16_8x8(d_tmp, dst_stride, res0, res1, res2, res3, res4, res5,
+ res6, res7);
+ s0 = s8;
+ s1 = s9;
+ s2 = s10;
+ s3 = s11;
+ s4 = s12;
+ s5 = s13;
+ s6 = s14;
+ s += 8;
+ d_tmp += 8;
+ width -= 8;
+ } while (width > 0);
+ src += 8 * src_stride;
+ dst_ptr += 8 * dst_stride;
+ height -= 8;
+#else
+ int16x8_t temp_0;
+ t0 = vld1_u8(src);
+ s0 = vreinterpretq_s16_u16(vmovl_u8(t0)); // a0 a1 a2 a3 a4 a5 a6 a7
+
+ width = w;
+ s = src + 8;
+ d_tmp = dst_ptr;
+ __builtin_prefetch(dst_ptr);
+
+ do {
+ t0 = vld1_u8(s); // a8 a9 a10 a11 a12 a13 a14 a15
+ s7 = vreinterpretq_s16_u16(vmovl_u8(t0));
+ temp_0 = s0;
+ s0 = s7;
+
+ s1 = vextq_s16(temp_0, s7, 1); // a1 a2 a3 a4 a5 a6 a7 a8
+ s2 = vextq_s16(temp_0, s7, 2); // a2 a3 a4 a5 a6 a7 a8 a9
+ s3 = vextq_s16(temp_0, s7, 3); // a3 a4 a5 a6 a7 a8 a9 a10
+ s4 = vextq_s16(temp_0, s7, 4); // a4 a5 a6 a7 a8 a9 a10 a11
+ s5 = vextq_s16(temp_0, s7, 5); // a5 a6 a7 a8 a9 a10 a11 a12
+ s6 = vextq_s16(temp_0, s7, 6); // a6 a7 a8 a9 a10 a11 a12 a13
+ s7 = vextq_s16(temp_0, s7, 7); // a7 a8 a9 a10 a11 a12 a13 a14
+
+ res0 = convolve8_8x8_s16(temp_0, s1, s2, s3, s4, s5, s6, s7,
+ x_filter_tmp, horiz_const, shift_round_0);
+ vst1q_s16(d_tmp, res0);
+
+ s += 8;
+ d_tmp += 8;
+ width -= 8;
+ } while (width > 0);
+ src += src_stride;
+ dst_ptr += dst_stride;
+ height -= 1;
+#endif
+ } while (height > 0);
+ }
+}
+
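+// Vertical pass of the compound 2D filter: filters the intermediate block in
+// 4-wide columns and either stores the 16-bit result to the CONV_BUF or, when
+// do_average is set, averages with it and writes 8-bit pixels to dst8.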
+static INLINE void jnt_convolve_2d_vert_neon(
+ int16_t *im_block, const int im_stride, uint8_t *dst8, int dst8_stride,
+ ConvolveParams *conv_params, const int16_t *y_filter, int h, int w) {
+ uint8_t *dst_u8_ptr, *d_u8;
+ CONV_BUF_TYPE *dst_ptr, *dst;
+ int16_t *src_ptr, *s;
+ uint16_t *d;
+
+ const int bd = 8;
+ int height;
+ int dst_stride = conv_params->dst_stride;
+ const int offset_bits = bd + 2 * FILTER_BITS - conv_params->round_0;
+ const int16_t sub_const = (1 << (offset_bits - conv_params->round_1)) +
+ (1 << (offset_bits - conv_params->round_1 - 1));
+
+ const int16_t round_bits =
+ 2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1;
+ const int offset = bd + 2 * FILTER_BITS - conv_params->round_0;
+ const int32x4_t round_shift_vec = vdupq_n_s32(-(conv_params->round_1));
+ const int32x4_t offset_const = vdupq_n_s32(1 << offset);
+ const int16x4_t sub_const_vec = vdup_n_s16(sub_const);
+ const uint16_t fwd_offset = conv_params->fwd_offset;
+ const uint16_t bck_offset = conv_params->bck_offset;
+ const int do_average = conv_params->do_average;
+ const int use_jnt_comp_avg = conv_params->use_jnt_comp_avg;
+
+ int16x4_t s0, s1, s2, s3, s4, s5, s6, s7;
+ uint16x4_t res4, d0;
+ uint8x8_t t0;
+
+#if defined(__aarch64__)
+ int16x4_t s8, s9, s10;
+ uint16x4_t res5, res6, res7, d1, d2, d3;
+ uint8x8_t t1;
+#endif
+
+ dst = conv_params->dst;
+ src_ptr = im_block;
+ dst_u8_ptr = dst8;
+ dst_ptr = dst;
+ height = h;
+
+ do {
+ d = dst_ptr;
+ d_u8 = dst_u8_ptr;
+ s = src_ptr;
+ height = h;
+
+ __builtin_prefetch(s + 0 * im_stride);
+ __builtin_prefetch(s + 1 * im_stride);
+ __builtin_prefetch(s + 2 * im_stride);
+ __builtin_prefetch(s + 3 * im_stride);
+ __builtin_prefetch(s + 4 * im_stride);
+ __builtin_prefetch(s + 5 * im_stride);
+ __builtin_prefetch(s + 6 * im_stride);
+ __builtin_prefetch(s + 7 * im_stride);
+
+ load_s16_4x8(s, im_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6, &s7);
+ s += (7 * im_stride);
+
+ do {
+#if defined(__aarch64__)
+ load_s16_4x4(s, im_stride, &s7, &s8, &s9, &s10);
+ s += (im_stride << 2);
+
+ __builtin_prefetch(d + 0 * dst_stride);
+ __builtin_prefetch(d + 1 * dst_stride);
+ __builtin_prefetch(d + 2 * dst_stride);
+ __builtin_prefetch(d + 3 * dst_stride);
+
+ __builtin_prefetch(d_u8 + 4 * dst8_stride);
+ __builtin_prefetch(d_u8 + 5 * dst8_stride);
+ __builtin_prefetch(d_u8 + 6 * dst8_stride);
+ __builtin_prefetch(d_u8 + 7 * dst8_stride);
+
+ d0 = convolve8_4x4_s32(s0, s1, s2, s3, s4, s5, s6, s7, y_filter,
+ round_shift_vec, offset_const);
+ d1 = convolve8_4x4_s32(s1, s2, s3, s4, s5, s6, s7, s8, y_filter,
+ round_shift_vec, offset_const);
+ d2 = convolve8_4x4_s32(s2, s3, s4, s5, s6, s7, s8, s9, y_filter,
+ round_shift_vec, offset_const);
+ d3 = convolve8_4x4_s32(s3, s4, s5, s6, s7, s8, s9, s10, y_filter,
+ round_shift_vec, offset_const);
+
+ if (do_average) {
+ load_u16_4x4(d, dst_stride, &res4, &res5, &res6, &res7);
+ d += (dst_stride << 2);
+
+ compute_avg_4x4(res4, res5, res6, res7, d0, d1, d2, d3, fwd_offset,
+ bck_offset, sub_const_vec, round_bits, use_jnt_comp_avg,
+ &t0, &t1);
+
+ vst1_lane_u32((uint32_t *)d_u8, vreinterpret_u32_u8(t0), 0);
+ d_u8 += dst8_stride;
+ vst1_lane_u32((uint32_t *)d_u8, vreinterpret_u32_u8(t0), 1);
+ d_u8 += dst8_stride;
+ vst1_lane_u32((uint32_t *)d_u8, vreinterpret_u32_u8(t1), 0);
+ d_u8 += dst8_stride;
+ vst1_lane_u32((uint32_t *)d_u8, vreinterpret_u32_u8(t1), 1);
+ d_u8 += dst8_stride;
+
+ } else {
+ store_u16_4x4(d, dst_stride, d0, d1, d2, d3);
+ d += (dst_stride << 2);
+ }
+ s0 = s4;
+ s1 = s5;
+ s2 = s6;
+ s3 = s7;
+ s4 = s8;
+ s5 = s9;
+ s6 = s10;
+ height -= 4;
+#else
+ s7 = vld1_s16(s);
+ s += (im_stride);
+
+ __builtin_prefetch(d + 0 * dst_stride);
+ __builtin_prefetch(d_u8 + 0 * dst8_stride);
+
+ d0 = convolve8_4x4_s32(s0, s1, s2, s3, s4, s5, s6, s7, y_filter,
+ round_shift_vec, offset_const);
+
+ if (do_average) {
+ res4 = vld1_u16(d);
+ d += (dst_stride);
+
+ compute_avg_4x1(res4, d0, fwd_offset, bck_offset, sub_const_vec,
+ round_bits, use_jnt_comp_avg, &t0);
+
+ vst1_lane_u32((uint32_t *)d_u8, vreinterpret_u32_u8(t0), 0);
+ d_u8 += dst8_stride;
+
+ } else {
+ vst1_u16(d, d0);
+ d += (dst_stride);
+ }
+ s0 = s1;
+ s1 = s2;
+ s2 = s3;
+ s3 = s4;
+ s4 = s5;
+ s5 = s6;
+ s6 = s7;
+ height--;
+#endif
+ } while (height > 0);
+ src_ptr += 4;
+ dst_ptr += 4;
+ dst_u8_ptr += 4;
+ w -= 4;
+ } while (w > 0);
+}
+
+void av1_jnt_convolve_2d_neon(const uint8_t *src, int src_stride, uint8_t *dst8,
+ int dst8_stride, int w, int h,
+ const InterpFilterParams *filter_params_x,
+ const InterpFilterParams *filter_params_y,
+ const int subpel_x_q4, const int subpel_y_q4,
+ ConvolveParams *conv_params) {
+ assert(!(w % 4));
+ assert(!(h % 4));
+
+ DECLARE_ALIGNED(16, int16_t,
+ im_block[(MAX_SB_SIZE + HORIZ_EXTRA_ROWS) * MAX_SB_SIZE]);
+
+ const int im_h = h + filter_params_y->taps - 1;
+ const int im_stride = MAX_SB_SIZE;
+ const int vert_offset = filter_params_y->taps / 2 - 1;
+ const int horiz_offset = filter_params_x->taps / 2 - 1;
+ const int round_0 = conv_params->round_0 - 1;
+ const uint8_t *src_ptr = src - vert_offset * src_stride - horiz_offset;
+ const int16_t *x_filter = av1_get_interp_filter_subpel_kernel(
+ filter_params_x, subpel_x_q4 & SUBPEL_MASK);
+ const int16_t *y_filter = av1_get_interp_filter_subpel_kernel(
+ filter_params_y, subpel_y_q4 & SUBPEL_MASK);
+
+ int16_t x_filter_tmp[8];
+ int16x8_t filter_x_coef = vld1q_s16(x_filter);
+
+  // The filter coefficients are all even, so halve them (shift right by 1)
+  // to reduce the intermediate precision requirements.
+ filter_x_coef = vshrq_n_s16(filter_x_coef, 1);
+ vst1q_s16(&x_filter_tmp[0], filter_x_coef);
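+  // The halving is compensated for in the horizontal pass, which rounds with
+  // round_0 = conv_params->round_0 - 1 bits (see above).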
+
+ jnt_convolve_2d_horiz_neon(src_ptr, src_stride, im_block, im_stride,
+ x_filter_tmp, im_h, w, round_0);
+
+ jnt_convolve_2d_vert_neon(im_block, im_stride, dst8, dst8_stride, conv_params,
+ y_filter, h, w);
+}
+
+void av1_jnt_convolve_2d_copy_neon(const uint8_t *src, int src_stride,
+ uint8_t *dst8, int dst8_stride, int w, int h,
+ const InterpFilterParams *filter_params_x,
+ const InterpFilterParams *filter_params_y,
+ const int subpel_x_q4, const int subpel_y_q4,
+ ConvolveParams *conv_params) {
+ uint8x8_t res0_8, res1_8, res2_8, res3_8, tmp_shift0, tmp_shift1, tmp_shift2,
+ tmp_shift3;
+ uint16x8_t res_q0, res_q1, res_q2, res_q3, tmp_q0, tmp_q1, tmp_q2, tmp_q3;
+ uint16x4_t tmp4, tmp5, tmp6, tmp7, res4, res5, res6, res7;
+ const uint8_t *src1, *src2;
+ uint8_t *dst8_1;
+ CONV_BUF_TYPE *dst = conv_params->dst, *dst_1, *dst_2;
+ const int dst_stride = conv_params->dst_stride;
+ int x, y;
+ const int16_t bits =
+ FILTER_BITS * 2 - conv_params->round_1 - conv_params->round_0;
+ const int bd = 8;
+ const int offset_bits = bd + 2 * FILTER_BITS - conv_params->round_0;
+ const int round_offset = (1 << (offset_bits - conv_params->round_1)) +
+ (1 << (offset_bits - conv_params->round_1 - 1));
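+  // round_offset biases the intermediate CONV_BUF_TYPE values so that they
+  // stay non-negative; compute_avg_* subtracts it again when blending back to
+  // 8 bits. With bd = 8 and the usual compound rounding (round_0 = 3,
+  // round_1 = 7) this evaluates to (1 << 12) + (1 << 11) = 6144.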
+ const int16x4_t sub_const_vec = vdup_n_s16((int16_t)round_offset);
+ const uint16x8_t dup_round_offset16x8 = vdupq_n_u16((uint16_t)round_offset);
+ const int16x4_t dup_bits16x4 = vdup_n_s16(bits);
+ const int16x8_t dup_bits16x8 = vdupq_n_s16(bits);
+
+ (void)filter_params_x;
+ (void)filter_params_y;
+ (void)subpel_x_q4;
+ (void)subpel_y_q4;
+
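+  // Copy path: each source pixel is widened, shifted left by 'bits' and
+  // offset into the compound buffer; when do_average is set, the new values
+  // are averaged with the existing buffer contents and rounded back to
+  // 8 bits.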
+ if (!(w & 0x07)) {
+ for (y = 0; y < (h >> 2); ++y) {
+ src1 = src;
+ dst8_1 = dst8;
+ dst_1 = dst;
+ for (x = 0; x < (w >> 3); ++x) {
+ src2 = src1;
+ load_u8_8x4(src2, src_stride, &res0_8, &res1_8, &res2_8, &res3_8);
+
+ res_q0 = vaddq_u16(vshlq_u16(vmovl_u8(res0_8), dup_bits16x8),
+ dup_round_offset16x8);
+ res_q1 = vaddq_u16(vshlq_u16(vmovl_u8(res1_8), dup_bits16x8),
+ dup_round_offset16x8);
+ res_q2 = vaddq_u16(vshlq_u16(vmovl_u8(res2_8), dup_bits16x8),
+ dup_round_offset16x8);
+ res_q3 = vaddq_u16(vshlq_u16(vmovl_u8(res3_8), dup_bits16x8),
+ dup_round_offset16x8);
+
+ if (conv_params->do_average) {
+ dst_2 = dst_1;
+ load_u16_8x4(dst_2, dst_stride, &tmp_q0, &tmp_q1, &tmp_q2, &tmp_q3);
+
+ compute_avg_8x4(tmp_q0, tmp_q1, tmp_q2, tmp_q3, res_q0, res_q1,
+ res_q2, res_q3, conv_params->fwd_offset,
+ conv_params->bck_offset, sub_const_vec, bits,
+ conv_params->use_jnt_comp_avg, &tmp_shift0,
+ &tmp_shift1, &tmp_shift2, &tmp_shift3);
+
+ vst1_u8(dst8_1 + (0 * dst8_stride), tmp_shift0);
+ vst1_u8(dst8_1 + (1 * dst8_stride), tmp_shift1);
+ vst1_u8(dst8_1 + (2 * dst8_stride), tmp_shift2);
+ vst1_u8(dst8_1 + (3 * dst8_stride), tmp_shift3);
+
+ } else {
+ vst1q_u16(dst_1 + (0 * dst_stride), res_q0);
+ vst1q_u16(dst_1 + (1 * dst_stride), res_q1);
+ vst1q_u16(dst_1 + (2 * dst_stride), res_q2);
+ vst1q_u16(dst_1 + (3 * dst_stride), res_q3);
+ }
+ src1 = src1 + 8;
+ dst_1 = dst_1 + 8;
+ dst8_1 = dst8_1 + 8;
+ }
+ src += src_stride * 4;
+ dst8 += dst8_stride * 4;
+ dst += dst_stride * 4;
+ }
+ } else if (!(w & 0x03)) {
+ for (y = 0; y < (h >> 2); ++y) {
+ src1 = src;
+ dst8_1 = dst8;
+ dst_1 = dst;
+
+ load_u8_8x4(src1, src_stride, &res0_8, &res1_8, &res2_8, &res3_8);
+
+ res4 = vadd_u16(vshl_u16(vget_low_u16(vmovl_u8(res0_8)), dup_bits16x4),
+ vreinterpret_u16_s16(sub_const_vec));
+ res5 = vadd_u16(vshl_u16(vget_low_u16(vmovl_u8(res1_8)), dup_bits16x4),
+ vreinterpret_u16_s16(sub_const_vec));
+ res6 = vadd_u16(vshl_u16(vget_low_u16(vmovl_u8(res2_8)), dup_bits16x4),
+ vreinterpret_u16_s16(sub_const_vec));
+ res7 = vadd_u16(vshl_u16(vget_low_u16(vmovl_u8(res3_8)), dup_bits16x4),
+ vreinterpret_u16_s16(sub_const_vec));
+ if (conv_params->do_average) {
+ load_u16_4x4(dst_1, dst_stride, &tmp4, &tmp5, &tmp6, &tmp7);
+
+ compute_avg_4x4(tmp4, tmp5, tmp6, tmp7, res4, res5, res6, res7,
+ conv_params->fwd_offset, conv_params->bck_offset,
+ sub_const_vec, bits, conv_params->use_jnt_comp_avg,
+ &tmp_shift0, &tmp_shift1);
+
+ vst1_lane_u32((uint32_t *)(dst8_1), vreinterpret_u32_u8(tmp_shift0), 0);
+ dst8_1 += dst8_stride;
+ vst1_lane_u32((uint32_t *)(dst8_1), vreinterpret_u32_u8(tmp_shift0), 1);
+ dst8_1 += dst8_stride;
+ vst1_lane_u32((uint32_t *)(dst8_1), vreinterpret_u32_u8(tmp_shift1), 0);
+ dst8_1 += dst8_stride;
+ vst1_lane_u32((uint32_t *)(dst8_1), vreinterpret_u32_u8(tmp_shift1), 1);
+
+ } else {
+ vst1_u16(dst_1, res4);
+ dst_1 += dst_stride;
+ vst1_u16(dst_1, res5);
+ dst_1 += dst_stride;
+ vst1_u16(dst_1, res6);
+ dst_1 += dst_stride;
+ vst1_u16(dst_1, res7);
+ }
+ src += src_stride * 4;
+ dst += dst_stride * 4;
+ dst8 += dst8_stride * 4;
+ }
+ }
+}
+
+void av1_jnt_convolve_x_neon(const uint8_t *src, int src_stride, uint8_t *dst8,
+ int dst8_stride, int w, int h,
+ const InterpFilterParams *filter_params_x,
+ const InterpFilterParams *filter_params_y,
+ const int subpel_x_q4, const int subpel_y_q4,
+ ConvolveParams *conv_params) {
+ assert(!(w % 4));
+ assert(!(h % 4));
+
+ CONV_BUF_TYPE *dst = conv_params->dst;
+ int dst_stride = conv_params->dst_stride;
+ const int horiz_offset = filter_params_x->taps / 2 - 1;
+ const int bits = FILTER_BITS - conv_params->round_1;
+ const int bd = 8;
+ const int offset_bits = bd + 2 * FILTER_BITS - conv_params->round_0;
+ const int round_offset = (1 << (offset_bits - conv_params->round_1)) +
+ (1 << (offset_bits - conv_params->round_1 - 1));
+ const int round_bits =
+ 2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1;
+ const uint16_t fwd_offset = conv_params->fwd_offset;
+ const uint16_t bck_offset = conv_params->bck_offset;
+ const int use_jnt_comp_avg = conv_params->use_jnt_comp_avg;
+
+ (void)filter_params_y;
+ (void)subpel_y_q4;
+
+ // horizontal filter
+ const int16_t *x_filter = av1_get_interp_filter_subpel_kernel(
+ filter_params_x, subpel_x_q4 & SUBPEL_MASK);
+
+ const uint8_t *src_ptr = src - horiz_offset;
+
+ int16_t x_filter_tmp[8];
+ int16x8_t filter_x_coef = vld1q_s16(x_filter);
+
+  // The filter coefficients are all even, so halve them (shift right by 1)
+  // to reduce the intermediate precision requirements.
+ filter_x_coef = vshrq_n_s16(filter_x_coef, 1);
+ vst1q_s16(&x_filter_tmp[0], filter_x_coef);
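+  // The halved kernel is compensated for by rounding with (round_0 - 1) bits
+  // in the filtering loops below (see shift_round_0).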
+
+ const uint8_t *s;
+ uint8_t *d_u8;
+ uint8_t *dst_u8_ptr;
+ CONV_BUF_TYPE *d, *dst_ptr;
+ int width, height;
+ uint8x8_t t0;
+#if defined(__aarch64__)
+ uint8x8_t t1, t2, t3, t4, t5, t6, t7;
+#endif
+ s = src_ptr;
+ dst_ptr = dst;
+ dst_u8_ptr = dst8;
+ width = w;
+ height = h;
+
+ if ((w == 4) || (h == 4)) {
+ int16x4_t s0, s1, s2, s3, s4, s5, s6, s7, d0;
+ int16x8_t tt0;
+ uint16x4_t res4;
+#if defined(__aarch64__)
+ int16x4_t s8, s9, s10, d1, d2, d3;
+ int16x8_t tt1, tt2, tt3;
+ uint16x4_t res5, res6, res7;
+ uint32x2_t tu0 = vdup_n_u32(0), tu1 = vdup_n_u32(0);
+ int16x8_t u0, u1;
+#else
+ int16x4_t temp_0;
+#endif
+ const int16x4_t zero = vdup_n_s16(0);
+ const int16x4_t round_offset_vec = vdup_n_s16(round_offset);
+ const int16x4_t shift_round_0 = vdup_n_s16(-conv_params->round_0 + 1);
+ const int16x4_t horiz_const = vdup_n_s16(bits);
+ do {
+ s = src_ptr;
+ d = dst_ptr;
+ d_u8 = dst_u8_ptr;
+ width = w;
+ __builtin_prefetch(s + 0 * src_stride);
+#if defined(__aarch64__)
+ __builtin_prefetch(s + 1 * src_stride);
+ __builtin_prefetch(s + 2 * src_stride);
+ __builtin_prefetch(s + 3 * src_stride);
+
+ load_u8_8x4(s, src_stride, &t0, &t1, &t2, &t3);
+ transpose_u8_8x4(&t0, &t1, &t2, &t3);
+ tt0 = vreinterpretq_s16_u16(vmovl_u8(t0));
+ tt1 = vreinterpretq_s16_u16(vmovl_u8(t1));
+ tt2 = vreinterpretq_s16_u16(vmovl_u8(t2));
+ tt3 = vreinterpretq_s16_u16(vmovl_u8(t3));
+ s0 = vget_low_s16(tt0);
+ s1 = vget_low_s16(tt1);
+ s2 = vget_low_s16(tt2);
+ s3 = vget_low_s16(tt3);
+ s4 = vget_high_s16(tt0);
+ s5 = vget_high_s16(tt1);
+ s6 = vget_high_s16(tt2);
+ __builtin_prefetch(d + 0 * dst_stride);
+ __builtin_prefetch(d + 1 * dst_stride);
+ __builtin_prefetch(d + 2 * dst_stride);
+ __builtin_prefetch(d + 3 * dst_stride);
+ s += 7;
+ do {
+ load_unaligned_u8_4x4(s, src_stride, &tu0, &tu1);
+ t0 = vreinterpret_u8_u32(tu0);
+ t1 = vreinterpret_u8_u32(tu1);
+
+ transpose_u8_4x4(&t0, &t1);
+ u0 = vreinterpretq_s16_u16(vmovl_u8(t0));
+ u1 = vreinterpretq_s16_u16(vmovl_u8(t1));
+
+ s7 = vget_low_s16(u0);
+ s8 = vget_low_s16(u1);
+ s9 = vget_high_s16(u0);
+ s10 = vget_high_s16(u1);
+
+ d0 = convolve8_4x4_s16(s0, s1, s2, s3, s4, s5, s6, s7, x_filter_tmp,
+ zero, shift_round_0);
+ d0 = vrshl_s16(d0, horiz_const);
+ d0 = vadd_s16(d0, round_offset_vec);
+ d1 = convolve8_4x4_s16(s1, s2, s3, s4, s5, s6, s7, s8, x_filter_tmp,
+ zero, shift_round_0);
+ d1 = vrshl_s16(d1, horiz_const);
+ d1 = vadd_s16(d1, round_offset_vec);
+ d2 = convolve8_4x4_s16(s2, s3, s4, s5, s6, s7, s8, s9, x_filter_tmp,
+ zero, shift_round_0);
+ d2 = vrshl_s16(d2, horiz_const);
+ d2 = vadd_s16(d2, round_offset_vec);
+ d3 = convolve8_4x4_s16(s3, s4, s5, s6, s7, s8, s9, s10, x_filter_tmp,
+ zero, shift_round_0);
+ d3 = vrshl_s16(d3, horiz_const);
+ d3 = vadd_s16(d3, round_offset_vec);
+
+ transpose_s16_4x4d(&d0, &d1, &d2, &d3);
+
+ if (conv_params->do_average) {
+ __builtin_prefetch(d + 0 * dst_stride);
+ __builtin_prefetch(d + 1 * dst_stride);
+ __builtin_prefetch(d + 2 * dst_stride);
+ __builtin_prefetch(d + 3 * dst_stride);
+
+ __builtin_prefetch(d_u8 + 0 * dst8_stride);
+ __builtin_prefetch(d_u8 + 1 * dst8_stride);
+ __builtin_prefetch(d_u8 + 2 * dst8_stride);
+ __builtin_prefetch(d_u8 + 3 * dst8_stride);
+
+ load_u16_4x4(d, dst_stride, &res4, &res5, &res6, &res7);
+
+ compute_avg_4x4(res4, res5, res6, res7, vreinterpret_u16_s16(d0),
+ vreinterpret_u16_s16(d1), vreinterpret_u16_s16(d2),
+ vreinterpret_u16_s16(d3), fwd_offset, bck_offset,
+ round_offset_vec, round_bits, use_jnt_comp_avg, &t0,
+ &t1);
+
+ vst1_lane_u32((uint32_t *)d_u8, vreinterpret_u32_u8(t0),
+ 0); // 00 01 02 03
+ vst1_lane_u32((uint32_t *)(d_u8 + dst8_stride),
+ vreinterpret_u32_u8(t0),
+ 1); // 10 11 12 13
+ vst1_lane_u32((uint32_t *)(d_u8 + 2 * dst8_stride),
+ vreinterpret_u32_u8(t1),
+ 0); // 20 21 22 23
+ vst1_lane_u32((uint32_t *)(d_u8 + 3 * dst8_stride),
+ vreinterpret_u32_u8(t1),
+ 1); // 30 31 32 33
+ } else {
+ store_u16_4x4(d, dst_stride, vreinterpret_u16_s16(d0),
+ vreinterpret_u16_s16(d1), vreinterpret_u16_s16(d2),
+ vreinterpret_u16_s16(d3));
+ }
+
+ s0 = s4;
+ s1 = s5;
+ s2 = s6;
+ s3 = s7;
+ s4 = s8;
+ s5 = s9;
+ s6 = s10;
+
+ s += 4;
+ width -= 4;
+ d += 4;
+ d_u8 += 4;
+ } while (width > 0);
+ src_ptr += (src_stride << 2);
+ dst_ptr += (dst_stride << 2);
+ dst_u8_ptr += (dst8_stride << 2);
+ height -= 4;
+#else
+ t0 = vld1_u8(s); // a0 a1 a2 a3 a4 a5 a6 a7
+ tt0 = vreinterpretq_s16_u16(vmovl_u8(t0)); // a0 a1 a2 a3 a4 a5 a6 a7
+ s0 = vget_low_s16(tt0); // a0 a1 a2 a3
+ s4 = vget_high_s16(tt0); // a4 a5 a6 a7
+ __builtin_prefetch(d);
+
+ s += 8;
+ do {
+ t0 = vld1_u8(s); // a8 a9 a10 a11
+
+ // a8 a9 a10 a11
+ s7 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t0)));
+ temp_0 = s7;
+ s1 = vext_s16(s0, s4, 1); // a1 a2 a3 a4
+ s2 = vext_s16(s0, s4, 2); // a2 a3 a4 a5
+ s3 = vext_s16(s0, s4, 3); // a3 a4 a5 a6
+ s5 = vext_s16(s4, s7, 1); // a5 a6 a7 a8
+ s6 = vext_s16(s4, s7, 2); // a6 a7 a8 a9
+ s7 = vext_s16(s4, s7, 3); // a7 a8 a9 a10
+
+ d0 = convolve8_4x4_s16(s0, s1, s2, s3, s4, s5, s6, s7, x_filter_tmp,
+ zero, shift_round_0);
+ d0 = vrshl_s16(d0, horiz_const);
+ d0 = vadd_s16(d0, round_offset_vec);
+ s0 = s4;
+ s4 = temp_0;
+ if (conv_params->do_average) {
+ __builtin_prefetch(d);
+ __builtin_prefetch(d_u8);
+
+ res4 = vld1_u16(d);
+
+ compute_avg_4x1(res4, vreinterpret_u16_s16(d0), fwd_offset,
+ bck_offset, round_offset_vec, round_bits,
+ use_jnt_comp_avg, &t0);
+
+ vst1_lane_u32((uint32_t *)d_u8, vreinterpret_u32_u8(t0),
+ 0); // 00 01 02 03
+ } else {
+ vst1_u16(d, vreinterpret_u16_s16(d0));
+ }
+
+ s += 4;
+ width -= 4;
+ d += 4;
+ d_u8 += 4;
+ } while (width > 0);
+ src_ptr += (src_stride);
+ dst_ptr += (dst_stride);
+ dst_u8_ptr += (dst8_stride);
+ height--;
+#endif
+ } while (height > 0);
+ } else {
+ CONV_BUF_TYPE *d_tmp;
+ uint8_t *d_u8_tmp;
+ int16x8_t s0, s1, s2, s3, s4, s5, s6, s7;
+ int16x8_t res0;
+ uint16x8_t res8;
+ const int16x8_t round_offset128 = vdupq_n_s16(round_offset);
+ const int16x4_t round_offset64 = vdup_n_s16(round_offset);
+ const int16x8_t shift_round_0 = vdupq_n_s16(-conv_params->round_0 + 1);
+ const int16x8_t horiz_const = vdupq_n_s16(bits);
+ const int16x8_t zero = vdupq_n_s16(0);
+
+ d = dst_ptr = dst;
+ d_u8 = dst_u8_ptr = dst8;
+ do {
+#if defined(__aarch64__)
+ int16x8_t s11, s12, s13, s14;
+ int16x8_t s8, s9, s10;
+ int16x8_t res1, res2, res3, res4, res5, res6, res7;
+ uint16x8_t res9, res10, res11;
+ __builtin_prefetch(src_ptr + 0 * src_stride);
+ __builtin_prefetch(src_ptr + 1 * src_stride);
+ __builtin_prefetch(src_ptr + 2 * src_stride);
+ __builtin_prefetch(src_ptr + 3 * src_stride);
+ __builtin_prefetch(src_ptr + 4 * src_stride);
+ __builtin_prefetch(src_ptr + 5 * src_stride);
+ __builtin_prefetch(src_ptr + 6 * src_stride);
+ __builtin_prefetch(src_ptr + 7 * src_stride);
+ load_u8_8x8(src_ptr, src_stride, &t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7);
+ transpose_u8_8x8(&t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7);
+ s0 = vreinterpretq_s16_u16(vmovl_u8(t0));
+ s1 = vreinterpretq_s16_u16(vmovl_u8(t1));
+ s2 = vreinterpretq_s16_u16(vmovl_u8(t2));
+ s3 = vreinterpretq_s16_u16(vmovl_u8(t3));
+ s4 = vreinterpretq_s16_u16(vmovl_u8(t4));
+ s5 = vreinterpretq_s16_u16(vmovl_u8(t5));
+ s6 = vreinterpretq_s16_u16(vmovl_u8(t6));
+
+ width = w;
+ s = src_ptr + 7;
+ d = dst_ptr;
+ d_u8_tmp = dst_u8_ptr;
+
+ __builtin_prefetch(dst_ptr + 0 * dst_stride);
+ __builtin_prefetch(dst_ptr + 1 * dst_stride);
+ __builtin_prefetch(dst_ptr + 2 * dst_stride);
+ __builtin_prefetch(dst_ptr + 3 * dst_stride);
+ __builtin_prefetch(dst_ptr + 4 * dst_stride);
+ __builtin_prefetch(dst_ptr + 5 * dst_stride);
+ __builtin_prefetch(dst_ptr + 6 * dst_stride);
+ __builtin_prefetch(dst_ptr + 7 * dst_stride);
+
+ do {
+ d_u8 = d_u8_tmp;
+ d_tmp = d;
+
+ load_u8_8x8(s, src_stride, &t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7);
+ transpose_u8_8x8(&t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7);
+ s7 = vreinterpretq_s16_u16(vmovl_u8(t0));
+ s8 = vreinterpretq_s16_u16(vmovl_u8(t1));
+ s9 = vreinterpretq_s16_u16(vmovl_u8(t2));
+ s10 = vreinterpretq_s16_u16(vmovl_u8(t3));
+ s11 = vreinterpretq_s16_u16(vmovl_u8(t4));
+ s12 = vreinterpretq_s16_u16(vmovl_u8(t5));
+ s13 = vreinterpretq_s16_u16(vmovl_u8(t6));
+ s14 = vreinterpretq_s16_u16(vmovl_u8(t7));
+
+ res0 = convolve8_8x8_s16(s0, s1, s2, s3, s4, s5, s6, s7, x_filter_tmp,
+ zero, shift_round_0);
+
+ res0 = vrshlq_s16(res0, horiz_const);
+ res0 = vaddq_s16(res0, round_offset128);
+
+ res1 = convolve8_8x8_s16(s1, s2, s3, s4, s5, s6, s7, s8, x_filter_tmp,
+ zero, shift_round_0);
+ res1 = vrshlq_s16(res1, horiz_const);
+ res1 = vaddq_s16(res1, round_offset128);
+ res2 = convolve8_8x8_s16(s2, s3, s4, s5, s6, s7, s8, s9, x_filter_tmp,
+ zero, shift_round_0);
+ res2 = vrshlq_s16(res2, horiz_const);
+ res2 = vaddq_s16(res2, round_offset128);
+ res3 = convolve8_8x8_s16(s3, s4, s5, s6, s7, s8, s9, s10, x_filter_tmp,
+ zero, shift_round_0);
+ res3 = vrshlq_s16(res3, horiz_const);
+ res3 = vaddq_s16(res3, round_offset128);
+ res4 = convolve8_8x8_s16(s4, s5, s6, s7, s8, s9, s10, s11, x_filter_tmp,
+ zero, shift_round_0);
+ res4 = vrshlq_s16(res4, horiz_const);
+ res4 = vaddq_s16(res4, round_offset128);
+ res5 = convolve8_8x8_s16(s5, s6, s7, s8, s9, s10, s11, s12,
+ x_filter_tmp, zero, shift_round_0);
+ res5 = vrshlq_s16(res5, horiz_const);
+ res5 = vaddq_s16(res5, round_offset128);
+ res6 = convolve8_8x8_s16(s6, s7, s8, s9, s10, s11, s12, s13,
+ x_filter_tmp, zero, shift_round_0);
+ res6 = vrshlq_s16(res6, horiz_const);
+ res6 = vaddq_s16(res6, round_offset128);
+ res7 = convolve8_8x8_s16(s7, s8, s9, s10, s11, s12, s13, s14,
+ x_filter_tmp, zero, shift_round_0);
+ res7 = vrshlq_s16(res7, horiz_const);
+ res7 = vaddq_s16(res7, round_offset128);
+
+ transpose_s16_8x8(&res0, &res1, &res2, &res3, &res4, &res5, &res6,
+ &res7);
+
+ if (conv_params->do_average) {
+ load_u16_8x4(d_tmp, dst_stride, &res8, &res9, &res10, &res11);
+ d_tmp += (dst_stride << 2);
+
+ compute_avg_8x4(
+ res8, res9, res10, res11, vreinterpretq_u16_s16(res0),
+ vreinterpretq_u16_s16(res1), vreinterpretq_u16_s16(res2),
+ vreinterpretq_u16_s16(res3), fwd_offset, bck_offset,
+ round_offset64, round_bits, use_jnt_comp_avg, &t0, &t1, &t2, &t3);
+
+ store_u8_8x4(d_u8, dst8_stride, t0, t1, t2, t3);
+ d_u8 += (dst8_stride << 2);
+
+ load_u16_8x4(d_tmp, dst_stride, &res8, &res9, &res10, &res11);
+ d_tmp += (dst_stride << 2);
+
+ compute_avg_8x4(
+ res8, res9, res10, res11, vreinterpretq_u16_s16(res4),
+ vreinterpretq_u16_s16(res5), vreinterpretq_u16_s16(res6),
+ vreinterpretq_u16_s16(res7), fwd_offset, bck_offset,
+ round_offset64, round_bits, use_jnt_comp_avg, &t0, &t1, &t2, &t3);
+
+ store_u8_8x4(d_u8, dst8_stride, t0, t1, t2, t3);
+ d_u8 += (dst8_stride << 2);
+ } else {
+ store_u16_8x8(
+ d_tmp, dst_stride, vreinterpretq_u16_s16(res0),
+ vreinterpretq_u16_s16(res1), vreinterpretq_u16_s16(res2),
+ vreinterpretq_u16_s16(res3), vreinterpretq_u16_s16(res4),
+ vreinterpretq_u16_s16(res5), vreinterpretq_u16_s16(res6),
+ vreinterpretq_u16_s16(res7));
+ d_tmp += (dst_stride << 3);
+ }
+
+ s0 = s8;
+ s1 = s9;
+ s2 = s10;
+ s3 = s11;
+ s4 = s12;
+ s5 = s13;
+ s6 = s14;
+ s += 8;
+ d += 8;
+ width -= 8;
+ d_u8_tmp += 8;
+ } while (width > 0);
+ src_ptr += 8 * src_stride;
+ dst_ptr += 8 * dst_stride;
+ dst_u8_ptr += 8 * dst8_stride;
+ height -= 8;
+#else
+ int16x8_t temp_0;
+ __builtin_prefetch(src_ptr);
+ t0 = vld1_u8(src_ptr);
+ s0 = vreinterpretq_s16_u16(vmovl_u8(t0)); // a0 a1 a2 a3 a4 a5 a6 a7
+
+ width = w;
+ s = src_ptr + 8;
+ d = dst_ptr;
+ d_u8_tmp = dst_u8_ptr;
+
+ __builtin_prefetch(dst_ptr);
+
+ do {
+ d_u8 = d_u8_tmp;
+ d_tmp = d;
+
+ t0 = vld1_u8(s); // a8 a9 a10 a11 a12 a13 a14 a15
+ s7 = vreinterpretq_s16_u16(vmovl_u8(t0));
+ temp_0 = s0;
+ s0 = s7;
+
+ s1 = vextq_s16(temp_0, s7, 1); // a1 a2 a3 a4 a5 a6 a7 a8
+ s2 = vextq_s16(temp_0, s7, 2); // a2 a3 a4 a5 a6 a7 a8 a9
+ s3 = vextq_s16(temp_0, s7, 3); // a3 a4 a5 a6 a7 a8 a9 a10
+ s4 = vextq_s16(temp_0, s7, 4); // a4 a5 a6 a7 a8 a9 a10 a11
+ s5 = vextq_s16(temp_0, s7, 5); // a5 a6 a7 a8 a9 a10 a11 a12
+ s6 = vextq_s16(temp_0, s7, 6); // a6 a7 a8 a9 a10 a11 a12 a13
+ s7 = vextq_s16(temp_0, s7, 7); // a7 a8 a9 a10 a11 a12 a13 a14
+
+ res0 = convolve8_8x8_s16(temp_0, s1, s2, s3, s4, s5, s6, s7,
+ x_filter_tmp, zero, shift_round_0);
+
+ res0 = vrshlq_s16(res0, horiz_const);
+ res0 = vaddq_s16(res0, round_offset128);
+
+ if (conv_params->do_average) {
+ res8 = vld1q_u16(d_tmp);
+ d_tmp += (dst_stride);
+
+ compute_avg_8x1(res8, vreinterpretq_u16_s16(res0), fwd_offset,
+ bck_offset, round_offset64, round_bits,
+ use_jnt_comp_avg, &t0);
+
+ vst1_u8(d_u8, t0);
+ d_u8 += (dst8_stride);
+ } else {
+ vst1q_u16(d_tmp, vreinterpretq_u16_s16(res0));
+ d_tmp += (dst_stride);
+ }
+
+ s += 8;
+ d += 8;
+ width -= 8;
+ d_u8_tmp += 8;
+ } while (width > 0);
+ src_ptr += src_stride;
+ dst_ptr += dst_stride;
+ dst_u8_ptr += dst8_stride;
+ height--;
+#endif
+ } while (height > 0);
+ }
+}
+
+void av1_jnt_convolve_y_neon(const uint8_t *src, int src_stride, uint8_t *dst8,
+ int dst8_stride, int w, int h,
+ const InterpFilterParams *filter_params_x,
+ const InterpFilterParams *filter_params_y,
+ const int subpel_x_q4, const int subpel_y_q4,
+ ConvolveParams *conv_params) {
+ assert(!(w % 4));
+ assert(!(h % 4));
+
+ CONV_BUF_TYPE *dst = conv_params->dst;
+ const int dst_stride = conv_params->dst_stride;
+ const int vert_offset = filter_params_y->taps / 2 - 1;
+ const int bits = FILTER_BITS - conv_params->round_0;
+ const int bd = 8;
+ const int offset_bits = bd + 2 * FILTER_BITS - conv_params->round_0;
+ const int round_offset = (1 << (offset_bits - conv_params->round_1)) +
+ (1 << (offset_bits - conv_params->round_1 - 1));
+ const int round_bits =
+ 2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1;
+ const uint16_t fwd_offset = conv_params->fwd_offset;
+ const uint16_t bck_offset = conv_params->bck_offset;
+ const int use_jnt_comp_avg = conv_params->use_jnt_comp_avg;
+ const int shift_value = (conv_params->round_1 - 1 - bits);
+
+ (void)filter_params_x;
+ (void)subpel_x_q4;
+
+ // vertical filter
+ const int16_t *y_filter = av1_get_interp_filter_subpel_kernel(
+ filter_params_y, subpel_y_q4 & SUBPEL_MASK);
+
+ const uint8_t *src_ptr = src - (vert_offset * src_stride);
+
+ int16_t y_filter_tmp[8];
+ int16x8_t filter_y_coef = vld1q_s16(y_filter);
+
+  // The filter coefficients are all even, so halve them (shift right by 1)
+  // to reduce the intermediate precision requirements.
+ filter_y_coef = vshrq_n_s16(filter_y_coef, 1);
+ vst1q_s16(&y_filter_tmp[0], filter_y_coef);
+
+ const uint8_t *s;
+ uint8_t *d_u8;
+ uint8_t *dst_u8_ptr;
+ CONV_BUF_TYPE *d, *dst_ptr;
+ int width, height;
+
+ s = src_ptr;
+ dst_ptr = dst;
+ dst_u8_ptr = dst8;
+ width = w;
+ height = h;
+
+  // The single rounding shift below also absorbs the multiplication of the
+  // vertical filter output sum by (1 << bits).
+ assert((conv_params->round_1 - 2) >= bits);
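+  // shift_value (round_1 - 1 - bits) therefore folds three steps into one
+  // rounding shift: the round_1 down-shift, minus 1 because the filter taps
+  // were halved above, and minus 'bits' for the left shift that would
+  // otherwise be applied to the filter output.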
+
+ if ((w == 4) || (h == 4)) {
+ int16x4_t s0, s1, s2, s3, s4, s5, s6, s7, d0;
+ uint16x4_t res4;
+ uint32x2_t tu0 = vdup_n_u32(0), tu1 = vdup_n_u32(0), tu2 = vdup_n_u32(0),
+ tu3 = vdup_n_u32(0);
+ int16x8_t u0, u1, u2, u3;
+ uint8x8_t t0;
+
+#if defined(__aarch64__)
+ int16x4_t s8, s9, s10, d1, d2, d3;
+ uint16x4_t res5, res6, res7;
+ uint8x8_t t1;
+#endif
+ const int16x4_t round_offset64 = vdup_n_s16(round_offset);
+ const int16x4_t shift_vec = vdup_n_s16(-shift_value);
+ const int16x4_t zero = vdup_n_s16(0);
+
+ do {
+ s = src_ptr;
+ d = dst_ptr;
+ d_u8 = dst_u8_ptr;
+ height = h;
+ __builtin_prefetch(s + 0 * src_stride);
+ __builtin_prefetch(s + 1 * src_stride);
+ __builtin_prefetch(s + 2 * src_stride);
+ __builtin_prefetch(s + 3 * src_stride);
+
+ load_unaligned_u8_4x8(s, src_stride, &tu0, &tu1, &tu2, &tu3);
+
+ u0 = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(tu0)));
+ u1 = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(tu1)));
+ u2 = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(tu2)));
+ u3 = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(tu3)));
+
+ s0 = vget_low_s16(u0);
+ s1 = vget_high_s16(u0);
+ s2 = vget_low_s16(u1);
+ s3 = vget_high_s16(u1);
+ s4 = vget_low_s16(u2);
+ s5 = vget_high_s16(u2);
+ s6 = vget_low_s16(u3);
+
+ __builtin_prefetch(d + 0 * dst_stride);
+ __builtin_prefetch(d + 1 * dst_stride);
+ __builtin_prefetch(d + 2 * dst_stride);
+ __builtin_prefetch(d + 3 * dst_stride);
+
+ s += (7 * src_stride);
+ do {
+#if defined(__aarch64__)
+ load_unaligned_u8_4x4(s, src_stride, &tu0, &tu1);
+
+ u0 = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(tu0)));
+ u1 = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(tu1)));
+
+ s7 = vget_low_s16(u0);
+ s8 = vget_high_s16(u0);
+ s9 = vget_low_s16(u1);
+ s10 = vget_high_s16(u1);
+
+ d0 = convolve8_4x4_s16(s0, s1, s2, s3, s4, s5, s6, s7, y_filter_tmp,
+ zero, shift_vec);
+ d0 = vadd_s16(d0, round_offset64);
+ d1 = convolve8_4x4_s16(s1, s2, s3, s4, s5, s6, s7, s8, y_filter_tmp,
+ zero, shift_vec);
+ d1 = vadd_s16(d1, round_offset64);
+ d2 = convolve8_4x4_s16(s2, s3, s4, s5, s6, s7, s8, s9, y_filter_tmp,
+ zero, shift_vec);
+ d2 = vadd_s16(d2, round_offset64);
+ d3 = convolve8_4x4_s16(s3, s4, s5, s6, s7, s8, s9, s10, y_filter_tmp,
+ zero, shift_vec);
+ d3 = vadd_s16(d3, round_offset64);
+
+ if (conv_params->do_average) {
+ __builtin_prefetch(d + 0 * dst_stride);
+ __builtin_prefetch(d + 1 * dst_stride);
+ __builtin_prefetch(d + 2 * dst_stride);
+ __builtin_prefetch(d + 3 * dst_stride);
+
+ __builtin_prefetch(d_u8 + 0 * dst8_stride);
+ __builtin_prefetch(d_u8 + 1 * dst8_stride);
+ __builtin_prefetch(d_u8 + 2 * dst8_stride);
+ __builtin_prefetch(d_u8 + 3 * dst8_stride);
+
+ load_u16_4x4(d, dst_stride, &res4, &res5, &res6, &res7);
+ d += (dst_stride << 2);
+
+ compute_avg_4x4(res4, res5, res6, res7, vreinterpret_u16_s16(d0),
+ vreinterpret_u16_s16(d1), vreinterpret_u16_s16(d2),
+ vreinterpret_u16_s16(d3), fwd_offset, bck_offset,
+ round_offset64, round_bits, use_jnt_comp_avg, &t0,
+ &t1);
+
+ vst1_lane_u32((uint32_t *)d_u8, vreinterpret_u32_u8(t0), 0);
+ d_u8 += dst8_stride;
+ vst1_lane_u32((uint32_t *)d_u8, vreinterpret_u32_u8(t0), 1);
+ d_u8 += dst8_stride;
+ vst1_lane_u32((uint32_t *)d_u8, vreinterpret_u32_u8(t1), 0);
+ d_u8 += dst8_stride;
+ vst1_lane_u32((uint32_t *)d_u8, vreinterpret_u32_u8(t1), 1);
+ d_u8 += dst8_stride;
+ } else {
+ store_u16_4x4(d, dst_stride, vreinterpret_u16_s16(d0),
+ vreinterpret_u16_s16(d1), vreinterpret_u16_s16(d2),
+ vreinterpret_u16_s16(d3));
+ d += (dst_stride << 2);
+ }
+
+ s0 = s4;
+ s1 = s5;
+ s2 = s6;
+ s3 = s7;
+ s4 = s8;
+ s5 = s9;
+ s6 = s10;
+
+ s += (src_stride << 2);
+ height -= 4;
+#else
+ load_unaligned_u8_4x1(s, src_stride, &tu0);
+ u0 = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(tu0)));
+ s7 = vget_low_s16(u0);
+
+ d0 = convolve8_4x4_s16(s0, s1, s2, s3, s4, s5, s6, s7, y_filter_tmp,
+ zero, shift_vec);
+
+ d0 = vadd_s16(d0, round_offset64);
+
+ if (conv_params->do_average) {
+ __builtin_prefetch(d);
+
+ res4 = vld1_u16(d);
+ d += (dst_stride);
+
+ compute_avg_4x1(res4, vreinterpret_u16_s16(d0), fwd_offset,
+ bck_offset, round_offset64, round_bits,
+ use_jnt_comp_avg, &t0);
+
+ vst1_lane_u32((uint32_t *)d_u8, vreinterpret_u32_u8(t0), 0);
+ d_u8 += dst8_stride;
+ } else {
+ vst1_u16(d, vreinterpret_u16_s16(d0));
+ d += (dst_stride);
+ }
+
+ s0 = s1;
+ s1 = s2;
+ s2 = s3;
+ s3 = s4;
+ s4 = s5;
+ s5 = s6;
+ s6 = s7;
+
+ s += (src_stride);
+ height--;
+#endif
+ } while (height > 0);
+ src_ptr += 4;
+ dst_ptr += 4;
+ dst_u8_ptr += 4;
+ width -= 4;
+ } while (width > 0);
+ } else {
+ CONV_BUF_TYPE *d_tmp;
+ int16x8_t s0, s1, s2, s3, s4, s5, s6, s7;
+ int16x8_t res0;
+ uint16x8_t res8;
+ uint8x8_t t0, t1, t2, t3, t4, t5, t6, t7;
+ const int16x8_t round_offset128 = vdupq_n_s16(round_offset);
+ const int16x8_t shift_vec = vdupq_n_s16(-shift_value);
+ const int16x4_t round_offset64 = vdup_n_s16(round_offset);
+ const int16x8_t zero = vdupq_n_s16(0);
+#if defined(__aarch64__)
+ int16x8_t s8, s9, s10, s11, s12, s13, s14;
+ int16x8_t res1, res2, res3, res4, res5, res6, res7;
+ uint16x8_t res10, res11, res9;
+#endif
+ dst_ptr = dst;
+ dst_u8_ptr = dst8;
+ do {
+ __builtin_prefetch(src_ptr + 0 * src_stride);
+ __builtin_prefetch(src_ptr + 1 * src_stride);
+ __builtin_prefetch(src_ptr + 2 * src_stride);
+ __builtin_prefetch(src_ptr + 3 * src_stride);
+ __builtin_prefetch(src_ptr + 4 * src_stride);
+ __builtin_prefetch(src_ptr + 5 * src_stride);
+ __builtin_prefetch(src_ptr + 6 * src_stride);
+ __builtin_prefetch(src_ptr + 7 * src_stride);
+ load_u8_8x8(src_ptr, src_stride, &t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7);
+
+ s0 = vreinterpretq_s16_u16(vmovl_u8(t0));
+ s1 = vreinterpretq_s16_u16(vmovl_u8(t1));
+ s2 = vreinterpretq_s16_u16(vmovl_u8(t2));
+ s3 = vreinterpretq_s16_u16(vmovl_u8(t3));
+ s4 = vreinterpretq_s16_u16(vmovl_u8(t4));
+ s5 = vreinterpretq_s16_u16(vmovl_u8(t5));
+ s6 = vreinterpretq_s16_u16(vmovl_u8(t6));
+
+ height = h;
+ s = src_ptr + (7 * src_stride);
+ d_tmp = dst_ptr;
+ d_u8 = dst_u8_ptr;
+
+ do {
+#if defined(__aarch64__)
+ load_u8_8x8(s, src_stride, &t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7);
+
+ s7 = vreinterpretq_s16_u16(vmovl_u8(t0));
+ s8 = vreinterpretq_s16_u16(vmovl_u8(t1));
+ s9 = vreinterpretq_s16_u16(vmovl_u8(t2));
+ s10 = vreinterpretq_s16_u16(vmovl_u8(t3));
+ s11 = vreinterpretq_s16_u16(vmovl_u8(t4));
+ s12 = vreinterpretq_s16_u16(vmovl_u8(t5));
+ s13 = vreinterpretq_s16_u16(vmovl_u8(t6));
+ s14 = vreinterpretq_s16_u16(vmovl_u8(t7));
+
+ __builtin_prefetch(dst_ptr + 0 * dst_stride);
+ __builtin_prefetch(dst_ptr + 1 * dst_stride);
+ __builtin_prefetch(dst_ptr + 2 * dst_stride);
+ __builtin_prefetch(dst_ptr + 3 * dst_stride);
+
+ res0 = convolve8_8x8_s16(s0, s1, s2, s3, s4, s5, s6, s7, y_filter_tmp,
+ zero, shift_vec);
+ res0 = vaddq_s16(res0, round_offset128);
+ res1 = convolve8_8x8_s16(s1, s2, s3, s4, s5, s6, s7, s8, y_filter_tmp,
+ zero, shift_vec);
+ res1 = vaddq_s16(res1, round_offset128);
+ res2 = convolve8_8x8_s16(s2, s3, s4, s5, s6, s7, s8, s9, y_filter_tmp,
+ zero, shift_vec);
+ res2 = vaddq_s16(res2, round_offset128);
+ res3 = convolve8_8x8_s16(s3, s4, s5, s6, s7, s8, s9, s10, y_filter_tmp,
+ zero, shift_vec);
+ res3 = vaddq_s16(res3, round_offset128);
+ res4 = convolve8_8x8_s16(s4, s5, s6, s7, s8, s9, s10, s11, y_filter_tmp,
+ zero, shift_vec);
+ res4 = vaddq_s16(res4, round_offset128);
+ res5 = convolve8_8x8_s16(s5, s6, s7, s8, s9, s10, s11, s12,
+ y_filter_tmp, zero, shift_vec);
+ res5 = vaddq_s16(res5, round_offset128);
+ res6 = convolve8_8x8_s16(s6, s7, s8, s9, s10, s11, s12, s13,
+ y_filter_tmp, zero, shift_vec);
+ res6 = vaddq_s16(res6, round_offset128);
+ res7 = convolve8_8x8_s16(s7, s8, s9, s10, s11, s12, s13, s14,
+ y_filter_tmp, zero, shift_vec);
+ res7 = vaddq_s16(res7, round_offset128);
+
+ if (conv_params->do_average) {
+ __builtin_prefetch(d_tmp + 0 * dst8_stride);
+ __builtin_prefetch(d_tmp + 1 * dst8_stride);
+ __builtin_prefetch(d_tmp + 2 * dst8_stride);
+ __builtin_prefetch(d_tmp + 3 * dst8_stride);
+
+ load_u16_8x4(d_tmp, dst_stride, &res8, &res9, &res10, &res11);
+ d_tmp += (dst_stride << 2);
+
+ compute_avg_8x4(
+ res8, res9, res10, res11, vreinterpretq_u16_s16(res0),
+ vreinterpretq_u16_s16(res1), vreinterpretq_u16_s16(res2),
+ vreinterpretq_u16_s16(res3), fwd_offset, bck_offset,
+ round_offset64, round_bits, use_jnt_comp_avg, &t0, &t1, &t2, &t3);
+
+ store_u8_8x4(d_u8, dst8_stride, t0, t1, t2, t3);
+ d_u8 += (dst8_stride << 2);
+
+ load_u16_8x4(d_tmp, dst_stride, &res8, &res9, &res10, &res11);
+ d_tmp += (dst_stride << 2);
+
+ compute_avg_8x4(
+ res8, res9, res10, res11, vreinterpretq_u16_s16(res4),
+ vreinterpretq_u16_s16(res5), vreinterpretq_u16_s16(res6),
+ vreinterpretq_u16_s16(res7), fwd_offset, bck_offset,
+ round_offset64, round_bits, use_jnt_comp_avg, &t0, &t1, &t2, &t3);
+
+ store_u8_8x4(d_u8, dst8_stride, t0, t1, t2, t3);
+ d_u8 += (dst8_stride << 2);
+ } else {
+ store_u16_8x8(
+ d_tmp, dst_stride, vreinterpretq_u16_s16(res0),
+ vreinterpretq_u16_s16(res1), vreinterpretq_u16_s16(res2),
+ vreinterpretq_u16_s16(res3), vreinterpretq_u16_s16(res4),
+ vreinterpretq_u16_s16(res5), vreinterpretq_u16_s16(res6),
+ vreinterpretq_u16_s16(res7));
+ d_tmp += (dst_stride << 3);
+ }
+
+ s0 = s8;
+ s1 = s9;
+ s2 = s10;
+ s3 = s11;
+ s4 = s12;
+ s5 = s13;
+ s6 = s14;
+ s += (8 * src_stride);
+ height -= 8;
+#else
+ s7 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(s)));
+
+ __builtin_prefetch(dst_ptr);
+
+ res0 = convolve8_8x8_s16(s0, s1, s2, s3, s4, s5, s6, s7, y_filter_tmp,
+ zero, shift_vec);
+ res0 = vaddq_s16(res0, round_offset128);
+
+ s0 = s1;
+ s1 = s2;
+ s2 = s3;
+ s3 = s4;
+ s4 = s5;
+ s5 = s6;
+ s6 = s7;
+
+ if (conv_params->do_average) {
+ __builtin_prefetch(d_tmp);
+
+ res8 = vld1q_u16(d_tmp);
+ d_tmp += (dst_stride);
+
+ compute_avg_8x1(res8, vreinterpretq_u16_s16(res0), fwd_offset,
+ bck_offset, round_offset64, round_bits,
+ use_jnt_comp_avg, &t0);
+
+ vst1_u8(d_u8, t0);
+ d_u8 += (dst8_stride);
+ } else {
+ vst1q_u16(d_tmp, vreinterpretq_u16_s16(res0));
+ d_tmp += dst_stride;
+ }
+
+ s += (src_stride);
+ height--;
+#endif
+ } while (height > 0);
+ src_ptr += 8;
+ dst_ptr += 8;
+ dst_u8_ptr += 8;
+ width -= 8;
+ } while (width > 0);
+ }
+}
diff --git a/third_party/aom/av1/common/arm/mem_neon.h b/third_party/aom/av1/common/arm/mem_neon.h
new file mode 100644
index 0000000000..c4ae2e7849
--- /dev/null
+++ b/third_party/aom/av1/common/arm/mem_neon.h
@@ -0,0 +1,494 @@
+/*
+ * Copyright (c) 2018, Alliance for Open Media. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef AOM_AV1_COMMON_ARM_MEM_NEON_H_
+#define AOM_AV1_COMMON_ARM_MEM_NEON_H_
+
+#include <arm_neon.h>
+#include <string.h>
+
+static INLINE void store_row2_u8_8x8(uint8_t *s, int p, const uint8x8_t s0,
+ const uint8x8_t s1) {
+ vst1_u8(s, s0);
+ s += p;
+ vst1_u8(s, s1);
+ s += p;
+}
+
+/* These intrinsics require immediate values, so we must use #defines
+ to enforce that. */
+#define load_u8_4x1(s, s0, lane) \
+ do { \
+ *(s0) = vreinterpret_u8_u32( \
+ vld1_lane_u32((uint32_t *)(s), vreinterpret_u32_u8(*(s0)), lane)); \
+ } while (0)
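+/* Example usage (hypothetical caller-side snippet): given a uint8_t *src,
+   uint8x8_t v = vdup_n_u8(0);
+   load_u8_4x1(src, &v, 0);
+   fills the low four bytes of v from src. */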
+
+static INLINE void load_u8_8x8(const uint8_t *s, ptrdiff_t p,
+ uint8x8_t *const s0, uint8x8_t *const s1,
+ uint8x8_t *const s2, uint8x8_t *const s3,
+ uint8x8_t *const s4, uint8x8_t *const s5,
+ uint8x8_t *const s6, uint8x8_t *const s7) {
+ *s0 = vld1_u8(s);
+ s += p;
+ *s1 = vld1_u8(s);
+ s += p;
+ *s2 = vld1_u8(s);
+ s += p;
+ *s3 = vld1_u8(s);
+ s += p;
+ *s4 = vld1_u8(s);
+ s += p;
+ *s5 = vld1_u8(s);
+ s += p;
+ *s6 = vld1_u8(s);
+ s += p;
+ *s7 = vld1_u8(s);
+}
+
+static INLINE void load_u8_8x16(const uint8_t *s, ptrdiff_t p,
+ uint8x16_t *const s0, uint8x16_t *const s1,
+ uint8x16_t *const s2, uint8x16_t *const s3) {
+ *s0 = vld1q_u8(s);
+ s += p;
+ *s1 = vld1q_u8(s);
+ s += p;
+ *s2 = vld1q_u8(s);
+ s += p;
+ *s3 = vld1q_u8(s);
+}
+
+static INLINE void load_u8_8x4(const uint8_t *s, const ptrdiff_t p,
+ uint8x8_t *const s0, uint8x8_t *const s1,
+ uint8x8_t *const s2, uint8x8_t *const s3) {
+ *s0 = vld1_u8(s);
+ s += p;
+ *s1 = vld1_u8(s);
+ s += p;
+ *s2 = vld1_u8(s);
+ s += p;
+ *s3 = vld1_u8(s);
+}
+
+static INLINE void load_u16_4x4(const uint16_t *s, const ptrdiff_t p,
+ uint16x4_t *const s0, uint16x4_t *const s1,
+ uint16x4_t *const s2, uint16x4_t *const s3) {
+ *s0 = vld1_u16(s);
+ s += p;
+ *s1 = vld1_u16(s);
+ s += p;
+ *s2 = vld1_u16(s);
+ s += p;
+ *s3 = vld1_u16(s);
+ s += p;
+}
+
+static INLINE void load_u16_8x4(const uint16_t *s, const ptrdiff_t p,
+ uint16x8_t *const s0, uint16x8_t *const s1,
+ uint16x8_t *const s2, uint16x8_t *const s3) {
+ *s0 = vld1q_u16(s);
+ s += p;
+ *s1 = vld1q_u16(s);
+ s += p;
+ *s2 = vld1q_u16(s);
+ s += p;
+ *s3 = vld1q_u16(s);
+ s += p;
+}
+
+static INLINE void load_s16_4x8(const int16_t *s, ptrdiff_t p,
+ int16x4_t *const s0, int16x4_t *const s1,
+ int16x4_t *const s2, int16x4_t *const s3,
+ int16x4_t *const s4, int16x4_t *const s5,
+ int16x4_t *const s6, int16x4_t *const s7) {
+ *s0 = vld1_s16(s);
+ s += p;
+ *s1 = vld1_s16(s);
+ s += p;
+ *s2 = vld1_s16(s);
+ s += p;
+ *s3 = vld1_s16(s);
+ s += p;
+ *s4 = vld1_s16(s);
+ s += p;
+ *s5 = vld1_s16(s);
+ s += p;
+ *s6 = vld1_s16(s);
+ s += p;
+ *s7 = vld1_s16(s);
+}
+
+static INLINE void load_s16_4x4(const int16_t *s, ptrdiff_t p,
+ int16x4_t *const s0, int16x4_t *const s1,
+ int16x4_t *const s2, int16x4_t *const s3) {
+ *s0 = vld1_s16(s);
+ s += p;
+ *s1 = vld1_s16(s);
+ s += p;
+ *s2 = vld1_s16(s);
+ s += p;
+ *s3 = vld1_s16(s);
+}
+
+/* These intrinsics require immediate values, so we must use #defines
+ to enforce that. */
+#define store_u8_4x1(s, s0, lane) \
+ do { \
+ vst1_lane_u32((uint32_t *)(s), vreinterpret_u32_u8(s0), lane); \
+ } while (0)
+
+static INLINE void store_u8_8x8(uint8_t *s, ptrdiff_t p, const uint8x8_t s0,
+ const uint8x8_t s1, const uint8x8_t s2,
+ const uint8x8_t s3, const uint8x8_t s4,
+ const uint8x8_t s5, const uint8x8_t s6,
+ const uint8x8_t s7) {
+ vst1_u8(s, s0);
+ s += p;
+ vst1_u8(s, s1);
+ s += p;
+ vst1_u8(s, s2);
+ s += p;
+ vst1_u8(s, s3);
+ s += p;
+ vst1_u8(s, s4);
+ s += p;
+ vst1_u8(s, s5);
+ s += p;
+ vst1_u8(s, s6);
+ s += p;
+ vst1_u8(s, s7);
+}
+
+static INLINE void store_u8_8x4(uint8_t *s, ptrdiff_t p, const uint8x8_t s0,
+ const uint8x8_t s1, const uint8x8_t s2,
+ const uint8x8_t s3) {
+ vst1_u8(s, s0);
+ s += p;
+ vst1_u8(s, s1);
+ s += p;
+ vst1_u8(s, s2);
+ s += p;
+ vst1_u8(s, s3);
+}
+
+static INLINE void store_u8_8x16(uint8_t *s, ptrdiff_t p, const uint8x16_t s0,
+ const uint8x16_t s1, const uint8x16_t s2,
+ const uint8x16_t s3) {
+ vst1q_u8(s, s0);
+ s += p;
+ vst1q_u8(s, s1);
+ s += p;
+ vst1q_u8(s, s2);
+ s += p;
+ vst1q_u8(s, s3);
+}
+
+static INLINE void store_u16_8x8(uint16_t *s, ptrdiff_t dst_stride,
+ const uint16x8_t s0, const uint16x8_t s1,
+ const uint16x8_t s2, const uint16x8_t s3,
+ const uint16x8_t s4, const uint16x8_t s5,
+ const uint16x8_t s6, const uint16x8_t s7) {
+ vst1q_u16(s, s0);
+ s += dst_stride;
+ vst1q_u16(s, s1);
+ s += dst_stride;
+ vst1q_u16(s, s2);
+ s += dst_stride;
+ vst1q_u16(s, s3);
+ s += dst_stride;
+ vst1q_u16(s, s4);
+ s += dst_stride;
+ vst1q_u16(s, s5);
+ s += dst_stride;
+ vst1q_u16(s, s6);
+ s += dst_stride;
+ vst1q_u16(s, s7);
+}
+
+static INLINE void store_u16_4x4(uint16_t *s, ptrdiff_t dst_stride,
+ const uint16x4_t s0, const uint16x4_t s1,
+ const uint16x4_t s2, const uint16x4_t s3) {
+ vst1_u16(s, s0);
+ s += dst_stride;
+ vst1_u16(s, s1);
+ s += dst_stride;
+ vst1_u16(s, s2);
+ s += dst_stride;
+ vst1_u16(s, s3);
+}
+
+static INLINE void store_u16_8x4(uint16_t *s, ptrdiff_t dst_stride,
+ const uint16x8_t s0, const uint16x8_t s1,
+ const uint16x8_t s2, const uint16x8_t s3) {
+ vst1q_u16(s, s0);
+ s += dst_stride;
+ vst1q_u16(s, s1);
+ s += dst_stride;
+ vst1q_u16(s, s2);
+ s += dst_stride;
+ vst1q_u16(s, s3);
+}
+
+static INLINE void store_s16_8x8(int16_t *s, ptrdiff_t dst_stride,
+ const int16x8_t s0, const int16x8_t s1,
+ const int16x8_t s2, const int16x8_t s3,
+ const int16x8_t s4, const int16x8_t s5,
+ const int16x8_t s6, const int16x8_t s7) {
+ vst1q_s16(s, s0);
+ s += dst_stride;
+ vst1q_s16(s, s1);
+ s += dst_stride;
+ vst1q_s16(s, s2);
+ s += dst_stride;
+ vst1q_s16(s, s3);
+ s += dst_stride;
+ vst1q_s16(s, s4);
+ s += dst_stride;
+ vst1q_s16(s, s5);
+ s += dst_stride;
+ vst1q_s16(s, s6);
+ s += dst_stride;
+ vst1q_s16(s, s7);
+}
+
+static INLINE void store_s16_4x4(int16_t *s, ptrdiff_t dst_stride,
+ const int16x4_t s0, const int16x4_t s1,
+ const int16x4_t s2, const int16x4_t s3) {
+ vst1_s16(s, s0);
+ s += dst_stride;
+ vst1_s16(s, s1);
+ s += dst_stride;
+ vst1_s16(s, s2);
+ s += dst_stride;
+ vst1_s16(s, s3);
+}
+
+static INLINE void store_s16_8x4(int16_t *s, ptrdiff_t dst_stride,
+ const int16x8_t s0, const int16x8_t s1,
+ const int16x8_t s2, const int16x8_t s3) {
+ vst1q_s16(s, s0);
+ s += dst_stride;
+ vst1q_s16(s, s1);
+ s += dst_stride;
+ vst1q_s16(s, s2);
+ s += dst_stride;
+ vst1q_s16(s, s3);
+}
+
+static INLINE void load_s16_8x8(const int16_t *s, ptrdiff_t p,
+ int16x8_t *const s0, int16x8_t *const s1,
+ int16x8_t *const s2, int16x8_t *const s3,
+ int16x8_t *const s4, int16x8_t *const s5,
+ int16x8_t *const s6, int16x8_t *const s7) {
+ *s0 = vld1q_s16(s);
+ s += p;
+ *s1 = vld1q_s16(s);
+ s += p;
+ *s2 = vld1q_s16(s);
+ s += p;
+ *s3 = vld1q_s16(s);
+ s += p;
+ *s4 = vld1q_s16(s);
+ s += p;
+ *s5 = vld1q_s16(s);
+ s += p;
+ *s6 = vld1q_s16(s);
+ s += p;
+ *s7 = vld1q_s16(s);
+}
+
+static INLINE void load_s16_8x4(const int16_t *s, ptrdiff_t p,
+ int16x8_t *const s0, int16x8_t *const s1,
+ int16x8_t *const s2, int16x8_t *const s3) {
+ *s0 = vld1q_s16(s);
+ s += p;
+ *s1 = vld1q_s16(s);
+ s += p;
+ *s2 = vld1q_s16(s);
+ s += p;
+ *s3 = vld1q_s16(s);
+}
+
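+/* The load_unaligned_* helpers below gather each 4- or 8-byte group with
+   memcpy so that rows at arbitrary stride and alignment can be packed into
+   vector lanes without performing misaligned wide loads. */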
+static INLINE void load_unaligned_u8_4x8(const uint8_t *buf, int stride,
+ uint32x2_t *tu0, uint32x2_t *tu1,
+ uint32x2_t *tu2, uint32x2_t *tu3) {
+ uint32_t a;
+
+ memcpy(&a, buf, 4);
+ buf += stride;
+ *tu0 = vset_lane_u32(a, *tu0, 0);
+ memcpy(&a, buf, 4);
+ buf += stride;
+ *tu0 = vset_lane_u32(a, *tu0, 1);
+ memcpy(&a, buf, 4);
+ buf += stride;
+ *tu1 = vset_lane_u32(a, *tu1, 0);
+ memcpy(&a, buf, 4);
+ buf += stride;
+ *tu1 = vset_lane_u32(a, *tu1, 1);
+ memcpy(&a, buf, 4);
+ buf += stride;
+ *tu2 = vset_lane_u32(a, *tu2, 0);
+ memcpy(&a, buf, 4);
+ buf += stride;
+ *tu2 = vset_lane_u32(a, *tu2, 1);
+ memcpy(&a, buf, 4);
+ buf += stride;
+ *tu3 = vset_lane_u32(a, *tu3, 0);
+ memcpy(&a, buf, 4);
+ *tu3 = vset_lane_u32(a, *tu3, 1);
+}
+
+static INLINE void load_unaligned_u8_4x4(const uint8_t *buf, int stride,
+ uint32x2_t *tu0, uint32x2_t *tu1) {
+ uint32_t a;
+
+ memcpy(&a, buf, 4);
+ buf += stride;
+ *tu0 = vset_lane_u32(a, *tu0, 0);
+ memcpy(&a, buf, 4);
+ buf += stride;
+ *tu0 = vset_lane_u32(a, *tu0, 1);
+ memcpy(&a, buf, 4);
+ buf += stride;
+ *tu1 = vset_lane_u32(a, *tu1, 0);
+ memcpy(&a, buf, 4);
+ *tu1 = vset_lane_u32(a, *tu1, 1);
+}
+
+static INLINE void load_unaligned_u8_4x1(const uint8_t *buf, int stride,
+ uint32x2_t *tu0) {
+ uint32_t a;
+
+ memcpy(&a, buf, 4);
+ buf += stride;
+ *tu0 = vset_lane_u32(a, *tu0, 0);
+}
+
+static INLINE void load_unaligned_u8_4x2(const uint8_t *buf, int stride,
+ uint32x2_t *tu0) {
+ uint32_t a;
+
+ memcpy(&a, buf, 4);
+ buf += stride;
+ *tu0 = vset_lane_u32(a, *tu0, 0);
+ memcpy(&a, buf, 4);
+ buf += stride;
+ *tu0 = vset_lane_u32(a, *tu0, 1);
+}
+
+static INLINE void load_unaligned_u8_2x2(const uint8_t *buf, int stride,
+ uint16x4_t *tu0) {
+ uint16_t a;
+
+ memcpy(&a, buf, 2);
+ buf += stride;
+ *tu0 = vset_lane_u16(a, *tu0, 0);
+ memcpy(&a, buf, 2);
+ buf += stride;
+ *tu0 = vset_lane_u16(a, *tu0, 1);
+}
+
+static INLINE void load_u8_16x8(const uint8_t *s, ptrdiff_t p,
+ uint8x16_t *const s0, uint8x16_t *const s1,
+ uint8x16_t *const s2, uint8x16_t *const s3,
+ uint8x16_t *const s4, uint8x16_t *const s5,
+ uint8x16_t *const s6, uint8x16_t *const s7) {
+ *s0 = vld1q_u8(s);
+ s += p;
+ *s1 = vld1q_u8(s);
+ s += p;
+ *s2 = vld1q_u8(s);
+ s += p;
+ *s3 = vld1q_u8(s);
+ s += p;
+ *s4 = vld1q_u8(s);
+ s += p;
+ *s5 = vld1q_u8(s);
+ s += p;
+ *s6 = vld1q_u8(s);
+ s += p;
+ *s7 = vld1q_u8(s);
+}
+
+static INLINE void load_u8_16x4(const uint8_t *s, ptrdiff_t p,
+ uint8x16_t *const s0, uint8x16_t *const s1,
+ uint8x16_t *const s2, uint8x16_t *const s3) {
+ *s0 = vld1q_u8(s);
+ s += p;
+ *s1 = vld1q_u8(s);
+ s += p;
+ *s2 = vld1q_u8(s);
+ s += p;
+ *s3 = vld1q_u8(s);
+}
+
+static INLINE void load_unaligned_u16_4x4(const uint16_t *buf, uint32_t stride,
+ uint64x2_t *tu0, uint64x2_t *tu1) {
+ uint64_t a;
+
+ memcpy(&a, buf, 8);
+ buf += stride;
+ *tu0 = vsetq_lane_u64(a, *tu0, 0);
+ memcpy(&a, buf, 8);
+ buf += stride;
+ *tu0 = vsetq_lane_u64(a, *tu0, 1);
+ memcpy(&a, buf, 8);
+ buf += stride;
+ *tu1 = vsetq_lane_u64(a, *tu1, 0);
+ memcpy(&a, buf, 8);
+ *tu1 = vsetq_lane_u64(a, *tu1, 1);
+}
+
+static INLINE void load_s32_4x4(int32_t *s, int32_t p, int32x4_t *s1,
+ int32x4_t *s2, int32x4_t *s3, int32x4_t *s4) {
+ *s1 = vld1q_s32(s);
+ s += p;
+ *s2 = vld1q_s32(s);
+ s += p;
+ *s3 = vld1q_s32(s);
+ s += p;
+ *s4 = vld1q_s32(s);
+}
+
+static INLINE void store_s32_4x4(int32_t *s, int32_t p, int32x4_t s1,
+ int32x4_t s2, int32x4_t s3, int32x4_t s4) {
+ vst1q_s32(s, s1);
+ s += p;
+ vst1q_s32(s, s2);
+ s += p;
+ vst1q_s32(s, s3);
+ s += p;
+ vst1q_s32(s, s4);
+}
+
+static INLINE void load_u32_4x4(uint32_t *s, int32_t p, uint32x4_t *s1,
+ uint32x4_t *s2, uint32x4_t *s3,
+ uint32x4_t *s4) {
+ *s1 = vld1q_u32(s);
+ s += p;
+ *s2 = vld1q_u32(s);
+ s += p;
+ *s3 = vld1q_u32(s);
+ s += p;
+ *s4 = vld1q_u32(s);
+}
+
+static INLINE void store_u32_4x4(uint32_t *s, int32_t p, uint32x4_t s1,
+ uint32x4_t s2, uint32x4_t s3, uint32x4_t s4) {
+ vst1q_u32(s, s1);
+ s += p;
+ vst1q_u32(s, s2);
+ s += p;
+ vst1q_u32(s, s3);
+ s += p;
+ vst1q_u32(s, s4);
+}
+
+#endif // AOM_AV1_COMMON_ARM_MEM_NEON_H_
diff --git a/third_party/aom/av1/common/arm/reconinter_neon.c b/third_party/aom/av1/common/arm/reconinter_neon.c
new file mode 100644
index 0000000000..44e064195e
--- /dev/null
+++ b/third_party/aom/av1/common/arm/reconinter_neon.c
@@ -0,0 +1,86 @@
+/*
+ *
+ * Copyright (c) 2018, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <arm_neon.h>
+#include <assert.h>
+
+#include "aom/aom_integer.h"
+#include "aom_dsp/blend.h"
+#include "aom_ports/mem.h"
+#include "av1/common/arm/mem_neon.h"
+#include "av1/common/blockd.h"
+#include "config/av1_rtcd.h"
+
+void av1_build_compound_diffwtd_mask_d16_neon(
+ uint8_t *mask, DIFFWTD_MASK_TYPE mask_type, const CONV_BUF_TYPE *src0,
+ int src0_stride, const CONV_BUF_TYPE *src1, int src1_stride, int h, int w,
+ ConvolveParams *conv_params, int bd) {
+ assert(h >= 4);
+ assert(w >= 4);
+ assert((mask_type == DIFFWTD_38_INV) || (mask_type == DIFFWTD_38));
+ const int round =
+ 2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1 + (bd - 8);
+ uint16x8_t diff_q, tmp0, tmp1;
+ uint8x8_t diff_d, diff_select;
+ const CONV_BUF_TYPE *src0_1, *src1_1;
+ const int16x8_t dup_round = vdupq_n_s16((int16_t)(-round));
+ const uint8x8_t dup_38 = vdup_n_u8(38);
+ const uint8x8_t dup_64 = vdup_n_u8(AOM_BLEND_A64_MAX_ALPHA);
+ if (mask_type == DIFFWTD_38) {
+ diff_select = vdup_n_u8(255);
+ } else {
+ diff_select = vdup_n_u8(0);
+ }
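+  // For each pixel the mask is min(38 + (|src0 - src1| >> shift), 64), where
+  // the shift removes the compound-rounding and DIFF_FACTOR scaling;
+  // DIFFWTD_38_INV stores 64 minus that value, selected via diff_select.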
+ if (w >= 8) {
+ for (int i = 0; i < h; ++i) {
+ src0_1 = src0;
+ src1_1 = src1;
+ for (int j = 0; j < w; j += 8) {
+ __builtin_prefetch(src0_1);
+ __builtin_prefetch(src1_1);
+ diff_q = vabdq_u16(vld1q_u16(src0_1), vld1q_u16(src1_1));
+ diff_q = vrshlq_u16(diff_q, dup_round);
+ diff_d = vshrn_n_u16(diff_q, DIFF_FACTOR_LOG2);
+ diff_d = vmin_u8(vadd_u8(diff_d, dup_38), dup_64);
+ diff_d = vbsl_u8(diff_select, diff_d, vsub_u8(dup_64, diff_d));
+ vst1_u8(mask, diff_d);
+ src0_1 += 8;
+ src1_1 += 8;
+ mask += 8;
+ }
+ src0 += src0_stride;
+ src1 += src1_stride;
+ }
+ } else if (w == 4) {
+ for (int i = 0; i < h; i += 2) {
+ src0_1 = src0;
+ src1_1 = src1;
+ __builtin_prefetch(src0_1 + 0 * src0_stride);
+ __builtin_prefetch(src0_1 + 1 * src0_stride);
+ __builtin_prefetch(src1_1 + 0 * src1_stride);
+ __builtin_prefetch(src1_1 + 1 * src1_stride);
+ tmp0 = vcombine_u16(vld1_u16(src0_1 + (0 * src0_stride)),
+ vld1_u16(src0_1 + (1 * src0_stride)));
+ tmp1 = vcombine_u16(vld1_u16(src1_1 + (0 * src1_stride)),
+ vld1_u16(src1_1 + (1 * src1_stride)));
+ diff_q = vabdq_u16(tmp0, tmp1);
+ diff_q = vrshlq_u16(diff_q, dup_round);
+ diff_d = vshrn_n_u16(diff_q, DIFF_FACTOR_LOG2);
+ diff_d = vmin_u8(vadd_u8(diff_d, dup_38), dup_64);
+ diff_d = vbsl_u8(diff_select, diff_d, vsub_u8(dup_64, diff_d));
+ vst1_u8(mask, diff_d);
+ src0 += src0_stride * 2;
+ src1 += src1_stride * 2;
+ mask += w * 2;
+ }
+ }
+}
diff --git a/third_party/aom/av1/common/arm/selfguided_neon.c b/third_party/aom/av1/common/arm/selfguided_neon.c
new file mode 100644
index 0000000000..b3a37c4cb8
--- /dev/null
+++ b/third_party/aom/av1/common/arm/selfguided_neon.c
@@ -0,0 +1,1508 @@
+/*
+ * Copyright (c) 2018, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <arm_neon.h>
+#include <assert.h>
+
+#include "config/aom_config.h"
+#include "config/av1_rtcd.h"
+
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_dsp/txfm_common.h"
+#include "aom_mem/aom_mem.h"
+#include "aom_ports/mem.h"
+#include "av1/common/common.h"
+#include "av1/common/onyxc_int.h"
+#include "av1/common/resize.h"
+#include "av1/common/restoration.h"
+#include "av1/common/arm/mem_neon.h"
+#include "av1/common/arm/transpose_neon.h"
+
+// Constants used for right shift in final_filter calculation.
+#define NB_EVEN 5
+#define NB_ODD 4
+
+static INLINE void calc_ab_fast_internal_common(
+ uint32x4_t s0, uint32x4_t s1, uint32x4_t s2, uint32x4_t s3, uint32x4_t s4,
+ uint32x4_t s5, uint32x4_t s6, uint32x4_t s7, int32x4_t sr4, int32x4_t sr5,
+ int32x4_t sr6, int32x4_t sr7, uint32x4_t const_n_val, uint32x4_t s_vec,
+ uint32x4_t const_val, uint32x4_t one_by_n_minus_1_vec,
+ uint16x4_t sgrproj_sgr, int32_t *src1, uint16_t *dst_A16, int32_t *src2,
+ const int buf_stride) {
+ uint32x4_t q0, q1, q2, q3;
+ uint32x4_t p0, p1, p2, p3;
+ uint16x4_t d0, d1, d2, d3;
+
+ s0 = vmulq_u32(s0, const_n_val);
+ s1 = vmulq_u32(s1, const_n_val);
+ s2 = vmulq_u32(s2, const_n_val);
+ s3 = vmulq_u32(s3, const_n_val);
+
+ q0 = vmulq_u32(s4, s4);
+ q1 = vmulq_u32(s5, s5);
+ q2 = vmulq_u32(s6, s6);
+ q3 = vmulq_u32(s7, s7);
+
+ p0 = vcleq_u32(q0, s0);
+ p1 = vcleq_u32(q1, s1);
+ p2 = vcleq_u32(q2, s2);
+ p3 = vcleq_u32(q3, s3);
+
+ q0 = vsubq_u32(s0, q0);
+ q1 = vsubq_u32(s1, q1);
+ q2 = vsubq_u32(s2, q2);
+ q3 = vsubq_u32(s3, q3);
+
+ p0 = vandq_u32(p0, q0);
+ p1 = vandq_u32(p1, q1);
+ p2 = vandq_u32(p2, q2);
+ p3 = vandq_u32(p3, q3);
+
+ p0 = vmulq_u32(p0, s_vec);
+ p1 = vmulq_u32(p1, s_vec);
+ p2 = vmulq_u32(p2, s_vec);
+ p3 = vmulq_u32(p3, s_vec);
+
+ p0 = vrshrq_n_u32(p0, SGRPROJ_MTABLE_BITS);
+ p1 = vrshrq_n_u32(p1, SGRPROJ_MTABLE_BITS);
+ p2 = vrshrq_n_u32(p2, SGRPROJ_MTABLE_BITS);
+ p3 = vrshrq_n_u32(p3, SGRPROJ_MTABLE_BITS);
+
+ p0 = vminq_u32(p0, const_val);
+ p1 = vminq_u32(p1, const_val);
+ p2 = vminq_u32(p2, const_val);
+ p3 = vminq_u32(p3, const_val);
+
+ {
+ store_u32_4x4((uint32_t *)src1, buf_stride, p0, p1, p2, p3);
+
+ for (int x = 0; x < 4; x++) {
+ for (int y = 0; y < 4; y++) {
+ dst_A16[x * buf_stride + y] = x_by_xplus1[src1[x * buf_stride + y]];
+ }
+ }
+ load_u16_4x4(dst_A16, buf_stride, &d0, &d1, &d2, &d3);
+ }
+ p0 = vsubl_u16(sgrproj_sgr, d0);
+ p1 = vsubl_u16(sgrproj_sgr, d1);
+ p2 = vsubl_u16(sgrproj_sgr, d2);
+ p3 = vsubl_u16(sgrproj_sgr, d3);
+
+ s4 = vmulq_u32(vreinterpretq_u32_s32(sr4), one_by_n_minus_1_vec);
+ s5 = vmulq_u32(vreinterpretq_u32_s32(sr5), one_by_n_minus_1_vec);
+ s6 = vmulq_u32(vreinterpretq_u32_s32(sr6), one_by_n_minus_1_vec);
+ s7 = vmulq_u32(vreinterpretq_u32_s32(sr7), one_by_n_minus_1_vec);
+
+ s4 = vmulq_u32(s4, p0);
+ s5 = vmulq_u32(s5, p1);
+ s6 = vmulq_u32(s6, p2);
+ s7 = vmulq_u32(s7, p3);
+
+ p0 = vrshrq_n_u32(s4, SGRPROJ_RECIP_BITS);
+ p1 = vrshrq_n_u32(s5, SGRPROJ_RECIP_BITS);
+ p2 = vrshrq_n_u32(s6, SGRPROJ_RECIP_BITS);
+ p3 = vrshrq_n_u32(s7, SGRPROJ_RECIP_BITS);
+
+ store_s32_4x4(src2, buf_stride, vreinterpretq_s32_u32(p0),
+ vreinterpretq_s32_u32(p1), vreinterpretq_s32_u32(p2),
+ vreinterpretq_s32_u32(p3));
+}
+static INLINE void calc_ab_internal_common(
+ uint32x4_t s0, uint32x4_t s1, uint32x4_t s2, uint32x4_t s3, uint32x4_t s4,
+ uint32x4_t s5, uint32x4_t s6, uint32x4_t s7, uint16x8_t s16_0,
+ uint16x8_t s16_1, uint16x8_t s16_2, uint16x8_t s16_3, uint16x8_t s16_4,
+ uint16x8_t s16_5, uint16x8_t s16_6, uint16x8_t s16_7,
+ uint32x4_t const_n_val, uint32x4_t s_vec, uint32x4_t const_val,
+ uint16x4_t one_by_n_minus_1_vec, uint16x8_t sgrproj_sgr, int32_t *src1,
+ uint16_t *dst_A16, int32_t *dst2, const int buf_stride) {
+ uint16x4_t d0, d1, d2, d3, d4, d5, d6, d7;
+ uint32x4_t q0, q1, q2, q3, q4, q5, q6, q7;
+ uint32x4_t p0, p1, p2, p3, p4, p5, p6, p7;
+
+ s0 = vmulq_u32(s0, const_n_val);
+ s1 = vmulq_u32(s1, const_n_val);
+ s2 = vmulq_u32(s2, const_n_val);
+ s3 = vmulq_u32(s3, const_n_val);
+ s4 = vmulq_u32(s4, const_n_val);
+ s5 = vmulq_u32(s5, const_n_val);
+ s6 = vmulq_u32(s6, const_n_val);
+ s7 = vmulq_u32(s7, const_n_val);
+
+ d0 = vget_low_u16(s16_4);
+ d1 = vget_low_u16(s16_5);
+ d2 = vget_low_u16(s16_6);
+ d3 = vget_low_u16(s16_7);
+ d4 = vget_high_u16(s16_4);
+ d5 = vget_high_u16(s16_5);
+ d6 = vget_high_u16(s16_6);
+ d7 = vget_high_u16(s16_7);
+
+ q0 = vmull_u16(d0, d0);
+ q1 = vmull_u16(d1, d1);
+ q2 = vmull_u16(d2, d2);
+ q3 = vmull_u16(d3, d3);
+ q4 = vmull_u16(d4, d4);
+ q5 = vmull_u16(d5, d5);
+ q6 = vmull_u16(d6, d6);
+ q7 = vmull_u16(d7, d7);
+
+ p0 = vcleq_u32(q0, s0);
+ p1 = vcleq_u32(q1, s1);
+ p2 = vcleq_u32(q2, s2);
+ p3 = vcleq_u32(q3, s3);
+ p4 = vcleq_u32(q4, s4);
+ p5 = vcleq_u32(q5, s5);
+ p6 = vcleq_u32(q6, s6);
+ p7 = vcleq_u32(q7, s7);
+
+ q0 = vsubq_u32(s0, q0);
+ q1 = vsubq_u32(s1, q1);
+ q2 = vsubq_u32(s2, q2);
+ q3 = vsubq_u32(s3, q3);
+ q4 = vsubq_u32(s4, q4);
+ q5 = vsubq_u32(s5, q5);
+ q6 = vsubq_u32(s6, q6);
+ q7 = vsubq_u32(s7, q7);
+
+ p0 = vandq_u32(p0, q0);
+ p1 = vandq_u32(p1, q1);
+ p2 = vandq_u32(p2, q2);
+ p3 = vandq_u32(p3, q3);
+ p4 = vandq_u32(p4, q4);
+ p5 = vandq_u32(p5, q5);
+ p6 = vandq_u32(p6, q6);
+ p7 = vandq_u32(p7, q7);
+
+ p0 = vmulq_u32(p0, s_vec);
+ p1 = vmulq_u32(p1, s_vec);
+ p2 = vmulq_u32(p2, s_vec);
+ p3 = vmulq_u32(p3, s_vec);
+ p4 = vmulq_u32(p4, s_vec);
+ p5 = vmulq_u32(p5, s_vec);
+ p6 = vmulq_u32(p6, s_vec);
+ p7 = vmulq_u32(p7, s_vec);
+
+ p0 = vrshrq_n_u32(p0, SGRPROJ_MTABLE_BITS);
+ p1 = vrshrq_n_u32(p1, SGRPROJ_MTABLE_BITS);
+ p2 = vrshrq_n_u32(p2, SGRPROJ_MTABLE_BITS);
+ p3 = vrshrq_n_u32(p3, SGRPROJ_MTABLE_BITS);
+ p4 = vrshrq_n_u32(p4, SGRPROJ_MTABLE_BITS);
+ p5 = vrshrq_n_u32(p5, SGRPROJ_MTABLE_BITS);
+ p6 = vrshrq_n_u32(p6, SGRPROJ_MTABLE_BITS);
+ p7 = vrshrq_n_u32(p7, SGRPROJ_MTABLE_BITS);
+
+ p0 = vminq_u32(p0, const_val);
+ p1 = vminq_u32(p1, const_val);
+ p2 = vminq_u32(p2, const_val);
+ p3 = vminq_u32(p3, const_val);
+ p4 = vminq_u32(p4, const_val);
+ p5 = vminq_u32(p5, const_val);
+ p6 = vminq_u32(p6, const_val);
+ p7 = vminq_u32(p7, const_val);
+
+ {
+ store_u32_4x4((uint32_t *)src1, buf_stride, p0, p1, p2, p3);
+ store_u32_4x4((uint32_t *)src1 + 4, buf_stride, p4, p5, p6, p7);
+
+ for (int x = 0; x < 4; x++) {
+ for (int y = 0; y < 8; y++) {
+ dst_A16[x * buf_stride + y] = x_by_xplus1[src1[x * buf_stride + y]];
+ }
+ }
+ load_u16_8x4(dst_A16, buf_stride, &s16_4, &s16_5, &s16_6, &s16_7);
+ }
+
+ s16_4 = vsubq_u16(sgrproj_sgr, s16_4);
+ s16_5 = vsubq_u16(sgrproj_sgr, s16_5);
+ s16_6 = vsubq_u16(sgrproj_sgr, s16_6);
+ s16_7 = vsubq_u16(sgrproj_sgr, s16_7);
+
+ s0 = vmull_u16(vget_low_u16(s16_0), one_by_n_minus_1_vec);
+ s1 = vmull_u16(vget_low_u16(s16_1), one_by_n_minus_1_vec);
+ s2 = vmull_u16(vget_low_u16(s16_2), one_by_n_minus_1_vec);
+ s3 = vmull_u16(vget_low_u16(s16_3), one_by_n_minus_1_vec);
+ s4 = vmull_u16(vget_high_u16(s16_0), one_by_n_minus_1_vec);
+ s5 = vmull_u16(vget_high_u16(s16_1), one_by_n_minus_1_vec);
+ s6 = vmull_u16(vget_high_u16(s16_2), one_by_n_minus_1_vec);
+ s7 = vmull_u16(vget_high_u16(s16_3), one_by_n_minus_1_vec);
+
+ s0 = vmulq_u32(s0, vmovl_u16(vget_low_u16(s16_4)));
+ s1 = vmulq_u32(s1, vmovl_u16(vget_low_u16(s16_5)));
+ s2 = vmulq_u32(s2, vmovl_u16(vget_low_u16(s16_6)));
+ s3 = vmulq_u32(s3, vmovl_u16(vget_low_u16(s16_7)));
+ s4 = vmulq_u32(s4, vmovl_u16(vget_high_u16(s16_4)));
+ s5 = vmulq_u32(s5, vmovl_u16(vget_high_u16(s16_5)));
+ s6 = vmulq_u32(s6, vmovl_u16(vget_high_u16(s16_6)));
+ s7 = vmulq_u32(s7, vmovl_u16(vget_high_u16(s16_7)));
+
+ p0 = vrshrq_n_u32(s0, SGRPROJ_RECIP_BITS);
+ p1 = vrshrq_n_u32(s1, SGRPROJ_RECIP_BITS);
+ p2 = vrshrq_n_u32(s2, SGRPROJ_RECIP_BITS);
+ p3 = vrshrq_n_u32(s3, SGRPROJ_RECIP_BITS);
+ p4 = vrshrq_n_u32(s4, SGRPROJ_RECIP_BITS);
+ p5 = vrshrq_n_u32(s5, SGRPROJ_RECIP_BITS);
+ p6 = vrshrq_n_u32(s6, SGRPROJ_RECIP_BITS);
+ p7 = vrshrq_n_u32(s7, SGRPROJ_RECIP_BITS);
+
+ store_s32_4x4(dst2, buf_stride, vreinterpretq_s32_u32(p0),
+ vreinterpretq_s32_u32(p1), vreinterpretq_s32_u32(p2),
+ vreinterpretq_s32_u32(p3));
+ store_s32_4x4(dst2 + 4, buf_stride, vreinterpretq_s32_u32(p4),
+ vreinterpretq_s32_u32(p5), vreinterpretq_s32_u32(p6),
+ vreinterpretq_s32_u32(p7));
+}
+
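+// Given eleven consecutive row values, form four sliding sums of the squares
+// over windows of five consecutive rows (windows starting at rows 0, 2, 4 and
+// 6), i.e. the vertical part of the radius-2 box sum on the squared source.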
+static INLINE void boxsum2_square_sum_calc(
+ int16x4_t t1, int16x4_t t2, int16x4_t t3, int16x4_t t4, int16x4_t t5,
+ int16x4_t t6, int16x4_t t7, int16x4_t t8, int16x4_t t9, int16x4_t t10,
+ int16x4_t t11, int32x4_t *r0, int32x4_t *r1, int32x4_t *r2, int32x4_t *r3) {
+ int32x4_t d1, d2, d3, d4, d5, d6, d7, d8, d9, d10, d11;
+ int32x4_t r12, r34, r67, r89, r1011;
+ int32x4_t r345, r6789, r789;
+
+ d1 = vmull_s16(t1, t1);
+ d2 = vmull_s16(t2, t2);
+ d3 = vmull_s16(t3, t3);
+ d4 = vmull_s16(t4, t4);
+ d5 = vmull_s16(t5, t5);
+ d6 = vmull_s16(t6, t6);
+ d7 = vmull_s16(t7, t7);
+ d8 = vmull_s16(t8, t8);
+ d9 = vmull_s16(t9, t9);
+ d10 = vmull_s16(t10, t10);
+ d11 = vmull_s16(t11, t11);
+
+ r12 = vaddq_s32(d1, d2);
+ r34 = vaddq_s32(d3, d4);
+ r67 = vaddq_s32(d6, d7);
+ r89 = vaddq_s32(d8, d9);
+ r1011 = vaddq_s32(d10, d11);
+ r345 = vaddq_s32(r34, d5);
+ r6789 = vaddq_s32(r67, r89);
+ r789 = vsubq_s32(r6789, d6);
+ *r0 = vaddq_s32(r12, r345);
+ *r1 = vaddq_s32(r67, r345);
+ *r2 = vaddq_s32(d5, r6789);
+ *r3 = vaddq_s32(r789, r1011);
+}
+
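+// Computes the 5x5 box sums needed for the r == 2 (fast) self-guided path in
+// two separable passes: the first pass forms 5-row vertical sums of the
+// pixels (16-bit, dst16) and of their squares (32-bit, dst2); the second pass
+// adds 5 consecutive columns of those results, widening the pixel sums into
+// dst32 and rewriting the square sums in dst2 in place.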
+static INLINE void boxsum2(int16_t *src, const int src_stride, int16_t *dst16,
+ int32_t *dst32, int32_t *dst2, const int dst_stride,
+ const int width, const int height) {
+ assert(width > 2 * SGRPROJ_BORDER_HORZ);
+ assert(height > 2 * SGRPROJ_BORDER_VERT);
+
+ int16_t *dst1_16_ptr, *src_ptr;
+ int32_t *dst2_ptr;
+ int h, w, count = 0;
+ const int dst_stride_2 = (dst_stride << 1);
+ const int dst_stride_8 = (dst_stride << 3);
+
+ dst1_16_ptr = dst16;
+ dst2_ptr = dst2;
+ src_ptr = src;
+ w = width;
+ {
+ int16x8_t t1, t2, t3, t4, t5, t6, t7;
+ int16x8_t t8, t9, t10, t11, t12;
+
+ int16x8_t q12345, q56789, q34567, q7891011;
+ int16x8_t q12, q34, q67, q89, q1011;
+ int16x8_t q345, q6789, q789;
+
+ int32x4_t r12345, r56789, r34567, r7891011;
+
+ do {
+ h = height;
+ dst1_16_ptr = dst16 + (count << 3);
+ dst2_ptr = dst2 + (count << 3);
+ src_ptr = src + (count << 3);
+
+ dst1_16_ptr += dst_stride_2;
+ dst2_ptr += dst_stride_2;
+ do {
+ load_s16_8x4(src_ptr, src_stride, &t1, &t2, &t3, &t4);
+ src_ptr += 4 * src_stride;
+ load_s16_8x4(src_ptr, src_stride, &t5, &t6, &t7, &t8);
+ src_ptr += 4 * src_stride;
+ load_s16_8x4(src_ptr, src_stride, &t9, &t10, &t11, &t12);
+
+ q12 = vaddq_s16(t1, t2);
+ q34 = vaddq_s16(t3, t4);
+ q67 = vaddq_s16(t6, t7);
+ q89 = vaddq_s16(t8, t9);
+ q1011 = vaddq_s16(t10, t11);
+ q345 = vaddq_s16(q34, t5);
+ q6789 = vaddq_s16(q67, q89);
+ q789 = vaddq_s16(q89, t7);
+ q12345 = vaddq_s16(q12, q345);
+ q34567 = vaddq_s16(q67, q345);
+ q56789 = vaddq_s16(t5, q6789);
+ q7891011 = vaddq_s16(q789, q1011);
+
+ store_s16_8x4(dst1_16_ptr, dst_stride_2, q12345, q34567, q56789,
+ q7891011);
+ dst1_16_ptr += dst_stride_8;
+
+ boxsum2_square_sum_calc(
+ vget_low_s16(t1), vget_low_s16(t2), vget_low_s16(t3),
+ vget_low_s16(t4), vget_low_s16(t5), vget_low_s16(t6),
+ vget_low_s16(t7), vget_low_s16(t8), vget_low_s16(t9),
+ vget_low_s16(t10), vget_low_s16(t11), &r12345, &r34567, &r56789,
+ &r7891011);
+
+ store_s32_4x4(dst2_ptr, dst_stride_2, r12345, r34567, r56789, r7891011);
+
+ boxsum2_square_sum_calc(
+ vget_high_s16(t1), vget_high_s16(t2), vget_high_s16(t3),
+ vget_high_s16(t4), vget_high_s16(t5), vget_high_s16(t6),
+ vget_high_s16(t7), vget_high_s16(t8), vget_high_s16(t9),
+ vget_high_s16(t10), vget_high_s16(t11), &r12345, &r34567, &r56789,
+ &r7891011);
+
+ store_s32_4x4(dst2_ptr + 4, dst_stride_2, r12345, r34567, r56789,
+ r7891011);
+ dst2_ptr += (dst_stride_8);
+ h -= 8;
+ } while (h > 0);
+ w -= 8;
+ count++;
+ } while (w > 0);
+ }
+
+ {
+ int16x4_t s1, s2, s3, s4, s5, s6, s7, s8;
+ int32x4_t d1, d2, d3, d4, d5, d6, d7, d8;
+ int32x4_t q12345, q34567, q23456, q45678;
+ int32x4_t q23, q45, q67;
+ int32x4_t q2345, q4567;
+
+ int32x4_t r12345, r34567, r23456, r45678;
+ int32x4_t r23, r45, r67;
+ int32x4_t r2345, r4567;
+
+ int32_t *src2_ptr, *dst1_32_ptr;
+ int16_t *src1_ptr;
+ count = 0;
+ h = height;
+ do {
+ dst1_32_ptr = dst32 + count * dst_stride_8 + (dst_stride_2);
+ dst2_ptr = dst2 + count * dst_stride_8 + (dst_stride_2);
+ src1_ptr = dst16 + count * dst_stride_8 + (dst_stride_2);
+ src2_ptr = dst2 + count * dst_stride_8 + (dst_stride_2);
+ w = width;
+
+ dst1_32_ptr += 2;
+ dst2_ptr += 2;
+ load_s16_4x4(src1_ptr, dst_stride_2, &s1, &s2, &s3, &s4);
+ transpose_s16_4x4d(&s1, &s2, &s3, &s4);
+ load_s32_4x4(src2_ptr, dst_stride_2, &d1, &d2, &d3, &d4);
+ transpose_s32_4x4(&d1, &d2, &d3, &d4);
+ do {
+ src1_ptr += 4;
+ src2_ptr += 4;
+ load_s16_4x4(src1_ptr, dst_stride_2, &s5, &s6, &s7, &s8);
+ transpose_s16_4x4d(&s5, &s6, &s7, &s8);
+ load_s32_4x4(src2_ptr, dst_stride_2, &d5, &d6, &d7, &d8);
+ transpose_s32_4x4(&d5, &d6, &d7, &d8);
+ q23 = vaddl_s16(s2, s3);
+ q45 = vaddl_s16(s4, s5);
+ q67 = vaddl_s16(s6, s7);
+ q2345 = vaddq_s32(q23, q45);
+ q4567 = vaddq_s32(q45, q67);
+ q12345 = vaddq_s32(vmovl_s16(s1), q2345);
+ q23456 = vaddq_s32(q2345, vmovl_s16(s6));
+ q34567 = vaddq_s32(q4567, vmovl_s16(s3));
+ q45678 = vaddq_s32(q4567, vmovl_s16(s8));
+
+ transpose_s32_4x4(&q12345, &q23456, &q34567, &q45678);
+ store_s32_4x4(dst1_32_ptr, dst_stride_2, q12345, q23456, q34567,
+ q45678);
+ dst1_32_ptr += 4;
+ s1 = s5;
+ s2 = s6;
+ s3 = s7;
+ s4 = s8;
+
+ r23 = vaddq_s32(d2, d3);
+ r45 = vaddq_s32(d4, d5);
+ r67 = vaddq_s32(d6, d7);
+ r2345 = vaddq_s32(r23, r45);
+ r4567 = vaddq_s32(r45, r67);
+ r12345 = vaddq_s32(d1, r2345);
+ r23456 = vaddq_s32(r2345, d6);
+ r34567 = vaddq_s32(r4567, d3);
+ r45678 = vaddq_s32(r4567, d8);
+
+ transpose_s32_4x4(&r12345, &r23456, &r34567, &r45678);
+ store_s32_4x4(dst2_ptr, dst_stride_2, r12345, r23456, r34567, r45678);
+ dst2_ptr += 4;
+ d1 = d5;
+ d2 = d6;
+ d3 = d7;
+ d4 = d8;
+ w -= 4;
+ } while (w > 0);
+ h -= 8;
+ count++;
+ } while (h > 0);
+ }
+}
+
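+// Low bit depth (8-bit) wrapper around calc_ab_internal_common(): walks the
+// box-sum buffers in strips of 4 rows and 8 columns.  Since no bit-depth
+// normalization is needed here, the shifted copies of the sums passed to the
+// common helper are simply aliases of the originals.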
+static INLINE void calc_ab_internal_lbd(int32_t *A, uint16_t *A16,
+ uint16_t *B16, int32_t *B,
+ const int buf_stride, const int width,
+ const int height, const int r,
+ const int s, const int ht_inc) {
+ int32_t *src1, *dst2, count = 0;
+ uint16_t *dst_A16, *src2;
+ const uint32_t n = (2 * r + 1) * (2 * r + 1);
+ const uint32x4_t const_n_val = vdupq_n_u32(n);
+ const uint16x8_t sgrproj_sgr = vdupq_n_u16(SGRPROJ_SGR);
+ const uint16x4_t one_by_n_minus_1_vec = vdup_n_u16(one_by_x[n - 1]);
+ const uint32x4_t const_val = vdupq_n_u32(255);
+
+ uint16x8_t s16_0, s16_1, s16_2, s16_3, s16_4, s16_5, s16_6, s16_7;
+
+ uint32x4_t s0, s1, s2, s3, s4, s5, s6, s7;
+
+ const uint32x4_t s_vec = vdupq_n_u32(s);
+ int w, h = height;
+
+ do {
+ dst_A16 = A16 + (count << 2) * buf_stride;
+ src1 = A + (count << 2) * buf_stride;
+ src2 = B16 + (count << 2) * buf_stride;
+ dst2 = B + (count << 2) * buf_stride;
+ w = width;
+ do {
+ load_u32_4x4((uint32_t *)src1, buf_stride, &s0, &s1, &s2, &s3);
+ load_u32_4x4((uint32_t *)src1 + 4, buf_stride, &s4, &s5, &s6, &s7);
+ load_u16_8x4(src2, buf_stride, &s16_0, &s16_1, &s16_2, &s16_3);
+
+ s16_4 = s16_0;
+ s16_5 = s16_1;
+ s16_6 = s16_2;
+ s16_7 = s16_3;
+
+ calc_ab_internal_common(
+ s0, s1, s2, s3, s4, s5, s6, s7, s16_0, s16_1, s16_2, s16_3, s16_4,
+ s16_5, s16_6, s16_7, const_n_val, s_vec, const_val,
+ one_by_n_minus_1_vec, sgrproj_sgr, src1, dst_A16, dst2, buf_stride);
+
+ w -= 8;
+ dst2 += 8;
+ src1 += 8;
+ src2 += 8;
+ dst_A16 += 8;
+ } while (w > 0);
+ count++;
+ h -= (ht_inc * 4);
+ } while (h > 0);
+}
+
+static INLINE void calc_ab_internal_hbd(int32_t *A, uint16_t *A16,
+ uint16_t *B16, int32_t *B,
+ const int buf_stride, const int width,
+ const int height, const int bit_depth,
+ const int r, const int s,
+ const int ht_inc) {
+ int32_t *src1, *dst2, count = 0;
+ uint16_t *dst_A16, *src2;
+ const uint32_t n = (2 * r + 1) * (2 * r + 1);
+ const int16x8_t bd_min_2_vec = vdupq_n_s16(-(bit_depth - 8));
+ const int32x4_t bd_min_1_vec = vdupq_n_s32(-((bit_depth - 8) << 1));
+ const uint32x4_t const_n_val = vdupq_n_u32(n);
+ const uint16x8_t sgrproj_sgr = vdupq_n_u16(SGRPROJ_SGR);
+ const uint16x4_t one_by_n_minus_1_vec = vdup_n_u16(one_by_x[n - 1]);
+ const uint32x4_t const_val = vdupq_n_u32(255);
+
+ int32x4_t sr0, sr1, sr2, sr3, sr4, sr5, sr6, sr7;
+ uint16x8_t s16_0, s16_1, s16_2, s16_3;
+ uint16x8_t s16_4, s16_5, s16_6, s16_7;
+ uint32x4_t s0, s1, s2, s3, s4, s5, s6, s7;
+
+ const uint32x4_t s_vec = vdupq_n_u32(s);
+ int w, h = height;
+
+ do {
+ src1 = A + (count << 2) * buf_stride;
+ src2 = B16 + (count << 2) * buf_stride;
+ dst2 = B + (count << 2) * buf_stride;
+ dst_A16 = A16 + (count << 2) * buf_stride;
+ w = width;
+ do {
+ load_s32_4x4(src1, buf_stride, &sr0, &sr1, &sr2, &sr3);
+ load_s32_4x4(src1 + 4, buf_stride, &sr4, &sr5, &sr6, &sr7);
+ load_u16_8x4(src2, buf_stride, &s16_0, &s16_1, &s16_2, &s16_3);
+
+ s0 = vrshlq_u32(vreinterpretq_u32_s32(sr0), bd_min_1_vec);
+ s1 = vrshlq_u32(vreinterpretq_u32_s32(sr1), bd_min_1_vec);
+ s2 = vrshlq_u32(vreinterpretq_u32_s32(sr2), bd_min_1_vec);
+ s3 = vrshlq_u32(vreinterpretq_u32_s32(sr3), bd_min_1_vec);
+ s4 = vrshlq_u32(vreinterpretq_u32_s32(sr4), bd_min_1_vec);
+ s5 = vrshlq_u32(vreinterpretq_u32_s32(sr5), bd_min_1_vec);
+ s6 = vrshlq_u32(vreinterpretq_u32_s32(sr6), bd_min_1_vec);
+ s7 = vrshlq_u32(vreinterpretq_u32_s32(sr7), bd_min_1_vec);
+
+ s16_4 = vrshlq_u16(s16_0, bd_min_2_vec);
+ s16_5 = vrshlq_u16(s16_1, bd_min_2_vec);
+ s16_6 = vrshlq_u16(s16_2, bd_min_2_vec);
+ s16_7 = vrshlq_u16(s16_3, bd_min_2_vec);
+
+ calc_ab_internal_common(
+ s0, s1, s2, s3, s4, s5, s6, s7, s16_0, s16_1, s16_2, s16_3, s16_4,
+ s16_5, s16_6, s16_7, const_n_val, s_vec, const_val,
+ one_by_n_minus_1_vec, sgrproj_sgr, src1, dst_A16, dst2, buf_stride);
+
+ w -= 8;
+ dst2 += 8;
+ src1 += 8;
+ src2 += 8;
+ dst_A16 += 8;
+ } while (w > 0);
+ count++;
+ h -= (ht_inc * 4);
+ } while (h > 0);
+}
+
+static INLINE void calc_ab_fast_internal_lbd(int32_t *A, uint16_t *A16,
+ int32_t *B, const int buf_stride,
+ const int width, const int height,
+ const int r, const int s,
+ const int ht_inc) {
+ int32_t *src1, *src2, count = 0;
+ uint16_t *dst_A16;
+ const uint32_t n = (2 * r + 1) * (2 * r + 1);
+ const uint32x4_t const_n_val = vdupq_n_u32(n);
+ const uint16x4_t sgrproj_sgr = vdup_n_u16(SGRPROJ_SGR);
+ const uint32x4_t one_by_n_minus_1_vec = vdupq_n_u32(one_by_x[n - 1]);
+ const uint32x4_t const_val = vdupq_n_u32(255);
+
+ int32x4_t sr0, sr1, sr2, sr3, sr4, sr5, sr6, sr7;
+ uint32x4_t s0, s1, s2, s3, s4, s5, s6, s7;
+
+ const uint32x4_t s_vec = vdupq_n_u32(s);
+ int w, h = height;
+
+ do {
+ src1 = A + (count << 2) * buf_stride;
+ src2 = B + (count << 2) * buf_stride;
+ dst_A16 = A16 + (count << 2) * buf_stride;
+ w = width;
+ do {
+ load_s32_4x4(src1, buf_stride, &sr0, &sr1, &sr2, &sr3);
+ load_s32_4x4(src2, buf_stride, &sr4, &sr5, &sr6, &sr7);
+
+ s0 = vreinterpretq_u32_s32(sr0);
+ s1 = vreinterpretq_u32_s32(sr1);
+ s2 = vreinterpretq_u32_s32(sr2);
+ s3 = vreinterpretq_u32_s32(sr3);
+ s4 = vreinterpretq_u32_s32(sr4);
+ s5 = vreinterpretq_u32_s32(sr5);
+ s6 = vreinterpretq_u32_s32(sr6);
+ s7 = vreinterpretq_u32_s32(sr7);
+
+ calc_ab_fast_internal_common(s0, s1, s2, s3, s4, s5, s6, s7, sr4, sr5,
+ sr6, sr7, const_n_val, s_vec, const_val,
+ one_by_n_minus_1_vec, sgrproj_sgr, src1,
+ dst_A16, src2, buf_stride);
+
+ w -= 4;
+ src1 += 4;
+ src2 += 4;
+ dst_A16 += 4;
+ } while (w > 0);
+ count++;
+ h -= (ht_inc * 4);
+ } while (h > 0);
+}
+
+static INLINE void calc_ab_fast_internal_hbd(int32_t *A, uint16_t *A16,
+ int32_t *B, const int buf_stride,
+ const int width, const int height,
+ const int bit_depth, const int r,
+ const int s, const int ht_inc) {
+ int32_t *src1, *src2, count = 0;
+ uint16_t *dst_A16;
+ const uint32_t n = (2 * r + 1) * (2 * r + 1);
+ const int32x4_t bd_min_2_vec = vdupq_n_s32(-(bit_depth - 8));
+ const int32x4_t bd_min_1_vec = vdupq_n_s32(-((bit_depth - 8) << 1));
+ const uint32x4_t const_n_val = vdupq_n_u32(n);
+ const uint16x4_t sgrproj_sgr = vdup_n_u16(SGRPROJ_SGR);
+ const uint32x4_t one_by_n_minus_1_vec = vdupq_n_u32(one_by_x[n - 1]);
+ const uint32x4_t const_val = vdupq_n_u32(255);
+
+ int32x4_t sr0, sr1, sr2, sr3, sr4, sr5, sr6, sr7;
+ uint32x4_t s0, s1, s2, s3, s4, s5, s6, s7;
+
+ const uint32x4_t s_vec = vdupq_n_u32(s);
+ int w, h = height;
+
+ do {
+ src1 = A + (count << 2) * buf_stride;
+ src2 = B + (count << 2) * buf_stride;
+ dst_A16 = A16 + (count << 2) * buf_stride;
+ w = width;
+ do {
+ load_s32_4x4(src1, buf_stride, &sr0, &sr1, &sr2, &sr3);
+ load_s32_4x4(src2, buf_stride, &sr4, &sr5, &sr6, &sr7);
+
+ s0 = vrshlq_u32(vreinterpretq_u32_s32(sr0), bd_min_1_vec);
+ s1 = vrshlq_u32(vreinterpretq_u32_s32(sr1), bd_min_1_vec);
+ s2 = vrshlq_u32(vreinterpretq_u32_s32(sr2), bd_min_1_vec);
+ s3 = vrshlq_u32(vreinterpretq_u32_s32(sr3), bd_min_1_vec);
+ s4 = vrshlq_u32(vreinterpretq_u32_s32(sr4), bd_min_2_vec);
+ s5 = vrshlq_u32(vreinterpretq_u32_s32(sr5), bd_min_2_vec);
+ s6 = vrshlq_u32(vreinterpretq_u32_s32(sr6), bd_min_2_vec);
+ s7 = vrshlq_u32(vreinterpretq_u32_s32(sr7), bd_min_2_vec);
+
+ calc_ab_fast_internal_common(s0, s1, s2, s3, s4, s5, s6, s7, sr4, sr5,
+ sr6, sr7, const_n_val, s_vec, const_val,
+ one_by_n_minus_1_vec, sgrproj_sgr, src1,
+ dst_A16, src2, buf_stride);
+
+ w -= 4;
+ src1 += 4;
+ src2 += 4;
+ dst_A16 += 4;
+ } while (w > 0);
+ count++;
+ h -= (ht_inc * 4);
+ } while (h > 0);
+}
+
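+// Computes the 3x3 box sums needed for the r == 1 self-guided path, again in
+// two separable passes: 3-row vertical sums of the pixels (dst1) and of their
+// squares (dst2) first, then 3-column horizontal sums written back in place
+// with the help of 4x4 transposes.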
+static INLINE void boxsum1(int16_t *src, const int src_stride, uint16_t *dst1,
+ int32_t *dst2, const int dst_stride, const int width,
+ const int height) {
+ assert(width > 2 * SGRPROJ_BORDER_HORZ);
+ assert(height > 2 * SGRPROJ_BORDER_VERT);
+
+ int16_t *src_ptr;
+ int32_t *dst2_ptr;
+ uint16_t *dst1_ptr;
+ int h, w, count = 0;
+
+ w = width;
+ {
+ int16x8_t s1, s2, s3, s4, s5, s6, s7, s8;
+ int16x8_t q23, q34, q56, q234, q345, q456, q567;
+ int32x4_t r23, r56, r345, r456, r567, r78, r678;
+ int32x4_t r4_low, r4_high, r34_low, r34_high, r234_low, r234_high;
+ int32x4_t r2, r3, r5, r6, r7, r8;
+ int16x8_t q678, q78;
+
+ do {
+ dst1_ptr = dst1 + (count << 3);
+ dst2_ptr = dst2 + (count << 3);
+ src_ptr = src + (count << 3);
+ h = height;
+
+ load_s16_8x4(src_ptr, src_stride, &s1, &s2, &s3, &s4);
+ src_ptr += 4 * src_stride;
+
+ q23 = vaddq_s16(s2, s3);
+ q234 = vaddq_s16(q23, s4);
+ q34 = vaddq_s16(s3, s4);
+ dst1_ptr += (dst_stride << 1);
+
+ r2 = vmull_s16(vget_low_s16(s2), vget_low_s16(s2));
+ r3 = vmull_s16(vget_low_s16(s3), vget_low_s16(s3));
+ r4_low = vmull_s16(vget_low_s16(s4), vget_low_s16(s4));
+ r23 = vaddq_s32(r2, r3);
+ r234_low = vaddq_s32(r23, r4_low);
+ r34_low = vaddq_s32(r3, r4_low);
+
+ r2 = vmull_s16(vget_high_s16(s2), vget_high_s16(s2));
+ r3 = vmull_s16(vget_high_s16(s3), vget_high_s16(s3));
+ r4_high = vmull_s16(vget_high_s16(s4), vget_high_s16(s4));
+ r23 = vaddq_s32(r2, r3);
+ r234_high = vaddq_s32(r23, r4_high);
+ r34_high = vaddq_s32(r3, r4_high);
+
+ dst2_ptr += (dst_stride << 1);
+
+ do {
+ load_s16_8x4(src_ptr, src_stride, &s5, &s6, &s7, &s8);
+ src_ptr += 4 * src_stride;
+
+ q345 = vaddq_s16(s5, q34);
+ q56 = vaddq_s16(s5, s6);
+ q456 = vaddq_s16(s4, q56);
+ q567 = vaddq_s16(s7, q56);
+ q78 = vaddq_s16(s7, s8);
+ q678 = vaddq_s16(s6, q78);
+
+ store_s16_8x4((int16_t *)dst1_ptr, dst_stride, q234, q345, q456, q567);
+ dst1_ptr += (dst_stride << 2);
+
+ s4 = s8;
+ q34 = q78;
+ q234 = q678;
+
+ r5 = vmull_s16(vget_low_s16(s5), vget_low_s16(s5));
+ r6 = vmull_s16(vget_low_s16(s6), vget_low_s16(s6));
+ r7 = vmull_s16(vget_low_s16(s7), vget_low_s16(s7));
+ r8 = vmull_s16(vget_low_s16(s8), vget_low_s16(s8));
+
+ r345 = vaddq_s32(r5, r34_low);
+ r56 = vaddq_s32(r5, r6);
+ r456 = vaddq_s32(r4_low, r56);
+ r567 = vaddq_s32(r7, r56);
+ r78 = vaddq_s32(r7, r8);
+ r678 = vaddq_s32(r6, r78);
+ store_s32_4x4(dst2_ptr, dst_stride, r234_low, r345, r456, r567);
+
+ r4_low = r8;
+ r34_low = r78;
+ r234_low = r678;
+
+ r5 = vmull_s16(vget_high_s16(s5), vget_high_s16(s5));
+ r6 = vmull_s16(vget_high_s16(s6), vget_high_s16(s6));
+ r7 = vmull_s16(vget_high_s16(s7), vget_high_s16(s7));
+ r8 = vmull_s16(vget_high_s16(s8), vget_high_s16(s8));
+
+ r345 = vaddq_s32(r5, r34_high);
+ r56 = vaddq_s32(r5, r6);
+ r456 = vaddq_s32(r4_high, r56);
+ r567 = vaddq_s32(r7, r56);
+ r78 = vaddq_s32(r7, r8);
+ r678 = vaddq_s32(r6, r78);
+ store_s32_4x4((dst2_ptr + 4), dst_stride, r234_high, r345, r456, r567);
+ dst2_ptr += (dst_stride << 2);
+
+ r4_high = r8;
+ r34_high = r78;
+ r234_high = r678;
+
+ h -= 4;
+ } while (h > 0);
+ w -= 8;
+ count++;
+ } while (w > 0);
+ }
+
+ {
+ int16x4_t d1, d2, d3, d4, d5, d6, d7, d8;
+ int16x4_t q23, q34, q56, q234, q345, q456, q567;
+ int32x4_t r23, r56, r234, r345, r456, r567, r34, r78, r678;
+ int32x4_t r1, r2, r3, r4, r5, r6, r7, r8;
+ int16x4_t q678, q78;
+
+ int32_t *src2_ptr;
+ uint16_t *src1_ptr;
+ count = 0;
+ h = height;
+ w = width;
+ do {
+ dst1_ptr = dst1 + (count << 2) * dst_stride;
+ dst2_ptr = dst2 + (count << 2) * dst_stride;
+ src1_ptr = dst1 + (count << 2) * dst_stride;
+ src2_ptr = dst2 + (count << 2) * dst_stride;
+ w = width;
+
+ load_s16_4x4((int16_t *)src1_ptr, dst_stride, &d1, &d2, &d3, &d4);
+ transpose_s16_4x4d(&d1, &d2, &d3, &d4);
+ load_s32_4x4(src2_ptr, dst_stride, &r1, &r2, &r3, &r4);
+ transpose_s32_4x4(&r1, &r2, &r3, &r4);
+ src1_ptr += 4;
+ src2_ptr += 4;
+
+ q23 = vadd_s16(d2, d3);
+ q234 = vadd_s16(q23, d4);
+ q34 = vadd_s16(d3, d4);
+ dst1_ptr += 2;
+ r23 = vaddq_s32(r2, r3);
+ r234 = vaddq_s32(r23, r4);
+ r34 = vaddq_s32(r3, r4);
+ dst2_ptr += 2;
+
+ do {
+ load_s16_4x4((int16_t *)src1_ptr, dst_stride, &d5, &d6, &d7, &d8);
+ transpose_s16_4x4d(&d5, &d6, &d7, &d8);
+ load_s32_4x4(src2_ptr, dst_stride, &r5, &r6, &r7, &r8);
+ transpose_s32_4x4(&r5, &r6, &r7, &r8);
+ src1_ptr += 4;
+ src2_ptr += 4;
+
+ q345 = vadd_s16(d5, q34);
+ q56 = vadd_s16(d5, d6);
+ q456 = vadd_s16(d4, q56);
+ q567 = vadd_s16(d7, q56);
+ q78 = vadd_s16(d7, d8);
+ q678 = vadd_s16(d6, q78);
+ transpose_s16_4x4d(&q234, &q345, &q456, &q567);
+ store_s16_4x4((int16_t *)dst1_ptr, dst_stride, q234, q345, q456, q567);
+ dst1_ptr += 4;
+
+ d4 = d8;
+ q34 = q78;
+ q234 = q678;
+
+ r345 = vaddq_s32(r5, r34);
+ r56 = vaddq_s32(r5, r6);
+ r456 = vaddq_s32(r4, r56);
+ r567 = vaddq_s32(r7, r56);
+ r78 = vaddq_s32(r7, r8);
+ r678 = vaddq_s32(r6, r78);
+ transpose_s32_4x4(&r234, &r345, &r456, &r567);
+ store_s32_4x4(dst2_ptr, dst_stride, r234, r345, r456, r567);
+ dst2_ptr += 4;
+
+ r4 = r8;
+ r34 = r78;
+ r234 = r678;
+ w -= 4;
+ } while (w > 0);
+ h -= 4;
+ count++;
+ } while (h > 0);
+ }
+}
+
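+// 3x3 weighted cross sum used by the r == 1 final filter (this variant and
+// the u16 one below): the center and its four edge neighbors get weight 4 and
+// the four corners get weight 3, i.e.
+// 4 * (fours + threes) - threes == 4 * fours + 3 * threes.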
+static INLINE int32x4_t cross_sum_inp_s32(int32_t *buf, int buf_stride) {
+ int32x4_t xtr, xt, xtl, xl, x, xr, xbr, xb, xbl;
+ int32x4_t fours, threes, res;
+
+ xtl = vld1q_s32(buf - buf_stride - 1);
+ xt = vld1q_s32(buf - buf_stride);
+ xtr = vld1q_s32(buf - buf_stride + 1);
+ xl = vld1q_s32(buf - 1);
+ x = vld1q_s32(buf);
+ xr = vld1q_s32(buf + 1);
+ xbl = vld1q_s32(buf + buf_stride - 1);
+ xb = vld1q_s32(buf + buf_stride);
+ xbr = vld1q_s32(buf + buf_stride + 1);
+
+ fours = vaddq_s32(xl, vaddq_s32(xt, vaddq_s32(xr, vaddq_s32(xb, x))));
+ threes = vaddq_s32(xtl, vaddq_s32(xtr, vaddq_s32(xbr, xbl)));
+ res = vsubq_s32(vshlq_n_s32(vaddq_s32(fours, threes), 2), threes);
+ return res;
+}
+
+static INLINE void cross_sum_inp_u16(uint16_t *buf, int buf_stride,
+ int32x4_t *a0, int32x4_t *a1) {
+ uint16x8_t xtr, xt, xtl, xl, x, xr, xbr, xb, xbl;
+ uint16x8_t r0, r1;
+
+ xtl = vld1q_u16(buf - buf_stride - 1);
+ xt = vld1q_u16(buf - buf_stride);
+ xtr = vld1q_u16(buf - buf_stride + 1);
+ xl = vld1q_u16(buf - 1);
+ x = vld1q_u16(buf);
+ xr = vld1q_u16(buf + 1);
+ xbl = vld1q_u16(buf + buf_stride - 1);
+ xb = vld1q_u16(buf + buf_stride);
+ xbr = vld1q_u16(buf + buf_stride + 1);
+
+ xb = vaddq_u16(xb, x);
+ xt = vaddq_u16(xt, xr);
+ xl = vaddq_u16(xl, xb);
+ xl = vaddq_u16(xl, xt);
+
+ r0 = vshlq_n_u16(xl, 2);
+
+ xbl = vaddq_u16(xbl, xbr);
+ xtl = vaddq_u16(xtl, xtr);
+ xtl = vaddq_u16(xtl, xbl);
+
+ r1 = vshlq_n_u16(xtl, 2);
+ r1 = vsubq_u16(r1, xtl);
+
+ *a0 = vreinterpretq_s32_u32(
+ vaddq_u32(vmovl_u16(vget_low_u16(r0)), vmovl_u16(vget_low_u16(r1))));
+ *a1 = vreinterpretq_s32_u32(
+ vaddq_u32(vmovl_u16(vget_high_u16(r0)), vmovl_u16(vget_high_u16(r1))));
+}
+
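+// Cross sum for even rows of the r == 2 (fast) final filter (this variant and
+// the _inp16 one below): only the rows above and below contribute, with the
+// corner samples weighted by 5 and the samples directly above and below
+// weighted by 6, i.e.
+// 4 * (fives + sixes) + (fives + sixes) + sixes == 5 * fives + 6 * sixes.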
+static INLINE int32x4_t cross_sum_fast_even_row(int32_t *buf, int buf_stride) {
+ int32x4_t xtr, xt, xtl, xbr, xb, xbl;
+ int32x4_t fives, sixes, fives_plus_sixes;
+
+ xtl = vld1q_s32(buf - buf_stride - 1);
+ xt = vld1q_s32(buf - buf_stride);
+ xtr = vld1q_s32(buf - buf_stride + 1);
+ xbl = vld1q_s32(buf + buf_stride - 1);
+ xb = vld1q_s32(buf + buf_stride);
+ xbr = vld1q_s32(buf + buf_stride + 1);
+
+ fives = vaddq_s32(xtl, vaddq_s32(xtr, vaddq_s32(xbr, xbl)));
+ sixes = vaddq_s32(xt, xb);
+ fives_plus_sixes = vaddq_s32(fives, sixes);
+
+ return vaddq_s32(
+ vaddq_s32(vshlq_n_s32(fives_plus_sixes, 2), fives_plus_sixes), sixes);
+}
+
+static INLINE void cross_sum_fast_even_row_inp16(uint16_t *buf, int buf_stride,
+ int32x4_t *a0, int32x4_t *a1) {
+ uint16x8_t xtr, xt, xtl, xbr, xb, xbl, xb0;
+
+ xtl = vld1q_u16(buf - buf_stride - 1);
+ xt = vld1q_u16(buf - buf_stride);
+ xtr = vld1q_u16(buf - buf_stride + 1);
+ xbl = vld1q_u16(buf + buf_stride - 1);
+ xb = vld1q_u16(buf + buf_stride);
+ xbr = vld1q_u16(buf + buf_stride + 1);
+
+ xbr = vaddq_u16(xbr, xbl);
+ xtr = vaddq_u16(xtr, xtl);
+ xbr = vaddq_u16(xbr, xtr);
+ xtl = vshlq_n_u16(xbr, 2);
+ xbr = vaddq_u16(xtl, xbr);
+
+ xb = vaddq_u16(xb, xt);
+ xb0 = vshlq_n_u16(xb, 1);
+ xb = vshlq_n_u16(xb, 2);
+ xb = vaddq_u16(xb, xb0);
+
+ *a0 = vreinterpretq_s32_u32(
+ vaddq_u32(vmovl_u16(vget_low_u16(xbr)), vmovl_u16(vget_low_u16(xb))));
+ *a1 = vreinterpretq_s32_u32(
+ vaddq_u32(vmovl_u16(vget_high_u16(xbr)), vmovl_u16(vget_high_u16(xb))));
+}
+
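+// Cross sum for odd rows of the r == 2 (fast) final filter (this variant and
+// the _inp16 one below): only the current row contributes, with the left and
+// right neighbors weighted by 5 and the center sample weighted by 6.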
+static INLINE int32x4_t cross_sum_fast_odd_row(int32_t *buf) {
+ int32x4_t xl, x, xr;
+ int32x4_t fives, sixes, fives_plus_sixes;
+
+ xl = vld1q_s32(buf - 1);
+ x = vld1q_s32(buf);
+ xr = vld1q_s32(buf + 1);
+ fives = vaddq_s32(xl, xr);
+ sixes = x;
+ fives_plus_sixes = vaddq_s32(fives, sixes);
+
+ return vaddq_s32(
+ vaddq_s32(vshlq_n_s32(fives_plus_sixes, 2), fives_plus_sixes), sixes);
+}
+
+static INLINE void cross_sum_fast_odd_row_inp16(uint16_t *buf, int32x4_t *a0,
+ int32x4_t *a1) {
+ uint16x8_t xl, x, xr;
+ uint16x8_t x0;
+
+ xl = vld1q_u16(buf - 1);
+ x = vld1q_u16(buf);
+ xr = vld1q_u16(buf + 1);
+ xl = vaddq_u16(xl, xr);
+ x0 = vshlq_n_u16(xl, 2);
+ xl = vaddq_u16(xl, x0);
+
+ x0 = vshlq_n_u16(x, 1);
+ x = vshlq_n_u16(x, 2);
+ x = vaddq_u16(x, x0);
+
+ *a0 = vreinterpretq_s32_u32(
+ vaddq_u32(vmovl_u16(vget_low_u16(xl)), vmovl_u16(vget_low_u16(x))));
+ *a1 = vreinterpretq_s32_u32(
+ vaddq_u32(vmovl_u16(vget_high_u16(xl)), vmovl_u16(vget_high_u16(x))));
+}
+
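+// Final filter for the r == 2 (fast) path: even rows use the even-row cross
+// sums above and are normalized with NB_EVEN, odd rows use the single-row
+// cross sums and are normalized with NB_ODD.  Each output is
+// (cross_sum(A) * src + cross_sum(B)) >> (SGRPROJ_SGR_BITS + NB_* -
+// SGRPROJ_RST_BITS), with rounding.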
+static void final_filter_fast_internal(uint16_t *A, int32_t *B,
+ const int buf_stride, int16_t *src,
+ const int src_stride, int32_t *dst,
+ const int dst_stride, const int width,
+ const int height) {
+ int16x8_t s0;
+ int32_t *B_tmp, *dst_ptr;
+ uint16_t *A_tmp;
+ int16_t *src_ptr;
+ int32x4_t a_res0, a_res1, b_res0, b_res1;
+ int w, h, count = 0;
+ assert(SGRPROJ_SGR_BITS == 8);
+ assert(SGRPROJ_RST_BITS == 4);
+
+ A_tmp = A;
+ B_tmp = B;
+ src_ptr = src;
+ dst_ptr = dst;
+ h = height;
+ do {
+ A_tmp = (A + count * buf_stride);
+ B_tmp = (B + count * buf_stride);
+ src_ptr = (src + count * src_stride);
+ dst_ptr = (dst + count * dst_stride);
+ w = width;
+ if (!(count & 1)) {
+ do {
+ s0 = vld1q_s16(src_ptr);
+ cross_sum_fast_even_row_inp16(A_tmp, buf_stride, &a_res0, &a_res1);
+ a_res0 = vmulq_s32(vmovl_s16(vget_low_s16(s0)), a_res0);
+ a_res1 = vmulq_s32(vmovl_s16(vget_high_s16(s0)), a_res1);
+
+ b_res0 = cross_sum_fast_even_row(B_tmp, buf_stride);
+ b_res1 = cross_sum_fast_even_row(B_tmp + 4, buf_stride);
+ a_res0 = vaddq_s32(a_res0, b_res0);
+ a_res1 = vaddq_s32(a_res1, b_res1);
+
+ a_res0 =
+ vrshrq_n_s32(a_res0, SGRPROJ_SGR_BITS + NB_EVEN - SGRPROJ_RST_BITS);
+ a_res1 =
+ vrshrq_n_s32(a_res1, SGRPROJ_SGR_BITS + NB_EVEN - SGRPROJ_RST_BITS);
+
+ vst1q_s32(dst_ptr, a_res0);
+ vst1q_s32(dst_ptr + 4, a_res1);
+
+ A_tmp += 8;
+ B_tmp += 8;
+ src_ptr += 8;
+ dst_ptr += 8;
+ w -= 8;
+ } while (w > 0);
+ } else {
+ do {
+ s0 = vld1q_s16(src_ptr);
+ cross_sum_fast_odd_row_inp16(A_tmp, &a_res0, &a_res1);
+ a_res0 = vmulq_s32(vmovl_s16(vget_low_s16(s0)), a_res0);
+ a_res1 = vmulq_s32(vmovl_s16(vget_high_s16(s0)), a_res1);
+
+ b_res0 = cross_sum_fast_odd_row(B_tmp);
+ b_res1 = cross_sum_fast_odd_row(B_tmp + 4);
+ a_res0 = vaddq_s32(a_res0, b_res0);
+ a_res1 = vaddq_s32(a_res1, b_res1);
+
+ a_res0 =
+ vrshrq_n_s32(a_res0, SGRPROJ_SGR_BITS + NB_ODD - SGRPROJ_RST_BITS);
+ a_res1 =
+ vrshrq_n_s32(a_res1, SGRPROJ_SGR_BITS + NB_ODD - SGRPROJ_RST_BITS);
+
+ vst1q_s32(dst_ptr, a_res0);
+ vst1q_s32(dst_ptr + 4, a_res1);
+
+ A_tmp += 8;
+ B_tmp += 8;
+ src_ptr += 8;
+ dst_ptr += 8;
+ w -= 8;
+ } while (w > 0);
+ }
+ count++;
+ h -= 1;
+ } while (h > 0);
+}
+
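+// Final filter for the r == 1 path: every row uses the full 3x3 cross sums
+// above.  The per-pixel computation matches the fast variant, here always
+// shifted by SGRPROJ_SGR_BITS + NB_EVEN - SGRPROJ_RST_BITS.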
+void final_filter_internal(uint16_t *A, int32_t *B, const int buf_stride,
+ int16_t *src, const int src_stride, int32_t *dst,
+ const int dst_stride, const int width,
+ const int height) {
+ int16x8_t s0;
+ int32_t *B_tmp, *dst_ptr;
+ uint16_t *A_tmp;
+ int16_t *src_ptr;
+ int32x4_t a_res0, a_res1, b_res0, b_res1;
+ int w, h, count = 0;
+
+ assert(SGRPROJ_SGR_BITS == 8);
+ assert(SGRPROJ_RST_BITS == 4);
+ h = height;
+
+ do {
+ A_tmp = (A + count * buf_stride);
+ B_tmp = (B + count * buf_stride);
+ src_ptr = (src + count * src_stride);
+ dst_ptr = (dst + count * dst_stride);
+ w = width;
+ do {
+ s0 = vld1q_s16(src_ptr);
+ cross_sum_inp_u16(A_tmp, buf_stride, &a_res0, &a_res1);
+ a_res0 = vmulq_s32(vmovl_s16(vget_low_s16(s0)), a_res0);
+ a_res1 = vmulq_s32(vmovl_s16(vget_high_s16(s0)), a_res1);
+
+ b_res0 = cross_sum_inp_s32(B_tmp, buf_stride);
+ b_res1 = cross_sum_inp_s32(B_tmp + 4, buf_stride);
+ a_res0 = vaddq_s32(a_res0, b_res0);
+ a_res1 = vaddq_s32(a_res1, b_res1);
+
+ a_res0 =
+ vrshrq_n_s32(a_res0, SGRPROJ_SGR_BITS + NB_EVEN - SGRPROJ_RST_BITS);
+ a_res1 =
+ vrshrq_n_s32(a_res1, SGRPROJ_SGR_BITS + NB_EVEN - SGRPROJ_RST_BITS);
+ vst1q_s32(dst_ptr, a_res0);
+ vst1q_s32(dst_ptr + 4, a_res1);
+
+ A_tmp += 8;
+ B_tmp += 8;
+ src_ptr += 8;
+ dst_ptr += 8;
+ w -= 8;
+ } while (w > 0);
+ count++;
+ h -= 1;
+ } while (h > 0);
+}
+
+static INLINE void restoration_fast_internal(uint16_t *dgd16, int width,
+ int height, int dgd_stride,
+ int32_t *dst, int dst_stride,
+ int bit_depth, int sgr_params_idx,
+ int radius_idx) {
+ const sgr_params_type *const params = &sgr_params[sgr_params_idx];
+ const int r = params->r[radius_idx];
+ const int width_ext = width + 2 * SGRPROJ_BORDER_HORZ;
+ const int height_ext = height + 2 * SGRPROJ_BORDER_VERT;
+
+ const int buf_stride = ((width_ext + 3) & ~3) + 16;
+ int32_t A_[RESTORATION_PROC_UNIT_PELS];
+ uint16_t A16_[RESTORATION_PROC_UNIT_PELS];
+ int32_t B_[RESTORATION_PROC_UNIT_PELS];
+ int32_t *square_sum_buf = A_;
+ int32_t *sum_buf = B_;
+ uint16_t *tmp16_buf = A16_;
+
+ assert(r <= MAX_RADIUS && "Need MAX_RADIUS >= r");
+ assert(r <= SGRPROJ_BORDER_VERT - 1 && r <= SGRPROJ_BORDER_HORZ - 1 &&
+ "Need SGRPROJ_BORDER_* >= r+1");
+
+ assert(radius_idx == 0);
+ assert(r == 2);
+
+ // The input (dgd16) is 16-bit.
+ // The first-stage sum-of-pixels output is 16-bit (tmp16_buf) and the final
+ // sum is kept in 32 bits (sum_buf); the sum-of-squares output is kept in a
+ // 32-bit buffer (square_sum_buf).
+ boxsum2((int16_t *)(dgd16 - dgd_stride * SGRPROJ_BORDER_VERT -
+ SGRPROJ_BORDER_HORZ),
+ dgd_stride, (int16_t *)tmp16_buf, sum_buf, square_sum_buf, buf_stride,
+ width_ext, height_ext);
+
+ square_sum_buf += SGRPROJ_BORDER_VERT * buf_stride + SGRPROJ_BORDER_HORZ;
+ sum_buf += SGRPROJ_BORDER_VERT * buf_stride + SGRPROJ_BORDER_HORZ;
+ tmp16_buf += SGRPROJ_BORDER_VERT * buf_stride + SGRPROJ_BORDER_HORZ;
+
+ // Calculate a and b.  The a output is stored in the 16-bit tmp16_buf and is
+ // in the range [1, 256] for all bit depths; the b output is kept in a 32-bit
+ // buffer.
+
+ if (8 == bit_depth) {
+ calc_ab_fast_internal_lbd(
+ (square_sum_buf - buf_stride - 1), (tmp16_buf - buf_stride - 1),
+ (sum_buf - buf_stride - 1), buf_stride * 2, width + 2, height + 2, r,
+ params->s[radius_idx], 2);
+ } else {
+ calc_ab_fast_internal_hbd(
+ (square_sum_buf - buf_stride - 1), (tmp16_buf - buf_stride - 1),
+ (sum_buf - buf_stride - 1), buf_stride * 2, width + 2, height + 2,
+ bit_depth, r, params->s[radius_idx], 2);
+ }
+ final_filter_fast_internal(tmp16_buf, sum_buf, buf_stride, (int16_t *)dgd16,
+ dgd_stride, dst, dst_stride, width, height);
+}
+
+static INLINE void restoration_internal(uint16_t *dgd16, int width, int height,
+ int dgd_stride, int32_t *dst,
+ int dst_stride, int bit_depth,
+ int sgr_params_idx, int radius_idx) {
+ const sgr_params_type *const params = &sgr_params[sgr_params_idx];
+ const int r = params->r[radius_idx];
+ const int width_ext = width + 2 * SGRPROJ_BORDER_HORZ;
+ const int height_ext = height + 2 * SGRPROJ_BORDER_VERT;
+
+ int buf_stride = ((width_ext + 3) & ~3) + 16;
+ int32_t A_[RESTORATION_PROC_UNIT_PELS];
+ uint16_t A16_[RESTORATION_PROC_UNIT_PELS];
+ uint16_t B16_[RESTORATION_PROC_UNIT_PELS];
+ int32_t B_[RESTORATION_PROC_UNIT_PELS];
+ int32_t *square_sum_buf = A_;
+ uint16_t *sum_buf = B16_;
+ uint16_t *A16 = A16_;
+ int32_t *B = B_;
+
+ assert(r <= MAX_RADIUS && "Need MAX_RADIUS >= r");
+ assert(r <= SGRPROJ_BORDER_VERT - 1 && r <= SGRPROJ_BORDER_HORZ - 1 &&
+ "Need SGRPROJ_BORDER_* >= r+1");
+
+ assert(radius_idx == 1);
+ assert(r == 1);
+
+ // The input (dgd16) is 16-bit.
+ // The sum-of-pixels output is 16-bit (sum_buf); the sum-of-squares output is
+ // kept in a 32-bit buffer (square_sum_buf).
+ boxsum1((int16_t *)(dgd16 - dgd_stride * SGRPROJ_BORDER_VERT -
+ SGRPROJ_BORDER_HORZ),
+ dgd_stride, sum_buf, square_sum_buf, buf_stride, width_ext,
+ height_ext);
+
+ square_sum_buf += SGRPROJ_BORDER_VERT * buf_stride + SGRPROJ_BORDER_HORZ;
+ B += SGRPROJ_BORDER_VERT * buf_stride + SGRPROJ_BORDER_HORZ;
+ A16 += SGRPROJ_BORDER_VERT * buf_stride + SGRPROJ_BORDER_HORZ;
+ sum_buf += SGRPROJ_BORDER_VERT * buf_stride + SGRPROJ_BORDER_HORZ;
+
+ // Calculate a and b.  The a output is stored in the 16-bit A16 buffer and is
+ // in the range [1, 256] for all bit depths; the b output is kept in a 32-bit
+ // buffer.
+ if (8 == bit_depth) {
+ calc_ab_internal_lbd((square_sum_buf - buf_stride - 1),
+ (A16 - buf_stride - 1), (sum_buf - buf_stride - 1),
+ (B - buf_stride - 1), buf_stride, width + 2,
+ height + 2, r, params->s[radius_idx], 1);
+ } else {
+ calc_ab_internal_hbd((square_sum_buf - buf_stride - 1),
+ (A16 - buf_stride - 1), (sum_buf - buf_stride - 1),
+ (B - buf_stride - 1), buf_stride, width + 2,
+ height + 2, bit_depth, r, params->s[radius_idx], 1);
+ }
+ final_filter_internal(A16, B, buf_stride, (int16_t *)dgd16, dgd_stride, dst,
+ dst_stride, width, height);
+}
+
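+// Widens an 8-bit source block to 16 bits.  Full 8x4 tiles go through the
+// NEON path; any leftover columns and rows fall back to scalar copies.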
+static INLINE void src_convert_u8_to_u16(const uint8_t *src,
+ const int src_stride, uint16_t *dst,
+ const int dst_stride, const int width,
+ const int height) {
+ const uint8_t *src_ptr;
+ uint16_t *dst_ptr;
+ int h, w, count = 0;
+
+ uint8x8_t t1, t2, t3, t4;
+ uint16x8_t s1, s2, s3, s4;
+ h = height;
+ do {
+ src_ptr = src + (count << 2) * src_stride;
+ dst_ptr = dst + (count << 2) * dst_stride;
+ w = width;
+ if (w >= 7) {
+ do {
+ load_u8_8x4(src_ptr, src_stride, &t1, &t2, &t3, &t4);
+ s1 = vmovl_u8(t1);
+ s2 = vmovl_u8(t2);
+ s3 = vmovl_u8(t3);
+ s4 = vmovl_u8(t4);
+ store_u16_8x4(dst_ptr, dst_stride, s1, s2, s3, s4);
+
+ src_ptr += 8;
+ dst_ptr += 8;
+ w -= 8;
+ } while (w > 7);
+ }
+
+ for (int y = 0; y < w; y++) {
+ dst_ptr[y] = src_ptr[y];
+ dst_ptr[y + 1 * dst_stride] = src_ptr[y + 1 * src_stride];
+ dst_ptr[y + 2 * dst_stride] = src_ptr[y + 2 * src_stride];
+ dst_ptr[y + 3 * dst_stride] = src_ptr[y + 3 * src_stride];
+ }
+ count++;
+ h -= 4;
+ } while (h > 3);
+
+ src_ptr = src + (count << 2) * src_stride;
+ dst_ptr = dst + (count << 2) * dst_stride;
+ for (int x = 0; x < h; x++) {
+ for (int y = 0; y < width; y++) {
+ dst_ptr[y + x * dst_stride] = src_ptr[y + x * src_stride];
+ }
+ }
+}
+
+static INLINE void src_convert_hbd_copy(const uint16_t *src, int src_stride,
+ uint16_t *dst, const int dst_stride,
+ int width, int height) {
+ const uint16_t *src_ptr;
+ uint16_t *dst_ptr;
+ int h, w, count = 0;
+ uint16x8_t s1, s2, s3, s4;
+
+ h = height;
+ do {
+ src_ptr = src + (count << 2) * src_stride;
+ dst_ptr = dst + (count << 2) * dst_stride;
+ w = width;
+ do {
+ load_u16_8x4(src_ptr, src_stride, &s1, &s2, &s3, &s4);
+ store_u16_8x4(dst_ptr, dst_stride, s1, s2, s3, s4);
+ src_ptr += 8;
+ dst_ptr += 8;
+ w -= 8;
+ } while (w > 7);
+
+ for (int y = 0; y < w; y++) {
+ dst_ptr[y] = src_ptr[y];
+ dst_ptr[y + 1 * dst_stride] = src_ptr[y + 1 * src_stride];
+ dst_ptr[y + 2 * dst_stride] = src_ptr[y + 2 * src_stride];
+ dst_ptr[y + 3 * dst_stride] = src_ptr[y + 3 * src_stride];
+ }
+ count++;
+ h -= 4;
+ } while (h > 3);
+
+ src_ptr = src + (count << 2) * src_stride;
+ dst_ptr = dst + (count << 2) * dst_stride;
+
+ for (int x = 0; x < h; x++) {
+ memcpy((dst_ptr + x * dst_stride), (src_ptr + x * src_stride),
+ sizeof(uint16_t) * width);
+ }
+}
+
+int av1_selfguided_restoration_neon(const uint8_t *dat8, int width, int height,
+ int stride, int32_t *flt0, int32_t *flt1,
+ int flt_stride, int sgr_params_idx,
+ int bit_depth, int highbd) {
+ const sgr_params_type *const params = &sgr_params[sgr_params_idx];
+ assert(!(params->r[0] == 0 && params->r[1] == 0));
+
+ uint16_t dgd16_[RESTORATION_PROC_UNIT_PELS];
+ const int dgd16_stride = width + 2 * SGRPROJ_BORDER_HORZ;
+ uint16_t *dgd16 =
+ dgd16_ + dgd16_stride * SGRPROJ_BORDER_VERT + SGRPROJ_BORDER_HORZ;
+ const int width_ext = width + 2 * SGRPROJ_BORDER_HORZ;
+ const int height_ext = height + 2 * SGRPROJ_BORDER_VERT;
+ const int dgd_stride = stride;
+
+ if (highbd) {
+ const uint16_t *dgd16_tmp = CONVERT_TO_SHORTPTR(dat8);
+ src_convert_hbd_copy(
+ dgd16_tmp - SGRPROJ_BORDER_VERT * dgd_stride - SGRPROJ_BORDER_HORZ,
+ dgd_stride,
+ dgd16 - SGRPROJ_BORDER_VERT * dgd16_stride - SGRPROJ_BORDER_HORZ,
+ dgd16_stride, width_ext, height_ext);
+ } else {
+ src_convert_u8_to_u16(
+ dat8 - SGRPROJ_BORDER_VERT * dgd_stride - SGRPROJ_BORDER_HORZ,
+ dgd_stride,
+ dgd16 - SGRPROJ_BORDER_VERT * dgd16_stride - SGRPROJ_BORDER_HORZ,
+ dgd16_stride, width_ext, height_ext);
+ }
+
+ if (params->r[0] > 0)
+ restoration_fast_internal(dgd16, width, height, dgd16_stride, flt0,
+ flt_stride, bit_depth, sgr_params_idx, 0);
+ if (params->r[1] > 0)
+ restoration_internal(dgd16, width, height, dgd16_stride, flt1, flt_stride,
+ bit_depth, sgr_params_idx, 1);
+ return 0;
+}
+
+void apply_selfguided_restoration_neon(const uint8_t *dat8, int width,
+ int height, int stride, int eps,
+ const int *xqd, uint8_t *dst8,
+ int dst_stride, int32_t *tmpbuf,
+ int bit_depth, int highbd) {
+ int32_t *flt0 = tmpbuf;
+ int32_t *flt1 = flt0 + RESTORATION_UNITPELS_MAX;
+ assert(width * height <= RESTORATION_UNITPELS_MAX);
+ uint16_t dgd16_[RESTORATION_PROC_UNIT_PELS];
+ const int dgd16_stride = width + 2 * SGRPROJ_BORDER_HORZ;
+ uint16_t *dgd16 =
+ dgd16_ + dgd16_stride * SGRPROJ_BORDER_VERT + SGRPROJ_BORDER_HORZ;
+ const int width_ext = width + 2 * SGRPROJ_BORDER_HORZ;
+ const int height_ext = height + 2 * SGRPROJ_BORDER_VERT;
+ const int dgd_stride = stride;
+ const sgr_params_type *const params = &sgr_params[eps];
+ int xq[2];
+
+ assert(!(params->r[0] == 0 && params->r[1] == 0));
+
+ if (highbd) {
+ const uint16_t *dgd16_tmp = CONVERT_TO_SHORTPTR(dat8);
+ src_convert_hbd_copy(
+ dgd16_tmp - SGRPROJ_BORDER_VERT * dgd_stride - SGRPROJ_BORDER_HORZ,
+ dgd_stride,
+ dgd16 - SGRPROJ_BORDER_VERT * dgd16_stride - SGRPROJ_BORDER_HORZ,
+ dgd16_stride, width_ext, height_ext);
+ } else {
+ src_convert_u8_to_u16(
+ dat8 - SGRPROJ_BORDER_VERT * dgd_stride - SGRPROJ_BORDER_HORZ,
+ dgd_stride,
+ dgd16 - SGRPROJ_BORDER_VERT * dgd16_stride - SGRPROJ_BORDER_HORZ,
+ dgd16_stride, width_ext, height_ext);
+ }
+
+ if (params->r[0] > 0)
+ restoration_fast_internal(dgd16, width, height, dgd16_stride, flt0, width,
+ bit_depth, eps, 0);
+ if (params->r[1] > 0)
+ restoration_internal(dgd16, width, height, dgd16_stride, flt1, width,
+ bit_depth, eps, 1);
+
+ decode_xq(xqd, xq, params);
+
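+ // Combine the enabled filter outputs with the source.  With
+ // u = src << SGRPROJ_RST_BITS, compute
+ // v = (u << SGRPROJ_PRJ_BITS) + xq[0] * (flt0 - u) + xq[1] * (flt1 - u),
+ // including each xq term only when the corresponding radius is enabled, then
+ // round v by SGRPROJ_PRJ_BITS + SGRPROJ_RST_BITS and clamp to the pixel
+ // range.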
+ {
+ int16_t *src_ptr;
+ uint8_t *dst_ptr;
+ uint16_t *dst16_ptr;
+ int16x4_t d0, d4;
+ int16x8_t r0, s0;
+ uint16x8_t r4;
+ int32x4_t u0, u4, v0, v4, f00, f10;
+ uint8x8_t t0;
+ int count = 0, w = width, h = height, rc = 0;
+
+ const int32x4_t xq0_vec = vdupq_n_s32(xq[0]);
+ const int32x4_t xq1_vec = vdupq_n_s32(xq[1]);
+ const int16x8_t zero = vdupq_n_s16(0);
+ const uint16x8_t max = vdupq_n_u16((1 << bit_depth) - 1);
+ uint16_t *dst16 = CONVERT_TO_SHORTPTR(dst8);
+ dst_ptr = dst8;
+ src_ptr = (int16_t *)dgd16;
+ do {
+ w = width;
+ count = 0;
+ dst_ptr = dst8 + rc * dst_stride;
+ dst16_ptr = dst16 + rc * dst_stride;
+ do {
+ s0 = vld1q_s16(src_ptr + count);
+
+ u0 = vshll_n_s16(vget_low_s16(s0), SGRPROJ_RST_BITS);
+ u4 = vshll_n_s16(vget_high_s16(s0), SGRPROJ_RST_BITS);
+
+ v0 = vshlq_n_s32(u0, SGRPROJ_PRJ_BITS);
+ v4 = vshlq_n_s32(u4, SGRPROJ_PRJ_BITS);
+
+ if (params->r[0] > 0) {
+ f00 = vld1q_s32(flt0 + count);
+ f10 = vld1q_s32(flt0 + count + 4);
+
+ f00 = vsubq_s32(f00, u0);
+ f10 = vsubq_s32(f10, u4);
+
+ v0 = vmlaq_s32(v0, xq0_vec, f00);
+ v4 = vmlaq_s32(v4, xq0_vec, f10);
+ }
+
+ if (params->r[1] > 0) {
+ f00 = vld1q_s32(flt1 + count);
+ f10 = vld1q_s32(flt1 + count + 4);
+
+ f00 = vsubq_s32(f00, u0);
+ f10 = vsubq_s32(f10, u4);
+
+ v0 = vmlaq_s32(v0, xq1_vec, f00);
+ v4 = vmlaq_s32(v4, xq1_vec, f10);
+ }
+
+ d0 = vqrshrn_n_s32(v0, SGRPROJ_PRJ_BITS + SGRPROJ_RST_BITS);
+ d4 = vqrshrn_n_s32(v4, SGRPROJ_PRJ_BITS + SGRPROJ_RST_BITS);
+
+ r0 = vcombine_s16(d0, d4);
+
+ r4 = vreinterpretq_u16_s16(vmaxq_s16(r0, zero));
+
+ if (highbd) {
+ r4 = vminq_u16(r4, max);
+ vst1q_u16(dst16_ptr, r4);
+ } else {
+ t0 = vqmovn_u16(r4);
+ vst1_u8(dst_ptr, t0);
+ }
+ w -= 8;
+ count += 8;
+ dst_ptr += 8;
+ dst16_ptr += 8;
+ } while (w > 0);
+
+ src_ptr += dgd16_stride;
+ flt1 += width;
+ flt0 += width;
+ rc++;
+ h--;
+ } while (h > 0);
+ }
+}
diff --git a/third_party/aom/av1/common/arm/transpose_neon.h b/third_party/aom/av1/common/arm/transpose_neon.h
new file mode 100644
index 0000000000..8a3d9f07ff
--- /dev/null
+++ b/third_party/aom/av1/common/arm/transpose_neon.h
@@ -0,0 +1,537 @@
+/*
+ * Copyright (c) 2018, Alliance for Open Media. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef AOM_AV1_COMMON_ARM_TRANSPOSE_NEON_H_
+#define AOM_AV1_COMMON_ARM_TRANSPOSE_NEON_H_
+
+#include <arm_neon.h>
+
+static INLINE void transpose_u8_8x8(uint8x8_t *a0, uint8x8_t *a1, uint8x8_t *a2,
+ uint8x8_t *a3, uint8x8_t *a4, uint8x8_t *a5,
+ uint8x8_t *a6, uint8x8_t *a7) {
+ // Swap 8 bit elements. Goes from:
+ // a0: 00 01 02 03 04 05 06 07
+ // a1: 10 11 12 13 14 15 16 17
+ // a2: 20 21 22 23 24 25 26 27
+ // a3: 30 31 32 33 34 35 36 37
+ // a4: 40 41 42 43 44 45 46 47
+ // a5: 50 51 52 53 54 55 56 57
+ // a6: 60 61 62 63 64 65 66 67
+ // a7: 70 71 72 73 74 75 76 77
+ // to:
+ // b0.val[0]: 00 10 02 12 04 14 06 16 40 50 42 52 44 54 46 56
+ // b0.val[1]: 01 11 03 13 05 15 07 17 41 51 43 53 45 55 47 57
+ // b1.val[0]: 20 30 22 32 24 34 26 36 60 70 62 72 64 74 66 76
+ // b1.val[1]: 21 31 23 33 25 35 27 37 61 71 63 73 65 75 67 77
+
+ const uint8x16x2_t b0 =
+ vtrnq_u8(vcombine_u8(*a0, *a4), vcombine_u8(*a1, *a5));
+ const uint8x16x2_t b1 =
+ vtrnq_u8(vcombine_u8(*a2, *a6), vcombine_u8(*a3, *a7));
+
+ // Swap 16 bit elements resulting in:
+ // c0.val[0]: 00 10 20 30 04 14 24 34 40 50 60 70 44 54 64 74
+ // c0.val[1]: 02 12 22 32 06 16 26 36 42 52 62 72 46 56 66 76
+ // c1.val[0]: 01 11 21 31 05 15 25 35 41 51 61 71 45 55 65 75
+ // c1.val[1]: 03 13 23 33 07 17 27 37 43 53 63 73 47 57 67 77
+
+ const uint16x8x2_t c0 = vtrnq_u16(vreinterpretq_u16_u8(b0.val[0]),
+ vreinterpretq_u16_u8(b1.val[0]));
+ const uint16x8x2_t c1 = vtrnq_u16(vreinterpretq_u16_u8(b0.val[1]),
+ vreinterpretq_u16_u8(b1.val[1]));
+
+ // Unzip 32 bit elements resulting in:
+ // d0.val[0]: 00 10 20 30 40 50 60 70 01 11 21 31 41 51 61 71
+ // d0.val[1]: 04 14 24 34 44 54 64 74 05 15 25 35 45 55 65 75
+ // d1.val[0]: 02 12 22 32 42 52 62 72 03 13 23 33 43 53 63 73
+ // d1.val[1]: 06 16 26 36 46 56 66 76 07 17 27 37 47 57 67 77
+ const uint32x4x2_t d0 = vuzpq_u32(vreinterpretq_u32_u16(c0.val[0]),
+ vreinterpretq_u32_u16(c1.val[0]));
+ const uint32x4x2_t d1 = vuzpq_u32(vreinterpretq_u32_u16(c0.val[1]),
+ vreinterpretq_u32_u16(c1.val[1]));
+
+ *a0 = vreinterpret_u8_u32(vget_low_u32(d0.val[0]));
+ *a1 = vreinterpret_u8_u32(vget_high_u32(d0.val[0]));
+ *a2 = vreinterpret_u8_u32(vget_low_u32(d1.val[0]));
+ *a3 = vreinterpret_u8_u32(vget_high_u32(d1.val[0]));
+ *a4 = vreinterpret_u8_u32(vget_low_u32(d0.val[1]));
+ *a5 = vreinterpret_u8_u32(vget_high_u32(d0.val[1]));
+ *a6 = vreinterpret_u8_u32(vget_low_u32(d1.val[1]));
+ *a7 = vreinterpret_u8_u32(vget_high_u32(d1.val[1]));
+}
+
+static INLINE void transpose_u8_8x4(uint8x8_t *a0, uint8x8_t *a1, uint8x8_t *a2,
+ uint8x8_t *a3) {
+ // Swap 8 bit elements. Goes from:
+ // a0: 00 01 02 03 04 05 06 07
+ // a1: 10 11 12 13 14 15 16 17
+ // a2: 20 21 22 23 24 25 26 27
+ // a3: 30 31 32 33 34 35 36 37
+ // to:
+ // b0.val[0]: 00 10 02 12 04 14 06 16
+ // b0.val[1]: 01 11 03 13 05 15 07 17
+ // b1.val[0]: 20 30 22 32 24 34 26 36
+ // b1.val[1]: 21 31 23 33 25 35 27 37
+
+ const uint8x8x2_t b0 = vtrn_u8(*a0, *a1);
+ const uint8x8x2_t b1 = vtrn_u8(*a2, *a3);
+
+ // Swap 16 bit elements resulting in:
+ // c0.val[0]: 00 10 20 30 04 14 24 34
+ // c0.val[1]: 02 12 22 32 06 16 26 36
+ // c1.val[0]: 01 11 21 31 05 15 25 35
+ // c1.val[1]: 03 13 23 33 07 17 27 37
+
+ const uint16x4x2_t c0 =
+ vtrn_u16(vreinterpret_u16_u8(b0.val[0]), vreinterpret_u16_u8(b1.val[0]));
+ const uint16x4x2_t c1 =
+ vtrn_u16(vreinterpret_u16_u8(b0.val[1]), vreinterpret_u16_u8(b1.val[1]));
+
+ *a0 = vreinterpret_u8_u16(c0.val[0]);
+ *a1 = vreinterpret_u8_u16(c1.val[0]);
+ *a2 = vreinterpret_u8_u16(c0.val[1]);
+ *a3 = vreinterpret_u8_u16(c1.val[1]);
+}
+
+static INLINE void transpose_u8_4x4(uint8x8_t *a0, uint8x8_t *a1) {
+ // Swap 16 bit elements. Goes from:
+ // a0: 00 01 02 03 10 11 12 13
+ // a1: 20 21 22 23 30 31 32 33
+ // to:
+ // b0.val[0]: 00 01 20 21 10 11 30 31
+ // b0.val[1]: 02 03 22 23 12 13 32 33
+
+ const uint16x4x2_t b0 =
+ vtrn_u16(vreinterpret_u16_u8(*a0), vreinterpret_u16_u8(*a1));
+
+ // Swap 32 bit elements resulting in:
+ // c0.val[0]: 00 01 20 21 02 03 22 23
+ // c0.val[1]: 10 11 30 31 12 13 32 33
+
+ const uint32x2x2_t c0 = vtrn_u32(vreinterpret_u32_u16(b0.val[0]),
+ vreinterpret_u32_u16(b0.val[1]));
+
+ // Swap 8 bit elements resulting in:
+ // d0.val[0]: 00 10 20 30 02 12 22 32
+ // d0.val[1]: 01 11 21 31 03 13 23 33
+
+ const uint8x8x2_t d0 =
+ vtrn_u8(vreinterpret_u8_u32(c0.val[0]), vreinterpret_u8_u32(c0.val[1]));
+
+ *a0 = d0.val[0];
+ *a1 = d0.val[1];
+}
+
+static INLINE void transpose_u8_4x8(uint8x8_t *a0, uint8x8_t *a1, uint8x8_t *a2,
+ uint8x8_t *a3, const uint8x8_t a4,
+ const uint8x8_t a5, const uint8x8_t a6,
+ const uint8x8_t a7) {
+ // Swap 32 bit elements. Goes from:
+ // a0: 00 01 02 03 XX XX XX XX
+ // a1: 10 11 12 13 XX XX XX XX
+ // a2: 20 21 22 23 XX XX XX XX
+ // a3: 30 31 32 33 XX XX XX XX
+ // a4: 40 41 42 43 XX XX XX XX
+ // a5: 50 51 52 53 XX XX XX XX
+ // a6: 60 61 62 63 XX XX XX XX
+ // a7: 70 71 72 73 XX XX XX XX
+ // to:
+ // b0.val[0]: 00 01 02 03 40 41 42 43
+ // b1.val[0]: 10 11 12 13 50 51 52 53
+ // b2.val[0]: 20 21 22 23 60 61 62 63
+ // b3.val[0]: 30 31 32 33 70 71 72 73
+
+ const uint32x2x2_t b0 =
+ vtrn_u32(vreinterpret_u32_u8(*a0), vreinterpret_u32_u8(a4));
+ const uint32x2x2_t b1 =
+ vtrn_u32(vreinterpret_u32_u8(*a1), vreinterpret_u32_u8(a5));
+ const uint32x2x2_t b2 =
+ vtrn_u32(vreinterpret_u32_u8(*a2), vreinterpret_u32_u8(a6));
+ const uint32x2x2_t b3 =
+ vtrn_u32(vreinterpret_u32_u8(*a3), vreinterpret_u32_u8(a7));
+
+ // Swap 16 bit elements resulting in:
+ // c0.val[0]: 00 01 20 21 40 41 60 61
+ // c0.val[1]: 02 03 22 23 42 43 62 63
+ // c1.val[0]: 10 11 30 31 50 51 70 71
+ // c1.val[1]: 12 13 32 33 52 53 72 73
+
+ const uint16x4x2_t c0 = vtrn_u16(vreinterpret_u16_u32(b0.val[0]),
+ vreinterpret_u16_u32(b2.val[0]));
+ const uint16x4x2_t c1 = vtrn_u16(vreinterpret_u16_u32(b1.val[0]),
+ vreinterpret_u16_u32(b3.val[0]));
+
+ // Swap 8 bit elements resulting in:
+ // d0.val[0]: 00 10 20 30 40 50 60 70
+ // d0.val[1]: 01 11 21 31 41 51 61 71
+ // d1.val[0]: 02 12 22 32 42 52 62 72
+ // d1.val[1]: 03 13 23 33 43 53 63 73
+
+ const uint8x8x2_t d0 =
+ vtrn_u8(vreinterpret_u8_u16(c0.val[0]), vreinterpret_u8_u16(c1.val[0]));
+ const uint8x8x2_t d1 =
+ vtrn_u8(vreinterpret_u8_u16(c0.val[1]), vreinterpret_u8_u16(c1.val[1]));
+
+ *a0 = d0.val[0];
+ *a1 = d0.val[1];
+ *a2 = d1.val[0];
+ *a3 = d1.val[1];
+}
+
+static INLINE void transpose_u16_4x8(uint16x4_t *a0, uint16x4_t *a1,
+ uint16x4_t *a2, uint16x4_t *a3,
+ uint16x4_t *a4, uint16x4_t *a5,
+ uint16x4_t *a6, uint16x4_t *a7,
+ uint16x8_t *o0, uint16x8_t *o1,
+ uint16x8_t *o2, uint16x8_t *o3) {
+ // Swap 16 bit elements. Goes from:
+ // a0: 00 01 02 03
+ // a1: 10 11 12 13
+ // a2: 20 21 22 23
+ // a3: 30 31 32 33
+ // a4: 40 41 42 43
+ // a5: 50 51 52 53
+ // a6: 60 61 62 63
+ // a7: 70 71 72 73
+ // to:
+ // b0.val[0]: 00 10 02 12
+ // b0.val[1]: 01 11 03 13
+ // b1.val[0]: 20 30 22 32
+ // b1.val[1]: 21 31 23 33
+ // b2.val[0]: 40 50 42 52
+ // b2.val[1]: 41 51 43 53
+ // b3.val[0]: 60 70 62 72
+ // b3.val[1]: 61 71 63 73
+
+ uint16x4x2_t b0 = vtrn_u16(*a0, *a1);
+ uint16x4x2_t b1 = vtrn_u16(*a2, *a3);
+ uint16x4x2_t b2 = vtrn_u16(*a4, *a5);
+ uint16x4x2_t b3 = vtrn_u16(*a6, *a7);
+
+ // Swap 32 bit elements resulting in:
+ // c0.val[0]: 00 10 20 30
+ // c0.val[1]: 02 12 22 32
+ // c1.val[0]: 01 11 21 31
+ // c1.val[1]: 03 13 23 33
+ // c2.val[0]: 40 50 60 70
+ // c2.val[1]: 42 52 62 72
+ // c3.val[0]: 41 51 61 71
+ // c3.val[1]: 43 53 63 73
+
+ uint32x2x2_t c0 = vtrn_u32(vreinterpret_u32_u16(b0.val[0]),
+ vreinterpret_u32_u16(b1.val[0]));
+ uint32x2x2_t c1 = vtrn_u32(vreinterpret_u32_u16(b0.val[1]),
+ vreinterpret_u32_u16(b1.val[1]));
+ uint32x2x2_t c2 = vtrn_u32(vreinterpret_u32_u16(b2.val[0]),
+ vreinterpret_u32_u16(b3.val[0]));
+ uint32x2x2_t c3 = vtrn_u32(vreinterpret_u32_u16(b2.val[1]),
+ vreinterpret_u32_u16(b3.val[1]));
+
+ // Swap 64 bit elements resulting in:
+ // o0: 00 10 20 30 40 50 60 70
+ // o1: 01 11 21 31 41 51 61 71
+ // o2: 02 12 22 32 42 52 62 72
+ // o3: 03 13 23 33 43 53 63 73
+
+ *o0 = vcombine_u16(vreinterpret_u16_u32(c0.val[0]),
+ vreinterpret_u16_u32(c2.val[0]));
+ *o1 = vcombine_u16(vreinterpret_u16_u32(c1.val[0]),
+ vreinterpret_u16_u32(c3.val[0]));
+ *o2 = vcombine_u16(vreinterpret_u16_u32(c0.val[1]),
+ vreinterpret_u16_u32(c2.val[1]));
+ *o3 = vcombine_u16(vreinterpret_u16_u32(c1.val[1]),
+ vreinterpret_u16_u32(c3.val[1]));
+}
+
+static INLINE void transpose_u16_8x8(uint16x8_t *a0, uint16x8_t *a1,
+ uint16x8_t *a2, uint16x8_t *a3,
+ uint16x8_t *a4, uint16x8_t *a5,
+ uint16x8_t *a6, uint16x8_t *a7) {
+ // Swap 16 bit elements. Goes from:
+ // a0: 00 01 02 03 04 05 06 07
+ // a1: 10 11 12 13 14 15 16 17
+ // a2: 20 21 22 23 24 25 26 27
+ // a3: 30 31 32 33 34 35 36 37
+ // a4: 40 41 42 43 44 45 46 47
+ // a5: 50 51 52 53 54 55 56 57
+ // a6: 60 61 62 63 64 65 66 67
+ // a7: 70 71 72 73 74 75 76 77
+ // to:
+ // b0.val[0]: 00 10 02 12 04 14 06 16
+ // b0.val[1]: 01 11 03 13 05 15 07 17
+ // b1.val[0]: 20 30 22 32 24 34 26 36
+ // b1.val[1]: 21 31 23 33 25 35 27 37
+ // b2.val[0]: 40 50 42 52 44 54 46 56
+ // b2.val[1]: 41 51 43 53 45 55 47 57
+ // b3.val[0]: 60 70 62 72 64 74 66 76
+ // b3.val[1]: 61 71 63 73 65 75 67 77
+
+ const uint16x8x2_t b0 = vtrnq_u16(*a0, *a1);
+ const uint16x8x2_t b1 = vtrnq_u16(*a2, *a3);
+ const uint16x8x2_t b2 = vtrnq_u16(*a4, *a5);
+ const uint16x8x2_t b3 = vtrnq_u16(*a6, *a7);
+
+ // Swap 32 bit elements resulting in:
+ // c0.val[0]: 00 10 20 30 04 14 24 34
+ // c0.val[1]: 02 12 22 32 06 16 26 36
+ // c1.val[0]: 01 11 21 31 05 15 25 35
+ // c1.val[1]: 03 13 23 33 07 17 27 37
+ // c2.val[0]: 40 50 60 70 44 54 64 74
+ // c2.val[1]: 42 52 62 72 46 56 66 76
+ // c3.val[0]: 41 51 61 71 45 55 65 75
+ // c3.val[1]: 43 53 63 73 47 57 67 77
+
+ const uint32x4x2_t c0 = vtrnq_u32(vreinterpretq_u32_u16(b0.val[0]),
+ vreinterpretq_u32_u16(b1.val[0]));
+ const uint32x4x2_t c1 = vtrnq_u32(vreinterpretq_u32_u16(b0.val[1]),
+ vreinterpretq_u32_u16(b1.val[1]));
+ const uint32x4x2_t c2 = vtrnq_u32(vreinterpretq_u32_u16(b2.val[0]),
+ vreinterpretq_u32_u16(b3.val[0]));
+ const uint32x4x2_t c3 = vtrnq_u32(vreinterpretq_u32_u16(b2.val[1]),
+ vreinterpretq_u32_u16(b3.val[1]));
+
+ *a0 = vcombine_u16(vget_low_u16(vreinterpretq_u16_u32(c0.val[0])),
+ vget_low_u16(vreinterpretq_u16_u32(c2.val[0])));
+ *a4 = vcombine_u16(vget_high_u16(vreinterpretq_u16_u32(c0.val[0])),
+ vget_high_u16(vreinterpretq_u16_u32(c2.val[0])));
+
+ *a2 = vcombine_u16(vget_low_u16(vreinterpretq_u16_u32(c0.val[1])),
+ vget_low_u16(vreinterpretq_u16_u32(c2.val[1])));
+ *a6 = vcombine_u16(vget_high_u16(vreinterpretq_u16_u32(c0.val[1])),
+ vget_high_u16(vreinterpretq_u16_u32(c2.val[1])));
+
+ *a1 = vcombine_u16(vget_low_u16(vreinterpretq_u16_u32(c1.val[0])),
+ vget_low_u16(vreinterpretq_u16_u32(c3.val[0])));
+ *a5 = vcombine_u16(vget_high_u16(vreinterpretq_u16_u32(c1.val[0])),
+ vget_high_u16(vreinterpretq_u16_u32(c3.val[0])));
+
+ *a3 = vcombine_u16(vget_low_u16(vreinterpretq_u16_u32(c1.val[1])),
+ vget_low_u16(vreinterpretq_u16_u32(c3.val[1])));
+ *a7 = vcombine_u16(vget_high_u16(vreinterpretq_u16_u32(c1.val[1])),
+ vget_high_u16(vreinterpretq_u16_u32(c3.val[1])));
+}
+
+static INLINE void transpose_s16_8x8(int16x8_t *a0, int16x8_t *a1,
+ int16x8_t *a2, int16x8_t *a3,
+ int16x8_t *a4, int16x8_t *a5,
+ int16x8_t *a6, int16x8_t *a7) {
+ // Swap 16 bit elements. Goes from:
+ // a0: 00 01 02 03 04 05 06 07
+ // a1: 10 11 12 13 14 15 16 17
+ // a2: 20 21 22 23 24 25 26 27
+ // a3: 30 31 32 33 34 35 36 37
+ // a4: 40 41 42 43 44 45 46 47
+ // a5: 50 51 52 53 54 55 56 57
+ // a6: 60 61 62 63 64 65 66 67
+ // a7: 70 71 72 73 74 75 76 77
+ // to:
+ // b0.val[0]: 00 10 02 12 04 14 06 16
+ // b0.val[1]: 01 11 03 13 05 15 07 17
+ // b1.val[0]: 20 30 22 32 24 34 26 36
+ // b1.val[1]: 21 31 23 33 25 35 27 37
+ // b2.val[0]: 40 50 42 52 44 54 46 56
+ // b2.val[1]: 41 51 43 53 45 55 47 57
+ // b3.val[0]: 60 70 62 72 64 74 66 76
+ // b3.val[1]: 61 71 63 73 65 75 67 77
+
+ const int16x8x2_t b0 = vtrnq_s16(*a0, *a1);
+ const int16x8x2_t b1 = vtrnq_s16(*a2, *a3);
+ const int16x8x2_t b2 = vtrnq_s16(*a4, *a5);
+ const int16x8x2_t b3 = vtrnq_s16(*a6, *a7);
+
+ // Swap 32 bit elements resulting in:
+ // c0.val[0]: 00 10 20 30 04 14 24 34
+ // c0.val[1]: 02 12 22 32 06 16 26 36
+ // c1.val[0]: 01 11 21 31 05 15 25 35
+ // c1.val[1]: 03 13 23 33 07 17 27 37
+ // c2.val[0]: 40 50 60 70 44 54 64 74
+ // c2.val[1]: 42 52 62 72 46 56 66 76
+ // c3.val[0]: 41 51 61 71 45 55 65 75
+ // c3.val[1]: 43 53 63 73 47 57 67 77
+
+ const int32x4x2_t c0 = vtrnq_s32(vreinterpretq_s32_s16(b0.val[0]),
+ vreinterpretq_s32_s16(b1.val[0]));
+ const int32x4x2_t c1 = vtrnq_s32(vreinterpretq_s32_s16(b0.val[1]),
+ vreinterpretq_s32_s16(b1.val[1]));
+ const int32x4x2_t c2 = vtrnq_s32(vreinterpretq_s32_s16(b2.val[0]),
+ vreinterpretq_s32_s16(b3.val[0]));
+ const int32x4x2_t c3 = vtrnq_s32(vreinterpretq_s32_s16(b2.val[1]),
+ vreinterpretq_s32_s16(b3.val[1]));
+
+ *a0 = vcombine_s16(vget_low_s16(vreinterpretq_s16_s32(c0.val[0])),
+ vget_low_s16(vreinterpretq_s16_s32(c2.val[0])));
+ *a4 = vcombine_s16(vget_high_s16(vreinterpretq_s16_s32(c0.val[0])),
+ vget_high_s16(vreinterpretq_s16_s32(c2.val[0])));
+
+ *a2 = vcombine_s16(vget_low_s16(vreinterpretq_s16_s32(c0.val[1])),
+ vget_low_s16(vreinterpretq_s16_s32(c2.val[1])));
+ *a6 = vcombine_s16(vget_high_s16(vreinterpretq_s16_s32(c0.val[1])),
+ vget_high_s16(vreinterpretq_s16_s32(c2.val[1])));
+
+ *a1 = vcombine_s16(vget_low_s16(vreinterpretq_s16_s32(c1.val[0])),
+ vget_low_s16(vreinterpretq_s16_s32(c3.val[0])));
+ *a5 = vcombine_s16(vget_high_s16(vreinterpretq_s16_s32(c1.val[0])),
+ vget_high_s16(vreinterpretq_s16_s32(c3.val[0])));
+
+ *a3 = vcombine_s16(vget_low_s16(vreinterpretq_s16_s32(c1.val[1])),
+ vget_low_s16(vreinterpretq_s16_s32(c3.val[1])));
+ *a7 = vcombine_s16(vget_high_s16(vreinterpretq_s16_s32(c1.val[1])),
+ vget_high_s16(vreinterpretq_s16_s32(c3.val[1])));
+}
+
+static INLINE int16x8x2_t vpx_vtrnq_s64_to_s16(int32x4_t a0, int32x4_t a1) {
+ int16x8x2_t b0;
+ b0.val[0] = vcombine_s16(vreinterpret_s16_s32(vget_low_s32(a0)),
+ vreinterpret_s16_s32(vget_low_s32(a1)));
+ b0.val[1] = vcombine_s16(vreinterpret_s16_s32(vget_high_s32(a0)),
+ vreinterpret_s16_s32(vget_high_s32(a1)));
+ return b0;
+}
+
+static INLINE void transpose_s16_8x8q(int16x8_t *a0, int16x8_t *out) {
+ // Swap 16 bit elements. Goes from:
+ // a0: 00 01 02 03 04 05 06 07
+ // a1: 10 11 12 13 14 15 16 17
+ // a2: 20 21 22 23 24 25 26 27
+ // a3: 30 31 32 33 34 35 36 37
+ // a4: 40 41 42 43 44 45 46 47
+ // a5: 50 51 52 53 54 55 56 57
+ // a6: 60 61 62 63 64 65 66 67
+ // a7: 70 71 72 73 74 75 76 77
+ // to:
+ // b0.val[0]: 00 10 02 12 04 14 06 16
+ // b0.val[1]: 01 11 03 13 05 15 07 17
+ // b1.val[0]: 20 30 22 32 24 34 26 36
+ // b1.val[1]: 21 31 23 33 25 35 27 37
+ // b2.val[0]: 40 50 42 52 44 54 46 56
+ // b2.val[1]: 41 51 43 53 45 55 47 57
+ // b3.val[0]: 60 70 62 72 64 74 66 76
+ // b3.val[1]: 61 71 63 73 65 75 67 77
+
+ const int16x8x2_t b0 = vtrnq_s16(*a0, *(a0 + 1));
+ const int16x8x2_t b1 = vtrnq_s16(*(a0 + 2), *(a0 + 3));
+ const int16x8x2_t b2 = vtrnq_s16(*(a0 + 4), *(a0 + 5));
+ const int16x8x2_t b3 = vtrnq_s16(*(a0 + 6), *(a0 + 7));
+
+ // Swap 32 bit elements resulting in:
+ // c0.val[0]: 00 10 20 30 04 14 24 34
+ // c0.val[1]: 02 12 22 32 06 16 26 36
+ // c1.val[0]: 01 11 21 31 05 15 25 35
+ // c1.val[1]: 03 13 23 33 07 17 27 37
+ // c2.val[0]: 40 50 60 70 44 54 64 74
+ // c2.val[1]: 42 52 62 72 46 56 66 76
+ // c3.val[0]: 41 51 61 71 45 55 65 75
+ // c3.val[1]: 43 53 63 73 47 57 67 77
+
+ const int32x4x2_t c0 = vtrnq_s32(vreinterpretq_s32_s16(b0.val[0]),
+ vreinterpretq_s32_s16(b1.val[0]));
+ const int32x4x2_t c1 = vtrnq_s32(vreinterpretq_s32_s16(b0.val[1]),
+ vreinterpretq_s32_s16(b1.val[1]));
+ const int32x4x2_t c2 = vtrnq_s32(vreinterpretq_s32_s16(b2.val[0]),
+ vreinterpretq_s32_s16(b3.val[0]));
+ const int32x4x2_t c3 = vtrnq_s32(vreinterpretq_s32_s16(b2.val[1]),
+ vreinterpretq_s32_s16(b3.val[1]));
+
+ // Swap 64 bit elements resulting in:
+ // d0.val[0]: 00 10 20 30 40 50 60 70
+ // d0.val[1]: 04 14 24 34 44 54 64 74
+ // d1.val[0]: 01 11 21 31 41 51 61 71
+ // d1.val[1]: 05 15 25 35 45 55 65 75
+ // d2.val[0]: 02 12 22 32 42 52 62 72
+ // d2.val[1]: 06 16 26 36 46 56 66 76
+ // d3.val[0]: 03 13 23 33 43 53 63 73
+ // d3.val[1]: 07 17 27 37 47 57 67 77
+ const int16x8x2_t d0 = vpx_vtrnq_s64_to_s16(c0.val[0], c2.val[0]);
+ const int16x8x2_t d1 = vpx_vtrnq_s64_to_s16(c1.val[0], c3.val[0]);
+ const int16x8x2_t d2 = vpx_vtrnq_s64_to_s16(c0.val[1], c2.val[1]);
+ const int16x8x2_t d3 = vpx_vtrnq_s64_to_s16(c1.val[1], c3.val[1]);
+
+ *out = d0.val[0];
+ *(out + 1) = d1.val[0];
+ *(out + 2) = d2.val[0];
+ *(out + 3) = d3.val[0];
+ *(out + 4) = d0.val[1];
+ *(out + 5) = d1.val[1];
+ *(out + 6) = d2.val[1];
+ *(out + 7) = d3.val[1];
+}
+
+static INLINE void transpose_s16_4x4d(int16x4_t *a0, int16x4_t *a1,
+ int16x4_t *a2, int16x4_t *a3) {
+ // Swap 16 bit elements. Goes from:
+ // a0: 00 01 02 03
+ // a1: 10 11 12 13
+ // a2: 20 21 22 23
+ // a3: 30 31 32 33
+ // to:
+ // b0.val[0]: 00 10 02 12
+ // b0.val[1]: 01 11 03 13
+ // b1.val[0]: 20 30 22 32
+ // b1.val[1]: 21 31 23 33
+
+ const int16x4x2_t b0 = vtrn_s16(*a0, *a1);
+ const int16x4x2_t b1 = vtrn_s16(*a2, *a3);
+
+ // Swap 32 bit elements resulting in:
+ // c0.val[0]: 00 10 20 30
+ // c0.val[1]: 02 12 22 32
+ // c1.val[0]: 01 11 21 31
+ // c1.val[1]: 03 13 23 33
+
+ const int32x2x2_t c0 = vtrn_s32(vreinterpret_s32_s16(b0.val[0]),
+ vreinterpret_s32_s16(b1.val[0]));
+ const int32x2x2_t c1 = vtrn_s32(vreinterpret_s32_s16(b0.val[1]),
+ vreinterpret_s32_s16(b1.val[1]));
+
+ *a0 = vreinterpret_s16_s32(c0.val[0]);
+ *a1 = vreinterpret_s16_s32(c1.val[0]);
+ *a2 = vreinterpret_s16_s32(c0.val[1]);
+ *a3 = vreinterpret_s16_s32(c1.val[1]);
+}
+
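+// 64-bit transpose helper: combines the low halves of a0 and a1 into val[0]
+// and the high halves into val[1], emulating a vtrnq on 64-bit lanes.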
+static INLINE int32x4x2_t aom_vtrnq_s64_to_s32(int32x4_t a0, int32x4_t a1) {
+ int32x4x2_t b0;
+ b0.val[0] = vcombine_s32(vget_low_s32(a0), vget_low_s32(a1));
+ b0.val[1] = vcombine_s32(vget_high_s32(a0), vget_high_s32(a1));
+ return b0;
+}
+
+static INLINE void transpose_s32_4x4(int32x4_t *a0, int32x4_t *a1,
+ int32x4_t *a2, int32x4_t *a3) {
+ // Swap 32 bit elements. Goes from:
+ // a0: 00 01 02 03
+ // a1: 10 11 12 13
+ // a2: 20 21 22 23
+ // a3: 30 31 32 33
+ // to:
+ // b0.val[0]: 00 10 02 12
+ // b0.val[1]: 01 11 03 13
+ // b1.val[0]: 20 30 22 32
+ // b1.val[1]: 21 31 23 33
+
+ const int32x4x2_t b0 = vtrnq_s32(*a0, *a1);
+ const int32x4x2_t b1 = vtrnq_s32(*a2, *a3);
+
+ // Swap 64 bit elements resulting in:
+ // c0.val[0]: 00 10 20 30
+ // c0.val[1]: 02 12 22 32
+ // c1.val[0]: 01 11 21 31
+ // c1.val[1]: 03 13 23 33
+
+ const int32x4x2_t c0 = aom_vtrnq_s64_to_s32(b0.val[0], b1.val[0]);
+ const int32x4x2_t c1 = aom_vtrnq_s64_to_s32(b0.val[1], b1.val[1]);
+
+ *a0 = c0.val[0];
+ *a1 = c1.val[0];
+ *a2 = c0.val[1];
+ *a3 = c1.val[1];
+}
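+
+// Example usage (illustrative; `buf` is a hypothetical row-major 4x4 block
+// of int32_t):
+//   int32x4_t r0 = vld1q_s32(buf + 0 * 4);
+//   int32x4_t r1 = vld1q_s32(buf + 1 * 4);
+//   int32x4_t r2 = vld1q_s32(buf + 2 * 4);
+//   int32x4_t r3 = vld1q_s32(buf + 3 * 4);
+//   transpose_s32_4x4(&r0, &r1, &r2, &r3);
+//   // r0..r3 now hold the four columns of the original block.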
+
+#endif // AOM_AV1_COMMON_ARM_TRANSPOSE_NEON_H_
diff --git a/third_party/aom/av1/common/arm/warp_plane_neon.c b/third_party/aom/av1/common/arm/warp_plane_neon.c
new file mode 100644
index 0000000000..7f02d42a73
--- /dev/null
+++ b/third_party/aom/av1/common/arm/warp_plane_neon.c
@@ -0,0 +1,714 @@
+/*
+ * Copyright (c) 2018, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <assert.h>
+#include <arm_neon.h>
+#include <memory.h>
+#include <math.h>
+
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_ports/mem.h"
+#include "config/av1_rtcd.h"
+#include "av1/common/warped_motion.h"
+#include "av1/common/scale.h"
+
+/* This is a modified version of 'warped_filter' from warped_motion.c:
+ * - Each coefficient is stored in 8 bits instead of 16 bits.
+ * - The coefficients are rearranged into the column order
+ *   0, 2, 4, 6, 1, 3, 5, 7.
+ *
+ * This is done in order to avoid overflow: since the tap with the largest
+ * coefficient could be any of taps 2, 3, 4 or 5, we cannot use the summation
+ * order ((0 + 1) + (4 + 5)) + ((2 + 3) + (6 + 7)) used in the regular
+ * convolve functions.
+ *
+ * Instead, we use the summation order
+ * ((0 + 2) + (4 + 6)) + ((1 + 3) + (5 + 7)).
+ * The coefficients are rearranged in this table so that they can be brought
+ * into that order more quickly.
+ */
+/* clang-format off */
+DECLARE_ALIGNED(8, static const int8_t,
+ filter_8bit_neon[WARPEDPIXEL_PREC_SHIFTS * 3 + 1][8]) = {
+#if WARPEDPIXEL_PREC_BITS == 6
+ // [-1, 0)
+ { 0, 127, 0, 0, 0, 1, 0, 0}, { 0, 127, 0, 0, -1, 2, 0, 0},
+ { 1, 127, -1, 0, -3, 4, 0, 0}, { 1, 126, -2, 0, -4, 6, 1, 0},
+ { 1, 126, -3, 0, -5, 8, 1, 0}, { 1, 125, -4, 0, -6, 11, 1, 0},
+ { 1, 124, -4, 0, -7, 13, 1, 0}, { 2, 123, -5, 0, -8, 15, 1, 0},
+ { 2, 122, -6, 0, -9, 18, 1, 0}, { 2, 121, -6, 0, -10, 20, 1, 0},
+ { 2, 120, -7, 0, -11, 22, 2, 0}, { 2, 119, -8, 0, -12, 25, 2, 0},
+ { 3, 117, -8, 0, -13, 27, 2, 0}, { 3, 116, -9, 0, -13, 29, 2, 0},
+ { 3, 114, -10, 0, -14, 32, 3, 0}, { 3, 113, -10, 0, -15, 35, 2, 0},
+ { 3, 111, -11, 0, -15, 37, 3, 0}, { 3, 109, -11, 0, -16, 40, 3, 0},
+ { 3, 108, -12, 0, -16, 42, 3, 0}, { 4, 106, -13, 0, -17, 45, 3, 0},
+ { 4, 104, -13, 0, -17, 47, 3, 0}, { 4, 102, -14, 0, -17, 50, 3, 0},
+ { 4, 100, -14, 0, -17, 52, 3, 0}, { 4, 98, -15, 0, -18, 55, 4, 0},
+ { 4, 96, -15, 0, -18, 58, 3, 0}, { 4, 94, -16, 0, -18, 60, 4, 0},
+ { 4, 91, -16, 0, -18, 63, 4, 0}, { 4, 89, -16, 0, -18, 65, 4, 0},
+ { 4, 87, -17, 0, -18, 68, 4, 0}, { 4, 85, -17, 0, -18, 70, 4, 0},
+ { 4, 82, -17, 0, -18, 73, 4, 0}, { 4, 80, -17, 0, -18, 75, 4, 0},
+ { 4, 78, -18, 0, -18, 78, 4, 0}, { 4, 75, -18, 0, -17, 80, 4, 0},
+ { 4, 73, -18, 0, -17, 82, 4, 0}, { 4, 70, -18, 0, -17, 85, 4, 0},
+ { 4, 68, -18, 0, -17, 87, 4, 0}, { 4, 65, -18, 0, -16, 89, 4, 0},
+ { 4, 63, -18, 0, -16, 91, 4, 0}, { 4, 60, -18, 0, -16, 94, 4, 0},
+ { 3, 58, -18, 0, -15, 96, 4, 0}, { 4, 55, -18, 0, -15, 98, 4, 0},
+ { 3, 52, -17, 0, -14, 100, 4, 0}, { 3, 50, -17, 0, -14, 102, 4, 0},
+ { 3, 47, -17, 0, -13, 104, 4, 0}, { 3, 45, -17, 0, -13, 106, 4, 0},
+ { 3, 42, -16, 0, -12, 108, 3, 0}, { 3, 40, -16, 0, -11, 109, 3, 0},
+ { 3, 37, -15, 0, -11, 111, 3, 0}, { 2, 35, -15, 0, -10, 113, 3, 0},
+ { 3, 32, -14, 0, -10, 114, 3, 0}, { 2, 29, -13, 0, -9, 116, 3, 0},
+ { 2, 27, -13, 0, -8, 117, 3, 0}, { 2, 25, -12, 0, -8, 119, 2, 0},
+ { 2, 22, -11, 0, -7, 120, 2, 0}, { 1, 20, -10, 0, -6, 121, 2, 0},
+ { 1, 18, -9, 0, -6, 122, 2, 0}, { 1, 15, -8, 0, -5, 123, 2, 0},
+ { 1, 13, -7, 0, -4, 124, 1, 0}, { 1, 11, -6, 0, -4, 125, 1, 0},
+ { 1, 8, -5, 0, -3, 126, 1, 0}, { 1, 6, -4, 0, -2, 126, 1, 0},
+ { 0, 4, -3, 0, -1, 127, 1, 0}, { 0, 2, -1, 0, 0, 127, 0, 0},
+ // [0, 1)
+ { 0, 0, 1, 0, 0, 127, 0, 0}, { 0, -1, 2, 0, 0, 127, 0, 0},
+ { 0, -3, 4, 1, 1, 127, -2, 0}, { 0, -5, 6, 1, 1, 127, -2, 0},
+ { 0, -6, 8, 1, 2, 126, -3, 0}, {-1, -7, 11, 2, 2, 126, -4, -1},
+ {-1, -8, 13, 2, 3, 125, -5, -1}, {-1, -10, 16, 3, 3, 124, -6, -1},
+ {-1, -11, 18, 3, 4, 123, -7, -1}, {-1, -12, 20, 3, 4, 122, -7, -1},
+ {-1, -13, 23, 3, 4, 121, -8, -1}, {-2, -14, 25, 4, 5, 120, -9, -1},
+ {-1, -15, 27, 4, 5, 119, -10, -1}, {-1, -16, 30, 4, 5, 118, -11, -1},
+ {-2, -17, 33, 5, 6, 116, -12, -1}, {-2, -17, 35, 5, 6, 114, -12, -1},
+ {-2, -18, 38, 5, 6, 113, -13, -1}, {-2, -19, 41, 6, 7, 111, -14, -2},
+ {-2, -19, 43, 6, 7, 110, -15, -2}, {-2, -20, 46, 6, 7, 108, -15, -2},
+ {-2, -20, 49, 6, 7, 106, -16, -2}, {-2, -21, 51, 7, 7, 104, -16, -2},
+ {-2, -21, 54, 7, 7, 102, -17, -2}, {-2, -21, 56, 7, 8, 100, -18, -2},
+ {-2, -22, 59, 7, 8, 98, -18, -2}, {-2, -22, 62, 7, 8, 96, -19, -2},
+ {-2, -22, 64, 7, 8, 94, -19, -2}, {-2, -22, 67, 8, 8, 91, -20, -2},
+ {-2, -22, 69, 8, 8, 89, -20, -2}, {-2, -22, 72, 8, 8, 87, -21, -2},
+ {-2, -21, 74, 8, 8, 84, -21, -2}, {-2, -22, 77, 8, 8, 82, -21, -2},
+ {-2, -21, 79, 8, 8, 79, -21, -2}, {-2, -21, 82, 8, 8, 77, -22, -2},
+ {-2, -21, 84, 8, 8, 74, -21, -2}, {-2, -21, 87, 8, 8, 72, -22, -2},
+ {-2, -20, 89, 8, 8, 69, -22, -2}, {-2, -20, 91, 8, 8, 67, -22, -2},
+ {-2, -19, 94, 8, 7, 64, -22, -2}, {-2, -19, 96, 8, 7, 62, -22, -2},
+ {-2, -18, 98, 8, 7, 59, -22, -2}, {-2, -18, 100, 8, 7, 56, -21, -2},
+ {-2, -17, 102, 7, 7, 54, -21, -2}, {-2, -16, 104, 7, 7, 51, -21, -2},
+ {-2, -16, 106, 7, 6, 49, -20, -2}, {-2, -15, 108, 7, 6, 46, -20, -2},
+ {-2, -15, 110, 7, 6, 43, -19, -2}, {-2, -14, 111, 7, 6, 41, -19, -2},
+ {-1, -13, 113, 6, 5, 38, -18, -2}, {-1, -12, 114, 6, 5, 35, -17, -2},
+ {-1, -12, 116, 6, 5, 33, -17, -2}, {-1, -11, 118, 5, 4, 30, -16, -1},
+ {-1, -10, 119, 5, 4, 27, -15, -1}, {-1, -9, 120, 5, 4, 25, -14, -2},
+ {-1, -8, 121, 4, 3, 23, -13, -1}, {-1, -7, 122, 4, 3, 20, -12, -1},
+ {-1, -7, 123, 4, 3, 18, -11, -1}, {-1, -6, 124, 3, 3, 16, -10, -1},
+ {-1, -5, 125, 3, 2, 13, -8, -1}, {-1, -4, 126, 2, 2, 11, -7, -1},
+ { 0, -3, 126, 2, 1, 8, -6, 0}, { 0, -2, 127, 1, 1, 6, -5, 0},
+ { 0, -2, 127, 1, 1, 4, -3, 0}, { 0, 0, 127, 0, 0, 2, -1, 0},
+ // [1, 2)
+ { 0, 0, 127, 0, 0, 1, 0, 0}, { 0, 0, 127, 0, 0, -1, 2, 0},
+ { 0, 1, 127, -1, 0, -3, 4, 0}, { 0, 1, 126, -2, 0, -4, 6, 1},
+ { 0, 1, 126, -3, 0, -5, 8, 1}, { 0, 1, 125, -4, 0, -6, 11, 1},
+ { 0, 1, 124, -4, 0, -7, 13, 1}, { 0, 2, 123, -5, 0, -8, 15, 1},
+ { 0, 2, 122, -6, 0, -9, 18, 1}, { 0, 2, 121, -6, 0, -10, 20, 1},
+ { 0, 2, 120, -7, 0, -11, 22, 2}, { 0, 2, 119, -8, 0, -12, 25, 2},
+ { 0, 3, 117, -8, 0, -13, 27, 2}, { 0, 3, 116, -9, 0, -13, 29, 2},
+ { 0, 3, 114, -10, 0, -14, 32, 3}, { 0, 3, 113, -10, 0, -15, 35, 2},
+ { 0, 3, 111, -11, 0, -15, 37, 3}, { 0, 3, 109, -11, 0, -16, 40, 3},
+ { 0, 3, 108, -12, 0, -16, 42, 3}, { 0, 4, 106, -13, 0, -17, 45, 3},
+ { 0, 4, 104, -13, 0, -17, 47, 3}, { 0, 4, 102, -14, 0, -17, 50, 3},
+ { 0, 4, 100, -14, 0, -17, 52, 3}, { 0, 4, 98, -15, 0, -18, 55, 4},
+ { 0, 4, 96, -15, 0, -18, 58, 3}, { 0, 4, 94, -16, 0, -18, 60, 4},
+ { 0, 4, 91, -16, 0, -18, 63, 4}, { 0, 4, 89, -16, 0, -18, 65, 4},
+ { 0, 4, 87, -17, 0, -18, 68, 4}, { 0, 4, 85, -17, 0, -18, 70, 4},
+ { 0, 4, 82, -17, 0, -18, 73, 4}, { 0, 4, 80, -17, 0, -18, 75, 4},
+ { 0, 4, 78, -18, 0, -18, 78, 4}, { 0, 4, 75, -18, 0, -17, 80, 4},
+ { 0, 4, 73, -18, 0, -17, 82, 4}, { 0, 4, 70, -18, 0, -17, 85, 4},
+ { 0, 4, 68, -18, 0, -17, 87, 4}, { 0, 4, 65, -18, 0, -16, 89, 4},
+ { 0, 4, 63, -18, 0, -16, 91, 4}, { 0, 4, 60, -18, 0, -16, 94, 4},
+ { 0, 3, 58, -18, 0, -15, 96, 4}, { 0, 4, 55, -18, 0, -15, 98, 4},
+ { 0, 3, 52, -17, 0, -14, 100, 4}, { 0, 3, 50, -17, 0, -14, 102, 4},
+ { 0, 3, 47, -17, 0, -13, 104, 4}, { 0, 3, 45, -17, 0, -13, 106, 4},
+ { 0, 3, 42, -16, 0, -12, 108, 3}, { 0, 3, 40, -16, 0, -11, 109, 3},
+ { 0, 3, 37, -15, 0, -11, 111, 3}, { 0, 2, 35, -15, 0, -10, 113, 3},
+ { 0, 3, 32, -14, 0, -10, 114, 3}, { 0, 2, 29, -13, 0, -9, 116, 3},
+ { 0, 2, 27, -13, 0, -8, 117, 3}, { 0, 2, 25, -12, 0, -8, 119, 2},
+ { 0, 2, 22, -11, 0, -7, 120, 2}, { 0, 1, 20, -10, 0, -6, 121, 2},
+ { 0, 1, 18, -9, 0, -6, 122, 2}, { 0, 1, 15, -8, 0, -5, 123, 2},
+ { 0, 1, 13, -7, 0, -4, 124, 1}, { 0, 1, 11, -6, 0, -4, 125, 1},
+ { 0, 1, 8, -5, 0, -3, 126, 1}, { 0, 1, 6, -4, 0, -2, 126, 1},
+ { 0, 0, 4, -3, 0, -1, 127, 1}, { 0, 0, 2, -1, 0, 0, 127, 0},
+ // dummy (replicate row index 191)
+ { 0, 0, 2, -1, 0, 0, 127, 0},
+
+#else
+ // [-1, 0)
+ { 0, 127, 0, 0, 0, 1, 0, 0}, { 1, 127, -1, 0, -3, 4, 0, 0},
+ { 1, 126, -3, 0, -5, 8, 1, 0}, { 1, 124, -4, 0, -7, 13, 1, 0},
+ { 2, 122, -6, 0, -9, 18, 1, 0}, { 2, 120, -7, 0, -11, 22, 2, 0},
+ { 3, 117, -8, 0, -13, 27, 2, 0}, { 3, 114, -10, 0, -14, 32, 3, 0},
+ { 3, 111, -11, 0, -15, 37, 3, 0}, { 3, 108, -12, 0, -16, 42, 3, 0},
+ { 4, 104, -13, 0, -17, 47, 3, 0}, { 4, 100, -14, 0, -17, 52, 3, 0},
+ { 4, 96, -15, 0, -18, 58, 3, 0}, { 4, 91, -16, 0, -18, 63, 4, 0},
+ { 4, 87, -17, 0, -18, 68, 4, 0}, { 4, 82, -17, 0, -18, 73, 4, 0},
+ { 4, 78, -18, 0, -18, 78, 4, 0}, { 4, 73, -18, 0, -17, 82, 4, 0},
+ { 4, 68, -18, 0, -17, 87, 4, 0}, { 4, 63, -18, 0, -16, 91, 4, 0},
+ { 3, 58, -18, 0, -15, 96, 4, 0}, { 3, 52, -17, 0, -14, 100, 4, 0},
+ { 3, 47, -17, 0, -13, 104, 4, 0}, { 3, 42, -16, 0, -12, 108, 3, 0},
+ { 3, 37, -15, 0, -11, 111, 3, 0}, { 3, 32, -14, 0, -10, 114, 3, 0},
+ { 2, 27, -13, 0, -8, 117, 3, 0}, { 2, 22, -11, 0, -7, 120, 2, 0},
+ { 1, 18, -9, 0, -6, 122, 2, 0}, { 1, 13, -7, 0, -4, 124, 1, 0},
+ { 1, 8, -5, 0, -3, 126, 1, 0}, { 0, 4, -3, 0, -1, 127, 1, 0},
+ // [0, 1)
+ { 0, 0, 1, 0, 0, 127, 0, 0}, { 0, -3, 4, 1, 1, 127, -2, 0},
+ { 0, -6, 8, 1, 2, 126, -3, 0}, {-1, -8, 13, 2, 3, 125, -5, -1},
+ {-1, -11, 18, 3, 4, 123, -7, -1}, {-1, -13, 23, 3, 4, 121, -8, -1},
+ {-1, -15, 27, 4, 5, 119, -10, -1}, {-2, -17, 33, 5, 6, 116, -12, -1},
+ {-2, -18, 38, 5, 6, 113, -13, -1}, {-2, -19, 43, 6, 7, 110, -15, -2},
+ {-2, -20, 49, 6, 7, 106, -16, -2}, {-2, -21, 54, 7, 7, 102, -17, -2},
+ {-2, -22, 59, 7, 8, 98, -18, -2}, {-2, -22, 64, 7, 8, 94, -19, -2},
+ {-2, -22, 69, 8, 8, 89, -20, -2}, {-2, -21, 74, 8, 8, 84, -21, -2},
+ {-2, -21, 79, 8, 8, 79, -21, -2}, {-2, -21, 84, 8, 8, 74, -21, -2},
+ {-2, -20, 89, 8, 8, 69, -22, -2}, {-2, -19, 94, 8, 7, 64, -22, -2},
+ {-2, -18, 98, 8, 7, 59, -22, -2}, {-2, -17, 102, 7, 7, 54, -21, -2},
+ {-2, -16, 106, 7, 6, 49, -20, -2}, {-2, -15, 110, 7, 6, 43, -19, -2},
+ {-1, -13, 113, 6, 5, 38, -18, -2}, {-1, -12, 116, 6, 5, 33, -17, -2},
+ {-1, -10, 119, 5, 4, 27, -15, -1}, {-1, -8, 121, 4, 3, 23, -13, -1},
+ {-1, -7, 123, 4, 3, 18, -11, -1}, {-1, -5, 125, 3, 2, 13, -8, -1},
+ { 0, -3, 126, 2, 1, 8, -6, 0}, { 0, -2, 127, 1, 1, 4, -3, 0},
+ // [1, 2)
+ { 0, 0, 127, 0, 0, 1, 0, 0}, { 0, 1, 127, -1, 0, -3, 4, 0},
+ { 0, 1, 126, -3, 0, -5, 8, 1}, { 0, 1, 124, -4, 0, -7, 13, 1},
+ { 0, 2, 122, -6, 0, -9, 18, 1}, { 0, 2, 120, -7, 0, -11, 22, 2},
+ { 0, 3, 117, -8, 0, -13, 27, 2}, { 0, 3, 114, -10, 0, -14, 32, 3},
+ { 0, 3, 111, -11, 0, -15, 37, 3}, { 0, 3, 108, -12, 0, -16, 42, 3},
+ { 0, 4, 104, -13, 0, -17, 47, 3}, { 0, 4, 100, -14, 0, -17, 52, 3},
+ { 0, 4, 96, -15, 0, -18, 58, 3}, { 0, 4, 91, -16, 0, -18, 63, 4},
+ { 0, 4, 87, -17, 0, -18, 68, 4}, { 0, 4, 82, -17, 0, -18, 73, 4},
+ { 0, 4, 78, -18, 0, -18, 78, 4}, { 0, 4, 73, -18, 0, -17, 82, 4},
+ { 0, 4, 68, -18, 0, -17, 87, 4}, { 0, 4, 63, -18, 0, -16, 91, 4},
+ { 0, 3, 58, -18, 0, -15, 96, 4}, { 0, 3, 52, -17, 0, -14, 100, 4},
+ { 0, 3, 47, -17, 0, -13, 104, 4}, { 0, 3, 42, -16, 0, -12, 108, 3},
+ { 0, 3, 37, -15, 0, -11, 111, 3}, { 0, 3, 32, -14, 0, -10, 114, 3},
+ { 0, 2, 27, -13, 0, -8, 117, 3}, { 0, 2, 22, -11, 0, -7, 120, 2},
+ { 0, 1, 18, -9, 0, -6, 122, 2}, { 0, 1, 13, -7, 0, -4, 124, 1},
+ { 0, 1, 8, -5, 0, -3, 126, 1}, { 0, 0, 4, -3, 0, -1, 127, 1},
+ // dummy (replicate row index 95)
+ { 0, 0, 4, -3, 0, -1, 127, 1},
+#endif // WARPEDPIXEL_PREC_BITS == 6
+};
+/* clang-format on */
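+
+/* Illustrative scalar model of the summation order described above. The
+   helper below is hypothetical and only shows the grouping; f[] holds the
+   taps already re-ordered as 0, 2, 4, 6, 1, 3, 5, 7 and p[] is the 8-pixel
+   input window:
+
+     static INLINE int32_t warp_horiz_dot(const int8_t f[8],
+                                          const uint8_t p[8]) {
+       const int32_t even = (p[0] * f[0] + p[2] * f[1]) +
+                            (p[4] * f[2] + p[6] * f[3]);
+       const int32_t odd = (p[1] * f[4] + p[3] * f[5]) +
+                           (p[5] * f[6] + p[7] * f[7]);
+       return even + odd;
+     }
+*/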
+
+static INLINE void convolve(int32x2x2_t x0, int32x2x2_t x1, uint8x8_t src_0,
+ uint8x8_t src_1, int16x4_t *res) {
+ int16x8_t coeff_0, coeff_1;
+ int16x8_t pix_0, pix_1;
+
+ coeff_0 = vcombine_s16(vreinterpret_s16_s32(x0.val[0]),
+ vreinterpret_s16_s32(x1.val[0]));
+ coeff_1 = vcombine_s16(vreinterpret_s16_s32(x0.val[1]),
+ vreinterpret_s16_s32(x1.val[1]));
+
+ pix_0 = vreinterpretq_s16_u16(vmovl_u8(src_0));
+ pix_0 = vmulq_s16(coeff_0, pix_0);
+
+ pix_1 = vreinterpretq_s16_u16(vmovl_u8(src_1));
+ pix_0 = vmlaq_s16(pix_0, coeff_1, pix_1);
+
+ *res = vpadd_s16(vget_low_s16(pix_0), vget_high_s16(pix_0));
+}
+
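+// Filter a single row of the horizontal pass: apply the eight 8-tap filters
+// at phases sx + 0 * alpha .. sx + 7 * alpha to the pre-shifted source
+// vectors and store the eight filtered 16-bit results in tmp_dst[k + 7].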
+static INLINE void horizontal_filter_neon(uint8x16_t src_1, uint8x16_t src_2,
+ uint8x16_t src_3, uint8x16_t src_4,
+ int16x8_t *tmp_dst, int sx, int alpha,
+ int k, const int offset_bits_horiz,
+ const int reduce_bits_horiz) {
+ const uint8x16_t mask = { 255, 0, 255, 0, 255, 0, 255, 0,
+ 255, 0, 255, 0, 255, 0, 255, 0 };
+ const int32x4_t add_const = vdupq_n_s32((int32_t)(1 << offset_bits_horiz));
+ const int16x8_t shift = vdupq_n_s16(-(int16_t)reduce_bits_horiz);
+
+ int16x8_t f0, f1, f2, f3, f4, f5, f6, f7;
+ int32x2x2_t b0, b1;
+ uint8x8_t src_1_low, src_2_low, src_3_low, src_4_low, src_5_low, src_6_low;
+ int32x4_t tmp_res_low, tmp_res_high;
+ uint16x8_t res;
+ int16x4_t res_0246_even, res_0246_odd, res_1357_even, res_1357_odd;
+
+ uint8x16_t tmp_0 = vandq_u8(src_1, mask);
+ uint8x16_t tmp_1 = vandq_u8(src_2, mask);
+ uint8x16_t tmp_2 = vandq_u8(src_3, mask);
+ uint8x16_t tmp_3 = vandq_u8(src_4, mask);
+
+ tmp_2 = vextq_u8(tmp_0, tmp_0, 1);
+ tmp_3 = vextq_u8(tmp_1, tmp_1, 1);
+
+ src_1 = vaddq_u8(tmp_0, tmp_2);
+ src_2 = vaddq_u8(tmp_1, tmp_3);
+
+ src_1_low = vget_low_u8(src_1);
+ src_2_low = vget_low_u8(src_2);
+ src_3_low = vget_low_u8(vextq_u8(src_1, src_1, 4));
+ src_4_low = vget_low_u8(vextq_u8(src_2, src_2, 4));
+ src_5_low = vget_low_u8(vextq_u8(src_1, src_1, 2));
+ src_6_low = vget_low_u8(vextq_u8(src_1, src_1, 6));
+
+  // Load the eight 8-tap filters (one per output pixel) and widen to 16 bits
+ f0 = vmovl_s8(
+ vld1_s8(filter_8bit_neon[(sx + 0 * alpha) >> WARPEDDIFF_PREC_BITS]));
+ f1 = vmovl_s8(
+ vld1_s8(filter_8bit_neon[(sx + 1 * alpha) >> WARPEDDIFF_PREC_BITS]));
+ f2 = vmovl_s8(
+ vld1_s8(filter_8bit_neon[(sx + 2 * alpha) >> WARPEDDIFF_PREC_BITS]));
+ f3 = vmovl_s8(
+ vld1_s8(filter_8bit_neon[(sx + 3 * alpha) >> WARPEDDIFF_PREC_BITS]));
+ f4 = vmovl_s8(
+ vld1_s8(filter_8bit_neon[(sx + 4 * alpha) >> WARPEDDIFF_PREC_BITS]));
+ f5 = vmovl_s8(
+ vld1_s8(filter_8bit_neon[(sx + 5 * alpha) >> WARPEDDIFF_PREC_BITS]));
+ f6 = vmovl_s8(
+ vld1_s8(filter_8bit_neon[(sx + 6 * alpha) >> WARPEDDIFF_PREC_BITS]));
+ f7 = vmovl_s8(
+ vld1_s8(filter_8bit_neon[(sx + 7 * alpha) >> WARPEDDIFF_PREC_BITS]));
+
+ b0 = vtrn_s32(vreinterpret_s32_s16(vget_low_s16(f0)),
+ vreinterpret_s32_s16(vget_low_s16(f2)));
+ b1 = vtrn_s32(vreinterpret_s32_s16(vget_low_s16(f4)),
+ vreinterpret_s32_s16(vget_low_s16(f6)));
+ convolve(b0, b1, src_1_low, src_3_low, &res_0246_even);
+
+ b0 = vtrn_s32(vreinterpret_s32_s16(vget_low_s16(f1)),
+ vreinterpret_s32_s16(vget_low_s16(f3)));
+ b1 = vtrn_s32(vreinterpret_s32_s16(vget_low_s16(f5)),
+ vreinterpret_s32_s16(vget_low_s16(f7)));
+ convolve(b0, b1, src_2_low, src_4_low, &res_0246_odd);
+
+ b0 = vtrn_s32(vreinterpret_s32_s16(vget_high_s16(f0)),
+ vreinterpret_s32_s16(vget_high_s16(f2)));
+ b1 = vtrn_s32(vreinterpret_s32_s16(vget_high_s16(f4)),
+ vreinterpret_s32_s16(vget_high_s16(f6)));
+ convolve(b0, b1, src_2_low, src_4_low, &res_1357_even);
+
+ b0 = vtrn_s32(vreinterpret_s32_s16(vget_high_s16(f1)),
+ vreinterpret_s32_s16(vget_high_s16(f3)));
+ b1 = vtrn_s32(vreinterpret_s32_s16(vget_high_s16(f5)),
+ vreinterpret_s32_s16(vget_high_s16(f7)));
+ convolve(b0, b1, src_5_low, src_6_low, &res_1357_odd);
+
+ tmp_res_low = vaddl_s16(res_0246_even, res_1357_even);
+ tmp_res_high = vaddl_s16(res_0246_odd, res_1357_odd);
+
+ tmp_res_low = vaddq_s32(tmp_res_low, add_const);
+ tmp_res_high = vaddq_s32(tmp_res_high, add_const);
+
+ res = vcombine_u16(vqmovun_s32(tmp_res_low), vqmovun_s32(tmp_res_high));
+ res = vqrshlq_u16(res, shift);
+
+ tmp_dst[k + 7] = vreinterpretq_s16_u16(res);
+}
+
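+// Vertical pass for one 8-wide block of outputs: transpose the 8x8 block of
+// horizontally filtered samples in src[], apply the eight 8-tap vertical
+// filters at phases sy + 0 * gamma .. sy + 7 * gamma, and return the eight
+// 32-bit sums as *res_low (output columns 0-3) and *res_high (columns 4-7).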
+static INLINE void vertical_filter_neon(const int16x8_t *src,
+ int32x4_t *res_low, int32x4_t *res_high,
+ int sy, int gamma) {
+ int16x4_t src_0, src_1, fltr_0, fltr_1;
+ int32x4_t res_0, res_1;
+ int32x2_t res_0_im, res_1_im;
+ int32x4_t res_even, res_odd, im_res_0, im_res_1;
+
+ int16x8_t f0, f1, f2, f3, f4, f5, f6, f7;
+ int16x8x2_t b0, b1, b2, b3;
+ int32x4x2_t c0, c1, c2, c3;
+ int32x4x2_t d0, d1, d2, d3;
+
+ b0 = vtrnq_s16(src[0], src[1]);
+ b1 = vtrnq_s16(src[2], src[3]);
+ b2 = vtrnq_s16(src[4], src[5]);
+ b3 = vtrnq_s16(src[6], src[7]);
+
+ c0 = vtrnq_s32(vreinterpretq_s32_s16(b0.val[0]),
+ vreinterpretq_s32_s16(b0.val[1]));
+ c1 = vtrnq_s32(vreinterpretq_s32_s16(b1.val[0]),
+ vreinterpretq_s32_s16(b1.val[1]));
+ c2 = vtrnq_s32(vreinterpretq_s32_s16(b2.val[0]),
+ vreinterpretq_s32_s16(b2.val[1]));
+ c3 = vtrnq_s32(vreinterpretq_s32_s16(b3.val[0]),
+ vreinterpretq_s32_s16(b3.val[1]));
+
+ f0 = vld1q_s16(
+ (int16_t *)(warped_filter + ((sy + 0 * gamma) >> WARPEDDIFF_PREC_BITS)));
+ f1 = vld1q_s16(
+ (int16_t *)(warped_filter + ((sy + 1 * gamma) >> WARPEDDIFF_PREC_BITS)));
+ f2 = vld1q_s16(
+ (int16_t *)(warped_filter + ((sy + 2 * gamma) >> WARPEDDIFF_PREC_BITS)));
+ f3 = vld1q_s16(
+ (int16_t *)(warped_filter + ((sy + 3 * gamma) >> WARPEDDIFF_PREC_BITS)));
+ f4 = vld1q_s16(
+ (int16_t *)(warped_filter + ((sy + 4 * gamma) >> WARPEDDIFF_PREC_BITS)));
+ f5 = vld1q_s16(
+ (int16_t *)(warped_filter + ((sy + 5 * gamma) >> WARPEDDIFF_PREC_BITS)));
+ f6 = vld1q_s16(
+ (int16_t *)(warped_filter + ((sy + 6 * gamma) >> WARPEDDIFF_PREC_BITS)));
+ f7 = vld1q_s16(
+ (int16_t *)(warped_filter + ((sy + 7 * gamma) >> WARPEDDIFF_PREC_BITS)));
+
+ d0 = vtrnq_s32(vreinterpretq_s32_s16(f0), vreinterpretq_s32_s16(f2));
+ d1 = vtrnq_s32(vreinterpretq_s32_s16(f4), vreinterpretq_s32_s16(f6));
+ d2 = vtrnq_s32(vreinterpretq_s32_s16(f1), vreinterpretq_s32_s16(f3));
+ d3 = vtrnq_s32(vreinterpretq_s32_s16(f5), vreinterpretq_s32_s16(f7));
+
+ // row:0,1 even_col:0,2
+ src_0 = vget_low_s16(vreinterpretq_s16_s32(c0.val[0]));
+ fltr_0 = vget_low_s16(vreinterpretq_s16_s32(d0.val[0]));
+ res_0 = vmull_s16(src_0, fltr_0);
+
+ // row:0,1,2,3 even_col:0,2
+ src_0 = vget_low_s16(vreinterpretq_s16_s32(c1.val[0]));
+ fltr_0 = vget_low_s16(vreinterpretq_s16_s32(d0.val[1]));
+ res_0 = vmlal_s16(res_0, src_0, fltr_0);
+ res_0_im = vpadd_s32(vget_low_s32(res_0), vget_high_s32(res_0));
+
+ // row:0,1 even_col:4,6
+ src_1 = vget_low_s16(vreinterpretq_s16_s32(c0.val[1]));
+ fltr_1 = vget_low_s16(vreinterpretq_s16_s32(d1.val[0]));
+ res_1 = vmull_s16(src_1, fltr_1);
+
+ // row:0,1,2,3 even_col:4,6
+ src_1 = vget_low_s16(vreinterpretq_s16_s32(c1.val[1]));
+ fltr_1 = vget_low_s16(vreinterpretq_s16_s32(d1.val[1]));
+ res_1 = vmlal_s16(res_1, src_1, fltr_1);
+ res_1_im = vpadd_s32(vget_low_s32(res_1), vget_high_s32(res_1));
+
+ // row:0,1,2,3 even_col:0,2,4,6
+ im_res_0 = vcombine_s32(res_0_im, res_1_im);
+
+ // row:4,5 even_col:0,2
+ src_0 = vget_low_s16(vreinterpretq_s16_s32(c2.val[0]));
+ fltr_0 = vget_high_s16(vreinterpretq_s16_s32(d0.val[0]));
+ res_0 = vmull_s16(src_0, fltr_0);
+
+ // row:4,5,6,7 even_col:0,2
+ src_0 = vget_low_s16(vreinterpretq_s16_s32(c3.val[0]));
+ fltr_0 = vget_high_s16(vreinterpretq_s16_s32(d0.val[1]));
+ res_0 = vmlal_s16(res_0, src_0, fltr_0);
+ res_0_im = vpadd_s32(vget_low_s32(res_0), vget_high_s32(res_0));
+
+ // row:4,5 even_col:4,6
+ src_1 = vget_low_s16(vreinterpretq_s16_s32(c2.val[1]));
+ fltr_1 = vget_high_s16(vreinterpretq_s16_s32(d1.val[0]));
+ res_1 = vmull_s16(src_1, fltr_1);
+
+ // row:4,5,6,7 even_col:4,6
+ src_1 = vget_low_s16(vreinterpretq_s16_s32(c3.val[1]));
+ fltr_1 = vget_high_s16(vreinterpretq_s16_s32(d1.val[1]));
+ res_1 = vmlal_s16(res_1, src_1, fltr_1);
+ res_1_im = vpadd_s32(vget_low_s32(res_1), vget_high_s32(res_1));
+
+ // row:4,5,6,7 even_col:0,2,4,6
+ im_res_1 = vcombine_s32(res_0_im, res_1_im);
+
+ // row:0-7 even_col:0,2,4,6
+ res_even = vaddq_s32(im_res_0, im_res_1);
+
+ // row:0,1 odd_col:1,3
+ src_0 = vget_high_s16(vreinterpretq_s16_s32(c0.val[0]));
+ fltr_0 = vget_low_s16(vreinterpretq_s16_s32(d2.val[0]));
+ res_0 = vmull_s16(src_0, fltr_0);
+
+ // row:0,1,2,3 odd_col:1,3
+ src_0 = vget_high_s16(vreinterpretq_s16_s32(c1.val[0]));
+ fltr_0 = vget_low_s16(vreinterpretq_s16_s32(d2.val[1]));
+ res_0 = vmlal_s16(res_0, src_0, fltr_0);
+ res_0_im = vpadd_s32(vget_low_s32(res_0), vget_high_s32(res_0));
+
+ // row:0,1 odd_col:5,7
+ src_1 = vget_high_s16(vreinterpretq_s16_s32(c0.val[1]));
+ fltr_1 = vget_low_s16(vreinterpretq_s16_s32(d3.val[0]));
+ res_1 = vmull_s16(src_1, fltr_1);
+
+ // row:0,1,2,3 odd_col:5,7
+ src_1 = vget_high_s16(vreinterpretq_s16_s32(c1.val[1]));
+ fltr_1 = vget_low_s16(vreinterpretq_s16_s32(d3.val[1]));
+ res_1 = vmlal_s16(res_1, src_1, fltr_1);
+ res_1_im = vpadd_s32(vget_low_s32(res_1), vget_high_s32(res_1));
+
+ // row:0,1,2,3 odd_col:1,3,5,7
+ im_res_0 = vcombine_s32(res_0_im, res_1_im);
+
+ // row:4,5 odd_col:1,3
+ src_0 = vget_high_s16(vreinterpretq_s16_s32(c2.val[0]));
+ fltr_0 = vget_high_s16(vreinterpretq_s16_s32(d2.val[0]));
+ res_0 = vmull_s16(src_0, fltr_0);
+
+ // row:4,5,6,7 odd_col:1,3
+ src_0 = vget_high_s16(vreinterpretq_s16_s32(c3.val[0]));
+ fltr_0 = vget_high_s16(vreinterpretq_s16_s32(d2.val[1]));
+ res_0 = vmlal_s16(res_0, src_0, fltr_0);
+ res_0_im = vpadd_s32(vget_low_s32(res_0), vget_high_s32(res_0));
+
+ // row:4,5 odd_col:5,7
+ src_1 = vget_high_s16(vreinterpretq_s16_s32(c2.val[1]));
+ fltr_1 = vget_high_s16(vreinterpretq_s16_s32(d3.val[0]));
+ res_1 = vmull_s16(src_1, fltr_1);
+
+ // row:4,5,6,7 odd_col:5,7
+ src_1 = vget_high_s16(vreinterpretq_s16_s32(c3.val[1]));
+ fltr_1 = vget_high_s16(vreinterpretq_s16_s32(d3.val[1]));
+ res_1 = vmlal_s16(res_1, src_1, fltr_1);
+ res_1_im = vpadd_s32(vget_low_s32(res_1), vget_high_s32(res_1));
+
+ // row:4,5,6,7 odd_col:1,3,5,7
+ im_res_1 = vcombine_s32(res_0_im, res_1_im);
+
+ // row:0-7 odd_col:1,3,5,7
+ res_odd = vaddq_s32(im_res_0, im_res_1);
+
+ // reordering as 0 1 2 3 | 4 5 6 7
+ c0 = vtrnq_s32(res_even, res_odd);
+
+ // Final store
+ *res_low = vcombine_s32(vget_low_s32(c0.val[0]), vget_low_s32(c0.val[1]));
+ *res_high = vcombine_s32(vget_high_s32(c0.val[0]), vget_high_s32(c0.val[1]));
+}
+
+void av1_warp_affine_neon(const int32_t *mat, const uint8_t *ref, int width,
+ int height, int stride, uint8_t *pred, int p_col,
+ int p_row, int p_width, int p_height, int p_stride,
+ int subsampling_x, int subsampling_y,
+ ConvolveParams *conv_params, int16_t alpha,
+ int16_t beta, int16_t gamma, int16_t delta) {
+ int16x8_t tmp[15];
+ const int bd = 8;
+ const int w0 = conv_params->fwd_offset;
+ const int w1 = conv_params->bck_offset;
+ const int32x4_t fwd = vdupq_n_s32((int32_t)w0);
+ const int32x4_t bwd = vdupq_n_s32((int32_t)w1);
+ const int16x8_t sub_constant = vdupq_n_s16((1 << (bd - 1)) + (1 << bd));
+
+ int limit = 0;
+ uint8x16_t vec_dup, mask_val;
+ int32x4_t res_lo, res_hi;
+ int16x8_t result_final;
+ uint8x16_t src_1, src_2, src_3, src_4;
+ uint8x16_t indx_vec = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
+ };
+ uint8x16_t cmp_vec;
+
+ const int reduce_bits_horiz = conv_params->round_0;
+ const int reduce_bits_vert = conv_params->is_compound
+ ? conv_params->round_1
+ : 2 * FILTER_BITS - reduce_bits_horiz;
+ const int32x4_t shift_vert = vdupq_n_s32(-(int32_t)reduce_bits_vert);
+ const int offset_bits_horiz = bd + FILTER_BITS - 1;
+
+ assert(IMPLIES(conv_params->is_compound, conv_params->dst != NULL));
+
+ const int offset_bits_vert = bd + 2 * FILTER_BITS - reduce_bits_horiz;
+ int32x4_t add_const_vert = vdupq_n_s32((int32_t)(1 << offset_bits_vert));
+ const int round_bits =
+ 2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1;
+ const int16x4_t round_bits_vec = vdup_n_s16(-(int16_t)round_bits);
+ const int offset_bits = bd + 2 * FILTER_BITS - conv_params->round_0;
+ const int16x4_t res_sub_const =
+ vdup_n_s16(-((1 << (offset_bits - conv_params->round_1)) +
+ (1 << (offset_bits - conv_params->round_1 - 1))));
+ int k;
+
+ assert(IMPLIES(conv_params->do_average, conv_params->is_compound));
+
+ for (int i = 0; i < p_height; i += 8) {
+ for (int j = 0; j < p_width; j += 8) {
+ const int32_t src_x = (p_col + j + 4) << subsampling_x;
+ const int32_t src_y = (p_row + i + 4) << subsampling_y;
+ const int32_t dst_x = mat[2] * src_x + mat[3] * src_y + mat[0];
+ const int32_t dst_y = mat[4] * src_x + mat[5] * src_y + mat[1];
+ const int32_t x4 = dst_x >> subsampling_x;
+ const int32_t y4 = dst_y >> subsampling_y;
+
+ int32_t ix4 = x4 >> WARPEDMODEL_PREC_BITS;
+ int32_t sx4 = x4 & ((1 << WARPEDMODEL_PREC_BITS) - 1);
+ int32_t iy4 = y4 >> WARPEDMODEL_PREC_BITS;
+ int32_t sy4 = y4 & ((1 << WARPEDMODEL_PREC_BITS) - 1);
+
+ sx4 += alpha * (-4) + beta * (-4) + (1 << (WARPEDDIFF_PREC_BITS - 1)) +
+ (WARPEDPIXEL_PREC_SHIFTS << WARPEDDIFF_PREC_BITS);
+ sy4 += gamma * (-4) + delta * (-4) + (1 << (WARPEDDIFF_PREC_BITS - 1)) +
+ (WARPEDPIXEL_PREC_SHIFTS << WARPEDDIFF_PREC_BITS);
+
+ sx4 &= ~((1 << WARP_PARAM_REDUCE_BITS) - 1);
+ sy4 &= ~((1 << WARP_PARAM_REDUCE_BITS) - 1);
+ // horizontal
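+      // Four cases: the source window lies at or beyond the left edge of
+      // the frame (use the leftmost column), at or beyond the right edge
+      // (use the rightmost column), straddles a frame edge (replicate the
+      // edge pixel into the out-of-frame lanes), or is fully inside the
+      // frame.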
+ if (ix4 <= -7) {
+ for (k = -7; k < AOMMIN(8, p_height - i); ++k) {
+ int iy = iy4 + k;
+ if (iy < 0)
+ iy = 0;
+ else if (iy > height - 1)
+ iy = height - 1;
+ int16_t dup_val =
+ (1 << (bd + FILTER_BITS - reduce_bits_horiz - 1)) +
+ ref[iy * stride] * (1 << (FILTER_BITS - reduce_bits_horiz));
+
+ tmp[k + 7] = vdupq_n_s16(dup_val);
+ }
+ } else if (ix4 >= width + 6) {
+ for (k = -7; k < AOMMIN(8, p_height - i); ++k) {
+ int iy = iy4 + k;
+ if (iy < 0)
+ iy = 0;
+ else if (iy > height - 1)
+ iy = height - 1;
+ int16_t dup_val = (1 << (bd + FILTER_BITS - reduce_bits_horiz - 1)) +
+ ref[iy * stride + (width - 1)] *
+ (1 << (FILTER_BITS - reduce_bits_horiz));
+ tmp[k + 7] = vdupq_n_s16(dup_val);
+ }
+ } else if (((ix4 - 7) < 0) || ((ix4 + 9) > width)) {
+ const int out_of_boundary_left = -(ix4 - 6);
+ const int out_of_boundary_right = (ix4 + 8) - width;
+
+ for (k = -7; k < AOMMIN(8, p_height - i); ++k) {
+ int iy = iy4 + k;
+ if (iy < 0)
+ iy = 0;
+ else if (iy > height - 1)
+ iy = height - 1;
+ int sx = sx4 + beta * (k + 4);
+
+ const uint8_t *src = ref + iy * stride + ix4 - 7;
+ src_1 = vld1q_u8(src);
+
+ if (out_of_boundary_left >= 0) {
+ limit = out_of_boundary_left + 1;
+ cmp_vec = vdupq_n_u8(out_of_boundary_left);
+ vec_dup = vdupq_n_u8(*(src + limit));
+ mask_val = vcleq_u8(indx_vec, cmp_vec);
+ src_1 = vbslq_u8(mask_val, vec_dup, src_1);
+ }
+ if (out_of_boundary_right >= 0) {
+ limit = 15 - (out_of_boundary_right + 1);
+ cmp_vec = vdupq_n_u8(15 - out_of_boundary_right);
+ vec_dup = vdupq_n_u8(*(src + limit));
+ mask_val = vcgeq_u8(indx_vec, cmp_vec);
+ src_1 = vbslq_u8(mask_val, vec_dup, src_1);
+ }
+ src_2 = vextq_u8(src_1, src_1, 1);
+ src_3 = vextq_u8(src_2, src_2, 1);
+ src_4 = vextq_u8(src_3, src_3, 1);
+
+ horizontal_filter_neon(src_1, src_2, src_3, src_4, tmp, sx, alpha, k,
+ offset_bits_horiz, reduce_bits_horiz);
+ }
+ } else {
+ for (k = -7; k < AOMMIN(8, p_height - i); ++k) {
+ int iy = iy4 + k;
+ if (iy < 0)
+ iy = 0;
+ else if (iy > height - 1)
+ iy = height - 1;
+ int sx = sx4 + beta * (k + 4);
+
+ const uint8_t *src = ref + iy * stride + ix4 - 7;
+ src_1 = vld1q_u8(src);
+ src_2 = vextq_u8(src_1, src_1, 1);
+ src_3 = vextq_u8(src_2, src_2, 1);
+ src_4 = vextq_u8(src_3, src_3, 1);
+
+ horizontal_filter_neon(src_1, src_2, src_3, src_4, tmp, sx, alpha, k,
+ offset_bits_horiz, reduce_bits_horiz);
+ }
+ }
+
+ // vertical
+ for (k = -4; k < AOMMIN(4, p_height - i - 4); ++k) {
+ int sy = sy4 + delta * (k + 4);
+
+ const int16x8_t *v_src = tmp + (k + 4);
+
+ vertical_filter_neon(v_src, &res_lo, &res_hi, sy, gamma);
+
+ res_lo = vaddq_s32(res_lo, add_const_vert);
+ res_hi = vaddq_s32(res_hi, add_const_vert);
+
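+        // Compound path: store/average 16-bit CONV_BUF values; for the
+        // distance-weighted (jnt) compound case the two predictions are
+        // combined as (fwd * dst + bck * res) >> DIST_PRECISION_BITS before
+        // the final rounding back to 8 bits.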
+ if (conv_params->is_compound) {
+ uint16_t *const p =
+ (uint16_t *)&conv_params
+ ->dst[(i + k + 4) * conv_params->dst_stride + j];
+
+ res_lo = vrshlq_s32(res_lo, shift_vert);
+ if (conv_params->do_average) {
+ uint8_t *const dst8 = &pred[(i + k + 4) * p_stride + j];
+ uint16x4_t tmp16_lo = vld1_u16(p);
+ int32x4_t tmp32_lo = vreinterpretq_s32_u32(vmovl_u16(tmp16_lo));
+ int16x4_t tmp16_low;
+ if (conv_params->use_jnt_comp_avg) {
+ res_lo = vmulq_s32(res_lo, bwd);
+ tmp32_lo = vmulq_s32(tmp32_lo, fwd);
+ tmp32_lo = vaddq_s32(tmp32_lo, res_lo);
+ tmp16_low = vshrn_n_s32(tmp32_lo, DIST_PRECISION_BITS);
+ } else {
+ tmp32_lo = vaddq_s32(tmp32_lo, res_lo);
+ tmp16_low = vshrn_n_s32(tmp32_lo, 1);
+ }
+ int16x4_t res_low = vadd_s16(tmp16_low, res_sub_const);
+ res_low = vqrshl_s16(res_low, round_bits_vec);
+ int16x8_t final_res_low = vcombine_s16(res_low, res_low);
+ uint8x8_t res_8_low = vqmovun_s16(final_res_low);
+
+ vst1_lane_u32((uint32_t *)dst8, vreinterpret_u32_u8(res_8_low), 0);
+ } else {
+ uint16x4_t res_u16_low = vqmovun_s32(res_lo);
+ vst1_u16(p, res_u16_low);
+ }
+ if (p_width > 4) {
+ uint16_t *const p4 =
+ (uint16_t *)&conv_params
+ ->dst[(i + k + 4) * conv_params->dst_stride + j + 4];
+
+ res_hi = vrshlq_s32(res_hi, shift_vert);
+ if (conv_params->do_average) {
+ uint8_t *const dst8_4 = &pred[(i + k + 4) * p_stride + j + 4];
+
+ uint16x4_t tmp16_hi = vld1_u16(p4);
+ int32x4_t tmp32_hi = vreinterpretq_s32_u32(vmovl_u16(tmp16_hi));
+ int16x4_t tmp16_high;
+ if (conv_params->use_jnt_comp_avg) {
+ res_hi = vmulq_s32(res_hi, bwd);
+ tmp32_hi = vmulq_s32(tmp32_hi, fwd);
+ tmp32_hi = vaddq_s32(tmp32_hi, res_hi);
+ tmp16_high = vshrn_n_s32(tmp32_hi, DIST_PRECISION_BITS);
+ } else {
+ tmp32_hi = vaddq_s32(tmp32_hi, res_hi);
+ tmp16_high = vshrn_n_s32(tmp32_hi, 1);
+ }
+ int16x4_t res_high = vadd_s16(tmp16_high, res_sub_const);
+ res_high = vqrshl_s16(res_high, round_bits_vec);
+ int16x8_t final_res_high = vcombine_s16(res_high, res_high);
+ uint8x8_t res_8_high = vqmovun_s16(final_res_high);
+
+ vst1_lane_u32((uint32_t *)dst8_4, vreinterpret_u32_u8(res_8_high),
+ 0);
+ } else {
+ uint16x4_t res_u16_high = vqmovun_s32(res_hi);
+ vst1_u16(p4, res_u16_high);
+ }
+ }
+ } else {
+ res_lo = vrshlq_s32(res_lo, shift_vert);
+ res_hi = vrshlq_s32(res_hi, shift_vert);
+
+ result_final = vcombine_s16(vmovn_s32(res_lo), vmovn_s32(res_hi));
+ result_final = vsubq_s16(result_final, sub_constant);
+
+ uint8_t *const p = (uint8_t *)&pred[(i + k + 4) * p_stride + j];
+ uint8x8_t val = vqmovun_s16(result_final);
+
+ if (p_width == 4) {
+ vst1_lane_u32((uint32_t *)p, vreinterpret_u32_u8(val), 0);
+ } else {
+ vst1_u8(p, val);
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/third_party/aom/av1/common/arm/wiener_convolve_neon.c b/third_party/aom/av1/common/arm/wiener_convolve_neon.c
new file mode 100644
index 0000000000..a9bb5bcf00
--- /dev/null
+++ b/third_party/aom/av1/common/arm/wiener_convolve_neon.c
@@ -0,0 +1,530 @@
+/*
+ * Copyright (c) 2018, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <arm_neon.h>
+#include <assert.h>
+
+#include "config/aom_config.h"
+#include "config/av1_rtcd.h"
+
+#include "aom_dsp/txfm_common.h"
+#include "aom_ports/mem.h"
+#include "av1/common/common.h"
+#include "av1/common/arm/convolve_neon.h"
+#include "av1/common/arm/mem_neon.h"
+#include "av1/common/arm/transpose_neon.h"
+
+/* Wiener filter 2D
+   Apply the horizontal filter and store the result in a temporary buffer,
+   then apply the vertical filter to that buffer and write the result over
+   the original pixel values in dst.
+ */
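+/* A rough scalar model of the data flow (the helper names below are
+   hypothetical and only illustrate the two passes; `temp` mirrors the
+   buffer used in the implementation):
+
+     uint16_t temp[(MAX_SB_SIZE + HORIZ_EXTRA_ROWS) * MAX_SB_SIZE];
+     // pass 1: (h + SUBPEL_TAPS - 1) rows, rounded by conv_params->round_0
+     wiener_horiz_c(src - 3 * src_stride - 3, src_stride, temp, MAX_SB_SIZE,
+                    w, h + SUBPEL_TAPS - 1, filter_x);
+     // pass 2: h rows, rounded by conv_params->round_1, written to dst
+     wiener_vert_c(temp, MAX_SB_SIZE, dst, dst_stride, w, h, filter_y);
+ */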
+void av1_wiener_convolve_add_src_neon(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x, int x_step_q4,
+ const int16_t *filter_y, int y_step_q4,
+ int w, int h,
+ const ConvolveParams *conv_params) {
+ uint16_t *d_tmp;
+ uint8_t *d;
+ const uint8_t *src_ptr, *s_tmp;
+ uint16_t *dst_ptr;
+ (void)x_step_q4;
+ (void)y_step_q4;
+
+ int width, height;
+ const int bd = 8;
+ const int intermediate_height = h + SUBPEL_TAPS - 1;
+ const int center_tap = ((SUBPEL_TAPS - 1) / 2);
+ int16_t filter_x_tmp[7], filter_y_tmp[7];
+
+ DECLARE_ALIGNED(16, uint16_t,
+ temp[(MAX_SB_SIZE + HORIZ_EXTRA_ROWS) * MAX_SB_SIZE]);
+
+ assert(x_step_q4 == 16 && y_step_q4 == 16);
+ assert(!(w % 8));
+
+ assert(w <= MAX_SB_SIZE);
+ assert(h <= MAX_SB_SIZE);
+
+ assert(filter_x[7] == 0);
+ assert(filter_y[7] == 0);
+
+  /* The horizontal filtering output is assumed not to exceed 15 bits:
+       (bd + 1 + FILTER_BITS - conv_params->round_0) <= 15
+     With bd = 8 and FILTER_BITS = 7 this gives
+       16 - conv_params->round_0 <= 15, i.e. conv_params->round_0 >= 1.
+   */
+ assert((conv_params->round_0) >= 1);
+
+ memcpy(&filter_x_tmp[0], filter_x, sizeof(*filter_x) * FILTER_BITS);
+ memcpy(&filter_y_tmp[0], filter_y, sizeof(*filter_y) * FILTER_BITS);
+
+ filter_x_tmp[3] += (1 << FILTER_BITS);
+ filter_y_tmp[3] += (1 << FILTER_BITS);
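+  // Adding (1 << FILTER_BITS) to the centre tap implements the "add_src"
+  // part of the filter: it is equivalent to adding the unfiltered source
+  // pixel to the filtered output before rounding.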
+
+ s_tmp = src - center_tap * src_stride - center_tap;
+ dst_ptr = temp;
+ src_ptr = s_tmp;
+ height = intermediate_height;
+
+ /* if height is a multiple of 8 */
+ if (!(h & 7)) {
+ int16x8_t res0, res1, res2, res3;
+ uint16x8_t res4;
+ uint8x8_t t0, t1, t2, t3, t4, t5, t6, t7;
+#if defined(__aarch64__)
+ uint16x8_t res5, res6, res7, res8, res9, res10, res11;
+ uint8x8_t t8, t9, t10, t11, t12, t13, t14;
+
+ do {
+ const uint8_t *s;
+
+ __builtin_prefetch(src_ptr + 0 * src_stride);
+ __builtin_prefetch(src_ptr + 1 * src_stride);
+ __builtin_prefetch(src_ptr + 2 * src_stride);
+ __builtin_prefetch(src_ptr + 3 * src_stride);
+ __builtin_prefetch(src_ptr + 4 * src_stride);
+ __builtin_prefetch(src_ptr + 5 * src_stride);
+ __builtin_prefetch(src_ptr + 6 * src_stride);
+ __builtin_prefetch(src_ptr + 7 * src_stride);
+
+ load_u8_8x8(src_ptr, src_stride, &t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7);
+ transpose_u8_8x8(&t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7);
+
+ s = src_ptr + 7;
+ d_tmp = dst_ptr;
+ width = w;
+
+ __builtin_prefetch(dst_ptr + 0 * dst_stride);
+ __builtin_prefetch(dst_ptr + 1 * dst_stride);
+ __builtin_prefetch(dst_ptr + 2 * dst_stride);
+ __builtin_prefetch(dst_ptr + 3 * dst_stride);
+ __builtin_prefetch(dst_ptr + 4 * dst_stride);
+ __builtin_prefetch(dst_ptr + 5 * dst_stride);
+ __builtin_prefetch(dst_ptr + 6 * dst_stride);
+ __builtin_prefetch(dst_ptr + 7 * dst_stride);
+
+ do {
+ load_u8_8x8(s, src_stride, &t7, &t8, &t9, &t10, &t11, &t12, &t13, &t14);
+ transpose_u8_8x8(&t7, &t8, &t9, &t10, &t11, &t12, &t13, &t14);
+
+ res0 = vreinterpretq_s16_u16(vaddl_u8(t0, t6));
+ res1 = vreinterpretq_s16_u16(vaddl_u8(t1, t5));
+ res2 = vreinterpretq_s16_u16(vaddl_u8(t2, t4));
+ res3 = vreinterpretq_s16_u16(vmovl_u8(t3));
+ res4 = wiener_convolve8_horiz_8x8(res0, res1, res2, res3, filter_x_tmp,
+ bd, conv_params->round_0);
+
+ res0 = vreinterpretq_s16_u16(vaddl_u8(t1, t7));
+ res1 = vreinterpretq_s16_u16(vaddl_u8(t2, t6));
+ res2 = vreinterpretq_s16_u16(vaddl_u8(t3, t5));
+ res3 = vreinterpretq_s16_u16(vmovl_u8(t4));
+ res5 = wiener_convolve8_horiz_8x8(res0, res1, res2, res3, filter_x_tmp,
+ bd, conv_params->round_0);
+
+ res0 = vreinterpretq_s16_u16(vaddl_u8(t2, t8));
+ res1 = vreinterpretq_s16_u16(vaddl_u8(t3, t7));
+ res2 = vreinterpretq_s16_u16(vaddl_u8(t4, t6));
+ res3 = vreinterpretq_s16_u16(vmovl_u8(t5));
+ res6 = wiener_convolve8_horiz_8x8(res0, res1, res2, res3, filter_x_tmp,
+ bd, conv_params->round_0);
+
+ res0 = vreinterpretq_s16_u16(vaddl_u8(t3, t9));
+ res1 = vreinterpretq_s16_u16(vaddl_u8(t4, t8));
+ res2 = vreinterpretq_s16_u16(vaddl_u8(t5, t7));
+ res3 = vreinterpretq_s16_u16(vmovl_u8(t6));
+ res7 = wiener_convolve8_horiz_8x8(res0, res1, res2, res3, filter_x_tmp,
+ bd, conv_params->round_0);
+
+ res0 = vreinterpretq_s16_u16(vaddl_u8(t4, t10));
+ res1 = vreinterpretq_s16_u16(vaddl_u8(t5, t9));
+ res2 = vreinterpretq_s16_u16(vaddl_u8(t6, t8));
+ res3 = vreinterpretq_s16_u16(vmovl_u8(t7));
+ res8 = wiener_convolve8_horiz_8x8(res0, res1, res2, res3, filter_x_tmp,
+ bd, conv_params->round_0);
+
+ res0 = vreinterpretq_s16_u16(vaddl_u8(t5, t11));
+ res1 = vreinterpretq_s16_u16(vaddl_u8(t6, t10));
+ res2 = vreinterpretq_s16_u16(vaddl_u8(t7, t9));
+ res3 = vreinterpretq_s16_u16(vmovl_u8(t8));
+ res9 = wiener_convolve8_horiz_8x8(res0, res1, res2, res3, filter_x_tmp,
+ bd, conv_params->round_0);
+
+ res0 = vreinterpretq_s16_u16(vaddl_u8(t6, t12));
+ res1 = vreinterpretq_s16_u16(vaddl_u8(t7, t11));
+ res2 = vreinterpretq_s16_u16(vaddl_u8(t8, t10));
+ res3 = vreinterpretq_s16_u16(vmovl_u8(t9));
+ res10 = wiener_convolve8_horiz_8x8(res0, res1, res2, res3, filter_x_tmp,
+ bd, conv_params->round_0);
+
+ res0 = vreinterpretq_s16_u16(vaddl_u8(t7, t13));
+ res1 = vreinterpretq_s16_u16(vaddl_u8(t8, t12));
+ res2 = vreinterpretq_s16_u16(vaddl_u8(t9, t11));
+ res3 = vreinterpretq_s16_u16(vmovl_u8(t10));
+ res11 = wiener_convolve8_horiz_8x8(res0, res1, res2, res3, filter_x_tmp,
+ bd, conv_params->round_0);
+
+ transpose_u16_8x8(&res4, &res5, &res6, &res7, &res8, &res9, &res10,
+ &res11);
+ store_u16_8x8(d_tmp, MAX_SB_SIZE, res4, res5, res6, res7, res8, res9,
+ res10, res11);
+
+ t0 = t8;
+ t1 = t9;
+ t2 = t10;
+ t3 = t11;
+ t4 = t12;
+ t5 = t13;
+ t6 = t14;
+ s += 8;
+ d_tmp += 8;
+ width -= 8;
+ } while (width > 0);
+ src_ptr += 8 * src_stride;
+ dst_ptr += 8 * MAX_SB_SIZE;
+ height -= 8;
+ } while (height > 0);
+#else
+ uint8x8_t temp_0;
+
+ do {
+ const uint8_t *s;
+
+ __builtin_prefetch(src_ptr);
+
+ t0 = vld1_u8(src_ptr); // a0 a1 a2 a3 a4 a5 a6 a7
+ s = src_ptr + 8;
+ d_tmp = dst_ptr;
+ width = w;
+
+ __builtin_prefetch(dst_ptr);
+
+ do {
+ t7 = vld1_u8(s); // a8 a9 a10 a11 a12 a13 a14 a15
+ temp_0 = t0;
+ t0 = t7;
+
+ t1 = vext_u8(temp_0, t7, 1); // a1 a2 a3 a4 a5 a6 a7 a8
+ t2 = vext_u8(temp_0, t7, 2); // a2 a3 a4 a5 a6 a7 a8 a9
+ t3 = vext_u8(temp_0, t7, 3); // a3 a4 a5 a6 a7 a8 a9 a10
+ t4 = vext_u8(temp_0, t7, 4); // a4 a5 a6 a7 a8 a9 a10 a11
+ t5 = vext_u8(temp_0, t7, 5); // a5 a6 a7 a8 a9 a10 a11 a12
+ t6 = vext_u8(temp_0, t7, 6); // a6 a7 a8 a9 a10 a11 a12 a13
+ t7 = vext_u8(temp_0, t7, 7); // a7 a8 a9 a10 a11 a12 a13 a14
+
+ res0 = vreinterpretq_s16_u16(vaddl_u8(temp_0, t6));
+ res1 = vreinterpretq_s16_u16(vaddl_u8(t1, t5));
+ res2 = vreinterpretq_s16_u16(vaddl_u8(t2, t4));
+ res3 = vreinterpretq_s16_u16(vmovl_u8(t3));
+ res4 = wiener_convolve8_horiz_8x8(res0, res1, res2, res3, filter_x_tmp,
+ bd, conv_params->round_0);
+
+ vst1q_u16(d_tmp, res4);
+
+ s += 8;
+ d_tmp += 8;
+ width -= 8;
+ } while (width > 0);
+ src_ptr += src_stride;
+ dst_ptr += MAX_SB_SIZE;
+ height--;
+ } while (height > 0);
+#endif
+ } else {
+    /* if height is a multiple of 4 */
+ const uint8_t *s;
+ int16x8_t tt0, tt1, tt2, tt3;
+ uint16x8_t d0;
+ uint8x8_t t0, t1, t2, t3;
+
+#if defined(__aarch64__)
+ uint16x4_t res0, res1, res2, res3, res4, res5, res6, res7;
+ uint16x8_t d1, d2, d3;
+ int16x4_t s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10;
+ int16x4_t s11, s12, s13, s14;
+ do {
+ __builtin_prefetch(src_ptr + 0 * src_stride);
+ __builtin_prefetch(src_ptr + 1 * src_stride);
+ __builtin_prefetch(src_ptr + 2 * src_stride);
+ __builtin_prefetch(src_ptr + 3 * src_stride);
+
+ load_u8_8x4(src_ptr, src_stride, &t0, &t1, &t2, &t3); /*8x4*/
+ transpose_u8_8x4(&t0, &t1, &t2,
+ &t3); /*first 8 pixels of 4 rows transposed-- 4x8*/
+
+ tt0 = vreinterpretq_s16_u16(vmovl_u8(t0));
+ tt1 = vreinterpretq_s16_u16(vmovl_u8(t1));
+ tt2 = vreinterpretq_s16_u16(vmovl_u8(t2));
+ tt3 = vreinterpretq_s16_u16(vmovl_u8(t3));
+
+ s0 = vget_low_s16(tt0); /*pa0 pb0 pc0 pd0 -- pixel_a0*/
+ s1 = vget_low_s16(tt1); /*pa1 pb1 pc1 pd1 */
+ s2 = vget_low_s16(tt2); /*pa2 pb2 pc2 pd2 */
+ s3 = vget_low_s16(tt3); /*pa3 pb3 pc3 pd3 */
+ s4 = vget_high_s16(tt0); /*pa4 pb4 pc4 pd4 */
+ s5 = vget_high_s16(tt1); /*pa5 pb5 pc5 pd5 */
+ s6 = vget_high_s16(tt2); /*pa6 pb6 pc6 pd6 */
+
+ __builtin_prefetch(dst_ptr + 0 * dst_stride);
+ __builtin_prefetch(dst_ptr + 1 * dst_stride);
+ __builtin_prefetch(dst_ptr + 2 * dst_stride);
+ __builtin_prefetch(dst_ptr + 3 * dst_stride);
+
+ s = src_ptr + 7;
+ d_tmp = dst_ptr;
+ width = w;
+
+ do {
+ load_u8_8x4(s, src_stride, &t0, &t1, &t2, &t3); /*8x4*/
+ transpose_u8_8x4(&t0, &t1, &t2, &t3);
+
+ tt0 = vreinterpretq_s16_u16(vmovl_u8(t0));
+ tt1 = vreinterpretq_s16_u16(vmovl_u8(t1));
+ tt2 = vreinterpretq_s16_u16(vmovl_u8(t2));
+ tt3 = vreinterpretq_s16_u16(vmovl_u8(t3));
+
+ s7 = vget_low_s16(tt0); /*pa7 pb7 pc7 pd7 */ /*4x8*/
+ s8 = vget_low_s16(tt1); /*pa8 pb8 pc8 pd8 */
+ s9 = vget_low_s16(tt2); /*pa9 pb9 pc9 pd9 */
+ s10 = vget_low_s16(tt3); /*pa10 pb10 pc10 pd10 */
+ s11 = vget_high_s16(tt0); /*pa11 pb11 pc11 pd11 */
+ s12 = vget_high_s16(tt1); /*pa12 pb12 pc12 pd12 */
+ s13 = vget_high_s16(tt2); /*pa13 pb13 pc13 pd13 */
+ s14 = vget_high_s16(tt3); /*pa14 pb14 pc14 pd14 */
+
+ res0 = wiener_convolve8_horiz_4x8(
+ s0, s1, s2, s3, s4, s5, s6, filter_x_tmp, bd, conv_params->round_0);
+ res1 = wiener_convolve8_horiz_4x8(
+ s1, s2, s3, s4, s5, s6, s7, filter_x_tmp, bd, conv_params->round_0);
+ res2 = wiener_convolve8_horiz_4x8(
+ s2, s3, s4, s5, s6, s7, s8, filter_x_tmp, bd, conv_params->round_0);
+ res3 = wiener_convolve8_horiz_4x8(
+ s3, s4, s5, s6, s7, s8, s9, filter_x_tmp, bd, conv_params->round_0);
+ res4 =
+ wiener_convolve8_horiz_4x8(s4, s5, s6, s7, s8, s9, s10,
+ filter_x_tmp, bd, conv_params->round_0);
+ res5 =
+ wiener_convolve8_horiz_4x8(s5, s6, s7, s8, s9, s10, s11,
+ filter_x_tmp, bd, conv_params->round_0);
+ res6 =
+ wiener_convolve8_horiz_4x8(s6, s7, s8, s9, s10, s11, s12,
+ filter_x_tmp, bd, conv_params->round_0);
+ res7 =
+ wiener_convolve8_horiz_4x8(s7, s8, s9, s10, s11, s12, s13,
+ filter_x_tmp, bd, conv_params->round_0);
+
+ transpose_u16_4x8(&res0, &res1, &res2, &res3, &res4, &res5, &res6,
+ &res7, &d0, &d1, &d2, &d3);
+
+ store_u16_8x4(d_tmp, MAX_SB_SIZE, d0, d1, d2, d3);
+
+ s0 = s8;
+ s1 = s9;
+ s2 = s10;
+ s3 = s11;
+ s4 = s12;
+ s5 = s13;
+ s6 = s14;
+ s += 8;
+ d_tmp += 8;
+ width -= 8;
+ } while (width > 0);
+
+ src_ptr += 4 * src_stride;
+ dst_ptr += 4 * MAX_SB_SIZE;
+ height -= 4;
+ } while (height > 0);
+#else
+ uint8x8_t temp_0, t4, t5, t6, t7;
+
+ do {
+ __builtin_prefetch(src_ptr);
+
+ t0 = vld1_u8(src_ptr); // a0 a1 a2 a3 a4 a5 a6 a7
+
+ __builtin_prefetch(dst_ptr);
+
+ s = src_ptr + 8;
+ d_tmp = dst_ptr;
+ width = w;
+
+ do {
+ t7 = vld1_u8(s); // a8 a9 a10 a11 a12 a13 a14 a15
+ temp_0 = t0;
+ t0 = t7;
+
+ t1 = vext_u8(temp_0, t7, 1); // a1 a2 a3 a4 a5 a6 a7 a8
+ t2 = vext_u8(temp_0, t7, 2); // a2 a3 a4 a5 a6 a7 a8 a9
+ t3 = vext_u8(temp_0, t7, 3); // a3 a4 a5 a6 a7 a8 a9 a10
+ t4 = vext_u8(temp_0, t7, 4); // a4 a5 a6 a7 a8 a9 a10 a11
+ t5 = vext_u8(temp_0, t7, 5); // a5 a6 a7 a8 a9 a10 a11 a12
+ t6 = vext_u8(temp_0, t7, 6); // a6 a7 a8 a9 a10 a11 a12 a13
+ t7 = vext_u8(temp_0, t7, 7); // a7 a8 a9 a10 a11 a12 a13 a14
+
+ tt0 = vreinterpretq_s16_u16(vaddl_u8(temp_0, t6));
+ tt1 = vreinterpretq_s16_u16(vaddl_u8(t1, t5));
+ tt2 = vreinterpretq_s16_u16(vaddl_u8(t2, t4));
+ tt3 = vreinterpretq_s16_u16(vmovl_u8(t3));
+ d0 = wiener_convolve8_horiz_8x8(tt0, tt1, tt2, tt3, filter_x_tmp, bd,
+ conv_params->round_0);
+
+ vst1q_u16(d_tmp, d0);
+
+ s += 8;
+ d_tmp += 8;
+ width -= 8;
+ } while (width > 0);
+
+ src_ptr += src_stride;
+ dst_ptr += MAX_SB_SIZE;
+ height -= 1;
+ } while (height > 0);
+#endif
+ }
+
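+  // Vertical pass: filter the 16-bit intermediate rows in temp, 8 columns
+  // at a time, and write the final 8-bit pixels to dst.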
+ {
+ int16x8_t s0, s1, s2, s3, s4, s5, s6, s7;
+ uint8x8_t t0;
+#if defined(__aarch64__)
+ int16x8_t s8, s9, s10;
+ uint8x8_t t1, t2, t3;
+#endif
+ int16_t *src_tmp_ptr, *s;
+ uint8_t *dst_tmp_ptr;
+ height = h;
+ width = w;
+ src_tmp_ptr = (int16_t *)temp;
+ dst_tmp_ptr = dst;
+ src_stride = MAX_SB_SIZE;
+
+ do {
+ s = src_tmp_ptr;
+ s0 = vld1q_s16(s);
+ s += src_stride;
+ s1 = vld1q_s16(s);
+ s += src_stride;
+ s2 = vld1q_s16(s);
+ s += src_stride;
+ s3 = vld1q_s16(s);
+ s += src_stride;
+ s4 = vld1q_s16(s);
+ s += src_stride;
+ s5 = vld1q_s16(s);
+ s += src_stride;
+ s6 = vld1q_s16(s);
+ s += src_stride;
+ d = dst_tmp_ptr;
+ height = h;
+
+#if defined(__aarch64__)
+ do {
+ __builtin_prefetch(dst_tmp_ptr + 0 * dst_stride);
+ __builtin_prefetch(dst_tmp_ptr + 1 * dst_stride);
+ __builtin_prefetch(dst_tmp_ptr + 2 * dst_stride);
+ __builtin_prefetch(dst_tmp_ptr + 3 * dst_stride);
+
+ s7 = vld1q_s16(s);
+ s += src_stride;
+ s8 = vld1q_s16(s);
+ s += src_stride;
+ s9 = vld1q_s16(s);
+ s += src_stride;
+ s10 = vld1q_s16(s);
+ s += src_stride;
+
+ t0 = wiener_convolve8_vert_4x8(s0, s1, s2, s3, s4, s5, s6, filter_y_tmp,
+ bd, conv_params->round_1);
+ t1 = wiener_convolve8_vert_4x8(s1, s2, s3, s4, s5, s6, s7, filter_y_tmp,
+ bd, conv_params->round_1);
+ t2 = wiener_convolve8_vert_4x8(s2, s3, s4, s5, s6, s7, s8, filter_y_tmp,
+ bd, conv_params->round_1);
+ t3 = wiener_convolve8_vert_4x8(s3, s4, s5, s6, s7, s8, s9, filter_y_tmp,
+ bd, conv_params->round_1);
+
+ vst1_u8(d, t0);
+ d += dst_stride;
+ vst1_u8(d, t1);
+ d += dst_stride;
+ vst1_u8(d, t2);
+ d += dst_stride;
+ vst1_u8(d, t3);
+ d += dst_stride;
+
+ s0 = s4;
+ s1 = s5;
+ s2 = s6;
+ s3 = s7;
+ s4 = s8;
+ s5 = s9;
+ s6 = s10;
+ height -= 4;
+ } while (height > 3);
+
+ if (height != 0) {
+ __builtin_prefetch(dst_tmp_ptr + 0 * dst_stride);
+ __builtin_prefetch(dst_tmp_ptr + 1 * dst_stride);
+
+ do {
+ s7 = vld1q_s16(s);
+ s += src_stride;
+
+ t0 =
+ wiener_convolve8_vert_4x8(s0, s1, s2, s3, s4, s5, s6,
+ filter_y_tmp, bd, conv_params->round_1);
+ vst1_u8(d, t0);
+ d += dst_stride;
+
+ s0 = s1;
+ s1 = s2;
+ s2 = s3;
+ s3 = s4;
+ s4 = s5;
+ s5 = s6;
+ s6 = s7;
+ height -= 1;
+ } while (height > 0);
+ }
+
+ src_tmp_ptr += 8;
+ dst_tmp_ptr += 8;
+
+ w -= 8;
+ } while (w > 0);
+#else
+ do {
+ __builtin_prefetch(dst_tmp_ptr + 0 * dst_stride);
+
+ s7 = vld1q_s16(s);
+ s += src_stride;
+
+ t0 = wiener_convolve8_vert_4x8(s0, s1, s2, s3, s4, s5, s6, filter_y_tmp,
+ bd, conv_params->round_1);
+
+ vst1_u8(d, t0);
+ d += dst_stride;
+
+ s0 = s1;
+ s1 = s2;
+ s2 = s3;
+ s3 = s4;
+ s4 = s5;
+ s5 = s6;
+ s6 = s7;
+ height -= 1;
+ } while (height > 0);
+
+ src_tmp_ptr += 8;
+ dst_tmp_ptr += 8;
+
+ w -= 8;
+ } while (w > 0);
+#endif
+ }
+}