path: root/media/libvpx/libvpx/vp9/encoder/arm/neon
Diffstat (limited to 'media/libvpx/libvpx/vp9/encoder/arm/neon')
-rw-r--r--  media/libvpx/libvpx/vp9/encoder/arm/neon/vp9_dct_neon.c                    2173
-rw-r--r--  media/libvpx/libvpx/vp9/encoder/arm/neon/vp9_denoiser_neon.c                356
-rw-r--r--  media/libvpx/libvpx/vp9/encoder/arm/neon/vp9_diamond_search_sad_neon.c      296
-rw-r--r--  media/libvpx/libvpx/vp9/encoder/arm/neon/vp9_error_neon.c                   102
-rw-r--r--  media/libvpx/libvpx/vp9/encoder/arm/neon/vp9_frame_scale_neon.c             844
-rw-r--r--  media/libvpx/libvpx/vp9/encoder/arm/neon/vp9_highbd_error_neon.c             49
-rw-r--r--  media/libvpx/libvpx/vp9/encoder/arm/neon/vp9_highbd_temporal_filter_neon.c  872
-rw-r--r--  media/libvpx/libvpx/vp9/encoder/arm/neon/vp9_quantize_neon.c                408
-rw-r--r--  media/libvpx/libvpx/vp9/encoder/arm/neon/vp9_temporal_filter_neon.c         849
9 files changed, 5949 insertions, 0 deletions
diff --git a/media/libvpx/libvpx/vp9/encoder/arm/neon/vp9_dct_neon.c b/media/libvpx/libvpx/vp9/encoder/arm/neon/vp9_dct_neon.c
new file mode 100644
index 0000000000..997b5477e1
--- /dev/null
+++ b/media/libvpx/libvpx/vp9/encoder/arm/neon/vp9_dct_neon.c
@@ -0,0 +1,2173 @@
+/*
+ * Copyright (c) 2022 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <arm_neon.h>
+
+#include "./vpx_config.h"
+#include "./vp9_rtcd.h"
+#include "./vpx_dsp_rtcd.h"
+
+#include "vpx_dsp/txfm_common.h"
+#include "vpx_dsp/arm/mem_neon.h"
+#include "vpx_dsp/arm/transpose_neon.h"
+#include "vpx_dsp/arm/fdct_neon.h"
+#include "vpx_dsp/arm/fdct4x4_neon.h"
+#include "vpx_dsp/arm/fdct8x8_neon.h"
+#include "vpx_dsp/arm/fdct16x16_neon.h"
+
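+// Load a 4x4 block of residuals, pre-multiplied by 16 (<< 4), and bump the
+// first element by one when it is non-zero, mirroring the scalar/SSE
+// first-pass input conditioning.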
+static INLINE void load_buffer_4x4(const int16_t *input, int16x8_t *in,
+ int stride) {
+ // { 0, 1, 1, 1 };
+ const int16x4_t nonzero_bias_a = vext_s16(vdup_n_s16(0), vdup_n_s16(1), 3);
+ // { 1, 0, 0, 0 };
+ const int16x4_t nonzero_bias_b = vext_s16(vdup_n_s16(1), vdup_n_s16(0), 3);
+ int16x4_t mask;
+
+ int16x4_t input_0 = vshl_n_s16(vld1_s16(input + 0 * stride), 4);
+ int16x4_t input_1 = vshl_n_s16(vld1_s16(input + 1 * stride), 4);
+ int16x4_t input_2 = vshl_n_s16(vld1_s16(input + 2 * stride), 4);
+ int16x4_t input_3 = vshl_n_s16(vld1_s16(input + 3 * stride), 4);
+
+  // As in the SSE implementation, use a mask instead of an 'if' branch to
+  // add one to the first element when it is non-zero.
+ mask = vreinterpret_s16_u16(vceq_s16(input_0, nonzero_bias_a));
+ input_0 = vadd_s16(input_0, mask);
+ input_0 = vadd_s16(input_0, nonzero_bias_b);
+
+ in[0] = vcombine_s16(input_0, input_1);
+ in[1] = vcombine_s16(input_2, input_3);
+}
+
+static INLINE void write_buffer_4x4(tran_low_t *output, int16x8_t *res) {
+ const int16x8_t one_s16 = vdupq_n_s16(1);
+ res[0] = vaddq_s16(res[0], one_s16);
+ res[1] = vaddq_s16(res[1], one_s16);
+ res[0] = vshrq_n_s16(res[0], 2);
+ res[1] = vshrq_n_s16(res[1], 2);
+ store_s16q_to_tran_low(output + 0 * 8, res[0]);
+ store_s16q_to_tran_low(output + 1 * 8, res[1]);
+}
+
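+// 4-point forward ADST applied to two rows per int16x8_t register. The
+// sinpi_*_9 constants (from vpx_dsp/txfm_common.h) are sin(k * pi / 9) in
+// Q14 (DCT_CONST_BITS) fixed point, so vrshrn_n_s32(x, DCT_CONST_BITS) below
+// is the vector form of fdct_round_shift().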
+static INLINE void fadst4x4_neon(int16x8_t *in) {
+ int32x4_t u[4], t[4];
+ int16x4_t s[4], out[4];
+
+ s[0] = vget_low_s16(in[0]); // | x_00 | x_01 | x_02 | x_03 |
+ s[1] = vget_high_s16(in[0]); // | x_10 | x_11 | x_12 | x_13 |
+ s[2] = vget_low_s16(in[1]); // | x_20 | x_21 | x_22 | x_23 |
+ s[3] = vget_high_s16(in[1]); // | x_30 | x_31 | x_32 | x_33 |
+
+ // Must expand all elements to s32. See 'needs32' comment in fwd_txfm.c.
+ // t0 = s0 * sinpi_1_9 + s1 * sinpi_2_9 + s3 * sinpi_4_9
+ t[0] = vmull_n_s16(s[0], sinpi_1_9);
+ t[0] = vmlal_n_s16(t[0], s[1], sinpi_2_9);
+ t[0] = vmlal_n_s16(t[0], s[3], sinpi_4_9);
+
+ // t1 = (s0 + s1) * sinpi_3_9 - s3 * sinpi_3_9
+ t[1] = vmull_n_s16(s[0], sinpi_3_9);
+ t[1] = vmlal_n_s16(t[1], s[1], sinpi_3_9);
+ t[1] = vmlsl_n_s16(t[1], s[3], sinpi_3_9);
+
+ // t2 = s0 * sinpi_4_9 - s1* sinpi_1_9 + s3 * sinpi_2_9
+ t[2] = vmull_n_s16(s[0], sinpi_4_9);
+ t[2] = vmlsl_n_s16(t[2], s[1], sinpi_1_9);
+ t[2] = vmlal_n_s16(t[2], s[3], sinpi_2_9);
+
+ // t3 = s2 * sinpi_3_9
+ t[3] = vmull_n_s16(s[2], sinpi_3_9);
+
+ /*
+ * u0 = t0 + t3
+ * u1 = t1
+ * u2 = t2 - t3
+ * u3 = t2 - t0 + t3
+ */
+ u[0] = vaddq_s32(t[0], t[3]);
+ u[1] = t[1];
+ u[2] = vsubq_s32(t[2], t[3]);
+ u[3] = vaddq_s32(vsubq_s32(t[2], t[0]), t[3]);
+
+ // fdct_round_shift
+ out[0] = vrshrn_n_s32(u[0], DCT_CONST_BITS);
+ out[1] = vrshrn_n_s32(u[1], DCT_CONST_BITS);
+ out[2] = vrshrn_n_s32(u[2], DCT_CONST_BITS);
+ out[3] = vrshrn_n_s32(u[3], DCT_CONST_BITS);
+
+ transpose_s16_4x4d(&out[0], &out[1], &out[2], &out[3]);
+
+ in[0] = vcombine_s16(out[0], out[1]);
+ in[1] = vcombine_s16(out[2], out[3]);
+}
+
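+// 4x4 forward hybrid transform: DCT_DCT falls through to the plain NEON
+// fdct; the remaining modes chain one ADST stage and one DCT stage and let
+// write_buffer_4x4 apply the final round-and-shift by 2.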
+void vp9_fht4x4_neon(const int16_t *input, tran_low_t *output, int stride,
+ int tx_type) {
+ int16x8_t in[2];
+
+ switch (tx_type) {
+ case DCT_DCT: vpx_fdct4x4_neon(input, output, stride); break;
+ case ADST_DCT:
+ load_buffer_4x4(input, in, stride);
+ fadst4x4_neon(in);
+ // pass1 variant is not accurate enough
+ vpx_fdct4x4_pass2_neon((int16x4_t *)in);
+ write_buffer_4x4(output, in);
+ break;
+ case DCT_ADST:
+ load_buffer_4x4(input, in, stride);
+ // pass1 variant is not accurate enough
+ vpx_fdct4x4_pass2_neon((int16x4_t *)in);
+ fadst4x4_neon(in);
+ write_buffer_4x4(output, in);
+ break;
+ default:
+ assert(tx_type == ADST_ADST);
+ load_buffer_4x4(input, in, stride);
+ fadst4x4_neon(in);
+ fadst4x4_neon(in);
+ write_buffer_4x4(output, in);
+ break;
+ }
+}
+
+static INLINE void load_buffer_8x8(const int16_t *input, int16x8_t *in,
+ int stride) {
+ in[0] = vshlq_n_s16(vld1q_s16(input + 0 * stride), 2);
+ in[1] = vshlq_n_s16(vld1q_s16(input + 1 * stride), 2);
+ in[2] = vshlq_n_s16(vld1q_s16(input + 2 * stride), 2);
+ in[3] = vshlq_n_s16(vld1q_s16(input + 3 * stride), 2);
+ in[4] = vshlq_n_s16(vld1q_s16(input + 4 * stride), 2);
+ in[5] = vshlq_n_s16(vld1q_s16(input + 5 * stride), 2);
+ in[6] = vshlq_n_s16(vld1q_s16(input + 6 * stride), 2);
+ in[7] = vshlq_n_s16(vld1q_s16(input + 7 * stride), 2);
+}
+
+/* Right shift with rounding.
+ * First extract the sign bit (bit 15) of each element.
+ * If bit == 1, this computes (temp_out[j] + (temp_out[j] < 0)) >> 1.
+ * If bit == 2, it essentially computes the expression:
+ *
+ * out[j * 16 + i] = (temp_out[j] + 1 + (temp_out[j] < 0)) >> 2;
+ *
+ * for each row.
+ */
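+// e.g. with bit == 2 and temp_out[j] == -5: sign is -1, so the vector path
+// computes (-5 + 1 - (-1)) >> 2 = -3 >> 2 = -1, matching the scalar
+// expression (-5 + 1 + 1) >> 2 = -1.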
+static INLINE void right_shift_8x8(int16x8_t *res, const int bit) {
+ int16x8_t sign0 = vshrq_n_s16(res[0], 15);
+ int16x8_t sign1 = vshrq_n_s16(res[1], 15);
+ int16x8_t sign2 = vshrq_n_s16(res[2], 15);
+ int16x8_t sign3 = vshrq_n_s16(res[3], 15);
+ int16x8_t sign4 = vshrq_n_s16(res[4], 15);
+ int16x8_t sign5 = vshrq_n_s16(res[5], 15);
+ int16x8_t sign6 = vshrq_n_s16(res[6], 15);
+ int16x8_t sign7 = vshrq_n_s16(res[7], 15);
+
+ if (bit == 2) {
+ const int16x8_t const_rounding = vdupq_n_s16(1);
+ res[0] = vaddq_s16(res[0], const_rounding);
+ res[1] = vaddq_s16(res[1], const_rounding);
+ res[2] = vaddq_s16(res[2], const_rounding);
+ res[3] = vaddq_s16(res[3], const_rounding);
+ res[4] = vaddq_s16(res[4], const_rounding);
+ res[5] = vaddq_s16(res[5], const_rounding);
+ res[6] = vaddq_s16(res[6], const_rounding);
+ res[7] = vaddq_s16(res[7], const_rounding);
+ }
+
+ res[0] = vsubq_s16(res[0], sign0);
+ res[1] = vsubq_s16(res[1], sign1);
+ res[2] = vsubq_s16(res[2], sign2);
+ res[3] = vsubq_s16(res[3], sign3);
+ res[4] = vsubq_s16(res[4], sign4);
+ res[5] = vsubq_s16(res[5], sign5);
+ res[6] = vsubq_s16(res[6], sign6);
+ res[7] = vsubq_s16(res[7], sign7);
+
+ if (bit == 1) {
+ res[0] = vshrq_n_s16(res[0], 1);
+ res[1] = vshrq_n_s16(res[1], 1);
+ res[2] = vshrq_n_s16(res[2], 1);
+ res[3] = vshrq_n_s16(res[3], 1);
+ res[4] = vshrq_n_s16(res[4], 1);
+ res[5] = vshrq_n_s16(res[5], 1);
+ res[6] = vshrq_n_s16(res[6], 1);
+ res[7] = vshrq_n_s16(res[7], 1);
+ } else {
+ res[0] = vshrq_n_s16(res[0], 2);
+ res[1] = vshrq_n_s16(res[1], 2);
+ res[2] = vshrq_n_s16(res[2], 2);
+ res[3] = vshrq_n_s16(res[3], 2);
+ res[4] = vshrq_n_s16(res[4], 2);
+ res[5] = vshrq_n_s16(res[5], 2);
+ res[6] = vshrq_n_s16(res[6], 2);
+ res[7] = vshrq_n_s16(res[7], 2);
+ }
+}
+
+static INLINE void write_buffer_8x8(tran_low_t *output, int16x8_t *res,
+ int stride) {
+ store_s16q_to_tran_low(output + 0 * stride, res[0]);
+ store_s16q_to_tran_low(output + 1 * stride, res[1]);
+ store_s16q_to_tran_low(output + 2 * stride, res[2]);
+ store_s16q_to_tran_low(output + 3 * stride, res[3]);
+ store_s16q_to_tran_low(output + 4 * stride, res[4]);
+ store_s16q_to_tran_low(output + 5 * stride, res[5]);
+ store_s16q_to_tran_low(output + 6 * stride, res[6]);
+ store_s16q_to_tran_low(output + 7 * stride, res[7]);
+}
+
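+// 8-point forward ADST on 8 columns, each row split into low/high
+// int16x4_t halves. Inputs are consumed in the fadst8 order
+// { in[7], in[0], in[5], in[2], in[3], in[4], in[1], in[6] }.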
+static INLINE void fadst8x8_neon(int16x8_t *in) {
+ int16x4_t x_lo[8], x_hi[8];
+ int32x4_t s_lo[8], s_hi[8];
+ int32x4_t t_lo[8], t_hi[8];
+
+ x_lo[0] = vget_low_s16(in[7]);
+ x_hi[0] = vget_high_s16(in[7]);
+ x_lo[1] = vget_low_s16(in[0]);
+ x_hi[1] = vget_high_s16(in[0]);
+ x_lo[2] = vget_low_s16(in[5]);
+ x_hi[2] = vget_high_s16(in[5]);
+ x_lo[3] = vget_low_s16(in[2]);
+ x_hi[3] = vget_high_s16(in[2]);
+ x_lo[4] = vget_low_s16(in[3]);
+ x_hi[4] = vget_high_s16(in[3]);
+ x_lo[5] = vget_low_s16(in[4]);
+ x_hi[5] = vget_high_s16(in[4]);
+ x_lo[6] = vget_low_s16(in[1]);
+ x_hi[6] = vget_high_s16(in[1]);
+ x_lo[7] = vget_low_s16(in[6]);
+ x_hi[7] = vget_high_s16(in[6]);
+
+ // stage 1
+ // s0 = cospi_2_64 * x0 + cospi_30_64 * x1;
+ // s1 = cospi_30_64 * x0 - cospi_2_64 * x1;
+ butterfly_two_coeff_s16_s32_noround(x_lo[0], x_hi[0], x_lo[1], x_hi[1],
+ cospi_2_64, cospi_30_64, &s_lo[0],
+ &s_hi[0], &s_lo[1], &s_hi[1]);
+
+ // s2 = cospi_10_64 * x2 + cospi_22_64 * x3;
+ // s3 = cospi_22_64 * x2 - cospi_10_64 * x3;
+ butterfly_two_coeff_s16_s32_noround(x_lo[2], x_hi[2], x_lo[3], x_hi[3],
+ cospi_10_64, cospi_22_64, &s_lo[2],
+ &s_hi[2], &s_lo[3], &s_hi[3]);
+
+ // s4 = cospi_18_64 * x4 + cospi_14_64 * x5;
+ // s5 = cospi_14_64 * x4 - cospi_18_64 * x5;
+ butterfly_two_coeff_s16_s32_noround(x_lo[4], x_hi[4], x_lo[5], x_hi[5],
+ cospi_18_64, cospi_14_64, &s_lo[4],
+ &s_hi[4], &s_lo[5], &s_hi[5]);
+
+ // s6 = cospi_26_64 * x6 + cospi_6_64 * x7;
+ // s7 = cospi_6_64 * x6 - cospi_26_64 * x7;
+ butterfly_two_coeff_s16_s32_noround(x_lo[6], x_hi[6], x_lo[7], x_hi[7],
+ cospi_26_64, cospi_6_64, &s_lo[6],
+ &s_hi[6], &s_lo[7], &s_hi[7]);
+
+ // fdct_round_shift
+ t_lo[0] = vrshrq_n_s32(vaddq_s32(s_lo[0], s_lo[4]), DCT_CONST_BITS);
+ t_hi[0] = vrshrq_n_s32(vaddq_s32(s_hi[0], s_hi[4]), DCT_CONST_BITS);
+ t_lo[1] = vrshrq_n_s32(vaddq_s32(s_lo[1], s_lo[5]), DCT_CONST_BITS);
+ t_hi[1] = vrshrq_n_s32(vaddq_s32(s_hi[1], s_hi[5]), DCT_CONST_BITS);
+ t_lo[2] = vrshrq_n_s32(vaddq_s32(s_lo[2], s_lo[6]), DCT_CONST_BITS);
+ t_hi[2] = vrshrq_n_s32(vaddq_s32(s_hi[2], s_hi[6]), DCT_CONST_BITS);
+ t_lo[3] = vrshrq_n_s32(vaddq_s32(s_lo[3], s_lo[7]), DCT_CONST_BITS);
+ t_hi[3] = vrshrq_n_s32(vaddq_s32(s_hi[3], s_hi[7]), DCT_CONST_BITS);
+ t_lo[4] = vrshrq_n_s32(vsubq_s32(s_lo[0], s_lo[4]), DCT_CONST_BITS);
+ t_hi[4] = vrshrq_n_s32(vsubq_s32(s_hi[0], s_hi[4]), DCT_CONST_BITS);
+ t_lo[5] = vrshrq_n_s32(vsubq_s32(s_lo[1], s_lo[5]), DCT_CONST_BITS);
+ t_hi[5] = vrshrq_n_s32(vsubq_s32(s_hi[1], s_hi[5]), DCT_CONST_BITS);
+ t_lo[6] = vrshrq_n_s32(vsubq_s32(s_lo[2], s_lo[6]), DCT_CONST_BITS);
+ t_hi[6] = vrshrq_n_s32(vsubq_s32(s_hi[2], s_hi[6]), DCT_CONST_BITS);
+ t_lo[7] = vrshrq_n_s32(vsubq_s32(s_lo[3], s_lo[7]), DCT_CONST_BITS);
+ t_hi[7] = vrshrq_n_s32(vsubq_s32(s_hi[3], s_hi[7]), DCT_CONST_BITS);
+
+ // stage 2
+ s_lo[0] = t_lo[0];
+ s_hi[0] = t_hi[0];
+ s_lo[1] = t_lo[1];
+ s_hi[1] = t_hi[1];
+ s_lo[2] = t_lo[2];
+ s_hi[2] = t_hi[2];
+ s_lo[3] = t_lo[3];
+ s_hi[3] = t_hi[3];
+ // s4 = cospi_8_64 * x4 + cospi_24_64 * x5;
+ // s5 = cospi_24_64 * x4 - cospi_8_64 * x5;
+ butterfly_two_coeff_s32_noround(t_lo[4], t_hi[4], t_lo[5], t_hi[5],
+ cospi_8_64, cospi_24_64, &s_lo[4], &s_hi[4],
+ &s_lo[5], &s_hi[5]);
+
+ // s6 = -cospi_24_64 * x6 + cospi_8_64 * x7;
+ // s7 = cospi_8_64 * x6 + cospi_24_64 * x7;
+ butterfly_two_coeff_s32_noround(t_lo[6], t_hi[6], t_lo[7], t_hi[7],
+ -cospi_24_64, cospi_8_64, &s_lo[6], &s_hi[6],
+ &s_lo[7], &s_hi[7]);
+
+ // fdct_round_shift
+ // s0 + s2
+ t_lo[0] = vaddq_s32(s_lo[0], s_lo[2]);
+ t_hi[0] = vaddq_s32(s_hi[0], s_hi[2]);
+ // s1 + s3
+ t_lo[1] = vaddq_s32(s_lo[1], s_lo[3]);
+ t_hi[1] = vaddq_s32(s_hi[1], s_hi[3]);
+ // s0 - s2
+ t_lo[2] = vsubq_s32(s_lo[0], s_lo[2]);
+ t_hi[2] = vsubq_s32(s_hi[0], s_hi[2]);
+ // s1 - s3
+ t_lo[3] = vsubq_s32(s_lo[1], s_lo[3]);
+ t_hi[3] = vsubq_s32(s_hi[1], s_hi[3]);
+ // s4 + s6
+ t_lo[4] = vrshrq_n_s32(vaddq_s32(s_lo[4], s_lo[6]), DCT_CONST_BITS);
+ t_hi[4] = vrshrq_n_s32(vaddq_s32(s_hi[4], s_hi[6]), DCT_CONST_BITS);
+ // s5 + s7
+ t_lo[5] = vrshrq_n_s32(vaddq_s32(s_lo[5], s_lo[7]), DCT_CONST_BITS);
+ t_hi[5] = vrshrq_n_s32(vaddq_s32(s_hi[5], s_hi[7]), DCT_CONST_BITS);
+ // s4 - s6
+ t_lo[6] = vrshrq_n_s32(vsubq_s32(s_lo[4], s_lo[6]), DCT_CONST_BITS);
+ t_hi[6] = vrshrq_n_s32(vsubq_s32(s_hi[4], s_hi[6]), DCT_CONST_BITS);
+ // s5 - s7
+ t_lo[7] = vrshrq_n_s32(vsubq_s32(s_lo[5], s_lo[7]), DCT_CONST_BITS);
+ t_hi[7] = vrshrq_n_s32(vsubq_s32(s_hi[5], s_hi[7]), DCT_CONST_BITS);
+
+ // stage 3
+ // cospi_16_64 * (x2 + x3)
+ // cospi_16_64 * (x2 - x3)
+ butterfly_one_coeff_s32_noround(t_lo[2], t_hi[2], t_lo[3], t_hi[3],
+ cospi_16_64, &s_lo[2], &s_hi[2], &s_lo[3],
+ &s_hi[3]);
+
+ // cospi_16_64 * (x6 + x7)
+  // cospi_16_64 * (x6 - x7)
+ butterfly_one_coeff_s32_noround(t_lo[6], t_hi[6], t_lo[7], t_hi[7],
+ cospi_16_64, &s_lo[6], &s_hi[6], &s_lo[7],
+ &s_hi[7]);
+
+ // final fdct_round_shift
+ x_lo[2] = vrshrn_n_s32(s_lo[2], DCT_CONST_BITS);
+ x_hi[2] = vrshrn_n_s32(s_hi[2], DCT_CONST_BITS);
+ x_lo[3] = vrshrn_n_s32(s_lo[3], DCT_CONST_BITS);
+ x_hi[3] = vrshrn_n_s32(s_hi[3], DCT_CONST_BITS);
+ x_lo[6] = vrshrn_n_s32(s_lo[6], DCT_CONST_BITS);
+ x_hi[6] = vrshrn_n_s32(s_hi[6], DCT_CONST_BITS);
+ x_lo[7] = vrshrn_n_s32(s_lo[7], DCT_CONST_BITS);
+ x_hi[7] = vrshrn_n_s32(s_hi[7], DCT_CONST_BITS);
+
+ // x0, x1, x4, x5 narrow down to 16-bits directly
+ x_lo[0] = vmovn_s32(t_lo[0]);
+ x_hi[0] = vmovn_s32(t_hi[0]);
+ x_lo[1] = vmovn_s32(t_lo[1]);
+ x_hi[1] = vmovn_s32(t_hi[1]);
+ x_lo[4] = vmovn_s32(t_lo[4]);
+ x_hi[4] = vmovn_s32(t_hi[4]);
+ x_lo[5] = vmovn_s32(t_lo[5]);
+ x_hi[5] = vmovn_s32(t_hi[5]);
+
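+  // Final fadst8 output ordering and sign flips:
+  // { x0, -x4, x6, -x2, x3, -x7, x5, -x1 }.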
+ in[0] = vcombine_s16(x_lo[0], x_hi[0]);
+ in[1] = vnegq_s16(vcombine_s16(x_lo[4], x_hi[4]));
+ in[2] = vcombine_s16(x_lo[6], x_hi[6]);
+ in[3] = vnegq_s16(vcombine_s16(x_lo[2], x_hi[2]));
+ in[4] = vcombine_s16(x_lo[3], x_hi[3]);
+ in[5] = vnegq_s16(vcombine_s16(x_lo[7], x_hi[7]));
+ in[6] = vcombine_s16(x_lo[5], x_hi[5]);
+ in[7] = vnegq_s16(vcombine_s16(x_lo[1], x_hi[1]));
+
+ transpose_s16_8x8(&in[0], &in[1], &in[2], &in[3], &in[4], &in[5], &in[6],
+ &in[7]);
+}
+
+void vp9_fht8x8_neon(const int16_t *input, tran_low_t *output, int stride,
+ int tx_type) {
+ int16x8_t in[8];
+
+ switch (tx_type) {
+ case DCT_DCT: vpx_fdct8x8_neon(input, output, stride); break;
+ case ADST_DCT:
+ load_buffer_8x8(input, in, stride);
+ fadst8x8_neon(in);
+ // pass1 variant is not accurate enough
+ vpx_fdct8x8_pass2_neon(in);
+ right_shift_8x8(in, 1);
+ write_buffer_8x8(output, in, 8);
+ break;
+ case DCT_ADST:
+ load_buffer_8x8(input, in, stride);
+ // pass1 variant is not accurate enough
+ vpx_fdct8x8_pass2_neon(in);
+ fadst8x8_neon(in);
+ right_shift_8x8(in, 1);
+ write_buffer_8x8(output, in, 8);
+ break;
+ default:
+ assert(tx_type == ADST_ADST);
+ load_buffer_8x8(input, in, stride);
+ fadst8x8_neon(in);
+ fadst8x8_neon(in);
+ right_shift_8x8(in, 1);
+ write_buffer_8x8(output, in, 8);
+ break;
+ }
+}
+
+static INLINE void load_buffer_16x16(const int16_t *input, int16x8_t *in0,
+ int16x8_t *in1, int stride) {
+ // load first 8 columns
+ load_buffer_8x8(input, in0, stride);
+ load_buffer_8x8(input + 8 * stride, in0 + 8, stride);
+
+ input += 8;
+ // load second 8 columns
+ load_buffer_8x8(input, in1, stride);
+ load_buffer_8x8(input + 8 * stride, in1 + 8, stride);
+}
+
+static INLINE void write_buffer_16x16(tran_low_t *output, int16x8_t *in0,
+ int16x8_t *in1, int stride) {
+ // write first 8 columns
+ write_buffer_8x8(output, in0, stride);
+ write_buffer_8x8(output + 8 * stride, in0 + 8, stride);
+
+ // write second 8 columns
+ output += 8;
+ write_buffer_8x8(output, in1, stride);
+ write_buffer_8x8(output + 8 * stride, in1 + 8, stride);
+}
+
+static INLINE void right_shift_16x16(int16x8_t *res0, int16x8_t *res1) {
+ // perform rounding operations
+ right_shift_8x8(res0, 2);
+ right_shift_8x8(res0 + 8, 2);
+ right_shift_8x8(res1, 2);
+ right_shift_8x8(res1 + 8, 2);
+}
+
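+// 1-D 16-point DCT over 8 columns. The even-index outputs are the 8-point
+// DCT of in[i] + in[15 - i] (reusing the 8x8 pass-2 kernel); the odd-index
+// outputs are derived from in[i] - in[15 - i] in steps 2-6.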
+static void fdct16_8col(int16x8_t *in) {
+ // perform 16x16 1-D DCT for 8 columns
+ int16x8_t i[8], s1[8], s2[8], s3[8], t[8];
+ int16x4_t t_lo[8], t_hi[8];
+ int32x4_t u_lo[8], u_hi[8];
+
+ // stage 1
+ i[0] = vaddq_s16(in[0], in[15]);
+ i[1] = vaddq_s16(in[1], in[14]);
+ i[2] = vaddq_s16(in[2], in[13]);
+ i[3] = vaddq_s16(in[3], in[12]);
+ i[4] = vaddq_s16(in[4], in[11]);
+ i[5] = vaddq_s16(in[5], in[10]);
+ i[6] = vaddq_s16(in[6], in[9]);
+ i[7] = vaddq_s16(in[7], in[8]);
+
+ // pass1 variant is not accurate enough
+ vpx_fdct8x8_pass2_neon(i);
+ transpose_s16_8x8(&i[0], &i[1], &i[2], &i[3], &i[4], &i[5], &i[6], &i[7]);
+
+ // step 2
+ s1[0] = vsubq_s16(in[7], in[8]);
+ s1[1] = vsubq_s16(in[6], in[9]);
+ s1[2] = vsubq_s16(in[5], in[10]);
+ s1[3] = vsubq_s16(in[4], in[11]);
+ s1[4] = vsubq_s16(in[3], in[12]);
+ s1[5] = vsubq_s16(in[2], in[13]);
+ s1[6] = vsubq_s16(in[1], in[14]);
+ s1[7] = vsubq_s16(in[0], in[15]);
+
+ t[2] = vsubq_s16(s1[5], s1[2]);
+ t[3] = vsubq_s16(s1[4], s1[3]);
+ t[4] = vaddq_s16(s1[4], s1[3]);
+ t[5] = vaddq_s16(s1[5], s1[2]);
+
+ t_lo[2] = vget_low_s16(t[2]);
+ t_hi[2] = vget_high_s16(t[2]);
+ t_lo[3] = vget_low_s16(t[3]);
+ t_hi[3] = vget_high_s16(t[3]);
+ t_lo[4] = vget_low_s16(t[4]);
+ t_hi[4] = vget_high_s16(t[4]);
+ t_lo[5] = vget_low_s16(t[5]);
+ t_hi[5] = vget_high_s16(t[5]);
+
+ u_lo[2] = vmull_n_s16(t_lo[2], cospi_16_64);
+ u_hi[2] = vmull_n_s16(t_hi[2], cospi_16_64);
+ u_lo[3] = vmull_n_s16(t_lo[3], cospi_16_64);
+ u_hi[3] = vmull_n_s16(t_hi[3], cospi_16_64);
+ u_lo[4] = vmull_n_s16(t_lo[4], cospi_16_64);
+ u_hi[4] = vmull_n_s16(t_hi[4], cospi_16_64);
+ u_lo[5] = vmull_n_s16(t_lo[5], cospi_16_64);
+ u_hi[5] = vmull_n_s16(t_hi[5], cospi_16_64);
+
+ t_lo[2] = vrshrn_n_s32(u_lo[2], DCT_CONST_BITS);
+ t_hi[2] = vrshrn_n_s32(u_hi[2], DCT_CONST_BITS);
+ t_lo[3] = vrshrn_n_s32(u_lo[3], DCT_CONST_BITS);
+ t_hi[3] = vrshrn_n_s32(u_hi[3], DCT_CONST_BITS);
+ t_lo[4] = vrshrn_n_s32(u_lo[4], DCT_CONST_BITS);
+ t_hi[4] = vrshrn_n_s32(u_hi[4], DCT_CONST_BITS);
+ t_lo[5] = vrshrn_n_s32(u_lo[5], DCT_CONST_BITS);
+ t_hi[5] = vrshrn_n_s32(u_hi[5], DCT_CONST_BITS);
+
+ s2[2] = vcombine_s16(t_lo[2], t_hi[2]);
+ s2[3] = vcombine_s16(t_lo[3], t_hi[3]);
+ s2[4] = vcombine_s16(t_lo[4], t_hi[4]);
+ s2[5] = vcombine_s16(t_lo[5], t_hi[5]);
+
+ // step 3
+ s3[0] = vaddq_s16(s1[0], s2[3]);
+ s3[1] = vaddq_s16(s1[1], s2[2]);
+ s3[2] = vsubq_s16(s1[1], s2[2]);
+ s3[3] = vsubq_s16(s1[0], s2[3]);
+ s3[4] = vsubq_s16(s1[7], s2[4]);
+ s3[5] = vsubq_s16(s1[6], s2[5]);
+ s3[6] = vaddq_s16(s1[6], s2[5]);
+ s3[7] = vaddq_s16(s1[7], s2[4]);
+
+ // step 4
+ t_lo[0] = vget_low_s16(s3[0]);
+ t_hi[0] = vget_high_s16(s3[0]);
+ t_lo[1] = vget_low_s16(s3[1]);
+ t_hi[1] = vget_high_s16(s3[1]);
+ t_lo[2] = vget_low_s16(s3[2]);
+ t_hi[2] = vget_high_s16(s3[2]);
+ t_lo[3] = vget_low_s16(s3[3]);
+ t_hi[3] = vget_high_s16(s3[3]);
+ t_lo[4] = vget_low_s16(s3[4]);
+ t_hi[4] = vget_high_s16(s3[4]);
+ t_lo[5] = vget_low_s16(s3[5]);
+ t_hi[5] = vget_high_s16(s3[5]);
+ t_lo[6] = vget_low_s16(s3[6]);
+ t_hi[6] = vget_high_s16(s3[6]);
+ t_lo[7] = vget_low_s16(s3[7]);
+ t_hi[7] = vget_high_s16(s3[7]);
+
+ // u[1] = -cospi_8_64 * t[1] + cospi_24_64 * t[6]
+ // u[6] = cospi_24_64 * t[1] + cospi_8_64 * t[6]
+ butterfly_two_coeff_s16_s32_noround(t_lo[1], t_hi[1], t_lo[6], t_hi[6],
+ -cospi_8_64, cospi_24_64, &u_lo[1],
+ &u_hi[1], &u_lo[6], &u_hi[6]);
+
+ // u[5] = -cospi_24_64 * t[5] + cospi_8_64 * t[2]
+ // u[2] = cospi_8_64 * t[5] + cospi_24_64 * t[2]
+ butterfly_two_coeff_s16_s32_noround(t_lo[5], t_hi[5], t_lo[2], t_hi[2],
+ -cospi_24_64, cospi_8_64, &u_lo[5],
+ &u_hi[5], &u_lo[2], &u_hi[2]);
+
+ t_lo[1] = vrshrn_n_s32(u_lo[1], DCT_CONST_BITS);
+ t_hi[1] = vrshrn_n_s32(u_hi[1], DCT_CONST_BITS);
+ t_lo[2] = vrshrn_n_s32(u_lo[2], DCT_CONST_BITS);
+ t_hi[2] = vrshrn_n_s32(u_hi[2], DCT_CONST_BITS);
+ t_lo[5] = vrshrn_n_s32(u_lo[5], DCT_CONST_BITS);
+ t_hi[5] = vrshrn_n_s32(u_hi[5], DCT_CONST_BITS);
+ t_lo[6] = vrshrn_n_s32(u_lo[6], DCT_CONST_BITS);
+ t_hi[6] = vrshrn_n_s32(u_hi[6], DCT_CONST_BITS);
+
+ s2[1] = vcombine_s16(t_lo[1], t_hi[1]);
+ s2[2] = vcombine_s16(t_lo[2], t_hi[2]);
+ s2[5] = vcombine_s16(t_lo[5], t_hi[5]);
+ s2[6] = vcombine_s16(t_lo[6], t_hi[6]);
+
+ // step 5
+ s1[0] = vaddq_s16(s3[0], s2[1]);
+ s1[1] = vsubq_s16(s3[0], s2[1]);
+ s1[2] = vaddq_s16(s3[3], s2[2]);
+ s1[3] = vsubq_s16(s3[3], s2[2]);
+ s1[4] = vsubq_s16(s3[4], s2[5]);
+ s1[5] = vaddq_s16(s3[4], s2[5]);
+ s1[6] = vsubq_s16(s3[7], s2[6]);
+ s1[7] = vaddq_s16(s3[7], s2[6]);
+
+ // step 6
+ t_lo[0] = vget_low_s16(s1[0]);
+ t_hi[0] = vget_high_s16(s1[0]);
+ t_lo[1] = vget_low_s16(s1[1]);
+ t_hi[1] = vget_high_s16(s1[1]);
+ t_lo[2] = vget_low_s16(s1[2]);
+ t_hi[2] = vget_high_s16(s1[2]);
+ t_lo[3] = vget_low_s16(s1[3]);
+ t_hi[3] = vget_high_s16(s1[3]);
+ t_lo[4] = vget_low_s16(s1[4]);
+ t_hi[4] = vget_high_s16(s1[4]);
+ t_lo[5] = vget_low_s16(s1[5]);
+ t_hi[5] = vget_high_s16(s1[5]);
+ t_lo[6] = vget_low_s16(s1[6]);
+ t_hi[6] = vget_high_s16(s1[6]);
+ t_lo[7] = vget_low_s16(s1[7]);
+ t_hi[7] = vget_high_s16(s1[7]);
+
+ // u[0] = step1[7] * cospi_2_64 + step1[0] * cospi_30_64
+ // u[7] = step1[7] * cospi_30_64 - step1[0] * cospi_2_64
+ butterfly_two_coeff_s16_s32_noround(t_lo[7], t_hi[7], t_lo[0], t_hi[0],
+ cospi_2_64, cospi_30_64, &u_lo[0],
+ &u_hi[0], &u_lo[7], &u_hi[7]);
+
+ // u[1] = step1[6] * cospi_18_64 + step1[1] * cospi_14_64
+ // u[6] = step1[6] * cospi_14_64 - step1[1] * cospi_18_64
+ butterfly_two_coeff_s16_s32_noround(t_lo[6], t_hi[6], t_lo[1], t_hi[1],
+ cospi_18_64, cospi_14_64, &u_lo[1],
+ &u_hi[1], &u_lo[6], &u_hi[6]);
+
+ // u[2] = step1[5] * cospi_10_64 + step1[2] * cospi_22_64
+ // u[5] = step1[5] * cospi_22_64 - step1[2] * cospi_10_64
+ butterfly_two_coeff_s16_s32_noround(t_lo[5], t_hi[5], t_lo[2], t_hi[2],
+ cospi_10_64, cospi_22_64, &u_lo[2],
+ &u_hi[2], &u_lo[5], &u_hi[5]);
+
+ // u[3] = step1[4] * cospi_26_64 + step1[3] * cospi_6_64
+ // u[4] = step1[4] * cospi_6_64 - step1[3] * cospi_26_64
+ butterfly_two_coeff_s16_s32_noround(t_lo[4], t_hi[4], t_lo[3], t_hi[3],
+ cospi_26_64, cospi_6_64, &u_lo[3],
+ &u_hi[3], &u_lo[4], &u_hi[4]);
+
+ // final fdct_round_shift
+ t_lo[0] = vrshrn_n_s32(u_lo[0], DCT_CONST_BITS);
+ t_hi[0] = vrshrn_n_s32(u_hi[0], DCT_CONST_BITS);
+ t_lo[1] = vrshrn_n_s32(u_lo[1], DCT_CONST_BITS);
+ t_hi[1] = vrshrn_n_s32(u_hi[1], DCT_CONST_BITS);
+ t_lo[2] = vrshrn_n_s32(u_lo[2], DCT_CONST_BITS);
+ t_hi[2] = vrshrn_n_s32(u_hi[2], DCT_CONST_BITS);
+ t_lo[3] = vrshrn_n_s32(u_lo[3], DCT_CONST_BITS);
+ t_hi[3] = vrshrn_n_s32(u_hi[3], DCT_CONST_BITS);
+ t_lo[4] = vrshrn_n_s32(u_lo[4], DCT_CONST_BITS);
+ t_hi[4] = vrshrn_n_s32(u_hi[4], DCT_CONST_BITS);
+ t_lo[5] = vrshrn_n_s32(u_lo[5], DCT_CONST_BITS);
+ t_hi[5] = vrshrn_n_s32(u_hi[5], DCT_CONST_BITS);
+ t_lo[6] = vrshrn_n_s32(u_lo[6], DCT_CONST_BITS);
+ t_hi[6] = vrshrn_n_s32(u_hi[6], DCT_CONST_BITS);
+ t_lo[7] = vrshrn_n_s32(u_lo[7], DCT_CONST_BITS);
+ t_hi[7] = vrshrn_n_s32(u_hi[7], DCT_CONST_BITS);
+
+ in[0] = i[0];
+ in[2] = i[1];
+ in[4] = i[2];
+ in[6] = i[3];
+ in[8] = i[4];
+ in[10] = i[5];
+ in[12] = i[6];
+ in[14] = i[7];
+ in[1] = vcombine_s16(t_lo[0], t_hi[0]);
+ in[3] = vcombine_s16(t_lo[4], t_hi[4]);
+ in[5] = vcombine_s16(t_lo[2], t_hi[2]);
+ in[7] = vcombine_s16(t_lo[6], t_hi[6]);
+ in[9] = vcombine_s16(t_lo[1], t_hi[1]);
+ in[11] = vcombine_s16(t_lo[5], t_hi[5]);
+ in[13] = vcombine_s16(t_lo[3], t_hi[3]);
+ in[15] = vcombine_s16(t_lo[7], t_hi[7]);
+}
+
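+// 1-D 16-point ADST over 8 columns. Intermediates stay in 32 bits via the
+// 'noround' butterfly helpers, and fdct_round_shift is applied explicitly
+// with vrshrq_n_s32 / vrshrn_n_s32 at each stage boundary.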
+static void fadst16_8col(int16x8_t *in) {
+ // perform 16x16 1-D ADST for 8 columns
+ int16x4_t x_lo[16], x_hi[16];
+ int32x4_t s_lo[16], s_hi[16];
+ int32x4_t t_lo[16], t_hi[16];
+
+ x_lo[0] = vget_low_s16(in[15]);
+ x_hi[0] = vget_high_s16(in[15]);
+ x_lo[1] = vget_low_s16(in[0]);
+ x_hi[1] = vget_high_s16(in[0]);
+ x_lo[2] = vget_low_s16(in[13]);
+ x_hi[2] = vget_high_s16(in[13]);
+ x_lo[3] = vget_low_s16(in[2]);
+ x_hi[3] = vget_high_s16(in[2]);
+ x_lo[4] = vget_low_s16(in[11]);
+ x_hi[4] = vget_high_s16(in[11]);
+ x_lo[5] = vget_low_s16(in[4]);
+ x_hi[5] = vget_high_s16(in[4]);
+ x_lo[6] = vget_low_s16(in[9]);
+ x_hi[6] = vget_high_s16(in[9]);
+ x_lo[7] = vget_low_s16(in[6]);
+ x_hi[7] = vget_high_s16(in[6]);
+ x_lo[8] = vget_low_s16(in[7]);
+ x_hi[8] = vget_high_s16(in[7]);
+ x_lo[9] = vget_low_s16(in[8]);
+ x_hi[9] = vget_high_s16(in[8]);
+ x_lo[10] = vget_low_s16(in[5]);
+ x_hi[10] = vget_high_s16(in[5]);
+ x_lo[11] = vget_low_s16(in[10]);
+ x_hi[11] = vget_high_s16(in[10]);
+ x_lo[12] = vget_low_s16(in[3]);
+ x_hi[12] = vget_high_s16(in[3]);
+ x_lo[13] = vget_low_s16(in[12]);
+ x_hi[13] = vget_high_s16(in[12]);
+ x_lo[14] = vget_low_s16(in[1]);
+ x_hi[14] = vget_high_s16(in[1]);
+ x_lo[15] = vget_low_s16(in[14]);
+ x_hi[15] = vget_high_s16(in[14]);
+
+ // stage 1
+ // s0 = cospi_1_64 * x0 + cospi_31_64 * x1;
+ // s1 = cospi_31_64 * x0 - cospi_1_64 * x1;
+ butterfly_two_coeff_s16_s32_noround(x_lo[0], x_hi[0], x_lo[1], x_hi[1],
+ cospi_1_64, cospi_31_64, &s_lo[0],
+ &s_hi[0], &s_lo[1], &s_hi[1]);
+ // s2 = cospi_5_64 * x2 + cospi_27_64 * x3;
+ // s3 = cospi_27_64 * x2 - cospi_5_64 * x3;
+ butterfly_two_coeff_s16_s32_noround(x_lo[2], x_hi[2], x_lo[3], x_hi[3],
+ cospi_5_64, cospi_27_64, &s_lo[2],
+ &s_hi[2], &s_lo[3], &s_hi[3]);
+ // s4 = cospi_9_64 * x4 + cospi_23_64 * x5;
+ // s5 = cospi_23_64 * x4 - cospi_9_64 * x5;
+ butterfly_two_coeff_s16_s32_noround(x_lo[4], x_hi[4], x_lo[5], x_hi[5],
+ cospi_9_64, cospi_23_64, &s_lo[4],
+ &s_hi[4], &s_lo[5], &s_hi[5]);
+ // s6 = cospi_13_64 * x6 + cospi_19_64 * x7;
+ // s7 = cospi_19_64 * x6 - cospi_13_64 * x7;
+ butterfly_two_coeff_s16_s32_noround(x_lo[6], x_hi[6], x_lo[7], x_hi[7],
+ cospi_13_64, cospi_19_64, &s_lo[6],
+ &s_hi[6], &s_lo[7], &s_hi[7]);
+ // s8 = cospi_17_64 * x8 + cospi_15_64 * x9;
+ // s9 = cospi_15_64 * x8 - cospi_17_64 * x9;
+ butterfly_two_coeff_s16_s32_noround(x_lo[8], x_hi[8], x_lo[9], x_hi[9],
+ cospi_17_64, cospi_15_64, &s_lo[8],
+ &s_hi[8], &s_lo[9], &s_hi[9]);
+ // s10 = cospi_21_64 * x10 + cospi_11_64 * x11;
+ // s11 = cospi_11_64 * x10 - cospi_21_64 * x11;
+ butterfly_two_coeff_s16_s32_noround(x_lo[10], x_hi[10], x_lo[11], x_hi[11],
+ cospi_21_64, cospi_11_64, &s_lo[10],
+ &s_hi[10], &s_lo[11], &s_hi[11]);
+ // s12 = cospi_25_64 * x12 + cospi_7_64 * x13;
+ // s13 = cospi_7_64 * x12 - cospi_25_64 * x13;
+ butterfly_two_coeff_s16_s32_noround(x_lo[12], x_hi[12], x_lo[13], x_hi[13],
+ cospi_25_64, cospi_7_64, &s_lo[12],
+ &s_hi[12], &s_lo[13], &s_hi[13]);
+ // s14 = cospi_29_64 * x14 + cospi_3_64 * x15;
+ // s15 = cospi_3_64 * x14 - cospi_29_64 * x15;
+ butterfly_two_coeff_s16_s32_noround(x_lo[14], x_hi[14], x_lo[15], x_hi[15],
+ cospi_29_64, cospi_3_64, &s_lo[14],
+ &s_hi[14], &s_lo[15], &s_hi[15]);
+
+ // fdct_round_shift
+ t_lo[0] = vrshrq_n_s32(vaddq_s32(s_lo[0], s_lo[8]), DCT_CONST_BITS);
+ t_hi[0] = vrshrq_n_s32(vaddq_s32(s_hi[0], s_hi[8]), DCT_CONST_BITS);
+ t_lo[1] = vrshrq_n_s32(vaddq_s32(s_lo[1], s_lo[9]), DCT_CONST_BITS);
+ t_hi[1] = vrshrq_n_s32(vaddq_s32(s_hi[1], s_hi[9]), DCT_CONST_BITS);
+ t_lo[2] = vrshrq_n_s32(vaddq_s32(s_lo[2], s_lo[10]), DCT_CONST_BITS);
+ t_hi[2] = vrshrq_n_s32(vaddq_s32(s_hi[2], s_hi[10]), DCT_CONST_BITS);
+ t_lo[3] = vrshrq_n_s32(vaddq_s32(s_lo[3], s_lo[11]), DCT_CONST_BITS);
+ t_hi[3] = vrshrq_n_s32(vaddq_s32(s_hi[3], s_hi[11]), DCT_CONST_BITS);
+ t_lo[4] = vrshrq_n_s32(vaddq_s32(s_lo[4], s_lo[12]), DCT_CONST_BITS);
+ t_hi[4] = vrshrq_n_s32(vaddq_s32(s_hi[4], s_hi[12]), DCT_CONST_BITS);
+ t_lo[5] = vrshrq_n_s32(vaddq_s32(s_lo[5], s_lo[13]), DCT_CONST_BITS);
+ t_hi[5] = vrshrq_n_s32(vaddq_s32(s_hi[5], s_hi[13]), DCT_CONST_BITS);
+ t_lo[6] = vrshrq_n_s32(vaddq_s32(s_lo[6], s_lo[14]), DCT_CONST_BITS);
+ t_hi[6] = vrshrq_n_s32(vaddq_s32(s_hi[6], s_hi[14]), DCT_CONST_BITS);
+ t_lo[7] = vrshrq_n_s32(vaddq_s32(s_lo[7], s_lo[15]), DCT_CONST_BITS);
+ t_hi[7] = vrshrq_n_s32(vaddq_s32(s_hi[7], s_hi[15]), DCT_CONST_BITS);
+ t_lo[8] = vrshrq_n_s32(vsubq_s32(s_lo[0], s_lo[8]), DCT_CONST_BITS);
+ t_hi[8] = vrshrq_n_s32(vsubq_s32(s_hi[0], s_hi[8]), DCT_CONST_BITS);
+ t_lo[9] = vrshrq_n_s32(vsubq_s32(s_lo[1], s_lo[9]), DCT_CONST_BITS);
+ t_hi[9] = vrshrq_n_s32(vsubq_s32(s_hi[1], s_hi[9]), DCT_CONST_BITS);
+ t_lo[10] = vrshrq_n_s32(vsubq_s32(s_lo[2], s_lo[10]), DCT_CONST_BITS);
+ t_hi[10] = vrshrq_n_s32(vsubq_s32(s_hi[2], s_hi[10]), DCT_CONST_BITS);
+ t_lo[11] = vrshrq_n_s32(vsubq_s32(s_lo[3], s_lo[11]), DCT_CONST_BITS);
+ t_hi[11] = vrshrq_n_s32(vsubq_s32(s_hi[3], s_hi[11]), DCT_CONST_BITS);
+ t_lo[12] = vrshrq_n_s32(vsubq_s32(s_lo[4], s_lo[12]), DCT_CONST_BITS);
+ t_hi[12] = vrshrq_n_s32(vsubq_s32(s_hi[4], s_hi[12]), DCT_CONST_BITS);
+ t_lo[13] = vrshrq_n_s32(vsubq_s32(s_lo[5], s_lo[13]), DCT_CONST_BITS);
+ t_hi[13] = vrshrq_n_s32(vsubq_s32(s_hi[5], s_hi[13]), DCT_CONST_BITS);
+ t_lo[14] = vrshrq_n_s32(vsubq_s32(s_lo[6], s_lo[14]), DCT_CONST_BITS);
+ t_hi[14] = vrshrq_n_s32(vsubq_s32(s_hi[6], s_hi[14]), DCT_CONST_BITS);
+ t_lo[15] = vrshrq_n_s32(vsubq_s32(s_lo[7], s_lo[15]), DCT_CONST_BITS);
+ t_hi[15] = vrshrq_n_s32(vsubq_s32(s_hi[7], s_hi[15]), DCT_CONST_BITS);
+
+ // stage 2
+ s_lo[0] = t_lo[0];
+ s_hi[0] = t_hi[0];
+ s_lo[1] = t_lo[1];
+ s_hi[1] = t_hi[1];
+ s_lo[2] = t_lo[2];
+ s_hi[2] = t_hi[2];
+ s_lo[3] = t_lo[3];
+ s_hi[3] = t_hi[3];
+ s_lo[4] = t_lo[4];
+ s_hi[4] = t_hi[4];
+ s_lo[5] = t_lo[5];
+ s_hi[5] = t_hi[5];
+ s_lo[6] = t_lo[6];
+ s_hi[6] = t_hi[6];
+ s_lo[7] = t_lo[7];
+ s_hi[7] = t_hi[7];
+ // s8 = x8 * cospi_4_64 + x9 * cospi_28_64;
+ // s9 = x8 * cospi_28_64 - x9 * cospi_4_64;
+ butterfly_two_coeff_s32_noround(t_lo[8], t_hi[8], t_lo[9], t_hi[9],
+ cospi_4_64, cospi_28_64, &s_lo[8], &s_hi[8],
+ &s_lo[9], &s_hi[9]);
+ // s10 = x10 * cospi_20_64 + x11 * cospi_12_64;
+ // s11 = x10 * cospi_12_64 - x11 * cospi_20_64;
+ butterfly_two_coeff_s32_noround(t_lo[10], t_hi[10], t_lo[11], t_hi[11],
+ cospi_20_64, cospi_12_64, &s_lo[10],
+ &s_hi[10], &s_lo[11], &s_hi[11]);
+ // s12 = -x12 * cospi_28_64 + x13 * cospi_4_64;
+ // s13 = x12 * cospi_4_64 + x13 * cospi_28_64;
+ butterfly_two_coeff_s32_noround(t_lo[13], t_hi[13], t_lo[12], t_hi[12],
+ cospi_28_64, cospi_4_64, &s_lo[13], &s_hi[13],
+ &s_lo[12], &s_hi[12]);
+ // s14 = -x14 * cospi_12_64 + x15 * cospi_20_64;
+ // s15 = x14 * cospi_20_64 + x15 * cospi_12_64;
+ butterfly_two_coeff_s32_noround(t_lo[15], t_hi[15], t_lo[14], t_hi[14],
+ cospi_12_64, cospi_20_64, &s_lo[15],
+ &s_hi[15], &s_lo[14], &s_hi[14]);
+
+ // s0 + s4
+ t_lo[0] = vaddq_s32(s_lo[0], s_lo[4]);
+ t_hi[0] = vaddq_s32(s_hi[0], s_hi[4]);
+ // s1 + s5
+ t_lo[1] = vaddq_s32(s_lo[1], s_lo[5]);
+ t_hi[1] = vaddq_s32(s_hi[1], s_hi[5]);
+ // s2 + s6
+ t_lo[2] = vaddq_s32(s_lo[2], s_lo[6]);
+ t_hi[2] = vaddq_s32(s_hi[2], s_hi[6]);
+ // s3 + s7
+ t_lo[3] = vaddq_s32(s_lo[3], s_lo[7]);
+ t_hi[3] = vaddq_s32(s_hi[3], s_hi[7]);
+ // s0 - s4
+ t_lo[4] = vsubq_s32(s_lo[0], s_lo[4]);
+ t_hi[4] = vsubq_s32(s_hi[0], s_hi[4]);
+  // s1 - s5
+ t_lo[5] = vsubq_s32(s_lo[1], s_lo[5]);
+ t_hi[5] = vsubq_s32(s_hi[1], s_hi[5]);
+ // s2 - s6
+ t_lo[6] = vsubq_s32(s_lo[2], s_lo[6]);
+ t_hi[6] = vsubq_s32(s_hi[2], s_hi[6]);
+ // s3 - s7
+ t_lo[7] = vsubq_s32(s_lo[3], s_lo[7]);
+ t_hi[7] = vsubq_s32(s_hi[3], s_hi[7]);
+ // s8 + s12
+ t_lo[8] = vaddq_s32(s_lo[8], s_lo[12]);
+ t_hi[8] = vaddq_s32(s_hi[8], s_hi[12]);
+ // s9 + s13
+ t_lo[9] = vaddq_s32(s_lo[9], s_lo[13]);
+ t_hi[9] = vaddq_s32(s_hi[9], s_hi[13]);
+ // s10 + s14
+ t_lo[10] = vaddq_s32(s_lo[10], s_lo[14]);
+ t_hi[10] = vaddq_s32(s_hi[10], s_hi[14]);
+ // s11 + s15
+ t_lo[11] = vaddq_s32(s_lo[11], s_lo[15]);
+ t_hi[11] = vaddq_s32(s_hi[11], s_hi[15]);
+  // s8 - s12
+ t_lo[12] = vsubq_s32(s_lo[8], s_lo[12]);
+ t_hi[12] = vsubq_s32(s_hi[8], s_hi[12]);
+  // s9 - s13
+ t_lo[13] = vsubq_s32(s_lo[9], s_lo[13]);
+ t_hi[13] = vsubq_s32(s_hi[9], s_hi[13]);
+  // s10 - s14
+ t_lo[14] = vsubq_s32(s_lo[10], s_lo[14]);
+ t_hi[14] = vsubq_s32(s_hi[10], s_hi[14]);
+  // s11 - s15
+ t_lo[15] = vsubq_s32(s_lo[11], s_lo[15]);
+ t_hi[15] = vsubq_s32(s_hi[11], s_hi[15]);
+
+ t_lo[8] = vrshrq_n_s32(t_lo[8], DCT_CONST_BITS);
+ t_hi[8] = vrshrq_n_s32(t_hi[8], DCT_CONST_BITS);
+ t_lo[9] = vrshrq_n_s32(t_lo[9], DCT_CONST_BITS);
+ t_hi[9] = vrshrq_n_s32(t_hi[9], DCT_CONST_BITS);
+ t_lo[10] = vrshrq_n_s32(t_lo[10], DCT_CONST_BITS);
+ t_hi[10] = vrshrq_n_s32(t_hi[10], DCT_CONST_BITS);
+ t_lo[11] = vrshrq_n_s32(t_lo[11], DCT_CONST_BITS);
+ t_hi[11] = vrshrq_n_s32(t_hi[11], DCT_CONST_BITS);
+ t_lo[12] = vrshrq_n_s32(t_lo[12], DCT_CONST_BITS);
+ t_hi[12] = vrshrq_n_s32(t_hi[12], DCT_CONST_BITS);
+ t_lo[13] = vrshrq_n_s32(t_lo[13], DCT_CONST_BITS);
+ t_hi[13] = vrshrq_n_s32(t_hi[13], DCT_CONST_BITS);
+ t_lo[14] = vrshrq_n_s32(t_lo[14], DCT_CONST_BITS);
+ t_hi[14] = vrshrq_n_s32(t_hi[14], DCT_CONST_BITS);
+ t_lo[15] = vrshrq_n_s32(t_lo[15], DCT_CONST_BITS);
+ t_hi[15] = vrshrq_n_s32(t_hi[15], DCT_CONST_BITS);
+
+ // stage 3
+ s_lo[0] = t_lo[0];
+ s_hi[0] = t_hi[0];
+ s_lo[1] = t_lo[1];
+ s_hi[1] = t_hi[1];
+ s_lo[2] = t_lo[2];
+ s_hi[2] = t_hi[2];
+ s_lo[3] = t_lo[3];
+ s_hi[3] = t_hi[3];
+ // s4 = x4 * cospi_8_64 + x5 * cospi_24_64;
+ // s5 = x4 * cospi_24_64 - x5 * cospi_8_64;
+ butterfly_two_coeff_s32_noround(t_lo[4], t_hi[4], t_lo[5], t_hi[5],
+ cospi_8_64, cospi_24_64, &s_lo[4], &s_hi[4],
+ &s_lo[5], &s_hi[5]);
+ // s6 = -x6 * cospi_24_64 + x7 * cospi_8_64;
+ // s7 = x6 * cospi_8_64 + x7 * cospi_24_64;
+ butterfly_two_coeff_s32_noround(t_lo[7], t_hi[7], t_lo[6], t_hi[6],
+ cospi_24_64, cospi_8_64, &s_lo[7], &s_hi[7],
+ &s_lo[6], &s_hi[6]);
+ s_lo[8] = t_lo[8];
+ s_hi[8] = t_hi[8];
+ s_lo[9] = t_lo[9];
+ s_hi[9] = t_hi[9];
+ s_lo[10] = t_lo[10];
+ s_hi[10] = t_hi[10];
+ s_lo[11] = t_lo[11];
+ s_hi[11] = t_hi[11];
+ // s12 = x12 * cospi_8_64 + x13 * cospi_24_64;
+ // s13 = x12 * cospi_24_64 - x13 * cospi_8_64;
+ butterfly_two_coeff_s32_noround(t_lo[12], t_hi[12], t_lo[13], t_hi[13],
+ cospi_8_64, cospi_24_64, &s_lo[12], &s_hi[12],
+ &s_lo[13], &s_hi[13]);
+ // s14 = -x14 * cospi_24_64 + x15 * cospi_8_64;
+ // s15 = x14 * cospi_8_64 + x15 * cospi_24_64;
+ butterfly_two_coeff_s32_noround(t_lo[15], t_hi[15], t_lo[14], t_hi[14],
+ cospi_24_64, cospi_8_64, &s_lo[15], &s_hi[15],
+ &s_lo[14], &s_hi[14]);
+
+  // s0 + s2
+ t_lo[0] = vaddq_s32(s_lo[0], s_lo[2]);
+ t_hi[0] = vaddq_s32(s_hi[0], s_hi[2]);
+ // s1 + s3
+ t_lo[1] = vaddq_s32(s_lo[1], s_lo[3]);
+ t_hi[1] = vaddq_s32(s_hi[1], s_hi[3]);
+  // s0 - s2
+ t_lo[2] = vsubq_s32(s_lo[0], s_lo[2]);
+ t_hi[2] = vsubq_s32(s_hi[0], s_hi[2]);
+ // s1 - s3
+ t_lo[3] = vsubq_s32(s_lo[1], s_lo[3]);
+ t_hi[3] = vsubq_s32(s_hi[1], s_hi[3]);
+ // s4 + s6
+ t_lo[4] = vaddq_s32(s_lo[4], s_lo[6]);
+ t_hi[4] = vaddq_s32(s_hi[4], s_hi[6]);
+ // s5 + s7
+ t_lo[5] = vaddq_s32(s_lo[5], s_lo[7]);
+ t_hi[5] = vaddq_s32(s_hi[5], s_hi[7]);
+ // s4 - s6
+ t_lo[6] = vsubq_s32(s_lo[4], s_lo[6]);
+ t_hi[6] = vsubq_s32(s_hi[4], s_hi[6]);
+ // s5 - s7
+ t_lo[7] = vsubq_s32(s_lo[5], s_lo[7]);
+ t_hi[7] = vsubq_s32(s_hi[5], s_hi[7]);
+ // s8 + s10
+ t_lo[8] = vaddq_s32(s_lo[8], s_lo[10]);
+ t_hi[8] = vaddq_s32(s_hi[8], s_hi[10]);
+ // s9 + s11
+ t_lo[9] = vaddq_s32(s_lo[9], s_lo[11]);
+ t_hi[9] = vaddq_s32(s_hi[9], s_hi[11]);
+ // s8 - s10
+ t_lo[10] = vsubq_s32(s_lo[8], s_lo[10]);
+ t_hi[10] = vsubq_s32(s_hi[8], s_hi[10]);
+ // s9 - s11
+ t_lo[11] = vsubq_s32(s_lo[9], s_lo[11]);
+ t_hi[11] = vsubq_s32(s_hi[9], s_hi[11]);
+ // s12 + s14
+ t_lo[12] = vaddq_s32(s_lo[12], s_lo[14]);
+ t_hi[12] = vaddq_s32(s_hi[12], s_hi[14]);
+ // s13 + s15
+ t_lo[13] = vaddq_s32(s_lo[13], s_lo[15]);
+ t_hi[13] = vaddq_s32(s_hi[13], s_hi[15]);
+ // s12 - s14
+ t_lo[14] = vsubq_s32(s_lo[12], s_lo[14]);
+ t_hi[14] = vsubq_s32(s_hi[12], s_hi[14]);
+ // s13 - s15
+ t_lo[15] = vsubq_s32(s_lo[13], s_lo[15]);
+ t_hi[15] = vsubq_s32(s_hi[13], s_hi[15]);
+
+ t_lo[4] = vrshrq_n_s32(t_lo[4], DCT_CONST_BITS);
+ t_hi[4] = vrshrq_n_s32(t_hi[4], DCT_CONST_BITS);
+ t_lo[5] = vrshrq_n_s32(t_lo[5], DCT_CONST_BITS);
+ t_hi[5] = vrshrq_n_s32(t_hi[5], DCT_CONST_BITS);
+ t_lo[6] = vrshrq_n_s32(t_lo[6], DCT_CONST_BITS);
+ t_hi[6] = vrshrq_n_s32(t_hi[6], DCT_CONST_BITS);
+ t_lo[7] = vrshrq_n_s32(t_lo[7], DCT_CONST_BITS);
+ t_hi[7] = vrshrq_n_s32(t_hi[7], DCT_CONST_BITS);
+ t_lo[12] = vrshrq_n_s32(t_lo[12], DCT_CONST_BITS);
+ t_hi[12] = vrshrq_n_s32(t_hi[12], DCT_CONST_BITS);
+ t_lo[13] = vrshrq_n_s32(t_lo[13], DCT_CONST_BITS);
+ t_hi[13] = vrshrq_n_s32(t_hi[13], DCT_CONST_BITS);
+ t_lo[14] = vrshrq_n_s32(t_lo[14], DCT_CONST_BITS);
+ t_hi[14] = vrshrq_n_s32(t_hi[14], DCT_CONST_BITS);
+ t_lo[15] = vrshrq_n_s32(t_lo[15], DCT_CONST_BITS);
+ t_hi[15] = vrshrq_n_s32(t_hi[15], DCT_CONST_BITS);
+
+ // stage 4
+ // s2 = (-cospi_16_64) * (x2 + x3);
+ // s3 = cospi_16_64 * (x2 - x3);
+ butterfly_one_coeff_s32_noround(t_lo[3], t_hi[3], t_lo[2], t_hi[2],
+ -cospi_16_64, &s_lo[2], &s_hi[2], &s_lo[3],
+ &s_hi[3]);
+ // s6 = cospi_16_64 * (x6 + x7);
+ // s7 = cospi_16_64 * (-x6 + x7);
+ butterfly_one_coeff_s32_noround(t_lo[7], t_hi[7], t_lo[6], t_hi[6],
+ cospi_16_64, &s_lo[6], &s_hi[6], &s_lo[7],
+ &s_hi[7]);
+ // s10 = cospi_16_64 * (x10 + x11);
+ // s11 = cospi_16_64 * (-x10 + x11);
+ butterfly_one_coeff_s32_noround(t_lo[11], t_hi[11], t_lo[10], t_hi[10],
+ cospi_16_64, &s_lo[10], &s_hi[10], &s_lo[11],
+ &s_hi[11]);
+ // s14 = (-cospi_16_64) * (x14 + x15);
+ // s15 = cospi_16_64 * (x14 - x15);
+ butterfly_one_coeff_s32_noround(t_lo[15], t_hi[15], t_lo[14], t_hi[14],
+ -cospi_16_64, &s_lo[14], &s_hi[14], &s_lo[15],
+ &s_hi[15]);
+
+ // final fdct_round_shift
+ x_lo[2] = vrshrn_n_s32(s_lo[2], DCT_CONST_BITS);
+ x_hi[2] = vrshrn_n_s32(s_hi[2], DCT_CONST_BITS);
+ x_lo[3] = vrshrn_n_s32(s_lo[3], DCT_CONST_BITS);
+ x_hi[3] = vrshrn_n_s32(s_hi[3], DCT_CONST_BITS);
+ x_lo[6] = vrshrn_n_s32(s_lo[6], DCT_CONST_BITS);
+ x_hi[6] = vrshrn_n_s32(s_hi[6], DCT_CONST_BITS);
+ x_lo[7] = vrshrn_n_s32(s_lo[7], DCT_CONST_BITS);
+ x_hi[7] = vrshrn_n_s32(s_hi[7], DCT_CONST_BITS);
+ x_lo[10] = vrshrn_n_s32(s_lo[10], DCT_CONST_BITS);
+ x_hi[10] = vrshrn_n_s32(s_hi[10], DCT_CONST_BITS);
+ x_lo[11] = vrshrn_n_s32(s_lo[11], DCT_CONST_BITS);
+ x_hi[11] = vrshrn_n_s32(s_hi[11], DCT_CONST_BITS);
+ x_lo[14] = vrshrn_n_s32(s_lo[14], DCT_CONST_BITS);
+ x_hi[14] = vrshrn_n_s32(s_hi[14], DCT_CONST_BITS);
+ x_lo[15] = vrshrn_n_s32(s_lo[15], DCT_CONST_BITS);
+ x_hi[15] = vrshrn_n_s32(s_hi[15], DCT_CONST_BITS);
+
+ // x0, x1, x4, x5, x8, x9, x12, x13 narrow down to 16-bits directly
+ x_lo[0] = vmovn_s32(t_lo[0]);
+ x_hi[0] = vmovn_s32(t_hi[0]);
+ x_lo[1] = vmovn_s32(t_lo[1]);
+ x_hi[1] = vmovn_s32(t_hi[1]);
+ x_lo[4] = vmovn_s32(t_lo[4]);
+ x_hi[4] = vmovn_s32(t_hi[4]);
+ x_lo[5] = vmovn_s32(t_lo[5]);
+ x_hi[5] = vmovn_s32(t_hi[5]);
+ x_lo[8] = vmovn_s32(t_lo[8]);
+ x_hi[8] = vmovn_s32(t_hi[8]);
+ x_lo[9] = vmovn_s32(t_lo[9]);
+ x_hi[9] = vmovn_s32(t_hi[9]);
+ x_lo[12] = vmovn_s32(t_lo[12]);
+ x_hi[12] = vmovn_s32(t_hi[12]);
+ x_lo[13] = vmovn_s32(t_lo[13]);
+ x_hi[13] = vmovn_s32(t_hi[13]);
+
+ in[0] = vcombine_s16(x_lo[0], x_hi[0]);
+ in[1] = vnegq_s16(vcombine_s16(x_lo[8], x_hi[8]));
+ in[2] = vcombine_s16(x_lo[12], x_hi[12]);
+ in[3] = vnegq_s16(vcombine_s16(x_lo[4], x_hi[4]));
+ in[4] = vcombine_s16(x_lo[6], x_hi[6]);
+ in[5] = vcombine_s16(x_lo[14], x_hi[14]);
+ in[6] = vcombine_s16(x_lo[10], x_hi[10]);
+ in[7] = vcombine_s16(x_lo[2], x_hi[2]);
+ in[8] = vcombine_s16(x_lo[3], x_hi[3]);
+ in[9] = vcombine_s16(x_lo[11], x_hi[11]);
+ in[10] = vcombine_s16(x_lo[15], x_hi[15]);
+ in[11] = vcombine_s16(x_lo[7], x_hi[7]);
+ in[12] = vcombine_s16(x_lo[5], x_hi[5]);
+ in[13] = vnegq_s16(vcombine_s16(x_lo[13], x_hi[13]));
+ in[14] = vcombine_s16(x_lo[9], x_hi[9]);
+ in[15] = vnegq_s16(vcombine_s16(x_lo[1], x_hi[1]));
+}
+
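+// 2-D 16x16 helpers: run the 1-D kernel on the left and right 8-column
+// halves, then transpose the full 16x16 block.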
+static void fdct16x16_neon(int16x8_t *in0, int16x8_t *in1) {
+ // Left half.
+ fdct16_8col(in0);
+ // Right half.
+ fdct16_8col(in1);
+ transpose_s16_16x16(in0, in1);
+}
+
+static void fadst16x16_neon(int16x8_t *in0, int16x8_t *in1) {
+ fadst16_8col(in0);
+ fadst16_8col(in1);
+ transpose_s16_16x16(in0, in1);
+}
+
+void vp9_fht16x16_neon(const int16_t *input, tran_low_t *output, int stride,
+ int tx_type) {
+ int16x8_t in0[16], in1[16];
+
+ switch (tx_type) {
+ case DCT_DCT: vpx_fdct16x16_neon(input, output, stride); break;
+ case ADST_DCT:
+ load_buffer_16x16(input, in0, in1, stride);
+ fadst16x16_neon(in0, in1);
+ right_shift_16x16(in0, in1);
+ fdct16x16_neon(in0, in1);
+ write_buffer_16x16(output, in0, in1, 16);
+ break;
+ case DCT_ADST:
+ load_buffer_16x16(input, in0, in1, stride);
+ fdct16x16_neon(in0, in1);
+ right_shift_16x16(in0, in1);
+ fadst16x16_neon(in0, in1);
+ write_buffer_16x16(output, in0, in1, 16);
+ break;
+ default:
+ assert(tx_type == ADST_ADST);
+ load_buffer_16x16(input, in0, in1, stride);
+ fadst16x16_neon(in0, in1);
+ right_shift_16x16(in0, in1);
+ fadst16x16_neon(in0, in1);
+ write_buffer_16x16(output, in0, in1, 16);
+ break;
+ }
+}
+
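+// High bit-depth variants. These mirror the functions above but keep
+// coefficients in 32-bit lanes (with 64-bit butterfly intermediates where
+// needed), since the wider residual range does not fit the 16-bit paths.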
+#if CONFIG_VP9_HIGHBITDEPTH
+
+static INLINE void highbd_load_buffer_4x4(const int16_t *input,
+ int32x4_t *in /*[4]*/, int stride) {
+ // { 0, 1, 1, 1 };
+ const int32x4_t nonzero_bias_a = vextq_s32(vdupq_n_s32(0), vdupq_n_s32(1), 3);
+ // { 1, 0, 0, 0 };
+ const int32x4_t nonzero_bias_b = vextq_s32(vdupq_n_s32(1), vdupq_n_s32(0), 3);
+ int32x4_t mask;
+
+ in[0] = vshll_n_s16(vld1_s16(input + 0 * stride), 4);
+ in[1] = vshll_n_s16(vld1_s16(input + 1 * stride), 4);
+ in[2] = vshll_n_s16(vld1_s16(input + 2 * stride), 4);
+ in[3] = vshll_n_s16(vld1_s16(input + 3 * stride), 4);
+
+  // As in the SSE implementation, use a mask instead of an 'if' branch to
+  // add one to the first element when it is non-zero.
+ mask = vreinterpretq_s32_u32(vceqq_s32(in[0], nonzero_bias_a));
+ in[0] = vaddq_s32(in[0], mask);
+ in[0] = vaddq_s32(in[0], nonzero_bias_b);
+}
+
+static INLINE void highbd_write_buffer_4x4(tran_low_t *output, int32x4_t *res) {
+ const int32x4_t one = vdupq_n_s32(1);
+ res[0] = vshrq_n_s32(vaddq_s32(res[0], one), 2);
+ res[1] = vshrq_n_s32(vaddq_s32(res[1], one), 2);
+ res[2] = vshrq_n_s32(vaddq_s32(res[2], one), 2);
+ res[3] = vshrq_n_s32(vaddq_s32(res[3], one), 2);
+ vst1q_s32(output + 0 * 4, res[0]);
+ vst1q_s32(output + 1 * 4, res[1]);
+ vst1q_s32(output + 2 * 4, res[2]);
+ vst1q_s32(output + 3 * 4, res[3]);
+}
+
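+// High bit-depth 4-point forward ADST: same flow as fadst4x4_neon above,
+// but the sinpi products are accumulated in 64 bits and narrowed with
+// vrshrn_n_s64 so the wider input range cannot overflow.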
+static INLINE void highbd_fadst4x4_neon(int32x4_t *in /*[4]*/) {
+ int32x2_t s_lo[4], s_hi[4];
+ int64x2_t u_lo[4], u_hi[4], t_lo[4], t_hi[4];
+
+ s_lo[0] = vget_low_s32(in[0]);
+ s_hi[0] = vget_high_s32(in[0]);
+ s_lo[1] = vget_low_s32(in[1]);
+ s_hi[1] = vget_high_s32(in[1]);
+ s_lo[2] = vget_low_s32(in[2]);
+ s_hi[2] = vget_high_s32(in[2]);
+ s_lo[3] = vget_low_s32(in[3]);
+ s_hi[3] = vget_high_s32(in[3]);
+
+ // t0 = s0 * sinpi_1_9 + s1 * sinpi_2_9 + s3 * sinpi_4_9
+ t_lo[0] = vmull_n_s32(s_lo[0], sinpi_1_9);
+ t_lo[0] = vmlal_n_s32(t_lo[0], s_lo[1], sinpi_2_9);
+ t_lo[0] = vmlal_n_s32(t_lo[0], s_lo[3], sinpi_4_9);
+ t_hi[0] = vmull_n_s32(s_hi[0], sinpi_1_9);
+ t_hi[0] = vmlal_n_s32(t_hi[0], s_hi[1], sinpi_2_9);
+ t_hi[0] = vmlal_n_s32(t_hi[0], s_hi[3], sinpi_4_9);
+
+ // t1 = (s0 + s1) * sinpi_3_9 - s3 * sinpi_3_9
+ t_lo[1] = vmull_n_s32(s_lo[0], sinpi_3_9);
+ t_lo[1] = vmlal_n_s32(t_lo[1], s_lo[1], sinpi_3_9);
+ t_lo[1] = vmlsl_n_s32(t_lo[1], s_lo[3], sinpi_3_9);
+ t_hi[1] = vmull_n_s32(s_hi[0], sinpi_3_9);
+ t_hi[1] = vmlal_n_s32(t_hi[1], s_hi[1], sinpi_3_9);
+ t_hi[1] = vmlsl_n_s32(t_hi[1], s_hi[3], sinpi_3_9);
+
+ // t2 = s0 * sinpi_4_9 - s1* sinpi_1_9 + s3 * sinpi_2_9
+ t_lo[2] = vmull_n_s32(s_lo[0], sinpi_4_9);
+ t_lo[2] = vmlsl_n_s32(t_lo[2], s_lo[1], sinpi_1_9);
+ t_lo[2] = vmlal_n_s32(t_lo[2], s_lo[3], sinpi_2_9);
+ t_hi[2] = vmull_n_s32(s_hi[0], sinpi_4_9);
+ t_hi[2] = vmlsl_n_s32(t_hi[2], s_hi[1], sinpi_1_9);
+ t_hi[2] = vmlal_n_s32(t_hi[2], s_hi[3], sinpi_2_9);
+
+ // t3 = s2 * sinpi_3_9
+ t_lo[3] = vmull_n_s32(s_lo[2], sinpi_3_9);
+ t_hi[3] = vmull_n_s32(s_hi[2], sinpi_3_9);
+
+ /*
+ * u0 = t0 + t3
+ * u1 = t1
+ * u2 = t2 - t3
+ * u3 = t2 - t0 + t3
+ */
+ u_lo[0] = vaddq_s64(t_lo[0], t_lo[3]);
+ u_hi[0] = vaddq_s64(t_hi[0], t_hi[3]);
+ u_lo[1] = t_lo[1];
+ u_hi[1] = t_hi[1];
+ u_lo[2] = vsubq_s64(t_lo[2], t_lo[3]);
+ u_hi[2] = vsubq_s64(t_hi[2], t_hi[3]);
+ u_lo[3] = vaddq_s64(vsubq_s64(t_lo[2], t_lo[0]), t_lo[3]);
+ u_hi[3] = vaddq_s64(vsubq_s64(t_hi[2], t_hi[0]), t_hi[3]);
+
+ // fdct_round_shift
+ in[0] = vcombine_s32(vrshrn_n_s64(u_lo[0], DCT_CONST_BITS),
+ vrshrn_n_s64(u_hi[0], DCT_CONST_BITS));
+ in[1] = vcombine_s32(vrshrn_n_s64(u_lo[1], DCT_CONST_BITS),
+ vrshrn_n_s64(u_hi[1], DCT_CONST_BITS));
+ in[2] = vcombine_s32(vrshrn_n_s64(u_lo[2], DCT_CONST_BITS),
+ vrshrn_n_s64(u_hi[2], DCT_CONST_BITS));
+ in[3] = vcombine_s32(vrshrn_n_s64(u_lo[3], DCT_CONST_BITS),
+ vrshrn_n_s64(u_hi[3], DCT_CONST_BITS));
+
+ transpose_s32_4x4(&in[0], &in[1], &in[2], &in[3]);
+}
+
+void vp9_highbd_fht4x4_neon(const int16_t *input, tran_low_t *output,
+ int stride, int tx_type) {
+ int32x4_t in[4];
+
+ switch (tx_type) {
+ case DCT_DCT: vpx_highbd_fdct4x4_neon(input, output, stride); break;
+ case ADST_DCT:
+ highbd_load_buffer_4x4(input, in, stride);
+ highbd_fadst4x4_neon(in);
+ vpx_highbd_fdct4x4_pass1_neon(in);
+ highbd_write_buffer_4x4(output, in);
+ break;
+ case DCT_ADST:
+ highbd_load_buffer_4x4(input, in, stride);
+ vpx_highbd_fdct4x4_pass1_neon(in);
+ highbd_fadst4x4_neon(in);
+ highbd_write_buffer_4x4(output, in);
+ break;
+ default:
+ assert(tx_type == ADST_ADST);
+ highbd_load_buffer_4x4(input, in, stride);
+ highbd_fadst4x4_neon(in);
+ highbd_fadst4x4_neon(in);
+ highbd_write_buffer_4x4(output, in);
+ break;
+ }
+}
+
+static INLINE void highbd_load_buffer_8x8(const int16_t *input,
+ int32x4_t *lo /*[8]*/,
+ int32x4_t *hi /*[8]*/, int stride) {
+ int16x8_t in[8];
+ in[0] = vld1q_s16(input + 0 * stride);
+ in[1] = vld1q_s16(input + 1 * stride);
+ in[2] = vld1q_s16(input + 2 * stride);
+ in[3] = vld1q_s16(input + 3 * stride);
+ in[4] = vld1q_s16(input + 4 * stride);
+ in[5] = vld1q_s16(input + 5 * stride);
+ in[6] = vld1q_s16(input + 6 * stride);
+ in[7] = vld1q_s16(input + 7 * stride);
+ lo[0] = vshll_n_s16(vget_low_s16(in[0]), 2);
+ hi[0] = vshll_n_s16(vget_high_s16(in[0]), 2);
+ lo[1] = vshll_n_s16(vget_low_s16(in[1]), 2);
+ hi[1] = vshll_n_s16(vget_high_s16(in[1]), 2);
+ lo[2] = vshll_n_s16(vget_low_s16(in[2]), 2);
+ hi[2] = vshll_n_s16(vget_high_s16(in[2]), 2);
+ lo[3] = vshll_n_s16(vget_low_s16(in[3]), 2);
+ hi[3] = vshll_n_s16(vget_high_s16(in[3]), 2);
+ lo[4] = vshll_n_s16(vget_low_s16(in[4]), 2);
+ hi[4] = vshll_n_s16(vget_high_s16(in[4]), 2);
+ lo[5] = vshll_n_s16(vget_low_s16(in[5]), 2);
+ hi[5] = vshll_n_s16(vget_high_s16(in[5]), 2);
+ lo[6] = vshll_n_s16(vget_low_s16(in[6]), 2);
+ hi[6] = vshll_n_s16(vget_high_s16(in[6]), 2);
+ lo[7] = vshll_n_s16(vget_low_s16(in[7]), 2);
+ hi[7] = vshll_n_s16(vget_high_s16(in[7]), 2);
+}
+
+/* Right shift with rounding.
+ * First extract the sign bit (bit 31) of each element.
+ * If bit == 1, this computes (temp_out[j] + (temp_out[j] < 0)) >> 1.
+ * If bit == 2, it essentially computes the expression:
+ *
+ * out[j * 16 + i] = (temp_out[j] + 1 + (temp_out[j] < 0)) >> 2;
+ *
+ * for each row.
+ */
+static INLINE void highbd_right_shift_8x8(int32x4_t *lo, int32x4_t *hi,
+ const int bit) {
+ int32x4_t sign_lo[8], sign_hi[8];
+ sign_lo[0] = vshrq_n_s32(lo[0], 31);
+ sign_hi[0] = vshrq_n_s32(hi[0], 31);
+ sign_lo[1] = vshrq_n_s32(lo[1], 31);
+ sign_hi[1] = vshrq_n_s32(hi[1], 31);
+ sign_lo[2] = vshrq_n_s32(lo[2], 31);
+ sign_hi[2] = vshrq_n_s32(hi[2], 31);
+ sign_lo[3] = vshrq_n_s32(lo[3], 31);
+ sign_hi[3] = vshrq_n_s32(hi[3], 31);
+ sign_lo[4] = vshrq_n_s32(lo[4], 31);
+ sign_hi[4] = vshrq_n_s32(hi[4], 31);
+ sign_lo[5] = vshrq_n_s32(lo[5], 31);
+ sign_hi[5] = vshrq_n_s32(hi[5], 31);
+ sign_lo[6] = vshrq_n_s32(lo[6], 31);
+ sign_hi[6] = vshrq_n_s32(hi[6], 31);
+ sign_lo[7] = vshrq_n_s32(lo[7], 31);
+ sign_hi[7] = vshrq_n_s32(hi[7], 31);
+
+ if (bit == 2) {
+ const int32x4_t const_rounding = vdupq_n_s32(1);
+ lo[0] = vaddq_s32(lo[0], const_rounding);
+ hi[0] = vaddq_s32(hi[0], const_rounding);
+ lo[1] = vaddq_s32(lo[1], const_rounding);
+ hi[1] = vaddq_s32(hi[1], const_rounding);
+ lo[2] = vaddq_s32(lo[2], const_rounding);
+ hi[2] = vaddq_s32(hi[2], const_rounding);
+ lo[3] = vaddq_s32(lo[3], const_rounding);
+ hi[3] = vaddq_s32(hi[3], const_rounding);
+ lo[4] = vaddq_s32(lo[4], const_rounding);
+ hi[4] = vaddq_s32(hi[4], const_rounding);
+ lo[5] = vaddq_s32(lo[5], const_rounding);
+ hi[5] = vaddq_s32(hi[5], const_rounding);
+ lo[6] = vaddq_s32(lo[6], const_rounding);
+ hi[6] = vaddq_s32(hi[6], const_rounding);
+ lo[7] = vaddq_s32(lo[7], const_rounding);
+ hi[7] = vaddq_s32(hi[7], const_rounding);
+ }
+
+ lo[0] = vsubq_s32(lo[0], sign_lo[0]);
+ hi[0] = vsubq_s32(hi[0], sign_hi[0]);
+ lo[1] = vsubq_s32(lo[1], sign_lo[1]);
+ hi[1] = vsubq_s32(hi[1], sign_hi[1]);
+ lo[2] = vsubq_s32(lo[2], sign_lo[2]);
+ hi[2] = vsubq_s32(hi[2], sign_hi[2]);
+ lo[3] = vsubq_s32(lo[3], sign_lo[3]);
+ hi[3] = vsubq_s32(hi[3], sign_hi[3]);
+ lo[4] = vsubq_s32(lo[4], sign_lo[4]);
+ hi[4] = vsubq_s32(hi[4], sign_hi[4]);
+ lo[5] = vsubq_s32(lo[5], sign_lo[5]);
+ hi[5] = vsubq_s32(hi[5], sign_hi[5]);
+ lo[6] = vsubq_s32(lo[6], sign_lo[6]);
+ hi[6] = vsubq_s32(hi[6], sign_hi[6]);
+ lo[7] = vsubq_s32(lo[7], sign_lo[7]);
+ hi[7] = vsubq_s32(hi[7], sign_hi[7]);
+
+ if (bit == 1) {
+ lo[0] = vshrq_n_s32(lo[0], 1);
+ hi[0] = vshrq_n_s32(hi[0], 1);
+ lo[1] = vshrq_n_s32(lo[1], 1);
+ hi[1] = vshrq_n_s32(hi[1], 1);
+ lo[2] = vshrq_n_s32(lo[2], 1);
+ hi[2] = vshrq_n_s32(hi[2], 1);
+ lo[3] = vshrq_n_s32(lo[3], 1);
+ hi[3] = vshrq_n_s32(hi[3], 1);
+ lo[4] = vshrq_n_s32(lo[4], 1);
+ hi[4] = vshrq_n_s32(hi[4], 1);
+ lo[5] = vshrq_n_s32(lo[5], 1);
+ hi[5] = vshrq_n_s32(hi[5], 1);
+ lo[6] = vshrq_n_s32(lo[6], 1);
+ hi[6] = vshrq_n_s32(hi[6], 1);
+ lo[7] = vshrq_n_s32(lo[7], 1);
+ hi[7] = vshrq_n_s32(hi[7], 1);
+ } else {
+ lo[0] = vshrq_n_s32(lo[0], 2);
+ hi[0] = vshrq_n_s32(hi[0], 2);
+ lo[1] = vshrq_n_s32(lo[1], 2);
+ hi[1] = vshrq_n_s32(hi[1], 2);
+ lo[2] = vshrq_n_s32(lo[2], 2);
+ hi[2] = vshrq_n_s32(hi[2], 2);
+ lo[3] = vshrq_n_s32(lo[3], 2);
+ hi[3] = vshrq_n_s32(hi[3], 2);
+ lo[4] = vshrq_n_s32(lo[4], 2);
+ hi[4] = vshrq_n_s32(hi[4], 2);
+ lo[5] = vshrq_n_s32(lo[5], 2);
+ hi[5] = vshrq_n_s32(hi[5], 2);
+ lo[6] = vshrq_n_s32(lo[6], 2);
+ hi[6] = vshrq_n_s32(hi[6], 2);
+ lo[7] = vshrq_n_s32(lo[7], 2);
+ hi[7] = vshrq_n_s32(hi[7], 2);
+ }
+}
+
+static INLINE void highbd_write_buffer_8x8(tran_low_t *output, int32x4_t *lo,
+ int32x4_t *hi, int stride) {
+ vst1q_s32(output + 0 * stride, lo[0]);
+ vst1q_s32(output + 0 * stride + 4, hi[0]);
+ vst1q_s32(output + 1 * stride, lo[1]);
+ vst1q_s32(output + 1 * stride + 4, hi[1]);
+ vst1q_s32(output + 2 * stride, lo[2]);
+ vst1q_s32(output + 2 * stride + 4, hi[2]);
+ vst1q_s32(output + 3 * stride, lo[3]);
+ vst1q_s32(output + 3 * stride + 4, hi[3]);
+ vst1q_s32(output + 4 * stride, lo[4]);
+ vst1q_s32(output + 4 * stride + 4, hi[4]);
+ vst1q_s32(output + 5 * stride, lo[5]);
+ vst1q_s32(output + 5 * stride + 4, hi[5]);
+ vst1q_s32(output + 6 * stride, lo[6]);
+ vst1q_s32(output + 6 * stride + 4, hi[6]);
+ vst1q_s32(output + 7 * stride, lo[7]);
+ vst1q_s32(output + 7 * stride + 4, hi[7]);
+}
+
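+// High bit-depth 8-point forward ADST over 8 columns in 32-bit lanes.
+// Stage 1 and 2 butterflies accumulate in 64 bits (s64_lo/s64_hi use doubled
+// indices) and are narrowed back with the add/sub *_round_narrow helpers.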
+static INLINE void highbd_fadst8x8_neon(int32x4_t *lo /*[8]*/,
+ int32x4_t *hi /*[8]*/) {
+ int32x4_t s_lo[8], s_hi[8];
+ int32x4_t t_lo[8], t_hi[8];
+ int32x4_t x_lo[8], x_hi[8];
+ int64x2_t s64_lo[16], s64_hi[16];
+
+ x_lo[0] = lo[7];
+ x_hi[0] = hi[7];
+ x_lo[1] = lo[0];
+ x_hi[1] = hi[0];
+ x_lo[2] = lo[5];
+ x_hi[2] = hi[5];
+ x_lo[3] = lo[2];
+ x_hi[3] = hi[2];
+ x_lo[4] = lo[3];
+ x_hi[4] = hi[3];
+ x_lo[5] = lo[4];
+ x_hi[5] = hi[4];
+ x_lo[6] = lo[1];
+ x_hi[6] = hi[1];
+ x_lo[7] = lo[6];
+ x_hi[7] = hi[6];
+
+ // stage 1
+ // s0 = cospi_2_64 * x0 + cospi_30_64 * x1;
+ // s1 = cospi_30_64 * x0 - cospi_2_64 * x1;
+ butterfly_two_coeff_s32_s64_noround(
+ x_lo[0], x_hi[0], x_lo[1], x_hi[1], cospi_2_64, cospi_30_64,
+ &s64_lo[2 * 0], &s64_hi[2 * 0], &s64_lo[2 * 1], &s64_hi[2 * 1]);
+ // s2 = cospi_10_64 * x2 + cospi_22_64 * x3;
+ // s3 = cospi_22_64 * x2 - cospi_10_64 * x3;
+ butterfly_two_coeff_s32_s64_noround(
+ x_lo[2], x_hi[2], x_lo[3], x_hi[3], cospi_10_64, cospi_22_64,
+ &s64_lo[2 * 2], &s64_hi[2 * 2], &s64_lo[2 * 3], &s64_hi[2 * 3]);
+
+ // s4 = cospi_18_64 * x4 + cospi_14_64 * x5;
+ // s5 = cospi_14_64 * x4 - cospi_18_64 * x5;
+ butterfly_two_coeff_s32_s64_noround(
+ x_lo[4], x_hi[4], x_lo[5], x_hi[5], cospi_18_64, cospi_14_64,
+ &s64_lo[2 * 4], &s64_hi[2 * 4], &s64_lo[2 * 5], &s64_hi[2 * 5]);
+
+ // s6 = cospi_26_64 * x6 + cospi_6_64 * x7;
+ // s7 = cospi_6_64 * x6 - cospi_26_64 * x7;
+ butterfly_two_coeff_s32_s64_noround(
+ x_lo[6], x_hi[6], x_lo[7], x_hi[7], cospi_26_64, cospi_6_64,
+ &s64_lo[2 * 6], &s64_hi[2 * 6], &s64_lo[2 * 7], &s64_hi[2 * 7]);
+
+ // fdct_round_shift, indices are doubled
+ t_lo[0] = add_s64_round_narrow(&s64_lo[2 * 0], &s64_lo[2 * 4]);
+ t_hi[0] = add_s64_round_narrow(&s64_hi[2 * 0], &s64_hi[2 * 4]);
+ t_lo[1] = add_s64_round_narrow(&s64_lo[2 * 1], &s64_lo[2 * 5]);
+ t_hi[1] = add_s64_round_narrow(&s64_hi[2 * 1], &s64_hi[2 * 5]);
+ t_lo[2] = add_s64_round_narrow(&s64_lo[2 * 2], &s64_lo[2 * 6]);
+ t_hi[2] = add_s64_round_narrow(&s64_hi[2 * 2], &s64_hi[2 * 6]);
+ t_lo[3] = add_s64_round_narrow(&s64_lo[2 * 3], &s64_lo[2 * 7]);
+ t_hi[3] = add_s64_round_narrow(&s64_hi[2 * 3], &s64_hi[2 * 7]);
+ t_lo[4] = sub_s64_round_narrow(&s64_lo[2 * 0], &s64_lo[2 * 4]);
+ t_hi[4] = sub_s64_round_narrow(&s64_hi[2 * 0], &s64_hi[2 * 4]);
+ t_lo[5] = sub_s64_round_narrow(&s64_lo[2 * 1], &s64_lo[2 * 5]);
+ t_hi[5] = sub_s64_round_narrow(&s64_hi[2 * 1], &s64_hi[2 * 5]);
+ t_lo[6] = sub_s64_round_narrow(&s64_lo[2 * 2], &s64_lo[2 * 6]);
+ t_hi[6] = sub_s64_round_narrow(&s64_hi[2 * 2], &s64_hi[2 * 6]);
+ t_lo[7] = sub_s64_round_narrow(&s64_lo[2 * 3], &s64_lo[2 * 7]);
+ t_hi[7] = sub_s64_round_narrow(&s64_hi[2 * 3], &s64_hi[2 * 7]);
+
+ // stage 2
+ s_lo[0] = t_lo[0];
+ s_hi[0] = t_hi[0];
+ s_lo[1] = t_lo[1];
+ s_hi[1] = t_hi[1];
+ s_lo[2] = t_lo[2];
+ s_hi[2] = t_hi[2];
+ s_lo[3] = t_lo[3];
+ s_hi[3] = t_hi[3];
+ // s4 = cospi_8_64 * x4 + cospi_24_64 * x5;
+ // s5 = cospi_24_64 * x4 - cospi_8_64 * x5;
+ butterfly_two_coeff_s32_s64_noround(
+ t_lo[4], t_hi[4], t_lo[5], t_hi[5], cospi_8_64, cospi_24_64,
+ &s64_lo[2 * 4], &s64_hi[2 * 4], &s64_lo[2 * 5], &s64_hi[2 * 5]);
+
+ // s6 = -cospi_24_64 * x6 + cospi_8_64 * x7;
+ // s7 = cospi_8_64 * x6 + cospi_24_64 * x7;
+ butterfly_two_coeff_s32_s64_noround(
+ t_lo[6], t_hi[6], t_lo[7], t_hi[7], -cospi_24_64, cospi_8_64,
+ &s64_lo[2 * 6], &s64_hi[2 * 6], &s64_lo[2 * 7], &s64_hi[2 * 7]);
+
+ // fdct_round_shift
+ // s0 + s2
+ t_lo[0] = add_s32_s64_narrow(s_lo[0], s_lo[2]);
+ t_hi[0] = add_s32_s64_narrow(s_hi[0], s_hi[2]);
+ // s0 - s2
+ t_lo[2] = sub_s32_s64_narrow(s_lo[0], s_lo[2]);
+ t_hi[2] = sub_s32_s64_narrow(s_hi[0], s_hi[2]);
+
+ // s1 + s3
+ t_lo[1] = add_s32_s64_narrow(s_lo[1], s_lo[3]);
+ t_hi[1] = add_s32_s64_narrow(s_hi[1], s_hi[3]);
+ // s1 - s3
+ t_lo[3] = sub_s32_s64_narrow(s_lo[1], s_lo[3]);
+ t_hi[3] = sub_s32_s64_narrow(s_hi[1], s_hi[3]);
+
+ // s4 + s6
+ t_lo[4] = add_s64_round_narrow(&s64_lo[2 * 4], &s64_lo[2 * 6]);
+ t_hi[4] = add_s64_round_narrow(&s64_hi[2 * 4], &s64_hi[2 * 6]);
+ // s4 - s6
+ t_lo[6] = sub_s64_round_narrow(&s64_lo[2 * 4], &s64_lo[2 * 6]);
+ t_hi[6] = sub_s64_round_narrow(&s64_hi[2 * 4], &s64_hi[2 * 6]);
+
+ // s5 + s7
+ t_lo[5] = add_s64_round_narrow(&s64_lo[2 * 5], &s64_lo[2 * 7]);
+ t_hi[5] = add_s64_round_narrow(&s64_hi[2 * 5], &s64_hi[2 * 7]);
+ // s5 - s7
+ t_lo[7] = sub_s64_round_narrow(&s64_lo[2 * 5], &s64_lo[2 * 7]);
+ t_hi[7] = sub_s64_round_narrow(&s64_hi[2 * 5], &s64_hi[2 * 7]);
+
+ // stage 3
+ // s2 = cospi_16_64 * (x2 + x3)
+ // s3 = cospi_16_64 * (x2 - x3)
+ butterfly_one_coeff_s32_fast(t_lo[2], t_hi[2], t_lo[3], t_hi[3], cospi_16_64,
+ &s_lo[2], &s_hi[2], &s_lo[3], &s_hi[3]);
+
+ // s6 = cospi_16_64 * (x6 + x7)
+ // s7 = cospi_16_64 * (x6 - x7)
+ butterfly_one_coeff_s32_fast(t_lo[6], t_hi[6], t_lo[7], t_hi[7], cospi_16_64,
+ &s_lo[6], &s_hi[6], &s_lo[7], &s_hi[7]);
+
+  // t0, t1, t4, t5 skip the stage 3 butterfly; odd output rows are negated.
+ lo[0] = t_lo[0];
+ hi[0] = t_hi[0];
+ lo[2] = s_lo[6];
+ hi[2] = s_hi[6];
+ lo[4] = s_lo[3];
+ hi[4] = s_hi[3];
+ lo[6] = t_lo[5];
+ hi[6] = t_hi[5];
+
+ lo[1] = vnegq_s32(t_lo[4]);
+ hi[1] = vnegq_s32(t_hi[4]);
+ lo[3] = vnegq_s32(s_lo[2]);
+ hi[3] = vnegq_s32(s_hi[2]);
+ lo[5] = vnegq_s32(s_lo[7]);
+ hi[5] = vnegq_s32(s_hi[7]);
+ lo[7] = vnegq_s32(t_lo[1]);
+ hi[7] = vnegq_s32(t_hi[1]);
+
+ transpose_s32_8x8_2(lo, hi, lo, hi);
+}
+
+void vp9_highbd_fht8x8_neon(const int16_t *input, tran_low_t *output,
+ int stride, int tx_type) {
+ int32x4_t lo[8], hi[8];
+
+ switch (tx_type) {
+ case DCT_DCT: vpx_highbd_fdct8x8_neon(input, output, stride); break;
+ case ADST_DCT:
+ highbd_load_buffer_8x8(input, lo, hi, stride);
+ highbd_fadst8x8_neon(lo, hi);
+ // pass1 variant is not precise enough
+ vpx_highbd_fdct8x8_pass2_neon(lo, hi);
+ highbd_right_shift_8x8(lo, hi, 1);
+ highbd_write_buffer_8x8(output, lo, hi, 8);
+ break;
+ case DCT_ADST:
+ highbd_load_buffer_8x8(input, lo, hi, stride);
+ // pass1 variant is not precise enough
+ vpx_highbd_fdct8x8_pass2_neon(lo, hi);
+ highbd_fadst8x8_neon(lo, hi);
+ highbd_right_shift_8x8(lo, hi, 1);
+ highbd_write_buffer_8x8(output, lo, hi, 8);
+ break;
+ default:
+ assert(tx_type == ADST_ADST);
+ highbd_load_buffer_8x8(input, lo, hi, stride);
+ highbd_fadst8x8_neon(lo, hi);
+ highbd_fadst8x8_neon(lo, hi);
+ highbd_right_shift_8x8(lo, hi, 1);
+ highbd_write_buffer_8x8(output, lo, hi, 8);
+ break;
+ }
+}
+
+static INLINE void highbd_load_buffer_16x16(
+ const int16_t *input, int32x4_t *left1 /*[16]*/, int32x4_t *right1 /*[16]*/,
+ int32x4_t *left2 /*[16]*/, int32x4_t *right2 /*[16]*/, int stride) {
+ // load first 8 columns
+ highbd_load_buffer_8x8(input, left1, right1, stride);
+ highbd_load_buffer_8x8(input + 8 * stride, left1 + 8, right1 + 8, stride);
+
+ input += 8;
+ // load second 8 columns
+ highbd_load_buffer_8x8(input, left2, right2, stride);
+ highbd_load_buffer_8x8(input + 8 * stride, left2 + 8, right2 + 8, stride);
+}
+
+static INLINE void highbd_write_buffer_16x16(
+ tran_low_t *output, int32x4_t *left1 /*[16]*/, int32x4_t *right1 /*[16]*/,
+ int32x4_t *left2 /*[16]*/, int32x4_t *right2 /*[16]*/, int stride) {
+ // write first 8 columns
+ highbd_write_buffer_8x8(output, left1, right1, stride);
+ highbd_write_buffer_8x8(output + 8 * stride, left1 + 8, right1 + 8, stride);
+
+ // write second 8 columns
+ output += 8;
+ highbd_write_buffer_8x8(output, left2, right2, stride);
+ highbd_write_buffer_8x8(output + 8 * stride, left2 + 8, right2 + 8, stride);
+}
+
+static INLINE void highbd_right_shift_16x16(int32x4_t *left1 /*[16]*/,
+ int32x4_t *right1 /*[16]*/,
+ int32x4_t *left2 /*[16]*/,
+ int32x4_t *right2 /*[16]*/,
+ const int bit) {
+ // perform rounding operations
+ highbd_right_shift_8x8(left1, right1, bit);
+ highbd_right_shift_8x8(left1 + 8, right1 + 8, bit);
+ highbd_right_shift_8x8(left2, right2, bit);
+ highbd_right_shift_8x8(left2 + 8, right2 + 8, bit);
+}
+
+static void highbd_fdct16_8col(int32x4_t *left, int32x4_t *right) {
+ // perform 16x16 1-D DCT for 8 columns
+ int32x4_t s1_lo[8], s1_hi[8], s2_lo[8], s2_hi[8], s3_lo[8], s3_hi[8];
+ int32x4_t left8[8], right8[8];
+
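+  // The even-indexed outputs come from an 8-point DCT of the pairwise sums
+  // (left8/right8); the odd-indexed outputs are derived from the pairwise
+  // differences (s1_lo/s1_hi) in the steps below.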
+ // stage 1
+ left8[0] = vaddq_s32(left[0], left[15]);
+ right8[0] = vaddq_s32(right[0], right[15]);
+ left8[1] = vaddq_s32(left[1], left[14]);
+ right8[1] = vaddq_s32(right[1], right[14]);
+ left8[2] = vaddq_s32(left[2], left[13]);
+ right8[2] = vaddq_s32(right[2], right[13]);
+ left8[3] = vaddq_s32(left[3], left[12]);
+ right8[3] = vaddq_s32(right[3], right[12]);
+ left8[4] = vaddq_s32(left[4], left[11]);
+ right8[4] = vaddq_s32(right[4], right[11]);
+ left8[5] = vaddq_s32(left[5], left[10]);
+ right8[5] = vaddq_s32(right[5], right[10]);
+ left8[6] = vaddq_s32(left[6], left[9]);
+ right8[6] = vaddq_s32(right[6], right[9]);
+ left8[7] = vaddq_s32(left[7], left[8]);
+ right8[7] = vaddq_s32(right[7], right[8]);
+
+ // step 1
+ s1_lo[0] = vsubq_s32(left[7], left[8]);
+ s1_hi[0] = vsubq_s32(right[7], right[8]);
+ s1_lo[1] = vsubq_s32(left[6], left[9]);
+ s1_hi[1] = vsubq_s32(right[6], right[9]);
+ s1_lo[2] = vsubq_s32(left[5], left[10]);
+ s1_hi[2] = vsubq_s32(right[5], right[10]);
+ s1_lo[3] = vsubq_s32(left[4], left[11]);
+ s1_hi[3] = vsubq_s32(right[4], right[11]);
+ s1_lo[4] = vsubq_s32(left[3], left[12]);
+ s1_hi[4] = vsubq_s32(right[3], right[12]);
+ s1_lo[5] = vsubq_s32(left[2], left[13]);
+ s1_hi[5] = vsubq_s32(right[2], right[13]);
+ s1_lo[6] = vsubq_s32(left[1], left[14]);
+ s1_hi[6] = vsubq_s32(right[1], right[14]);
+ s1_lo[7] = vsubq_s32(left[0], left[15]);
+ s1_hi[7] = vsubq_s32(right[0], right[15]);
+
+ // pass1 variant is not accurate enough
+ vpx_highbd_fdct8x8_pass2_notranspose_neon(left8, right8);
+
+ // step 2
+ // step2[2] = (step1[5] - step1[2]) * cospi_16_64;
+ // step2[5] = (step1[5] + step1[2]) * cospi_16_64;
+ butterfly_one_coeff_s32_s64_narrow(s1_lo[5], s1_hi[5], s1_lo[2], s1_hi[2],
+ cospi_16_64, &s2_lo[5], &s2_hi[5],
+ &s2_lo[2], &s2_hi[2]);
+ // step2[3] = (step1[4] - step1[3]) * cospi_16_64;
+ // step2[4] = (step1[4] + step1[3]) * cospi_16_64;
+ butterfly_one_coeff_s32_s64_narrow(s1_lo[4], s1_hi[4], s1_lo[3], s1_hi[3],
+ cospi_16_64, &s2_lo[4], &s2_hi[4],
+ &s2_lo[3], &s2_hi[3]);
+
+ // step 3
+ s3_lo[0] = vaddq_s32(s1_lo[0], s2_lo[3]);
+ s3_hi[0] = vaddq_s32(s1_hi[0], s2_hi[3]);
+ s3_lo[1] = vaddq_s32(s1_lo[1], s2_lo[2]);
+ s3_hi[1] = vaddq_s32(s1_hi[1], s2_hi[2]);
+ s3_lo[2] = vsubq_s32(s1_lo[1], s2_lo[2]);
+ s3_hi[2] = vsubq_s32(s1_hi[1], s2_hi[2]);
+ s3_lo[3] = vsubq_s32(s1_lo[0], s2_lo[3]);
+ s3_hi[3] = vsubq_s32(s1_hi[0], s2_hi[3]);
+ s3_lo[4] = vsubq_s32(s1_lo[7], s2_lo[4]);
+ s3_hi[4] = vsubq_s32(s1_hi[7], s2_hi[4]);
+ s3_lo[5] = vsubq_s32(s1_lo[6], s2_lo[5]);
+ s3_hi[5] = vsubq_s32(s1_hi[6], s2_hi[5]);
+ s3_lo[6] = vaddq_s32(s1_lo[6], s2_lo[5]);
+ s3_hi[6] = vaddq_s32(s1_hi[6], s2_hi[5]);
+ s3_lo[7] = vaddq_s32(s1_lo[7], s2_lo[4]);
+ s3_hi[7] = vaddq_s32(s1_hi[7], s2_hi[4]);
+
+ // step 4
+ // s2[1] = cospi_24_64 * s3[6] - cospi_8_64 * s3[1]
+ // s2[6] = cospi_8_64 * s3[6] + cospi_24_64 * s3[1]
+ butterfly_two_coeff_s32_s64_narrow(s3_lo[6], s3_hi[6], s3_lo[1], s3_hi[1],
+ cospi_8_64, cospi_24_64, &s2_lo[6],
+ &s2_hi[6], &s2_lo[1], &s2_hi[1]);
+
+ // s2[5] = cospi_8_64 * s3[2] - cospi_24_64 * s3[5]
+ // s2[2] = cospi_24_64 * s3[2] + cospi_8_64 * s3[5]
+ butterfly_two_coeff_s32_s64_narrow(s3_lo[2], s3_hi[2], s3_lo[5], s3_hi[5],
+ cospi_24_64, cospi_8_64, &s2_lo[2],
+ &s2_hi[2], &s2_lo[5], &s2_hi[5]);
+
+ // step 5
+ s1_lo[0] = vaddq_s32(s3_lo[0], s2_lo[1]);
+ s1_hi[0] = vaddq_s32(s3_hi[0], s2_hi[1]);
+ s1_lo[1] = vsubq_s32(s3_lo[0], s2_lo[1]);
+ s1_hi[1] = vsubq_s32(s3_hi[0], s2_hi[1]);
+ s1_lo[2] = vaddq_s32(s3_lo[3], s2_lo[2]);
+ s1_hi[2] = vaddq_s32(s3_hi[3], s2_hi[2]);
+ s1_lo[3] = vsubq_s32(s3_lo[3], s2_lo[2]);
+ s1_hi[3] = vsubq_s32(s3_hi[3], s2_hi[2]);
+ s1_lo[4] = vsubq_s32(s3_lo[4], s2_lo[5]);
+ s1_hi[4] = vsubq_s32(s3_hi[4], s2_hi[5]);
+ s1_lo[5] = vaddq_s32(s3_lo[4], s2_lo[5]);
+ s1_hi[5] = vaddq_s32(s3_hi[4], s2_hi[5]);
+ s1_lo[6] = vsubq_s32(s3_lo[7], s2_lo[6]);
+ s1_hi[6] = vsubq_s32(s3_hi[7], s2_hi[6]);
+ s1_lo[7] = vaddq_s32(s3_lo[7], s2_lo[6]);
+ s1_hi[7] = vaddq_s32(s3_hi[7], s2_hi[6]);
+
+ // step 6
+ // out[1] = step1[7] * cospi_2_64 + step1[0] * cospi_30_64
+ // out[15] = step1[7] * cospi_30_64 - step1[0] * cospi_2_64
+ butterfly_two_coeff_s32_s64_narrow(s1_lo[7], s1_hi[7], s1_lo[0], s1_hi[0],
+ cospi_2_64, cospi_30_64, &left[1],
+ &right[1], &left[15], &right[15]);
+
+ // out[9] = step1[6] * cospi_18_64 + step1[1] * cospi_14_64
+ // out[7] = step1[6] * cospi_14_64 - step1[1] * cospi_18_64
+ butterfly_two_coeff_s32_s64_narrow(s1_lo[6], s1_hi[6], s1_lo[1], s1_hi[1],
+ cospi_18_64, cospi_14_64, &left[9],
+ &right[9], &left[7], &right[7]);
+
+ // out[5] = step1[5] * cospi_10_64 + step1[2] * cospi_22_64
+ // out[11] = step1[5] * cospi_22_64 - step1[2] * cospi_10_64
+ butterfly_two_coeff_s32_s64_narrow(s1_lo[5], s1_hi[5], s1_lo[2], s1_hi[2],
+ cospi_10_64, cospi_22_64, &left[5],
+ &right[5], &left[11], &right[11]);
+
+ // out[13] = step1[4] * cospi_26_64 + step1[3] * cospi_6_64
+ // out[3] = step1[4] * cospi_6_64 - step1[3] * cospi_26_64
+ butterfly_two_coeff_s32_s64_narrow(s1_lo[4], s1_hi[4], s1_lo[3], s1_hi[3],
+ cospi_26_64, cospi_6_64, &left[13],
+ &right[13], &left[3], &right[3]);
+
+ left[0] = left8[0];
+ right[0] = right8[0];
+ left[2] = left8[1];
+ right[2] = right8[1];
+ left[4] = left8[2];
+ right[4] = right8[2];
+ left[6] = left8[3];
+ right[6] = right8[3];
+ left[8] = left8[4];
+ right[8] = right8[4];
+ left[10] = left8[5];
+ right[10] = right8[5];
+ left[12] = left8[6];
+ right[12] = right8[6];
+ left[14] = left8[7];
+ right[14] = right8[7];
+}
+
+static void highbd_fadst16_8col(int32x4_t *left, int32x4_t *right) {
+ // perform 16x16 1-D ADST for 8 columns
+ int32x4_t x_lo[16], x_hi[16];
+ int32x4_t s_lo[16], s_hi[16];
+ int32x4_t t_lo[16], t_hi[16];
+ int64x2_t s64_lo[32], s64_hi[32];
+
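+  // Reorder the inputs per the ADST permutation: even-indexed x are taken
+  // from the end of the column (15, 13, ..., 1), odd-indexed x from the
+  // beginning (0, 2, ..., 14).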
+ x_lo[0] = left[15];
+ x_hi[0] = right[15];
+ x_lo[1] = left[0];
+ x_hi[1] = right[0];
+ x_lo[2] = left[13];
+ x_hi[2] = right[13];
+ x_lo[3] = left[2];
+ x_hi[3] = right[2];
+ x_lo[4] = left[11];
+ x_hi[4] = right[11];
+ x_lo[5] = left[4];
+ x_hi[5] = right[4];
+ x_lo[6] = left[9];
+ x_hi[6] = right[9];
+ x_lo[7] = left[6];
+ x_hi[7] = right[6];
+ x_lo[8] = left[7];
+ x_hi[8] = right[7];
+ x_lo[9] = left[8];
+ x_hi[9] = right[8];
+ x_lo[10] = left[5];
+ x_hi[10] = right[5];
+ x_lo[11] = left[10];
+ x_hi[11] = right[10];
+ x_lo[12] = left[3];
+ x_hi[12] = right[3];
+ x_lo[13] = left[12];
+ x_hi[13] = right[12];
+ x_lo[14] = left[1];
+ x_hi[14] = right[1];
+ x_lo[15] = left[14];
+ x_hi[15] = right[14];
+
+ // stage 1, indices are doubled
+ // s0 = cospi_1_64 * x0 + cospi_31_64 * x1;
+ // s1 = cospi_31_64 * x0 - cospi_1_64 * x1;
+ butterfly_two_coeff_s32_s64_noround(
+ x_lo[0], x_hi[0], x_lo[1], x_hi[1], cospi_1_64, cospi_31_64,
+ &s64_lo[2 * 0], &s64_hi[2 * 0], &s64_lo[2 * 1], &s64_hi[2 * 1]);
+ // s2 = cospi_5_64 * x2 + cospi_27_64 * x3;
+ // s3 = cospi_27_64 * x2 - cospi_5_64 * x3;
+ butterfly_two_coeff_s32_s64_noround(
+ x_lo[2], x_hi[2], x_lo[3], x_hi[3], cospi_5_64, cospi_27_64,
+ &s64_lo[2 * 2], &s64_hi[2 * 2], &s64_lo[2 * 3], &s64_hi[2 * 3]);
+ // s4 = cospi_9_64 * x4 + cospi_23_64 * x5;
+ // s5 = cospi_23_64 * x4 - cospi_9_64 * x5;
+ butterfly_two_coeff_s32_s64_noround(
+ x_lo[4], x_hi[4], x_lo[5], x_hi[5], cospi_9_64, cospi_23_64,
+ &s64_lo[2 * 4], &s64_hi[2 * 4], &s64_lo[2 * 5], &s64_hi[2 * 5]);
+ // s6 = cospi_13_64 * x6 + cospi_19_64 * x7;
+ // s7 = cospi_19_64 * x6 - cospi_13_64 * x7;
+ butterfly_two_coeff_s32_s64_noround(
+ x_lo[6], x_hi[6], x_lo[7], x_hi[7], cospi_13_64, cospi_19_64,
+ &s64_lo[2 * 6], &s64_hi[2 * 6], &s64_lo[2 * 7], &s64_hi[2 * 7]);
+ // s8 = cospi_17_64 * x8 + cospi_15_64 * x9;
+ // s9 = cospi_15_64 * x8 - cospi_17_64 * x9;
+ butterfly_two_coeff_s32_s64_noround(
+ x_lo[8], x_hi[8], x_lo[9], x_hi[9], cospi_17_64, cospi_15_64,
+ &s64_lo[2 * 8], &s64_hi[2 * 8], &s64_lo[2 * 9], &s64_hi[2 * 9]);
+ // s10 = cospi_21_64 * x10 + cospi_11_64 * x11;
+ // s11 = cospi_11_64 * x10 - cospi_21_64 * x11;
+ butterfly_two_coeff_s32_s64_noround(
+ x_lo[10], x_hi[10], x_lo[11], x_hi[11], cospi_21_64, cospi_11_64,
+ &s64_lo[2 * 10], &s64_hi[2 * 10], &s64_lo[2 * 11], &s64_hi[2 * 11]);
+ // s12 = cospi_25_64 * x12 + cospi_7_64 * x13;
+ // s13 = cospi_7_64 * x12 - cospi_25_64 * x13;
+ butterfly_two_coeff_s32_s64_noround(
+ x_lo[12], x_hi[12], x_lo[13], x_hi[13], cospi_25_64, cospi_7_64,
+ &s64_lo[2 * 12], &s64_hi[2 * 12], &s64_lo[2 * 13], &s64_hi[2 * 13]);
+ // s14 = cospi_29_64 * x14 + cospi_3_64 * x15;
+ // s15 = cospi_3_64 * x14 - cospi_29_64 * x15;
+ butterfly_two_coeff_s32_s64_noround(
+ x_lo[14], x_hi[14], x_lo[15], x_hi[15], cospi_29_64, cospi_3_64,
+ &s64_lo[2 * 14], &s64_hi[2 * 14], &s64_lo[2 * 15], &s64_hi[2 * 15]);
+
+ // fdct_round_shift, indices are doubled
+ t_lo[0] = add_s64_round_narrow(&s64_lo[2 * 0], &s64_lo[2 * 8]);
+ t_hi[0] = add_s64_round_narrow(&s64_hi[2 * 0], &s64_hi[2 * 8]);
+ t_lo[1] = add_s64_round_narrow(&s64_lo[2 * 1], &s64_lo[2 * 9]);
+ t_hi[1] = add_s64_round_narrow(&s64_hi[2 * 1], &s64_hi[2 * 9]);
+ t_lo[2] = add_s64_round_narrow(&s64_lo[2 * 2], &s64_lo[2 * 10]);
+ t_hi[2] = add_s64_round_narrow(&s64_hi[2 * 2], &s64_hi[2 * 10]);
+ t_lo[3] = add_s64_round_narrow(&s64_lo[2 * 3], &s64_lo[2 * 11]);
+ t_hi[3] = add_s64_round_narrow(&s64_hi[2 * 3], &s64_hi[2 * 11]);
+ t_lo[4] = add_s64_round_narrow(&s64_lo[2 * 4], &s64_lo[2 * 12]);
+ t_hi[4] = add_s64_round_narrow(&s64_hi[2 * 4], &s64_hi[2 * 12]);
+ t_lo[5] = add_s64_round_narrow(&s64_lo[2 * 5], &s64_lo[2 * 13]);
+ t_hi[5] = add_s64_round_narrow(&s64_hi[2 * 5], &s64_hi[2 * 13]);
+ t_lo[6] = add_s64_round_narrow(&s64_lo[2 * 6], &s64_lo[2 * 14]);
+ t_hi[6] = add_s64_round_narrow(&s64_hi[2 * 6], &s64_hi[2 * 14]);
+ t_lo[7] = add_s64_round_narrow(&s64_lo[2 * 7], &s64_lo[2 * 15]);
+ t_hi[7] = add_s64_round_narrow(&s64_hi[2 * 7], &s64_hi[2 * 15]);
+ t_lo[8] = sub_s64_round_narrow(&s64_lo[2 * 0], &s64_lo[2 * 8]);
+ t_hi[8] = sub_s64_round_narrow(&s64_hi[2 * 0], &s64_hi[2 * 8]);
+ t_lo[9] = sub_s64_round_narrow(&s64_lo[2 * 1], &s64_lo[2 * 9]);
+ t_hi[9] = sub_s64_round_narrow(&s64_hi[2 * 1], &s64_hi[2 * 9]);
+ t_lo[10] = sub_s64_round_narrow(&s64_lo[2 * 2], &s64_lo[2 * 10]);
+ t_hi[10] = sub_s64_round_narrow(&s64_hi[2 * 2], &s64_hi[2 * 10]);
+ t_lo[11] = sub_s64_round_narrow(&s64_lo[2 * 3], &s64_lo[2 * 11]);
+ t_hi[11] = sub_s64_round_narrow(&s64_hi[2 * 3], &s64_hi[2 * 11]);
+ t_lo[12] = sub_s64_round_narrow(&s64_lo[2 * 4], &s64_lo[2 * 12]);
+ t_hi[12] = sub_s64_round_narrow(&s64_hi[2 * 4], &s64_hi[2 * 12]);
+ t_lo[13] = sub_s64_round_narrow(&s64_lo[2 * 5], &s64_lo[2 * 13]);
+ t_hi[13] = sub_s64_round_narrow(&s64_hi[2 * 5], &s64_hi[2 * 13]);
+ t_lo[14] = sub_s64_round_narrow(&s64_lo[2 * 6], &s64_lo[2 * 14]);
+ t_hi[14] = sub_s64_round_narrow(&s64_hi[2 * 6], &s64_hi[2 * 14]);
+ t_lo[15] = sub_s64_round_narrow(&s64_lo[2 * 7], &s64_lo[2 * 15]);
+ t_hi[15] = sub_s64_round_narrow(&s64_hi[2 * 7], &s64_hi[2 * 15]);
+
+ // stage 2
+ s_lo[0] = t_lo[0];
+ s_hi[0] = t_hi[0];
+ s_lo[1] = t_lo[1];
+ s_hi[1] = t_hi[1];
+ s_lo[2] = t_lo[2];
+ s_hi[2] = t_hi[2];
+ s_lo[3] = t_lo[3];
+ s_hi[3] = t_hi[3];
+ s_lo[4] = t_lo[4];
+ s_hi[4] = t_hi[4];
+ s_lo[5] = t_lo[5];
+ s_hi[5] = t_hi[5];
+ s_lo[6] = t_lo[6];
+ s_hi[6] = t_hi[6];
+ s_lo[7] = t_lo[7];
+ s_hi[7] = t_hi[7];
+ // s8 = x8 * cospi_4_64 + x9 * cospi_28_64;
+ // s9 = x8 * cospi_28_64 - x9 * cospi_4_64;
+ butterfly_two_coeff_s32_s64_noround(
+ t_lo[8], t_hi[8], t_lo[9], t_hi[9], cospi_4_64, cospi_28_64,
+ &s64_lo[2 * 8], &s64_hi[2 * 8], &s64_lo[2 * 9], &s64_hi[2 * 9]);
+ // s10 = x10 * cospi_20_64 + x11 * cospi_12_64;
+ // s11 = x10 * cospi_12_64 - x11 * cospi_20_64;
+ butterfly_two_coeff_s32_s64_noround(
+ t_lo[10], t_hi[10], t_lo[11], t_hi[11], cospi_20_64, cospi_12_64,
+ &s64_lo[2 * 10], &s64_hi[2 * 10], &s64_lo[2 * 11], &s64_hi[2 * 11]);
+ // s12 = -x12 * cospi_28_64 + x13 * cospi_4_64;
+ // s13 = x12 * cospi_4_64 + x13 * cospi_28_64;
+ butterfly_two_coeff_s32_s64_noround(
+ t_lo[13], t_hi[13], t_lo[12], t_hi[12], cospi_28_64, cospi_4_64,
+ &s64_lo[2 * 13], &s64_hi[2 * 13], &s64_lo[2 * 12], &s64_hi[2 * 12]);
+ // s14 = -x14 * cospi_12_64 + x15 * cospi_20_64;
+ // s15 = x14 * cospi_20_64 + x15 * cospi_12_64;
+ butterfly_two_coeff_s32_s64_noround(
+ t_lo[15], t_hi[15], t_lo[14], t_hi[14], cospi_12_64, cospi_20_64,
+ &s64_lo[2 * 15], &s64_hi[2 * 15], &s64_lo[2 * 14], &s64_hi[2 * 14]);
+
+ // s0 + s4
+ t_lo[0] = add_s32_s64_narrow(s_lo[0], s_lo[4]);
+ t_hi[0] = add_s32_s64_narrow(s_hi[0], s_hi[4]);
+ // s1 + s5
+ t_lo[1] = add_s32_s64_narrow(s_lo[1], s_lo[5]);
+ t_hi[1] = add_s32_s64_narrow(s_hi[1], s_hi[5]);
+ // s2 + s6
+ t_lo[2] = add_s32_s64_narrow(s_lo[2], s_lo[6]);
+ t_hi[2] = add_s32_s64_narrow(s_hi[2], s_hi[6]);
+ // s3 + s7
+ t_lo[3] = add_s32_s64_narrow(s_lo[3], s_lo[7]);
+ t_hi[3] = add_s32_s64_narrow(s_hi[3], s_hi[7]);
+
+ // s0 - s4
+ t_lo[4] = sub_s32_s64_narrow(s_lo[0], s_lo[4]);
+ t_hi[4] = sub_s32_s64_narrow(s_hi[0], s_hi[4]);
+ // s1 - s5
+ t_lo[5] = sub_s32_s64_narrow(s_lo[1], s_lo[5]);
+ t_hi[5] = sub_s32_s64_narrow(s_hi[1], s_hi[5]);
+ // s2 - s6
+ t_lo[6] = sub_s32_s64_narrow(s_lo[2], s_lo[6]);
+ t_hi[6] = sub_s32_s64_narrow(s_hi[2], s_hi[6]);
+ // s3 - s7
+ t_lo[7] = sub_s32_s64_narrow(s_lo[3], s_lo[7]);
+ t_hi[7] = sub_s32_s64_narrow(s_hi[3], s_hi[7]);
+
+ // fdct_round_shift()
+ // s8 + s12
+ t_lo[8] = add_s64_round_narrow(&s64_lo[2 * 8], &s64_lo[2 * 12]);
+ t_hi[8] = add_s64_round_narrow(&s64_hi[2 * 8], &s64_hi[2 * 12]);
+ // s9 + s13
+ t_lo[9] = add_s64_round_narrow(&s64_lo[2 * 9], &s64_lo[2 * 13]);
+ t_hi[9] = add_s64_round_narrow(&s64_hi[2 * 9], &s64_hi[2 * 13]);
+ // s10 + s14
+ t_lo[10] = add_s64_round_narrow(&s64_lo[2 * 10], &s64_lo[2 * 14]);
+ t_hi[10] = add_s64_round_narrow(&s64_hi[2 * 10], &s64_hi[2 * 14]);
+ // s11 + s15
+ t_lo[11] = add_s64_round_narrow(&s64_lo[2 * 11], &s64_lo[2 * 15]);
+ t_hi[11] = add_s64_round_narrow(&s64_hi[2 * 11], &s64_hi[2 * 15]);
+
+ // s8 - s12
+ t_lo[12] = sub_s64_round_narrow(&s64_lo[2 * 8], &s64_lo[2 * 12]);
+ t_hi[12] = sub_s64_round_narrow(&s64_hi[2 * 8], &s64_hi[2 * 12]);
+ // s9 - s13
+ t_lo[13] = sub_s64_round_narrow(&s64_lo[2 * 9], &s64_lo[2 * 13]);
+ t_hi[13] = sub_s64_round_narrow(&s64_hi[2 * 9], &s64_hi[2 * 13]);
+ // s10 - s14
+ t_lo[14] = sub_s64_round_narrow(&s64_lo[2 * 10], &s64_lo[2 * 14]);
+ t_hi[14] = sub_s64_round_narrow(&s64_hi[2 * 10], &s64_hi[2 * 14]);
+ // s11 - s15
+ t_lo[15] = sub_s64_round_narrow(&s64_lo[2 * 11], &s64_lo[2 * 15]);
+ t_hi[15] = sub_s64_round_narrow(&s64_hi[2 * 11], &s64_hi[2 * 15]);
+
+ // stage 3
+ s_lo[0] = t_lo[0];
+ s_hi[0] = t_hi[0];
+ s_lo[1] = t_lo[1];
+ s_hi[1] = t_hi[1];
+ s_lo[2] = t_lo[2];
+ s_hi[2] = t_hi[2];
+ s_lo[3] = t_lo[3];
+ s_hi[3] = t_hi[3];
+ // s4 = x4 * cospi_8_64 + x5 * cospi_24_64;
+ // s5 = x4 * cospi_24_64 - x5 * cospi_8_64;
+ butterfly_two_coeff_s32_s64_noround(
+ t_lo[4], t_hi[4], t_lo[5], t_hi[5], cospi_8_64, cospi_24_64,
+ &s64_lo[2 * 4], &s64_hi[2 * 4], &s64_lo[2 * 5], &s64_hi[2 * 5]);
+ // s6 = -x6 * cospi_24_64 + x7 * cospi_8_64;
+ // s7 = x6 * cospi_8_64 + x7 * cospi_24_64;
+ butterfly_two_coeff_s32_s64_noround(
+ t_lo[7], t_hi[7], t_lo[6], t_hi[6], cospi_24_64, cospi_8_64,
+ &s64_lo[2 * 7], &s64_hi[2 * 7], &s64_lo[2 * 6], &s64_hi[2 * 6]);
+ s_lo[8] = t_lo[8];
+ s_hi[8] = t_hi[8];
+ s_lo[9] = t_lo[9];
+ s_hi[9] = t_hi[9];
+ s_lo[10] = t_lo[10];
+ s_hi[10] = t_hi[10];
+ s_lo[11] = t_lo[11];
+ s_hi[11] = t_hi[11];
+ // s12 = x12 * cospi_8_64 + x13 * cospi_24_64;
+ // s13 = x12 * cospi_24_64 - x13 * cospi_8_64;
+ butterfly_two_coeff_s32_s64_noround(
+ t_lo[12], t_hi[12], t_lo[13], t_hi[13], cospi_8_64, cospi_24_64,
+ &s64_lo[2 * 12], &s64_hi[2 * 12], &s64_lo[2 * 13], &s64_hi[2 * 13]);
+ // s14 = -x14 * cospi_24_64 + x15 * cospi_8_64;
+ // s15 = x14 * cospi_8_64 + x15 * cospi_24_64;
+ butterfly_two_coeff_s32_s64_noround(
+ t_lo[15], t_hi[15], t_lo[14], t_hi[14], cospi_24_64, cospi_8_64,
+ &s64_lo[2 * 15], &s64_hi[2 * 15], &s64_lo[2 * 14], &s64_hi[2 * 14]);
+
+ // s0 + s2
+ t_lo[0] = add_s32_s64_narrow(s_lo[0], s_lo[2]);
+ t_hi[0] = add_s32_s64_narrow(s_hi[0], s_hi[2]);
+ // s1 + s3
+ t_lo[1] = add_s32_s64_narrow(s_lo[1], s_lo[3]);
+ t_hi[1] = add_s32_s64_narrow(s_hi[1], s_hi[3]);
+ // s0 - s2
+ t_lo[2] = sub_s32_s64_narrow(s_lo[0], s_lo[2]);
+ t_hi[2] = sub_s32_s64_narrow(s_hi[0], s_hi[2]);
+ // s1 - s3
+ t_lo[3] = sub_s32_s64_narrow(s_lo[1], s_lo[3]);
+ t_hi[3] = sub_s32_s64_narrow(s_hi[1], s_hi[3]);
+ // fdct_round_shift()
+ // s4 + s6
+ t_lo[4] = add_s64_round_narrow(&s64_lo[2 * 4], &s64_lo[2 * 6]);
+ t_hi[4] = add_s64_round_narrow(&s64_hi[2 * 4], &s64_hi[2 * 6]);
+ // s5 + s7
+ t_lo[5] = add_s64_round_narrow(&s64_lo[2 * 5], &s64_lo[2 * 7]);
+ t_hi[5] = add_s64_round_narrow(&s64_hi[2 * 5], &s64_hi[2 * 7]);
+ // s4 - s6
+ t_lo[6] = sub_s64_round_narrow(&s64_lo[2 * 4], &s64_lo[2 * 6]);
+ t_hi[6] = sub_s64_round_narrow(&s64_hi[2 * 4], &s64_hi[2 * 6]);
+ // s5 - s7
+ t_lo[7] = sub_s64_round_narrow(&s64_lo[2 * 5], &s64_lo[2 * 7]);
+ t_hi[7] = sub_s64_round_narrow(&s64_hi[2 * 5], &s64_hi[2 * 7]);
+ // s8 + s10
+ t_lo[8] = add_s32_s64_narrow(s_lo[8], s_lo[10]);
+ t_hi[8] = add_s32_s64_narrow(s_hi[8], s_hi[10]);
+ // s9 + s11
+ t_lo[9] = add_s32_s64_narrow(s_lo[9], s_lo[11]);
+ t_hi[9] = add_s32_s64_narrow(s_hi[9], s_hi[11]);
+ // s8 - s10
+ t_lo[10] = sub_s32_s64_narrow(s_lo[8], s_lo[10]);
+ t_hi[10] = sub_s32_s64_narrow(s_hi[8], s_hi[10]);
+ // s9 - s11
+ t_lo[11] = sub_s32_s64_narrow(s_lo[9], s_lo[11]);
+ t_hi[11] = sub_s32_s64_narrow(s_hi[9], s_hi[11]);
+ // fdct_round_shift()
+ // s12 + s14
+ t_lo[12] = add_s64_round_narrow(&s64_lo[2 * 12], &s64_lo[2 * 14]);
+ t_hi[12] = add_s64_round_narrow(&s64_hi[2 * 12], &s64_hi[2 * 14]);
+ // s13 + s15
+ t_lo[13] = add_s64_round_narrow(&s64_lo[2 * 13], &s64_lo[2 * 15]);
+ t_hi[13] = add_s64_round_narrow(&s64_hi[2 * 13], &s64_hi[2 * 15]);
+ // s12 - s14
+ t_lo[14] = sub_s64_round_narrow(&s64_lo[2 * 12], &s64_lo[2 * 14]);
+ t_hi[14] = sub_s64_round_narrow(&s64_hi[2 * 12], &s64_hi[2 * 14]);
+ // s13 - s15
+ t_lo[15] = sub_s64_round_narrow(&s64_lo[2 * 13], &s64_lo[2 * 15]);
+ t_hi[15] = sub_s64_round_narrow(&s64_hi[2 * 13], &s64_hi[2 * 15]);
+
+ // stage 4, with fdct_round_shift
+ // s2 = (-cospi_16_64) * (x2 + x3);
+ // s3 = cospi_16_64 * (x2 - x3);
+ butterfly_one_coeff_s32_s64_narrow(t_lo[3], t_hi[3], t_lo[2], t_hi[2],
+ -cospi_16_64, &x_lo[2], &x_hi[2], &x_lo[3],
+ &x_hi[3]);
+ // s6 = cospi_16_64 * (x6 + x7);
+ // s7 = cospi_16_64 * (-x6 + x7);
+ butterfly_one_coeff_s32_s64_narrow(t_lo[7], t_hi[7], t_lo[6], t_hi[6],
+ cospi_16_64, &x_lo[6], &x_hi[6], &x_lo[7],
+ &x_hi[7]);
+ // s10 = cospi_16_64 * (x10 + x11);
+ // s11 = cospi_16_64 * (-x10 + x11);
+ butterfly_one_coeff_s32_s64_narrow(t_lo[11], t_hi[11], t_lo[10], t_hi[10],
+ cospi_16_64, &x_lo[10], &x_hi[10],
+ &x_lo[11], &x_hi[11]);
+ // s14 = (-cospi_16_64) * (x14 + x15);
+ // s15 = cospi_16_64 * (x14 - x15);
+ butterfly_one_coeff_s32_s64_narrow(t_lo[15], t_hi[15], t_lo[14], t_hi[14],
+ -cospi_16_64, &x_lo[14], &x_hi[14],
+ &x_lo[15], &x_hi[15]);
+
+ // Just copy x0, x1, x4, x5, x8, x9, x12, x13
+ x_lo[0] = t_lo[0];
+ x_hi[0] = t_hi[0];
+ x_lo[1] = t_lo[1];
+ x_hi[1] = t_hi[1];
+ x_lo[4] = t_lo[4];
+ x_hi[4] = t_hi[4];
+ x_lo[5] = t_lo[5];
+ x_hi[5] = t_hi[5];
+ x_lo[8] = t_lo[8];
+ x_hi[8] = t_hi[8];
+ x_lo[9] = t_lo[9];
+ x_hi[9] = t_hi[9];
+ x_lo[12] = t_lo[12];
+ x_hi[12] = t_hi[12];
+ x_lo[13] = t_lo[13];
+ x_hi[13] = t_hi[13];
+
+ left[0] = x_lo[0];
+ right[0] = x_hi[0];
+ left[1] = vnegq_s32(x_lo[8]);
+ right[1] = vnegq_s32(x_hi[8]);
+ left[2] = x_lo[12];
+ right[2] = x_hi[12];
+ left[3] = vnegq_s32(x_lo[4]);
+ right[3] = vnegq_s32(x_hi[4]);
+ left[4] = x_lo[6];
+ right[4] = x_hi[6];
+ left[5] = x_lo[14];
+ right[5] = x_hi[14];
+ left[6] = x_lo[10];
+ right[6] = x_hi[10];
+ left[7] = x_lo[2];
+ right[7] = x_hi[2];
+ left[8] = x_lo[3];
+ right[8] = x_hi[3];
+ left[9] = x_lo[11];
+ right[9] = x_hi[11];
+ left[10] = x_lo[15];
+ right[10] = x_hi[15];
+ left[11] = x_lo[7];
+ right[11] = x_hi[7];
+ left[12] = x_lo[5];
+ right[12] = x_hi[5];
+ left[13] = vnegq_s32(x_lo[13]);
+ right[13] = vnegq_s32(x_hi[13]);
+ left[14] = x_lo[9];
+ right[14] = x_hi[9];
+ left[15] = vnegq_s32(x_lo[1]);
+ right[15] = vnegq_s32(x_hi[1]);
+}
+
+static void highbd_fdct16x16_neon(int32x4_t *left1, int32x4_t *right1,
+ int32x4_t *left2, int32x4_t *right2) {
+ // Left half.
+ highbd_fdct16_8col(left1, right1);
+ // Right half.
+ highbd_fdct16_8col(left2, right2);
+ transpose_s32_16x16(left1, right1, left2, right2);
+}
+
+static void highbd_fadst16x16_neon(int32x4_t *left1, int32x4_t *right1,
+ int32x4_t *left2, int32x4_t *right2) {
+ // Left half.
+ highbd_fadst16_8col(left1, right1);
+ // Right half.
+ highbd_fadst16_8col(left2, right2);
+ transpose_s32_16x16(left1, right1, left2, right2);
+}
+
+void vp9_highbd_fht16x16_neon(const int16_t *input, tran_low_t *output,
+ int stride, int tx_type) {
+ int32x4_t left1[16], right1[16], left2[16], right2[16];
+
+ switch (tx_type) {
+ case DCT_DCT: vpx_highbd_fdct16x16_neon(input, output, stride); break;
+ case ADST_DCT:
+ highbd_load_buffer_16x16(input, left1, right1, left2, right2, stride);
+ highbd_fadst16x16_neon(left1, right1, left2, right2);
+ highbd_right_shift_16x16(left1, right1, left2, right2, 2);
+ highbd_fdct16x16_neon(left1, right1, left2, right2);
+ highbd_write_buffer_16x16(output, left1, right1, left2, right2, 16);
+ break;
+ case DCT_ADST:
+ highbd_load_buffer_16x16(input, left1, right1, left2, right2, stride);
+ highbd_fdct16x16_neon(left1, right1, left2, right2);
+ highbd_right_shift_16x16(left1, right1, left2, right2, 2);
+ highbd_fadst16x16_neon(left1, right1, left2, right2);
+ highbd_write_buffer_16x16(output, left1, right1, left2, right2, 16);
+ break;
+ default:
+ assert(tx_type == ADST_ADST);
+ highbd_load_buffer_16x16(input, left1, right1, left2, right2, stride);
+ highbd_fadst16x16_neon(left1, right1, left2, right2);
+ highbd_right_shift_16x16(left1, right1, left2, right2, 2);
+ highbd_fadst16x16_neon(left1, right1, left2, right2);
+ highbd_write_buffer_16x16(output, left1, right1, left2, right2, 16);
+ break;
+ }
+}
+
+#endif // CONFIG_VP9_HIGHBITDEPTH
diff --git a/media/libvpx/libvpx/vp9/encoder/arm/neon/vp9_denoiser_neon.c b/media/libvpx/libvpx/vp9/encoder/arm/neon/vp9_denoiser_neon.c
new file mode 100644
index 0000000000..d631cd437d
--- /dev/null
+++ b/media/libvpx/libvpx/vp9/encoder/arm/neon/vp9_denoiser_neon.c
@@ -0,0 +1,356 @@
+/*
+ * Copyright (c) 2017 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <arm_neon.h>
+
+#include "./vpx_config.h"
+#include "./vp9_rtcd.h"
+
+#include "vpx/vpx_integer.h"
+#include "vp9/common/vp9_reconinter.h"
+#include "vp9/encoder/vp9_context_tree.h"
+#include "vp9/encoder/vp9_denoiser.h"
+#include "vpx_mem/vpx_mem.h"
+
+// Compute the sum of all pixel differences of this MB.
+static INLINE int horizontal_add_s8x16(const int8x16_t v_sum_diff_total) {
+#if VPX_ARCH_AARCH64
+ return vaddlvq_s8(v_sum_diff_total);
+#else
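+  // Without vaddlvq_s8, reduce by successive widening pairwise adds
+  // (s8 -> s16 -> s32 -> s64) and then fold the two 64-bit halves together.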
+ const int16x8_t fe_dc_ba_98_76_54_32_10 = vpaddlq_s8(v_sum_diff_total);
+ const int32x4_t fedc_ba98_7654_3210 = vpaddlq_s16(fe_dc_ba_98_76_54_32_10);
+ const int64x2_t fedcba98_76543210 = vpaddlq_s32(fedc_ba98_7654_3210);
+ const int64x1_t x = vqadd_s64(vget_high_s64(fedcba98_76543210),
+ vget_low_s64(fedcba98_76543210));
+ const int sum_diff = vget_lane_s32(vreinterpret_s32_s64(x), 0);
+ return sum_diff;
+#endif
+}
+
+// Denoise a 16x1 vector.
+static INLINE int8x16_t denoiser_16x1_neon(
+ const uint8_t *sig, const uint8_t *mc_running_avg_y, uint8_t *running_avg_y,
+ const uint8x16_t v_level1_threshold, const uint8x16_t v_level2_threshold,
+ const uint8x16_t v_level3_threshold, const uint8x16_t v_level1_adjustment,
+ const uint8x16_t v_delta_level_1_and_2,
+ const uint8x16_t v_delta_level_2_and_3, int8x16_t v_sum_diff_total) {
+ const uint8x16_t v_sig = vld1q_u8(sig);
+ const uint8x16_t v_mc_running_avg_y = vld1q_u8(mc_running_avg_y);
+
+ /* Calculate absolute difference and sign masks. */
+ const uint8x16_t v_abs_diff = vabdq_u8(v_sig, v_mc_running_avg_y);
+ const uint8x16_t v_diff_pos_mask = vcltq_u8(v_sig, v_mc_running_avg_y);
+ const uint8x16_t v_diff_neg_mask = vcgtq_u8(v_sig, v_mc_running_avg_y);
+
+  /* Figure out which level the absolute difference puts us in. */
+ const uint8x16_t v_level1_mask = vcleq_u8(v_level1_threshold, v_abs_diff);
+ const uint8x16_t v_level2_mask = vcleq_u8(v_level2_threshold, v_abs_diff);
+ const uint8x16_t v_level3_mask = vcleq_u8(v_level3_threshold, v_abs_diff);
+
+ /* Calculate absolute adjustments for level 1, 2 and 3. */
+ const uint8x16_t v_level2_adjustment =
+ vandq_u8(v_level2_mask, v_delta_level_1_and_2);
+ const uint8x16_t v_level3_adjustment =
+ vandq_u8(v_level3_mask, v_delta_level_2_and_3);
+ const uint8x16_t v_level1and2_adjustment =
+ vaddq_u8(v_level1_adjustment, v_level2_adjustment);
+ const uint8x16_t v_level1and2and3_adjustment =
+ vaddq_u8(v_level1and2_adjustment, v_level3_adjustment);
+
+  /* Select the absolute value of the adjustment: the absolute difference
+   * itself if we are below level 1, or the combined value for levels 1, 2
+   * and 3.
+ */
+ const uint8x16_t v_abs_adjustment =
+ vbslq_u8(v_level1_mask, v_level1and2and3_adjustment, v_abs_diff);
+
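+  /* Example with the thresholds the callers in this file pass (4 + shift_inc,
+   * 8 and 16): with shift_inc == 0 and a level 1 adjustment of 4, an absolute
+   * difference of 10 reaches levels 1 and 2 but not level 3, so
+   * v_abs_adjustment is 4 + 1 = 5 for that pixel.
+   */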
+ /* Calculate positive and negative adjustments. Apply them to the signal
+ * and accumulate them. Adjustments are less than eight and the maximum
+ * sum of them (7 * 16) can fit in a signed char.
+ */
+ const uint8x16_t v_pos_adjustment =
+ vandq_u8(v_diff_pos_mask, v_abs_adjustment);
+ const uint8x16_t v_neg_adjustment =
+ vandq_u8(v_diff_neg_mask, v_abs_adjustment);
+
+ uint8x16_t v_running_avg_y = vqaddq_u8(v_sig, v_pos_adjustment);
+ v_running_avg_y = vqsubq_u8(v_running_avg_y, v_neg_adjustment);
+
+ /* Store results. */
+ vst1q_u8(running_avg_y, v_running_avg_y);
+
+ /* Sum all the accumulators to have the sum of all pixel differences
+ * for this macroblock.
+ */
+ {
+ const int8x16_t v_sum_diff =
+ vqsubq_s8(vreinterpretq_s8_u8(v_pos_adjustment),
+ vreinterpretq_s8_u8(v_neg_adjustment));
+ v_sum_diff_total = vaddq_s8(v_sum_diff_total, v_sum_diff);
+ }
+ return v_sum_diff_total;
+}
+
+static INLINE int8x16_t denoiser_adjust_16x1_neon(
+ const uint8_t *sig, const uint8_t *mc_running_avg_y, uint8_t *running_avg_y,
+ const uint8x16_t k_delta, int8x16_t v_sum_diff_total) {
+ uint8x16_t v_running_avg_y = vld1q_u8(running_avg_y);
+ const uint8x16_t v_sig = vld1q_u8(sig);
+ const uint8x16_t v_mc_running_avg_y = vld1q_u8(mc_running_avg_y);
+
+ /* Calculate absolute difference and sign masks. */
+ const uint8x16_t v_abs_diff = vabdq_u8(v_sig, v_mc_running_avg_y);
+ const uint8x16_t v_diff_pos_mask = vcltq_u8(v_sig, v_mc_running_avg_y);
+ const uint8x16_t v_diff_neg_mask = vcgtq_u8(v_sig, v_mc_running_avg_y);
+ // Clamp absolute difference to delta to get the adjustment.
+ const uint8x16_t v_abs_adjustment = vminq_u8(v_abs_diff, (k_delta));
+
+ const uint8x16_t v_pos_adjustment =
+ vandq_u8(v_diff_pos_mask, v_abs_adjustment);
+ const uint8x16_t v_neg_adjustment =
+ vandq_u8(v_diff_neg_mask, v_abs_adjustment);
+
+ v_running_avg_y = vqsubq_u8(v_running_avg_y, v_pos_adjustment);
+ v_running_avg_y = vqaddq_u8(v_running_avg_y, v_neg_adjustment);
+
+ /* Store results. */
+ vst1q_u8(running_avg_y, v_running_avg_y);
+
+ {
+ const int8x16_t v_sum_diff =
+ vqsubq_s8(vreinterpretq_s8_u8(v_neg_adjustment),
+ vreinterpretq_s8_u8(v_pos_adjustment));
+ v_sum_diff_total = vaddq_s8(v_sum_diff_total, v_sum_diff);
+ }
+ return v_sum_diff_total;
+}
+
+// Denoise 8x8 and 8x16 blocks.
+static int vp9_denoiser_8xN_neon(const uint8_t *sig, int sig_stride,
+ const uint8_t *mc_running_avg_y,
+ int mc_avg_y_stride, uint8_t *running_avg_y,
+ int avg_y_stride, int increase_denoising,
+ BLOCK_SIZE bs, int motion_magnitude,
+ int width) {
+ int sum_diff_thresh, r, sum_diff = 0;
+ const int shift_inc =
+ (increase_denoising && motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD)
+ ? 1
+ : 0;
+ uint8_t sig_buffer[8][16], mc_running_buffer[8][16], running_buffer[8][16];
+
+ const uint8x16_t v_level1_adjustment = vmovq_n_u8(
+ (motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) ? 4 + shift_inc : 3);
+ const uint8x16_t v_delta_level_1_and_2 = vdupq_n_u8(1);
+ const uint8x16_t v_delta_level_2_and_3 = vdupq_n_u8(2);
+ const uint8x16_t v_level1_threshold = vdupq_n_u8(4 + shift_inc);
+ const uint8x16_t v_level2_threshold = vdupq_n_u8(8);
+ const uint8x16_t v_level3_threshold = vdupq_n_u8(16);
+
+ const int b_height = (4 << b_height_log2_lookup[bs]) >> 1;
+
+ int8x16_t v_sum_diff_total = vdupq_n_s8(0);
+
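+  // Each iteration packs two 8-pixel-wide rows into one 16-byte buffer so
+  // that the 16x1 denoiser kernel can be reused for these narrow blocks.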
+ for (r = 0; r < b_height; ++r) {
+ memcpy(sig_buffer[r], sig, width);
+ memcpy(sig_buffer[r] + width, sig + sig_stride, width);
+ memcpy(mc_running_buffer[r], mc_running_avg_y, width);
+ memcpy(mc_running_buffer[r] + width, mc_running_avg_y + mc_avg_y_stride,
+ width);
+ memcpy(running_buffer[r], running_avg_y, width);
+ memcpy(running_buffer[r] + width, running_avg_y + avg_y_stride, width);
+ v_sum_diff_total = denoiser_16x1_neon(
+ sig_buffer[r], mc_running_buffer[r], running_buffer[r],
+ v_level1_threshold, v_level2_threshold, v_level3_threshold,
+ v_level1_adjustment, v_delta_level_1_and_2, v_delta_level_2_and_3,
+ v_sum_diff_total);
+ {
+ const uint8x16_t v_running_buffer = vld1q_u8(running_buffer[r]);
+ const uint8x8_t v_running_buffer_high = vget_high_u8(v_running_buffer);
+ const uint8x8_t v_running_buffer_low = vget_low_u8(v_running_buffer);
+ vst1_u8(running_avg_y, v_running_buffer_low);
+ vst1_u8(running_avg_y + avg_y_stride, v_running_buffer_high);
+ }
+ // Update pointers for next iteration.
+ sig += (sig_stride << 1);
+ mc_running_avg_y += (mc_avg_y_stride << 1);
+ running_avg_y += (avg_y_stride << 1);
+ }
+
+ {
+ sum_diff = horizontal_add_s8x16(v_sum_diff_total);
+ sum_diff_thresh = total_adj_strong_thresh(bs, increase_denoising);
+ if (abs(sum_diff) > sum_diff_thresh) {
+ // Before returning to copy the block (i.e., apply no denoising),
+ // check if we can still apply some (weaker) temporal filtering to
+ // this block, that would otherwise not be denoised at all. Simplest
+ // is to apply an additional adjustment to running_avg_y to bring it
+ // closer to sig. The adjustment is capped by a maximum delta, and
+ // chosen such that in most cases the resulting sum_diff will be
+ // within the acceptable range given by sum_diff_thresh.
+
+ // The delta is set by the excess of absolute pixel diff over the
+ // threshold.
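+      // For example, for an 8x16 block (num_pels_log2_lookup[bs] == 7), an
+      // excess of 300 gives delta = (300 >> 7) + 1 = 3.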
+ const int delta =
+ ((abs(sum_diff) - sum_diff_thresh) >> num_pels_log2_lookup[bs]) + 1;
+ // Only apply the adjustment for max delta up to 3.
+ if (delta < 4) {
+ const uint8x16_t k_delta = vmovq_n_u8(delta);
+ running_avg_y -= avg_y_stride * (b_height << 1);
+ for (r = 0; r < b_height; ++r) {
+ v_sum_diff_total = denoiser_adjust_16x1_neon(
+ sig_buffer[r], mc_running_buffer[r], running_buffer[r], k_delta,
+ v_sum_diff_total);
+ {
+ const uint8x16_t v_running_buffer = vld1q_u8(running_buffer[r]);
+ const uint8x8_t v_running_buffer_high =
+ vget_high_u8(v_running_buffer);
+ const uint8x8_t v_running_buffer_low =
+ vget_low_u8(v_running_buffer);
+ vst1_u8(running_avg_y, v_running_buffer_low);
+ vst1_u8(running_avg_y + avg_y_stride, v_running_buffer_high);
+ }
+ // Update pointers for next iteration.
+ running_avg_y += (avg_y_stride << 1);
+ }
+ sum_diff = horizontal_add_s8x16(v_sum_diff_total);
+ if (abs(sum_diff) > sum_diff_thresh) {
+ return COPY_BLOCK;
+ }
+ } else {
+ return COPY_BLOCK;
+ }
+ }
+ }
+
+ return FILTER_BLOCK;
+}
+
+// Denoise 16x8, 16x16, 16x32, 32x16, 32x32, 32x64, 64x32 and 64x64 blocks.
+static int vp9_denoiser_NxM_neon(const uint8_t *sig, int sig_stride,
+ const uint8_t *mc_running_avg_y,
+ int mc_avg_y_stride, uint8_t *running_avg_y,
+ int avg_y_stride, int increase_denoising,
+ BLOCK_SIZE bs, int motion_magnitude) {
+ const int shift_inc =
+ (increase_denoising && motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD)
+ ? 1
+ : 0;
+ const uint8x16_t v_level1_adjustment = vmovq_n_u8(
+ (motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) ? 4 + shift_inc : 3);
+ const uint8x16_t v_delta_level_1_and_2 = vdupq_n_u8(1);
+ const uint8x16_t v_delta_level_2_and_3 = vdupq_n_u8(2);
+ const uint8x16_t v_level1_threshold = vmovq_n_u8(4 + shift_inc);
+ const uint8x16_t v_level2_threshold = vdupq_n_u8(8);
+ const uint8x16_t v_level3_threshold = vdupq_n_u8(16);
+
+ const int b_width = (4 << b_width_log2_lookup[bs]);
+ const int b_height = (4 << b_height_log2_lookup[bs]);
+ const int b_width_shift4 = b_width >> 4;
+
+ int8x16_t v_sum_diff_total[4][4];
+ int r, c, sum_diff = 0;
+
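+  // v_sum_diff_total holds one int8x16 accumulator per 16-wide column strip
+  // and per 16-row group; the totals are folded into sum_diff every 16 rows
+  // (or after row 7 for BLOCK_16X8) so the int8 lanes stay within range
+  // (see the 7 * 16 comment in denoiser_16x1_neon).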
+ for (r = 0; r < 4; ++r) {
+ for (c = 0; c < b_width_shift4; ++c) {
+ v_sum_diff_total[c][r] = vdupq_n_s8(0);
+ }
+ }
+
+ for (r = 0; r < b_height; ++r) {
+ for (c = 0; c < b_width_shift4; ++c) {
+ v_sum_diff_total[c][r >> 4] = denoiser_16x1_neon(
+ sig, mc_running_avg_y, running_avg_y, v_level1_threshold,
+ v_level2_threshold, v_level3_threshold, v_level1_adjustment,
+ v_delta_level_1_and_2, v_delta_level_2_and_3,
+ v_sum_diff_total[c][r >> 4]);
+
+ // Update pointers for next iteration.
+ sig += 16;
+ mc_running_avg_y += 16;
+ running_avg_y += 16;
+ }
+
+ if ((r & 0xf) == 0xf || (bs == BLOCK_16X8 && r == 7)) {
+ for (c = 0; c < b_width_shift4; ++c) {
+ sum_diff += horizontal_add_s8x16(v_sum_diff_total[c][r >> 4]);
+ }
+ }
+
+ // Update pointers for next iteration.
+ sig = sig - b_width + sig_stride;
+ mc_running_avg_y = mc_running_avg_y - b_width + mc_avg_y_stride;
+ running_avg_y = running_avg_y - b_width + avg_y_stride;
+ }
+
+ {
+ const int sum_diff_thresh = total_adj_strong_thresh(bs, increase_denoising);
+ if (abs(sum_diff) > sum_diff_thresh) {
+ const int delta =
+ ((abs(sum_diff) - sum_diff_thresh) >> num_pels_log2_lookup[bs]) + 1;
+ // Only apply the adjustment for max delta up to 3.
+ if (delta < 4) {
+ const uint8x16_t k_delta = vdupq_n_u8(delta);
+ sig -= sig_stride * b_height;
+ mc_running_avg_y -= mc_avg_y_stride * b_height;
+ running_avg_y -= avg_y_stride * b_height;
+ sum_diff = 0;
+
+ for (r = 0; r < b_height; ++r) {
+ for (c = 0; c < b_width_shift4; ++c) {
+ v_sum_diff_total[c][r >> 4] =
+ denoiser_adjust_16x1_neon(sig, mc_running_avg_y, running_avg_y,
+ k_delta, v_sum_diff_total[c][r >> 4]);
+
+ // Update pointers for next iteration.
+ sig += 16;
+ mc_running_avg_y += 16;
+ running_avg_y += 16;
+ }
+ if ((r & 0xf) == 0xf || (bs == BLOCK_16X8 && r == 7)) {
+ for (c = 0; c < b_width_shift4; ++c) {
+ sum_diff += horizontal_add_s8x16(v_sum_diff_total[c][r >> 4]);
+ }
+ }
+
+ sig = sig - b_width + sig_stride;
+ mc_running_avg_y = mc_running_avg_y - b_width + mc_avg_y_stride;
+ running_avg_y = running_avg_y - b_width + avg_y_stride;
+ }
+
+ if (abs(sum_diff) > sum_diff_thresh) {
+ return COPY_BLOCK;
+ }
+ } else {
+ return COPY_BLOCK;
+ }
+ }
+ }
+ return FILTER_BLOCK;
+}
+
+int vp9_denoiser_filter_neon(const uint8_t *sig, int sig_stride,
+ const uint8_t *mc_avg, int mc_avg_stride,
+ uint8_t *avg, int avg_stride,
+ int increase_denoising, BLOCK_SIZE bs,
+ int motion_magnitude) {
+ // Rank by frequency of the block type to have an early termination.
+ if (bs == BLOCK_16X16 || bs == BLOCK_32X32 || bs == BLOCK_64X64 ||
+ bs == BLOCK_16X32 || bs == BLOCK_16X8 || bs == BLOCK_32X16 ||
+ bs == BLOCK_32X64 || bs == BLOCK_64X32) {
+ return vp9_denoiser_NxM_neon(sig, sig_stride, mc_avg, mc_avg_stride, avg,
+ avg_stride, increase_denoising, bs,
+ motion_magnitude);
+ } else if (bs == BLOCK_8X8 || bs == BLOCK_8X16) {
+ return vp9_denoiser_8xN_neon(sig, sig_stride, mc_avg, mc_avg_stride, avg,
+ avg_stride, increase_denoising, bs,
+ motion_magnitude, 8);
+ }
+ return COPY_BLOCK;
+}
diff --git a/media/libvpx/libvpx/vp9/encoder/arm/neon/vp9_diamond_search_sad_neon.c b/media/libvpx/libvpx/vp9/encoder/arm/neon/vp9_diamond_search_sad_neon.c
new file mode 100644
index 0000000000..b82b3f9db5
--- /dev/null
+++ b/media/libvpx/libvpx/vp9/encoder/arm/neon/vp9_diamond_search_sad_neon.c
@@ -0,0 +1,296 @@
+/*
+ * Copyright (c) 2022 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+#include <arm_neon.h>
+
+#include "vpx_dsp/vpx_dsp_common.h"
+#include "vp9/encoder/vp9_encoder.h"
+#include "vpx_ports/mem.h"
+
+#ifdef __GNUC__
+#define LIKELY(v) __builtin_expect(v, 1)
+#define UNLIKELY(v) __builtin_expect(v, 0)
+#else
+#define LIKELY(v) (v)
+#define UNLIKELY(v) (v)
+#endif
+
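+// pack_int_mv() packs the 16-bit row and col into one int_mv so that both
+// components can be broadcast and compared as a single 32-bit lane.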
+static INLINE int_mv pack_int_mv(int16_t row, int16_t col) {
+ int_mv result;
+ result.as_mv.row = row;
+ result.as_mv.col = col;
+ return result;
+}
+
+/*****************************************************************************
+ * This function utilizes 3 properties of the cost function lookup tables, *
+ * constructed using 'cal_nmvjointsadcost' and 'cal_nmvsadcosts' in           *
+ * vp9_encoder.c. *
+ * For the joint cost: *
+ * - mvjointsadcost[1] == mvjointsadcost[2] == mvjointsadcost[3] *
+ * For the component costs: *
+ * - For all i: mvsadcost[0][i] == mvsadcost[1][i] *
+ * (Equal costs for both components) *
+ * - For all i: mvsadcost[0][i] == mvsadcost[0][-i] *
+ * (Cost function is even) *
+ * If these do not hold, then this function cannot be used without *
+ * modification, in which case you can revert to using the C implementation, *
+ * which does not rely on these properties. *
+ *****************************************************************************/
+int vp9_diamond_search_sad_neon(const MACROBLOCK *x,
+ const search_site_config *cfg, MV *ref_mv,
+ uint32_t start_mv_sad, MV *best_mv,
+ int search_param, int sad_per_bit, int *num00,
+ const vp9_sad_fn_ptr_t *sad_fn_ptr,
+ const MV *center_mv) {
+ static const uint32_t data[4] = { 0, 1, 2, 3 };
+ const uint32x4_t v_idx_d = vld1q_u32((const uint32_t *)data);
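+  // Lane indices 0..3; used below to recover which of the four candidate MVs
+  // produced the minimum SAD.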
+
+ const int32x4_t zero_s32 = vdupq_n_s32(0);
+ const int_mv maxmv = pack_int_mv(x->mv_limits.row_max, x->mv_limits.col_max);
+ const int16x8_t v_max_mv_w = vreinterpretq_s16_s32(vdupq_n_s32(maxmv.as_int));
+ const int_mv minmv = pack_int_mv(x->mv_limits.row_min, x->mv_limits.col_min);
+ const int16x8_t v_min_mv_w = vreinterpretq_s16_s32(vdupq_n_s32(minmv.as_int));
+
+ const int32x4_t v_spb_d = vdupq_n_s32(sad_per_bit);
+
+ const int32x4_t v_joint_cost_0_d = vdupq_n_s32(x->nmvjointsadcost[0]);
+ const int32x4_t v_joint_cost_1_d = vdupq_n_s32(x->nmvjointsadcost[1]);
+
+ // search_param determines the length of the initial step and hence the number
+ // of iterations.
+ // 0 = initial step (MAX_FIRST_STEP) pel
+ // 1 = (MAX_FIRST_STEP/2) pel,
+ // 2 = (MAX_FIRST_STEP/4) pel...
+ const MV *ss_mv = &cfg->ss_mv[cfg->searches_per_step * search_param];
+ const intptr_t *ss_os = &cfg->ss_os[cfg->searches_per_step * search_param];
+ const int tot_steps = cfg->total_steps - search_param;
+
+ const int_mv fcenter_mv =
+ pack_int_mv(center_mv->row >> 3, center_mv->col >> 3);
+ const int16x8_t vfcmv = vreinterpretq_s16_s32(vdupq_n_s32(fcenter_mv.as_int));
+
+ const int ref_row = ref_mv->row;
+ const int ref_col = ref_mv->col;
+
+ int_mv bmv = pack_int_mv(ref_row, ref_col);
+ int_mv new_bmv = bmv;
+ int16x8_t v_bmv_w = vreinterpretq_s16_s32(vdupq_n_s32(bmv.as_int));
+
+ const int what_stride = x->plane[0].src.stride;
+ const int in_what_stride = x->e_mbd.plane[0].pre[0].stride;
+ const uint8_t *const what = x->plane[0].src.buf;
+ const uint8_t *const in_what =
+ x->e_mbd.plane[0].pre[0].buf + ref_row * in_what_stride + ref_col;
+
+ // Work out the start point for the search
+ const uint8_t *best_address = in_what;
+ const uint8_t *new_best_address = best_address;
+#if VPX_ARCH_AARCH64
+ int64x2_t v_ba_q = vdupq_n_s64((intptr_t)best_address);
+#else
+ int32x4_t v_ba_d = vdupq_n_s32((intptr_t)best_address);
+#endif
+ // Starting position
+ unsigned int best_sad = start_mv_sad;
+ int i, j, step;
+
+ // Check the prerequisite cost function properties that are easy to check
+ // in an assert. See the function-level documentation for details on all
+ // prerequisites.
+ assert(x->nmvjointsadcost[1] == x->nmvjointsadcost[2]);
+ assert(x->nmvjointsadcost[1] == x->nmvjointsadcost[3]);
+
+ *num00 = 0;
+
+ for (i = 0, step = 0; step < tot_steps; step++) {
+ for (j = 0; j < cfg->searches_per_step; j += 4, i += 4) {
+ int16x8_t v_diff_mv_w;
+ int8x16_t v_inside_d;
+ uint32x4_t v_outside_d;
+ int32x4_t v_cost_d, v_sad_d;
+#if VPX_ARCH_AARCH64
+ int64x2_t v_blocka[2];
+#else
+ int32x4_t v_blocka[1];
+ uint32x2_t horiz_max_0, horiz_max_1;
+#endif
+
+ uint32_t horiz_max;
+ // Compute the candidate motion vectors
+ const int16x8_t v_ss_mv_w = vld1q_s16((const int16_t *)&ss_mv[i]);
+ const int16x8_t v_these_mv_w = vaddq_s16(v_bmv_w, v_ss_mv_w);
+ // Clamp them to the search bounds
+ int16x8_t v_these_mv_clamp_w = v_these_mv_w;
+ v_these_mv_clamp_w = vminq_s16(v_these_mv_clamp_w, v_max_mv_w);
+ v_these_mv_clamp_w = vmaxq_s16(v_these_mv_clamp_w, v_min_mv_w);
+ // The ones that did not change are inside the search area
+ v_inside_d = vreinterpretq_s8_u32(
+ vceqq_s32(vreinterpretq_s32_s16(v_these_mv_clamp_w),
+ vreinterpretq_s32_s16(v_these_mv_w)));
+
+ // If none of them are inside, then move on
+#if VPX_ARCH_AARCH64
+ horiz_max = vmaxvq_u32(vreinterpretq_u32_s8(v_inside_d));
+#else
+ horiz_max_0 = vmax_u32(vget_low_u32(vreinterpretq_u32_s8(v_inside_d)),
+ vget_high_u32(vreinterpretq_u32_s8(v_inside_d)));
+ horiz_max_1 = vpmax_u32(horiz_max_0, horiz_max_0);
+ vst1_lane_u32(&horiz_max, horiz_max_1, 0);
+#endif
+ if (LIKELY(horiz_max == 0)) {
+ continue;
+ }
+
+ // The inverse mask indicates which of the MVs are outside
+ v_outside_d =
+ vreinterpretq_u32_s8(veorq_s8(v_inside_d, vdupq_n_s8((int8_t)0xff)));
+      // Shift right to keep the sign bit clear; we will use this later to
+      // set the cost to the maximum value.
+ v_outside_d = vshrq_n_u32(v_outside_d, 1);
+
+ // Compute the difference MV
+ v_diff_mv_w = vsubq_s16(v_these_mv_clamp_w, vfcmv);
+ // We utilise the fact that the cost function is even, and use the
+ // absolute difference. This allows us to use unsigned indexes later
+ // and reduces cache pressure somewhat as only a half of the table
+ // is ever referenced.
+ v_diff_mv_w = vabsq_s16(v_diff_mv_w);
+
+ // Compute the SIMD pointer offsets.
+ {
+#if VPX_ARCH_AARCH64 // sizeof(intptr_t) == 8
+ // Load the offsets
+ int64x2_t v_bo10_q = vld1q_s64((const int64_t *)&ss_os[i + 0]);
+ int64x2_t v_bo32_q = vld1q_s64((const int64_t *)&ss_os[i + 2]);
+ // Set the ones falling outside to zero
+ v_bo10_q = vandq_s64(
+ v_bo10_q,
+ vmovl_s32(vget_low_s32(vreinterpretq_s32_s8(v_inside_d))));
+ v_bo32_q = vandq_s64(
+ v_bo32_q,
+ vmovl_s32(vget_high_s32(vreinterpretq_s32_s8(v_inside_d))));
+ // Compute the candidate addresses
+ v_blocka[0] = vaddq_s64(v_ba_q, v_bo10_q);
+ v_blocka[1] = vaddq_s64(v_ba_q, v_bo32_q);
+#else // sizeof(intptr_t) == 4
+ int32x4_t v_bo_d = vld1q_s32((const int32_t *)&ss_os[i]);
+ v_bo_d = vandq_s32(v_bo_d, vreinterpretq_s32_s8(v_inside_d));
+ v_blocka[0] = vaddq_s32(v_ba_d, v_bo_d);
+#endif
+ }
+
+ sad_fn_ptr->sdx4df(what, what_stride, (const uint8_t **)&v_blocka[0],
+ in_what_stride, (uint32_t *)&v_sad_d);
+
+ // Look up the component cost of the residual motion vector
+ {
+ uint32_t cost[4];
+ DECLARE_ALIGNED(16, int16_t, rowcol[8]);
+ vst1q_s16(rowcol, v_diff_mv_w);
+
+ // Note: This is a use case for gather instruction
+ cost[0] = x->nmvsadcost[0][rowcol[0]] + x->nmvsadcost[0][rowcol[1]];
+ cost[1] = x->nmvsadcost[0][rowcol[2]] + x->nmvsadcost[0][rowcol[3]];
+ cost[2] = x->nmvsadcost[0][rowcol[4]] + x->nmvsadcost[0][rowcol[5]];
+ cost[3] = x->nmvsadcost[0][rowcol[6]] + x->nmvsadcost[0][rowcol[7]];
+
+ v_cost_d = vld1q_s32((int32_t *)cost);
+ }
+
+ // Now add in the joint cost
+ {
+ const uint32x4_t v_sel_d =
+ vceqq_s32(vreinterpretq_s32_s16(v_diff_mv_w), zero_s32);
+ const int32x4_t v_joint_cost_d = vreinterpretq_s32_u8(
+ vbslq_u8(vreinterpretq_u8_u32(v_sel_d),
+ vreinterpretq_u8_s32(v_joint_cost_0_d),
+ vreinterpretq_u8_s32(v_joint_cost_1_d)));
+ v_cost_d = vaddq_s32(v_cost_d, v_joint_cost_d);
+ }
+
+ // Multiply by sad_per_bit
+ v_cost_d = vmulq_s32(v_cost_d, v_spb_d);
+ // ROUND_POWER_OF_TWO(v_cost_d, VP9_PROB_COST_SHIFT)
+ v_cost_d =
+ vaddq_s32(v_cost_d, vdupq_n_s32(1 << (VP9_PROB_COST_SHIFT - 1)));
+ v_cost_d = vshrq_n_s32(v_cost_d, VP9_PROB_COST_SHIFT);
+ // Add the cost to the sad
+ v_sad_d = vaddq_s32(v_sad_d, v_cost_d);
+
+ // Make the motion vectors outside the search area have max cost
+      // by or'ing in the comparison mask so that the minimum search won't
+      // pick them.
+ v_sad_d = vorrq_s32(v_sad_d, vreinterpretq_s32_u32(v_outside_d));
+
+ // Find the minimum value and index horizontally in v_sad_d
+ {
+ uint32_t local_best_sad;
+#if VPX_ARCH_AARCH64
+ local_best_sad = vminvq_u32(vreinterpretq_u32_s32(v_sad_d));
+#else
+ uint32x2_t horiz_min_0 =
+ vmin_u32(vget_low_u32(vreinterpretq_u32_s32(v_sad_d)),
+ vget_high_u32(vreinterpretq_u32_s32(v_sad_d)));
+ uint32x2_t horiz_min_1 = vpmin_u32(horiz_min_0, horiz_min_0);
+ vst1_lane_u32(&local_best_sad, horiz_min_1, 0);
+#endif
+
+ // Update the global minimum if the local minimum is smaller
+ if (LIKELY(local_best_sad < best_sad)) {
+#if defined(__GNUC__) && __GNUC__ >= 4 && !defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
+#endif
+ uint32_t local_best_idx;
+ const uint32x4_t v_sel_d =
+ vceqq_s32(v_sad_d, vdupq_n_s32(local_best_sad));
+ uint32x4_t v_mask_d = vandq_u32(v_sel_d, v_idx_d);
+ v_mask_d = vbslq_u32(v_sel_d, v_mask_d, vdupq_n_u32(0xffffffff));
+
+#if VPX_ARCH_AARCH64
+ local_best_idx = vminvq_u32(v_mask_d);
+#else
+ horiz_min_0 =
+ vmin_u32(vget_low_u32(v_mask_d), vget_high_u32(v_mask_d));
+ horiz_min_1 = vpmin_u32(horiz_min_0, horiz_min_0);
+ vst1_lane_u32(&local_best_idx, horiz_min_1, 0);
+#endif
+
+ new_bmv = ((const int_mv *)&v_these_mv_w)[local_best_idx];
+#if defined(__GNUC__) && __GNUC__ >= 4 && !defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+ new_best_address = ((const uint8_t **)v_blocka)[local_best_idx];
+
+ best_sad = local_best_sad;
+ }
+ }
+ }
+
+ bmv = new_bmv;
+ best_address = new_best_address;
+
+ v_bmv_w = vreinterpretq_s16_s32(vdupq_n_s32(bmv.as_int));
+#if VPX_ARCH_AARCH64
+ v_ba_q = vdupq_n_s64((intptr_t)best_address);
+#else
+ v_ba_d = vdupq_n_s32((intptr_t)best_address);
+#endif
+
+ if (UNLIKELY(best_address == in_what)) {
+ (*num00)++;
+ }
+ }
+
+ *best_mv = bmv.as_mv;
+ return best_sad;
+}
diff --git a/media/libvpx/libvpx/vp9/encoder/arm/neon/vp9_error_neon.c b/media/libvpx/libvpx/vp9/encoder/arm/neon/vp9_error_neon.c
new file mode 100644
index 0000000000..0cf0bf250e
--- /dev/null
+++ b/media/libvpx/libvpx/vp9/encoder/arm/neon/vp9_error_neon.c
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2015 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <arm_neon.h>
+#include <assert.h>
+
+#include "./vp9_rtcd.h"
+#include "vpx_dsp/arm/mem_neon.h"
+#include "vpx_dsp/arm/sum_neon.h"
+
+int64_t vp9_block_error_neon(const tran_low_t *coeff, const tran_low_t *dqcoeff,
+ intptr_t block_size, int64_t *ssz) {
+ uint64x2_t err_u64 = vdupq_n_u64(0);
+ int64x2_t ssz_s64 = vdupq_n_s64(0);
+
+ assert(block_size >= 16);
+ assert((block_size % 16) == 0);
+
+ do {
+ uint32x4_t err;
+ int32x4_t ssz0, ssz1;
+
+ const int16x8_t c0 = load_tran_low_to_s16q(coeff);
+ const int16x8_t c1 = load_tran_low_to_s16q(coeff + 8);
+ const int16x8_t d0 = load_tran_low_to_s16q(dqcoeff);
+ const int16x8_t d1 = load_tran_low_to_s16q(dqcoeff + 8);
+
+ const uint16x8_t diff0 = vreinterpretq_u16_s16(vabdq_s16(c0, d0));
+ const uint16x8_t diff1 = vreinterpretq_u16_s16(vabdq_s16(c1, d1));
+
+ // diff is 15-bits, the squares 30, so we can store 4 in 32-bits before
+ // accumulating them in 64-bits.
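+    // (4 * 32767^2 == 0xFFFC0004, which still fits in a uint32.)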
+ err = vmull_u16(vget_low_u16(diff0), vget_low_u16(diff0));
+ err = vmlal_u16(err, vget_high_u16(diff0), vget_high_u16(diff0));
+ err = vmlal_u16(err, vget_low_u16(diff1), vget_low_u16(diff1));
+ err = vmlal_u16(err, vget_high_u16(diff1), vget_high_u16(diff1));
+ err_u64 = vpadalq_u32(err_u64, err);
+
+    // We can't do the same here as we're operating on signed integers: each
+    // 32-bit lane can only hold the sum of 2 of the 30-bit coefficient
+    // squares before we widen into 64 bits.
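+    // (2 * 32767^2 == 0x7FFE0002, which still fits in an int32.)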
+ ssz0 = vmull_s16(vget_low_s16(c0), vget_low_s16(c0));
+ ssz0 = vmlal_s16(ssz0, vget_high_s16(c0), vget_high_s16(c0));
+ ssz_s64 = vpadalq_s32(ssz_s64, ssz0);
+
+ ssz1 = vmull_s16(vget_low_s16(c1), vget_low_s16(c1));
+ ssz1 = vmlal_s16(ssz1, vget_high_s16(c1), vget_high_s16(c1));
+ ssz_s64 = vpadalq_s32(ssz_s64, ssz1);
+
+ coeff += 16;
+ dqcoeff += 16;
+ block_size -= 16;
+ } while (block_size != 0);
+
+ *ssz = horizontal_add_int64x2(ssz_s64);
+ return (int64_t)horizontal_add_uint64x2(err_u64);
+}
+
+int64_t vp9_block_error_fp_neon(const tran_low_t *coeff,
+ const tran_low_t *dqcoeff, int block_size) {
+ uint64x2_t err_u64[2] = { vdupq_n_u64(0), vdupq_n_u64(0) };
+
+ assert(block_size >= 16);
+ assert((block_size % 16) == 0);
+
+ do {
+ uint32x4_t err0, err1;
+
+ const int16x8_t c0 = load_tran_low_to_s16q(coeff);
+ const int16x8_t c1 = load_tran_low_to_s16q(coeff + 8);
+ const int16x8_t d0 = load_tran_low_to_s16q(dqcoeff);
+ const int16x8_t d1 = load_tran_low_to_s16q(dqcoeff + 8);
+
+ const uint16x8_t diff0 = vreinterpretq_u16_s16(vabdq_s16(c0, d0));
+ const uint16x8_t diff1 = vreinterpretq_u16_s16(vabdq_s16(c1, d1));
+
+ // diff is 15-bits, the squares 30, so in theory we can store 4 in 32-bits
+ // before accumulating them in 64-bits. However splitting into 2 mull, mlal
+ // pairs is beneficial since it allows us to use both Neon
+ // multiply-accumulate pipes - on CPUs that have them - rather than having
+ // a single chain of 4 instructions executing serially.
+ err0 = vmull_u16(vget_low_u16(diff0), vget_low_u16(diff0));
+ err0 = vmlal_u16(err0, vget_high_u16(diff0), vget_high_u16(diff0));
+ err_u64[0] = vpadalq_u32(err_u64[0], err0);
+
+ err1 = vmull_u16(vget_low_u16(diff1), vget_low_u16(diff1));
+ err1 = vmlal_u16(err1, vget_high_u16(diff1), vget_high_u16(diff1));
+ err_u64[1] = vpadalq_u32(err_u64[1], err1);
+
+ coeff += 16;
+ dqcoeff += 16;
+ block_size -= 16;
+ } while (block_size != 0);
+
+ return horizontal_add_uint64x2(vaddq_u64(err_u64[0], err_u64[1]));
+}
diff --git a/media/libvpx/libvpx/vp9/encoder/arm/neon/vp9_frame_scale_neon.c b/media/libvpx/libvpx/vp9/encoder/arm/neon/vp9_frame_scale_neon.c
new file mode 100644
index 0000000000..bc8dd4a341
--- /dev/null
+++ b/media/libvpx/libvpx/vp9/encoder/arm/neon/vp9_frame_scale_neon.c
@@ -0,0 +1,844 @@
+/*
+ * Copyright (c) 2017 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <arm_neon.h>
+
+#include "./vp9_rtcd.h"
+#include "./vpx_dsp_rtcd.h"
+#include "./vpx_scale_rtcd.h"
+#include "vp9/common/vp9_blockd.h"
+#include "vpx_dsp/arm/mem_neon.h"
+#include "vpx_dsp/arm/transpose_neon.h"
+#include "vpx_dsp/arm/vpx_convolve8_neon.h"
+#include "vpx_dsp/vpx_filter.h"
+#include "vpx_scale/yv12config.h"
+
+// Note: The scaling functions could write extra rows and columns in dst, which
+// exceed the right and bottom boundaries of the destination frame. We rely on
+// the following frame extension function to fix these rows and columns.
+
+static INLINE void scale_plane_2_to_1_phase_0(const uint8_t *src,
+ const int src_stride,
+ uint8_t *dst,
+ const int dst_stride, const int w,
+ const int h) {
+ const int max_width = (w + 15) & ~15;
+ int y = h;
+
+ assert(w && h);
+
+ do {
+ int x = max_width;
+ do {
+ const uint8x16x2_t s = vld2q_u8(src);
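+      // s.val[0] holds the even columns and s.val[1] the odd ones; storing
+      // only val[0] gives the 2:1 phase-0 downscale.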
+ vst1q_u8(dst, s.val[0]);
+ src += 32;
+ dst += 16;
+ x -= 16;
+ } while (x);
+ src += 2 * (src_stride - max_width);
+ dst += dst_stride - max_width;
+ } while (--y);
+}
+
+static INLINE void scale_plane_4_to_1_phase_0(const uint8_t *src,
+ const int src_stride,
+ uint8_t *dst,
+ const int dst_stride, const int w,
+ const int h) {
+ const int max_width = (w + 15) & ~15;
+ int y = h;
+
+ assert(w && h);
+
+ do {
+ int x = max_width;
+ do {
+ const uint8x16x4_t s = vld4q_u8(src);
+ vst1q_u8(dst, s.val[0]);
+ src += 64;
+ dst += 16;
+ x -= 16;
+ } while (x);
+ src += 4 * (src_stride - max_width);
+ dst += dst_stride - max_width;
+ } while (--y);
+}
+
+static INLINE void scale_plane_bilinear_kernel(
+ const uint8x16_t in0, const uint8x16_t in1, const uint8x16_t in2,
+ const uint8x16_t in3, const uint8x8_t coef0, const uint8x8_t coef1,
+ uint8_t *const dst) {
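+  // Two-pass bilinear filter: blend horizontally adjacent pixels (in0/in1 for
+  // the first row, in2/in3 for the second), then blend the two filtered rows
+  // vertically, applying a rounded 7-bit shift after each pass.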
+ const uint16x8_t h0 = vmull_u8(vget_low_u8(in0), coef0);
+ const uint16x8_t h1 = vmull_u8(vget_high_u8(in0), coef0);
+ const uint16x8_t h2 = vmull_u8(vget_low_u8(in2), coef0);
+ const uint16x8_t h3 = vmull_u8(vget_high_u8(in2), coef0);
+ const uint16x8_t h4 = vmlal_u8(h0, vget_low_u8(in1), coef1);
+ const uint16x8_t h5 = vmlal_u8(h1, vget_high_u8(in1), coef1);
+ const uint16x8_t h6 = vmlal_u8(h2, vget_low_u8(in3), coef1);
+ const uint16x8_t h7 = vmlal_u8(h3, vget_high_u8(in3), coef1);
+
+ const uint8x8_t hor0 = vrshrn_n_u16(h4, 7); // temp: 00 01 02 03 04 05 06 07
+ const uint8x8_t hor1 = vrshrn_n_u16(h5, 7); // temp: 08 09 0A 0B 0C 0D 0E 0F
+ const uint8x8_t hor2 = vrshrn_n_u16(h6, 7); // temp: 10 11 12 13 14 15 16 17
+ const uint8x8_t hor3 = vrshrn_n_u16(h7, 7); // temp: 18 19 1A 1B 1C 1D 1E 1F
+ const uint16x8_t v0 = vmull_u8(hor0, coef0);
+ const uint16x8_t v1 = vmull_u8(hor1, coef0);
+ const uint16x8_t v2 = vmlal_u8(v0, hor2, coef1);
+ const uint16x8_t v3 = vmlal_u8(v1, hor3, coef1);
+ // dst: 0 1 2 3 4 5 6 7 8 9 A B C D E F
+ const uint8x16_t d = vcombine_u8(vrshrn_n_u16(v2, 7), vrshrn_n_u16(v3, 7));
+ vst1q_u8(dst, d);
+}
+
+static INLINE void scale_plane_2_to_1_bilinear(
+ const uint8_t *const src, const int src_stride, uint8_t *dst,
+ const int dst_stride, const int w, const int h, const int16_t c0,
+ const int16_t c1) {
+ const int max_width = (w + 15) & ~15;
+ const uint8_t *src0 = src;
+ const uint8_t *src1 = src + src_stride;
+ const uint8x8_t coef0 = vdup_n_u8(c0);
+ const uint8x8_t coef1 = vdup_n_u8(c1);
+ int y = h;
+
+ assert(w && h);
+
+ do {
+ int x = max_width;
+ do {
+ // 000 002 004 006 008 00A 00C 00E 010 012 014 016 018 01A 01C 01E
+ // 001 003 005 007 009 00B 00D 00F 011 013 015 017 019 01B 01D 01F
+ const uint8x16x2_t s0 = vld2q_u8(src0);
+ // 100 102 104 106 108 10A 10C 10E 110 112 114 116 118 11A 11C 11E
+ // 101 103 105 107 109 10B 10D 10F 111 113 115 117 119 11B 11D 11F
+ const uint8x16x2_t s1 = vld2q_u8(src1);
+ scale_plane_bilinear_kernel(s0.val[0], s0.val[1], s1.val[0], s1.val[1],
+ coef0, coef1, dst);
+ src0 += 32;
+ src1 += 32;
+ dst += 16;
+ x -= 16;
+ } while (x);
+ src0 += 2 * (src_stride - max_width);
+ src1 += 2 * (src_stride - max_width);
+ dst += dst_stride - max_width;
+ } while (--y);
+}
+
+static INLINE void scale_plane_4_to_1_bilinear(
+ const uint8_t *const src, const int src_stride, uint8_t *dst,
+ const int dst_stride, const int w, const int h, const int16_t c0,
+ const int16_t c1) {
+ const int max_width = (w + 15) & ~15;
+ const uint8_t *src0 = src;
+ const uint8_t *src1 = src + src_stride;
+ const uint8x8_t coef0 = vdup_n_u8(c0);
+ const uint8x8_t coef1 = vdup_n_u8(c1);
+ int y = h;
+
+ assert(w && h);
+
+ do {
+ int x = max_width;
+ do {
+ // (*) -- these de-interleaved rows are loaded but not used
+ // 000 004 008 00C 010 014 018 01C 020 024 028 02C 030 034 038 03C
+ // 001 005 009 00D 011 015 019 01D 021 025 029 02D 031 035 039 03D
+ // 002 006 00A 00E 012 016 01A 01E 022 026 02A 02E 032 036 03A 03E (*)
+ // 003 007 00B 00F 013 017 01B 01F 023 027 02B 02F 033 037 03B 03F (*)
+ const uint8x16x4_t s0 = vld4q_u8(src0);
+ // 100 104 108 10C 110 114 118 11C 120 124 128 12C 130 134 138 13C
+ // 101 105 109 10D 111 115 119 11D 121 125 129 12D 131 135 139 13D
+ // 102 106 10A 10E 112 116 11A 11E 122 126 12A 12E 132 136 13A 13E (*)
+ // 103 107 10B 10F 113 117 11B 11F 123 127 12B 12F 133 137 13B 13F (*)
+ const uint8x16x4_t s1 = vld4q_u8(src1);
+ scale_plane_bilinear_kernel(s0.val[0], s0.val[1], s1.val[0], s1.val[1],
+ coef0, coef1, dst);
+ src0 += 64;
+ src1 += 64;
+ dst += 16;
+ x -= 16;
+ } while (x);
+ src0 += 4 * (src_stride - max_width);
+ src1 += 4 * (src_stride - max_width);
+ dst += dst_stride - max_width;
+ } while (--y);
+}
+
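+// Two-tap bilinear filter: returns
+// (s[0] * coef[0] + s[1] * coef[1] + 64) >> 7. The two bilinear taps sum to
+// 128, so this is a rounded weighted average of two neighboring pixels.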
+static INLINE uint8x8_t scale_filter_bilinear(const uint8x8_t *const s,
+ const uint8x8_t *const coef) {
+ const uint16x8_t h0 = vmull_u8(s[0], coef[0]);
+ const uint16x8_t h1 = vmlal_u8(h0, s[1], coef[1]);
+
+ return vrshrn_n_u16(h1, 7);
+}
+
+static void scale_plane_2_to_1_general(const uint8_t *src, const int src_stride,
+ uint8_t *dst, const int dst_stride,
+ const int w, const int h,
+ const int16_t *const coef,
+ uint8_t *const temp_buffer) {
+ const int width_hor = (w + 3) & ~3;
+ const int width_ver = (w + 7) & ~7;
+ const int height_hor = (2 * h + SUBPEL_TAPS - 2 + 7) & ~7;
+ const int height_ver = (h + 3) & ~3;
+ const int16x8_t filters = vld1q_s16(coef);
+ int x, y = height_hor;
+ uint8_t *t = temp_buffer;
+ uint8x8_t s[14], d[4];
+
+ assert(w && h);
+
+ src -= (SUBPEL_TAPS / 2 - 1) * src_stride + SUBPEL_TAPS / 2 + 1;
+
+ // horizontal 4x8
+ // Note: processing 4x8 is about 20% faster than processing row by row using
+ // vld4_u8().
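+ // Each iteration loads 8 new source columns into s[6..13], filters 4 output
+ // columns from the overlapping 8-tap windows s[0..7], s[2..9], s[4..11] and
+ // s[6..13], then slides the window by copying s[8..13] down to s[0..5].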
+ do {
+ load_u8_8x8(src + 2, src_stride, &s[0], &s[1], &s[2], &s[3], &s[4], &s[5],
+ &s[6], &s[7]);
+ transpose_u8_8x8(&s[0], &s[1], &s[2], &s[3], &s[4], &s[5], &s[6], &s[7]);
+ x = width_hor;
+
+ do {
+ src += 8;
+ load_u8_8x8(src, src_stride, &s[6], &s[7], &s[8], &s[9], &s[10], &s[11],
+ &s[12], &s[13]);
+ transpose_u8_8x8(&s[6], &s[7], &s[8], &s[9], &s[10], &s[11], &s[12],
+ &s[13]);
+
+ d[0] = scale_filter_8(&s[0], filters); // 00 10 20 30 40 50 60 70
+ d[1] = scale_filter_8(&s[2], filters); // 01 11 21 31 41 51 61 71
+ d[2] = scale_filter_8(&s[4], filters); // 02 12 22 32 42 52 62 72
+ d[3] = scale_filter_8(&s[6], filters); // 03 13 23 33 43 53 63 73
+ // 00 01 02 03 40 41 42 43
+ // 10 11 12 13 50 51 52 53
+ // 20 21 22 23 60 61 62 63
+ // 30 31 32 33 70 71 72 73
+ transpose_u8_8x4(&d[0], &d[1], &d[2], &d[3]);
+ vst1_lane_u32((uint32_t *)(t + 0 * width_hor), vreinterpret_u32_u8(d[0]),
+ 0);
+ vst1_lane_u32((uint32_t *)(t + 1 * width_hor), vreinterpret_u32_u8(d[1]),
+ 0);
+ vst1_lane_u32((uint32_t *)(t + 2 * width_hor), vreinterpret_u32_u8(d[2]),
+ 0);
+ vst1_lane_u32((uint32_t *)(t + 3 * width_hor), vreinterpret_u32_u8(d[3]),
+ 0);
+ vst1_lane_u32((uint32_t *)(t + 4 * width_hor), vreinterpret_u32_u8(d[0]),
+ 1);
+ vst1_lane_u32((uint32_t *)(t + 5 * width_hor), vreinterpret_u32_u8(d[1]),
+ 1);
+ vst1_lane_u32((uint32_t *)(t + 6 * width_hor), vreinterpret_u32_u8(d[2]),
+ 1);
+ vst1_lane_u32((uint32_t *)(t + 7 * width_hor), vreinterpret_u32_u8(d[3]),
+ 1);
+
+ s[0] = s[8];
+ s[1] = s[9];
+ s[2] = s[10];
+ s[3] = s[11];
+ s[4] = s[12];
+ s[5] = s[13];
+
+ t += 4;
+ x -= 4;
+ } while (x);
+ src += 8 * src_stride - 2 * width_hor;
+ t += 7 * width_hor;
+ y -= 8;
+ } while (y);
+
+ // vertical 8x4
+ x = width_ver;
+ t = temp_buffer;
+ do {
+ load_u8_8x8(t, width_hor, &s[0], &s[1], &s[2], &s[3], &s[4], &s[5], &s[6],
+ &s[7]);
+ t += 6 * width_hor;
+ y = height_ver;
+
+ do {
+ load_u8_8x8(t, width_hor, &s[6], &s[7], &s[8], &s[9], &s[10], &s[11],
+ &s[12], &s[13]);
+ t += 8 * width_hor;
+
+ d[0] = scale_filter_8(&s[0], filters); // 00 01 02 03 04 05 06 07
+ d[1] = scale_filter_8(&s[2], filters); // 10 11 12 13 14 15 16 17
+ d[2] = scale_filter_8(&s[4], filters); // 20 21 22 23 24 25 26 27
+ d[3] = scale_filter_8(&s[6], filters); // 30 31 32 33 34 35 36 37
+ vst1_u8(dst + 0 * dst_stride, d[0]);
+ vst1_u8(dst + 1 * dst_stride, d[1]);
+ vst1_u8(dst + 2 * dst_stride, d[2]);
+ vst1_u8(dst + 3 * dst_stride, d[3]);
+
+ s[0] = s[8];
+ s[1] = s[9];
+ s[2] = s[10];
+ s[3] = s[11];
+ s[4] = s[12];
+ s[5] = s[13];
+
+ dst += 4 * dst_stride;
+ y -= 4;
+ } while (y);
+ t -= width_hor * (2 * height_ver + 6);
+ t += 8;
+ dst -= height_ver * dst_stride;
+ dst += 8;
+ x -= 8;
+ } while (x);
+}
+
+static void scale_plane_4_to_1_general(const uint8_t *src, const int src_stride,
+ uint8_t *dst, const int dst_stride,
+ const int w, const int h,
+ const int16_t *const coef,
+ uint8_t *const temp_buffer) {
+ const int width_hor = (w + 1) & ~1;
+ const int width_ver = (w + 7) & ~7;
+ const int height_hor = (4 * h + SUBPEL_TAPS - 2 + 7) & ~7;
+ const int height_ver = (h + 1) & ~1;
+ const int16x8_t filters = vld1q_s16(coef);
+ int x, y = height_hor;
+ uint8_t *t = temp_buffer;
+ uint8x8_t s[12], d[2];
+
+ assert(w && h);
+
+ src -= (SUBPEL_TAPS / 2 - 1) * src_stride + SUBPEL_TAPS / 2 + 3;
+
+ // horizontal 2x8
+ // Note: processing 2x8 is about 20% faster than processing row by row using
+ // vld4_u8().
+ do {
+ load_u8_8x8(src + 4, src_stride, &s[0], &s[1], &s[2], &s[3], &s[4], &s[5],
+ &s[6], &s[7]);
+ transpose_u8_4x8(&s[0], &s[1], &s[2], &s[3], s[4], s[5], s[6], s[7]);
+ x = width_hor;
+
+ do {
+ uint8x8x2_t dd;
+ src += 8;
+ load_u8_8x8(src, src_stride, &s[4], &s[5], &s[6], &s[7], &s[8], &s[9],
+ &s[10], &s[11]);
+ transpose_u8_8x8(&s[4], &s[5], &s[6], &s[7], &s[8], &s[9], &s[10],
+ &s[11]);
+
+ d[0] = scale_filter_8(&s[0], filters); // 00 10 20 30 40 50 60 70
+ d[1] = scale_filter_8(&s[4], filters); // 01 11 21 31 41 51 61 71
+ // dd.val[0]: 00 01 20 21 40 41 60 61
+ // dd.val[1]: 10 11 30 31 50 51 70 71
+ dd = vtrn_u8(d[0], d[1]);
+ vst1_lane_u16((uint16_t *)(t + 0 * width_hor),
+ vreinterpret_u16_u8(dd.val[0]), 0);
+ vst1_lane_u16((uint16_t *)(t + 1 * width_hor),
+ vreinterpret_u16_u8(dd.val[1]), 0);
+ vst1_lane_u16((uint16_t *)(t + 2 * width_hor),
+ vreinterpret_u16_u8(dd.val[0]), 1);
+ vst1_lane_u16((uint16_t *)(t + 3 * width_hor),
+ vreinterpret_u16_u8(dd.val[1]), 1);
+ vst1_lane_u16((uint16_t *)(t + 4 * width_hor),
+ vreinterpret_u16_u8(dd.val[0]), 2);
+ vst1_lane_u16((uint16_t *)(t + 5 * width_hor),
+ vreinterpret_u16_u8(dd.val[1]), 2);
+ vst1_lane_u16((uint16_t *)(t + 6 * width_hor),
+ vreinterpret_u16_u8(dd.val[0]), 3);
+ vst1_lane_u16((uint16_t *)(t + 7 * width_hor),
+ vreinterpret_u16_u8(dd.val[1]), 3);
+
+ s[0] = s[8];
+ s[1] = s[9];
+ s[2] = s[10];
+ s[3] = s[11];
+
+ t += 2;
+ x -= 2;
+ } while (x);
+ src += 8 * src_stride - 4 * width_hor;
+ t += 7 * width_hor;
+ y -= 8;
+ } while (y);
+
+ // vertical 8x2
+ x = width_ver;
+ t = temp_buffer;
+ do {
+ load_u8_8x4(t, width_hor, &s[0], &s[1], &s[2], &s[3]);
+ t += 4 * width_hor;
+ y = height_ver;
+
+ do {
+ load_u8_8x8(t, width_hor, &s[4], &s[5], &s[6], &s[7], &s[8], &s[9],
+ &s[10], &s[11]);
+ t += 8 * width_hor;
+
+ d[0] = scale_filter_8(&s[0], filters); // 00 01 02 03 04 05 06 07
+ d[1] = scale_filter_8(&s[4], filters); // 10 11 12 13 14 15 16 17
+ vst1_u8(dst + 0 * dst_stride, d[0]);
+ vst1_u8(dst + 1 * dst_stride, d[1]);
+
+ s[0] = s[8];
+ s[1] = s[9];
+ s[2] = s[10];
+ s[3] = s[11];
+
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ t -= width_hor * (4 * height_ver + 4);
+ t += 8;
+ dst -= height_ver * dst_stride;
+ dst += 8;
+ x -= 8;
+ } while (x);
+}
+
+// Notes for 4 to 3 scaling:
+//
+// 1. 6 rows are calculated in each horizontal inner loop, so width_hor must be
+// a multiple of 6, and no less than w.
+//
+// 2. 8 rows are calculated in each vertical inner loop, so width_ver must be a
+// multiple of 8, and no less than w.
+//
+// 3. 8 columns are calculated in each horizontal inner loop for further
+// vertical scaling, so height_hor must be a multiple of 8, and no less than
+// 4 * h / 3.
+//
+// 4. 6 columns are calculated in each vertical inner loop, so height_ver must
+// be a multiple of 6, and no less than h.
+//
+// 5. The physical location of the last row of the 4 to 3 scaled frame is
+// decided by phase_scaler, and is always less than 1 pixel below the last row
+// of the original image.
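+//
+// For example, with phase_scaler == 8: step_q4 = 16 * 4 / 3 = 21, so the three
+// output columns in each group of 4 use filter phases 8, (8 + 21) & 15 = 13
+// and (8 + 42) & 15 = 2, taken at source pixel offsets 0, (8 + 21) >> 4 = 1
+// and (8 + 42) >> 4 = 3.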
+
+static void scale_plane_4_to_3_bilinear(const uint8_t *src,
+ const int src_stride, uint8_t *dst,
+ const int dst_stride, const int w,
+ const int h, const int phase_scaler,
+ uint8_t *const temp_buffer) {
+ static const int step_q4 = 16 * 4 / 3;
+ const int width_hor = (w + 5) - ((w + 5) % 6);
+ const int stride_hor = width_hor + 2; // store 2 extra pixels
+ const int width_ver = (w + 7) & ~7;
+ // We only need 1 extra row below because there are only 2 bilinear
+ // coefficients.
+ const int height_hor = (4 * h / 3 + 1 + 7) & ~7;
+ const int height_ver = (h + 5) - ((h + 5) % 6);
+ int x, y = height_hor;
+ uint8_t *t = temp_buffer;
+ uint8x8_t s[9], d[8], c[6];
+
+ assert(w && h);
+
+ c[0] = vdup_n_u8((uint8_t)vp9_filter_kernels[BILINEAR][phase_scaler][3]);
+ c[1] = vdup_n_u8((uint8_t)vp9_filter_kernels[BILINEAR][phase_scaler][4]);
+ c[2] = vdup_n_u8(
+ (uint8_t)vp9_filter_kernels[BILINEAR][(phase_scaler + 1 * step_q4) &
+ SUBPEL_MASK][3]);
+ c[3] = vdup_n_u8(
+ (uint8_t)vp9_filter_kernels[BILINEAR][(phase_scaler + 1 * step_q4) &
+ SUBPEL_MASK][4]);
+ c[4] = vdup_n_u8(
+ (uint8_t)vp9_filter_kernels[BILINEAR][(phase_scaler + 2 * step_q4) &
+ SUBPEL_MASK][3]);
+ c[5] = vdup_n_u8(
+ (uint8_t)vp9_filter_kernels[BILINEAR][(phase_scaler + 2 * step_q4) &
+ SUBPEL_MASK][4]);
+
+ d[6] = vdup_n_u8(0);
+ d[7] = vdup_n_u8(0);
+
+ // horizontal 6x8
+ do {
+ load_u8_8x8(src, src_stride, &s[0], &s[1], &s[2], &s[3], &s[4], &s[5],
+ &s[6], &s[7]);
+ src += 1;
+ transpose_u8_8x8(&s[0], &s[1], &s[2], &s[3], &s[4], &s[5], &s[6], &s[7]);
+ x = width_hor;
+
+ do {
+ load_u8_8x8(src, src_stride, &s[1], &s[2], &s[3], &s[4], &s[5], &s[6],
+ &s[7], &s[8]);
+ src += 8;
+ transpose_u8_8x8(&s[1], &s[2], &s[3], &s[4], &s[5], &s[6], &s[7], &s[8]);
+
+ // 00 10 20 30 40 50 60 70
+ // 01 11 21 31 41 51 61 71
+ // 02 12 22 32 42 52 62 72
+ // 03 13 23 33 43 53 63 73
+ // 04 14 24 34 44 54 64 74
+ // 05 15 25 35 45 55 65 75
+ d[0] = scale_filter_bilinear(&s[0], &c[0]);
+ d[1] =
+ scale_filter_bilinear(&s[(phase_scaler + 1 * step_q4) >> 4], &c[2]);
+ d[2] =
+ scale_filter_bilinear(&s[(phase_scaler + 2 * step_q4) >> 4], &c[4]);
+ d[3] = scale_filter_bilinear(&s[4], &c[0]);
+ d[4] = scale_filter_bilinear(&s[4 + ((phase_scaler + 1 * step_q4) >> 4)],
+ &c[2]);
+ d[5] = scale_filter_bilinear(&s[4 + ((phase_scaler + 2 * step_q4) >> 4)],
+ &c[4]);
+
+ // 00 01 02 03 04 05 xx xx
+ // 10 11 12 13 14 15 xx xx
+ // 20 21 22 23 24 25 xx xx
+ // 30 31 32 33 34 35 xx xx
+ // 40 41 42 43 44 45 xx xx
+ // 50 51 52 53 54 55 xx xx
+ // 60 61 62 63 64 65 xx xx
+ // 70 71 72 73 74 75 xx xx
+ transpose_u8_8x8(&d[0], &d[1], &d[2], &d[3], &d[4], &d[5], &d[6], &d[7]);
+ // store 2 extra pixels
+ vst1_u8(t + 0 * stride_hor, d[0]);
+ vst1_u8(t + 1 * stride_hor, d[1]);
+ vst1_u8(t + 2 * stride_hor, d[2]);
+ vst1_u8(t + 3 * stride_hor, d[3]);
+ vst1_u8(t + 4 * stride_hor, d[4]);
+ vst1_u8(t + 5 * stride_hor, d[5]);
+ vst1_u8(t + 6 * stride_hor, d[6]);
+ vst1_u8(t + 7 * stride_hor, d[7]);
+
+ s[0] = s[8];
+
+ t += 6;
+ x -= 6;
+ } while (x);
+ src += 8 * src_stride - 4 * width_hor / 3 - 1;
+ t += 7 * stride_hor + 2;
+ y -= 8;
+ } while (y);
+
+ // vertical 8x6
+ x = width_ver;
+ t = temp_buffer;
+ do {
+ load_u8_8x8(t, stride_hor, &s[0], &s[1], &s[2], &s[3], &s[4], &s[5], &s[6],
+ &s[7]);
+ t += stride_hor;
+ y = height_ver;
+
+ do {
+ load_u8_8x8(t, stride_hor, &s[1], &s[2], &s[3], &s[4], &s[5], &s[6],
+ &s[7], &s[8]);
+ t += 8 * stride_hor;
+
+ d[0] = scale_filter_bilinear(&s[0], &c[0]);
+ d[1] =
+ scale_filter_bilinear(&s[(phase_scaler + 1 * step_q4) >> 4], &c[2]);
+ d[2] =
+ scale_filter_bilinear(&s[(phase_scaler + 2 * step_q4) >> 4], &c[4]);
+ d[3] = scale_filter_bilinear(&s[4], &c[0]);
+ d[4] = scale_filter_bilinear(&s[4 + ((phase_scaler + 1 * step_q4) >> 4)],
+ &c[2]);
+ d[5] = scale_filter_bilinear(&s[4 + ((phase_scaler + 2 * step_q4) >> 4)],
+ &c[4]);
+ vst1_u8(dst + 0 * dst_stride, d[0]);
+ vst1_u8(dst + 1 * dst_stride, d[1]);
+ vst1_u8(dst + 2 * dst_stride, d[2]);
+ vst1_u8(dst + 3 * dst_stride, d[3]);
+ vst1_u8(dst + 4 * dst_stride, d[4]);
+ vst1_u8(dst + 5 * dst_stride, d[5]);
+
+ s[0] = s[8];
+
+ dst += 6 * dst_stride;
+ y -= 6;
+ } while (y);
+ t -= stride_hor * (4 * height_ver / 3 + 1);
+ t += 8;
+ dst -= height_ver * dst_stride;
+ dst += 8;
+ x -= 8;
+ } while (x);
+}
+
+static void scale_plane_4_to_3_general(const uint8_t *src, const int src_stride,
+ uint8_t *dst, const int dst_stride,
+ const int w, const int h,
+ const InterpKernel *const coef,
+ const int phase_scaler,
+ uint8_t *const temp_buffer) {
+ static const int step_q4 = 16 * 4 / 3;
+ const int width_hor = (w + 5) - ((w + 5) % 6);
+ const int stride_hor = width_hor + 2; // store 2 extra pixels
+ const int width_ver = (w + 7) & ~7;
+ // We need (SUBPEL_TAPS - 1) extra rows: (SUBPEL_TAPS / 2 - 1) extra rows
+ // above and (SUBPEL_TAPS / 2) extra rows below.
+ const int height_hor = (4 * h / 3 + SUBPEL_TAPS - 1 + 7) & ~7;
+ const int height_ver = (h + 5) - ((h + 5) % 6);
+ const int16x8_t filters0 =
+ vld1q_s16(coef[(phase_scaler + 0 * step_q4) & SUBPEL_MASK]);
+ const int16x8_t filters1 =
+ vld1q_s16(coef[(phase_scaler + 1 * step_q4) & SUBPEL_MASK]);
+ const int16x8_t filters2 =
+ vld1q_s16(coef[(phase_scaler + 2 * step_q4) & SUBPEL_MASK]);
+ int x, y = height_hor;
+ uint8_t *t = temp_buffer;
+ uint8x8_t s[15], d[8];
+
+ assert(w && h);
+
+ src -= (SUBPEL_TAPS / 2 - 1) * src_stride + SUBPEL_TAPS / 2;
+ d[6] = vdup_n_u8(0);
+ d[7] = vdup_n_u8(0);
+
+ // horizontal 6x8
+ do {
+ load_u8_8x8(src + 1, src_stride, &s[0], &s[1], &s[2], &s[3], &s[4], &s[5],
+ &s[6], &s[7]);
+ transpose_u8_8x8(&s[0], &s[1], &s[2], &s[3], &s[4], &s[5], &s[6], &s[7]);
+ x = width_hor;
+
+ do {
+ src += 8;
+ load_u8_8x8(src, src_stride, &s[7], &s[8], &s[9], &s[10], &s[11], &s[12],
+ &s[13], &s[14]);
+ transpose_u8_8x8(&s[7], &s[8], &s[9], &s[10], &s[11], &s[12], &s[13],
+ &s[14]);
+
+ // 00 10 20 30 40 50 60 70
+ // 01 11 21 31 41 51 61 71
+ // 02 12 22 32 42 52 62 72
+ // 03 13 23 33 43 53 63 73
+ // 04 14 24 34 44 54 64 74
+ // 05 15 25 35 45 55 65 75
+ d[0] = scale_filter_8(&s[0], filters0);
+ d[1] = scale_filter_8(&s[(phase_scaler + 1 * step_q4) >> 4], filters1);
+ d[2] = scale_filter_8(&s[(phase_scaler + 2 * step_q4) >> 4], filters2);
+ d[3] = scale_filter_8(&s[4], filters0);
+ d[4] =
+ scale_filter_8(&s[4 + ((phase_scaler + 1 * step_q4) >> 4)], filters1);
+ d[5] =
+ scale_filter_8(&s[4 + ((phase_scaler + 2 * step_q4) >> 4)], filters2);
+
+ // 00 01 02 03 04 05 xx xx
+ // 10 11 12 13 14 15 xx xx
+ // 20 21 22 23 24 25 xx xx
+ // 30 31 32 33 34 35 xx xx
+ // 40 41 42 43 44 45 xx xx
+ // 50 51 52 53 54 55 xx xx
+ // 60 61 62 63 64 65 xx xx
+ // 70 71 72 73 74 75 xx xx
+ transpose_u8_8x8(&d[0], &d[1], &d[2], &d[3], &d[4], &d[5], &d[6], &d[7]);
+ // store 2 extra pixels
+ vst1_u8(t + 0 * stride_hor, d[0]);
+ vst1_u8(t + 1 * stride_hor, d[1]);
+ vst1_u8(t + 2 * stride_hor, d[2]);
+ vst1_u8(t + 3 * stride_hor, d[3]);
+ vst1_u8(t + 4 * stride_hor, d[4]);
+ vst1_u8(t + 5 * stride_hor, d[5]);
+ vst1_u8(t + 6 * stride_hor, d[6]);
+ vst1_u8(t + 7 * stride_hor, d[7]);
+
+ s[0] = s[8];
+ s[1] = s[9];
+ s[2] = s[10];
+ s[3] = s[11];
+ s[4] = s[12];
+ s[5] = s[13];
+ s[6] = s[14];
+
+ t += 6;
+ x -= 6;
+ } while (x);
+ src += 8 * src_stride - 4 * width_hor / 3;
+ t += 7 * stride_hor + 2;
+ y -= 8;
+ } while (y);
+
+ // vertical 8x6
+ x = width_ver;
+ t = temp_buffer;
+ do {
+ load_u8_8x8(t, stride_hor, &s[0], &s[1], &s[2], &s[3], &s[4], &s[5], &s[6],
+ &s[7]);
+ t += 7 * stride_hor;
+ y = height_ver;
+
+ do {
+ load_u8_8x8(t, stride_hor, &s[7], &s[8], &s[9], &s[10], &s[11], &s[12],
+ &s[13], &s[14]);
+ t += 8 * stride_hor;
+
+ d[0] = scale_filter_8(&s[0], filters0);
+ d[1] = scale_filter_8(&s[(phase_scaler + 1 * step_q4) >> 4], filters1);
+ d[2] = scale_filter_8(&s[(phase_scaler + 2 * step_q4) >> 4], filters2);
+ d[3] = scale_filter_8(&s[4], filters0);
+ d[4] =
+ scale_filter_8(&s[4 + ((phase_scaler + 1 * step_q4) >> 4)], filters1);
+ d[5] =
+ scale_filter_8(&s[4 + ((phase_scaler + 2 * step_q4) >> 4)], filters2);
+ vst1_u8(dst + 0 * dst_stride, d[0]);
+ vst1_u8(dst + 1 * dst_stride, d[1]);
+ vst1_u8(dst + 2 * dst_stride, d[2]);
+ vst1_u8(dst + 3 * dst_stride, d[3]);
+ vst1_u8(dst + 4 * dst_stride, d[4]);
+ vst1_u8(dst + 5 * dst_stride, d[5]);
+
+ s[0] = s[8];
+ s[1] = s[9];
+ s[2] = s[10];
+ s[3] = s[11];
+ s[4] = s[12];
+ s[5] = s[13];
+ s[6] = s[14];
+
+ dst += 6 * dst_stride;
+ y -= 6;
+ } while (y);
+ t -= stride_hor * (4 * height_ver / 3 + 7);
+ t += 8;
+ dst -= height_ver * dst_stride;
+ dst += 8;
+ x -= 8;
+ } while (x);
+}
+
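+// The NEON path handles 2:1, 4:1 and 4:3 downscaling, with dedicated code for
+// phase 0 and for bilinear kernels. Any other ratio (or a failed temporary
+// buffer allocation) falls back to the C implementation, and every scaled
+// output has its borders extended afterwards.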
+void vp9_scale_and_extend_frame_neon(const YV12_BUFFER_CONFIG *src,
+ YV12_BUFFER_CONFIG *dst,
+ INTERP_FILTER filter_type,
+ int phase_scaler) {
+ const int src_w = src->y_crop_width;
+ const int src_h = src->y_crop_height;
+ const int dst_w = dst->y_crop_width;
+ const int dst_h = dst->y_crop_height;
+ const int dst_uv_w = dst->uv_crop_width;
+ const int dst_uv_h = dst->uv_crop_height;
+ int scaled = 0;
+
+ // phase_scaler is usually 0 or 8.
+ assert(phase_scaler >= 0 && phase_scaler < 16);
+
+ if (2 * dst_w == src_w && 2 * dst_h == src_h) {
+ // 2 to 1
+ scaled = 1;
+ if (phase_scaler == 0) {
+ scale_plane_2_to_1_phase_0(src->y_buffer, src->y_stride, dst->y_buffer,
+ dst->y_stride, dst_w, dst_h);
+ scale_plane_2_to_1_phase_0(src->u_buffer, src->uv_stride, dst->u_buffer,
+ dst->uv_stride, dst_uv_w, dst_uv_h);
+ scale_plane_2_to_1_phase_0(src->v_buffer, src->uv_stride, dst->v_buffer,
+ dst->uv_stride, dst_uv_w, dst_uv_h);
+ } else if (filter_type == BILINEAR) {
+ const int16_t c0 = vp9_filter_kernels[BILINEAR][phase_scaler][3];
+ const int16_t c1 = vp9_filter_kernels[BILINEAR][phase_scaler][4];
+ scale_plane_2_to_1_bilinear(src->y_buffer, src->y_stride, dst->y_buffer,
+ dst->y_stride, dst_w, dst_h, c0, c1);
+ scale_plane_2_to_1_bilinear(src->u_buffer, src->uv_stride, dst->u_buffer,
+ dst->uv_stride, dst_uv_w, dst_uv_h, c0, c1);
+ scale_plane_2_to_1_bilinear(src->v_buffer, src->uv_stride, dst->v_buffer,
+ dst->uv_stride, dst_uv_w, dst_uv_h, c0, c1);
+ } else {
+ const int buffer_stride = (dst_w + 3) & ~3;
+ const int buffer_height = (2 * dst_h + SUBPEL_TAPS - 2 + 7) & ~7;
+ uint8_t *const temp_buffer =
+ (uint8_t *)malloc(buffer_stride * buffer_height);
+ if (temp_buffer) {
+ scale_plane_2_to_1_general(
+ src->y_buffer, src->y_stride, dst->y_buffer, dst->y_stride, dst_w,
+ dst_h, vp9_filter_kernels[filter_type][phase_scaler], temp_buffer);
+ scale_plane_2_to_1_general(
+ src->u_buffer, src->uv_stride, dst->u_buffer, dst->uv_stride,
+ dst_uv_w, dst_uv_h, vp9_filter_kernels[filter_type][phase_scaler],
+ temp_buffer);
+ scale_plane_2_to_1_general(
+ src->v_buffer, src->uv_stride, dst->v_buffer, dst->uv_stride,
+ dst_uv_w, dst_uv_h, vp9_filter_kernels[filter_type][phase_scaler],
+ temp_buffer);
+ free(temp_buffer);
+ } else {
+ scaled = 0;
+ }
+ }
+ } else if (4 * dst_w == src_w && 4 * dst_h == src_h) {
+ // 4 to 1
+ scaled = 1;
+ if (phase_scaler == 0) {
+ scale_plane_4_to_1_phase_0(src->y_buffer, src->y_stride, dst->y_buffer,
+ dst->y_stride, dst_w, dst_h);
+ scale_plane_4_to_1_phase_0(src->u_buffer, src->uv_stride, dst->u_buffer,
+ dst->uv_stride, dst_uv_w, dst_uv_h);
+ scale_plane_4_to_1_phase_0(src->v_buffer, src->uv_stride, dst->v_buffer,
+ dst->uv_stride, dst_uv_w, dst_uv_h);
+ } else if (filter_type == BILINEAR) {
+ const int16_t c0 = vp9_filter_kernels[BILINEAR][phase_scaler][3];
+ const int16_t c1 = vp9_filter_kernels[BILINEAR][phase_scaler][4];
+ scale_plane_4_to_1_bilinear(src->y_buffer, src->y_stride, dst->y_buffer,
+ dst->y_stride, dst_w, dst_h, c0, c1);
+ scale_plane_4_to_1_bilinear(src->u_buffer, src->uv_stride, dst->u_buffer,
+ dst->uv_stride, dst_uv_w, dst_uv_h, c0, c1);
+ scale_plane_4_to_1_bilinear(src->v_buffer, src->uv_stride, dst->v_buffer,
+ dst->uv_stride, dst_uv_w, dst_uv_h, c0, c1);
+ } else {
+ const int buffer_stride = (dst_w + 1) & ~1;
+ const int buffer_height = (4 * dst_h + SUBPEL_TAPS - 2 + 7) & ~7;
+ uint8_t *const temp_buffer =
+ (uint8_t *)malloc(buffer_stride * buffer_height);
+ if (temp_buffer) {
+ scale_plane_4_to_1_general(
+ src->y_buffer, src->y_stride, dst->y_buffer, dst->y_stride, dst_w,
+ dst_h, vp9_filter_kernels[filter_type][phase_scaler], temp_buffer);
+ scale_plane_4_to_1_general(
+ src->u_buffer, src->uv_stride, dst->u_buffer, dst->uv_stride,
+ dst_uv_w, dst_uv_h, vp9_filter_kernels[filter_type][phase_scaler],
+ temp_buffer);
+ scale_plane_4_to_1_general(
+ src->v_buffer, src->uv_stride, dst->v_buffer, dst->uv_stride,
+ dst_uv_w, dst_uv_h, vp9_filter_kernels[filter_type][phase_scaler],
+ temp_buffer);
+ free(temp_buffer);
+ } else {
+ scaled = 0;
+ }
+ }
+ } else if (4 * dst_w == 3 * src_w && 4 * dst_h == 3 * src_h) {
+ // 4 to 3
+ const int buffer_stride = (dst_w + 5) - ((dst_w + 5) % 6) + 2;
+ const int buffer_height = (4 * dst_h / 3 + SUBPEL_TAPS - 1 + 7) & ~7;
+ uint8_t *const temp_buffer =
+ (uint8_t *)malloc(buffer_stride * buffer_height);
+ if (temp_buffer) {
+ scaled = 1;
+ if (filter_type == BILINEAR) {
+ scale_plane_4_to_3_bilinear(src->y_buffer, src->y_stride, dst->y_buffer,
+ dst->y_stride, dst_w, dst_h, phase_scaler,
+ temp_buffer);
+ scale_plane_4_to_3_bilinear(src->u_buffer, src->uv_stride,
+ dst->u_buffer, dst->uv_stride, dst_uv_w,
+ dst_uv_h, phase_scaler, temp_buffer);
+ scale_plane_4_to_3_bilinear(src->v_buffer, src->uv_stride,
+ dst->v_buffer, dst->uv_stride, dst_uv_w,
+ dst_uv_h, phase_scaler, temp_buffer);
+ } else {
+ scale_plane_4_to_3_general(
+ src->y_buffer, src->y_stride, dst->y_buffer, dst->y_stride, dst_w,
+ dst_h, vp9_filter_kernels[filter_type], phase_scaler, temp_buffer);
+ scale_plane_4_to_3_general(src->u_buffer, src->uv_stride, dst->u_buffer,
+ dst->uv_stride, dst_uv_w, dst_uv_h,
+ vp9_filter_kernels[filter_type],
+ phase_scaler, temp_buffer);
+ scale_plane_4_to_3_general(src->v_buffer, src->uv_stride, dst->v_buffer,
+ dst->uv_stride, dst_uv_w, dst_uv_h,
+ vp9_filter_kernels[filter_type],
+ phase_scaler, temp_buffer);
+ }
+ free(temp_buffer);
+ }
+ }
+
+ if (scaled) {
+ vpx_extend_frame_borders(dst);
+ } else {
+ // Call the C version for all other scaling ratios.
+ vp9_scale_and_extend_frame_c(src, dst, filter_type, phase_scaler);
+ }
+}
diff --git a/media/libvpx/libvpx/vp9/encoder/arm/neon/vp9_highbd_error_neon.c b/media/libvpx/libvpx/vp9/encoder/arm/neon/vp9_highbd_error_neon.c
new file mode 100644
index 0000000000..d9b183472d
--- /dev/null
+++ b/media/libvpx/libvpx/vp9/encoder/arm/neon/vp9_highbd_error_neon.c
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2023 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <arm_neon.h>
+#include <assert.h>
+
+#include "./vp9_rtcd.h"
+#include "vpx_dsp/arm/mem_neon.h"
+#include "vpx_dsp/arm/sum_neon.h"
+
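+// Squared errors are accumulated at the input bit depth and then normalized
+// back to the 8-bit scale: shift = 2 * (bd - 8) is 0, 4 or 8 for 8-, 10- and
+// 12-bit input, applied with round-to-nearest via the precomputed rounding.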
+int64_t vp9_highbd_block_error_neon(const tran_low_t *coeff,
+ const tran_low_t *dqcoeff,
+ intptr_t block_size, int64_t *ssz, int bd) {
+ uint64x2_t err_u64 = vdupq_n_u64(0);
+ int64x2_t ssz_s64 = vdupq_n_s64(0);
+
+ const int shift = 2 * (bd - 8);
+ const int rounding = shift > 0 ? 1 << (shift - 1) : 0;
+
+ assert(block_size >= 16);
+ assert((block_size % 16) == 0);
+
+ do {
+ const int32x4_t c = load_tran_low_to_s32q(coeff);
+ const int32x4_t d = load_tran_low_to_s32q(dqcoeff);
+
+ const uint32x4_t diff = vreinterpretq_u32_s32(vabdq_s32(c, d));
+
+ err_u64 = vmlal_u32(err_u64, vget_low_u32(diff), vget_low_u32(diff));
+ err_u64 = vmlal_u32(err_u64, vget_high_u32(diff), vget_high_u32(diff));
+
+ ssz_s64 = vmlal_s32(ssz_s64, vget_low_s32(c), vget_low_s32(c));
+ ssz_s64 = vmlal_s32(ssz_s64, vget_high_s32(c), vget_high_s32(c));
+
+ coeff += 4;
+ dqcoeff += 4;
+ block_size -= 4;
+ } while (block_size != 0);
+
+ *ssz = (horizontal_add_int64x2(ssz_s64) + rounding) >> shift;
+ return ((int64_t)horizontal_add_uint64x2(err_u64) + rounding) >> shift;
+}
diff --git a/media/libvpx/libvpx/vp9/encoder/arm/neon/vp9_highbd_temporal_filter_neon.c b/media/libvpx/libvpx/vp9/encoder/arm/neon/vp9_highbd_temporal_filter_neon.c
new file mode 100644
index 0000000000..c3aef3c865
--- /dev/null
+++ b/media/libvpx/libvpx/vp9/encoder/arm/neon/vp9_highbd_temporal_filter_neon.c
@@ -0,0 +1,872 @@
+/*
+ * Copyright (c) 2023 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+#include <arm_neon.h>
+
+#include "./vp9_rtcd.h"
+#include "./vpx_config.h"
+#include "vpx/vpx_integer.h"
+#include "vp9/encoder/vp9_encoder.h"
+#include "vp9/encoder/vp9_temporal_filter.h"
+#include "vp9/encoder/vp9_temporal_filter_constants.h"
+
+// Compute (a - b)**2 for 8 pixels with 16-bit values and store the 32-bit
+// results in dst.
+static INLINE void highbd_store_dist_8(const uint16_t *a, const uint16_t *b,
+ uint32_t *dst) {
+ const uint16x8_t a_reg = vld1q_u16(a);
+ const uint16x8_t b_reg = vld1q_u16(b);
+
+ uint16x8_t dist = vabdq_u16(a_reg, b_reg);
+ uint32x4_t dist_first = vmull_u16(vget_low_u16(dist), vget_low_u16(dist));
+ uint32x4_t dist_second = vmull_u16(vget_high_u16(dist), vget_high_u16(dist));
+
+ vst1q_u32(dst, dist_first);
+ vst1q_u32(dst + 4, dist_second);
+}
+
+// Sum up three neighboring distortions for the pixels
+static INLINE void highbd_get_sum_4(const uint32_t *dist, uint32x4_t *sum) {
+ uint32x4_t dist_reg, dist_left, dist_right;
+
+ dist_reg = vld1q_u32(dist);
+ dist_left = vld1q_u32(dist - 1);
+ dist_right = vld1q_u32(dist + 1);
+
+ *sum = vaddq_u32(dist_reg, dist_left);
+ *sum = vaddq_u32(*sum, dist_right);
+}
+
+static INLINE void highbd_get_sum_8(const uint32_t *dist, uint32x4_t *sum_first,
+ uint32x4_t *sum_second) {
+ highbd_get_sum_4(dist, sum_first);
+ highbd_get_sum_4(dist + 4, sum_second);
+}
+
+// Average the value based on the number of values summed (9 for pixels away
+// from the border, 4 for pixels in corners, and 6 for other edge values, plus
+// however many values from the y/uv planes are included).
+//
+// Add in the rounding factor, shift by strength, clamp to 16, invert
+// (16 - value), and multiply by the weight.
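+//
+// Concretely, the result is
+//   (16 - min(16, (sum * mul + (rounding << 32)) >> (strength + 32))) * weight;
+// the (rounding << 32) term and the extra 32 in the shift amount compensate
+// for the fixed-point scale of the mul constants.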
+static INLINE void highbd_average_4(uint32x4_t *output, const uint32x4_t sum,
+ const uint32x4_t *mul_constants,
+ const int strength, const int rounding,
+ const int weight) {
+ const int64x2_t strength_s64 = vdupq_n_s64(-strength - 32);
+ const uint64x2_t rounding_u64 = vdupq_n_u64((uint64_t)rounding << 32);
+ const uint32x4_t weight_u32 = vdupq_n_u32(weight);
+ const uint32x4_t sixteen = vdupq_n_u32(16);
+ uint32x4_t sum2;
+
+ // modifier * 3 / index;
+ uint64x2_t sum_lo =
+ vmlal_u32(rounding_u64, vget_low_u32(sum), vget_low_u32(*mul_constants));
+ uint64x2_t sum_hi = vmlal_u32(rounding_u64, vget_high_u32(sum),
+ vget_high_u32(*mul_constants));
+
+ // We cannot use vshrn_n_u64 because strength is not known at compile time.
+ sum_lo = vshlq_u64(sum_lo, strength_s64);
+ sum_hi = vshlq_u64(sum_hi, strength_s64);
+
+ sum2 = vcombine_u32(vmovn_u64(sum_lo), vmovn_u64(sum_hi));
+
+ // Multiply with the weight
+ sum2 = vminq_u32(sum2, sixteen);
+ sum2 = vsubq_u32(sixteen, sum2);
+ *output = vmulq_u32(sum2, weight_u32);
+}
+
+static INLINE void highbd_average_8(uint32x4_t *output_0, uint32x4_t *output_1,
+ const uint32x4_t sum_0_u32,
+ const uint32x4_t sum_1_u32,
+ const uint32x4_t *mul_constants_0,
+ const uint32x4_t *mul_constants_1,
+ const int strength, const int rounding,
+ const int weight) {
+ highbd_average_4(output_0, sum_0_u32, mul_constants_0, strength, rounding,
+ weight);
+ highbd_average_4(output_1, sum_1_u32, mul_constants_1, strength, rounding,
+ weight);
+}
+
+// Add 'sum_u32' to 'count'. Multiply by 'pred' and add to 'accumulator.'
+static INLINE void highbd_accumulate_and_store_8(
+ const uint32x4_t sum_first_u32, const uint32x4_t sum_second_u32,
+ const uint16_t *pred, uint16_t *count, uint32_t *accumulator) {
+ const uint16x8_t sum_u16 =
+ vcombine_u16(vqmovn_u32(sum_first_u32), vqmovn_u32(sum_second_u32));
+ uint16x8_t pred_u16 = vld1q_u16(pred);
+ uint16x8_t count_u16 = vld1q_u16(count);
+ uint32x4_t pred_0_u32, pred_1_u32;
+ uint32x4_t accum_0_u32, accum_1_u32;
+
+ count_u16 = vqaddq_u16(count_u16, sum_u16);
+ vst1q_u16(count, count_u16);
+
+ accum_0_u32 = vld1q_u32(accumulator);
+ accum_1_u32 = vld1q_u32(accumulator + 4);
+
+ pred_0_u32 = vmovl_u16(vget_low_u16(pred_u16));
+ pred_1_u32 = vmovl_u16(vget_high_u16(pred_u16));
+
+ // Don't use sum_u16 as that produces different results to the C version
+ accum_0_u32 = vmlaq_u32(accum_0_u32, sum_first_u32, pred_0_u32);
+ accum_1_u32 = vmlaq_u32(accum_1_u32, sum_second_u32, pred_1_u32);
+
+ vst1q_u32(accumulator, accum_0_u32);
+ vst1q_u32(accumulator + 4, accum_1_u32);
+}
+
+static INLINE void highbd_read_dist_4(const uint32_t *dist,
+ uint32x4_t *dist_reg) {
+ *dist_reg = vld1q_u32(dist);
+}
+
+static INLINE void highbd_read_dist_8(const uint32_t *dist,
+ uint32x4_t *reg_first,
+ uint32x4_t *reg_second) {
+ highbd_read_dist_4(dist, reg_first);
+ highbd_read_dist_4(dist + 4, reg_second);
+}
+
+static INLINE void highbd_read_chroma_dist_row_8(
+ int ss_x, const uint32_t *u_dist, const uint32_t *v_dist,
+ uint32x4_t *u_first, uint32x4_t *u_second, uint32x4_t *v_first,
+ uint32x4_t *v_second) {
+ if (!ss_x) {
+ // If there is no chroma subsampling in the horizontal direction, then we
+ // need to load 8 entries from chroma.
+ highbd_read_dist_8(u_dist, u_first, u_second);
+ highbd_read_dist_8(v_dist, v_first, v_second);
+ } else { // ss_x == 1
+ // Otherwise we only need to load 4 entries; each chroma value covers two
+ // luma columns, so each loaded value is duplicated to fill 8 lanes.
+ uint32x4_t u_reg, v_reg;
+ uint32x4x2_t pair;
+
+ highbd_read_dist_4(u_dist, &u_reg);
+
+ pair = vzipq_u32(u_reg, u_reg);
+ *u_first = pair.val[0];
+ *u_second = pair.val[1];
+
+ highbd_read_dist_4(v_dist, &v_reg);
+
+ pair = vzipq_u32(v_reg, v_reg);
+ *v_first = pair.val[0];
+ *v_second = pair.val[1];
+ }
+}
+
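+// Filter one 8-wide strip of luma. Three running 3-tap horizontal sums of
+// y_dist (previous, current and next row) are kept and shifted up on every
+// iteration, so interior pixels see their full 3x3 neighborhood plus the
+// corresponding chroma distortions, while the first and last rows sum only
+// two rows and therefore use the neighbors_*[0] tables.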
+static void highbd_apply_temporal_filter_luma_8(
+ const uint16_t *y_pre, int y_pre_stride, unsigned int block_width,
+ unsigned int block_height, int ss_x, int ss_y, int strength,
+ int use_whole_blk, uint32_t *y_accum, uint16_t *y_count,
+ const uint32_t *y_dist, const uint32_t *u_dist, const uint32_t *v_dist,
+ const uint32_t *const *neighbors_first,
+ const uint32_t *const *neighbors_second, int top_weight,
+ int bottom_weight) {
+ const int rounding = (1 << strength) >> 1;
+ int weight = top_weight;
+
+ uint32x4_t mul_first, mul_second;
+
+ uint32x4_t sum_row_1_first, sum_row_1_second;
+ uint32x4_t sum_row_2_first, sum_row_2_second;
+ uint32x4_t sum_row_3_first, sum_row_3_second;
+
+ uint32x4_t u_first, u_second;
+ uint32x4_t v_first, v_second;
+
+ uint32x4_t sum_row_first;
+ uint32x4_t sum_row_second;
+
+ // Loop variables
+ unsigned int h;
+
+ assert(strength >= 4 && strength <= 14 &&
+ "invalid adjusted temporal filter strength");
+ assert(block_width == 8);
+
+ (void)block_width;
+
+ // First row
+ mul_first = vld1q_u32(neighbors_first[0]);
+ mul_second = vld1q_u32(neighbors_second[0]);
+
+ // Add luma values
+ highbd_get_sum_8(y_dist, &sum_row_2_first, &sum_row_2_second);
+ highbd_get_sum_8(y_dist + DIST_STRIDE, &sum_row_3_first, &sum_row_3_second);
+
+ // We don't need to saturate here because the maximum value is UINT12_MAX ** 2
+ // * 9 ~= 2**24 * 9 < 2 ** 28 < INT32_MAX
+ sum_row_first = vaddq_u32(sum_row_2_first, sum_row_3_first);
+ sum_row_second = vaddq_u32(sum_row_2_second, sum_row_3_second);
+
+ // Add chroma values
+ highbd_read_chroma_dist_row_8(ss_x, u_dist, v_dist, &u_first, &u_second,
+ &v_first, &v_second);
+
+ // Max value here is 2 ** 24 * (9 + 2), so no saturation is needed
+ sum_row_first = vaddq_u32(sum_row_first, u_first);
+ sum_row_second = vaddq_u32(sum_row_second, u_second);
+
+ sum_row_first = vaddq_u32(sum_row_first, v_first);
+ sum_row_second = vaddq_u32(sum_row_second, v_second);
+
+ // Get modifier and store result
+ highbd_average_8(&sum_row_first, &sum_row_second, sum_row_first,
+ sum_row_second, &mul_first, &mul_second, strength, rounding,
+ weight);
+
+ highbd_accumulate_and_store_8(sum_row_first, sum_row_second, y_pre, y_count,
+ y_accum);
+
+ y_pre += y_pre_stride;
+ y_count += y_pre_stride;
+ y_accum += y_pre_stride;
+ y_dist += DIST_STRIDE;
+
+ u_dist += DIST_STRIDE;
+ v_dist += DIST_STRIDE;
+
+ // Then all the rows except the last one
+ mul_first = vld1q_u32(neighbors_first[1]);
+ mul_second = vld1q_u32(neighbors_second[1]);
+
+ for (h = 1; h < block_height - 1; ++h) {
+ // Move the weight to bottom half
+ if (!use_whole_blk && h == block_height / 2) {
+ weight = bottom_weight;
+ }
+ // Shift the rows up
+ sum_row_1_first = sum_row_2_first;
+ sum_row_1_second = sum_row_2_second;
+ sum_row_2_first = sum_row_3_first;
+ sum_row_2_second = sum_row_3_second;
+
+ // Add luma values to the modifier
+ sum_row_first = vaddq_u32(sum_row_1_first, sum_row_2_first);
+ sum_row_second = vaddq_u32(sum_row_1_second, sum_row_2_second);
+
+ highbd_get_sum_8(y_dist + DIST_STRIDE, &sum_row_3_first, &sum_row_3_second);
+
+ sum_row_first = vaddq_u32(sum_row_first, sum_row_3_first);
+ sum_row_second = vaddq_u32(sum_row_second, sum_row_3_second);
+
+ // Add chroma values to the modifier
+ if (ss_y == 0 || h % 2 == 0) {
+ // Only calculate the new chroma distortion if we are at a pixel that
+ // corresponds to a new chroma row
+ highbd_read_chroma_dist_row_8(ss_x, u_dist, v_dist, &u_first, &u_second,
+ &v_first, &v_second);
+
+ u_dist += DIST_STRIDE;
+ v_dist += DIST_STRIDE;
+ }
+
+ sum_row_first = vaddq_u32(sum_row_first, u_first);
+ sum_row_second = vaddq_u32(sum_row_second, u_second);
+ sum_row_first = vaddq_u32(sum_row_first, v_first);
+ sum_row_second = vaddq_u32(sum_row_second, v_second);
+
+ // Get modifier and store result
+ highbd_average_8(&sum_row_first, &sum_row_second, sum_row_first,
+ sum_row_second, &mul_first, &mul_second, strength,
+ rounding, weight);
+ highbd_accumulate_and_store_8(sum_row_first, sum_row_second, y_pre, y_count,
+ y_accum);
+
+ y_pre += y_pre_stride;
+ y_count += y_pre_stride;
+ y_accum += y_pre_stride;
+ y_dist += DIST_STRIDE;
+ }
+
+ // The last row
+ mul_first = vld1q_u32(neighbors_first[0]);
+ mul_second = vld1q_u32(neighbors_second[0]);
+
+ // Shift the rows up
+ sum_row_1_first = sum_row_2_first;
+ sum_row_1_second = sum_row_2_second;
+ sum_row_2_first = sum_row_3_first;
+ sum_row_2_second = sum_row_3_second;
+
+ // Add luma values to the modifier
+ sum_row_first = vaddq_u32(sum_row_1_first, sum_row_2_first);
+ sum_row_second = vaddq_u32(sum_row_1_second, sum_row_2_second);
+
+ // Add chroma values to the modifier
+ if (ss_y == 0) {
+ // Only calculate the new chroma distortion if we are at a pixel that
+ // corresponds to a new chroma row
+ highbd_read_chroma_dist_row_8(ss_x, u_dist, v_dist, &u_first, &u_second,
+ &v_first, &v_second);
+ }
+
+ sum_row_first = vaddq_u32(sum_row_first, u_first);
+ sum_row_second = vaddq_u32(sum_row_second, u_second);
+ sum_row_first = vaddq_u32(sum_row_first, v_first);
+ sum_row_second = vaddq_u32(sum_row_second, v_second);
+
+ // Get modifier and store result
+ highbd_average_8(&sum_row_first, &sum_row_second, sum_row_first,
+ sum_row_second, &mul_first, &mul_second, strength, rounding,
+ weight);
+ highbd_accumulate_and_store_8(sum_row_first, sum_row_second, y_pre, y_count,
+ y_accum);
+}
+
+// Perform temporal filter for the luma component.
+static void highbd_apply_temporal_filter_luma(
+ const uint16_t *y_pre, int y_pre_stride, unsigned int block_width,
+ unsigned int block_height, int ss_x, int ss_y, int strength,
+ const int *blk_fw, int use_whole_blk, uint32_t *y_accum, uint16_t *y_count,
+ const uint32_t *y_dist, const uint32_t *u_dist, const uint32_t *v_dist) {
+ unsigned int blk_col = 0, uv_blk_col = 0;
+ const unsigned int blk_col_step = 8, uv_blk_col_step = 8 >> ss_x;
+ const unsigned int mid_width = block_width >> 1,
+ last_width = block_width - blk_col_step;
+ int top_weight = blk_fw[0],
+ bottom_weight = use_whole_blk ? blk_fw[0] : blk_fw[2];
+ const uint32_t *const *neighbors_first;
+ const uint32_t *const *neighbors_second;
+
+ // Left
+ neighbors_first = HIGHBD_LUMA_LEFT_COLUMN_NEIGHBORS;
+ neighbors_second = HIGHBD_LUMA_MIDDLE_COLUMN_NEIGHBORS;
+ highbd_apply_temporal_filter_luma_8(
+ y_pre + blk_col, y_pre_stride, blk_col_step, block_height, ss_x, ss_y,
+ strength, use_whole_blk, y_accum + blk_col, y_count + blk_col,
+ y_dist + blk_col, u_dist + uv_blk_col, v_dist + uv_blk_col,
+ neighbors_first, neighbors_second, top_weight, bottom_weight);
+
+ blk_col += blk_col_step;
+ uv_blk_col += uv_blk_col_step;
+
+ // Middle First
+ neighbors_first = HIGHBD_LUMA_MIDDLE_COLUMN_NEIGHBORS;
+ for (; blk_col < mid_width;
+ blk_col += blk_col_step, uv_blk_col += uv_blk_col_step) {
+ highbd_apply_temporal_filter_luma_8(
+ y_pre + blk_col, y_pre_stride, blk_col_step, block_height, ss_x, ss_y,
+ strength, use_whole_blk, y_accum + blk_col, y_count + blk_col,
+ y_dist + blk_col, u_dist + uv_blk_col, v_dist + uv_blk_col,
+ neighbors_first, neighbors_second, top_weight, bottom_weight);
+ }
+
+ if (!use_whole_blk) {
+ top_weight = blk_fw[1];
+ bottom_weight = blk_fw[3];
+ }
+
+ // Middle Second
+ for (; blk_col < last_width;
+ blk_col += blk_col_step, uv_blk_col += uv_blk_col_step) {
+ highbd_apply_temporal_filter_luma_8(
+ y_pre + blk_col, y_pre_stride, blk_col_step, block_height, ss_x, ss_y,
+ strength, use_whole_blk, y_accum + blk_col, y_count + blk_col,
+ y_dist + blk_col, u_dist + uv_blk_col, v_dist + uv_blk_col,
+ neighbors_first, neighbors_second, top_weight, bottom_weight);
+ }
+
+ // Right
+ neighbors_second = HIGHBD_LUMA_RIGHT_COLUMN_NEIGHBORS;
+ highbd_apply_temporal_filter_luma_8(
+ y_pre + blk_col, y_pre_stride, blk_col_step, block_height, ss_x, ss_y,
+ strength, use_whole_blk, y_accum + blk_col, y_count + blk_col,
+ y_dist + blk_col, u_dist + uv_blk_col, v_dist + uv_blk_col,
+ neighbors_first, neighbors_second, top_weight, bottom_weight);
+}
+
+// Add a row of luma distortion that corresponds to 8 chroma modifiers. If we
+// are subsampling in the x direction, then we have 16 luma values, else 8.
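+// When ss_x == 1 the 16 luma distortions are pairwise added (vpaddlq_u32) so
+// that each chroma position receives the sum of the two luma columns it
+// covers.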
+static INLINE void highbd_add_luma_dist_to_8_chroma_mod(
+ const uint32_t *y_dist, int ss_x, int ss_y, uint32x4_t *u_mod_fst,
+ uint32x4_t *u_mod_snd, uint32x4_t *v_mod_fst, uint32x4_t *v_mod_snd) {
+ uint32x4_t y_reg_fst, y_reg_snd;
+ if (!ss_x) {
+ highbd_read_dist_8(y_dist, &y_reg_fst, &y_reg_snd);
+ if (ss_y == 1) {
+ uint32x4_t y_tmp_fst, y_tmp_snd;
+ highbd_read_dist_8(y_dist + DIST_STRIDE, &y_tmp_fst, &y_tmp_snd);
+ y_reg_fst = vaddq_u32(y_reg_fst, y_tmp_fst);
+ y_reg_snd = vaddq_u32(y_reg_snd, y_tmp_snd);
+ }
+ } else {
+ // Temporary
+ uint32x4_t y_fst, y_snd;
+ uint64x2_t y_fst64, y_snd64;
+
+ // First 8
+ highbd_read_dist_8(y_dist, &y_fst, &y_snd);
+ if (ss_y == 1) {
+ uint32x4_t y_tmp_fst, y_tmp_snd;
+ highbd_read_dist_8(y_dist + DIST_STRIDE, &y_tmp_fst, &y_tmp_snd);
+
+ y_fst = vaddq_u32(y_fst, y_tmp_fst);
+ y_snd = vaddq_u32(y_snd, y_tmp_snd);
+ }
+
+ y_fst64 = vpaddlq_u32(y_fst);
+ y_snd64 = vpaddlq_u32(y_snd);
+ y_reg_fst = vcombine_u32(vqmovn_u64(y_fst64), vqmovn_u64(y_snd64));
+
+ // Second 8
+ highbd_read_dist_8(y_dist + 8, &y_fst, &y_snd);
+ if (ss_y == 1) {
+ uint32x4_t y_tmp_fst, y_tmp_snd;
+ highbd_read_dist_8(y_dist + 8 + DIST_STRIDE, &y_tmp_fst, &y_tmp_snd);
+
+ y_fst = vaddq_u32(y_fst, y_tmp_fst);
+ y_snd = vaddq_u32(y_snd, y_tmp_snd);
+ }
+
+ y_fst64 = vpaddlq_u32(y_fst);
+ y_snd64 = vpaddlq_u32(y_snd);
+ y_reg_snd = vcombine_u32(vqmovn_u64(y_fst64), vqmovn_u64(y_snd64));
+ }
+
+ *u_mod_fst = vaddq_u32(*u_mod_fst, y_reg_fst);
+ *u_mod_snd = vaddq_u32(*u_mod_snd, y_reg_snd);
+ *v_mod_fst = vaddq_u32(*v_mod_fst, y_reg_fst);
+ *v_mod_snd = vaddq_u32(*v_mod_snd, y_reg_snd);
+}
+
+// Apply the temporal filter to the chroma components. This performs temporal
+// filtering on a chroma block of 8 x uv_height. If blk_fw is not NULL, use it
+// as an array of 4 weights, one for each of the 4 subblocks; otherwise use
+// top_weight for the top half and bottom_weight for the bottom half.
+static void highbd_apply_temporal_filter_chroma_8(
+ const uint16_t *u_pre, const uint16_t *v_pre, int uv_pre_stride,
+ unsigned int uv_block_width, unsigned int uv_block_height, int ss_x,
+ int ss_y, int strength, uint32_t *u_accum, uint16_t *u_count,
+ uint32_t *v_accum, uint16_t *v_count, const uint32_t *y_dist,
+ const uint32_t *u_dist, const uint32_t *v_dist,
+ const uint32_t *const *neighbors_fst, const uint32_t *const *neighbors_snd,
+ int top_weight, int bottom_weight, const int *blk_fw) {
+ const int rounding = (1 << strength) >> 1;
+ int weight = top_weight;
+
+ uint32x4_t mul_fst, mul_snd;
+
+ uint32x4_t u_sum_row_1_fst, u_sum_row_2_fst, u_sum_row_3_fst;
+ uint32x4_t v_sum_row_1_fst, v_sum_row_2_fst, v_sum_row_3_fst;
+ uint32x4_t u_sum_row_1_snd, u_sum_row_2_snd, u_sum_row_3_snd;
+ uint32x4_t v_sum_row_1_snd, v_sum_row_2_snd, v_sum_row_3_snd;
+
+ uint32x4_t u_sum_row_fst, v_sum_row_fst;
+ uint32x4_t u_sum_row_snd, v_sum_row_snd;
+
+ // Loop variable
+ unsigned int h;
+
+ (void)uv_block_width;
+
+ // First row
+ mul_fst = vld1q_u32(neighbors_fst[0]);
+ mul_snd = vld1q_u32(neighbors_snd[0]);
+
+ // Add chroma values
+ highbd_get_sum_8(u_dist, &u_sum_row_2_fst, &u_sum_row_2_snd);
+ highbd_get_sum_8(u_dist + DIST_STRIDE, &u_sum_row_3_fst, &u_sum_row_3_snd);
+
+ u_sum_row_fst = vaddq_u32(u_sum_row_2_fst, u_sum_row_3_fst);
+ u_sum_row_snd = vaddq_u32(u_sum_row_2_snd, u_sum_row_3_snd);
+
+ highbd_get_sum_8(v_dist, &v_sum_row_2_fst, &v_sum_row_2_snd);
+ highbd_get_sum_8(v_dist + DIST_STRIDE, &v_sum_row_3_fst, &v_sum_row_3_snd);
+
+ v_sum_row_fst = vaddq_u32(v_sum_row_2_fst, v_sum_row_3_fst);
+ v_sum_row_snd = vaddq_u32(v_sum_row_2_snd, v_sum_row_3_snd);
+
+ // Add luma values
+ highbd_add_luma_dist_to_8_chroma_mod(y_dist, ss_x, ss_y, &u_sum_row_fst,
+ &u_sum_row_snd, &v_sum_row_fst,
+ &v_sum_row_snd);
+
+ // Get modifier and store result
+ if (blk_fw) {
+ highbd_average_4(&u_sum_row_fst, u_sum_row_fst, &mul_fst, strength,
+ rounding, blk_fw[0]);
+ highbd_average_4(&u_sum_row_snd, u_sum_row_snd, &mul_snd, strength,
+ rounding, blk_fw[1]);
+
+ highbd_average_4(&v_sum_row_fst, v_sum_row_fst, &mul_fst, strength,
+ rounding, blk_fw[0]);
+ highbd_average_4(&v_sum_row_snd, v_sum_row_snd, &mul_snd, strength,
+ rounding, blk_fw[1]);
+
+ } else {
+ highbd_average_8(&u_sum_row_fst, &u_sum_row_snd, u_sum_row_fst,
+ u_sum_row_snd, &mul_fst, &mul_snd, strength, rounding,
+ weight);
+ highbd_average_8(&v_sum_row_fst, &v_sum_row_snd, v_sum_row_fst,
+ v_sum_row_snd, &mul_fst, &mul_snd, strength, rounding,
+ weight);
+ }
+ highbd_accumulate_and_store_8(u_sum_row_fst, u_sum_row_snd, u_pre, u_count,
+ u_accum);
+ highbd_accumulate_and_store_8(v_sum_row_fst, v_sum_row_snd, v_pre, v_count,
+ v_accum);
+
+ u_pre += uv_pre_stride;
+ u_dist += DIST_STRIDE;
+ v_pre += uv_pre_stride;
+ v_dist += DIST_STRIDE;
+ u_count += uv_pre_stride;
+ u_accum += uv_pre_stride;
+ v_count += uv_pre_stride;
+ v_accum += uv_pre_stride;
+
+ y_dist += DIST_STRIDE * (1 + ss_y);
+
+ // Then all the rows except the last one
+ mul_fst = vld1q_u32(neighbors_fst[1]);
+ mul_snd = vld1q_u32(neighbors_snd[1]);
+
+ for (h = 1; h < uv_block_height - 1; ++h) {
+ // Move the weight pointer to the bottom half of the blocks
+ if (h == uv_block_height / 2) {
+ if (blk_fw) {
+ blk_fw += 2;
+ } else {
+ weight = bottom_weight;
+ }
+ }
+
+ // Shift the rows up
+ u_sum_row_1_fst = u_sum_row_2_fst;
+ u_sum_row_2_fst = u_sum_row_3_fst;
+ u_sum_row_1_snd = u_sum_row_2_snd;
+ u_sum_row_2_snd = u_sum_row_3_snd;
+
+ v_sum_row_1_fst = v_sum_row_2_fst;
+ v_sum_row_2_fst = v_sum_row_3_fst;
+ v_sum_row_1_snd = v_sum_row_2_snd;
+ v_sum_row_2_snd = v_sum_row_3_snd;
+
+ // Add chroma values
+ u_sum_row_fst = vaddq_u32(u_sum_row_1_fst, u_sum_row_2_fst);
+ u_sum_row_snd = vaddq_u32(u_sum_row_1_snd, u_sum_row_2_snd);
+ highbd_get_sum_8(u_dist + DIST_STRIDE, &u_sum_row_3_fst, &u_sum_row_3_snd);
+ u_sum_row_fst = vaddq_u32(u_sum_row_fst, u_sum_row_3_fst);
+ u_sum_row_snd = vaddq_u32(u_sum_row_snd, u_sum_row_3_snd);
+
+ v_sum_row_fst = vaddq_u32(v_sum_row_1_fst, v_sum_row_2_fst);
+ v_sum_row_snd = vaddq_u32(v_sum_row_1_snd, v_sum_row_2_snd);
+ highbd_get_sum_8(v_dist + DIST_STRIDE, &v_sum_row_3_fst, &v_sum_row_3_snd);
+ v_sum_row_fst = vaddq_u32(v_sum_row_fst, v_sum_row_3_fst);
+ v_sum_row_snd = vaddq_u32(v_sum_row_snd, v_sum_row_3_snd);
+
+ // Add luma values
+ highbd_add_luma_dist_to_8_chroma_mod(y_dist, ss_x, ss_y, &u_sum_row_fst,
+ &u_sum_row_snd, &v_sum_row_fst,
+ &v_sum_row_snd);
+
+ // Get modifier and store result
+ if (blk_fw) {
+ highbd_average_4(&u_sum_row_fst, u_sum_row_fst, &mul_fst, strength,
+ rounding, blk_fw[0]);
+ highbd_average_4(&u_sum_row_snd, u_sum_row_snd, &mul_snd, strength,
+ rounding, blk_fw[1]);
+
+ highbd_average_4(&v_sum_row_fst, v_sum_row_fst, &mul_fst, strength,
+ rounding, blk_fw[0]);
+ highbd_average_4(&v_sum_row_snd, v_sum_row_snd, &mul_snd, strength,
+ rounding, blk_fw[1]);
+
+ } else {
+ highbd_average_8(&u_sum_row_fst, &u_sum_row_snd, u_sum_row_fst,
+ u_sum_row_snd, &mul_fst, &mul_snd, strength, rounding,
+ weight);
+ highbd_average_8(&v_sum_row_fst, &v_sum_row_snd, v_sum_row_fst,
+ v_sum_row_snd, &mul_fst, &mul_snd, strength, rounding,
+ weight);
+ }
+
+ highbd_accumulate_and_store_8(u_sum_row_fst, u_sum_row_snd, u_pre, u_count,
+ u_accum);
+ highbd_accumulate_and_store_8(v_sum_row_fst, v_sum_row_snd, v_pre, v_count,
+ v_accum);
+
+ u_pre += uv_pre_stride;
+ u_dist += DIST_STRIDE;
+ v_pre += uv_pre_stride;
+ v_dist += DIST_STRIDE;
+ u_count += uv_pre_stride;
+ u_accum += uv_pre_stride;
+ v_count += uv_pre_stride;
+ v_accum += uv_pre_stride;
+
+ y_dist += DIST_STRIDE * (1 + ss_y);
+ }
+
+ // The last row
+ mul_fst = vld1q_u32(neighbors_fst[0]);
+ mul_snd = vld1q_u32(neighbors_snd[0]);
+
+ // Shift the rows up
+ u_sum_row_1_fst = u_sum_row_2_fst;
+ u_sum_row_2_fst = u_sum_row_3_fst;
+ u_sum_row_1_snd = u_sum_row_2_snd;
+ u_sum_row_2_snd = u_sum_row_3_snd;
+
+ v_sum_row_1_fst = v_sum_row_2_fst;
+ v_sum_row_2_fst = v_sum_row_3_fst;
+ v_sum_row_1_snd = v_sum_row_2_snd;
+ v_sum_row_2_snd = v_sum_row_3_snd;
+
+ // Add chroma values
+ u_sum_row_fst = vaddq_u32(u_sum_row_1_fst, u_sum_row_2_fst);
+ v_sum_row_fst = vaddq_u32(v_sum_row_1_fst, v_sum_row_2_fst);
+ u_sum_row_snd = vaddq_u32(u_sum_row_1_snd, u_sum_row_2_snd);
+ v_sum_row_snd = vaddq_u32(v_sum_row_1_snd, v_sum_row_2_snd);
+
+ // Add luma values
+ highbd_add_luma_dist_to_8_chroma_mod(y_dist, ss_x, ss_y, &u_sum_row_fst,
+ &u_sum_row_snd, &v_sum_row_fst,
+ &v_sum_row_snd);
+
+ // Get modifier and store result
+ if (blk_fw) {
+ highbd_average_4(&u_sum_row_fst, u_sum_row_fst, &mul_fst, strength,
+ rounding, blk_fw[0]);
+ highbd_average_4(&u_sum_row_snd, u_sum_row_snd, &mul_snd, strength,
+ rounding, blk_fw[1]);
+
+ highbd_average_4(&v_sum_row_fst, v_sum_row_fst, &mul_fst, strength,
+ rounding, blk_fw[0]);
+ highbd_average_4(&v_sum_row_snd, v_sum_row_snd, &mul_snd, strength,
+ rounding, blk_fw[1]);
+
+ } else {
+ highbd_average_8(&u_sum_row_fst, &u_sum_row_snd, u_sum_row_fst,
+ u_sum_row_snd, &mul_fst, &mul_snd, strength, rounding,
+ weight);
+ highbd_average_8(&v_sum_row_fst, &v_sum_row_snd, v_sum_row_fst,
+ v_sum_row_snd, &mul_fst, &mul_snd, strength, rounding,
+ weight);
+ }
+
+ highbd_accumulate_and_store_8(u_sum_row_fst, u_sum_row_snd, u_pre, u_count,
+ u_accum);
+ highbd_accumulate_and_store_8(v_sum_row_fst, v_sum_row_snd, v_pre, v_count,
+ v_accum);
+}
+
+// Perform temporal filter for the chroma components.
+static void highbd_apply_temporal_filter_chroma(
+ const uint16_t *u_pre, const uint16_t *v_pre, int uv_pre_stride,
+ unsigned int block_width, unsigned int block_height, int ss_x, int ss_y,
+ int strength, const int *blk_fw, int use_whole_blk, uint32_t *u_accum,
+ uint16_t *u_count, uint32_t *v_accum, uint16_t *v_count,
+ const uint32_t *y_dist, const uint32_t *u_dist, const uint32_t *v_dist) {
+ const unsigned int uv_width = block_width >> ss_x,
+ uv_height = block_height >> ss_y;
+
+ unsigned int blk_col = 0, uv_blk_col = 0;
+ const unsigned int uv_blk_col_step = 8, blk_col_step = 8 << ss_x;
+ const unsigned int uv_mid_width = uv_width >> 1,
+ uv_last_width = uv_width - uv_blk_col_step;
+ int top_weight = blk_fw[0],
+ bottom_weight = use_whole_blk ? blk_fw[0] : blk_fw[2];
+ const uint32_t *const *neighbors_fst;
+ const uint32_t *const *neighbors_snd;
+
+ if (uv_width == 8) {
+ // Special Case: We are subsampling in the x direction on a 16x16 block.
+ // Since we are operating on a row of 8 chroma pixels, we can't use the
+ // usual left-middle-right pattern.
+ assert(ss_x);
+
+ if (ss_y) {
+ neighbors_fst = HIGHBD_CHROMA_DOUBLE_SS_LEFT_COLUMN_NEIGHBORS;
+ neighbors_snd = HIGHBD_CHROMA_DOUBLE_SS_RIGHT_COLUMN_NEIGHBORS;
+ } else {
+ neighbors_fst = HIGHBD_CHROMA_SINGLE_SS_LEFT_COLUMN_NEIGHBORS;
+ neighbors_snd = HIGHBD_CHROMA_SINGLE_SS_RIGHT_COLUMN_NEIGHBORS;
+ }
+
+ if (use_whole_blk) {
+ highbd_apply_temporal_filter_chroma_8(
+ u_pre + uv_blk_col, v_pre + uv_blk_col, uv_pre_stride, uv_width,
+ uv_height, ss_x, ss_y, strength, u_accum + uv_blk_col,
+ u_count + uv_blk_col, v_accum + uv_blk_col, v_count + uv_blk_col,
+ y_dist + blk_col, u_dist + uv_blk_col, v_dist + uv_blk_col,
+ neighbors_fst, neighbors_snd, top_weight, bottom_weight, NULL);
+ } else {
+ highbd_apply_temporal_filter_chroma_8(
+ u_pre + uv_blk_col, v_pre + uv_blk_col, uv_pre_stride, uv_width,
+ uv_height, ss_x, ss_y, strength, u_accum + uv_blk_col,
+ u_count + uv_blk_col, v_accum + uv_blk_col, v_count + uv_blk_col,
+ y_dist + blk_col, u_dist + uv_blk_col, v_dist + uv_blk_col,
+ neighbors_fst, neighbors_snd, 0, 0, blk_fw);
+ }
+
+ return;
+ }
+
+ // Left
+ if (ss_x && ss_y) {
+ neighbors_fst = HIGHBD_CHROMA_DOUBLE_SS_LEFT_COLUMN_NEIGHBORS;
+ neighbors_snd = HIGHBD_CHROMA_DOUBLE_SS_MIDDLE_COLUMN_NEIGHBORS;
+ } else if (ss_x || ss_y) {
+ neighbors_fst = HIGHBD_CHROMA_SINGLE_SS_LEFT_COLUMN_NEIGHBORS;
+ neighbors_snd = HIGHBD_CHROMA_SINGLE_SS_MIDDLE_COLUMN_NEIGHBORS;
+ } else {
+ neighbors_fst = HIGHBD_CHROMA_NO_SS_LEFT_COLUMN_NEIGHBORS;
+ neighbors_snd = HIGHBD_CHROMA_NO_SS_MIDDLE_COLUMN_NEIGHBORS;
+ }
+
+ highbd_apply_temporal_filter_chroma_8(
+ u_pre + uv_blk_col, v_pre + uv_blk_col, uv_pre_stride, uv_width,
+ uv_height, ss_x, ss_y, strength, u_accum + uv_blk_col,
+ u_count + uv_blk_col, v_accum + uv_blk_col, v_count + uv_blk_col,
+ y_dist + blk_col, u_dist + uv_blk_col, v_dist + uv_blk_col, neighbors_fst,
+ neighbors_snd, top_weight, bottom_weight, NULL);
+
+ blk_col += blk_col_step;
+ uv_blk_col += uv_blk_col_step;
+
+ // Middle First
+ if (ss_x && ss_y) {
+ neighbors_fst = HIGHBD_CHROMA_DOUBLE_SS_MIDDLE_COLUMN_NEIGHBORS;
+ } else if (ss_x || ss_y) {
+ neighbors_fst = HIGHBD_CHROMA_SINGLE_SS_MIDDLE_COLUMN_NEIGHBORS;
+ } else {
+ neighbors_fst = HIGHBD_CHROMA_NO_SS_MIDDLE_COLUMN_NEIGHBORS;
+ }
+
+ for (; uv_blk_col < uv_mid_width;
+ blk_col += blk_col_step, uv_blk_col += uv_blk_col_step) {
+ highbd_apply_temporal_filter_chroma_8(
+ u_pre + uv_blk_col, v_pre + uv_blk_col, uv_pre_stride, uv_width,
+ uv_height, ss_x, ss_y, strength, u_accum + uv_blk_col,
+ u_count + uv_blk_col, v_accum + uv_blk_col, v_count + uv_blk_col,
+ y_dist + blk_col, u_dist + uv_blk_col, v_dist + uv_blk_col,
+ neighbors_fst, neighbors_snd, top_weight, bottom_weight, NULL);
+ }
+
+ if (!use_whole_blk) {
+ top_weight = blk_fw[1];
+ bottom_weight = blk_fw[3];
+ }
+
+ // Middle Second
+ for (; uv_blk_col < uv_last_width;
+ blk_col += blk_col_step, uv_blk_col += uv_blk_col_step) {
+ highbd_apply_temporal_filter_chroma_8(
+ u_pre + uv_blk_col, v_pre + uv_blk_col, uv_pre_stride, uv_width,
+ uv_height, ss_x, ss_y, strength, u_accum + uv_blk_col,
+ u_count + uv_blk_col, v_accum + uv_blk_col, v_count + uv_blk_col,
+ y_dist + blk_col, u_dist + uv_blk_col, v_dist + uv_blk_col,
+ neighbors_fst, neighbors_snd, top_weight, bottom_weight, NULL);
+ }
+
+ // Right
+ if (ss_x && ss_y) {
+ neighbors_snd = HIGHBD_CHROMA_DOUBLE_SS_RIGHT_COLUMN_NEIGHBORS;
+ } else if (ss_x || ss_y) {
+ neighbors_snd = HIGHBD_CHROMA_SINGLE_SS_RIGHT_COLUMN_NEIGHBORS;
+ } else {
+ neighbors_snd = HIGHBD_CHROMA_NO_SS_RIGHT_COLUMN_NEIGHBORS;
+ }
+
+ highbd_apply_temporal_filter_chroma_8(
+ u_pre + uv_blk_col, v_pre + uv_blk_col, uv_pre_stride, uv_width,
+ uv_height, ss_x, ss_y, strength, u_accum + uv_blk_col,
+ u_count + uv_blk_col, v_accum + uv_blk_col, v_count + uv_blk_col,
+ y_dist + blk_col, u_dist + uv_blk_col, v_dist + uv_blk_col, neighbors_fst,
+ neighbors_snd, top_weight, bottom_weight, NULL);
+}
+
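+// Top-level high bitdepth temporal filter: first precompute the squared pixel
+// differences into the y/u/v dist buffers (written at offset 1 into
+// zero-initialized rows, so the left neighbor of column 0 reads as 0), then
+// run the luma and chroma filtering passes over those buffers.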
+void vp9_highbd_apply_temporal_filter_neon(
+ const uint16_t *y_src, int y_src_stride, const uint16_t *y_pre,
+ int y_pre_stride, const uint16_t *u_src, const uint16_t *v_src,
+ int uv_src_stride, const uint16_t *u_pre, const uint16_t *v_pre,
+ int uv_pre_stride, unsigned int block_width, unsigned int block_height,
+ int ss_x, int ss_y, int strength, const int *const blk_fw,
+ int use_whole_blk, uint32_t *y_accum, uint16_t *y_count, uint32_t *u_accum,
+ uint16_t *u_count, uint32_t *v_accum, uint16_t *v_count) {
+ const unsigned int chroma_height = block_height >> ss_y,
+ chroma_width = block_width >> ss_x;
+
+ DECLARE_ALIGNED(16, uint32_t, y_dist[BH * DIST_STRIDE]) = { 0 };
+ DECLARE_ALIGNED(16, uint32_t, u_dist[BH * DIST_STRIDE]) = { 0 };
+ DECLARE_ALIGNED(16, uint32_t, v_dist[BH * DIST_STRIDE]) = { 0 };
+
+ uint32_t *y_dist_ptr = y_dist + 1, *u_dist_ptr = u_dist + 1,
+ *v_dist_ptr = v_dist + 1;
+ const uint16_t *y_src_ptr = y_src, *u_src_ptr = u_src, *v_src_ptr = v_src;
+ const uint16_t *y_pre_ptr = y_pre, *u_pre_ptr = u_pre, *v_pre_ptr = v_pre;
+
+ // Loop variables
+ unsigned int row, blk_col;
+
+ assert(block_width <= BW && "block width too large");
+ assert(block_height <= BH && "block height too large");
+ assert(block_width % 16 == 0 && "block width must be multiple of 16");
+ assert(block_height % 2 == 0 && "block height must be even");
+ assert((ss_x == 0 || ss_x == 1) && (ss_y == 0 || ss_y == 1) &&
+ "invalid chroma subsampling");
+ assert(strength >= 4 && strength <= 14 &&
+ "invalid adjusted temporal filter strength");
+ assert(blk_fw[0] >= 0 && "filter weight must be non-negative");
+ assert(
+ (use_whole_blk || (blk_fw[1] >= 0 && blk_fw[2] >= 0 && blk_fw[3] >= 0)) &&
+ "subblock filter weight must be non-negative");
+ assert(blk_fw[0] <= 2 && "filter weight must be at most 2");
+ assert(
+ (use_whole_blk || (blk_fw[1] <= 2 && blk_fw[2] <= 2 && blk_fw[3] <= 2)) &&
+ "subblock filter weight must be at most 2");
+
+ // Precompute the difference squared
+ for (row = 0; row < block_height; row++) {
+ for (blk_col = 0; blk_col < block_width; blk_col += 8) {
+ highbd_store_dist_8(y_src_ptr + blk_col, y_pre_ptr + blk_col,
+ y_dist_ptr + blk_col);
+ }
+ y_src_ptr += y_src_stride;
+ y_pre_ptr += y_pre_stride;
+ y_dist_ptr += DIST_STRIDE;
+ }
+
+ for (row = 0; row < chroma_height; row++) {
+ for (blk_col = 0; blk_col < chroma_width; blk_col += 8) {
+ highbd_store_dist_8(u_src_ptr + blk_col, u_pre_ptr + blk_col,
+ u_dist_ptr + blk_col);
+ highbd_store_dist_8(v_src_ptr + blk_col, v_pre_ptr + blk_col,
+ v_dist_ptr + blk_col);
+ }
+
+ u_src_ptr += uv_src_stride;
+ u_pre_ptr += uv_pre_stride;
+ u_dist_ptr += DIST_STRIDE;
+ v_src_ptr += uv_src_stride;
+ v_pre_ptr += uv_pre_stride;
+ v_dist_ptr += DIST_STRIDE;
+ }
+
+ y_dist_ptr = y_dist + 1;
+ u_dist_ptr = u_dist + 1;
+ v_dist_ptr = v_dist + 1;
+
+ highbd_apply_temporal_filter_luma(y_pre, y_pre_stride, block_width,
+ block_height, ss_x, ss_y, strength, blk_fw,
+ use_whole_blk, y_accum, y_count, y_dist_ptr,
+ u_dist_ptr, v_dist_ptr);
+
+ highbd_apply_temporal_filter_chroma(
+ u_pre, v_pre, uv_pre_stride, block_width, block_height, ss_x, ss_y,
+ strength, blk_fw, use_whole_blk, u_accum, u_count, v_accum, v_count,
+ y_dist_ptr, u_dist_ptr, v_dist_ptr);
+}
diff --git a/media/libvpx/libvpx/vp9/encoder/arm/neon/vp9_quantize_neon.c b/media/libvpx/libvpx/vp9/encoder/arm/neon/vp9_quantize_neon.c
new file mode 100644
index 0000000000..97ab13628e
--- /dev/null
+++ b/media/libvpx/libvpx/vp9/encoder/arm/neon/vp9_quantize_neon.c
@@ -0,0 +1,408 @@
+/*
+ * Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <arm_neon.h>
+#include <assert.h>
+#include <math.h>
+
+#include "./vpx_config.h"
+#include "vpx_mem/vpx_mem.h"
+
+#include "vp9/common/vp9_quant_common.h"
+#include "vp9/common/vp9_seg_common.h"
+
+#include "vp9/encoder/vp9_encoder.h"
+#include "vp9/encoder/vp9_quantize.h"
+#include "vp9/encoder/vp9_rd.h"
+
+#include "vpx_dsp/arm/idct_neon.h"
+#include "vpx_dsp/arm/mem_neon.h"
+#include "vpx_dsp/vpx_dsp_common.h"
+
+static VPX_FORCE_INLINE void calculate_dqcoeff_and_store(
+ const int16x8_t qcoeff, const int16x8_t dequant, tran_low_t *dqcoeff) {
+ const int32x4_t dqcoeff_0 =
+ vmull_s16(vget_low_s16(qcoeff), vget_low_s16(dequant));
+ const int32x4_t dqcoeff_1 =
+ vmull_s16(vget_high_s16(qcoeff), vget_high_s16(dequant));
+
+#if CONFIG_VP9_HIGHBITDEPTH
+ vst1q_s32(dqcoeff, dqcoeff_0);
+ vst1q_s32(dqcoeff + 4, dqcoeff_1);
+#else
+ vst1q_s16(dqcoeff, vcombine_s16(vmovn_s32(dqcoeff_0), vmovn_s32(dqcoeff_1)));
+#endif // CONFIG_VP9_HIGHBITDEPTH
+}
+
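+// v_nz_mask is all-ones in lanes whose quantized coefficient is zero (it is
+// produced by a compare against zero). Those lanes select 0 and the other
+// lanes select their iscan value before taking the running lane-wise maximum.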
+static VPX_FORCE_INLINE int16x8_t get_max_lane_eob(const int16_t *iscan_ptr,
+ int16x8_t v_eobmax,
+ uint16x8_t v_nz_mask) {
+ const int16x8_t v_iscan = vld1q_s16(&iscan_ptr[0]);
+ const int16x8_t v_nz_iscan = vbslq_s16(v_nz_mask, vdupq_n_s16(0), v_iscan);
+ return vmaxq_s16(v_eobmax, v_nz_iscan);
+}
+
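+// Horizontal maximum of the eight eob lanes. AArch64 has vmaxvq_s16 for this;
+// the 32-bit Arm fallback folds the vector by taking the maximum of the two
+// halves, then of copies shifted down by 32 and 16 bits, leaving the result
+// in lane 0.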
+static VPX_FORCE_INLINE uint16_t get_max_eob(int16x8_t v_eobmax) {
+#if VPX_ARCH_AARCH64
+ return (uint16_t)vmaxvq_s16(v_eobmax);
+#else
+ const int16x4_t v_eobmax_3210 =
+ vmax_s16(vget_low_s16(v_eobmax), vget_high_s16(v_eobmax));
+ const int64x1_t v_eobmax_xx32 =
+ vshr_n_s64(vreinterpret_s64_s16(v_eobmax_3210), 32);
+ const int16x4_t v_eobmax_tmp =
+ vmax_s16(v_eobmax_3210, vreinterpret_s16_s64(v_eobmax_xx32));
+ const int64x1_t v_eobmax_xxx3 =
+ vshr_n_s64(vreinterpret_s64_s16(v_eobmax_tmp), 16);
+ const int16x4_t v_eobmax_final =
+ vmax_s16(v_eobmax_tmp, vreinterpret_s16_s64(v_eobmax_xxx3));
+
+ return (uint16_t)vget_lane_s16(v_eobmax_final, 0);
+#endif // VPX_ARCH_AARCH64
+}
+
+static VPX_FORCE_INLINE void load_fp_values(const int16_t *round_ptr,
+ const int16_t *quant_ptr,
+ const int16_t *dequant_ptr,
+ int16x8_t *round, int16x8_t *quant,
+ int16x8_t *dequant) {
+ *round = vld1q_s16(round_ptr);
+ *quant = vld1q_s16(quant_ptr);
+ *dequant = vld1q_s16(dequant_ptr);
+}
+
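+// The round/quant/dequant vectors are loaded with the DC constant in lane 0
+// and the AC constant in the remaining lanes. After the first group of 8
+// coefficients (DC plus seven AC), broadcast lane 1 so that all later groups
+// use the AC constants.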
+static VPX_FORCE_INLINE void update_fp_values(int16x8_t *v_round,
+ int16x8_t *v_quant,
+ int16x8_t *v_dequant) {
+#if VPX_ARCH_AARCH64
+ *v_round = vdupq_laneq_s16(*v_round, 1);
+ *v_quant = vdupq_laneq_s16(*v_quant, 1);
+ *v_dequant = vdupq_laneq_s16(*v_dequant, 1);
+#else
+ *v_round = vdupq_lane_s16(vget_low_s16(*v_round), 1);
+ *v_quant = vdupq_lane_s16(vget_low_s16(*v_quant), 1);
+ *v_dequant = vdupq_lane_s16(vget_low_s16(*v_dequant), 1);
+#endif
+}
+
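+// Per-lane arithmetic performed below, eight coefficients at a time:
+//   tmp     = saturate16(abs(coeff) + round)
+//   abs_q   = (tmp * quant) >> 16
+//   qcoeff  = (abs_q ^ sign) - sign
+//   dqcoeff = qcoeff * dequant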
+static VPX_FORCE_INLINE void quantize_fp_8(
+ const int16x8_t *v_round, const int16x8_t *v_quant,
+ const int16x8_t *v_dequant, const tran_low_t *coeff_ptr,
+ const int16_t *iscan_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
+ int16x8_t *v_eobmax) {
+ const int16x8_t v_zero = vdupq_n_s16(0);
+ const int16x8_t v_coeff = load_tran_low_to_s16q(coeff_ptr);
+ const int16x8_t v_coeff_sign = vshrq_n_s16(v_coeff, 15);
+ const int16x8_t v_abs = vabsq_s16(v_coeff);
+ const int16x8_t v_tmp = vqaddq_s16(v_abs, *v_round);
+ const int32x4_t v_tmp_lo =
+ vmull_s16(vget_low_s16(v_tmp), vget_low_s16(*v_quant));
+ const int32x4_t v_tmp_hi =
+ vmull_s16(vget_high_s16(v_tmp), vget_high_s16(*v_quant));
+ const int16x8_t v_tmp2 =
+ vcombine_s16(vshrn_n_s32(v_tmp_lo, 16), vshrn_n_s32(v_tmp_hi, 16));
+ const uint16x8_t v_nz_mask = vceqq_s16(v_tmp2, v_zero);
+ const int16x8_t v_qcoeff_a = veorq_s16(v_tmp2, v_coeff_sign);
+ const int16x8_t v_qcoeff = vsubq_s16(v_qcoeff_a, v_coeff_sign);
+ calculate_dqcoeff_and_store(v_qcoeff, *v_dequant, dqcoeff_ptr);
+ store_s16q_to_tran_low(qcoeff_ptr, v_qcoeff);
+
+ *v_eobmax = get_max_lane_eob(iscan_ptr, *v_eobmax, v_nz_mask);
+}
+
+void vp9_quantize_fp_neon(const tran_low_t *coeff_ptr, intptr_t count,
+ const int16_t *round_ptr, const int16_t *quant_ptr,
+ tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
+ const int16_t *dequant_ptr, uint16_t *eob_ptr,
+ const int16_t *scan, const int16_t *iscan) {
+  // Quantization pass: the fp (fast path) quantizer applies no zero-bin, so
+  // every coefficient is quantized. The first group of 8 covers the DC
+  // coefficient plus seven AC coefficients; all later groups use the AC
+  // constants only.
+ int i;
+ int16x8_t v_eobmax = vdupq_n_s16(-1);
+ int16x8_t v_round, v_quant, v_dequant;
+ (void)scan;
+
+ load_fp_values(round_ptr, quant_ptr, dequant_ptr, &v_round, &v_quant,
+ &v_dequant);
+ // process dc and the first seven ac coeffs
+ quantize_fp_8(&v_round, &v_quant, &v_dequant, coeff_ptr, iscan, qcoeff_ptr,
+ dqcoeff_ptr, &v_eobmax);
+
+ // now process the rest of the ac coeffs
+ update_fp_values(&v_round, &v_quant, &v_dequant);
+ for (i = 8; i < count; i += 8) {
+ quantize_fp_8(&v_round, &v_quant, &v_dequant, coeff_ptr + i, iscan + i,
+ qcoeff_ptr + i, dqcoeff_ptr + i, &v_eobmax);
+ }
+
+ *eob_ptr = get_max_eob(v_eobmax);
+}
+
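+// Shift the sign bit down so that each lane holds 1 for a negative value and
+// 0 otherwise. Adding this before an arithmetic shift right makes the shift
+// round towards zero, matching C division for negative products.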
+static INLINE int32x4_t extract_sign_bit(int32x4_t a) {
+ return vreinterpretq_s32_u32(vshrq_n_u32(vreinterpretq_u32_s32(a), 31));
+}
+
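+// 32x32 (log_scale == 1) variant: the caller pre-halves the round values,
+// lanes whose magnitude is below dequant >> 2 are zeroed through the
+// threshold mask, vqdmulhq_s16 yields (tmp * quant) >> 15, and the
+// dequantized value is halved with rounding towards zero.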
+static VPX_FORCE_INLINE void quantize_fp_32x32_8(
+ const int16x8_t *v_round, const int16x8_t *v_quant,
+ const int16x8_t *v_dequant, const int16x8_t *dequant_thresh,
+ const tran_low_t *coeff_ptr, const int16_t *iscan_ptr,
+ tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, int16x8_t *v_eobmax) {
+ const int16x8_t v_coeff = load_tran_low_to_s16q(coeff_ptr);
+ const int16x8_t v_coeff_sign = vshrq_n_s16(v_coeff, 15);
+ const int16x8_t v_coeff_abs = vabsq_s16(v_coeff);
+ const int16x8_t v_thr_mask =
+ vreinterpretq_s16_u16(vcgeq_s16(v_coeff_abs, *dequant_thresh));
+ const int16x8_t v_tmp_rnd =
+ vandq_s16(vqaddq_s16(v_coeff_abs, *v_round), v_thr_mask);
+ const int16x8_t v_abs_qcoeff = vqdmulhq_s16(v_tmp_rnd, *v_quant);
+ const int16x8_t v_qcoeff =
+ vsubq_s16(veorq_s16(v_abs_qcoeff, v_coeff_sign), v_coeff_sign);
+ const uint16x8_t v_nz_mask = vceqq_s16(v_abs_qcoeff, vdupq_n_s16(0));
+
+ int32x4_t dqcoeff_0, dqcoeff_1;
+ dqcoeff_0 = vmull_s16(vget_low_s16(v_qcoeff), vget_low_s16(*v_dequant));
+ dqcoeff_1 = vmull_s16(vget_high_s16(v_qcoeff), vget_high_s16(*v_dequant));
+ // Add 1 if negative to round towards zero because the C uses division.
+ dqcoeff_0 = vaddq_s32(dqcoeff_0, extract_sign_bit(dqcoeff_0));
+ dqcoeff_1 = vaddq_s32(dqcoeff_1, extract_sign_bit(dqcoeff_1));
+
+#if CONFIG_VP9_HIGHBITDEPTH
+ vst1q_s32(dqcoeff_ptr, vshrq_n_s32(dqcoeff_0, 1));
+ vst1q_s32(dqcoeff_ptr + 4, vshrq_n_s32(dqcoeff_1, 1));
+#else
+ store_s16q_to_tran_low(dqcoeff_ptr, vcombine_s16(vshrn_n_s32(dqcoeff_0, 1),
+ vshrn_n_s32(dqcoeff_1, 1)));
+#endif
+
+ store_s16q_to_tran_low(qcoeff_ptr, v_qcoeff);
+
+ *v_eobmax = get_max_lane_eob(iscan_ptr, *v_eobmax, v_nz_mask);
+}
+
+void vp9_quantize_fp_32x32_neon(const tran_low_t *coeff_ptr, intptr_t count,
+ const int16_t *round_ptr,
+ const int16_t *quant_ptr,
+ tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
+ const int16_t *dequant_ptr, uint16_t *eob_ptr,
+ const int16_t *scan, const int16_t *iscan) {
+ int16x8_t eob_max = vdupq_n_s16(-1);
+ // ROUND_POWER_OF_TWO(round_ptr[], 1)
+ int16x8_t round = vrshrq_n_s16(vld1q_s16(round_ptr), 1);
+ int16x8_t quant = vld1q_s16(quant_ptr);
+ int16x8_t dequant = vld1q_s16(dequant_ptr);
+  // dequant >> 2 is used, similarly to zbin, as a threshold.
+ int16x8_t dequant_thresh = vshrq_n_s16(vld1q_s16(dequant_ptr), 2);
+ int i;
+
+ (void)scan;
+ (void)count;
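+  // count is always 32 * 32 for this transform size, so the loop bound below
+  // is hard-coded instead.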
+
+ // Process dc and the first seven ac coeffs.
+ quantize_fp_32x32_8(&round, &quant, &dequant, &dequant_thresh, coeff_ptr,
+ iscan, qcoeff_ptr, dqcoeff_ptr, &eob_max);
+
+ update_fp_values(&round, &quant, &dequant);
+ dequant_thresh = vdupq_lane_s16(vget_low_s16(dequant_thresh), 1);
+
+ iscan += 8;
+ coeff_ptr += 8;
+ qcoeff_ptr += 8;
+ dqcoeff_ptr += 8;
+
+ // Process the rest of the ac coeffs.
+ for (i = 8; i < 32 * 32; i += 8) {
+ quantize_fp_32x32_8(&round, &quant, &dequant, &dequant_thresh, coeff_ptr,
+ iscan, qcoeff_ptr, dqcoeff_ptr, &eob_max);
+
+ iscan += 8;
+ coeff_ptr += 8;
+ qcoeff_ptr += 8;
+ dqcoeff_ptr += 8;
+ }
+
+ *eob_ptr = get_max_eob(eob_max);
+}
+
+#if CONFIG_VP9_HIGHBITDEPTH
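+// quant is pre-shifted left by 15 so that vqdmulhq_s32, which computes
+// (2 * a * b) >> 32, yields (tmp * quant) >> 16 as in the C code.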
+static VPX_FORCE_INLINE uint16x4_t
+highbd_quantize_fp_4(const tran_low_t *coeff_ptr, tran_low_t *qcoeff_ptr,
+ tran_low_t *dqcoeff_ptr, int32x4_t v_quant_s32,
+ int32x4_t v_dequant_s32, int32x4_t v_round_s32) {
+ const int32x4_t v_coeff = vld1q_s32(coeff_ptr);
+ const int32x4_t v_coeff_sign =
+ vreinterpretq_s32_u32(vcltq_s32(v_coeff, vdupq_n_s32(0)));
+ const int32x4_t v_abs_coeff = vabsq_s32(v_coeff);
+ const int32x4_t v_tmp = vaddq_s32(v_abs_coeff, v_round_s32);
+ // const int abs_qcoeff = (int)((tmp * quant) >> 16);
+ const int32x4_t v_abs_qcoeff = vqdmulhq_s32(v_tmp, v_quant_s32);
+ // qcoeff_ptr[rc] = (tran_low_t)((abs_qcoeff ^ coeff_sign) - coeff_sign);
+ const int32x4_t v_qcoeff =
+ vsubq_s32(veorq_s32(v_abs_qcoeff, v_coeff_sign), v_coeff_sign);
+ const int32x4_t v_abs_dqcoeff = vmulq_s32(v_abs_qcoeff, v_dequant_s32);
+ // dqcoeff_ptr[rc] = (tran_low_t)((abs_dqcoeff ^ coeff_sign) - coeff_sign);
+ const int32x4_t v_dqcoeff =
+ vsubq_s32(veorq_s32(v_abs_dqcoeff, v_coeff_sign), v_coeff_sign);
+
+ vst1q_s32(qcoeff_ptr, v_qcoeff);
+ vst1q_s32(dqcoeff_ptr, v_dqcoeff);
+
+  // Packed mask of the lanes whose quantized coefficient is zero; used to
+  // find eob.
+ return vmovn_u32(vceqq_s32(v_abs_qcoeff, vdupq_n_s32(0)));
+}
+
+void vp9_highbd_quantize_fp_neon(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+ const int16_t *round_ptr,
+ const int16_t *quant_ptr,
+ tran_low_t *qcoeff_ptr,
+ tran_low_t *dqcoeff_ptr,
+ const int16_t *dequant_ptr, uint16_t *eob_ptr,
+ const int16_t *scan, const int16_t *iscan) {
+ const int16x4_t v_zero = vdup_n_s16(0);
+ const int16x4_t v_quant = vld1_s16(quant_ptr);
+ const int16x4_t v_dequant = vld1_s16(dequant_ptr);
+ const int16x4_t v_round = vld1_s16(round_ptr);
+ int32x4_t v_round_s32 = vaddl_s16(v_round, v_zero);
+ int32x4_t v_quant_s32 = vshlq_n_s32(vaddl_s16(v_quant, v_zero), 15);
+ int32x4_t v_dequant_s32 = vaddl_s16(v_dequant, v_zero);
+ uint16x4_t v_mask_lo, v_mask_hi;
+ int16x8_t v_eobmax = vdupq_n_s16(-1);
+
+ (void)scan;
+
+ // DC and first 3 AC
+ v_mask_lo = highbd_quantize_fp_4(coeff_ptr, qcoeff_ptr, dqcoeff_ptr,
+ v_quant_s32, v_dequant_s32, v_round_s32);
+
+ // overwrite the DC constants with AC constants
+ v_round_s32 = vdupq_lane_s32(vget_low_s32(v_round_s32), 1);
+ v_quant_s32 = vdupq_lane_s32(vget_low_s32(v_quant_s32), 1);
+ v_dequant_s32 = vdupq_lane_s32(vget_low_s32(v_dequant_s32), 1);
+
+ // 4 more AC
+ v_mask_hi =
+ highbd_quantize_fp_4(coeff_ptr + 4, qcoeff_ptr + 4, dqcoeff_ptr + 4,
+ v_quant_s32, v_dequant_s32, v_round_s32);
+
+ // Find the max lane eob for the first 8 coeffs.
+ v_eobmax =
+ get_max_lane_eob(iscan, v_eobmax, vcombine_u16(v_mask_lo, v_mask_hi));
+
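+  // n_coeffs is a multiple of 8 and at least 16 for the transform sizes that
+  // reach this function, so the do/while below always has at least one full
+  // group of 8 left to process.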
+ n_coeffs -= 8;
+ do {
+ coeff_ptr += 8;
+ qcoeff_ptr += 8;
+ dqcoeff_ptr += 8;
+ iscan += 8;
+ v_mask_lo = highbd_quantize_fp_4(coeff_ptr, qcoeff_ptr, dqcoeff_ptr,
+ v_quant_s32, v_dequant_s32, v_round_s32);
+ v_mask_hi =
+ highbd_quantize_fp_4(coeff_ptr + 4, qcoeff_ptr + 4, dqcoeff_ptr + 4,
+ v_quant_s32, v_dequant_s32, v_round_s32);
+ // Find the max lane eob for 8 coeffs.
+ v_eobmax =
+ get_max_lane_eob(iscan, v_eobmax, vcombine_u16(v_mask_lo, v_mask_hi));
+ n_coeffs -= 8;
+ } while (n_coeffs);
+
+ *eob_ptr = get_max_eob(v_eobmax);
+}
+
+static VPX_FORCE_INLINE uint16x4_t
+highbd_quantize_fp_32x32_4(const tran_low_t *coeff_ptr, tran_low_t *qcoeff_ptr,
+ tran_low_t *dqcoeff_ptr, int32x4_t v_quant_s32,
+ int32x4_t v_dequant_s32, int32x4_t v_round_s32) {
+ const int32x4_t v_coeff = vld1q_s32(coeff_ptr);
+ const int32x4_t v_coeff_sign =
+ vreinterpretq_s32_u32(vcltq_s32(v_coeff, vdupq_n_s32(0)));
+ const int32x4_t v_abs_coeff = vabsq_s32(v_coeff);
+  // ((abs_coeff << (1 + log_scale)) >= dequant_ptr[rc != 0])
+ const int32x4_t v_abs_coeff_scaled = vshlq_n_s32(v_abs_coeff, 2);
+ const uint32x4_t v_mask = vcgeq_s32(v_abs_coeff_scaled, v_dequant_s32);
+ // const int64_t tmp = vmask ? (int64_t)abs_coeff + log_scaled_round : 0
+ const int32x4_t v_tmp = vandq_s32(vaddq_s32(v_abs_coeff, v_round_s32),
+ vreinterpretq_s32_u32(v_mask));
+ // const int abs_qcoeff = (int)((tmp * quant) >> (16 - log_scale));
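+  // Doubling tmp before the saturating doubling multiply-high (quant is
+  // pre-shifted by 15) gives (tmp * quant) >> 15, i.e. the shift reduced by
+  // log_scale.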
+ const int32x4_t v_abs_qcoeff =
+ vqdmulhq_s32(vshlq_n_s32(v_tmp, 1), v_quant_s32);
+ // qcoeff_ptr[rc] = (tran_low_t)((abs_qcoeff ^ coeff_sign) - coeff_sign);
+ const int32x4_t v_qcoeff =
+ vsubq_s32(veorq_s32(v_abs_qcoeff, v_coeff_sign), v_coeff_sign);
+  // abs_dqcoeff = (abs_qcoeff * dequant) >> 1 for the 32x32 (log_scale == 1)
+  // case.
+ const int32x4_t v_abs_dqcoeff =
+ vshrq_n_s32(vmulq_s32(v_abs_qcoeff, v_dequant_s32), 1);
+ // dqcoeff_ptr[rc] = (tran_low_t)((abs_dqcoeff ^ coeff_sign) - coeff_sign);
+ const int32x4_t v_dqcoeff =
+ vsubq_s32(veorq_s32(v_abs_dqcoeff, v_coeff_sign), v_coeff_sign);
+
+ vst1q_s32(qcoeff_ptr, v_qcoeff);
+ vst1q_s32(dqcoeff_ptr, v_dqcoeff);
+
+  // Packed mask of the lanes whose quantized coefficient is zero; used to
+  // find eob.
+ return vmovn_u32(vceqq_s32(v_abs_qcoeff, vdupq_n_s32(0)));
+}
+
+void vp9_highbd_quantize_fp_32x32_neon(
+ const tran_low_t *coeff_ptr, intptr_t n_coeffs, const int16_t *round_ptr,
+ const int16_t *quant_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
+ const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan,
+ const int16_t *iscan) {
+ const int16x4_t v_quant = vld1_s16(quant_ptr);
+ const int16x4_t v_dequant = vld1_s16(dequant_ptr);
+ const int16x4_t v_zero = vdup_n_s16(0);
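+  // vqrdmulh_n_s16 by 1 << 14 computes (x + 1) >> 1, i.e.
+  // ROUND_POWER_OF_TWO(round_ptr[i], 1).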
+ const int16x4_t v_round =
+ vqrdmulh_n_s16(vld1_s16(round_ptr), (int16_t)(1 << 14));
+ int32x4_t v_round_s32 = vaddl_s16(v_round, v_zero);
+ int32x4_t v_quant_s32 = vshlq_n_s32(vaddl_s16(v_quant, v_zero), 15);
+ int32x4_t v_dequant_s32 = vaddl_s16(v_dequant, v_zero);
+ uint16x4_t v_mask_lo, v_mask_hi;
+ int16x8_t v_eobmax = vdupq_n_s16(-1);
+
+ (void)scan;
+
+ // DC and first 3 AC
+ v_mask_lo =
+ highbd_quantize_fp_32x32_4(coeff_ptr, qcoeff_ptr, dqcoeff_ptr,
+ v_quant_s32, v_dequant_s32, v_round_s32);
+
+ // overwrite the DC constants with AC constants
+ v_round_s32 = vdupq_lane_s32(vget_low_s32(v_round_s32), 1);
+ v_quant_s32 = vdupq_lane_s32(vget_low_s32(v_quant_s32), 1);
+ v_dequant_s32 = vdupq_lane_s32(vget_low_s32(v_dequant_s32), 1);
+
+ // 4 more AC
+ v_mask_hi =
+ highbd_quantize_fp_32x32_4(coeff_ptr + 4, qcoeff_ptr + 4, dqcoeff_ptr + 4,
+ v_quant_s32, v_dequant_s32, v_round_s32);
+
+ // Find the max lane eob for the first 8 coeffs.
+ v_eobmax =
+ get_max_lane_eob(iscan, v_eobmax, vcombine_u16(v_mask_lo, v_mask_hi));
+
+ n_coeffs -= 8;
+ do {
+ coeff_ptr += 8;
+ qcoeff_ptr += 8;
+ dqcoeff_ptr += 8;
+ iscan += 8;
+ v_mask_lo =
+ highbd_quantize_fp_32x32_4(coeff_ptr, qcoeff_ptr, dqcoeff_ptr,
+ v_quant_s32, v_dequant_s32, v_round_s32);
+ v_mask_hi = highbd_quantize_fp_32x32_4(coeff_ptr + 4, qcoeff_ptr + 4,
+ dqcoeff_ptr + 4, v_quant_s32,
+ v_dequant_s32, v_round_s32);
+ // Find the max lane eob for 8 coeffs.
+ v_eobmax =
+ get_max_lane_eob(iscan, v_eobmax, vcombine_u16(v_mask_lo, v_mask_hi));
+ n_coeffs -= 8;
+ } while (n_coeffs);
+
+ *eob_ptr = get_max_eob(v_eobmax);
+}
+#endif // CONFIG_VP9_HIGHBITDEPTH
diff --git a/media/libvpx/libvpx/vp9/encoder/arm/neon/vp9_temporal_filter_neon.c b/media/libvpx/libvpx/vp9/encoder/arm/neon/vp9_temporal_filter_neon.c
new file mode 100644
index 0000000000..a651a15d90
--- /dev/null
+++ b/media/libvpx/libvpx/vp9/encoder/arm/neon/vp9_temporal_filter_neon.c
@@ -0,0 +1,849 @@
+/*
+ * Copyright (c) 2023 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+#include <arm_neon.h>
+
+#include "./vp9_rtcd.h"
+#include "./vpx_config.h"
+#include "vpx/vpx_integer.h"
+#include "vp9/encoder/vp9_encoder.h"
+#include "vp9/encoder/vp9_temporal_filter.h"
+#include "vp9/encoder/vp9_temporal_filter_constants.h"
+
+// Read in 8 pixels from a and b as 8-bit unsigned integers, compute the
+// difference squared, and store as unsigned 16-bit integer to dst.
+static INLINE void store_dist_8(const uint8_t *a, const uint8_t *b,
+ uint16_t *dst) {
+ const uint8x8_t a_reg = vld1_u8(a);
+ const uint8x8_t b_reg = vld1_u8(b);
+
+ uint16x8_t dist_first = vabdl_u8(a_reg, b_reg);
+ dist_first = vmulq_u16(dist_first, dist_first);
+
+ vst1q_u16(dst, dist_first);
+}
+
+static INLINE void store_dist_16(const uint8_t *a, const uint8_t *b,
+ uint16_t *dst) {
+ const uint8x16_t a_reg = vld1q_u8(a);
+ const uint8x16_t b_reg = vld1q_u8(b);
+
+ uint16x8_t dist_first = vabdl_u8(vget_low_u8(a_reg), vget_low_u8(b_reg));
+ uint16x8_t dist_second = vabdl_u8(vget_high_u8(a_reg), vget_high_u8(b_reg));
+ dist_first = vmulq_u16(dist_first, dist_first);
+ dist_second = vmulq_u16(dist_second, dist_second);
+
+ vst1q_u16(dst, dist_first);
+ vst1q_u16(dst + 8, dist_second);
+}
+
+static INLINE void read_dist_8(const uint16_t *dist, uint16x8_t *dist_reg) {
+ *dist_reg = vld1q_u16(dist);
+}
+
+static INLINE void read_dist_16(const uint16_t *dist, uint16x8_t *reg_first,
+ uint16x8_t *reg_second) {
+ read_dist_8(dist, reg_first);
+ read_dist_8(dist + 8, reg_second);
+}
+
+// Average the value based on the number of values summed (9 for pixels away
+// from the border, 4 for pixels in corners, and 6 for other edge values).
+//
+// Add in the rounding factor, shift down by strength, clamp to 16, invert
+// (16 - x), and multiply by the weight.
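+//
+// In scalar terms, per lane:
+//   modifier = (sum * mul_constant + (rounding << 16)) >> (16 + strength)
+//   modifier = min(modifier, 16)
+//   output   = (16 - modifier) * weight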
+static INLINE uint16x8_t average_8(uint16x8_t sum,
+ const uint16x8_t *mul_constants,
+ const int strength, const int rounding,
+ const uint16x8_t *weight) {
+ const uint32x4_t rounding_u32 = vdupq_n_u32(rounding << 16);
+ const uint16x8_t weight_u16 = *weight;
+ const uint16x8_t sixteen = vdupq_n_u16(16);
+ const int32x4_t strength_u32 = vdupq_n_s32(-strength - 16);
+
+ // modifier * 3 / index;
+  uint32x4_t sum_lo =
+      vmull_u16(vget_low_u16(sum), vget_low_u16(*mul_constants));
+  uint32x4_t sum_hi =
+      vmull_u16(vget_high_u16(sum), vget_high_u16(*mul_constants));
+
+  sum_lo = vqaddq_u32(sum_lo, rounding_u32);
+  sum_hi = vqaddq_u32(sum_hi, rounding_u32);
+
+  // We cannot use vshrn_n_u32 as strength is not known at compile time.
+  sum_lo = vshlq_u32(sum_lo, strength_u32);
+  sum_hi = vshlq_u32(sum_hi, strength_u32);
+
+  sum = vcombine_u16(vmovn_u32(sum_lo), vmovn_u32(sum_hi));
+
+  // The maximum input to this comparison is UINT16_MAX * NEIGHBOR_CONSTANT_4
+  // >> 16 (also NEIGHBOR_CONSTANT_4 - 1), which is 49151 / 0xbfff / -16385 as
+  // a signed value, so the minimum must treat the lanes as unsigned 16-bit;
+  // vminq_u16 does exactly that.
+ sum = vminq_u16(sum, sixteen);
+ sum = vsubq_u16(sixteen, sum);
+ return vmulq_u16(sum, weight_u16);
+}
+
+// Add 'sum_u16' to 'count'. Multiply by 'pred' and add to 'accumulator.'
+static void accumulate_and_store_8(const uint16x8_t sum_u16,
+ const uint8_t *pred, uint16_t *count,
+ uint32_t *accumulator) {
+ uint16x8_t pred_u16 = vmovl_u8(vld1_u8(pred));
+ uint16x8_t count_u16 = vld1q_u16(count);
+ uint32x4_t accum_0_u32, accum_1_u32;
+
+ count_u16 = vqaddq_u16(count_u16, sum_u16);
+ vst1q_u16(count, count_u16);
+
+ accum_0_u32 = vld1q_u32(accumulator);
+ accum_1_u32 = vld1q_u32(accumulator + 4);
+
+ accum_0_u32 =
+ vmlal_u16(accum_0_u32, vget_low_u16(sum_u16), vget_low_u16(pred_u16));
+ accum_1_u32 =
+ vmlal_u16(accum_1_u32, vget_high_u16(sum_u16), vget_high_u16(pred_u16));
+
+ vst1q_u32(accumulator, accum_0_u32);
+ vst1q_u32(accumulator + 4, accum_1_u32);
+}
+
+static INLINE void accumulate_and_store_16(const uint16x8_t sum_0_u16,
+ const uint16x8_t sum_1_u16,
+ const uint8_t *pred, uint16_t *count,
+ uint32_t *accumulator) {
+ uint8x16_t pred_u8 = vld1q_u8(pred);
+ uint16x8_t pred_0_u16 = vmovl_u8(vget_low_u8(pred_u8));
+ uint16x8_t pred_1_u16 = vmovl_u8(vget_high_u8(pred_u8));
+ uint16x8_t count_0_u16 = vld1q_u16(count);
+ uint16x8_t count_1_u16 = vld1q_u16(count + 8);
+ uint32x4_t accum_0_u32, accum_1_u32, accum_2_u32, accum_3_u32;
+
+ count_0_u16 = vqaddq_u16(count_0_u16, sum_0_u16);
+ vst1q_u16(count, count_0_u16);
+ count_1_u16 = vqaddq_u16(count_1_u16, sum_1_u16);
+ vst1q_u16(count + 8, count_1_u16);
+
+ accum_0_u32 = vld1q_u32(accumulator);
+ accum_1_u32 = vld1q_u32(accumulator + 4);
+ accum_2_u32 = vld1q_u32(accumulator + 8);
+ accum_3_u32 = vld1q_u32(accumulator + 12);
+
+ accum_0_u32 =
+ vmlal_u16(accum_0_u32, vget_low_u16(sum_0_u16), vget_low_u16(pred_0_u16));
+ accum_1_u32 = vmlal_u16(accum_1_u32, vget_high_u16(sum_0_u16),
+ vget_high_u16(pred_0_u16));
+ accum_2_u32 =
+ vmlal_u16(accum_2_u32, vget_low_u16(sum_1_u16), vget_low_u16(pred_1_u16));
+ accum_3_u32 = vmlal_u16(accum_3_u32, vget_high_u16(sum_1_u16),
+ vget_high_u16(pred_1_u16));
+
+ vst1q_u32(accumulator, accum_0_u32);
+ vst1q_u32(accumulator + 4, accum_1_u32);
+ vst1q_u32(accumulator + 8, accum_2_u32);
+ vst1q_u32(accumulator + 12, accum_3_u32);
+}
+
+// Read in 8 pixels from y_dist. For each index i, compute y_dist[i-1] +
+// y_dist[i] + y_dist[i+1] and store in sum as 16-bit unsigned int.
+static INLINE void get_sum_8(const uint16_t *y_dist, uint16x8_t *sum) {
+ uint16x8_t dist_reg, dist_left, dist_right;
+
+ dist_reg = vld1q_u16(y_dist);
+ dist_left = vld1q_u16(y_dist - 1);
+ dist_right = vld1q_u16(y_dist + 1);
+
+ *sum = vqaddq_u16(dist_reg, dist_left);
+ *sum = vqaddq_u16(*sum, dist_right);
+}
+
+// Read in 16 pixels from y_dist. For each index i, compute y_dist[i-1] +
+// y_dist[i] + y_dist[i+1]. Store the result for first 8 pixels in sum_first and
+// the rest in sum_second.
+static INLINE void get_sum_16(const uint16_t *y_dist, uint16x8_t *sum_first,
+ uint16x8_t *sum_second) {
+ get_sum_8(y_dist, sum_first);
+ get_sum_8(y_dist + 8, sum_second);
+}
+
+// Read in a row of chroma values that corresponds to a row of 16 luma values.
+static INLINE void read_chroma_dist_row_16(int ss_x, const uint16_t *u_dist,
+ const uint16_t *v_dist,
+ uint16x8_t *u_first,
+ uint16x8_t *u_second,
+ uint16x8_t *v_first,
+ uint16x8_t *v_second) {
+ if (!ss_x) {
+ // If there is no chroma subsampling in the horizontal direction, then we
+ // need to load 16 entries from chroma.
+ read_dist_16(u_dist, u_first, u_second);
+ read_dist_16(v_dist, v_first, v_second);
+ } else { // ss_x == 1
+ // Otherwise, we only need to load 8 entries
+ uint16x8_t u_reg, v_reg;
+ uint16x8x2_t pair;
+
+ read_dist_8(u_dist, &u_reg);
+
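+    // Interleaving the vector with itself duplicates each chroma distortion
+    // so that it lines up with the two luma columns it covers.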
+ pair = vzipq_u16(u_reg, u_reg);
+ *u_first = pair.val[0];
+ *u_second = pair.val[1];
+
+ read_dist_8(v_dist, &v_reg);
+
+ pair = vzipq_u16(v_reg, v_reg);
+ *v_first = pair.val[0];
+ *v_second = pair.val[1];
+ }
+}
+
+// Add a row of luma distortion to 8 corresponding chroma mods.
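+// When ss_x is set, the 16 luma distortions are pairwise-added down to 8, and
+// when ss_y is set the second luma row covered by the chroma row is added in
+// first, so each chroma modifier accounts for every luma sample it overlaps.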
+static INLINE void add_luma_dist_to_8_chroma_mod(const uint16_t *y_dist,
+ int ss_x, int ss_y,
+ uint16x8_t *u_mod,
+ uint16x8_t *v_mod) {
+ uint16x8_t y_reg;
+ if (!ss_x) {
+ read_dist_8(y_dist, &y_reg);
+ if (ss_y == 1) {
+ uint16x8_t y_tmp;
+ read_dist_8(y_dist + DIST_STRIDE, &y_tmp);
+
+ y_reg = vqaddq_u16(y_reg, y_tmp);
+ }
+ } else {
+ uint16x8_t y_first, y_second;
+ uint32x4_t y_first32, y_second32;
+
+ read_dist_16(y_dist, &y_first, &y_second);
+ if (ss_y == 1) {
+ uint16x8_t y_tmp_0, y_tmp_1;
+ read_dist_16(y_dist + DIST_STRIDE, &y_tmp_0, &y_tmp_1);
+
+ y_first = vqaddq_u16(y_first, y_tmp_0);
+ y_second = vqaddq_u16(y_second, y_tmp_1);
+ }
+
+ y_first32 = vpaddlq_u16(y_first);
+ y_second32 = vpaddlq_u16(y_second);
+
+ y_reg = vcombine_u16(vqmovn_u32(y_first32), vqmovn_u32(y_second32));
+ }
+
+ *u_mod = vqaddq_u16(*u_mod, y_reg);
+ *v_mod = vqaddq_u16(*v_mod, y_reg);
+}
+
+// Apply temporal filter to the luma components. This performs temporal
+// filtering on a luma block of 16 X block_height. Use blk_fw as an array of
+// size 4 for the weights for each of the 4 subblocks if blk_fw is not NULL,
+// else use top_weight for top half, and bottom weight for bottom half.
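+// The filter keeps a sliding window of three row sums (the rows above, at and
+// below the current one). The first and last rows use neighbors_first[0] and
+// neighbors_second[0] (two-row tables); interior rows use index 1 (three-row
+// tables).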
+static void apply_temporal_filter_luma_16(
+ const uint8_t *y_pre, int y_pre_stride, unsigned int block_width,
+ unsigned int block_height, int ss_x, int ss_y, int strength,
+ int use_whole_blk, uint32_t *y_accum, uint16_t *y_count,
+ const uint16_t *y_dist, const uint16_t *u_dist, const uint16_t *v_dist,
+ const int16_t *const *neighbors_first,
+ const int16_t *const *neighbors_second, int top_weight, int bottom_weight,
+ const int *blk_fw) {
+ const int rounding = (1 << strength) >> 1;
+ uint16x8_t weight_first, weight_second;
+
+ uint16x8_t mul_first, mul_second;
+
+ uint16x8_t sum_row_1_first, sum_row_1_second;
+ uint16x8_t sum_row_2_first, sum_row_2_second;
+ uint16x8_t sum_row_3_first, sum_row_3_second;
+
+ uint16x8_t u_first, u_second;
+ uint16x8_t v_first, v_second;
+
+ uint16x8_t sum_row_first;
+ uint16x8_t sum_row_second;
+
+ // Loop variables
+ unsigned int h;
+
+ assert(strength >= 0);
+ assert(strength <= 6);
+
+ assert(block_width == 16);
+ (void)block_width;
+
+ // Initialize the weights
+ if (blk_fw) {
+ weight_first = vdupq_n_u16(blk_fw[0]);
+ weight_second = vdupq_n_u16(blk_fw[1]);
+ } else {
+ weight_first = vdupq_n_u16(top_weight);
+ weight_second = weight_first;
+ }
+
+ // First row
+ mul_first = vld1q_u16((const uint16_t *)neighbors_first[0]);
+ mul_second = vld1q_u16((const uint16_t *)neighbors_second[0]);
+
+ // Add luma values
+ get_sum_16(y_dist, &sum_row_2_first, &sum_row_2_second);
+ get_sum_16(y_dist + DIST_STRIDE, &sum_row_3_first, &sum_row_3_second);
+
+ sum_row_first = vqaddq_u16(sum_row_2_first, sum_row_3_first);
+ sum_row_second = vqaddq_u16(sum_row_2_second, sum_row_3_second);
+
+ // Add chroma values
+ read_chroma_dist_row_16(ss_x, u_dist, v_dist, &u_first, &u_second, &v_first,
+ &v_second);
+
+ sum_row_first = vqaddq_u16(sum_row_first, u_first);
+ sum_row_second = vqaddq_u16(sum_row_second, u_second);
+
+ sum_row_first = vqaddq_u16(sum_row_first, v_first);
+ sum_row_second = vqaddq_u16(sum_row_second, v_second);
+
+ // Get modifier and store result
+ sum_row_first =
+ average_8(sum_row_first, &mul_first, strength, rounding, &weight_first);
+
+ sum_row_second = average_8(sum_row_second, &mul_second, strength, rounding,
+ &weight_second);
+
+ accumulate_and_store_16(sum_row_first, sum_row_second, y_pre, y_count,
+ y_accum);
+
+ y_pre += y_pre_stride;
+ y_count += y_pre_stride;
+ y_accum += y_pre_stride;
+ y_dist += DIST_STRIDE;
+
+ u_dist += DIST_STRIDE;
+ v_dist += DIST_STRIDE;
+
+ // Then all the rows except the last one
+ mul_first = vld1q_u16((const uint16_t *)neighbors_first[1]);
+ mul_second = vld1q_u16((const uint16_t *)neighbors_second[1]);
+
+ for (h = 1; h < block_height - 1; ++h) {
+    // Switch to the weights for the bottom half of the block
+ if (!use_whole_blk && h == block_height / 2) {
+ if (blk_fw) {
+ weight_first = vdupq_n_u16(blk_fw[2]);
+ weight_second = vdupq_n_u16(blk_fw[3]);
+ } else {
+ weight_first = vdupq_n_u16(bottom_weight);
+ weight_second = weight_first;
+ }
+ }
+ // Shift the rows up
+ sum_row_1_first = sum_row_2_first;
+ sum_row_1_second = sum_row_2_second;
+ sum_row_2_first = sum_row_3_first;
+ sum_row_2_second = sum_row_3_second;
+
+ // Add luma values to the modifier
+ sum_row_first = vqaddq_u16(sum_row_1_first, sum_row_2_first);
+ sum_row_second = vqaddq_u16(sum_row_1_second, sum_row_2_second);
+
+ get_sum_16(y_dist + DIST_STRIDE, &sum_row_3_first, &sum_row_3_second);
+
+ sum_row_first = vqaddq_u16(sum_row_first, sum_row_3_first);
+ sum_row_second = vqaddq_u16(sum_row_second, sum_row_3_second);
+
+ // Add chroma values to the modifier
+ if (ss_y == 0 || h % 2 == 0) {
+ // Only calculate the new chroma distortion if we are at a pixel that
+ // corresponds to a new chroma row
+ read_chroma_dist_row_16(ss_x, u_dist, v_dist, &u_first, &u_second,
+ &v_first, &v_second);
+ u_dist += DIST_STRIDE;
+ v_dist += DIST_STRIDE;
+ }
+
+ sum_row_first = vqaddq_u16(sum_row_first, u_first);
+ sum_row_second = vqaddq_u16(sum_row_second, u_second);
+ sum_row_first = vqaddq_u16(sum_row_first, v_first);
+ sum_row_second = vqaddq_u16(sum_row_second, v_second);
+
+ // Get modifier and store result
+ sum_row_first =
+ average_8(sum_row_first, &mul_first, strength, rounding, &weight_first);
+ sum_row_second = average_8(sum_row_second, &mul_second, strength, rounding,
+ &weight_second);
+ accumulate_and_store_16(sum_row_first, sum_row_second, y_pre, y_count,
+ y_accum);
+ y_pre += y_pre_stride;
+ y_count += y_pre_stride;
+ y_accum += y_pre_stride;
+ y_dist += DIST_STRIDE;
+ }
+
+ // The last row
+ mul_first = vld1q_u16((const uint16_t *)neighbors_first[0]);
+ mul_second = vld1q_u16((const uint16_t *)neighbors_second[0]);
+
+ // Shift the rows up
+ sum_row_1_first = sum_row_2_first;
+ sum_row_1_second = sum_row_2_second;
+ sum_row_2_first = sum_row_3_first;
+ sum_row_2_second = sum_row_3_second;
+
+ // Add luma values to the modifier
+ sum_row_first = vqaddq_u16(sum_row_1_first, sum_row_2_first);
+ sum_row_second = vqaddq_u16(sum_row_1_second, sum_row_2_second);
+
+ // Add chroma values to the modifier
+ if (ss_y == 0) {
+ // Only calculate the new chroma distortion if we are at a pixel that
+ // corresponds to a new chroma row
+ read_chroma_dist_row_16(ss_x, u_dist, v_dist, &u_first, &u_second, &v_first,
+ &v_second);
+ }
+
+ sum_row_first = vqaddq_u16(sum_row_first, u_first);
+ sum_row_second = vqaddq_u16(sum_row_second, u_second);
+ sum_row_first = vqaddq_u16(sum_row_first, v_first);
+ sum_row_second = vqaddq_u16(sum_row_second, v_second);
+
+ // Get modifier and store result
+ sum_row_first =
+ average_8(sum_row_first, &mul_first, strength, rounding, &weight_first);
+ sum_row_second = average_8(sum_row_second, &mul_second, strength, rounding,
+ &weight_second);
+ accumulate_and_store_16(sum_row_first, sum_row_second, y_pre, y_count,
+ y_accum);
+}
+
+// Perform temporal filter for the luma component.
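+// The block is processed in 16-wide column strips. The strips differ only in
+// their neighbor tables (the leftmost and rightmost strips have fewer
+// horizontal neighbors) and in the weights, which switch from blk_fw[0]/[2]
+// to blk_fw[1]/[3] halfway across when sub-block weights are in use.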
+static void apply_temporal_filter_luma(
+ const uint8_t *y_pre, int y_pre_stride, unsigned int block_width,
+ unsigned int block_height, int ss_x, int ss_y, int strength,
+ const int *blk_fw, int use_whole_blk, uint32_t *y_accum, uint16_t *y_count,
+ const uint16_t *y_dist, const uint16_t *u_dist, const uint16_t *v_dist) {
+ unsigned int blk_col = 0, uv_blk_col = 0;
+ const unsigned int blk_col_step = 16, uv_blk_col_step = 16 >> ss_x;
+ const unsigned int mid_width = block_width >> 1,
+ last_width = block_width - blk_col_step;
+ int top_weight = blk_fw[0],
+ bottom_weight = use_whole_blk ? blk_fw[0] : blk_fw[2];
+ const int16_t *const *neighbors_first;
+ const int16_t *const *neighbors_second;
+
+ if (block_width == 16) {
+    // Special Case: The block width is 16, so we are operating on a single
+    // row of 16 luma pixels. In this case, we can't use the usual
+    // left-middle-right pattern. We also don't support splitting now.
+ neighbors_first = LUMA_LEFT_COLUMN_NEIGHBORS;
+ neighbors_second = LUMA_RIGHT_COLUMN_NEIGHBORS;
+ if (use_whole_blk) {
+ apply_temporal_filter_luma_16(
+ y_pre + blk_col, y_pre_stride, 16, block_height, ss_x, ss_y, strength,
+ use_whole_blk, y_accum + blk_col, y_count + blk_col, y_dist + blk_col,
+ u_dist + uv_blk_col, v_dist + uv_blk_col, neighbors_first,
+ neighbors_second, top_weight, bottom_weight, NULL);
+ } else {
+ apply_temporal_filter_luma_16(
+ y_pre + blk_col, y_pre_stride, 16, block_height, ss_x, ss_y, strength,
+ use_whole_blk, y_accum + blk_col, y_count + blk_col, y_dist + blk_col,
+ u_dist + uv_blk_col, v_dist + uv_blk_col, neighbors_first,
+ neighbors_second, 0, 0, blk_fw);
+ }
+
+ return;
+ }
+
+ // Left
+ neighbors_first = LUMA_LEFT_COLUMN_NEIGHBORS;
+ neighbors_second = LUMA_MIDDLE_COLUMN_NEIGHBORS;
+ apply_temporal_filter_luma_16(
+ y_pre + blk_col, y_pre_stride, 16, block_height, ss_x, ss_y, strength,
+ use_whole_blk, y_accum + blk_col, y_count + blk_col, y_dist + blk_col,
+ u_dist + uv_blk_col, v_dist + uv_blk_col, neighbors_first,
+ neighbors_second, top_weight, bottom_weight, NULL);
+
+ blk_col += blk_col_step;
+ uv_blk_col += uv_blk_col_step;
+
+ // Middle First
+ neighbors_first = LUMA_MIDDLE_COLUMN_NEIGHBORS;
+ for (; blk_col < mid_width;
+ blk_col += blk_col_step, uv_blk_col += uv_blk_col_step) {
+ apply_temporal_filter_luma_16(
+ y_pre + blk_col, y_pre_stride, 16, block_height, ss_x, ss_y, strength,
+ use_whole_blk, y_accum + blk_col, y_count + blk_col, y_dist + blk_col,
+ u_dist + uv_blk_col, v_dist + uv_blk_col, neighbors_first,
+ neighbors_second, top_weight, bottom_weight, NULL);
+ }
+
+ if (!use_whole_blk) {
+ top_weight = blk_fw[1];
+ bottom_weight = blk_fw[3];
+ }
+
+ // Middle Second
+ for (; blk_col < last_width;
+ blk_col += blk_col_step, uv_blk_col += uv_blk_col_step) {
+ apply_temporal_filter_luma_16(
+ y_pre + blk_col, y_pre_stride, 16, block_height, ss_x, ss_y, strength,
+ use_whole_blk, y_accum + blk_col, y_count + blk_col, y_dist + blk_col,
+ u_dist + uv_blk_col, v_dist + uv_blk_col, neighbors_first,
+ neighbors_second, top_weight, bottom_weight, NULL);
+ }
+
+ // Right
+ neighbors_second = LUMA_RIGHT_COLUMN_NEIGHBORS;
+ apply_temporal_filter_luma_16(
+ y_pre + blk_col, y_pre_stride, 16, block_height, ss_x, ss_y, strength,
+ use_whole_blk, y_accum + blk_col, y_count + blk_col, y_dist + blk_col,
+ u_dist + uv_blk_col, v_dist + uv_blk_col, neighbors_first,
+ neighbors_second, top_weight, bottom_weight, NULL);
+}
+
+// Apply temporal filter to the chroma components. This performs temporal
+// filtering on a chroma block of 8 X uv_height. If blk_fw is not NULL, use
+// blk_fw as an array of size 4 for the weights for each of the 4 subblocks,
+// else use top_weight for top half, and bottom weight for bottom half.
+static void apply_temporal_filter_chroma_8(
+ const uint8_t *u_pre, const uint8_t *v_pre, int uv_pre_stride,
+ unsigned int uv_block_height, int ss_x, int ss_y, int strength,
+ uint32_t *u_accum, uint16_t *u_count, uint32_t *v_accum, uint16_t *v_count,
+ const uint16_t *y_dist, const uint16_t *u_dist, const uint16_t *v_dist,
+ const int16_t *const *neighbors, int top_weight, int bottom_weight,
+ const int *blk_fw) {
+ const int rounding = (1 << strength) >> 1;
+
+ uint16x8_t weight;
+
+ uint16x8_t mul;
+
+ uint16x8_t u_sum_row_1, u_sum_row_2, u_sum_row_3;
+ uint16x8_t v_sum_row_1, v_sum_row_2, v_sum_row_3;
+
+ uint16x8_t u_sum_row, v_sum_row;
+
+ // Loop variable
+ unsigned int h;
+
+ // Initialize weight
+ if (blk_fw) {
+ weight = vcombine_u16(vdup_n_u16(blk_fw[0]), vdup_n_u16(blk_fw[1]));
+ } else {
+ weight = vdupq_n_u16(top_weight);
+ }
+
+ // First row
+ mul = vld1q_u16((const uint16_t *)neighbors[0]);
+
+ // Add chroma values
+ get_sum_8(u_dist, &u_sum_row_2);
+ get_sum_8(u_dist + DIST_STRIDE, &u_sum_row_3);
+
+ u_sum_row = vqaddq_u16(u_sum_row_2, u_sum_row_3);
+
+ get_sum_8(v_dist, &v_sum_row_2);
+ get_sum_8(v_dist + DIST_STRIDE, &v_sum_row_3);
+
+ v_sum_row = vqaddq_u16(v_sum_row_2, v_sum_row_3);
+
+ // Add luma values
+ add_luma_dist_to_8_chroma_mod(y_dist, ss_x, ss_y, &u_sum_row, &v_sum_row);
+
+ // Get modifier and store result
+ u_sum_row = average_8(u_sum_row, &mul, strength, rounding, &weight);
+ v_sum_row = average_8(v_sum_row, &mul, strength, rounding, &weight);
+
+ accumulate_and_store_8(u_sum_row, u_pre, u_count, u_accum);
+ accumulate_and_store_8(v_sum_row, v_pre, v_count, v_accum);
+
+ u_pre += uv_pre_stride;
+ u_dist += DIST_STRIDE;
+ v_pre += uv_pre_stride;
+ v_dist += DIST_STRIDE;
+ u_count += uv_pre_stride;
+ u_accum += uv_pre_stride;
+ v_count += uv_pre_stride;
+ v_accum += uv_pre_stride;
+
+ y_dist += DIST_STRIDE * (1 + ss_y);
+
+ // Then all the rows except the last one
+ mul = vld1q_u16((const uint16_t *)neighbors[1]);
+
+ for (h = 1; h < uv_block_height - 1; ++h) {
+    // Switch to the weights for the bottom half of the block
+ if (h == uv_block_height / 2) {
+ if (blk_fw) {
+ weight = vcombine_u16(vdup_n_u16(blk_fw[2]), vdup_n_u16(blk_fw[3]));
+ } else {
+ weight = vdupq_n_u16(bottom_weight);
+ }
+ }
+
+ // Shift the rows up
+ u_sum_row_1 = u_sum_row_2;
+ u_sum_row_2 = u_sum_row_3;
+
+ v_sum_row_1 = v_sum_row_2;
+ v_sum_row_2 = v_sum_row_3;
+
+ // Add chroma values
+ u_sum_row = vqaddq_u16(u_sum_row_1, u_sum_row_2);
+ get_sum_8(u_dist + DIST_STRIDE, &u_sum_row_3);
+ u_sum_row = vqaddq_u16(u_sum_row, u_sum_row_3);
+
+ v_sum_row = vqaddq_u16(v_sum_row_1, v_sum_row_2);
+ get_sum_8(v_dist + DIST_STRIDE, &v_sum_row_3);
+ v_sum_row = vqaddq_u16(v_sum_row, v_sum_row_3);
+
+ // Add luma values
+ add_luma_dist_to_8_chroma_mod(y_dist, ss_x, ss_y, &u_sum_row, &v_sum_row);
+
+ // Get modifier and store result
+ u_sum_row = average_8(u_sum_row, &mul, strength, rounding, &weight);
+ v_sum_row = average_8(v_sum_row, &mul, strength, rounding, &weight);
+
+ accumulate_and_store_8(u_sum_row, u_pre, u_count, u_accum);
+ accumulate_and_store_8(v_sum_row, v_pre, v_count, v_accum);
+
+ u_pre += uv_pre_stride;
+ u_dist += DIST_STRIDE;
+ v_pre += uv_pre_stride;
+ v_dist += DIST_STRIDE;
+ u_count += uv_pre_stride;
+ u_accum += uv_pre_stride;
+ v_count += uv_pre_stride;
+ v_accum += uv_pre_stride;
+
+ y_dist += DIST_STRIDE * (1 + ss_y);
+ }
+
+ // The last row
+ mul = vld1q_u16((const uint16_t *)neighbors[0]);
+
+ // Shift the rows up
+ u_sum_row_1 = u_sum_row_2;
+ u_sum_row_2 = u_sum_row_3;
+
+ v_sum_row_1 = v_sum_row_2;
+ v_sum_row_2 = v_sum_row_3;
+
+ // Add chroma values
+ u_sum_row = vqaddq_u16(u_sum_row_1, u_sum_row_2);
+ v_sum_row = vqaddq_u16(v_sum_row_1, v_sum_row_2);
+
+ // Add luma values
+ add_luma_dist_to_8_chroma_mod(y_dist, ss_x, ss_y, &u_sum_row, &v_sum_row);
+
+ // Get modifier and store result
+ u_sum_row = average_8(u_sum_row, &mul, strength, rounding, &weight);
+ v_sum_row = average_8(v_sum_row, &mul, strength, rounding, &weight);
+
+ accumulate_and_store_8(u_sum_row, u_pre, u_count, u_accum);
+ accumulate_and_store_8(v_sum_row, v_pre, v_count, v_accum);
+}
+
+// Perform temporal filter for the chroma components.
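+// As with luma, the block is processed in 8-wide chroma column strips. The
+// neighbor table depends on both the strip position and the subsampling
+// mode, and blk_col advances by 8 << ss_x luma columns per strip so that
+// y_dist stays aligned with the chroma data.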
+static void apply_temporal_filter_chroma(
+ const uint8_t *u_pre, const uint8_t *v_pre, int uv_pre_stride,
+ unsigned int block_width, unsigned int block_height, int ss_x, int ss_y,
+ int strength, const int *blk_fw, int use_whole_blk, uint32_t *u_accum,
+ uint16_t *u_count, uint32_t *v_accum, uint16_t *v_count,
+ const uint16_t *y_dist, const uint16_t *u_dist, const uint16_t *v_dist) {
+ const unsigned int uv_width = block_width >> ss_x,
+ uv_height = block_height >> ss_y;
+
+ unsigned int blk_col = 0, uv_blk_col = 0;
+ const unsigned int uv_blk_col_step = 8, blk_col_step = 8 << ss_x;
+ const unsigned int uv_mid_width = uv_width >> 1,
+ uv_last_width = uv_width - uv_blk_col_step;
+ int top_weight = blk_fw[0],
+ bottom_weight = use_whole_blk ? blk_fw[0] : blk_fw[2];
+ const int16_t *const *neighbors;
+
+ if (uv_width == 8) {
+ // Special Case: We are subsampling in x direction on a 16x16 block. Since
+ // we are operating on a row of 8 chroma pixels, we can't use the usual
+ // left-middle-right pattern.
+ assert(ss_x);
+
+ if (ss_y) {
+ neighbors = CHROMA_DOUBLE_SS_SINGLE_COLUMN_NEIGHBORS;
+ } else {
+ neighbors = CHROMA_SINGLE_SS_SINGLE_COLUMN_NEIGHBORS;
+ }
+
+ if (use_whole_blk) {
+ apply_temporal_filter_chroma_8(
+ u_pre + uv_blk_col, v_pre + uv_blk_col, uv_pre_stride, uv_height,
+ ss_x, ss_y, strength, u_accum + uv_blk_col, u_count + uv_blk_col,
+ v_accum + uv_blk_col, v_count + uv_blk_col, y_dist + blk_col,
+ u_dist + uv_blk_col, v_dist + uv_blk_col, neighbors, top_weight,
+ bottom_weight, NULL);
+ } else {
+ apply_temporal_filter_chroma_8(
+ u_pre + uv_blk_col, v_pre + uv_blk_col, uv_pre_stride, uv_height,
+ ss_x, ss_y, strength, u_accum + uv_blk_col, u_count + uv_blk_col,
+ v_accum + uv_blk_col, v_count + uv_blk_col, y_dist + blk_col,
+ u_dist + uv_blk_col, v_dist + uv_blk_col, neighbors, 0, 0, blk_fw);
+ }
+
+ return;
+ }
+
+ // Left
+ if (ss_x && ss_y) {
+ neighbors = CHROMA_DOUBLE_SS_LEFT_COLUMN_NEIGHBORS;
+ } else if (ss_x || ss_y) {
+ neighbors = CHROMA_SINGLE_SS_LEFT_COLUMN_NEIGHBORS;
+ } else {
+ neighbors = CHROMA_NO_SS_LEFT_COLUMN_NEIGHBORS;
+ }
+
+ apply_temporal_filter_chroma_8(
+ u_pre + uv_blk_col, v_pre + uv_blk_col, uv_pre_stride, uv_height, ss_x,
+ ss_y, strength, u_accum + uv_blk_col, u_count + uv_blk_col,
+ v_accum + uv_blk_col, v_count + uv_blk_col, y_dist + blk_col,
+ u_dist + uv_blk_col, v_dist + uv_blk_col, neighbors, top_weight,
+ bottom_weight, NULL);
+
+ blk_col += blk_col_step;
+ uv_blk_col += uv_blk_col_step;
+
+ // Middle First
+ if (ss_x && ss_y) {
+ neighbors = CHROMA_DOUBLE_SS_MIDDLE_COLUMN_NEIGHBORS;
+ } else if (ss_x || ss_y) {
+ neighbors = CHROMA_SINGLE_SS_MIDDLE_COLUMN_NEIGHBORS;
+ } else {
+ neighbors = CHROMA_NO_SS_MIDDLE_COLUMN_NEIGHBORS;
+ }
+
+ for (; uv_blk_col < uv_mid_width;
+ blk_col += blk_col_step, uv_blk_col += uv_blk_col_step) {
+ apply_temporal_filter_chroma_8(
+ u_pre + uv_blk_col, v_pre + uv_blk_col, uv_pre_stride, uv_height, ss_x,
+ ss_y, strength, u_accum + uv_blk_col, u_count + uv_blk_col,
+ v_accum + uv_blk_col, v_count + uv_blk_col, y_dist + blk_col,
+ u_dist + uv_blk_col, v_dist + uv_blk_col, neighbors, top_weight,
+ bottom_weight, NULL);
+ }
+
+ if (!use_whole_blk) {
+ top_weight = blk_fw[1];
+ bottom_weight = blk_fw[3];
+ }
+
+ // Middle Second
+ for (; uv_blk_col < uv_last_width;
+ blk_col += blk_col_step, uv_blk_col += uv_blk_col_step) {
+ apply_temporal_filter_chroma_8(
+ u_pre + uv_blk_col, v_pre + uv_blk_col, uv_pre_stride, uv_height, ss_x,
+ ss_y, strength, u_accum + uv_blk_col, u_count + uv_blk_col,
+ v_accum + uv_blk_col, v_count + uv_blk_col, y_dist + blk_col,
+ u_dist + uv_blk_col, v_dist + uv_blk_col, neighbors, top_weight,
+ bottom_weight, NULL);
+ }
+
+ // Right
+ if (ss_x && ss_y) {
+ neighbors = CHROMA_DOUBLE_SS_RIGHT_COLUMN_NEIGHBORS;
+ } else if (ss_x || ss_y) {
+ neighbors = CHROMA_SINGLE_SS_RIGHT_COLUMN_NEIGHBORS;
+ } else {
+ neighbors = CHROMA_NO_SS_RIGHT_COLUMN_NEIGHBORS;
+ }
+
+ apply_temporal_filter_chroma_8(
+ u_pre + uv_blk_col, v_pre + uv_blk_col, uv_pre_stride, uv_height, ss_x,
+ ss_y, strength, u_accum + uv_blk_col, u_count + uv_blk_col,
+ v_accum + uv_blk_col, v_count + uv_blk_col, y_dist + blk_col,
+ u_dist + uv_blk_col, v_dist + uv_blk_col, neighbors, top_weight,
+ bottom_weight, NULL);
+}
+
+void vp9_apply_temporal_filter_neon(
+ const uint8_t *y_src, int y_src_stride, const uint8_t *y_pre,
+ int y_pre_stride, const uint8_t *u_src, const uint8_t *v_src,
+ int uv_src_stride, const uint8_t *u_pre, const uint8_t *v_pre,
+ int uv_pre_stride, unsigned int block_width, unsigned int block_height,
+ int ss_x, int ss_y, int strength, const int *const blk_fw,
+ int use_whole_blk, uint32_t *y_accum, uint16_t *y_count, uint32_t *u_accum,
+ uint16_t *u_count, uint32_t *v_accum, uint16_t *v_count) {
+ const unsigned int chroma_height = block_height >> ss_y,
+ chroma_width = block_width >> ss_x;
+
+ DECLARE_ALIGNED(16, uint16_t, y_dist[BH * DIST_STRIDE]) = { 0 };
+ DECLARE_ALIGNED(16, uint16_t, u_dist[BH * DIST_STRIDE]) = { 0 };
+ DECLARE_ALIGNED(16, uint16_t, v_dist[BH * DIST_STRIDE]) = { 0 };
+ const int *blk_fw_ptr = blk_fw;
+
+ uint16_t *y_dist_ptr = y_dist + 1, *u_dist_ptr = u_dist + 1,
+ *v_dist_ptr = v_dist + 1;
+ const uint8_t *y_src_ptr = y_src, *u_src_ptr = u_src, *v_src_ptr = v_src;
+ const uint8_t *y_pre_ptr = y_pre, *u_pre_ptr = u_pre, *v_pre_ptr = v_pre;
+
+ // Loop variables
+ unsigned int row, blk_col;
+
+ assert(block_width <= BW && "block width too large");
+ assert(block_height <= BH && "block height too large");
+ assert(block_width % 16 == 0 && "block width must be multiple of 16");
+ assert(block_height % 2 == 0 && "block height must be even");
+ assert((ss_x == 0 || ss_x == 1) && (ss_y == 0 || ss_y == 1) &&
+ "invalid chroma subsampling");
+ assert(strength >= 0 && strength <= 6 && "invalid temporal filter strength");
+  assert(blk_fw[0] >= 0 && "filter weight must be non-negative");
+  assert(
+      (use_whole_blk || (blk_fw[1] >= 0 && blk_fw[2] >= 0 && blk_fw[3] >= 0)) &&
+      "subblock filter weights must be non-negative");
+  assert(blk_fw[0] <= 2 && "filter weight must not exceed 2");
+  assert(
+      (use_whole_blk || (blk_fw[1] <= 2 && blk_fw[2] <= 2 && blk_fw[3] <= 2)) &&
+      "subblock filter weights must not exceed 2");
+
+ // Precompute the difference squared
+ for (row = 0; row < block_height; row++) {
+ for (blk_col = 0; blk_col < block_width; blk_col += 16) {
+ store_dist_16(y_src_ptr + blk_col, y_pre_ptr + blk_col,
+ y_dist_ptr + blk_col);
+ }
+ y_src_ptr += y_src_stride;
+ y_pre_ptr += y_pre_stride;
+ y_dist_ptr += DIST_STRIDE;
+ }
+
+ for (row = 0; row < chroma_height; row++) {
+ for (blk_col = 0; blk_col < chroma_width; blk_col += 8) {
+ store_dist_8(u_src_ptr + blk_col, u_pre_ptr + blk_col,
+ u_dist_ptr + blk_col);
+ store_dist_8(v_src_ptr + blk_col, v_pre_ptr + blk_col,
+ v_dist_ptr + blk_col);
+ }
+
+ u_src_ptr += uv_src_stride;
+ u_pre_ptr += uv_pre_stride;
+ u_dist_ptr += DIST_STRIDE;
+ v_src_ptr += uv_src_stride;
+ v_pre_ptr += uv_pre_stride;
+ v_dist_ptr += DIST_STRIDE;
+ }
+
+ y_dist_ptr = y_dist + 1;
+ u_dist_ptr = u_dist + 1;
+ v_dist_ptr = v_dist + 1;
+
+ apply_temporal_filter_luma(y_pre, y_pre_stride, block_width, block_height,
+ ss_x, ss_y, strength, blk_fw_ptr, use_whole_blk,
+ y_accum, y_count, y_dist_ptr, u_dist_ptr,
+ v_dist_ptr);
+
+ apply_temporal_filter_chroma(u_pre, v_pre, uv_pre_stride, block_width,
+ block_height, ss_x, ss_y, strength, blk_fw_ptr,
+ use_whole_blk, u_accum, u_count, v_accum,
+ v_count, y_dist_ptr, u_dist_ptr, v_dist_ptr);
+}