Diffstat (limited to 'third_party/aom/av1/common/arm')
35 files changed, 33201 insertions, 0 deletions
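For reference (not part of the diff): the btf_16_lane_*_neon helpers in the file below vectorize the standard AV1 inverse-transform rotation pair, computing in0*c0 + in1*c1 and in0*c1 - in1*c0 with a rounding right shift by INV_COS_BIT (12 in libaom). A minimal scalar sketch of that arithmetic, with illustrative helper names that are not part of the diff, is:

static int16_t round_shift16(int32_t x, int bit) {
  // Rounding right shift, matching vrshrn_n_s32 followed by a narrow to 16 bits.
  return (int16_t)((x + (1 << (bit - 1))) >> bit);
}

static void btf_16_scalar(int16_t in0, int16_t in1, int16_t c0, int16_t c1,
                          int16_t *t0, int16_t *t1) {
  // Same rotation as btf_16_lane_0_1_neon below, one lane at a time.
  *t0 = round_shift16((int32_t)in0 * c0 + (int32_t)in1 * c1, INV_COS_BIT);
  *t1 = round_shift16((int32_t)in0 * c1 - (int32_t)in1 * c0, INV_COS_BIT);
}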
diff --git a/third_party/aom/av1/common/arm/av1_inv_txfm_neon.c b/third_party/aom/av1/common/arm/av1_inv_txfm_neon.c new file mode 100644 index 0000000000..09e5166b14 --- /dev/null +++ b/third_party/aom/av1/common/arm/av1_inv_txfm_neon.c @@ -0,0 +1,4217 @@ +/* + * Copyright (c) 2018, Alliance for Open Media. All rights reserved + * + * This source code is subject to the terms of the BSD 2 Clause License and + * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License + * was not distributed with this source code in the LICENSE file, you can + * obtain it at www.aomedia.org/license/software. If the Alliance for Open + * Media Patent License 1.0 was not distributed with this source code in the + * PATENTS file, you can obtain it at www.aomedia.org/license/patent. + */ + +#include <arm_neon.h> + +#include "config/aom_config.h" +#include "config/aom_dsp_rtcd.h" +#include "config/av1_rtcd.h" + +#include "aom_dsp/arm/transpose_neon.h" +#include "av1/common/av1_inv_txfm1d.h" +#include "av1/common/av1_inv_txfm1d_cfg.h" +#include "av1/common/av1_txfm.h" +#include "av1/common/enums.h" +#include "av1/common/idct.h" +#include "av1/common/arm/av1_inv_txfm_neon.h" + +// 1D itx types +typedef enum ATTRIBUTE_PACKED { + IDCT_1D, + IADST_1D, + IFLIPADST_1D = IADST_1D, + IIDENTITY_1D, + ITX_TYPES_1D, +} ITX_TYPE_1D; + +static const ITX_TYPE_1D vitx_1d_tab[TX_TYPES] = { + IDCT_1D, IADST_1D, IDCT_1D, IADST_1D, + IFLIPADST_1D, IDCT_1D, IFLIPADST_1D, IADST_1D, + IFLIPADST_1D, IIDENTITY_1D, IDCT_1D, IIDENTITY_1D, + IADST_1D, IIDENTITY_1D, IFLIPADST_1D, IIDENTITY_1D, +}; + +static const ITX_TYPE_1D hitx_1d_tab[TX_TYPES] = { + IDCT_1D, IDCT_1D, IADST_1D, IADST_1D, + IDCT_1D, IFLIPADST_1D, IFLIPADST_1D, IFLIPADST_1D, + IADST_1D, IIDENTITY_1D, IIDENTITY_1D, IDCT_1D, + IIDENTITY_1D, IADST_1D, IIDENTITY_1D, IFLIPADST_1D, +}; + +// 1D functions +static const transform_1d_neon lowbd_txfm_all_1d_arr[TX_SIZES][ITX_TYPES_1D] = { + { av1_idct4, av1_iadst4, av1_iidentity4_c }, + { av1_idct8, av1_iadst8, av1_iidentity8_c }, + { av1_idct16, av1_iadst16, av1_iidentity16_c }, + { av1_idct32, NULL, NULL }, + { av1_idct64, NULL, NULL }, +}; + +static INLINE void lowbd_add_flip_buffer_8xn_neon(int16x8_t *in, + uint8_t *output, int stride, + int flipud, + const int height) { + int j = flipud ? (height - 1) : 0; + const int step = flipud ? -1 : 1; + int16x8_t temp_output; + for (int i = 0; i < height; ++i, j += step) { + temp_output = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(output))); + temp_output = vaddq_s16(temp_output, in[j]); + vst1_u8(output, vqmovun_s16(temp_output)); + output += stride; + } +} + +static INLINE uint8x16_t lowbd_get_recon_16x16_neon(const uint8x16_t pred, + int16x8_t res0, + int16x8_t res1) { + int16x8_t temp_output[2]; + uint8x16_t temp_output_8q; + temp_output[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pred))); + temp_output[0] = vaddq_s16(temp_output[0], res0); + temp_output[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pred))); + temp_output[1] = vaddq_s16(temp_output[1], res1); + temp_output_8q = + vcombine_u8(vqmovun_s16(temp_output[0]), vqmovun_s16(temp_output[1])); + return temp_output_8q; +} + +static INLINE void lowbd_add_flip_buffer_16xn_neon(int16x8_t *in, + uint8_t *output, int stride, + int flipud, int height) { + uint8x16_t temp_output_8q; + int j = flipud ? (height - 1) : 0; + const int step = flipud ? 
-1 : 1; + for (int i = 0; i < height; ++i, j += step) { + temp_output_8q = vld1q_u8(output + i * stride); + temp_output_8q = + lowbd_get_recon_16x16_neon(temp_output_8q, in[j], in[j + height]); + vst1q_u8((output + i * stride), temp_output_8q); + } +} + +static INLINE void lowbd_inv_txfm2d_memset_neon(int16x8_t *a, int size, + int value) { + for (int i = 0; i < size; i++) { + a[i] = vdupq_n_s16((int16_t)value); + } +} + +static INLINE void btf_16_lane_0_1_neon(const int16x8_t in0, + const int16x8_t in1, const int16x4_t c, + int16x8_t *t0, int16x8_t *t1) { + int32x4_t s0[2], s1[2]; + int16x4_t v0[2], v1[2]; + + s0[0] = vmull_lane_s16(vget_low_s16(in0), c, 0); + s0[1] = vmull_lane_s16(vget_high_s16(in0), c, 0); + s1[0] = vmull_lane_s16(vget_low_s16(in0), c, 1); + s1[1] = vmull_lane_s16(vget_high_s16(in0), c, 1); + + s0[0] = vmlal_lane_s16(s0[0], vget_low_s16(in1), c, 1); + s0[1] = vmlal_lane_s16(s0[1], vget_high_s16(in1), c, 1); + s1[0] = vmlsl_lane_s16(s1[0], vget_low_s16(in1), c, 0); + s1[1] = vmlsl_lane_s16(s1[1], vget_high_s16(in1), c, 0); + + v0[0] = vrshrn_n_s32(s0[0], INV_COS_BIT); + v0[1] = vrshrn_n_s32(s0[1], INV_COS_BIT); + v1[0] = vrshrn_n_s32(s1[0], INV_COS_BIT); + v1[1] = vrshrn_n_s32(s1[1], INV_COS_BIT); + + *t0 = vcombine_s16(v0[0], v0[1]); + *t1 = vcombine_s16(v1[0], v1[1]); +} + +static INLINE void btf_16_lane_1_0_neon(const int16x8_t in0, + const int16x8_t in1, const int16x4_t c, + int16x8_t *t0, int16x8_t *t1) { + int32x4_t s0[2], s1[2]; + int16x4_t v0[2], v1[2]; + + s0[0] = vmull_lane_s16(vget_low_s16(in0), c, 1); + s0[1] = vmull_lane_s16(vget_high_s16(in0), c, 1); + s1[0] = vmull_lane_s16(vget_low_s16(in0), c, 0); + s1[1] = vmull_lane_s16(vget_high_s16(in0), c, 0); + + s0[0] = vmlal_lane_s16(s0[0], vget_low_s16(in1), c, 0); + s0[1] = vmlal_lane_s16(s0[1], vget_high_s16(in1), c, 0); + s1[0] = vmlsl_lane_s16(s1[0], vget_low_s16(in1), c, 1); + s1[1] = vmlsl_lane_s16(s1[1], vget_high_s16(in1), c, 1); + + v0[0] = vrshrn_n_s32(s0[0], INV_COS_BIT); + v0[1] = vrshrn_n_s32(s0[1], INV_COS_BIT); + v1[0] = vrshrn_n_s32(s1[0], INV_COS_BIT); + v1[1] = vrshrn_n_s32(s1[1], INV_COS_BIT); + + *t0 = vcombine_s16(v0[0], v0[1]); + *t1 = vcombine_s16(v1[0], v1[1]); +} + +static INLINE void btf_16_lane_2_3_neon(const int16x8_t in0, + const int16x8_t in1, const int16x4_t c, + int16x8_t *t0, int16x8_t *t1) { + int32x4_t s0[2], s1[2]; + int16x4_t v0[2], v1[2]; + + s0[0] = vmull_lane_s16(vget_low_s16(in0), c, 2); + s0[1] = vmull_lane_s16(vget_high_s16(in0), c, 2); + s1[0] = vmull_lane_s16(vget_low_s16(in0), c, 3); + s1[1] = vmull_lane_s16(vget_high_s16(in0), c, 3); + + s0[0] = vmlal_lane_s16(s0[0], vget_low_s16(in1), c, 3); + s0[1] = vmlal_lane_s16(s0[1], vget_high_s16(in1), c, 3); + s1[0] = vmlsl_lane_s16(s1[0], vget_low_s16(in1), c, 2); + s1[1] = vmlsl_lane_s16(s1[1], vget_high_s16(in1), c, 2); + + v0[0] = vrshrn_n_s32(s0[0], INV_COS_BIT); + v0[1] = vrshrn_n_s32(s0[1], INV_COS_BIT); + v1[0] = vrshrn_n_s32(s1[0], INV_COS_BIT); + v1[1] = vrshrn_n_s32(s1[1], INV_COS_BIT); + + *t0 = vcombine_s16(v0[0], v0[1]); + *t1 = vcombine_s16(v1[0], v1[1]); +} + +static INLINE void btf_16_neon(const int16x8_t in0, int16_t coef1, + int16_t coef2, int16x8_t *t0, int16x8_t *t1) { + int32x4_t s0_l, s0_h, s1_l, s1_h; + int16x4_t v0[2], v1[2]; + + s0_l = vmull_n_s16(vget_low_s16(in0), coef1); + s0_h = vmull_n_s16(vget_high_s16(in0), coef1); + s1_l = vmull_n_s16(vget_low_s16(in0), coef2); + s1_h = vmull_n_s16(vget_high_s16(in0), coef2); + + v0[0] = vrshrn_n_s32(s0_l, INV_COS_BIT); + v0[1] = vrshrn_n_s32(s0_h, 
INV_COS_BIT); + v1[0] = vrshrn_n_s32(s1_l, INV_COS_BIT); + v1[1] = vrshrn_n_s32(s1_h, INV_COS_BIT); + + *t0 = vcombine_s16(v0[0], v0[1]); + *t1 = vcombine_s16(v1[0], v1[1]); +} + +static INLINE void btf_16_lane_3_2_neon(const int16x8_t in0, + const int16x8_t in1, const int16x4_t c, + int16x8_t *t0, int16x8_t *t1) { + int32x4_t s0[2], s1[2]; + int16x4_t v0[2], v1[2]; + + s0[0] = vmull_lane_s16(vget_low_s16(in0), c, 3); + s0[1] = vmull_lane_s16(vget_high_s16(in0), c, 3); + s1[0] = vmull_lane_s16(vget_low_s16(in0), c, 2); + s1[1] = vmull_lane_s16(vget_high_s16(in0), c, 2); + + s0[0] = vmlal_lane_s16(s0[0], vget_low_s16(in1), c, 2); + s0[1] = vmlal_lane_s16(s0[1], vget_high_s16(in1), c, 2); + s1[0] = vmlsl_lane_s16(s1[0], vget_low_s16(in1), c, 3); + s1[1] = vmlsl_lane_s16(s1[1], vget_high_s16(in1), c, 3); + + v0[0] = vrshrn_n_s32(s0[0], INV_COS_BIT); + v0[1] = vrshrn_n_s32(s0[1], INV_COS_BIT); + v1[0] = vrshrn_n_s32(s1[0], INV_COS_BIT); + v1[1] = vrshrn_n_s32(s1[1], INV_COS_BIT); + + *t0 = vcombine_s16(v0[0], v0[1]); + *t1 = vcombine_s16(v1[0], v1[1]); +} + +static INLINE void btf_16_half_neon(int16x8_t *const x, const int16x4_t c) { + int32x4_t t0[2], t1[2]; + int16x4_t v0[2], v1[2]; + + // Don't add/sub before multiply, which will overflow in iadst8. + const int32x4_t x0_lo = vmull_lane_s16(vget_low_s16(x[0]), c, 0); + const int32x4_t x0_hi = vmull_lane_s16(vget_high_s16(x[0]), c, 0); + const int32x4_t x1_lo = vmull_lane_s16(vget_low_s16(x[1]), c, 0); + const int32x4_t x1_hi = vmull_lane_s16(vget_high_s16(x[1]), c, 0); + + t0[0] = vaddq_s32(x0_lo, x1_lo); + t0[1] = vaddq_s32(x0_hi, x1_hi); + t1[0] = vsubq_s32(x0_lo, x1_lo); + t1[1] = vsubq_s32(x0_hi, x1_hi); + + v0[0] = vrshrn_n_s32(t0[0], INV_COS_BIT); + v0[1] = vrshrn_n_s32(t0[1], INV_COS_BIT); + v1[0] = vrshrn_n_s32(t1[0], INV_COS_BIT); + v1[1] = vrshrn_n_s32(t1[1], INV_COS_BIT); + + x[0] = vcombine_s16(v0[0], v0[1]); + x[1] = vcombine_s16(v1[0], v1[1]); +} + +static INLINE int16x4_t set_s16x4_neon(const int16_t c0, const int16_t c1, + const int16_t c2, const int16_t c3) { + int16x4_t val = vdup_n_s16(c0); + val = vset_lane_s16(c1, val, 1); + val = vset_lane_s16(c2, val, 2); + val = vset_lane_s16(c3, val, 3); + return val; +} + +static INLINE void iadst8_neon(int16x8_t *const in, int16x8_t *out, + int8_t cos_bit) { + const int32_t *cospi = cospi_arr(cos_bit); + + const int16x4_t c0 = set_s16x4_neon((int16_t)cospi[4], (int16_t)cospi[60], + (int16_t)cospi[20], (int16_t)cospi[44]); + const int16x4_t c1 = set_s16x4_neon((int16_t)cospi[36], (int16_t)cospi[28], + (int16_t)cospi[52], (int16_t)cospi[12]); + const int16x4_t c2 = set_s16x4_neon((int16_t)cospi[32], (int16_t)cospi[32], + (int16_t)cospi[16], (int16_t)cospi[48]); + + int16x8_t x[8]; + int16x8_t s0, s1, s2, s3, s4, s5, s6, s7; + + // Stage 1 + x[0] = in[7]; + x[1] = in[0]; + x[2] = in[5]; + x[3] = in[2]; + x[4] = in[3]; + x[5] = in[4]; + x[6] = in[1]; + x[7] = in[6]; + + // Stage 2 + btf_16_lane_0_1_neon(x[0], x[1], c0, &s0, &s1); + btf_16_lane_2_3_neon(x[2], x[3], c0, &s2, &s3); + btf_16_lane_0_1_neon(x[4], x[5], c1, &s4, &s5); + btf_16_lane_2_3_neon(x[6], x[7], c1, &s6, &s7); + + // Stage 3 + x[0] = vqaddq_s16(s0, s4); + x[1] = vqaddq_s16(s1, s5); + x[2] = vqaddq_s16(s2, s6); + x[3] = vqaddq_s16(s3, s7); + x[4] = vqsubq_s16(s0, s4); + x[5] = vqsubq_s16(s1, s5); + x[6] = vqsubq_s16(s2, s6); + x[7] = vqsubq_s16(s3, s7); + + // Stage 4 + s0 = x[0]; + s1 = x[1]; + s2 = x[2]; + s3 = x[3]; + btf_16_lane_2_3_neon(x[4], x[5], c2, &s4, &s5); + btf_16_lane_3_2_neon(x[7], x[6], c2, &s7, &s6); + 
+ // Stage 5 + x[0] = vqaddq_s16(s0, s2); + x[1] = vqaddq_s16(s1, s3); + x[2] = vqsubq_s16(s0, s2); + x[3] = vqsubq_s16(s1, s3); + x[4] = vqaddq_s16(s4, s6); + x[5] = vqaddq_s16(s5, s7); + x[6] = vqsubq_s16(s4, s6); + x[7] = vqsubq_s16(s5, s7); + + // stage 6 + btf_16_half_neon(x + 2, c2); + btf_16_half_neon(x + 6, c2); + + // Stage 7 + out[0] = x[0]; + out[1] = vqnegq_s16(x[4]); + out[2] = x[6]; + out[3] = vqnegq_s16(x[2]); + out[4] = x[3]; + out[5] = vqnegq_s16(x[7]); + out[6] = x[5]; + out[7] = vqnegq_s16(x[1]); +} + +static INLINE void iadst8_low1_neon(int16x8_t *const in, int16x8_t *out, + int8_t cos_bit) { + const int32_t *cospi = cospi_arr(cos_bit); + const int16x4_t c2 = set_s16x4_neon((int16_t)cospi[32], (int16_t)cospi[32], + (int16_t)cospi[16], (int16_t)cospi[48]); + + int16x8_t x[8]; + int16x8_t s0, s1, s4, s5; + + // Stage 1 + x[1] = in[0]; + + // Stage 2 + + btf_16_neon(x[1], cospi[60], -cospi[4], &s0, &s1); + + // Stage 3 + x[0] = s0; + x[1] = s1; + x[4] = s0; + x[5] = s1; + + // Stage 4 + s0 = x[0]; + s1 = x[1]; + btf_16_lane_2_3_neon(x[4], x[5], c2, &s4, &s5); + + // Stage 5 + x[0] = s0; + x[1] = s1; + x[2] = s0; + x[3] = s1; + x[4] = s4; + x[5] = s5; + x[6] = s4; + x[7] = s5; + + // stage 6 + btf_16_half_neon(x + 2, c2); + btf_16_half_neon(x + 6, c2); + + // Stage 7 + out[0] = x[0]; + out[1] = vqnegq_s16(x[4]); + out[2] = x[6]; + out[3] = vqnegq_s16(x[2]); + out[4] = x[3]; + out[5] = vqnegq_s16(x[7]); + out[6] = x[5]; + out[7] = vqnegq_s16(x[1]); +} + +static INLINE void idct8_neon(int16x8_t *in, int16x8_t *out, int8_t cos_bit) { + const int32_t *cospi = cospi_arr(cos_bit); + int16x8_t step1[8], step2[8]; + const int16x4_t c0 = set_s16x4_neon((int16_t)cospi[8], (int16_t)cospi[56], + (int16_t)cospi[40], (int16_t)cospi[24]); + const int16x4_t c1 = set_s16x4_neon((int16_t)cospi[32], (int16_t)cospi[32], + (int16_t)cospi[16], (int16_t)cospi[48]); + + // stage 2 + btf_16_lane_0_1_neon(in[1], in[7], c0, &step1[7], &step1[4]); + btf_16_lane_2_3_neon(in[5], in[3], c0, &step1[6], &step1[5]); + + // stage 3 + btf_16_lane_0_1_neon(in[0], in[4], c1, &step2[0], &step2[1]); + btf_16_lane_2_3_neon(in[2], in[6], c1, &step2[3], &step2[2]); + step2[4] = vqaddq_s16(step1[4], step1[5]); + step2[5] = vqsubq_s16(step1[4], step1[5]); + step2[6] = vqsubq_s16(step1[7], step1[6]); + step2[7] = vqaddq_s16(step1[7], step1[6]); + + // stage 4 + step1[0] = vqaddq_s16(step2[0], step2[3]); + step1[1] = vqaddq_s16(step2[1], step2[2]); + step1[2] = vqsubq_s16(step2[1], step2[2]); + step1[3] = vqsubq_s16(step2[0], step2[3]); + btf_16_lane_0_1_neon(step2[6], step2[5], c1, &step1[6], &step1[5]); + + // stage 5 + out[0] = vqaddq_s16(step1[0], step2[7]); + out[1] = vqaddq_s16(step1[1], step1[6]); + out[2] = vqaddq_s16(step1[2], step1[5]); + out[3] = vqaddq_s16(step1[3], step2[4]); + out[4] = vqsubq_s16(step1[3], step2[4]); + out[5] = vqsubq_s16(step1[2], step1[5]); + out[6] = vqsubq_s16(step1[1], step1[6]); + out[7] = vqsubq_s16(step1[0], step2[7]); +} + +static INLINE void idct8_low1_neon(int16x8_t *in, int16x8_t *out, + int8_t cos_bit) { + const int32_t *cospi = cospi_arr(cos_bit); + int16x8_t step1; + int32x4_t t32[2]; + + // stage 1 + // stage 2 + // stage 3 + t32[0] = vmull_n_s16(vget_low_s16(in[0]), (int16_t)cospi[32]); + t32[1] = vmull_n_s16(vget_high_s16(in[0]), (int16_t)cospi[32]); + + step1 = vcombine_s16(vrshrn_n_s32(t32[0], INV_COS_BIT), + vrshrn_n_s32(t32[1], INV_COS_BIT)); + + // stage 4 + // stage 5 + out[0] = step1; + out[1] = step1; + out[2] = step1; + out[3] = step1; + out[4] = step1; + out[5] 
= step1; + out[6] = step1; + out[7] = step1; +} + +void av1_round_shift_array_16_neon(int16x8_t *arr, int size, int bit) { + assert(!(size % 4)); + if (!bit) return; + const int16x8_t dup_bits_n_16x8 = vdupq_n_s16((int16_t)(-bit)); + for (int i = 0; i < size; i++) { + arr[i] = vrshlq_s16(arr[i], dup_bits_n_16x8); + } +} + +static INLINE void flip_buf_ud_neon(int16x8_t *input, int size) { + int16x8_t temp[8]; + for (int i = 0; i < size; ++i) { + temp[i] = input[size - 1 - i]; + } + for (int i = 0; i < size; ++i) { + input[i] = temp[i]; + } +} + +static INLINE void load_buffer_32bit_to_16bit_neon(const int32_t *input, + int stride, + int16x8_t *const a, + int out_size) { + for (int i = 0; i < out_size; ++i) { + a[i] = vcombine_s16(vmovn_s32(vld1q_s32(input)), + vmovn_s32(vld1q_s32(input + 4))); + input += stride; + } +} + +static int16_t sqrt_2_list[TX_SIZES] = { 5793, 2 * 4096, 2 * 5793, 4 * 4096, + 4 * 5793 }; + +static INLINE void identity_txfm_round_neon(int16x8_t *input, int16x8_t *output, + int txw_idx, int8_t size, int bit) { + const int32x4_t dup_bits_n_32x4 = vdupq_n_s32((int32_t)(-bit)); + int16x4_t scale = vdup_n_s16(sqrt_2_list[txw_idx]); + int16x4_t low_i16, high_i16; + int32x4_t low_i32, high_i32; + for (int i = 0; i < size; i++) { + int32x4_t temp_out_low = vmull_s16(vget_low_s16(input[i]), scale); + int32x4_t temp_out_high = vmull_s16(vget_high_s16(input[i]), scale); + low_i32 = vrshlq_s32(vrshrq_n_s32(temp_out_low, 12), dup_bits_n_32x4); + high_i32 = vrshlq_s32(vrshrq_n_s32(temp_out_high, 12), dup_bits_n_32x4); + low_i16 = vqmovn_s32(low_i32); + high_i16 = vqmovn_s32(high_i32); + output[i] = vcombine_s16(low_i16, high_i16); + } +} + +static INLINE void round_shift_for_rect(int16x8_t *input, int16x8_t *output, + int size) { + int32x4_t out_low, out_high; + int16x4_t low, high; + + for (int z = 0; z < size; ++z) { + out_low = vmull_n_s16(vget_low_s16(input[z]), (int16_t)NewInvSqrt2); + out_high = vmull_n_s16(vget_high_s16(input[z]), (int16_t)NewInvSqrt2); + + low = vqrshrn_n_s32(out_low, (int32_t)NewSqrt2Bits); + high = vqrshrn_n_s32(out_high, (int32_t)NewSqrt2Bits); + + output[z] = vcombine_s16(low, high); + } +} + +static INLINE void idct16_low1_neon(int16x8_t *in, int16x8_t *out, + int8_t cos_bit) { + const int32_t *cospi = cospi_arr(cos_bit); + int16x8_t step1; + int32x4_t t32[2]; + + // stage 4 + + t32[0] = vmull_n_s16(vget_low_s16(in[0]), cospi[32]); + t32[1] = vmull_n_s16(vget_high_s16(in[0]), cospi[32]); + step1 = vcombine_s16(vrshrn_n_s32(t32[0], INV_COS_BIT), + vrshrn_n_s32(t32[1], INV_COS_BIT)); + + // stage 6 + // stage 7 + out[0] = step1; + out[1] = step1; + out[2] = step1; + out[3] = step1; + out[4] = step1; + out[5] = step1; + out[6] = step1; + out[7] = step1; + out[8] = step1; + out[9] = step1; + out[10] = step1; + out[11] = step1; + out[12] = step1; + out[13] = step1; + out[14] = step1; + out[15] = step1; +} + +static INLINE void idct16_neon(int16x8_t *in, int16x8_t *out, int8_t cos_bit) { + const int32_t *cospi = cospi_arr(cos_bit); + int16x8_t step1[16], step2[16]; + + const int16x4_t c0 = set_s16x4_neon((int16_t)cospi[4], (int16_t)cospi[60], + (int16_t)cospi[36], (int16_t)cospi[28]); + const int16x4_t c1 = set_s16x4_neon((int16_t)cospi[20], (int16_t)cospi[44], + (int16_t)cospi[52], (int16_t)cospi[12]); + const int16x4_t c2 = set_s16x4_neon((int16_t)cospi[8], (int16_t)cospi[56], + (int16_t)cospi[40], (int16_t)cospi[24]); + const int16x4_t c3 = set_s16x4_neon((int16_t)cospi[32], (int16_t)cospi[32], + (int16_t)cospi[16], (int16_t)cospi[48]); + const int16x4_t 
c4 = + set_s16x4_neon((int16_t)(-cospi[32]), (int16_t)(-cospi[32]), + (int16_t)(-cospi[16]), (int16_t)(-cospi[48])); + // stage 2 + + btf_16_lane_0_1_neon(in[1], in[15], c0, &step2[15], &step2[8]); + btf_16_lane_2_3_neon(in[9], in[7], c0, &step2[14], &step2[9]); + btf_16_lane_0_1_neon(in[5], in[11], c1, &step2[13], &step2[10]); + btf_16_lane_2_3_neon(in[13], in[3], c1, &step2[12], &step2[11]); + + step2[0] = in[0]; + step2[1] = in[8]; + step2[2] = in[4]; + step2[3] = in[12]; + step2[4] = in[2]; + step2[5] = in[10]; + step2[6] = in[6]; + step2[7] = in[14]; + + // stage 3 + + btf_16_lane_0_1_neon(step2[4], step2[7], c2, &step1[7], &step1[4]); + btf_16_lane_2_3_neon(step2[5], step2[6], c2, &step1[6], &step1[5]); + + step1[0] = step2[0]; + step1[1] = step2[1]; + step1[2] = step2[2]; + step1[3] = step2[3]; + step1[8] = vqaddq_s16(step2[8], step2[9]); + step1[9] = vqsubq_s16(step2[8], step2[9]); + step1[10] = vqsubq_s16(step2[11], step2[10]); + step1[11] = vqaddq_s16(step2[11], step2[10]); + step1[12] = vqaddq_s16(step2[12], step2[13]); + step1[13] = vqsubq_s16(step2[12], step2[13]); + step1[14] = vqsubq_s16(step2[15], step2[14]); + step1[15] = vqaddq_s16(step2[15], step2[14]); + + // stage 4 + + btf_16_lane_0_1_neon(step1[0], step1[1], c3, &step2[0], &step2[1]); + btf_16_lane_2_3_neon(step1[2], step1[3], c3, &step2[3], &step2[2]); + btf_16_lane_2_3_neon(step1[14], step1[9], c3, &step2[14], &step2[9]); + btf_16_lane_3_2_neon(step1[10], step1[13], c4, &step2[10], &step2[13]); + + step2[4] = vqaddq_s16(step1[4], step1[5]); + step2[5] = vqsubq_s16(step1[4], step1[5]); + step2[6] = vqsubq_s16(step1[7], step1[6]); + step2[7] = vqaddq_s16(step1[7], step1[6]); + step2[8] = step1[8]; + step2[11] = step1[11]; + step2[12] = step1[12]; + step2[15] = step1[15]; + + // stage 5 + + btf_16_lane_0_1_neon(step2[6], step2[5], c3, &step1[6], &step1[5]); + + step1[0] = vqaddq_s16(step2[0], step2[3]); + step1[1] = vqaddq_s16(step2[1], step2[2]); + step1[2] = vqsubq_s16(step2[1], step2[2]); + step1[3] = vqsubq_s16(step2[0], step2[3]); + step1[4] = step2[4]; + step1[7] = step2[7]; + step1[8] = vqaddq_s16(step2[8], step2[11]); + step1[9] = vqaddq_s16(step2[9], step2[10]); + step1[10] = vqsubq_s16(step2[9], step2[10]); + step1[11] = vqsubq_s16(step2[8], step2[11]); + step1[12] = vqsubq_s16(step2[15], step2[12]); + step1[13] = vqsubq_s16(step2[14], step2[13]); + step1[14] = vqaddq_s16(step2[14], step2[13]); + step1[15] = vqaddq_s16(step2[15], step2[12]); + + // stage 6 + + btf_16_lane_0_1_neon(step1[13], step1[10], c3, &step2[13], &step2[10]); + btf_16_lane_0_1_neon(step1[12], step1[11], c3, &step2[12], &step2[11]); + + step2[0] = vqaddq_s16(step1[0], step1[7]); + step2[1] = vqaddq_s16(step1[1], step1[6]); + step2[2] = vqaddq_s16(step1[2], step1[5]); + step2[3] = vqaddq_s16(step1[3], step1[4]); + step2[4] = vqsubq_s16(step1[3], step1[4]); + step2[5] = vqsubq_s16(step1[2], step1[5]); + step2[6] = vqsubq_s16(step1[1], step1[6]); + step2[7] = vqsubq_s16(step1[0], step1[7]); + step2[8] = step1[8]; + step2[9] = step1[9]; + step2[14] = step1[14]; + step2[15] = step1[15]; + + // stage 7 + out[0] = vqaddq_s16(step2[0], step2[15]); + out[1] = vqaddq_s16(step2[1], step2[14]); + out[2] = vqaddq_s16(step2[2], step2[13]); + out[3] = vqaddq_s16(step2[3], step2[12]); + out[4] = vqaddq_s16(step2[4], step2[11]); + out[5] = vqaddq_s16(step2[5], step2[10]); + out[6] = vqaddq_s16(step2[6], step2[9]); + out[7] = vqaddq_s16(step2[7], step2[8]); + out[8] = vqsubq_s16(step2[7], step2[8]); + out[9] = vqsubq_s16(step2[6], step2[9]); + out[10] = 
vqsubq_s16(step2[5], step2[10]); + out[11] = vqsubq_s16(step2[4], step2[11]); + out[12] = vqsubq_s16(step2[3], step2[12]); + out[13] = vqsubq_s16(step2[2], step2[13]); + out[14] = vqsubq_s16(step2[1], step2[14]); + out[15] = vqsubq_s16(step2[0], step2[15]); +} + +static INLINE void idct16_low8_neon(int16x8_t *in, int16x8_t *out, + int8_t cos_bit) { + const int32_t *cospi = cospi_arr(cos_bit); + int16x8_t step1[16], step2[16]; + const int16x4_t c0 = set_s16x4_neon((int16_t)cospi[32], (int16_t)cospi[32], + (int16_t)cospi[16], (int16_t)cospi[48]); + const int16x4_t c1 = + set_s16x4_neon((int16_t)(-cospi[32]), (int16_t)(-cospi[32]), + (int16_t)(-cospi[16]), (int16_t)(-cospi[48])); + + // stage 1 + // stage 2 + + step2[0] = in[0]; + step2[2] = in[4]; + step2[4] = in[2]; + step2[6] = in[6]; + + btf_16_neon(in[1], cospi[60], cospi[4], &step2[8], &step2[15]); + btf_16_neon(in[7], -cospi[36], cospi[28], &step2[9], &step2[14]); + btf_16_neon(in[5], cospi[44], cospi[20], &step2[10], &step2[13]); + btf_16_neon(in[3], -cospi[52], cospi[12], &step2[11], &step2[12]); + + // stage 3 + + btf_16_neon(step2[4], cospi[56], cospi[8], &step1[4], &step1[7]); + btf_16_neon(step2[6], -cospi[40], cospi[24], &step1[5], &step1[6]); + + step1[0] = step2[0]; + step1[2] = step2[2]; + step1[8] = vqaddq_s16(step2[8], step2[9]); + step1[9] = vqsubq_s16(step2[8], step2[9]); + step1[10] = vqsubq_s16(step2[11], step2[10]); + step1[11] = vqaddq_s16(step2[11], step2[10]); + step1[12] = vqaddq_s16(step2[12], step2[13]); + step1[13] = vqsubq_s16(step2[12], step2[13]); + step1[14] = vqsubq_s16(step2[15], step2[14]); + step1[15] = vqaddq_s16(step2[15], step2[14]); + + // stage 4 + + btf_16_neon(step1[0], cospi[32], cospi[32], &step2[0], &step2[1]); + btf_16_neon(step1[2], cospi[48], cospi[16], &step2[2], &step2[3]); + btf_16_lane_2_3_neon(step1[14], step1[9], c0, &step2[14], &step2[9]); + btf_16_lane_3_2_neon(step1[10], step1[13], c1, &step2[10], &step2[13]); + + step2[4] = vqaddq_s16(step1[4], step1[5]); + step2[5] = vqsubq_s16(step1[4], step1[5]); + step2[6] = vqsubq_s16(step1[7], step1[6]); + step2[7] = vqaddq_s16(step1[7], step1[6]); + step2[8] = step1[8]; + step2[11] = step1[11]; + step2[12] = step1[12]; + step2[15] = step1[15]; + + // stage 5 + + btf_16_lane_0_1_neon(step2[6], step2[5], c0, &step1[6], &step1[5]); + step1[0] = vqaddq_s16(step2[0], step2[3]); + step1[1] = vqaddq_s16(step2[1], step2[2]); + step1[2] = vqsubq_s16(step2[1], step2[2]); + step1[3] = vqsubq_s16(step2[0], step2[3]); + step1[4] = step2[4]; + step1[7] = step2[7]; + step1[8] = vqaddq_s16(step2[8], step2[11]); + step1[9] = vqaddq_s16(step2[9], step2[10]); + step1[10] = vqsubq_s16(step2[9], step2[10]); + step1[11] = vqsubq_s16(step2[8], step2[11]); + step1[12] = vqsubq_s16(step2[15], step2[12]); + step1[13] = vqsubq_s16(step2[14], step2[13]); + step1[14] = vqaddq_s16(step2[14], step2[13]); + step1[15] = vqaddq_s16(step2[15], step2[12]); + + // stage 6 + btf_16_lane_0_1_neon(step1[13], step1[10], c0, &step2[13], &step2[10]); + btf_16_lane_0_1_neon(step1[12], step1[11], c0, &step2[12], &step2[11]); + + step2[0] = vqaddq_s16(step1[0], step1[7]); + step2[1] = vqaddq_s16(step1[1], step1[6]); + step2[2] = vqaddq_s16(step1[2], step1[5]); + step2[3] = vqaddq_s16(step1[3], step1[4]); + step2[4] = vqsubq_s16(step1[3], step1[4]); + step2[5] = vqsubq_s16(step1[2], step1[5]); + step2[6] = vqsubq_s16(step1[1], step1[6]); + step2[7] = vqsubq_s16(step1[0], step1[7]); + step2[8] = step1[8]; + step2[9] = step1[9]; + step2[14] = step1[14]; + step2[15] = step1[15]; + + // 
stage 7 + + out[0] = vqaddq_s16(step2[0], step2[15]); + out[1] = vqaddq_s16(step2[1], step2[14]); + out[2] = vqaddq_s16(step2[2], step2[13]); + out[3] = vqaddq_s16(step2[3], step2[12]); + out[4] = vqaddq_s16(step2[4], step2[11]); + out[5] = vqaddq_s16(step2[5], step2[10]); + out[6] = vqaddq_s16(step2[6], step2[9]); + out[7] = vqaddq_s16(step2[7], step2[8]); + out[8] = vqsubq_s16(step2[7], step2[8]); + out[9] = vqsubq_s16(step2[6], step2[9]); + out[10] = vqsubq_s16(step2[5], step2[10]); + out[11] = vqsubq_s16(step2[4], step2[11]); + out[12] = vqsubq_s16(step2[3], step2[12]); + out[13] = vqsubq_s16(step2[2], step2[13]); + out[14] = vqsubq_s16(step2[1], step2[14]); + out[15] = vqsubq_s16(step2[0], step2[15]); +} + +static INLINE void iadst16_neon(int16x8_t *const in, int16x8_t *out, + int8_t cos_bit) { + const int32_t *cospi = cospi_arr(cos_bit); + + const int16x4_t c0 = set_s16x4_neon((int16_t)cospi[2], (int16_t)cospi[62], + (int16_t)cospi[10], (int16_t)cospi[54]); + const int16x4_t c1 = set_s16x4_neon((int16_t)cospi[18], (int16_t)cospi[46], + (int16_t)cospi[26], (int16_t)cospi[38]); + const int16x4_t c2 = set_s16x4_neon((int16_t)cospi[34], (int16_t)cospi[30], + (int16_t)cospi[42], (int16_t)cospi[22]); + const int16x4_t c3 = set_s16x4_neon((int16_t)cospi[50], (int16_t)cospi[14], + (int16_t)cospi[58], (int16_t)cospi[6]); + const int16x4_t c4 = set_s16x4_neon((int16_t)cospi[8], (int16_t)cospi[56], + (int16_t)cospi[40], (int16_t)cospi[24]); + const int16x4_t c5 = set_s16x4_neon((int16_t)cospi[32], (int16_t)cospi[32], + (int16_t)cospi[16], (int16_t)cospi[48]); + + int16x8_t x[16]; + int16x8_t t[14]; + int16x8_t s0, s1, s2, s3, s4, s5, s6, s7; + int16x8_t s8, s9, s10, s11, s12, s13, s14, s15; + + // Stage 1 + x[0] = in[15]; + x[1] = in[0]; + x[2] = in[13]; + x[3] = in[2]; + x[4] = in[11]; + x[5] = in[4]; + x[6] = in[9]; + x[7] = in[6]; + x[8] = in[7]; + x[9] = in[8]; + x[10] = in[5]; + x[11] = in[10]; + x[12] = in[3]; + x[13] = in[12]; + x[14] = in[1]; + x[15] = in[14]; + + // Stage 2 + btf_16_lane_0_1_neon(x[0], x[1], c0, &s0, &s1); + btf_16_lane_2_3_neon(x[2], x[3], c0, &s2, &s3); + btf_16_lane_0_1_neon(x[4], x[5], c1, &s4, &s5); + btf_16_lane_2_3_neon(x[6], x[7], c1, &s6, &s7); + btf_16_lane_0_1_neon(x[8], x[9], c2, &s8, &s9); + btf_16_lane_2_3_neon(x[10], x[11], c2, &s10, &s11); + btf_16_lane_0_1_neon(x[12], x[13], c3, &s12, &s13); + btf_16_lane_2_3_neon(x[14], x[15], c3, &s14, &s15); + + // Stage 3 + x[0] = vqaddq_s16(s0, s8); + x[1] = vqaddq_s16(s1, s9); + x[2] = vqaddq_s16(s2, s10); + x[3] = vqaddq_s16(s3, s11); + x[4] = vqaddq_s16(s4, s12); + x[5] = vqaddq_s16(s5, s13); + x[6] = vqaddq_s16(s6, s14); + x[7] = vqaddq_s16(s7, s15); + x[8] = vqsubq_s16(s0, s8); + x[9] = vqsubq_s16(s1, s9); + x[10] = vqsubq_s16(s2, s10); + x[11] = vqsubq_s16(s3, s11); + x[12] = vqsubq_s16(s4, s12); + x[13] = vqsubq_s16(s5, s13); + x[14] = vqsubq_s16(s6, s14); + x[15] = vqsubq_s16(s7, s15); + + // Stage 4 + t[0] = x[0]; + t[1] = x[1]; + t[2] = x[2]; + t[3] = x[3]; + t[4] = x[4]; + t[5] = x[5]; + t[6] = x[6]; + t[7] = x[7]; + btf_16_lane_0_1_neon(x[8], x[9], c4, &s8, &s9); + btf_16_lane_2_3_neon(x[10], x[11], c4, &s10, &s11); + btf_16_lane_1_0_neon(x[13], x[12], c4, &s13, &s12); + btf_16_lane_3_2_neon(x[15], x[14], c4, &s15, &s14); + + // Stage 5 + x[0] = vqaddq_s16(t[0], t[4]); + x[1] = vqaddq_s16(t[1], t[5]); + x[2] = vqaddq_s16(t[2], t[6]); + x[3] = vqaddq_s16(t[3], t[7]); + x[4] = vqsubq_s16(t[0], t[4]); + x[5] = vqsubq_s16(t[1], t[5]); + x[6] = vqsubq_s16(t[2], t[6]); + x[7] = vqsubq_s16(t[3], t[7]); + x[8] 
= vqaddq_s16(s8, s12); + x[9] = vqaddq_s16(s9, s13); + x[10] = vqaddq_s16(s10, s14); + x[11] = vqaddq_s16(s11, s15); + x[12] = vqsubq_s16(s8, s12); + x[13] = vqsubq_s16(s9, s13); + x[14] = vqsubq_s16(s10, s14); + x[15] = vqsubq_s16(s11, s15); + + // stage 6 + t[0] = x[0]; + t[1] = x[1]; + t[2] = x[2]; + t[3] = x[3]; + btf_16_lane_2_3_neon(x[4], x[5], c5, &s4, &s5); + btf_16_lane_3_2_neon(x[7], x[6], c5, &s7, &s6); + t[8] = x[8]; + t[9] = x[9]; + t[10] = x[10]; + t[11] = x[11]; + btf_16_lane_2_3_neon(x[12], x[13], c5, &s12, &s13); + btf_16_lane_3_2_neon(x[15], x[14], c5, &s15, &s14); + + // Stage 7 + x[0] = vqaddq_s16(t[0], t[2]); + x[1] = vqaddq_s16(t[1], t[3]); + x[2] = vqsubq_s16(t[0], t[2]); + x[3] = vqsubq_s16(t[1], t[3]); + x[4] = vqaddq_s16(s4, s6); + x[5] = vqaddq_s16(s5, s7); + x[6] = vqsubq_s16(s4, s6); + x[7] = vqsubq_s16(s5, s7); + x[8] = vqaddq_s16(t[8], t[10]); + x[9] = vqaddq_s16(t[9], t[11]); + x[10] = vqsubq_s16(t[8], t[10]); + x[11] = vqsubq_s16(t[9], t[11]); + x[12] = vqaddq_s16(s12, s14); + x[13] = vqaddq_s16(s13, s15); + x[14] = vqsubq_s16(s12, s14); + x[15] = vqsubq_s16(s13, s15); + + // Stage 8 + btf_16_half_neon(x + 2, c5); + btf_16_half_neon(x + 6, c5); + btf_16_half_neon(x + 10, c5); + btf_16_half_neon(x + 14, c5); + + // Stage 9 + out[0] = x[0]; + out[1] = vqnegq_s16(x[8]); + out[2] = x[12]; + out[3] = vqnegq_s16(x[4]); + out[4] = x[6]; + out[5] = vqnegq_s16(x[14]); + out[6] = x[10]; + out[7] = vqnegq_s16(x[2]); + out[8] = x[3]; + out[9] = vqnegq_s16(x[11]); + out[10] = x[15]; + out[11] = vqnegq_s16(x[7]); + out[12] = x[5]; + out[13] = vqnegq_s16(x[13]); + out[14] = x[9]; + out[15] = vqnegq_s16(x[1]); +} + +static INLINE void iadst16_low1_neon(int16x8_t *const in, int16x8_t *out, + int8_t cos_bit) { + const int32_t *cospi = cospi_arr(cos_bit); + const int16x4_t c0 = set_s16x4_neon((int16_t)cospi[8], (int16_t)cospi[56], + (int16_t)cospi[40], (int16_t)cospi[24]); + const int16x4_t c1 = set_s16x4_neon((int16_t)cospi[32], (int16_t)cospi[32], + (int16_t)cospi[16], (int16_t)cospi[48]); + + int16x8_t x[16]; + int16x8_t t[10]; + int16x8_t s0, s1, s4, s5; + int16x8_t s8, s9, s12, s13; + + // Stage 1 + x[1] = in[0]; + + // Stage 2 + btf_16_neon(x[1], cospi[62], -cospi[2], &s0, &s1); + + // Stage 3 + x[0] = s0; + x[1] = s1; + x[8] = s0; + x[9] = s1; + + // Stage 4 + t[0] = x[0]; + t[1] = x[1]; + btf_16_lane_0_1_neon(x[8], x[9], c0, &s8, &s9); + + // Stage 5 + x[0] = t[0]; + x[1] = t[1]; + x[4] = t[0]; + x[5] = t[1]; + x[8] = s8; + x[9] = s9; + x[12] = s8; + x[13] = s9; + + // stage 6 + t[0] = x[0]; + t[1] = x[1]; + btf_16_lane_2_3_neon(x[4], x[5], c1, &s4, &s5); + t[8] = x[8]; + t[9] = x[9]; + btf_16_lane_2_3_neon(x[12], x[13], c1, &s12, &s13); + + // Stage 7 + x[0] = t[0]; + x[1] = t[1]; + x[2] = t[0]; + x[3] = t[1]; + x[4] = s4; + x[5] = s5; + x[6] = s4; + x[7] = s5; + x[8] = t[8]; + x[9] = t[9]; + x[10] = t[8]; + x[11] = t[9]; + x[12] = s12; + x[13] = s13; + x[14] = s12; + x[15] = s13; + + // Stage 8 + btf_16_half_neon(x + 2, c1); + btf_16_half_neon(x + 6, c1); + btf_16_half_neon(x + 10, c1); + btf_16_half_neon(x + 14, c1); + + // Stage 9 + out[0] = x[0]; + out[1] = vqnegq_s16(x[8]); + out[2] = x[12]; + out[3] = vqnegq_s16(x[4]); + out[4] = x[6]; + out[5] = vqnegq_s16(x[14]); + out[6] = x[10]; + out[7] = vqnegq_s16(x[2]); + out[8] = x[3]; + out[9] = vqnegq_s16(x[11]); + out[10] = x[15]; + out[11] = vqnegq_s16(x[7]); + out[12] = x[5]; + out[13] = vqnegq_s16(x[13]); + out[14] = x[9]; + out[15] = vqnegq_s16(x[1]); +} + +static INLINE void iadst16_low8_neon(int16x8_t *const 
in, int16x8_t *out, + int8_t cos_bit) { + const int32_t *cospi = cospi_arr(cos_bit); + + const int16x4_t c0 = set_s16x4_neon((int16_t)cospi[8], (int16_t)cospi[56], + (int16_t)cospi[40], (int16_t)cospi[24]); + const int16x4_t c1 = set_s16x4_neon((int16_t)cospi[32], (int16_t)cospi[32], + (int16_t)cospi[16], (int16_t)cospi[48]); + + int16x8_t x[16]; + int16x8_t t[14]; + int16x8_t s0, s1, s2, s3, s4, s5, s6, s7; + int16x8_t s8, s9, s10, s11, s12, s13, s14, s15; + + // Stage 1 + x[1] = in[0]; + x[3] = in[2]; + x[5] = in[4]; + x[7] = in[6]; + x[8] = in[7]; + x[10] = in[5]; + x[12] = in[3]; + x[14] = in[1]; + + // Stage 2 + btf_16_neon(x[1], cospi[62], -cospi[2], &s0, &s1); + btf_16_neon(x[3], cospi[54], -cospi[10], &s2, &s3); + btf_16_neon(x[5], cospi[46], -cospi[18], &s4, &s5); + btf_16_neon(x[7], cospi[38], -cospi[26], &s6, &s7); + + btf_16_neon(x[8], cospi[34], cospi[30], &s8, &s9); + btf_16_neon(x[10], cospi[42], cospi[22], &s10, &s11); + btf_16_neon(x[12], cospi[50], cospi[14], &s12, &s13); + btf_16_neon(x[14], cospi[58], cospi[6], &s14, &s15); + + // Stage 3 + x[0] = vqaddq_s16(s0, s8); + x[1] = vqaddq_s16(s1, s9); + x[2] = vqaddq_s16(s2, s10); + x[3] = vqaddq_s16(s3, s11); + x[4] = vqaddq_s16(s4, s12); + x[5] = vqaddq_s16(s5, s13); + x[6] = vqaddq_s16(s6, s14); + x[7] = vqaddq_s16(s7, s15); + x[8] = vqsubq_s16(s0, s8); + x[9] = vqsubq_s16(s1, s9); + x[10] = vqsubq_s16(s2, s10); + x[11] = vqsubq_s16(s3, s11); + x[12] = vqsubq_s16(s4, s12); + x[13] = vqsubq_s16(s5, s13); + x[14] = vqsubq_s16(s6, s14); + x[15] = vqsubq_s16(s7, s15); + + // Stage 4 + t[0] = x[0]; + t[1] = x[1]; + t[2] = x[2]; + t[3] = x[3]; + t[4] = x[4]; + t[5] = x[5]; + t[6] = x[6]; + t[7] = x[7]; + btf_16_lane_0_1_neon(x[8], x[9], c0, &s8, &s9); + btf_16_lane_2_3_neon(x[10], x[11], c0, &s10, &s11); + btf_16_lane_1_0_neon(x[13], x[12], c0, &s13, &s12); + btf_16_lane_3_2_neon(x[15], x[14], c0, &s15, &s14); + + // Stage 5 + x[0] = vqaddq_s16(t[0], t[4]); + x[1] = vqaddq_s16(t[1], t[5]); + x[2] = vqaddq_s16(t[2], t[6]); + x[3] = vqaddq_s16(t[3], t[7]); + x[4] = vqsubq_s16(t[0], t[4]); + x[5] = vqsubq_s16(t[1], t[5]); + x[6] = vqsubq_s16(t[2], t[6]); + x[7] = vqsubq_s16(t[3], t[7]); + x[8] = vqaddq_s16(s8, s12); + x[9] = vqaddq_s16(s9, s13); + x[10] = vqaddq_s16(s10, s14); + x[11] = vqaddq_s16(s11, s15); + x[12] = vqsubq_s16(s8, s12); + x[13] = vqsubq_s16(s9, s13); + x[14] = vqsubq_s16(s10, s14); + x[15] = vqsubq_s16(s11, s15); + + // stage 6 + t[0] = x[0]; + t[1] = x[1]; + t[2] = x[2]; + t[3] = x[3]; + btf_16_lane_2_3_neon(x[4], x[5], c1, &s4, &s5); + btf_16_lane_3_2_neon(x[7], x[6], c1, &s7, &s6); + t[8] = x[8]; + t[9] = x[9]; + t[10] = x[10]; + t[11] = x[11]; + btf_16_lane_2_3_neon(x[12], x[13], c1, &s12, &s13); + btf_16_lane_3_2_neon(x[15], x[14], c1, &s15, &s14); + + // Stage 7 + x[0] = vqaddq_s16(t[0], t[2]); + x[1] = vqaddq_s16(t[1], t[3]); + x[2] = vqsubq_s16(t[0], t[2]); + x[3] = vqsubq_s16(t[1], t[3]); + x[4] = vqaddq_s16(s4, s6); + x[5] = vqaddq_s16(s5, s7); + x[6] = vqsubq_s16(s4, s6); + x[7] = vqsubq_s16(s5, s7); + x[8] = vqaddq_s16(t[8], t[10]); + x[9] = vqaddq_s16(t[9], t[11]); + x[10] = vqsubq_s16(t[8], t[10]); + x[11] = vqsubq_s16(t[9], t[11]); + x[12] = vqaddq_s16(s12, s14); + x[13] = vqaddq_s16(s13, s15); + x[14] = vqsubq_s16(s12, s14); + x[15] = vqsubq_s16(s13, s15); + + // Stage 8 + btf_16_half_neon(x + 2, c1); + btf_16_half_neon(x + 6, c1); + btf_16_half_neon(x + 10, c1); + btf_16_half_neon(x + 14, c1); + + // Stage 9 + out[0] = x[0]; + out[1] = vqnegq_s16(x[8]); + out[2] = x[12]; + out[3] = 
vqnegq_s16(x[4]); + out[4] = x[6]; + out[5] = vqnegq_s16(x[14]); + out[6] = x[10]; + out[7] = vqnegq_s16(x[2]); + out[8] = x[3]; + out[9] = vqnegq_s16(x[11]); + out[10] = x[15]; + out[11] = vqnegq_s16(x[7]); + out[12] = x[5]; + out[13] = vqnegq_s16(x[13]); + out[14] = x[9]; + out[15] = vqnegq_s16(x[1]); +} + +static INLINE void idct32_neon(int16x8_t *in, int16x8_t *out, int8_t cos_bit) { + const int32_t *cospi = cospi_arr(cos_bit); + int16x8_t step1[32], step2[32]; + + const int16x4_t c0 = set_s16x4_neon((int16_t)cospi[2], (int16_t)cospi[62], + (int16_t)cospi[34], (int16_t)cospi[30]); + const int16x4_t c1 = set_s16x4_neon((int16_t)cospi[18], (int16_t)cospi[46], + (int16_t)cospi[50], (int16_t)cospi[14]); + const int16x4_t c2 = set_s16x4_neon((int16_t)cospi[10], (int16_t)cospi[54], + (int16_t)cospi[42], (int16_t)cospi[22]); + const int16x4_t c3 = set_s16x4_neon((int16_t)cospi[26], (int16_t)cospi[38], + (int16_t)cospi[58], (int16_t)cospi[6]); + const int16x4_t c4 = set_s16x4_neon((int16_t)cospi[4], (int16_t)cospi[60], + (int16_t)cospi[36], (int16_t)cospi[28]); + const int16x4_t c5 = set_s16x4_neon((int16_t)cospi[20], (int16_t)cospi[44], + (int16_t)cospi[52], (int16_t)cospi[12]); + const int16x4_t c6 = set_s16x4_neon((int16_t)cospi[8], (int16_t)cospi[56], + (int16_t)cospi[40], (int16_t)cospi[24]); + const int16x4_t c7 = set_s16x4_neon((int16_t)cospi[32], (int16_t)cospi[32], + (int16_t)cospi[16], (int16_t)cospi[48]); + const int16x4_t c8 = + set_s16x4_neon((int16_t)(-cospi[8]), (int16_t)(-cospi[56]), + (int16_t)(-cospi[40]), (int16_t)(-cospi[24])); + const int16x4_t c9 = + set_s16x4_neon((int16_t)(-cospi[32]), (int16_t)(-cospi[32]), + (int16_t)(-cospi[16]), (int16_t)(-cospi[48])); + + // stage 2 + + btf_16_lane_0_1_neon(in[1], in[31], c0, &step2[31], &step2[16]); + btf_16_lane_2_3_neon(in[17], in[15], c0, &step2[30], &step2[17]); + btf_16_lane_0_1_neon(in[9], in[23], c1, &step2[29], &step2[18]); + btf_16_lane_2_3_neon(in[25], in[7], c1, &step2[28], &step2[19]); + btf_16_lane_0_1_neon(in[5], in[27], c2, &step2[27], &step2[20]); + btf_16_lane_2_3_neon(in[21], in[11], c2, &step2[26], &step2[21]); + btf_16_lane_0_1_neon(in[13], in[19], c3, &step2[25], &step2[22]); + btf_16_lane_2_3_neon(in[29], in[3], c3, &step2[24], &step2[23]); + + step2[0] = in[0]; + step2[1] = in[16]; + step2[2] = in[8]; + step2[3] = in[24]; + step2[4] = in[4]; + step2[5] = in[20]; + step2[6] = in[12]; + step2[7] = in[28]; + step2[8] = in[2]; + step2[9] = in[18]; + step2[10] = in[10]; + step2[11] = in[26]; + step2[12] = in[6]; + step2[13] = in[22]; + step2[14] = in[14]; + step2[15] = in[30]; + + // stage 3 + + btf_16_lane_0_1_neon(step2[8], step2[15], c4, &step1[15], &step1[8]); + btf_16_lane_2_3_neon(step2[9], step2[14], c4, &step1[14], &step1[9]); + btf_16_lane_0_1_neon(step2[10], step2[13], c5, &step1[13], &step1[10]); + btf_16_lane_2_3_neon(step2[11], step2[12], c5, &step1[12], &step1[11]); + + step1[0] = step2[0]; + step1[1] = step2[1]; + step1[2] = step2[2]; + step1[3] = step2[3]; + step1[4] = step2[4]; + step1[5] = step2[5]; + step1[6] = step2[6]; + step1[7] = step2[7]; + + step1[16] = vqaddq_s16(step2[16], step2[17]); + step1[17] = vqsubq_s16(step2[16], step2[17]); + step1[18] = vqsubq_s16(step2[19], step2[18]); + step1[19] = vqaddq_s16(step2[19], step2[18]); + step1[20] = vqaddq_s16(step2[20], step2[21]); + step1[21] = vqsubq_s16(step2[20], step2[21]); + step1[22] = vqsubq_s16(step2[23], step2[22]); + step1[23] = vqaddq_s16(step2[23], step2[22]); + step1[24] = vqaddq_s16(step2[24], step2[25]); + step1[25] = 
vqsubq_s16(step2[24], step2[25]); + step1[26] = vqsubq_s16(step2[27], step2[26]); + step1[27] = vqaddq_s16(step2[27], step2[26]); + step1[28] = vqaddq_s16(step2[28], step2[29]); + step1[29] = vqsubq_s16(step2[28], step2[29]); + step1[30] = vqsubq_s16(step2[31], step2[30]); + step1[31] = vqaddq_s16(step2[31], step2[30]); + + // stage 4 + + btf_16_lane_0_1_neon(step1[4], step1[7], c6, &step2[7], &step2[4]); + btf_16_lane_2_3_neon(step1[5], step1[6], c6, &step2[6], &step2[5]); + btf_16_lane_0_1_neon(step1[30], step1[17], c6, &step2[30], &step2[17]); + btf_16_lane_1_0_neon(step1[18], step1[29], c8, &step2[18], &step2[29]); + btf_16_lane_2_3_neon(step1[26], step1[21], c6, &step2[26], &step2[21]); + btf_16_lane_3_2_neon(step1[22], step1[25], c8, &step2[22], &step2[25]); + + step2[0] = step1[0]; + step2[1] = step1[1]; + step2[2] = step1[2]; + step2[3] = step1[3]; + step2[8] = vqaddq_s16(step1[8], step1[9]); + step2[9] = vqsubq_s16(step1[8], step1[9]); + step2[10] = vqsubq_s16(step1[11], step1[10]); + step2[11] = vqaddq_s16(step1[11], step1[10]); + step2[12] = vqaddq_s16(step1[12], step1[13]); + step2[13] = vqsubq_s16(step1[12], step1[13]); + step2[14] = vqsubq_s16(step1[15], step1[14]); + step2[15] = vqaddq_s16(step1[15], step1[14]); + step2[16] = step1[16]; + step2[19] = step1[19]; + step2[20] = step1[20]; + step2[23] = step1[23]; + step2[24] = step1[24]; + step2[27] = step1[27]; + step2[28] = step1[28]; + step2[31] = step1[31]; + + // stage 5 + + btf_16_lane_0_1_neon(step2[0], step2[1], c7, &step1[0], &step1[1]); + btf_16_lane_2_3_neon(step2[2], step2[3], c7, &step1[3], &step1[2]); + btf_16_lane_2_3_neon(step2[14], step2[9], c7, &step1[14], &step1[9]); + btf_16_lane_3_2_neon(step2[10], step2[13], c9, &step1[10], &step1[13]); + + step1[4] = vqaddq_s16(step2[4], step2[5]); + step1[5] = vqsubq_s16(step2[4], step2[5]); + step1[6] = vqsubq_s16(step2[7], step2[6]); + step1[7] = vqaddq_s16(step2[7], step2[6]); + step1[8] = step2[8]; + step1[11] = step2[11]; + step1[12] = step2[12]; + step1[15] = step2[15]; + step1[16] = vqaddq_s16(step2[16], step2[19]); + step1[17] = vqaddq_s16(step2[17], step2[18]); + step1[18] = vqsubq_s16(step2[17], step2[18]); + step1[19] = vqsubq_s16(step2[16], step2[19]); + step1[20] = vqsubq_s16(step2[23], step2[20]); + step1[21] = vqsubq_s16(step2[22], step2[21]); + step1[22] = vqaddq_s16(step2[22], step2[21]); + step1[23] = vqaddq_s16(step2[23], step2[20]); + step1[24] = vqaddq_s16(step2[24], step2[27]); + step1[25] = vqaddq_s16(step2[25], step2[26]); + step1[26] = vqsubq_s16(step2[25], step2[26]); + step1[27] = vqsubq_s16(step2[24], step2[27]); + step1[28] = vqsubq_s16(step2[31], step2[28]); + step1[29] = vqsubq_s16(step2[30], step2[29]); + step1[30] = vqaddq_s16(step2[30], step2[29]); + step1[31] = vqaddq_s16(step2[31], step2[28]); + + // stage 6 + + btf_16_lane_0_1_neon(step1[6], step1[5], c7, &step2[6], &step2[5]); + btf_16_lane_2_3_neon(step1[29], step1[18], c7, &step2[29], &step2[18]); + btf_16_lane_2_3_neon(step1[28], step1[19], c7, &step2[28], &step2[19]); + btf_16_lane_3_2_neon(step1[20], step1[27], c9, &step2[20], &step2[27]); + btf_16_lane_3_2_neon(step1[21], step1[26], c9, &step2[21], &step2[26]); + + step2[0] = vqaddq_s16(step1[0], step1[3]); + step2[1] = vqaddq_s16(step1[1], step1[2]); + step2[2] = vqsubq_s16(step1[1], step1[2]); + step2[3] = vqsubq_s16(step1[0], step1[3]); + step2[4] = step1[4]; + step2[7] = step1[7]; + step2[8] = vqaddq_s16(step1[8], step1[11]); + step2[9] = vqaddq_s16(step1[9], step1[10]); + step2[10] = vqsubq_s16(step1[9], step1[10]); + 
step2[11] = vqsubq_s16(step1[8], step1[11]); + step2[12] = vqsubq_s16(step1[15], step1[12]); + step2[13] = vqsubq_s16(step1[14], step1[13]); + step2[14] = vqaddq_s16(step1[14], step1[13]); + step2[15] = vqaddq_s16(step1[15], step1[12]); + step2[16] = step1[16]; + step2[17] = step1[17]; + step2[22] = step1[22]; + step2[23] = step1[23]; + step2[24] = step1[24]; + step2[25] = step1[25]; + step2[30] = step1[30]; + step2[31] = step1[31]; + + // stage 7 + + btf_16_lane_0_1_neon(step2[13], step2[10], c7, &step1[13], &step1[10]); + btf_16_lane_0_1_neon(step2[12], step2[11], c7, &step1[12], &step1[11]); + + step1[0] = vqaddq_s16(step2[0], step2[7]); + step1[1] = vqaddq_s16(step2[1], step2[6]); + step1[2] = vqaddq_s16(step2[2], step2[5]); + step1[3] = vqaddq_s16(step2[3], step2[4]); + step1[4] = vqsubq_s16(step2[3], step2[4]); + step1[5] = vqsubq_s16(step2[2], step2[5]); + step1[6] = vqsubq_s16(step2[1], step2[6]); + step1[7] = vqsubq_s16(step2[0], step2[7]); + step1[8] = step2[8]; + step1[9] = step2[9]; + step1[14] = step2[14]; + step1[15] = step2[15]; + step1[16] = vqaddq_s16(step2[16], step2[23]); + step1[17] = vqaddq_s16(step2[17], step2[22]); + step1[18] = vqaddq_s16(step2[18], step2[21]); + step1[19] = vqaddq_s16(step2[19], step2[20]); + step1[20] = vqsubq_s16(step2[19], step2[20]); + step1[21] = vqsubq_s16(step2[18], step2[21]); + step1[22] = vqsubq_s16(step2[17], step2[22]); + step1[23] = vqsubq_s16(step2[16], step2[23]); + step1[24] = vqsubq_s16(step2[31], step2[24]); + step1[25] = vqsubq_s16(step2[30], step2[25]); + step1[26] = vqsubq_s16(step2[29], step2[26]); + step1[27] = vqsubq_s16(step2[28], step2[27]); + step1[28] = vqaddq_s16(step2[27], step2[28]); + step1[29] = vqaddq_s16(step2[26], step2[29]); + step1[30] = vqaddq_s16(step2[25], step2[30]); + step1[31] = vqaddq_s16(step2[24], step2[31]); + + // stage 8 + + btf_16_lane_0_1_neon(step1[27], step1[20], c7, &step2[27], &step2[20]); + btf_16_lane_0_1_neon(step1[26], step1[21], c7, &step2[26], &step2[21]); + btf_16_lane_0_1_neon(step1[25], step1[22], c7, &step2[25], &step2[22]); + btf_16_lane_0_1_neon(step1[24], step1[23], c7, &step2[24], &step2[23]); + + step2[0] = vqaddq_s16(step1[0], step1[15]); + step2[1] = vqaddq_s16(step1[1], step1[14]); + step2[2] = vqaddq_s16(step1[2], step1[13]); + step2[3] = vqaddq_s16(step1[3], step1[12]); + step2[4] = vqaddq_s16(step1[4], step1[11]); + step2[5] = vqaddq_s16(step1[5], step1[10]); + step2[6] = vqaddq_s16(step1[6], step1[9]); + step2[7] = vqaddq_s16(step1[7], step1[8]); + step2[8] = vqsubq_s16(step1[7], step1[8]); + step2[9] = vqsubq_s16(step1[6], step1[9]); + step2[10] = vqsubq_s16(step1[5], step1[10]); + step2[11] = vqsubq_s16(step1[4], step1[11]); + step2[12] = vqsubq_s16(step1[3], step1[12]); + step2[13] = vqsubq_s16(step1[2], step1[13]); + step2[14] = vqsubq_s16(step1[1], step1[14]); + step2[15] = vqsubq_s16(step1[0], step1[15]); + step2[16] = step1[16]; + step2[17] = step1[17]; + step2[18] = step1[18]; + step2[19] = step1[19]; + step2[28] = step1[28]; + step2[29] = step1[29]; + step2[30] = step1[30]; + step2[31] = step1[31]; + + // stage 9 + + out[0] = vqaddq_s16(step2[0], step2[31]); + out[1] = vqaddq_s16(step2[1], step2[30]); + out[2] = vqaddq_s16(step2[2], step2[29]); + out[3] = vqaddq_s16(step2[3], step2[28]); + out[4] = vqaddq_s16(step2[4], step2[27]); + out[5] = vqaddq_s16(step2[5], step2[26]); + out[6] = vqaddq_s16(step2[6], step2[25]); + out[7] = vqaddq_s16(step2[7], step2[24]); + out[8] = vqaddq_s16(step2[8], step2[23]); + out[9] = vqaddq_s16(step2[9], step2[22]); + out[10] = 
vqaddq_s16(step2[10], step2[21]); + out[11] = vqaddq_s16(step2[11], step2[20]); + out[12] = vqaddq_s16(step2[12], step2[19]); + out[13] = vqaddq_s16(step2[13], step2[18]); + out[14] = vqaddq_s16(step2[14], step2[17]); + out[15] = vqaddq_s16(step2[15], step2[16]); + out[16] = vqsubq_s16(step2[15], step2[16]); + out[17] = vqsubq_s16(step2[14], step2[17]); + out[18] = vqsubq_s16(step2[13], step2[18]); + out[19] = vqsubq_s16(step2[12], step2[19]); + out[20] = vqsubq_s16(step2[11], step2[20]); + out[21] = vqsubq_s16(step2[10], step2[21]); + out[22] = vqsubq_s16(step2[9], step2[22]); + out[23] = vqsubq_s16(step2[8], step2[23]); + out[24] = vqsubq_s16(step2[7], step2[24]); + out[25] = vqsubq_s16(step2[6], step2[25]); + out[26] = vqsubq_s16(step2[5], step2[26]); + out[27] = vqsubq_s16(step2[4], step2[27]); + out[28] = vqsubq_s16(step2[3], step2[28]); + out[29] = vqsubq_s16(step2[2], step2[29]); + out[30] = vqsubq_s16(step2[1], step2[30]); + out[31] = vqsubq_s16(step2[0], step2[31]); +} + +static INLINE void idct32_low1_neon(int16x8_t *in, int16x8_t *out, + int8_t cos_bit) { + const int32_t *cospi = cospi_arr(cos_bit); + int16x8_t step1; + int32x4_t t32[2]; + + // stage 1 + // stage 2 + // stage 3 + // stage 4 + // stage 5 + + t32[0] = vmull_n_s16(vget_low_s16(in[0]), cospi[32]); + t32[1] = vmull_n_s16(vget_high_s16(in[0]), cospi[32]); + step1 = vcombine_s16(vrshrn_n_s32(t32[0], INV_COS_BIT), + vrshrn_n_s32(t32[1], INV_COS_BIT)); + + // stage 6 + // stage 7 + // stage 8 + // stage 9 + + out[0] = step1; + out[1] = step1; + out[2] = step1; + out[3] = step1; + out[4] = step1; + out[5] = step1; + out[6] = step1; + out[7] = step1; + out[8] = step1; + out[9] = step1; + out[10] = step1; + out[11] = step1; + out[12] = step1; + out[13] = step1; + out[14] = step1; + out[15] = step1; + out[16] = step1; + out[17] = step1; + out[18] = step1; + out[19] = step1; + out[20] = step1; + out[21] = step1; + out[22] = step1; + out[23] = step1; + out[24] = step1; + out[25] = step1; + out[26] = step1; + out[27] = step1; + out[28] = step1; + out[29] = step1; + out[30] = step1; + out[31] = step1; +} + +static INLINE void idct32_low8_neon(int16x8_t *in, int16x8_t *out, + int8_t cos_bit) { + const int32_t *cospi = cospi_arr(cos_bit); + int16x8_t step1[32], step2[32]; + int32x4_t t32[16]; + const int16x4_t c0 = set_s16x4_neon((int16_t)cospi[8], (int16_t)cospi[56], + (int16_t)cospi[40], (int16_t)cospi[24]); + const int16x4_t c1 = set_s16x4_neon((int16_t)cospi[32], (int16_t)cospi[32], + (int16_t)cospi[16], cospi[48]); + const int16x4_t c2 = + set_s16x4_neon((int16_t)(-cospi[8]), (int16_t)(-cospi[56]), + (int16_t)(-cospi[40]), (int16_t)(-cospi[24])); + const int16x4_t c3 = + set_s16x4_neon((int16_t)(-cospi[32]), (int16_t)(-cospi[32]), + (int16_t)(-cospi[16]), (int16_t)(-cospi[48])); + // stage 1 + // stage 2 + + step2[0] = in[0]; + step2[4] = in[4]; + step2[8] = in[2]; + step2[12] = in[6]; + + btf_16_neon(in[1], cospi[62], cospi[2], &step2[16], &step2[31]); + btf_16_neon(in[7], -cospi[50], cospi[14], &step2[19], &step2[28]); + btf_16_neon(in[5], cospi[54], cospi[10], &step2[20], &step2[27]); + btf_16_neon(in[3], -cospi[58], cospi[6], &step2[23], &step2[24]); + + // stage 3 + step1[0] = step2[0]; + step1[4] = step2[4]; + + btf_16_neon(step2[8], cospi[60], cospi[4], &step1[8], &step1[15]); + btf_16_neon(step2[12], -cospi[52], cospi[12], &step1[11], &step1[12]); + + step1[16] = step2[16]; + step1[17] = step2[16]; + step1[18] = step2[19]; + step1[19] = step2[19]; + step1[20] = step2[20]; + step1[21] = step2[20]; + step1[22] = 
step2[23]; + step1[23] = step2[23]; + step1[24] = step2[24]; + step1[25] = step2[24]; + step1[26] = step2[27]; + step1[27] = step2[27]; + step1[28] = step2[28]; + step1[29] = step2[28]; + step1[30] = step2[31]; + step1[31] = step2[31]; + + // stage 4 + + btf_16_neon(step1[4], cospi[56], cospi[8], &step2[4], &step2[7]); + btf_16_lane_0_1_neon(step1[30], step1[17], c0, &step2[30], &step2[17]); + btf_16_lane_1_0_neon(step1[18], step1[29], c2, &step2[18], &step2[29]); + btf_16_lane_2_3_neon(step1[26], step1[21], c0, &step2[26], &step2[21]); + btf_16_lane_3_2_neon(step1[22], step1[25], c2, &step2[22], &step2[25]); + + step2[0] = step1[0]; + step2[8] = step1[8]; + step2[9] = step1[8]; + step2[10] = step1[11]; + step2[11] = step1[11]; + step2[12] = step1[12]; + step2[13] = step1[12]; + step2[14] = step1[15]; + step2[15] = step1[15]; + step2[16] = step1[16]; + step2[19] = step1[19]; + step2[20] = step1[20]; + step2[23] = step1[23]; + step2[24] = step1[24]; + step2[27] = step1[27]; + step2[28] = step1[28]; + step2[31] = step1[31]; + + // stage 5 + + t32[0] = vmull_n_s16(vget_low_s16(step2[0]), cospi[32]); + t32[1] = vmull_n_s16(vget_high_s16(step2[0]), cospi[32]); + step1[0] = vcombine_s16(vrshrn_n_s32(t32[0], INV_COS_BIT), + vrshrn_n_s32(t32[1], INV_COS_BIT)); + + btf_16_lane_2_3_neon(step2[14], step2[9], c1, &step1[14], &step1[9]); + btf_16_lane_3_2_neon(step2[10], step2[13], c3, &step1[10], &step1[13]); + + step1[4] = step2[4]; + step1[5] = step2[4]; + step1[6] = step2[7]; + step1[7] = step2[7]; + step1[8] = step2[8]; + step1[11] = step2[11]; + step1[12] = step2[12]; + step1[15] = step2[15]; + step1[16] = vqaddq_s16(step2[16], step2[19]); + step1[17] = vqaddq_s16(step2[17], step2[18]); + step1[18] = vqsubq_s16(step2[17], step2[18]); + step1[19] = vqsubq_s16(step2[16], step2[19]); + step1[20] = vqsubq_s16(step2[23], step2[20]); + step1[21] = vqsubq_s16(step2[22], step2[21]); + step1[22] = vqaddq_s16(step2[22], step2[21]); + step1[23] = vqaddq_s16(step2[23], step2[20]); + step1[24] = vqaddq_s16(step2[24], step2[27]); + step1[25] = vqaddq_s16(step2[25], step2[26]); + step1[26] = vqsubq_s16(step2[25], step2[26]); + step1[27] = vqsubq_s16(step2[24], step2[27]); + step1[28] = vqsubq_s16(step2[31], step2[28]); + step1[29] = vqsubq_s16(step2[30], step2[29]); + step1[30] = vqaddq_s16(step2[30], step2[29]); + step1[31] = vqaddq_s16(step2[31], step2[28]); + + // stage 6 + + btf_16_lane_0_1_neon(step1[6], step1[5], c1, &step2[6], &step2[5]); + btf_16_lane_2_3_neon(step1[29], step1[18], c1, &step2[29], &step2[18]); + btf_16_lane_2_3_neon(step1[28], step1[19], c1, &step2[28], &step2[19]); + btf_16_lane_3_2_neon(step1[20], step1[27], c3, &step2[20], &step2[27]); + btf_16_lane_3_2_neon(step1[21], step1[26], c3, &step2[21], &step2[26]); + + step2[0] = step1[0]; + step2[1] = step1[0]; + step2[2] = step1[0]; + step2[3] = step1[0]; + step2[4] = step1[4]; + step2[7] = step1[7]; + step2[8] = vqaddq_s16(step1[8], step1[11]); + step2[9] = vqaddq_s16(step1[9], step1[10]); + step2[10] = vqsubq_s16(step1[9], step1[10]); + step2[11] = vqsubq_s16(step1[8], step1[11]); + step2[12] = vqsubq_s16(step1[15], step1[12]); + step2[13] = vqsubq_s16(step1[14], step1[13]); + step2[14] = vqaddq_s16(step1[14], step1[13]); + step2[15] = vqaddq_s16(step1[15], step1[12]); + step2[16] = step1[16]; + step2[17] = step1[17]; + step2[22] = step1[22]; + step2[23] = step1[23]; + step2[24] = step1[24]; + step2[25] = step1[25]; + step2[30] = step1[30]; + step2[31] = step1[31]; + + // stage 7 + + btf_16_lane_0_1_neon(step2[13], step2[10], c1, 
&step1[13], &step1[10]); + btf_16_lane_0_1_neon(step2[12], step2[11], c1, &step1[12], &step1[11]); + + step1[0] = vqaddq_s16(step2[0], step2[7]); + step1[1] = vqaddq_s16(step2[1], step2[6]); + step1[2] = vqaddq_s16(step2[2], step2[5]); + step1[3] = vqaddq_s16(step2[3], step2[4]); + step1[4] = vqsubq_s16(step2[3], step2[4]); + step1[5] = vqsubq_s16(step2[2], step2[5]); + step1[6] = vqsubq_s16(step2[1], step2[6]); + step1[7] = vqsubq_s16(step2[0], step2[7]); + step1[8] = step2[8]; + step1[9] = step2[9]; + step1[14] = step2[14]; + step1[15] = step2[15]; + step1[16] = vqaddq_s16(step2[16], step2[23]); + step1[17] = vqaddq_s16(step2[17], step2[22]); + step1[18] = vqaddq_s16(step2[18], step2[21]); + step1[19] = vqaddq_s16(step2[19], step2[20]); + step1[20] = vqsubq_s16(step2[19], step2[20]); + step1[21] = vqsubq_s16(step2[18], step2[21]); + step1[22] = vqsubq_s16(step2[17], step2[22]); + step1[23] = vqsubq_s16(step2[16], step2[23]); + step1[24] = vqsubq_s16(step2[31], step2[24]); + step1[25] = vqsubq_s16(step2[30], step2[25]); + step1[26] = vqsubq_s16(step2[29], step2[26]); + step1[27] = vqsubq_s16(step2[28], step2[27]); + step1[28] = vqaddq_s16(step2[27], step2[28]); + step1[29] = vqaddq_s16(step2[26], step2[29]); + step1[30] = vqaddq_s16(step2[25], step2[30]); + step1[31] = vqaddq_s16(step2[24], step2[31]); + + // stage 8 + + btf_16_lane_0_1_neon(step1[27], step1[20], c1, &step2[27], &step2[20]); + btf_16_lane_0_1_neon(step1[26], step1[21], c1, &step2[26], &step2[21]); + btf_16_lane_0_1_neon(step1[25], step1[22], c1, &step2[25], &step2[22]); + btf_16_lane_0_1_neon(step1[24], step1[23], c1, &step2[24], &step2[23]); + + step2[0] = vqaddq_s16(step1[0], step1[15]); + step2[1] = vqaddq_s16(step1[1], step1[14]); + step2[2] = vqaddq_s16(step1[2], step1[13]); + step2[3] = vqaddq_s16(step1[3], step1[12]); + step2[4] = vqaddq_s16(step1[4], step1[11]); + step2[5] = vqaddq_s16(step1[5], step1[10]); + step2[6] = vqaddq_s16(step1[6], step1[9]); + step2[7] = vqaddq_s16(step1[7], step1[8]); + step2[8] = vqsubq_s16(step1[7], step1[8]); + step2[9] = vqsubq_s16(step1[6], step1[9]); + step2[10] = vqsubq_s16(step1[5], step1[10]); + step2[11] = vqsubq_s16(step1[4], step1[11]); + step2[12] = vqsubq_s16(step1[3], step1[12]); + step2[13] = vqsubq_s16(step1[2], step1[13]); + step2[14] = vqsubq_s16(step1[1], step1[14]); + step2[15] = vqsubq_s16(step1[0], step1[15]); + step2[16] = step1[16]; + step2[17] = step1[17]; + step2[18] = step1[18]; + step2[19] = step1[19]; + step2[28] = step1[28]; + step2[29] = step1[29]; + step2[30] = step1[30]; + step2[31] = step1[31]; + + // stage 9 + + out[0] = vqaddq_s16(step2[0], step2[31]); + out[1] = vqaddq_s16(step2[1], step2[30]); + out[2] = vqaddq_s16(step2[2], step2[29]); + out[3] = vqaddq_s16(step2[3], step2[28]); + out[4] = vqaddq_s16(step2[4], step2[27]); + out[5] = vqaddq_s16(step2[5], step2[26]); + out[6] = vqaddq_s16(step2[6], step2[25]); + out[7] = vqaddq_s16(step2[7], step2[24]); + out[8] = vqaddq_s16(step2[8], step2[23]); + out[9] = vqaddq_s16(step2[9], step2[22]); + out[10] = vqaddq_s16(step2[10], step2[21]); + out[11] = vqaddq_s16(step2[11], step2[20]); + out[12] = vqaddq_s16(step2[12], step2[19]); + out[13] = vqaddq_s16(step2[13], step2[18]); + out[14] = vqaddq_s16(step2[14], step2[17]); + out[15] = vqaddq_s16(step2[15], step2[16]); + out[16] = vqsubq_s16(step2[15], step2[16]); + out[17] = vqsubq_s16(step2[14], step2[17]); + out[18] = vqsubq_s16(step2[13], step2[18]); + out[19] = vqsubq_s16(step2[12], step2[19]); + out[20] = vqsubq_s16(step2[11], step2[20]); + out[21] = 
vqsubq_s16(step2[10], step2[21]); + out[22] = vqsubq_s16(step2[9], step2[22]); + out[23] = vqsubq_s16(step2[8], step2[23]); + out[24] = vqsubq_s16(step2[7], step2[24]); + out[25] = vqsubq_s16(step2[6], step2[25]); + out[26] = vqsubq_s16(step2[5], step2[26]); + out[27] = vqsubq_s16(step2[4], step2[27]); + out[28] = vqsubq_s16(step2[3], step2[28]); + out[29] = vqsubq_s16(step2[2], step2[29]); + out[30] = vqsubq_s16(step2[1], step2[30]); + out[31] = vqsubq_s16(step2[0], step2[31]); +} + +static INLINE void idct32_low16_neon(int16x8_t *in, int16x8_t *out, + int8_t cos_bit) { + const int32_t *cospi = cospi_arr(cos_bit); + int16x8_t step1[32], step2[32]; + int32x4_t t32[16]; + const int16x4_t c0 = set_s16x4_neon((int16_t)cospi[8], (int16_t)cospi[56], + (int16_t)cospi[40], (int16_t)cospi[24]); + const int16x4_t c1 = set_s16x4_neon((int16_t)cospi[32], (int16_t)cospi[32], + (int16_t)cospi[16], (int16_t)cospi[48]); + const int16x4_t c2 = + set_s16x4_neon((int16_t)(-cospi[8]), (int16_t)(-cospi[56]), + (int16_t)(-cospi[40]), (int16_t)(-cospi[24])); + const int16x4_t c3 = + set_s16x4_neon((int16_t)(-cospi[32]), (int16_t)(-cospi[32]), + (int16_t)(-cospi[16]), (int16_t)(-cospi[48])); + + // stage 1 + // stage 2 + + btf_16_neon(in[1], cospi[62], cospi[2], &step2[16], &step2[31]); + btf_16_neon(in[15], -cospi[34], cospi[30], &step2[17], &step2[30]); + btf_16_neon(in[9], cospi[46], cospi[18], &step2[18], &step2[29]); + btf_16_neon(in[7], -cospi[50], cospi[14], &step2[19], &step2[28]); + btf_16_neon(in[5], cospi[54], cospi[10], &step2[20], &step2[27]); + btf_16_neon(in[11], -cospi[42], cospi[22], &step2[21], &step2[26]); + btf_16_neon(in[13], cospi[38], cospi[26], &step2[22], &step2[25]); + btf_16_neon(in[3], -cospi[58], cospi[6], &step2[23], &step2[24]); + + step2[0] = in[0]; + step2[2] = in[8]; + step2[4] = in[4]; + step2[6] = in[12]; + step2[8] = in[2]; + step2[10] = in[10]; + step2[12] = in[6]; + step2[14] = in[14]; + + // stage 3 + + btf_16_neon(step2[8], cospi[60], cospi[4], &step1[8], &step1[15]); + btf_16_neon(step2[14], -cospi[36], cospi[28], &step1[9], &step1[14]); + btf_16_neon(step2[10], cospi[44], cospi[20], &step1[10], &step1[13]); + btf_16_neon(step2[12], -cospi[52], cospi[12], &step1[11], &step1[12]); + + step1[0] = step2[0]; + step1[2] = step2[2]; + step1[4] = step2[4]; + step1[6] = step2[6]; + step1[16] = vqaddq_s16(step2[16], step2[17]); + step1[17] = vqsubq_s16(step2[16], step2[17]); + step1[18] = vqsubq_s16(step2[19], step2[18]); + step1[19] = vqaddq_s16(step2[19], step2[18]); + step1[20] = vqaddq_s16(step2[20], step2[21]); + step1[21] = vqsubq_s16(step2[20], step2[21]); + step1[22] = vqsubq_s16(step2[23], step2[22]); + step1[23] = vqaddq_s16(step2[23], step2[22]); + step1[24] = vqaddq_s16(step2[24], step2[25]); + step1[25] = vqsubq_s16(step2[24], step2[25]); + step1[26] = vqsubq_s16(step2[27], step2[26]); + step1[27] = vqaddq_s16(step2[27], step2[26]); + step1[28] = vqaddq_s16(step2[28], step2[29]); + step1[29] = vqsubq_s16(step2[28], step2[29]); + step1[30] = vqsubq_s16(step2[31], step2[30]); + step1[31] = vqaddq_s16(step2[31], step2[30]); + + // stage 4 + + btf_16_neon(step1[4], cospi[56], cospi[8], &step2[4], &step2[7]); + btf_16_neon(step1[6], -cospi[40], cospi[24], &step2[5], &step2[6]); + btf_16_lane_0_1_neon(step1[30], step1[17], c0, &step2[30], &step2[17]); + btf_16_lane_1_0_neon(step1[18], step1[29], c2, &step2[18], &step2[29]); + btf_16_lane_2_3_neon(step1[26], step1[21], c0, &step2[26], &step2[21]); + btf_16_lane_3_2_neon(step1[22], step1[25], c2, &step2[22], &step2[25]); + + 
step2[0] = step1[0]; + step2[2] = step1[2]; + step2[8] = vqaddq_s16(step1[8], step1[9]); + step2[9] = vqsubq_s16(step1[8], step1[9]); + step2[10] = vqsubq_s16(step1[11], step1[10]); + step2[11] = vqaddq_s16(step1[11], step1[10]); + step2[12] = vqaddq_s16(step1[12], step1[13]); + step2[13] = vqsubq_s16(step1[12], step1[13]); + step2[14] = vqsubq_s16(step1[15], step1[14]); + step2[15] = vqaddq_s16(step1[15], step1[14]); + step2[16] = step1[16]; + step2[19] = step1[19]; + step2[20] = step1[20]; + step2[23] = step1[23]; + step2[24] = step1[24]; + step2[27] = step1[27]; + step2[28] = step1[28]; + step2[31] = step1[31]; + + // stage 5 + + t32[0] = vmull_n_s16(vget_low_s16(step2[0]), cospi[32]); + t32[1] = vmull_n_s16(vget_high_s16(step2[0]), cospi[32]); + + step1[0] = vcombine_s16(vrshrn_n_s32(t32[0], INV_COS_BIT), + vrshrn_n_s32(t32[1], INV_COS_BIT)); + + btf_16_neon(step2[2], cospi[48], cospi[16], &step1[2], &step1[3]); + btf_16_lane_2_3_neon(step2[14], step2[9], c1, &step1[14], &step1[9]); + btf_16_lane_3_2_neon(step2[10], step2[13], c3, &step1[10], &step1[13]); + + step1[4] = vqaddq_s16(step2[4], step2[5]); + step1[5] = vqsubq_s16(step2[4], step2[5]); + step1[6] = vqsubq_s16(step2[7], step2[6]); + step1[7] = vqaddq_s16(step2[7], step2[6]); + step1[8] = step2[8]; + step1[11] = step2[11]; + step1[12] = step2[12]; + step1[15] = step2[15]; + step1[16] = vqaddq_s16(step2[16], step2[19]); + step1[17] = vqaddq_s16(step2[17], step2[18]); + step1[18] = vqsubq_s16(step2[17], step2[18]); + step1[19] = vqsubq_s16(step2[16], step2[19]); + step1[20] = vqsubq_s16(step2[23], step2[20]); + step1[21] = vqsubq_s16(step2[22], step2[21]); + step1[22] = vqaddq_s16(step2[22], step2[21]); + step1[23] = vqaddq_s16(step2[23], step2[20]); + step1[24] = vqaddq_s16(step2[24], step2[27]); + step1[25] = vqaddq_s16(step2[25], step2[26]); + step1[26] = vqsubq_s16(step2[25], step2[26]); + step1[27] = vqsubq_s16(step2[24], step2[27]); + step1[28] = vqsubq_s16(step2[31], step2[28]); + step1[29] = vqsubq_s16(step2[30], step2[29]); + step1[30] = vqaddq_s16(step2[30], step2[29]); + step1[31] = vqaddq_s16(step2[31], step2[28]); + + // stage 6 + + btf_16_lane_0_1_neon(step1[6], step1[5], c1, &step2[6], &step2[5]); + btf_16_lane_2_3_neon(step1[29], step1[18], c1, &step2[29], &step2[18]); + btf_16_lane_2_3_neon(step1[28], step1[19], c1, &step2[28], &step2[19]); + btf_16_lane_3_2_neon(step1[20], step1[27], c3, &step2[20], &step2[27]); + btf_16_lane_3_2_neon(step1[21], step1[26], c3, &step2[21], &step2[26]); + + step2[0] = vqaddq_s16(step1[0], step1[3]); + step2[1] = vqaddq_s16(step1[0], step1[2]); + step2[2] = vqsubq_s16(step1[0], step1[2]); + step2[3] = vqsubq_s16(step1[0], step1[3]); + step2[4] = step1[4]; + step2[7] = step1[7]; + step2[8] = vqaddq_s16(step1[8], step1[11]); + step2[9] = vqaddq_s16(step1[9], step1[10]); + step2[10] = vqsubq_s16(step1[9], step1[10]); + step2[11] = vqsubq_s16(step1[8], step1[11]); + step2[12] = vqsubq_s16(step1[15], step1[12]); + step2[13] = vqsubq_s16(step1[14], step1[13]); + step2[14] = vqaddq_s16(step1[14], step1[13]); + step2[15] = vqaddq_s16(step1[15], step1[12]); + step2[16] = step1[16]; + step2[17] = step1[17]; + step2[22] = step1[22]; + step2[23] = step1[23]; + step2[24] = step1[24]; + step2[25] = step1[25]; + step2[30] = step1[30]; + step2[31] = step1[31]; + + // stage 7 + + btf_16_lane_0_1_neon(step2[13], step2[10], c1, &step1[13], &step1[10]); + btf_16_lane_0_1_neon(step2[12], step2[11], c1, &step1[12], &step1[11]); + + step1[0] = vqaddq_s16(step2[0], step2[7]); + step1[1] = 
vqaddq_s16(step2[1], step2[6]); + step1[2] = vqaddq_s16(step2[2], step2[5]); + step1[3] = vqaddq_s16(step2[3], step2[4]); + step1[4] = vqsubq_s16(step2[3], step2[4]); + step1[5] = vqsubq_s16(step2[2], step2[5]); + step1[6] = vqsubq_s16(step2[1], step2[6]); + step1[7] = vqsubq_s16(step2[0], step2[7]); + step1[8] = step2[8]; + step1[9] = step2[9]; + step1[14] = step2[14]; + step1[15] = step2[15]; + step1[16] = vqaddq_s16(step2[16], step2[23]); + step1[17] = vqaddq_s16(step2[17], step2[22]); + step1[18] = vqaddq_s16(step2[18], step2[21]); + step1[19] = vqaddq_s16(step2[19], step2[20]); + step1[20] = vqsubq_s16(step2[19], step2[20]); + step1[21] = vqsubq_s16(step2[18], step2[21]); + step1[22] = vqsubq_s16(step2[17], step2[22]); + step1[23] = vqsubq_s16(step2[16], step2[23]); + step1[24] = vqsubq_s16(step2[31], step2[24]); + step1[25] = vqsubq_s16(step2[30], step2[25]); + step1[26] = vqsubq_s16(step2[29], step2[26]); + step1[27] = vqsubq_s16(step2[28], step2[27]); + step1[28] = vqaddq_s16(step2[27], step2[28]); + step1[29] = vqaddq_s16(step2[26], step2[29]); + step1[30] = vqaddq_s16(step2[25], step2[30]); + step1[31] = vqaddq_s16(step2[24], step2[31]); + + // stage 8 + + btf_16_lane_0_1_neon(step1[27], step1[20], c1, &step2[27], &step2[20]); + btf_16_lane_0_1_neon(step1[26], step1[21], c1, &step2[26], &step2[21]); + btf_16_lane_0_1_neon(step1[25], step1[22], c1, &step2[25], &step2[22]); + btf_16_lane_0_1_neon(step1[24], step1[23], c1, &step2[24], &step2[23]); + + step2[0] = vqaddq_s16(step1[0], step1[15]); + step2[1] = vqaddq_s16(step1[1], step1[14]); + step2[2] = vqaddq_s16(step1[2], step1[13]); + step2[3] = vqaddq_s16(step1[3], step1[12]); + step2[4] = vqaddq_s16(step1[4], step1[11]); + step2[5] = vqaddq_s16(step1[5], step1[10]); + step2[6] = vqaddq_s16(step1[6], step1[9]); + step2[7] = vqaddq_s16(step1[7], step1[8]); + step2[8] = vqsubq_s16(step1[7], step1[8]); + step2[9] = vqsubq_s16(step1[6], step1[9]); + step2[10] = vqsubq_s16(step1[5], step1[10]); + step2[11] = vqsubq_s16(step1[4], step1[11]); + step2[12] = vqsubq_s16(step1[3], step1[12]); + step2[13] = vqsubq_s16(step1[2], step1[13]); + step2[14] = vqsubq_s16(step1[1], step1[14]); + step2[15] = vqsubq_s16(step1[0], step1[15]); + step2[16] = step1[16]; + step2[17] = step1[17]; + step2[18] = step1[18]; + step2[19] = step1[19]; + step2[28] = step1[28]; + step2[29] = step1[29]; + step2[30] = step1[30]; + step2[31] = step1[31]; + + // stage 9 + + out[0] = vqaddq_s16(step2[0], step2[31]); + out[1] = vqaddq_s16(step2[1], step2[30]); + out[2] = vqaddq_s16(step2[2], step2[29]); + out[3] = vqaddq_s16(step2[3], step2[28]); + out[4] = vqaddq_s16(step2[4], step2[27]); + out[5] = vqaddq_s16(step2[5], step2[26]); + out[6] = vqaddq_s16(step2[6], step2[25]); + out[7] = vqaddq_s16(step2[7], step2[24]); + out[8] = vqaddq_s16(step2[8], step2[23]); + out[9] = vqaddq_s16(step2[9], step2[22]); + out[10] = vqaddq_s16(step2[10], step2[21]); + out[11] = vqaddq_s16(step2[11], step2[20]); + out[12] = vqaddq_s16(step2[12], step2[19]); + out[13] = vqaddq_s16(step2[13], step2[18]); + out[14] = vqaddq_s16(step2[14], step2[17]); + out[15] = vqaddq_s16(step2[15], step2[16]); + out[16] = vqsubq_s16(step2[15], step2[16]); + out[17] = vqsubq_s16(step2[14], step2[17]); + out[18] = vqsubq_s16(step2[13], step2[18]); + out[19] = vqsubq_s16(step2[12], step2[19]); + out[20] = vqsubq_s16(step2[11], step2[20]); + out[21] = vqsubq_s16(step2[10], step2[21]); + out[22] = vqsubq_s16(step2[9], step2[22]); + out[23] = vqsubq_s16(step2[8], step2[23]); + out[24] = vqsubq_s16(step2[7], 
step2[24]); + out[25] = vqsubq_s16(step2[6], step2[25]); + out[26] = vqsubq_s16(step2[5], step2[26]); + out[27] = vqsubq_s16(step2[4], step2[27]); + out[28] = vqsubq_s16(step2[3], step2[28]); + out[29] = vqsubq_s16(step2[2], step2[29]); + out[30] = vqsubq_s16(step2[1], step2[30]); + out[31] = vqsubq_s16(step2[0], step2[31]); +} +static INLINE void idct64_stage9_neon(int16x8_t *step2, int16x8_t *step1, + int8_t cos_bit) { + const int32_t *cospi = cospi_arr(cos_bit); + const int16x4_t c3 = set_s16x4_neon((int16_t)cospi[32], (int16_t)cospi[32], + (int16_t)cospi[16], (int16_t)cospi[48]); + + btf_16_lane_0_1_neon(step2[27], step2[20], c3, &step1[27], &step1[20]); + btf_16_lane_0_1_neon(step2[26], step2[21], c3, &step1[26], &step1[21]); + btf_16_lane_0_1_neon(step2[25], step2[22], c3, &step1[25], &step1[22]); + btf_16_lane_0_1_neon(step2[24], step2[23], c3, &step1[24], &step1[23]); + + step1[0] = vqaddq_s16(step2[0], step2[15]); + step1[1] = vqaddq_s16(step2[1], step2[14]); + step1[2] = vqaddq_s16(step2[2], step2[13]); + step1[3] = vqaddq_s16(step2[3], step2[12]); + step1[4] = vqaddq_s16(step2[4], step2[11]); + step1[5] = vqaddq_s16(step2[5], step2[10]); + step1[6] = vqaddq_s16(step2[6], step2[9]); + step1[7] = vqaddq_s16(step2[7], step2[8]); + step1[8] = vqsubq_s16(step2[7], step2[8]); + step1[9] = vqsubq_s16(step2[6], step2[9]); + step1[10] = vqsubq_s16(step2[5], step2[10]); + step1[11] = vqsubq_s16(step2[4], step2[11]); + step1[12] = vqsubq_s16(step2[3], step2[12]); + step1[13] = vqsubq_s16(step2[2], step2[13]); + step1[14] = vqsubq_s16(step2[1], step2[14]); + step1[15] = vqsubq_s16(step2[0], step2[15]); + step1[16] = step2[16]; + step1[17] = step2[17]; + step1[18] = step2[18]; + step1[19] = step2[19]; + step1[28] = step2[28]; + step1[29] = step2[29]; + step1[30] = step2[30]; + step1[31] = step2[31]; + step1[32] = vqaddq_s16(step2[32], step2[47]); + step1[33] = vqaddq_s16(step2[33], step2[46]); + step1[34] = vqaddq_s16(step2[34], step2[45]); + step1[35] = vqaddq_s16(step2[35], step2[44]); + step1[36] = vqaddq_s16(step2[36], step2[43]); + step1[37] = vqaddq_s16(step2[37], step2[42]); + step1[38] = vqaddq_s16(step2[38], step2[41]); + step1[39] = vqaddq_s16(step2[39], step2[40]); + step1[40] = vqsubq_s16(step2[39], step2[40]); + step1[41] = vqsubq_s16(step2[38], step2[41]); + step1[42] = vqsubq_s16(step2[37], step2[42]); + step1[43] = vqsubq_s16(step2[36], step2[43]); + step1[44] = vqsubq_s16(step2[35], step2[44]); + step1[45] = vqsubq_s16(step2[34], step2[45]); + step1[46] = vqsubq_s16(step2[33], step2[46]); + step1[47] = vqsubq_s16(step2[32], step2[47]); + step1[48] = vqsubq_s16(step2[63], step2[48]); + step1[49] = vqsubq_s16(step2[62], step2[49]); + step1[50] = vqsubq_s16(step2[61], step2[50]); + step1[51] = vqsubq_s16(step2[60], step2[51]); + step1[52] = vqsubq_s16(step2[59], step2[52]); + step1[53] = vqsubq_s16(step2[58], step2[53]); + step1[54] = vqsubq_s16(step2[57], step2[54]); + step1[55] = vqsubq_s16(step2[56], step2[55]); + step1[56] = vqaddq_s16(step2[56], step2[55]); + step1[57] = vqaddq_s16(step2[57], step2[54]); + step1[58] = vqaddq_s16(step2[58], step2[53]); + step1[59] = vqaddq_s16(step2[59], step2[52]); + step1[60] = vqaddq_s16(step2[60], step2[51]); + step1[61] = vqaddq_s16(step2[61], step2[50]); + step1[62] = vqaddq_s16(step2[62], step2[49]); + step1[63] = vqaddq_s16(step2[63], step2[48]); +} + +static INLINE void idct64_stage10_neon(int16x8_t *step1, int16x8_t *step2, + int8_t cos_bit) { + const int32_t *cospi = cospi_arr(cos_bit); + const int16x4_t c3 = 
set_s16x4_neon((int16_t)cospi[32], (int16_t)cospi[32], + (int16_t)cospi[16], (int16_t)cospi[48]); + + btf_16_lane_0_1_neon(step1[55], step1[40], c3, &step2[55], &step2[40]); + btf_16_lane_0_1_neon(step1[54], step1[41], c3, &step2[54], &step2[41]); + btf_16_lane_0_1_neon(step1[53], step1[42], c3, &step2[53], &step2[42]); + btf_16_lane_0_1_neon(step1[52], step1[43], c3, &step2[52], &step2[43]); + btf_16_lane_0_1_neon(step1[51], step1[44], c3, &step2[51], &step2[44]); + btf_16_lane_0_1_neon(step1[50], step1[45], c3, &step2[50], &step2[45]); + btf_16_lane_0_1_neon(step1[49], step1[46], c3, &step2[49], &step2[46]); + btf_16_lane_0_1_neon(step1[48], step1[47], c3, &step2[48], &step2[47]); + + step2[0] = vqaddq_s16(step1[0], step1[31]); + step2[1] = vqaddq_s16(step1[1], step1[30]); + step2[2] = vqaddq_s16(step1[2], step1[29]); + step2[3] = vqaddq_s16(step1[3], step1[28]); + step2[4] = vqaddq_s16(step1[4], step1[27]); + step2[5] = vqaddq_s16(step1[5], step1[26]); + step2[6] = vqaddq_s16(step1[6], step1[25]); + step2[7] = vqaddq_s16(step1[7], step1[24]); + step2[8] = vqaddq_s16(step1[8], step1[23]); + step2[9] = vqaddq_s16(step1[9], step1[22]); + step2[10] = vqaddq_s16(step1[10], step1[21]); + step2[11] = vqaddq_s16(step1[11], step1[20]); + step2[12] = vqaddq_s16(step1[12], step1[19]); + step2[13] = vqaddq_s16(step1[13], step1[18]); + step2[14] = vqaddq_s16(step1[14], step1[17]); + step2[15] = vqaddq_s16(step1[15], step1[16]); + step2[16] = vqsubq_s16(step1[15], step1[16]); + step2[17] = vqsubq_s16(step1[14], step1[17]); + step2[18] = vqsubq_s16(step1[13], step1[18]); + step2[19] = vqsubq_s16(step1[12], step1[19]); + step2[20] = vqsubq_s16(step1[11], step1[20]); + step2[21] = vqsubq_s16(step1[10], step1[21]); + step2[22] = vqsubq_s16(step1[9], step1[22]); + step2[23] = vqsubq_s16(step1[8], step1[23]); + step2[24] = vqsubq_s16(step1[7], step1[24]); + step2[25] = vqsubq_s16(step1[6], step1[25]); + step2[26] = vqsubq_s16(step1[5], step1[26]); + step2[27] = vqsubq_s16(step1[4], step1[27]); + step2[28] = vqsubq_s16(step1[3], step1[28]); + step2[29] = vqsubq_s16(step1[2], step1[29]); + step2[30] = vqsubq_s16(step1[1], step1[30]); + step2[31] = vqsubq_s16(step1[0], step1[31]); + step2[32] = step1[32]; + step2[33] = step1[33]; + step2[34] = step1[34]; + step2[35] = step1[35]; + step2[36] = step1[36]; + step2[37] = step1[37]; + step2[38] = step1[38]; + step2[39] = step1[39]; + step2[56] = step1[56]; + step2[57] = step1[57]; + step2[58] = step1[58]; + step2[59] = step1[59]; + step2[60] = step1[60]; + step2[61] = step1[61]; + step2[62] = step1[62]; + step2[63] = step1[63]; +} + +static INLINE void idct64_low32_neon(int16x8_t *in, int16x8_t *out, + int8_t cos_bit) { + const int32_t *cospi = cospi_arr(cos_bit); + int16x8_t step2[64], step1[64]; + const int16x4_t c0 = set_s16x4_neon((int16_t)cospi[4], (int16_t)cospi[60], + (int16_t)cospi[36], (int16_t)cospi[28]); + const int16x4_t c1 = set_s16x4_neon((int16_t)cospi[20], (int16_t)cospi[44], + (int16_t)cospi[52], (int16_t)cospi[12]); + const int16x4_t c2 = set_s16x4_neon((int16_t)cospi[8], (int16_t)cospi[56], + (int16_t)cospi[40], (int16_t)cospi[24]); + const int16x4_t c3 = set_s16x4_neon((int16_t)cospi[32], (int16_t)cospi[32], + (int16_t)cospi[16], (int16_t)cospi[48]); + const int16x4_t c4 = + set_s16x4_neon((int16_t)(-cospi[4]), (int16_t)(-cospi[60]), + (int16_t)(-cospi[36]), (int16_t)(-cospi[28])); + const int16x4_t c5 = + set_s16x4_neon((int16_t)(-cospi[20]), (int16_t)(-cospi[44]), + (int16_t)(-cospi[52]), (int16_t)(-cospi[12])); + const int16x4_t c6 = + 
set_s16x4_neon((int16_t)(-cospi[8]), (int16_t)(-cospi[56]), + (int16_t)(-cospi[40]), (int16_t)(-cospi[24])); + const int16x4_t c7 = + set_s16x4_neon((int16_t)(-cospi[32]), (int16_t)(-cospi[32]), + (int16_t)(-cospi[16]), (int16_t)(-cospi[48])); + + // stage 1 + // stage 2 + + step2[0] = in[0]; + step2[2] = in[16]; + step2[4] = in[8]; + step2[6] = in[24]; + step2[8] = in[4]; + step2[10] = in[20]; + step2[12] = in[12]; + step2[14] = in[28]; + step2[16] = in[2]; + step2[18] = in[18]; + step2[20] = in[10]; + step2[22] = in[26]; + step2[24] = in[6]; + step2[26] = in[22]; + step2[28] = in[14]; + step2[30] = in[30]; + + btf_16_neon(in[1], cospi[63], cospi[1], &step2[32], &step2[63]); + btf_16_neon(in[31], -cospi[33], cospi[31], &step2[33], &step2[62]); + btf_16_neon(in[17], cospi[47], cospi[17], &step2[34], &step2[61]); + btf_16_neon(in[15], -cospi[49], cospi[15], &step2[35], &step2[60]); + btf_16_neon(in[9], cospi[55], cospi[9], &step2[36], &step2[59]); + btf_16_neon(in[23], -cospi[41], cospi[23], &step2[37], &step2[58]); + btf_16_neon(in[25], cospi[39], cospi[25], &step2[38], &step2[57]); + btf_16_neon(in[7], -cospi[57], cospi[7], &step2[39], &step2[56]); + btf_16_neon(in[5], cospi[59], cospi[5], &step2[40], &step2[55]); + btf_16_neon(in[27], -cospi[37], cospi[27], &step2[41], &step2[54]); + btf_16_neon(in[21], cospi[43], cospi[21], &step2[42], &step2[53]); + btf_16_neon(in[11], -cospi[53], cospi[11], &step2[43], &step2[52]); + btf_16_neon(in[13], cospi[51], cospi[13], &step2[44], &step2[51]); + btf_16_neon(in[19], -cospi[45], cospi[19], &step2[45], &step2[50]); + btf_16_neon(in[29], cospi[35], cospi[29], &step2[46], &step2[49]); + btf_16_neon(in[3], -cospi[61], cospi[3], &step2[47], &step2[48]); + + // stage 3 + + step1[0] = step2[0]; + step1[2] = step2[2]; + step1[4] = step2[4]; + step1[6] = step2[6]; + step1[8] = step2[8]; + step1[10] = step2[10]; + step1[12] = step2[12]; + step1[14] = step2[14]; + + btf_16_neon(step2[16], cospi[62], cospi[2], &step1[16], &step1[31]); + btf_16_neon(step2[30], -cospi[34], cospi[30], &step1[17], &step1[30]); + btf_16_neon(step2[18], cospi[46], cospi[18], &step1[18], &step1[29]); + btf_16_neon(step2[28], -cospi[50], cospi[14], &step1[19], &step1[28]); + btf_16_neon(step2[20], cospi[54], cospi[10], &step1[20], &step1[27]); + btf_16_neon(step2[26], -cospi[42], cospi[22], &step1[21], &step1[26]); + btf_16_neon(step2[22], cospi[38], cospi[26], &step1[22], &step1[25]); + btf_16_neon(step2[24], -cospi[58], cospi[6], &step1[23], &step1[24]); + + step1[32] = vqaddq_s16(step2[32], step2[33]); + step1[33] = vqsubq_s16(step2[32], step2[33]); + step1[34] = vqsubq_s16(step2[35], step2[34]); + step1[35] = vqaddq_s16(step2[35], step2[34]); + step1[36] = vqaddq_s16(step2[36], step2[37]); + step1[37] = vqsubq_s16(step2[36], step2[37]); + step1[38] = vqsubq_s16(step2[39], step2[38]); + step1[39] = vqaddq_s16(step2[39], step2[38]); + step1[40] = vqaddq_s16(step2[40], step2[41]); + step1[41] = vqsubq_s16(step2[40], step2[41]); + step1[42] = vqsubq_s16(step2[43], step2[42]); + step1[43] = vqaddq_s16(step2[43], step2[42]); + step1[44] = vqaddq_s16(step2[44], step2[45]); + step1[45] = vqsubq_s16(step2[44], step2[45]); + step1[46] = vqsubq_s16(step2[47], step2[46]); + step1[47] = vqaddq_s16(step2[47], step2[46]); + step1[48] = vqaddq_s16(step2[48], step2[49]); + step1[49] = vqsubq_s16(step2[48], step2[49]); + step1[50] = vqsubq_s16(step2[51], step2[50]); + step1[51] = vqaddq_s16(step2[51], step2[50]); + step1[52] = vqaddq_s16(step2[52], step2[53]); + step1[53] = vqsubq_s16(step2[52], 
step2[53]); + step1[54] = vqsubq_s16(step2[55], step2[54]); + step1[55] = vqaddq_s16(step2[55], step2[54]); + step1[56] = vqaddq_s16(step2[56], step2[57]); + step1[57] = vqsubq_s16(step2[56], step2[57]); + step1[58] = vqsubq_s16(step2[59], step2[58]); + step1[59] = vqaddq_s16(step2[59], step2[58]); + step1[60] = vqaddq_s16(step2[60], step2[61]); + step1[61] = vqsubq_s16(step2[60], step2[61]); + step1[62] = vqsubq_s16(step2[63], step2[62]); + step1[63] = vqaddq_s16(step2[63], step2[62]); + + // stage 4 + + step2[0] = step1[0]; + step2[2] = step1[2]; + step2[4] = step1[4]; + step2[6] = step1[6]; + + btf_16_neon(step1[8], cospi[60], cospi[4], &step2[8], &step2[15]); + btf_16_neon(step1[14], -cospi[36], cospi[28], &step2[9], &step2[14]); + btf_16_neon(step1[10], cospi[44], cospi[20], &step2[10], &step2[13]); + btf_16_neon(step1[12], -cospi[52], cospi[12], &step2[11], &step2[12]); + btf_16_lane_0_1_neon(step1[62], step1[33], c0, &step2[62], &step2[33]); + btf_16_lane_1_0_neon(step1[34], step1[61], c4, &step2[34], &step2[61]); + btf_16_lane_2_3_neon(step1[58], step1[37], c0, &step2[58], &step2[37]); + btf_16_lane_3_2_neon(step1[38], step1[57], c4, &step2[38], &step2[57]); + btf_16_lane_0_1_neon(step1[54], step1[41], c1, &step2[54], &step2[41]); + btf_16_lane_1_0_neon(step1[42], step1[53], c5, &step2[42], &step2[53]); + btf_16_lane_2_3_neon(step1[50], step1[45], c1, &step2[50], &step2[45]); + btf_16_lane_3_2_neon(step1[46], step1[49], c5, &step2[46], &step2[49]); + + step2[16] = vqaddq_s16(step1[16], step1[17]); + step2[17] = vqsubq_s16(step1[16], step1[17]); + step2[18] = vqsubq_s16(step1[19], step1[18]); + step2[19] = vqaddq_s16(step1[19], step1[18]); + step2[20] = vqaddq_s16(step1[20], step1[21]); + step2[21] = vqsubq_s16(step1[20], step1[21]); + step2[22] = vqsubq_s16(step1[23], step1[22]); + step2[23] = vqaddq_s16(step1[23], step1[22]); + step2[24] = vqaddq_s16(step1[24], step1[25]); + step2[25] = vqsubq_s16(step1[24], step1[25]); + step2[26] = vqsubq_s16(step1[27], step1[26]); + step2[27] = vqaddq_s16(step1[27], step1[26]); + step2[28] = vqaddq_s16(step1[28], step1[29]); + step2[29] = vqsubq_s16(step1[28], step1[29]); + step2[30] = vqsubq_s16(step1[31], step1[30]); + step2[31] = vqaddq_s16(step1[31], step1[30]); + step2[32] = step1[32]; + step2[35] = step1[35]; + step2[36] = step1[36]; + step2[39] = step1[39]; + step2[40] = step1[40]; + step2[43] = step1[43]; + step2[44] = step1[44]; + step2[47] = step1[47]; + step2[48] = step1[48]; + step2[51] = step1[51]; + step2[52] = step1[52]; + step2[55] = step1[55]; + step2[56] = step1[56]; + step2[59] = step1[59]; + step2[60] = step1[60]; + step2[63] = step1[63]; + + // stage 5 + + step1[0] = step2[0]; + step1[2] = step2[2]; + + btf_16_neon(step2[4], cospi[56], cospi[8], &step1[4], &step1[7]); + btf_16_neon(step2[6], -cospi[40], cospi[24], &step1[5], &step1[6]); + btf_16_lane_0_1_neon(step2[30], step2[17], c2, &step1[30], &step1[17]); + btf_16_lane_1_0_neon(step2[18], step2[29], c6, &step1[18], &step1[29]); + btf_16_lane_2_3_neon(step2[26], step2[21], c2, &step1[26], &step1[21]); + btf_16_lane_3_2_neon(step2[22], step2[25], c6, &step1[22], &step1[25]); + + step1[8] = vqaddq_s16(step2[8], step2[9]); + step1[9] = vqsubq_s16(step2[8], step2[9]); + step1[10] = vqsubq_s16(step2[11], step2[10]); + step1[11] = vqaddq_s16(step2[11], step2[10]); + step1[12] = vqaddq_s16(step2[12], step2[13]); + step1[13] = vqsubq_s16(step2[12], step2[13]); + step1[14] = vqsubq_s16(step2[15], step2[14]); + step1[15] = vqaddq_s16(step2[15], step2[14]); + step1[16] = step2[16]; 
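The add/sub halves of every stage use the saturating vqaddq_s16/vqsubq_s16 rather than plain adds, so intermediates clamp to the int16_t range instead of wrapping. A minimal scalar model of that behaviour (helper names are illustrative, not from the patch):

#include <stdint.h>

static int16_t sat_add_s16(int16_t a, int16_t b) {
  const int32_t s = (int32_t)a + (int32_t)b;  /* widen, then clamp */
  return (int16_t)(s > 32767 ? 32767 : (s < -32768 ? -32768 : s));
}

static int16_t sat_sub_s16(int16_t a, int16_t b) {
  const int32_t d = (int32_t)a - (int32_t)b;
  return (int16_t)(d > 32767 ? 32767 : (d < -32768 ? -32768 : d));
}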
+ step1[19] = step2[19]; + step1[20] = step2[20]; + step1[23] = step2[23]; + step1[24] = step2[24]; + step1[27] = step2[27]; + step1[28] = step2[28]; + step1[31] = step2[31]; + step1[32] = vqaddq_s16(step2[32], step2[35]); + step1[33] = vqaddq_s16(step2[33], step2[34]); + step1[34] = vqsubq_s16(step2[33], step2[34]); + step1[35] = vqsubq_s16(step2[32], step2[35]); + step1[36] = vqsubq_s16(step2[39], step2[36]); + step1[37] = vqsubq_s16(step2[38], step2[37]); + step1[38] = vqaddq_s16(step2[38], step2[37]); + step1[39] = vqaddq_s16(step2[39], step2[36]); + step1[40] = vqaddq_s16(step2[40], step2[43]); + step1[41] = vqaddq_s16(step2[41], step2[42]); + step1[42] = vqsubq_s16(step2[41], step2[42]); + step1[43] = vqsubq_s16(step2[40], step2[43]); + step1[44] = vqsubq_s16(step2[47], step2[44]); + step1[45] = vqsubq_s16(step2[46], step2[45]); + step1[46] = vqaddq_s16(step2[46], step2[45]); + step1[47] = vqaddq_s16(step2[47], step2[44]); + step1[48] = vqaddq_s16(step2[48], step2[51]); + step1[49] = vqaddq_s16(step2[49], step2[50]); + step1[50] = vqsubq_s16(step2[49], step2[50]); + step1[51] = vqsubq_s16(step2[48], step2[51]); + step1[52] = vqsubq_s16(step2[55], step2[52]); + step1[53] = vqsubq_s16(step2[54], step2[53]); + step1[54] = vqaddq_s16(step2[54], step2[53]); + step1[55] = vqaddq_s16(step2[55], step2[52]); + step1[56] = vqaddq_s16(step2[56], step2[59]); + step1[57] = vqaddq_s16(step2[57], step2[58]); + step1[58] = vqsubq_s16(step2[57], step2[58]); + step1[59] = vqsubq_s16(step2[56], step2[59]); + step1[60] = vqsubq_s16(step2[63], step2[60]); + step1[61] = vqsubq_s16(step2[62], step2[61]); + step1[62] = vqaddq_s16(step2[62], step2[61]); + step1[63] = vqaddq_s16(step2[63], step2[60]); + + // stage 6 + + btf_16_neon(step1[0], cospi[32], cospi[32], &step2[0], &step2[1]); + btf_16_neon(step1[2], cospi[48], cospi[16], &step2[2], &step2[3]); + btf_16_lane_2_3_neon(step1[14], step1[9], c3, &step2[14], &step2[9]); + btf_16_lane_3_2_neon(step1[10], step1[13], c7, &step2[10], &step2[13]); + btf_16_lane_0_1_neon(step1[61], step1[34], c2, &step2[61], &step2[34]); + btf_16_lane_0_1_neon(step1[60], step1[35], c2, &step2[60], &step2[35]); + btf_16_lane_1_0_neon(step1[36], step1[59], c6, &step2[36], &step2[59]); + btf_16_lane_1_0_neon(step1[37], step1[58], c6, &step2[37], &step2[58]); + btf_16_lane_2_3_neon(step1[53], step1[42], c2, &step2[53], &step2[42]); + btf_16_lane_2_3_neon(step1[52], step1[43], c2, &step2[52], &step2[43]); + btf_16_lane_3_2_neon(step1[44], step1[51], c6, &step2[44], &step2[51]); + btf_16_lane_3_2_neon(step1[45], step1[50], c6, &step2[45], &step2[50]); + + step2[4] = vqaddq_s16(step1[4], step1[5]); + step2[5] = vqsubq_s16(step1[4], step1[5]); + step2[6] = vqsubq_s16(step1[7], step1[6]); + step2[7] = vqaddq_s16(step1[7], step1[6]); + step2[8] = step1[8]; + step2[11] = step1[11]; + step2[12] = step1[12]; + step2[15] = step1[15]; + step2[16] = vqaddq_s16(step1[16], step1[19]); + step2[17] = vqaddq_s16(step1[17], step1[18]); + step2[18] = vqsubq_s16(step1[17], step1[18]); + step2[19] = vqsubq_s16(step1[16], step1[19]); + step2[20] = vqsubq_s16(step1[23], step1[20]); + step2[21] = vqsubq_s16(step1[22], step1[21]); + step2[22] = vqaddq_s16(step1[22], step1[21]); + step2[23] = vqaddq_s16(step1[23], step1[20]); + step2[24] = vqaddq_s16(step1[24], step1[27]); + step2[25] = vqaddq_s16(step1[25], step1[26]); + step2[26] = vqsubq_s16(step1[25], step1[26]); + step2[27] = vqsubq_s16(step1[24], step1[27]); + step2[28] = vqsubq_s16(step1[31], step1[28]); + step2[29] = vqsubq_s16(step1[30], 
step1[29]); + step2[30] = vqaddq_s16(step1[30], step1[29]); + step2[31] = vqaddq_s16(step1[31], step1[28]); + step2[32] = step1[32]; + step2[33] = step1[33]; + step2[38] = step1[38]; + step2[39] = step1[39]; + step2[40] = step1[40]; + step2[41] = step1[41]; + step2[46] = step1[46]; + step2[47] = step1[47]; + step2[48] = step1[48]; + step2[49] = step1[49]; + step2[54] = step1[54]; + step2[55] = step1[55]; + step2[56] = step1[56]; + step2[57] = step1[57]; + step2[62] = step1[62]; + step2[63] = step1[63]; + + // stage 7 + + btf_16_lane_0_1_neon(step2[6], step2[5], c3, &step1[6], &step1[5]); + btf_16_lane_2_3_neon(step2[29], step2[18], c3, &step1[29], &step1[18]); + btf_16_lane_2_3_neon(step2[28], step2[19], c3, &step1[28], &step1[19]); + btf_16_lane_3_2_neon(step2[20], step2[27], c7, &step1[20], &step1[27]); + btf_16_lane_3_2_neon(step2[21], step2[26], c7, &step1[21], &step1[26]); + + step1[0] = vqaddq_s16(step2[0], step2[3]); + step1[1] = vqaddq_s16(step2[1], step2[2]); + step1[2] = vqsubq_s16(step2[1], step2[2]); + step1[3] = vqsubq_s16(step2[0], step2[3]); + step1[4] = step2[4]; + step1[7] = step2[7]; + step1[8] = vqaddq_s16(step2[8], step2[11]); + step1[9] = vqaddq_s16(step2[9], step2[10]); + step1[10] = vqsubq_s16(step2[9], step2[10]); + step1[11] = vqsubq_s16(step2[8], step2[11]); + step1[12] = vqsubq_s16(step2[15], step2[12]); + step1[13] = vqsubq_s16(step2[14], step2[13]); + step1[14] = vqaddq_s16(step2[14], step2[13]); + step1[15] = vqaddq_s16(step2[15], step2[12]); + step1[16] = step2[16]; + step1[17] = step2[17]; + step1[22] = step2[22]; + step1[23] = step2[23]; + step1[24] = step2[24]; + step1[25] = step2[25]; + step1[30] = step2[30]; + step1[31] = step2[31]; + step1[32] = vqaddq_s16(step2[32], step2[39]); + step1[33] = vqaddq_s16(step2[33], step2[38]); + step1[34] = vqaddq_s16(step2[34], step2[37]); + step1[35] = vqaddq_s16(step2[35], step2[36]); + step1[36] = vqsubq_s16(step2[35], step2[36]); + step1[37] = vqsubq_s16(step2[34], step2[37]); + step1[38] = vqsubq_s16(step2[33], step2[38]); + step1[39] = vqsubq_s16(step2[32], step2[39]); + step1[40] = vqsubq_s16(step2[47], step2[40]); + step1[41] = vqsubq_s16(step2[46], step2[41]); + step1[42] = vqsubq_s16(step2[45], step2[42]); + step1[43] = vqsubq_s16(step2[44], step2[43]); + step1[44] = vqaddq_s16(step2[43], step2[44]); + step1[45] = vqaddq_s16(step2[42], step2[45]); + step1[46] = vqaddq_s16(step2[41], step2[46]); + step1[47] = vqaddq_s16(step2[40], step2[47]); + step1[48] = vqaddq_s16(step2[48], step2[55]); + step1[49] = vqaddq_s16(step2[49], step2[54]); + step1[50] = vqaddq_s16(step2[50], step2[53]); + step1[51] = vqaddq_s16(step2[51], step2[52]); + step1[52] = vqsubq_s16(step2[51], step2[52]); + step1[53] = vqsubq_s16(step2[50], step2[53]); + step1[54] = vqsubq_s16(step2[49], step2[54]); + step1[55] = vqsubq_s16(step2[48], step2[55]); + step1[56] = vqsubq_s16(step2[63], step2[56]); + step1[57] = vqsubq_s16(step2[62], step2[57]); + step1[58] = vqsubq_s16(step2[61], step2[58]); + step1[59] = vqsubq_s16(step2[60], step2[59]); + step1[60] = vqaddq_s16(step2[59], step2[60]); + step1[61] = vqaddq_s16(step2[58], step2[61]); + step1[62] = vqaddq_s16(step2[57], step2[62]); + step1[63] = vqaddq_s16(step2[56], step2[63]); + + // stage 8 + + btf_16_lane_0_1_neon(step1[13], step1[10], c3, &step2[13], &step2[10]); + btf_16_lane_0_1_neon(step1[12], step1[11], c3, &step2[12], &step2[11]); + btf_16_lane_2_3_neon(step1[59], step1[36], c3, &step2[59], &step2[36]); + btf_16_lane_2_3_neon(step1[58], step1[37], c3, &step2[58], &step2[37]); + 
btf_16_lane_2_3_neon(step1[57], step1[38], c3, &step2[57], &step2[38]); + btf_16_lane_2_3_neon(step1[56], step1[39], c3, &step2[56], &step2[39]); + btf_16_lane_3_2_neon(step1[40], step1[55], c7, &step2[40], &step2[55]); + btf_16_lane_3_2_neon(step1[41], step1[54], c7, &step2[41], &step2[54]); + btf_16_lane_3_2_neon(step1[42], step1[53], c7, &step2[42], &step2[53]); + btf_16_lane_3_2_neon(step1[43], step1[52], c7, &step2[43], &step2[52]); + + step2[0] = vqaddq_s16(step1[0], step1[7]); + step2[1] = vqaddq_s16(step1[1], step1[6]); + step2[2] = vqaddq_s16(step1[2], step1[5]); + step2[3] = vqaddq_s16(step1[3], step1[4]); + step2[4] = vqsubq_s16(step1[3], step1[4]); + step2[5] = vqsubq_s16(step1[2], step1[5]); + step2[6] = vqsubq_s16(step1[1], step1[6]); + step2[7] = vqsubq_s16(step1[0], step1[7]); + step2[8] = step1[8]; + step2[9] = step1[9]; + step2[14] = step1[14]; + step2[15] = step1[15]; + step2[16] = vqaddq_s16(step1[16], step1[23]); + step2[17] = vqaddq_s16(step1[17], step1[22]); + step2[18] = vqaddq_s16(step1[18], step1[21]); + step2[19] = vqaddq_s16(step1[19], step1[20]); + step2[20] = vqsubq_s16(step1[19], step1[20]); + step2[21] = vqsubq_s16(step1[18], step1[21]); + step2[22] = vqsubq_s16(step1[17], step1[22]); + step2[23] = vqsubq_s16(step1[16], step1[23]); + step2[24] = vqsubq_s16(step1[31], step1[24]); + step2[25] = vqsubq_s16(step1[30], step1[25]); + step2[26] = vqsubq_s16(step1[29], step1[26]); + step2[27] = vqsubq_s16(step1[28], step1[27]); + step2[28] = vqaddq_s16(step1[28], step1[27]); + step2[29] = vqaddq_s16(step1[29], step1[26]); + step2[30] = vqaddq_s16(step1[30], step1[25]); + step2[31] = vqaddq_s16(step1[31], step1[24]); + step2[32] = step1[32]; + step2[33] = step1[33]; + step2[34] = step1[34]; + step2[35] = step1[35]; + step2[44] = step1[44]; + step2[45] = step1[45]; + step2[46] = step1[46]; + step2[47] = step1[47]; + step2[48] = step1[48]; + step2[49] = step1[49]; + step2[50] = step1[50]; + step2[51] = step1[51]; + step2[60] = step1[60]; + step2[61] = step1[61]; + step2[62] = step1[62]; + step2[63] = step1[63]; + + // stage 9 + idct64_stage9_neon(step2, step1, cos_bit); + + // stage 10 + idct64_stage10_neon(step1, step2, cos_bit); + + // stage 11 + + out[0] = vqaddq_s16(step2[0], step2[63]); + out[1] = vqaddq_s16(step2[1], step2[62]); + out[2] = vqaddq_s16(step2[2], step2[61]); + out[3] = vqaddq_s16(step2[3], step2[60]); + out[4] = vqaddq_s16(step2[4], step2[59]); + out[5] = vqaddq_s16(step2[5], step2[58]); + out[6] = vqaddq_s16(step2[6], step2[57]); + out[7] = vqaddq_s16(step2[7], step2[56]); + out[8] = vqaddq_s16(step2[8], step2[55]); + out[9] = vqaddq_s16(step2[9], step2[54]); + out[10] = vqaddq_s16(step2[10], step2[53]); + out[11] = vqaddq_s16(step2[11], step2[52]); + out[12] = vqaddq_s16(step2[12], step2[51]); + out[13] = vqaddq_s16(step2[13], step2[50]); + out[14] = vqaddq_s16(step2[14], step2[49]); + out[15] = vqaddq_s16(step2[15], step2[48]); + out[16] = vqaddq_s16(step2[16], step2[47]); + out[17] = vqaddq_s16(step2[17], step2[46]); + out[18] = vqaddq_s16(step2[18], step2[45]); + out[19] = vqaddq_s16(step2[19], step2[44]); + out[20] = vqaddq_s16(step2[20], step2[43]); + out[21] = vqaddq_s16(step2[21], step2[42]); + out[22] = vqaddq_s16(step2[22], step2[41]); + out[23] = vqaddq_s16(step2[23], step2[40]); + out[24] = vqaddq_s16(step2[24], step2[39]); + out[25] = vqaddq_s16(step2[25], step2[38]); + out[26] = vqaddq_s16(step2[26], step2[37]); + out[27] = vqaddq_s16(step2[27], step2[36]); + out[28] = vqaddq_s16(step2[28], step2[35]); + out[29] = 
vqaddq_s16(step2[29], step2[34]); + out[30] = vqaddq_s16(step2[30], step2[33]); + out[31] = vqaddq_s16(step2[31], step2[32]); + out[32] = vqsubq_s16(step2[31], step2[32]); + out[33] = vqsubq_s16(step2[30], step2[33]); + out[34] = vqsubq_s16(step2[29], step2[34]); + out[35] = vqsubq_s16(step2[28], step2[35]); + out[36] = vqsubq_s16(step2[27], step2[36]); + out[37] = vqsubq_s16(step2[26], step2[37]); + out[38] = vqsubq_s16(step2[25], step2[38]); + out[39] = vqsubq_s16(step2[24], step2[39]); + out[40] = vqsubq_s16(step2[23], step2[40]); + out[41] = vqsubq_s16(step2[22], step2[41]); + out[42] = vqsubq_s16(step2[21], step2[42]); + out[43] = vqsubq_s16(step2[20], step2[43]); + out[44] = vqsubq_s16(step2[19], step2[44]); + out[45] = vqsubq_s16(step2[18], step2[45]); + out[46] = vqsubq_s16(step2[17], step2[46]); + out[47] = vqsubq_s16(step2[16], step2[47]); + out[48] = vqsubq_s16(step2[15], step2[48]); + out[49] = vqsubq_s16(step2[14], step2[49]); + out[50] = vqsubq_s16(step2[13], step2[50]); + out[51] = vqsubq_s16(step2[12], step2[51]); + out[52] = vqsubq_s16(step2[11], step2[52]); + out[53] = vqsubq_s16(step2[10], step2[53]); + out[54] = vqsubq_s16(step2[9], step2[54]); + out[55] = vqsubq_s16(step2[8], step2[55]); + out[56] = vqsubq_s16(step2[7], step2[56]); + out[57] = vqsubq_s16(step2[6], step2[57]); + out[58] = vqsubq_s16(step2[5], step2[58]); + out[59] = vqsubq_s16(step2[4], step2[59]); + out[60] = vqsubq_s16(step2[3], step2[60]); + out[61] = vqsubq_s16(step2[2], step2[61]); + out[62] = vqsubq_s16(step2[1], step2[62]); + out[63] = vqsubq_s16(step2[0], step2[63]); +} + +static INLINE void idct64_low1_neon(int16x8_t *input, int16x8_t *out, + int8_t cos_bit) { + const int32_t *cospi = cospi_arr(cos_bit); + int16x8_t step1; + int32x4_t t32[2]; + + // stage 1 + // stage 2 + // stage 3 + // stage 4 + // stage 5 + // stage 6 + + t32[0] = vmull_n_s16(vget_low_s16(input[0]), cospi[32]); + t32[1] = vmull_n_s16(vget_high_s16(input[0]), cospi[32]); + + step1 = vcombine_s16(vrshrn_n_s32(t32[0], INV_COS_BIT), + vrshrn_n_s32(t32[1], INV_COS_BIT)); + // stage 7 + // stage 8 + // stage 9 + // stage 10 + // stage 11 + out[0] = step1; + out[1] = step1; + out[2] = step1; + out[3] = step1; + out[4] = step1; + out[5] = step1; + out[6] = step1; + out[7] = step1; + out[8] = step1; + out[9] = step1; + out[10] = step1; + out[11] = step1; + out[12] = step1; + out[13] = step1; + out[14] = step1; + out[15] = step1; + out[16] = step1; + out[17] = step1; + out[18] = step1; + out[19] = step1; + out[20] = step1; + out[21] = step1; + out[22] = step1; + out[23] = step1; + out[24] = step1; + out[25] = step1; + out[26] = step1; + out[27] = step1; + out[28] = step1; + out[29] = step1; + out[30] = step1; + out[31] = step1; + out[32] = step1; + out[33] = step1; + out[34] = step1; + out[35] = step1; + out[36] = step1; + out[37] = step1; + out[38] = step1; + out[39] = step1; + out[40] = step1; + out[41] = step1; + out[42] = step1; + out[43] = step1; + out[44] = step1; + out[45] = step1; + out[46] = step1; + out[47] = step1; + out[48] = step1; + out[49] = step1; + out[50] = step1; + out[51] = step1; + out[52] = step1; + out[53] = step1; + out[54] = step1; + out[55] = step1; + out[56] = step1; + out[57] = step1; + out[58] = step1; + out[59] = step1; + out[60] = step1; + out[61] = step1; + out[62] = step1; + out[63] = step1; +} + +static INLINE void idct64_low8_neon(int16x8_t *in, int16x8_t *out, + int8_t cos_bit) { + const int32_t *cospi = cospi_arr(cos_bit); + int16x8_t step2[64], step1[64]; + + const int16x4_t c0 = 
set_s16x4_neon((int16_t)cospi[4], (int16_t)cospi[60], + (int16_t)cospi[36], (int16_t)cospi[28]); + const int16x4_t c1 = set_s16x4_neon((int16_t)cospi[20], (int16_t)cospi[44], + (int16_t)cospi[52], (int16_t)cospi[12]); + const int16x4_t c2 = set_s16x4_neon((int16_t)cospi[8], (int16_t)cospi[56], + (int16_t)cospi[40], (int16_t)cospi[24]); + const int16x4_t c3 = set_s16x4_neon((int16_t)cospi[32], (int16_t)cospi[32], + (int16_t)cospi[16], (int16_t)cospi[48]); + const int16x4_t c4 = + set_s16x4_neon((int16_t)(-cospi[36]), (int16_t)(-cospi[28]), + (int16_t)(-cospi[52]), (int16_t)(-cospi[12])); + const int16x4_t c5 = + set_s16x4_neon((int16_t)(-cospi[8]), (int16_t)(-cospi[56]), + (int16_t)(-cospi[40]), (int16_t)(-cospi[24])); + const int16x4_t c6 = + set_s16x4_neon((int16_t)(-cospi[32]), (int16_t)(-cospi[32]), + (int16_t)(-cospi[16]), (int16_t)(-cospi[48])); + + // stage 1 + // stage 2 + + step2[0] = in[0]; + step2[8] = in[4]; + step2[16] = in[2]; + step2[24] = in[6]; + + btf_16_neon(in[1], cospi[63], cospi[1], &step2[32], &step2[63]); + btf_16_neon(in[7], -cospi[57], cospi[7], &step2[39], &step2[56]); + btf_16_neon(in[5], cospi[59], cospi[5], &step2[40], &step2[55]); + btf_16_neon(in[3], -cospi[61], cospi[3], &step2[47], &step2[48]); + + // stage 3 + + step1[0] = step2[0]; + step1[8] = step2[8]; + + btf_16_neon(step2[16], cospi[62], cospi[2], &step1[16], &step1[31]); + btf_16_neon(step2[24], -cospi[58], cospi[6], &step1[23], &step1[24]); + + step1[32] = step2[32]; + step1[33] = step2[32]; + step1[38] = step2[39]; + step1[39] = step2[39]; + step1[40] = step2[40]; + step1[41] = step2[40]; + step1[46] = step2[47]; + step1[47] = step2[47]; + step1[48] = step2[48]; + step1[49] = step2[48]; + step1[54] = step2[55]; + step1[55] = step2[55]; + step1[56] = step2[56]; + step1[57] = step2[56]; + step1[62] = step2[63]; + step1[63] = step2[63]; + + // stage 4 + + step2[0] = step1[0]; + + btf_16_neon(step1[8], cospi[60], cospi[4], &step2[8], &step2[15]); + btf_16_lane_0_1_neon(step1[62], step1[33], c0, &step2[62], &step2[33]); + btf_16_lane_1_0_neon(step1[38], step1[57], c4, &step2[38], &step2[57]); + btf_16_lane_0_1_neon(step1[54], step1[41], c1, &step2[54], &step2[41]); + btf_16_lane_3_2_neon(step1[46], step1[49], c4, &step2[46], &step2[49]); + + step2[16] = step1[16]; + step2[17] = step1[16]; + step2[22] = step1[23]; + step2[23] = step1[23]; + step2[24] = step1[24]; + step2[25] = step1[24]; + step2[30] = step1[31]; + step2[31] = step1[31]; + step2[32] = step1[32]; + step2[39] = step1[39]; + step2[40] = step1[40]; + step2[47] = step1[47]; + step2[48] = step1[48]; + step2[55] = step1[55]; + step2[56] = step1[56]; + step2[63] = step1[63]; + + // stage 5 + + step1[0] = step2[0]; + + btf_16_lane_0_1_neon(step2[30], step2[17], c2, &step1[30], &step1[17]); + btf_16_lane_3_2_neon(step2[22], step2[25], c5, &step1[22], &step1[25]); + + step1[8] = step2[8]; + step1[9] = step2[8]; + step1[14] = step2[15]; + step1[15] = step2[15]; + + step1[16] = step2[16]; + step1[23] = step2[23]; + step1[24] = step2[24]; + step1[31] = step2[31]; + step1[32] = step2[32]; + step1[33] = step2[33]; + step1[34] = step2[33]; + step1[35] = step2[32]; + step1[36] = step2[39]; + step1[37] = step2[38]; + step1[38] = step2[38]; + step1[39] = step2[39]; + step1[40] = step2[40]; + step1[41] = step2[41]; + step1[42] = step2[41]; + step1[43] = step2[40]; + step1[44] = step2[47]; + step1[45] = step2[46]; + step1[46] = step2[46]; + step1[47] = step2[47]; + step1[48] = step2[48]; + step1[49] = step2[49]; + step1[50] = step2[49]; + step1[51] = 
step2[48]; + step1[52] = step2[55]; + step1[53] = step2[54]; + step1[54] = step2[54]; + step1[55] = step2[55]; + step1[56] = step2[56]; + step1[57] = step2[57]; + step1[58] = step2[57]; + step1[59] = step2[56]; + step1[60] = step2[63]; + step1[61] = step2[62]; + step1[62] = step2[62]; + step1[63] = step2[63]; + + // stage 6 + + btf_16_neon(step1[0], cospi[32], cospi[32], &step2[0], &step2[1]); + btf_16_lane_2_3_neon(step1[14], step1[9], c3, &step2[14], &step2[9]); + btf_16_lane_0_1_neon(step1[61], step1[34], c2, &step2[61], &step2[34]); + btf_16_lane_0_1_neon(step1[60], step1[35], c2, &step2[60], &step2[35]); + btf_16_lane_1_0_neon(step1[36], step1[59], c5, &step2[36], &step2[59]); + btf_16_lane_1_0_neon(step1[37], step1[58], c5, &step2[37], &step2[58]); + btf_16_lane_2_3_neon(step1[53], step1[42], c2, &step2[53], &step2[42]); + btf_16_lane_2_3_neon(step1[52], step1[43], c2, &step2[52], &step2[43]); + btf_16_lane_3_2_neon(step1[44], step1[51], c5, &step2[44], &step2[51]); + btf_16_lane_3_2_neon(step1[45], step1[50], c5, &step2[45], &step2[50]); + + step2[8] = step1[8]; + step2[15] = step1[15]; + step2[16] = step1[16]; + step2[17] = step1[17]; + step2[18] = step1[17]; + step2[19] = step1[16]; + step2[20] = step1[23]; + step2[21] = step1[22]; + step2[22] = step1[22]; + step2[23] = step1[23]; + step2[24] = step1[24]; + step2[25] = step1[25]; + step2[26] = step1[25]; + step2[27] = step1[24]; + step2[28] = step1[31]; + step2[29] = step1[30]; + step2[30] = step1[30]; + step2[31] = step1[31]; + step2[32] = step1[32]; + step2[33] = step1[33]; + step2[38] = step1[38]; + step2[39] = step1[39]; + step2[40] = step1[40]; + step2[41] = step1[41]; + step2[46] = step1[46]; + step2[47] = step1[47]; + step2[48] = step1[48]; + step2[49] = step1[49]; + step2[54] = step1[54]; + step2[55] = step1[55]; + step2[56] = step1[56]; + step2[57] = step1[57]; + step2[62] = step1[62]; + step2[63] = step1[63]; + + // stage 7 + + btf_16_lane_2_3_neon(step2[29], step2[18], c3, &step1[29], &step1[18]); + btf_16_lane_2_3_neon(step2[28], step2[19], c3, &step1[28], &step1[19]); + btf_16_lane_3_2_neon(step2[20], step2[27], c6, &step1[20], &step1[27]); + btf_16_lane_3_2_neon(step2[21], step2[26], c6, &step1[21], &step1[26]); + + step1[0] = step2[0]; + step1[1] = step2[1]; + step1[2] = step2[1]; + step1[3] = step2[0]; + step1[8] = step2[8]; + step1[9] = step2[9]; + step1[10] = step2[9]; + step1[11] = step2[8]; + step1[12] = step2[15]; + step1[13] = step2[14]; + step1[14] = step2[14]; + step1[15] = step2[15]; + step1[16] = step2[16]; + step1[17] = step2[17]; + step1[22] = step2[22]; + step1[23] = step2[23]; + step1[24] = step2[24]; + step1[25] = step2[25]; + step1[30] = step2[30]; + step1[31] = step2[31]; + step1[32] = vqaddq_s16(step2[32], step2[39]); + step1[33] = vqaddq_s16(step2[33], step2[38]); + step1[34] = vqaddq_s16(step2[34], step2[37]); + step1[35] = vqaddq_s16(step2[35], step2[36]); + step1[36] = vqsubq_s16(step2[35], step2[36]); + step1[37] = vqsubq_s16(step2[34], step2[37]); + step1[38] = vqsubq_s16(step2[33], step2[38]); + step1[39] = vqsubq_s16(step2[32], step2[39]); + step1[40] = vqsubq_s16(step2[47], step2[40]); + step1[41] = vqsubq_s16(step2[46], step2[41]); + step1[42] = vqsubq_s16(step2[45], step2[42]); + step1[43] = vqsubq_s16(step2[44], step2[43]); + step1[44] = vqaddq_s16(step2[43], step2[44]); + step1[45] = vqaddq_s16(step2[42], step2[45]); + step1[46] = vqaddq_s16(step2[41], step2[46]); + step1[47] = vqaddq_s16(step2[40], step2[47]); + step1[48] = vqaddq_s16(step2[48], step2[55]); + step1[49] = 
vqaddq_s16(step2[49], step2[54]); + step1[50] = vqaddq_s16(step2[50], step2[53]); + step1[51] = vqaddq_s16(step2[51], step2[52]); + step1[52] = vqsubq_s16(step2[51], step2[52]); + step1[53] = vqsubq_s16(step2[50], step2[53]); + step1[54] = vqsubq_s16(step2[49], step2[54]); + step1[55] = vqsubq_s16(step2[48], step2[55]); + step1[56] = vqsubq_s16(step2[63], step2[56]); + step1[57] = vqsubq_s16(step2[62], step2[57]); + step1[58] = vqsubq_s16(step2[61], step2[58]); + step1[59] = vqsubq_s16(step2[60], step2[59]); + step1[60] = vqaddq_s16(step2[59], step2[60]); + step1[61] = vqaddq_s16(step2[58], step2[61]); + step1[62] = vqaddq_s16(step2[57], step2[62]); + step1[63] = vqaddq_s16(step2[56], step2[63]); + + // stage 8 + + btf_16_lane_0_1_neon(step1[13], step1[10], c3, &step2[13], &step2[10]); + btf_16_lane_0_1_neon(step1[12], step1[11], c3, &step2[12], &step2[11]); + btf_16_lane_2_3_neon(step1[59], step1[36], c3, &step2[59], &step2[36]); + btf_16_lane_2_3_neon(step1[58], step1[37], c3, &step2[58], &step2[37]); + btf_16_lane_2_3_neon(step1[57], step1[38], c3, &step2[57], &step2[38]); + btf_16_lane_2_3_neon(step1[56], step1[39], c3, &step2[56], &step2[39]); + btf_16_lane_3_2_neon(step1[40], step1[55], c6, &step2[40], &step2[55]); + btf_16_lane_3_2_neon(step1[41], step1[54], c6, &step2[41], &step2[54]); + btf_16_lane_3_2_neon(step1[42], step1[53], c6, &step2[42], &step2[53]); + btf_16_lane_3_2_neon(step1[43], step1[52], c6, &step2[43], &step2[52]); + + step2[0] = step1[0]; + step2[1] = step1[1]; + step2[2] = step1[2]; + step2[3] = step1[3]; + step2[4] = step1[3]; + step2[5] = step1[2]; + step2[6] = step1[1]; + step2[7] = step1[0]; + step2[8] = step1[8]; + step2[9] = step1[9]; + step2[14] = step1[14]; + step2[15] = step1[15]; + step2[16] = vqaddq_s16(step1[16], step1[23]); + step2[17] = vqaddq_s16(step1[17], step1[22]); + step2[18] = vqaddq_s16(step1[18], step1[21]); + step2[19] = vqaddq_s16(step1[19], step1[20]); + step2[20] = vqsubq_s16(step1[19], step1[20]); + step2[21] = vqsubq_s16(step1[18], step1[21]); + step2[22] = vqsubq_s16(step1[17], step1[22]); + step2[23] = vqsubq_s16(step1[16], step1[23]); + step2[24] = vqsubq_s16(step1[31], step1[24]); + step2[25] = vqsubq_s16(step1[30], step1[25]); + step2[26] = vqsubq_s16(step1[29], step1[26]); + step2[27] = vqsubq_s16(step1[28], step1[27]); + step2[28] = vqaddq_s16(step1[28], step1[27]); + step2[29] = vqaddq_s16(step1[29], step1[26]); + step2[30] = vqaddq_s16(step1[30], step1[25]); + step2[31] = vqaddq_s16(step1[31], step1[24]); + step2[32] = step1[32]; + step2[33] = step1[33]; + step2[34] = step1[34]; + step2[35] = step1[35]; + step2[44] = step1[44]; + step2[45] = step1[45]; + step2[46] = step1[46]; + step2[47] = step1[47]; + step2[48] = step1[48]; + step2[49] = step1[49]; + step2[50] = step1[50]; + step2[51] = step1[51]; + step2[60] = step1[60]; + step2[61] = step1[61]; + step2[62] = step1[62]; + step2[63] = step1[63]; + + // stage 9 + idct64_stage9_neon(step2, step1, cos_bit); + + // stage 10 + idct64_stage10_neon(step1, step2, cos_bit); + + // stage 11 + + out[0] = vqaddq_s16(step2[0], step2[63]); + out[1] = vqaddq_s16(step2[1], step2[62]); + out[2] = vqaddq_s16(step2[2], step2[61]); + out[3] = vqaddq_s16(step2[3], step2[60]); + out[4] = vqaddq_s16(step2[4], step2[59]); + out[5] = vqaddq_s16(step2[5], step2[58]); + out[6] = vqaddq_s16(step2[6], step2[57]); + out[7] = vqaddq_s16(step2[7], step2[56]); + out[8] = vqaddq_s16(step2[8], step2[55]); + out[9] = vqaddq_s16(step2[9], step2[54]); + out[10] = vqaddq_s16(step2[10], step2[53]); + out[11] = 
vqaddq_s16(step2[11], step2[52]); + out[12] = vqaddq_s16(step2[12], step2[51]); + out[13] = vqaddq_s16(step2[13], step2[50]); + out[14] = vqaddq_s16(step2[14], step2[49]); + out[15] = vqaddq_s16(step2[15], step2[48]); + out[16] = vqaddq_s16(step2[16], step2[47]); + out[17] = vqaddq_s16(step2[17], step2[46]); + out[18] = vqaddq_s16(step2[18], step2[45]); + out[19] = vqaddq_s16(step2[19], step2[44]); + out[20] = vqaddq_s16(step2[20], step2[43]); + out[21] = vqaddq_s16(step2[21], step2[42]); + out[22] = vqaddq_s16(step2[22], step2[41]); + out[23] = vqaddq_s16(step2[23], step2[40]); + out[24] = vqaddq_s16(step2[24], step2[39]); + out[25] = vqaddq_s16(step2[25], step2[38]); + out[26] = vqaddq_s16(step2[26], step2[37]); + out[27] = vqaddq_s16(step2[27], step2[36]); + out[28] = vqaddq_s16(step2[28], step2[35]); + out[29] = vqaddq_s16(step2[29], step2[34]); + out[30] = vqaddq_s16(step2[30], step2[33]); + out[31] = vqaddq_s16(step2[31], step2[32]); + out[32] = vqsubq_s16(step2[31], step2[32]); + out[33] = vqsubq_s16(step2[30], step2[33]); + out[34] = vqsubq_s16(step2[29], step2[34]); + out[35] = vqsubq_s16(step2[28], step2[35]); + out[36] = vqsubq_s16(step2[27], step2[36]); + out[37] = vqsubq_s16(step2[26], step2[37]); + out[38] = vqsubq_s16(step2[25], step2[38]); + out[39] = vqsubq_s16(step2[24], step2[39]); + out[40] = vqsubq_s16(step2[23], step2[40]); + out[41] = vqsubq_s16(step2[22], step2[41]); + out[42] = vqsubq_s16(step2[21], step2[42]); + out[43] = vqsubq_s16(step2[20], step2[43]); + out[44] = vqsubq_s16(step2[19], step2[44]); + out[45] = vqsubq_s16(step2[18], step2[45]); + out[46] = vqsubq_s16(step2[17], step2[46]); + out[47] = vqsubq_s16(step2[16], step2[47]); + out[48] = vqsubq_s16(step2[15], step2[48]); + out[49] = vqsubq_s16(step2[14], step2[49]); + out[50] = vqsubq_s16(step2[13], step2[50]); + out[51] = vqsubq_s16(step2[12], step2[51]); + out[52] = vqsubq_s16(step2[11], step2[52]); + out[53] = vqsubq_s16(step2[10], step2[53]); + out[54] = vqsubq_s16(step2[9], step2[54]); + out[55] = vqsubq_s16(step2[8], step2[55]); + out[56] = vqsubq_s16(step2[7], step2[56]); + out[57] = vqsubq_s16(step2[6], step2[57]); + out[58] = vqsubq_s16(step2[5], step2[58]); + out[59] = vqsubq_s16(step2[4], step2[59]); + out[60] = vqsubq_s16(step2[3], step2[60]); + out[61] = vqsubq_s16(step2[2], step2[61]); + out[62] = vqsubq_s16(step2[1], step2[62]); + out[63] = vqsubq_s16(step2[0], step2[63]); +} + +static INLINE void idct64_low16_neon(int16x8_t *in, int16x8_t *out, + int8_t cos_bit) { + const int32_t *cospi = cospi_arr(cos_bit); + int16x8_t step2[64], step1[64]; + + const int16x4_t c0 = set_s16x4_neon((int16_t)cospi[4], (int16_t)cospi[60], + (int16_t)cospi[36], (int16_t)cospi[28]); + const int16x4_t c1 = set_s16x4_neon((int16_t)cospi[20], (int16_t)cospi[44], + (int16_t)cospi[52], (int16_t)cospi[12]); + const int16x4_t c2 = set_s16x4_neon((int16_t)cospi[8], (int16_t)cospi[56], + (int16_t)cospi[40], (int16_t)cospi[24]); + const int16x4_t c3 = set_s16x4_neon((int16_t)cospi[32], (int16_t)cospi[32], + (int16_t)cospi[16], (int16_t)cospi[48]); + const int16x4_t c4 = + set_s16x4_neon((int16_t)(-cospi[4]), (int16_t)(-cospi[60]), + (int16_t)(-cospi[36]), (int16_t)(-cospi[28])); + const int16x4_t c5 = + set_s16x4_neon((int16_t)(-cospi[20]), (int16_t)(-cospi[44]), + (int16_t)(-cospi[52]), (int16_t)(-cospi[12])); + const int16x4_t c6 = + set_s16x4_neon((int16_t)(-cospi[8]), (int16_t)(-cospi[56]), + (int16_t)(-cospi[40]), (int16_t)(-cospi[24])); + const int16x4_t c7 = + set_s16x4_neon((int16_t)(-cospi[32]), 
(int16_t)(-cospi[32]), + (int16_t)(-cospi[16]), (int16_t)(-cospi[48])); + + // stage 1 + // stage 2 + + step2[0] = in[0]; + step2[4] = in[8]; + step2[8] = in[4]; + step2[12] = in[12]; + step2[16] = in[2]; + step2[20] = in[10]; + step2[24] = in[6]; + step2[28] = in[14]; + + btf_16_neon(in[1], cospi[63], cospi[1], &step2[32], &step2[63]); + btf_16_neon(in[15], -cospi[49], cospi[15], &step2[35], &step2[60]); + btf_16_neon(in[9], cospi[55], cospi[9], &step2[36], &step2[59]); + btf_16_neon(in[7], -cospi[57], cospi[7], &step2[39], &step2[56]); + btf_16_neon(in[5], cospi[59], cospi[5], &step2[40], &step2[55]); + btf_16_neon(in[11], -cospi[53], cospi[11], &step2[43], &step2[52]); + btf_16_neon(in[13], cospi[51], cospi[13], &step2[44], &step2[51]); + btf_16_neon(in[3], -cospi[61], cospi[3], &step2[47], &step2[48]); + + // stage 3 + + step1[0] = step2[0]; + step1[4] = step2[4]; + step1[8] = step2[8]; + step1[12] = step2[12]; + + btf_16_neon(step2[16], cospi[62], cospi[2], &step1[16], &step1[31]); + btf_16_neon(step2[20], cospi[54], cospi[10], &step1[20], &step1[27]); + btf_16_neon(step2[24], -cospi[58], cospi[6], &step1[23], &step1[24]); + btf_16_neon(step2[28], -cospi[50], cospi[14], &step1[19], &step1[28]); + + step1[32] = step2[32]; + step1[33] = step2[32]; + step1[34] = step2[35]; + step1[35] = step2[35]; + step1[36] = step2[36]; + step1[37] = step2[36]; + step1[38] = step2[39]; + step1[39] = step2[39]; + step1[40] = step2[40]; + step1[41] = step2[40]; + step1[42] = step2[43]; + step1[43] = step2[43]; + step1[44] = step2[44]; + step1[45] = step2[44]; + step1[46] = step2[47]; + step1[47] = step2[47]; + step1[48] = step2[48]; + step1[49] = step2[48]; + step1[50] = step2[51]; + step1[51] = step2[51]; + step1[52] = step2[52]; + step1[53] = step2[52]; + step1[54] = step2[55]; + step1[55] = step2[55]; + step1[56] = step2[56]; + step1[57] = step2[56]; + step1[58] = step2[59]; + step1[59] = step2[59]; + step1[60] = step2[60]; + step1[61] = step2[60]; + step1[62] = step2[63]; + step1[63] = step2[63]; + + // stage 4 + + step2[0] = step1[0]; + step2[4] = step1[4]; + + btf_16_neon(step1[8], cospi[60], cospi[4], &step2[8], &step2[15]); + btf_16_neon(step1[12], -cospi[52], cospi[12], &step2[11], &step2[12]); + btf_16_lane_0_1_neon(step1[62], step1[33], c0, &step2[62], &step2[33]); + btf_16_lane_1_0_neon(step1[34], step1[61], c4, &step2[34], &step2[61]); + btf_16_lane_2_3_neon(step1[58], step1[37], c0, &step2[58], &step2[37]); + btf_16_lane_3_2_neon(step1[38], step1[57], c4, &step2[38], &step2[57]); + btf_16_lane_0_1_neon(step1[54], step1[41], c1, &step2[54], &step2[41]); + btf_16_lane_1_0_neon(step1[42], step1[53], c5, &step2[42], &step2[53]); + btf_16_lane_2_3_neon(step1[50], step1[45], c1, &step2[50], &step2[45]); + btf_16_lane_3_2_neon(step1[46], step1[49], c5, &step2[46], &step2[49]); + + step2[16] = step1[16]; + step2[17] = step1[16]; + step2[18] = step1[19]; + step2[19] = step1[19]; + step2[20] = step1[20]; + step2[21] = step1[20]; + step2[22] = step1[23]; + step2[23] = step1[23]; + step2[24] = step1[24]; + step2[25] = step1[24]; + step2[26] = step1[27]; + step2[27] = step1[27]; + step2[28] = step1[28]; + step2[29] = step1[28]; + step2[30] = step1[31]; + step2[31] = step1[31]; + step2[32] = step1[32]; + step2[35] = step1[35]; + step2[36] = step1[36]; + step2[39] = step1[39]; + step2[40] = step1[40]; + step2[43] = step1[43]; + step2[44] = step1[44]; + step2[47] = step1[47]; + step2[48] = step1[48]; + step2[51] = step1[51]; + step2[52] = step1[52]; + step2[55] = step1[55]; + step2[56] = step1[56]; + 
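In these reduced-eob variants (idct64_low8/low16, and likewise idct32_low16 above), the coefficients outside the kept top-left block are known to be zero, so any butterfly whose second input would be zero degenerates into two copies of the first input — which is why stage 3 here writes pairs such as step1[32] = step2[32]; step1[33] = step2[32]; instead of issuing vqaddq_s16/vqsubq_s16. A sketch of the shortcut (hypothetical helper name):

#include <stdint.h>

static inline void butterfly_zero_partner(int16_t even, int16_t *sum,
                                          int16_t *diff) {
  /* sat_add(even, 0) == sat_sub(even, 0) == even, so no arithmetic needed. */
  *sum = even;
  *diff = even;
}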
step2[59] = step1[59]; + step2[60] = step1[60]; + step2[63] = step1[63]; + + // stage 5 + + step1[0] = step2[0]; + + btf_16_neon(step2[4], cospi[56], cospi[8], &step1[4], &step1[7]); + btf_16_lane_0_1_neon(step2[30], step2[17], c2, &step1[30], &step1[17]); + btf_16_lane_1_0_neon(step2[18], step2[29], c6, &step1[18], &step1[29]); + btf_16_lane_2_3_neon(step2[26], step2[21], c2, &step1[26], &step1[21]); + btf_16_lane_3_2_neon(step2[22], step2[25], c6, &step1[22], &step1[25]); + + step1[8] = step2[8]; + step1[9] = step2[8]; + step1[10] = step2[11]; + step1[11] = step2[11]; + step1[12] = step2[12]; + step1[13] = step2[12]; + step1[14] = step2[15]; + step1[15] = step2[15]; + step1[16] = step2[16]; + step1[19] = step2[19]; + step1[20] = step2[20]; + step1[23] = step2[23]; + step1[24] = step2[24]; + step1[27] = step2[27]; + step1[28] = step2[28]; + step1[31] = step2[31]; + step1[32] = vqaddq_s16(step2[32], step2[35]); + step1[33] = vqaddq_s16(step2[33], step2[34]); + step1[34] = vqsubq_s16(step2[33], step2[34]); + step1[35] = vqsubq_s16(step2[32], step2[35]); + step1[36] = vqsubq_s16(step2[39], step2[36]); + step1[37] = vqsubq_s16(step2[38], step2[37]); + step1[38] = vqaddq_s16(step2[38], step2[37]); + step1[39] = vqaddq_s16(step2[39], step2[36]); + step1[40] = vqaddq_s16(step2[40], step2[43]); + step1[41] = vqaddq_s16(step2[41], step2[42]); + step1[42] = vqsubq_s16(step2[41], step2[42]); + step1[43] = vqsubq_s16(step2[40], step2[43]); + step1[44] = vqsubq_s16(step2[47], step2[44]); + step1[45] = vqsubq_s16(step2[46], step2[45]); + step1[46] = vqaddq_s16(step2[46], step2[45]); + step1[47] = vqaddq_s16(step2[47], step2[44]); + step1[48] = vqaddq_s16(step2[48], step2[51]); + step1[49] = vqaddq_s16(step2[49], step2[50]); + step1[50] = vqsubq_s16(step2[49], step2[50]); + step1[51] = vqsubq_s16(step2[48], step2[51]); + step1[52] = vqsubq_s16(step2[55], step2[52]); + step1[53] = vqsubq_s16(step2[54], step2[53]); + step1[54] = vqaddq_s16(step2[54], step2[53]); + step1[55] = vqaddq_s16(step2[55], step2[52]); + step1[56] = vqaddq_s16(step2[56], step2[59]); + step1[57] = vqaddq_s16(step2[57], step2[58]); + step1[58] = vqsubq_s16(step2[57], step2[58]); + step1[59] = vqsubq_s16(step2[56], step2[59]); + step1[60] = vqsubq_s16(step2[63], step2[60]); + step1[61] = vqsubq_s16(step2[62], step2[61]); + step1[62] = vqaddq_s16(step2[62], step2[61]); + step1[63] = vqaddq_s16(step2[63], step2[60]); + + // stage 6 + + btf_16_neon(step1[0], cospi[32], cospi[32], &step2[0], &step2[1]); + btf_16_lane_2_3_neon(step1[14], step1[9], c3, &step2[14], &step2[9]); + btf_16_lane_3_2_neon(step1[10], step1[13], c7, &step2[10], &step2[13]); + btf_16_lane_0_1_neon(step1[61], step1[34], c2, &step2[61], &step2[34]); + btf_16_lane_0_1_neon(step1[60], step1[35], c2, &step2[60], &step2[35]); + btf_16_lane_1_0_neon(step1[36], step1[59], c6, &step2[36], &step2[59]); + btf_16_lane_1_0_neon(step1[37], step1[58], c6, &step2[37], &step2[58]); + btf_16_lane_2_3_neon(step1[53], step1[42], c2, &step2[53], &step2[42]); + btf_16_lane_2_3_neon(step1[52], step1[43], c2, &step2[52], &step2[43]); + btf_16_lane_3_2_neon(step1[44], step1[51], c6, &step2[44], &step2[51]); + btf_16_lane_3_2_neon(step1[45], step1[50], c6, &step2[45], &step2[50]); + + step2[4] = step1[4]; + step2[5] = step1[4]; + step2[6] = step1[7]; + step2[7] = step1[7]; + step2[8] = step1[8]; + step2[11] = step1[11]; + step2[12] = step1[12]; + step2[15] = step1[15]; + step2[16] = vqaddq_s16(step1[16], step1[19]); + step2[17] = vqaddq_s16(step1[17], step1[18]); + step2[18] = 
vqsubq_s16(step1[17], step1[18]); + step2[19] = vqsubq_s16(step1[16], step1[19]); + step2[20] = vqsubq_s16(step1[23], step1[20]); + step2[21] = vqsubq_s16(step1[22], step1[21]); + step2[22] = vqaddq_s16(step1[22], step1[21]); + step2[23] = vqaddq_s16(step1[23], step1[20]); + step2[24] = vqaddq_s16(step1[24], step1[27]); + step2[25] = vqaddq_s16(step1[25], step1[26]); + step2[26] = vqsubq_s16(step1[25], step1[26]); + step2[27] = vqsubq_s16(step1[24], step1[27]); + step2[28] = vqsubq_s16(step1[31], step1[28]); + step2[29] = vqsubq_s16(step1[30], step1[29]); + step2[30] = vqaddq_s16(step1[30], step1[29]); + step2[31] = vqaddq_s16(step1[31], step1[28]); + step2[32] = step1[32]; + step2[33] = step1[33]; + step2[38] = step1[38]; + step2[39] = step1[39]; + step2[40] = step1[40]; + step2[41] = step1[41]; + step2[46] = step1[46]; + step2[47] = step1[47]; + step2[48] = step1[48]; + step2[49] = step1[49]; + step2[54] = step1[54]; + step2[55] = step1[55]; + step2[56] = step1[56]; + step2[57] = step1[57]; + step2[62] = step1[62]; + step2[63] = step1[63]; + + // stage 7 + + btf_16_lane_0_1_neon(step2[6], step2[5], c3, &step1[6], &step1[5]); + btf_16_lane_2_3_neon(step2[29], step2[18], c3, &step1[29], &step1[18]); + btf_16_lane_2_3_neon(step2[28], step2[19], c3, &step1[28], &step1[19]); + btf_16_lane_3_2_neon(step2[20], step2[27], c7, &step1[20], &step1[27]); + btf_16_lane_3_2_neon(step2[21], step2[26], c7, &step1[21], &step1[26]); + + step1[0] = step2[0]; + step1[1] = step2[1]; + step1[2] = step2[1]; + step1[3] = step2[0]; + step1[4] = step2[4]; + step1[7] = step2[7]; + step1[8] = vqaddq_s16(step2[8], step2[11]); + step1[9] = vqaddq_s16(step2[9], step2[10]); + step1[10] = vqsubq_s16(step2[9], step2[10]); + step1[11] = vqsubq_s16(step2[8], step2[11]); + step1[12] = vqsubq_s16(step2[15], step2[12]); + step1[13] = vqsubq_s16(step2[14], step2[13]); + step1[14] = vqaddq_s16(step2[14], step2[13]); + step1[15] = vqaddq_s16(step2[15], step2[12]); + step1[16] = step2[16]; + step1[17] = step2[17]; + step1[22] = step2[22]; + step1[23] = step2[23]; + step1[24] = step2[24]; + step1[25] = step2[25]; + step1[30] = step2[30]; + step1[31] = step2[31]; + step1[32] = vqaddq_s16(step2[32], step2[39]); + step1[33] = vqaddq_s16(step2[33], step2[38]); + step1[34] = vqaddq_s16(step2[34], step2[37]); + step1[35] = vqaddq_s16(step2[35], step2[36]); + step1[36] = vqsubq_s16(step2[35], step2[36]); + step1[37] = vqsubq_s16(step2[34], step2[37]); + step1[38] = vqsubq_s16(step2[33], step2[38]); + step1[39] = vqsubq_s16(step2[32], step2[39]); + step1[40] = vqsubq_s16(step2[47], step2[40]); + step1[41] = vqsubq_s16(step2[46], step2[41]); + step1[42] = vqsubq_s16(step2[45], step2[42]); + step1[43] = vqsubq_s16(step2[44], step2[43]); + step1[44] = vqaddq_s16(step2[43], step2[44]); + step1[45] = vqaddq_s16(step2[42], step2[45]); + step1[46] = vqaddq_s16(step2[41], step2[46]); + step1[47] = vqaddq_s16(step2[40], step2[47]); + step1[48] = vqaddq_s16(step2[48], step2[55]); + step1[49] = vqaddq_s16(step2[49], step2[54]); + step1[50] = vqaddq_s16(step2[50], step2[53]); + step1[51] = vqaddq_s16(step2[51], step2[52]); + step1[52] = vqsubq_s16(step2[51], step2[52]); + step1[53] = vqsubq_s16(step2[50], step2[53]); + step1[54] = vqsubq_s16(step2[49], step2[54]); + step1[55] = vqsubq_s16(step2[48], step2[55]); + step1[56] = vqsubq_s16(step2[63], step2[56]); + step1[57] = vqsubq_s16(step2[62], step2[57]); + step1[58] = vqsubq_s16(step2[61], step2[58]); + step1[59] = vqsubq_s16(step2[60], step2[59]); + step1[60] = vqaddq_s16(step2[59], step2[60]); + 
step1[61] = vqaddq_s16(step2[58], step2[61]); + step1[62] = vqaddq_s16(step2[57], step2[62]); + step1[63] = vqaddq_s16(step2[56], step2[63]); + + // stage 8 + + btf_16_lane_0_1_neon(step1[13], step1[10], c3, &step2[13], &step2[10]); + btf_16_lane_0_1_neon(step1[12], step1[11], c3, &step2[12], &step2[11]); + btf_16_lane_2_3_neon(step1[59], step1[36], c3, &step2[59], &step2[36]); + btf_16_lane_2_3_neon(step1[58], step1[37], c3, &step2[58], &step2[37]); + btf_16_lane_2_3_neon(step1[57], step1[38], c3, &step2[57], &step2[38]); + btf_16_lane_2_3_neon(step1[56], step1[39], c3, &step2[56], &step2[39]); + btf_16_lane_3_2_neon(step1[40], step1[55], c7, &step2[40], &step2[55]); + btf_16_lane_3_2_neon(step1[41], step1[54], c7, &step2[41], &step2[54]); + btf_16_lane_3_2_neon(step1[42], step1[53], c7, &step2[42], &step2[53]); + btf_16_lane_3_2_neon(step1[43], step1[52], c7, &step2[43], &step2[52]); + + step2[0] = vqaddq_s16(step1[0], step1[7]); + step2[1] = vqaddq_s16(step1[1], step1[6]); + step2[2] = vqaddq_s16(step1[2], step1[5]); + step2[3] = vqaddq_s16(step1[3], step1[4]); + step2[4] = vqsubq_s16(step1[3], step1[4]); + step2[5] = vqsubq_s16(step1[2], step1[5]); + step2[6] = vqsubq_s16(step1[1], step1[6]); + step2[7] = vqsubq_s16(step1[0], step1[7]); + step2[8] = step1[8]; + step2[9] = step1[9]; + step2[14] = step1[14]; + step2[15] = step1[15]; + step2[16] = vqaddq_s16(step1[16], step1[23]); + step2[17] = vqaddq_s16(step1[17], step1[22]); + step2[18] = vqaddq_s16(step1[18], step1[21]); + step2[19] = vqaddq_s16(step1[19], step1[20]); + step2[20] = vqsubq_s16(step1[19], step1[20]); + step2[21] = vqsubq_s16(step1[18], step1[21]); + step2[22] = vqsubq_s16(step1[17], step1[22]); + step2[23] = vqsubq_s16(step1[16], step1[23]); + step2[24] = vqsubq_s16(step1[31], step1[24]); + step2[25] = vqsubq_s16(step1[30], step1[25]); + step2[26] = vqsubq_s16(step1[29], step1[26]); + step2[27] = vqsubq_s16(step1[28], step1[27]); + step2[28] = vqaddq_s16(step1[28], step1[27]); + step2[29] = vqaddq_s16(step1[29], step1[26]); + step2[30] = vqaddq_s16(step1[30], step1[25]); + step2[31] = vqaddq_s16(step1[31], step1[24]); + step2[32] = step1[32]; + step2[33] = step1[33]; + step2[34] = step1[34]; + step2[35] = step1[35]; + step2[44] = step1[44]; + step2[45] = step1[45]; + step2[46] = step1[46]; + step2[47] = step1[47]; + step2[48] = step1[48]; + step2[49] = step1[49]; + step2[50] = step1[50]; + step2[51] = step1[51]; + step2[60] = step1[60]; + step2[61] = step1[61]; + step2[62] = step1[62]; + step2[63] = step1[63]; + + // stage 9 + idct64_stage9_neon(step2, step1, cos_bit); + + // stage 10 + idct64_stage10_neon(step1, step2, cos_bit); + + // stage 11 + + out[0] = vqaddq_s16(step2[0], step2[63]); + out[1] = vqaddq_s16(step2[1], step2[62]); + out[2] = vqaddq_s16(step2[2], step2[61]); + out[3] = vqaddq_s16(step2[3], step2[60]); + out[4] = vqaddq_s16(step2[4], step2[59]); + out[5] = vqaddq_s16(step2[5], step2[58]); + out[6] = vqaddq_s16(step2[6], step2[57]); + out[7] = vqaddq_s16(step2[7], step2[56]); + out[8] = vqaddq_s16(step2[8], step2[55]); + out[9] = vqaddq_s16(step2[9], step2[54]); + out[10] = vqaddq_s16(step2[10], step2[53]); + out[11] = vqaddq_s16(step2[11], step2[52]); + out[12] = vqaddq_s16(step2[12], step2[51]); + out[13] = vqaddq_s16(step2[13], step2[50]); + out[14] = vqaddq_s16(step2[14], step2[49]); + out[15] = vqaddq_s16(step2[15], step2[48]); + out[16] = vqaddq_s16(step2[16], step2[47]); + out[17] = vqaddq_s16(step2[17], step2[46]); + out[18] = vqaddq_s16(step2[18], step2[45]); + out[19] = vqaddq_s16(step2[19], 
step2[44]); + out[20] = vqaddq_s16(step2[20], step2[43]); + out[21] = vqaddq_s16(step2[21], step2[42]); + out[22] = vqaddq_s16(step2[22], step2[41]); + out[23] = vqaddq_s16(step2[23], step2[40]); + out[24] = vqaddq_s16(step2[24], step2[39]); + out[25] = vqaddq_s16(step2[25], step2[38]); + out[26] = vqaddq_s16(step2[26], step2[37]); + out[27] = vqaddq_s16(step2[27], step2[36]); + out[28] = vqaddq_s16(step2[28], step2[35]); + out[29] = vqaddq_s16(step2[29], step2[34]); + out[30] = vqaddq_s16(step2[30], step2[33]); + out[31] = vqaddq_s16(step2[31], step2[32]); + out[32] = vqsubq_s16(step2[31], step2[32]); + out[33] = vqsubq_s16(step2[30], step2[33]); + out[34] = vqsubq_s16(step2[29], step2[34]); + out[35] = vqsubq_s16(step2[28], step2[35]); + out[36] = vqsubq_s16(step2[27], step2[36]); + out[37] = vqsubq_s16(step2[26], step2[37]); + out[38] = vqsubq_s16(step2[25], step2[38]); + out[39] = vqsubq_s16(step2[24], step2[39]); + out[40] = vqsubq_s16(step2[23], step2[40]); + out[41] = vqsubq_s16(step2[22], step2[41]); + out[42] = vqsubq_s16(step2[21], step2[42]); + out[43] = vqsubq_s16(step2[20], step2[43]); + out[44] = vqsubq_s16(step2[19], step2[44]); + out[45] = vqsubq_s16(step2[18], step2[45]); + out[46] = vqsubq_s16(step2[17], step2[46]); + out[47] = vqsubq_s16(step2[16], step2[47]); + out[48] = vqsubq_s16(step2[15], step2[48]); + out[49] = vqsubq_s16(step2[14], step2[49]); + out[50] = vqsubq_s16(step2[13], step2[50]); + out[51] = vqsubq_s16(step2[12], step2[51]); + out[52] = vqsubq_s16(step2[11], step2[52]); + out[53] = vqsubq_s16(step2[10], step2[53]); + out[54] = vqsubq_s16(step2[9], step2[54]); + out[55] = vqsubq_s16(step2[8], step2[55]); + out[56] = vqsubq_s16(step2[7], step2[56]); + out[57] = vqsubq_s16(step2[6], step2[57]); + out[58] = vqsubq_s16(step2[5], step2[58]); + out[59] = vqsubq_s16(step2[4], step2[59]); + out[60] = vqsubq_s16(step2[3], step2[60]); + out[61] = vqsubq_s16(step2[2], step2[61]); + out[62] = vqsubq_s16(step2[1], step2[62]); + out[63] = vqsubq_s16(step2[0], step2[63]); +} + +// Functions for blocks with eob at DC and within +// topleft 8x8, 16x16, 32x32 corner +static const transform_neon + lowbd_txfm_all_1d_zeros_w_arr[TX_SIZES][ITX_TYPES_1D][4] = { + { + { NULL, NULL, NULL, NULL }, + { NULL, NULL, NULL, NULL }, + { NULL, NULL, NULL, NULL }, + }, + { { idct8_low1_neon, idct8_neon, NULL, NULL }, + { iadst8_low1_neon, iadst8_neon, NULL, NULL }, + { NULL, NULL, NULL, NULL } }, + { + { idct16_low1_neon, idct16_low8_neon, idct16_neon, NULL }, + { iadst16_low1_neon, iadst16_low8_neon, iadst16_neon, NULL }, + { NULL, NULL, NULL, NULL }, + }, + { { idct32_low1_neon, idct32_low8_neon, idct32_low16_neon, idct32_neon }, + { NULL, NULL, NULL, NULL }, + { NULL, NULL, NULL, NULL } }, + { { idct64_low1_neon, idct64_low8_neon, idct64_low16_neon, + idct64_low32_neon }, + { NULL, NULL, NULL, NULL }, + { NULL, NULL, NULL, NULL } } + }; + +static INLINE void lowbd_inv_txfm2d_add_idtx_neon(const int32_t *input, + uint8_t *output, int stride, + TX_TYPE tx_type, + TX_SIZE tx_size, int eob) { + (void)tx_type; + int16x8_t a[32 * 4]; + int16x8_t b[32 * 4]; + int eobx, eoby; + get_eobx_eoby_scan_default(&eobx, &eoby, tx_size, eob); + const int8_t *shift = av1_inv_txfm_shift_ls[tx_size]; + const int txw_idx = get_txw_idx(tx_size); + const int txh_idx = get_txh_idx(tx_size); + const int txfm_size_col = tx_size_wide[tx_size]; + const int txfm_size_row = tx_size_high[tx_size]; + lowbd_inv_txfm2d_memset_neon(&a[0], (txfm_size_col * (txfm_size_row) >> 3), + 0); + lowbd_inv_txfm2d_memset_neon(&b[0], 
(txfm_size_col * (txfm_size_row) >> 3), + 0); + const int buf_size_w_div8 = txfm_size_col >> 3; + const int rect_type = get_rect_tx_log_ratio(txfm_size_col, txfm_size_row); + const int buf_size_nonzero_h_div8 = (eoby + 8) >> 3; + const int buf_size_nonzero_w = (eobx + 8) >> 3 << 3; + const int input_stride = txfm_size_row; + int temp_b = 0; + + for (int i = 0; i < buf_size_nonzero_h_div8; i++) { + int16x8_t *cur_a = &a[i * txfm_size_col]; + load_buffer_32bit_to_16bit_neon(input, input_stride, cur_a, + buf_size_nonzero_w); + input += 8; + if (abs(rect_type) == 1) { + round_shift_for_rect(cur_a, cur_a, buf_size_nonzero_w); + } + identity_txfm_round_neon(cur_a, cur_a, txw_idx, buf_size_nonzero_w, + -shift[0]); + for (int j = 0; j < buf_size_w_div8; ++j) { + transpose_arrays_s16_8x8(&cur_a[j * 8], &b[temp_b + txfm_size_row * j]); + } + temp_b += 8; + } + for (int j = 0; j < buf_size_w_div8; ++j) { + identity_txfm_round_neon(&b[j * txfm_size_row], &b[j * txfm_size_row], + txh_idx, txfm_size_row, -shift[1]); + } + if (txfm_size_col >= 16) { + for (int i = 0; i < (txfm_size_col >> 4); i++) { + lowbd_add_flip_buffer_16xn_neon( + &b[i * txfm_size_row * 2], output + 16 * i, stride, 0, txfm_size_row); + } + } else if (txfm_size_col == 8) { + lowbd_add_flip_buffer_8xn_neon(b, output, stride, 0, txfm_size_row); + } +} + +static INLINE void lowbd_inv_txfm2d_add_v_identity_neon( + const int32_t *input, uint8_t *output, int stride, TX_TYPE tx_type, + TX_SIZE tx_size, int eob) { + int16x8_t a[16 * 2]; + int16x8_t b[16 * 2]; + int eobx, eoby, ud_flip, lr_flip; + get_eobx_eoby_scan_v_identity(&eobx, &eoby, tx_size, eob); + const int8_t *shift = av1_inv_txfm_shift_ls[tx_size]; + const int txw_idx = get_txw_idx(tx_size); + const int txh_idx = get_txh_idx(tx_size); + const int txfm_size_col = tx_size_wide[tx_size]; + const int txfm_size_row = tx_size_high[tx_size]; + lowbd_inv_txfm2d_memset_neon(&b[0], (txfm_size_col * (txfm_size_row) >> 3), + 0); + const int rect_type = get_rect_tx_log_ratio(txfm_size_col, txfm_size_row); + const int buf_size_w_div8 = txfm_size_col >> 3; + const int buf_size_nonzero_h_div8 = (eoby + 8) >> 3; + const int buf_size_nonzero_w = (eobx + 8) >> 3 << 3; + const int input_stride = txfm_size_row; + const int fun_idx_x = lowbd_txfm_all_1d_zeros_idx[eobx]; + int temp_b = 0; + const transform_neon row_txfm = + lowbd_txfm_all_1d_zeros_w_arr[txw_idx][hitx_1d_tab[tx_type]][fun_idx_x]; + + assert(row_txfm != NULL); + + get_flip_cfg(tx_type, &ud_flip, &lr_flip); + + for (int i = 0; i < buf_size_nonzero_h_div8; i++) { + int16x8_t *cur_a = &a[i * txfm_size_col]; + load_buffer_32bit_to_16bit_neon(input, input_stride, cur_a, + buf_size_nonzero_w); + input += 8; + if (abs(rect_type) == 1) { + round_shift_for_rect(cur_a, cur_a, buf_size_nonzero_w); + } + row_txfm(cur_a, cur_a, INV_COS_BIT); + av1_round_shift_array_16_neon(cur_a, txfm_size_col, -shift[0]); + if (lr_flip == 1) { + for (int j = 0; j < buf_size_w_div8; ++j) { + flip_buf_ud_neon(&cur_a[j * 8], 8); + transpose_arrays_s16_8x8( + &cur_a[j * 8], + &b[temp_b + txfm_size_row * (buf_size_w_div8 - 1 - j)]); + } + temp_b += 8; + } else { + for (int j = 0; j < buf_size_w_div8; ++j) { + transpose_arrays_s16_8x8(&cur_a[j * 8], &b[temp_b + txfm_size_row * j]); + } + temp_b += 8; + } + } + for (int j = 0; j < buf_size_w_div8; ++j) { + identity_txfm_round_neon(&b[j * txfm_size_row], &b[j * txfm_size_row], + txh_idx, txfm_size_row, -shift[1]); + } + if (txfm_size_col >= 16) { + for (int i = 0; i < (txfm_size_col >> 4); i++) { + 
lowbd_add_flip_buffer_16xn_neon( + &b[i * txfm_size_row * 2], output + 16 * i, stride, 0, txfm_size_row); + } + } else if (txfm_size_col == 8) { + lowbd_add_flip_buffer_8xn_neon(b, output, stride, 0, txfm_size_row); + } +} + +static INLINE void lowbd_inv_txfm2d_add_h_identity_neon( + const int32_t *input, uint8_t *output, int stride, TX_TYPE tx_type, + TX_SIZE tx_size, int eob) { + int16x8_t a[16 * 2]; + int16x8_t b[16 * 2]; + int eobx, eoby, ud_flip, lr_flip; + get_eobx_eoby_scan_h_identity(&eobx, &eoby, tx_size, eob); + const int8_t *shift = av1_inv_txfm_shift_ls[tx_size]; + const int txw_idx = get_txw_idx(tx_size); + const int txh_idx = get_txh_idx(tx_size); + const int txfm_size_col = tx_size_wide[tx_size]; + const int txfm_size_row = tx_size_high[tx_size]; + lowbd_inv_txfm2d_memset_neon(&a[0], (txfm_size_col * (txfm_size_row) >> 3), + 0); + const int buf_size_w_div8 = txfm_size_col >> 3; + const int rect_type = get_rect_tx_log_ratio(txfm_size_col, txfm_size_row); + const int buf_size_nonzero_h_div8 = (eoby + 8) >> 3; + const int buf_size_nonzero_w = (eobx + 8) >> 3 << 3; + const int input_stride = txfm_size_row; + const int fun_idx_y = lowbd_txfm_all_1d_zeros_idx[eoby]; + int temp_b = 0; + const transform_neon col_txfm = + lowbd_txfm_all_1d_zeros_w_arr[txh_idx][vitx_1d_tab[tx_type]][fun_idx_y]; + + assert(col_txfm != NULL); + + get_flip_cfg(tx_type, &ud_flip, &lr_flip); + + for (int i = 0; i < buf_size_nonzero_h_div8; i++) { + int16x8_t *cur_a = &a[i * txfm_size_col]; + load_buffer_32bit_to_16bit_neon(input, input_stride, cur_a, + buf_size_nonzero_w); + input += 8; + if (abs(rect_type) == 1) { + round_shift_for_rect(cur_a, cur_a, buf_size_nonzero_w); + } + identity_txfm_round_neon(cur_a, cur_a, txw_idx, buf_size_nonzero_w, + -shift[0]); + for (int j = 0; j < buf_size_w_div8; ++j) { + transpose_arrays_s16_8x8(&cur_a[j * 8], &b[temp_b + txfm_size_row * j]); + } + temp_b += 8; + } + for (int j = 0; j < buf_size_w_div8; ++j) { + col_txfm(&b[j * txfm_size_row], &b[j * txfm_size_row], INV_COS_BIT); + av1_round_shift_array_16_neon(&b[j * txfm_size_row], txfm_size_row, + -shift[1]); + } + if (txfm_size_col >= 16) { + for (int i = 0; i < (txfm_size_col >> 4); i++) { + lowbd_add_flip_buffer_16xn_neon(&b[i * txfm_size_row * 2], + output + 16 * i, stride, ud_flip, + txfm_size_row); + } + } else if (txfm_size_col == 8) { + lowbd_add_flip_buffer_8xn_neon(b, output, stride, ud_flip, txfm_size_row); + } +} + +static INLINE void lowbd_inv_txfm2d_add_4x4_neon(const int32_t *input, + uint8_t *output, int stride, + TX_TYPE tx_type, int eob) { + (void)eob; + TX_SIZE tx_size = TX_4X4; + DECLARE_ALIGNED(32, int, txfm_buf[4 * 4 + 8 + 8]); + int32_t *temp_in = txfm_buf; + + const int8_t *shift = av1_inv_txfm_shift_ls[tx_size]; + const int txw_idx = get_txw_idx(tx_size); + const int txh_idx = get_txh_idx(tx_size); + const int txfm_size_col = tx_size_wide[tx_size]; + const int txfm_size_row = tx_size_high[tx_size]; + const int buf_offset = AOMMAX(txfm_size_row, txfm_size_col); + int32_t *temp_out = temp_in + buf_offset; + int32_t *buf = temp_out + buf_offset; + int32_t *buf_ptr = buf; + const int8_t stage_range[MAX_TXFM_STAGE_NUM] = { 16, 16, 16, 16, 16, 16, 16 }; + int r; + const transform_1d_neon row_txfm = + lowbd_txfm_all_1d_arr[txw_idx][hitx_1d_tab[tx_type]]; + const transform_1d_neon col_txfm = + lowbd_txfm_all_1d_arr[txh_idx][vitx_1d_tab[tx_type]]; + + int ud_flip, lr_flip; + get_flip_cfg(tx_type, &ud_flip, &lr_flip); + + for (int i = 0; i < txfm_size_row; i++) { + for (int c = 0; c < txfm_size_col; ++c) 
+ temp_in[c] = input[c * txfm_size_row]; + row_txfm(temp_in, buf_ptr, INV_COS_BIT, stage_range); + + input++; + buf_ptr += txfm_size_col; + } + + for (int c = 0; c < txfm_size_col; ++c) { + if (lr_flip == 0) { + for (r = 0; r < txfm_size_row; ++r) + temp_in[r] = buf[r * txfm_size_col + c]; + } else { + // flip left right + for (r = 0; r < txfm_size_row; ++r) + temp_in[r] = buf[r * txfm_size_col + (txfm_size_col - c - 1)]; + } + clamp_buf(temp_in, txfm_size_row, 16); + col_txfm(temp_in, temp_out, INV_COS_BIT, stage_range); + av1_round_shift_array(temp_out, txfm_size_row, -shift[1]); + + if (ud_flip == 0) { + for (r = 0; r < txfm_size_row; ++r) { + output[r * stride + c] = + clip_pixel(output[r * stride + c] + temp_out[r]); + } + } else { + // flip upside down + for (r = 0; r < txfm_size_row; ++r) { + output[r * stride + c] = clip_pixel(output[r * stride + c] + + temp_out[txfm_size_row - r - 1]); + } + } + } +} + +void lowbd_inv_txfm2d_add_4x8_neon(const int32_t *input, uint8_t *output, + int stride, TX_TYPE tx_type, int eob) { + (void)eob; + TX_SIZE tx_size = TX_4X8; + DECLARE_ALIGNED(32, int, txfm_buf[4 * 8 + 8 + 8]); + int32_t *temp_in = txfm_buf; + + const int8_t *shift = av1_inv_txfm_shift_ls[tx_size]; + const int txw_idx = get_txw_idx(tx_size); + const int txh_idx = get_txh_idx(tx_size); + const int txfm_size_col = tx_size_wide[tx_size]; + const int txfm_size_row = tx_size_high[tx_size]; + const int buf_offset = AOMMAX(txfm_size_row, txfm_size_col); + int32_t *temp_out = temp_in + buf_offset; + int32_t *buf = temp_out + buf_offset; + int32_t *buf_ptr = buf; + const int8_t stage_range[MAX_TXFM_STAGE_NUM] = { 16, 16, 16, 16, + 16, 16, 16, 16 }; + int r; + const transform_1d_neon row_txfm = + lowbd_txfm_all_1d_arr[txw_idx][hitx_1d_tab[tx_type]]; + const transform_1d_neon col_txfm = + lowbd_txfm_all_1d_arr[txh_idx][vitx_1d_tab[tx_type]]; + + int ud_flip, lr_flip; + get_flip_cfg(tx_type, &ud_flip, &lr_flip); + + for (int i = 0; i < txfm_size_row; i++) { + for (int c = 0; c < txfm_size_col; c++) + temp_in[c] = round_shift((int64_t)input[c * txfm_size_row] * NewInvSqrt2, + NewSqrt2Bits); + + row_txfm(temp_in, buf_ptr, INV_COS_BIT, stage_range); + input++; + buf_ptr += txfm_size_col; + } + + for (int c = 0; c < txfm_size_col; ++c) { + if (lr_flip == 0) { + for (r = 0; r < txfm_size_row; ++r) + temp_in[r] = buf[r * txfm_size_col + c]; + } else { + // flip left right + for (r = 0; r < txfm_size_row; ++r) + temp_in[r] = buf[r * txfm_size_col + (txfm_size_col - c - 1)]; + } + clamp_buf(temp_in, txfm_size_row, 16); + col_txfm(temp_in, temp_out, INV_COS_BIT, stage_range); + av1_round_shift_array(temp_out, txfm_size_row, -shift[1]); + + if (ud_flip == 0) { + for (r = 0; r < txfm_size_row; ++r) { + output[r * stride + c] = + clip_pixel(output[r * stride + c] + temp_out[r]); + } + } else { + // flip upside down + for (r = 0; r < txfm_size_row; ++r) { + output[r * stride + c] = clip_pixel(output[r * stride + c] + + temp_out[txfm_size_row - r - 1]); + } + } + } +} + +void lowbd_inv_txfm2d_add_8x4_neon(const int32_t *input, uint8_t *output, + int stride, TX_TYPE tx_type, int eob) { + (void)eob; + TX_SIZE tx_size = TX_8X4; + DECLARE_ALIGNED(32, int, txfm_buf[8 * 4 + 8 + 8]); + int32_t *temp_in = txfm_buf; + + const int8_t *shift = av1_inv_txfm_shift_ls[tx_size]; + const int txw_idx = get_txw_idx(tx_size); + const int txh_idx = get_txh_idx(tx_size); + const int txfm_size_col = tx_size_wide[tx_size]; + const int txfm_size_row = tx_size_high[tx_size]; + const int buf_offset = AOMMAX(txfm_size_row, 
txfm_size_col); + int32_t *temp_out = temp_in + buf_offset; + int32_t *buf = temp_out + buf_offset; + int32_t *buf_ptr = buf; + const int8_t stage_range[MAX_TXFM_STAGE_NUM] = { 16, 16, 16, 16, + 16, 16, 16, 16 }; + int r; + const transform_1d_neon row_txfm = + lowbd_txfm_all_1d_arr[txw_idx][hitx_1d_tab[tx_type]]; + const transform_1d_neon col_txfm = + lowbd_txfm_all_1d_arr[txh_idx][vitx_1d_tab[tx_type]]; + + int ud_flip, lr_flip; + get_flip_cfg(tx_type, &ud_flip, &lr_flip); + + for (int i = 0; i < txfm_size_row; i++) { + for (int c = 0; c < txfm_size_col; c++) + temp_in[c] = round_shift((int64_t)input[c * txfm_size_row] * NewInvSqrt2, + NewSqrt2Bits); + + row_txfm(temp_in, buf_ptr, INV_COS_BIT, stage_range); + input++; + buf_ptr += txfm_size_col; + } + + for (int c = 0; c < txfm_size_col; ++c) { + if (lr_flip == 0) { + for (r = 0; r < txfm_size_row; ++r) + temp_in[r] = buf[r * txfm_size_col + c]; + } else { + // flip left right + for (r = 0; r < txfm_size_row; ++r) + temp_in[r] = buf[r * txfm_size_col + (txfm_size_col - c - 1)]; + } + clamp_buf(temp_in, txfm_size_row, 16); + col_txfm(temp_in, temp_out, INV_COS_BIT, stage_range); + av1_round_shift_array(temp_out, txfm_size_row, -shift[1]); + + if (ud_flip == 0) { + for (r = 0; r < txfm_size_row; ++r) { + output[r * stride + c] = + clip_pixel(output[r * stride + c] + temp_out[r]); + } + } else { + // flip upside down + for (r = 0; r < txfm_size_row; ++r) { + output[r * stride + c] = clip_pixel(output[r * stride + c] + + temp_out[txfm_size_row - r - 1]); + } + } + } +} + +void lowbd_inv_txfm2d_add_4x16_neon(const int32_t *input, uint8_t *output, + int stride, TX_TYPE tx_type, int eob) { + (void)eob; + TX_SIZE tx_size = TX_4X16; + DECLARE_ALIGNED(32, int, txfm_buf[4 * 16 + 16 + 16]); + int32_t *temp_in = txfm_buf; + + const int8_t *shift = av1_inv_txfm_shift_ls[tx_size]; + const int txw_idx = get_txw_idx(tx_size); + const int txh_idx = get_txh_idx(tx_size); + const int txfm_size_col = tx_size_wide[tx_size]; + const int txfm_size_row = tx_size_high[tx_size]; + const int buf_offset = AOMMAX(txfm_size_row, txfm_size_col); + int32_t *temp_out = temp_in + buf_offset; + int32_t *buf = temp_out + buf_offset; + int32_t *buf_ptr = buf; + const int8_t stage_range[MAX_TXFM_STAGE_NUM] = { 16, 16, 16, 16, 16, + 16, 16, 16, 16, 16 }; + int r; + const transform_1d_neon row_txfm = + lowbd_txfm_all_1d_arr[txw_idx][hitx_1d_tab[tx_type]]; + const transform_1d_neon col_txfm = + lowbd_txfm_all_1d_arr[txh_idx][vitx_1d_tab[tx_type]]; + + int ud_flip, lr_flip; + get_flip_cfg(tx_type, &ud_flip, &lr_flip); + + for (int i = 0; i < txfm_size_row; i++) { + for (int c = 0; c < txfm_size_col; c++) + temp_in[c] = input[c * txfm_size_row]; + row_txfm(temp_in, buf_ptr, INV_COS_BIT, stage_range); + av1_round_shift_array(buf_ptr, txfm_size_col, -shift[0]); + input++; + buf_ptr += txfm_size_col; + } + + for (int c = 0; c < txfm_size_col; ++c) { + if (lr_flip == 0) { + for (r = 0; r < txfm_size_row; ++r) + temp_in[r] = buf[r * txfm_size_col + c]; + } else { + // flip left right + for (r = 0; r < txfm_size_row; ++r) + temp_in[r] = buf[r * txfm_size_col + (txfm_size_col - c - 1)]; + } + clamp_buf(temp_in, txfm_size_row, 16); + col_txfm(temp_in, temp_out, INV_COS_BIT, stage_range); + av1_round_shift_array(temp_out, txfm_size_row, -shift[1]); + + if (ud_flip == 0) { + for (r = 0; r < txfm_size_row; ++r) { + output[r * stride + c] = + clip_pixel(output[r * stride + c] + temp_out[r]); + } + } else { + // flip upside down + for (r = 0; r < txfm_size_row; ++r) { + output[r * stride + c] = 
clip_pixel(output[r * stride + c] + + temp_out[txfm_size_row - r - 1]); + } + } + } +} + +void lowbd_inv_txfm2d_add_16x4_neon(const int32_t *input, uint8_t *output, + int stride, TX_TYPE tx_type, int eob) { + (void)eob; + TX_SIZE tx_size = TX_16X4; + DECLARE_ALIGNED(32, int, txfm_buf[16 * 4 + 16 + 16]); + int32_t *temp_in = txfm_buf; + + const int8_t *shift = av1_inv_txfm_shift_ls[tx_size]; + const int txw_idx = get_txw_idx(tx_size); + const int txh_idx = get_txh_idx(tx_size); + const int txfm_size_col = tx_size_wide[tx_size]; + const int txfm_size_row = tx_size_high[tx_size]; + const int buf_offset = AOMMAX(txfm_size_row, txfm_size_col); + int32_t *temp_out = temp_in + buf_offset; + int32_t *buf = temp_out + buf_offset; + int32_t *buf_ptr = buf; + const int8_t stage_range[MAX_TXFM_STAGE_NUM] = { 16, 16, 16, 16, 16, + 16, 16, 16, 16, 16 }; + int r; + const transform_1d_neon row_txfm = + lowbd_txfm_all_1d_arr[txw_idx][hitx_1d_tab[tx_type]]; + const transform_1d_neon col_txfm = + lowbd_txfm_all_1d_arr[txh_idx][vitx_1d_tab[tx_type]]; + + int ud_flip, lr_flip; + get_flip_cfg(tx_type, &ud_flip, &lr_flip); + + for (int i = 0; i < txfm_size_row; i++) { + for (int c = 0; c < txfm_size_col; c++) + temp_in[c] = input[c * txfm_size_row]; + row_txfm(temp_in, buf_ptr, INV_COS_BIT, stage_range); + av1_round_shift_array(buf_ptr, txfm_size_col, -shift[0]); + input++; + buf_ptr += txfm_size_col; + } + + for (int c = 0; c < txfm_size_col; ++c) { + if (lr_flip == 0) { + for (r = 0; r < txfm_size_row; ++r) + temp_in[r] = buf[r * txfm_size_col + c]; + } else { + // flip left right + for (r = 0; r < txfm_size_row; ++r) + temp_in[r] = buf[r * txfm_size_col + (txfm_size_col - c - 1)]; + } + clamp_buf(temp_in, txfm_size_row, 16); + col_txfm(temp_in, temp_out, INV_COS_BIT, stage_range); + av1_round_shift_array(temp_out, txfm_size_row, -shift[1]); + + if (ud_flip == 0) { + for (r = 0; r < txfm_size_row; ++r) { + output[r * stride + c] = + clip_pixel(output[r * stride + c] + temp_out[r]); + } + } else { + // flip upside down + for (r = 0; r < txfm_size_row; ++r) { + output[r * stride + c] = clip_pixel(output[r * stride + c] + + temp_out[txfm_size_row - r - 1]); + } + } + } +} + +static INLINE void lowbd_inv_txfm2d_add_no_identity_neon( + const int32_t *input, uint8_t *output, int stride, TX_TYPE tx_type, + TX_SIZE tx_size, int eob) { + int16x8_t a[64 * 8]; + int16x8_t b[64 * 8]; + int eobx, eoby, ud_flip, lr_flip; + get_eobx_eoby_scan_default(&eobx, &eoby, tx_size, eob); + const int8_t *shift = av1_inv_txfm_shift_ls[tx_size]; + const int txw_idx = get_txw_idx(tx_size); + const int txh_idx = get_txh_idx(tx_size); + const int txfm_size_col = tx_size_wide[tx_size]; + const int txfm_size_row = tx_size_high[tx_size]; + const int rect_type = get_rect_tx_log_ratio(txfm_size_col, txfm_size_row); + const int buf_size_w_div8 = txfm_size_col >> 3; + const int buf_size_nonzero_h_div8 = (eoby + 8) >> 3; + const int buf_size_nonzero_w = (eobx + 8) >> 3 << 3; + const int input_stride = AOMMIN(32, txfm_size_row); + const int fun_idx_x = lowbd_txfm_all_1d_zeros_idx[eobx]; + const int fun_idx_y = lowbd_txfm_all_1d_zeros_idx[eoby]; + int temp_b = 0; + + const transform_neon row_txfm = + lowbd_txfm_all_1d_zeros_w_arr[txw_idx][hitx_1d_tab[tx_type]][fun_idx_x]; + const transform_neon col_txfm = + lowbd_txfm_all_1d_zeros_w_arr[txh_idx][vitx_1d_tab[tx_type]][fun_idx_y]; + + assert(col_txfm != NULL); + assert(row_txfm != NULL); + + get_flip_cfg(tx_type, &ud_flip, &lr_flip); + + for (int i = 0; i < buf_size_nonzero_h_div8; i++) { + int16x8_t 
*cur_a = &a[i * txfm_size_col]; + load_buffer_32bit_to_16bit_neon(input, input_stride, cur_a, + buf_size_nonzero_w); + input += 8; + if (abs(rect_type) == 1) { + round_shift_for_rect(cur_a, cur_a, buf_size_nonzero_w); + } + row_txfm(cur_a, cur_a, INV_COS_BIT); + av1_round_shift_array_16_neon(cur_a, txfm_size_col, -shift[0]); + if (lr_flip == 1) { + for (int j = 0; j < buf_size_w_div8; ++j) { + flip_buf_ud_neon(&cur_a[j * 8], 8); + transpose_arrays_s16_8x8( + &cur_a[j * 8], + &b[temp_b + txfm_size_row * (buf_size_w_div8 - 1 - j)]); + } + temp_b += 8; + } else { + for (int j = 0; j < buf_size_w_div8; ++j) { + transpose_arrays_s16_8x8(&cur_a[j * 8], &b[temp_b + txfm_size_row * j]); + } + temp_b += 8; + } + } + for (int j = 0; j < buf_size_w_div8; ++j) { + col_txfm(&b[j * txfm_size_row], &b[j * txfm_size_row], INV_COS_BIT); + av1_round_shift_array_16_neon(&b[j * txfm_size_row], txfm_size_row, + -shift[1]); + } + + if (txfm_size_col >= 16) { + for (int i = 0; i < (txfm_size_col >> 4); i++) { + lowbd_add_flip_buffer_16xn_neon(&b[i * txfm_size_row * 2], + output + 16 * i, stride, ud_flip, + txfm_size_row); + } + } else if (txfm_size_col == 8) { + lowbd_add_flip_buffer_8xn_neon(b, output, stride, ud_flip, txfm_size_row); + } +} + +static INLINE void lowbd_inv_txfm2d_add_universe_neon( + const int32_t *input, uint8_t *output, int stride, TX_TYPE tx_type, + TX_SIZE tx_size, int eob) { + switch (tx_type) { + case IDTX: + lowbd_inv_txfm2d_add_idtx_neon(input, output, stride, tx_type, tx_size, + eob); + break; + + case H_DCT: + case H_ADST: + case H_FLIPADST: + lowbd_inv_txfm2d_add_v_identity_neon(input, output, stride, tx_type, + tx_size, eob); + break; + + case V_DCT: + case V_ADST: + case V_FLIPADST: + lowbd_inv_txfm2d_add_h_identity_neon(input, output, stride, tx_type, + tx_size, eob); + break; + + default: + lowbd_inv_txfm2d_add_no_identity_neon(input, output, stride, tx_type, + tx_size, eob); + break; + } +} + +void av1_lowbd_inv_txfm2d_add_neon(const int32_t *input, uint8_t *output, + int stride, TX_TYPE tx_type, TX_SIZE tx_size, + int eob) { + switch (tx_size) { + case TX_4X4: + lowbd_inv_txfm2d_add_4x4_neon(input, output, stride, tx_type, eob); + break; + + case TX_4X8: + lowbd_inv_txfm2d_add_4x8_neon(input, output, stride, tx_type, eob); + break; + + case TX_8X4: + lowbd_inv_txfm2d_add_8x4_neon(input, output, stride, tx_type, eob); + break; + + case TX_4X16: + lowbd_inv_txfm2d_add_4x16_neon(input, output, stride, tx_type, eob); + break; + + case TX_16X4: + lowbd_inv_txfm2d_add_16x4_neon(input, output, stride, tx_type, eob); + break; + + default: + lowbd_inv_txfm2d_add_universe_neon(input, output, stride, tx_type, + tx_size, eob); + break; + } +} +void av1_inv_txfm_add_neon(const tran_low_t *dqcoeff, uint8_t *dst, int stride, + const TxfmParam *txfm_param) { + const TX_TYPE tx_type = txfm_param->tx_type; + if (!txfm_param->lossless) { + av1_lowbd_inv_txfm2d_add_neon(dqcoeff, dst, stride, tx_type, + txfm_param->tx_size, txfm_param->eob); + } else { + av1_inv_txfm_add_c(dqcoeff, dst, stride, txfm_param); + } +} diff --git a/third_party/aom/av1/common/arm/av1_inv_txfm_neon.h b/third_party/aom/av1/common/arm/av1_inv_txfm_neon.h new file mode 100644 index 0000000000..97099c2042 --- /dev/null +++ b/third_party/aom/av1/common/arm/av1_inv_txfm_neon.h @@ -0,0 +1,154 @@ +/* + * Copyright (c) 2018, Alliance for Open Media. All rights reserved + * + * This source code is subject to the terms of the BSD 2 Clause License and + * the Alliance for Open Media Patent License 1.0. 
If the BSD 2 Clause License + * was not distributed with this source code in the LICENSE file, you can + * obtain it at www.aomedia.org/license/software. If the Alliance for Open + * Media Patent License 1.0 was not distributed with this source code in the + * PATENTS file, you can obtain it at www.aomedia.org/license/patent. + */ +#ifndef AOM_AV1_COMMON_ARM_AV1_INV_TXFM_NEON_H_ +#define AOM_AV1_COMMON_ARM_AV1_INV_TXFM_NEON_H_ + +#include "config/aom_config.h" +#include "config/av1_rtcd.h" + +#include "aom/aom_integer.h" +#include "av1/common/enums.h" +#include "av1/common/av1_inv_txfm1d.h" +#include "av1/common/av1_inv_txfm1d_cfg.h" +#include "av1/common/av1_txfm.h" + +typedef void (*transform_1d_neon)(const int32_t *input, int32_t *output, + const int8_t cos_bit, + const int8_t *stage_ptr); +typedef void (*transform_neon)(int16x8_t *input, int16x8_t *output, + int8_t cos_bit); + +DECLARE_ALIGNED(16, static const int16_t, av1_eob_to_eobxy_8x8_default[8]) = { + 0x0707, 0x0707, 0x0707, 0x0707, 0x0707, 0x0707, 0x0707, 0x0707, +}; + +DECLARE_ALIGNED(16, static const int16_t, + av1_eob_to_eobxy_16x16_default[16]) = { + 0x0707, 0x0707, 0x0f0f, 0x0f0f, 0x0f0f, 0x0f0f, 0x0f0f, 0x0f0f, + 0x0f0f, 0x0f0f, 0x0f0f, 0x0f0f, 0x0f0f, 0x0f0f, 0x0f0f, 0x0f0f, +}; + +DECLARE_ALIGNED(16, static const int16_t, + av1_eob_to_eobxy_32x32_default[32]) = { + 0x0707, 0x0f0f, 0x0f0f, 0x0f0f, 0x1f1f, 0x1f1f, 0x1f1f, 0x1f1f, + 0x1f1f, 0x1f1f, 0x1f1f, 0x1f1f, 0x1f1f, 0x1f1f, 0x1f1f, 0x1f1f, + 0x1f1f, 0x1f1f, 0x1f1f, 0x1f1f, 0x1f1f, 0x1f1f, 0x1f1f, 0x1f1f, + 0x1f1f, 0x1f1f, 0x1f1f, 0x1f1f, 0x1f1f, 0x1f1f, 0x1f1f, 0x1f1f, +}; + +DECLARE_ALIGNED(16, static const int16_t, av1_eob_to_eobxy_8x16_default[16]) = { + 0x0707, 0x0707, 0x0707, 0x0707, 0x0707, 0x0f07, 0x0f07, 0x0f07, + 0x0f07, 0x0f07, 0x0f07, 0x0f07, 0x0f07, 0x0f07, 0x0f07, 0x0f07, +}; + +DECLARE_ALIGNED(16, static const int16_t, av1_eob_to_eobxy_16x8_default[8]) = { + 0x0707, 0x0707, 0x070f, 0x070f, 0x070f, 0x070f, 0x070f, 0x070f, +}; + +DECLARE_ALIGNED(16, static const int16_t, + av1_eob_to_eobxy_16x32_default[32]) = { + 0x0707, 0x0707, 0x0f0f, 0x0f0f, 0x0f0f, 0x0f0f, 0x0f0f, 0x0f0f, + 0x0f0f, 0x1f0f, 0x1f0f, 0x1f0f, 0x1f0f, 0x1f0f, 0x1f0f, 0x1f0f, + 0x1f0f, 0x1f0f, 0x1f0f, 0x1f0f, 0x1f0f, 0x1f0f, 0x1f0f, 0x1f0f, + 0x1f0f, 0x1f0f, 0x1f0f, 0x1f0f, 0x1f0f, 0x1f0f, 0x1f0f, 0x1f0f, +}; + +DECLARE_ALIGNED(16, static const int16_t, + av1_eob_to_eobxy_32x16_default[16]) = { + 0x0707, 0x0f0f, 0x0f0f, 0x0f0f, 0x0f1f, 0x0f1f, 0x0f1f, 0x0f1f, + 0x0f1f, 0x0f1f, 0x0f1f, 0x0f1f, 0x0f1f, 0x0f1f, 0x0f1f, 0x0f1f, +}; + +DECLARE_ALIGNED(16, static const int16_t, av1_eob_to_eobxy_8x32_default[32]) = { + 0x0707, 0x0707, 0x0707, 0x0707, 0x0707, 0x0f07, 0x0f07, 0x0f07, + 0x0f07, 0x0f07, 0x0f07, 0x0f07, 0x0f07, 0x1f07, 0x1f07, 0x1f07, + 0x1f07, 0x1f07, 0x1f07, 0x1f07, 0x1f07, 0x1f07, 0x1f07, 0x1f07, + 0x1f07, 0x1f07, 0x1f07, 0x1f07, 0x1f07, 0x1f07, 0x1f07, 0x1f07, +}; + +DECLARE_ALIGNED(16, static const int16_t, av1_eob_to_eobxy_32x8_default[8]) = { + 0x0707, 0x070f, 0x070f, 0x071f, 0x071f, 0x071f, 0x071f, 0x071f, +}; + +DECLARE_ALIGNED(16, static const int16_t *, + av1_eob_to_eobxy_default[TX_SIZES_ALL]) = { + NULL, + av1_eob_to_eobxy_8x8_default, + av1_eob_to_eobxy_16x16_default, + av1_eob_to_eobxy_32x32_default, + av1_eob_to_eobxy_32x32_default, + NULL, + NULL, + av1_eob_to_eobxy_8x16_default, + av1_eob_to_eobxy_16x8_default, + av1_eob_to_eobxy_16x32_default, + av1_eob_to_eobxy_32x16_default, + av1_eob_to_eobxy_32x32_default, + av1_eob_to_eobxy_32x32_default, + NULL, + NULL, + 
av1_eob_to_eobxy_8x32_default, + av1_eob_to_eobxy_32x8_default, + av1_eob_to_eobxy_16x32_default, + av1_eob_to_eobxy_32x16_default, +}; + +static const int lowbd_txfm_all_1d_zeros_idx[32] = { + 0, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, +}; + +// Transform block width in log2 for eob (size of 64 map to 32) +static const int tx_size_wide_log2_eob[TX_SIZES_ALL] = { + 2, 3, 4, 5, 5, 2, 3, 3, 4, 4, 5, 5, 5, 2, 4, 3, 5, 4, 5, +}; + +static int eob_fill[32] = { + 0, 7, 7, 7, 7, 7, 7, 7, 15, 15, 15, 15, 15, 15, 15, 15, + 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, +}; + +static INLINE void get_eobx_eoby_scan_default(int *eobx, int *eoby, + TX_SIZE tx_size, int eob) { + if (eob == 1) { + *eobx = 0; + *eoby = 0; + return; + } + + const int tx_w_log2 = tx_size_wide_log2_eob[tx_size]; + const int eob_row = (eob - 1) >> tx_w_log2; + const int eobxy = av1_eob_to_eobxy_default[tx_size][eob_row]; + *eobx = eobxy & 0xFF; + *eoby = eobxy >> 8; +} + +static INLINE void get_eobx_eoby_scan_v_identity(int *eobx, int *eoby, + TX_SIZE tx_size, int eob) { + eob -= 1; + const int txfm_size_row = tx_size_high[tx_size]; + const int eoby_max = AOMMIN(32, txfm_size_row) - 1; + *eobx = eob / (eoby_max + 1); + *eoby = (eob >= eoby_max) ? eoby_max : eob_fill[eob]; +} + +static INLINE void get_eobx_eoby_scan_h_identity(int *eobx, int *eoby, + TX_SIZE tx_size, int eob) { + eob -= 1; + const int txfm_size_col = tx_size_wide[tx_size]; + const int eobx_max = AOMMIN(32, txfm_size_col) - 1; + *eobx = (eob >= eobx_max) ? eobx_max : eob_fill[eob]; + const int temp_eoby = eob / (eobx_max + 1); + assert(temp_eoby < 32); + *eoby = eob_fill[temp_eoby]; +} + +#endif // AOM_AV1_COMMON_ARM_AV1_INV_TXFM_NEON_H_ diff --git a/third_party/aom/av1/common/arm/av1_txfm_neon.c b/third_party/aom/av1/common/arm/av1_txfm_neon.c new file mode 100644 index 0000000000..f955a379f7 --- /dev/null +++ b/third_party/aom/av1/common/arm/av1_txfm_neon.c @@ -0,0 +1,30 @@ +/* + * + * Copyright (c) 2018, Alliance for Open Media. All rights reserved + * + * This source code is subject to the terms of the BSD 2 Clause License and + * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License + * was not distributed with this source code in the LICENSE file, you can + * obtain it at www.aomedia.org/license/software. If the Alliance for Open + * Media Patent License 1.0 was not distributed with this source code in the + * PATENTS file, you can obtain it at www.aomedia.org/license/patent. + */ +#include <arm_neon.h> +#include <assert.h> + +#include "config/av1_rtcd.h" + +#include "aom_dsp/arm/mem_neon.h" +#include "aom_ports/mem.h" + +void av1_round_shift_array_neon(int32_t *arr, int size, int bit) { + assert(!(size % 4)); + if (!bit) return; + const int32x4_t dup_bits_n_32x4 = vdupq_n_s32((int32_t)(-bit)); + for (int i = 0; i < size; i += 4) { + int32x4_t tmp_q_s32 = vld1q_s32(arr); + tmp_q_s32 = vrshlq_s32(tmp_q_s32, dup_bits_n_32x4); + vst1q_s32(arr, tmp_q_s32); + arr += 4; + } +} diff --git a/third_party/aom/av1/common/arm/blend_a64_hmask_neon.c b/third_party/aom/av1/common/arm/blend_a64_hmask_neon.c new file mode 100644 index 0000000000..7afb1a909d --- /dev/null +++ b/third_party/aom/av1/common/arm/blend_a64_hmask_neon.c @@ -0,0 +1,102 @@ +/* + * + * Copyright (c) 2018, Alliance for Open Media. All rights reserved + * + * This source code is subject to the terms of the BSD 2 Clause License and + * the Alliance for Open Media Patent License 1.0. 
If the BSD 2 Clause License + * was not distributed with this source code in the LICENSE file, you can + * obtain it at www.aomedia.org/license/software. If the Alliance for Open + * Media Patent License 1.0 was not distributed with this source code in the + * PATENTS file, you can obtain it at www.aomedia.org/license/patent. + */ + +#include <arm_neon.h> +#include <assert.h> + +#include "config/aom_dsp_rtcd.h" + +#include "aom/aom_integer.h" +#include "aom_dsp/aom_dsp_common.h" +#include "aom_dsp/arm/blend_neon.h" +#include "aom_dsp/arm/mem_neon.h" + +void aom_blend_a64_hmask_neon(uint8_t *dst, uint32_t dst_stride, + const uint8_t *src0, uint32_t src0_stride, + const uint8_t *src1, uint32_t src1_stride, + const uint8_t *mask, int w, int h) { + assert(IMPLIES(src0 == dst, src0_stride == dst_stride)); + assert(IMPLIES(src1 == dst, src1_stride == dst_stride)); + + assert(h >= 2); + assert(w >= 2); + assert(IS_POWER_OF_TWO(h)); + assert(IS_POWER_OF_TWO(w)); + + if (w > 8) { + do { + int i = 0; + do { + uint8x16_t m0 = vld1q_u8(mask + i); + uint8x16_t s0 = vld1q_u8(src0 + i); + uint8x16_t s1 = vld1q_u8(src1 + i); + + uint8x16_t blend = alpha_blend_a64_u8x16(m0, s0, s1); + + vst1q_u8(dst + i, blend); + + i += 16; + } while (i < w); + + src0 += src0_stride; + src1 += src1_stride; + dst += dst_stride; + } while (--h != 0); + } else if (w == 8) { + const uint8x8_t m0 = vld1_u8(mask); + do { + uint8x8_t s0 = vld1_u8(src0); + uint8x8_t s1 = vld1_u8(src1); + + uint8x8_t blend = alpha_blend_a64_u8x8(m0, s0, s1); + + vst1_u8(dst, blend); + + src0 += src0_stride; + src1 += src1_stride; + dst += dst_stride; + } while (--h != 0); + } else if (w == 4) { + const uint8x8_t m0 = load_unaligned_dup_u8_4x2(mask); + do { + uint8x8_t s0 = load_unaligned_u8_4x2(src0, src0_stride); + uint8x8_t s1 = load_unaligned_u8_4x2(src1, src1_stride); + + uint8x8_t blend = alpha_blend_a64_u8x8(m0, s0, s1); + + store_u8x4_strided_x2(dst, dst_stride, blend); + + src0 += 2 * src0_stride; + src1 += 2 * src1_stride; + dst += 2 * dst_stride; + h -= 2; + } while (h != 0); + } else if (w == 2 && h >= 16) { + const uint8x8_t m0 = vreinterpret_u8_u16(vld1_dup_u16((uint16_t *)mask)); + do { + uint8x8_t s0 = load_unaligned_u8_2x2(src0, src0_stride); + uint8x8_t s1 = load_unaligned_u8_2x2(src1, src1_stride); + + uint8x8_t blend = alpha_blend_a64_u8x8(m0, s0, s1); + + store_u8x2_strided_x2(dst, dst_stride, blend); + + src0 += 2 * src0_stride; + src1 += 2 * src1_stride; + dst += 2 * dst_stride; + h -= 2; + } while (h != 0); + } else { + aom_blend_a64_hmask_c(dst, dst_stride, src0, src0_stride, src1, src1_stride, + mask, w, h); + } +} diff --git a/third_party/aom/av1/common/arm/blend_a64_vmask_neon.c b/third_party/aom/av1/common/arm/blend_a64_vmask_neon.c new file mode 100644 index 0000000000..9aea29992a --- /dev/null +++ b/third_party/aom/av1/common/arm/blend_a64_vmask_neon.c @@ -0,0 +1,112 @@ +/* + * + * Copyright (c) 2018, Alliance for Open Media. All rights reserved + * + * This source code is subject to the terms of the BSD 2 Clause License and + * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License + * was not distributed with this source code in the LICENSE file, you can + * obtain it at www.aomedia.org/license/software. If the Alliance for Open + * Media Patent License 1.0 was not distributed with this source code in the + * PATENTS file, you can obtain it at www.aomedia.org/license/patent. 
+ */ + +#include <arm_neon.h> +#include <assert.h> + +#include "aom/aom_integer.h" +#include "aom_dsp/aom_dsp_common.h" +#include "aom_dsp/blend.h" +#include "aom_dsp/arm/blend_neon.h" +#include "aom_dsp/arm/mem_neon.h" +#include "aom_ports/mem.h" +#include "config/aom_dsp_rtcd.h" + +void aom_blend_a64_vmask_neon(uint8_t *dst, uint32_t dst_stride, + const uint8_t *src0, uint32_t src0_stride, + const uint8_t *src1, uint32_t src1_stride, + const uint8_t *mask, int w, int h) { + assert(IMPLIES(src0 == dst, src0_stride == dst_stride)); + assert(IMPLIES(src1 == dst, src1_stride == dst_stride)); + + assert(h >= 2); + assert(w >= 2); + assert(IS_POWER_OF_TWO(h)); + assert(IS_POWER_OF_TWO(w)); + + if (w > 8) { + do { + uint8x16_t m0 = vdupq_n_u8(mask[0]); + int i = 0; + do { + uint8x16_t s0 = vld1q_u8(src0 + i); + uint8x16_t s1 = vld1q_u8(src1 + i); + + uint8x16_t blend = alpha_blend_a64_u8x16(m0, s0, s1); + + vst1q_u8(dst + i, blend); + + i += 16; + } while (i < w); + + mask += 1; + src0 += src0_stride; + src1 += src1_stride; + dst += dst_stride; + } while (--h != 0); + } else if (w == 8) { + do { + uint8x8_t m0 = vdup_n_u8(mask[0]); + uint8x8_t s0 = vld1_u8(src0); + uint8x8_t s1 = vld1_u8(src1); + + uint8x8_t blend = alpha_blend_a64_u8x8(m0, s0, s1); + + vst1_u8(dst, blend); + + mask += 1; + src0 += src0_stride; + src1 += src1_stride; + dst += dst_stride; + } while (--h != 0); + } else if (w == 4) { + do { + const uint16x4_t m0 = vdup_n_u16((uint16_t)mask[0]); + const uint16x4_t m1 = vdup_n_u16((uint16_t)mask[1]); + const uint8x8_t m = vmovn_u16(vcombine_u16(m0, m1)); + uint8x8_t s0 = load_unaligned_u8_4x2(src0, src0_stride); + uint8x8_t s1 = load_unaligned_u8_4x2(src1, src1_stride); + + uint8x8_t blend = alpha_blend_a64_u8x8(m, s0, s1); + + store_u8x4_strided_x2(dst, dst_stride, blend); + + mask += 2; + src0 += 2 * src0_stride; + src1 += 2 * src1_stride; + dst += 2 * dst_stride; + h -= 2; + } while (h != 0); + } else if (w == 2 && h >= 16) { + do { + uint16x4_t m0 = vdup_n_u16(0); + m0 = vld1_lane_u16((uint16_t *)mask, m0, 0); + uint8x8_t m = + vzip_u8(vreinterpret_u8_u16(m0), vreinterpret_u8_u16(m0)).val[0]; + uint8x8_t s0 = load_unaligned_u8_2x2(src0, src0_stride); + uint8x8_t s1 = load_unaligned_u8_2x2(src1, src1_stride); + + uint8x8_t blend = alpha_blend_a64_u8x8(m, s0, s1); + + store_u8x2_strided_x2(dst, dst_stride, blend); + + mask += 2; + src0 += 2 * src0_stride; + src1 += 2 * src1_stride; + dst += 2 * dst_stride; + h -= 2; + } while (h != 0); + } else { + aom_blend_a64_vmask_c(dst, dst_stride, src0, src0_stride, src1, src1_stride, + mask, w, h); + } +} diff --git a/third_party/aom/av1/common/arm/cdef_block_neon.c b/third_party/aom/av1/common/arm/cdef_block_neon.c new file mode 100644 index 0000000000..53d3a9f1e0 --- /dev/null +++ b/third_party/aom/av1/common/arm/cdef_block_neon.c @@ -0,0 +1,1355 @@ +/* + * Copyright (c) 2016, Alliance for Open Media. All rights reserved + * + * This source code is subject to the terms of the BSD 2 Clause License and + * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License + * was not distributed with this source code in the LICENSE file, you can + * obtain it at www.aomedia.org/license/software. If the Alliance for Open + * Media Patent License 1.0 was not distributed with this source code in the + * PATENTS file, you can obtain it at www.aomedia.org/license/patent. 
+ */ + +#include <arm_neon.h> +#include <assert.h> + +#include "config/aom_config.h" +#include "config/av1_rtcd.h" + +#include "aom_dsp/arm/mem_neon.h" +#include "aom_dsp/arm/sum_neon.h" +#include "av1/common/cdef_block.h" + +void cdef_copy_rect8_8bit_to_16bit_neon(uint16_t *dst, int dstride, + const uint8_t *src, int sstride, + int width, int height) { + do { + const uint8_t *src_ptr = src; + uint16_t *dst_ptr = dst; + + int w = 0; + while (width - w >= 16) { + uint8x16_t row = vld1q_u8(src_ptr + w); + uint8x16x2_t row_u16 = { { row, vdupq_n_u8(0) } }; + vst2q_u8((uint8_t *)(dst_ptr + w), row_u16); + + w += 16; + } + if (width - w >= 8) { + uint8x8_t row = vld1_u8(src_ptr + w); + vst1q_u16(dst_ptr + w, vmovl_u8(row)); + w += 8; + } + if (width - w == 4) { + for (int i = w; i < w + 4; i++) { + dst_ptr[i] = src_ptr[i]; + } + } + + src += sstride; + dst += dstride; + } while (--height != 0); +} + +void cdef_copy_rect8_16bit_to_16bit_neon(uint16_t *dst, int dstride, + const uint16_t *src, int sstride, + int width, int height) { + do { + const uint16_t *src_ptr = src; + uint16_t *dst_ptr = dst; + + int w = 0; + while (width - w >= 8) { + uint16x8_t row = vld1q_u16(src_ptr + w); + vst1q_u16(dst_ptr + w, row); + + w += 8; + } + if (width - w == 4) { + uint16x4_t row = vld1_u16(src_ptr + w); + vst1_u16(dst_ptr + w, row); + } + + src += sstride; + dst += dstride; + } while (--height != 0); +} + +// partial A is a 16-bit vector of the form: +// [x8 x7 x6 x5 x4 x3 x2 x1] and partial B has the form: +// [0 y1 y2 y3 y4 y5 y6 y7]. +// This function computes (x1^2+y1^2)*C1 + (x2^2+y2^2)*C2 + ... +// (x7^2+y2^7)*C7 + (x8^2+0^2)*C8 where the C1..C8 constants are in const1 +// and const2. +static INLINE uint32x4_t fold_mul_and_sum_neon(int16x8_t partiala, + int16x8_t partialb, + uint32x4_t const1, + uint32x4_t const2) { + // Reverse partial B. + // pattern = { 12 13 10 11 8 9 6 7 4 5 2 3 0 1 14 15 }. + uint8x16_t pattern = vreinterpretq_u8_u64( + vcombine_u64(vcreate_u64((uint64_t)0x07060908 << 32 | 0x0b0a0d0c), + vcreate_u64((uint64_t)0x0f0e0100 << 32 | 0x03020504))); + +#if AOM_ARCH_AARCH64 + partialb = + vreinterpretq_s16_s8(vqtbl1q_s8(vreinterpretq_s8_s16(partialb), pattern)); +#else + int8x8x2_t p = { { vget_low_s8(vreinterpretq_s8_s16(partialb)), + vget_high_s8(vreinterpretq_s8_s16(partialb)) } }; + int8x8_t shuffle_hi = vtbl2_s8(p, vget_high_s8(vreinterpretq_s8_u8(pattern))); + int8x8_t shuffle_lo = vtbl2_s8(p, vget_low_s8(vreinterpretq_s8_u8(pattern))); + partialb = vreinterpretq_s16_s8(vcombine_s8(shuffle_lo, shuffle_hi)); +#endif + + // Square and add the corresponding x and y values. + int32x4_t cost_lo = vmull_s16(vget_low_s16(partiala), vget_low_s16(partiala)); + cost_lo = vmlal_s16(cost_lo, vget_low_s16(partialb), vget_low_s16(partialb)); + int32x4_t cost_hi = + vmull_s16(vget_high_s16(partiala), vget_high_s16(partiala)); + cost_hi = + vmlal_s16(cost_hi, vget_high_s16(partialb), vget_high_s16(partialb)); + + // Multiply by constant. + uint32x4_t cost = vmulq_u32(vreinterpretq_u32_s32(cost_lo), const1); + cost = vmlaq_u32(cost, vreinterpretq_u32_s32(cost_hi), const2); + return cost; +} + +// This function computes the cost along directions 4, 5, 6, 7. (4 is diagonal +// down-right, 6 is vertical). +// +// For each direction the lines are shifted so that we can perform a +// basic sum on each vector element. 
For example, direction 5 is "south by +// southeast", so we need to add the pixels along each line i below: +// +// 0 1 2 3 4 5 6 7 +// 0 1 2 3 4 5 6 7 +// 8 0 1 2 3 4 5 6 +// 8 0 1 2 3 4 5 6 +// 9 8 0 1 2 3 4 5 +// 9 8 0 1 2 3 4 5 +// 10 9 8 0 1 2 3 4 +// 10 9 8 0 1 2 3 4 +// +// For this to fit nicely in vectors, the lines need to be shifted like so: +// 0 1 2 3 4 5 6 7 +// 0 1 2 3 4 5 6 7 +// 8 0 1 2 3 4 5 6 +// 8 0 1 2 3 4 5 6 +// 9 8 0 1 2 3 4 5 +// 9 8 0 1 2 3 4 5 +// 10 9 8 0 1 2 3 4 +// 10 9 8 0 1 2 3 4 +// +// In this configuration we can now perform SIMD additions to get the cost +// along direction 5. Since this won't fit into a single 128-bit vector, we use +// two of them to compute each half of the new configuration, and pad the empty +// spaces with zeros. Similar shifting is done for other directions, except +// direction 6 which is straightforward as it's the vertical direction. +static INLINE uint32x4_t compute_vert_directions_neon(int16x8_t lines[8], + uint32_t cost[4]) { + const int16x8_t zero = vdupq_n_s16(0); + + // Partial sums for lines 0 and 1. + int16x8_t partial4a = vextq_s16(zero, lines[0], 1); + partial4a = vaddq_s16(partial4a, vextq_s16(zero, lines[1], 2)); + int16x8_t partial4b = vextq_s16(lines[0], zero, 1); + partial4b = vaddq_s16(partial4b, vextq_s16(lines[1], zero, 2)); + int16x8_t tmp = vaddq_s16(lines[0], lines[1]); + int16x8_t partial5a = vextq_s16(zero, tmp, 3); + int16x8_t partial5b = vextq_s16(tmp, zero, 3); + int16x8_t partial7a = vextq_s16(zero, tmp, 6); + int16x8_t partial7b = vextq_s16(tmp, zero, 6); + int16x8_t partial6 = tmp; + + // Partial sums for lines 2 and 3. + partial4a = vaddq_s16(partial4a, vextq_s16(zero, lines[2], 3)); + partial4a = vaddq_s16(partial4a, vextq_s16(zero, lines[3], 4)); + partial4b = vaddq_s16(partial4b, vextq_s16(lines[2], zero, 3)); + partial4b = vaddq_s16(partial4b, vextq_s16(lines[3], zero, 4)); + tmp = vaddq_s16(lines[2], lines[3]); + partial5a = vaddq_s16(partial5a, vextq_s16(zero, tmp, 4)); + partial5b = vaddq_s16(partial5b, vextq_s16(tmp, zero, 4)); + partial7a = vaddq_s16(partial7a, vextq_s16(zero, tmp, 5)); + partial7b = vaddq_s16(partial7b, vextq_s16(tmp, zero, 5)); + partial6 = vaddq_s16(partial6, tmp); + + // Partial sums for lines 4 and 5. + partial4a = vaddq_s16(partial4a, vextq_s16(zero, lines[4], 5)); + partial4a = vaddq_s16(partial4a, vextq_s16(zero, lines[5], 6)); + partial4b = vaddq_s16(partial4b, vextq_s16(lines[4], zero, 5)); + partial4b = vaddq_s16(partial4b, vextq_s16(lines[5], zero, 6)); + tmp = vaddq_s16(lines[4], lines[5]); + partial5a = vaddq_s16(partial5a, vextq_s16(zero, tmp, 5)); + partial5b = vaddq_s16(partial5b, vextq_s16(tmp, zero, 5)); + partial7a = vaddq_s16(partial7a, vextq_s16(zero, tmp, 4)); + partial7b = vaddq_s16(partial7b, vextq_s16(tmp, zero, 4)); + partial6 = vaddq_s16(partial6, tmp); + + // Partial sums for lines 6 and 7. 
+ partial4a = vaddq_s16(partial4a, vextq_s16(zero, lines[6], 7)); + partial4a = vaddq_s16(partial4a, lines[7]); + partial4b = vaddq_s16(partial4b, vextq_s16(lines[6], zero, 7)); + tmp = vaddq_s16(lines[6], lines[7]); + partial5a = vaddq_s16(partial5a, vextq_s16(zero, tmp, 6)); + partial5b = vaddq_s16(partial5b, vextq_s16(tmp, zero, 6)); + partial7a = vaddq_s16(partial7a, vextq_s16(zero, tmp, 3)); + partial7b = vaddq_s16(partial7b, vextq_s16(tmp, zero, 3)); + partial6 = vaddq_s16(partial6, tmp); + + uint32x4_t const0 = vreinterpretq_u32_u64( + vcombine_u64(vcreate_u64((uint64_t)420 << 32 | 840), + vcreate_u64((uint64_t)210 << 32 | 280))); + uint32x4_t const1 = vreinterpretq_u32_u64( + vcombine_u64(vcreate_u64((uint64_t)140 << 32 | 168), + vcreate_u64((uint64_t)105 << 32 | 120))); + uint32x4_t const2 = vreinterpretq_u32_u64( + vcombine_u64(vcreate_u64(0), vcreate_u64((uint64_t)210 << 32 | 420))); + uint32x4_t const3 = vreinterpretq_u32_u64( + vcombine_u64(vcreate_u64((uint64_t)105 << 32 | 140), + vcreate_u64((uint64_t)105 << 32 | 105))); + + // Compute costs in terms of partial sums. + int32x4_t partial6_s32 = + vmull_s16(vget_low_s16(partial6), vget_low_s16(partial6)); + partial6_s32 = + vmlal_s16(partial6_s32, vget_high_s16(partial6), vget_high_s16(partial6)); + + uint32x4_t costs[4]; + costs[0] = fold_mul_and_sum_neon(partial4a, partial4b, const0, const1); + costs[1] = fold_mul_and_sum_neon(partial5a, partial5b, const2, const3); + costs[2] = vmulq_n_u32(vreinterpretq_u32_s32(partial6_s32), 105); + costs[3] = fold_mul_and_sum_neon(partial7a, partial7b, const2, const3); + + costs[0] = horizontal_add_4d_u32x4(costs); + vst1q_u32(cost, costs[0]); + return costs[0]; +} + +static INLINE uint32x4_t fold_mul_and_sum_pairwise_neon(int16x8_t partiala, + int16x8_t partialb, + int16x8_t partialc, + uint32x4_t const0) { + // Reverse partial c. + // pattern = { 10 11 8 9 6 7 4 5 2 3 0 1 12 13 14 15 }. + uint8x16_t pattern = vreinterpretq_u8_u64( + vcombine_u64(vcreate_u64((uint64_t)0x05040706 << 32 | 0x09080b0a), + vcreate_u64((uint64_t)0x0f0e0d0c << 32 | 0x01000302))); + +#if AOM_ARCH_AARCH64 + partialc = + vreinterpretq_s16_s8(vqtbl1q_s8(vreinterpretq_s8_s16(partialc), pattern)); +#else + int8x8x2_t p = { { vget_low_s8(vreinterpretq_s8_s16(partialc)), + vget_high_s8(vreinterpretq_s8_s16(partialc)) } }; + int8x8_t shuffle_hi = vtbl2_s8(p, vget_high_s8(vreinterpretq_s8_u8(pattern))); + int8x8_t shuffle_lo = vtbl2_s8(p, vget_low_s8(vreinterpretq_s8_u8(pattern))); + partialc = vreinterpretq_s16_s8(vcombine_s8(shuffle_lo, shuffle_hi)); +#endif + + int32x4_t partiala_s32 = vpaddlq_s16(partiala); + int32x4_t partialb_s32 = vpaddlq_s16(partialb); + int32x4_t partialc_s32 = vpaddlq_s16(partialc); + + partiala_s32 = vmulq_s32(partiala_s32, partiala_s32); + partialb_s32 = vmulq_s32(partialb_s32, partialb_s32); + partialc_s32 = vmulq_s32(partialc_s32, partialc_s32); + + partiala_s32 = vaddq_s32(partiala_s32, partialc_s32); + + uint32x4_t cost = vmulq_n_u32(vreinterpretq_u32_s32(partialb_s32), 105); + cost = vmlaq_u32(cost, vreinterpretq_u32_s32(partiala_s32), const0); + return cost; +} + +// This function computes the cost along directions 0, 1, 2, 3. (0 means +// 45-degree up-right, 2 is horizontal). +// +// For direction 1 and 3 ("east northeast" and "east southeast") the shifted +// lines need three vectors instead of two. 
For direction 1 for example, we need +// to compute the sums along the line i below: +// 0 0 1 1 2 2 3 3 +// 1 1 2 2 3 3 4 4 +// 2 2 3 3 4 4 5 5 +// 3 3 4 4 5 5 6 6 +// 4 4 5 5 6 6 7 7 +// 5 5 6 6 7 7 8 8 +// 6 6 7 7 8 8 9 9 +// 7 7 8 8 9 9 10 10 +// +// Which means we need the following configuration: +// 0 0 1 1 2 2 3 3 +// 1 1 2 2 3 3 4 4 +// 2 2 3 3 4 4 5 5 +// 3 3 4 4 5 5 6 6 +// 4 4 5 5 6 6 7 7 +// 5 5 6 6 7 7 8 8 +// 6 6 7 7 8 8 9 9 +// 7 7 8 8 9 9 10 10 +// +// Three vectors are needed to compute this, as well as some extra pairwise +// additions. +static uint32x4_t compute_horiz_directions_neon(int16x8_t lines[8], + uint32_t cost[4]) { + const int16x8_t zero = vdupq_n_s16(0); + + // Compute diagonal directions (1, 2, 3). + // Partial sums for lines 0 and 1. + int16x8_t partial0a = lines[0]; + partial0a = vaddq_s16(partial0a, vextq_s16(zero, lines[1], 7)); + int16x8_t partial0b = vextq_s16(lines[1], zero, 7); + int16x8_t partial1a = vaddq_s16(lines[0], vextq_s16(zero, lines[1], 6)); + int16x8_t partial1b = vextq_s16(lines[1], zero, 6); + int16x8_t partial3a = vextq_s16(lines[0], zero, 2); + partial3a = vaddq_s16(partial3a, vextq_s16(lines[1], zero, 4)); + int16x8_t partial3b = vextq_s16(zero, lines[0], 2); + partial3b = vaddq_s16(partial3b, vextq_s16(zero, lines[1], 4)); + + // Partial sums for lines 2 and 3. + partial0a = vaddq_s16(partial0a, vextq_s16(zero, lines[2], 6)); + partial0a = vaddq_s16(partial0a, vextq_s16(zero, lines[3], 5)); + partial0b = vaddq_s16(partial0b, vextq_s16(lines[2], zero, 6)); + partial0b = vaddq_s16(partial0b, vextq_s16(lines[3], zero, 5)); + partial1a = vaddq_s16(partial1a, vextq_s16(zero, lines[2], 4)); + partial1a = vaddq_s16(partial1a, vextq_s16(zero, lines[3], 2)); + partial1b = vaddq_s16(partial1b, vextq_s16(lines[2], zero, 4)); + partial1b = vaddq_s16(partial1b, vextq_s16(lines[3], zero, 2)); + partial3a = vaddq_s16(partial3a, vextq_s16(lines[2], zero, 6)); + partial3b = vaddq_s16(partial3b, vextq_s16(zero, lines[2], 6)); + partial3b = vaddq_s16(partial3b, lines[3]); + + // Partial sums for lines 4 and 5. + partial0a = vaddq_s16(partial0a, vextq_s16(zero, lines[4], 4)); + partial0a = vaddq_s16(partial0a, vextq_s16(zero, lines[5], 3)); + partial0b = vaddq_s16(partial0b, vextq_s16(lines[4], zero, 4)); + partial0b = vaddq_s16(partial0b, vextq_s16(lines[5], zero, 3)); + partial1b = vaddq_s16(partial1b, lines[4]); + partial1b = vaddq_s16(partial1b, vextq_s16(zero, lines[5], 6)); + int16x8_t partial1c = vextq_s16(lines[5], zero, 6); + partial3b = vaddq_s16(partial3b, vextq_s16(lines[4], zero, 2)); + partial3b = vaddq_s16(partial3b, vextq_s16(lines[5], zero, 4)); + int16x8_t partial3c = vextq_s16(zero, lines[4], 2); + partial3c = vaddq_s16(partial3c, vextq_s16(zero, lines[5], 4)); + + // Partial sums for lines 6 and 7. 
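+  // Directions 1 and 3 have shifted beyond a single pair of vectors, so lines
+  // 6 and 7 also accumulate into the third vectors partial1c and partial3c.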
+ partial0a = vaddq_s16(partial0a, vextq_s16(zero, lines[6], 2)); + partial0a = vaddq_s16(partial0a, vextq_s16(zero, lines[7], 1)); + partial0b = vaddq_s16(partial0b, vextq_s16(lines[6], zero, 2)); + partial0b = vaddq_s16(partial0b, vextq_s16(lines[7], zero, 1)); + partial1b = vaddq_s16(partial1b, vextq_s16(zero, lines[6], 4)); + partial1b = vaddq_s16(partial1b, vextq_s16(zero, lines[7], 2)); + partial1c = vaddq_s16(partial1c, vextq_s16(lines[6], zero, 4)); + partial1c = vaddq_s16(partial1c, vextq_s16(lines[7], zero, 2)); + partial3b = vaddq_s16(partial3b, vextq_s16(lines[6], zero, 6)); + partial3c = vaddq_s16(partial3c, vextq_s16(zero, lines[6], 6)); + partial3c = vaddq_s16(partial3c, lines[7]); + + // Special case for direction 2 as it's just a sum along each line. + int16x8_t lines03[4] = { lines[0], lines[1], lines[2], lines[3] }; + int16x8_t lines47[4] = { lines[4], lines[5], lines[6], lines[7] }; + int32x4_t partial2a = horizontal_add_4d_s16x8(lines03); + int32x4_t partial2b = horizontal_add_4d_s16x8(lines47); + + uint32x4_t partial2a_u32 = + vreinterpretq_u32_s32(vmulq_s32(partial2a, partial2a)); + uint32x4_t partial2b_u32 = + vreinterpretq_u32_s32(vmulq_s32(partial2b, partial2b)); + + uint32x4_t const0 = vreinterpretq_u32_u64( + vcombine_u64(vcreate_u64((uint64_t)420 << 32 | 840), + vcreate_u64((uint64_t)210 << 32 | 280))); + uint32x4_t const1 = vreinterpretq_u32_u64( + vcombine_u64(vcreate_u64((uint64_t)140 << 32 | 168), + vcreate_u64((uint64_t)105 << 32 | 120))); + uint32x4_t const2 = vreinterpretq_u32_u64( + vcombine_u64(vcreate_u64((uint64_t)210 << 32 | 420), + vcreate_u64((uint64_t)105 << 32 | 140))); + + uint32x4_t costs[4]; + costs[0] = fold_mul_and_sum_neon(partial0a, partial0b, const0, const1); + costs[1] = + fold_mul_and_sum_pairwise_neon(partial1a, partial1b, partial1c, const2); + costs[2] = vaddq_u32(partial2a_u32, partial2b_u32); + costs[2] = vmulq_n_u32(costs[2], 105); + costs[3] = + fold_mul_and_sum_pairwise_neon(partial3c, partial3b, partial3a, const2); + + costs[0] = horizontal_add_4d_u32x4(costs); + vst1q_u32(cost, costs[0]); + return costs[0]; +} + +int cdef_find_dir_neon(const uint16_t *img, int stride, int32_t *var, + int coeff_shift) { + uint32_t cost[8]; + uint32_t best_cost = 0; + int best_dir = 0; + int16x8_t lines[8]; + for (int i = 0; i < 8; i++) { + uint16x8_t s = vld1q_u16(&img[i * stride]); + lines[i] = vreinterpretq_s16_u16( + vsubq_u16(vshlq_u16(s, vdupq_n_s16(-coeff_shift)), vdupq_n_u16(128))); + } + + // Compute "mostly vertical" directions. + uint32x4_t cost47 = compute_vert_directions_neon(lines, cost + 4); + + // Compute "mostly horizontal" directions. + uint32x4_t cost03 = compute_horiz_directions_neon(lines, cost); + + // Find max cost as well as its index to get best_dir. + // The max cost needs to be propagated in the whole vector to find its + // position in the original cost vectors cost03 and cost47. + uint32x4_t cost07 = vmaxq_u32(cost03, cost47); +#if AOM_ARCH_AARCH64 + best_cost = vmaxvq_u32(cost07); + uint32x4_t max_cost = vdupq_n_u32(best_cost); + uint8x16x2_t costs = { { vreinterpretq_u8_u32(vceqq_u32(max_cost, cost03)), + vreinterpretq_u8_u32( + vceqq_u32(max_cost, cost47)) } }; + // idx = { 28, 24, 20, 16, 12, 8, 4, 0 }; + uint8x8_t idx = vreinterpret_u8_u64(vcreate_u64(0x0004080c1014181cULL)); + // Get the lowest 8 bit of each 32-bit elements and reverse them. 
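+  // The reversed lookup places direction 0 in the most significant byte of the
+  // 64-bit result, so counting leading zero bytes gives the lowest direction
+  // index whose cost equals the maximum. For example, if only direction 2
+  // matches, a == 0x0000ff0000000000, aom_clzll(a) == 16 and best_dir == 2.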
+ uint8x8_t tbl = vqtbl2_u8(costs, idx); + uint64_t a = vget_lane_u64(vreinterpret_u64_u8(tbl), 0); + best_dir = aom_clzll(a) >> 3; +#else + uint32x2_t cost64 = vpmax_u32(vget_low_u32(cost07), vget_high_u32(cost07)); + cost64 = vpmax_u32(cost64, cost64); + uint32x4_t max_cost = vcombine_u32(cost64, cost64); + best_cost = vget_lane_u32(cost64, 0); + uint16x8_t costs = vcombine_u16(vmovn_u32(vceqq_u32(max_cost, cost03)), + vmovn_u32(vceqq_u32(max_cost, cost47))); + uint8x8_t idx = + vand_u8(vmovn_u16(costs), + vreinterpret_u8_u64(vcreate_u64(0x8040201008040201ULL))); + int sum = horizontal_add_u8x8(idx); + best_dir = get_msb(sum ^ (sum - 1)); +#endif + + // Difference between the optimal variance and the variance along the + // orthogonal direction. Again, the sum(x^2) terms cancel out. + *var = best_cost - cost[(best_dir + 4) & 7]; + // We'd normally divide by 840, but dividing by 1024 is close enough + // for what we're going to do with this. + *var >>= 10; + return best_dir; +} + +void cdef_find_dir_dual_neon(const uint16_t *img1, const uint16_t *img2, + int stride, int32_t *var_out_1st, + int32_t *var_out_2nd, int coeff_shift, + int *out_dir_1st_8x8, int *out_dir_2nd_8x8) { + // Process first 8x8. + *out_dir_1st_8x8 = cdef_find_dir(img1, stride, var_out_1st, coeff_shift); + + // Process second 8x8. + *out_dir_2nd_8x8 = cdef_find_dir(img2, stride, var_out_2nd, coeff_shift); +} + +// sign(a-b) * min(abs(a-b), max(0, threshold - (abs(a-b) >> adjdamp))) +static INLINE int16x8_t constrain16(uint16x8_t a, uint16x8_t b, + unsigned int threshold, int adjdamp) { + uint16x8_t diff = vabdq_u16(a, b); + const uint16x8_t a_gt_b = vcgtq_u16(a, b); + const uint16x8_t s = vqsubq_u16(vdupq_n_u16(threshold), + vshlq_u16(diff, vdupq_n_s16(-adjdamp))); + const int16x8_t clip = vreinterpretq_s16_u16(vminq_u16(diff, s)); + return vbslq_s16(a_gt_b, clip, vnegq_s16(clip)); +} + +static INLINE void primary_filter(uint16x8_t s, uint16x8_t tap[4], + const int *pri_taps, int pri_strength, + int pri_damping, int16x8_t *sum) { + // Near taps + int16x8_t n0 = constrain16(tap[0], s, pri_strength, pri_damping); + int16x8_t n1 = constrain16(tap[1], s, pri_strength, pri_damping); + // sum += pri_taps[0] * (n0 + n1) + n0 = vaddq_s16(n0, n1); + *sum = vmlaq_n_s16(*sum, n0, pri_taps[0]); + + // Far taps + int16x8_t f0 = constrain16(tap[2], s, pri_strength, pri_damping); + int16x8_t f1 = constrain16(tap[3], s, pri_strength, pri_damping); + // sum += pri_taps[1] * (f0 + f1) + f0 = vaddq_s16(f0, f1); + *sum = vmlaq_n_s16(*sum, f0, pri_taps[1]); +} + +static INLINE void secondary_filter(uint16x8_t s, uint16x8_t tap[8], + const int *sec_taps, int sec_strength, + int sec_damping, int16x8_t *sum) { + // Near taps + int16x8_t s0 = constrain16(tap[0], s, sec_strength, sec_damping); + int16x8_t s1 = constrain16(tap[1], s, sec_strength, sec_damping); + int16x8_t s2 = constrain16(tap[2], s, sec_strength, sec_damping); + int16x8_t s3 = constrain16(tap[3], s, sec_strength, sec_damping); + + // sum += sec_taps[0] * (p0 + p1 + p2 + p3) + s0 = vaddq_s16(s0, s1); + s2 = vaddq_s16(s2, s3); + s0 = vaddq_s16(s0, s2); + *sum = vmlaq_n_s16(*sum, s0, sec_taps[0]); + + // Far taps + s0 = constrain16(tap[4], s, sec_strength, sec_damping); + s1 = constrain16(tap[5], s, sec_strength, sec_damping); + s2 = constrain16(tap[6], s, sec_strength, sec_damping); + s3 = constrain16(tap[7], s, sec_strength, sec_damping); + + // sum += sec_taps[1] * (p0 + p1 + p2 + p3) + s0 = vaddq_s16(s0, s1); + s2 = vaddq_s16(s2, s3); + s0 = vaddq_s16(s0, s2); + *sum = 
vmlaq_n_s16(*sum, s0, sec_taps[1]); +} + +void cdef_filter_8_0_neon(void *dest, int dstride, const uint16_t *in, + int pri_strength, int sec_strength, int dir, + int pri_damping, int sec_damping, int coeff_shift, + int block_width, int block_height) { + uint16x8_t max, min; + const uint16x8_t cdef_large_value_mask = + vdupq_n_u16(((uint16_t)~CDEF_VERY_LARGE)); + const int po1 = cdef_directions[dir][0]; + const int po2 = cdef_directions[dir][1]; + const int s1o1 = cdef_directions[dir + 2][0]; + const int s1o2 = cdef_directions[dir + 2][1]; + const int s2o1 = cdef_directions[dir - 2][0]; + const int s2o2 = cdef_directions[dir - 2][1]; + const int *pri_taps = cdef_pri_taps[(pri_strength >> coeff_shift) & 1]; + const int *sec_taps = cdef_sec_taps; + + if (pri_strength) { + pri_damping = AOMMAX(0, pri_damping - get_msb(pri_strength)); + } + if (sec_strength) { + sec_damping = AOMMAX(0, sec_damping - get_msb(sec_strength)); + } + + if (block_width == 8) { + uint8_t *dst8 = (uint8_t *)dest; + + int h = block_height; + do { + int16x8_t sum = vdupq_n_s16(0); + uint16x8_t s = vld1q_u16(in); + max = min = s; + + uint16x8_t pri_src[4]; + + // Primary near taps + pri_src[0] = vld1q_u16(in + po1); + pri_src[1] = vld1q_u16(in - po1); + + // Primary far taps + pri_src[2] = vld1q_u16(in + po2); + pri_src[3] = vld1q_u16(in - po2); + + primary_filter(s, pri_src, pri_taps, pri_strength, pri_damping, &sum); + + // The source is 16 bits, however, we only really care about the lower + // 8 bits. The upper 8 bits contain the "large" flag. After the final + // primary max has been calculated, zero out the upper 8 bits. Use this + // to find the "16 bit" max. + uint8x16_t pri_max0 = vmaxq_u8(vreinterpretq_u8_u16(pri_src[0]), + vreinterpretq_u8_u16(pri_src[1])); + uint8x16_t pri_max1 = vmaxq_u8(vreinterpretq_u8_u16(pri_src[2]), + vreinterpretq_u8_u16(pri_src[3])); + pri_max0 = vmaxq_u8(pri_max0, pri_max1); + max = vmaxq_u16(max, vandq_u16(vreinterpretq_u16_u8(pri_max0), + cdef_large_value_mask)); + + uint16x8_t pri_min0 = vminq_u16(pri_src[0], pri_src[1]); + uint16x8_t pri_min1 = vminq_u16(pri_src[2], pri_src[3]); + pri_min0 = vminq_u16(pri_min0, pri_min1); + min = vminq_u16(min, pri_min0); + + uint16x8_t sec_src[8]; + + // Secondary near taps + sec_src[0] = vld1q_u16(in + s1o1); + sec_src[1] = vld1q_u16(in - s1o1); + sec_src[2] = vld1q_u16(in + s2o1); + sec_src[3] = vld1q_u16(in - s2o1); + + // Secondary far taps + sec_src[4] = vld1q_u16(in + s1o2); + sec_src[5] = vld1q_u16(in - s1o2); + sec_src[6] = vld1q_u16(in + s2o2); + sec_src[7] = vld1q_u16(in - s2o2); + + secondary_filter(s, sec_src, sec_taps, sec_strength, sec_damping, &sum); + + // The source is 16 bits, however, we only really care about the lower + // 8 bits. The upper 8 bits contain the "large" flag. After the final + // primary max has been calculated, zero out the upper 8 bits. Use this + // to find the "16 bit" max. 
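A small worked example of the byte-wise max trick described in the comment above, assuming CDEF_VERY_LARGE is 0x4000 (its value in cdef_block.h) and 8-bit pixel data:

    // One lane, one real pixel (200 = 0x00C8) and one padding tap (0x4000):
    //   byte-wise max:  low bytes  max(0xC8, 0x00) = 0xC8
    //                   high bytes max(0x00, 0x40) = 0x40   -> 0x40C8
    //   & ~CDEF_VERY_LARGE (0xBFFF)                         -> 0x00C8
    // Only the "large" flag bit is cleared, so a padding tap cannot dominate
    // the 16-bit max and the surviving value is the true 8-bit maximum.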
+ uint8x16_t sec_max0 = vmaxq_u8(vreinterpretq_u8_u16(sec_src[0]), + vreinterpretq_u8_u16(sec_src[1])); + uint8x16_t sec_max1 = vmaxq_u8(vreinterpretq_u8_u16(sec_src[2]), + vreinterpretq_u8_u16(sec_src[3])); + uint8x16_t sec_max2 = vmaxq_u8(vreinterpretq_u8_u16(sec_src[4]), + vreinterpretq_u8_u16(sec_src[5])); + uint8x16_t sec_max3 = vmaxq_u8(vreinterpretq_u8_u16(sec_src[6]), + vreinterpretq_u8_u16(sec_src[7])); + sec_max0 = vmaxq_u8(sec_max0, sec_max1); + sec_max2 = vmaxq_u8(sec_max2, sec_max3); + sec_max0 = vmaxq_u8(sec_max0, sec_max2); + max = vmaxq_u16(max, vandq_u16(vreinterpretq_u16_u8(sec_max0), + cdef_large_value_mask)); + + uint16x8_t sec_min0 = vminq_u16(sec_src[0], sec_src[1]); + uint16x8_t sec_min1 = vminq_u16(sec_src[2], sec_src[3]); + uint16x8_t sec_min2 = vminq_u16(sec_src[4], sec_src[5]); + uint16x8_t sec_min3 = vminq_u16(sec_src[6], sec_src[7]); + sec_min0 = vminq_u16(sec_min0, sec_min1); + sec_min2 = vminq_u16(sec_min2, sec_min3); + sec_min0 = vminq_u16(sec_min0, sec_min2); + min = vminq_u16(min, sec_min0); + + // res = s + ((sum - (sum < 0) + 8) >> 4) + sum = + vaddq_s16(sum, vreinterpretq_s16_u16(vcltq_s16(sum, vdupq_n_s16(0)))); + int16x8_t res_s16 = vrsraq_n_s16(vreinterpretq_s16_u16(s), sum, 4); + + res_s16 = vminq_s16(vmaxq_s16(res_s16, vreinterpretq_s16_u16(min)), + vreinterpretq_s16_u16(max)); + + const uint8x8_t res_u8 = vqmovun_s16(res_s16); + vst1_u8(dst8, res_u8); + + in += CDEF_BSTRIDE; + dst8 += dstride; + } while (--h != 0); + } else { + uint8_t *dst8 = (uint8_t *)dest; + + int h = block_height; + do { + int16x8_t sum = vdupq_n_s16(0); + uint16x8_t s = load_unaligned_u16_4x2(in, CDEF_BSTRIDE); + max = min = s; + + uint16x8_t pri_src[4]; + + // Primary near taps + pri_src[0] = load_unaligned_u16_4x2(in + po1, CDEF_BSTRIDE); + pri_src[1] = load_unaligned_u16_4x2(in - po1, CDEF_BSTRIDE); + + // Primary far taps + pri_src[2] = load_unaligned_u16_4x2(in + po2, CDEF_BSTRIDE); + pri_src[3] = load_unaligned_u16_4x2(in - po2, CDEF_BSTRIDE); + + primary_filter(s, pri_src, pri_taps, pri_strength, pri_damping, &sum); + + // The source is 16 bits, however, we only really care about the lower + // 8 bits. The upper 8 bits contain the "large" flag. After the final + // primary max has been calculated, zero out the upper 8 bits. Use this + // to find the "16 bit" max. 
+ uint8x16_t pri_max0 = vmaxq_u8(vreinterpretq_u8_u16(pri_src[0]), + vreinterpretq_u8_u16(pri_src[1])); + uint8x16_t pri_max1 = vmaxq_u8(vreinterpretq_u8_u16(pri_src[2]), + vreinterpretq_u8_u16(pri_src[3])); + pri_max0 = vmaxq_u8(pri_max0, pri_max1); + max = vmaxq_u16(max, vandq_u16(vreinterpretq_u16_u8(pri_max0), + cdef_large_value_mask)); + + uint16x8_t pri_min1 = vminq_u16(pri_src[0], pri_src[1]); + uint16x8_t pri_min2 = vminq_u16(pri_src[2], pri_src[3]); + pri_min1 = vminq_u16(pri_min1, pri_min2); + min = vminq_u16(min, pri_min1); + + uint16x8_t sec_src[8]; + + // Secondary near taps + sec_src[0] = load_unaligned_u16_4x2(in + s1o1, CDEF_BSTRIDE); + sec_src[1] = load_unaligned_u16_4x2(in - s1o1, CDEF_BSTRIDE); + sec_src[2] = load_unaligned_u16_4x2(in + s2o1, CDEF_BSTRIDE); + sec_src[3] = load_unaligned_u16_4x2(in - s2o1, CDEF_BSTRIDE); + + // Secondary far taps + sec_src[4] = load_unaligned_u16_4x2(in + s1o2, CDEF_BSTRIDE); + sec_src[5] = load_unaligned_u16_4x2(in - s1o2, CDEF_BSTRIDE); + sec_src[6] = load_unaligned_u16_4x2(in + s2o2, CDEF_BSTRIDE); + sec_src[7] = load_unaligned_u16_4x2(in - s2o2, CDEF_BSTRIDE); + + secondary_filter(s, sec_src, sec_taps, sec_strength, sec_damping, &sum); + + // The source is 16 bits, however, we only really care about the lower + // 8 bits. The upper 8 bits contain the "large" flag. After the final + // primary max has been calculated, zero out the upper 8 bits. Use this + // to find the "16 bit" max. + uint8x16_t sec_max0 = vmaxq_u8(vreinterpretq_u8_u16(sec_src[0]), + vreinterpretq_u8_u16(sec_src[1])); + uint8x16_t sec_max1 = vmaxq_u8(vreinterpretq_u8_u16(sec_src[2]), + vreinterpretq_u8_u16(sec_src[3])); + uint8x16_t sec_max2 = vmaxq_u8(vreinterpretq_u8_u16(sec_src[4]), + vreinterpretq_u8_u16(sec_src[5])); + uint8x16_t sec_max3 = vmaxq_u8(vreinterpretq_u8_u16(sec_src[6]), + vreinterpretq_u8_u16(sec_src[7])); + sec_max0 = vmaxq_u8(sec_max0, sec_max1); + sec_max2 = vmaxq_u8(sec_max2, sec_max3); + sec_max0 = vmaxq_u8(sec_max0, sec_max2); + max = vmaxq_u16(max, vandq_u16(vreinterpretq_u16_u8(sec_max0), + cdef_large_value_mask)); + + uint16x8_t sec_min0 = vminq_u16(sec_src[0], sec_src[1]); + uint16x8_t sec_min1 = vminq_u16(sec_src[2], sec_src[3]); + uint16x8_t sec_min2 = vminq_u16(sec_src[4], sec_src[5]); + uint16x8_t sec_min3 = vminq_u16(sec_src[6], sec_src[7]); + sec_min0 = vminq_u16(sec_min0, sec_min1); + sec_min2 = vminq_u16(sec_min2, sec_min3); + sec_min0 = vminq_u16(sec_min0, sec_min2); + min = vminq_u16(min, sec_min0); + + // res = s + ((sum - (sum < 0) + 8) >> 4) + sum = + vaddq_s16(sum, vreinterpretq_s16_u16(vcltq_s16(sum, vdupq_n_s16(0)))); + int16x8_t res_s16 = vrsraq_n_s16(vreinterpretq_s16_u16(s), sum, 4); + + res_s16 = vminq_s16(vmaxq_s16(res_s16, vreinterpretq_s16_u16(min)), + vreinterpretq_s16_u16(max)); + + const uint8x8_t res_u8 = vqmovun_s16(res_s16); + store_u8x4_strided_x2(dst8, dstride, res_u8); + + in += 2 * CDEF_BSTRIDE; + dst8 += 2 * dstride; + h -= 2; + } while (h != 0); + } +} + +void cdef_filter_8_1_neon(void *dest, int dstride, const uint16_t *in, + int pri_strength, int sec_strength, int dir, + int pri_damping, int sec_damping, int coeff_shift, + int block_width, int block_height) { + (void)sec_strength; + (void)sec_damping; + + const int po1 = cdef_directions[dir][0]; + const int po2 = cdef_directions[dir][1]; + const int *pri_taps = cdef_pri_taps[(pri_strength >> coeff_shift) & 1]; + + if (pri_strength) { + pri_damping = AOMMAX(0, pri_damping - get_msb(pri_strength)); + } + + if (block_width == 8) { + uint8_t *dst8 = (uint8_t 
*)dest; + + int h = block_height; + do { + int16x8_t sum = vdupq_n_s16(0); + uint16x8_t s = vld1q_u16(in); + + uint16x8_t tap[4]; + + // Primary near taps + tap[0] = vld1q_u16(in + po1); + tap[1] = vld1q_u16(in - po1); + + // Primary far taps + tap[2] = vld1q_u16(in + po2); + tap[3] = vld1q_u16(in - po2); + + primary_filter(s, tap, pri_taps, pri_strength, pri_damping, &sum); + + // res = s + ((sum - (sum < 0) + 8) >> 4) + sum = + vaddq_s16(sum, vreinterpretq_s16_u16(vcltq_s16(sum, vdupq_n_s16(0)))); + const int16x8_t res_s16 = vrsraq_n_s16(vreinterpretq_s16_u16(s), sum, 4); + + const uint8x8_t res_u8 = vqmovun_s16(res_s16); + vst1_u8(dst8, res_u8); + + in += CDEF_BSTRIDE; + dst8 += dstride; + } while (--h != 0); + + } else { + uint8_t *dst8 = (uint8_t *)dest; + + int h = block_height; + do { + int16x8_t sum = vdupq_n_s16(0); + uint16x8_t s = load_unaligned_u16_4x2(in, CDEF_BSTRIDE); + + uint16x8_t pri_src[4]; + + // Primary near taps + pri_src[0] = load_unaligned_u16_4x2(in + po1, CDEF_BSTRIDE); + pri_src[1] = load_unaligned_u16_4x2(in - po1, CDEF_BSTRIDE); + + // Primary far taps + pri_src[2] = load_unaligned_u16_4x2(in + po2, CDEF_BSTRIDE); + pri_src[3] = load_unaligned_u16_4x2(in - po2, CDEF_BSTRIDE); + + primary_filter(s, pri_src, pri_taps, pri_strength, pri_damping, &sum); + + // res = s + ((sum - (sum < 0) + 8) >> 4) + sum = + vaddq_s16(sum, vreinterpretq_s16_u16(vcltq_s16(sum, vdupq_n_s16(0)))); + const int16x8_t res_s16 = vrsraq_n_s16(vreinterpretq_s16_u16(s), sum, 4); + + const uint8x8_t res_u8 = vqmovun_s16(res_s16); + store_u8x4_strided_x2(dst8, dstride, res_u8); + + in += 2 * CDEF_BSTRIDE; + dst8 += 2 * dstride; + h -= 2; + } while (h != 0); + } +} + +void cdef_filter_8_2_neon(void *dest, int dstride, const uint16_t *in, + int pri_strength, int sec_strength, int dir, + int pri_damping, int sec_damping, int coeff_shift, + int block_width, int block_height) { + (void)pri_strength; + (void)pri_damping; + (void)coeff_shift; + + const int s1o1 = cdef_directions[dir + 2][0]; + const int s1o2 = cdef_directions[dir + 2][1]; + const int s2o1 = cdef_directions[dir - 2][0]; + const int s2o2 = cdef_directions[dir - 2][1]; + const int *sec_taps = cdef_sec_taps; + + if (sec_strength) { + sec_damping = AOMMAX(0, sec_damping - get_msb(sec_strength)); + } + + if (block_width == 8) { + uint8_t *dst8 = (uint8_t *)dest; + + int h = block_height; + do { + int16x8_t sum = vdupq_n_s16(0); + uint16x8_t s = vld1q_u16(in); + + uint16x8_t sec_src[8]; + + // Secondary near taps + sec_src[0] = vld1q_u16(in + s1o1); + sec_src[1] = vld1q_u16(in - s1o1); + sec_src[2] = vld1q_u16(in + s2o1); + sec_src[3] = vld1q_u16(in - s2o1); + + // Secondary far taps + sec_src[4] = vld1q_u16(in + s1o2); + sec_src[5] = vld1q_u16(in - s1o2); + sec_src[6] = vld1q_u16(in + s2o2); + sec_src[7] = vld1q_u16(in - s2o2); + + secondary_filter(s, sec_src, sec_taps, sec_strength, sec_damping, &sum); + + // res = s + ((sum - (sum < 0) + 8) >> 4) + sum = + vaddq_s16(sum, vreinterpretq_s16_u16(vcltq_s16(sum, vdupq_n_s16(0)))); + const int16x8_t res_s16 = vrsraq_n_s16(vreinterpretq_s16_u16(s), sum, 4); + + const uint8x8_t res_u8 = vqmovun_s16(res_s16); + vst1_u8(dst8, res_u8); + + in += CDEF_BSTRIDE; + dst8 += dstride; + } while (--h != 0); + } else { + uint8_t *dst8 = (uint8_t *)dest; + + int h = block_height; + do { + int16x8_t sum = vdupq_n_s16(0); + uint16x8_t s = load_unaligned_u16_4x2(in, CDEF_BSTRIDE); + + uint16x8_t sec_src[8]; + + // Secondary near taps + sec_src[0] = load_unaligned_u16_4x2(in + s1o1, CDEF_BSTRIDE); + sec_src[1] = 
load_unaligned_u16_4x2(in - s1o1, CDEF_BSTRIDE); + sec_src[2] = load_unaligned_u16_4x2(in + s2o1, CDEF_BSTRIDE); + sec_src[3] = load_unaligned_u16_4x2(in - s2o1, CDEF_BSTRIDE); + + // Secondary far taps + sec_src[4] = load_unaligned_u16_4x2(in + s1o2, CDEF_BSTRIDE); + sec_src[5] = load_unaligned_u16_4x2(in - s1o2, CDEF_BSTRIDE); + sec_src[6] = load_unaligned_u16_4x2(in + s2o2, CDEF_BSTRIDE); + sec_src[7] = load_unaligned_u16_4x2(in - s2o2, CDEF_BSTRIDE); + + secondary_filter(s, sec_src, sec_taps, sec_strength, sec_damping, &sum); + + // res = s + ((sum - (sum < 0) + 8) >> 4) + sum = + vaddq_s16(sum, vreinterpretq_s16_u16(vcltq_s16(sum, vdupq_n_s16(0)))); + const int16x8_t res_s16 = vrsraq_n_s16(vreinterpretq_s16_u16(s), sum, 4); + + const uint8x8_t res_u8 = vqmovun_s16(res_s16); + store_u8x4_strided_x2(dst8, dstride, res_u8); + + in += 2 * CDEF_BSTRIDE; + dst8 += 2 * dstride; + h -= 2; + } while (h != 0); + } +} + +void cdef_filter_8_3_neon(void *dest, int dstride, const uint16_t *in, + int pri_strength, int sec_strength, int dir, + int pri_damping, int sec_damping, int coeff_shift, + int block_width, int block_height) { + (void)pri_strength; + (void)sec_strength; + (void)dir; + (void)pri_damping; + (void)sec_damping; + (void)coeff_shift; + (void)block_width; + if (block_width == 8) { + uint8_t *dst8 = (uint8_t *)dest; + + int h = block_height; + do { + const uint16x8_t s = vld1q_u16(in); + const uint8x8_t res = vqmovn_u16(s); + vst1_u8(dst8, res); + + in += CDEF_BSTRIDE; + dst8 += dstride; + } while (--h != 0); + } else { + uint8_t *dst8 = (uint8_t *)dest; + + int h = block_height; + do { + const uint16x8_t s = load_unaligned_u16_4x2(in, CDEF_BSTRIDE); + const uint8x8_t res = vqmovn_u16(s); + store_u8x4_strided_x2(dst8, dstride, res); + + in += 2 * CDEF_BSTRIDE; + dst8 += 2 * dstride; + h -= 2; + } while (h != 0); + } +} + +void cdef_filter_16_0_neon(void *dest, int dstride, const uint16_t *in, + int pri_strength, int sec_strength, int dir, + int pri_damping, int sec_damping, int coeff_shift, + int block_width, int block_height) { + uint16x8_t max, min; + const uint16x8_t cdef_large_value_mask = + vdupq_n_u16(((uint16_t)~CDEF_VERY_LARGE)); + const int po1 = cdef_directions[dir][0]; + const int po2 = cdef_directions[dir][1]; + const int s1o1 = cdef_directions[dir + 2][0]; + const int s1o2 = cdef_directions[dir + 2][1]; + const int s2o1 = cdef_directions[dir - 2][0]; + const int s2o2 = cdef_directions[dir - 2][1]; + const int *pri_taps = cdef_pri_taps[(pri_strength >> coeff_shift) & 1]; + const int *sec_taps = cdef_sec_taps; + + if (pri_strength) { + pri_damping = AOMMAX(0, pri_damping - get_msb(pri_strength)); + } + if (sec_strength) { + sec_damping = AOMMAX(0, sec_damping - get_msb(sec_strength)); + } + + if (block_width == 8) { + uint16_t *dst16 = (uint16_t *)dest; + + int h = block_height; + do { + int16x8_t sum = vdupq_n_s16(0); + uint16x8_t s = vld1q_u16(in); + max = min = s; + + uint16x8_t pri_src[4]; + + // Primary near taps + pri_src[0] = vld1q_u16(in + po1); + pri_src[1] = vld1q_u16(in - po1); + + // Primary far taps + pri_src[2] = vld1q_u16(in + po2); + pri_src[3] = vld1q_u16(in - po2); + + primary_filter(s, pri_src, pri_taps, pri_strength, pri_damping, &sum); + + uint16x8_t pri_min0 = vminq_u16(pri_src[0], pri_src[1]); + uint16x8_t pri_min1 = vminq_u16(pri_src[2], pri_src[3]); + pri_min0 = vminq_u16(pri_min0, pri_min1); + min = vminq_u16(min, pri_min0); + + /* Convert CDEF_VERY_LARGE to 0 before calculating max. 
*/ + pri_src[0] = vandq_u16(pri_src[0], cdef_large_value_mask); + pri_src[1] = vandq_u16(pri_src[1], cdef_large_value_mask); + pri_src[2] = vandq_u16(pri_src[2], cdef_large_value_mask); + pri_src[3] = vandq_u16(pri_src[3], cdef_large_value_mask); + + uint16x8_t pri_max0 = vmaxq_u16(pri_src[0], pri_src[1]); + uint16x8_t pri_max1 = vmaxq_u16(pri_src[2], pri_src[3]); + pri_max0 = vmaxq_u16(pri_max0, pri_max1); + max = vmaxq_u16(max, pri_max0); + + uint16x8_t sec_src[8]; + + // Secondary near taps + sec_src[0] = vld1q_u16(in + s1o1); + sec_src[1] = vld1q_u16(in - s1o1); + sec_src[2] = vld1q_u16(in + s2o1); + sec_src[3] = vld1q_u16(in - s2o1); + + // Secondary far taps + sec_src[4] = vld1q_u16(in + s1o2); + sec_src[5] = vld1q_u16(in - s1o2); + sec_src[6] = vld1q_u16(in + s2o2); + sec_src[7] = vld1q_u16(in - s2o2); + + secondary_filter(s, sec_src, sec_taps, sec_strength, sec_damping, &sum); + + uint16x8_t sec_min0 = vminq_u16(sec_src[0], sec_src[1]); + uint16x8_t sec_min1 = vminq_u16(sec_src[2], sec_src[3]); + uint16x8_t sec_min2 = vminq_u16(sec_src[4], sec_src[5]); + uint16x8_t sec_min3 = vminq_u16(sec_src[6], sec_src[7]); + sec_min0 = vminq_u16(sec_min0, sec_min1); + sec_min2 = vminq_u16(sec_min2, sec_min3); + sec_min0 = vminq_u16(sec_min0, sec_min2); + min = vminq_u16(min, sec_min0); + + /* Convert CDEF_VERY_LARGE to 0 before calculating max. */ + sec_src[0] = vandq_u16(sec_src[0], cdef_large_value_mask); + sec_src[1] = vandq_u16(sec_src[1], cdef_large_value_mask); + sec_src[2] = vandq_u16(sec_src[2], cdef_large_value_mask); + sec_src[3] = vandq_u16(sec_src[3], cdef_large_value_mask); + sec_src[4] = vandq_u16(sec_src[4], cdef_large_value_mask); + sec_src[5] = vandq_u16(sec_src[5], cdef_large_value_mask); + sec_src[6] = vandq_u16(sec_src[6], cdef_large_value_mask); + sec_src[7] = vandq_u16(sec_src[7], cdef_large_value_mask); + + uint16x8_t sec_max0 = vmaxq_u16(sec_src[0], sec_src[1]); + uint16x8_t sec_max1 = vmaxq_u16(sec_src[2], sec_src[3]); + uint16x8_t sec_max2 = vmaxq_u16(sec_src[4], sec_src[5]); + uint16x8_t sec_max3 = vmaxq_u16(sec_src[6], sec_src[7]); + sec_max0 = vmaxq_u16(sec_max0, sec_max1); + sec_max2 = vmaxq_u16(sec_max2, sec_max3); + sec_max0 = vmaxq_u16(sec_max0, sec_max2); + max = vmaxq_u16(max, sec_max0); + + // res = s + ((sum - (sum < 0) + 8) >> 4) + sum = + vaddq_s16(sum, vreinterpretq_s16_u16(vcltq_s16(sum, vdupq_n_s16(0)))); + int16x8_t res = vrsraq_n_s16(vreinterpretq_s16_u16(s), sum, 4); + + res = vminq_s16(vmaxq_s16(res, vreinterpretq_s16_u16(min)), + vreinterpretq_s16_u16(max)); + + vst1q_u16(dst16, vreinterpretq_u16_s16(res)); + + in += CDEF_BSTRIDE; + dst16 += dstride; + } while (--h != 0); + } else { + uint16_t *dst16 = (uint16_t *)dest; + + int h = block_height; + do { + int16x8_t sum = vdupq_n_s16(0); + uint16x8_t s = load_unaligned_u16_4x2(in, CDEF_BSTRIDE); + max = min = s; + + uint16x8_t pri_src[4]; + + // Primary near taps + pri_src[0] = load_unaligned_u16_4x2(in + po1, CDEF_BSTRIDE); + pri_src[1] = load_unaligned_u16_4x2(in - po1, CDEF_BSTRIDE); + + // Primary far taps + pri_src[2] = load_unaligned_u16_4x2(in + po2, CDEF_BSTRIDE); + pri_src[3] = load_unaligned_u16_4x2(in - po2, CDEF_BSTRIDE); + + primary_filter(s, pri_src, pri_taps, pri_strength, pri_damping, &sum); + + uint16x8_t pri_min1 = vminq_u16(pri_src[0], pri_src[1]); + uint16x8_t pri_min2 = vminq_u16(pri_src[2], pri_src[3]); + pri_min1 = vminq_u16(pri_min1, pri_min2); + min = vminq_u16(min, pri_min1); + + /* Convert CDEF_VERY_LARGE to 0 before calculating max. 
*/ + pri_src[0] = vandq_u16(pri_src[0], cdef_large_value_mask); + pri_src[1] = vandq_u16(pri_src[1], cdef_large_value_mask); + pri_src[2] = vandq_u16(pri_src[2], cdef_large_value_mask); + pri_src[3] = vandq_u16(pri_src[3], cdef_large_value_mask); + uint16x8_t pri_max0 = vmaxq_u16(pri_src[0], pri_src[1]); + uint16x8_t pri_max1 = vmaxq_u16(pri_src[2], pri_src[3]); + pri_max0 = vmaxq_u16(pri_max0, pri_max1); + max = vmaxq_u16(max, pri_max0); + + uint16x8_t sec_src[8]; + + // Secondary near taps + sec_src[0] = load_unaligned_u16_4x2(in + s1o1, CDEF_BSTRIDE); + sec_src[1] = load_unaligned_u16_4x2(in - s1o1, CDEF_BSTRIDE); + sec_src[2] = load_unaligned_u16_4x2(in + s2o1, CDEF_BSTRIDE); + sec_src[3] = load_unaligned_u16_4x2(in - s2o1, CDEF_BSTRIDE); + + // Secondary far taps + sec_src[4] = load_unaligned_u16_4x2(in + s1o2, CDEF_BSTRIDE); + sec_src[5] = load_unaligned_u16_4x2(in - s1o2, CDEF_BSTRIDE); + sec_src[6] = load_unaligned_u16_4x2(in + s2o2, CDEF_BSTRIDE); + sec_src[7] = load_unaligned_u16_4x2(in - s2o2, CDEF_BSTRIDE); + + secondary_filter(s, sec_src, sec_taps, sec_strength, sec_damping, &sum); + + uint16x8_t sec_min0 = vminq_u16(sec_src[0], sec_src[1]); + uint16x8_t sec_min1 = vminq_u16(sec_src[2], sec_src[3]); + uint16x8_t sec_min2 = vminq_u16(sec_src[4], sec_src[5]); + uint16x8_t sec_min3 = vminq_u16(sec_src[6], sec_src[7]); + sec_min0 = vminq_u16(sec_min0, sec_min1); + sec_min2 = vminq_u16(sec_min2, sec_min3); + sec_min0 = vminq_u16(sec_min0, sec_min2); + min = vminq_u16(min, sec_min0); + + /* Convert CDEF_VERY_LARGE to 0 before calculating max. */ + sec_src[0] = vandq_u16(sec_src[0], cdef_large_value_mask); + sec_src[1] = vandq_u16(sec_src[1], cdef_large_value_mask); + sec_src[2] = vandq_u16(sec_src[2], cdef_large_value_mask); + sec_src[3] = vandq_u16(sec_src[3], cdef_large_value_mask); + sec_src[4] = vandq_u16(sec_src[4], cdef_large_value_mask); + sec_src[5] = vandq_u16(sec_src[5], cdef_large_value_mask); + sec_src[6] = vandq_u16(sec_src[6], cdef_large_value_mask); + sec_src[7] = vandq_u16(sec_src[7], cdef_large_value_mask); + + uint16x8_t sec_max0 = vmaxq_u16(sec_src[0], sec_src[1]); + uint16x8_t sec_max1 = vmaxq_u16(sec_src[2], sec_src[3]); + uint16x8_t sec_max2 = vmaxq_u16(sec_src[4], sec_src[5]); + uint16x8_t sec_max3 = vmaxq_u16(sec_src[6], sec_src[7]); + sec_max0 = vmaxq_u16(sec_max0, sec_max1); + sec_max2 = vmaxq_u16(sec_max2, sec_max3); + sec_max0 = vmaxq_u16(sec_max0, sec_max2); + max = vmaxq_u16(max, sec_max0); + + // res = s + ((sum - (sum < 0) + 8) >> 4) + sum = + vaddq_s16(sum, vreinterpretq_s16_u16(vcltq_s16(sum, vdupq_n_s16(0)))); + int16x8_t res = vrsraq_n_s16(vreinterpretq_s16_u16(s), sum, 4); + + res = vminq_s16(vmaxq_s16(res, vreinterpretq_s16_u16(min)), + vreinterpretq_s16_u16(max)); + + store_u16x4_strided_x2(dst16, dstride, vreinterpretq_u16_s16(res)); + + in += 2 * CDEF_BSTRIDE; + dst16 += 2 * dstride; + h -= 2; + } while (h != 0); + } +} + +void cdef_filter_16_1_neon(void *dest, int dstride, const uint16_t *in, + int pri_strength, int sec_strength, int dir, + int pri_damping, int sec_damping, int coeff_shift, + int block_width, int block_height) { + (void)sec_strength; + (void)sec_damping; + + const int po1 = cdef_directions[dir][0]; + const int po2 = cdef_directions[dir][1]; + const int *pri_taps = cdef_pri_taps[(pri_strength >> coeff_shift) & 1]; + + if (pri_strength) { + pri_damping = AOMMAX(0, pri_damping - get_msb(pri_strength)); + } + + if (block_width == 8) { + uint16_t *dst16 = (uint16_t *)dest; + + int h = block_height; + do { + int16x8_t sum = 
vdupq_n_s16(0); + uint16x8_t s = vld1q_u16(in); + + uint16x8_t tap[4]; + + // Primary near taps + tap[0] = vld1q_u16(in + po1); + tap[1] = vld1q_u16(in - po1); + + // Primary far taps + tap[2] = vld1q_u16(in + po2); + tap[3] = vld1q_u16(in - po2); + + primary_filter(s, tap, pri_taps, pri_strength, pri_damping, &sum); + + // res = s + ((sum - (sum < 0) + 8) >> 4) + sum = + vaddq_s16(sum, vreinterpretq_s16_u16(vcltq_s16(sum, vdupq_n_s16(0)))); + const int16x8_t res = vrsraq_n_s16(vreinterpretq_s16_u16(s), sum, 4); + + vst1q_u16(dst16, vreinterpretq_u16_s16(res)); + + in += CDEF_BSTRIDE; + dst16 += dstride; + } while (--h != 0); + } else { + uint16_t *dst16 = (uint16_t *)dest; + + int h = block_height; + do { + int16x8_t sum = vdupq_n_s16(0); + uint16x8_t s = load_unaligned_u16_4x2(in, CDEF_BSTRIDE); + + uint16x8_t pri_src[4]; + + // Primary near taps + pri_src[0] = load_unaligned_u16_4x2(in + po1, CDEF_BSTRIDE); + pri_src[1] = load_unaligned_u16_4x2(in - po1, CDEF_BSTRIDE); + + // Primary far taps + pri_src[2] = load_unaligned_u16_4x2(in + po2, CDEF_BSTRIDE); + pri_src[3] = load_unaligned_u16_4x2(in - po2, CDEF_BSTRIDE); + + primary_filter(s, pri_src, pri_taps, pri_strength, pri_damping, &sum); + + // res = s + ((sum - (sum < 0) + 8) >> 4) + sum = + vaddq_s16(sum, vreinterpretq_s16_u16(vcltq_s16(sum, vdupq_n_s16(0)))); + const int16x8_t res = vrsraq_n_s16(vreinterpretq_s16_u16(s), sum, 4); + + store_u16x4_strided_x2(dst16, dstride, vreinterpretq_u16_s16(res)); + + in += 2 * CDEF_BSTRIDE; + dst16 += 2 * dstride; + h -= 2; + } while (h != 0); + } +} + +void cdef_filter_16_2_neon(void *dest, int dstride, const uint16_t *in, + int pri_strength, int sec_strength, int dir, + int pri_damping, int sec_damping, int coeff_shift, + int block_width, int block_height) { + (void)pri_strength; + (void)pri_damping; + (void)coeff_shift; + + const int s1o1 = cdef_directions[dir + 2][0]; + const int s1o2 = cdef_directions[dir + 2][1]; + const int s2o1 = cdef_directions[dir - 2][0]; + const int s2o2 = cdef_directions[dir - 2][1]; + const int *sec_taps = cdef_sec_taps; + + if (sec_strength) { + sec_damping = AOMMAX(0, sec_damping - get_msb(sec_strength)); + } + + if (block_width == 8) { + uint16_t *dst16 = (uint16_t *)dest; + + int h = block_height; + do { + int16x8_t sum = vdupq_n_s16(0); + uint16x8_t s = vld1q_u16(in); + + uint16x8_t sec_src[8]; + + // Secondary near taps + sec_src[0] = vld1q_u16(in + s1o1); + sec_src[1] = vld1q_u16(in - s1o1); + sec_src[2] = vld1q_u16(in + s2o1); + sec_src[3] = vld1q_u16(in - s2o1); + + // Secondary far taps + sec_src[4] = vld1q_u16(in + s1o2); + sec_src[5] = vld1q_u16(in - s1o2); + sec_src[6] = vld1q_u16(in + s2o2); + sec_src[7] = vld1q_u16(in - s2o2); + + secondary_filter(s, sec_src, sec_taps, sec_strength, sec_damping, &sum); + + // res = s + ((sum - (sum < 0) + 8) >> 4) + sum = + vaddq_s16(sum, vreinterpretq_s16_u16(vcltq_s16(sum, vdupq_n_s16(0)))); + const int16x8_t res = vrsraq_n_s16(vreinterpretq_s16_u16(s), sum, 4); + + vst1q_u16(dst16, vreinterpretq_u16_s16(res)); + + in += CDEF_BSTRIDE; + dst16 += dstride; + } while (--h != 0); + } else { + uint16_t *dst16 = (uint16_t *)dest; + + int h = block_height; + do { + int16x8_t sum = vdupq_n_s16(0); + uint16x8_t s = load_unaligned_u16_4x2(in, CDEF_BSTRIDE); + + uint16x8_t sec_src[8]; + + // Secondary near taps + sec_src[0] = load_unaligned_u16_4x2(in + s1o1, CDEF_BSTRIDE); + sec_src[1] = load_unaligned_u16_4x2(in - s1o1, CDEF_BSTRIDE); + sec_src[2] = load_unaligned_u16_4x2(in + s2o1, CDEF_BSTRIDE); + sec_src[3] = 
load_unaligned_u16_4x2(in - s2o1, CDEF_BSTRIDE); + + // Secondary far taps + sec_src[4] = load_unaligned_u16_4x2(in + s1o2, CDEF_BSTRIDE); + sec_src[5] = load_unaligned_u16_4x2(in - s1o2, CDEF_BSTRIDE); + sec_src[6] = load_unaligned_u16_4x2(in + s2o2, CDEF_BSTRIDE); + sec_src[7] = load_unaligned_u16_4x2(in - s2o2, CDEF_BSTRIDE); + + secondary_filter(s, sec_src, sec_taps, sec_strength, sec_damping, &sum); + + // res = s + ((sum - (sum < 0) + 8) >> 4) + sum = + vaddq_s16(sum, vreinterpretq_s16_u16(vcltq_s16(sum, vdupq_n_s16(0)))); + const int16x8_t res = vrsraq_n_s16(vreinterpretq_s16_u16(s), sum, 4); + + store_u16x4_strided_x2(dst16, dstride, vreinterpretq_u16_s16(res)); + + in += 2 * CDEF_BSTRIDE; + dst16 += 2 * dstride; + h -= 2; + } while (h != 0); + } +} + +void cdef_filter_16_3_neon(void *dest, int dstride, const uint16_t *in, + int pri_strength, int sec_strength, int dir, + int pri_damping, int sec_damping, int coeff_shift, + int block_width, int block_height) { + (void)pri_strength; + (void)sec_strength; + (void)dir; + (void)pri_damping; + (void)sec_damping; + (void)coeff_shift; + (void)block_width; + if (block_width == 8) { + uint16_t *dst16 = (uint16_t *)dest; + + int h = block_height; + do { + const uint16x8_t s = vld1q_u16(in); + vst1q_u16(dst16, s); + + in += CDEF_BSTRIDE; + dst16 += dstride; + } while (--h != 0); + } else { + uint16_t *dst16 = (uint16_t *)dest; + + int h = block_height; + do { + const uint16x8_t s = load_unaligned_u16_4x2(in, CDEF_BSTRIDE); + store_u16x4_strided_x2(dst16, dstride, s); + + in += 2 * CDEF_BSTRIDE; + dst16 += 2 * dstride; + h -= 2; + } while (h != 0); + } +} diff --git a/third_party/aom/av1/common/arm/cfl_neon.c b/third_party/aom/av1/common/arm/cfl_neon.c new file mode 100644 index 0000000000..0871b4fe06 --- /dev/null +++ b/third_party/aom/av1/common/arm/cfl_neon.c @@ -0,0 +1,589 @@ +/* + * Copyright (c) 2017, Alliance for Open Media. All rights reserved + * + * This source code is subject to the terms of the BSD 2 Clause License and + * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License + * was not distributed with this source code in the LICENSE file, you can + * obtain it at www.aomedia.org/license/software. If the Alliance for Open + * Media Patent License 1.0 was not distributed with this source code in the + * PATENTS file, you can obtain it at www.aomedia.org/license/patent. + */ +#include <arm_neon.h> + +#include "config/aom_config.h" +#include "config/av1_rtcd.h" + +#include "av1/common/cfl.h" + +static INLINE void vldsubstq_s16(int16_t *dst, const uint16_t *src, int offset, + int16x8_t sub) { + vst1q_s16(dst + offset, + vsubq_s16(vreinterpretq_s16_u16(vld1q_u16(src + offset)), sub)); +} + +static INLINE uint16x8_t vldaddq_u16(const uint16_t *buf, size_t offset) { + return vaddq_u16(vld1q_u16(buf), vld1q_u16(buf + offset)); +} + +// Load half of a vector and duplicated in other half +static INLINE uint8x8_t vldh_dup_u8(const uint8_t *ptr) { + return vreinterpret_u8_u32(vld1_dup_u32((const uint32_t *)ptr)); +} + +// Store half of a vector. +static INLINE void vsth_u16(uint16_t *ptr, uint16x4_t val) { + vst1_lane_u32((uint32_t *)ptr, vreinterpret_u32_u16(val), 0); +} + +// Store half of a vector. 
+static INLINE void vsth_u8(uint8_t *ptr, uint8x8_t val) { + vst1_lane_u32((uint32_t *)ptr, vreinterpret_u32_u8(val), 0); +} + +static void cfl_luma_subsampling_420_lbd_neon(const uint8_t *input, + int input_stride, + uint16_t *pred_buf_q3, int width, + int height) { + const uint16_t *end = pred_buf_q3 + (height >> 1) * CFL_BUF_LINE; + const int luma_stride = input_stride << 1; + do { + if (width == 4) { + const uint16x4_t top = vpaddl_u8(vldh_dup_u8(input)); + const uint16x4_t sum = vpadal_u8(top, vldh_dup_u8(input + input_stride)); + vsth_u16(pred_buf_q3, vshl_n_u16(sum, 1)); + } else if (width == 8) { + const uint16x4_t top = vpaddl_u8(vld1_u8(input)); + const uint16x4_t sum = vpadal_u8(top, vld1_u8(input + input_stride)); + vst1_u16(pred_buf_q3, vshl_n_u16(sum, 1)); + } else if (width == 16) { + const uint16x8_t top = vpaddlq_u8(vld1q_u8(input)); + const uint16x8_t sum = vpadalq_u8(top, vld1q_u8(input + input_stride)); + vst1q_u16(pred_buf_q3, vshlq_n_u16(sum, 1)); + } else { + const uint8x8x4_t top = vld4_u8(input); + const uint8x8x4_t bot = vld4_u8(input + input_stride); + // equivalent to a vpaddlq_u8 (because vld4q interleaves) + const uint16x8_t top_0 = vaddl_u8(top.val[0], top.val[1]); + // equivalent to a vpaddlq_u8 (because vld4q interleaves) + const uint16x8_t bot_0 = vaddl_u8(bot.val[0], bot.val[1]); + // equivalent to a vpaddlq_u8 (because vld4q interleaves) + const uint16x8_t top_1 = vaddl_u8(top.val[2], top.val[3]); + // equivalent to a vpaddlq_u8 (because vld4q interleaves) + const uint16x8_t bot_1 = vaddl_u8(bot.val[2], bot.val[3]); + uint16x8x2_t sum; + sum.val[0] = vshlq_n_u16(vaddq_u16(top_0, bot_0), 1); + sum.val[1] = vshlq_n_u16(vaddq_u16(top_1, bot_1), 1); + vst2q_u16(pred_buf_q3, sum); + } + input += luma_stride; + } while ((pred_buf_q3 += CFL_BUF_LINE) < end); +} + +static void cfl_luma_subsampling_422_lbd_neon(const uint8_t *input, + int input_stride, + uint16_t *pred_buf_q3, int width, + int height) { + const uint16_t *end = pred_buf_q3 + height * CFL_BUF_LINE; + do { + if (width == 4) { + const uint16x4_t top = vpaddl_u8(vldh_dup_u8(input)); + vsth_u16(pred_buf_q3, vshl_n_u16(top, 2)); + } else if (width == 8) { + const uint16x4_t top = vpaddl_u8(vld1_u8(input)); + vst1_u16(pred_buf_q3, vshl_n_u16(top, 2)); + } else if (width == 16) { + const uint16x8_t top = vpaddlq_u8(vld1q_u8(input)); + vst1q_u16(pred_buf_q3, vshlq_n_u16(top, 2)); + } else { + const uint8x8x4_t top = vld4_u8(input); + uint16x8x2_t sum; + // vaddl_u8 is equivalent to a vpaddlq_u8 (because vld4q interleaves) + sum.val[0] = vshlq_n_u16(vaddl_u8(top.val[0], top.val[1]), 2); + sum.val[1] = vshlq_n_u16(vaddl_u8(top.val[2], top.val[3]), 2); + vst2q_u16(pred_buf_q3, sum); + } + input += input_stride; + } while ((pred_buf_q3 += CFL_BUF_LINE) < end); +} + +static void cfl_luma_subsampling_444_lbd_neon(const uint8_t *input, + int input_stride, + uint16_t *pred_buf_q3, int width, + int height) { + const uint16_t *end = pred_buf_q3 + height * CFL_BUF_LINE; + do { + if (width == 4) { + const uint16x8_t top = vshll_n_u8(vldh_dup_u8(input), 3); + vst1_u16(pred_buf_q3, vget_low_u16(top)); + } else if (width == 8) { + const uint16x8_t top = vshll_n_u8(vld1_u8(input), 3); + vst1q_u16(pred_buf_q3, top); + } else { + const uint8x16_t top = vld1q_u8(input); + vst1q_u16(pred_buf_q3, vshll_n_u8(vget_low_u8(top), 3)); + vst1q_u16(pred_buf_q3 + 8, vshll_n_u8(vget_high_u8(top), 3)); + if (width == 32) { + const uint8x16_t next_top = vld1q_u8(input + 16); + vst1q_u16(pred_buf_q3 + 16, vshll_n_u8(vget_low_u8(next_top), 
3)); + vst1q_u16(pred_buf_q3 + 24, vshll_n_u8(vget_high_u8(next_top), 3)); + } + } + input += input_stride; + } while ((pred_buf_q3 += CFL_BUF_LINE) < end); +} + +#if CONFIG_AV1_HIGHBITDEPTH +#if !AOM_ARCH_AARCH64 +uint16x8_t vpaddq_u16(uint16x8_t a, uint16x8_t b) { + return vcombine_u16(vpadd_u16(vget_low_u16(a), vget_high_u16(a)), + vpadd_u16(vget_low_u16(b), vget_high_u16(b))); +} +#endif + +static void cfl_luma_subsampling_420_hbd_neon(const uint16_t *input, + int input_stride, + uint16_t *pred_buf_q3, int width, + int height) { + const uint16_t *end = pred_buf_q3 + (height >> 1) * CFL_BUF_LINE; + const int luma_stride = input_stride << 1; + do { + if (width == 4) { + const uint16x4_t top = vld1_u16(input); + const uint16x4_t bot = vld1_u16(input + input_stride); + const uint16x4_t sum = vadd_u16(top, bot); + const uint16x4_t hsum = vpadd_u16(sum, sum); + vsth_u16(pred_buf_q3, vshl_n_u16(hsum, 1)); + } else if (width < 32) { + const uint16x8_t top = vld1q_u16(input); + const uint16x8_t bot = vld1q_u16(input + input_stride); + const uint16x8_t sum = vaddq_u16(top, bot); + if (width == 8) { + const uint16x4_t hsum = vget_low_u16(vpaddq_u16(sum, sum)); + vst1_u16(pred_buf_q3, vshl_n_u16(hsum, 1)); + } else { + const uint16x8_t top_1 = vld1q_u16(input + 8); + const uint16x8_t bot_1 = vld1q_u16(input + 8 + input_stride); + const uint16x8_t sum_1 = vaddq_u16(top_1, bot_1); + const uint16x8_t hsum = vpaddq_u16(sum, sum_1); + vst1q_u16(pred_buf_q3, vshlq_n_u16(hsum, 1)); + } + } else { + const uint16x8x4_t top = vld4q_u16(input); + const uint16x8x4_t bot = vld4q_u16(input + input_stride); + // equivalent to a vpaddq_u16 (because vld4q interleaves) + const uint16x8_t top_0 = vaddq_u16(top.val[0], top.val[1]); + // equivalent to a vpaddq_u16 (because vld4q interleaves) + const uint16x8_t bot_0 = vaddq_u16(bot.val[0], bot.val[1]); + // equivalent to a vpaddq_u16 (because vld4q interleaves) + const uint16x8_t top_1 = vaddq_u16(top.val[2], top.val[3]); + // equivalent to a vpaddq_u16 (because vld4q interleaves) + const uint16x8_t bot_1 = vaddq_u16(bot.val[2], bot.val[3]); + uint16x8x2_t sum; + sum.val[0] = vshlq_n_u16(vaddq_u16(top_0, bot_0), 1); + sum.val[1] = vshlq_n_u16(vaddq_u16(top_1, bot_1), 1); + vst2q_u16(pred_buf_q3, sum); + } + input += luma_stride; + } while ((pred_buf_q3 += CFL_BUF_LINE) < end); +} + +static void cfl_luma_subsampling_422_hbd_neon(const uint16_t *input, + int input_stride, + uint16_t *pred_buf_q3, int width, + int height) { + const uint16_t *end = pred_buf_q3 + height * CFL_BUF_LINE; + do { + if (width == 4) { + const uint16x4_t top = vld1_u16(input); + const uint16x4_t hsum = vpadd_u16(top, top); + vsth_u16(pred_buf_q3, vshl_n_u16(hsum, 2)); + } else if (width == 8) { + const uint16x4x2_t top = vld2_u16(input); + // equivalent to a vpadd_u16 (because vld2 interleaves) + const uint16x4_t hsum = vadd_u16(top.val[0], top.val[1]); + vst1_u16(pred_buf_q3, vshl_n_u16(hsum, 2)); + } else if (width == 16) { + const uint16x8x2_t top = vld2q_u16(input); + // equivalent to a vpaddq_u16 (because vld2q interleaves) + const uint16x8_t hsum = vaddq_u16(top.val[0], top.val[1]); + vst1q_u16(pred_buf_q3, vshlq_n_u16(hsum, 2)); + } else { + const uint16x8x4_t top = vld4q_u16(input); + // equivalent to a vpaddq_u16 (because vld4q interleaves) + const uint16x8_t hsum_0 = vaddq_u16(top.val[0], top.val[1]); + // equivalent to a vpaddq_u16 (because vld4q interleaves) + const uint16x8_t hsum_1 = vaddq_u16(top.val[2], top.val[3]); + uint16x8x2_t result = { { vshlq_n_u16(hsum_0, 2), + 
vshlq_n_u16(hsum_1, 2) } }; + vst2q_u16(pred_buf_q3, result); + } + input += input_stride; + } while ((pred_buf_q3 += CFL_BUF_LINE) < end); +} + +static void cfl_luma_subsampling_444_hbd_neon(const uint16_t *input, + int input_stride, + uint16_t *pred_buf_q3, int width, + int height) { + const uint16_t *end = pred_buf_q3 + height * CFL_BUF_LINE; + do { + if (width == 4) { + const uint16x4_t top = vld1_u16(input); + vst1_u16(pred_buf_q3, vshl_n_u16(top, 3)); + } else if (width == 8) { + const uint16x8_t top = vld1q_u16(input); + vst1q_u16(pred_buf_q3, vshlq_n_u16(top, 3)); + } else if (width == 16) { + uint16x8x2_t top = vld2q_u16(input); + top.val[0] = vshlq_n_u16(top.val[0], 3); + top.val[1] = vshlq_n_u16(top.val[1], 3); + vst2q_u16(pred_buf_q3, top); + } else { + uint16x8x4_t top = vld4q_u16(input); + top.val[0] = vshlq_n_u16(top.val[0], 3); + top.val[1] = vshlq_n_u16(top.val[1], 3); + top.val[2] = vshlq_n_u16(top.val[2], 3); + top.val[3] = vshlq_n_u16(top.val[3], 3); + vst4q_u16(pred_buf_q3, top); + } + input += input_stride; + } while ((pred_buf_q3 += CFL_BUF_LINE) < end); +} +#endif // CONFIG_AV1_HIGHBITDEPTH + +CFL_GET_SUBSAMPLE_FUNCTION(neon) + +static INLINE void subtract_average_neon(const uint16_t *src, int16_t *dst, + int width, int height, + int round_offset, + const int num_pel_log2) { + const uint16_t *const end = src + height * CFL_BUF_LINE; + + // Round offset is not needed, because NEON will handle the rounding. + (void)round_offset; + + // To optimize the use of the CPU pipeline, we process 4 rows per iteration + const int step = 4 * CFL_BUF_LINE; + + // At this stage, the prediction buffer contains scaled reconstructed luma + // pixels, which are positive integer and only require 15 bits. By using + // unsigned integer for the sum, we can do one addition operation inside 16 + // bits (8 lanes) before having to convert to 32 bits (4 lanes). + const uint16_t *sum_buf = src; + uint32x4_t sum_32x4 = vdupq_n_u32(0); + do { + // For all widths, we load, add and combine the data so it fits in 4 lanes. 
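A quick overflow check for the accumulation strategy described above (15-bit inputs, a single 16-bit addition, then pairwise widening with vpadalq_u16); purely illustrative:

    #include <stdint.h>

    // Two 15-bit values fit in one unsigned 16-bit addition...
    _Static_assert(2u * 0x7FFFu <= UINT16_MAX,
                   "one u16 add of 15-bit inputs cannot overflow");
    // ...but three do not (3 * 0x7FFF = 0x17FFD), which is why each vldaddq_u16
    // result is widened into the 32-bit accumulator before being added again.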
+ if (width == 4) { + const uint16x4_t a0 = + vadd_u16(vld1_u16(sum_buf), vld1_u16(sum_buf + CFL_BUF_LINE)); + const uint16x4_t a1 = vadd_u16(vld1_u16(sum_buf + 2 * CFL_BUF_LINE), + vld1_u16(sum_buf + 3 * CFL_BUF_LINE)); + sum_32x4 = vaddq_u32(sum_32x4, vaddl_u16(a0, a1)); + } else if (width == 8) { + const uint16x8_t a0 = vldaddq_u16(sum_buf, CFL_BUF_LINE); + const uint16x8_t a1 = + vldaddq_u16(sum_buf + 2 * CFL_BUF_LINE, CFL_BUF_LINE); + sum_32x4 = vpadalq_u16(sum_32x4, a0); + sum_32x4 = vpadalq_u16(sum_32x4, a1); + } else { + const uint16x8_t row0 = vldaddq_u16(sum_buf, 8); + const uint16x8_t row1 = vldaddq_u16(sum_buf + CFL_BUF_LINE, 8); + const uint16x8_t row2 = vldaddq_u16(sum_buf + 2 * CFL_BUF_LINE, 8); + const uint16x8_t row3 = vldaddq_u16(sum_buf + 3 * CFL_BUF_LINE, 8); + sum_32x4 = vpadalq_u16(sum_32x4, row0); + sum_32x4 = vpadalq_u16(sum_32x4, row1); + sum_32x4 = vpadalq_u16(sum_32x4, row2); + sum_32x4 = vpadalq_u16(sum_32x4, row3); + + if (width == 32) { + const uint16x8_t row0_1 = vldaddq_u16(sum_buf + 16, 8); + const uint16x8_t row1_1 = vldaddq_u16(sum_buf + CFL_BUF_LINE + 16, 8); + const uint16x8_t row2_1 = + vldaddq_u16(sum_buf + 2 * CFL_BUF_LINE + 16, 8); + const uint16x8_t row3_1 = + vldaddq_u16(sum_buf + 3 * CFL_BUF_LINE + 16, 8); + + sum_32x4 = vpadalq_u16(sum_32x4, row0_1); + sum_32x4 = vpadalq_u16(sum_32x4, row1_1); + sum_32x4 = vpadalq_u16(sum_32x4, row2_1); + sum_32x4 = vpadalq_u16(sum_32x4, row3_1); + } + } + sum_buf += step; + } while (sum_buf < end); + + // Permute and add in such a way that each lane contains the block sum. + // [A+C+B+D, B+D+A+C, C+A+D+B, D+B+C+A] +#if AOM_ARCH_AARCH64 + sum_32x4 = vpaddq_u32(sum_32x4, sum_32x4); + sum_32x4 = vpaddq_u32(sum_32x4, sum_32x4); +#else + uint32x4_t flip = + vcombine_u32(vget_high_u32(sum_32x4), vget_low_u32(sum_32x4)); + sum_32x4 = vaddq_u32(sum_32x4, flip); + sum_32x4 = vaddq_u32(sum_32x4, vrev64q_u32(sum_32x4)); +#endif + + // Computing the average could be done using scalars, but getting off the NEON + // engine introduces latency, so we use vqrshrn. + int16x4_t avg_16x4; + // Constant propagation makes for some ugly code. 
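The switch that follows exists because vqrshrn_n_u32 takes its shift amount as a compile-time immediate; every case computes the same rounded mean of the block sum. A scalar restatement for one lane (helper name illustrative; the saturation vqrshrn applies should not trigger given the 15-bit inputs noted above):

    #include <stdint.h>

    // avg = (sum + 2^(num_pel_log2 - 1)) >> num_pel_log2, i.e. a rounded mean.
    static inline int16_t rounded_avg(uint32_t sum, int num_pel_log2) {
      return (int16_t)((sum + (1u << (num_pel_log2 - 1))) >> num_pel_log2);
    }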
+ switch (num_pel_log2) { + case 4: avg_16x4 = vreinterpret_s16_u16(vqrshrn_n_u32(sum_32x4, 4)); break; + case 5: avg_16x4 = vreinterpret_s16_u16(vqrshrn_n_u32(sum_32x4, 5)); break; + case 6: avg_16x4 = vreinterpret_s16_u16(vqrshrn_n_u32(sum_32x4, 6)); break; + case 7: avg_16x4 = vreinterpret_s16_u16(vqrshrn_n_u32(sum_32x4, 7)); break; + case 8: avg_16x4 = vreinterpret_s16_u16(vqrshrn_n_u32(sum_32x4, 8)); break; + case 9: avg_16x4 = vreinterpret_s16_u16(vqrshrn_n_u32(sum_32x4, 9)); break; + case 10: + avg_16x4 = vreinterpret_s16_u16(vqrshrn_n_u32(sum_32x4, 10)); + break; + default: assert(0); + } + + if (width == 4) { + do { + vst1_s16(dst, vsub_s16(vreinterpret_s16_u16(vld1_u16(src)), avg_16x4)); + src += CFL_BUF_LINE; + dst += CFL_BUF_LINE; + } while (src < end); + } else { + const int16x8_t avg_16x8 = vcombine_s16(avg_16x4, avg_16x4); + do { + vldsubstq_s16(dst, src, 0, avg_16x8); + vldsubstq_s16(dst, src, CFL_BUF_LINE, avg_16x8); + vldsubstq_s16(dst, src, 2 * CFL_BUF_LINE, avg_16x8); + vldsubstq_s16(dst, src, 3 * CFL_BUF_LINE, avg_16x8); + + if (width > 8) { + vldsubstq_s16(dst, src, 8, avg_16x8); + vldsubstq_s16(dst, src, 8 + CFL_BUF_LINE, avg_16x8); + vldsubstq_s16(dst, src, 8 + 2 * CFL_BUF_LINE, avg_16x8); + vldsubstq_s16(dst, src, 8 + 3 * CFL_BUF_LINE, avg_16x8); + } + if (width == 32) { + vldsubstq_s16(dst, src, 16, avg_16x8); + vldsubstq_s16(dst, src, 16 + CFL_BUF_LINE, avg_16x8); + vldsubstq_s16(dst, src, 16 + 2 * CFL_BUF_LINE, avg_16x8); + vldsubstq_s16(dst, src, 16 + 3 * CFL_BUF_LINE, avg_16x8); + vldsubstq_s16(dst, src, 24, avg_16x8); + vldsubstq_s16(dst, src, 24 + CFL_BUF_LINE, avg_16x8); + vldsubstq_s16(dst, src, 24 + 2 * CFL_BUF_LINE, avg_16x8); + vldsubstq_s16(dst, src, 24 + 3 * CFL_BUF_LINE, avg_16x8); + } + src += step; + dst += step; + } while (src < end); + } +} + +CFL_SUB_AVG_FN(neon) + +// Saturating negate 16-bit integers in a when the corresponding signed 16-bit +// integer in b is negative. +// Notes: +// * Negating INT16_MIN results in INT16_MIN. However, this cannot occur in +// practice, as scaled_luma is the multiplication of two absolute values. +// * In the Intel equivalent, elements in a are zeroed out when the +// corresponding elements in b are zero. Because vsign is used twice in a +// row, with b in the first call becoming a in the second call, there's no +// impact from not zeroing out. +static int16x4_t vsign_s16(int16x4_t a, int16x4_t b) { + const int16x4_t mask = vshr_n_s16(b, 15); + return veor_s16(vadd_s16(a, mask), mask); +} + +// Saturating negate 16-bit integers in a when the corresponding signed 16-bit +// integer in b is negative. +// Notes: +// * Negating INT16_MIN results in INT16_MIN. However, this cannot occur in +// practice, as scaled_luma is the multiplication of two absolute values. +// * In the Intel equivalent, elements in a are zeroed out when the +// corresponding elements in b are zero. Because vsignq is used twice in a +// row, with b in the first call becoming a in the second call, there's no +// impact from not zeroing out. 
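The (a + mask) ^ mask construction used by vsign_s16 above and vsignq_s16 below is the usual two's-complement conditional negate, with mask = b >> 15 (arithmetic shift, so either 0 or all ones):

    //   b >= 0: mask =  0  -> (a + 0) ^ 0x0000 = a
    //   b <  0: mask = -1  -> (a - 1) ^ 0xFFFF = ~(a - 1) = -a
    // e.g. a = 5, b = -3: mask = 0xFFFF, 5 - 1 = 0x0004, 0x0004 ^ 0xFFFF = 0xFFFB = -5.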
+static int16x8_t vsignq_s16(int16x8_t a, int16x8_t b) { + const int16x8_t mask = vshrq_n_s16(b, 15); + return veorq_s16(vaddq_s16(a, mask), mask); +} + +static INLINE int16x4_t predict_w4(const int16_t *pred_buf_q3, + int16x4_t alpha_sign, int abs_alpha_q12, + int16x4_t dc) { + const int16x4_t ac_q3 = vld1_s16(pred_buf_q3); + const int16x4_t ac_sign = veor_s16(alpha_sign, ac_q3); + int16x4_t scaled_luma = vqrdmulh_n_s16(vabs_s16(ac_q3), abs_alpha_q12); + return vadd_s16(vsign_s16(scaled_luma, ac_sign), dc); +} + +static INLINE int16x8_t predict_w8(const int16_t *pred_buf_q3, + int16x8_t alpha_sign, int abs_alpha_q12, + int16x8_t dc) { + const int16x8_t ac_q3 = vld1q_s16(pred_buf_q3); + const int16x8_t ac_sign = veorq_s16(alpha_sign, ac_q3); + int16x8_t scaled_luma = vqrdmulhq_n_s16(vabsq_s16(ac_q3), abs_alpha_q12); + return vaddq_s16(vsignq_s16(scaled_luma, ac_sign), dc); +} + +static INLINE int16x8x2_t predict_w16(const int16_t *pred_buf_q3, + int16x8_t alpha_sign, int abs_alpha_q12, + int16x8_t dc) { + // vld2q_s16 interleaves, which is not useful for prediction. vst1q_s16_x2 + // does not interleave, but is not currently available in the compilier used + // by the AOM build system. + const int16x8x2_t ac_q3 = vld2q_s16(pred_buf_q3); + const int16x8_t ac_sign_0 = veorq_s16(alpha_sign, ac_q3.val[0]); + const int16x8_t ac_sign_1 = veorq_s16(alpha_sign, ac_q3.val[1]); + const int16x8_t scaled_luma_0 = + vqrdmulhq_n_s16(vabsq_s16(ac_q3.val[0]), abs_alpha_q12); + const int16x8_t scaled_luma_1 = + vqrdmulhq_n_s16(vabsq_s16(ac_q3.val[1]), abs_alpha_q12); + int16x8x2_t result; + result.val[0] = vaddq_s16(vsignq_s16(scaled_luma_0, ac_sign_0), dc); + result.val[1] = vaddq_s16(vsignq_s16(scaled_luma_1, ac_sign_1), dc); + return result; +} + +static INLINE int16x8x4_t predict_w32(const int16_t *pred_buf_q3, + int16x8_t alpha_sign, int abs_alpha_q12, + int16x8_t dc) { + // vld4q_s16 interleaves, which is not useful for prediction. vst1q_s16_x4 + // does not interleave, but is not currently available in the compilier used + // by the AOM build system. 
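For reference, the scaling done by the predict_w* helpers here can be restated in plain C. vqrdmulhq_n_s16(x, y) computes (x * y + 2^14) >> 15 (with saturation), so feeding it |ac_q3| and |alpha_q3| << 9 yields (|ac_q3| * |alpha_q3| + 32) >> 6, the rounded Q6-to-Q0 product; vsignq_s16 then restores the sign of the product before the DC value is added. A sketch under those observations (helper name illustrative):

    #include <stdint.h>
    #include <stdlib.h>

    // Rounded Q0 result of the Q3 luma AC value scaled by the Q3 alpha.
    static inline int16_t scale_luma_q0(int16_t ac_q3, int alpha_q3) {
      const int rounded = (abs(alpha_q3) * abs(ac_q3) + 32) >> 6;  // Q6 -> Q0
      const int negative = (alpha_q3 < 0) != (ac_q3 < 0);          // sign of product
      return (int16_t)(negative ? -rounded : rounded);
    }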
+ const int16x8x4_t ac_q3 = vld4q_s16(pred_buf_q3); + const int16x8_t ac_sign_0 = veorq_s16(alpha_sign, ac_q3.val[0]); + const int16x8_t ac_sign_1 = veorq_s16(alpha_sign, ac_q3.val[1]); + const int16x8_t ac_sign_2 = veorq_s16(alpha_sign, ac_q3.val[2]); + const int16x8_t ac_sign_3 = veorq_s16(alpha_sign, ac_q3.val[3]); + const int16x8_t scaled_luma_0 = + vqrdmulhq_n_s16(vabsq_s16(ac_q3.val[0]), abs_alpha_q12); + const int16x8_t scaled_luma_1 = + vqrdmulhq_n_s16(vabsq_s16(ac_q3.val[1]), abs_alpha_q12); + const int16x8_t scaled_luma_2 = + vqrdmulhq_n_s16(vabsq_s16(ac_q3.val[2]), abs_alpha_q12); + const int16x8_t scaled_luma_3 = + vqrdmulhq_n_s16(vabsq_s16(ac_q3.val[3]), abs_alpha_q12); + int16x8x4_t result; + result.val[0] = vaddq_s16(vsignq_s16(scaled_luma_0, ac_sign_0), dc); + result.val[1] = vaddq_s16(vsignq_s16(scaled_luma_1, ac_sign_1), dc); + result.val[2] = vaddq_s16(vsignq_s16(scaled_luma_2, ac_sign_2), dc); + result.val[3] = vaddq_s16(vsignq_s16(scaled_luma_3, ac_sign_3), dc); + return result; +} + +static INLINE void cfl_predict_lbd_neon(const int16_t *pred_buf_q3, + uint8_t *dst, int dst_stride, + int alpha_q3, int width, int height) { + const int16_t abs_alpha_q12 = abs(alpha_q3) << 9; + const int16_t *const end = pred_buf_q3 + height * CFL_BUF_LINE; + if (width == 4) { + const int16x4_t alpha_sign = vdup_n_s16(alpha_q3); + const int16x4_t dc = vdup_n_s16(*dst); + do { + const int16x4_t pred = + predict_w4(pred_buf_q3, alpha_sign, abs_alpha_q12, dc); + vsth_u8(dst, vqmovun_s16(vcombine_s16(pred, pred))); + dst += dst_stride; + } while ((pred_buf_q3 += CFL_BUF_LINE) < end); + } else { + const int16x8_t alpha_sign = vdupq_n_s16(alpha_q3); + const int16x8_t dc = vdupq_n_s16(*dst); + do { + if (width == 8) { + vst1_u8(dst, vqmovun_s16(predict_w8(pred_buf_q3, alpha_sign, + abs_alpha_q12, dc))); + } else if (width == 16) { + const int16x8x2_t pred = + predict_w16(pred_buf_q3, alpha_sign, abs_alpha_q12, dc); + const uint8x8x2_t predun = { { vqmovun_s16(pred.val[0]), + vqmovun_s16(pred.val[1]) } }; + vst2_u8(dst, predun); + } else { + const int16x8x4_t pred = + predict_w32(pred_buf_q3, alpha_sign, abs_alpha_q12, dc); + const uint8x8x4_t predun = { + { vqmovun_s16(pred.val[0]), vqmovun_s16(pred.val[1]), + vqmovun_s16(pred.val[2]), vqmovun_s16(pred.val[3]) } + }; + vst4_u8(dst, predun); + } + dst += dst_stride; + } while ((pred_buf_q3 += CFL_BUF_LINE) < end); + } +} + +CFL_PREDICT_FN(neon, lbd) + +#if CONFIG_AV1_HIGHBITDEPTH +static INLINE uint16x4_t clamp_s16(int16x4_t a, int16x4_t max) { + return vreinterpret_u16_s16(vmax_s16(vmin_s16(a, max), vdup_n_s16(0))); +} + +static INLINE uint16x8_t clampq_s16(int16x8_t a, int16x8_t max) { + return vreinterpretq_u16_s16(vmaxq_s16(vminq_s16(a, max), vdupq_n_s16(0))); +} + +static INLINE uint16x8x2_t clamp2q_s16(int16x8x2_t a, int16x8_t max) { + uint16x8x2_t result; + result.val[0] = vreinterpretq_u16_s16( + vmaxq_s16(vminq_s16(a.val[0], max), vdupq_n_s16(0))); + result.val[1] = vreinterpretq_u16_s16( + vmaxq_s16(vminq_s16(a.val[1], max), vdupq_n_s16(0))); + return result; +} + +static INLINE uint16x8x4_t clamp4q_s16(int16x8x4_t a, int16x8_t max) { + uint16x8x4_t result; + result.val[0] = vreinterpretq_u16_s16( + vmaxq_s16(vminq_s16(a.val[0], max), vdupq_n_s16(0))); + result.val[1] = vreinterpretq_u16_s16( + vmaxq_s16(vminq_s16(a.val[1], max), vdupq_n_s16(0))); + result.val[2] = vreinterpretq_u16_s16( + vmaxq_s16(vminq_s16(a.val[2], max), vdupq_n_s16(0))); + result.val[3] = vreinterpretq_u16_s16( + vmaxq_s16(vminq_s16(a.val[3], max), 
vdupq_n_s16(0))); + return result; +} + +static INLINE void cfl_predict_hbd_neon(const int16_t *pred_buf_q3, + uint16_t *dst, int dst_stride, + int alpha_q3, int bd, int width, + int height) { + const int max = (1 << bd) - 1; + const int16_t abs_alpha_q12 = abs(alpha_q3) << 9; + const int16_t *const end = pred_buf_q3 + height * CFL_BUF_LINE; + if (width == 4) { + const int16x4_t alpha_sign = vdup_n_s16(alpha_q3); + const int16x4_t dc = vdup_n_s16(*dst); + const int16x4_t max_16x4 = vdup_n_s16(max); + do { + const int16x4_t scaled_luma = + predict_w4(pred_buf_q3, alpha_sign, abs_alpha_q12, dc); + vst1_u16(dst, clamp_s16(scaled_luma, max_16x4)); + dst += dst_stride; + } while ((pred_buf_q3 += CFL_BUF_LINE) < end); + } else { + const int16x8_t alpha_sign = vdupq_n_s16(alpha_q3); + const int16x8_t dc = vdupq_n_s16(*dst); + const int16x8_t max_16x8 = vdupq_n_s16(max); + do { + if (width == 8) { + const int16x8_t pred = + predict_w8(pred_buf_q3, alpha_sign, abs_alpha_q12, dc); + vst1q_u16(dst, clampq_s16(pred, max_16x8)); + } else if (width == 16) { + const int16x8x2_t pred = + predict_w16(pred_buf_q3, alpha_sign, abs_alpha_q12, dc); + vst2q_u16(dst, clamp2q_s16(pred, max_16x8)); + } else { + const int16x8x4_t pred = + predict_w32(pred_buf_q3, alpha_sign, abs_alpha_q12, dc); + vst4q_u16(dst, clamp4q_s16(pred, max_16x8)); + } + dst += dst_stride; + } while ((pred_buf_q3 += CFL_BUF_LINE) < end); + } +} + +CFL_PREDICT_FN(neon, hbd) +#endif // CONFIG_AV1_HIGHBITDEPTH diff --git a/third_party/aom/av1/common/arm/compound_convolve_neon.c b/third_party/aom/av1/common/arm/compound_convolve_neon.c new file mode 100644 index 0000000000..6a596234dc --- /dev/null +++ b/third_party/aom/av1/common/arm/compound_convolve_neon.c @@ -0,0 +1,2719 @@ +/* + * Copyright (c) 2018, Alliance for Open Media. All rights reserved + * + * This source code is subject to the terms of the BSD 2 Clause License and + * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License + * was not distributed with this source code in the LICENSE file, you can + * obtain it at www.aomedia.org/license/software. If the Alliance for Open + * Media Patent License 1.0 was not distributed with this source code in the + * PATENTS file, you can obtain it at www.aomedia.org/license/patent. + */ + +#include <arm_neon.h> +#include <assert.h> + +#include "aom_dsp/arm/mem_neon.h" +#include "aom_dsp/arm/transpose_neon.h" +#include "av1/common/arm/compound_convolve_neon.h" +#include "config/aom_config.h" +#include "config/av1_rtcd.h" + +static INLINE int16x4_t convolve4_4_2d_h(const int16x4_t s0, const int16x4_t s1, + const int16x4_t s2, const int16x4_t s3, + const int16x4_t x_filter, + const int16x4_t horiz_const) { + int16x4_t sum = horiz_const; + sum = vmla_lane_s16(sum, s0, x_filter, 0); + sum = vmla_lane_s16(sum, s1, x_filter, 1); + sum = vmla_lane_s16(sum, s2, x_filter, 2); + sum = vmla_lane_s16(sum, s3, x_filter, 3); + + // We halved the convolution filter values so -1 from the right shift. 
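The halved-coefficient trick noted in the comment above lowers the intermediate precision requirement without changing the result: the filter taps used here are even (as the nearby comments also note), so accumulating with taps/2 produces exactly half of the full-precision value, and for r = ROUND0_BITS and any integer T,

    (T + (1 << (r - 2))) >> (r - 1)  ==  (2*T + (1 << (r - 1))) >> r

which is why both the rounding shim and the final right shift are reduced by one bit.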
+ return vshr_n_s16(sum, ROUND0_BITS - 1); +} + +static INLINE int16x8_t convolve8_8_2d_h(const int16x8_t s0, const int16x8_t s1, + const int16x8_t s2, const int16x8_t s3, + const int16x8_t s4, const int16x8_t s5, + const int16x8_t s6, const int16x8_t s7, + const int16x8_t x_filter, + const int16x8_t horiz_const) { + const int16x4_t x_filter_0_3 = vget_low_s16(x_filter); + const int16x4_t x_filter_4_7 = vget_high_s16(x_filter); + + int16x8_t sum = horiz_const; + sum = vmlaq_lane_s16(sum, s0, x_filter_0_3, 0); + sum = vmlaq_lane_s16(sum, s1, x_filter_0_3, 1); + sum = vmlaq_lane_s16(sum, s2, x_filter_0_3, 2); + sum = vmlaq_lane_s16(sum, s3, x_filter_0_3, 3); + sum = vmlaq_lane_s16(sum, s4, x_filter_4_7, 0); + sum = vmlaq_lane_s16(sum, s5, x_filter_4_7, 1); + sum = vmlaq_lane_s16(sum, s6, x_filter_4_7, 2); + sum = vmlaq_lane_s16(sum, s7, x_filter_4_7, 3); + + // We halved the convolution filter values so -1 from the right shift. + return vshrq_n_s16(sum, ROUND0_BITS - 1); +} + +static INLINE void dist_wtd_convolve_2d_horiz_neon( + const uint8_t *src, int src_stride, int16_t *im_block, const int im_stride, + const int16_t *x_filter_ptr, const int im_h, int w) { + const int bd = 8; + + const uint8_t *src_ptr = src; + int16_t *dst_ptr = im_block; + int dst_stride = im_stride; + int height = im_h; + + if (w == 4) { + // A shim of 1 << ((ROUND0_BITS - 1) - 1) enables us to use non-rounding + // shifts - which are generally faster than rounding shifts on modern CPUs. + // (The extra -1 is needed because we halved the filter values.) + const int16x4_t horiz_const = vdup_n_s16((1 << (bd + FILTER_BITS - 2)) + + (1 << ((ROUND0_BITS - 1) - 1))); + // 4-tap filters are used for blocks having width <= 4. + // Filter values are even, so halve to reduce intermediate precision reqs. + const int16x4_t x_filter = vshr_n_s16(vld1_s16(x_filter_ptr + 2), 1); + + src_ptr += 2; + + do { + uint8x8_t t0 = vld1_u8(src_ptr); // a0 a1 a2 a3 a4 a5 a6 a7 + int16x4_t s0 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t0))); + int16x4_t s4 = vget_high_s16(vreinterpretq_s16_u16(vmovl_u8(t0))); + + __builtin_prefetch(dst_ptr); + + int16x4_t s1 = vext_s16(s0, s4, 1); // a1 a2 a3 a4 + int16x4_t s2 = vext_s16(s0, s4, 2); // a2 a3 a4 a5 + int16x4_t s3 = vext_s16(s0, s4, 3); // a3 a4 a5 a6 + + int16x4_t d0 = convolve4_4_2d_h(s0, s1, s2, s3, x_filter, horiz_const); + + vst1_s16(dst_ptr, d0); + + src_ptr += src_stride; + dst_ptr += dst_stride; + } while (--height != 0); + } else { + // A shim of 1 << ((ROUND0_BITS - 1) - 1) enables us to use non-rounding + // shifts - which are generally faster than rounding shifts on modern CPUs. + // (The extra -1 is needed because we halved the filter values.) + const int16x8_t horiz_const = vdupq_n_s16((1 << (bd + FILTER_BITS - 2)) + + (1 << ((ROUND0_BITS - 1) - 1))); + // Filter values are even, so halve to reduce intermediate precision reqs. 
+ const int16x8_t x_filter = vshrq_n_s16(vld1q_s16(x_filter_ptr), 1); + +#if AOM_ARCH_AARCH64 + do { + const uint8_t *s; + int16_t *d = dst_ptr; + int width = w; + + __builtin_prefetch(src_ptr + 0 * src_stride); + __builtin_prefetch(src_ptr + 1 * src_stride); + __builtin_prefetch(src_ptr + 2 * src_stride); + __builtin_prefetch(src_ptr + 3 * src_stride); + __builtin_prefetch(src_ptr + 4 * src_stride); + __builtin_prefetch(src_ptr + 5 * src_stride); + __builtin_prefetch(src_ptr + 6 * src_stride); + __builtin_prefetch(src_ptr + 7 * src_stride); + + uint8x8_t t0, t1, t2, t3, t4, t5, t6, t7; + load_u8_8x8(src_ptr, src_stride, &t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7); + transpose_elems_inplace_u8_8x8(&t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7); + + int16x8_t s0 = vreinterpretq_s16_u16(vmovl_u8(t0)); + int16x8_t s1 = vreinterpretq_s16_u16(vmovl_u8(t1)); + int16x8_t s2 = vreinterpretq_s16_u16(vmovl_u8(t2)); + int16x8_t s3 = vreinterpretq_s16_u16(vmovl_u8(t3)); + int16x8_t s4 = vreinterpretq_s16_u16(vmovl_u8(t4)); + int16x8_t s5 = vreinterpretq_s16_u16(vmovl_u8(t5)); + int16x8_t s6 = vreinterpretq_s16_u16(vmovl_u8(t6)); + + s = src_ptr + 7; + + __builtin_prefetch(dst_ptr + 0 * dst_stride); + __builtin_prefetch(dst_ptr + 1 * dst_stride); + __builtin_prefetch(dst_ptr + 2 * dst_stride); + __builtin_prefetch(dst_ptr + 3 * dst_stride); + __builtin_prefetch(dst_ptr + 4 * dst_stride); + __builtin_prefetch(dst_ptr + 5 * dst_stride); + __builtin_prefetch(dst_ptr + 6 * dst_stride); + __builtin_prefetch(dst_ptr + 7 * dst_stride); + + do { + load_u8_8x8(s, src_stride, &t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7); + transpose_elems_inplace_u8_8x8(&t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7); + + int16x8_t s7 = vreinterpretq_s16_u16(vmovl_u8(t0)); + int16x8_t s8 = vreinterpretq_s16_u16(vmovl_u8(t1)); + int16x8_t s9 = vreinterpretq_s16_u16(vmovl_u8(t2)); + int16x8_t s10 = vreinterpretq_s16_u16(vmovl_u8(t3)); + int16x8_t s11 = vreinterpretq_s16_u16(vmovl_u8(t4)); + int16x8_t s12 = vreinterpretq_s16_u16(vmovl_u8(t5)); + int16x8_t s13 = vreinterpretq_s16_u16(vmovl_u8(t6)); + int16x8_t s14 = vreinterpretq_s16_u16(vmovl_u8(t7)); + + int16x8_t d0 = convolve8_8_2d_h(s0, s1, s2, s3, s4, s5, s6, s7, + x_filter, horiz_const); + int16x8_t d1 = convolve8_8_2d_h(s1, s2, s3, s4, s5, s6, s7, s8, + x_filter, horiz_const); + int16x8_t d2 = convolve8_8_2d_h(s2, s3, s4, s5, s6, s7, s8, s9, + x_filter, horiz_const); + int16x8_t d3 = convolve8_8_2d_h(s3, s4, s5, s6, s7, s8, s9, s10, + x_filter, horiz_const); + int16x8_t d4 = convolve8_8_2d_h(s4, s5, s6, s7, s8, s9, s10, s11, + x_filter, horiz_const); + int16x8_t d5 = convolve8_8_2d_h(s5, s6, s7, s8, s9, s10, s11, s12, + x_filter, horiz_const); + int16x8_t d6 = convolve8_8_2d_h(s6, s7, s8, s9, s10, s11, s12, s13, + x_filter, horiz_const); + int16x8_t d7 = convolve8_8_2d_h(s7, s8, s9, s10, s11, s12, s13, s14, + x_filter, horiz_const); + + transpose_elems_inplace_s16_8x8(&d0, &d1, &d2, &d3, &d4, &d5, &d6, &d7); + store_s16_8x8(d, dst_stride, d0, d1, d2, d3, d4, d5, d6, d7); + + s0 = s8; + s1 = s9; + s2 = s10; + s3 = s11; + s4 = s12; + s5 = s13; + s6 = s14; + s += 8; + d += 8; + width -= 8; + } while (width > 0); + src_ptr += 8 * src_stride; + dst_ptr += 8 * dst_stride; + height -= 8; + } while (height > 8); +#endif // AOM_ARCH_AARCH64 + + do { + const uint8_t *s; + int16_t *d = dst_ptr; + int width = w; + + uint8x8_t t0 = vld1_u8(src_ptr); + int16x8_t s0 = + vreinterpretq_s16_u16(vmovl_u8(t0)); // a0 a1 a2 a3 a4 a5 a6 a7 + + s = src_ptr + 8; + __builtin_prefetch(dst_ptr); + + do { + t0 = vld1_u8(s); 
// a8 a9 a10 a11 a12 a13 a14 a15 + int16x8_t s8 = vreinterpretq_s16_u16(vmovl_u8(t0)); + + int16x8_t s1 = vextq_s16(s0, s8, 1); // a1 a2 a3 a4 a5 a6 a7 a8 + int16x8_t s2 = vextq_s16(s0, s8, 2); // a2 a3 a4 a5 a6 a7 a8 a9 + int16x8_t s3 = vextq_s16(s0, s8, 3); // a3 a4 a5 a6 a7 a8 a9 a10 + int16x8_t s4 = vextq_s16(s0, s8, 4); // a4 a5 a6 a7 a8 a9 a10 a11 + int16x8_t s5 = vextq_s16(s0, s8, 5); // a5 a6 a7 a8 a9 a10 a11 a12 + int16x8_t s6 = vextq_s16(s0, s8, 6); // a6 a7 a8 a9 a10 a11 a12 a13 + int16x8_t s7 = vextq_s16(s0, s8, 7); // a7 a8 a9 a10 a11 a12 a13 a14 + + int16x8_t d0 = convolve8_8_2d_h(s0, s1, s2, s3, s4, s5, s6, s7, + x_filter, horiz_const); + vst1q_s16(d, d0); + + s0 = s8; + s += 8; + d += 8; + width -= 8; + } while (width > 0); + src_ptr += src_stride; + dst_ptr += dst_stride; + } while (--height != 0); + } +} + +void av1_dist_wtd_convolve_2d_neon(const uint8_t *src, int src_stride, + uint8_t *dst8, int dst8_stride, int w, int h, + const InterpFilterParams *filter_params_x, + const InterpFilterParams *filter_params_y, + const int subpel_x_qn, const int subpel_y_qn, + ConvolveParams *conv_params) { + assert(w % 4 == 0); + assert(h % 4 == 0); + + DECLARE_ALIGNED(16, int16_t, + im_block[(MAX_SB_SIZE + SUBPEL_TAPS - 1) * MAX_SB_SIZE]); + + const int y_filter_taps = get_filter_tap(filter_params_y, subpel_y_qn); + const int clamped_y_taps = y_filter_taps < 6 ? 6 : y_filter_taps; + + const int im_h = h + clamped_y_taps - 1; + const int im_stride = MAX_SB_SIZE; + const int vert_offset = clamped_y_taps / 2 - 1; + const int horiz_offset = filter_params_x->taps / 2 - 1; + const uint8_t *src_ptr = src - vert_offset * src_stride - horiz_offset; + const int16_t *x_filter_ptr = av1_get_interp_filter_subpel_kernel( + filter_params_x, subpel_x_qn & SUBPEL_MASK); + const int16_t *y_filter_ptr = av1_get_interp_filter_subpel_kernel( + filter_params_y, subpel_y_qn & SUBPEL_MASK); + + const int16x8_t y_filter = vld1q_s16(y_filter_ptr); + + dist_wtd_convolve_2d_horiz_neon(src_ptr, src_stride, im_block, im_stride, + x_filter_ptr, im_h, w); + + if (clamped_y_taps == 6) { + if (conv_params->do_average) { + if (UNLIKELY(conv_params->use_dist_wtd_comp_avg)) { + dist_wtd_convolve_2d_vert_6tap_dist_wtd_avg_neon( + im_block, im_stride, dst8, dst8_stride, conv_params, y_filter, h, + w); + } else { + dist_wtd_convolve_2d_vert_6tap_avg_neon(im_block, im_stride, dst8, + dst8_stride, conv_params, + y_filter, h, w); + } + } else { + dist_wtd_convolve_2d_vert_6tap_neon(im_block, im_stride, conv_params, + y_filter, h, w); + } + } else { + if (conv_params->do_average) { + if (UNLIKELY(conv_params->use_dist_wtd_comp_avg)) { + dist_wtd_convolve_2d_vert_8tap_dist_wtd_avg_neon( + im_block, im_stride, dst8, dst8_stride, conv_params, y_filter, h, + w); + } else { + dist_wtd_convolve_2d_vert_8tap_avg_neon(im_block, im_stride, dst8, + dst8_stride, conv_params, + y_filter, h, w); + } + } else { + dist_wtd_convolve_2d_vert_8tap_neon(im_block, im_stride, conv_params, + y_filter, h, w); + } + } +} + +static INLINE void dist_wtd_convolve_2d_copy_dist_wtd_avg_neon( + const uint8_t *src, int src_stride, uint8_t *dst8, int dst8_stride, int w, + int h, ConvolveParams *conv_params) { + assert(w % 4 == 0); + assert(h % 4 == 0); + + const int bd = 8; + const int offset_bits = bd + 2 * FILTER_BITS - ROUND0_BITS; + const uint16_t round_offset = (1 << (offset_bits - COMPOUND_ROUND1_BITS)) + + (1 << (offset_bits - COMPOUND_ROUND1_BITS - 1)); + const uint16x8_t round_offset_vec = vdupq_n_u16(round_offset); + const uint8x8_t shift_by_bits = 
vdup_n_u8(1 << (FILTER_BITS - ROUND0_BITS)); + + const uint16_t fwd_offset = conv_params->fwd_offset; + const uint16_t bck_offset = conv_params->bck_offset; + + CONV_BUF_TYPE *dst = conv_params->dst; + const int dst_stride = conv_params->dst_stride; + int height = h; + + if (w == 4) { + do { + uint8x8_t s0, s1, s2, s3; + load_u8_8x4(src, src_stride, &s0, &s1, &s2, &s3); + + uint16x4_t d0 = + vget_low_u16(vmlal_u8(round_offset_vec, s0, shift_by_bits)); + uint16x4_t d1 = + vget_low_u16(vmlal_u8(round_offset_vec, s1, shift_by_bits)); + uint16x4_t d2 = + vget_low_u16(vmlal_u8(round_offset_vec, s2, shift_by_bits)); + uint16x4_t d3 = + vget_low_u16(vmlal_u8(round_offset_vec, s3, shift_by_bits)); + + uint16x4_t dd0, dd1, dd2, dd3; + load_u16_4x4(dst, dst_stride, &dd0, &dd1, &dd2, &dd3); + + uint8x8_t d01, d23; + compute_dist_wtd_avg_4x4( + dd0, dd1, dd2, dd3, d0, d1, d2, d3, fwd_offset, bck_offset, + vreinterpretq_s16_u16(round_offset_vec), &d01, &d23); + + store_u8x4_strided_x2(dst8 + 0 * dst8_stride, dst8_stride, d01); + store_u8x4_strided_x2(dst8 + 2 * dst8_stride, dst8_stride, d23); + + src += 4 * src_stride; + dst += 4 * dst_stride; + dst8 += 4 * dst8_stride; + height -= 4; + } while (height != 0); + } else { + do { + const uint8_t *s = src; + CONV_BUF_TYPE *d = dst; + uint8_t *d_u8 = dst8; + int width = w; + + do { + uint8x8_t s0, s1, s2, s3; + load_u8_8x4(s, src_stride, &s0, &s1, &s2, &s3); + + uint16x8_t d0 = vmlal_u8(round_offset_vec, s0, shift_by_bits); + uint16x8_t d1 = vmlal_u8(round_offset_vec, s1, shift_by_bits); + uint16x8_t d2 = vmlal_u8(round_offset_vec, s2, shift_by_bits); + uint16x8_t d3 = vmlal_u8(round_offset_vec, s3, shift_by_bits); + + uint16x8_t dd0, dd1, dd2, dd3; + load_u16_8x4(d, dst_stride, &dd0, &dd1, &dd2, &dd3); + + uint8x8_t d0_u8, d1_u8, d2_u8, d3_u8; + compute_dist_wtd_avg_8x4(dd0, dd1, dd2, dd3, d0, d1, d2, d3, fwd_offset, + bck_offset, + vreinterpretq_s16_u16(round_offset_vec), + &d0_u8, &d1_u8, &d2_u8, &d3_u8); + + store_u8_8x4(d_u8, dst8_stride, d0_u8, d1_u8, d2_u8, d3_u8); + + s += 8; + d += 8; + d_u8 += 8; + width -= 8; + } while (width != 0); + src += 4 * src_stride; + dst += 4 * dst_stride; + dst8 += 4 * dst8_stride; + height -= 4; + } while (height != 0); + } +} + +static INLINE void dist_wtd_convolve_2d_copy_avg_neon( + const uint8_t *src, int src_stride, uint8_t *dst8, int dst8_stride, int w, + int h, ConvolveParams *conv_params) { + assert(w % 4 == 0); + assert(h % 4 == 0); + + const int bd = 8; + const int offset_bits = bd + 2 * FILTER_BITS - ROUND0_BITS; + const uint16_t round_offset = (1 << (offset_bits - COMPOUND_ROUND1_BITS)) + + (1 << (offset_bits - COMPOUND_ROUND1_BITS - 1)); + const uint16x8_t round_offset_vec = vdupq_n_u16(round_offset); + const uint8x8_t shift_by_bits = vdup_n_u8(1 << (FILTER_BITS - ROUND0_BITS)); + + CONV_BUF_TYPE *dst = conv_params->dst; + const int dst_stride = conv_params->dst_stride; + int height = h; + + if (w == 4) { + do { + uint8x8_t s0, s1, s2, s3; + load_u8_8x4(src, src_stride, &s0, &s1, &s2, &s3); + + uint16x4_t d0 = + vget_low_u16(vmlal_u8(round_offset_vec, s0, shift_by_bits)); + uint16x4_t d1 = + vget_low_u16(vmlal_u8(round_offset_vec, s1, shift_by_bits)); + uint16x4_t d2 = + vget_low_u16(vmlal_u8(round_offset_vec, s2, shift_by_bits)); + uint16x4_t d3 = + vget_low_u16(vmlal_u8(round_offset_vec, s3, shift_by_bits)); + + uint16x4_t dd0, dd1, dd2, dd3; + load_u16_4x4(dst, dst_stride, &dd0, &dd1, &dd2, &dd3); + + uint8x8_t d01, d23; + compute_basic_avg_4x4(dd0, dd1, dd2, dd3, d0, d1, d2, d3, + 
vreinterpretq_s16_u16(round_offset_vec), &d01, + &d23); + + store_u8x4_strided_x2(dst8 + 0 * dst8_stride, dst8_stride, d01); + store_u8x4_strided_x2(dst8 + 2 * dst8_stride, dst8_stride, d23); + + src += 4 * src_stride; + dst += 4 * dst_stride; + dst8 += 4 * dst8_stride; + height -= 4; + } while (height != 0); + } else { + do { + const uint8_t *s = src; + CONV_BUF_TYPE *d = dst; + uint8_t *d_u8 = dst8; + int width = w; + + do { + uint8x8_t s0, s1, s2, s3; + load_u8_8x4(s, src_stride, &s0, &s1, &s2, &s3); + + uint16x8_t d0 = vmlal_u8(round_offset_vec, s0, shift_by_bits); + uint16x8_t d1 = vmlal_u8(round_offset_vec, s1, shift_by_bits); + uint16x8_t d2 = vmlal_u8(round_offset_vec, s2, shift_by_bits); + uint16x8_t d3 = vmlal_u8(round_offset_vec, s3, shift_by_bits); + + uint16x8_t dd0, dd1, dd2, dd3; + load_u16_8x4(d, dst_stride, &dd0, &dd1, &dd2, &dd3); + + uint8x8_t d0_u8, d1_u8, d2_u8, d3_u8; + compute_basic_avg_8x4(dd0, dd1, dd2, dd3, d0, d1, d2, d3, + vreinterpretq_s16_u16(round_offset_vec), &d0_u8, + &d1_u8, &d2_u8, &d3_u8); + + store_u8_8x4(d_u8, dst8_stride, d0_u8, d1_u8, d2_u8, d3_u8); + + s += 8; + d += 8; + d_u8 += 8; + width -= 8; + } while (width != 0); + src += 4 * src_stride; + dst += 4 * dst_stride; + dst8 += 4 * dst8_stride; + height -= 4; + } while (height != 0); + } +} + +static INLINE void dist_wtd_convolve_2d_copy_neon(const uint8_t *src, + int src_stride, int w, int h, + ConvolveParams *conv_params) { + assert(w % 4 == 0); + assert(h % 4 == 0); + + const int bd = 8; + const int offset_bits = bd + 2 * FILTER_BITS - ROUND0_BITS; + const uint16_t round_offset = (1 << (offset_bits - COMPOUND_ROUND1_BITS)) + + (1 << (offset_bits - COMPOUND_ROUND1_BITS - 1)); + const uint16x8_t round_offset_vec = vdupq_n_u16(round_offset); + const uint8x8_t shift_by_bits = vdup_n_u8(1 << (FILTER_BITS - ROUND0_BITS)); + + CONV_BUF_TYPE *dst = conv_params->dst; + const int dst_stride = conv_params->dst_stride; + int height = h; + + if (w == 4) { + do { + uint8x8_t s0, s1, s2, s3; + load_u8_8x4(src, src_stride, &s0, &s1, &s2, &s3); + + uint16x4_t d0 = + vget_low_u16(vmlal_u8(round_offset_vec, s0, shift_by_bits)); + uint16x4_t d1 = + vget_low_u16(vmlal_u8(round_offset_vec, s1, shift_by_bits)); + uint16x4_t d2 = + vget_low_u16(vmlal_u8(round_offset_vec, s2, shift_by_bits)); + uint16x4_t d3 = + vget_low_u16(vmlal_u8(round_offset_vec, s3, shift_by_bits)); + + store_u16_4x4(dst, dst_stride, d0, d1, d2, d3); + + src += 4 * src_stride; + dst += 4 * dst_stride; + height -= 4; + } while (height != 0); + } else { + do { + const uint8_t *s = src; + CONV_BUF_TYPE *d = dst; + int width = w; + + do { + uint8x8_t s0, s1, s2, s3; + load_u8_8x4(s, src_stride, &s0, &s1, &s2, &s3); + + uint16x8_t d0 = vmlal_u8(round_offset_vec, s0, shift_by_bits); + uint16x8_t d1 = vmlal_u8(round_offset_vec, s1, shift_by_bits); + uint16x8_t d2 = vmlal_u8(round_offset_vec, s2, shift_by_bits); + uint16x8_t d3 = vmlal_u8(round_offset_vec, s3, shift_by_bits); + + store_u16_8x4(d, dst_stride, d0, d1, d2, d3); + + s += 8; + d += 8; + width -= 8; + } while (width != 0); + src += 4 * src_stride; + dst += 4 * dst_stride; + height -= 4; + } while (height != 0); + } +} + +void av1_dist_wtd_convolve_2d_copy_neon(const uint8_t *src, int src_stride, + uint8_t *dst8, int dst8_stride, int w, + int h, ConvolveParams *conv_params) { + if (conv_params->do_average) { + if (UNLIKELY(conv_params->use_dist_wtd_comp_avg)) { + dist_wtd_convolve_2d_copy_dist_wtd_avg_neon( + src, src_stride, dst8, dst8_stride, w, h, conv_params); + } else { + 
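// All three copy kernels above form the same intermediate value,
// (pixel << (FILTER_BITS - ROUND0_BITS)) + round_offset, using vmlal_u8 with a
// vector of 1 << (FILTER_BITS - ROUND0_BITS) (16 under the usual constants) as
// a widening shift-and-add. The plain variant only stores that intermediate
// into the CONV_BUF_TYPE buffer; the two averaging variants also blend it with
// the value already in the buffer and write back 8-bit pixels.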
dist_wtd_convolve_2d_copy_avg_neon(src, src_stride, dst8, dst8_stride, w, + h, conv_params); + } + } else { + dist_wtd_convolve_2d_copy_neon(src, src_stride, w, h, conv_params); + } +} + +static INLINE uint16x4_t convolve4_4_x(const int16x4_t s0, const int16x4_t s1, + const int16x4_t s2, const int16x4_t s3, + const int16x4_t x_filter, + const int16x4_t round_offset) { + int16x4_t sum = vmul_lane_s16(s0, x_filter, 0); + sum = vmla_lane_s16(sum, s1, x_filter, 1); + sum = vmla_lane_s16(sum, s2, x_filter, 2); + sum = vmla_lane_s16(sum, s3, x_filter, 3); + + // We halved the convolution filter values so -1 from the right shift. + int16x4_t res = vrsra_n_s16(round_offset, sum, ROUND0_BITS - 1); + return vreinterpret_u16_s16(res); +} + +static INLINE uint16x8_t convolve8_8_x(const int16x8_t s0, const int16x8_t s1, + const int16x8_t s2, const int16x8_t s3, + const int16x8_t s4, const int16x8_t s5, + const int16x8_t s6, const int16x8_t s7, + const int16x8_t x_filter, + const int16x8_t round_offset) { + const int16x4_t x_filter_0_3 = vget_low_s16(x_filter); + const int16x4_t x_filter_4_7 = vget_high_s16(x_filter); + + int16x8_t sum = vmulq_lane_s16(s0, x_filter_0_3, 0); + sum = vmlaq_lane_s16(sum, s1, x_filter_0_3, 1); + sum = vmlaq_lane_s16(sum, s2, x_filter_0_3, 2); + sum = vmlaq_lane_s16(sum, s3, x_filter_0_3, 3); + sum = vmlaq_lane_s16(sum, s4, x_filter_4_7, 0); + sum = vmlaq_lane_s16(sum, s5, x_filter_4_7, 1); + sum = vmlaq_lane_s16(sum, s6, x_filter_4_7, 2); + sum = vmlaq_lane_s16(sum, s7, x_filter_4_7, 3); + + // We halved the convolution filter values so -1 from the right shift. + int16x8_t res = vrsraq_n_s16(round_offset, sum, ROUND0_BITS - 1); + return vreinterpretq_u16_s16(res); +} + +static INLINE void dist_wtd_convolve_x_dist_wtd_avg_neon( + const uint8_t *src, int src_stride, uint8_t *dst8, int dst8_stride, int w, + int h, const InterpFilterParams *filter_params_x, const int subpel_x_qn, + ConvolveParams *conv_params) { + assert(w % 4 == 0); + assert(h % 4 == 0); + + const int bd = 8; + const int offset_bits = bd + 2 * FILTER_BITS - ROUND0_BITS; + const int16_t round_offset = (1 << (offset_bits - COMPOUND_ROUND1_BITS)) + + (1 << (offset_bits - COMPOUND_ROUND1_BITS - 1)); + const int16x8_t round_offset_vec = vdupq_n_s16(round_offset); + + const uint16_t fwd_offset = conv_params->fwd_offset; + const uint16_t bck_offset = conv_params->bck_offset; + + // Horizontal filter. + const int16_t *x_filter_ptr = av1_get_interp_filter_subpel_kernel( + filter_params_x, subpel_x_qn & SUBPEL_MASK); + + const int horiz_offset = filter_params_x->taps / 2 - 1; + const uint8_t *src_ptr = src - horiz_offset; + CONV_BUF_TYPE *dst_ptr = conv_params->dst; + uint8_t *dst8_ptr = dst8; + int dst_stride = conv_params->dst_stride; + int height = h; + + if (w == 4) { + // 4-tap filters are used for blocks having width <= 4. + // Filter values are even, so halve to reduce intermediate precision reqs. 
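// For reference (assuming bd == 8, FILTER_BITS == 7, ROUND0_BITS == 3 and
// COMPOUND_ROUND1_BITS == 7): offset_bits == 19, so round_offset evaluates to
// (1 << 12) + (1 << 11) == 6144. This is the bias that keeps the intermediate
// compound values positive; the convolve4_4_x / convolve8_8_x helpers used
// below add it in via vrsra_n_s16, which rounding-shifts the filtered sum
// right by ROUND0_BITS - 1 and accumulates onto round_offset in a single
// instruction.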
+ const int16x4_t x_filter = vshr_n_s16(vld1_s16(x_filter_ptr + 2), 1); + + src_ptr += 2; + + do { + uint8x8_t t0 = vld1_u8(src_ptr); // a0 a1 a2 a3 a4 a5 a6 a7 + int16x4_t s0 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t0))); + int16x4_t s4 = vget_high_s16(vreinterpretq_s16_u16(vmovl_u8(t0))); + + __builtin_prefetch(dst_ptr); + __builtin_prefetch(dst8_ptr); + + int16x4_t s1 = vext_s16(s0, s4, 1); // a1 a2 a3 a4 + int16x4_t s2 = vext_s16(s0, s4, 2); // a2 a3 a4 a5 + int16x4_t s3 = vext_s16(s0, s4, 3); // a3 a4 a5 a6 + + uint16x4_t d0 = convolve4_4_x(s0, s1, s2, s3, x_filter, + vget_low_s16(round_offset_vec)); + + uint16x4_t dd0 = vld1_u16(dst_ptr); + + uint8x8_t d01; + compute_dist_wtd_avg_4x1(dd0, d0, fwd_offset, bck_offset, + vget_low_s16(round_offset_vec), &d01); + + store_u8_4x1(dst8_ptr, d01); + + src_ptr += src_stride; + dst_ptr += dst_stride; + dst8_ptr += dst8_stride; + } while (--height != 0); + } else { + // Filter values are even, so halve to reduce intermediate precision reqs. + const int16x8_t x_filter = vshrq_n_s16(vld1q_s16(x_filter_ptr), 1); + +#if AOM_ARCH_AARCH64 + while (height >= 8) { + const uint8_t *s = src_ptr; + CONV_BUF_TYPE *d = dst_ptr; + uint8_t *d_u8 = dst8_ptr; + int width = w; + + uint8x8_t t0, t1, t2, t3, t4, t5, t6, t7; + load_u8_8x8(s, src_stride, &t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7); + transpose_elems_inplace_u8_8x8(&t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7); + + int16x8_t s0 = vreinterpretq_s16_u16(vmovl_u8(t0)); + int16x8_t s1 = vreinterpretq_s16_u16(vmovl_u8(t1)); + int16x8_t s2 = vreinterpretq_s16_u16(vmovl_u8(t2)); + int16x8_t s3 = vreinterpretq_s16_u16(vmovl_u8(t3)); + int16x8_t s4 = vreinterpretq_s16_u16(vmovl_u8(t4)); + int16x8_t s5 = vreinterpretq_s16_u16(vmovl_u8(t5)); + int16x8_t s6 = vreinterpretq_s16_u16(vmovl_u8(t6)); + + __builtin_prefetch(d + 0 * dst_stride); + __builtin_prefetch(d + 1 * dst_stride); + __builtin_prefetch(d + 2 * dst_stride); + __builtin_prefetch(d + 3 * dst_stride); + __builtin_prefetch(d + 4 * dst_stride); + __builtin_prefetch(d + 5 * dst_stride); + __builtin_prefetch(d + 6 * dst_stride); + __builtin_prefetch(d + 7 * dst_stride); + + s += 7; + + do { + load_u8_8x8(s, src_stride, &t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7); + transpose_elems_inplace_u8_8x8(&t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7); + + int16x8_t s7 = vreinterpretq_s16_u16(vmovl_u8(t0)); + int16x8_t s8 = vreinterpretq_s16_u16(vmovl_u8(t1)); + int16x8_t s9 = vreinterpretq_s16_u16(vmovl_u8(t2)); + int16x8_t s10 = vreinterpretq_s16_u16(vmovl_u8(t3)); + int16x8_t s11 = vreinterpretq_s16_u16(vmovl_u8(t4)); + int16x8_t s12 = vreinterpretq_s16_u16(vmovl_u8(t5)); + int16x8_t s13 = vreinterpretq_s16_u16(vmovl_u8(t6)); + int16x8_t s14 = vreinterpretq_s16_u16(vmovl_u8(t7)); + + uint16x8_t d0 = convolve8_8_x(s0, s1, s2, s3, s4, s5, s6, s7, x_filter, + round_offset_vec); + uint16x8_t d1 = convolve8_8_x(s1, s2, s3, s4, s5, s6, s7, s8, x_filter, + round_offset_vec); + uint16x8_t d2 = convolve8_8_x(s2, s3, s4, s5, s6, s7, s8, s9, x_filter, + round_offset_vec); + uint16x8_t d3 = convolve8_8_x(s3, s4, s5, s6, s7, s8, s9, s10, x_filter, + round_offset_vec); + uint16x8_t d4 = convolve8_8_x(s4, s5, s6, s7, s8, s9, s10, s11, + x_filter, round_offset_vec); + uint16x8_t d5 = convolve8_8_x(s5, s6, s7, s8, s9, s10, s11, s12, + x_filter, round_offset_vec); + uint16x8_t d6 = convolve8_8_x(s6, s7, s8, s9, s10, s11, s12, s13, + x_filter, round_offset_vec); + uint16x8_t d7 = convolve8_8_x(s7, s8, s9, s10, s11, s12, s13, s14, + x_filter, round_offset_vec); + + 
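// The eight results were computed on transposed inputs, so d0..d7 each hold
// one output column across eight rows; they are transposed back to row order
// below before being blended with the values already in the compound buffer
// (loaded into dd0..dd7). The compute_dist_wtd_avg_* helpers, pulled in from
// the included compound_convolve_neon.h, perform the distance-weighted blend:
// roughly (fwd_offset * dd + bck_offset * d) >> 4, with the two weights
// summing to 16, after which round_offset is removed and the result is
// narrowed back to 8-bit pixels.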
transpose_elems_inplace_u16_8x8(&d0, &d1, &d2, &d3, &d4, &d5, &d6, &d7); + + uint16x8_t dd0, dd1, dd2, dd3; + load_u16_8x4(d, dst_stride, &dd0, &dd1, &dd2, &dd3); + + uint8x8_t d0_u8, d1_u8, d2_u8, d3_u8; + compute_dist_wtd_avg_8x4(dd0, dd1, dd2, dd3, d0, d1, d2, d3, fwd_offset, + bck_offset, round_offset_vec, &d0_u8, &d1_u8, + &d2_u8, &d3_u8); + + store_u8_8x4(d_u8, dst8_stride, d0_u8, d1_u8, d2_u8, d3_u8); + + uint16x8_t dd4, dd5, dd6, dd7; + load_u16_8x4(d + 4 * dst_stride, dst_stride, &dd4, &dd5, &dd6, &dd7); + + uint8x8_t d4_u8, d5_u8, d6_u8, d7_u8; + compute_dist_wtd_avg_8x4(dd4, dd5, dd6, dd7, d4, d5, d6, d7, fwd_offset, + bck_offset, round_offset_vec, &d4_u8, &d5_u8, + &d6_u8, &d7_u8); + + store_u8_8x4(d_u8 + 4 * dst8_stride, dst8_stride, d4_u8, d5_u8, d6_u8, + d7_u8); + + s0 = s8; + s1 = s9; + s2 = s10; + s3 = s11; + s4 = s12; + s5 = s13; + s6 = s14; + s += 8; + d += 8; + d_u8 += 8; + width -= 8; + } while (width != 0); + src_ptr += 8 * src_stride; + dst_ptr += 8 * dst_stride; + dst8_ptr += 8 * dst8_stride; + height -= 8; + } +#endif // AOM_ARCH_AARCH64 + + while (height > 0) { + const uint8_t *s = src_ptr; + CONV_BUF_TYPE *d = dst_ptr; + uint8_t *d_u8 = dst8_ptr; + int width = w; + + uint8x8_t t0 = vld1_u8(s); // a0 a1 a2 a3 a4 a5 a6 a7 + int16x8_t s0 = vreinterpretq_s16_u16(vmovl_u8(t0)); + + __builtin_prefetch(d); + + s += 8; + + do { + t0 = vld1_u8(s); // a8 a9 a10 a11 a12 a13 a14 a15 + int16x8_t s8 = vreinterpretq_s16_u16(vmovl_u8(t0)); + + int16x8_t s1 = vextq_s16(s0, s8, 1); // a1 a2 a3 a4 a5 a6 a7 a8 + int16x8_t s2 = vextq_s16(s0, s8, 2); // a2 a3 a4 a5 a6 a7 a8 a9 + int16x8_t s3 = vextq_s16(s0, s8, 3); // a3 a4 a5 a6 a7 a8 a9 a10 + int16x8_t s4 = vextq_s16(s0, s8, 4); // a4 a5 a6 a7 a8 a9 a10 a11 + int16x8_t s5 = vextq_s16(s0, s8, 5); // a5 a6 a7 a8 a9 a10 a11 a12 + int16x8_t s6 = vextq_s16(s0, s8, 6); // a6 a7 a8 a9 a10 a11 a12 a13 + int16x8_t s7 = vextq_s16(s0, s8, 7); // a7 a8 a9 a10 a11 a12 a13 a14 + + uint16x8_t d0 = convolve8_8_x(s0, s1, s2, s3, s4, s5, s6, s7, x_filter, + round_offset_vec); + + uint16x8_t dd0 = vld1q_u16(d); + + uint8x8_t d0_u8; + compute_dist_wtd_avg_8x1(dd0, d0, fwd_offset, bck_offset, + round_offset_vec, &d0_u8); + + vst1_u8(d_u8, d0_u8); + + s0 = s8; + s += 8; + d += 8; + d_u8 += 8; + width -= 8; + } while (width != 0); + src_ptr += src_stride; + dst_ptr += dst_stride; + dst8_ptr += dst8_stride; + height--; + } + } +} + +static INLINE void dist_wtd_convolve_x_avg_neon( + const uint8_t *src, int src_stride, uint8_t *dst8, int dst8_stride, int w, + int h, const InterpFilterParams *filter_params_x, const int subpel_x_qn, + ConvolveParams *conv_params) { + assert(w % 4 == 0); + assert(h % 4 == 0); + + const int bd = 8; + const int offset_bits = bd + 2 * FILTER_BITS - ROUND0_BITS; + const int16_t round_offset = (1 << (offset_bits - COMPOUND_ROUND1_BITS)) + + (1 << (offset_bits - COMPOUND_ROUND1_BITS - 1)); + const int16x8_t round_offset_vec = vdupq_n_s16(round_offset); + + // Horizontal filter. + const int16_t *x_filter_ptr = av1_get_interp_filter_subpel_kernel( + filter_params_x, subpel_x_qn & SUBPEL_MASK); + + const int horiz_offset = filter_params_x->taps / 2 - 1; + const uint8_t *src_ptr = src - horiz_offset; + CONV_BUF_TYPE *dst_ptr = conv_params->dst; + uint8_t *dst8_ptr = dst8; + int dst_stride = conv_params->dst_stride; + int height = h; + + if (w == 4) { + // 4-tap filters are used for blocks having width <= 4. + // Filter values are even, so halve to reduce intermediate precision reqs. 
+ const int16x4_t x_filter = vshr_n_s16(vld1_s16(x_filter_ptr + 2), 1); + + src_ptr += 2; + + do { + uint8x8_t t0 = vld1_u8(src_ptr); // a0 a1 a2 a3 a4 a5 a6 a7 + int16x4_t s0 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t0))); + int16x4_t s4 = vget_high_s16(vreinterpretq_s16_u16(vmovl_u8(t0))); + + __builtin_prefetch(dst_ptr); + __builtin_prefetch(dst8_ptr); + + int16x4_t s1 = vext_s16(s0, s4, 1); // a1 a2 a3 a4 + int16x4_t s2 = vext_s16(s0, s4, 2); // a2 a3 a4 a5 + int16x4_t s3 = vext_s16(s0, s4, 3); // a3 a4 a5 a6 + + uint16x4_t d0 = convolve4_4_x(s0, s1, s2, s3, x_filter, + vget_low_s16(round_offset_vec)); + + uint16x4_t dd0 = vld1_u16(dst_ptr); + + uint8x8_t d01; + compute_basic_avg_4x1(dd0, d0, vget_low_s16(round_offset_vec), &d01); + + store_u8_4x1(dst8_ptr, d01); + + src_ptr += src_stride; + dst_ptr += dst_stride; + dst8_ptr += dst8_stride; + } while (--height != 0); + } else { + // Filter values are even, so halve to reduce intermediate precision reqs. + const int16x8_t x_filter = vshrq_n_s16(vld1q_s16(x_filter_ptr), 1); + +#if AOM_ARCH_AARCH64 + while (height >= 8) { + const uint8_t *s = src_ptr; + CONV_BUF_TYPE *d = dst_ptr; + uint8_t *d_u8 = dst8_ptr; + int width = w; + + uint8x8_t t0, t1, t2, t3, t4, t5, t6, t7; + load_u8_8x8(s, src_stride, &t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7); + transpose_elems_inplace_u8_8x8(&t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7); + + int16x8_t s0 = vreinterpretq_s16_u16(vmovl_u8(t0)); + int16x8_t s1 = vreinterpretq_s16_u16(vmovl_u8(t1)); + int16x8_t s2 = vreinterpretq_s16_u16(vmovl_u8(t2)); + int16x8_t s3 = vreinterpretq_s16_u16(vmovl_u8(t3)); + int16x8_t s4 = vreinterpretq_s16_u16(vmovl_u8(t4)); + int16x8_t s5 = vreinterpretq_s16_u16(vmovl_u8(t5)); + int16x8_t s6 = vreinterpretq_s16_u16(vmovl_u8(t6)); + + __builtin_prefetch(d + 0 * dst_stride); + __builtin_prefetch(d + 1 * dst_stride); + __builtin_prefetch(d + 2 * dst_stride); + __builtin_prefetch(d + 3 * dst_stride); + __builtin_prefetch(d + 4 * dst_stride); + __builtin_prefetch(d + 5 * dst_stride); + __builtin_prefetch(d + 6 * dst_stride); + __builtin_prefetch(d + 7 * dst_stride); + + s += 7; + + do { + load_u8_8x8(s, src_stride, &t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7); + transpose_elems_inplace_u8_8x8(&t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7); + + int16x8_t s7 = vreinterpretq_s16_u16(vmovl_u8(t0)); + int16x8_t s8 = vreinterpretq_s16_u16(vmovl_u8(t1)); + int16x8_t s9 = vreinterpretq_s16_u16(vmovl_u8(t2)); + int16x8_t s10 = vreinterpretq_s16_u16(vmovl_u8(t3)); + int16x8_t s11 = vreinterpretq_s16_u16(vmovl_u8(t4)); + int16x8_t s12 = vreinterpretq_s16_u16(vmovl_u8(t5)); + int16x8_t s13 = vreinterpretq_s16_u16(vmovl_u8(t6)); + int16x8_t s14 = vreinterpretq_s16_u16(vmovl_u8(t7)); + + uint16x8_t d0 = convolve8_8_x(s0, s1, s2, s3, s4, s5, s6, s7, x_filter, + round_offset_vec); + uint16x8_t d1 = convolve8_8_x(s1, s2, s3, s4, s5, s6, s7, s8, x_filter, + round_offset_vec); + uint16x8_t d2 = convolve8_8_x(s2, s3, s4, s5, s6, s7, s8, s9, x_filter, + round_offset_vec); + uint16x8_t d3 = convolve8_8_x(s3, s4, s5, s6, s7, s8, s9, s10, x_filter, + round_offset_vec); + uint16x8_t d4 = convolve8_8_x(s4, s5, s6, s7, s8, s9, s10, s11, + x_filter, round_offset_vec); + uint16x8_t d5 = convolve8_8_x(s5, s6, s7, s8, s9, s10, s11, s12, + x_filter, round_offset_vec); + uint16x8_t d6 = convolve8_8_x(s6, s7, s8, s9, s10, s11, s12, s13, + x_filter, round_offset_vec); + uint16x8_t d7 = convolve8_8_x(s7, s8, s9, s10, s11, s12, s13, s14, + x_filter, round_offset_vec); + + transpose_elems_inplace_u16_8x8(&d0, &d1, &d2, &d3, &d4, &d5, 
&d6, &d7); + + uint16x8_t dd0, dd1, dd2, dd3; + load_u16_8x4(d, dst_stride, &dd0, &dd1, &dd2, &dd3); + + uint8x8_t d0_u8, d1_u8, d2_u8, d3_u8; + compute_basic_avg_8x4(dd0, dd1, dd2, dd3, d0, d1, d2, d3, + round_offset_vec, &d0_u8, &d1_u8, &d2_u8, &d3_u8); + + store_u8_8x4(d_u8, dst8_stride, d0_u8, d1_u8, d2_u8, d3_u8); + + uint16x8_t dd4, dd5, dd6, dd7; + load_u16_8x4(d + 4 * dst_stride, dst_stride, &dd4, &dd5, &dd6, &dd7); + + uint8x8_t d4_u8, d5_u8, d6_u8, d7_u8; + compute_basic_avg_8x4(dd4, dd5, dd6, dd7, d4, d5, d6, d7, + round_offset_vec, &d4_u8, &d5_u8, &d6_u8, &d7_u8); + + store_u8_8x4(d_u8 + 4 * dst8_stride, dst8_stride, d4_u8, d5_u8, d6_u8, + d7_u8); + + s0 = s8; + s1 = s9; + s2 = s10; + s3 = s11; + s4 = s12; + s5 = s13; + s6 = s14; + s += 8; + d += 8; + d_u8 += 8; + width -= 8; + } while (width != 0); + src_ptr += 8 * src_stride; + dst_ptr += 8 * dst_stride; + dst8_ptr += 8 * dst8_stride; + height -= 8; + } +#endif // AOM_ARCH_AARCH64 + + while (height > 0) { + const uint8_t *s = src_ptr; + CONV_BUF_TYPE *d = dst_ptr; + uint8_t *d_u8 = dst8_ptr; + int width = w; + + uint8x8_t t0 = vld1_u8(s); // a0 a1 a2 a3 a4 a5 a6 a7 + int16x8_t s0 = vreinterpretq_s16_u16(vmovl_u8(t0)); + + __builtin_prefetch(d); + + s += 8; + + do { + t0 = vld1_u8(s); // a8 a9 a10 a11 a12 a13 a14 a15 + int16x8_t s8 = vreinterpretq_s16_u16(vmovl_u8(t0)); + + int16x8_t s1 = vextq_s16(s0, s8, 1); // a1 a2 a3 a4 a5 a6 a7 a8 + int16x8_t s2 = vextq_s16(s0, s8, 2); // a2 a3 a4 a5 a6 a7 a8 a9 + int16x8_t s3 = vextq_s16(s0, s8, 3); // a3 a4 a5 a6 a7 a8 a9 a10 + int16x8_t s4 = vextq_s16(s0, s8, 4); // a4 a5 a6 a7 a8 a9 a10 a11 + int16x8_t s5 = vextq_s16(s0, s8, 5); // a5 a6 a7 a8 a9 a10 a11 a12 + int16x8_t s6 = vextq_s16(s0, s8, 6); // a6 a7 a8 a9 a10 a11 a12 a13 + int16x8_t s7 = vextq_s16(s0, s8, 7); // a7 a8 a9 a10 a11 a12 a13 a14 + + uint16x8_t d0 = convolve8_8_x(s0, s1, s2, s3, s4, s5, s6, s7, x_filter, + round_offset_vec); + + uint16x8_t dd0 = vld1q_u16(d); + + uint8x8_t d0_u8; + compute_basic_avg_8x1(dd0, d0, round_offset_vec, &d0_u8); + + vst1_u8(d_u8, d0_u8); + + s0 = s8; + s += 8; + d += 8; + d_u8 += 8; + width -= 8; + } while (width != 0); + src_ptr += src_stride; + dst_ptr += dst_stride; + dst8_ptr += dst8_stride; + height--; + } + } +} + +static INLINE void dist_wtd_convolve_x_neon( + const uint8_t *src, int src_stride, int w, int h, + const InterpFilterParams *filter_params_x, const int subpel_x_qn, + ConvolveParams *conv_params) { + assert(w % 4 == 0); + assert(h % 4 == 0); + + const int bd = 8; + const int offset_bits = bd + 2 * FILTER_BITS - ROUND0_BITS; + const int16_t round_offset = (1 << (offset_bits - COMPOUND_ROUND1_BITS)) + + (1 << (offset_bits - COMPOUND_ROUND1_BITS - 1)); + const int16x8_t round_offset_vec = vdupq_n_s16(round_offset); + + // Horizontal filter. + const int16_t *x_filter_ptr = av1_get_interp_filter_subpel_kernel( + filter_params_x, subpel_x_qn & SUBPEL_MASK); + + const int horiz_offset = filter_params_x->taps / 2 - 1; + const uint8_t *src_ptr = src - horiz_offset; + CONV_BUF_TYPE *dst_ptr = conv_params->dst; + int dst_stride = conv_params->dst_stride; + int height = h; + + if (w == 4) { + // 4-tap filters are used for blocks having width <= 4. + // Filter values are even, so halve to reduce intermediate precision reqs. 
+ const int16x4_t x_filter = vshr_n_s16(vld1_s16(x_filter_ptr + 2), 1); + + src_ptr += 2; + + do { + uint8x8_t t0 = vld1_u8(src_ptr); // a0 a1 a2 a3 a4 a5 a6 a7 + int16x4_t s0 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t0))); + int16x4_t s4 = vget_high_s16(vreinterpretq_s16_u16(vmovl_u8(t0))); + + __builtin_prefetch(dst_ptr); + + int16x4_t s1 = vext_s16(s0, s4, 1); // a1 a2 a3 a4 + int16x4_t s2 = vext_s16(s0, s4, 2); // a2 a3 a4 a5 + int16x4_t s3 = vext_s16(s0, s4, 3); // a3 a4 a5 a6 + + uint16x4_t d0 = convolve4_4_x(s0, s1, s2, s3, x_filter, + vget_low_s16(round_offset_vec)); + + vst1_u16(dst_ptr, d0); + + src_ptr += src_stride; + dst_ptr += dst_stride; + } while (--height != 0); + } else { + // Filter values are even, so halve to reduce intermediate precision reqs. + const int16x8_t x_filter = vshrq_n_s16(vld1q_s16(x_filter_ptr), 1); + +#if AOM_ARCH_AARCH64 + while (height >= 8) { + const uint8_t *s = src_ptr; + CONV_BUF_TYPE *d = dst_ptr; + int width = w; + + uint8x8_t t0, t1, t2, t3, t4, t5, t6, t7; + load_u8_8x8(s, src_stride, &t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7); + transpose_elems_inplace_u8_8x8(&t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7); + + int16x8_t s0 = vreinterpretq_s16_u16(vmovl_u8(t0)); + int16x8_t s1 = vreinterpretq_s16_u16(vmovl_u8(t1)); + int16x8_t s2 = vreinterpretq_s16_u16(vmovl_u8(t2)); + int16x8_t s3 = vreinterpretq_s16_u16(vmovl_u8(t3)); + int16x8_t s4 = vreinterpretq_s16_u16(vmovl_u8(t4)); + int16x8_t s5 = vreinterpretq_s16_u16(vmovl_u8(t5)); + int16x8_t s6 = vreinterpretq_s16_u16(vmovl_u8(t6)); + + __builtin_prefetch(d + 0 * dst_stride); + __builtin_prefetch(d + 1 * dst_stride); + __builtin_prefetch(d + 2 * dst_stride); + __builtin_prefetch(d + 3 * dst_stride); + __builtin_prefetch(d + 4 * dst_stride); + __builtin_prefetch(d + 5 * dst_stride); + __builtin_prefetch(d + 6 * dst_stride); + __builtin_prefetch(d + 7 * dst_stride); + + s += 7; + + do { + load_u8_8x8(s, src_stride, &t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7); + transpose_elems_inplace_u8_8x8(&t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7); + + int16x8_t s7 = vreinterpretq_s16_u16(vmovl_u8(t0)); + int16x8_t s8 = vreinterpretq_s16_u16(vmovl_u8(t1)); + int16x8_t s9 = vreinterpretq_s16_u16(vmovl_u8(t2)); + int16x8_t s10 = vreinterpretq_s16_u16(vmovl_u8(t3)); + int16x8_t s11 = vreinterpretq_s16_u16(vmovl_u8(t4)); + int16x8_t s12 = vreinterpretq_s16_u16(vmovl_u8(t5)); + int16x8_t s13 = vreinterpretq_s16_u16(vmovl_u8(t6)); + int16x8_t s14 = vreinterpretq_s16_u16(vmovl_u8(t7)); + + uint16x8_t d0 = convolve8_8_x(s0, s1, s2, s3, s4, s5, s6, s7, x_filter, + round_offset_vec); + uint16x8_t d1 = convolve8_8_x(s1, s2, s3, s4, s5, s6, s7, s8, x_filter, + round_offset_vec); + uint16x8_t d2 = convolve8_8_x(s2, s3, s4, s5, s6, s7, s8, s9, x_filter, + round_offset_vec); + uint16x8_t d3 = convolve8_8_x(s3, s4, s5, s6, s7, s8, s9, s10, x_filter, + round_offset_vec); + uint16x8_t d4 = convolve8_8_x(s4, s5, s6, s7, s8, s9, s10, s11, + x_filter, round_offset_vec); + uint16x8_t d5 = convolve8_8_x(s5, s6, s7, s8, s9, s10, s11, s12, + x_filter, round_offset_vec); + uint16x8_t d6 = convolve8_8_x(s6, s7, s8, s9, s10, s11, s12, s13, + x_filter, round_offset_vec); + uint16x8_t d7 = convolve8_8_x(s7, s8, s9, s10, s11, s12, s13, s14, + x_filter, round_offset_vec); + + transpose_elems_inplace_u16_8x8(&d0, &d1, &d2, &d3, &d4, &d5, &d6, &d7); + + store_u16_8x8(d, dst_stride, d0, d1, d2, d3, d4, d5, d6, d7); + + s0 = s8; + s1 = s9; + s2 = s10; + s3 = s11; + s4 = s12; + s5 = s13; + s6 = s14; + s += 8; + d += 8; + width -= 8; + } while (width != 0); + 
src_ptr += 8 * src_stride; + dst_ptr += 8 * dst_stride; + height -= 8; + } +#endif // AOM_ARCH_AARCH64 + + while (height > 0) { + const uint8_t *s = src_ptr; + CONV_BUF_TYPE *d = dst_ptr; + int width = w; + + uint8x8_t t0 = vld1_u8(s); // a0 a1 a2 a3 a4 a5 a6 a7 + int16x8_t s0 = vreinterpretq_s16_u16(vmovl_u8(t0)); + + __builtin_prefetch(d); + + s = src_ptr + 8; + + do { + t0 = vld1_u8(s); // a8 a9 a10 a11 a12 a13 a14 a15 + int16x8_t s8 = vreinterpretq_s16_u16(vmovl_u8(t0)); + + int16x8_t s1 = vextq_s16(s0, s8, 1); // a1 a2 a3 a4 a5 a6 a7 a8 + int16x8_t s2 = vextq_s16(s0, s8, 2); // a2 a3 a4 a5 a6 a7 a8 a9 + int16x8_t s3 = vextq_s16(s0, s8, 3); // a3 a4 a5 a6 a7 a8 a9 a10 + int16x8_t s4 = vextq_s16(s0, s8, 4); // a4 a5 a6 a7 a8 a9 a10 a11 + int16x8_t s5 = vextq_s16(s0, s8, 5); // a5 a6 a7 a8 a9 a10 a11 a12 + int16x8_t s6 = vextq_s16(s0, s8, 6); // a6 a7 a8 a9 a10 a11 a12 a13 + int16x8_t s7 = vextq_s16(s0, s8, 7); // a7 a8 a9 a10 a11 a12 a13 a14 + + uint16x8_t d0 = convolve8_8_x(s0, s1, s2, s3, s4, s5, s6, s7, x_filter, + round_offset_vec); + + vst1q_u16(d, d0); + + s0 = s8; + s += 8; + d += 8; + width -= 8; + } while (width != 0); + src_ptr += src_stride; + dst_ptr += dst_stride; + height--; + } + } +} + +void av1_dist_wtd_convolve_x_neon(const uint8_t *src, int src_stride, + uint8_t *dst8, int dst8_stride, int w, int h, + const InterpFilterParams *filter_params_x, + const int subpel_x_qn, + ConvolveParams *conv_params) { + if (conv_params->do_average) { + if (UNLIKELY(conv_params->use_dist_wtd_comp_avg)) { + dist_wtd_convolve_x_dist_wtd_avg_neon(src, src_stride, dst8, dst8_stride, + w, h, filter_params_x, subpel_x_qn, + conv_params); + } else { + dist_wtd_convolve_x_avg_neon(src, src_stride, dst8, dst8_stride, w, h, + filter_params_x, subpel_x_qn, conv_params); + } + } else { + dist_wtd_convolve_x_neon(src, src_stride, w, h, filter_params_x, + subpel_x_qn, conv_params); + } +} + +static INLINE uint16x4_t convolve6_4_y(const int16x4_t s0, const int16x4_t s1, + const int16x4_t s2, const int16x4_t s3, + const int16x4_t s4, const int16x4_t s5, + const int16x8_t y_filter, + const int16x4_t round_offset) { + const int16x4_t y_filter_0_3 = vget_low_s16(y_filter); + const int16x4_t y_filter_4_7 = vget_high_s16(y_filter); + + // Filter values at indices 0 and 7 are 0. + int16x4_t sum = vmul_lane_s16(s0, y_filter_0_3, 1); + sum = vmla_lane_s16(sum, s1, y_filter_0_3, 2); + sum = vmla_lane_s16(sum, s2, y_filter_0_3, 3); + sum = vmla_lane_s16(sum, s3, y_filter_4_7, 0); + sum = vmla_lane_s16(sum, s4, y_filter_4_7, 1); + sum = vmla_lane_s16(sum, s5, y_filter_4_7, 2); + + // We halved the convolution filter values so -1 from the right shift. + int16x4_t res = vrsra_n_s16(round_offset, sum, ROUND0_BITS - 1); + return vreinterpret_u16_s16(res); +} + +static INLINE uint16x8_t convolve6_8_y(const int16x8_t s0, const int16x8_t s1, + const int16x8_t s2, const int16x8_t s3, + const int16x8_t s4, const int16x8_t s5, + const int16x8_t y_filter, + const int16x8_t round_offset) { + const int16x4_t y_filter_0_3 = vget_low_s16(y_filter); + const int16x4_t y_filter_4_7 = vget_high_s16(y_filter); + + // Filter values at indices 0 and 7 are 0. + int16x8_t sum = vmulq_lane_s16(s0, y_filter_0_3, 1); + sum = vmlaq_lane_s16(sum, s1, y_filter_0_3, 2); + sum = vmlaq_lane_s16(sum, s2, y_filter_0_3, 3); + sum = vmlaq_lane_s16(sum, s3, y_filter_4_7, 0); + sum = vmlaq_lane_s16(sum, s4, y_filter_4_7, 1); + sum = vmlaq_lane_s16(sum, s5, y_filter_4_7, 2); + + // We halved the convolution filter values so -1 from the right shift. 
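// As in the horizontal kernels, vrsraq_n_s16 below rounds, shifts by
// ROUND0_BITS - 1 and adds round_offset in one step. These 6-tap variants
// exist because the shorter vertical filters are handed over as 8-entry
// arrays whose first and last coefficients are zero (hence the comment above),
// so lanes 1..6 of y_filter carry all the non-zero taps and two
// multiply-accumulates per output can be skipped.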
+ int16x8_t res = vrsraq_n_s16(round_offset, sum, ROUND0_BITS - 1); + return vreinterpretq_u16_s16(res); +} + +static INLINE void dist_wtd_convolve_y_6tap_dist_wtd_avg_neon( + const uint8_t *src_ptr, int src_stride, uint8_t *dst8_ptr, + const int dst8_stride, int w, int h, const int16x8_t y_filter, + ConvolveParams *conv_params) { + const int bd = 8; + const int offset_bits = bd + 2 * FILTER_BITS - ROUND0_BITS; + const int16_t round_offset = (1 << (offset_bits - COMPOUND_ROUND1_BITS)) + + (1 << (offset_bits - COMPOUND_ROUND1_BITS - 1)); + const int16x8_t round_offset_vec = vdupq_n_s16(round_offset); + + const uint16_t fwd_offset = conv_params->fwd_offset; + const uint16_t bck_offset = conv_params->bck_offset; + + CONV_BUF_TYPE *dst_ptr = conv_params->dst; + const int dst_stride = conv_params->dst_stride; + int width = w; + + if (w == 4 || h == 4) { + do { + const uint8_t *s = src_ptr; + CONV_BUF_TYPE *d = dst_ptr; + uint8_t *d_u8 = dst8_ptr; + int height = h; + + uint8x8_t t0 = load_unaligned_u8_4x1(s + 0 * src_stride); + uint8x8_t t1 = load_unaligned_u8_4x1(s + 1 * src_stride); + uint8x8_t t2 = load_unaligned_u8_4x1(s + 2 * src_stride); + uint8x8_t t3 = load_unaligned_u8_4x1(s + 3 * src_stride); + uint8x8_t t4 = load_unaligned_u8_4x1(s + 4 * src_stride); + + int16x4_t s0 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t0))); + int16x4_t s1 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t1))); + int16x4_t s2 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t2))); + int16x4_t s3 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t3))); + int16x4_t s4 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t4))); + + s += 5 * src_stride; + + do { +#if AOM_ARCH_AARCH64 + t0 = load_unaligned_u8_4x1(s + 0 * src_stride); + t1 = load_unaligned_u8_4x1(s + 1 * src_stride); + t2 = load_unaligned_u8_4x1(s + 2 * src_stride); + t3 = load_unaligned_u8_4x1(s + 3 * src_stride); + + int16x4_t s5 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t0))); + int16x4_t s6 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t1))); + int16x4_t s7 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t2))); + int16x4_t s8 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t3))); + + uint16x4_t d0 = convolve6_4_y(s0, s1, s2, s3, s4, s5, y_filter, + vget_low_s16(round_offset_vec)); + uint16x4_t d1 = convolve6_4_y(s1, s2, s3, s4, s5, s6, y_filter, + vget_low_s16(round_offset_vec)); + uint16x4_t d2 = convolve6_4_y(s2, s3, s4, s5, s6, s7, y_filter, + vget_low_s16(round_offset_vec)); + uint16x4_t d3 = convolve6_4_y(s3, s4, s5, s6, s7, s8, y_filter, + vget_low_s16(round_offset_vec)); + + uint16x4_t dd0, dd1, dd2, dd3; + load_u16_4x4(d, dst_stride, &dd0, &dd1, &dd2, &dd3); + + uint8x8_t d01, d23; + compute_dist_wtd_avg_4x4(dd0, dd1, dd2, dd3, d0, d1, d2, d3, fwd_offset, + bck_offset, round_offset_vec, &d01, &d23); + + store_u8x4_strided_x2(d_u8 + 0 * dst8_stride, dst8_stride, d01); + store_u8x4_strided_x2(d_u8 + 2 * dst8_stride, dst8_stride, d23); + + s0 = s4; + s1 = s5; + s2 = s6; + s3 = s7; + s4 = s8; + s += 4 * src_stride; + d += 4 * dst_stride; + d_u8 += 4 * dst8_stride; + height -= 4; +#else // !AOM_ARCH_AARCH64 + t0 = load_unaligned_u8_4x1(s); + int16x4_t s5 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t0))); + + uint16x4_t d0 = convolve6_4_y(s0, s1, s2, s3, s4, s5, y_filter, + vget_low_s16(round_offset_vec)); + + uint16x4_t dd0 = vld1_u16(d); + + uint8x8_t d01; + compute_dist_wtd_avg_4x1(dd0, d0, fwd_offset, bck_offset, + vget_low_s16(round_offset_vec), &d01); + + store_u8_4x1(d_u8, d01); + + s0 = s1; + s1 = s2; + s2 = s3; + s3 = s4; + s4 = s5; + s += src_stride; + d 
+= dst_stride; + d_u8 += dst8_stride; + height--; +#endif // AOM_ARCH_AARCH64 + } while (height != 0); + src_ptr += 4; + dst_ptr += 4; + dst8_ptr += 4; + width -= 4; + } while (width != 0); + } else { + do { + const uint8_t *s = src_ptr + (5 * src_stride); + CONV_BUF_TYPE *d = dst_ptr; + uint8_t *d_u8 = dst8_ptr; + int height = h; + + uint8x8_t t0, t1, t2, t3, t4; + load_u8_8x5(src_ptr, src_stride, &t0, &t1, &t2, &t3, &t4); + + int16x8_t s0 = vreinterpretq_s16_u16(vmovl_u8(t0)); + int16x8_t s1 = vreinterpretq_s16_u16(vmovl_u8(t1)); + int16x8_t s2 = vreinterpretq_s16_u16(vmovl_u8(t2)); + int16x8_t s3 = vreinterpretq_s16_u16(vmovl_u8(t3)); + int16x8_t s4 = vreinterpretq_s16_u16(vmovl_u8(t4)); + + do { +#if AOM_ARCH_AARCH64 + uint8x8_t t5, t6, t7; + load_u8_8x8(s, src_stride, &t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7); + + int16x8_t s5 = vreinterpretq_s16_u16(vmovl_u8(t0)); + int16x8_t s6 = vreinterpretq_s16_u16(vmovl_u8(t1)); + int16x8_t s7 = vreinterpretq_s16_u16(vmovl_u8(t2)); + int16x8_t s8 = vreinterpretq_s16_u16(vmovl_u8(t3)); + int16x8_t s9 = vreinterpretq_s16_u16(vmovl_u8(t4)); + int16x8_t s10 = vreinterpretq_s16_u16(vmovl_u8(t5)); + int16x8_t s11 = vreinterpretq_s16_u16(vmovl_u8(t6)); + int16x8_t s12 = vreinterpretq_s16_u16(vmovl_u8(t7)); + + uint16x8_t d0 = + convolve6_8_y(s0, s1, s2, s3, s4, s5, y_filter, round_offset_vec); + uint16x8_t d1 = + convolve6_8_y(s1, s2, s3, s4, s5, s6, y_filter, round_offset_vec); + uint16x8_t d2 = + convolve6_8_y(s2, s3, s4, s5, s6, s7, y_filter, round_offset_vec); + uint16x8_t d3 = + convolve6_8_y(s3, s4, s5, s6, s7, s8, y_filter, round_offset_vec); + uint16x8_t d4 = + convolve6_8_y(s4, s5, s6, s7, s8, s9, y_filter, round_offset_vec); + uint16x8_t d5 = + convolve6_8_y(s5, s6, s7, s8, s9, s10, y_filter, round_offset_vec); + uint16x8_t d6 = + convolve6_8_y(s6, s7, s8, s9, s10, s11, y_filter, round_offset_vec); + uint16x8_t d7 = convolve6_8_y(s7, s8, s9, s10, s11, s12, y_filter, + round_offset_vec); + + uint16x8_t dd0, dd1, dd2, dd3; + load_u16_8x4(d, dst_stride, &dd0, &dd1, &dd2, &dd3); + + uint8x8_t d0_u8, d1_u8, d2_u8, d3_u8; + compute_dist_wtd_avg_8x4(dd0, dd1, dd2, dd3, d0, d1, d2, d3, fwd_offset, + bck_offset, round_offset_vec, &d0_u8, &d1_u8, + &d2_u8, &d3_u8); + + store_u8_8x4(d_u8, dst8_stride, d0_u8, d1_u8, d2_u8, d3_u8); + d_u8 += 4 * dst8_stride; + + uint16x8_t dd4, dd5, dd6, dd7; + load_u16_8x4(d + 4 * dst_stride, dst_stride, &dd4, &dd5, &dd6, &dd7); + + uint8x8_t d4_u8, d5_u8, d6_u8, d7_u8; + compute_dist_wtd_avg_8x4(dd4, dd5, dd6, dd7, d4, d5, d6, d7, fwd_offset, + bck_offset, round_offset_vec, &d4_u8, &d5_u8, + &d6_u8, &d7_u8); + + store_u8_8x4(d_u8, dst8_stride, d4_u8, d5_u8, d6_u8, d7_u8); + d_u8 += 4 * dst8_stride; + + s0 = s8; + s1 = s9; + s2 = s10; + s3 = s11; + s4 = s12; + s += 8 * src_stride; + d += 8 * dst_stride; + height -= 8; +#else // !AOM_ARCH_AARCH64 + int16x8_t s5 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(s))); + + uint16x8_t d0 = + convolve6_8_y(s0, s1, s2, s3, s4, s5, y_filter, round_offset_vec); + + s0 = s1; + s1 = s2; + s2 = s3; + s3 = s4; + s4 = s5; + + uint16x8_t dd0 = vld1q_u16(d); + + uint8x8_t d0_u8; + compute_dist_wtd_avg_8x1(dd0, d0, fwd_offset, bck_offset, + round_offset_vec, &d0_u8); + + vst1_u8(d_u8, d0_u8); + d_u8 += dst8_stride; + + s += src_stride; + d += dst_stride; + height--; +#endif // AOM_ARCH_AARCH64 + } while (height != 0); + src_ptr += 8; + dst_ptr += 8; + dst8_ptr += 8; + width -= 8; + } while (width != 0); + } +} + +static INLINE void dist_wtd_convolve_y_6tap_avg_neon( + const uint8_t *src_ptr, int 
src_stride, uint8_t *dst8_ptr, + const int dst8_stride, int w, int h, const int16x8_t y_filter, + ConvolveParams *conv_params) { + const int bd = 8; + const int offset_bits = bd + 2 * FILTER_BITS - ROUND0_BITS; + const int16_t round_offset = (1 << (offset_bits - COMPOUND_ROUND1_BITS)) + + (1 << (offset_bits - COMPOUND_ROUND1_BITS - 1)); + const int16x8_t round_offset_vec = vdupq_n_s16(round_offset); + + CONV_BUF_TYPE *dst_ptr = conv_params->dst; + const int dst_stride = conv_params->dst_stride; + int width = w; + + if (w == 4 || h == 4) { + do { + const uint8_t *s = src_ptr; + CONV_BUF_TYPE *d = dst_ptr; + uint8_t *d_u8 = dst8_ptr; + int height = h; + + uint8x8_t t0 = load_unaligned_u8_4x1(s + 0 * src_stride); + uint8x8_t t1 = load_unaligned_u8_4x1(s + 1 * src_stride); + uint8x8_t t2 = load_unaligned_u8_4x1(s + 2 * src_stride); + uint8x8_t t3 = load_unaligned_u8_4x1(s + 3 * src_stride); + uint8x8_t t4 = load_unaligned_u8_4x1(s + 4 * src_stride); + + int16x4_t s0 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t0))); + int16x4_t s1 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t1))); + int16x4_t s2 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t2))); + int16x4_t s3 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t3))); + int16x4_t s4 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t4))); + + s += 5 * src_stride; + + do { +#if AOM_ARCH_AARCH64 + t0 = load_unaligned_u8_4x1(s + 0 * src_stride); + t1 = load_unaligned_u8_4x1(s + 1 * src_stride); + t2 = load_unaligned_u8_4x1(s + 2 * src_stride); + t3 = load_unaligned_u8_4x1(s + 3 * src_stride); + + int16x4_t s5 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t0))); + int16x4_t s6 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t1))); + int16x4_t s7 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t2))); + int16x4_t s8 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t3))); + + uint16x4_t d0 = convolve6_4_y(s0, s1, s2, s3, s4, s5, y_filter, + vget_low_s16(round_offset_vec)); + uint16x4_t d1 = convolve6_4_y(s1, s2, s3, s4, s5, s6, y_filter, + vget_low_s16(round_offset_vec)); + uint16x4_t d2 = convolve6_4_y(s2, s3, s4, s5, s6, s7, y_filter, + vget_low_s16(round_offset_vec)); + uint16x4_t d3 = convolve6_4_y(s3, s4, s5, s6, s7, s8, y_filter, + vget_low_s16(round_offset_vec)); + + uint16x4_t dd0, dd1, dd2, dd3; + load_u16_4x4(d, dst_stride, &dd0, &dd1, &dd2, &dd3); + + uint8x8_t d01, d23; + compute_basic_avg_4x4(dd0, dd1, dd2, dd3, d0, d1, d2, d3, + round_offset_vec, &d01, &d23); + + store_u8x4_strided_x2(d_u8 + 0 * dst8_stride, dst8_stride, d01); + store_u8x4_strided_x2(d_u8 + 2 * dst8_stride, dst8_stride, d23); + + s0 = s4; + s1 = s5; + s2 = s6; + s3 = s7; + s4 = s8; + s += 4 * src_stride; + d += 4 * dst_stride; + d_u8 += 4 * dst8_stride; + height -= 4; +#else // !AOM_ARCH_AARCH64 + t0 = load_unaligned_u8_4x1(s); + int16x4_t s5 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t0))); + + uint16x4_t d0 = convolve6_4_y(s0, s1, s2, s3, s4, s5, y_filter, + vget_low_s16(round_offset_vec)); + + uint16x4_t dd0 = vld1_u16(d); + + uint8x8_t d01; + compute_basic_avg_4x1(dd0, d0, vget_low_s16(round_offset_vec), &d01); + + store_u8_4x1(d_u8, d01); + + s0 = s1; + s1 = s2; + s2 = s3; + s3 = s4; + s4 = s5; + s += src_stride; + d += dst_stride; + d_u8 += dst8_stride; + height--; +#endif // AOM_ARCH_AARCH64 + } while (height != 0); + src_ptr += 4; + dst_ptr += 4; + dst8_ptr += 4; + width -= 4; + } while (width != 0); + } else { + do { + const uint8_t *s = src_ptr + (5 * src_stride); + CONV_BUF_TYPE *d = dst_ptr; + uint8_t *d_u8 = dst8_ptr; + int height = h; + + uint8x8_t t0, t1, t2, t3, t4; + 
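// A 6-tap vertical filter needs five rows of context before it can produce
// its first output row, so the first five source rows are loaded once up
// front (load_u8_8x5 below) and the inner loop then reads new rows starting
// at src_ptr + 5 * src_stride, sliding the five-row window as it goes.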
load_u8_8x5(src_ptr, src_stride, &t0, &t1, &t2, &t3, &t4); + + int16x8_t s0 = vreinterpretq_s16_u16(vmovl_u8(t0)); + int16x8_t s1 = vreinterpretq_s16_u16(vmovl_u8(t1)); + int16x8_t s2 = vreinterpretq_s16_u16(vmovl_u8(t2)); + int16x8_t s3 = vreinterpretq_s16_u16(vmovl_u8(t3)); + int16x8_t s4 = vreinterpretq_s16_u16(vmovl_u8(t4)); + + do { +#if AOM_ARCH_AARCH64 + uint8x8_t t5, t6, t7; + load_u8_8x8(s, src_stride, &t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7); + + int16x8_t s5 = vreinterpretq_s16_u16(vmovl_u8(t0)); + int16x8_t s6 = vreinterpretq_s16_u16(vmovl_u8(t1)); + int16x8_t s7 = vreinterpretq_s16_u16(vmovl_u8(t2)); + int16x8_t s8 = vreinterpretq_s16_u16(vmovl_u8(t3)); + int16x8_t s9 = vreinterpretq_s16_u16(vmovl_u8(t4)); + int16x8_t s10 = vreinterpretq_s16_u16(vmovl_u8(t5)); + int16x8_t s11 = vreinterpretq_s16_u16(vmovl_u8(t6)); + int16x8_t s12 = vreinterpretq_s16_u16(vmovl_u8(t7)); + + uint16x8_t d0 = + convolve6_8_y(s0, s1, s2, s3, s4, s5, y_filter, round_offset_vec); + uint16x8_t d1 = + convolve6_8_y(s1, s2, s3, s4, s5, s6, y_filter, round_offset_vec); + uint16x8_t d2 = + convolve6_8_y(s2, s3, s4, s5, s6, s7, y_filter, round_offset_vec); + uint16x8_t d3 = + convolve6_8_y(s3, s4, s5, s6, s7, s8, y_filter, round_offset_vec); + uint16x8_t d4 = + convolve6_8_y(s4, s5, s6, s7, s8, s9, y_filter, round_offset_vec); + uint16x8_t d5 = + convolve6_8_y(s5, s6, s7, s8, s9, s10, y_filter, round_offset_vec); + uint16x8_t d6 = + convolve6_8_y(s6, s7, s8, s9, s10, s11, y_filter, round_offset_vec); + uint16x8_t d7 = convolve6_8_y(s7, s8, s9, s10, s11, s12, y_filter, + round_offset_vec); + + uint16x8_t dd0, dd1, dd2, dd3; + load_u16_8x4(d, dst_stride, &dd0, &dd1, &dd2, &dd3); + + uint8x8_t d0_u8, d1_u8, d2_u8, d3_u8; + compute_basic_avg_8x4(dd0, dd1, dd2, dd3, d0, d1, d2, d3, + round_offset_vec, &d0_u8, &d1_u8, &d2_u8, &d3_u8); + + store_u8_8x4(d_u8, dst8_stride, d0_u8, d1_u8, d2_u8, d3_u8); + d_u8 += 4 * dst8_stride; + + uint16x8_t dd4, dd5, dd6, dd7; + load_u16_8x4(d + 4 * dst_stride, dst_stride, &dd4, &dd5, &dd6, &dd7); + + uint8x8_t d4_u8, d5_u8, d6_u8, d7_u8; + compute_basic_avg_8x4(dd4, dd5, dd6, dd7, d4, d5, d6, d7, + round_offset_vec, &d4_u8, &d5_u8, &d6_u8, &d7_u8); + + store_u8_8x4(d_u8, dst8_stride, d4_u8, d5_u8, d6_u8, d7_u8); + d_u8 += 4 * dst8_stride; + + s0 = s8; + s1 = s9; + s2 = s10; + s3 = s11; + s4 = s12; + s += 8 * src_stride; + d += 8 * dst_stride; + height -= 8; +#else // !AOM_ARCH_AARCH64 + int16x8_t s5 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(s))); + + uint16x8_t d0 = + convolve6_8_y(s0, s1, s2, s3, s4, s5, y_filter, round_offset_vec); + + s0 = s1; + s1 = s2; + s2 = s3; + s3 = s4; + s4 = s5; + + uint16x8_t dd0 = vld1q_u16(d); + + uint8x8_t d0_u8; + compute_basic_avg_8x1(dd0, d0, round_offset_vec, &d0_u8); + + vst1_u8(d_u8, d0_u8); + d_u8 += dst8_stride; + + s += src_stride; + d += dst_stride; + height--; +#endif // AOM_ARCH_AARCH64 + } while (height != 0); + src_ptr += 8; + dst_ptr += 8; + dst8_ptr += 8; + width -= 8; + } while (width != 0); + } +} + +static INLINE void dist_wtd_convolve_y_6tap_neon(const uint8_t *src_ptr, + int src_stride, int w, int h, + const int16x8_t y_filter, + ConvolveParams *conv_params) { + const int bd = 8; + const int offset_bits = bd + 2 * FILTER_BITS - ROUND0_BITS; + const int16_t round_offset = (1 << (offset_bits - COMPOUND_ROUND1_BITS)) + + (1 << (offset_bits - COMPOUND_ROUND1_BITS - 1)); + const int16x8_t round_offset_vec = vdupq_n_s16(round_offset); + + CONV_BUF_TYPE *dst_ptr = conv_params->dst; + const int dst_stride = conv_params->dst_stride; + int 
width = w; + + if (w == 4 || h == 4) { + do { + const uint8_t *s = src_ptr; + CONV_BUF_TYPE *d = dst_ptr; + int height = h; + + uint8x8_t t0 = load_unaligned_u8_4x1(s + 0 * src_stride); + uint8x8_t t1 = load_unaligned_u8_4x1(s + 1 * src_stride); + uint8x8_t t2 = load_unaligned_u8_4x1(s + 2 * src_stride); + uint8x8_t t3 = load_unaligned_u8_4x1(s + 3 * src_stride); + uint8x8_t t4 = load_unaligned_u8_4x1(s + 4 * src_stride); + + int16x4_t s0 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t0))); + int16x4_t s1 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t1))); + int16x4_t s2 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t2))); + int16x4_t s3 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t3))); + int16x4_t s4 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t4))); + + s += 5 * src_stride; + + do { +#if AOM_ARCH_AARCH64 + t0 = load_unaligned_u8_4x1(s + 0 * src_stride); + t1 = load_unaligned_u8_4x1(s + 1 * src_stride); + t2 = load_unaligned_u8_4x1(s + 2 * src_stride); + t3 = load_unaligned_u8_4x1(s + 3 * src_stride); + + int16x4_t s5 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t0))); + int16x4_t s6 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t1))); + int16x4_t s7 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t2))); + int16x4_t s8 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t3))); + + uint16x4_t d0 = convolve6_4_y(s0, s1, s2, s3, s4, s5, y_filter, + vget_low_s16(round_offset_vec)); + uint16x4_t d1 = convolve6_4_y(s1, s2, s3, s4, s5, s6, y_filter, + vget_low_s16(round_offset_vec)); + uint16x4_t d2 = convolve6_4_y(s2, s3, s4, s5, s6, s7, y_filter, + vget_low_s16(round_offset_vec)); + uint16x4_t d3 = convolve6_4_y(s3, s4, s5, s6, s7, s8, y_filter, + vget_low_s16(round_offset_vec)); + + store_u16_4x4(d, dst_stride, d0, d1, d2, d3); + + s0 = s4; + s1 = s5; + s2 = s6; + s3 = s7; + s4 = s8; + s += 4 * src_stride; + d += 4 * dst_stride; + height -= 4; +#else // !AOM_ARCH_AARCH64 + t0 = load_unaligned_u8_4x1(s); + int16x4_t s5 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t0))); + + uint16x4_t d0 = convolve6_4_y(s0, s1, s2, s3, s4, s5, y_filter, + vget_low_s16(round_offset_vec)); + + vst1_u16(d, d0); + + s0 = s1; + s1 = s2; + s2 = s3; + s3 = s4; + s4 = s5; + s += src_stride; + d += dst_stride; + height--; +#endif // AOM_ARCH_AARCH64 + } while (height != 0); + src_ptr += 4; + dst_ptr += 4; + width -= 4; + } while (width != 0); + } else { + do { + const uint8_t *s = src_ptr + (5 * src_stride); + CONV_BUF_TYPE *d = dst_ptr; + int height = h; + + uint8x8_t t0, t1, t2, t3, t4; + load_u8_8x5(src_ptr, src_stride, &t0, &t1, &t2, &t3, &t4); + + int16x8_t s0 = vreinterpretq_s16_u16(vmovl_u8(t0)); + int16x8_t s1 = vreinterpretq_s16_u16(vmovl_u8(t1)); + int16x8_t s2 = vreinterpretq_s16_u16(vmovl_u8(t2)); + int16x8_t s3 = vreinterpretq_s16_u16(vmovl_u8(t3)); + int16x8_t s4 = vreinterpretq_s16_u16(vmovl_u8(t4)); + + do { +#if AOM_ARCH_AARCH64 + uint8x8_t t5, t6, t7; + load_u8_8x8(s, src_stride, &t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7); + + int16x8_t s5 = vreinterpretq_s16_u16(vmovl_u8(t0)); + int16x8_t s6 = vreinterpretq_s16_u16(vmovl_u8(t1)); + int16x8_t s7 = vreinterpretq_s16_u16(vmovl_u8(t2)); + int16x8_t s8 = vreinterpretq_s16_u16(vmovl_u8(t3)); + int16x8_t s9 = vreinterpretq_s16_u16(vmovl_u8(t4)); + int16x8_t s10 = vreinterpretq_s16_u16(vmovl_u8(t5)); + int16x8_t s11 = vreinterpretq_s16_u16(vmovl_u8(t6)); + int16x8_t s12 = vreinterpretq_s16_u16(vmovl_u8(t7)); + + uint16x8_t d0 = + convolve6_8_y(s0, s1, s2, s3, s4, s5, y_filter, round_offset_vec); + uint16x8_t d1 = + convolve6_8_y(s1, s2, s3, s4, s5, s6, y_filter, 
round_offset_vec); + uint16x8_t d2 = + convolve6_8_y(s2, s3, s4, s5, s6, s7, y_filter, round_offset_vec); + uint16x8_t d3 = + convolve6_8_y(s3, s4, s5, s6, s7, s8, y_filter, round_offset_vec); + uint16x8_t d4 = + convolve6_8_y(s4, s5, s6, s7, s8, s9, y_filter, round_offset_vec); + uint16x8_t d5 = + convolve6_8_y(s5, s6, s7, s8, s9, s10, y_filter, round_offset_vec); + uint16x8_t d6 = + convolve6_8_y(s6, s7, s8, s9, s10, s11, y_filter, round_offset_vec); + uint16x8_t d7 = convolve6_8_y(s7, s8, s9, s10, s11, s12, y_filter, + round_offset_vec); + + store_u16_8x8(d, dst_stride, d0, d1, d2, d3, d4, d5, d6, d7); + + s0 = s8; + s1 = s9; + s2 = s10; + s3 = s11; + s4 = s12; + s += 8 * src_stride; + d += 8 * dst_stride; + height -= 8; +#else // !AOM_ARCH_AARCH64 + int16x8_t s5 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(s))); + + uint16x8_t d0 = + convolve6_8_y(s0, s1, s2, s3, s4, s5, y_filter, round_offset_vec); + + s0 = s1; + s1 = s2; + s2 = s3; + s3 = s4; + s4 = s5; + + vst1q_u16(d, d0); + + s += src_stride; + d += dst_stride; + height--; +#endif // AOM_ARCH_AARCH64 + } while (height != 0); + src_ptr += 8; + dst_ptr += 8; + width -= 8; + } while (width != 0); + } +} + +static INLINE uint16x4_t convolve8_4_y(const int16x4_t s0, const int16x4_t s1, + const int16x4_t s2, const int16x4_t s3, + const int16x4_t s4, const int16x4_t s5, + const int16x4_t s6, const int16x4_t s7, + const int16x8_t y_filter, + const int16x4_t round_offset) { + const int16x4_t y_filter_0_3 = vget_low_s16(y_filter); + const int16x4_t y_filter_4_7 = vget_high_s16(y_filter); + + int16x4_t sum = vmul_lane_s16(s0, y_filter_0_3, 0); + sum = vmla_lane_s16(sum, s1, y_filter_0_3, 1); + sum = vmla_lane_s16(sum, s2, y_filter_0_3, 2); + sum = vmla_lane_s16(sum, s3, y_filter_0_3, 3); + sum = vmla_lane_s16(sum, s4, y_filter_4_7, 0); + sum = vmla_lane_s16(sum, s5, y_filter_4_7, 1); + sum = vmla_lane_s16(sum, s6, y_filter_4_7, 2); + sum = vmla_lane_s16(sum, s7, y_filter_4_7, 3); + + // We halved the convolution filter values so -1 from the right shift. + int16x4_t res = vrsra_n_s16(round_offset, sum, ROUND0_BITS - 1); + return vreinterpret_u16_s16(res); +} + +static INLINE uint16x8_t convolve8_8_y(const int16x8_t s0, const int16x8_t s1, + const int16x8_t s2, const int16x8_t s3, + const int16x8_t s4, const int16x8_t s5, + const int16x8_t s6, const int16x8_t s7, + const int16x8_t y_filter, + const int16x8_t round_offset) { + const int16x4_t y_filter_0_3 = vget_low_s16(y_filter); + const int16x4_t y_filter_4_7 = vget_high_s16(y_filter); + + int16x8_t sum = vmulq_lane_s16(s0, y_filter_0_3, 0); + sum = vmlaq_lane_s16(sum, s1, y_filter_0_3, 1); + sum = vmlaq_lane_s16(sum, s2, y_filter_0_3, 2); + sum = vmlaq_lane_s16(sum, s3, y_filter_0_3, 3); + sum = vmlaq_lane_s16(sum, s4, y_filter_4_7, 0); + sum = vmlaq_lane_s16(sum, s5, y_filter_4_7, 1); + sum = vmlaq_lane_s16(sum, s6, y_filter_4_7, 2); + sum = vmlaq_lane_s16(sum, s7, y_filter_4_7, 3); + + // We halved the convolution filter values so -1 from the right shift. 
+ int16x8_t res = vrsraq_n_s16(round_offset, sum, ROUND0_BITS - 1); + return vreinterpretq_u16_s16(res); +} + +static INLINE void dist_wtd_convolve_y_8tap_dist_wtd_avg_neon( + const uint8_t *src_ptr, int src_stride, uint8_t *dst8_ptr, + const int dst8_stride, int w, int h, const int16x8_t y_filter, + ConvolveParams *conv_params) { + const int bd = 8; + const int offset_bits = bd + 2 * FILTER_BITS - ROUND0_BITS; + const int16_t round_offset = (1 << (offset_bits - COMPOUND_ROUND1_BITS)) + + (1 << (offset_bits - COMPOUND_ROUND1_BITS - 1)); + const int16x8_t round_offset_vec = vdupq_n_s16(round_offset); + + const uint16_t fwd_offset = conv_params->fwd_offset; + const uint16_t bck_offset = conv_params->bck_offset; + + CONV_BUF_TYPE *dst_ptr = conv_params->dst; + const int dst_stride = conv_params->dst_stride; + int width = w; + + if (w == 4 || h == 4) { + do { + const uint8_t *s = src_ptr; + CONV_BUF_TYPE *d = dst_ptr; + uint8_t *d_u8 = dst8_ptr; + int height = h; + + __builtin_prefetch(s + 0 * src_stride); + __builtin_prefetch(s + 1 * src_stride); + __builtin_prefetch(s + 2 * src_stride); + __builtin_prefetch(s + 3 * src_stride); + + uint8x8_t t0 = load_unaligned_u8_4x1(s + 0 * src_stride); + uint8x8_t t1 = load_unaligned_u8_4x1(s + 1 * src_stride); + uint8x8_t t2 = load_unaligned_u8_4x1(s + 2 * src_stride); + uint8x8_t t3 = load_unaligned_u8_4x1(s + 3 * src_stride); + uint8x8_t t4 = load_unaligned_u8_4x1(s + 4 * src_stride); + uint8x8_t t5 = load_unaligned_u8_4x1(s + 5 * src_stride); + uint8x8_t t6 = load_unaligned_u8_4x1(s + 6 * src_stride); + + int16x4_t s0 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t0))); + int16x4_t s1 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t1))); + int16x4_t s2 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t2))); + int16x4_t s3 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t3))); + int16x4_t s4 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t4))); + int16x4_t s5 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t5))); + int16x4_t s6 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t6))); + + __builtin_prefetch(d + 0 * dst_stride); + __builtin_prefetch(d + 1 * dst_stride); + __builtin_prefetch(d + 2 * dst_stride); + __builtin_prefetch(d + 3 * dst_stride); + + s += 7 * src_stride; + + do { +#if AOM_ARCH_AARCH64 + t0 = load_unaligned_u8_4x1(s + 0 * src_stride); + t1 = load_unaligned_u8_4x1(s + 1 * src_stride); + t2 = load_unaligned_u8_4x1(s + 2 * src_stride); + t3 = load_unaligned_u8_4x1(s + 3 * src_stride); + + int16x4_t s7 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t0))); + int16x4_t s8 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t1))); + int16x4_t s9 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t2))); + int16x4_t s10 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t3))); + + uint16x4_t d0 = convolve8_4_y(s0, s1, s2, s3, s4, s5, s6, s7, y_filter, + vget_low_s16(round_offset_vec)); + uint16x4_t d1 = convolve8_4_y(s1, s2, s3, s4, s5, s6, s7, s8, y_filter, + vget_low_s16(round_offset_vec)); + uint16x4_t d2 = convolve8_4_y(s2, s3, s4, s5, s6, s7, s8, s9, y_filter, + vget_low_s16(round_offset_vec)); + uint16x4_t d3 = convolve8_4_y(s3, s4, s5, s6, s7, s8, s9, s10, y_filter, + vget_low_s16(round_offset_vec)); + + __builtin_prefetch(d + 0 * dst_stride); + __builtin_prefetch(d + 1 * dst_stride); + __builtin_prefetch(d + 2 * dst_stride); + __builtin_prefetch(d + 3 * dst_stride); + + __builtin_prefetch(d_u8 + 0 * dst8_stride); + __builtin_prefetch(d_u8 + 1 * dst8_stride); + __builtin_prefetch(d_u8 + 2 * dst8_stride); + __builtin_prefetch(d_u8 + 3 * dst8_stride); + + uint16x4_t dd0, dd1, 
dd2, dd3; + load_u16_4x4(d, dst_stride, &dd0, &dd1, &dd2, &dd3); + + uint8x8_t d01, d23; + compute_dist_wtd_avg_4x4(dd0, dd1, dd2, dd3, d0, d1, d2, d3, fwd_offset, + bck_offset, round_offset_vec, &d01, &d23); + + store_u8x4_strided_x2(d_u8 + 0 * dst8_stride, dst8_stride, d01); + store_u8x4_strided_x2(d_u8 + 2 * dst8_stride, dst8_stride, d23); + + s0 = s4; + s1 = s5; + s2 = s6; + s3 = s7; + s4 = s8; + s5 = s9; + s6 = s10; + s += 4 * src_stride; + d += 4 * dst_stride; + d_u8 += 4 * dst8_stride; + height -= 4; +#else // !AOM_ARCH_AARCH64 + t0 = load_unaligned_u8_4x1(s); + int16x4_t s7 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t0))); + + uint16x4_t d0 = convolve8_4_y(s0, s1, s2, s3, s4, s5, s6, s7, y_filter, + vget_low_s16(round_offset_vec)); + + __builtin_prefetch(d); + + uint16x4_t dd0 = vld1_u16(d); + + uint8x8_t d01; + compute_dist_wtd_avg_4x1(dd0, d0, fwd_offset, bck_offset, + vget_low_s16(round_offset_vec), &d01); + + store_u8_4x1(d_u8, d01); + + s0 = s1; + s1 = s2; + s2 = s3; + s3 = s4; + s4 = s5; + s5 = s6; + s6 = s7; + s += src_stride; + d += dst_stride; + d_u8 += dst8_stride; + height--; +#endif // AOM_ARCH_AARCH64 + } while (height != 0); + src_ptr += 4; + dst_ptr += 4; + dst8_ptr += 4; + width -= 4; + } while (width != 0); + } else { + do { + const uint8_t *s = src_ptr; + CONV_BUF_TYPE *d = dst_ptr; + uint8_t *d_u8 = dst8_ptr; + int height = h; + + __builtin_prefetch(s + 0 * src_stride); + __builtin_prefetch(s + 1 * src_stride); + __builtin_prefetch(s + 2 * src_stride); + __builtin_prefetch(s + 3 * src_stride); + __builtin_prefetch(s + 4 * src_stride); + __builtin_prefetch(s + 5 * src_stride); + __builtin_prefetch(s + 6 * src_stride); + __builtin_prefetch(s + 7 * src_stride); + + uint8x8_t t0, t1, t2, t3, t4, t5, t6; + load_u8_8x7(s, src_stride, &t0, &t1, &t2, &t3, &t4, &t5, &t6); + + int16x8_t s0 = vreinterpretq_s16_u16(vmovl_u8(t0)); + int16x8_t s1 = vreinterpretq_s16_u16(vmovl_u8(t1)); + int16x8_t s2 = vreinterpretq_s16_u16(vmovl_u8(t2)); + int16x8_t s3 = vreinterpretq_s16_u16(vmovl_u8(t3)); + int16x8_t s4 = vreinterpretq_s16_u16(vmovl_u8(t4)); + int16x8_t s5 = vreinterpretq_s16_u16(vmovl_u8(t5)); + int16x8_t s6 = vreinterpretq_s16_u16(vmovl_u8(t6)); + + s += 7 * src_stride; + + do { +#if AOM_ARCH_AARCH64 + uint8x8_t t7; + load_u8_8x8(s, src_stride, &t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7); + + int16x8_t s7 = vreinterpretq_s16_u16(vmovl_u8(t0)); + int16x8_t s8 = vreinterpretq_s16_u16(vmovl_u8(t1)); + int16x8_t s9 = vreinterpretq_s16_u16(vmovl_u8(t2)); + int16x8_t s10 = vreinterpretq_s16_u16(vmovl_u8(t3)); + int16x8_t s11 = vreinterpretq_s16_u16(vmovl_u8(t4)); + int16x8_t s12 = vreinterpretq_s16_u16(vmovl_u8(t5)); + int16x8_t s13 = vreinterpretq_s16_u16(vmovl_u8(t6)); + int16x8_t s14 = vreinterpretq_s16_u16(vmovl_u8(t7)); + + __builtin_prefetch(dst_ptr + 0 * dst_stride); + __builtin_prefetch(dst_ptr + 1 * dst_stride); + __builtin_prefetch(dst_ptr + 2 * dst_stride); + __builtin_prefetch(dst_ptr + 3 * dst_stride); + + uint16x8_t d0 = convolve8_8_y(s0, s1, s2, s3, s4, s5, s6, s7, y_filter, + round_offset_vec); + uint16x8_t d1 = convolve8_8_y(s1, s2, s3, s4, s5, s6, s7, s8, y_filter, + round_offset_vec); + uint16x8_t d2 = convolve8_8_y(s2, s3, s4, s5, s6, s7, s8, s9, y_filter, + round_offset_vec); + uint16x8_t d3 = convolve8_8_y(s3, s4, s5, s6, s7, s8, s9, s10, y_filter, + round_offset_vec); + uint16x8_t d4 = convolve8_8_y(s4, s5, s6, s7, s8, s9, s10, s11, + y_filter, round_offset_vec); + uint16x8_t d5 = convolve8_8_y(s5, s6, s7, s8, s9, s10, s11, s12, + y_filter, 
round_offset_vec); + uint16x8_t d6 = convolve8_8_y(s6, s7, s8, s9, s10, s11, s12, s13, + y_filter, round_offset_vec); + uint16x8_t d7 = convolve8_8_y(s7, s8, s9, s10, s11, s12, s13, s14, + y_filter, round_offset_vec); + + __builtin_prefetch(d + 0 * dst8_stride); + __builtin_prefetch(d + 1 * dst8_stride); + __builtin_prefetch(d + 2 * dst8_stride); + __builtin_prefetch(d + 3 * dst8_stride); + + uint16x8_t dd0, dd1, dd2, dd3; + load_u16_8x4(d, dst_stride, &dd0, &dd1, &dd2, &dd3); + + uint8x8_t d0_u8, d1_u8, d2_u8, d3_u8; + compute_dist_wtd_avg_8x4(dd0, dd1, dd2, dd3, d0, d1, d2, d3, fwd_offset, + bck_offset, round_offset_vec, &d0_u8, &d1_u8, + &d2_u8, &d3_u8); + + store_u8_8x4(d_u8, dst8_stride, d0_u8, d1_u8, d2_u8, d3_u8); + d_u8 += 4 * dst8_stride; + + uint16x8_t dd4, dd5, dd6, dd7; + load_u16_8x4(d + 4 * dst_stride, dst_stride, &dd4, &dd5, &dd6, &dd7); + + uint8x8_t d4_u8, d5_u8, d6_u8, d7_u8; + compute_dist_wtd_avg_8x4(dd4, dd5, dd6, dd7, d4, d5, d6, d7, fwd_offset, + bck_offset, round_offset_vec, &d4_u8, &d5_u8, + &d6_u8, &d7_u8); + + store_u8_8x4(d_u8, dst8_stride, d4_u8, d5_u8, d6_u8, d7_u8); + d_u8 += 4 * dst8_stride; + + s0 = s8; + s1 = s9; + s2 = s10; + s3 = s11; + s4 = s12; + s5 = s13; + s6 = s14; + s += 8 * src_stride; + d += 8 * dst_stride; + height -= 8; +#else // !AOM_ARCH_AARCH64 + int16x8_t s7 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(s))); + + __builtin_prefetch(dst_ptr); + + uint16x8_t d0 = convolve8_8_y(s0, s1, s2, s3, s4, s5, s6, s7, y_filter, + round_offset_vec); + + s0 = s1; + s1 = s2; + s2 = s3; + s3 = s4; + s4 = s5; + s5 = s6; + s6 = s7; + + __builtin_prefetch(d); + + uint16x8_t dd0 = vld1q_u16(d); + + uint8x8_t d0_u8; + compute_dist_wtd_avg_8x1(dd0, d0, fwd_offset, bck_offset, + round_offset_vec, &d0_u8); + + vst1_u8(d_u8, d0_u8); + d_u8 += dst8_stride; + + s += src_stride; + d += dst_stride; + height--; +#endif // AOM_ARCH_AARCH64 + } while (height != 0); + src_ptr += 8; + dst_ptr += 8; + dst8_ptr += 8; + width -= 8; + } while (width != 0); + } +} + +static INLINE void dist_wtd_convolve_y_8tap_avg_neon( + const uint8_t *src_ptr, int src_stride, uint8_t *dst8_ptr, + const int dst8_stride, int w, int h, const int16x8_t y_filter, + ConvolveParams *conv_params) { + const int bd = 8; + const int offset_bits = bd + 2 * FILTER_BITS - ROUND0_BITS; + const int16_t round_offset = (1 << (offset_bits - COMPOUND_ROUND1_BITS)) + + (1 << (offset_bits - COMPOUND_ROUND1_BITS - 1)); + const int16x8_t round_offset_vec = vdupq_n_s16(round_offset); + + CONV_BUF_TYPE *dst_ptr = conv_params->dst; + const int dst_stride = conv_params->dst_stride; + int width = w; + + if (w == 4 || h == 4) { + do { + const uint8_t *s = src_ptr; + CONV_BUF_TYPE *d = dst_ptr; + uint8_t *d_u8 = dst8_ptr; + int height = h; + + __builtin_prefetch(s + 0 * src_stride); + __builtin_prefetch(s + 1 * src_stride); + __builtin_prefetch(s + 2 * src_stride); + __builtin_prefetch(s + 3 * src_stride); + + uint8x8_t t0 = load_unaligned_u8_4x1(s + 0 * src_stride); + uint8x8_t t1 = load_unaligned_u8_4x1(s + 1 * src_stride); + uint8x8_t t2 = load_unaligned_u8_4x1(s + 2 * src_stride); + uint8x8_t t3 = load_unaligned_u8_4x1(s + 3 * src_stride); + uint8x8_t t4 = load_unaligned_u8_4x1(s + 4 * src_stride); + uint8x8_t t5 = load_unaligned_u8_4x1(s + 5 * src_stride); + uint8x8_t t6 = load_unaligned_u8_4x1(s + 6 * src_stride); + + int16x4_t s0 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t0))); + int16x4_t s1 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t1))); + int16x4_t s2 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t2))); + 
int16x4_t s3 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t3))); + int16x4_t s4 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t4))); + int16x4_t s5 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t5))); + int16x4_t s6 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t6))); + + __builtin_prefetch(d + 0 * dst_stride); + __builtin_prefetch(d + 1 * dst_stride); + __builtin_prefetch(d + 2 * dst_stride); + __builtin_prefetch(d + 3 * dst_stride); + + s += 7 * src_stride; + + do { +#if AOM_ARCH_AARCH64 + t0 = load_unaligned_u8_4x1(s + 0 * src_stride); + t1 = load_unaligned_u8_4x1(s + 1 * src_stride); + t2 = load_unaligned_u8_4x1(s + 2 * src_stride); + t3 = load_unaligned_u8_4x1(s + 3 * src_stride); + + int16x4_t s7 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t0))); + int16x4_t s8 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t1))); + int16x4_t s9 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t2))); + int16x4_t s10 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t3))); + + uint16x4_t d0 = convolve8_4_y(s0, s1, s2, s3, s4, s5, s6, s7, y_filter, + vget_low_s16(round_offset_vec)); + uint16x4_t d1 = convolve8_4_y(s1, s2, s3, s4, s5, s6, s7, s8, y_filter, + vget_low_s16(round_offset_vec)); + uint16x4_t d2 = convolve8_4_y(s2, s3, s4, s5, s6, s7, s8, s9, y_filter, + vget_low_s16(round_offset_vec)); + uint16x4_t d3 = convolve8_4_y(s3, s4, s5, s6, s7, s8, s9, s10, y_filter, + vget_low_s16(round_offset_vec)); + + __builtin_prefetch(d + 0 * dst_stride); + __builtin_prefetch(d + 1 * dst_stride); + __builtin_prefetch(d + 2 * dst_stride); + __builtin_prefetch(d + 3 * dst_stride); + + __builtin_prefetch(d_u8 + 0 * dst8_stride); + __builtin_prefetch(d_u8 + 1 * dst8_stride); + __builtin_prefetch(d_u8 + 2 * dst8_stride); + __builtin_prefetch(d_u8 + 3 * dst8_stride); + + uint16x4_t dd0, dd1, dd2, dd3; + load_u16_4x4(d, dst_stride, &dd0, &dd1, &dd2, &dd3); + + uint8x8_t d01, d23; + compute_basic_avg_4x4(dd0, dd1, dd2, dd3, d0, d1, d2, d3, + round_offset_vec, &d01, &d23); + + store_u8x4_strided_x2(d_u8 + 0 * dst8_stride, dst8_stride, d01); + store_u8x4_strided_x2(d_u8 + 2 * dst8_stride, dst8_stride, d23); + + s0 = s4; + s1 = s5; + s2 = s6; + s3 = s7; + s4 = s8; + s5 = s9; + s6 = s10; + s += 4 * src_stride; + d += 4 * dst_stride; + d_u8 += 4 * dst8_stride; + height -= 4; +#else // !AOM_ARCH_AARCH64 + t0 = load_unaligned_u8_4x1(s); + int16x4_t s7 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t0))); + + uint16x4_t d0 = convolve8_4_y(s0, s1, s2, s3, s4, s5, s6, s7, y_filter, + vget_low_s16(round_offset_vec)); + + __builtin_prefetch(d); + + uint16x4_t dd0 = vld1_u16(d); + + uint8x8_t d01; + compute_basic_avg_4x1(dd0, d0, vget_low_s16(round_offset_vec), &d01); + + store_u8_4x1(d_u8, d01); + + s0 = s1; + s1 = s2; + s2 = s3; + s3 = s4; + s4 = s5; + s5 = s6; + s6 = s7; + s += src_stride; + d += dst_stride; + d_u8 += dst8_stride; + height--; +#endif // AOM_ARCH_AARCH64 + } while (height != 0); + src_ptr += 4; + dst_ptr += 4; + dst8_ptr += 4; + width -= 4; + } while (width != 0); + } else { + do { + const uint8_t *s = src_ptr; + CONV_BUF_TYPE *d = dst_ptr; + uint8_t *d_u8 = dst8_ptr; + int height = h; + + __builtin_prefetch(s + 0 * src_stride); + __builtin_prefetch(s + 1 * src_stride); + __builtin_prefetch(s + 2 * src_stride); + __builtin_prefetch(s + 3 * src_stride); + __builtin_prefetch(s + 4 * src_stride); + __builtin_prefetch(s + 5 * src_stride); + __builtin_prefetch(s + 6 * src_stride); + __builtin_prefetch(s + 7 * src_stride); + + uint8x8_t t0, t1, t2, t3, t4, t5, t6; + load_u8_8x7(s, src_stride, &t0, &t1, &t2, &t3, &t4, &t5, &t6); + + 
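+      // Widen the seven seed rows from u8 to s16 up front so the filter loop
+      // below can feed them straight into the multiply-accumulate taps.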
int16x8_t s0 = vreinterpretq_s16_u16(vmovl_u8(t0)); + int16x8_t s1 = vreinterpretq_s16_u16(vmovl_u8(t1)); + int16x8_t s2 = vreinterpretq_s16_u16(vmovl_u8(t2)); + int16x8_t s3 = vreinterpretq_s16_u16(vmovl_u8(t3)); + int16x8_t s4 = vreinterpretq_s16_u16(vmovl_u8(t4)); + int16x8_t s5 = vreinterpretq_s16_u16(vmovl_u8(t5)); + int16x8_t s6 = vreinterpretq_s16_u16(vmovl_u8(t6)); + + s += 7 * src_stride; + + do { +#if AOM_ARCH_AARCH64 + uint8x8_t t7; + load_u8_8x8(s, src_stride, &t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7); + + int16x8_t s7 = vreinterpretq_s16_u16(vmovl_u8(t0)); + int16x8_t s8 = vreinterpretq_s16_u16(vmovl_u8(t1)); + int16x8_t s9 = vreinterpretq_s16_u16(vmovl_u8(t2)); + int16x8_t s10 = vreinterpretq_s16_u16(vmovl_u8(t3)); + int16x8_t s11 = vreinterpretq_s16_u16(vmovl_u8(t4)); + int16x8_t s12 = vreinterpretq_s16_u16(vmovl_u8(t5)); + int16x8_t s13 = vreinterpretq_s16_u16(vmovl_u8(t6)); + int16x8_t s14 = vreinterpretq_s16_u16(vmovl_u8(t7)); + + __builtin_prefetch(dst_ptr + 0 * dst_stride); + __builtin_prefetch(dst_ptr + 1 * dst_stride); + __builtin_prefetch(dst_ptr + 2 * dst_stride); + __builtin_prefetch(dst_ptr + 3 * dst_stride); + + uint16x8_t d0 = convolve8_8_y(s0, s1, s2, s3, s4, s5, s6, s7, y_filter, + round_offset_vec); + uint16x8_t d1 = convolve8_8_y(s1, s2, s3, s4, s5, s6, s7, s8, y_filter, + round_offset_vec); + uint16x8_t d2 = convolve8_8_y(s2, s3, s4, s5, s6, s7, s8, s9, y_filter, + round_offset_vec); + uint16x8_t d3 = convolve8_8_y(s3, s4, s5, s6, s7, s8, s9, s10, y_filter, + round_offset_vec); + uint16x8_t d4 = convolve8_8_y(s4, s5, s6, s7, s8, s9, s10, s11, + y_filter, round_offset_vec); + uint16x8_t d5 = convolve8_8_y(s5, s6, s7, s8, s9, s10, s11, s12, + y_filter, round_offset_vec); + uint16x8_t d6 = convolve8_8_y(s6, s7, s8, s9, s10, s11, s12, s13, + y_filter, round_offset_vec); + uint16x8_t d7 = convolve8_8_y(s7, s8, s9, s10, s11, s12, s13, s14, + y_filter, round_offset_vec); + + __builtin_prefetch(d + 0 * dst8_stride); + __builtin_prefetch(d + 1 * dst8_stride); + __builtin_prefetch(d + 2 * dst8_stride); + __builtin_prefetch(d + 3 * dst8_stride); + + uint16x8_t dd0, dd1, dd2, dd3; + load_u16_8x4(d, dst_stride, &dd0, &dd1, &dd2, &dd3); + + uint8x8_t d0_u8, d1_u8, d2_u8, d3_u8; + compute_basic_avg_8x4(dd0, dd1, dd2, dd3, d0, d1, d2, d3, + round_offset_vec, &d0_u8, &d1_u8, &d2_u8, &d3_u8); + + store_u8_8x4(d_u8, dst8_stride, d0_u8, d1_u8, d2_u8, d3_u8); + d_u8 += 4 * dst8_stride; + + uint16x8_t dd4, dd5, dd6, dd7; + load_u16_8x4(d + 4 * dst_stride, dst_stride, &dd4, &dd5, &dd6, &dd7); + + uint8x8_t d4_u8, d5_u8, d6_u8, d7_u8; + compute_basic_avg_8x4(dd4, dd5, dd6, dd7, d4, d5, d6, d7, + round_offset_vec, &d4_u8, &d5_u8, &d6_u8, &d7_u8); + + store_u8_8x4(d_u8, dst8_stride, d4_u8, d5_u8, d6_u8, d7_u8); + d_u8 += 4 * dst8_stride; + + s0 = s8; + s1 = s9; + s2 = s10; + s3 = s11; + s4 = s12; + s5 = s13; + s6 = s14; + s += 8 * src_stride; + d += 8 * dst_stride; + height -= 8; +#else // !AOM_ARCH_AARCH64 + int16x8_t s7 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(s))); + + __builtin_prefetch(dst_ptr); + + uint16x8_t d0 = convolve8_8_y(s0, s1, s2, s3, s4, s5, s6, s7, y_filter, + round_offset_vec); + + s0 = s1; + s1 = s2; + s2 = s3; + s3 = s4; + s4 = s5; + s5 = s6; + s6 = s7; + + __builtin_prefetch(d); + + uint16x8_t dd0 = vld1q_u16(d); + + uint8x8_t d0_u8; + compute_basic_avg_8x1(dd0, d0, round_offset_vec, &d0_u8); + + vst1_u8(d_u8, d0_u8); + d_u8 += dst8_stride; + + s += src_stride; + d += dst_stride; + height--; +#endif // AOM_ARCH_AARCH64 + } while (height != 0); + src_ptr += 8; + 
dst_ptr += 8; + dst8_ptr += 8; + width -= 8; + } while (width != 0); + } +} + +static INLINE void dist_wtd_convolve_y_8tap_neon(const uint8_t *src_ptr, + int src_stride, int w, int h, + const int16x8_t y_filter, + ConvolveParams *conv_params) { + const int bd = 8; + const int offset_bits = bd + 2 * FILTER_BITS - ROUND0_BITS; + const int16_t round_offset = (1 << (offset_bits - COMPOUND_ROUND1_BITS)) + + (1 << (offset_bits - COMPOUND_ROUND1_BITS - 1)); + const int16x8_t round_offset_vec = vdupq_n_s16(round_offset); + + CONV_BUF_TYPE *dst_ptr = conv_params->dst; + const int dst_stride = conv_params->dst_stride; + int width = w; + + if (w == 4 || h == 4) { + do { + const uint8_t *s = src_ptr; + CONV_BUF_TYPE *d = dst_ptr; + int height = h; + + __builtin_prefetch(s + 0 * src_stride); + __builtin_prefetch(s + 1 * src_stride); + __builtin_prefetch(s + 2 * src_stride); + __builtin_prefetch(s + 3 * src_stride); + + uint8x8_t t0 = load_unaligned_u8_4x1(s + 0 * src_stride); + uint8x8_t t1 = load_unaligned_u8_4x1(s + 1 * src_stride); + uint8x8_t t2 = load_unaligned_u8_4x1(s + 2 * src_stride); + uint8x8_t t3 = load_unaligned_u8_4x1(s + 3 * src_stride); + uint8x8_t t4 = load_unaligned_u8_4x1(s + 4 * src_stride); + uint8x8_t t5 = load_unaligned_u8_4x1(s + 5 * src_stride); + uint8x8_t t6 = load_unaligned_u8_4x1(s + 6 * src_stride); + + int16x4_t s0 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t0))); + int16x4_t s1 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t1))); + int16x4_t s2 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t2))); + int16x4_t s3 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t3))); + int16x4_t s4 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t4))); + int16x4_t s5 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t5))); + int16x4_t s6 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t6))); + + __builtin_prefetch(d + 0 * dst_stride); + __builtin_prefetch(d + 1 * dst_stride); + __builtin_prefetch(d + 2 * dst_stride); + __builtin_prefetch(d + 3 * dst_stride); + + s += 7 * src_stride; + + do { +#if AOM_ARCH_AARCH64 + t0 = load_unaligned_u8_4x1(s + 0 * src_stride); + t1 = load_unaligned_u8_4x1(s + 1 * src_stride); + t2 = load_unaligned_u8_4x1(s + 2 * src_stride); + t3 = load_unaligned_u8_4x1(s + 3 * src_stride); + + int16x4_t s7 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t0))); + int16x4_t s8 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t1))); + int16x4_t s9 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t2))); + int16x4_t s10 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t3))); + + uint16x4_t d0 = convolve8_4_y(s0, s1, s2, s3, s4, s5, s6, s7, y_filter, + vget_low_s16(round_offset_vec)); + uint16x4_t d1 = convolve8_4_y(s1, s2, s3, s4, s5, s6, s7, s8, y_filter, + vget_low_s16(round_offset_vec)); + uint16x4_t d2 = convolve8_4_y(s2, s3, s4, s5, s6, s7, s8, s9, y_filter, + vget_low_s16(round_offset_vec)); + uint16x4_t d3 = convolve8_4_y(s3, s4, s5, s6, s7, s8, s9, s10, y_filter, + vget_low_s16(round_offset_vec)); + + store_u16_4x4(d, dst_stride, d0, d1, d2, d3); + + s0 = s4; + s1 = s5; + s2 = s6; + s3 = s7; + s4 = s8; + s5 = s9; + s6 = s10; + s += 4 * src_stride; + d += 4 * dst_stride; + height -= 4; +#else // !AOM_ARCH_AARCH64 + t0 = load_unaligned_u8_4x1(s); + int16x4_t s7 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t0))); + + uint16x4_t d0 = convolve8_4_y(s0, s1, s2, s3, s4, s5, s6, s7, y_filter, + vget_low_s16(round_offset_vec)); + + vst1_u16(d, d0); + + s0 = s1; + s1 = s2; + s2 = s3; + s3 = s4; + s4 = s5; + s5 = s6; + s6 = s7; + s += src_stride; + d += dst_stride; + height--; +#endif // AOM_ARCH_AARCH64 + 
} while (height != 0); + src_ptr += 4; + dst_ptr += 4; + width -= 4; + } while (width != 0); + } else { + do { + const uint8_t *s = src_ptr; + CONV_BUF_TYPE *d = dst_ptr; + int height = h; + + __builtin_prefetch(s + 0 * src_stride); + __builtin_prefetch(s + 1 * src_stride); + __builtin_prefetch(s + 2 * src_stride); + __builtin_prefetch(s + 3 * src_stride); + __builtin_prefetch(s + 4 * src_stride); + __builtin_prefetch(s + 5 * src_stride); + __builtin_prefetch(s + 6 * src_stride); + __builtin_prefetch(s + 7 * src_stride); + + uint8x8_t t0, t1, t2, t3, t4, t5, t6; + load_u8_8x7(s, src_stride, &t0, &t1, &t2, &t3, &t4, &t5, &t6); + + int16x8_t s0 = vreinterpretq_s16_u16(vmovl_u8(t0)); + int16x8_t s1 = vreinterpretq_s16_u16(vmovl_u8(t1)); + int16x8_t s2 = vreinterpretq_s16_u16(vmovl_u8(t2)); + int16x8_t s3 = vreinterpretq_s16_u16(vmovl_u8(t3)); + int16x8_t s4 = vreinterpretq_s16_u16(vmovl_u8(t4)); + int16x8_t s5 = vreinterpretq_s16_u16(vmovl_u8(t5)); + int16x8_t s6 = vreinterpretq_s16_u16(vmovl_u8(t6)); + + s += 7 * src_stride; + + do { +#if AOM_ARCH_AARCH64 + uint8x8_t t7; + load_u8_8x8(s, src_stride, &t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7); + + int16x8_t s7 = vreinterpretq_s16_u16(vmovl_u8(t0)); + int16x8_t s8 = vreinterpretq_s16_u16(vmovl_u8(t1)); + int16x8_t s9 = vreinterpretq_s16_u16(vmovl_u8(t2)); + int16x8_t s10 = vreinterpretq_s16_u16(vmovl_u8(t3)); + int16x8_t s11 = vreinterpretq_s16_u16(vmovl_u8(t4)); + int16x8_t s12 = vreinterpretq_s16_u16(vmovl_u8(t5)); + int16x8_t s13 = vreinterpretq_s16_u16(vmovl_u8(t6)); + int16x8_t s14 = vreinterpretq_s16_u16(vmovl_u8(t7)); + + __builtin_prefetch(dst_ptr + 0 * dst_stride); + __builtin_prefetch(dst_ptr + 1 * dst_stride); + __builtin_prefetch(dst_ptr + 2 * dst_stride); + __builtin_prefetch(dst_ptr + 3 * dst_stride); + + uint16x8_t d0 = convolve8_8_y(s0, s1, s2, s3, s4, s5, s6, s7, y_filter, + round_offset_vec); + uint16x8_t d1 = convolve8_8_y(s1, s2, s3, s4, s5, s6, s7, s8, y_filter, + round_offset_vec); + uint16x8_t d2 = convolve8_8_y(s2, s3, s4, s5, s6, s7, s8, s9, y_filter, + round_offset_vec); + uint16x8_t d3 = convolve8_8_y(s3, s4, s5, s6, s7, s8, s9, s10, y_filter, + round_offset_vec); + uint16x8_t d4 = convolve8_8_y(s4, s5, s6, s7, s8, s9, s10, s11, + y_filter, round_offset_vec); + uint16x8_t d5 = convolve8_8_y(s5, s6, s7, s8, s9, s10, s11, s12, + y_filter, round_offset_vec); + uint16x8_t d6 = convolve8_8_y(s6, s7, s8, s9, s10, s11, s12, s13, + y_filter, round_offset_vec); + uint16x8_t d7 = convolve8_8_y(s7, s8, s9, s10, s11, s12, s13, s14, + y_filter, round_offset_vec); + + store_u16_8x8(d, dst_stride, d0, d1, d2, d3, d4, d5, d6, d7); + + s0 = s8; + s1 = s9; + s2 = s10; + s3 = s11; + s4 = s12; + s5 = s13; + s6 = s14; + s += 8 * src_stride; + d += 8 * dst_stride; + height -= 8; +#else // !AOM_ARCH_AARCH64 + int16x8_t s7 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(s))); + + __builtin_prefetch(dst_ptr); + + uint16x8_t d0 = convolve8_8_y(s0, s1, s2, s3, s4, s5, s6, s7, y_filter, + round_offset_vec); + + s0 = s1; + s1 = s2; + s2 = s3; + s3 = s4; + s4 = s5; + s5 = s6; + s6 = s7; + + vst1q_u16(d, d0); + + s += src_stride; + d += dst_stride; + height--; +#endif // AOM_ARCH_AARCH64 + } while (height != 0); + src_ptr += 8; + dst_ptr += 8; + width -= 8; + } while (width != 0); + } +} + +void av1_dist_wtd_convolve_y_neon(const uint8_t *src, int src_stride, + uint8_t *dst8, int dst8_stride, int w, int h, + const InterpFilterParams *filter_params_y, + const int subpel_y_qn, + ConvolveParams *conv_params) { + assert(w % 4 == 0); + assert(h % 4 == 0); + + // 
Vertical filter. + const int16_t *y_filter_ptr = av1_get_interp_filter_subpel_kernel( + filter_params_y, subpel_y_qn & SUBPEL_MASK); + // Filter values are even, so downshift by 1 to reduce intermediate + // precision requirements. + const int16x8_t y_filter = vshrq_n_s16(vld1q_s16(y_filter_ptr), 1); + + const int vert_offset = filter_params_y->taps / 2 - 1; + const uint8_t *src_ptr = src - (vert_offset * src_stride); + + if (get_filter_tap(filter_params_y, subpel_y_qn) <= 6) { + if (conv_params->do_average) { + if (UNLIKELY(conv_params->use_dist_wtd_comp_avg)) { + dist_wtd_convolve_y_6tap_dist_wtd_avg_neon( + src_ptr + src_stride, src_stride, dst8, dst8_stride, w, h, y_filter, + conv_params); + } else { + dist_wtd_convolve_y_6tap_avg_neon(src_ptr + src_stride, src_stride, + dst8, dst8_stride, w, h, y_filter, + conv_params); + } + } else { + dist_wtd_convolve_y_6tap_neon(src_ptr + src_stride, src_stride, w, h, + y_filter, conv_params); + } + } else { + if (conv_params->do_average) { + if (UNLIKELY(conv_params->use_dist_wtd_comp_avg)) { + dist_wtd_convolve_y_8tap_dist_wtd_avg_neon(src_ptr, src_stride, dst8, + dst8_stride, w, h, y_filter, + conv_params); + } else { + dist_wtd_convolve_y_8tap_avg_neon(src_ptr, src_stride, dst8, + dst8_stride, w, h, y_filter, + conv_params); + } + } else { + dist_wtd_convolve_y_8tap_neon(src_ptr, src_stride, w, h, y_filter, + conv_params); + } + } +} diff --git a/third_party/aom/av1/common/arm/compound_convolve_neon.h b/third_party/aom/av1/common/arm/compound_convolve_neon.h new file mode 100644 index 0000000000..d719680a32 --- /dev/null +++ b/third_party/aom/av1/common/arm/compound_convolve_neon.h @@ -0,0 +1,1164 @@ +/* + * Copyright (c) 2023, Alliance for Open Media. All rights reserved + * + * This source code is subject to the terms of the BSD 2 Clause License and + * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License + * was not distributed with this source code in the LICENSE file, you can + * obtain it at www.aomedia.org/license/software. If the Alliance for Open + * Media Patent License 1.0 was not distributed with this source code in the + * PATENTS file, you can obtain it at www.aomedia.org/license/patent. 
+ */ +#ifndef AOM_AV1_COMMON_ARM_COMPOUND_CONVOLVE_NEON_H_ +#define AOM_AV1_COMMON_ARM_COMPOUND_CONVOLVE_NEON_H_ + +#include <arm_neon.h> + +#include "av1/common/convolve.h" +#include "av1/common/enums.h" +#include "av1/common/filter.h" + +static INLINE void compute_dist_wtd_avg_4x1(uint16x4_t dd0, uint16x4_t d0, + const uint16_t fwd_offset, + const uint16_t bck_offset, + const int16x4_t round_offset, + uint8x8_t *d0_u8) { + uint32x4_t blend0 = vmull_n_u16(dd0, fwd_offset); + blend0 = vmlal_n_u16(blend0, d0, bck_offset); + + uint16x4_t avg0 = vshrn_n_u32(blend0, DIST_PRECISION_BITS); + + int16x4_t dst0 = vsub_s16(vreinterpret_s16_u16(avg0), round_offset); + + int16x8_t dst0q = vcombine_s16(dst0, vdup_n_s16(0)); + + *d0_u8 = vqrshrun_n_s16(dst0q, FILTER_BITS - ROUND0_BITS); +} + +static INLINE void compute_basic_avg_4x1(uint16x4_t dd0, uint16x4_t d0, + const int16x4_t round_offset, + uint8x8_t *d0_u8) { + uint16x4_t avg0 = vhadd_u16(dd0, d0); + + int16x4_t dst0 = vsub_s16(vreinterpret_s16_u16(avg0), round_offset); + + int16x8_t dst0q = vcombine_s16(dst0, vdup_n_s16(0)); + + *d0_u8 = vqrshrun_n_s16(dst0q, FILTER_BITS - ROUND0_BITS); +} + +static INLINE void compute_dist_wtd_avg_8x1(uint16x8_t dd0, uint16x8_t d0, + const uint16_t fwd_offset, + const uint16_t bck_offset, + const int16x8_t round_offset, + uint8x8_t *d0_u8) { + uint32x4_t blend0_lo = vmull_n_u16(vget_low_u16(dd0), fwd_offset); + blend0_lo = vmlal_n_u16(blend0_lo, vget_low_u16(d0), bck_offset); + uint32x4_t blend0_hi = vmull_n_u16(vget_high_u16(dd0), fwd_offset); + blend0_hi = vmlal_n_u16(blend0_hi, vget_high_u16(d0), bck_offset); + + uint16x8_t avg0 = vcombine_u16(vshrn_n_u32(blend0_lo, DIST_PRECISION_BITS), + vshrn_n_u32(blend0_hi, DIST_PRECISION_BITS)); + + int16x8_t dst0 = vsubq_s16(vreinterpretq_s16_u16(avg0), round_offset); + + *d0_u8 = vqrshrun_n_s16(dst0, FILTER_BITS - ROUND0_BITS); +} + +static INLINE void compute_basic_avg_8x1(uint16x8_t dd0, uint16x8_t d0, + const int16x8_t round_offset, + uint8x8_t *d0_u8) { + uint16x8_t avg0 = vhaddq_u16(dd0, d0); + + int16x8_t dst0 = vsubq_s16(vreinterpretq_s16_u16(avg0), round_offset); + + *d0_u8 = vqrshrun_n_s16(dst0, FILTER_BITS - ROUND0_BITS); +} + +static INLINE void compute_dist_wtd_avg_4x4( + uint16x4_t dd0, uint16x4_t dd1, uint16x4_t dd2, uint16x4_t dd3, + uint16x4_t d0, uint16x4_t d1, uint16x4_t d2, uint16x4_t d3, + const uint16_t fwd_offset, const uint16_t bck_offset, + const int16x8_t round_offset, uint8x8_t *d01_u8, uint8x8_t *d23_u8) { + uint32x4_t blend0 = vmull_n_u16(dd0, fwd_offset); + blend0 = vmlal_n_u16(blend0, d0, bck_offset); + uint32x4_t blend1 = vmull_n_u16(dd1, fwd_offset); + blend1 = vmlal_n_u16(blend1, d1, bck_offset); + uint32x4_t blend2 = vmull_n_u16(dd2, fwd_offset); + blend2 = vmlal_n_u16(blend2, d2, bck_offset); + uint32x4_t blend3 = vmull_n_u16(dd3, fwd_offset); + blend3 = vmlal_n_u16(blend3, d3, bck_offset); + + uint16x4_t avg0 = vshrn_n_u32(blend0, DIST_PRECISION_BITS); + uint16x4_t avg1 = vshrn_n_u32(blend1, DIST_PRECISION_BITS); + uint16x4_t avg2 = vshrn_n_u32(blend2, DIST_PRECISION_BITS); + uint16x4_t avg3 = vshrn_n_u32(blend3, DIST_PRECISION_BITS); + + int16x8_t dst_01 = vreinterpretq_s16_u16(vcombine_u16(avg0, avg1)); + int16x8_t dst_23 = vreinterpretq_s16_u16(vcombine_u16(avg2, avg3)); + + dst_01 = vsubq_s16(dst_01, round_offset); + dst_23 = vsubq_s16(dst_23, round_offset); + + *d01_u8 = vqrshrun_n_s16(dst_01, FILTER_BITS - ROUND0_BITS); + *d23_u8 = vqrshrun_n_s16(dst_23, FILTER_BITS - ROUND0_BITS); +} + +static INLINE void 
compute_basic_avg_4x4(uint16x4_t dd0, uint16x4_t dd1, + uint16x4_t dd2, uint16x4_t dd3, + uint16x4_t d0, uint16x4_t d1, + uint16x4_t d2, uint16x4_t d3, + const int16x8_t round_offset, + uint8x8_t *d01_u8, uint8x8_t *d23_u8) { + uint16x4_t avg0 = vhadd_u16(dd0, d0); + uint16x4_t avg1 = vhadd_u16(dd1, d1); + uint16x4_t avg2 = vhadd_u16(dd2, d2); + uint16x4_t avg3 = vhadd_u16(dd3, d3); + + int16x8_t dst_01 = vreinterpretq_s16_u16(vcombine_u16(avg0, avg1)); + int16x8_t dst_23 = vreinterpretq_s16_u16(vcombine_u16(avg2, avg3)); + + dst_01 = vsubq_s16(dst_01, round_offset); + dst_23 = vsubq_s16(dst_23, round_offset); + + *d01_u8 = vqrshrun_n_s16(dst_01, FILTER_BITS - ROUND0_BITS); + *d23_u8 = vqrshrun_n_s16(dst_23, FILTER_BITS - ROUND0_BITS); +} + +static INLINE void compute_dist_wtd_avg_8x4( + uint16x8_t dd0, uint16x8_t dd1, uint16x8_t dd2, uint16x8_t dd3, + uint16x8_t d0, uint16x8_t d1, uint16x8_t d2, uint16x8_t d3, + const uint16_t fwd_offset, const uint16_t bck_offset, + const int16x8_t round_offset, uint8x8_t *d0_u8, uint8x8_t *d1_u8, + uint8x8_t *d2_u8, uint8x8_t *d3_u8) { + uint32x4_t blend0_lo = vmull_n_u16(vget_low_u16(dd0), fwd_offset); + blend0_lo = vmlal_n_u16(blend0_lo, vget_low_u16(d0), bck_offset); + uint32x4_t blend0_hi = vmull_n_u16(vget_high_u16(dd0), fwd_offset); + blend0_hi = vmlal_n_u16(blend0_hi, vget_high_u16(d0), bck_offset); + + uint32x4_t blend1_lo = vmull_n_u16(vget_low_u16(dd1), fwd_offset); + blend1_lo = vmlal_n_u16(blend1_lo, vget_low_u16(d1), bck_offset); + uint32x4_t blend1_hi = vmull_n_u16(vget_high_u16(dd1), fwd_offset); + blend1_hi = vmlal_n_u16(blend1_hi, vget_high_u16(d1), bck_offset); + + uint32x4_t blend2_lo = vmull_n_u16(vget_low_u16(dd2), fwd_offset); + blend2_lo = vmlal_n_u16(blend2_lo, vget_low_u16(d2), bck_offset); + uint32x4_t blend2_hi = vmull_n_u16(vget_high_u16(dd2), fwd_offset); + blend2_hi = vmlal_n_u16(blend2_hi, vget_high_u16(d2), bck_offset); + + uint32x4_t blend3_lo = vmull_n_u16(vget_low_u16(dd3), fwd_offset); + blend3_lo = vmlal_n_u16(blend3_lo, vget_low_u16(d3), bck_offset); + uint32x4_t blend3_hi = vmull_n_u16(vget_high_u16(dd3), fwd_offset); + blend3_hi = vmlal_n_u16(blend3_hi, vget_high_u16(d3), bck_offset); + + uint16x8_t avg0 = vcombine_u16(vshrn_n_u32(blend0_lo, DIST_PRECISION_BITS), + vshrn_n_u32(blend0_hi, DIST_PRECISION_BITS)); + uint16x8_t avg1 = vcombine_u16(vshrn_n_u32(blend1_lo, DIST_PRECISION_BITS), + vshrn_n_u32(blend1_hi, DIST_PRECISION_BITS)); + uint16x8_t avg2 = vcombine_u16(vshrn_n_u32(blend2_lo, DIST_PRECISION_BITS), + vshrn_n_u32(blend2_hi, DIST_PRECISION_BITS)); + uint16x8_t avg3 = vcombine_u16(vshrn_n_u32(blend3_lo, DIST_PRECISION_BITS), + vshrn_n_u32(blend3_hi, DIST_PRECISION_BITS)); + + int16x8_t dst0 = vsubq_s16(vreinterpretq_s16_u16(avg0), round_offset); + int16x8_t dst1 = vsubq_s16(vreinterpretq_s16_u16(avg1), round_offset); + int16x8_t dst2 = vsubq_s16(vreinterpretq_s16_u16(avg2), round_offset); + int16x8_t dst3 = vsubq_s16(vreinterpretq_s16_u16(avg3), round_offset); + + *d0_u8 = vqrshrun_n_s16(dst0, FILTER_BITS - ROUND0_BITS); + *d1_u8 = vqrshrun_n_s16(dst1, FILTER_BITS - ROUND0_BITS); + *d2_u8 = vqrshrun_n_s16(dst2, FILTER_BITS - ROUND0_BITS); + *d3_u8 = vqrshrun_n_s16(dst3, FILTER_BITS - ROUND0_BITS); +} + +static INLINE void compute_basic_avg_8x4(uint16x8_t dd0, uint16x8_t dd1, + uint16x8_t dd2, uint16x8_t dd3, + uint16x8_t d0, uint16x8_t d1, + uint16x8_t d2, uint16x8_t d3, + const int16x8_t round_offset, + uint8x8_t *d0_u8, uint8x8_t *d1_u8, + uint8x8_t *d2_u8, uint8x8_t *d3_u8) { + uint16x8_t avg0 = 
vhaddq_u16(dd0, d0); + uint16x8_t avg1 = vhaddq_u16(dd1, d1); + uint16x8_t avg2 = vhaddq_u16(dd2, d2); + uint16x8_t avg3 = vhaddq_u16(dd3, d3); + + int16x8_t dst0 = vsubq_s16(vreinterpretq_s16_u16(avg0), round_offset); + int16x8_t dst1 = vsubq_s16(vreinterpretq_s16_u16(avg1), round_offset); + int16x8_t dst2 = vsubq_s16(vreinterpretq_s16_u16(avg2), round_offset); + int16x8_t dst3 = vsubq_s16(vreinterpretq_s16_u16(avg3), round_offset); + + *d0_u8 = vqrshrun_n_s16(dst0, FILTER_BITS - ROUND0_BITS); + *d1_u8 = vqrshrun_n_s16(dst1, FILTER_BITS - ROUND0_BITS); + *d2_u8 = vqrshrun_n_s16(dst2, FILTER_BITS - ROUND0_BITS); + *d3_u8 = vqrshrun_n_s16(dst3, FILTER_BITS - ROUND0_BITS); +} + +static INLINE uint16x4_t +convolve6_4_2d_v(const int16x4_t s0, const int16x4_t s1, const int16x4_t s2, + const int16x4_t s3, const int16x4_t s4, const int16x4_t s5, + const int16x8_t y_filter, const int32x4_t offset_const) { + const int16x4_t y_filter_0_3 = vget_low_s16(y_filter); + const int16x4_t y_filter_4_7 = vget_high_s16(y_filter); + + int32x4_t sum = offset_const; + // Filter values at indices 0 and 7 are 0. + sum = vmlal_lane_s16(sum, s0, y_filter_0_3, 1); + sum = vmlal_lane_s16(sum, s1, y_filter_0_3, 2); + sum = vmlal_lane_s16(sum, s2, y_filter_0_3, 3); + sum = vmlal_lane_s16(sum, s3, y_filter_4_7, 0); + sum = vmlal_lane_s16(sum, s4, y_filter_4_7, 1); + sum = vmlal_lane_s16(sum, s5, y_filter_4_7, 2); + + return vqrshrun_n_s32(sum, COMPOUND_ROUND1_BITS); +} + +static INLINE uint16x8_t +convolve6_8_2d_v(const int16x8_t s0, const int16x8_t s1, const int16x8_t s2, + const int16x8_t s3, const int16x8_t s4, const int16x8_t s5, + const int16x8_t y_filter, const int32x4_t offset_const) { + const int16x4_t y_filter_0_3 = vget_low_s16(y_filter); + const int16x4_t y_filter_4_7 = vget_high_s16(y_filter); + + int32x4_t sum0 = offset_const; + // Filter values at indices 0 and 7 are 0. 
+ sum0 = vmlal_lane_s16(sum0, vget_low_s16(s0), y_filter_0_3, 1); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s1), y_filter_0_3, 2); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s2), y_filter_0_3, 3); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s3), y_filter_4_7, 0); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s4), y_filter_4_7, 1); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s5), y_filter_4_7, 2); + + int32x4_t sum1 = offset_const; + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s0), y_filter_0_3, 1); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s1), y_filter_0_3, 2); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s2), y_filter_0_3, 3); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s3), y_filter_4_7, 0); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s4), y_filter_4_7, 1); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s5), y_filter_4_7, 2); + + return vcombine_u16(vqrshrun_n_s32(sum0, COMPOUND_ROUND1_BITS), + vqrshrun_n_s32(sum1, COMPOUND_ROUND1_BITS)); +} + +static INLINE void dist_wtd_convolve_2d_vert_6tap_dist_wtd_avg_neon( + int16_t *src_ptr, const int src_stride, uint8_t *dst8_ptr, int dst8_stride, + ConvolveParams *conv_params, const int16x8_t y_filter, int h, int w) { + const int bd = 8; + const int offset_bits = bd + 2 * FILTER_BITS - ROUND0_BITS; + const int32x4_t offset_const = vdupq_n_s32(1 << offset_bits); + const int16_t round_offset = (1 << (offset_bits - COMPOUND_ROUND1_BITS)) + + (1 << (offset_bits - COMPOUND_ROUND1_BITS - 1)); + const int16x8_t round_offset_vec = vdupq_n_s16(round_offset); + + const uint16_t fwd_offset = conv_params->fwd_offset; + const uint16_t bck_offset = conv_params->bck_offset; + + CONV_BUF_TYPE *dst_ptr = conv_params->dst; + const int dst_stride = conv_params->dst_stride; + + if (w == 4) { + int16x4_t s0, s1, s2, s3, s4; + load_s16_4x5(src_ptr, src_stride, &s0, &s1, &s2, &s3, &s4); + src_ptr += 5 * src_stride; + + do { +#if AOM_ARCH_AARCH64 + int16x4_t s5, s6, s7, s8; + load_s16_4x4(src_ptr, src_stride, &s5, &s6, &s7, &s8); + + uint16x4_t d0 = + convolve6_4_2d_v(s0, s1, s2, s3, s4, s5, y_filter, offset_const); + uint16x4_t d1 = + convolve6_4_2d_v(s1, s2, s3, s4, s5, s6, y_filter, offset_const); + uint16x4_t d2 = + convolve6_4_2d_v(s2, s3, s4, s5, s6, s7, y_filter, offset_const); + uint16x4_t d3 = + convolve6_4_2d_v(s3, s4, s5, s6, s7, s8, y_filter, offset_const); + + uint16x4_t dd0, dd1, dd2, dd3; + load_u16_4x4(dst_ptr, dst_stride, &dd0, &dd1, &dd2, &dd3); + + uint8x8_t d01_u8, d23_u8; + compute_dist_wtd_avg_4x4(dd0, dd1, dd2, dd3, d0, d1, d2, d3, fwd_offset, + bck_offset, round_offset_vec, &d01_u8, &d23_u8); + + store_u8x4_strided_x2(dst8_ptr + 0 * dst8_stride, dst8_stride, d01_u8); + store_u8x4_strided_x2(dst8_ptr + 2 * dst8_stride, dst8_stride, d23_u8); + dst8_ptr += 4 * dst8_stride; + + s0 = s4; + s1 = s5; + s2 = s6; + s3 = s7; + s4 = s8; + src_ptr += 4 * src_stride; + dst_ptr += 4 * dst_stride; + h -= 4; +#else // !AOM_ARCH_AARCH64 + int16x4_t s5 = vld1_s16(src_ptr); + + uint16x4_t d0 = + convolve6_4_2d_v(s0, s1, s2, s3, s4, s5, y_filter, offset_const); + + uint16x4_t dd0 = vld1_u16(dst_ptr); + + uint8x8_t d01_u8; + compute_dist_wtd_avg_4x1(dd0, d0, fwd_offset, bck_offset, + vget_low_s16(round_offset_vec), &d01_u8); + + store_u8_4x1(dst8_ptr, d01_u8); + dst8_ptr += dst8_stride; + + s0 = s1; + s1 = s2; + s2 = s3; + s3 = s4; + s4 = s5; + src_ptr += src_stride; + dst_ptr += dst_stride; + h--; +#endif // AOM_ARCH_AARCH64 + } while (h != 0); + } else { + do { + int16_t *s = src_ptr; + CONV_BUF_TYPE *d = dst_ptr; + uint8_t *d_u8 = dst8_ptr; + int height = h; + + 
int16x8_t s0, s1, s2, s3, s4; + load_s16_8x5(s, src_stride, &s0, &s1, &s2, &s3, &s4); + s += 5 * src_stride; + + do { +#if AOM_ARCH_AARCH64 + int16x8_t s5, s6, s7, s8; + load_s16_8x4(s, src_stride, &s5, &s6, &s7, &s8); + + uint16x8_t d0 = + convolve6_8_2d_v(s0, s1, s2, s3, s4, s5, y_filter, offset_const); + uint16x8_t d1 = + convolve6_8_2d_v(s1, s2, s3, s4, s5, s6, y_filter, offset_const); + uint16x8_t d2 = + convolve6_8_2d_v(s2, s3, s4, s5, s6, s7, y_filter, offset_const); + uint16x8_t d3 = + convolve6_8_2d_v(s3, s4, s5, s6, s7, s8, y_filter, offset_const); + + uint16x8_t dd0, dd1, dd2, dd3; + load_u16_8x4(d, dst_stride, &dd0, &dd1, &dd2, &dd3); + + uint8x8_t d0_u8, d1_u8, d2_u8, d3_u8; + compute_dist_wtd_avg_8x4(dd0, dd1, dd2, dd3, d0, d1, d2, d3, fwd_offset, + bck_offset, round_offset_vec, &d0_u8, &d1_u8, + &d2_u8, &d3_u8); + + store_u8_8x4(d_u8, dst8_stride, d0_u8, d1_u8, d2_u8, d3_u8); + d_u8 += 4 * dst8_stride; + + s0 = s4; + s1 = s5; + s2 = s6; + s3 = s7; + s4 = s8; + s += 4 * src_stride; + d += 4 * dst_stride; + height -= 4; +#else // !AOM_ARCH_AARCH64 + int16x8_t s5 = vld1q_s16(s); + + uint16x8_t d0 = + convolve6_8_2d_v(s0, s1, s2, s3, s4, s5, y_filter, offset_const); + + uint16x8_t dd0 = vld1q_u16(d); + + uint8x8_t d0_u8; + compute_dist_wtd_avg_8x1(dd0, d0, fwd_offset, bck_offset, + round_offset_vec, &d0_u8); + + vst1_u8(d_u8, d0_u8); + d_u8 += dst8_stride; + + s0 = s1; + s1 = s2; + s2 = s3; + s3 = s4; + s4 = s5; + s += src_stride; + d += dst_stride; + height--; +#endif // AOM_ARCH_AARCH64 + } while (height != 0); + src_ptr += 8; + dst_ptr += 8; + dst8_ptr += 8; + w -= 8; + } while (w != 0); + } +} + +static INLINE void dist_wtd_convolve_2d_vert_6tap_avg_neon( + int16_t *src_ptr, const int src_stride, uint8_t *dst8_ptr, int dst8_stride, + ConvolveParams *conv_params, const int16x8_t y_filter, int h, int w) { + const int bd = 8; + const int offset_bits = bd + 2 * FILTER_BITS - ROUND0_BITS; + const int32x4_t offset_const = vdupq_n_s32(1 << offset_bits); + const int16_t round_offset = (1 << (offset_bits - COMPOUND_ROUND1_BITS)) + + (1 << (offset_bits - COMPOUND_ROUND1_BITS - 1)); + const int16x8_t round_offset_vec = vdupq_n_s16(round_offset); + + CONV_BUF_TYPE *dst_ptr = conv_params->dst; + const int dst_stride = conv_params->dst_stride; + + if (w == 4) { + int16x4_t s0, s1, s2, s3, s4; + load_s16_4x5(src_ptr, src_stride, &s0, &s1, &s2, &s3, &s4); + src_ptr += 5 * src_stride; + + do { +#if AOM_ARCH_AARCH64 + int16x4_t s5, s6, s7, s8; + load_s16_4x4(src_ptr, src_stride, &s5, &s6, &s7, &s8); + + uint16x4_t d0 = + convolve6_4_2d_v(s0, s1, s2, s3, s4, s5, y_filter, offset_const); + uint16x4_t d1 = + convolve6_4_2d_v(s1, s2, s3, s4, s5, s6, y_filter, offset_const); + uint16x4_t d2 = + convolve6_4_2d_v(s2, s3, s4, s5, s6, s7, y_filter, offset_const); + uint16x4_t d3 = + convolve6_4_2d_v(s3, s4, s5, s6, s7, s8, y_filter, offset_const); + + uint16x4_t dd0, dd1, dd2, dd3; + load_u16_4x4(dst_ptr, dst_stride, &dd0, &dd1, &dd2, &dd3); + + uint8x8_t d01_u8, d23_u8; + compute_basic_avg_4x4(dd0, dd1, dd2, dd3, d0, d1, d2, d3, + round_offset_vec, &d01_u8, &d23_u8); + + store_u8x4_strided_x2(dst8_ptr + 0 * dst8_stride, dst8_stride, d01_u8); + store_u8x4_strided_x2(dst8_ptr + 2 * dst8_stride, dst8_stride, d23_u8); + dst8_ptr += 4 * dst8_stride; + + s0 = s4; + s1 = s5; + s2 = s6; + s3 = s7; + s4 = s8; + src_ptr += 4 * src_stride; + dst_ptr += 4 * dst_stride; + h -= 4; +#else // !AOM_ARCH_AARCH64 + int16x4_t s5 = vld1_s16(src_ptr); + + uint16x4_t d0 = + convolve6_4_2d_v(s0, s1, s2, s3, s4, s5, 
y_filter, offset_const); + + uint16x4_t dd0 = vld1_u16(dst_ptr); + + uint8x8_t d01_u8; + compute_basic_avg_4x1(dd0, d0, vget_low_s16(round_offset_vec), &d01_u8); + + store_u8_4x1(dst8_ptr, d01_u8); + dst8_ptr += dst8_stride; + + s0 = s1; + s1 = s2; + s2 = s3; + s3 = s4; + s4 = s5; + src_ptr += src_stride; + dst_ptr += dst_stride; + h--; +#endif // AOM_ARCH_AARCH64 + } while (h != 0); + } else { + do { + int16_t *s = src_ptr; + CONV_BUF_TYPE *d = dst_ptr; + uint8_t *d_u8 = dst8_ptr; + int height = h; + + int16x8_t s0, s1, s2, s3, s4; + load_s16_8x5(s, src_stride, &s0, &s1, &s2, &s3, &s4); + s += 5 * src_stride; + + do { +#if AOM_ARCH_AARCH64 + int16x8_t s5, s6, s7, s8; + load_s16_8x4(s, src_stride, &s5, &s6, &s7, &s8); + + uint16x8_t d0 = + convolve6_8_2d_v(s0, s1, s2, s3, s4, s5, y_filter, offset_const); + uint16x8_t d1 = + convolve6_8_2d_v(s1, s2, s3, s4, s5, s6, y_filter, offset_const); + uint16x8_t d2 = + convolve6_8_2d_v(s2, s3, s4, s5, s6, s7, y_filter, offset_const); + uint16x8_t d3 = + convolve6_8_2d_v(s3, s4, s5, s6, s7, s8, y_filter, offset_const); + + uint16x8_t dd0, dd1, dd2, dd3; + load_u16_8x4(d, dst_stride, &dd0, &dd1, &dd2, &dd3); + + uint8x8_t d0_u8, d1_u8, d2_u8, d3_u8; + compute_basic_avg_8x4(dd0, dd1, dd2, dd3, d0, d1, d2, d3, + round_offset_vec, &d0_u8, &d1_u8, &d2_u8, &d3_u8); + + store_u8_8x4(d_u8, dst8_stride, d0_u8, d1_u8, d2_u8, d3_u8); + d_u8 += 4 * dst8_stride; + + s0 = s4; + s1 = s5; + s2 = s6; + s3 = s7; + s4 = s8; + s += 4 * src_stride; + d += 4 * dst_stride; + height -= 4; +#else // !AOM_ARCH_AARCH64 + int16x8_t s5 = vld1q_s16(s); + + uint16x8_t d0 = + convolve6_8_2d_v(s0, s1, s2, s3, s4, s5, y_filter, offset_const); + + uint16x8_t dd0 = vld1q_u16(d); + + uint8x8_t d0_u8; + compute_basic_avg_8x1(dd0, d0, round_offset_vec, &d0_u8); + + vst1_u8(d_u8, d0_u8); + d_u8 += dst8_stride; + + s0 = s1; + s1 = s2; + s2 = s3; + s3 = s4; + s4 = s5; + s += src_stride; + d += dst_stride; + height--; +#endif // AOM_ARCH_AARCH64 + } while (height != 0); + src_ptr += 8; + dst_ptr += 8; + dst8_ptr += 8; + w -= 8; + } while (w != 0); + } +} + +static INLINE void dist_wtd_convolve_2d_vert_6tap_neon( + int16_t *src_ptr, const int src_stride, ConvolveParams *conv_params, + const int16x8_t y_filter, int h, int w) { + const int bd = 8; + const int offset_bits = bd + 2 * FILTER_BITS - ROUND0_BITS; + const int32x4_t offset_const = vdupq_n_s32(1 << offset_bits); + + CONV_BUF_TYPE *dst_ptr = conv_params->dst; + const int dst_stride = conv_params->dst_stride; + + if (w == 4) { + int16x4_t s0, s1, s2, s3, s4; + load_s16_4x5(src_ptr, src_stride, &s0, &s1, &s2, &s3, &s4); + src_ptr += 5 * src_stride; + + do { +#if AOM_ARCH_AARCH64 + int16x4_t s5, s6, s7, s8; + load_s16_4x4(src_ptr, src_stride, &s5, &s6, &s7, &s8); + + uint16x4_t d0 = + convolve6_4_2d_v(s0, s1, s2, s3, s4, s5, y_filter, offset_const); + uint16x4_t d1 = + convolve6_4_2d_v(s1, s2, s3, s4, s5, s6, y_filter, offset_const); + uint16x4_t d2 = + convolve6_4_2d_v(s2, s3, s4, s5, s6, s7, y_filter, offset_const); + uint16x4_t d3 = + convolve6_4_2d_v(s3, s4, s5, s6, s7, s8, y_filter, offset_const); + + store_u16_4x4(dst_ptr, dst_stride, d0, d1, d2, d3); + + s0 = s4; + s1 = s5; + s2 = s6; + s3 = s7; + s4 = s8; + src_ptr += 4 * src_stride; + dst_ptr += 4 * dst_stride; + h -= 4; +#else // !AOM_ARCH_AARCH64 + int16x4_t s5 = vld1_s16(src_ptr); + + uint16x4_t d0 = + convolve6_4_2d_v(s0, s1, s2, s3, s4, s5, y_filter, offset_const); + + vst1_u16(dst_ptr, d0); + + s0 = s1; + s1 = s2; + s2 = s3; + s3 = s4; + s4 = s5; + src_ptr += src_stride; + 
dst_ptr += dst_stride; + h--; +#endif // AOM_ARCH_AARCH64 + } while (h != 0); + } else { + do { + int16_t *s = src_ptr; + CONV_BUF_TYPE *d = dst_ptr; + int height = h; + + int16x8_t s0, s1, s2, s3, s4; + load_s16_8x5(s, src_stride, &s0, &s1, &s2, &s3, &s4); + s += 5 * src_stride; + + do { +#if AOM_ARCH_AARCH64 + int16x8_t s5, s6, s7, s8; + load_s16_8x4(s, src_stride, &s5, &s6, &s7, &s8); + + uint16x8_t d0 = + convolve6_8_2d_v(s0, s1, s2, s3, s4, s5, y_filter, offset_const); + uint16x8_t d1 = + convolve6_8_2d_v(s1, s2, s3, s4, s5, s6, y_filter, offset_const); + uint16x8_t d2 = + convolve6_8_2d_v(s2, s3, s4, s5, s6, s7, y_filter, offset_const); + uint16x8_t d3 = + convolve6_8_2d_v(s3, s4, s5, s6, s7, s8, y_filter, offset_const); + + store_u16_8x4(d, dst_stride, d0, d1, d2, d3); + + s0 = s4; + s1 = s5; + s2 = s6; + s3 = s7; + s4 = s8; + s += 4 * src_stride; + d += 4 * dst_stride; + height -= 4; +#else // !AOM_ARCH_AARCH64 + int16x8_t s5 = vld1q_s16(s); + + uint16x8_t d0 = + convolve6_8_2d_v(s0, s1, s2, s3, s4, s5, y_filter, offset_const); + + vst1q_u16(d, d0); + + s0 = s1; + s1 = s2; + s2 = s3; + s3 = s4; + s4 = s5; + s += src_stride; + d += dst_stride; + height--; +#endif // AOM_ARCH_AARCH64 + } while (height != 0); + src_ptr += 8; + dst_ptr += 8; + w -= 8; + } while (w != 0); + } +} + +static INLINE uint16x4_t +convolve8_4_2d_v(const int16x4_t s0, const int16x4_t s1, const int16x4_t s2, + const int16x4_t s3, const int16x4_t s4, const int16x4_t s5, + const int16x4_t s6, const int16x4_t s7, + const int16x8_t y_filter, const int32x4_t offset_const) { + const int16x4_t y_filter_0_3 = vget_low_s16(y_filter); + const int16x4_t y_filter_4_7 = vget_high_s16(y_filter); + + int32x4_t sum = offset_const; + sum = vmlal_lane_s16(sum, s0, y_filter_0_3, 0); + sum = vmlal_lane_s16(sum, s1, y_filter_0_3, 1); + sum = vmlal_lane_s16(sum, s2, y_filter_0_3, 2); + sum = vmlal_lane_s16(sum, s3, y_filter_0_3, 3); + sum = vmlal_lane_s16(sum, s4, y_filter_4_7, 0); + sum = vmlal_lane_s16(sum, s5, y_filter_4_7, 1); + sum = vmlal_lane_s16(sum, s6, y_filter_4_7, 2); + sum = vmlal_lane_s16(sum, s7, y_filter_4_7, 3); + + return vqrshrun_n_s32(sum, COMPOUND_ROUND1_BITS); +} + +static INLINE uint16x8_t +convolve8_8_2d_v(const int16x8_t s0, const int16x8_t s1, const int16x8_t s2, + const int16x8_t s3, const int16x8_t s4, const int16x8_t s5, + const int16x8_t s6, const int16x8_t s7, + const int16x8_t y_filter, const int32x4_t offset_const) { + const int16x4_t y_filter_0_3 = vget_low_s16(y_filter); + const int16x4_t y_filter_4_7 = vget_high_s16(y_filter); + + int32x4_t sum0 = offset_const; + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s0), y_filter_0_3, 0); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s1), y_filter_0_3, 1); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s2), y_filter_0_3, 2); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s3), y_filter_0_3, 3); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s4), y_filter_4_7, 0); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s5), y_filter_4_7, 1); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s6), y_filter_4_7, 2); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s7), y_filter_4_7, 3); + + int32x4_t sum1 = offset_const; + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s0), y_filter_0_3, 0); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s1), y_filter_0_3, 1); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s2), y_filter_0_3, 2); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s3), y_filter_0_3, 3); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s4), y_filter_4_7, 0); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s5), 
y_filter_4_7, 1); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s6), y_filter_4_7, 2); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s7), y_filter_4_7, 3); + + return vcombine_u16(vqrshrun_n_s32(sum0, COMPOUND_ROUND1_BITS), + vqrshrun_n_s32(sum1, COMPOUND_ROUND1_BITS)); +} + +static INLINE void dist_wtd_convolve_2d_vert_8tap_dist_wtd_avg_neon( + int16_t *src_ptr, const int src_stride, uint8_t *dst8_ptr, int dst8_stride, + ConvolveParams *conv_params, const int16x8_t y_filter, int h, int w) { + const int bd = 8; + const int offset_bits = bd + 2 * FILTER_BITS - ROUND0_BITS; + const int32x4_t offset_const = vdupq_n_s32(1 << offset_bits); + const int16_t round_offset = (1 << (offset_bits - COMPOUND_ROUND1_BITS)) + + (1 << (offset_bits - COMPOUND_ROUND1_BITS - 1)); + const int16x8_t round_offset_vec = vdupq_n_s16(round_offset); + + const uint16_t fwd_offset = conv_params->fwd_offset; + const uint16_t bck_offset = conv_params->bck_offset; + + CONV_BUF_TYPE *dst_ptr = conv_params->dst; + const int dst_stride = conv_params->dst_stride; + + if (w == 4) { + int16x4_t s0, s1, s2, s3, s4, s5, s6; + load_s16_4x7(src_ptr, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6); + src_ptr += 7 * src_stride; + + do { +#if AOM_ARCH_AARCH64 + int16x4_t s7, s8, s9, s10; + load_s16_4x4(src_ptr, src_stride, &s7, &s8, &s9, &s10); + + uint16x4_t d0 = convolve8_4_2d_v(s0, s1, s2, s3, s4, s5, s6, s7, y_filter, + offset_const); + uint16x4_t d1 = convolve8_4_2d_v(s1, s2, s3, s4, s5, s6, s7, s8, y_filter, + offset_const); + uint16x4_t d2 = convolve8_4_2d_v(s2, s3, s4, s5, s6, s7, s8, s9, y_filter, + offset_const); + uint16x4_t d3 = convolve8_4_2d_v(s3, s4, s5, s6, s7, s8, s9, s10, + y_filter, offset_const); + + uint16x4_t dd0, dd1, dd2, dd3; + load_u16_4x4(dst_ptr, dst_stride, &dd0, &dd1, &dd2, &dd3); + + uint8x8_t d01_u8, d23_u8; + compute_dist_wtd_avg_4x4(dd0, dd1, dd2, dd3, d0, d1, d2, d3, fwd_offset, + bck_offset, round_offset_vec, &d01_u8, &d23_u8); + + store_u8x4_strided_x2(dst8_ptr + 0 * dst8_stride, dst8_stride, d01_u8); + store_u8x4_strided_x2(dst8_ptr + 2 * dst8_stride, dst8_stride, d23_u8); + dst8_ptr += 4 * dst8_stride; + + s0 = s4; + s1 = s5; + s2 = s6; + s3 = s7; + s4 = s8; + s5 = s9; + s6 = s10; + src_ptr += 4 * src_stride; + dst_ptr += 4 * dst_stride; + h -= 4; +#else // !AOM_ARCH_AARCH64 + int16x4_t s7 = vld1_s16(src_ptr); + + uint16x4_t d0 = convolve8_4_2d_v(s0, s1, s2, s3, s4, s5, s6, s7, y_filter, + offset_const); + + uint16x4_t dd0 = vld1_u16(dst_ptr); + + uint8x8_t d01_u8; + compute_dist_wtd_avg_4x1(dd0, d0, fwd_offset, bck_offset, + vget_low_s16(round_offset_vec), &d01_u8); + + store_u8_4x1(dst8_ptr, d01_u8); + dst8_ptr += dst8_stride; + + s0 = s1; + s1 = s2; + s2 = s3; + s3 = s4; + s4 = s5; + s5 = s6; + s6 = s7; + src_ptr += src_stride; + dst_ptr += dst_stride; + h--; +#endif // AOM_ARCH_AARCH64 + } while (h != 0); + } else { + do { + int16_t *s = src_ptr; + CONV_BUF_TYPE *d = dst_ptr; + uint8_t *d_u8 = dst8_ptr; + int height = h; + + int16x8_t s0, s1, s2, s3, s4, s5, s6; + load_s16_8x7(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6); + s += 7 * src_stride; + + do { +#if AOM_ARCH_AARCH64 + int16x8_t s7, s8, s9, s10; + load_s16_8x4(s, src_stride, &s7, &s8, &s9, &s10); + + uint16x8_t d0 = convolve8_8_2d_v(s0, s1, s2, s3, s4, s5, s6, s7, + y_filter, offset_const); + uint16x8_t d1 = convolve8_8_2d_v(s1, s2, s3, s4, s5, s6, s7, s8, + y_filter, offset_const); + uint16x8_t d2 = convolve8_8_2d_v(s2, s3, s4, s5, s6, s7, s8, s9, + y_filter, offset_const); + uint16x8_t d3 = convolve8_8_2d_v(s3, s4, s5, s6, s7, s8, 
s9, s10, + y_filter, offset_const); + + uint16x8_t dd0, dd1, dd2, dd3; + load_u16_8x4(d, dst_stride, &dd0, &dd1, &dd2, &dd3); + + uint8x8_t d0_u8, d1_u8, d2_u8, d3_u8; + compute_dist_wtd_avg_8x4(dd0, dd1, dd2, dd3, d0, d1, d2, d3, fwd_offset, + bck_offset, round_offset_vec, &d0_u8, &d1_u8, + &d2_u8, &d3_u8); + + store_u8_8x4(d_u8, dst8_stride, d0_u8, d1_u8, d2_u8, d3_u8); + d_u8 += 4 * dst8_stride; + + s0 = s4; + s1 = s5; + s2 = s6; + s3 = s7; + s4 = s8; + s5 = s9; + s6 = s10; + s += 4 * src_stride; + d += 4 * dst_stride; + height -= 4; +#else // !AOM_ARCH_AARCH64 + int16x8_t s7 = vld1q_s16(s); + + uint16x8_t d0 = convolve8_8_2d_v(s0, s1, s2, s3, s4, s5, s6, s7, + y_filter, offset_const); + + uint16x8_t dd0 = vld1q_u16(d); + + uint8x8_t d0_u8; + compute_dist_wtd_avg_8x1(dd0, d0, fwd_offset, bck_offset, + round_offset_vec, &d0_u8); + + vst1_u8(d_u8, d0_u8); + d_u8 += dst8_stride; + + s0 = s1; + s1 = s2; + s2 = s3; + s3 = s4; + s4 = s5; + s5 = s6; + s6 = s7; + s += src_stride; + d += dst_stride; + height--; +#endif // AOM_ARCH_AARCH64 + } while (height != 0); + src_ptr += 8; + dst_ptr += 8; + dst8_ptr += 8; + w -= 8; + } while (w != 0); + } +} + +static INLINE void dist_wtd_convolve_2d_vert_8tap_avg_neon( + int16_t *src_ptr, const int src_stride, uint8_t *dst8_ptr, int dst8_stride, + ConvolveParams *conv_params, const int16x8_t y_filter, int h, int w) { + const int bd = 8; + const int offset_bits = bd + 2 * FILTER_BITS - ROUND0_BITS; + const int32x4_t offset_const = vdupq_n_s32(1 << offset_bits); + const int16_t round_offset = (1 << (offset_bits - COMPOUND_ROUND1_BITS)) + + (1 << (offset_bits - COMPOUND_ROUND1_BITS - 1)); + const int16x8_t round_offset_vec = vdupq_n_s16(round_offset); + + CONV_BUF_TYPE *dst_ptr = conv_params->dst; + const int dst_stride = conv_params->dst_stride; + + if (w == 4) { + int16x4_t s0, s1, s2, s3, s4, s5, s6; + load_s16_4x7(src_ptr, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6); + src_ptr += 7 * src_stride; + + do { +#if AOM_ARCH_AARCH64 + int16x4_t s7, s8, s9, s10; + load_s16_4x4(src_ptr, src_stride, &s7, &s8, &s9, &s10); + + uint16x4_t d0 = convolve8_4_2d_v(s0, s1, s2, s3, s4, s5, s6, s7, y_filter, + offset_const); + uint16x4_t d1 = convolve8_4_2d_v(s1, s2, s3, s4, s5, s6, s7, s8, y_filter, + offset_const); + uint16x4_t d2 = convolve8_4_2d_v(s2, s3, s4, s5, s6, s7, s8, s9, y_filter, + offset_const); + uint16x4_t d3 = convolve8_4_2d_v(s3, s4, s5, s6, s7, s8, s9, s10, + y_filter, offset_const); + + uint16x4_t dd0, dd1, dd2, dd3; + load_u16_4x4(dst_ptr, dst_stride, &dd0, &dd1, &dd2, &dd3); + + uint8x8_t d01_u8, d23_u8; + compute_basic_avg_4x4(dd0, dd1, dd2, dd3, d0, d1, d2, d3, + round_offset_vec, &d01_u8, &d23_u8); + + store_u8x4_strided_x2(dst8_ptr + 0 * dst8_stride, dst8_stride, d01_u8); + store_u8x4_strided_x2(dst8_ptr + 2 * dst8_stride, dst8_stride, d23_u8); + dst8_ptr += 4 * dst8_stride; + + s0 = s4; + s1 = s5; + s2 = s6; + s3 = s7; + s4 = s8; + s5 = s9; + s6 = s10; + src_ptr += 4 * src_stride; + dst_ptr += 4 * dst_stride; + h -= 4; +#else // !AOM_ARCH_AARCH64 + int16x4_t s7 = vld1_s16(src_ptr); + + uint16x4_t d0 = convolve8_4_2d_v(s0, s1, s2, s3, s4, s5, s6, s7, y_filter, + offset_const); + + uint16x4_t dd0 = vld1_u16(dst_ptr); + + uint8x8_t d01_u8; + compute_basic_avg_4x1(dd0, d0, vget_low_s16(round_offset_vec), &d01_u8); + + store_u8_4x1(dst8_ptr, d01_u8); + dst8_ptr += dst8_stride; + + s0 = s1; + s1 = s2; + s2 = s3; + s3 = s4; + s4 = s5; + s5 = s6; + s6 = s7; + src_ptr += src_stride; + dst_ptr += dst_stride; + h--; +#endif // AOM_ARCH_AARCH64 + } while 
(h != 0); + } else { + do { + int16_t *s = src_ptr; + CONV_BUF_TYPE *d = dst_ptr; + uint8_t *d_u8 = dst8_ptr; + int height = h; + + int16x8_t s0, s1, s2, s3, s4, s5, s6; + load_s16_8x7(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6); + s += 7 * src_stride; + + do { +#if AOM_ARCH_AARCH64 + int16x8_t s7, s8, s9, s10; + load_s16_8x4(s, src_stride, &s7, &s8, &s9, &s10); + + uint16x8_t d0 = convolve8_8_2d_v(s0, s1, s2, s3, s4, s5, s6, s7, + y_filter, offset_const); + uint16x8_t d1 = convolve8_8_2d_v(s1, s2, s3, s4, s5, s6, s7, s8, + y_filter, offset_const); + uint16x8_t d2 = convolve8_8_2d_v(s2, s3, s4, s5, s6, s7, s8, s9, + y_filter, offset_const); + uint16x8_t d3 = convolve8_8_2d_v(s3, s4, s5, s6, s7, s8, s9, s10, + y_filter, offset_const); + + uint16x8_t dd0, dd1, dd2, dd3; + load_u16_8x4(d, dst_stride, &dd0, &dd1, &dd2, &dd3); + + uint8x8_t d0_u8, d1_u8, d2_u8, d3_u8; + compute_basic_avg_8x4(dd0, dd1, dd2, dd3, d0, d1, d2, d3, + round_offset_vec, &d0_u8, &d1_u8, &d2_u8, &d3_u8); + + store_u8_8x4(d_u8, dst8_stride, d0_u8, d1_u8, d2_u8, d3_u8); + d_u8 += 4 * dst8_stride; + + s0 = s4; + s1 = s5; + s2 = s6; + s3 = s7; + s4 = s8; + s5 = s9; + s6 = s10; + s += 4 * src_stride; + d += 4 * dst_stride; + height -= 4; +#else // !AOM_ARCH_AARCH64 + int16x8_t s7 = vld1q_s16(s); + + uint16x8_t d0 = convolve8_8_2d_v(s0, s1, s2, s3, s4, s5, s6, s7, + y_filter, offset_const); + + uint16x8_t dd0 = vld1q_u16(d); + + uint8x8_t d0_u8; + compute_basic_avg_8x1(dd0, d0, round_offset_vec, &d0_u8); + + vst1_u8(d_u8, d0_u8); + d_u8 += dst8_stride; + + s0 = s1; + s1 = s2; + s2 = s3; + s3 = s4; + s4 = s5; + s5 = s6; + s6 = s7; + s += src_stride; + d += dst_stride; + height--; +#endif // AOM_ARCH_AARCH64 + } while (height != 0); + src_ptr += 8; + dst_ptr += 8; + dst8_ptr += 8; + w -= 8; + } while (w != 0); + } +} + +static INLINE void dist_wtd_convolve_2d_vert_8tap_neon( + int16_t *src_ptr, const int src_stride, ConvolveParams *conv_params, + const int16x8_t y_filter, int h, int w) { + const int bd = 8; + const int offset_bits = bd + 2 * FILTER_BITS - ROUND0_BITS; + const int32x4_t offset_const = vdupq_n_s32(1 << offset_bits); + + CONV_BUF_TYPE *dst_ptr = conv_params->dst; + const int dst_stride = conv_params->dst_stride; + + if (w == 4) { + int16x4_t s0, s1, s2, s3, s4, s5, s6; + load_s16_4x7(src_ptr, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6); + src_ptr += 7 * src_stride; + + do { +#if AOM_ARCH_AARCH64 + int16x4_t s7, s8, s9, s10; + load_s16_4x4(src_ptr, src_stride, &s7, &s8, &s9, &s10); + + uint16x4_t d0 = convolve8_4_2d_v(s0, s1, s2, s3, s4, s5, s6, s7, y_filter, + offset_const); + uint16x4_t d1 = convolve8_4_2d_v(s1, s2, s3, s4, s5, s6, s7, s8, y_filter, + offset_const); + uint16x4_t d2 = convolve8_4_2d_v(s2, s3, s4, s5, s6, s7, s8, s9, y_filter, + offset_const); + uint16x4_t d3 = convolve8_4_2d_v(s3, s4, s5, s6, s7, s8, s9, s10, + y_filter, offset_const); + + store_u16_4x4(dst_ptr, dst_stride, d0, d1, d2, d3); + + s0 = s4; + s1 = s5; + s2 = s6; + s3 = s7; + s4 = s8; + s5 = s9; + s6 = s10; + src_ptr += 4 * src_stride; + dst_ptr += 4 * dst_stride; + h -= 4; +#else // !AOM_ARCH_AARCH64 + int16x4_t s7 = vld1_s16(src_ptr); + + uint16x4_t d0 = convolve8_4_2d_v(s0, s1, s2, s3, s4, s5, s6, s7, y_filter, + offset_const); + + vst1_u16(dst_ptr, d0); + + s0 = s1; + s1 = s2; + s2 = s3; + s3 = s4; + s4 = s5; + s5 = s6; + s6 = s7; + src_ptr += src_stride; + dst_ptr += dst_stride; + h--; +#endif // AOM_ARCH_AARCH64 + } while (h != 0); + } else { + do { + int16_t *s = src_ptr; + CONV_BUF_TYPE *d = dst_ptr; + int height = h; 
+ + int16x8_t s0, s1, s2, s3, s4, s5, s6; + load_s16_8x7(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6); + s += 7 * src_stride; + + do { +#if AOM_ARCH_AARCH64 + int16x8_t s7, s8, s9, s10; + load_s16_8x4(s, src_stride, &s7, &s8, &s9, &s10); + + uint16x8_t d0 = convolve8_8_2d_v(s0, s1, s2, s3, s4, s5, s6, s7, + y_filter, offset_const); + uint16x8_t d1 = convolve8_8_2d_v(s1, s2, s3, s4, s5, s6, s7, s8, + y_filter, offset_const); + uint16x8_t d2 = convolve8_8_2d_v(s2, s3, s4, s5, s6, s7, s8, s9, + y_filter, offset_const); + uint16x8_t d3 = convolve8_8_2d_v(s3, s4, s5, s6, s7, s8, s9, s10, + y_filter, offset_const); + + store_u16_8x4(d, dst_stride, d0, d1, d2, d3); + + s0 = s4; + s1 = s5; + s2 = s6; + s3 = s7; + s4 = s8; + s5 = s9; + s6 = s10; + s += 4 * src_stride; + d += 4 * dst_stride; + height -= 4; +#else // !AOM_ARCH_AARCH64 + int16x8_t s7 = vld1q_s16(s); + + uint16x8_t d0 = convolve8_8_2d_v(s0, s1, s2, s3, s4, s5, s6, s7, + y_filter, offset_const); + + vst1q_u16(d, d0); + + s0 = s1; + s1 = s2; + s2 = s3; + s3 = s4; + s4 = s5; + s5 = s6; + s6 = s7; + s += src_stride; + d += dst_stride; + height--; +#endif // AOM_ARCH_AARCH64 + } while (height != 0); + src_ptr += 8; + dst_ptr += 8; + w -= 8; + } while (w != 0); + } +} + +#endif // AOM_AV1_COMMON_ARM_COMPOUND_CONVOLVE_NEON_H_ diff --git a/third_party/aom/av1/common/arm/compound_convolve_neon_dotprod.c b/third_party/aom/av1/common/arm/compound_convolve_neon_dotprod.c new file mode 100644 index 0000000000..3aeffbb0e6 --- /dev/null +++ b/third_party/aom/av1/common/arm/compound_convolve_neon_dotprod.c @@ -0,0 +1,675 @@ +/* + * Copyright (c) 2023, Alliance for Open Media. All rights reserved + * + * This source code is subject to the terms of the BSD 2 Clause License and + * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License + * was not distributed with this source code in the LICENSE file, you can + * obtain it at www.aomedia.org/license/software. If the Alliance for Open + * Media Patent License 1.0 was not distributed with this source code in the + * PATENTS file, you can obtain it at www.aomedia.org/license/patent. + */ + +#include <arm_neon.h> +#include <assert.h> + +#include "aom_dsp/arm/mem_neon.h" +#include "av1/common/arm/compound_convolve_neon.h" +#include "config/aom_config.h" +#include "config/av1_rtcd.h" + +DECLARE_ALIGNED(16, static const uint8_t, dot_prod_permute_tbl[48]) = { + 0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6, + 4, 5, 6, 7, 5, 6, 7, 8, 6, 7, 8, 9, 7, 8, 9, 10, + 8, 9, 10, 11, 9, 10, 11, 12, 10, 11, 12, 13, 11, 12, 13, 14 +}; + +static INLINE int16x4_t convolve4_4_2d_h(uint8x16_t samples, + const int8x8_t x_filter, + const int32x4_t correction, + const uint8x16_t range_limit, + const uint8x16_t permute_tbl) { + // Clamp sample range to [-128, 127] for 8-bit signed dot product. + int8x16_t clamped_samples = + vreinterpretq_s8_u8(vsubq_u8(samples, range_limit)); + + // Permute samples ready for dot product. + // { 0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6 } + int8x16_t permuted_samples = vqtbl1q_s8(clamped_samples, permute_tbl); + + // Accumulate dot product into 'correction' to account for range clamp. + int32x4_t sum = vdotq_lane_s32(correction, permuted_samples, x_filter, 0); + + // We halved the convolution filter values so -1 from the right shift. 
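+  // Illustrative note (editorial addition, not from the upstream source): the
+  // dot product above evaluates sum((s[i] - 128) * (f[i] / 2)), so the caller
+  // seeds 'correction' with 128 * sum(f[i] / 2) (plus its rounding shims) to
+  // recover sum(s[i] * f[i]) / 2. Because the taps were pre-halved, shifting
+  // this half-sized accumulator right by ROUND0_BITS - 1 matches shifting the
+  // full-precision sum right by ROUND0_BITS, with the folded-in shim supplying
+  // the rounding term.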
+ return vshrn_n_s32(sum, ROUND0_BITS - 1); +} + +static INLINE int16x8_t convolve8_8_2d_h(uint8x16_t samples, + const int8x8_t x_filter, + const int32x4_t correction, + const uint8x16_t range_limit, + const uint8x16x3_t permute_tbl) { + int8x16_t clamped_samples, permuted_samples[3]; + int32x4_t sum[2]; + + // Clamp sample range to [-128, 127] for 8-bit signed dot product. + clamped_samples = vreinterpretq_s8_u8(vsubq_u8(samples, range_limit)); + + // Permute samples ready for dot product. */ + // { 0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6 } + permuted_samples[0] = vqtbl1q_s8(clamped_samples, permute_tbl.val[0]); + // { 4, 5, 6, 7, 5, 6, 7, 8, 6, 7, 8, 9, 7, 8, 9, 10 } + permuted_samples[1] = vqtbl1q_s8(clamped_samples, permute_tbl.val[1]); + // { 8, 9, 10, 11, 9, 10, 11, 12, 10, 11, 12, 13, 11, 12, 13, 14 } + permuted_samples[2] = vqtbl1q_s8(clamped_samples, permute_tbl.val[2]); + + // Accumulate dot product into 'correction' to account for range clamp. + // First 4 output values. + sum[0] = vdotq_lane_s32(correction, permuted_samples[0], x_filter, 0); + sum[0] = vdotq_lane_s32(sum[0], permuted_samples[1], x_filter, 1); + // Second 4 output values. + sum[1] = vdotq_lane_s32(correction, permuted_samples[1], x_filter, 0); + sum[1] = vdotq_lane_s32(sum[1], permuted_samples[2], x_filter, 1); + + // Narrow and re-pack. + // We halved the convolution filter values so -1 from the right shift. + return vcombine_s16(vshrn_n_s32(sum[0], ROUND0_BITS - 1), + vshrn_n_s32(sum[1], ROUND0_BITS - 1)); +} + +static INLINE void dist_wtd_convolve_2d_horiz_neon_dotprod( + const uint8_t *src, int src_stride, int16_t *im_block, const int im_stride, + const int16_t *x_filter_ptr, const int im_h, int w) { + const int bd = 8; + const int32_t horiz_const = (1 << (bd + FILTER_BITS - 2)); + // Dot product constants and other shims. + const int16x8_t x_filter_s16 = vld1q_s16(x_filter_ptr); + const int32_t correction_s32 = + vaddlvq_s16(vshlq_n_s16(x_filter_s16, FILTER_BITS - 1)); + // Fold horiz_const into the dot-product filter correction constant. The + // additional shim of 1 << ((ROUND0_BITS - 1) - 1) enables us to use non- + // rounding shifts - which are generally faster than rounding shifts on + // modern CPUs. (The extra -1 is needed because we halved the filter values.) + const int32x4_t correction = vdupq_n_s32(correction_s32 + horiz_const + + (1 << ((ROUND0_BITS - 1) - 1))); + const uint8x16_t range_limit = vdupq_n_u8(128); + + const uint8_t *src_ptr = src; + int16_t *dst_ptr = im_block; + int dst_stride = im_stride; + int height = im_h; + + if (w == 4) { + const uint8x16_t permute_tbl = vld1q_u8(dot_prod_permute_tbl); + // 4-tap filters are used for blocks having width <= 4. + // Filter values are even, so halve to reduce intermediate precision reqs. 
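+    // Illustrative note (editorial addition): the four non-zero taps of the
+    // narrow filter appear to occupy positions 2..5 of the 8-tap kernel, hence
+    // the load from x_filter_ptr + 2 below and the matching src_ptr += 2
+    // adjustment; the upper half is zero-padded, and vshrn_n_s16(..., 1) both
+    // halves the taps and narrows them to the int8 lanes the dot product uses.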
+ const int8x8_t x_filter = + vshrn_n_s16(vcombine_s16(vld1_s16(x_filter_ptr + 2), vdup_n_s16(0)), 1); + + src_ptr += 2; + + do { + uint8x16_t s0, s1, s2, s3; + load_u8_16x4(src_ptr, src_stride, &s0, &s1, &s2, &s3); + + int16x4_t d0 = + convolve4_4_2d_h(s0, x_filter, correction, range_limit, permute_tbl); + int16x4_t d1 = + convolve4_4_2d_h(s1, x_filter, correction, range_limit, permute_tbl); + int16x4_t d2 = + convolve4_4_2d_h(s2, x_filter, correction, range_limit, permute_tbl); + int16x4_t d3 = + convolve4_4_2d_h(s3, x_filter, correction, range_limit, permute_tbl); + + store_s16_4x4(dst_ptr, dst_stride, d0, d1, d2, d3); + + src_ptr += 4 * src_stride; + dst_ptr += 4 * dst_stride; + height -= 4; + } while (height > 4); + + do { + uint8x16_t s0 = vld1q_u8(src_ptr); + + int16x4_t d0 = + convolve4_4_2d_h(s0, x_filter, correction, range_limit, permute_tbl); + + vst1_s16(dst_ptr, d0); + + src_ptr += src_stride; + dst_ptr += dst_stride; + } while (--height != 0); + } else { + const uint8x16x3_t permute_tbl = vld1q_u8_x3(dot_prod_permute_tbl); + // Filter values are even, so halve to reduce intermediate precision reqs. + const int8x8_t x_filter = vshrn_n_s16(x_filter_s16, 1); + + do { + const uint8_t *s = src_ptr; + int16_t *d = dst_ptr; + int width = w; + + do { + uint8x16_t s0, s1, s2, s3; + load_u8_16x4(s, src_stride, &s0, &s1, &s2, &s3); + + int16x8_t d0 = convolve8_8_2d_h(s0, x_filter, correction, range_limit, + permute_tbl); + int16x8_t d1 = convolve8_8_2d_h(s1, x_filter, correction, range_limit, + permute_tbl); + int16x8_t d2 = convolve8_8_2d_h(s2, x_filter, correction, range_limit, + permute_tbl); + int16x8_t d3 = convolve8_8_2d_h(s3, x_filter, correction, range_limit, + permute_tbl); + + store_s16_8x4(d, dst_stride, d0, d1, d2, d3); + + s += 8; + d += 8; + width -= 8; + } while (width > 0); + src_ptr += 4 * src_stride; + dst_ptr += 4 * dst_stride; + height -= 4; + } while (height > 4); + + do { + const uint8_t *s = src_ptr; + int16_t *d = dst_ptr; + int width = w; + + do { + uint8x16_t s0 = vld1q_u8(s); + + int16x8_t d0 = convolve8_8_2d_h(s0, x_filter, correction, range_limit, + permute_tbl); + + vst1q_s16(d, d0); + + s += 8; + d += 8; + width -= 8; + } while (width > 0); + src_ptr += src_stride; + dst_ptr += dst_stride; + } while (--height != 0); + } +} + +void av1_dist_wtd_convolve_2d_neon_dotprod( + const uint8_t *src, int src_stride, uint8_t *dst8, int dst8_stride, int w, + int h, const InterpFilterParams *filter_params_x, + const InterpFilterParams *filter_params_y, const int subpel_x_qn, + const int subpel_y_qn, ConvolveParams *conv_params) { + assert(w % 4 == 0); + assert(h % 4 == 0); + + DECLARE_ALIGNED(16, int16_t, + im_block[(MAX_SB_SIZE + SUBPEL_TAPS - 1) * MAX_SB_SIZE]); + + const int y_filter_taps = get_filter_tap(filter_params_y, subpel_y_qn); + const int clamped_y_taps = y_filter_taps < 6 ? 
6 : y_filter_taps; + + const int im_h = h + clamped_y_taps - 1; + const int im_stride = MAX_SB_SIZE; + const int vert_offset = clamped_y_taps / 2 - 1; + const int horiz_offset = filter_params_x->taps / 2 - 1; + const uint8_t *src_ptr = src - vert_offset * src_stride - horiz_offset; + const int16_t *x_filter_ptr = av1_get_interp_filter_subpel_kernel( + filter_params_x, subpel_x_qn & SUBPEL_MASK); + const int16_t *y_filter_ptr = av1_get_interp_filter_subpel_kernel( + filter_params_y, subpel_y_qn & SUBPEL_MASK); + + const int16x8_t y_filter = vld1q_s16(y_filter_ptr); + + dist_wtd_convolve_2d_horiz_neon_dotprod(src_ptr, src_stride, im_block, + im_stride, x_filter_ptr, im_h, w); + + if (clamped_y_taps == 6) { + if (conv_params->do_average) { + if (UNLIKELY(conv_params->use_dist_wtd_comp_avg)) { + dist_wtd_convolve_2d_vert_6tap_dist_wtd_avg_neon( + im_block, im_stride, dst8, dst8_stride, conv_params, y_filter, h, + w); + } else { + dist_wtd_convolve_2d_vert_6tap_avg_neon(im_block, im_stride, dst8, + dst8_stride, conv_params, + y_filter, h, w); + } + } else { + dist_wtd_convolve_2d_vert_6tap_neon(im_block, im_stride, conv_params, + y_filter, h, w); + } + } else { + if (conv_params->do_average) { + if (UNLIKELY(conv_params->use_dist_wtd_comp_avg)) { + dist_wtd_convolve_2d_vert_8tap_dist_wtd_avg_neon( + im_block, im_stride, dst8, dst8_stride, conv_params, y_filter, h, + w); + } else { + dist_wtd_convolve_2d_vert_8tap_avg_neon(im_block, im_stride, dst8, + dst8_stride, conv_params, + y_filter, h, w); + } + } else { + dist_wtd_convolve_2d_vert_8tap_neon(im_block, im_stride, conv_params, + y_filter, h, w); + } + } +} + +static INLINE uint16x4_t convolve4_4_x(uint8x16_t samples, + const int8x8_t x_filter, + const int32x4_t correction, + const uint8x16_t range_limit, + const uint8x16_t permute_tbl) { + // Clamp sample range to [-128, 127] for 8-bit signed dot product. + int8x16_t clamped_samples = + vreinterpretq_s8_u8(vsubq_u8(samples, range_limit)); + + // Permute samples ready for dot product. + // { 0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6 } + int8x16_t permuted_samples = vqtbl1q_s8(clamped_samples, permute_tbl); + + // Accumulate dot product into 'correction' to account for range clamp. + int32x4_t sum = vdotq_lane_s32(correction, permuted_samples, x_filter, 0); + + // We halved the convolution filter values so -1 from the right shift. + return vreinterpret_u16_s16(vshrn_n_s32(sum, ROUND0_BITS - 1)); +} + +static INLINE uint16x8_t convolve8_8_x(uint8x16_t samples, + const int8x8_t x_filter, + const int32x4_t correction, + const uint8x16_t range_limit, + const uint8x16x3_t permute_tbl) { + int8x16_t clamped_samples, permuted_samples[3]; + int32x4_t sum[2]; + + // Clamp sample range to [-128, 127] for 8-bit signed dot product. + clamped_samples = vreinterpretq_s8_u8(vsubq_u8(samples, range_limit)); + + // Permute samples ready for dot product. */ + // { 0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6 } + permuted_samples[0] = vqtbl1q_s8(clamped_samples, permute_tbl.val[0]); + // { 4, 5, 6, 7, 5, 6, 7, 8, 6, 7, 8, 9, 7, 8, 9, 10 } + permuted_samples[1] = vqtbl1q_s8(clamped_samples, permute_tbl.val[1]); + // { 8, 9, 10, 11, 9, 10, 11, 12, 10, 11, 12, 13, 11, 12, 13, 14 } + permuted_samples[2] = vqtbl1q_s8(clamped_samples, permute_tbl.val[2]); + + // Accumulate dot product into 'correction' to account for range clamp. + // First 4 output values. 
+ sum[0] = vdotq_lane_s32(correction, permuted_samples[0], x_filter, 0); + sum[0] = vdotq_lane_s32(sum[0], permuted_samples[1], x_filter, 1); + // Second 4 output values. + sum[1] = vdotq_lane_s32(correction, permuted_samples[1], x_filter, 0); + sum[1] = vdotq_lane_s32(sum[1], permuted_samples[2], x_filter, 1); + + // Narrow and re-pack. + // We halved the convolution filter values so -1 from the right shift. + int16x8_t res = vcombine_s16(vshrn_n_s32(sum[0], ROUND0_BITS - 1), + vshrn_n_s32(sum[1], ROUND0_BITS - 1)); + return vreinterpretq_u16_s16(res); +} + +static INLINE void dist_wtd_convolve_x_dist_wtd_avg_neon_dotprod( + const uint8_t *src, int src_stride, uint8_t *dst8, int dst8_stride, int w, + int h, const InterpFilterParams *filter_params_x, const int subpel_x_qn, + ConvolveParams *conv_params) { + assert(w % 4 == 0); + assert(h % 4 == 0); + + const int bd = 8; + const int offset_bits = bd + 2 * FILTER_BITS - ROUND0_BITS; + const int16_t round_offset = (1 << (offset_bits - COMPOUND_ROUND1_BITS)) + + (1 << (offset_bits - COMPOUND_ROUND1_BITS - 1)); + const int16x8_t round_offset_vec = vdupq_n_s16(round_offset); + + const uint16_t fwd_offset = conv_params->fwd_offset; + const uint16_t bck_offset = conv_params->bck_offset; + + // Horizontal filter. + const int16_t *x_filter_ptr = av1_get_interp_filter_subpel_kernel( + filter_params_x, subpel_x_qn & SUBPEL_MASK); + const int16x8_t x_filter_s16 = vld1q_s16(x_filter_ptr); + + // Dot-product constants and other shims. + const uint8x16_t range_limit = vdupq_n_u8(128); + const int32_t correction_s32 = + vaddlvq_s16(vshlq_n_s16(x_filter_s16, FILTER_BITS - 1)); + // Fold round_offset into the dot-product filter correction constant. The + // additional shim of 1 << ((ROUND0_BITS - 1) - 1) enables us to use non- + // rounding shifts - which are generally faster than rounding shifts on + // modern CPUs. (The extra -1 is needed because we halved the filter values.) + int32x4_t correction = + vdupq_n_s32(correction_s32 + (round_offset << (ROUND0_BITS - 1)) + + (1 << ((ROUND0_BITS - 1) - 1))); + + const int horiz_offset = filter_params_x->taps / 2 - 1; + const uint8_t *src_ptr = src - horiz_offset; + CONV_BUF_TYPE *dst_ptr = conv_params->dst; + uint8_t *dst8_ptr = dst8; + int dst_stride = conv_params->dst_stride; + int height = h; + + if (w == 4) { + const uint8x16_t permute_tbl = vld1q_u8(dot_prod_permute_tbl); + // 4-tap filters are used for blocks having width <= 4. + // Filter values are even, so halve to reduce intermediate precision reqs. 
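+    // Illustrative note (editorial addition) on 'correction' as assembled
+    // above and consumed by the convolve4_4_x() calls below: it folds together
+    // (1) the clamp compensation 128 * sum(halved taps), (2) round_offset
+    // pre-scaled by << (ROUND0_BITS - 1) so that the final >> (ROUND0_BITS - 1)
+    // leaves exactly round_offset in each CONV_BUF_TYPE output, and
+    // (3) 1 << ((ROUND0_BITS - 1) - 1), the rounding bias that lets the plain
+    // truncating vshrn behave like a rounding shift.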
+ const int8x8_t x_filter = + vshrn_n_s16(vcombine_s16(vld1_s16(x_filter_ptr + 2), vdup_n_s16(0)), 1); + + src_ptr += 2; + + do { + uint8x16_t s0, s1, s2, s3; + load_u8_16x4(src_ptr, src_stride, &s0, &s1, &s2, &s3); + + uint16x4_t d0 = + convolve4_4_x(s0, x_filter, correction, range_limit, permute_tbl); + uint16x4_t d1 = + convolve4_4_x(s1, x_filter, correction, range_limit, permute_tbl); + uint16x4_t d2 = + convolve4_4_x(s2, x_filter, correction, range_limit, permute_tbl); + uint16x4_t d3 = + convolve4_4_x(s3, x_filter, correction, range_limit, permute_tbl); + + uint16x4_t dd0, dd1, dd2, dd3; + load_u16_4x4(dst_ptr, dst_stride, &dd0, &dd1, &dd2, &dd3); + + uint8x8_t d01_u8, d23_u8; + compute_dist_wtd_avg_4x4(dd0, dd1, dd2, dd3, d0, d1, d2, d3, fwd_offset, + bck_offset, round_offset_vec, &d01_u8, &d23_u8); + + store_u8x4_strided_x2(dst8_ptr + 0 * dst8_stride, dst8_stride, d01_u8); + store_u8x4_strided_x2(dst8_ptr + 2 * dst8_stride, dst8_stride, d23_u8); + + src_ptr += 4 * src_stride; + dst_ptr += 4 * dst_stride; + dst8_ptr += 4 * dst8_stride; + height -= 4; + } while (height != 0); + } else { + const uint8x16x3_t permute_tbl = vld1q_u8_x3(dot_prod_permute_tbl); + // Filter values are even, so halve to reduce intermediate precision reqs. + const int8x8_t x_filter = vshrn_n_s16(x_filter_s16, 1); + + do { + const uint8_t *s = src_ptr; + CONV_BUF_TYPE *d = dst_ptr; + uint8_t *d_u8 = dst8_ptr; + int width = w; + + do { + uint8x16_t s0, s1, s2, s3; + load_u8_16x4(s, src_stride, &s0, &s1, &s2, &s3); + + uint16x8_t d0 = + convolve8_8_x(s0, x_filter, correction, range_limit, permute_tbl); + uint16x8_t d1 = + convolve8_8_x(s1, x_filter, correction, range_limit, permute_tbl); + uint16x8_t d2 = + convolve8_8_x(s2, x_filter, correction, range_limit, permute_tbl); + uint16x8_t d3 = + convolve8_8_x(s3, x_filter, correction, range_limit, permute_tbl); + + uint16x8_t dd0, dd1, dd2, dd3; + load_u16_8x4(d, dst_stride, &dd0, &dd1, &dd2, &dd3); + + uint8x8_t d0_u8, d1_u8, d2_u8, d3_u8; + compute_dist_wtd_avg_8x4(dd0, dd1, dd2, dd3, d0, d1, d2, d3, fwd_offset, + bck_offset, round_offset_vec, &d0_u8, &d1_u8, + &d2_u8, &d3_u8); + + store_u8_8x4(d_u8, dst8_stride, d0_u8, d1_u8, d2_u8, d3_u8); + + s += 8; + d += 8; + d_u8 += 8; + width -= 8; + } while (width != 0); + src_ptr += 4 * src_stride; + dst_ptr += 4 * dst_stride; + dst8_ptr += 4 * dst8_stride; + height -= 4; + } while (height != 0); + } +} + +static INLINE void dist_wtd_convolve_x_avg_neon_dotprod( + const uint8_t *src, int src_stride, uint8_t *dst8, int dst8_stride, int w, + int h, const InterpFilterParams *filter_params_x, const int subpel_x_qn, + ConvolveParams *conv_params) { + assert(w % 4 == 0); + assert(h % 4 == 0); + + const int bd = 8; + const int offset_bits = bd + 2 * FILTER_BITS - ROUND0_BITS; + const int16_t round_offset = (1 << (offset_bits - COMPOUND_ROUND1_BITS)) + + (1 << (offset_bits - COMPOUND_ROUND1_BITS - 1)); + const int16x8_t round_offset_vec = vdupq_n_s16(round_offset); + + // Horizontal filter. + const int16_t *x_filter_ptr = av1_get_interp_filter_subpel_kernel( + filter_params_x, subpel_x_qn & SUBPEL_MASK); + const int16x8_t x_filter_s16 = vld1q_s16(x_filter_ptr); + + // Dot-product constants and other shims. + const uint8x16_t range_limit = vdupq_n_u8(128); + const int32_t correction_s32 = + vaddlvq_s16(vshlq_n_s16(x_filter_s16, FILTER_BITS - 1)); + // Fold round_offset into the dot-product filter correction constant. 
The + // additional shim of 1 << ((ROUND0_BITS - 1) - 1) enables us to use non- + // rounding shifts - which are generally faster than rounding shifts on + // modern CPUs. (The extra -1 is needed because we halved the filter values.) + int32x4_t correction = + vdupq_n_s32(correction_s32 + (round_offset << (ROUND0_BITS - 1)) + + (1 << ((ROUND0_BITS - 1) - 1))); + + const int horiz_offset = filter_params_x->taps / 2 - 1; + const uint8_t *src_ptr = src - horiz_offset; + CONV_BUF_TYPE *dst_ptr = conv_params->dst; + uint8_t *dst8_ptr = dst8; + int dst_stride = conv_params->dst_stride; + int height = h; + + if (w == 4) { + const uint8x16_t permute_tbl = vld1q_u8(dot_prod_permute_tbl); + // 4-tap filters are used for blocks having width <= 4. + // Filter values are even, so halve to reduce intermediate precision reqs. + const int8x8_t x_filter = + vshrn_n_s16(vcombine_s16(vld1_s16(x_filter_ptr + 2), vdup_n_s16(0)), 1); + + src_ptr += 2; + + do { + uint8x16_t s0, s1, s2, s3; + load_u8_16x4(src_ptr, src_stride, &s0, &s1, &s2, &s3); + + uint16x4_t d0 = + convolve4_4_x(s0, x_filter, correction, range_limit, permute_tbl); + uint16x4_t d1 = + convolve4_4_x(s1, x_filter, correction, range_limit, permute_tbl); + uint16x4_t d2 = + convolve4_4_x(s2, x_filter, correction, range_limit, permute_tbl); + uint16x4_t d3 = + convolve4_4_x(s3, x_filter, correction, range_limit, permute_tbl); + + uint16x4_t dd0, dd1, dd2, dd3; + load_u16_4x4(dst_ptr, dst_stride, &dd0, &dd1, &dd2, &dd3); + + uint8x8_t d01_u8, d23_u8; + compute_basic_avg_4x4(dd0, dd1, dd2, dd3, d0, d1, d2, d3, + round_offset_vec, &d01_u8, &d23_u8); + + store_u8x4_strided_x2(dst8_ptr + 0 * dst8_stride, dst8_stride, d01_u8); + store_u8x4_strided_x2(dst8_ptr + 2 * dst8_stride, dst8_stride, d23_u8); + + src_ptr += 4 * src_stride; + dst_ptr += 4 * dst_stride; + dst8_ptr += 4 * dst8_stride; + height -= 4; + } while (height != 0); + } else { + const uint8x16x3_t permute_tbl = vld1q_u8_x3(dot_prod_permute_tbl); + // Filter values are even, so halve to reduce intermediate precision reqs. 
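+    // Illustrative note (editorial addition): this helper mirrors the
+    // dist_wtd_avg variant above, but blends the two predictions with equal
+    // weights via compute_basic_avg_*() instead of the fwd_offset/bck_offset
+    // weighted average.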
+ const int8x8_t x_filter = vshrn_n_s16(x_filter_s16, 1); + + do { + const uint8_t *s = src_ptr; + CONV_BUF_TYPE *d = dst_ptr; + uint8_t *d_u8 = dst8_ptr; + int width = w; + + do { + uint8x16_t s0, s1, s2, s3; + load_u8_16x4(s, src_stride, &s0, &s1, &s2, &s3); + + uint16x8_t d0 = + convolve8_8_x(s0, x_filter, correction, range_limit, permute_tbl); + uint16x8_t d1 = + convolve8_8_x(s1, x_filter, correction, range_limit, permute_tbl); + uint16x8_t d2 = + convolve8_8_x(s2, x_filter, correction, range_limit, permute_tbl); + uint16x8_t d3 = + convolve8_8_x(s3, x_filter, correction, range_limit, permute_tbl); + + uint16x8_t dd0, dd1, dd2, dd3; + load_u16_8x4(d, dst_stride, &dd0, &dd1, &dd2, &dd3); + + uint8x8_t d0_u8, d1_u8, d2_u8, d3_u8; + compute_basic_avg_8x4(dd0, dd1, dd2, dd3, d0, d1, d2, d3, + round_offset_vec, &d0_u8, &d1_u8, &d2_u8, &d3_u8); + + store_u8_8x4(d_u8, dst8_stride, d0_u8, d1_u8, d2_u8, d3_u8); + + s += 8; + d += 8; + d_u8 += 8; + width -= 8; + } while (width != 0); + src_ptr += 4 * src_stride; + dst_ptr += 4 * dst_stride; + dst8_ptr += 4 * dst8_stride; + height -= 4; + } while (height != 0); + } +} + +static INLINE void dist_wtd_convolve_x_neon_dotprod( + const uint8_t *src, int src_stride, int w, int h, + const InterpFilterParams *filter_params_x, const int subpel_x_qn, + ConvolveParams *conv_params) { + assert(w % 4 == 0); + assert(h % 4 == 0); + + const int bd = 8; + const int offset_bits = bd + 2 * FILTER_BITS - ROUND0_BITS; + const int16_t round_offset = (1 << (offset_bits - COMPOUND_ROUND1_BITS)) + + (1 << (offset_bits - COMPOUND_ROUND1_BITS - 1)); + + // Horizontal filter. + const int16_t *x_filter_ptr = av1_get_interp_filter_subpel_kernel( + filter_params_x, subpel_x_qn & SUBPEL_MASK); + const int16x8_t x_filter_s16 = vld1q_s16(x_filter_ptr); + + // Dot-product constants and other shims. + const uint8x16_t range_limit = vdupq_n_u8(128); + const int32_t correction_s32 = + vaddlvq_s16(vshlq_n_s16(x_filter_s16, FILTER_BITS - 1)); + // Fold round_offset into the dot-product filter correction constant. The + // additional shim of 1 << ((ROUND0_BITS - 1) - 1) enables us to use non- + // rounding shifts - which are generally faster than rounding shifts on + // modern CPUs. (The extra -1 is needed because we halved the filter values.) + int32x4_t correction = + vdupq_n_s32(correction_s32 + (round_offset << (ROUND0_BITS - 1)) + + (1 << ((ROUND0_BITS - 1) - 1))); + + const int horiz_offset = filter_params_x->taps / 2 - 1; + const uint8_t *src_ptr = src - horiz_offset; + CONV_BUF_TYPE *dst_ptr = conv_params->dst; + int dst_stride = conv_params->dst_stride; + int height = h; + + if (w == 4) { + const uint8x16_t permute_tbl = vld1q_u8(dot_prod_permute_tbl); + // 4-tap filters are used for blocks having width <= 4. + // Filter values are even, so halve to reduce intermediate precision reqs. 
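+    // Illustrative note (editorial addition): this no-average variant only
+    // writes the offset intermediate values into conv_params->dst
+    // (CONV_BUF_TYPE); the blend with the second reference happens in a later
+    // pass, which is why there is no dst8 output here, matching the
+    // do_average == 0 branch of the dispatcher below.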
+ const int8x8_t x_filter = + vshrn_n_s16(vcombine_s16(vld1_s16(x_filter_ptr + 2), vdup_n_s16(0)), 1); + + src_ptr += 2; + + do { + uint8x16_t s0, s1, s2, s3; + load_u8_16x4(src_ptr, src_stride, &s0, &s1, &s2, &s3); + + uint16x4_t d0 = + convolve4_4_x(s0, x_filter, correction, range_limit, permute_tbl); + uint16x4_t d1 = + convolve4_4_x(s1, x_filter, correction, range_limit, permute_tbl); + uint16x4_t d2 = + convolve4_4_x(s2, x_filter, correction, range_limit, permute_tbl); + uint16x4_t d3 = + convolve4_4_x(s3, x_filter, correction, range_limit, permute_tbl); + + store_u16_4x4(dst_ptr, dst_stride, d0, d1, d2, d3); + + src_ptr += 4 * src_stride; + dst_ptr += 4 * dst_stride; + height -= 4; + } while (height != 0); + } else { + const uint8x16x3_t permute_tbl = vld1q_u8_x3(dot_prod_permute_tbl); + // Filter values are even, so halve to reduce intermediate precision reqs. + const int8x8_t x_filter = vshrn_n_s16(x_filter_s16, 1); + + do { + const uint8_t *s = src_ptr; + CONV_BUF_TYPE *d = dst_ptr; + int width = w; + + do { + uint8x16_t s0, s1, s2, s3; + load_u8_16x4(s, src_stride, &s0, &s1, &s2, &s3); + + uint16x8_t d0 = + convolve8_8_x(s0, x_filter, correction, range_limit, permute_tbl); + uint16x8_t d1 = + convolve8_8_x(s1, x_filter, correction, range_limit, permute_tbl); + uint16x8_t d2 = + convolve8_8_x(s2, x_filter, correction, range_limit, permute_tbl); + uint16x8_t d3 = + convolve8_8_x(s3, x_filter, correction, range_limit, permute_tbl); + + store_u16_8x4(d, dst_stride, d0, d1, d2, d3); + + s += 8; + d += 8; + width -= 8; + } while (width != 0); + src_ptr += 4 * src_stride; + dst_ptr += 4 * dst_stride; + height -= 4; + } while (height != 0); + } +} + +void av1_dist_wtd_convolve_x_neon_dotprod( + const uint8_t *src, int src_stride, uint8_t *dst8, int dst8_stride, int w, + int h, const InterpFilterParams *filter_params_x, const int subpel_x_qn, + ConvolveParams *conv_params) { + if (conv_params->do_average) { + if (UNLIKELY(conv_params->use_dist_wtd_comp_avg)) { + dist_wtd_convolve_x_dist_wtd_avg_neon_dotprod( + src, src_stride, dst8, dst8_stride, w, h, filter_params_x, + subpel_x_qn, conv_params); + } else { + dist_wtd_convolve_x_avg_neon_dotprod(src, src_stride, dst8, dst8_stride, + w, h, filter_params_x, subpel_x_qn, + conv_params); + } + } else { + dist_wtd_convolve_x_neon_dotprod(src, src_stride, w, h, filter_params_x, + subpel_x_qn, conv_params); + } +} diff --git a/third_party/aom/av1/common/arm/compound_convolve_neon_i8mm.c b/third_party/aom/av1/common/arm/compound_convolve_neon_i8mm.c new file mode 100644 index 0000000000..a72af9e36a --- /dev/null +++ b/third_party/aom/av1/common/arm/compound_convolve_neon_i8mm.c @@ -0,0 +1,614 @@ +/* + * Copyright (c) 2023, Alliance for Open Media. All rights reserved + * + * This source code is subject to the terms of the BSD 2 Clause License and + * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License + * was not distributed with this source code in the LICENSE file, you can + * obtain it at www.aomedia.org/license/software. If the Alliance for Open + * Media Patent License 1.0 was not distributed with this source code in the + * PATENTS file, you can obtain it at www.aomedia.org/license/patent. 
+ */ + +#include <arm_neon.h> +#include <assert.h> + +#include "aom_dsp/arm/mem_neon.h" +#include "av1/common/arm/compound_convolve_neon.h" +#include "config/aom_config.h" +#include "config/av1_rtcd.h" + +DECLARE_ALIGNED(16, static const uint8_t, dot_prod_permute_tbl[48]) = { + 0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6, + 4, 5, 6, 7, 5, 6, 7, 8, 6, 7, 8, 9, 7, 8, 9, 10, + 8, 9, 10, 11, 9, 10, 11, 12, 10, 11, 12, 13, 11, 12, 13, 14 +}; + +static INLINE int16x4_t convolve4_4_2d_h(uint8x16_t samples, + const int8x8_t x_filter, + const uint8x16_t permute_tbl, + const int32x4_t horiz_const) { + // Permute samples ready for dot product. + // { 0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6 } + uint8x16_t permuted_samples = vqtbl1q_u8(samples, permute_tbl); + + // First 4 output values. + int32x4_t sum = vusdotq_lane_s32(horiz_const, permuted_samples, x_filter, 0); + + // We halved the convolution filter values so -1 from the right shift. + return vshrn_n_s32(sum, ROUND0_BITS - 1); +} + +static INLINE int16x8_t convolve8_8_2d_h(uint8x16_t samples, + const int8x8_t x_filter, + const uint8x16x3_t permute_tbl, + const int32x4_t horiz_const) { + uint8x16_t permuted_samples[3]; + int32x4_t sum[2]; + + // Permute samples ready for dot product. + // { 0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6 } + permuted_samples[0] = vqtbl1q_u8(samples, permute_tbl.val[0]); + // { 4, 5, 6, 7, 5, 6, 7, 8, 6, 7, 8, 9, 7, 8, 9, 10 } + permuted_samples[1] = vqtbl1q_u8(samples, permute_tbl.val[1]); + // { 8, 9, 10, 11, 9, 10, 11, 12, 10, 11, 12, 13, 11, 12, 13, 14 } + permuted_samples[2] = vqtbl1q_u8(samples, permute_tbl.val[2]); + + // First 4 output values. + sum[0] = vusdotq_lane_s32(horiz_const, permuted_samples[0], x_filter, 0); + sum[0] = vusdotq_lane_s32(sum[0], permuted_samples[1], x_filter, 1); + // Second 4 output values. + sum[1] = vusdotq_lane_s32(horiz_const, permuted_samples[1], x_filter, 0); + sum[1] = vusdotq_lane_s32(sum[1], permuted_samples[2], x_filter, 1); + + // Narrow and re-pack. + // We halved the convolution filter values so -1 from the right shift. + return vcombine_s16(vshrn_n_s32(sum[0], ROUND0_BITS - 1), + vshrn_n_s32(sum[1], ROUND0_BITS - 1)); +} + +static INLINE void dist_wtd_convolve_2d_horiz_neon_i8mm( + const uint8_t *src, int src_stride, int16_t *im_block, const int im_stride, + const int16_t *x_filter_ptr, const int im_h, int w) { + const int bd = 8; + // A shim of 1 << ((ROUND0_BITS - 1) - 1) enables us to use non-rounding + // shifts - which are generally faster than rounding shifts on modern CPUs. + // (The extra -1 is needed because we halved the filter values.) + const int32x4_t horiz_const = vdupq_n_s32((1 << (bd + FILTER_BITS - 2)) + + (1 << ((ROUND0_BITS - 1) - 1))); + + const uint8_t *src_ptr = src; + int16_t *dst_ptr = im_block; + int dst_stride = im_stride; + int height = im_h; + + if (w == 4) { + const uint8x16_t permute_tbl = vld1q_u8(dot_prod_permute_tbl); + // 4-tap filters are used for blocks having width <= 4. + // Filter values are even, so halve to reduce intermediate precision reqs. 
+ const int8x8_t x_filter = + vshrn_n_s16(vcombine_s16(vld1_s16(x_filter_ptr + 2), vdup_n_s16(0)), 1); + + src_ptr += 2; + + do { + uint8x16_t s0, s1, s2, s3; + load_u8_16x4(src_ptr, src_stride, &s0, &s1, &s2, &s3); + + int16x4_t d0 = convolve4_4_2d_h(s0, x_filter, permute_tbl, horiz_const); + int16x4_t d1 = convolve4_4_2d_h(s1, x_filter, permute_tbl, horiz_const); + int16x4_t d2 = convolve4_4_2d_h(s2, x_filter, permute_tbl, horiz_const); + int16x4_t d3 = convolve4_4_2d_h(s3, x_filter, permute_tbl, horiz_const); + + store_s16_4x4(dst_ptr, dst_stride, d0, d1, d2, d3); + + src_ptr += 4 * src_stride; + dst_ptr += 4 * dst_stride; + height -= 4; + } while (height > 4); + + do { + uint8x16_t s0 = vld1q_u8(src_ptr); + + int16x4_t d0 = convolve4_4_2d_h(s0, x_filter, permute_tbl, horiz_const); + + vst1_s16(dst_ptr, d0); + + src_ptr += src_stride; + dst_ptr += dst_stride; + } while (--height != 0); + } else { + const uint8x16x3_t permute_tbl = vld1q_u8_x3(dot_prod_permute_tbl); + // Filter values are even, so halve to reduce intermediate precision reqs. + const int8x8_t x_filter = vshrn_n_s16(vld1q_s16(x_filter_ptr), 1); + + do { + const uint8_t *s = src_ptr; + int16_t *d = dst_ptr; + int width = w; + + do { + uint8x16_t s0, s1, s2, s3; + load_u8_16x4(s, src_stride, &s0, &s1, &s2, &s3); + + int16x8_t d0 = convolve8_8_2d_h(s0, x_filter, permute_tbl, horiz_const); + int16x8_t d1 = convolve8_8_2d_h(s1, x_filter, permute_tbl, horiz_const); + int16x8_t d2 = convolve8_8_2d_h(s2, x_filter, permute_tbl, horiz_const); + int16x8_t d3 = convolve8_8_2d_h(s3, x_filter, permute_tbl, horiz_const); + + store_s16_8x4(d, dst_stride, d0, d1, d2, d3); + + s += 8; + d += 8; + width -= 8; + } while (width > 0); + src_ptr += 4 * src_stride; + dst_ptr += 4 * dst_stride; + height -= 4; + } while (height > 4); + + do { + const uint8_t *s = src_ptr; + int16_t *d = dst_ptr; + int width = w; + + do { + uint8x16_t s0 = vld1q_u8(s); + + int16x8_t d0 = convolve8_8_2d_h(s0, x_filter, permute_tbl, horiz_const); + + vst1q_s16(d, d0); + + s += 8; + d += 8; + width -= 8; + } while (width > 0); + src_ptr += src_stride; + dst_ptr += dst_stride; + } while (--height != 0); + } +} + +void av1_dist_wtd_convolve_2d_neon_i8mm( + const uint8_t *src, int src_stride, uint8_t *dst8, int dst8_stride, int w, + int h, const InterpFilterParams *filter_params_x, + const InterpFilterParams *filter_params_y, const int subpel_x_qn, + const int subpel_y_qn, ConvolveParams *conv_params) { + assert(w % 4 == 0); + assert(h % 4 == 0); + + DECLARE_ALIGNED(16, int16_t, + im_block[(MAX_SB_SIZE + SUBPEL_TAPS - 1) * MAX_SB_SIZE]); + + const int y_filter_taps = get_filter_tap(filter_params_y, subpel_y_qn); + const int clamped_y_taps = y_filter_taps < 6 ? 
6 : y_filter_taps; + + const int im_h = h + clamped_y_taps - 1; + const int im_stride = MAX_SB_SIZE; + const int vert_offset = clamped_y_taps / 2 - 1; + const int horiz_offset = filter_params_x->taps / 2 - 1; + const uint8_t *src_ptr = src - vert_offset * src_stride - horiz_offset; + const int16_t *x_filter_ptr = av1_get_interp_filter_subpel_kernel( + filter_params_x, subpel_x_qn & SUBPEL_MASK); + const int16_t *y_filter_ptr = av1_get_interp_filter_subpel_kernel( + filter_params_y, subpel_y_qn & SUBPEL_MASK); + + const int16x8_t y_filter = vld1q_s16(y_filter_ptr); + + dist_wtd_convolve_2d_horiz_neon_i8mm(src_ptr, src_stride, im_block, im_stride, + x_filter_ptr, im_h, w); + + if (clamped_y_taps == 6) { + if (conv_params->do_average) { + if (UNLIKELY(conv_params->use_dist_wtd_comp_avg)) { + dist_wtd_convolve_2d_vert_6tap_dist_wtd_avg_neon( + im_block, im_stride, dst8, dst8_stride, conv_params, y_filter, h, + w); + } else { + dist_wtd_convolve_2d_vert_6tap_avg_neon(im_block, im_stride, dst8, + dst8_stride, conv_params, + y_filter, h, w); + } + } else { + dist_wtd_convolve_2d_vert_6tap_neon(im_block, im_stride, conv_params, + y_filter, h, w); + } + } else { + if (conv_params->do_average) { + if (UNLIKELY(conv_params->use_dist_wtd_comp_avg)) { + dist_wtd_convolve_2d_vert_8tap_dist_wtd_avg_neon( + im_block, im_stride, dst8, dst8_stride, conv_params, y_filter, h, + w); + } else { + dist_wtd_convolve_2d_vert_8tap_avg_neon(im_block, im_stride, dst8, + dst8_stride, conv_params, + y_filter, h, w); + } + } else { + dist_wtd_convolve_2d_vert_8tap_neon(im_block, im_stride, conv_params, + y_filter, h, w); + } + } +} + +static INLINE uint16x4_t convolve4_4_x(uint8x16_t samples, + const int8x8_t x_filter, + const uint8x16_t permute_tbl, + const int32x4_t round_offset) { + // Permute samples ready for dot product. + // { 0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6 } + uint8x16_t permuted_samples = vqtbl1q_u8(samples, permute_tbl); + + // First 4 output values. + int32x4_t sum = vusdotq_lane_s32(round_offset, permuted_samples, x_filter, 0); + + // We halved the convolution filter values so -1 from the right shift. + return vreinterpret_u16_s16(vshrn_n_s32(sum, ROUND0_BITS - 1)); +} + +static INLINE uint16x8_t convolve8_8_x(uint8x16_t samples, + const int8x8_t x_filter, + const uint8x16x3_t permute_tbl, + const int32x4_t round_offset) { + uint8x16_t permuted_samples[3]; + int32x4_t sum[2]; + + // Permute samples ready for dot product. + // { 0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6 } + permuted_samples[0] = vqtbl1q_u8(samples, permute_tbl.val[0]); + // { 4, 5, 6, 7, 5, 6, 7, 8, 6, 7, 8, 9, 7, 8, 9, 10 } + permuted_samples[1] = vqtbl1q_u8(samples, permute_tbl.val[1]); + // { 8, 9, 10, 11, 9, 10, 11, 12, 10, 11, 12, 13, 11, 12, 13, 14 } + permuted_samples[2] = vqtbl1q_u8(samples, permute_tbl.val[2]); + + // First 4 output values. + sum[0] = vusdotq_lane_s32(round_offset, permuted_samples[0], x_filter, 0); + sum[0] = vusdotq_lane_s32(sum[0], permuted_samples[1], x_filter, 1); + // Second 4 output values. + sum[1] = vusdotq_lane_s32(round_offset, permuted_samples[1], x_filter, 0); + sum[1] = vusdotq_lane_s32(sum[1], permuted_samples[2], x_filter, 1); + + // Narrow and re-pack. + // We halved the convolution filter values so -1 from the right shift. 
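+  // Illustrative note (editorial addition): unlike the *_neon_dotprod
+  // variants, the i8mm vusdotq_lane_s32() multiplies unsigned samples by
+  // signed taps directly, so no [-128, 127] clamp and no clamp-compensation
+  // constant are needed here; only the rounding shim and offsets are folded
+  // into the accumulator seed (round_offset in this helper, horiz_const in
+  // the 2d ones).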
+ int16x8_t res = vcombine_s16(vshrn_n_s32(sum[0], ROUND0_BITS - 1), + vshrn_n_s32(sum[1], ROUND0_BITS - 1)); + return vreinterpretq_u16_s16(res); +} + +static INLINE void dist_wtd_convolve_x_dist_wtd_avg_neon_i8mm( + const uint8_t *src, int src_stride, uint8_t *dst8, int dst8_stride, int w, + int h, const InterpFilterParams *filter_params_x, const int subpel_x_qn, + ConvolveParams *conv_params) { + assert(w % 4 == 0); + assert(h % 4 == 0); + + const int bd = 8; + const int offset_bits = bd + 2 * FILTER_BITS - ROUND0_BITS; + const int16_t round_offset = (1 << (offset_bits - COMPOUND_ROUND1_BITS)) + + (1 << (offset_bits - COMPOUND_ROUND1_BITS - 1)); + const int16x8_t round_offset_vec = vdupq_n_s16(round_offset); + // A shim of 1 << ((ROUND0_BITS - 1) - 1) enables us to use non-rounding + // shifts - which are generally faster than rounding shifts on modern CPUs. + // (The extra -1 is needed because we halved the filter values.) + const int32x4_t round_offset_shim = vdupq_n_s32( + (round_offset << (ROUND0_BITS - 1)) + (1 << ((ROUND0_BITS - 1) - 1))); + + const uint16_t fwd_offset = conv_params->fwd_offset; + const uint16_t bck_offset = conv_params->bck_offset; + + // Horizontal filter. + const int16_t *x_filter_ptr = av1_get_interp_filter_subpel_kernel( + filter_params_x, subpel_x_qn & SUBPEL_MASK); + + const int horiz_offset = filter_params_x->taps / 2 - 1; + const uint8_t *src_ptr = src - horiz_offset; + CONV_BUF_TYPE *dst_ptr = conv_params->dst; + uint8_t *dst8_ptr = dst8; + int dst_stride = conv_params->dst_stride; + int height = h; + + if (w == 4) { + const uint8x16_t permute_tbl = vld1q_u8(dot_prod_permute_tbl); + // 4-tap filters are used for blocks having width <= 4. + // Filter values are even, so halve to reduce intermediate precision reqs. + const int8x8_t x_filter = + vshrn_n_s16(vcombine_s16(vld1_s16(x_filter_ptr + 2), vdup_n_s16(0)), 1); + + src_ptr += 2; + + do { + uint8x16_t s0, s1, s2, s3; + load_u8_16x4(src_ptr, src_stride, &s0, &s1, &s2, &s3); + + uint16x4_t d0 = + convolve4_4_x(s0, x_filter, permute_tbl, round_offset_shim); + uint16x4_t d1 = + convolve4_4_x(s1, x_filter, permute_tbl, round_offset_shim); + uint16x4_t d2 = + convolve4_4_x(s2, x_filter, permute_tbl, round_offset_shim); + uint16x4_t d3 = + convolve4_4_x(s3, x_filter, permute_tbl, round_offset_shim); + + uint16x4_t dd0, dd1, dd2, dd3; + load_u16_4x4(dst_ptr, dst_stride, &dd0, &dd1, &dd2, &dd3); + + uint8x8_t d01_u8, d23_u8; + compute_dist_wtd_avg_4x4(dd0, dd1, dd2, dd3, d0, d1, d2, d3, fwd_offset, + bck_offset, round_offset_vec, &d01_u8, &d23_u8); + + store_u8x4_strided_x2(dst8_ptr + 0 * dst8_stride, dst8_stride, d01_u8); + store_u8x4_strided_x2(dst8_ptr + 2 * dst8_stride, dst8_stride, d23_u8); + + src_ptr += 4 * src_stride; + dst_ptr += 4 * dst_stride; + dst8_ptr += 4 * dst8_stride; + height -= 4; + } while (height != 0); + } else { + const uint8x16x3_t permute_tbl = vld1q_u8_x3(dot_prod_permute_tbl); + // Filter values are even, so halve to reduce intermediate precision reqs. 
+ const int8x8_t x_filter = vshrn_n_s16(vld1q_s16(x_filter_ptr), 1); + + do { + const uint8_t *s = src_ptr; + CONV_BUF_TYPE *d = dst_ptr; + uint8_t *d_u8 = dst8_ptr; + int width = w; + + do { + uint8x16_t s0, s1, s2, s3; + load_u8_16x4(s, src_stride, &s0, &s1, &s2, &s3); + + uint16x8_t d0 = + convolve8_8_x(s0, x_filter, permute_tbl, round_offset_shim); + uint16x8_t d1 = + convolve8_8_x(s1, x_filter, permute_tbl, round_offset_shim); + uint16x8_t d2 = + convolve8_8_x(s2, x_filter, permute_tbl, round_offset_shim); + uint16x8_t d3 = + convolve8_8_x(s3, x_filter, permute_tbl, round_offset_shim); + + uint16x8_t dd0, dd1, dd2, dd3; + load_u16_8x4(d, dst_stride, &dd0, &dd1, &dd2, &dd3); + + uint8x8_t d0_u8, d1_u8, d2_u8, d3_u8; + compute_dist_wtd_avg_8x4(dd0, dd1, dd2, dd3, d0, d1, d2, d3, fwd_offset, + bck_offset, round_offset_vec, &d0_u8, &d1_u8, + &d2_u8, &d3_u8); + + store_u8_8x4(d_u8, dst8_stride, d0_u8, d1_u8, d2_u8, d3_u8); + + s += 8; + d += 8; + d_u8 += 8; + width -= 8; + } while (width != 0); + src_ptr += 4 * src_stride; + dst_ptr += 4 * dst_stride; + dst8_ptr += 4 * dst8_stride; + height -= 4; + } while (height != 0); + } +} + +static INLINE void dist_wtd_convolve_x_avg_neon_i8mm( + const uint8_t *src, int src_stride, uint8_t *dst8, int dst8_stride, int w, + int h, const InterpFilterParams *filter_params_x, const int subpel_x_qn, + ConvolveParams *conv_params) { + assert(w % 4 == 0); + assert(h % 4 == 0); + + const int bd = 8; + const int offset_bits = bd + 2 * FILTER_BITS - ROUND0_BITS; + const int16_t round_offset = (1 << (offset_bits - COMPOUND_ROUND1_BITS)) + + (1 << (offset_bits - COMPOUND_ROUND1_BITS - 1)); + const int16x8_t round_offset_vec = vdupq_n_s16(round_offset); + // A shim of 1 << ((ROUND0_BITS - 1) - 1) enables us to use non-rounding + // shifts - which are generally faster than rounding shifts on modern CPUs. + // (The extra -1 is needed because we halved the filter values.) + const int32x4_t round_offset_shim = vdupq_n_s32( + (round_offset << (ROUND0_BITS - 1)) + (1 << ((ROUND0_BITS - 1) - 1))); + + // Horizontal filter. + const int16_t *x_filter_ptr = av1_get_interp_filter_subpel_kernel( + filter_params_x, subpel_x_qn & SUBPEL_MASK); + + const int horiz_offset = filter_params_x->taps / 2 - 1; + const uint8_t *src_ptr = src - horiz_offset; + CONV_BUF_TYPE *dst_ptr = conv_params->dst; + uint8_t *dst8_ptr = dst8; + int dst_stride = conv_params->dst_stride; + int height = h; + + if (w == 4) { + const uint8x16_t permute_tbl = vld1q_u8(dot_prod_permute_tbl); + // 4-tap filters are used for blocks having width <= 4. + // Filter values are even, so halve to reduce intermediate precision reqs. 
+ const int8x8_t x_filter = + vshrn_n_s16(vcombine_s16(vld1_s16(x_filter_ptr + 2), vdup_n_s16(0)), 1); + + src_ptr += 2; + + do { + uint8x16_t s0, s1, s2, s3; + load_u8_16x4(src_ptr, src_stride, &s0, &s1, &s2, &s3); + + uint16x4_t d0 = + convolve4_4_x(s0, x_filter, permute_tbl, round_offset_shim); + uint16x4_t d1 = + convolve4_4_x(s1, x_filter, permute_tbl, round_offset_shim); + uint16x4_t d2 = + convolve4_4_x(s2, x_filter, permute_tbl, round_offset_shim); + uint16x4_t d3 = + convolve4_4_x(s3, x_filter, permute_tbl, round_offset_shim); + + uint16x4_t dd0, dd1, dd2, dd3; + load_u16_4x4(dst_ptr, dst_stride, &dd0, &dd1, &dd2, &dd3); + + uint8x8_t d01_u8, d23_u8; + compute_basic_avg_4x4(dd0, dd1, dd2, dd3, d0, d1, d2, d3, + round_offset_vec, &d01_u8, &d23_u8); + + store_u8x4_strided_x2(dst8_ptr + 0 * dst8_stride, dst8_stride, d01_u8); + store_u8x4_strided_x2(dst8_ptr + 2 * dst8_stride, dst8_stride, d23_u8); + + src_ptr += 4 * src_stride; + dst_ptr += 4 * dst_stride; + dst8_ptr += 4 * dst8_stride; + height -= 4; + } while (height != 0); + } else { + const uint8x16x3_t permute_tbl = vld1q_u8_x3(dot_prod_permute_tbl); + // Filter values are even, so halve to reduce intermediate precision reqs. + const int8x8_t x_filter = vshrn_n_s16(vld1q_s16(x_filter_ptr), 1); + + do { + const uint8_t *s = src_ptr; + CONV_BUF_TYPE *d = dst_ptr; + uint8_t *d_u8 = dst8_ptr; + int width = w; + + do { + uint8x16_t s0, s1, s2, s3; + load_u8_16x4(s, src_stride, &s0, &s1, &s2, &s3); + + uint16x8_t d0 = + convolve8_8_x(s0, x_filter, permute_tbl, round_offset_shim); + uint16x8_t d1 = + convolve8_8_x(s1, x_filter, permute_tbl, round_offset_shim); + uint16x8_t d2 = + convolve8_8_x(s2, x_filter, permute_tbl, round_offset_shim); + uint16x8_t d3 = + convolve8_8_x(s3, x_filter, permute_tbl, round_offset_shim); + + uint16x8_t dd0, dd1, dd2, dd3; + load_u16_8x4(d, dst_stride, &dd0, &dd1, &dd2, &dd3); + + uint8x8_t d0_u8, d1_u8, d2_u8, d3_u8; + compute_basic_avg_8x4(dd0, dd1, dd2, dd3, d0, d1, d2, d3, + round_offset_vec, &d0_u8, &d1_u8, &d2_u8, &d3_u8); + + store_u8_8x4(d_u8, dst8_stride, d0_u8, d1_u8, d2_u8, d3_u8); + + s += 8; + d += 8; + d_u8 += 8; + width -= 8; + } while (width != 0); + src_ptr += 4 * src_stride; + dst_ptr += 4 * dst_stride; + dst8_ptr += 4 * dst8_stride; + height -= 4; + } while (height != 0); + } +} + +static INLINE void dist_wtd_convolve_x_neon_i8mm( + const uint8_t *src, int src_stride, int w, int h, + const InterpFilterParams *filter_params_x, const int subpel_x_qn, + ConvolveParams *conv_params) { + assert(w % 4 == 0); + assert(h % 4 == 0); + + const int bd = 8; + const int offset_bits = bd + 2 * FILTER_BITS - ROUND0_BITS; + const int16_t round_offset = (1 << (offset_bits - COMPOUND_ROUND1_BITS)) + + (1 << (offset_bits - COMPOUND_ROUND1_BITS - 1)); + // A shim of 1 << ((ROUND0_BITS - 1) - 1) enables us to use non-rounding + // shifts - which are generally faster than rounding shifts on modern CPUs. + // (The extra -1 is needed because we halved the filter values.) + const int32x4_t round_offset_shim = vdupq_n_s32( + (round_offset << (ROUND0_BITS - 1)) + (1 << ((ROUND0_BITS - 1) - 1))); + + // Horizontal filter. 
+ const int16_t *x_filter_ptr = av1_get_interp_filter_subpel_kernel( + filter_params_x, subpel_x_qn & SUBPEL_MASK); + + const int horiz_offset = filter_params_x->taps / 2 - 1; + const uint8_t *src_ptr = src - horiz_offset; + CONV_BUF_TYPE *dst_ptr = conv_params->dst; + int dst_stride = conv_params->dst_stride; + int height = h; + + if (w == 4) { + const uint8x16_t permute_tbl = vld1q_u8(dot_prod_permute_tbl); + // 4-tap filters are used for blocks having width <= 4. + // Filter values are even, so halve to reduce intermediate precision reqs. + const int8x8_t x_filter = + vshrn_n_s16(vcombine_s16(vld1_s16(x_filter_ptr + 2), vdup_n_s16(0)), 1); + + src_ptr += 2; + + do { + uint8x16_t s0, s1, s2, s3; + load_u8_16x4(src_ptr, src_stride, &s0, &s1, &s2, &s3); + + uint16x4_t d0 = + convolve4_4_x(s0, x_filter, permute_tbl, round_offset_shim); + uint16x4_t d1 = + convolve4_4_x(s1, x_filter, permute_tbl, round_offset_shim); + uint16x4_t d2 = + convolve4_4_x(s2, x_filter, permute_tbl, round_offset_shim); + uint16x4_t d3 = + convolve4_4_x(s3, x_filter, permute_tbl, round_offset_shim); + + store_u16_4x4(dst_ptr, dst_stride, d0, d1, d2, d3); + + src_ptr += 4 * src_stride; + dst_ptr += 4 * dst_stride; + height -= 4; + } while (height != 0); + } else { + const uint8x16x3_t permute_tbl = vld1q_u8_x3(dot_prod_permute_tbl); + // Filter values are even, so halve to reduce intermediate precision reqs. + const int8x8_t x_filter = vshrn_n_s16(vld1q_s16(x_filter_ptr), 1); + + do { + const uint8_t *s = src_ptr; + CONV_BUF_TYPE *d = dst_ptr; + int width = w; + + do { + uint8x16_t s0, s1, s2, s3; + load_u8_16x4(s, src_stride, &s0, &s1, &s2, &s3); + + uint16x8_t d0 = + convolve8_8_x(s0, x_filter, permute_tbl, round_offset_shim); + uint16x8_t d1 = + convolve8_8_x(s1, x_filter, permute_tbl, round_offset_shim); + uint16x8_t d2 = + convolve8_8_x(s2, x_filter, permute_tbl, round_offset_shim); + uint16x8_t d3 = + convolve8_8_x(s3, x_filter, permute_tbl, round_offset_shim); + + store_u16_8x4(d, dst_stride, d0, d1, d2, d3); + + s += 8; + d += 8; + width -= 8; + } while (width != 0); + src_ptr += 4 * src_stride; + dst_ptr += 4 * dst_stride; + height -= 4; + } while (height != 0); + } +} + +void av1_dist_wtd_convolve_x_neon_i8mm( + const uint8_t *src, int src_stride, uint8_t *dst8, int dst8_stride, int w, + int h, const InterpFilterParams *filter_params_x, const int subpel_x_qn, + ConvolveParams *conv_params) { + if (conv_params->do_average) { + if (UNLIKELY(conv_params->use_dist_wtd_comp_avg)) { + dist_wtd_convolve_x_dist_wtd_avg_neon_i8mm( + src, src_stride, dst8, dst8_stride, w, h, filter_params_x, + subpel_x_qn, conv_params); + } else { + dist_wtd_convolve_x_avg_neon_i8mm(src, src_stride, dst8, dst8_stride, w, + h, filter_params_x, subpel_x_qn, + conv_params); + } + } else { + dist_wtd_convolve_x_neon_i8mm(src, src_stride, w, h, filter_params_x, + subpel_x_qn, conv_params); + } +} diff --git a/third_party/aom/av1/common/arm/convolve_neon.c b/third_party/aom/av1/common/arm/convolve_neon.c new file mode 100644 index 0000000000..10442f9bf9 --- /dev/null +++ b/third_party/aom/av1/common/arm/convolve_neon.c @@ -0,0 +1,1659 @@ +/* + * + * Copyright (c) 2018, Alliance for Open Media. All rights reserved + * + * This source code is subject to the terms of the BSD 2 Clause License and + * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License + * was not distributed with this source code in the LICENSE file, you can + * obtain it at www.aomedia.org/license/software. 
If the Alliance for Open + * Media Patent License 1.0 was not distributed with this source code in the + * PATENTS file, you can obtain it at www.aomedia.org/license/patent. + */ + +#include <assert.h> +#include <arm_neon.h> + +#include "config/aom_config.h" +#include "config/av1_rtcd.h" + +#include "aom_dsp/aom_dsp_common.h" +#include "aom_dsp/arm/mem_neon.h" +#include "aom_dsp/arm/transpose_neon.h" +#include "aom_ports/mem.h" +#include "av1/common/convolve.h" +#include "av1/common/filter.h" +#include "av1/common/arm/convolve_neon.h" + +static INLINE int16x4_t convolve12_4_x(const int16x4_t s0, const int16x4_t s1, + const int16x4_t s2, const int16x4_t s3, + const int16x4_t s4, const int16x4_t s5, + const int16x4_t s6, const int16x4_t s7, + const int16x4_t s8, const int16x4_t s9, + const int16x4_t s10, const int16x4_t s11, + const int16x8_t x_filter_0_7, + const int16x4_t x_filter_8_11, + const int32x4_t horiz_const) { + const int16x4_t x_filter_0_3 = vget_low_s16(x_filter_0_7); + const int16x4_t x_filter_4_7 = vget_high_s16(x_filter_0_7); + + int32x4_t sum = horiz_const; + sum = vmlal_lane_s16(sum, s0, x_filter_0_3, 0); + sum = vmlal_lane_s16(sum, s1, x_filter_0_3, 1); + sum = vmlal_lane_s16(sum, s2, x_filter_0_3, 2); + sum = vmlal_lane_s16(sum, s3, x_filter_0_3, 3); + sum = vmlal_lane_s16(sum, s4, x_filter_4_7, 0); + sum = vmlal_lane_s16(sum, s5, x_filter_4_7, 1); + sum = vmlal_lane_s16(sum, s6, x_filter_4_7, 2); + sum = vmlal_lane_s16(sum, s7, x_filter_4_7, 3); + sum = vmlal_lane_s16(sum, s8, x_filter_8_11, 0); + sum = vmlal_lane_s16(sum, s9, x_filter_8_11, 1); + sum = vmlal_lane_s16(sum, s10, x_filter_8_11, 2); + sum = vmlal_lane_s16(sum, s11, x_filter_8_11, 3); + + return vqrshrn_n_s32(sum, FILTER_BITS); +} + +static INLINE void convolve_x_sr_12tap_neon(const uint8_t *src_ptr, + int src_stride, uint8_t *dst_ptr, + const int dst_stride, int w, int h, + const int16_t *x_filter_ptr) { + const int16x8_t x_filter_0_7 = vld1q_s16(x_filter_ptr); + const int16x4_t x_filter_8_11 = vld1_s16(x_filter_ptr + 8); + + // A shim of 1 << (ROUND0_BITS - 1) enables us to use a single rounding right + // shift by FILTER_BITS - instead of a first rounding right shift by + // ROUND0_BITS, followed by second rounding right shift by FILTER_BITS - + // ROUND0_BITS. 
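+  // Illustrative check (editorial addition): with the usual 8-bit
+  // configuration (ROUND0_BITS == 3, FILTER_BITS == 7) and an accumulator
+  // value of 1000, the two-stage form gives ((1000 + 4) >> 3) = 125 and then
+  // ((125 + 8) >> 4) = 8, while the single rounding shift of the shimmed value
+  // gives (1000 + 4 + 64) >> 7 = 8 as well.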
+ const int32x4_t horiz_const = vdupq_n_s32(1 << (ROUND0_BITS - 1)); + +#if AOM_ARCH_AARCH64 + do { + const uint8_t *s = src_ptr; + uint8_t *d = dst_ptr; + int width = w; + + uint8x8_t t0, t1, t2, t3; + load_u8_8x4(s, src_stride, &t0, &t1, &t2, &t3); + transpose_elems_inplace_u8_8x4(&t0, &t1, &t2, &t3); + + int16x4_t s0 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t0))); + int16x4_t s1 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t1))); + int16x4_t s2 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t2))); + int16x4_t s3 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t3))); + int16x4_t s4 = vget_high_s16(vreinterpretq_s16_u16(vmovl_u8(t0))); + int16x4_t s5 = vget_high_s16(vreinterpretq_s16_u16(vmovl_u8(t1))); + int16x4_t s6 = vget_high_s16(vreinterpretq_s16_u16(vmovl_u8(t2))); + int16x4_t s7 = vget_high_s16(vreinterpretq_s16_u16(vmovl_u8(t3))); + + load_u8_8x4(s + 8, src_stride, &t0, &t1, &t2, &t3); + transpose_elems_inplace_u8_8x4(&t0, &t1, &t2, &t3); + + int16x4_t s8 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t0))); + int16x4_t s9 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t1))); + int16x4_t s10 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t2))); + + s += 11; + + do { + load_u8_8x4(s, src_stride, &t0, &t1, &t2, &t3); + transpose_elems_inplace_u8_8x4(&t0, &t1, &t2, &t3); + + int16x4_t s11 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t0))); + int16x4_t s12 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t1))); + int16x4_t s13 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t2))); + int16x4_t s14 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t3))); + + int16x4_t d0 = + convolve12_4_x(s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, + x_filter_0_7, x_filter_8_11, horiz_const); + int16x4_t d1 = + convolve12_4_x(s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, + x_filter_0_7, x_filter_8_11, horiz_const); + int16x4_t d2 = + convolve12_4_x(s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, + x_filter_0_7, x_filter_8_11, horiz_const); + int16x4_t d3 = + convolve12_4_x(s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, + x_filter_0_7, x_filter_8_11, horiz_const); + + transpose_elems_inplace_s16_4x4(&d0, &d1, &d2, &d3); + + uint8x8_t d01 = vqmovun_s16(vcombine_s16(d0, d1)); + uint8x8_t d23 = vqmovun_s16(vcombine_s16(d2, d3)); + + store_u8x4_strided_x2(d, dst_stride, d01); + store_u8x4_strided_x2(d + 2 * dst_stride, dst_stride, d23); + + s0 = s4; + s1 = s5; + s2 = s6; + s3 = s7; + s4 = s8; + s5 = s9; + s6 = s10; + s7 = s11; + s8 = s12; + s9 = s13; + s10 = s14; + s += 4; + d += 4; + width -= 4; + } while (width != 0); + src_ptr += 4 * src_stride; + dst_ptr += 4 * dst_stride; + h -= 4; + } while (h != 0); + +#else // !AOM_ARCH_AARCH64 + do { + const uint8_t *s = src_ptr; + uint8_t *d = dst_ptr; + int width = w; + + do { + uint8x16_t t0 = vld1q_u8(s); + int16x8_t tt0 = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(t0))); + int16x8_t tt8 = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(t0))); + + int16x4_t s0 = vget_low_s16(tt0); + int16x4_t s4 = vget_high_s16(tt0); + int16x4_t s8 = vget_low_s16(tt8); + int16x4_t s12 = vget_high_s16(tt8); + + int16x4_t s1 = vext_s16(s0, s4, 1); // a1 a2 a3 a4 + int16x4_t s2 = vext_s16(s0, s4, 2); // a2 a3 a4 a5 + int16x4_t s3 = vext_s16(s0, s4, 3); // a3 a4 a5 a6 + int16x4_t s5 = vext_s16(s4, s8, 1); // a5 a6 a7 a8 + int16x4_t s6 = vext_s16(s4, s8, 2); // a6 a7 a8 a9 + int16x4_t s7 = vext_s16(s4, s8, 3); // a7 a8 a9 a10 + int16x4_t s9 = vext_s16(s8, s12, 1); // a9 a10 a11 a12 + int16x4_t s10 = vext_s16(s8, s12, 2); // a10 a11 a12 a13 + int16x4_t s11 = vext_s16(s8, 
s12, 3); // a11 a12 a13 a14 + + int16x4_t d0 = + convolve12_4_x(s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, + x_filter_0_7, x_filter_8_11, horiz_const); + + uint8x8_t dd0 = vqmovun_s16(vcombine_s16(d0, vdup_n_s16(0))); + + store_u8_4x1(d, dd0); + + s += 4; + d += 4; + width -= 4; + } while (width != 0); + src_ptr += src_stride; + dst_ptr += dst_stride; + } while (--h != 0); +#endif // AOM_ARCH_AARCH64 +} + +static INLINE uint8x8_t convolve4_4_x(const int16x4_t s0, const int16x4_t s1, + const int16x4_t s2, const int16x4_t s3, + const int16x4_t filter, + const int16x4_t horiz_const) { + int16x4_t sum = horiz_const; + sum = vmla_lane_s16(sum, s0, filter, 0); + sum = vmla_lane_s16(sum, s1, filter, 1); + sum = vmla_lane_s16(sum, s2, filter, 2); + sum = vmla_lane_s16(sum, s3, filter, 3); + + // We halved the convolution filter values so - 1 from the right shift. + return vqrshrun_n_s16(vcombine_s16(sum, vdup_n_s16(0)), FILTER_BITS - 1); +} + +static INLINE uint8x8_t convolve8_8_x(const int16x8_t s0, const int16x8_t s1, + const int16x8_t s2, const int16x8_t s3, + const int16x8_t s4, const int16x8_t s5, + const int16x8_t s6, const int16x8_t s7, + const int16x8_t filter, + const int16x8_t horiz_const) { + const int16x4_t filter_lo = vget_low_s16(filter); + const int16x4_t filter_hi = vget_high_s16(filter); + + int16x8_t sum = horiz_const; + sum = vmlaq_lane_s16(sum, s0, filter_lo, 0); + sum = vmlaq_lane_s16(sum, s1, filter_lo, 1); + sum = vmlaq_lane_s16(sum, s2, filter_lo, 2); + sum = vmlaq_lane_s16(sum, s3, filter_lo, 3); + sum = vmlaq_lane_s16(sum, s4, filter_hi, 0); + sum = vmlaq_lane_s16(sum, s5, filter_hi, 1); + sum = vmlaq_lane_s16(sum, s6, filter_hi, 2); + sum = vmlaq_lane_s16(sum, s7, filter_hi, 3); + + // We halved the convolution filter values so - 1 from the right shift. + return vqrshrun_n_s16(sum, FILTER_BITS - 1); +} + +void av1_convolve_x_sr_neon(const uint8_t *src, int src_stride, uint8_t *dst, + int dst_stride, int w, int h, + const InterpFilterParams *filter_params_x, + const int subpel_x_qn, + ConvolveParams *conv_params) { + if (w == 2 || h == 2) { + av1_convolve_x_sr_c(src, src_stride, dst, dst_stride, w, h, filter_params_x, + subpel_x_qn, conv_params); + return; + } + + const uint8_t horiz_offset = filter_params_x->taps / 2 - 1; + src -= horiz_offset; + + const int16_t *x_filter_ptr = av1_get_interp_filter_subpel_kernel( + filter_params_x, subpel_x_qn & SUBPEL_MASK); + + if (filter_params_x->taps > 8) { + convolve_x_sr_12tap_neon(src, src_stride, dst, dst_stride, w, h, + x_filter_ptr); + return; + } + + // This shim of 1 << ((ROUND0_BITS - 1) - 1) enables us to use a single + // rounding right shift by FILTER_BITS - instead of a first rounding right + // shift by ROUND0_BITS, followed by second rounding right shift by + // FILTER_BITS - ROUND0_BITS. + // The outermost -1 is needed because we will halve the filter values. + const int16x8_t horiz_const = vdupq_n_s16(1 << ((ROUND0_BITS - 1) - 1)); + + if (w <= 4) { + // 4-tap filters are used for blocks having width <= 4. + // Filter values are even, so halve to reduce intermediate precision reqs. 
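The repeated "halved the convolution filter values" comments depend on every tap being even: halving the taps halves the accumulated sum exactly, so dropping one bit from the final rounding shift recovers the same output while keeping the accumulator further from the int16 limits. A scalar sketch of that equivalence, with a made-up even 4-tap kernel and 7 assumed for FILTER_BITS:

#include <assert.h>
#include <stdint.h>

int main(void) {
  const int FB = 7;                             // stand-in for FILTER_BITS
  const int16_t taps[4] = { -4, 36, 100, -4 };  // made-up kernel, all taps even
  for (int a = 0; a < 256; ++a) {
    for (int b = 0; b < 256; ++b) {
      const int16_t px[4] = { (int16_t)a, (int16_t)b, (int16_t)a, (int16_t)b };
      int32_t full = 0, half = 0;
      for (int i = 0; i < 4; ++i) {
        full += taps[i] * px[i];
        half += (taps[i] >> 1) * px[i];  // exact halving because taps are even
      }
      assert(((full + (1 << (FB - 1))) >> FB) ==
             ((half + (1 << (FB - 2))) >> (FB - 1)));
    }
  }
  return 0;
}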
+ const int16x4_t x_filter = vshr_n_s16(vld1_s16(x_filter_ptr + 2), 1); + + src += 2; + + do { + uint8x8_t t0 = vld1_u8(src); // a0 a1 a2 a3 a4 a5 a6 a7 + int16x4_t s0 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t0))); + int16x4_t s4 = vget_high_s16(vreinterpretq_s16_u16(vmovl_u8(t0))); + + int16x4_t s1 = vext_s16(s0, s4, 1); // a1 a2 a3 a4 + int16x4_t s2 = vext_s16(s0, s4, 2); // a2 a3 a4 a5 + int16x4_t s3 = vext_s16(s0, s4, 3); // a3 a4 a5 a6 + + uint8x8_t d0 = + convolve4_4_x(s0, s1, s2, s3, x_filter, vget_low_s16(horiz_const)); + + store_u8_4x1(dst, d0); + + src += src_stride; + dst += dst_stride; + } while (--h != 0); + } else { + // Filter values are even so halve to reduce precision requirements. + const int16x8_t x_filter = vshrq_n_s16(vld1q_s16(x_filter_ptr), 1); + +#if AOM_ARCH_AARCH64 + while (h >= 8) { + uint8x8_t t0, t1, t2, t3, t4, t5, t6, t7; + load_u8_8x8(src, src_stride, &t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7); + + transpose_elems_inplace_u8_8x8(&t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7); + int16x8_t s0 = vreinterpretq_s16_u16(vmovl_u8(t0)); + int16x8_t s1 = vreinterpretq_s16_u16(vmovl_u8(t1)); + int16x8_t s2 = vreinterpretq_s16_u16(vmovl_u8(t2)); + int16x8_t s3 = vreinterpretq_s16_u16(vmovl_u8(t3)); + int16x8_t s4 = vreinterpretq_s16_u16(vmovl_u8(t4)); + int16x8_t s5 = vreinterpretq_s16_u16(vmovl_u8(t5)); + int16x8_t s6 = vreinterpretq_s16_u16(vmovl_u8(t6)); + + int width = w; + const uint8_t *s = src + 7; + uint8_t *d = dst; + + __builtin_prefetch(d + 0 * dst_stride); + __builtin_prefetch(d + 1 * dst_stride); + __builtin_prefetch(d + 2 * dst_stride); + __builtin_prefetch(d + 3 * dst_stride); + __builtin_prefetch(d + 4 * dst_stride); + __builtin_prefetch(d + 5 * dst_stride); + __builtin_prefetch(d + 6 * dst_stride); + __builtin_prefetch(d + 7 * dst_stride); + + do { + uint8x8_t t8, t9, t10, t11, t12, t13, t14; + load_u8_8x8(s, src_stride, &t7, &t8, &t9, &t10, &t11, &t12, &t13, &t14); + + transpose_elems_inplace_u8_8x8(&t7, &t8, &t9, &t10, &t11, &t12, &t13, + &t14); + int16x8_t s7 = vreinterpretq_s16_u16(vmovl_u8(t7)); + int16x8_t s8 = vreinterpretq_s16_u16(vmovl_u8(t8)); + int16x8_t s9 = vreinterpretq_s16_u16(vmovl_u8(t9)); + int16x8_t s10 = vreinterpretq_s16_u16(vmovl_u8(t10)); + int16x8_t s11 = vreinterpretq_s16_u16(vmovl_u8(t11)); + int16x8_t s12 = vreinterpretq_s16_u16(vmovl_u8(t12)); + int16x8_t s13 = vreinterpretq_s16_u16(vmovl_u8(t13)); + int16x8_t s14 = vreinterpretq_s16_u16(vmovl_u8(t14)); + + uint8x8_t d0 = convolve8_8_x(s0, s1, s2, s3, s4, s5, s6, s7, x_filter, + horiz_const); + uint8x8_t d1 = convolve8_8_x(s1, s2, s3, s4, s5, s6, s7, s8, x_filter, + horiz_const); + uint8x8_t d2 = convolve8_8_x(s2, s3, s4, s5, s6, s7, s8, s9, x_filter, + horiz_const); + uint8x8_t d3 = convolve8_8_x(s3, s4, s5, s6, s7, s8, s9, s10, x_filter, + horiz_const); + uint8x8_t d4 = convolve8_8_x(s4, s5, s6, s7, s8, s9, s10, s11, x_filter, + horiz_const); + uint8x8_t d5 = convolve8_8_x(s5, s6, s7, s8, s9, s10, s11, s12, + x_filter, horiz_const); + uint8x8_t d6 = convolve8_8_x(s6, s7, s8, s9, s10, s11, s12, s13, + x_filter, horiz_const); + uint8x8_t d7 = convolve8_8_x(s7, s8, s9, s10, s11, s12, s13, s14, + x_filter, horiz_const); + + transpose_elems_inplace_u8_8x8(&d0, &d1, &d2, &d3, &d4, &d5, &d6, &d7); + + store_u8_8x8(d, dst_stride, d0, d1, d2, d3, d4, d5, d6, d7); + + s0 = s8; + s1 = s9; + s2 = s10; + s3 = s11; + s4 = s12; + s5 = s13; + s6 = s14; + s += 8; + d += 8; + width -= 8; + } while (width != 0); + src += 8 * src_stride; + dst += 8 * dst_stride; + h -= 8; + } +#endif // 
AOM_ARCH_AARCH64 + + while (h-- != 0) { + uint8x8_t t0 = vld1_u8(src); // a0 a1 a2 a3 a4 a5 a6 a7 + int16x8_t s0 = vreinterpretq_s16_u16(vmovl_u8(t0)); + + int width = w; + const uint8_t *s = src + 8; + uint8_t *d = dst; + + __builtin_prefetch(d); + + do { + uint8x8_t t8 = vld1_u8(s); // a8 a9 a10 a11 a12 a13 a14 a15 + int16x8_t s8 = vreinterpretq_s16_u16(vmovl_u8(t8)); + + int16x8_t s1 = vextq_s16(s0, s8, 1); // a1 a2 a3 a4 a5 a6 a7 a8 + int16x8_t s2 = vextq_s16(s0, s8, 2); // a2 a3 a4 a5 a6 a7 a8 a9 + int16x8_t s3 = vextq_s16(s0, s8, 3); // a3 a4 a5 a6 a7 a8 a9 a10 + int16x8_t s4 = vextq_s16(s0, s8, 4); // a4 a5 a6 a7 a8 a9 a10 a11 + int16x8_t s5 = vextq_s16(s0, s8, 5); // a5 a6 a7 a8 a9 a10 a11 a12 + int16x8_t s6 = vextq_s16(s0, s8, 6); // a6 a7 a8 a9 a10 a11 a12 a13 + int16x8_t s7 = vextq_s16(s0, s8, 7); // a7 a8 a9 a10 a11 a12 a13 a14 + + uint8x8_t d0 = convolve8_8_x(s0, s1, s2, s3, s4, s5, s6, s7, x_filter, + horiz_const); + + vst1_u8(d, d0); + + s0 = s8; + s += 8; + d += 8; + width -= 8; + } while (width != 0); + src += src_stride; + dst += dst_stride; + } + } +} + +static INLINE int16x4_t convolve6_4_y(const int16x4_t s0, const int16x4_t s1, + const int16x4_t s2, const int16x4_t s3, + const int16x4_t s4, const int16x4_t s5, + const int16x8_t y_filter_0_7) { + const int16x4_t y_filter_0_3 = vget_low_s16(y_filter_0_7); + const int16x4_t y_filter_4_7 = vget_high_s16(y_filter_0_7); + + // Filter values at indices 0 and 7 are 0. + int16x4_t sum = vmul_lane_s16(s0, y_filter_0_3, 1); + sum = vmla_lane_s16(sum, s1, y_filter_0_3, 2); + sum = vmla_lane_s16(sum, s2, y_filter_0_3, 3); + sum = vmla_lane_s16(sum, s3, y_filter_4_7, 0); + sum = vmla_lane_s16(sum, s4, y_filter_4_7, 1); + sum = vmla_lane_s16(sum, s5, y_filter_4_7, 2); + + return sum; +} + +static INLINE uint8x8_t convolve6_8_y(const int16x8_t s0, const int16x8_t s1, + const int16x8_t s2, const int16x8_t s3, + const int16x8_t s4, const int16x8_t s5, + const int16x8_t y_filters) { + const int16x4_t y_filter_lo = vget_low_s16(y_filters); + const int16x4_t y_filter_hi = vget_high_s16(y_filters); + + // Filter values at indices 0 and 7 are 0. + int16x8_t sum = vmulq_lane_s16(s0, y_filter_lo, 1); + sum = vmlaq_lane_s16(sum, s1, y_filter_lo, 2); + sum = vmlaq_lane_s16(sum, s2, y_filter_lo, 3); + sum = vmlaq_lane_s16(sum, s3, y_filter_hi, 0); + sum = vmlaq_lane_s16(sum, s4, y_filter_hi, 1); + sum = vmlaq_lane_s16(sum, s5, y_filter_hi, 2); + // We halved the convolution filter values so -1 from the right shift. 
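The convolve6_* helpers above lean on the comment that the filter values at indices 0 and 7 are zero: an 8-tap kernel with zero outer taps yields an identical dot product from six multiplies over indices 1..6. A trivial scalar illustration with a made-up kernel (not a real AV1 filter):

#include <assert.h>
#include <stdint.h>

static int32_t dot(const int16_t *f, const int16_t *s, int from, int to) {
  int32_t sum = 0;
  for (int i = from; i < to; ++i) sum += f[i] * s[i];
  return sum;
}

int main(void) {
  const int16_t f[8] = { 0, 2, -10, 72, 72, -10, 2, 0 };  // made-up, zero outer taps
  const int16_t s[8] = { 13, 40, 250, 7, 99, 3, 180, 66 };
  assert(f[0] == 0 && f[7] == 0);
  assert(dot(f, s, 0, 8) == dot(f, s, 1, 7));  // full 8-tap == 6-tap on 1..6
  return 0;
}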
+ return vqrshrun_n_s16(sum, FILTER_BITS - 1); +} + +static INLINE void convolve_y_sr_6tap_neon(const uint8_t *src_ptr, + int src_stride, uint8_t *dst_ptr, + const int dst_stride, int w, int h, + const int16x8_t y_filter) { + if (w <= 4) { + uint8x8_t t0 = load_unaligned_u8_4x1(src_ptr + 0 * src_stride); + uint8x8_t t1 = load_unaligned_u8_4x1(src_ptr + 1 * src_stride); + uint8x8_t t2 = load_unaligned_u8_4x1(src_ptr + 2 * src_stride); + uint8x8_t t3 = load_unaligned_u8_4x1(src_ptr + 3 * src_stride); + uint8x8_t t4 = load_unaligned_u8_4x1(src_ptr + 4 * src_stride); + + int16x4_t s0 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t0))); + int16x4_t s1 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t1))); + int16x4_t s2 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t2))); + int16x4_t s3 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t3))); + int16x4_t s4 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t4))); + + src_ptr += 5 * src_stride; + + do { +#if AOM_ARCH_AARCH64 + uint8x8_t t5 = load_unaligned_u8_4x1(src_ptr + 0 * src_stride); + uint8x8_t t6 = load_unaligned_u8_4x1(src_ptr + 1 * src_stride); + uint8x8_t t7 = load_unaligned_u8_4x1(src_ptr + 2 * src_stride); + uint8x8_t t8 = load_unaligned_u8_4x1(src_ptr + 3 * src_stride); + + int16x4_t s5 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t5))); + int16x4_t s6 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t6))); + int16x4_t s7 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t7))); + int16x4_t s8 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t8))); + + int16x4_t d0 = convolve6_4_y(s0, s1, s2, s3, s4, s5, y_filter); + int16x4_t d1 = convolve6_4_y(s1, s2, s3, s4, s5, s6, y_filter); + int16x4_t d2 = convolve6_4_y(s2, s3, s4, s5, s6, s7, y_filter); + int16x4_t d3 = convolve6_4_y(s3, s4, s5, s6, s7, s8, y_filter); + + // We halved the convolution filter values so -1 from the right shift. + uint8x8_t d01 = vqrshrun_n_s16(vcombine_s16(d0, d1), FILTER_BITS - 1); + uint8x8_t d23 = vqrshrun_n_s16(vcombine_s16(d2, d3), FILTER_BITS - 1); + + store_u8x4_strided_x2(dst_ptr, dst_stride, d01); + store_u8x4_strided_x2(dst_ptr + 2 * dst_stride, dst_stride, d23); + + s0 = s4; + s1 = s5; + s2 = s6; + s3 = s7; + s4 = s8; + src_ptr += 4 * src_stride; + dst_ptr += 4 * dst_stride; + h -= 4; +#else // !AOM_ARCH_AARCH64 + uint8x8_t t5 = load_unaligned_u8_4x1(src_ptr); + int16x4_t s5 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t5))); + + int16x4_t d0 = convolve6_4_y(s0, s1, s2, s3, s4, s5, y_filter); + // We halved the convolution filter values so -1 from the right shift. 
+ uint8x8_t d01 = + vqrshrun_n_s16(vcombine_s16(d0, vdup_n_s16(0)), FILTER_BITS - 1); + + store_u8_4x1(dst_ptr, d01); + + s0 = s1; + s1 = s2; + s2 = s3; + s3 = s4; + s4 = s5; + src_ptr += src_stride; + dst_ptr += dst_stride; + h--; +#endif // AOM_ARCH_AARCH64 + } while (h != 0); + + } else { + do { + const uint8_t *s = src_ptr; + uint8_t *d = dst_ptr; + int height = h; + + uint8x8_t t0, t1, t2, t3, t4; + load_u8_8x5(s, src_stride, &t0, &t1, &t2, &t3, &t4); + + int16x8_t s0 = vreinterpretq_s16_u16(vmovl_u8(t0)); + int16x8_t s1 = vreinterpretq_s16_u16(vmovl_u8(t1)); + int16x8_t s2 = vreinterpretq_s16_u16(vmovl_u8(t2)); + int16x8_t s3 = vreinterpretq_s16_u16(vmovl_u8(t3)); + int16x8_t s4 = vreinterpretq_s16_u16(vmovl_u8(t4)); + + s += 5 * src_stride; + + do { +#if AOM_ARCH_AARCH64 + uint8x8_t t5, t6, t7, t8; + load_u8_8x4(s, src_stride, &t5, &t6, &t7, &t8); + + int16x8_t s5 = vreinterpretq_s16_u16(vmovl_u8(t5)); + int16x8_t s6 = vreinterpretq_s16_u16(vmovl_u8(t6)); + int16x8_t s7 = vreinterpretq_s16_u16(vmovl_u8(t7)); + int16x8_t s8 = vreinterpretq_s16_u16(vmovl_u8(t8)); + + uint8x8_t d0 = convolve6_8_y(s0, s1, s2, s3, s4, s5, y_filter); + uint8x8_t d1 = convolve6_8_y(s1, s2, s3, s4, s5, s6, y_filter); + uint8x8_t d2 = convolve6_8_y(s2, s3, s4, s5, s6, s7, y_filter); + uint8x8_t d3 = convolve6_8_y(s3, s4, s5, s6, s7, s8, y_filter); + + store_u8_8x4(d, dst_stride, d0, d1, d2, d3); + + s0 = s4; + s1 = s5; + s2 = s6; + s3 = s7; + s4 = s8; + s += 4 * src_stride; + d += 4 * dst_stride; + height -= 4; +#else // !AOM_ARCH_AARCH64 + int16x8_t s5 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(s))); + + uint8x8_t d0 = convolve6_8_y(s0, s1, s2, s3, s4, s5, y_filter); + + vst1_u8(d, d0); + + s0 = s1; + s1 = s2; + s2 = s3; + s3 = s4; + s4 = s5; + s += src_stride; + d += dst_stride; + height--; +#endif // AOM_ARCH_AARCH64 + } while (height != 0); + src_ptr += 8; + dst_ptr += 8; + w -= 8; + } while (w != 0); + } +} + +static INLINE int16x4_t convolve8_4_y(const int16x4_t s0, const int16x4_t s1, + const int16x4_t s2, const int16x4_t s3, + const int16x4_t s4, const int16x4_t s5, + const int16x4_t s6, const int16x4_t s7, + const int16x8_t filter) { + const int16x4_t filter_lo = vget_low_s16(filter); + const int16x4_t filter_hi = vget_high_s16(filter); + + int16x4_t sum = vmul_lane_s16(s0, filter_lo, 0); + sum = vmla_lane_s16(sum, s1, filter_lo, 1); + sum = vmla_lane_s16(sum, s2, filter_lo, 2); + sum = vmla_lane_s16(sum, s3, filter_lo, 3); + sum = vmla_lane_s16(sum, s4, filter_hi, 0); + sum = vmla_lane_s16(sum, s5, filter_hi, 1); + sum = vmla_lane_s16(sum, s6, filter_hi, 2); + sum = vmla_lane_s16(sum, s7, filter_hi, 3); + + return sum; +} + +static INLINE uint8x8_t convolve8_8_y(const int16x8_t s0, const int16x8_t s1, + const int16x8_t s2, const int16x8_t s3, + const int16x8_t s4, const int16x8_t s5, + const int16x8_t s6, const int16x8_t s7, + const int16x8_t filter) { + const int16x4_t filter_lo = vget_low_s16(filter); + const int16x4_t filter_hi = vget_high_s16(filter); + + int16x8_t sum = vmulq_lane_s16(s0, filter_lo, 0); + sum = vmlaq_lane_s16(sum, s1, filter_lo, 1); + sum = vmlaq_lane_s16(sum, s2, filter_lo, 2); + sum = vmlaq_lane_s16(sum, s3, filter_lo, 3); + sum = vmlaq_lane_s16(sum, s4, filter_hi, 0); + sum = vmlaq_lane_s16(sum, s5, filter_hi, 1); + sum = vmlaq_lane_s16(sum, s6, filter_hi, 2); + sum = vmlaq_lane_s16(sum, s7, filter_hi, 3); + + // We halved the convolution filter values so -1 from the right shift. 
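The vertical loops in this file keep a sliding window of previously loaded rows and refresh it with the s0 = s4; s1 = s5; ... assignments, so each block of four output rows costs only four new row loads. A scalar sketch of that access pattern for a hypothetical one-pixel-wide column and the 8-tap, four-rows-per-iteration case (all names below are illustrative):

#include <stdio.h>

int main(void) {
  int src[64];
  for (int i = 0; i < 64; ++i) src[i] = i;  // stand-in source column

  int win[11];  // plays the role of s0..s10 in the NEON loops
  int loads = 0;
  for (int i = 0; i < 7; ++i) win[i] = src[loads++];  // initial context rows

  for (int y = 0; y + 4 <= 16; y += 4) {
    for (int i = 0; i < 4; ++i) win[7 + i] = src[loads++];  // four fresh rows
    // ...four output rows would be produced from win[0..10] here...
    for (int i = 0; i < 7; ++i) win[i] = win[i + 4];  // s0 = s4; s1 = s5; ...
  }
  printf("%d row loads for 16 output rows (vs. 16 * 8 without reuse)\n", loads);
  return 0;
}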
+ return vqrshrun_n_s16(sum, FILTER_BITS - 1); +} + +static INLINE void convolve_y_sr_8tap_neon(const uint8_t *src_ptr, + int src_stride, uint8_t *dst_ptr, + const int dst_stride, int w, int h, + const int16x8_t y_filter) { + if (w <= 4) { + uint8x8_t t0 = load_unaligned_u8_4x1(src_ptr + 0 * src_stride); + uint8x8_t t1 = load_unaligned_u8_4x1(src_ptr + 1 * src_stride); + uint8x8_t t2 = load_unaligned_u8_4x1(src_ptr + 2 * src_stride); + uint8x8_t t3 = load_unaligned_u8_4x1(src_ptr + 3 * src_stride); + uint8x8_t t4 = load_unaligned_u8_4x1(src_ptr + 4 * src_stride); + uint8x8_t t5 = load_unaligned_u8_4x1(src_ptr + 5 * src_stride); + uint8x8_t t6 = load_unaligned_u8_4x1(src_ptr + 6 * src_stride); + + int16x4_t s0 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t0))); + int16x4_t s1 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t1))); + int16x4_t s2 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t2))); + int16x4_t s3 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t3))); + int16x4_t s4 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t4))); + int16x4_t s5 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t5))); + int16x4_t s6 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t6))); + + src_ptr += 7 * src_stride; + + do { +#if AOM_ARCH_AARCH64 + uint8x8_t t7 = load_unaligned_u8_4x1(src_ptr + 0 * src_stride); + uint8x8_t t8 = load_unaligned_u8_4x1(src_ptr + 1 * src_stride); + uint8x8_t t9 = load_unaligned_u8_4x1(src_ptr + 2 * src_stride); + uint8x8_t t10 = load_unaligned_u8_4x1(src_ptr + 3 * src_stride); + + int16x4_t s7 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t7))); + int16x4_t s8 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t8))); + int16x4_t s9 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t9))); + int16x4_t s10 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t10))); + + int16x4_t d0 = convolve8_4_y(s0, s1, s2, s3, s4, s5, s6, s7, y_filter); + int16x4_t d1 = convolve8_4_y(s1, s2, s3, s4, s5, s6, s7, s8, y_filter); + int16x4_t d2 = convolve8_4_y(s2, s3, s4, s5, s6, s7, s8, s9, y_filter); + int16x4_t d3 = convolve8_4_y(s3, s4, s5, s6, s7, s8, s9, s10, y_filter); + + // We halved the convolution filter values so -1 from the right shift. + uint8x8_t d01 = vqrshrun_n_s16(vcombine_s16(d0, d1), FILTER_BITS - 1); + uint8x8_t d23 = vqrshrun_n_s16(vcombine_s16(d2, d3), FILTER_BITS - 1); + + store_u8x4_strided_x2(dst_ptr, dst_stride, d01); + store_u8x4_strided_x2(dst_ptr + 2 * dst_stride, dst_stride, d23); + + s0 = s4; + s1 = s5; + s2 = s6; + s3 = s7; + s4 = s8; + s5 = s9; + s6 = s10; + src_ptr += 4 * src_stride; + dst_ptr += 4 * dst_stride; + h -= 4; +#else // !AOM_ARCH_AARCH64 + uint8x8_t t7 = load_unaligned_u8_4x1(src_ptr); + int16x4_t s7 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(t7))); + + int16x4_t d0 = convolve8_4_y(s0, s1, s2, s3, s4, s5, s6, s7, y_filter); + // We halved the convolution filter values so -1 from the right shift. 
+ uint8x8_t d01 = + vqrshrun_n_s16(vcombine_s16(d0, vdup_n_s16(0)), FILTER_BITS - 1); + + store_u8_4x1(dst_ptr, d01); + + s0 = s1; + s1 = s2; + s2 = s3; + s3 = s4; + s4 = s5; + s5 = s6; + s6 = s7; + src_ptr += src_stride; + dst_ptr += dst_stride; + h--; +#endif // AOM_ARCH_AARCH64 + } while (h != 0); + } else { + do { + const uint8_t *s = src_ptr; + uint8_t *d = dst_ptr; + int height = h; + + uint8x8_t t0, t1, t2, t3, t4, t5, t6; + load_u8_8x7(s, src_stride, &t0, &t1, &t2, &t3, &t4, &t5, &t6); + + int16x8_t s0 = vreinterpretq_s16_u16(vmovl_u8(t0)); + int16x8_t s1 = vreinterpretq_s16_u16(vmovl_u8(t1)); + int16x8_t s2 = vreinterpretq_s16_u16(vmovl_u8(t2)); + int16x8_t s3 = vreinterpretq_s16_u16(vmovl_u8(t3)); + int16x8_t s4 = vreinterpretq_s16_u16(vmovl_u8(t4)); + int16x8_t s5 = vreinterpretq_s16_u16(vmovl_u8(t5)); + int16x8_t s6 = vreinterpretq_s16_u16(vmovl_u8(t6)); + + s += 7 * src_stride; + + do { +#if AOM_ARCH_AARCH64 + uint8x8_t t7, t8, t9, t10; + load_u8_8x4(s, src_stride, &t7, &t8, &t9, &t10); + + int16x8_t s7 = vreinterpretq_s16_u16(vmovl_u8(t7)); + int16x8_t s8 = vreinterpretq_s16_u16(vmovl_u8(t8)); + int16x8_t s9 = vreinterpretq_s16_u16(vmovl_u8(t9)); + int16x8_t s10 = vreinterpretq_s16_u16(vmovl_u8(t10)); + + uint8x8_t d0 = convolve8_8_y(s0, s1, s2, s3, s4, s5, s6, s7, y_filter); + uint8x8_t d1 = convolve8_8_y(s1, s2, s3, s4, s5, s6, s7, s8, y_filter); + uint8x8_t d2 = convolve8_8_y(s2, s3, s4, s5, s6, s7, s8, s9, y_filter); + uint8x8_t d3 = convolve8_8_y(s3, s4, s5, s6, s7, s8, s9, s10, y_filter); + + store_u8_8x4(d, dst_stride, d0, d1, d2, d3); + + s0 = s4; + s1 = s5; + s2 = s6; + s3 = s7; + s4 = s8; + s5 = s9; + s6 = s10; + s += 4 * src_stride; + d += 4 * dst_stride; + height -= 4; +#else // !AOM_ARCH_AARCH64 + int16x8_t s7 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(s))); + + uint8x8_t d0 = convolve8_8_y(s0, s1, s2, s3, s4, s5, s6, s7, y_filter); + + vst1_u8(d, d0); + + s0 = s1; + s1 = s2; + s2 = s3; + s3 = s4; + s4 = s5; + s5 = s6; + s6 = s7; + s += src_stride; + d += dst_stride; + height--; +#endif // AOM_ARCH_AARCH64 + } while (height != 0); + src_ptr += 8; + dst_ptr += 8; + w -= 8; + } while (w != 0); + } +} + +static INLINE int16x4_t convolve12_4_y(const int16x4_t s0, const int16x4_t s1, + const int16x4_t s2, const int16x4_t s3, + const int16x4_t s4, const int16x4_t s5, + const int16x4_t s6, const int16x4_t s7, + const int16x4_t s8, const int16x4_t s9, + const int16x4_t s10, const int16x4_t s11, + const int16x8_t y_filter_0_7, + const int16x4_t y_filter_8_11) { + const int16x4_t y_filter_0_3 = vget_low_s16(y_filter_0_7); + const int16x4_t y_filter_4_7 = vget_high_s16(y_filter_0_7); + int16x4_t sum; + + sum = vmul_lane_s16(s0, y_filter_0_3, 0); + sum = vmla_lane_s16(sum, s1, y_filter_0_3, 1); + sum = vmla_lane_s16(sum, s2, y_filter_0_3, 2); + sum = vmla_lane_s16(sum, s3, y_filter_0_3, 3); + sum = vmla_lane_s16(sum, s4, y_filter_4_7, 0); + + sum = vmla_lane_s16(sum, s7, y_filter_4_7, 3); + sum = vmla_lane_s16(sum, s8, y_filter_8_11, 0); + sum = vmla_lane_s16(sum, s9, y_filter_8_11, 1); + sum = vmla_lane_s16(sum, s10, y_filter_8_11, 2); + sum = vmla_lane_s16(sum, s11, y_filter_8_11, 3); + + // Saturating addition is required for the largest filter taps to avoid + // overflow (while staying in 16-bit elements.) 
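The saturating-addition comment above is about keeping the 12-tap accumulation inside int16: the two largest centre-tap terms are folded in last with the vqadd just below, so a worst-case input clamps at INT16_MAX instead of wrapping to a large negative value. A scalar model of the difference, with made-up magnitudes:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static int16_t sat_add_s16(int16_t a, int16_t b) {  // per-lane model of vqadd_s16
  int32_t s = (int32_t)a + b;
  if (s > INT16_MAX) s = INT16_MAX;
  if (s < INT16_MIN) s = INT16_MIN;
  return (int16_t)s;
}

int main(void) {
  const int16_t partial = 30000;      // made-up partial sum near the int16 ceiling
  const int16_t centre_term = 15000;  // made-up centre-tap contribution
  int16_t wrapped = (int16_t)((int32_t)partial + centre_term);  // typically wraps negative
  int16_t clamped = sat_add_s16(partial, centre_term);
  printf("wrapping add: %d, saturating add: %d\n", wrapped, clamped);
  assert(clamped == INT16_MAX);
  return 0;
}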
+ sum = vqadd_s16(sum, vmul_lane_s16(s5, y_filter_4_7, 1)); + sum = vqadd_s16(sum, vmul_lane_s16(s6, y_filter_4_7, 2)); + + return sum; +} + +static INLINE uint8x8_t convolve12_8_y(const int16x8_t s0, const int16x8_t s1, + const int16x8_t s2, const int16x8_t s3, + const int16x8_t s4, const int16x8_t s5, + const int16x8_t s6, const int16x8_t s7, + const int16x8_t s8, const int16x8_t s9, + const int16x8_t s10, const int16x8_t s11, + const int16x8_t y_filter_0_7, + const int16x4_t y_filter_8_11) { + const int16x4_t y_filter_0_3 = vget_low_s16(y_filter_0_7); + const int16x4_t y_filter_4_7 = vget_high_s16(y_filter_0_7); + int16x8_t sum; + + sum = vmulq_lane_s16(s0, y_filter_0_3, 0); + sum = vmlaq_lane_s16(sum, s1, y_filter_0_3, 1); + sum = vmlaq_lane_s16(sum, s2, y_filter_0_3, 2); + sum = vmlaq_lane_s16(sum, s3, y_filter_0_3, 3); + sum = vmlaq_lane_s16(sum, s4, y_filter_4_7, 0); + + sum = vmlaq_lane_s16(sum, s7, y_filter_4_7, 3); + sum = vmlaq_lane_s16(sum, s8, y_filter_8_11, 0); + sum = vmlaq_lane_s16(sum, s9, y_filter_8_11, 1); + sum = vmlaq_lane_s16(sum, s10, y_filter_8_11, 2); + sum = vmlaq_lane_s16(sum, s11, y_filter_8_11, 3); + + // Saturating addition is required for the largest filter taps to avoid + // overflow (while staying in 16-bit elements.) + sum = vqaddq_s16(sum, vmulq_lane_s16(s5, y_filter_4_7, 1)); + sum = vqaddq_s16(sum, vmulq_lane_s16(s6, y_filter_4_7, 2)); + + return vqrshrun_n_s16(sum, FILTER_BITS); +} + +static INLINE void convolve_y_sr_12tap_neon(const uint8_t *src_ptr, + int src_stride, uint8_t *dst_ptr, + int dst_stride, int w, int h, + const int16_t *y_filter_ptr) { + const int16x8_t y_filter_0_7 = vld1q_s16(y_filter_ptr); + const int16x4_t y_filter_8_11 = vld1_s16(y_filter_ptr + 8); + + if (w <= 4) { + uint8x8_t t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10; + load_u8_8x11(src_ptr, src_stride, &t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7, + &t8, &t9, &t10); + int16x4_t s0 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t0))); + int16x4_t s1 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t1))); + int16x4_t s2 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t2))); + int16x4_t s3 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t3))); + int16x4_t s4 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t4))); + int16x4_t s5 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t5))); + int16x4_t s6 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t6))); + int16x4_t s7 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t7))); + int16x4_t s8 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t8))); + int16x4_t s9 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t9))); + int16x4_t s10 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t10))); + + src_ptr += 11 * src_stride; + + do { + uint8x8_t t11, t12, t13, t14; + load_u8_8x4(src_ptr, src_stride, &t11, &t12, &t13, &t14); + + int16x4_t s11 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t11))); + int16x4_t s12 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t12))); + int16x4_t s13 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t13))); + int16x4_t s14 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t14))); + + int16x4_t d0 = convolve12_4_y(s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, + s11, y_filter_0_7, y_filter_8_11); + int16x4_t d1 = convolve12_4_y(s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, + s11, s12, y_filter_0_7, y_filter_8_11); + int16x4_t d2 = convolve12_4_y(s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, + s12, s13, y_filter_0_7, y_filter_8_11); + int16x4_t d3 = convolve12_4_y(s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, + s13, s14, y_filter_0_7, y_filter_8_11); + + uint8x8_t d01 = 
vqrshrun_n_s16(vcombine_s16(d0, d1), FILTER_BITS); + uint8x8_t d23 = vqrshrun_n_s16(vcombine_s16(d2, d3), FILTER_BITS); + + store_u8x4_strided_x2(dst_ptr, dst_stride, d01); + store_u8x4_strided_x2(dst_ptr + 2 * dst_stride, dst_stride, d23); + + s0 = s4; + s1 = s5; + s2 = s6; + s3 = s7; + s4 = s8; + s5 = s9; + s6 = s10; + s7 = s11; + s8 = s12; + s9 = s13; + s10 = s14; + src_ptr += 4 * src_stride; + dst_ptr += 4 * dst_stride; + h -= 4; + } while (h != 0); + + } else { + do { + const uint8_t *s = src_ptr; + uint8_t *d = dst_ptr; + int height = h; + + uint8x8_t t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10; + load_u8_8x11(s, src_stride, &t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7, &t8, + &t9, &t10); + int16x8_t s0 = vreinterpretq_s16_u16(vmovl_u8(t0)); + int16x8_t s1 = vreinterpretq_s16_u16(vmovl_u8(t1)); + int16x8_t s2 = vreinterpretq_s16_u16(vmovl_u8(t2)); + int16x8_t s3 = vreinterpretq_s16_u16(vmovl_u8(t3)); + int16x8_t s4 = vreinterpretq_s16_u16(vmovl_u8(t4)); + int16x8_t s5 = vreinterpretq_s16_u16(vmovl_u8(t5)); + int16x8_t s6 = vreinterpretq_s16_u16(vmovl_u8(t6)); + int16x8_t s7 = vreinterpretq_s16_u16(vmovl_u8(t7)); + int16x8_t s8 = vreinterpretq_s16_u16(vmovl_u8(t8)); + int16x8_t s9 = vreinterpretq_s16_u16(vmovl_u8(t9)); + int16x8_t s10 = vreinterpretq_s16_u16(vmovl_u8(t10)); + + s += 11 * src_stride; + + do { + uint8x8_t t11, t12, t13, t14; + load_u8_8x4(s, src_stride, &t11, &t12, &t13, &t14); + + int16x8_t s11 = vreinterpretq_s16_u16(vmovl_u8(t11)); + int16x8_t s12 = vreinterpretq_s16_u16(vmovl_u8(t12)); + int16x8_t s13 = vreinterpretq_s16_u16(vmovl_u8(t13)); + int16x8_t s14 = vreinterpretq_s16_u16(vmovl_u8(t14)); + + uint8x8_t d0 = convolve12_8_y(s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, + s10, s11, y_filter_0_7, y_filter_8_11); + uint8x8_t d1 = convolve12_8_y(s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, + s11, s12, y_filter_0_7, y_filter_8_11); + uint8x8_t d2 = convolve12_8_y(s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, + s12, s13, y_filter_0_7, y_filter_8_11); + uint8x8_t d3 = convolve12_8_y(s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, + s13, s14, y_filter_0_7, y_filter_8_11); + + store_u8_8x4(d, dst_stride, d0, d1, d2, d3); + + s0 = s4; + s1 = s5; + s2 = s6; + s3 = s7; + s4 = s8; + s5 = s9; + s6 = s10; + s7 = s11; + s8 = s12; + s9 = s13; + s10 = s14; + s += 4 * src_stride; + d += 4 * dst_stride; + height -= 4; + } while (height != 0); + src_ptr += 8; + dst_ptr += 8; + w -= 8; + } while (w != 0); + } +} + +void av1_convolve_y_sr_neon(const uint8_t *src, int src_stride, uint8_t *dst, + int dst_stride, int w, int h, + const InterpFilterParams *filter_params_y, + const int subpel_y_qn) { + if (w == 2 || h == 2) { + av1_convolve_y_sr_c(src, src_stride, dst, dst_stride, w, h, filter_params_y, + subpel_y_qn); + return; + } + + const int y_filter_taps = get_filter_tap(filter_params_y, subpel_y_qn); + const int clamped_y_taps = y_filter_taps < 6 ? 6 : y_filter_taps; + const int vert_offset = clamped_y_taps / 2 - 1; + + src -= vert_offset * src_stride; + + const int16_t *y_filter_ptr = av1_get_interp_filter_subpel_kernel( + filter_params_y, subpel_y_qn & SUBPEL_MASK); + + if (y_filter_taps > 8) { + convolve_y_sr_12tap_neon(src, src_stride, dst, dst_stride, w, h, + y_filter_ptr); + return; + } + + // Filter values are even so halve to reduce precision requirements. 
+ const int16x8_t y_filter = vshrq_n_s16(vld1q_s16(y_filter_ptr), 1); + + if (y_filter_taps < 8) { + convolve_y_sr_6tap_neon(src, src_stride, dst, dst_stride, w, h, y_filter); + } else { + convolve_y_sr_8tap_neon(src, src_stride, dst, dst_stride, w, h, y_filter); + } +} + +static INLINE int16x4_t +convolve12_4_2d_h(const int16x4_t s0, const int16x4_t s1, const int16x4_t s2, + const int16x4_t s3, const int16x4_t s4, const int16x4_t s5, + const int16x4_t s6, const int16x4_t s7, const int16x4_t s8, + const int16x4_t s9, const int16x4_t s10, const int16x4_t s11, + const int16x8_t x_filter_0_7, const int16x4_t x_filter_8_11, + const int32x4_t horiz_const) { + const int16x4_t x_filter_0_3 = vget_low_s16(x_filter_0_7); + const int16x4_t x_filter_4_7 = vget_high_s16(x_filter_0_7); + + int32x4_t sum = horiz_const; + sum = vmlal_lane_s16(sum, s0, x_filter_0_3, 0); + sum = vmlal_lane_s16(sum, s1, x_filter_0_3, 1); + sum = vmlal_lane_s16(sum, s2, x_filter_0_3, 2); + sum = vmlal_lane_s16(sum, s3, x_filter_0_3, 3); + sum = vmlal_lane_s16(sum, s4, x_filter_4_7, 0); + sum = vmlal_lane_s16(sum, s5, x_filter_4_7, 1); + sum = vmlal_lane_s16(sum, s6, x_filter_4_7, 2); + sum = vmlal_lane_s16(sum, s7, x_filter_4_7, 3); + sum = vmlal_lane_s16(sum, s8, x_filter_8_11, 0); + sum = vmlal_lane_s16(sum, s9, x_filter_8_11, 1); + sum = vmlal_lane_s16(sum, s10, x_filter_8_11, 2); + sum = vmlal_lane_s16(sum, s11, x_filter_8_11, 3); + + return vshrn_n_s32(sum, ROUND0_BITS); +} + +static INLINE void convolve_2d_sr_horiz_12tap_neon( + const uint8_t *src_ptr, int src_stride, int16_t *dst_ptr, + const int dst_stride, int w, int h, const int16x8_t x_filter_0_7, + const int16x4_t x_filter_8_11) { + const int bd = 8; + // A shim of 1 << (ROUND0_BITS - 1) enables us to use non-rounding shifts - + // which are generally faster than rounding shifts on modern CPUs. 
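The 2D horizontal pass that starts here folds two offsets into the horiz_const defined just below: the 1 << (ROUND0_BITS - 1) rounding term described above, plus a 1 << (bd + FILTER_BITS - 1) bias that keeps the int16 intermediate values away from the negative range until the vertical pass subtracts sub_const again. A quick numeric check that the bias comes back out as exactly 1 << (bd - 1), assuming bd = 8, FILTER_BITS = 7, ROUND0_BITS = 3 and a vertical filter whose taps sum to 1 << FILTER_BITS (all stand-in values):

#include <assert.h>
#include <stdint.h>

int main(void) {
  const int bd = 8, FB = 7, R0 = 3;                     // stand-in values
  int32_t bias = 1 << (bd + FB - 1);                    // added before the horizontal shift
  int32_t in_im_block = bias >> R0;                     // carried in the int16 im buffer
  int64_t after_y_filter = (int64_t)in_im_block << FB;  // vertical taps sum to 1 << FB
  int32_t residual = (int32_t)(after_y_filter >> (2 * FB - R0));
  assert(residual == (1 << (bd - 1)));                  // exactly what sub_const removes
  return 0;
}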
+ const int32x4_t horiz_const = + vdupq_n_s32((1 << (bd + FILTER_BITS - 1)) + (1 << (ROUND0_BITS - 1))); + +#if AOM_ARCH_AARCH64 + do { + const uint8_t *s = src_ptr; + int16_t *d = dst_ptr; + int width = w; + + uint8x8_t t0, t1, t2, t3; + load_u8_8x4(s, src_stride, &t0, &t1, &t2, &t3); + transpose_elems_inplace_u8_8x4(&t0, &t1, &t2, &t3); + + int16x4_t s0 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t0))); + int16x4_t s1 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t1))); + int16x4_t s2 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t2))); + int16x4_t s3 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t3))); + int16x4_t s4 = vget_high_s16(vreinterpretq_s16_u16(vmovl_u8(t0))); + int16x4_t s5 = vget_high_s16(vreinterpretq_s16_u16(vmovl_u8(t1))); + int16x4_t s6 = vget_high_s16(vreinterpretq_s16_u16(vmovl_u8(t2))); + int16x4_t s7 = vget_high_s16(vreinterpretq_s16_u16(vmovl_u8(t3))); + + load_u8_8x4(s + 8, src_stride, &t0, &t1, &t2, &t3); + transpose_elems_inplace_u8_8x4(&t0, &t1, &t2, &t3); + + int16x4_t s8 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t0))); + int16x4_t s9 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t1))); + int16x4_t s10 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t2))); + + s += 11; + + do { + load_u8_8x4(s, src_stride, &t0, &t1, &t2, &t3); + transpose_elems_inplace_u8_8x4(&t0, &t1, &t2, &t3); + + int16x4_t s11 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t0))); + int16x4_t s12 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t1))); + int16x4_t s13 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t2))); + int16x4_t s14 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t3))); + + int16x4_t d0 = + convolve12_4_2d_h(s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, + x_filter_0_7, x_filter_8_11, horiz_const); + int16x4_t d1 = + convolve12_4_2d_h(s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, + x_filter_0_7, x_filter_8_11, horiz_const); + int16x4_t d2 = + convolve12_4_2d_h(s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, + x_filter_0_7, x_filter_8_11, horiz_const); + int16x4_t d3 = + convolve12_4_2d_h(s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, + x_filter_0_7, x_filter_8_11, horiz_const); + + transpose_elems_inplace_s16_4x4(&d0, &d1, &d2, &d3); + store_s16_4x4(d, dst_stride, d0, d1, d2, d3); + + s0 = s4; + s1 = s5; + s2 = s6; + s3 = s7; + s4 = s8; + s5 = s9; + s6 = s10; + s7 = s11; + s8 = s12; + s9 = s13; + s10 = s14; + s += 4; + d += 4; + width -= 4; + } while (width != 0); + src_ptr += 4 * src_stride; + dst_ptr += 4 * dst_stride; + h -= 4; + } while (h > 4); +#endif // AOM_ARCH_AARCH64 + + do { + const uint8_t *s = src_ptr; + int16_t *d = dst_ptr; + int width = w; + + do { + uint8x16_t t0 = vld1q_u8(s); + int16x8_t tt0 = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(t0))); + int16x8_t tt1 = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(t0))); + + int16x4_t s0 = vget_low_s16(tt0); + int16x4_t s4 = vget_high_s16(tt0); + int16x4_t s8 = vget_low_s16(tt1); + int16x4_t s12 = vget_high_s16(tt1); + + int16x4_t s1 = vext_s16(s0, s4, 1); // a1 a2 a3 a4 + int16x4_t s2 = vext_s16(s0, s4, 2); // a2 a3 a4 a5 + int16x4_t s3 = vext_s16(s0, s4, 3); // a3 a4 a5 a6 + int16x4_t s5 = vext_s16(s4, s8, 1); // a5 a6 a7 a8 + int16x4_t s6 = vext_s16(s4, s8, 2); // a6 a7 a8 a9 + int16x4_t s7 = vext_s16(s4, s8, 3); // a7 a8 a9 a10 + int16x4_t s9 = vext_s16(s8, s12, 1); // a9 a10 a11 a12 + int16x4_t s10 = vext_s16(s8, s12, 2); // a10 a11 a12 a13 + int16x4_t s11 = vext_s16(s8, s12, 3); // a11 a12 a13 a14 + + int16x4_t d0 = + convolve12_4_2d_h(s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, + 
x_filter_0_7, x_filter_8_11, horiz_const); + vst1_s16(d, d0); + + s += 4; + d += 4; + width -= 4; + } while (width != 0); + src_ptr += src_stride; + dst_ptr += dst_stride; + } while (--h != 0); +} + +static INLINE int16x4_t convolve4_4_2d_h(const int16x4_t s0, const int16x4_t s1, + const int16x4_t s2, const int16x4_t s3, + const int16x4_t filter, + const int16x4_t horiz_const) { + int16x4_t sum = horiz_const; + sum = vmla_lane_s16(sum, s0, filter, 0); + sum = vmla_lane_s16(sum, s1, filter, 1); + sum = vmla_lane_s16(sum, s2, filter, 2); + sum = vmla_lane_s16(sum, s3, filter, 3); + + // We halved the convolution filter values so -1 from the right shift. + return vshr_n_s16(sum, ROUND0_BITS - 1); +} + +static INLINE int16x8_t convolve8_8_2d_h(const int16x8_t s0, const int16x8_t s1, + const int16x8_t s2, const int16x8_t s3, + const int16x8_t s4, const int16x8_t s5, + const int16x8_t s6, const int16x8_t s7, + const int16x8_t filter, + const int16x8_t horiz_const) { + const int16x4_t filter_lo = vget_low_s16(filter); + const int16x4_t filter_hi = vget_high_s16(filter); + + int16x8_t sum = horiz_const; + sum = vmlaq_lane_s16(sum, s0, filter_lo, 0); + sum = vmlaq_lane_s16(sum, s1, filter_lo, 1); + sum = vmlaq_lane_s16(sum, s2, filter_lo, 2); + sum = vmlaq_lane_s16(sum, s3, filter_lo, 3); + sum = vmlaq_lane_s16(sum, s4, filter_hi, 0); + sum = vmlaq_lane_s16(sum, s5, filter_hi, 1); + sum = vmlaq_lane_s16(sum, s6, filter_hi, 2); + sum = vmlaq_lane_s16(sum, s7, filter_hi, 3); + + // We halved the convolution filter values so -1 from the right shift. + return vshrq_n_s16(sum, ROUND0_BITS - 1); +} + +static INLINE void convolve_2d_sr_horiz_neon(const uint8_t *src, int src_stride, + int16_t *im_block, int im_stride, + int w, int im_h, + const int16_t *x_filter_ptr) { + const int bd = 8; + + const uint8_t *src_ptr = src; + int16_t *dst_ptr = im_block; + int dst_stride = im_stride; + int height = im_h; + + if (w <= 4) { + // A shim of 1 << ((ROUND0_BITS - 1) - 1) enables us to use non-rounding + // shifts - which are generally faster than rounding shifts on modern CPUs. + // (The extra -1 is needed because we halved the filter values.) + const int16x4_t horiz_const = vdup_n_s16((1 << (bd + FILTER_BITS - 2)) + + (1 << ((ROUND0_BITS - 1) - 1))); + // 4-tap filters are used for blocks having width <= 4. + // Filter values are even, so halve to reduce intermediate precision reqs. + const int16x4_t x_filter = vshr_n_s16(vld1_s16(x_filter_ptr + 2), 1); + + src_ptr += 2; + + do { + uint8x8_t t0 = vld1_u8(src_ptr); // a0 a1 a2 a3 a4 a5 a6 a7 + int16x4_t s0 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t0))); + int16x4_t s4 = vget_high_s16(vreinterpretq_s16_u16(vmovl_u8(t0))); + + int16x4_t s1 = vext_s16(s0, s4, 1); // a1 a2 a3 a4 + int16x4_t s2 = vext_s16(s0, s4, 2); // a2 a3 a4 a5 + int16x4_t s3 = vext_s16(s0, s4, 3); // a3 a4 a5 a6 + + int16x4_t d0 = convolve4_4_2d_h(s0, s1, s2, s3, x_filter, horiz_const); + + vst1_s16(dst_ptr, d0); + + src_ptr += src_stride; + dst_ptr += dst_stride; + } while (--height != 0); + } else { + // A shim of 1 << ((ROUND0_BITS - 1) - 1) enables us to use non-rounding + // shifts - which are generally faster than rounding shifts on modern CPUs. + // (The extra -1 is needed because we halved the filter values.) + const int16x8_t horiz_const = vdupq_n_s16((1 << (bd + FILTER_BITS - 2)) + + (1 << ((ROUND0_BITS - 1) - 1))); + // Filter values are even, so halve to reduce intermediate precision reqs. 
+ const int16x8_t x_filter = vshrq_n_s16(vld1q_s16(x_filter_ptr), 1); + +#if AOM_ARCH_AARCH64 + while (height > 8) { + const uint8_t *s = src_ptr; + int16_t *d = dst_ptr; + int width = w; + + uint8x8_t t0, t1, t2, t3, t4, t5, t6, t7; + load_u8_8x8(s, src_stride, &t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7); + transpose_elems_inplace_u8_8x8(&t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7); + + int16x8_t s0 = vreinterpretq_s16_u16(vmovl_u8(t0)); + int16x8_t s1 = vreinterpretq_s16_u16(vmovl_u8(t1)); + int16x8_t s2 = vreinterpretq_s16_u16(vmovl_u8(t2)); + int16x8_t s3 = vreinterpretq_s16_u16(vmovl_u8(t3)); + int16x8_t s4 = vreinterpretq_s16_u16(vmovl_u8(t4)); + int16x8_t s5 = vreinterpretq_s16_u16(vmovl_u8(t5)); + int16x8_t s6 = vreinterpretq_s16_u16(vmovl_u8(t6)); + + s += 7; + + do { + load_u8_8x8(s, src_stride, &t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7); + + transpose_elems_inplace_u8_8x8(&t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7); + + int16x8_t s7 = vreinterpretq_s16_u16(vmovl_u8(t0)); + int16x8_t s8 = vreinterpretq_s16_u16(vmovl_u8(t1)); + int16x8_t s9 = vreinterpretq_s16_u16(vmovl_u8(t2)); + int16x8_t s10 = vreinterpretq_s16_u16(vmovl_u8(t3)); + int16x8_t s11 = vreinterpretq_s16_u16(vmovl_u8(t4)); + int16x8_t s12 = vreinterpretq_s16_u16(vmovl_u8(t5)); + int16x8_t s13 = vreinterpretq_s16_u16(vmovl_u8(t6)); + int16x8_t s14 = vreinterpretq_s16_u16(vmovl_u8(t7)); + + int16x8_t d0 = convolve8_8_2d_h(s0, s1, s2, s3, s4, s5, s6, s7, + x_filter, horiz_const); + int16x8_t d1 = convolve8_8_2d_h(s1, s2, s3, s4, s5, s6, s7, s8, + x_filter, horiz_const); + int16x8_t d2 = convolve8_8_2d_h(s2, s3, s4, s5, s6, s7, s8, s9, + x_filter, horiz_const); + int16x8_t d3 = convolve8_8_2d_h(s3, s4, s5, s6, s7, s8, s9, s10, + x_filter, horiz_const); + int16x8_t d4 = convolve8_8_2d_h(s4, s5, s6, s7, s8, s9, s10, s11, + x_filter, horiz_const); + int16x8_t d5 = convolve8_8_2d_h(s5, s6, s7, s8, s9, s10, s11, s12, + x_filter, horiz_const); + int16x8_t d6 = convolve8_8_2d_h(s6, s7, s8, s9, s10, s11, s12, s13, + x_filter, horiz_const); + int16x8_t d7 = convolve8_8_2d_h(s7, s8, s9, s10, s11, s12, s13, s14, + x_filter, horiz_const); + + transpose_elems_inplace_s16_8x8(&d0, &d1, &d2, &d3, &d4, &d5, &d6, &d7); + + store_s16_8x8(d, dst_stride, d0, d1, d2, d3, d4, d5, d6, d7); + + s0 = s8; + s1 = s9; + s2 = s10; + s3 = s11; + s4 = s12; + s5 = s13; + s6 = s14; + s += 8; + d += 8; + width -= 8; + } while (width != 0); + src_ptr += 8 * src_stride; + dst_ptr += 8 * dst_stride; + height -= 8; + } +#endif // AOM_ARCH_AARCH64 + + do { + const uint8_t *s = src_ptr; + int16_t *d = dst_ptr; + int width = w; + + uint8x8_t t0 = vld1_u8(s); // a0 a1 a2 a3 a4 a5 a6 a7 + int16x8_t s0 = vreinterpretq_s16_u16(vmovl_u8(t0)); + + do { + uint8x8_t t1 = vld1_u8(s + 8); // a8 a9 a10 a11 a12 a13 a14 a15 + int16x8_t s8 = vreinterpretq_s16_u16(vmovl_u8(t1)); + + int16x8_t s1 = vextq_s16(s0, s8, 1); // a1 a2 a3 a4 a5 a6 a7 a8 + int16x8_t s2 = vextq_s16(s0, s8, 2); // a2 a3 a4 a5 a6 a7 a8 a9 + int16x8_t s3 = vextq_s16(s0, s8, 3); // a3 a4 a5 a6 a7 a8 a9 a10 + int16x8_t s4 = vextq_s16(s0, s8, 4); // a4 a5 a6 a7 a8 a9 a10 a11 + int16x8_t s5 = vextq_s16(s0, s8, 5); // a5 a6 a7 a8 a9 a10 a11 a12 + int16x8_t s6 = vextq_s16(s0, s8, 6); // a6 a7 a8 a9 a10 a11 a12 a13 + int16x8_t s7 = vextq_s16(s0, s8, 7); // a7 a8 a9 a10 a11 a12 a13 a14 + + int16x8_t d0 = convolve8_8_2d_h(s0, s1, s2, s3, s4, s5, s6, s7, + x_filter, horiz_const); + + vst1q_s16(d, d0); + + s0 = s8; + s += 8; + d += 8; + width -= 8; + } while (width != 0); + src_ptr += src_stride; + dst_ptr += dst_stride; + } while 
(--height != 0); + } +} + +void av1_convolve_2d_sr_neon(const uint8_t *src, int src_stride, uint8_t *dst, + int dst_stride, int w, int h, + const InterpFilterParams *filter_params_x, + const InterpFilterParams *filter_params_y, + const int subpel_x_qn, const int subpel_y_qn, + ConvolveParams *conv_params) { + if (w == 2 || h == 2) { + av1_convolve_2d_sr_c(src, src_stride, dst, dst_stride, w, h, + filter_params_x, filter_params_y, subpel_x_qn, + subpel_y_qn, conv_params); + return; + } + + const int y_filter_taps = get_filter_tap(filter_params_y, subpel_y_qn); + const int clamped_y_taps = y_filter_taps < 6 ? 6 : y_filter_taps; + const int im_h = h + clamped_y_taps - 1; + const int im_stride = MAX_SB_SIZE; + const int vert_offset = clamped_y_taps / 2 - 1; + const int horiz_offset = filter_params_x->taps / 2 - 1; + const uint8_t *src_ptr = src - vert_offset * src_stride - horiz_offset; + + const int16_t *x_filter_ptr = av1_get_interp_filter_subpel_kernel( + filter_params_x, subpel_x_qn & SUBPEL_MASK); + const int16_t *y_filter_ptr = av1_get_interp_filter_subpel_kernel( + filter_params_y, subpel_y_qn & SUBPEL_MASK); + + if (filter_params_x->taps > 8) { + DECLARE_ALIGNED(16, int16_t, + im_block[(MAX_SB_SIZE + MAX_FILTER_TAP - 1) * MAX_SB_SIZE]); + + const int16x8_t x_filter_0_7 = vld1q_s16(x_filter_ptr); + const int16x4_t x_filter_8_11 = vld1_s16(x_filter_ptr + 8); + const int16x8_t y_filter_0_7 = vld1q_s16(y_filter_ptr); + const int16x4_t y_filter_8_11 = vld1_s16(y_filter_ptr + 8); + + convolve_2d_sr_horiz_12tap_neon(src_ptr, src_stride, im_block, im_stride, w, + im_h, x_filter_0_7, x_filter_8_11); + + convolve_2d_sr_vert_12tap_neon(im_block, im_stride, dst, dst_stride, w, h, + y_filter_0_7, y_filter_8_11); + } else { + DECLARE_ALIGNED(16, int16_t, + im_block[(MAX_SB_SIZE + SUBPEL_TAPS - 1) * MAX_SB_SIZE]); + + convolve_2d_sr_horiz_neon(src_ptr, src_stride, im_block, im_stride, w, im_h, + x_filter_ptr); + + const int16x8_t y_filter = vld1q_s16(y_filter_ptr); + + if (clamped_y_taps <= 6) { + convolve_2d_sr_vert_6tap_neon(im_block, im_stride, dst, dst_stride, w, h, + y_filter); + } else { + convolve_2d_sr_vert_8tap_neon(im_block, im_stride, dst, dst_stride, w, h, + y_filter); + } + } +} + +void av1_convolve_x_sr_intrabc_neon(const uint8_t *src, int src_stride, + uint8_t *dst, int dst_stride, int w, int h, + const InterpFilterParams *filter_params_x, + const int subpel_x_qn, + ConvolveParams *conv_params) { + assert(subpel_x_qn == 8); + assert(filter_params_x->taps == 2); + assert((conv_params->round_0 + conv_params->round_1) == 2 * FILTER_BITS); + (void)filter_params_x; + (void)subpel_x_qn; + (void)conv_params; + + if (w <= 4) { + do { + uint8x8_t s0_0 = vld1_u8(src); + uint8x8_t s0_1 = vld1_u8(src + 1); + uint8x8_t s1_0 = vld1_u8(src + src_stride); + uint8x8_t s1_1 = vld1_u8(src + src_stride + 1); + + uint8x8_t d0 = vrhadd_u8(s0_0, s0_1); + uint8x8_t d1 = vrhadd_u8(s1_0, s1_1); + + if (w == 2) { + store_u8_2x1(dst + 0 * dst_stride, d0); + store_u8_2x1(dst + 1 * dst_stride, d1); + } else { + store_u8_4x1(dst + 0 * dst_stride, d0); + store_u8_4x1(dst + 1 * dst_stride, d1); + } + + src += 2 * src_stride; + dst += 2 * dst_stride; + h -= 2; + } while (h != 0); + } else if (w == 8) { + do { + uint8x8_t s0_0 = vld1_u8(src); + uint8x8_t s0_1 = vld1_u8(src + 1); + uint8x8_t s1_0 = vld1_u8(src + src_stride); + uint8x8_t s1_1 = vld1_u8(src + src_stride + 1); + + uint8x8_t d0 = vrhadd_u8(s0_0, s0_1); + uint8x8_t d1 = vrhadd_u8(s1_0, s1_1); + + vst1_u8(dst, d0); + vst1_u8(dst + dst_stride, d1); + + src += 2 
* src_stride; + dst += 2 * dst_stride; + h -= 2; + } while (h != 0); + } else { + do { + const uint8_t *src_ptr = src; + uint8_t *dst_ptr = dst; + int width = w; + + do { + uint8x16_t s0 = vld1q_u8(src_ptr); + uint8x16_t s1 = vld1q_u8(src_ptr + 1); + + uint8x16_t d0 = vrhaddq_u8(s0, s1); + + vst1q_u8(dst_ptr, d0); + + src_ptr += 16; + dst_ptr += 16; + width -= 16; + } while (width != 0); + src += src_stride; + dst += dst_stride; + } while (--h != 0); + } +} + +void av1_convolve_y_sr_intrabc_neon(const uint8_t *src, int src_stride, + uint8_t *dst, int dst_stride, int w, int h, + const InterpFilterParams *filter_params_y, + const int subpel_y_qn) { + assert(subpel_y_qn == 8); + assert(filter_params_y->taps == 2); + (void)filter_params_y; + (void)subpel_y_qn; + + if (w <= 4) { + do { + uint8x8_t s0 = load_unaligned_u8_4x1(src); + uint8x8_t s1 = load_unaligned_u8_4x1(src + src_stride); + uint8x8_t s2 = load_unaligned_u8_4x1(src + 2 * src_stride); + + uint8x8_t d0 = vrhadd_u8(s0, s1); + uint8x8_t d1 = vrhadd_u8(s1, s2); + + if (w == 2) { + store_u8_2x1(dst + 0 * dst_stride, d0); + store_u8_2x1(dst + 1 * dst_stride, d1); + } else { + store_u8_4x1(dst + 0 * dst_stride, d0); + store_u8_4x1(dst + 1 * dst_stride, d1); + } + + src += 2 * src_stride; + dst += 2 * dst_stride; + h -= 2; + } while (h != 0); + } else if (w == 8) { + do { + uint8x8_t s0 = vld1_u8(src); + uint8x8_t s1 = vld1_u8(src + src_stride); + uint8x8_t s2 = vld1_u8(src + 2 * src_stride); + + uint8x8_t d0 = vrhadd_u8(s0, s1); + uint8x8_t d1 = vrhadd_u8(s1, s2); + + vst1_u8(dst, d0); + vst1_u8(dst + dst_stride, d1); + + src += 2 * src_stride; + dst += 2 * dst_stride; + h -= 2; + } while (h != 0); + } else { + do { + const uint8_t *src_ptr = src; + uint8_t *dst_ptr = dst; + int height = h; + + do { + uint8x16_t s0 = vld1q_u8(src_ptr); + uint8x16_t s1 = vld1q_u8(src_ptr + src_stride); + + uint8x16_t d0 = vrhaddq_u8(s0, s1); + + vst1q_u8(dst_ptr, d0); + + src_ptr += src_stride; + dst_ptr += dst_stride; + } while (--height != 0); + src += 16; + dst += 16; + w -= 16; + } while (w != 0); + } +} + +void av1_convolve_2d_sr_intrabc_neon(const uint8_t *src, int src_stride, + uint8_t *dst, int dst_stride, int w, int h, + const InterpFilterParams *filter_params_x, + const InterpFilterParams *filter_params_y, + const int subpel_x_qn, + const int subpel_y_qn, + ConvolveParams *conv_params) { + assert(subpel_x_qn == 8); + assert(subpel_y_qn == 8); + assert(filter_params_x->taps == 2 && filter_params_y->taps == 2); + assert((conv_params->round_0 + conv_params->round_1) == 2 * FILTER_BITS); + (void)filter_params_x; + (void)subpel_x_qn; + (void)filter_params_y; + (void)subpel_y_qn; + (void)conv_params; + + uint16_t im_block[(MAX_SB_SIZE + MAX_FILTER_TAP - 1) * MAX_SB_SIZE]; + int im_h = h + 1; + int im_stride = w; + assert(w <= MAX_SB_SIZE && h <= MAX_SB_SIZE); + + uint16_t *im = im_block; + + // Horizontal filter. + if (w <= 4) { + do { + uint8x8_t s0 = vld1_u8(src); + uint8x8_t s1 = vld1_u8(src + 1); + + uint16x4_t sum = vget_low_u16(vaddl_u8(s0, s1)); + + // Safe to store the whole vector, the im buffer is big enough. 
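The intrabc kernels in this block assert a subpel position of 8 (the half-sample offset) with 2-tap filters, so they reduce to rounding averages: vrhadd_u8 for the 1-D cases, and pairwise widening sums followed by a single rounding shift by 2 for the 2-D case. A scalar model of the per-lane arithmetic (the model_* names are purely illustrative):

#include <assert.h>
#include <stdint.h>

static uint8_t model_vrhadd_u8(uint8_t a, uint8_t b) {
  return (uint8_t)(((unsigned)a + b + 1) >> 1);  // rounding halving add
}

static uint8_t model_2d_half_pel(uint8_t a, uint8_t b, uint8_t c, uint8_t d) {
  uint16_t row0 = (uint16_t)a + b;           // vaddl_u8 on row y
  uint16_t row1 = (uint16_t)c + d;           // vaddl_u8 on row y + 1
  return (uint8_t)((row0 + row1 + 2) >> 2);  // vadd_u16 + vqrshrn_n_u16(., 2)
}

int main(void) {
  assert(model_vrhadd_u8(10, 13) == 12);
  assert(model_2d_half_pel(255, 255, 255, 255) == 255);
  assert(model_2d_half_pel(1, 0, 0, 0) == 0);
  return 0;
}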
+ vst1_u16(im, sum); + + src += src_stride; + im += im_stride; + } while (--im_h != 0); + } else { + do { + const uint8_t *src_ptr = src; + uint16_t *im_ptr = im; + int width = w; + + do { + uint8x8_t s0 = vld1_u8(src_ptr); + uint8x8_t s1 = vld1_u8(src_ptr + 1); + + uint16x8_t sum = vaddl_u8(s0, s1); + + vst1q_u16(im_ptr, sum); + + src_ptr += 8; + im_ptr += 8; + width -= 8; + } while (width != 0); + src += src_stride; + im += im_stride; + } while (--im_h != 0); + } + + im = im_block; + + // Vertical filter. + if (w <= 4) { + do { + uint16x4_t s0 = vld1_u16(im); + uint16x4_t s1 = vld1_u16(im + im_stride); + uint16x4_t s2 = vld1_u16(im + 2 * im_stride); + + uint16x4_t sum0 = vadd_u16(s0, s1); + uint16x4_t sum1 = vadd_u16(s1, s2); + + uint8x8_t d0 = vqrshrn_n_u16(vcombine_u16(sum0, vdup_n_u16(0)), 2); + uint8x8_t d1 = vqrshrn_n_u16(vcombine_u16(sum1, vdup_n_u16(0)), 2); + + if (w == 2) { + store_u8_2x1(dst + 0 * dst_stride, d0); + store_u8_2x1(dst + 1 * dst_stride, d1); + } else { + store_u8_4x1(dst + 0 * dst_stride, d0); + store_u8_4x1(dst + 1 * dst_stride, d1); + } + + im += 2 * im_stride; + dst += 2 * dst_stride; + h -= 2; + } while (h != 0); + } else { + do { + uint16_t *im_ptr = im; + uint8_t *dst_ptr = dst; + int height = h; + + do { + uint16x8_t s0 = vld1q_u16(im_ptr); + uint16x8_t s1 = vld1q_u16(im_ptr + im_stride); + + uint16x8_t sum = vaddq_u16(s0, s1); + uint8x8_t d0 = vqrshrn_n_u16(sum, 2); + + vst1_u8(dst_ptr, d0); + + im_ptr += im_stride; + dst_ptr += dst_stride; + } while (--height != 0); + im += 8; + dst += 8; + w -= 8; + } while (w != 0); + } +} diff --git a/third_party/aom/av1/common/arm/convolve_neon.h b/third_party/aom/av1/common/arm/convolve_neon.h new file mode 100644 index 0000000000..9fbf8aa12f --- /dev/null +++ b/third_party/aom/av1/common/arm/convolve_neon.h @@ -0,0 +1,538 @@ +/* + * Copyright (c) 2018, Alliance for Open Media. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef AOM_AV1_COMMON_ARM_CONVOLVE_NEON_H_ +#define AOM_AV1_COMMON_ARM_CONVOLVE_NEON_H_ + +#include <arm_neon.h> + +#include "config/aom_config.h" + +#include "aom_dsp/arm/mem_neon.h" +#include "av1/common/convolve.h" +#include "av1/common/filter.h" + +static INLINE int32x4_t +convolve12_4_2d_v(const int16x4_t s0, const int16x4_t s1, const int16x4_t s2, + const int16x4_t s3, const int16x4_t s4, const int16x4_t s5, + const int16x4_t s6, const int16x4_t s7, const int16x4_t s8, + const int16x4_t s9, const int16x4_t s10, const int16x4_t s11, + const int16x8_t y_filter_0_7, const int16x4_t y_filter_8_11) { + const int16x4_t y_filter_0_3 = vget_low_s16(y_filter_0_7); + const int16x4_t y_filter_4_7 = vget_high_s16(y_filter_0_7); + + int32x4_t sum = vmull_lane_s16(s0, y_filter_0_3, 0); + sum = vmlal_lane_s16(sum, s1, y_filter_0_3, 1); + sum = vmlal_lane_s16(sum, s2, y_filter_0_3, 2); + sum = vmlal_lane_s16(sum, s3, y_filter_0_3, 3); + sum = vmlal_lane_s16(sum, s4, y_filter_4_7, 0); + sum = vmlal_lane_s16(sum, s5, y_filter_4_7, 1); + sum = vmlal_lane_s16(sum, s6, y_filter_4_7, 2); + sum = vmlal_lane_s16(sum, s7, y_filter_4_7, 3); + sum = vmlal_lane_s16(sum, s8, y_filter_8_11, 0); + sum = vmlal_lane_s16(sum, s9, y_filter_8_11, 1); + sum = vmlal_lane_s16(sum, s10, y_filter_8_11, 2); + sum = vmlal_lane_s16(sum, s11, y_filter_8_11, 3); + + return sum; +} + +static INLINE uint8x8_t +convolve12_8_2d_v(const int16x8_t s0, const int16x8_t s1, const int16x8_t s2, + const int16x8_t s3, const int16x8_t s4, const int16x8_t s5, + const int16x8_t s6, const int16x8_t s7, const int16x8_t s8, + const int16x8_t s9, const int16x8_t s10, const int16x8_t s11, + const int16x8_t y_filter_0_7, const int16x4_t y_filter_8_11, + const int16x8_t sub_const) { + const int16x4_t y_filter_0_3 = vget_low_s16(y_filter_0_7); + const int16x4_t y_filter_4_7 = vget_high_s16(y_filter_0_7); + + int32x4_t sum0 = vmull_lane_s16(vget_low_s16(s0), y_filter_0_3, 0); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s1), y_filter_0_3, 1); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s2), y_filter_0_3, 2); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s3), y_filter_0_3, 3); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s4), y_filter_4_7, 0); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s5), y_filter_4_7, 1); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s6), y_filter_4_7, 2); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s7), y_filter_4_7, 3); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s8), y_filter_8_11, 0); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s9), y_filter_8_11, 1); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s10), y_filter_8_11, 2); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s11), y_filter_8_11, 3); + + int32x4_t sum1 = vmull_lane_s16(vget_high_s16(s0), y_filter_0_3, 0); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s1), y_filter_0_3, 1); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s2), y_filter_0_3, 2); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s3), y_filter_0_3, 3); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s4), y_filter_4_7, 0); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s5), y_filter_4_7, 1); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s6), y_filter_4_7, 2); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s7), y_filter_4_7, 3); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s8), y_filter_8_11, 0); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s9), y_filter_8_11, 1); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s10), y_filter_8_11, 2); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s11), y_filter_8_11, 3); + + int16x8_t res = + 
vcombine_s16(vqrshrn_n_s32(sum0, 2 * FILTER_BITS - ROUND0_BITS), + vqrshrn_n_s32(sum1, 2 * FILTER_BITS - ROUND0_BITS)); + res = vsubq_s16(res, sub_const); + + return vqmovun_s16(res); +} + +static INLINE void convolve_2d_sr_vert_12tap_neon( + int16_t *src_ptr, int src_stride, uint8_t *dst_ptr, int dst_stride, int w, + int h, const int16x8_t y_filter_0_7, const int16x4_t y_filter_8_11) { + const int bd = 8; + const int16x8_t sub_const = vdupq_n_s16(1 << (bd - 1)); + + if (w <= 4) { + int16x4_t s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10; + load_s16_4x11(src_ptr, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6, &s7, + &s8, &s9, &s10); + src_ptr += 11 * src_stride; + + do { + int16x4_t s11, s12, s13, s14; + load_s16_4x4(src_ptr, src_stride, &s11, &s12, &s13, &s14); + + int32x4_t d0 = convolve12_4_2d_v(s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, + s10, s11, y_filter_0_7, y_filter_8_11); + int32x4_t d1 = convolve12_4_2d_v(s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, + s11, s12, y_filter_0_7, y_filter_8_11); + int32x4_t d2 = convolve12_4_2d_v(s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, + s12, s13, y_filter_0_7, y_filter_8_11); + int32x4_t d3 = + convolve12_4_2d_v(s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, + y_filter_0_7, y_filter_8_11); + + int16x8_t dd01 = + vcombine_s16(vqrshrn_n_s32(d0, 2 * FILTER_BITS - ROUND0_BITS), + vqrshrn_n_s32(d1, 2 * FILTER_BITS - ROUND0_BITS)); + int16x8_t dd23 = + vcombine_s16(vqrshrn_n_s32(d2, 2 * FILTER_BITS - ROUND0_BITS), + vqrshrn_n_s32(d3, 2 * FILTER_BITS - ROUND0_BITS)); + + dd01 = vsubq_s16(dd01, sub_const); + dd23 = vsubq_s16(dd23, sub_const); + + uint8x8_t d01 = vqmovun_s16(dd01); + uint8x8_t d23 = vqmovun_s16(dd23); + + store_u8x4_strided_x2(dst_ptr + 0 * dst_stride, dst_stride, d01); + store_u8x4_strided_x2(dst_ptr + 2 * dst_stride, dst_stride, d23); + + s0 = s4; + s1 = s5; + s2 = s6; + s3 = s7; + s4 = s8; + s5 = s9; + s6 = s10; + s7 = s11; + s8 = s12; + s9 = s13; + s10 = s14; + src_ptr += 4 * src_stride; + dst_ptr += 4 * dst_stride; + h -= 4; + } while (h != 0); + + } else { + do { + int height = h; + int16_t *s = src_ptr; + uint8_t *d = dst_ptr; + + int16x8_t s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10; + load_s16_8x11(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6, &s7, &s8, + &s9, &s10); + s += 11 * src_stride; + + do { + int16x8_t s11, s12, s13, s14; + load_s16_8x4(s, src_stride, &s11, &s12, &s13, &s14); + + uint8x8_t d0 = + convolve12_8_2d_v(s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, + y_filter_0_7, y_filter_8_11, sub_const); + uint8x8_t d1 = + convolve12_8_2d_v(s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, + y_filter_0_7, y_filter_8_11, sub_const); + uint8x8_t d2 = + convolve12_8_2d_v(s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, + s13, y_filter_0_7, y_filter_8_11, sub_const); + uint8x8_t d3 = + convolve12_8_2d_v(s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, + s14, y_filter_0_7, y_filter_8_11, sub_const); + + store_u8_8x4(d, dst_stride, d0, d1, d2, d3); + + s0 = s4; + s1 = s5; + s2 = s6; + s3 = s7; + s4 = s8; + s5 = s9; + s6 = s10; + s7 = s11; + s8 = s12; + s9 = s13; + s10 = s14; + s += 4 * src_stride; + d += 4 * dst_stride; + height -= 4; + } while (height != 0); + src_ptr += 8; + dst_ptr += 8; + w -= 8; + } while (w != 0); + } +} + +static INLINE int16x4_t convolve8_4_2d_v(const int16x4_t s0, const int16x4_t s1, + const int16x4_t s2, const int16x4_t s3, + const int16x4_t s4, const int16x4_t s5, + const int16x4_t s6, const int16x4_t s7, + const int16x8_t y_filter) { + const int16x4_t y_filter_lo = vget_low_s16(y_filter); + const 
int16x4_t y_filter_hi = vget_high_s16(y_filter); + + int32x4_t sum = vmull_lane_s16(s0, y_filter_lo, 0); + sum = vmlal_lane_s16(sum, s1, y_filter_lo, 1); + sum = vmlal_lane_s16(sum, s2, y_filter_lo, 2); + sum = vmlal_lane_s16(sum, s3, y_filter_lo, 3); + sum = vmlal_lane_s16(sum, s4, y_filter_hi, 0); + sum = vmlal_lane_s16(sum, s5, y_filter_hi, 1); + sum = vmlal_lane_s16(sum, s6, y_filter_hi, 2); + sum = vmlal_lane_s16(sum, s7, y_filter_hi, 3); + + return vqrshrn_n_s32(sum, 2 * FILTER_BITS - ROUND0_BITS); +} + +static INLINE uint8x8_t convolve8_8_2d_v(const int16x8_t s0, const int16x8_t s1, + const int16x8_t s2, const int16x8_t s3, + const int16x8_t s4, const int16x8_t s5, + const int16x8_t s6, const int16x8_t s7, + const int16x8_t y_filter, + const int16x8_t sub_const) { + const int16x4_t y_filter_lo = vget_low_s16(y_filter); + const int16x4_t y_filter_hi = vget_high_s16(y_filter); + + int32x4_t sum0 = vmull_lane_s16(vget_low_s16(s0), y_filter_lo, 0); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s1), y_filter_lo, 1); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s2), y_filter_lo, 2); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s3), y_filter_lo, 3); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s4), y_filter_hi, 0); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s5), y_filter_hi, 1); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s6), y_filter_hi, 2); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s7), y_filter_hi, 3); + + int32x4_t sum1 = vmull_lane_s16(vget_high_s16(s0), y_filter_lo, 0); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s1), y_filter_lo, 1); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s2), y_filter_lo, 2); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s3), y_filter_lo, 3); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s4), y_filter_hi, 0); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s5), y_filter_hi, 1); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s6), y_filter_hi, 2); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s7), y_filter_hi, 3); + + int16x8_t res = + vcombine_s16(vqrshrn_n_s32(sum0, 2 * FILTER_BITS - ROUND0_BITS), + vqrshrn_n_s32(sum1, 2 * FILTER_BITS - ROUND0_BITS)); + res = vsubq_s16(res, sub_const); + + return vqmovun_s16(res); +} + +static INLINE void convolve_2d_sr_vert_8tap_neon(int16_t *src_ptr, + int src_stride, + uint8_t *dst_ptr, + int dst_stride, int w, int h, + const int16x8_t y_filter) { + const int bd = 8; + const int16x8_t sub_const = vdupq_n_s16(1 << (bd - 1)); + + if (w <= 4) { + int16x4_t s0, s1, s2, s3, s4, s5, s6; + load_s16_4x7(src_ptr, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6); + src_ptr += 7 * src_stride; + + do { +#if AOM_ARCH_AARCH64 + int16x4_t s7, s8, s9, s10; + load_s16_4x4(src_ptr, src_stride, &s7, &s8, &s9, &s10); + + int16x4_t d0 = convolve8_4_2d_v(s0, s1, s2, s3, s4, s5, s6, s7, y_filter); + int16x4_t d1 = convolve8_4_2d_v(s1, s2, s3, s4, s5, s6, s7, s8, y_filter); + int16x4_t d2 = convolve8_4_2d_v(s2, s3, s4, s5, s6, s7, s8, s9, y_filter); + int16x4_t d3 = + convolve8_4_2d_v(s3, s4, s5, s6, s7, s8, s9, s10, y_filter); + + uint8x8_t d01 = vqmovun_s16(vsubq_s16(vcombine_s16(d0, d1), sub_const)); + uint8x8_t d23 = vqmovun_s16(vsubq_s16(vcombine_s16(d2, d3), sub_const)); + + store_u8x4_strided_x2(dst_ptr + 0 * dst_stride, dst_stride, d01); + store_u8x4_strided_x2(dst_ptr + 2 * dst_stride, dst_stride, d23); + + s0 = s4; + s1 = s5; + s2 = s6; + s3 = s7; + s4 = s8; + s5 = s9; + s6 = s10; + src_ptr += 4 * src_stride; + dst_ptr += 4 * dst_stride; + h -= 4; +#else // !AOM_ARCH_AARCH64 + int16x4_t s7 = vld1_s16(src_ptr); + int16x4_t d0 = convolve8_4_2d_v(s0, s1, s2, s3, 
s4, s5, s6, s7, y_filter); + uint8x8_t d01 = + vqmovun_s16(vsubq_s16(vcombine_s16(d0, vdup_n_s16(0)), sub_const)); + + store_u8_4x1(dst_ptr, d01); + + s0 = s1; + s1 = s2; + s2 = s3; + s3 = s4; + s4 = s5; + s5 = s6; + s6 = s7; + src_ptr += src_stride; + dst_ptr += dst_stride; + h--; +#endif // AOM_ARCH_AARCH64 + } while (h != 0); + } else { + // Width is a multiple of 8 and height is a multiple of 4. + do { + int height = h; + int16_t *s = src_ptr; + uint8_t *d = dst_ptr; + + int16x8_t s0, s1, s2, s3, s4, s5, s6; + load_s16_8x7(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6); + s += 7 * src_stride; + + do { +#if AOM_ARCH_AARCH64 + int16x8_t s7, s8, s9, s10; + load_s16_8x4(s, src_stride, &s7, &s8, &s9, &s10); + + uint8x8_t d0 = convolve8_8_2d_v(s0, s1, s2, s3, s4, s5, s6, s7, + y_filter, sub_const); + uint8x8_t d1 = convolve8_8_2d_v(s1, s2, s3, s4, s5, s6, s7, s8, + y_filter, sub_const); + uint8x8_t d2 = convolve8_8_2d_v(s2, s3, s4, s5, s6, s7, s8, s9, + y_filter, sub_const); + uint8x8_t d3 = convolve8_8_2d_v(s3, s4, s5, s6, s7, s8, s9, s10, + y_filter, sub_const); + + store_u8_8x4(d, dst_stride, d0, d1, d2, d3); + + s0 = s4; + s1 = s5; + s2 = s6; + s3 = s7; + s4 = s8; + s5 = s9; + s6 = s10; + s += 4 * src_stride; + d += 4 * dst_stride; + height -= 4; +#else // !AOM_ARCH_AARCH64 + int16x8_t s7 = vld1q_s16(s); + uint8x8_t d0 = convolve8_8_2d_v(s0, s1, s2, s3, s4, s5, s6, s7, + y_filter, sub_const); + vst1_u8(d, d0); + + s0 = s1; + s1 = s2; + s2 = s3; + s3 = s4; + s4 = s5; + s5 = s6; + s6 = s7; + s += src_stride; + d += dst_stride; + height--; +#endif // AOM_ARCH_AARCH64 + } while (height != 0); + src_ptr += 8; + dst_ptr += 8; + w -= 8; + } while (w != 0); + } +} + +static INLINE int16x4_t convolve6_4_2d_v(const int16x4_t s0, const int16x4_t s1, + const int16x4_t s2, const int16x4_t s3, + const int16x4_t s4, const int16x4_t s5, + const int16x8_t y_filter) { + const int16x4_t y_filter_lo = vget_low_s16(y_filter); + const int16x4_t y_filter_hi = vget_high_s16(y_filter); + + int32x4_t sum = vmull_lane_s16(s0, y_filter_lo, 1); + sum = vmlal_lane_s16(sum, s1, y_filter_lo, 2); + sum = vmlal_lane_s16(sum, s2, y_filter_lo, 3); + sum = vmlal_lane_s16(sum, s3, y_filter_hi, 0); + sum = vmlal_lane_s16(sum, s4, y_filter_hi, 1); + sum = vmlal_lane_s16(sum, s5, y_filter_hi, 2); + + return vqrshrn_n_s32(sum, 2 * FILTER_BITS - ROUND0_BITS); +} + +static INLINE uint8x8_t convolve6_8_2d_v(const int16x8_t s0, const int16x8_t s1, + const int16x8_t s2, const int16x8_t s3, + const int16x8_t s4, const int16x8_t s5, + const int16x8_t y_filter, + const int16x8_t sub_const) { + const int16x4_t y_filter_lo = vget_low_s16(y_filter); + const int16x4_t y_filter_hi = vget_high_s16(y_filter); + + int32x4_t sum0 = vmull_lane_s16(vget_low_s16(s0), y_filter_lo, 1); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s1), y_filter_lo, 2); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s2), y_filter_lo, 3); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s3), y_filter_hi, 0); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s4), y_filter_hi, 1); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s5), y_filter_hi, 2); + + int32x4_t sum1 = vmull_lane_s16(vget_high_s16(s0), y_filter_lo, 1); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s1), y_filter_lo, 2); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s2), y_filter_lo, 3); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s3), y_filter_hi, 0); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s4), y_filter_hi, 1); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s5), y_filter_hi, 2); + + int16x8_t res = + 
vcombine_s16(vqrshrn_n_s32(sum0, 2 * FILTER_BITS - ROUND0_BITS), + vqrshrn_n_s32(sum1, 2 * FILTER_BITS - ROUND0_BITS)); + res = vsubq_s16(res, sub_const); + + return vqmovun_s16(res); +} + +static INLINE void convolve_2d_sr_vert_6tap_neon(int16_t *src_ptr, + int src_stride, + uint8_t *dst_ptr, + int dst_stride, int w, int h, + const int16x8_t y_filter) { + const int bd = 8; + const int16x8_t sub_const = vdupq_n_s16(1 << (bd - 1)); + + if (w <= 4) { + int16x4_t s0, s1, s2, s3, s4; + load_s16_4x5(src_ptr, src_stride, &s0, &s1, &s2, &s3, &s4); + src_ptr += 5 * src_stride; + + do { +#if AOM_ARCH_AARCH64 + int16x4_t s5, s6, s7, s8; + load_s16_4x4(src_ptr, src_stride, &s5, &s6, &s7, &s8); + + int16x4_t d0 = convolve6_4_2d_v(s0, s1, s2, s3, s4, s5, y_filter); + int16x4_t d1 = convolve6_4_2d_v(s1, s2, s3, s4, s5, s6, y_filter); + int16x4_t d2 = convolve6_4_2d_v(s2, s3, s4, s5, s6, s7, y_filter); + int16x4_t d3 = convolve6_4_2d_v(s3, s4, s5, s6, s7, s8, y_filter); + + uint8x8_t d01 = vqmovun_s16(vsubq_s16(vcombine_s16(d0, d1), sub_const)); + uint8x8_t d23 = vqmovun_s16(vsubq_s16(vcombine_s16(d2, d3), sub_const)); + + store_u8x4_strided_x2(dst_ptr + 0 * dst_stride, dst_stride, d01); + store_u8x4_strided_x2(dst_ptr + 2 * dst_stride, dst_stride, d23); + + s0 = s4; + s1 = s5; + s2 = s6; + s3 = s7; + s4 = s8; + src_ptr += 4 * src_stride; + dst_ptr += 4 * dst_stride; + h -= 4; +#else // !AOM_ARCH_AARCH64 + int16x4_t s5 = vld1_s16(src_ptr); + int16x4_t d0 = convolve6_4_2d_v(s0, s1, s2, s3, s4, s5, y_filter); + uint8x8_t d01 = + vqmovun_s16(vsubq_s16(vcombine_s16(d0, vdup_n_s16(0)), sub_const)); + + store_u8_4x1(dst_ptr, d01); + + s0 = s1; + s1 = s2; + s2 = s3; + s3 = s4; + s4 = s5; + src_ptr += src_stride; + dst_ptr += dst_stride; + h--; +#endif // AOM_ARCH_AARCH64 + } while (h != 0); + } else { + // Width is a multiple of 8 and height is a multiple of 4. + do { + int height = h; + int16_t *s = src_ptr; + uint8_t *d = dst_ptr; + + int16x8_t s0, s1, s2, s3, s4; + load_s16_8x5(s, src_stride, &s0, &s1, &s2, &s3, &s4); + s += 5 * src_stride; + + do { +#if AOM_ARCH_AARCH64 + int16x8_t s5, s6, s7, s8; + load_s16_8x4(s, src_stride, &s5, &s6, &s7, &s8); + + uint8x8_t d0 = + convolve6_8_2d_v(s0, s1, s2, s3, s4, s5, y_filter, sub_const); + uint8x8_t d1 = + convolve6_8_2d_v(s1, s2, s3, s4, s5, s6, y_filter, sub_const); + uint8x8_t d2 = + convolve6_8_2d_v(s2, s3, s4, s5, s6, s7, y_filter, sub_const); + uint8x8_t d3 = + convolve6_8_2d_v(s3, s4, s5, s6, s7, s8, y_filter, sub_const); + + store_u8_8x4(d, dst_stride, d0, d1, d2, d3); + + s0 = s4; + s1 = s5; + s2 = s6; + s3 = s7; + s4 = s8; + s += 4 * src_stride; + d += 4 * dst_stride; + height -= 4; +#else // !AOM_ARCH_AARCH64 + int16x8_t s5 = vld1q_s16(s); + uint8x8_t d0 = + convolve6_8_2d_v(s0, s1, s2, s3, s4, s5, y_filter, sub_const); + vst1_u8(d, d0); + + s0 = s1; + s1 = s2; + s2 = s3; + s3 = s4; + s4 = s5; + s += src_stride; + d += dst_stride; + height--; +#endif // AOM_ARCH_AARCH64 + } while (height != 0); + src_ptr += 8; + dst_ptr += 8; + w -= 8; + } while (w != 0); + } +} + +#endif // AOM_AV1_COMMON_ARM_CONVOLVE_NEON_H_ diff --git a/third_party/aom/av1/common/arm/convolve_neon_dotprod.c b/third_party/aom/av1/common/arm/convolve_neon_dotprod.c new file mode 100644 index 0000000000..c29229eb09 --- /dev/null +++ b/third_party/aom/av1/common/arm/convolve_neon_dotprod.c @@ -0,0 +1,793 @@ +/* + * Copyright (c) 2023, Alliance for Open Media. 
All rights reserved + * + * This source code is subject to the terms of the BSD 2 Clause License and + * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License + * was not distributed with this source code in the LICENSE file, you can + * obtain it at www.aomedia.org/license/software. If the Alliance for Open + * Media Patent License 1.0 was not distributed with this source code in the + * PATENTS file, you can obtain it at www.aomedia.org/license/patent. + */ + +#include <arm_neon.h> + +#include "config/aom_config.h" +#include "config/av1_rtcd.h" + +#include "aom_dsp/aom_dsp_common.h" +#include "aom_dsp/arm/mem_neon.h" +#include "aom_ports/mem.h" +#include "av1/common/arm/convolve_neon.h" +#include "av1/common/convolve.h" +#include "av1/common/filter.h" + +DECLARE_ALIGNED(16, static const uint8_t, dot_prod_permute_tbl[48]) = { + 0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6, + 4, 5, 6, 7, 5, 6, 7, 8, 6, 7, 8, 9, 7, 8, 9, 10, + 8, 9, 10, 11, 9, 10, 11, 12, 10, 11, 12, 13, 11, 12, 13, 14 +}; + +static INLINE int16x4_t convolve12_4_x(uint8x16_t samples, + const int8x16_t filter, + const int32x4_t correction, + const uint8x16_t range_limit, + const uint8x16x3_t permute_tbl) { + int8x16_t clamped_samples, permuted_samples[3]; + int32x4_t sum; + + // Clamp sample range to [-128, 127] for 8-bit signed dot product. + clamped_samples = vreinterpretq_s8_u8(vsubq_u8(samples, range_limit)); + + // Permute samples ready for dot product. + // { 0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6 } + permuted_samples[0] = vqtbl1q_s8(clamped_samples, permute_tbl.val[0]); + // { 4, 5, 6, 7, 5, 6, 7, 8, 6, 7, 8, 9, 7, 8, 9, 10 } + permuted_samples[1] = vqtbl1q_s8(clamped_samples, permute_tbl.val[1]); + // { 8, 9, 10, 11, 9, 10, 11, 12, 10, 11, 12, 13, 11, 12, 13, 14 } + permuted_samples[2] = vqtbl1q_s8(clamped_samples, permute_tbl.val[2]); + + // Accumulate dot product into 'correction' to account for range clamp. + // First 4 output values. + sum = vdotq_laneq_s32(correction, permuted_samples[0], filter, 0); + sum = vdotq_laneq_s32(sum, permuted_samples[1], filter, 1); + sum = vdotq_laneq_s32(sum, permuted_samples[2], filter, 2); + + return vqrshrn_n_s32(sum, FILTER_BITS); +} + +static INLINE uint8x8_t convolve12_8_x(uint8x16_t samples[2], + const int8x16_t filter, + const int32x4_t correction, + const uint8x16_t range_limit, + const uint8x16x3_t permute_tbl) { + int8x16_t clamped_samples[2], permuted_samples[4]; + int32x4_t sum[2]; + + // Clamp sample range to [-128, 127] for 8-bit signed dot product. + clamped_samples[0] = vreinterpretq_s8_u8(vsubq_u8(samples[0], range_limit)); + clamped_samples[1] = vreinterpretq_s8_u8(vsubq_u8(samples[1], range_limit)); + + // Permute samples ready for dot product. + // { 0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6 } + permuted_samples[0] = vqtbl1q_s8(clamped_samples[0], permute_tbl.val[0]); + // { 4, 5, 6, 7, 5, 6, 7, 8, 6, 7, 8, 9, 7, 8, 9, 10 } + permuted_samples[1] = vqtbl1q_s8(clamped_samples[0], permute_tbl.val[1]); + // { 8, 9, 10, 11, 9, 10, 11, 12, 10, 11, 12, 13, 11, 12, 13, 14 } + permuted_samples[2] = vqtbl1q_s8(clamped_samples[0], permute_tbl.val[2]); + // {12, 13, 14, 15, 13, 14, 15, 16, 14, 15, 16, 17, 15, 16, 17, 18 } + permuted_samples[3] = vqtbl1q_s8(clamped_samples[1], permute_tbl.val[2]); + + // Accumulate dot product into 'correction' to account for range clamp. + // First 4 output values. 
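+  // Note: accumulating into 'correction' rather than zero undoes the -128
+  // bias applied to each sample above. The caller builds 'correction' as
+  // 128 * (sum of filter taps) plus the rounding shim, and
+  // dot(s - 128, f) + 128 * sum(f) == dot(s, f).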
+ sum[0] = vdotq_laneq_s32(correction, permuted_samples[0], filter, 0); + sum[0] = vdotq_laneq_s32(sum[0], permuted_samples[1], filter, 1); + sum[0] = vdotq_laneq_s32(sum[0], permuted_samples[2], filter, 2); + // Second 4 output values. + sum[1] = vdotq_laneq_s32(correction, permuted_samples[1], filter, 0); + sum[1] = vdotq_laneq_s32(sum[1], permuted_samples[2], filter, 1); + sum[1] = vdotq_laneq_s32(sum[1], permuted_samples[3], filter, 2); + + // Narrow and re-pack. + int16x8_t sum_s16 = vcombine_s16(vqrshrn_n_s32(sum[0], FILTER_BITS), + vqrshrn_n_s32(sum[1], FILTER_BITS)); + return vqmovun_s16(sum_s16); +} + +static INLINE void convolve_x_sr_12tap_neon_dotprod( + const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, int w, + int h, const int16_t *x_filter_ptr) { + const int16x8_t filter_0_7 = vld1q_s16(x_filter_ptr); + const int16x4_t filter_8_11 = vld1_s16(x_filter_ptr + 8); + const int16x8_t filter_8_15 = vcombine_s16(filter_8_11, vdup_n_s16(0)); + const int8x16_t filter = + vcombine_s8(vmovn_s16(filter_0_7), vmovn_s16(filter_8_15)); + + const int32_t correction_s32 = + vaddvq_s32(vaddq_s32(vpaddlq_s16(vshlq_n_s16(filter_0_7, FILTER_BITS)), + vpaddlq_s16(vshlq_n_s16(filter_8_15, FILTER_BITS)))); + // A shim of 1 << (ROUND0_BITS - 1) enables us to use a single rounding right + // shift by FILTER_BITS - instead of a first rounding right shift by + // ROUND0_BITS, followed by second rounding right shift by FILTER_BITS - + // ROUND0_BITS. + int32x4_t correction = vdupq_n_s32(correction_s32 + (1 << (ROUND0_BITS - 1))); + const uint8x16_t range_limit = vdupq_n_u8(128); + const uint8x16x3_t permute_tbl = vld1q_u8_x3(dot_prod_permute_tbl); + + // Special case the following no-op filter as 128 won't fit into the + // 8-bit signed dot-product instruction: + // { 0, 0, 0, 0, 0, 128, 0, 0, 0, 0, 0, 0 } + if (vgetq_lane_s16(filter_0_7, 5) == 128) { + // Undo the horizontal offset in the calling function. 
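+    // The only non-zero tap is 128 (1 << FILTER_BITS) at index 5, so the
+    // filter is a pure copy. The caller subtracted horiz_offset, which is
+    // taps / 2 - 1 == 5 on this 12-tap path, so stepping forward by 5
+    // re-centres the source and each row can simply be copied across.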
+ src += 5; + + do { + const uint8_t *s = src; + uint8_t *d = dst; + int width = w; + + do { + uint8x8_t d0 = vld1_u8(s); + if (w == 4) { + store_u8_4x1(d, d0); + } else { + vst1_u8(d, d0); + } + + s += 8; + d += 8; + width -= 8; + } while (width > 0); + src += src_stride; + dst += dst_stride; + } while (--h != 0); + } else { + if (w <= 4) { + do { + uint8x16_t s0, s1, s2, s3; + load_u8_16x4(src, src_stride, &s0, &s1, &s2, &s3); + + int16x4_t d0 = + convolve12_4_x(s0, filter, correction, range_limit, permute_tbl); + int16x4_t d1 = + convolve12_4_x(s1, filter, correction, range_limit, permute_tbl); + int16x4_t d2 = + convolve12_4_x(s2, filter, correction, range_limit, permute_tbl); + int16x4_t d3 = + convolve12_4_x(s3, filter, correction, range_limit, permute_tbl); + + uint8x8_t d01 = vqmovun_s16(vcombine_s16(d0, d1)); + uint8x8_t d23 = vqmovun_s16(vcombine_s16(d2, d3)); + + store_u8x4_strided_x2(dst + 0 * dst_stride, dst_stride, d01); + store_u8x4_strided_x2(dst + 2 * dst_stride, dst_stride, d23); + + dst += 4 * dst_stride; + src += 4 * src_stride; + h -= 4; + } while (h != 0); + } else { + do { + const uint8_t *s = src; + uint8_t *d = dst; + int width = w; + + do { + uint8x16_t s0[2], s1[2], s2[2], s3[2]; + load_u8_16x4(s, src_stride, &s0[0], &s1[0], &s2[0], &s3[0]); + load_u8_16x4(s + 4, src_stride, &s0[1], &s1[1], &s2[1], &s3[1]); + + uint8x8_t d0 = + convolve12_8_x(s0, filter, correction, range_limit, permute_tbl); + uint8x8_t d1 = + convolve12_8_x(s1, filter, correction, range_limit, permute_tbl); + uint8x8_t d2 = + convolve12_8_x(s2, filter, correction, range_limit, permute_tbl); + uint8x8_t d3 = + convolve12_8_x(s3, filter, correction, range_limit, permute_tbl); + + store_u8_8x4(d + 0 * dst_stride, dst_stride, d0, d1, d2, d3); + + s += 8; + d += 8; + width -= 8; + } while (width != 0); + src += 4 * src_stride; + dst += 4 * dst_stride; + h -= 4; + } while (h != 0); + } + } +} + +static INLINE int16x4_t convolve4_4_x(uint8x16_t samples, const int8x8_t filter, + const int32x4_t correction, + const uint8x16_t range_limit, + const uint8x16_t permute_tbl) { + // Clamp sample range to [-128, 127] for 8-bit signed dot product. + int8x16_t clamped_samples = + vreinterpretq_s8_u8(vsubq_u8(samples, range_limit)); + + // Permute samples ready for dot product. + // { 0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6 } + int8x16_t permuted_samples = vqtbl1q_s8(clamped_samples, permute_tbl); + + // Accumulate dot product into 'correction' to account for range clamp. + int32x4_t sum = vdotq_lane_s32(correction, permuted_samples, filter, 0); + + // Packing is performed by the caller. + return vmovn_s32(sum); +} + +static INLINE uint8x8_t convolve8_8_x(uint8x16_t samples, const int8x8_t filter, + const int32x4_t correction, + const uint8x16_t range_limit, + const uint8x16x3_t permute_tbl) { + int8x16_t clamped_samples, permuted_samples[3]; + int32x4_t sum[2]; + + // Clamp sample range to [-128, 127] for 8-bit signed dot product. + clamped_samples = vreinterpretq_s8_u8(vsubq_u8(samples, range_limit)); + + // Permute samples ready for dot product. 
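+  // Each permuted vector packs four overlapping 4-sample windows (starting
+  // offsets 0-3, 4-7 and 8-11 respectively), so one dot-product lane
+  // instruction applies four filter taps to four adjacent output pixels at
+  // a time.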
+  // { 0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6 }
+  permuted_samples[0] = vqtbl1q_s8(clamped_samples, permute_tbl.val[0]);
+  // { 4, 5, 6, 7, 5, 6, 7, 8, 6, 7, 8, 9, 7, 8, 9, 10 }
+  permuted_samples[1] = vqtbl1q_s8(clamped_samples, permute_tbl.val[1]);
+  // { 8, 9, 10, 11, 9, 10, 11, 12, 10, 11, 12, 13, 11, 12, 13, 14 }
+  permuted_samples[2] = vqtbl1q_s8(clamped_samples, permute_tbl.val[2]);
+
+  // Accumulate dot product into 'correction' to account for range clamp.
+  // First 4 output values.
+  sum[0] = vdotq_lane_s32(correction, permuted_samples[0], filter, 0);
+  sum[0] = vdotq_lane_s32(sum[0], permuted_samples[1], filter, 1);
+  // Second 4 output values.
+  sum[1] = vdotq_lane_s32(correction, permuted_samples[1], filter, 0);
+  sum[1] = vdotq_lane_s32(sum[1], permuted_samples[2], filter, 1);
+
+  // Narrow and re-pack.
+  int16x8_t sum_s16 = vcombine_s16(vmovn_s32(sum[0]), vmovn_s32(sum[1]));
+  // We halved the convolution filter values so - 1 from the right shift.
+  return vqrshrun_n_s16(sum_s16, FILTER_BITS - 1);
+}
+
+void av1_convolve_x_sr_neon_dotprod(const uint8_t *src, int src_stride,
+                                    uint8_t *dst, int dst_stride, int w, int h,
+                                    const InterpFilterParams *filter_params_x,
+                                    const int subpel_x_qn,
+                                    ConvolveParams *conv_params) {
+  if (w == 2 || h == 2) {
+    av1_convolve_x_sr_c(src, src_stride, dst, dst_stride, w, h, filter_params_x,
+                        subpel_x_qn, conv_params);
+    return;
+  }
+
+  const uint8_t horiz_offset = filter_params_x->taps / 2 - 1;
+  src -= horiz_offset;
+
+  const int16_t *x_filter_ptr = av1_get_interp_filter_subpel_kernel(
+      filter_params_x, subpel_x_qn & SUBPEL_MASK);
+
+  if (filter_params_x->taps > 8) {
+    convolve_x_sr_12tap_neon_dotprod(src, src_stride, dst, dst_stride, w, h,
+                                     x_filter_ptr);
+    return;
+  }
+
+  const int16x8_t x_filter_s16 = vld1q_s16(x_filter_ptr);
+  // Dot product constants.
+  const int32_t correction_s32 =
+      vaddlvq_s16(vshlq_n_s16(x_filter_s16, FILTER_BITS - 1));
+  // This shim of 1 << ((ROUND0_BITS - 1) - 1) enables us to use a single
+  // rounding right shift by FILTER_BITS - instead of a first rounding right
+  // shift by ROUND0_BITS, followed by second rounding right shift by
+  // FILTER_BITS - ROUND0_BITS.
+  // The outermost -1 is needed because we will halve the filter values.
+  const int32x4_t correction =
+      vdupq_n_s32(correction_s32 + (1 << ((ROUND0_BITS - 1) - 1)));
+  const uint8x16_t range_limit = vdupq_n_u8(128);
+
+  if (w <= 4) {
+    const uint8x16_t permute_tbl = vld1q_u8(dot_prod_permute_tbl);
+    // 4-tap filters are used for blocks having width <= 4.
+    // Filter values are even, so halve to reduce intermediate precision reqs.
+    const int8x8_t x_filter =
+        vshrn_n_s16(vcombine_s16(vld1_s16(x_filter_ptr + 2), vdup_n_s16(0)), 1);
+
+    src += 2;
+
+    do {
+      uint8x16_t s0, s1, s2, s3;
+      load_u8_16x4(src, src_stride, &s0, &s1, &s2, &s3);
+
+      int16x4_t d0 =
+          convolve4_4_x(s0, x_filter, correction, range_limit, permute_tbl);
+      int16x4_t d1 =
+          convolve4_4_x(s1, x_filter, correction, range_limit, permute_tbl);
+      int16x4_t d2 =
+          convolve4_4_x(s2, x_filter, correction, range_limit, permute_tbl);
+      int16x4_t d3 =
+          convolve4_4_x(s3, x_filter, correction, range_limit, permute_tbl);
+
+      // We halved the convolution filter values so - 1 from the right shift.
+ uint8x8_t d01 = vqrshrun_n_s16(vcombine_s16(d0, d1), FILTER_BITS - 1); + uint8x8_t d23 = vqrshrun_n_s16(vcombine_s16(d2, d3), FILTER_BITS - 1); + + store_u8x4_strided_x2(dst + 0 * dst_stride, dst_stride, d01); + store_u8x4_strided_x2(dst + 2 * dst_stride, dst_stride, d23); + + src += 4 * src_stride; + dst += 4 * dst_stride; + h -= 4; + } while (h != 0); + } else { + const uint8x16x3_t permute_tbl = vld1q_u8_x3(dot_prod_permute_tbl); + // Filter values are even, so halve to reduce intermediate precision reqs. + const int8x8_t x_filter = vshrn_n_s16(x_filter_s16, 1); + + do { + int width = w; + const uint8_t *s = src; + uint8_t *d = dst; + + do { + uint8x16_t s0, s1, s2, s3; + load_u8_16x4(s, src_stride, &s0, &s1, &s2, &s3); + + uint8x8_t d0 = + convolve8_8_x(s0, x_filter, correction, range_limit, permute_tbl); + uint8x8_t d1 = + convolve8_8_x(s1, x_filter, correction, range_limit, permute_tbl); + uint8x8_t d2 = + convolve8_8_x(s2, x_filter, correction, range_limit, permute_tbl); + uint8x8_t d3 = + convolve8_8_x(s3, x_filter, correction, range_limit, permute_tbl); + + store_u8_8x4(d, dst_stride, d0, d1, d2, d3); + + s += 8; + d += 8; + width -= 8; + } while (width != 0); + src += 4 * src_stride; + dst += 4 * dst_stride; + h -= 4; + } while (h != 0); + } +} + +static INLINE int16x4_t convolve12_4_2d_h(uint8x16_t samples, + const int8x16_t filters, + const int32x4_t correction, + const uint8x16_t range_limit, + const uint8x16x3_t permute_tbl) { + int8x16_t clamped_samples, permuted_samples[3]; + int32x4_t sum; + + // Clamp sample range to [-128, 127] for 8-bit signed dot product. + clamped_samples = vreinterpretq_s8_u8(vsubq_u8(samples, range_limit)); + + // Permute samples ready for dot product. + // { 0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6 } + permuted_samples[0] = vqtbl1q_s8(clamped_samples, permute_tbl.val[0]); + // { 4, 5, 6, 7, 5, 6, 7, 8, 6, 7, 8, 9, 7, 8, 9, 10 } + permuted_samples[1] = vqtbl1q_s8(clamped_samples, permute_tbl.val[1]); + // { 8, 9, 10, 11, 9, 10, 11, 12, 10, 11, 12, 13, 11, 12, 13, 14 } + permuted_samples[2] = vqtbl1q_s8(clamped_samples, permute_tbl.val[2]); + + // Accumulate dot product into 'correction' to account for range clamp. + // First 4 output values. + sum = vdotq_laneq_s32(correction, permuted_samples[0], filters, 0); + sum = vdotq_laneq_s32(sum, permuted_samples[1], filters, 1); + sum = vdotq_laneq_s32(sum, permuted_samples[2], filters, 2); + + // Narrow and re-pack. + return vshrn_n_s32(sum, ROUND0_BITS); +} + +static INLINE int16x8_t convolve12_8_2d_h(uint8x16_t samples[2], + const int8x16_t filters, + const int32x4_t correction, + const uint8x16_t range_limit, + const uint8x16x3_t permute_tbl) { + int8x16_t clamped_samples[2], permuted_samples[4]; + int32x4_t sum[2]; + + // Clamp sample range to [-128, 127] for 8-bit signed dot product. + clamped_samples[0] = vreinterpretq_s8_u8(vsubq_u8(samples[0], range_limit)); + clamped_samples[1] = vreinterpretq_s8_u8(vsubq_u8(samples[1], range_limit)); + + // Permute samples ready for dot product. 
+ // { 0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6 } + permuted_samples[0] = vqtbl1q_s8(clamped_samples[0], permute_tbl.val[0]); + // { 4, 5, 6, 7, 5, 6, 7, 8, 6, 7, 8, 9, 7, 8, 9, 10 } + permuted_samples[1] = vqtbl1q_s8(clamped_samples[0], permute_tbl.val[1]); + // { 8, 9, 10, 11, 9, 10, 11, 12, 10, 11, 12, 13, 11, 12, 13, 14 } + permuted_samples[2] = vqtbl1q_s8(clamped_samples[0], permute_tbl.val[2]); + // {12, 13, 14, 15, 13, 14, 15, 16, 14, 15, 16, 17, 15, 16, 17, 18 } + permuted_samples[3] = vqtbl1q_s8(clamped_samples[1], permute_tbl.val[2]); + + // Accumulate dot product into 'correction' to account for range clamp. + // First 4 output values. + sum[0] = vdotq_laneq_s32(correction, permuted_samples[0], filters, 0); + sum[0] = vdotq_laneq_s32(sum[0], permuted_samples[1], filters, 1); + sum[0] = vdotq_laneq_s32(sum[0], permuted_samples[2], filters, 2); + // Second 4 output values. + sum[1] = vdotq_laneq_s32(correction, permuted_samples[1], filters, 0); + sum[1] = vdotq_laneq_s32(sum[1], permuted_samples[2], filters, 1); + sum[1] = vdotq_laneq_s32(sum[1], permuted_samples[3], filters, 2); + + // Narrow and re-pack. + return vcombine_s16(vshrn_n_s32(sum[0], ROUND0_BITS), + vshrn_n_s32(sum[1], ROUND0_BITS)); +} + +static INLINE void convolve_2d_sr_horiz_12tap_neon_dotprod( + const uint8_t *src_ptr, int src_stride, int16_t *dst_ptr, + const int dst_stride, int w, int h, const int16x8_t x_filter_0_7, + const int16x4_t x_filter_8_11) { + const int bd = 8; + + // Special case the following no-op filter as 128 won't fit into the 8-bit + // signed dot-product instruction: + // { 0, 0, 0, 0, 0, 128, 0, 0, 0, 0, 0, 0 } + if (vgetq_lane_s16(x_filter_0_7, 5) == 128) { + const uint16x8_t horiz_const = vdupq_n_u16((1 << (bd - 1))); + // Undo the horizontal offset in the calling function. + src_ptr += 5; + + do { + const uint8_t *s = src_ptr; + int16_t *d = dst_ptr; + int width = w; + + do { + uint8x8_t s0 = vld1_u8(s); + uint16x8_t d0 = vaddw_u8(horiz_const, s0); + d0 = vshlq_n_u16(d0, FILTER_BITS - ROUND0_BITS); + // Store 8 elements to avoid additional branches. This is safe if the + // actual block width is < 8 because the intermediate buffer is large + // enough to accommodate 128x128 blocks. + vst1q_s16(d, vreinterpretq_s16_u16(d0)); + + d += 8; + s += 8; + width -= 8; + } while (width > 0); + src_ptr += src_stride; + dst_ptr += dst_stride; + } while (--h != 0); + + } else { + // Narrow filter values to 8-bit. + const int16x8x2_t x_filter_s16 = { + { x_filter_0_7, vcombine_s16(x_filter_8_11, vdup_n_s16(0)) } + }; + const int8x16_t x_filter = vcombine_s8(vmovn_s16(x_filter_s16.val[0]), + vmovn_s16(x_filter_s16.val[1])); + + // This shim of 1 << (ROUND0_BITS - 1) enables us to use non-rounding shifts + // - which are generally faster than rounding shifts on modern CPUs. + const int32_t horiz_const = + ((1 << (bd + FILTER_BITS - 1)) + (1 << (ROUND0_BITS - 1))); + // Dot product constants. 
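+    // 'horiz_const' above carries the intermediate-range offset
+    // 1 << (bd + FILTER_BITS - 1), which the vertical pass in
+    // convolve_neon.h later removes as sub_const = 1 << (bd - 1) after its
+    // final shift, in addition to the first-stage rounding shim. The tap sum
+    // scaled by 128 (computed below) again cancels the -128 sample bias.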
+ const int32x4_t correct_tmp = + vaddq_s32(vpaddlq_s16(vshlq_n_s16(x_filter_s16.val[0], 7)), + vpaddlq_s16(vshlq_n_s16(x_filter_s16.val[1], 7))); + const int32x4_t correction = + vdupq_n_s32(vaddvq_s32(correct_tmp) + horiz_const); + const uint8x16_t range_limit = vdupq_n_u8(128); + const uint8x16x3_t permute_tbl = vld1q_u8_x3(dot_prod_permute_tbl); + + if (w <= 4) { + do { + uint8x16_t s0, s1, s2, s3; + load_u8_16x4(src_ptr, src_stride, &s0, &s1, &s2, &s3); + + int16x4_t d0 = convolve12_4_2d_h(s0, x_filter, correction, range_limit, + permute_tbl); + int16x4_t d1 = convolve12_4_2d_h(s1, x_filter, correction, range_limit, + permute_tbl); + int16x4_t d2 = convolve12_4_2d_h(s2, x_filter, correction, range_limit, + permute_tbl); + int16x4_t d3 = convolve12_4_2d_h(s3, x_filter, correction, range_limit, + permute_tbl); + + store_s16_4x4(dst_ptr, dst_stride, d0, d1, d2, d3); + + src_ptr += 4 * src_stride; + dst_ptr += 4 * dst_stride; + h -= 4; + } while (h > 4); + + do { + uint8x16_t s0 = vld1q_u8(src_ptr); + int16x4_t d0 = convolve12_4_2d_h(s0, x_filter, correction, range_limit, + permute_tbl); + vst1_s16(dst_ptr, d0); + + src_ptr += src_stride; + dst_ptr += dst_stride; + } while (--h != 0); + + } else { + do { + const uint8_t *s = src_ptr; + int16_t *d = dst_ptr; + int width = w; + + do { + uint8x16_t s0[2], s1[2], s2[2], s3[2]; + load_u8_16x4(s, src_stride, &s0[0], &s1[0], &s2[0], &s3[0]); + load_u8_16x4(s + 4, src_stride, &s0[1], &s1[1], &s2[1], &s3[1]); + + int16x8_t d0 = convolve12_8_2d_h(s0, x_filter, correction, + range_limit, permute_tbl); + int16x8_t d1 = convolve12_8_2d_h(s1, x_filter, correction, + range_limit, permute_tbl); + int16x8_t d2 = convolve12_8_2d_h(s2, x_filter, correction, + range_limit, permute_tbl); + int16x8_t d3 = convolve12_8_2d_h(s3, x_filter, correction, + range_limit, permute_tbl); + + store_s16_8x4(d, dst_stride, d0, d1, d2, d3); + + s += 8; + d += 8; + width -= 8; + } while (width != 0); + src_ptr += 4 * src_stride; + dst_ptr += 4 * dst_stride; + h -= 4; + } while (h > 4); + + do { + const uint8_t *s = src_ptr; + int16_t *d = dst_ptr; + int width = w; + + do { + uint8x16_t s0[2]; + s0[0] = vld1q_u8(s); + s0[1] = vld1q_u8(s + 4); + int16x8_t d0 = convolve12_8_2d_h(s0, x_filter, correction, + range_limit, permute_tbl); + vst1q_s16(d, d0); + + s += 8; + d += 8; + width -= 8; + } while (width != 0); + src_ptr += src_stride; + dst_ptr += dst_stride; + } while (--h != 0); + } + } +} + +static INLINE int16x4_t convolve4_4_2d_h(uint8x16_t samples, + const int8x8_t filters, + const int32x4_t correction, + const uint8x16_t range_limit, + const uint8x16_t permute_tbl) { + // Clamp sample range to [-128, 127] for 8-bit signed dot product. + int8x16_t clamped_samples = + vreinterpretq_s8_u8(vsubq_u8(samples, range_limit)); + + // Permute samples ready for dot product. + // { 0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6 } + int8x16_t permuted_samples = vqtbl1q_s8(clamped_samples, permute_tbl); + + // Accumulate dot product into 'correction' to account for range clamp. + int32x4_t sum = vdotq_lane_s32(correction, permuted_samples, filters, 0); + + // We halved the convolution filter values so -1 from the right shift. 
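+  // With halved taps and a correspondingly halved 'correction', 'sum' holds
+  // exactly half of the full-precision value, so shifting by ROUND0_BITS - 1
+  // matches shifting the unhalved sum by ROUND0_BITS.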
+ return vshrn_n_s32(sum, ROUND0_BITS - 1); +} + +static INLINE int16x8_t convolve8_8_2d_h(uint8x16_t samples, + const int8x8_t filters, + const int32x4_t correction, + const uint8x16_t range_limit, + const uint8x16x3_t permute_tbl) { + int8x16_t clamped_samples, permuted_samples[3]; + int32x4_t sum[2]; + + // Clamp sample range to [-128, 127] for 8-bit signed dot product. + clamped_samples = vreinterpretq_s8_u8(vsubq_u8(samples, range_limit)); + + // Permute samples ready for dot product. + // { 0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6 } + permuted_samples[0] = vqtbl1q_s8(clamped_samples, permute_tbl.val[0]); + // { 4, 5, 6, 7, 5, 6, 7, 8, 6, 7, 8, 9, 7, 8, 9, 10 } + permuted_samples[1] = vqtbl1q_s8(clamped_samples, permute_tbl.val[1]); + // { 8, 9, 10, 11, 9, 10, 11, 12, 10, 11, 12, 13, 11, 12, 13, 14 } + permuted_samples[2] = vqtbl1q_s8(clamped_samples, permute_tbl.val[2]); + + // Accumulate dot product into 'correction' to account for range clamp. + // First 4 output values. + sum[0] = vdotq_lane_s32(correction, permuted_samples[0], filters, 0); + sum[0] = vdotq_lane_s32(sum[0], permuted_samples[1], filters, 1); + // Second 4 output values. + sum[1] = vdotq_lane_s32(correction, permuted_samples[1], filters, 0); + sum[1] = vdotq_lane_s32(sum[1], permuted_samples[2], filters, 1); + + // Narrow and re-pack. + // We halved the convolution filter values so -1 from the right shift. + return vcombine_s16(vshrn_n_s32(sum[0], ROUND0_BITS - 1), + vshrn_n_s32(sum[1], ROUND0_BITS - 1)); +} + +static INLINE void convolve_2d_sr_horiz_neon_dotprod( + const uint8_t *src, int src_stride, int16_t *im_block, int im_stride, int w, + int im_h, const int16_t *x_filter_ptr) { + const int bd = 8; + // This shim of 1 << ((ROUND0_BITS - 1) - 1) enables us to use non-rounding + // shifts - which are generally faster than rounding shifts on modern CPUs. + // The outermost -1 is needed because we halved the filter values. + const int32_t horiz_const = + ((1 << (bd + FILTER_BITS - 2)) + (1 << ((ROUND0_BITS - 1) - 1))); + // Dot product constants. + const int16x8_t x_filter_s16 = vld1q_s16(x_filter_ptr); + const int32_t correction_s32 = + vaddlvq_s16(vshlq_n_s16(x_filter_s16, FILTER_BITS - 1)); + const int32x4_t correction = vdupq_n_s32(correction_s32 + horiz_const); + const uint8x16_t range_limit = vdupq_n_u8(128); + + const uint8_t *src_ptr = src; + int16_t *dst_ptr = im_block; + int dst_stride = im_stride; + int height = im_h; + + if (w <= 4) { + const uint8x16_t permute_tbl = vld1q_u8(dot_prod_permute_tbl); + // 4-tap filters are used for blocks having width <= 4. + // Filter values are even, so halve to reduce intermediate precision reqs. 
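+    // The 4-tap kernels are stored padded to 8 taps with zeros at either
+    // end, so only the middle four coefficients (x_filter_ptr + 2) are
+    // loaded and the upper half of the vector is zeroed before narrowing.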
+ const int8x8_t x_filter = + vshrn_n_s16(vcombine_s16(vld1_s16(x_filter_ptr + 2), vdup_n_s16(0)), 1); + + src_ptr += 2; + + do { + uint8x16_t s0, s1, s2, s3; + load_u8_16x4(src_ptr, src_stride, &s0, &s1, &s2, &s3); + + int16x4_t d0 = + convolve4_4_2d_h(s0, x_filter, correction, range_limit, permute_tbl); + int16x4_t d1 = + convolve4_4_2d_h(s1, x_filter, correction, range_limit, permute_tbl); + int16x4_t d2 = + convolve4_4_2d_h(s2, x_filter, correction, range_limit, permute_tbl); + int16x4_t d3 = + convolve4_4_2d_h(s3, x_filter, correction, range_limit, permute_tbl); + + store_s16_4x4(dst_ptr, dst_stride, d0, d1, d2, d3); + + src_ptr += 4 * src_stride; + dst_ptr += 4 * dst_stride; + height -= 4; + } while (height > 4); + + do { + uint8x16_t s0 = vld1q_u8(src_ptr); + int16x4_t d0 = + convolve4_4_2d_h(s0, x_filter, correction, range_limit, permute_tbl); + vst1_s16(dst_ptr, d0); + + src_ptr += src_stride; + dst_ptr += dst_stride; + } while (--height != 0); + } else { + const uint8x16x3_t permute_tbl = vld1q_u8_x3(dot_prod_permute_tbl); + // Filter values are even, so halve to reduce intermediate precision reqs. + const int8x8_t x_filter = vshrn_n_s16(x_filter_s16, 1); + + do { + const uint8_t *s = src_ptr; + int16_t *d = dst_ptr; + int width = w; + + do { + uint8x16_t s0, s1, s2, s3; + load_u8_16x4(s, src_stride, &s0, &s1, &s2, &s3); + + int16x8_t d0 = convolve8_8_2d_h(s0, x_filter, correction, range_limit, + permute_tbl); + int16x8_t d1 = convolve8_8_2d_h(s1, x_filter, correction, range_limit, + permute_tbl); + int16x8_t d2 = convolve8_8_2d_h(s2, x_filter, correction, range_limit, + permute_tbl); + int16x8_t d3 = convolve8_8_2d_h(s3, x_filter, correction, range_limit, + permute_tbl); + + store_s16_8x4(d, dst_stride, d0, d1, d2, d3); + + s += 8; + d += 8; + width -= 8; + } while (width != 0); + src_ptr += 4 * src_stride; + dst_ptr += 4 * dst_stride; + height -= 4; + } while (height > 4); + + do { + const uint8_t *s = src_ptr; + int16_t *d = dst_ptr; + int width = w; + + do { + uint8x16_t s0 = vld1q_u8(s); + int16x8_t d0 = convolve8_8_2d_h(s0, x_filter, correction, range_limit, + permute_tbl); + vst1q_s16(d, d0); + + s += 8; + d += 8; + width -= 8; + } while (width != 0); + src_ptr += src_stride; + dst_ptr += dst_stride; + } while (--height != 0); + } +} + +void av1_convolve_2d_sr_neon_dotprod(const uint8_t *src, int src_stride, + uint8_t *dst, int dst_stride, int w, int h, + const InterpFilterParams *filter_params_x, + const InterpFilterParams *filter_params_y, + const int subpel_x_qn, + const int subpel_y_qn, + ConvolveParams *conv_params) { + if (w == 2 || h == 2) { + av1_convolve_2d_sr_c(src, src_stride, dst, dst_stride, w, h, + filter_params_x, filter_params_y, subpel_x_qn, + subpel_y_qn, conv_params); + return; + } + + const int y_filter_taps = get_filter_tap(filter_params_y, subpel_y_qn); + const int clamped_y_taps = y_filter_taps < 6 ? 
6 : y_filter_taps; + const int im_h = h + clamped_y_taps - 1; + const int im_stride = MAX_SB_SIZE; + const int vert_offset = clamped_y_taps / 2 - 1; + const int horiz_offset = filter_params_x->taps / 2 - 1; + const uint8_t *src_ptr = src - vert_offset * src_stride - horiz_offset; + + const int16_t *x_filter_ptr = av1_get_interp_filter_subpel_kernel( + filter_params_x, subpel_x_qn & SUBPEL_MASK); + const int16_t *y_filter_ptr = av1_get_interp_filter_subpel_kernel( + filter_params_y, subpel_y_qn & SUBPEL_MASK); + + if (filter_params_x->taps > 8) { + DECLARE_ALIGNED(16, int16_t, + im_block[(MAX_SB_SIZE + MAX_FILTER_TAP - 1) * MAX_SB_SIZE]); + + const int16x8_t x_filter_0_7 = vld1q_s16(x_filter_ptr); + const int16x4_t x_filter_8_11 = vld1_s16(x_filter_ptr + 8); + const int16x8_t y_filter_0_7 = vld1q_s16(y_filter_ptr); + const int16x4_t y_filter_8_11 = vld1_s16(y_filter_ptr + 8); + + convolve_2d_sr_horiz_12tap_neon_dotprod(src_ptr, src_stride, im_block, + im_stride, w, im_h, x_filter_0_7, + x_filter_8_11); + + convolve_2d_sr_vert_12tap_neon(im_block, im_stride, dst, dst_stride, w, h, + y_filter_0_7, y_filter_8_11); + } else { + DECLARE_ALIGNED(16, int16_t, + im_block[(MAX_SB_SIZE + SUBPEL_TAPS - 1) * MAX_SB_SIZE]); + + convolve_2d_sr_horiz_neon_dotprod(src_ptr, src_stride, im_block, im_stride, + w, im_h, x_filter_ptr); + + const int16x8_t y_filter = vld1q_s16(y_filter_ptr); + + if (clamped_y_taps <= 6) { + convolve_2d_sr_vert_6tap_neon(im_block, im_stride, dst, dst_stride, w, h, + y_filter); + } else { + convolve_2d_sr_vert_8tap_neon(im_block, im_stride, dst, dst_stride, w, h, + y_filter); + } + } +} diff --git a/third_party/aom/av1/common/arm/convolve_neon_i8mm.c b/third_party/aom/av1/common/arm/convolve_neon_i8mm.c new file mode 100644 index 0000000000..bbcd6f201a --- /dev/null +++ b/third_party/aom/av1/common/arm/convolve_neon_i8mm.c @@ -0,0 +1,702 @@ +/* + * Copyright (c) 2023, Alliance for Open Media. All rights reserved + * + * This source code is subject to the terms of the BSD 2 Clause License and + * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License + * was not distributed with this source code in the LICENSE file, you can + * obtain it at www.aomedia.org/license/software. If the Alliance for Open + * Media Patent License 1.0 was not distributed with this source code in the + * PATENTS file, you can obtain it at www.aomedia.org/license/patent. + */ + +#include <arm_neon.h> + +#include "config/aom_config.h" +#include "config/av1_rtcd.h" + +#include "aom_dsp/aom_dsp_common.h" +#include "aom_dsp/arm/mem_neon.h" +#include "aom_ports/mem.h" +#include "av1/common/arm/convolve_neon.h" +#include "av1/common/convolve.h" +#include "av1/common/filter.h" + +DECLARE_ALIGNED(16, static const uint8_t, dot_prod_permute_tbl[48]) = { + 0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6, + 4, 5, 6, 7, 5, 6, 7, 8, 6, 7, 8, 9, 7, 8, 9, 10, + 8, 9, 10, 11, 9, 10, 11, 12, 10, 11, 12, 13, 11, 12, 13, 14 +}; + +static INLINE int16x4_t convolve12_4_x(uint8x16_t samples, + const int8x16_t filter, + const uint8x16x3_t permute_tbl, + const int32x4_t horiz_const) { + uint8x16_t permuted_samples[3]; + int32x4_t sum; + + // Permute samples ready for dot product. 
+ // { 0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6 } + permuted_samples[0] = vqtbl1q_u8(samples, permute_tbl.val[0]); + // { 4, 5, 6, 7, 5, 6, 7, 8, 6, 7, 8, 9, 7, 8, 9, 10 } + permuted_samples[1] = vqtbl1q_u8(samples, permute_tbl.val[1]); + // { 8, 9, 10, 11, 9, 10, 11, 12, 10, 11, 12, 13, 11, 12, 13, 14 } + permuted_samples[2] = vqtbl1q_u8(samples, permute_tbl.val[2]); + + // First 4 output values. + sum = vusdotq_laneq_s32(horiz_const, permuted_samples[0], filter, 0); + sum = vusdotq_laneq_s32(sum, permuted_samples[1], filter, 1); + sum = vusdotq_laneq_s32(sum, permuted_samples[2], filter, 2); + + return vqrshrn_n_s32(sum, FILTER_BITS); +} + +static INLINE uint8x8_t convolve12_8_x(uint8x16_t samples[2], + const int8x16_t filter, + const uint8x16x3_t permute_tbl, + const int32x4_t horiz_const) { + uint8x16_t permuted_samples[4]; + int32x4_t sum[2]; + + // Permute samples ready for dot product. + // { 0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6 } + permuted_samples[0] = vqtbl1q_u8(samples[0], permute_tbl.val[0]); + // { 4, 5, 6, 7, 5, 6, 7, 8, 6, 7, 8, 9, 7, 8, 9, 10 } + permuted_samples[1] = vqtbl1q_u8(samples[0], permute_tbl.val[1]); + // { 8, 9, 10, 11, 9, 10, 11, 12, 10, 11, 12, 13, 11, 12, 13, 14 } + permuted_samples[2] = vqtbl1q_u8(samples[0], permute_tbl.val[2]); + // {12, 13, 14, 15, 13, 14, 15, 16, 14, 15, 16, 17, 15, 16, 17, 18 } + permuted_samples[3] = vqtbl1q_u8(samples[1], permute_tbl.val[2]); + + // First 4 output values. + sum[0] = vusdotq_laneq_s32(horiz_const, permuted_samples[0], filter, 0); + sum[0] = vusdotq_laneq_s32(sum[0], permuted_samples[1], filter, 1); + sum[0] = vusdotq_laneq_s32(sum[0], permuted_samples[2], filter, 2); + // Second 4 output values. + sum[1] = vusdotq_laneq_s32(horiz_const, permuted_samples[1], filter, 0); + sum[1] = vusdotq_laneq_s32(sum[1], permuted_samples[2], filter, 1); + sum[1] = vusdotq_laneq_s32(sum[1], permuted_samples[3], filter, 2); + + // Narrow and re-pack. + int16x8_t sum_s16 = vcombine_s16(vqrshrn_n_s32(sum[0], FILTER_BITS), + vqrshrn_n_s32(sum[1], FILTER_BITS)); + return vqmovun_s16(sum_s16); +} + +static INLINE void convolve_x_sr_12tap_neon_i8mm(const uint8_t *src, + int src_stride, uint8_t *dst, + int dst_stride, int w, int h, + const int16_t *x_filter_ptr) { + const int16x8_t filter_0_7 = vld1q_s16(x_filter_ptr); + const int16x4_t filter_8_11 = vld1_s16(x_filter_ptr + 8); + const int16x8_t filter_8_15 = vcombine_s16(filter_8_11, vdup_n_s16(0)); + const int8x16_t filter = + vcombine_s8(vmovn_s16(filter_0_7), vmovn_s16(filter_8_15)); + + // Special case the following no-op filter as 128 won't fit into the + // 8-bit signed dot-product instruction: + // { 0, 0, 0, 0, 0, 128, 0, 0, 0, 0, 0, 0 } + if (vgetq_lane_s16(filter_0_7, 5) == 128) { + // Undo the horizontal offset in the calling function. + src += 5; + + do { + const uint8_t *s = src; + uint8_t *d = dst; + int width = w; + + do { + uint8x8_t d0 = vld1_u8(s); + if (w == 4) { + store_u8_4x1(d, d0); + } else { + vst1_u8(d, d0); + } + + s += 8; + d += 8; + width -= 8; + } while (width > 0); + src += src_stride; + dst += dst_stride; + } while (--h != 0); + } else { + const uint8x16x3_t permute_tbl = vld1q_u8_x3(dot_prod_permute_tbl); + // This shim of 1 << (ROUND0_BITS - 1) enables us to use a single rounding + // right shift by FILTER_BITS - instead of a first rounding right shift by + // ROUND0_BITS, followed by second rounding right shift by FILTER_BITS - + // ROUND0_BITS. 
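+    // With the usual AV1 settings (FILTER_BITS == 7, ROUND0_BITS == 3) the
+    // shim is 1 << 2, making a single rounding shift by 7 equivalent to
+    // rounding shifts by 3 and then 4. Unlike the sdot variants, vusdot
+    // consumes the unsigned samples directly, so no 128 * sum(taps)
+    // compensation term is needed here, only this rounding shim.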
+ const int32x4_t horiz_const = vdupq_n_s32(1 << (ROUND0_BITS - 1)); + + if (w <= 4) { + do { + uint8x16_t s0, s1, s2, s3; + load_u8_16x4(src, src_stride, &s0, &s1, &s2, &s3); + + int16x4_t d0 = convolve12_4_x(s0, filter, permute_tbl, horiz_const); + int16x4_t d1 = convolve12_4_x(s1, filter, permute_tbl, horiz_const); + int16x4_t d2 = convolve12_4_x(s2, filter, permute_tbl, horiz_const); + int16x4_t d3 = convolve12_4_x(s3, filter, permute_tbl, horiz_const); + + uint8x8_t d01 = vqmovun_s16(vcombine_s16(d0, d1)); + uint8x8_t d23 = vqmovun_s16(vcombine_s16(d2, d3)); + + store_u8x4_strided_x2(dst + 0 * dst_stride, dst_stride, d01); + store_u8x4_strided_x2(dst + 2 * dst_stride, dst_stride, d23); + + dst += 4 * dst_stride; + src += 4 * src_stride; + h -= 4; + } while (h != 0); + } else { + do { + const uint8_t *s = src; + uint8_t *d = dst; + int width = w; + + do { + uint8x16_t s0[2], s1[2], s2[2], s3[2]; + load_u8_16x4(s, src_stride, &s0[0], &s1[0], &s2[0], &s3[0]); + load_u8_16x4(s + 4, src_stride, &s0[1], &s1[1], &s2[1], &s3[1]); + + uint8x8_t d0 = convolve12_8_x(s0, filter, permute_tbl, horiz_const); + uint8x8_t d1 = convolve12_8_x(s1, filter, permute_tbl, horiz_const); + uint8x8_t d2 = convolve12_8_x(s2, filter, permute_tbl, horiz_const); + uint8x8_t d3 = convolve12_8_x(s3, filter, permute_tbl, horiz_const); + + store_u8_8x4(d + 0 * dst_stride, dst_stride, d0, d1, d2, d3); + + s += 8; + d += 8; + width -= 8; + } while (width != 0); + src += 4 * src_stride; + dst += 4 * dst_stride; + h -= 4; + } while (h != 0); + } + } +} + +static INLINE int16x4_t convolve4_4_x(uint8x16_t samples, const int8x8_t filter, + const uint8x16_t permute_tbl, + const int32x4_t horiz_const) { + // Permute samples ready for dot product. + // { 0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6 } + uint8x16_t permuted_samples = vqtbl1q_u8(samples, permute_tbl); + + // First 4 output values. + int32x4_t sum = vusdotq_lane_s32(horiz_const, permuted_samples, filter, 0); + + // Packing is performed by the caller. + return vmovn_s32(sum); +} + +static INLINE uint8x8_t convolve8_8_x(uint8x16_t samples, const int8x8_t filter, + const uint8x16x3_t permute_tbl, + const int32x4_t horiz_const) { + uint8x16_t permuted_samples[3]; + int32x4_t sum[2]; + + // Permute samples ready for dot product. + // { 0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6 } + permuted_samples[0] = vqtbl1q_u8(samples, permute_tbl.val[0]); + // { 4, 5, 6, 7, 5, 6, 7, 8, 6, 7, 8, 9, 7, 8, 9, 10 } + permuted_samples[1] = vqtbl1q_u8(samples, permute_tbl.val[1]); + // { 8, 9, 10, 11, 9, 10, 11, 12, 10, 11, 12, 13, 11, 12, 13, 14 } + permuted_samples[2] = vqtbl1q_u8(samples, permute_tbl.val[2]); + + // First 4 output values. + sum[0] = vusdotq_lane_s32(horiz_const, permuted_samples[0], filter, 0); + sum[0] = vusdotq_lane_s32(sum[0], permuted_samples[1], filter, 1); + // Second 4 output values. + sum[1] = vusdotq_lane_s32(horiz_const, permuted_samples[1], filter, 0); + sum[1] = vusdotq_lane_s32(sum[1], permuted_samples[2], filter, 1); + + int16x8_t sum_s16 = vcombine_s16(vmovn_s32(sum[0]), vmovn_s32(sum[1])); + // We halved the convolution filter values so - 1 from the right shift. 
+ return vqrshrun_n_s16(sum_s16, FILTER_BITS - 1); +} + +void av1_convolve_x_sr_neon_i8mm(const uint8_t *src, int src_stride, + uint8_t *dst, int dst_stride, int w, int h, + const InterpFilterParams *filter_params_x, + const int subpel_x_qn, + ConvolveParams *conv_params) { + if (w == 2 || h == 2) { + av1_convolve_x_sr_c(src, src_stride, dst, dst_stride, w, h, filter_params_x, + subpel_x_qn, conv_params); + return; + } + + const uint8_t horiz_offset = filter_params_x->taps / 2 - 1; + src -= horiz_offset; + + const int16_t *x_filter_ptr = av1_get_interp_filter_subpel_kernel( + filter_params_x, subpel_x_qn & SUBPEL_MASK); + + if (filter_params_x->taps > 8) { + convolve_x_sr_12tap_neon_i8mm(src, src_stride, dst, dst_stride, w, h, + x_filter_ptr); + return; + } + + // This shim of 1 << ((ROUND0_BITS - 1) - 1) enables us to use a single + // rounding right shift by FILTER_BITS - instead of a first rounding right + // shift by ROUND0_BITS, followed by second rounding right shift by + // FILTER_BITS - ROUND0_BITS. + // The outermost -1 is needed because we will halve the filter values. + const int32x4_t horiz_const = vdupq_n_s32(1 << ((ROUND0_BITS - 1) - 1)); + + if (w <= 4) { + const uint8x16_t permute_tbl = vld1q_u8(dot_prod_permute_tbl); + // 4-tap filters are used for blocks having width <= 4. + // Filter values are even, so halve to reduce intermediate precision reqs. + const int8x8_t x_filter = + vshrn_n_s16(vcombine_s16(vld1_s16(x_filter_ptr + 2), vdup_n_s16(0)), 1); + + src += 2; + + do { + uint8x16_t s0, s1, s2, s3; + load_u8_16x4(src, src_stride, &s0, &s1, &s2, &s3); + + int16x4_t d0 = convolve4_4_x(s0, x_filter, permute_tbl, horiz_const); + int16x4_t d1 = convolve4_4_x(s1, x_filter, permute_tbl, horiz_const); + int16x4_t d2 = convolve4_4_x(s2, x_filter, permute_tbl, horiz_const); + int16x4_t d3 = convolve4_4_x(s3, x_filter, permute_tbl, horiz_const); + + // We halved the convolution filter values so - 1 from the right shift. + uint8x8_t d01 = vqrshrun_n_s16(vcombine_s16(d0, d1), FILTER_BITS - 1); + uint8x8_t d23 = vqrshrun_n_s16(vcombine_s16(d2, d3), FILTER_BITS - 1); + + store_u8x4_strided_x2(dst + 0 * dst_stride, dst_stride, d01); + store_u8x4_strided_x2(dst + 2 * dst_stride, dst_stride, d23); + + src += 4 * src_stride; + dst += 4 * dst_stride; + h -= 4; + } while (h != 0); + + } else { + const uint8x16x3_t permute_tbl = vld1q_u8_x3(dot_prod_permute_tbl); + // Filter values are even, so halve to reduce intermediate precision reqs. + const int8x8_t x_filter = vshrn_n_s16(vld1q_s16(x_filter_ptr), 1); + + do { + const uint8_t *s = src; + uint8_t *d = dst; + int width = w; + + do { + uint8x16_t s0, s1, s2, s3; + load_u8_16x4(s, src_stride, &s0, &s1, &s2, &s3); + + uint8x8_t d0 = convolve8_8_x(s0, x_filter, permute_tbl, horiz_const); + uint8x8_t d1 = convolve8_8_x(s1, x_filter, permute_tbl, horiz_const); + uint8x8_t d2 = convolve8_8_x(s2, x_filter, permute_tbl, horiz_const); + uint8x8_t d3 = convolve8_8_x(s3, x_filter, permute_tbl, horiz_const); + + store_u8_8x4(d, dst_stride, d0, d1, d2, d3); + + s += 8; + d += 8; + width -= 8; + } while (width != 0); + src += 4 * src_stride; + dst += 4 * dst_stride; + h -= 4; + } while (h != 0); + } +} + +static INLINE int16x4_t convolve12_4_2d_h(uint8x16_t samples, + const int8x16_t filters, + const uint8x16x3_t permute_tbl, + int32x4_t horiz_const) { + uint8x16_t permuted_samples[3]; + int32x4_t sum; + + // Permute samples ready for dot product. 
+ // { 0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6 } + permuted_samples[0] = vqtbl1q_u8(samples, permute_tbl.val[0]); + // { 4, 5, 6, 7, 5, 6, 7, 8, 6, 7, 8, 9, 7, 8, 9, 10 } + permuted_samples[1] = vqtbl1q_u8(samples, permute_tbl.val[1]); + // { 8, 9, 10, 11, 9, 10, 11, 12, 10, 11, 12, 13, 11, 12, 13, 14 } + permuted_samples[2] = vqtbl1q_u8(samples, permute_tbl.val[2]); + + // First 4 output values. + sum = vusdotq_laneq_s32(horiz_const, permuted_samples[0], filters, 0); + sum = vusdotq_laneq_s32(sum, permuted_samples[1], filters, 1); + sum = vusdotq_laneq_s32(sum, permuted_samples[2], filters, 2); + + // Narrow and re-pack. + return vshrn_n_s32(sum, ROUND0_BITS); +} + +static INLINE int16x8_t convolve12_8_2d_h(uint8x16_t samples[2], + const int8x16_t filters, + const uint8x16x3_t permute_tbl, + const int32x4_t horiz_const) { + uint8x16_t permuted_samples[4]; + int32x4_t sum[2]; + + // Permute samples ready for dot product. + // { 0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6 } + permuted_samples[0] = vqtbl1q_u8(samples[0], permute_tbl.val[0]); + // { 4, 5, 6, 7, 5, 6, 7, 8, 6, 7, 8, 9, 7, 8, 9, 10 } + permuted_samples[1] = vqtbl1q_u8(samples[0], permute_tbl.val[1]); + // { 8, 9, 10, 11, 9, 10, 11, 12, 10, 11, 12, 13, 11, 12, 13, 14 } + permuted_samples[2] = vqtbl1q_u8(samples[0], permute_tbl.val[2]); + // {12, 13, 14, 15, 13, 14, 15, 16, 14, 15, 16, 17, 15, 16, 17, 18 } + permuted_samples[3] = vqtbl1q_u8(samples[1], permute_tbl.val[2]); + + // First 4 output values. + sum[0] = vusdotq_laneq_s32(horiz_const, permuted_samples[0], filters, 0); + sum[0] = vusdotq_laneq_s32(sum[0], permuted_samples[1], filters, 1); + sum[0] = vusdotq_laneq_s32(sum[0], permuted_samples[2], filters, 2); + // Second 4 output values. + sum[1] = vusdotq_laneq_s32(horiz_const, permuted_samples[1], filters, 0); + sum[1] = vusdotq_laneq_s32(sum[1], permuted_samples[2], filters, 1); + sum[1] = vusdotq_laneq_s32(sum[1], permuted_samples[3], filters, 2); + + // Narrow and re-pack. + return vcombine_s16(vshrn_n_s32(sum[0], ROUND0_BITS), + vshrn_n_s32(sum[1], ROUND0_BITS)); +} + +static INLINE void convolve_2d_sr_horiz_12tap_neon_i8mm( + const uint8_t *src_ptr, int src_stride, int16_t *dst_ptr, + const int dst_stride, int w, int h, const int16x8_t x_filter_0_7, + const int16x4_t x_filter_8_11) { + const int bd = 8; + + // Special case the following no-op filter as 128 won't fit into the + // 8-bit signed dot-product instruction: + // { 0, 0, 0, 0, 0, 128, 0, 0, 0, 0, 0, 0 } + if (vgetq_lane_s16(x_filter_0_7, 5) == 128) { + const uint16x8_t horiz_const = vdupq_n_u16((1 << (bd - 1))); + // Undo the horizontal offset in the calling function. + src_ptr += 5; + + do { + const uint8_t *s = src_ptr; + int16_t *d = dst_ptr; + int width = w; + + do { + uint8x8_t s0 = vld1_u8(s); + uint16x8_t d0 = vaddw_u8(horiz_const, s0); + d0 = vshlq_n_u16(d0, FILTER_BITS - ROUND0_BITS); + // Store 8 elements to avoid additional branches. This is safe if the + // actual block width is < 8 because the intermediate buffer is large + // enough to accommodate 128x128 blocks. + vst1q_s16(d, vreinterpretq_s16_u16(d0)); + + d += 8; + s += 8; + width -= 8; + } while (width > 0); + src_ptr += src_stride; + dst_ptr += dst_stride; + } while (--h != 0); + + } else { + // Narrow filter values to 8-bit. 
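+    // The 12 taps are padded with four zero lanes so that, once narrowed,
+    // they fit a single int8x16_t and can be applied with three
+    // vusdotq_laneq_s32 calls (taps 0-3, 4-7 and 8-11); the zero lanes never
+    // contribute to the sum.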
+ const int16x8x2_t x_filter_s16 = { + { x_filter_0_7, vcombine_s16(x_filter_8_11, vdup_n_s16(0)) } + }; + const int8x16_t x_filter = vcombine_s8(vmovn_s16(x_filter_s16.val[0]), + vmovn_s16(x_filter_s16.val[1])); + // This shim of 1 << (ROUND0_BITS - 1) enables us to use non-rounding shifts + // - which are generally faster than rounding shifts on modern CPUs. + const int32x4_t horiz_const = + vdupq_n_s32((1 << (bd + FILTER_BITS - 1)) + (1 << (ROUND0_BITS - 1))); + const uint8x16x3_t permute_tbl = vld1q_u8_x3(dot_prod_permute_tbl); + + if (w <= 4) { + do { + uint8x16_t s0, s1, s2, s3; + load_u8_16x4(src_ptr, src_stride, &s0, &s1, &s2, &s3); + + int16x4_t d0 = + convolve12_4_2d_h(s0, x_filter, permute_tbl, horiz_const); + int16x4_t d1 = + convolve12_4_2d_h(s1, x_filter, permute_tbl, horiz_const); + int16x4_t d2 = + convolve12_4_2d_h(s2, x_filter, permute_tbl, horiz_const); + int16x4_t d3 = + convolve12_4_2d_h(s3, x_filter, permute_tbl, horiz_const); + + store_s16_4x4(dst_ptr, dst_stride, d0, d1, d2, d3); + + src_ptr += 4 * src_stride; + dst_ptr += 4 * dst_stride; + h -= 4; + } while (h > 4); + + do { + uint8x16_t s0 = vld1q_u8(src_ptr); + int16x4_t d0 = + convolve12_4_2d_h(s0, x_filter, permute_tbl, horiz_const); + vst1_s16(dst_ptr, d0); + + src_ptr += src_stride; + dst_ptr += dst_stride; + } while (--h != 0); + + } else { + do { + const uint8_t *s = src_ptr; + int16_t *d = dst_ptr; + int width = w; + + do { + uint8x16_t s0[2], s1[2], s2[2], s3[2]; + load_u8_16x4(s, src_stride, &s0[0], &s1[0], &s2[0], &s3[0]); + load_u8_16x4(s + 4, src_stride, &s0[1], &s1[1], &s2[1], &s3[1]); + + int16x8_t d0 = + convolve12_8_2d_h(s0, x_filter, permute_tbl, horiz_const); + int16x8_t d1 = + convolve12_8_2d_h(s1, x_filter, permute_tbl, horiz_const); + int16x8_t d2 = + convolve12_8_2d_h(s2, x_filter, permute_tbl, horiz_const); + int16x8_t d3 = + convolve12_8_2d_h(s3, x_filter, permute_tbl, horiz_const); + + store_s16_8x4(d, dst_stride, d0, d1, d2, d3); + + s += 8; + d += 8; + width -= 8; + } while (width != 0); + + src_ptr += 4 * src_stride; + dst_ptr += 4 * dst_stride; + h -= 4; + } while (h > 4); + + do { + const uint8_t *s = src_ptr; + int16_t *d = dst_ptr; + int width = w; + + do { + uint8x16_t s0[2]; + s0[0] = vld1q_u8(s); + s0[1] = vld1q_u8(s + 4); + int16x8_t d0 = + convolve12_8_2d_h(s0, x_filter, permute_tbl, horiz_const); + vst1q_s16(d, d0); + + s += 8; + d += 8; + width -= 8; + } while (width != 0); + src_ptr += src_stride; + dst_ptr += dst_stride; + } while (--h != 0); + } + } +} + +static INLINE int16x4_t convolve4_4_2d_h(uint8x16_t samples, + const int8x8_t filters, + const uint8x16_t permute_tbl, + const int32x4_t horiz_const) { + // Permute samples ready for dot product. + // { 0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6 } + uint8x16_t permuted_samples = vqtbl1q_u8(samples, permute_tbl); + + // First 4 output values. + int32x4_t sum = vusdotq_lane_s32(horiz_const, permuted_samples, filters, 0); + + // We halved the convolution filter values so -1 from the right shift. + return vshrn_n_s32(sum, ROUND0_BITS - 1); +} + +static INLINE int16x8_t convolve8_8_2d_h(uint8x16_t samples, + const int8x8_t filters, + const uint8x16x3_t permute_tbl, + const int32x4_t horiz_const) { + uint8x16_t permuted_samples[3]; + int32x4_t sum[2]; + + // Permute samples ready for dot product. 
+ // { 0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6 } + permuted_samples[0] = vqtbl1q_u8(samples, permute_tbl.val[0]); + // { 4, 5, 6, 7, 5, 6, 7, 8, 6, 7, 8, 9, 7, 8, 9, 10 } + permuted_samples[1] = vqtbl1q_u8(samples, permute_tbl.val[1]); + // { 8, 9, 10, 11, 9, 10, 11, 12, 10, 11, 12, 13, 11, 12, 13, 14 } + permuted_samples[2] = vqtbl1q_u8(samples, permute_tbl.val[2]); + + // First 4 output values. + sum[0] = vusdotq_lane_s32(horiz_const, permuted_samples[0], filters, 0); + sum[0] = vusdotq_lane_s32(sum[0], permuted_samples[1], filters, 1); + // Second 4 output values. + sum[1] = vusdotq_lane_s32(horiz_const, permuted_samples[1], filters, 0); + sum[1] = vusdotq_lane_s32(sum[1], permuted_samples[2], filters, 1); + + // Narrow and re-pack. + // We halved the convolution filter values so -1 from the right shift. + return vcombine_s16(vshrn_n_s32(sum[0], ROUND0_BITS - 1), + vshrn_n_s32(sum[1], ROUND0_BITS - 1)); +} + +static INLINE void convolve_2d_sr_horiz_neon_i8mm( + const uint8_t *src, int src_stride, int16_t *im_block, int im_stride, int w, + int im_h, const int16_t *x_filter_ptr) { + const int bd = 8; + // This shim of 1 << ((ROUND0_BITS - 1) - 1) enables us to use non-rounding + // shifts - which are generally faster than rounding shifts on modern CPUs. + // The outermost -1 is needed because we halved the filter values. + const int32x4_t horiz_const = vdupq_n_s32((1 << (bd + FILTER_BITS - 2)) + + (1 << ((ROUND0_BITS - 1) - 1))); + + const uint8_t *src_ptr = src; + int16_t *dst_ptr = im_block; + int dst_stride = im_stride; + int height = im_h; + + if (w <= 4) { + const uint8x16_t permute_tbl = vld1q_u8(dot_prod_permute_tbl); + // 4-tap filters are used for blocks having width <= 4. + // Filter values are even, so halve to reduce intermediate precision reqs. + const int8x8_t x_filter = + vshrn_n_s16(vcombine_s16(vld1_s16(x_filter_ptr + 2), vdup_n_s16(0)), 1); + + src_ptr += 2; + + do { + uint8x16_t s0, s1, s2, s3; + load_u8_16x4(src_ptr, src_stride, &s0, &s1, &s2, &s3); + + int16x4_t d0 = convolve4_4_2d_h(s0, x_filter, permute_tbl, horiz_const); + int16x4_t d1 = convolve4_4_2d_h(s1, x_filter, permute_tbl, horiz_const); + int16x4_t d2 = convolve4_4_2d_h(s2, x_filter, permute_tbl, horiz_const); + int16x4_t d3 = convolve4_4_2d_h(s3, x_filter, permute_tbl, horiz_const); + + store_s16_4x4(dst_ptr, dst_stride, d0, d1, d2, d3); + + src_ptr += 4 * src_stride; + dst_ptr += 4 * dst_stride; + height -= 4; + } while (height > 4); + + do { + uint8x16_t s0 = vld1q_u8(src_ptr); + int16x4_t d0 = convolve4_4_2d_h(s0, x_filter, permute_tbl, horiz_const); + vst1_s16(dst_ptr, d0); + + src_ptr += src_stride; + dst_ptr += dst_stride; + } while (--height != 0); + } else { + const uint8x16x3_t permute_tbl = vld1q_u8_x3(dot_prod_permute_tbl); + // Filter values are even, so halve to reduce intermediate precision reqs. 
+ const int8x8_t x_filter = vshrn_n_s16(vld1q_s16(x_filter_ptr), 1); + + do { + const uint8_t *s = src_ptr; + int16_t *d = dst_ptr; + int width = w; + + do { + uint8x16_t s0, s1, s2, s3; + load_u8_16x4(s, src_stride, &s0, &s1, &s2, &s3); + + int16x8_t d0 = convolve8_8_2d_h(s0, x_filter, permute_tbl, horiz_const); + int16x8_t d1 = convolve8_8_2d_h(s1, x_filter, permute_tbl, horiz_const); + int16x8_t d2 = convolve8_8_2d_h(s2, x_filter, permute_tbl, horiz_const); + int16x8_t d3 = convolve8_8_2d_h(s3, x_filter, permute_tbl, horiz_const); + + store_s16_8x4(d, dst_stride, d0, d1, d2, d3); + + s += 8; + d += 8; + width -= 8; + } while (width != 0); + src_ptr += 4 * src_stride; + dst_ptr += 4 * dst_stride; + height -= 4; + } while (height > 4); + + do { + const uint8_t *s = src_ptr; + int16_t *d = dst_ptr; + int width = w; + + do { + uint8x16_t s0 = vld1q_u8(s); + int16x8_t d0 = convolve8_8_2d_h(s0, x_filter, permute_tbl, horiz_const); + vst1q_s16(d, d0); + + s += 8; + d += 8; + width -= 8; + } while (width != 0); + src_ptr += src_stride; + dst_ptr += dst_stride; + } while (--height != 0); + } +} + +void av1_convolve_2d_sr_neon_i8mm(const uint8_t *src, int src_stride, + uint8_t *dst, int dst_stride, int w, int h, + const InterpFilterParams *filter_params_x, + const InterpFilterParams *filter_params_y, + const int subpel_x_qn, const int subpel_y_qn, + ConvolveParams *conv_params) { + if (w == 2 || h == 2) { + av1_convolve_2d_sr_c(src, src_stride, dst, dst_stride, w, h, + filter_params_x, filter_params_y, subpel_x_qn, + subpel_y_qn, conv_params); + return; + } + + const int y_filter_taps = get_filter_tap(filter_params_y, subpel_y_qn); + const int clamped_y_taps = y_filter_taps < 6 ? 6 : y_filter_taps; + const int im_h = h + clamped_y_taps - 1; + const int im_stride = MAX_SB_SIZE; + const int vert_offset = clamped_y_taps / 2 - 1; + const int horiz_offset = filter_params_x->taps / 2 - 1; + const uint8_t *src_ptr = src - vert_offset * src_stride - horiz_offset; + + const int16_t *x_filter_ptr = av1_get_interp_filter_subpel_kernel( + filter_params_x, subpel_x_qn & SUBPEL_MASK); + const int16_t *y_filter_ptr = av1_get_interp_filter_subpel_kernel( + filter_params_y, subpel_y_qn & SUBPEL_MASK); + + if (filter_params_x->taps > 8) { + DECLARE_ALIGNED(16, int16_t, + im_block[(MAX_SB_SIZE + MAX_FILTER_TAP - 1) * MAX_SB_SIZE]); + + const int16x8_t x_filter_0_7 = vld1q_s16(x_filter_ptr); + const int16x4_t x_filter_8_11 = vld1_s16(x_filter_ptr + 8); + const int16x8_t y_filter_0_7 = vld1q_s16(y_filter_ptr); + const int16x4_t y_filter_8_11 = vld1_s16(y_filter_ptr + 8); + + convolve_2d_sr_horiz_12tap_neon_i8mm(src_ptr, src_stride, im_block, + im_stride, w, im_h, x_filter_0_7, + x_filter_8_11); + + convolve_2d_sr_vert_12tap_neon(im_block, im_stride, dst, dst_stride, w, h, + y_filter_0_7, y_filter_8_11); + } else { + DECLARE_ALIGNED(16, int16_t, + im_block[(MAX_SB_SIZE + SUBPEL_TAPS - 1) * MAX_SB_SIZE]); + + convolve_2d_sr_horiz_neon_i8mm(src_ptr, src_stride, im_block, im_stride, w, + im_h, x_filter_ptr); + + const int16x8_t y_filter = vld1q_s16(y_filter_ptr); + + if (clamped_y_taps <= 6) { + convolve_2d_sr_vert_6tap_neon(im_block, im_stride, dst, dst_stride, w, h, + y_filter); + } else { + convolve_2d_sr_vert_8tap_neon(im_block, im_stride, dst, dst_stride, w, h, + y_filter); + } + } +} diff --git a/third_party/aom/av1/common/arm/highbd_compound_convolve_neon.c b/third_party/aom/av1/common/arm/highbd_compound_convolve_neon.c new file mode 100644 index 0000000000..fc03a2ee04 --- /dev/null +++ 
b/third_party/aom/av1/common/arm/highbd_compound_convolve_neon.c @@ -0,0 +1,2031 @@ +/* + * Copyright (c) 2023, Alliance for Open Media. All rights reserved + * + * This source code is subject to the terms of the BSD 2 Clause License and + * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License + * was not distributed with this source code in the LICENSE file, you can + * obtain it at www.aomedia.org/license/software. If the Alliance for Open + * Media Patent License 1.0 was not distributed with this source code in the + * PATENTS file, you can obtain it at www.aomedia.org/license/patent. + */ + +#include <assert.h> +#include <arm_neon.h> + +#include "config/aom_config.h" +#include "config/av1_rtcd.h" + +#include "aom_dsp/aom_dsp_common.h" +#include "aom_dsp/arm/mem_neon.h" +#include "aom_ports/mem.h" +#include "av1/common/convolve.h" +#include "av1/common/filter.h" +#include "av1/common/arm/highbd_convolve_neon.h" + +#define ROUND_SHIFT 2 * FILTER_BITS - ROUND0_BITS - COMPOUND_ROUND1_BITS + +static INLINE void highbd_12_comp_avg_neon(const uint16_t *src_ptr, + int src_stride, uint16_t *dst_ptr, + int dst_stride, int w, int h, + ConvolveParams *conv_params, + const int offset, const int bd) { + CONV_BUF_TYPE *ref_ptr = conv_params->dst; + const int ref_stride = conv_params->dst_stride; + const uint16x4_t offset_vec = vdup_n_u16(offset); + const uint16x8_t max = vdupq_n_u16((1 << bd) - 1); + + if (w == 4) { + do { + const uint16x4_t src = vld1_u16(src_ptr); + const uint16x4_t ref = vld1_u16(ref_ptr); + + uint16x4_t avg = vhadd_u16(src, ref); + int32x4_t d0 = vreinterpretq_s32_u32(vsubl_u16(avg, offset_vec)); + + uint16x4_t d0_u16 = vqrshrun_n_s32(d0, ROUND_SHIFT - 2); + d0_u16 = vmin_u16(d0_u16, vget_low_u16(max)); + + vst1_u16(dst_ptr, d0_u16); + + src_ptr += src_stride; + ref_ptr += ref_stride; + dst_ptr += dst_stride; + } while (--h != 0); + } else { + do { + int width = w; + const uint16_t *src = src_ptr; + const uint16_t *ref = ref_ptr; + uint16_t *dst = dst_ptr; + do { + const uint16x8_t s = vld1q_u16(src); + const uint16x8_t r = vld1q_u16(ref); + + uint16x8_t avg = vhaddq_u16(s, r); + int32x4_t d0_lo = + vreinterpretq_s32_u32(vsubl_u16(vget_low_u16(avg), offset_vec)); + int32x4_t d0_hi = + vreinterpretq_s32_u32(vsubl_u16(vget_high_u16(avg), offset_vec)); + + uint16x8_t d0 = vcombine_u16(vqrshrun_n_s32(d0_lo, ROUND_SHIFT - 2), + vqrshrun_n_s32(d0_hi, ROUND_SHIFT - 2)); + d0 = vminq_u16(d0, max); + vst1q_u16(dst, d0); + + src += 8; + ref += 8; + dst += 8; + width -= 8; + } while (width != 0); + + src_ptr += src_stride; + ref_ptr += ref_stride; + dst_ptr += dst_stride; + } while (--h != 0); + } +} + +static INLINE void highbd_comp_avg_neon(const uint16_t *src_ptr, int src_stride, + uint16_t *dst_ptr, int dst_stride, + int w, int h, + ConvolveParams *conv_params, + const int offset, const int bd) { + CONV_BUF_TYPE *ref_ptr = conv_params->dst; + const int ref_stride = conv_params->dst_stride; + const uint16x4_t offset_vec = vdup_n_u16(offset); + const uint16x8_t max = vdupq_n_u16((1 << bd) - 1); + + if (w == 4) { + do { + const uint16x4_t src = vld1_u16(src_ptr); + const uint16x4_t ref = vld1_u16(ref_ptr); + + uint16x4_t avg = vhadd_u16(src, ref); + int32x4_t d0 = vreinterpretq_s32_u32(vsubl_u16(avg, offset_vec)); + + uint16x4_t d0_u16 = vqrshrun_n_s32(d0, ROUND_SHIFT); + d0_u16 = vmin_u16(d0_u16, vget_low_u16(max)); + + vst1_u16(dst_ptr, d0_u16); + + src_ptr += src_stride; + ref_ptr += ref_stride; + dst_ptr += dst_stride; + } while (--h != 0); + } else { + do { + 
int width = w; + const uint16_t *src = src_ptr; + const uint16_t *ref = ref_ptr; + uint16_t *dst = dst_ptr; + do { + const uint16x8_t s = vld1q_u16(src); + const uint16x8_t r = vld1q_u16(ref); + + uint16x8_t avg = vhaddq_u16(s, r); + int32x4_t d0_lo = + vreinterpretq_s32_u32(vsubl_u16(vget_low_u16(avg), offset_vec)); + int32x4_t d0_hi = + vreinterpretq_s32_u32(vsubl_u16(vget_high_u16(avg), offset_vec)); + + uint16x8_t d0 = vcombine_u16(vqrshrun_n_s32(d0_lo, ROUND_SHIFT), + vqrshrun_n_s32(d0_hi, ROUND_SHIFT)); + d0 = vminq_u16(d0, max); + vst1q_u16(dst, d0); + + src += 8; + ref += 8; + dst += 8; + width -= 8; + } while (width != 0); + + src_ptr += src_stride; + ref_ptr += ref_stride; + dst_ptr += dst_stride; + } while (--h != 0); + } +} + +static INLINE void highbd_12_dist_wtd_comp_avg_neon( + const uint16_t *src_ptr, int src_stride, uint16_t *dst_ptr, int dst_stride, + int w, int h, ConvolveParams *conv_params, const int offset, const int bd) { + CONV_BUF_TYPE *ref_ptr = conv_params->dst; + const int ref_stride = conv_params->dst_stride; + const uint32x4_t offset_vec = vdupq_n_u32(offset); + const uint16x8_t max = vdupq_n_u16((1 << bd) - 1); + uint16x4_t fwd_offset = vdup_n_u16(conv_params->fwd_offset); + uint16x4_t bck_offset = vdup_n_u16(conv_params->bck_offset); + + // Weighted averaging + if (w == 4) { + do { + const uint16x4_t src = vld1_u16(src_ptr); + const uint16x4_t ref = vld1_u16(ref_ptr); + + uint32x4_t wtd_avg = vmull_u16(ref, fwd_offset); + wtd_avg = vmlal_u16(wtd_avg, src, bck_offset); + wtd_avg = vshrq_n_u32(wtd_avg, DIST_PRECISION_BITS); + int32x4_t d0 = vreinterpretq_s32_u32(vsubq_u32(wtd_avg, offset_vec)); + + uint16x4_t d0_u16 = vqrshrun_n_s32(d0, ROUND_SHIFT - 2); + d0_u16 = vmin_u16(d0_u16, vget_low_u16(max)); + + vst1_u16(dst_ptr, d0_u16); + + src_ptr += src_stride; + dst_ptr += dst_stride; + ref_ptr += ref_stride; + } while (--h != 0); + } else { + do { + int width = w; + const uint16_t *src = src_ptr; + const uint16_t *ref = ref_ptr; + uint16_t *dst = dst_ptr; + do { + const uint16x8_t s = vld1q_u16(src); + const uint16x8_t r = vld1q_u16(ref); + + uint32x4_t wtd_avg0 = vmull_u16(vget_low_u16(r), fwd_offset); + wtd_avg0 = vmlal_u16(wtd_avg0, vget_low_u16(s), bck_offset); + wtd_avg0 = vshrq_n_u32(wtd_avg0, DIST_PRECISION_BITS); + int32x4_t d0 = vreinterpretq_s32_u32(vsubq_u32(wtd_avg0, offset_vec)); + + uint32x4_t wtd_avg1 = vmull_u16(vget_high_u16(r), fwd_offset); + wtd_avg1 = vmlal_u16(wtd_avg1, vget_high_u16(s), bck_offset); + wtd_avg1 = vshrq_n_u32(wtd_avg1, DIST_PRECISION_BITS); + int32x4_t d1 = vreinterpretq_s32_u32(vsubq_u32(wtd_avg1, offset_vec)); + + uint16x8_t d01 = vcombine_u16(vqrshrun_n_s32(d0, ROUND_SHIFT - 2), + vqrshrun_n_s32(d1, ROUND_SHIFT - 2)); + d01 = vminq_u16(d01, max); + vst1q_u16(dst, d01); + + src += 8; + ref += 8; + dst += 8; + width -= 8; + } while (width != 0); + src_ptr += src_stride; + dst_ptr += dst_stride; + ref_ptr += ref_stride; + } while (--h != 0); + } +} + +static INLINE void highbd_dist_wtd_comp_avg_neon( + const uint16_t *src_ptr, int src_stride, uint16_t *dst_ptr, int dst_stride, + int w, int h, ConvolveParams *conv_params, const int offset, const int bd) { + CONV_BUF_TYPE *ref_ptr = conv_params->dst; + const int ref_stride = conv_params->dst_stride; + const uint32x4_t offset_vec = vdupq_n_u32(offset); + const uint16x8_t max = vdupq_n_u16((1 << bd) - 1); + uint16x4_t fwd_offset = vdup_n_u16(conv_params->fwd_offset); + uint16x4_t bck_offset = vdup_n_u16(conv_params->bck_offset); + + // Weighted averaging + if (w == 4) { + do { + 
const uint16x4_t src = vld1_u16(src_ptr); + const uint16x4_t ref = vld1_u16(ref_ptr); + + uint32x4_t wtd_avg = vmull_u16(ref, fwd_offset); + wtd_avg = vmlal_u16(wtd_avg, src, bck_offset); + wtd_avg = vshrq_n_u32(wtd_avg, DIST_PRECISION_BITS); + int32x4_t d0 = vreinterpretq_s32_u32(vsubq_u32(wtd_avg, offset_vec)); + + uint16x4_t d0_u16 = vqrshrun_n_s32(d0, ROUND_SHIFT); + d0_u16 = vmin_u16(d0_u16, vget_low_u16(max)); + + vst1_u16(dst_ptr, d0_u16); + + src_ptr += src_stride; + dst_ptr += dst_stride; + ref_ptr += ref_stride; + } while (--h != 0); + } else { + do { + int width = w; + const uint16_t *src = src_ptr; + const uint16_t *ref = ref_ptr; + uint16_t *dst = dst_ptr; + do { + const uint16x8_t s = vld1q_u16(src); + const uint16x8_t r = vld1q_u16(ref); + + uint32x4_t wtd_avg0 = vmull_u16(vget_low_u16(r), fwd_offset); + wtd_avg0 = vmlal_u16(wtd_avg0, vget_low_u16(s), bck_offset); + wtd_avg0 = vshrq_n_u32(wtd_avg0, DIST_PRECISION_BITS); + int32x4_t d0 = vreinterpretq_s32_u32(vsubq_u32(wtd_avg0, offset_vec)); + + uint32x4_t wtd_avg1 = vmull_u16(vget_high_u16(r), fwd_offset); + wtd_avg1 = vmlal_u16(wtd_avg1, vget_high_u16(s), bck_offset); + wtd_avg1 = vshrq_n_u32(wtd_avg1, DIST_PRECISION_BITS); + int32x4_t d1 = vreinterpretq_s32_u32(vsubq_u32(wtd_avg1, offset_vec)); + + uint16x8_t d01 = vcombine_u16(vqrshrun_n_s32(d0, ROUND_SHIFT), + vqrshrun_n_s32(d1, ROUND_SHIFT)); + d01 = vminq_u16(d01, max); + vst1q_u16(dst, d01); + + src += 8; + ref += 8; + dst += 8; + width -= 8; + } while (width != 0); + src_ptr += src_stride; + dst_ptr += dst_stride; + ref_ptr += ref_stride; + } while (--h != 0); + } +} + +static INLINE uint16x4_t highbd_12_convolve6_4( + const int16x4_t s0, const int16x4_t s1, const int16x4_t s2, + const int16x4_t s3, const int16x4_t s4, const int16x4_t s5, + const int16x8_t filter, const int32x4_t offset) { + // Values at indices 0 and 7 of y_filter are zero. + const int16x4_t filter_0_3 = vget_low_s16(filter); + const int16x4_t filter_4_7 = vget_high_s16(filter); + + int32x4_t sum = vmlal_lane_s16(offset, s0, filter_0_3, 1); + sum = vmlal_lane_s16(sum, s1, filter_0_3, 2); + sum = vmlal_lane_s16(sum, s2, filter_0_3, 3); + sum = vmlal_lane_s16(sum, s3, filter_4_7, 0); + sum = vmlal_lane_s16(sum, s4, filter_4_7, 1); + sum = vmlal_lane_s16(sum, s5, filter_4_7, 2); + + return vqshrun_n_s32(sum, ROUND0_BITS + 2); +} + +static INLINE uint16x4_t +highbd_convolve6_4(const int16x4_t s0, const int16x4_t s1, const int16x4_t s2, + const int16x4_t s3, const int16x4_t s4, const int16x4_t s5, + const int16x8_t filter, const int32x4_t offset) { + // Values at indices 0 and 7 of y_filter are zero. + const int16x4_t filter_0_3 = vget_low_s16(filter); + const int16x4_t filter_4_7 = vget_high_s16(filter); + + int32x4_t sum = vmlal_lane_s16(offset, s0, filter_0_3, 1); + sum = vmlal_lane_s16(sum, s1, filter_0_3, 2); + sum = vmlal_lane_s16(sum, s2, filter_0_3, 3); + sum = vmlal_lane_s16(sum, s3, filter_4_7, 0); + sum = vmlal_lane_s16(sum, s4, filter_4_7, 1); + sum = vmlal_lane_s16(sum, s5, filter_4_7, 2); + + return vqshrun_n_s32(sum, ROUND0_BITS); +} + +static INLINE uint16x8_t highbd_12_convolve6_8( + const int16x8_t s0, const int16x8_t s1, const int16x8_t s2, + const int16x8_t s3, const int16x8_t s4, const int16x8_t s5, + const int16x8_t filter, const int32x4_t offset) { + // Values at indices 0 and 7 of y_filter are zero. 
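+ // Only filter lanes 1-6 are multiplied in below, so each 6-tap output + // needs six multiply-accumulates instead of eight.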
+ const int16x4_t filter_0_3 = vget_low_s16(filter); + const int16x4_t filter_4_7 = vget_high_s16(filter); + + int32x4_t sum0 = vmlal_lane_s16(offset, vget_low_s16(s0), filter_0_3, 1); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s1), filter_0_3, 2); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s2), filter_0_3, 3); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s3), filter_4_7, 0); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s4), filter_4_7, 1); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s5), filter_4_7, 2); + + int32x4_t sum1 = vmlal_lane_s16(offset, vget_high_s16(s0), filter_0_3, 1); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s1), filter_0_3, 2); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s2), filter_0_3, 3); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s3), filter_4_7, 0); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s4), filter_4_7, 1); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s5), filter_4_7, 2); + + return vcombine_u16(vqshrun_n_s32(sum0, ROUND0_BITS + 2), + vqshrun_n_s32(sum1, ROUND0_BITS + 2)); +} + +static INLINE uint16x8_t +highbd_convolve6_8(const int16x8_t s0, const int16x8_t s1, const int16x8_t s2, + const int16x8_t s3, const int16x8_t s4, const int16x8_t s5, + const int16x8_t filter, const int32x4_t offset) { + // Values at indices 0 and 7 of y_filter are zero. + const int16x4_t filter_0_3 = vget_low_s16(filter); + const int16x4_t filter_4_7 = vget_high_s16(filter); + + int32x4_t sum0 = vmlal_lane_s16(offset, vget_low_s16(s0), filter_0_3, 1); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s1), filter_0_3, 2); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s2), filter_0_3, 3); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s3), filter_4_7, 0); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s4), filter_4_7, 1); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s5), filter_4_7, 2); + + int32x4_t sum1 = vmlal_lane_s16(offset, vget_high_s16(s0), filter_0_3, 1); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s1), filter_0_3, 2); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s2), filter_0_3, 3); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s3), filter_4_7, 0); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s4), filter_4_7, 1); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s5), filter_4_7, 2); + + return vcombine_u16(vqshrun_n_s32(sum0, ROUND0_BITS), vqshrun_n_s32(sum1, ROUND0_BITS)); +} + +static INLINE void highbd_12_dist_wtd_convolve_x_6tap_neon( + const uint16_t *src_ptr, int src_stride, uint16_t *dst_ptr, int dst_stride, + int w, int h, const int16_t *x_filter_ptr, const int offset) { + const int32x4_t offset_vec = vdupq_n_s32(offset); + + const int16x8_t x_filter = vld1q_s16(x_filter_ptr); + + int height = h; + + do { + int width = w; + const int16_t *s = (const int16_t *)src_ptr; + uint16_t *d = dst_ptr; + + do { + int16x8_t s0[6], s1[6], s2[6], s3[6]; + load_s16_8x6(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3], + &s0[4], &s0[5]); + load_s16_8x6(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3], + &s1[4], &s1[5]); + load_s16_8x6(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3], + &s2[4], &s2[5]); + load_s16_8x6(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3], + &s3[4], &s3[5]); + + uint16x8_t d0 = highbd_12_convolve6_8(s0[0], s0[1], s0[2], s0[3], s0[4], + s0[5], x_filter, offset_vec); + uint16x8_t d1 = highbd_12_convolve6_8(s1[0], s1[1], s1[2], s1[3], s1[4], + s1[5], x_filter, offset_vec); + uint16x8_t d2 = highbd_12_convolve6_8(s2[0], s2[1], s2[2], s2[3], s2[4], + s2[5], x_filter, offset_vec); + uint16x8_t d3 = highbd_12_convolve6_8(s3[0], s3[1], s3[2], s3[3], s3[4], + s3[5], x_filter, offset_vec); +
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3); + + s += 8; + d += 8; + width -= 8; + } while (width != 0); + src_ptr += 4 * src_stride; + dst_ptr += 4 * dst_stride; + height -= 4; + } while (height != 0); +} + +static INLINE void highbd_dist_wtd_convolve_x_6tap_neon( + const uint16_t *src_ptr, int src_stride, uint16_t *dst_ptr, int dst_stride, + int w, int h, const int16_t *x_filter_ptr, const int offset) { + const int32x4_t offset_vec = vdupq_n_s32(offset); + + const int16x8_t x_filter = vld1q_s16(x_filter_ptr); + + int height = h; + + do { + int width = w; + const int16_t *s = (const int16_t *)src_ptr; + uint16_t *d = dst_ptr; + + do { + int16x8_t s0[6], s1[6], s2[6], s3[6]; + load_s16_8x6(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3], + &s0[4], &s0[5]); + load_s16_8x6(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3], + &s1[4], &s1[5]); + load_s16_8x6(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3], + &s2[4], &s2[5]); + load_s16_8x6(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3], + &s3[4], &s3[5]); + + uint16x8_t d0 = highbd_convolve6_8(s0[0], s0[1], s0[2], s0[3], s0[4], + s0[5], x_filter, offset_vec); + uint16x8_t d1 = highbd_convolve6_8(s1[0], s1[1], s1[2], s1[3], s1[4], + s1[5], x_filter, offset_vec); + uint16x8_t d2 = highbd_convolve6_8(s2[0], s2[1], s2[2], s2[3], s2[4], + s2[5], x_filter, offset_vec); + uint16x8_t d3 = highbd_convolve6_8(s3[0], s3[1], s3[2], s3[3], s3[4], + s3[5], x_filter, offset_vec); + + store_u16_8x4(d, dst_stride, d0, d1, d2, d3); + + s += 8; + d += 8; + width -= 8; + } while (width != 0); + src_ptr += 4 * src_stride; + dst_ptr += 4 * dst_stride; + height -= 4; + } while (height != 0); +} + +static INLINE uint16x4_t highbd_12_convolve8_4( + const int16x4_t s0, const int16x4_t s1, const int16x4_t s2, + const int16x4_t s3, const int16x4_t s4, const int16x4_t s5, + const int16x4_t s6, const int16x4_t s7, const int16x8_t filter, + const int32x4_t offset) { + const int16x4_t filter_0_3 = vget_low_s16(filter); + const int16x4_t filter_4_7 = vget_high_s16(filter); + + int32x4_t sum = vmlal_lane_s16(offset, s0, filter_0_3, 0); + sum = vmlal_lane_s16(sum, s1, filter_0_3, 1); + sum = vmlal_lane_s16(sum, s2, filter_0_3, 2); + sum = vmlal_lane_s16(sum, s3, filter_0_3, 3); + sum = vmlal_lane_s16(sum, s4, filter_4_7, 0); + sum = vmlal_lane_s16(sum, s5, filter_4_7, 1); + sum = vmlal_lane_s16(sum, s6, filter_4_7, 2); + sum = vmlal_lane_s16(sum, s7, filter_4_7, 3); + + return vqshrun_n_s32(sum, ROUND0_BITS + 2); +} + +static INLINE uint16x4_t +highbd_convolve8_4(const int16x4_t s0, const int16x4_t s1, const int16x4_t s2, + const int16x4_t s3, const int16x4_t s4, const int16x4_t s5, + const int16x4_t s6, const int16x4_t s7, + const int16x8_t filter, const int32x4_t offset) { + const int16x4_t filter_0_3 = vget_low_s16(filter); + const int16x4_t filter_4_7 = vget_high_s16(filter); + + int32x4_t sum = vmlal_lane_s16(offset, s0, filter_0_3, 0); + sum = vmlal_lane_s16(sum, s1, filter_0_3, 1); + sum = vmlal_lane_s16(sum, s2, filter_0_3, 2); + sum = vmlal_lane_s16(sum, s3, filter_0_3, 3); + sum = vmlal_lane_s16(sum, s4, filter_4_7, 0); + sum = vmlal_lane_s16(sum, s5, filter_4_7, 1); + sum = vmlal_lane_s16(sum, s6, filter_4_7, 2); + sum = vmlal_lane_s16(sum, s7, filter_4_7, 3); + + return vqshrun_n_s32(sum, ROUND0_BITS); +} + +static INLINE uint16x8_t highbd_12_convolve8_8( + const int16x8_t s0, const int16x8_t s1, const int16x8_t s2, + const int16x8_t s3, const int16x8_t s4, const int16x8_t s5, + const int16x8_t s6, const int16x8_t s7, const int16x8_t 
filter, + const int32x4_t offset) { + const int16x4_t filter_0_3 = vget_low_s16(filter); + const int16x4_t filter_4_7 = vget_high_s16(filter); + + int32x4_t sum0 = vmlal_lane_s16(offset, vget_low_s16(s0), filter_0_3, 0); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s1), filter_0_3, 1); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s2), filter_0_3, 2); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s3), filter_0_3, 3); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s4), filter_4_7, 0); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s5), filter_4_7, 1); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s6), filter_4_7, 2); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s7), filter_4_7, 3); + + int32x4_t sum1 = vmlal_lane_s16(offset, vget_high_s16(s0), filter_0_3, 0); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s1), filter_0_3, 1); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s2), filter_0_3, 2); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s3), filter_0_3, 3); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s4), filter_4_7, 0); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s5), filter_4_7, 1); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s6), filter_4_7, 2); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s7), filter_4_7, 3); + + return vcombine_u16(vqshrun_n_s32(sum0, ROUND0_BITS + 2), + vqshrun_n_s32(sum1, ROUND0_BITS + 2)); +} + +static INLINE uint16x8_t +highbd_convolve8_8(const int16x8_t s0, const int16x8_t s1, const int16x8_t s2, + const int16x8_t s3, const int16x8_t s4, const int16x8_t s5, + const int16x8_t s6, const int16x8_t s7, + const int16x8_t filter, const int32x4_t offset) { + const int16x4_t filter_0_3 = vget_low_s16(filter); + const int16x4_t filter_4_7 = vget_high_s16(filter); + + int32x4_t sum0 = vmlal_lane_s16(offset, vget_low_s16(s0), filter_0_3, 0); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s1), filter_0_3, 1); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s2), filter_0_3, 2); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s3), filter_0_3, 3); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s4), filter_4_7, 0); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s5), filter_4_7, 1); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s6), filter_4_7, 2); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s7), filter_4_7, 3); + + int32x4_t sum1 = vmlal_lane_s16(offset, vget_high_s16(s0), filter_0_3, 0); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s1), filter_0_3, 1); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s2), filter_0_3, 2); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s3), filter_0_3, 3); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s4), filter_4_7, 0); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s5), filter_4_7, 1); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s6), filter_4_7, 2); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s7), filter_4_7, 3); + + return vcombine_u16(vqshrun_n_s32(sum0, ROUND0_BITS), + vqshrun_n_s32(sum1, ROUND0_BITS)); +} + +static INLINE uint16x4_t highbd_12_convolve4_4_x(const int16x4_t s[4], + const int16x4_t x_filter, + const int32x4_t offset) { + int32x4_t sum = vmlal_lane_s16(offset, s[0], x_filter, 0); + sum = vmlal_lane_s16(sum, s[1], x_filter, 1); + sum = vmlal_lane_s16(sum, s[2], x_filter, 2); + sum = vmlal_lane_s16(sum, s[3], x_filter, 3); + + return vqshrun_n_s32(sum, ROUND0_BITS + 2); +} + +static INLINE uint16x4_t highbd_convolve4_4_x(const int16x4_t s[4], + const int16x4_t x_filter, + const int32x4_t offset) { + int32x4_t sum = vmlal_lane_s16(offset, s[0], x_filter, 0); + sum = vmlal_lane_s16(sum, s[1], x_filter, 1); + sum = vmlal_lane_s16(sum, s[2], x_filter, 2); + sum = vmlal_lane_s16(sum, s[3], x_filter, 3); + + return 
vqshrun_n_s32(sum, ROUND0_BITS); +} + +static INLINE void highbd_12_dist_wtd_convolve_x_neon( + const uint16_t *src_ptr, int src_stride, uint16_t *dst_ptr, int dst_stride, + int w, int h, const int16_t *x_filter_ptr, const int offset) { + const int32x4_t offset_vec = vdupq_n_s32(offset); + + if (w == 4) { + // 4-tap filters are used for blocks having width == 4. + const int16x4_t x_filter = vld1_s16(x_filter_ptr + 2); + const int16_t *s = (const int16_t *)(src_ptr + 2); + uint16_t *d = dst_ptr; + + do { + int16x4_t s0[4], s1[4], s2[4], s3[4]; + load_s16_4x4(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3]); + load_s16_4x4(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3]); + load_s16_4x4(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3]); + load_s16_4x4(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3]); + + uint16x4_t d0 = highbd_12_convolve4_4_x(s0, x_filter, offset_vec); + uint16x4_t d1 = highbd_12_convolve4_4_x(s1, x_filter, offset_vec); + uint16x4_t d2 = highbd_12_convolve4_4_x(s2, x_filter, offset_vec); + uint16x4_t d3 = highbd_12_convolve4_4_x(s3, x_filter, offset_vec); + + store_u16_4x4(d, dst_stride, d0, d1, d2, d3); + + s += 4 * src_stride; + d += 4 * dst_stride; + h -= 4; + } while (h != 0); + } else { + const int16x8_t x_filter = vld1q_s16(x_filter_ptr); + int height = h; + + do { + int width = w; + const int16_t *s = (const int16_t *)src_ptr; + uint16_t *d = dst_ptr; + + do { + int16x8_t s0[8], s1[8], s2[8], s3[8]; + load_s16_8x8(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3], + &s0[4], &s0[5], &s0[6], &s0[7]); + load_s16_8x8(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3], + &s1[4], &s1[5], &s1[6], &s1[7]); + load_s16_8x8(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3], + &s2[4], &s2[5], &s2[6], &s2[7]); + load_s16_8x8(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3], + &s3[4], &s3[5], &s3[6], &s3[7]); + + uint16x8_t d0 = + highbd_12_convolve8_8(s0[0], s0[1], s0[2], s0[3], s0[4], s0[5], + s0[6], s0[7], x_filter, offset_vec); + uint16x8_t d1 = + highbd_12_convolve8_8(s1[0], s1[1], s1[2], s1[3], s1[4], s1[5], + s1[6], s1[7], x_filter, offset_vec); + uint16x8_t d2 = + highbd_12_convolve8_8(s2[0], s2[1], s2[2], s2[3], s2[4], s2[5], + s2[6], s2[7], x_filter, offset_vec); + uint16x8_t d3 = + highbd_12_convolve8_8(s3[0], s3[1], s3[2], s3[3], s3[4], s3[5], + s3[6], s3[7], x_filter, offset_vec); + + store_u16_8x4(d, dst_stride, d0, d1, d2, d3); + + s += 8; + d += 8; + width -= 8; + } while (width != 0); + src_ptr += 4 * src_stride; + dst_ptr += 4 * dst_stride; + height -= 4; + } while (height != 0); + } +} + +static INLINE void highbd_dist_wtd_convolve_x_neon( + const uint16_t *src_ptr, int src_stride, uint16_t *dst_ptr, int dst_stride, + int w, int h, const int16_t *x_filter_ptr, const int offset) { + const int32x4_t offset_vec = vdupq_n_s32(offset); + + if (w == 4) { + // 4-tap filters are used for blocks having width == 4. 
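+ // These filters only have non-zero taps at positions 2-5 of the 8-tap + // kernel, so load the middle four coefficients and advance the source + // pointer by 2 to match.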
+ const int16x4_t x_filter = vld1_s16(x_filter_ptr + 2); + const int16_t *s = (const int16_t *)(src_ptr + 2); + uint16_t *d = dst_ptr; + + do { + int16x4_t s0[4], s1[4], s2[4], s3[4]; + load_s16_4x4(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3]); + load_s16_4x4(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3]); + load_s16_4x4(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3]); + load_s16_4x4(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3]); + + uint16x4_t d0 = highbd_convolve4_4_x(s0, x_filter, offset_vec); + uint16x4_t d1 = highbd_convolve4_4_x(s1, x_filter, offset_vec); + uint16x4_t d2 = highbd_convolve4_4_x(s2, x_filter, offset_vec); + uint16x4_t d3 = highbd_convolve4_4_x(s3, x_filter, offset_vec); + + store_u16_4x4(d, dst_stride, d0, d1, d2, d3); + + s += 4 * src_stride; + d += 4 * dst_stride; + h -= 4; + } while (h != 0); + } else { + const int16x8_t x_filter = vld1q_s16(x_filter_ptr); + int height = h; + + do { + int width = w; + const int16_t *s = (const int16_t *)src_ptr; + uint16_t *d = dst_ptr; + + do { + int16x8_t s0[8], s1[8], s2[8], s3[8]; + load_s16_8x8(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3], + &s0[4], &s0[5], &s0[6], &s0[7]); + load_s16_8x8(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3], + &s1[4], &s1[5], &s1[6], &s1[7]); + load_s16_8x8(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3], + &s2[4], &s2[5], &s2[6], &s2[7]); + load_s16_8x8(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3], + &s3[4], &s3[5], &s3[6], &s3[7]); + + uint16x8_t d0 = + highbd_convolve8_8(s0[0], s0[1], s0[2], s0[3], s0[4], s0[5], s0[6], + s0[7], x_filter, offset_vec); + uint16x8_t d1 = + highbd_convolve8_8(s1[0], s1[1], s1[2], s1[3], s1[4], s1[5], s1[6], + s1[7], x_filter, offset_vec); + uint16x8_t d2 = + highbd_convolve8_8(s2[0], s2[1], s2[2], s2[3], s2[4], s2[5], s2[6], + s2[7], x_filter, offset_vec); + uint16x8_t d3 = + highbd_convolve8_8(s3[0], s3[1], s3[2], s3[3], s3[4], s3[5], s3[6], + s3[7], x_filter, offset_vec); + + store_u16_8x4(d, dst_stride, d0, d1, d2, d3); + + s += 8; + d += 8; + width -= 8; + } while (width != 0); + src_ptr += 4 * src_stride; + dst_ptr += 4 * dst_stride; + height -= 4; + } while (height != 0); + } +} + +void av1_highbd_dist_wtd_convolve_x_neon( + const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride, int w, + int h, const InterpFilterParams *filter_params_x, const int subpel_x_qn, + ConvolveParams *conv_params, int bd) { + DECLARE_ALIGNED(16, uint16_t, + im_block[(MAX_SB_SIZE + MAX_FILTER_TAP) * MAX_SB_SIZE]); + CONV_BUF_TYPE *dst16 = conv_params->dst; + const int x_filter_taps = get_filter_tap(filter_params_x, subpel_x_qn); + int dst16_stride = conv_params->dst_stride; + const int im_stride = MAX_SB_SIZE; + const int horiz_offset = filter_params_x->taps / 2 - 1; + assert(FILTER_BITS == COMPOUND_ROUND1_BITS); + const int offset_bits = bd + 2 * FILTER_BITS - conv_params->round_0; + const int offset_avg = (1 << (offset_bits - conv_params->round_1)) + + (1 << (offset_bits - conv_params->round_1 - 1)); + const int offset_convolve = (1 << (conv_params->round_0 - 1)) + + (1 << (bd + FILTER_BITS)) + + (1 << (bd + FILTER_BITS - 1)); + + const int16_t *x_filter_ptr = av1_get_interp_filter_subpel_kernel( + filter_params_x, subpel_x_qn & SUBPEL_MASK); + + src -= horiz_offset; + + // horizontal filter + if (bd == 12) { + if (conv_params->do_average) { + if (x_filter_taps <= 6 && w != 4) { + highbd_12_dist_wtd_convolve_x_6tap_neon(src + 1, src_stride, im_block, + im_stride, w, h, x_filter_ptr, + offset_convolve); + } 
else { + highbd_12_dist_wtd_convolve_x_neon(src, src_stride, im_block, im_stride, + w, h, x_filter_ptr, offset_convolve); + } + if (conv_params->use_dist_wtd_comp_avg) { + highbd_12_dist_wtd_comp_avg_neon(im_block, im_stride, dst, dst_stride, + w, h, conv_params, offset_avg, bd); + } else { + highbd_12_comp_avg_neon(im_block, im_stride, dst, dst_stride, w, h, + conv_params, offset_avg, bd); + } + } else { + if (x_filter_taps <= 6 && w != 4) { + highbd_12_dist_wtd_convolve_x_6tap_neon(src + 1, src_stride, dst16, + dst16_stride, w, h, + x_filter_ptr, offset_convolve); + } else { + highbd_12_dist_wtd_convolve_x_neon(src, src_stride, dst16, dst16_stride, + w, h, x_filter_ptr, offset_convolve); + } + } + } else { + if (conv_params->do_average) { + if (x_filter_taps <= 6 && w != 4) { + highbd_dist_wtd_convolve_x_6tap_neon(src + 1, src_stride, im_block, + im_stride, w, h, x_filter_ptr, + offset_convolve); + } else { + highbd_dist_wtd_convolve_x_neon(src, src_stride, im_block, im_stride, w, + h, x_filter_ptr, offset_convolve); + } + if (conv_params->use_dist_wtd_comp_avg) { + highbd_dist_wtd_comp_avg_neon(im_block, im_stride, dst, dst_stride, w, + h, conv_params, offset_avg, bd); + } else { + highbd_comp_avg_neon(im_block, im_stride, dst, dst_stride, w, h, + conv_params, offset_avg, bd); + } + } else { + if (x_filter_taps <= 6 && w != 4) { + highbd_dist_wtd_convolve_x_6tap_neon(src + 1, src_stride, dst16, + dst16_stride, w, h, x_filter_ptr, + offset_convolve); + } else { + highbd_dist_wtd_convolve_x_neon(src, src_stride, dst16, dst16_stride, w, + h, x_filter_ptr, offset_convolve); + } + } + } +} + +static INLINE void highbd_12_dist_wtd_convolve_y_6tap_neon( + const uint16_t *src_ptr, int src_stride, uint16_t *dst_ptr, int dst_stride, + int w, int h, const int16_t *y_filter_ptr, const int offset) { + const int16x8_t y_filter = vld1q_s16(y_filter_ptr); + const int32x4_t offset_vec = vdupq_n_s32(offset); + + if (w == 4) { + const int16_t *s = (const int16_t *)src_ptr; + uint16_t *d = dst_ptr; + + int16x4_t s0, s1, s2, s3, s4; + load_s16_4x5(s, src_stride, &s0, &s1, &s2, &s3, &s4); + s += 5 * src_stride; + + do { + int16x4_t s5, s6, s7, s8; + load_s16_4x4(s, src_stride, &s5, &s6, &s7, &s8); + + uint16x4_t d0 = + highbd_12_convolve6_4(s0, s1, s2, s3, s4, s5, y_filter, offset_vec); + uint16x4_t d1 = + highbd_12_convolve6_4(s1, s2, s3, s4, s5, s6, y_filter, offset_vec); + uint16x4_t d2 = + highbd_12_convolve6_4(s2, s3, s4, s5, s6, s7, y_filter, offset_vec); + uint16x4_t d3 = + highbd_12_convolve6_4(s3, s4, s5, s6, s7, s8, y_filter, offset_vec); + + store_u16_4x4(d, dst_stride, d0, d1, d2, d3); + + s0 = s4; + s1 = s5; + s2 = s6; + s3 = s7; + s4 = s8; + s += 4 * src_stride; + d += 4 * dst_stride; + h -= 4; + } while (h != 0); + } else { + do { + int height = h; + const int16_t *s = (const int16_t *)src_ptr; + uint16_t *d = dst_ptr; + + int16x8_t s0, s1, s2, s3, s4; + load_s16_8x5(s, src_stride, &s0, &s1, &s2, &s3, &s4); + s += 5 * src_stride; + + do { + int16x8_t s5, s6, s7, s8; + load_s16_8x4(s, src_stride, &s5, &s6, &s7, &s8); + + uint16x8_t d0 = + highbd_12_convolve6_8(s0, s1, s2, s3, s4, s5, y_filter, offset_vec); + uint16x8_t d1 = + highbd_12_convolve6_8(s1, s2, s3, s4, s5, s6, y_filter, offset_vec); + uint16x8_t d2 = + highbd_12_convolve6_8(s2, s3, s4, s5, s6, s7, y_filter, offset_vec); + uint16x8_t d3 = + highbd_12_convolve6_8(s3, s4, s5, s6, s7, s8, y_filter, offset_vec); + + store_u16_8x4(d, dst_stride, d0, d1, d2, d3); + + s0 = s4; + s1 = s5; + s2 = s6; + s3 = s7; + s4 = s8; + s += 4 * 
src_stride; + d += 4 * dst_stride; + height -= 4; + } while (height != 0); + src_ptr += 8; + dst_ptr += 8; + w -= 8; + } while (w != 0); + } +} + +static INLINE void highbd_dist_wtd_convolve_y_6tap_neon( + const uint16_t *src_ptr, int src_stride, uint16_t *dst_ptr, int dst_stride, + int w, int h, const int16_t *y_filter_ptr, const int offset) { + const int16x8_t y_filter = vld1q_s16(y_filter_ptr); + const int32x4_t offset_vec = vdupq_n_s32(offset); + + if (w == 4) { + const int16_t *s = (const int16_t *)src_ptr; + uint16_t *d = dst_ptr; + + int16x4_t s0, s1, s2, s3, s4; + load_s16_4x5(s, src_stride, &s0, &s1, &s2, &s3, &s4); + s += 5 * src_stride; + + do { + int16x4_t s5, s6, s7, s8; + load_s16_4x4(s, src_stride, &s5, &s6, &s7, &s8); + + uint16x4_t d0 = + highbd_convolve6_4(s0, s1, s2, s3, s4, s5, y_filter, offset_vec); + uint16x4_t d1 = + highbd_convolve6_4(s1, s2, s3, s4, s5, s6, y_filter, offset_vec); + uint16x4_t d2 = + highbd_convolve6_4(s2, s3, s4, s5, s6, s7, y_filter, offset_vec); + uint16x4_t d3 = + highbd_convolve6_4(s3, s4, s5, s6, s7, s8, y_filter, offset_vec); + + store_u16_4x4(d, dst_stride, d0, d1, d2, d3); + + s0 = s4; + s1 = s5; + s2 = s6; + s3 = s7; + s4 = s8; + s += 4 * src_stride; + d += 4 * dst_stride; + h -= 4; + } while (h != 0); + } else { + do { + int height = h; + const int16_t *s = (const int16_t *)src_ptr; + uint16_t *d = dst_ptr; + + int16x8_t s0, s1, s2, s3, s4; + load_s16_8x5(s, src_stride, &s0, &s1, &s2, &s3, &s4); + s += 5 * src_stride; + + do { + int16x8_t s5, s6, s7, s8; + load_s16_8x4(s, src_stride, &s5, &s6, &s7, &s8); + + uint16x8_t d0 = + highbd_convolve6_8(s0, s1, s2, s3, s4, s5, y_filter, offset_vec); + uint16x8_t d1 = + highbd_convolve6_8(s1, s2, s3, s4, s5, s6, y_filter, offset_vec); + uint16x8_t d2 = + highbd_convolve6_8(s2, s3, s4, s5, s6, s7, y_filter, offset_vec); + uint16x8_t d3 = + highbd_convolve6_8(s3, s4, s5, s6, s7, s8, y_filter, offset_vec); + + store_u16_8x4(d, dst_stride, d0, d1, d2, d3); + + s0 = s4; + s1 = s5; + s2 = s6; + s3 = s7; + s4 = s8; + s += 4 * src_stride; + d += 4 * dst_stride; + height -= 4; + } while (height != 0); + src_ptr += 8; + dst_ptr += 8; + w -= 8; + } while (w != 0); + } +} + +static INLINE void highbd_12_dist_wtd_convolve_y_8tap_neon( + const uint16_t *src_ptr, int src_stride, uint16_t *dst_ptr, int dst_stride, + int w, int h, const int16_t *y_filter_ptr, const int offset) { + const int16x8_t y_filter = vld1q_s16(y_filter_ptr); + const int32x4_t offset_vec = vdupq_n_s32(offset); + + if (w == 4) { + const int16_t *s = (const int16_t *)src_ptr; + uint16_t *d = dst_ptr; + + int16x4_t s0, s1, s2, s3, s4, s5, s6; + load_s16_4x7(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6); + s += 7 * src_stride; + + do { + int16x4_t s7, s8, s9, s10; + load_s16_4x4(s, src_stride, &s7, &s8, &s9, &s10); + + uint16x4_t d0 = highbd_12_convolve8_4(s0, s1, s2, s3, s4, s5, s6, s7, + y_filter, offset_vec); + uint16x4_t d1 = highbd_12_convolve8_4(s1, s2, s3, s4, s5, s6, s7, s8, + y_filter, offset_vec); + uint16x4_t d2 = highbd_12_convolve8_4(s2, s3, s4, s5, s6, s7, s8, s9, + y_filter, offset_vec); + uint16x4_t d3 = highbd_12_convolve8_4(s3, s4, s5, s6, s7, s8, s9, s10, + y_filter, offset_vec); + + store_u16_4x4(d, dst_stride, d0, d1, d2, d3); + + s0 = s4; + s1 = s5; + s2 = s6; + s3 = s7; + s4 = s8; + s5 = s9; + s6 = s10; + s += 4 * src_stride; + d += 4 * dst_stride; + h -= 4; + } while (h != 0); + } else { + do { + int height = h; + const int16_t *s = (const int16_t *)src_ptr; + uint16_t *d = dst_ptr; + + int16x8_t s0, s1, s2, s3, s4, 
s5, s6; + load_s16_8x7(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6); + s += 7 * src_stride; + + do { + int16x8_t s7, s8, s9, s10; + load_s16_8x4(s, src_stride, &s7, &s8, &s9, &s10); + + uint16x8_t d0 = highbd_12_convolve8_8(s0, s1, s2, s3, s4, s5, s6, s7, + y_filter, offset_vec); + uint16x8_t d1 = highbd_12_convolve8_8(s1, s2, s3, s4, s5, s6, s7, s8, + y_filter, offset_vec); + uint16x8_t d2 = highbd_12_convolve8_8(s2, s3, s4, s5, s6, s7, s8, s9, + y_filter, offset_vec); + uint16x8_t d3 = highbd_12_convolve8_8(s3, s4, s5, s6, s7, s8, s9, s10, + y_filter, offset_vec); + + store_u16_8x4(d, dst_stride, d0, d1, d2, d3); + + s0 = s4; + s1 = s5; + s2 = s6; + s3 = s7; + s4 = s8; + s5 = s9; + s6 = s10; + s += 4 * src_stride; + d += 4 * dst_stride; + height -= 4; + } while (height != 0); + src_ptr += 8; + dst_ptr += 8; + w -= 8; + } while (w != 0); + } +} +static INLINE void highbd_dist_wtd_convolve_y_8tap_neon( + const uint16_t *src_ptr, int src_stride, uint16_t *dst_ptr, int dst_stride, + int w, int h, const int16_t *y_filter_ptr, const int offset) { + const int16x8_t y_filter = vld1q_s16(y_filter_ptr); + const int32x4_t offset_vec = vdupq_n_s32(offset); + + if (w == 4) { + const int16_t *s = (const int16_t *)src_ptr; + uint16_t *d = dst_ptr; + + int16x4_t s0, s1, s2, s3, s4, s5, s6; + load_s16_4x7(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6); + s += 7 * src_stride; + + do { + int16x4_t s7, s8, s9, s10; + load_s16_4x4(s, src_stride, &s7, &s8, &s9, &s10); + + uint16x4_t d0 = highbd_convolve8_4(s0, s1, s2, s3, s4, s5, s6, s7, + y_filter, offset_vec); + uint16x4_t d1 = highbd_convolve8_4(s1, s2, s3, s4, s5, s6, s7, s8, + y_filter, offset_vec); + uint16x4_t d2 = highbd_convolve8_4(s2, s3, s4, s5, s6, s7, s8, s9, + y_filter, offset_vec); + uint16x4_t d3 = highbd_convolve8_4(s3, s4, s5, s6, s7, s8, s9, s10, + y_filter, offset_vec); + + store_u16_4x4(d, dst_stride, d0, d1, d2, d3); + + s0 = s4; + s1 = s5; + s2 = s6; + s3 = s7; + s4 = s8; + s5 = s9; + s6 = s10; + s += 4 * src_stride; + d += 4 * dst_stride; + h -= 4; + } while (h != 0); + } else { + do { + int height = h; + const int16_t *s = (const int16_t *)src_ptr; + uint16_t *d = dst_ptr; + + int16x8_t s0, s1, s2, s3, s4, s5, s6; + load_s16_8x7(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6); + s += 7 * src_stride; + + do { + int16x8_t s7, s8, s9, s10; + load_s16_8x4(s, src_stride, &s7, &s8, &s9, &s10); + + uint16x8_t d0 = highbd_convolve8_8(s0, s1, s2, s3, s4, s5, s6, s7, + y_filter, offset_vec); + uint16x8_t d1 = highbd_convolve8_8(s1, s2, s3, s4, s5, s6, s7, s8, + y_filter, offset_vec); + uint16x8_t d2 = highbd_convolve8_8(s2, s3, s4, s5, s6, s7, s8, s9, + y_filter, offset_vec); + uint16x8_t d3 = highbd_convolve8_8(s3, s4, s5, s6, s7, s8, s9, s10, + y_filter, offset_vec); + + store_u16_8x4(d, dst_stride, d0, d1, d2, d3); + + s0 = s4; + s1 = s5; + s2 = s6; + s3 = s7; + s4 = s8; + s5 = s9; + s6 = s10; + s += 4 * src_stride; + d += 4 * dst_stride; + height -= 4; + } while (height != 0); + src_ptr += 8; + dst_ptr += 8; + w -= 8; + } while (w != 0); + } +} + +void av1_highbd_dist_wtd_convolve_y_neon( + const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride, int w, + int h, const InterpFilterParams *filter_params_y, const int subpel_y_qn, + ConvolveParams *conv_params, int bd) { + DECLARE_ALIGNED(16, uint16_t, + im_block[(MAX_SB_SIZE + MAX_FILTER_TAP) * MAX_SB_SIZE]); + CONV_BUF_TYPE *dst16 = conv_params->dst; + const int y_filter_taps = get_filter_tap(filter_params_y, subpel_y_qn); + int dst16_stride = conv_params->dst_stride; + 
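+ // round_offset_conv biases the convolution sums so they stay non-negative + // before the unsigned narrowing shifts (and folds in the rounding term for + // round_0); round_offset_avg is the matching bias that the comp_avg + // helpers subtract again after averaging.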
const int im_stride = MAX_SB_SIZE; + const int vert_offset = filter_params_y->taps / 2 - 1; + assert(FILTER_BITS == COMPOUND_ROUND1_BITS); + const int offset_bits = bd + 2 * FILTER_BITS - conv_params->round_0; + const int round_offset_avg = (1 << (offset_bits - conv_params->round_1)) + + (1 << (offset_bits - conv_params->round_1 - 1)); + const int round_offset_conv = (1 << (conv_params->round_0 - 1)) + + (1 << (bd + FILTER_BITS)) + + (1 << (bd + FILTER_BITS - 1)); + + const int16_t *y_filter_ptr = av1_get_interp_filter_subpel_kernel( + filter_params_y, subpel_y_qn & SUBPEL_MASK); + + src -= vert_offset * src_stride; + + if (bd == 12) { + if (conv_params->do_average) { + if (y_filter_taps <= 6) { + highbd_12_dist_wtd_convolve_y_6tap_neon( + src + src_stride, src_stride, im_block, im_stride, w, h, + y_filter_ptr, round_offset_conv); + } else { + highbd_12_dist_wtd_convolve_y_8tap_neon(src, src_stride, im_block, + im_stride, w, h, y_filter_ptr, + round_offset_conv); + } + if (conv_params->use_dist_wtd_comp_avg) { + highbd_12_dist_wtd_comp_avg_neon(im_block, im_stride, dst, dst_stride, + w, h, conv_params, round_offset_avg, + bd); + } else { + highbd_12_comp_avg_neon(im_block, im_stride, dst, dst_stride, w, h, + conv_params, round_offset_avg, bd); + } + } else { + if (y_filter_taps <= 6) { + highbd_12_dist_wtd_convolve_y_6tap_neon( + src + src_stride, src_stride, dst16, dst16_stride, w, h, + y_filter_ptr, round_offset_conv); + } else { + highbd_12_dist_wtd_convolve_y_8tap_neon( + src, src_stride, dst16, dst16_stride, w, h, y_filter_ptr, + round_offset_conv); + } + } + } else { + if (conv_params->do_average) { + if (y_filter_taps <= 6) { + highbd_dist_wtd_convolve_y_6tap_neon(src + src_stride, src_stride, + im_block, im_stride, w, h, + y_filter_ptr, round_offset_conv); + } else { + highbd_dist_wtd_convolve_y_8tap_neon(src, src_stride, im_block, + im_stride, w, h, y_filter_ptr, + round_offset_conv); + } + if (conv_params->use_dist_wtd_comp_avg) { + highbd_dist_wtd_comp_avg_neon(im_block, im_stride, dst, dst_stride, w, + h, conv_params, round_offset_avg, bd); + } else { + highbd_comp_avg_neon(im_block, im_stride, dst, dst_stride, w, h, + conv_params, round_offset_avg, bd); + } + } else { + if (y_filter_taps <= 6) { + highbd_dist_wtd_convolve_y_6tap_neon(src + src_stride, src_stride, + dst16, dst16_stride, w, h, + y_filter_ptr, round_offset_conv); + } else { + highbd_dist_wtd_convolve_y_8tap_neon(src, src_stride, dst16, + dst16_stride, w, h, y_filter_ptr, + round_offset_conv); + } + } + } +} + +static INLINE void highbd_2d_copy_neon(const uint16_t *src_ptr, int src_stride, + uint16_t *dst_ptr, int dst_stride, int w, + int h, const int round_bits, + const int offset) { + if (w <= 4) { + const int16x4_t round_shift_s16 = vdup_n_s16(round_bits); + const uint16x4_t offset_u16 = vdup_n_u16(offset); + + for (int y = 0; y < h; ++y) { + const uint16x4_t s = vld1_u16(src_ptr + y * src_stride); + uint16x4_t d = vshl_u16(s, round_shift_s16); + d = vadd_u16(d, offset_u16); + if (w == 2) { + store_u16_2x1(dst_ptr + y * dst_stride, d); + } else { + vst1_u16(dst_ptr + y * dst_stride, d); + } + } + } else { + const int16x8_t round_shift_s16 = vdupq_n_s16(round_bits); + const uint16x8_t offset_u16 = vdupq_n_u16(offset); + + for (int y = 0; y < h; ++y) { + for (int x = 0; x < w; x += 8) { + const uint16x8_t s = vld1q_u16(src_ptr + y * src_stride + x); + uint16x8_t d = vshlq_u16(s, round_shift_s16); + d = vaddq_u16(d, offset_u16); + vst1q_u16(dst_ptr + y * dst_stride + x, d); + } + } + } +} + +void 
av1_highbd_dist_wtd_convolve_2d_copy_neon(const uint16_t *src, + int src_stride, uint16_t *dst, + int dst_stride, int w, int h, + ConvolveParams *conv_params, + int bd) { + DECLARE_ALIGNED(16, uint16_t, + im_block[(MAX_SB_SIZE + MAX_FILTER_TAP) * MAX_SB_SIZE]); + + const int im_stride = MAX_SB_SIZE; + CONV_BUF_TYPE *dst16 = conv_params->dst; + int dst16_stride = conv_params->dst_stride; + const int offset_bits = bd + 2 * FILTER_BITS - conv_params->round_0; + const int round_offset = (1 << (offset_bits - conv_params->round_1)) + + (1 << (offset_bits - conv_params->round_1 - 1)); + const int round_bits = + 2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1; + assert(round_bits >= 0); + + if (conv_params->do_average) { + highbd_2d_copy_neon(src, src_stride, im_block, im_stride, w, h, round_bits, + round_offset); + } else { + highbd_2d_copy_neon(src, src_stride, dst16, dst16_stride, w, h, round_bits, + round_offset); + } + + if (conv_params->do_average) { + if (conv_params->use_dist_wtd_comp_avg) { + if (bd == 12) { + highbd_12_dist_wtd_comp_avg_neon(im_block, im_stride, dst, dst_stride, + w, h, conv_params, round_offset, bd); + } else { + highbd_dist_wtd_comp_avg_neon(im_block, im_stride, dst, dst_stride, w, + h, conv_params, round_offset, bd); + } + } else { + if (bd == 12) { + highbd_12_comp_avg_neon(im_block, im_stride, dst, dst_stride, w, h, + conv_params, round_offset, bd); + } else { + highbd_comp_avg_neon(im_block, im_stride, dst, dst_stride, w, h, + conv_params, round_offset, bd); + } + } + } +} + +static INLINE uint16x4_t highbd_convolve6_4_2d_v( + const int16x4_t s0, const int16x4_t s1, const int16x4_t s2, + const int16x4_t s3, const int16x4_t s4, const int16x4_t s5, + const int16x8_t y_filter, const int32x4_t offset) { + // Values at indices 0 and 7 of y_filter are zero. + const int16x4_t y_filter_0_3 = vget_low_s16(y_filter); + const int16x4_t y_filter_4_7 = vget_high_s16(y_filter); + + int32x4_t sum = vmlal_lane_s16(offset, s0, y_filter_0_3, 1); + sum = vmlal_lane_s16(sum, s1, y_filter_0_3, 2); + sum = vmlal_lane_s16(sum, s2, y_filter_0_3, 3); + sum = vmlal_lane_s16(sum, s3, y_filter_4_7, 0); + sum = vmlal_lane_s16(sum, s4, y_filter_4_7, 1); + sum = vmlal_lane_s16(sum, s5, y_filter_4_7, 2); + + return vqrshrun_n_s32(sum, COMPOUND_ROUND1_BITS); +} + +static INLINE uint16x8_t highbd_convolve6_8_2d_v( + const int16x8_t s0, const int16x8_t s1, const int16x8_t s2, + const int16x8_t s3, const int16x8_t s4, const int16x8_t s5, + const int16x8_t y_filter, const int32x4_t offset) { + // Values at indices 0 and 7 of y_filter are zero. 
+ const int16x4_t y_filter_0_3 = vget_low_s16(y_filter); + const int16x4_t y_filter_4_7 = vget_high_s16(y_filter); + + int32x4_t sum0 = vmlal_lane_s16(offset, vget_low_s16(s0), y_filter_0_3, 1); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s1), y_filter_0_3, 2); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s2), y_filter_0_3, 3); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s3), y_filter_4_7, 0); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s4), y_filter_4_7, 1); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s5), y_filter_4_7, 2); + + int32x4_t sum1 = vmlal_lane_s16(offset, vget_high_s16(s0), y_filter_0_3, 1); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s1), y_filter_0_3, 2); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s2), y_filter_0_3, 3); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s3), y_filter_4_7, 0); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s4), y_filter_4_7, 1); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s5), y_filter_4_7, 2); + + return vcombine_u16(vqrshrun_n_s32(sum0, COMPOUND_ROUND1_BITS), + vqrshrun_n_s32(sum1, COMPOUND_ROUND1_BITS)); +} + +static INLINE void highbd_dist_wtd_convolve_2d_vert_6tap_neon( + const uint16_t *src_ptr, int src_stride, uint16_t *dst_ptr, int dst_stride, + int w, int h, const int16_t *y_filter_ptr, int offset) { + const int16x8_t y_filter = vld1q_s16(y_filter_ptr); + const int32x4_t offset_vec = vdupq_n_s32(offset); + + if (w == 4) { + const int16_t *s = (const int16_t *)src_ptr; + uint16_t *d = dst_ptr; + + int16x4_t s0, s1, s2, s3, s4; + load_s16_4x5(s, src_stride, &s0, &s1, &s2, &s3, &s4); + s += 5 * src_stride; + + do { + int16x4_t s5, s6, s7, s8; + load_s16_4x4(s, src_stride, &s5, &s6, &s7, &s8); + + uint16x4_t d0 = + highbd_convolve6_4_2d_v(s0, s1, s2, s3, s4, s5, y_filter, offset_vec); + uint16x4_t d1 = + highbd_convolve6_4_2d_v(s1, s2, s3, s4, s5, s6, y_filter, offset_vec); + uint16x4_t d2 = + highbd_convolve6_4_2d_v(s2, s3, s4, s5, s6, s7, y_filter, offset_vec); + uint16x4_t d3 = + highbd_convolve6_4_2d_v(s3, s4, s5, s6, s7, s8, y_filter, offset_vec); + + store_u16_4x4(d, dst_stride, d0, d1, d2, d3); + + s0 = s4; + s1 = s5; + s2 = s6; + s3 = s7; + s4 = s8; + s += 4 * src_stride; + d += 4 * dst_stride; + h -= 4; + } while (h != 0); + } else { + do { + int height = h; + const int16_t *s = (const int16_t *)src_ptr; + uint16_t *d = dst_ptr; + + int16x8_t s0, s1, s2, s3, s4; + load_s16_8x5(s, src_stride, &s0, &s1, &s2, &s3, &s4); + s += 5 * src_stride; + + do { + int16x8_t s5, s6, s7, s8; + load_s16_8x4(s, src_stride, &s5, &s6, &s7, &s8); + + uint16x8_t d0 = highbd_convolve6_8_2d_v(s0, s1, s2, s3, s4, s5, + y_filter, offset_vec); + uint16x8_t d1 = highbd_convolve6_8_2d_v(s1, s2, s3, s4, s5, s6, + y_filter, offset_vec); + uint16x8_t d2 = highbd_convolve6_8_2d_v(s2, s3, s4, s5, s6, s7, + y_filter, offset_vec); + uint16x8_t d3 = highbd_convolve6_8_2d_v(s3, s4, s5, s6, s7, s8, + y_filter, offset_vec); + + store_u16_8x4(d, dst_stride, d0, d1, d2, d3); + + s0 = s4; + s1 = s5; + s2 = s6; + s3 = s7; + s4 = s8; + s += 4 * src_stride; + d += 4 * dst_stride; + height -= 4; + } while (height != 0); + src_ptr += 8; + dst_ptr += 8; + w -= 8; + } while (w != 0); + } +} + +static INLINE uint16x4_t highbd_convolve8_4_2d_v( + const int16x4_t s0, const int16x4_t s1, const int16x4_t s2, + const int16x4_t s3, const int16x4_t s4, const int16x4_t s5, + const int16x4_t s6, const int16x4_t s7, const int16x8_t y_filter, + const int32x4_t offset) { + const int16x4_t y_filter_0_3 = vget_low_s16(y_filter); + const int16x4_t y_filter_4_7 = vget_high_s16(y_filter); + + int32x4_t sum = 
vmlal_lane_s16(offset, s0, y_filter_0_3, 0); + sum = vmlal_lane_s16(sum, s1, y_filter_0_3, 1); + sum = vmlal_lane_s16(sum, s2, y_filter_0_3, 2); + sum = vmlal_lane_s16(sum, s3, y_filter_0_3, 3); + sum = vmlal_lane_s16(sum, s4, y_filter_4_7, 0); + sum = vmlal_lane_s16(sum, s5, y_filter_4_7, 1); + sum = vmlal_lane_s16(sum, s6, y_filter_4_7, 2); + sum = vmlal_lane_s16(sum, s7, y_filter_4_7, 3); + + return vqrshrun_n_s32(sum, COMPOUND_ROUND1_BITS); +} + +static INLINE uint16x8_t highbd_convolve8_8_2d_v( + const int16x8_t s0, const int16x8_t s1, const int16x8_t s2, + const int16x8_t s3, const int16x8_t s4, const int16x8_t s5, + const int16x8_t s6, const int16x8_t s7, const int16x8_t y_filter, + const int32x4_t offset) { + const int16x4_t y_filter_0_3 = vget_low_s16(y_filter); + const int16x4_t y_filter_4_7 = vget_high_s16(y_filter); + + int32x4_t sum0 = vmlal_lane_s16(offset, vget_low_s16(s0), y_filter_0_3, 0); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s1), y_filter_0_3, 1); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s2), y_filter_0_3, 2); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s3), y_filter_0_3, 3); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s4), y_filter_4_7, 0); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s5), y_filter_4_7, 1); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s6), y_filter_4_7, 2); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s7), y_filter_4_7, 3); + + int32x4_t sum1 = vmlal_lane_s16(offset, vget_high_s16(s0), y_filter_0_3, 0); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s1), y_filter_0_3, 1); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s2), y_filter_0_3, 2); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s3), y_filter_0_3, 3); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s4), y_filter_4_7, 0); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s5), y_filter_4_7, 1); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s6), y_filter_4_7, 2); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s7), y_filter_4_7, 3); + + return vcombine_u16(vqrshrun_n_s32(sum0, COMPOUND_ROUND1_BITS), + vqrshrun_n_s32(sum1, COMPOUND_ROUND1_BITS)); +} + +static INLINE void highbd_dist_wtd_convolve_2d_vert_8tap_neon( + const uint16_t *src_ptr, int src_stride, uint16_t *dst_ptr, int dst_stride, + int w, int h, const int16_t *y_filter_ptr, int offset) { + const int16x8_t y_filter = vld1q_s16(y_filter_ptr); + const int32x4_t offset_vec = vdupq_n_s32(offset); + + if (w <= 4) { + const int16_t *s = (const int16_t *)src_ptr; + uint16_t *d = dst_ptr; + + int16x4_t s0, s1, s2, s3, s4, s5, s6; + load_s16_4x7(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6); + s += 7 * src_stride; + + do { + int16x4_t s7, s8, s9, s10; + load_s16_4x4(s, src_stride, &s7, &s8, &s9, &s10); + + uint16x4_t d0 = highbd_convolve8_4_2d_v(s0, s1, s2, s3, s4, s5, s6, s7, + y_filter, offset_vec); + uint16x4_t d1 = highbd_convolve8_4_2d_v(s1, s2, s3, s4, s5, s6, s7, s8, + y_filter, offset_vec); + uint16x4_t d2 = highbd_convolve8_4_2d_v(s2, s3, s4, s5, s6, s7, s8, s9, + y_filter, offset_vec); + uint16x4_t d3 = highbd_convolve8_4_2d_v(s3, s4, s5, s6, s7, s8, s9, s10, + y_filter, offset_vec); + + store_u16_4x4(d, dst_stride, d0, d1, d2, d3); + + s0 = s4; + s1 = s5; + s2 = s6; + s3 = s7; + s4 = s8; + s5 = s9; + s6 = s10; + s += 4 * src_stride; + d += 4 * dst_stride; + h -= 4; + } while (h != 0); + } else { + do { + int height = h; + const int16_t *s = (const int16_t *)src_ptr; + uint16_t *d = dst_ptr; + + int16x8_t s0, s1, s2, s3, s4, s5, s6; + load_s16_8x7(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6); + s += 7 * src_stride; + + do { + int16x8_t s7, s8, s9, s10; + 
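+ // Load four new rows, compute four output rows, then slide the eight-row + // window down by four rows.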
load_s16_8x4(s, src_stride, &s7, &s8, &s9, &s10); + + uint16x8_t d0 = highbd_convolve8_8_2d_v(s0, s1, s2, s3, s4, s5, s6, s7, + y_filter, offset_vec); + uint16x8_t d1 = highbd_convolve8_8_2d_v(s1, s2, s3, s4, s5, s6, s7, s8, + y_filter, offset_vec); + uint16x8_t d2 = highbd_convolve8_8_2d_v(s2, s3, s4, s5, s6, s7, s8, s9, + y_filter, offset_vec); + uint16x8_t d3 = highbd_convolve8_8_2d_v(s3, s4, s5, s6, s7, s8, s9, s10, + y_filter, offset_vec); + + store_u16_8x4(d, dst_stride, d0, d1, d2, d3); + + s0 = s4; + s1 = s5; + s2 = s6; + s3 = s7; + s4 = s8; + s5 = s9; + s6 = s10; + s += 4 * src_stride; + d += 4 * dst_stride; + height -= 4; + } while (height != 0); + src_ptr += 8; + dst_ptr += 8; + w -= 8; + } while (w != 0); + } +} + +static INLINE void highbd_12_dist_wtd_convolve_2d_horiz_6tap_neon( + const uint16_t *src_ptr, int src_stride, uint16_t *dst_ptr, int dst_stride, + int w, int h, const int16_t *x_filter_ptr, const int offset) { + // The smallest block height is 4, and the horizontal convolution needs to + // process an extra (filter_taps/2 - 1) lines for the vertical convolution. + assert(h >= 5); + const int32x4_t offset_vec = vdupq_n_s32(offset); + + const int16x8_t x_filter = vld1q_s16(x_filter_ptr); + + int height = h; + + do { + int width = w; + const int16_t *s = (const int16_t *)src_ptr; + uint16_t *d = dst_ptr; + + do { + int16x8_t s0[6], s1[6], s2[6], s3[6]; + load_s16_8x6(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3], + &s0[4], &s0[5]); + load_s16_8x6(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3], + &s1[4], &s1[5]); + load_s16_8x6(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3], + &s2[4], &s2[5]); + load_s16_8x6(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3], + &s3[4], &s3[5]); + + uint16x8_t d0 = highbd_12_convolve6_8(s0[0], s0[1], s0[2], s0[3], s0[4], + s0[5], x_filter, offset_vec); + uint16x8_t d1 = highbd_12_convolve6_8(s1[0], s1[1], s1[2], s1[3], s1[4], + s1[5], x_filter, offset_vec); + uint16x8_t d2 = highbd_12_convolve6_8(s2[0], s2[1], s2[2], s2[3], s2[4], + s2[5], x_filter, offset_vec); + uint16x8_t d3 = highbd_12_convolve6_8(s3[0], s3[1], s3[2], s3[3], s3[4], + s3[5], x_filter, offset_vec); + + store_u16_8x4(d, dst_stride, d0, d1, d2, d3); + + s += 8; + d += 8; + width -= 8; + } while (width != 0); + src_ptr += 4 * src_stride; + dst_ptr += 4 * dst_stride; + height -= 4; + } while (height > 4); + + do { + int width = w; + const int16_t *s = (const int16_t *)src_ptr; + uint16_t *d = dst_ptr; + + do { + int16x8_t s0[6]; + load_s16_8x6(s, 1, &s0[0], &s0[1], &s0[2], &s0[3], &s0[4], &s0[5]); + + uint16x8_t d0 = highbd_12_convolve6_8(s0[0], s0[1], s0[2], s0[3], s0[4], + s0[5], x_filter, offset_vec); + vst1q_u16(d, d0); + + s += 8; + d += 8; + width -= 8; + } while (width != 0); + src_ptr += src_stride; + dst_ptr += dst_stride; + } while (--height != 0); +} + +static INLINE void highbd_dist_wtd_convolve_2d_horiz_6tap_neon( + const uint16_t *src_ptr, int src_stride, uint16_t *dst_ptr, int dst_stride, + int w, int h, const int16_t *x_filter_ptr, const int offset) { + // The smallest block height is 4, and the horizontal convolution needs to + // process an extra (filter_taps/2 - 1) lines for the vertical convolution. 
+ assert(h >= 5); + const int32x4_t offset_vec = vdupq_n_s32(offset); + + const int16x8_t x_filter = vld1q_s16(x_filter_ptr); + + int height = h; + + do { + int width = w; + const int16_t *s = (const int16_t *)src_ptr; + uint16_t *d = dst_ptr; + + do { + int16x8_t s0[6], s1[6], s2[6], s3[6]; + load_s16_8x6(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3], + &s0[4], &s0[5]); + load_s16_8x6(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3], + &s1[4], &s1[5]); + load_s16_8x6(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3], + &s2[4], &s2[5]); + load_s16_8x6(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3], + &s3[4], &s3[5]); + + uint16x8_t d0 = highbd_convolve6_8(s0[0], s0[1], s0[2], s0[3], s0[4], + s0[5], x_filter, offset_vec); + uint16x8_t d1 = highbd_convolve6_8(s1[0], s1[1], s1[2], s1[3], s1[4], + s1[5], x_filter, offset_vec); + uint16x8_t d2 = highbd_convolve6_8(s2[0], s2[1], s2[2], s2[3], s2[4], + s2[5], x_filter, offset_vec); + uint16x8_t d3 = highbd_convolve6_8(s3[0], s3[1], s3[2], s3[3], s3[4], + s3[5], x_filter, offset_vec); + + store_u16_8x4(d, dst_stride, d0, d1, d2, d3); + + s += 8; + d += 8; + width -= 8; + } while (width != 0); + src_ptr += 4 * src_stride; + dst_ptr += 4 * dst_stride; + height -= 4; + } while (height > 4); + + do { + int width = w; + const int16_t *s = (const int16_t *)src_ptr; + uint16_t *d = dst_ptr; + + do { + int16x8_t s0[6]; + load_s16_8x6(s, 1, &s0[0], &s0[1], &s0[2], &s0[3], &s0[4], &s0[5]); + + uint16x8_t d0 = highbd_convolve6_8(s0[0], s0[1], s0[2], s0[3], s0[4], + s0[5], x_filter, offset_vec); + vst1q_u16(d, d0); + + s += 8; + d += 8; + width -= 8; + } while (width != 0); + src_ptr += src_stride; + dst_ptr += dst_stride; + } while (--height != 0); +} + +static INLINE void highbd_12_dist_wtd_convolve_2d_horiz_neon( + const uint16_t *src_ptr, int src_stride, uint16_t *dst_ptr, int dst_stride, + int w, int h, const int16_t *x_filter_ptr, const int offset) { + // The smallest block height is 4, and the horizontal convolution needs to + // process an extra (filter_taps/2 - 1) lines for the vertical convolution. + assert(h >= 5); + const int32x4_t offset_vec = vdupq_n_s32(offset); + + if (w == 4) { + // 4-tap filters are used for blocks having width == 4. 
+ const int16x4_t x_filter = vld1_s16(x_filter_ptr + 2); + const int16_t *s = (const int16_t *)(src_ptr + 1); + uint16_t *d = dst_ptr; + + do { + int16x4_t s0[4], s1[4], s2[4], s3[4]; + load_s16_4x4(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3]); + load_s16_4x4(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3]); + load_s16_4x4(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3]); + load_s16_4x4(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3]); + + uint16x4_t d0 = highbd_12_convolve4_4_x(s0, x_filter, offset_vec); + uint16x4_t d1 = highbd_12_convolve4_4_x(s1, x_filter, offset_vec); + uint16x4_t d2 = highbd_12_convolve4_4_x(s2, x_filter, offset_vec); + uint16x4_t d3 = highbd_12_convolve4_4_x(s3, x_filter, offset_vec); + + store_u16_4x4(d, dst_stride, d0, d1, d2, d3); + + s += 4 * src_stride; + d += 4 * dst_stride; + h -= 4; + } while (h > 4); + + do { + int16x4_t s0[4]; + load_s16_4x4(s, 1, &s0[0], &s0[1], &s0[2], &s0[3]); + + uint16x4_t d0 = highbd_12_convolve4_4_x(s0, x_filter, offset_vec); + vst1_u16(d, d0); + + s += src_stride; + d += dst_stride; + } while (--h != 0); + } else { + const int16x8_t x_filter = vld1q_s16(x_filter_ptr); + int height = h; + + do { + int width = w; + const int16_t *s = (const int16_t *)src_ptr; + uint16_t *d = dst_ptr; + + do { + int16x8_t s0[8], s1[8], s2[8], s3[8]; + load_s16_8x8(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3], + &s0[4], &s0[5], &s0[6], &s0[7]); + load_s16_8x8(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3], + &s1[4], &s1[5], &s1[6], &s1[7]); + load_s16_8x8(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3], + &s2[4], &s2[5], &s2[6], &s2[7]); + load_s16_8x8(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3], + &s3[4], &s3[5], &s3[6], &s3[7]); + + uint16x8_t d0 = + highbd_12_convolve8_8(s0[0], s0[1], s0[2], s0[3], s0[4], s0[5], + s0[6], s0[7], x_filter, offset_vec); + uint16x8_t d1 = + highbd_12_convolve8_8(s1[0], s1[1], s1[2], s1[3], s1[4], s1[5], + s1[6], s1[7], x_filter, offset_vec); + uint16x8_t d2 = + highbd_12_convolve8_8(s2[0], s2[1], s2[2], s2[3], s2[4], s2[5], + s2[6], s2[7], x_filter, offset_vec); + uint16x8_t d3 = + highbd_12_convolve8_8(s3[0], s3[1], s3[2], s3[3], s3[4], s3[5], + s3[6], s3[7], x_filter, offset_vec); + + store_u16_8x4(d, dst_stride, d0, d1, d2, d3); + + s += 8; + d += 8; + width -= 8; + } while (width != 0); + src_ptr += 4 * src_stride; + dst_ptr += 4 * dst_stride; + height -= 4; + } while (height > 4); + + do { + int width = w; + const int16_t *s = (const int16_t *)src_ptr; + uint16_t *d = dst_ptr; + + do { + int16x8_t s0[8]; + load_s16_8x8(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3], + &s0[4], &s0[5], &s0[6], &s0[7]); + + uint16x8_t d0 = + highbd_12_convolve8_8(s0[0], s0[1], s0[2], s0[3], s0[4], s0[5], + s0[6], s0[7], x_filter, offset_vec); + vst1q_u16(d, d0); + + s += 8; + d += 8; + width -= 8; + } while (width != 0); + src_ptr += src_stride; + dst_ptr += dst_stride; + } while (--height != 0); + } +} + +static INLINE void highbd_dist_wtd_convolve_2d_horiz_neon( + const uint16_t *src_ptr, int src_stride, uint16_t *dst_ptr, int dst_stride, + int w, int h, const int16_t *x_filter_ptr, const int offset) { + // The smallest block height is 4, and the horizontal convolution needs to + // process an extra (filter_taps/2 - 1) lines for the vertical convolution. + assert(h >= 5); + const int32x4_t offset_vec = vdupq_n_s32(offset); + + if (w == 4) { + // 4-tap filters are used for blocks having width == 4. 
+ const int16x4_t x_filter = vld1_s16(x_filter_ptr + 2); + const int16_t *s = (const int16_t *)(src_ptr + 1); + uint16_t *d = dst_ptr; + + do { + int16x4_t s0[4], s1[4], s2[4], s3[4]; + load_s16_4x4(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3]); + load_s16_4x4(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3]); + load_s16_4x4(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3]); + load_s16_4x4(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3]); + + uint16x4_t d0 = highbd_convolve4_4_x(s0, x_filter, offset_vec); + uint16x4_t d1 = highbd_convolve4_4_x(s1, x_filter, offset_vec); + uint16x4_t d2 = highbd_convolve4_4_x(s2, x_filter, offset_vec); + uint16x4_t d3 = highbd_convolve4_4_x(s3, x_filter, offset_vec); + + store_u16_4x4(d, dst_stride, d0, d1, d2, d3); + + s += 4 * src_stride; + d += 4 * dst_stride; + h -= 4; + } while (h > 4); + + do { + int16x4_t s0[4]; + load_s16_4x4(s, 1, &s0[0], &s0[1], &s0[2], &s0[3]); + + uint16x4_t d0 = highbd_convolve4_4_x(s0, x_filter, offset_vec); + vst1_u16(d, d0); + + s += src_stride; + d += dst_stride; + } while (--h != 0); + } else { + const int16x8_t x_filter = vld1q_s16(x_filter_ptr); + int height = h; + + do { + int width = w; + const int16_t *s = (const int16_t *)src_ptr; + uint16_t *d = dst_ptr; + + do { + int16x8_t s0[8], s1[8], s2[8], s3[8]; + load_s16_8x8(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3], + &s0[4], &s0[5], &s0[6], &s0[7]); + load_s16_8x8(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3], + &s1[4], &s1[5], &s1[6], &s1[7]); + load_s16_8x8(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3], + &s2[4], &s2[5], &s2[6], &s2[7]); + load_s16_8x8(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3], + &s3[4], &s3[5], &s3[6], &s3[7]); + + uint16x8_t d0 = + highbd_convolve8_8(s0[0], s0[1], s0[2], s0[3], s0[4], s0[5], s0[6], + s0[7], x_filter, offset_vec); + uint16x8_t d1 = + highbd_convolve8_8(s1[0], s1[1], s1[2], s1[3], s1[4], s1[5], s1[6], + s1[7], x_filter, offset_vec); + uint16x8_t d2 = + highbd_convolve8_8(s2[0], s2[1], s2[2], s2[3], s2[4], s2[5], s2[6], + s2[7], x_filter, offset_vec); + uint16x8_t d3 = + highbd_convolve8_8(s3[0], s3[1], s3[2], s3[3], s3[4], s3[5], s3[6], + s3[7], x_filter, offset_vec); + + store_u16_8x4(d, dst_stride, d0, d1, d2, d3); + + s += 8; + d += 8; + width -= 8; + } while (width != 0); + src_ptr += 4 * src_stride; + dst_ptr += 4 * dst_stride; + height -= 4; + } while (height > 4); + + do { + int width = w; + const int16_t *s = (const int16_t *)src_ptr; + uint16_t *d = dst_ptr; + + do { + int16x8_t s0[8]; + load_s16_8x8(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3], + &s0[4], &s0[5], &s0[6], &s0[7]); + + uint16x8_t d0 = + highbd_convolve8_8(s0[0], s0[1], s0[2], s0[3], s0[4], s0[5], s0[6], + s0[7], x_filter, offset_vec); + vst1q_u16(d, d0); + + s += 8; + d += 8; + width -= 8; + } while (width != 0); + src_ptr += src_stride; + dst_ptr += dst_stride; + } while (--height != 0); + } +} + +void av1_highbd_dist_wtd_convolve_2d_neon( + const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride, int w, + int h, const InterpFilterParams *filter_params_x, + const InterpFilterParams *filter_params_y, const int subpel_x_qn, + const int subpel_y_qn, ConvolveParams *conv_params, int bd) { + DECLARE_ALIGNED(16, uint16_t, + im_block[(MAX_SB_SIZE + MAX_FILTER_TAP) * MAX_SB_SIZE]); + DECLARE_ALIGNED(16, uint16_t, + im_block2[(MAX_SB_SIZE + MAX_FILTER_TAP) * MAX_SB_SIZE]); + + CONV_BUF_TYPE *dst16 = conv_params->dst; + int dst16_stride = conv_params->dst_stride; + const int x_filter_taps 
= get_filter_tap(filter_params_x, subpel_x_qn); + const int clamped_x_taps = x_filter_taps < 6 ? 6 : x_filter_taps; + const int y_filter_taps = get_filter_tap(filter_params_y, subpel_y_qn); + const int clamped_y_taps = y_filter_taps < 6 ? 6 : y_filter_taps; + + const int im_h = h + clamped_y_taps - 1; + const int im_stride = MAX_SB_SIZE; + const int vert_offset = clamped_y_taps / 2 - 1; + const int horiz_offset = clamped_x_taps / 2 - 1; + // The extra shim of (1 << (conv_params->round_0 - 1)) allows us to use a + // faster non-rounding non-saturating left shift. + const int round_offset_conv_x = + (1 << (bd + FILTER_BITS - 1)) + (1 << (conv_params->round_0 - 1)); + const int y_offset_bits = bd + 2 * FILTER_BITS - conv_params->round_0; + const int round_offset_conv_y = (1 << y_offset_bits); + const int round_offset_avg = + ((1 << (y_offset_bits - conv_params->round_1)) + + (1 << (y_offset_bits - conv_params->round_1 - 1))); + + const uint16_t *src_ptr = src - vert_offset * src_stride - horiz_offset; + + const int16_t *x_filter_ptr = av1_get_interp_filter_subpel_kernel( + filter_params_x, subpel_x_qn & SUBPEL_MASK); + const int16_t *y_filter_ptr = av1_get_interp_filter_subpel_kernel( + filter_params_y, subpel_y_qn & SUBPEL_MASK); + + // horizontal filter + if (bd == 12) { + if (x_filter_taps <= 6 && w != 4) { + highbd_12_dist_wtd_convolve_2d_horiz_6tap_neon( + src_ptr, src_stride, im_block, im_stride, w, im_h, x_filter_ptr, + round_offset_conv_x); + } else { + highbd_12_dist_wtd_convolve_2d_horiz_neon( + src_ptr, src_stride, im_block, im_stride, w, im_h, x_filter_ptr, + round_offset_conv_x); + } + } else { + if (x_filter_taps <= 6 && w != 4) { + highbd_dist_wtd_convolve_2d_horiz_6tap_neon( + src_ptr, src_stride, im_block, im_stride, w, im_h, x_filter_ptr, + round_offset_conv_x); + } else { + highbd_dist_wtd_convolve_2d_horiz_neon(src_ptr, src_stride, im_block, + im_stride, w, im_h, x_filter_ptr, + round_offset_conv_x); + } + } + + // vertical filter + if (y_filter_taps <= 6) { + if (conv_params->do_average) { + highbd_dist_wtd_convolve_2d_vert_6tap_neon(im_block, im_stride, im_block2, + im_stride, w, h, y_filter_ptr, + round_offset_conv_y); + } else { + highbd_dist_wtd_convolve_2d_vert_6tap_neon( + im_block, im_stride, dst16, dst16_stride, w, h, y_filter_ptr, + round_offset_conv_y); + } + } else { + if (conv_params->do_average) { + highbd_dist_wtd_convolve_2d_vert_8tap_neon(im_block, im_stride, im_block2, + im_stride, w, h, y_filter_ptr, + round_offset_conv_y); + } else { + highbd_dist_wtd_convolve_2d_vert_8tap_neon( + im_block, im_stride, dst16, dst16_stride, w, h, y_filter_ptr, + round_offset_conv_y); + } + } + + // Do the compound averaging outside the loop, avoids branching within the + // main loop + if (conv_params->do_average) { + if (conv_params->use_dist_wtd_comp_avg) { + if (bd == 12) { + highbd_12_dist_wtd_comp_avg_neon(im_block2, im_stride, dst, dst_stride, + w, h, conv_params, round_offset_avg, + bd); + } else { + highbd_dist_wtd_comp_avg_neon(im_block2, im_stride, dst, dst_stride, w, + h, conv_params, round_offset_avg, bd); + } + } else { + if (bd == 12) { + highbd_12_comp_avg_neon(im_block2, im_stride, dst, dst_stride, w, h, + conv_params, round_offset_avg, bd); + } else { + highbd_comp_avg_neon(im_block2, im_stride, dst, dst_stride, w, h, + conv_params, round_offset_avg, bd); + } + } + } +} diff --git a/third_party/aom/av1/common/arm/highbd_convolve_horiz_rs_neon.c b/third_party/aom/av1/common/arm/highbd_convolve_horiz_rs_neon.c new file mode 100644 index 
0000000000..4f1c25d122 --- /dev/null +++ b/third_party/aom/av1/common/arm/highbd_convolve_horiz_rs_neon.c @@ -0,0 +1,273 @@ +/* + * Copyright (c) 2023, Alliance for Open Media. All rights reserved + * + * This source code is subject to the terms of the BSD 2 Clause License and + * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License + * was not distributed with this source code in the LICENSE file, you can + * obtain it at www.aomedia.org/license/software. If the Alliance for Open + * Media Patent License 1.0 was not distributed with this source code in the + * PATENTS file, you can obtain it at www.aomedia.org/license/patent. + */ + +#include <arm_neon.h> + +#include "config/aom_config.h" +#include "config/av1_rtcd.h" + +#include "aom_dsp/aom_dsp_common.h" +#include "aom_dsp/arm/mem_neon.h" +#include "aom_dsp/arm/transpose_neon.h" +#include "aom_ports/mem.h" +#include "av1/common/convolve.h" +#include "av1/common/filter.h" +#include "av1/common/arm/highbd_convolve_neon.h" + +#define UPSCALE_NORMATIVE_TAPS 8 + +void av1_highbd_convolve_horiz_rs_neon(const uint16_t *src, int src_stride, + uint16_t *dst, int dst_stride, int w, + int h, const int16_t *x_filters, + int x0_qn, int x_step_qn, int bd) { + const int horiz_offset = UPSCALE_NORMATIVE_TAPS / 2 - 1; + + static const int32_t kIdx[4] = { 0, 1, 2, 3 }; + const int32x4_t idx = vld1q_s32(kIdx); + const int32x4_t subpel_mask = vdupq_n_s32(RS_SCALE_SUBPEL_MASK); + const int32x4_t shift_s32 = vdupq_n_s32(-FILTER_BITS); + const int32x4_t offset_s32 = vdupq_n_s32(0); + const uint16x4_t max = vdup_n_u16((1 << bd) - 1); + + const uint16_t *src_ptr = src - horiz_offset; + uint16_t *dst_ptr = dst; + + if (w <= 4) { + int height = h; + uint16_t *d = dst_ptr; + + do { + int x_qn = x0_qn; + + // Load 4 src vectors at a time, they might be the same, but we have to + // calculate the indices anyway. Doing it in SIMD and then storing the + // indices is faster than having to calculate the expression + // &src_ptr[((x_qn + 0*x_step_qn) >> RS_SCALE_SUBPEL_BITS)] 4 times + // Ideally this should be a gather using the indices, but NEON does not + // have that, so have to emulate + const int32x4_t xqn_idx = vmlaq_n_s32(vdupq_n_s32(x_qn), idx, x_step_qn); + // We have to multiply x2 to get the actual pointer as sizeof(uint16_t) = + // 2 + const int32x4_t src_idx = + vshlq_n_s32(vshrq_n_s32(xqn_idx, RS_SCALE_SUBPEL_BITS), 1); + // Similarly for the filter vector indices, we calculate the filter + // indices for 4 columns. First we calculate the indices: + // x_qn & RS_SCALE_SUBPEL_MASK) >> RS_SCALE_EXTRA_BITS + // Then we calculate the actual pointers, multiplying with + // UPSCALE_UPSCALE_NORMATIVE_TAPS + // again shift left by 1 + const int32x4_t x_filter4_idx = vshlq_n_s32( + vshrq_n_s32(vandq_s32(xqn_idx, subpel_mask), RS_SCALE_EXTRA_BITS), 1); + // Even though pointers are unsigned 32/64-bit ints we do signed + // addition The reason for this is that x_qn can be negative, leading to + // negative offsets. Argon test + // profile0_core/streams/test10573_11003.obu was failing because of + // this. 
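+      // In effect each lane i computes:
+      //   src4_ptr[i]      = (int16_t *)src_ptr +
+      //       ((x_qn + i * x_step_qn) >> RS_SCALE_SUBPEL_BITS)
+      //   x_filter4_ptr[i] = x_filters + UPSCALE_NORMATIVE_TAPS *
+      //       (((x_qn + i * x_step_qn) & RS_SCALE_SUBPEL_MASK) >>
+      //        RS_SCALE_EXTRA_BITS)
+      // with the << 1 byte scaling folded in because the elements are 16 bits
+      // wide.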
+#if AOM_ARCH_AARCH64 + uint64x2_t tmp4[2]; + tmp4[0] = vreinterpretq_u64_s64(vaddw_s32( + vdupq_n_s64((const int64_t)src_ptr), vget_low_s32(src_idx))); + tmp4[1] = vreinterpretq_u64_s64(vaddw_s32( + vdupq_n_s64((const int64_t)src_ptr), vget_high_s32(src_idx))); + int16_t *src4_ptr[4]; + uint64_t *tmp_ptr = (uint64_t *)&src4_ptr; + vst1q_u64(tmp_ptr, tmp4[0]); + vst1q_u64(tmp_ptr + 2, tmp4[1]); + + // filter vectors + tmp4[0] = vreinterpretq_u64_s64(vmlal_s32( + vdupq_n_s64((const int64_t)x_filters), vget_low_s32(x_filter4_idx), + vdup_n_s32(UPSCALE_NORMATIVE_TAPS))); + tmp4[1] = vreinterpretq_u64_s64(vmlal_s32( + vdupq_n_s64((const int64_t)x_filters), vget_high_s32(x_filter4_idx), + vdup_n_s32(UPSCALE_NORMATIVE_TAPS))); + + const int16_t *x_filter4_ptr[4]; + tmp_ptr = (uint64_t *)&x_filter4_ptr; + vst1q_u64(tmp_ptr, tmp4[0]); + vst1q_u64(tmp_ptr + 2, tmp4[1]); +#else + uint32x4_t tmp4; + tmp4 = vreinterpretq_u32_s32( + vaddq_s32(vdupq_n_s32((const int32_t)src_ptr), src_idx)); + int16_t *src4_ptr[4]; + uint32_t *tmp_ptr = (uint32_t *)&src4_ptr; + vst1q_u32(tmp_ptr, tmp4); + + // filter vectors + tmp4 = vreinterpretq_u32_s32( + vmlaq_s32(vdupq_n_s32((const int32_t)x_filters), x_filter4_idx, + vdupq_n_s32(UPSCALE_NORMATIVE_TAPS))); + + const int16_t *x_filter4_ptr[4]; + tmp_ptr = (uint32_t *)&x_filter4_ptr; + vst1q_u32(tmp_ptr, tmp4); +#endif // AOM_ARCH_AARCH64 + // Load source + int16x8_t s0 = vld1q_s16(src4_ptr[0]); + int16x8_t s1 = vld1q_s16(src4_ptr[1]); + int16x8_t s2 = vld1q_s16(src4_ptr[2]); + int16x8_t s3 = vld1q_s16(src4_ptr[3]); + + // Actually load the filters + const int16x8_t x_filter0 = vld1q_s16(x_filter4_ptr[0]); + const int16x8_t x_filter1 = vld1q_s16(x_filter4_ptr[1]); + const int16x8_t x_filter2 = vld1q_s16(x_filter4_ptr[2]); + const int16x8_t x_filter3 = vld1q_s16(x_filter4_ptr[3]); + + // Group low and high parts and transpose + int16x4_t filters_lo[] = { vget_low_s16(x_filter0), + vget_low_s16(x_filter1), + vget_low_s16(x_filter2), + vget_low_s16(x_filter3) }; + int16x4_t filters_hi[] = { vget_high_s16(x_filter0), + vget_high_s16(x_filter1), + vget_high_s16(x_filter2), + vget_high_s16(x_filter3) }; + transpose_array_inplace_u16_4x4((uint16x4_t *)filters_lo); + transpose_array_inplace_u16_4x4((uint16x4_t *)filters_hi); + + // Run the 2D Scale convolution + uint16x4_t d0 = highbd_convolve8_2d_scale_horiz4x8_s32_s16( + s0, s1, s2, s3, filters_lo, filters_hi, shift_s32, offset_s32); + + d0 = vmin_u16(d0, max); + + if (w == 2) { + store_u16_2x1(d, d0); + } else { + vst1_u16(d, d0); + } + + src_ptr += src_stride; + d += dst_stride; + height--; + } while (height > 0); + } else { + int height = h; + + do { + int width = w; + int x_qn = x0_qn; + uint16_t *d = dst_ptr; + const uint16_t *s = src_ptr; + + do { + // Load 4 src vectors at a time, they might be the same, but we have to + // calculate the indices anyway. Doing it in SIMD and then storing the + // indices is faster than having to calculate the expression + // &src_ptr[((x_qn + 0*x_step_qn) >> RS_SCALE_SUBPEL_BITS)] 4 times + // Ideally this should be a gather using the indices, but NEON does not + // have that, so have to emulate + const int32x4_t xqn_idx = + vmlaq_n_s32(vdupq_n_s32(x_qn), idx, x_step_qn); + // We have to multiply x2 to get the actual pointer as sizeof(uint16_t) + // = 2 + const int32x4_t src_idx = + vshlq_n_s32(vshrq_n_s32(xqn_idx, RS_SCALE_SUBPEL_BITS), 1); + + // Similarly for the filter vector indices, we calculate the filter + // indices for 4 columns. 
First we calculate the indices: + // x_qn & RS_SCALE_SUBPEL_MASK) >> RS_SCALE_EXTRA_BITS + // Then we calculate the actual pointers, multiplying with + // UPSCALE_UPSCALE_NORMATIVE_TAPS + // again shift left by 1 + const int32x4_t x_filter4_idx = vshlq_n_s32( + vshrq_n_s32(vandq_s32(xqn_idx, subpel_mask), RS_SCALE_EXTRA_BITS), + 1); + // Even though pointers are unsigned 32/64-bit ints we do signed + // addition The reason for this is that x_qn can be negative, leading to + // negative offsets. Argon test + // profile0_core/streams/test10573_11003.obu was failing because of + // this. +#if AOM_ARCH_AARCH64 + uint64x2_t tmp4[2]; + tmp4[0] = vreinterpretq_u64_s64( + vaddw_s32(vdupq_n_s64((const int64_t)s), vget_low_s32(src_idx))); + tmp4[1] = vreinterpretq_u64_s64( + vaddw_s32(vdupq_n_s64((const int64_t)s), vget_high_s32(src_idx))); + int16_t *src4_ptr[4]; + uint64_t *tmp_ptr = (uint64_t *)&src4_ptr; + vst1q_u64(tmp_ptr, tmp4[0]); + vst1q_u64(tmp_ptr + 2, tmp4[1]); + + // filter vectors + tmp4[0] = vreinterpretq_u64_s64(vmlal_s32( + vdupq_n_s64((const int64_t)x_filters), vget_low_s32(x_filter4_idx), + vdup_n_s32(UPSCALE_NORMATIVE_TAPS))); + tmp4[1] = vreinterpretq_u64_s64(vmlal_s32( + vdupq_n_s64((const int64_t)x_filters), vget_high_s32(x_filter4_idx), + vdup_n_s32(UPSCALE_NORMATIVE_TAPS))); + + const int16_t *x_filter4_ptr[4]; + tmp_ptr = (uint64_t *)&x_filter4_ptr; + vst1q_u64(tmp_ptr, tmp4[0]); + vst1q_u64(tmp_ptr + 2, tmp4[1]); +#else + uint32x4_t tmp4; + tmp4 = vreinterpretq_u32_s32( + vaddq_s32(vdupq_n_s32((const int32_t)s), src_idx)); + int16_t *src4_ptr[4]; + uint32_t *tmp_ptr = (uint32_t *)&src4_ptr; + vst1q_u32(tmp_ptr, tmp4); + + // filter vectors + tmp4 = vreinterpretq_u32_s32( + vmlaq_s32(vdupq_n_s32((const int32_t)x_filters), x_filter4_idx, + vdupq_n_s32(UPSCALE_NORMATIVE_TAPS))); + + const int16_t *x_filter4_ptr[4]; + tmp_ptr = (uint32_t *)&x_filter4_ptr; + vst1q_u32(tmp_ptr, tmp4); +#endif // AOM_ARCH_AARCH64 + + // Load source + int16x8_t s0 = vld1q_s16(src4_ptr[0]); + int16x8_t s1 = vld1q_s16(src4_ptr[1]); + int16x8_t s2 = vld1q_s16(src4_ptr[2]); + int16x8_t s3 = vld1q_s16(src4_ptr[3]); + + // Actually load the filters + const int16x8_t x_filter0 = vld1q_s16(x_filter4_ptr[0]); + const int16x8_t x_filter1 = vld1q_s16(x_filter4_ptr[1]); + const int16x8_t x_filter2 = vld1q_s16(x_filter4_ptr[2]); + const int16x8_t x_filter3 = vld1q_s16(x_filter4_ptr[3]); + + // Group low and high parts and transpose + int16x4_t filters_lo[] = { vget_low_s16(x_filter0), + vget_low_s16(x_filter1), + vget_low_s16(x_filter2), + vget_low_s16(x_filter3) }; + int16x4_t filters_hi[] = { vget_high_s16(x_filter0), + vget_high_s16(x_filter1), + vget_high_s16(x_filter2), + vget_high_s16(x_filter3) }; + transpose_array_inplace_u16_4x4((uint16x4_t *)filters_lo); + transpose_array_inplace_u16_4x4((uint16x4_t *)filters_hi); + + // Run the 2D Scale X convolution + uint16x4_t d0 = highbd_convolve8_2d_scale_horiz4x8_s32_s16( + s0, s1, s2, s3, filters_lo, filters_hi, shift_s32, offset_s32); + + d0 = vmin_u16(d0, max); + vst1_u16(d, d0); + + x_qn += 4 * x_step_qn; + d += 4; + width -= 4; + } while (width > 0); + + src_ptr += src_stride; + dst_ptr += dst_stride; + height--; + } while (height > 0); + } +} diff --git a/third_party/aom/av1/common/arm/highbd_convolve_neon.c b/third_party/aom/av1/common/arm/highbd_convolve_neon.c new file mode 100644 index 0000000000..3a3e33fcba --- /dev/null +++ b/third_party/aom/av1/common/arm/highbd_convolve_neon.c @@ -0,0 +1,2120 @@ +/* + * Copyright (c) 2023, Alliance for Open 
Media. All rights reserved + * + * This source code is subject to the terms of the BSD 2 Clause License and + * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License + * was not distributed with this source code in the LICENSE file, you can + * obtain it at www.aomedia.org/license/software. If the Alliance for Open + * Media Patent License 1.0 was not distributed with this source code in the + * PATENTS file, you can obtain it at www.aomedia.org/license/patent. + */ + +#include <assert.h> +#include <arm_neon.h> + +#include "config/aom_config.h" +#include "config/av1_rtcd.h" + +#include "aom_dsp/aom_dsp_common.h" +#include "aom_dsp/arm/mem_neon.h" +#include "aom_ports/mem.h" +#include "av1/common/convolve.h" +#include "av1/common/filter.h" + +static INLINE uint16x4_t +highbd_convolve6_4_y(const int16x4_t s0, const int16x4_t s1, const int16x4_t s2, + const int16x4_t s3, const int16x4_t s4, const int16x4_t s5, + const int16x8_t y_filter) { + // Values at indices 0 and 7 of y_filter are zero. + const int16x4_t y_filter_0_3 = vget_low_s16(y_filter); + const int16x4_t y_filter_4_7 = vget_high_s16(y_filter); + + int32x4_t sum = vmull_lane_s16(s0, y_filter_0_3, 1); + sum = vmlal_lane_s16(sum, s1, y_filter_0_3, 2); + sum = vmlal_lane_s16(sum, s2, y_filter_0_3, 3); + sum = vmlal_lane_s16(sum, s3, y_filter_4_7, 0); + sum = vmlal_lane_s16(sum, s4, y_filter_4_7, 1); + sum = vmlal_lane_s16(sum, s5, y_filter_4_7, 2); + + return vqrshrun_n_s32(sum, COMPOUND_ROUND1_BITS); +} + +static INLINE uint16x8_t +highbd_convolve6_8_y(const int16x8_t s0, const int16x8_t s1, const int16x8_t s2, + const int16x8_t s3, const int16x8_t s4, const int16x8_t s5, + const int16x8_t y_filter) { + // Values at indices 0 and 7 of y_filter are zero. + const int16x4_t y_filter_0_3 = vget_low_s16(y_filter); + const int16x4_t y_filter_4_7 = vget_high_s16(y_filter); + + int32x4_t sum0 = vmull_lane_s16(vget_low_s16(s0), y_filter_0_3, 1); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s1), y_filter_0_3, 2); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s2), y_filter_0_3, 3); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s3), y_filter_4_7, 0); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s4), y_filter_4_7, 1); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s5), y_filter_4_7, 2); + + int32x4_t sum1 = vmull_lane_s16(vget_high_s16(s0), y_filter_0_3, 1); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s1), y_filter_0_3, 2); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s2), y_filter_0_3, 3); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s3), y_filter_4_7, 0); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s4), y_filter_4_7, 1); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s5), y_filter_4_7, 2); + + return vcombine_u16(vqrshrun_n_s32(sum0, COMPOUND_ROUND1_BITS), + vqrshrun_n_s32(sum1, COMPOUND_ROUND1_BITS)); +} + +static INLINE void highbd_convolve_y_sr_6tap_neon( + const uint16_t *src_ptr, int src_stride, uint16_t *dst_ptr, int dst_stride, + int w, int h, const int16_t *y_filter_ptr, const int bd) { + const uint16x8_t max = vdupq_n_u16((1 << bd) - 1); + const int16x8_t y_filter_0_7 = vld1q_s16(y_filter_ptr); + + if (w == 4) { + const int16_t *s = (const int16_t *)(src_ptr + src_stride); + uint16_t *d = dst_ptr; + + int16x4_t s0, s1, s2, s3, s4; + load_s16_4x5(s, src_stride, &s0, &s1, &s2, &s3, &s4); + s += 5 * src_stride; + + do { + int16x4_t s5, s6, s7, s8; + load_s16_4x4(s, src_stride, &s5, &s6, &s7, &s8); + + uint16x4_t d0 = + highbd_convolve6_4_y(s0, s1, s2, s3, s4, s5, y_filter_0_7); + uint16x4_t d1 = + highbd_convolve6_4_y(s1, s2, s3, s4, 
s5, s6, y_filter_0_7); + uint16x4_t d2 = + highbd_convolve6_4_y(s2, s3, s4, s5, s6, s7, y_filter_0_7); + uint16x4_t d3 = + highbd_convolve6_4_y(s3, s4, s5, s6, s7, s8, y_filter_0_7); + + d0 = vmin_u16(d0, vget_low_u16(max)); + d1 = vmin_u16(d1, vget_low_u16(max)); + d2 = vmin_u16(d2, vget_low_u16(max)); + d3 = vmin_u16(d3, vget_low_u16(max)); + + store_u16_4x4(d, dst_stride, d0, d1, d2, d3); + + s0 = s4; + s1 = s5; + s2 = s6; + s3 = s7; + s4 = s8; + s += 4 * src_stride; + d += 4 * dst_stride; + h -= 4; + } while (h != 0); + } else { + // Width is a multiple of 8 and height is a multiple of 4. + do { + int height = h; + const int16_t *s = (const int16_t *)(src_ptr + src_stride); + uint16_t *d = dst_ptr; + + int16x8_t s0, s1, s2, s3, s4; + load_s16_8x5(s, src_stride, &s0, &s1, &s2, &s3, &s4); + s += 5 * src_stride; + + do { + int16x8_t s5, s6, s7, s8; + load_s16_8x4(s, src_stride, &s5, &s6, &s7, &s8); + + uint16x8_t d0 = + highbd_convolve6_8_y(s0, s1, s2, s3, s4, s5, y_filter_0_7); + uint16x8_t d1 = + highbd_convolve6_8_y(s1, s2, s3, s4, s5, s6, y_filter_0_7); + uint16x8_t d2 = + highbd_convolve6_8_y(s2, s3, s4, s5, s6, s7, y_filter_0_7); + uint16x8_t d3 = + highbd_convolve6_8_y(s3, s4, s5, s6, s7, s8, y_filter_0_7); + + d0 = vminq_u16(d0, max); + d1 = vminq_u16(d1, max); + d2 = vminq_u16(d2, max); + d3 = vminq_u16(d3, max); + + store_u16_8x4(d, dst_stride, d0, d1, d2, d3); + + s0 = s4; + s1 = s5; + s2 = s6; + s3 = s7; + s4 = s8; + s += 4 * src_stride; + d += 4 * dst_stride; + height -= 4; + } while (height != 0); + + src_ptr += 8; + dst_ptr += 8; + w -= 8; + } while (w != 0); + } +} + +static INLINE uint16x4_t highbd_convolve8_4_y( + const int16x4_t s0, const int16x4_t s1, const int16x4_t s2, + const int16x4_t s3, const int16x4_t s4, const int16x4_t s5, + const int16x4_t s6, const int16x4_t s7, const int16x8_t y_filter) { + const int16x4_t y_filter_0_3 = vget_low_s16(y_filter); + const int16x4_t y_filter_4_7 = vget_high_s16(y_filter); + + int32x4_t sum = vmull_lane_s16(s0, y_filter_0_3, 0); + sum = vmlal_lane_s16(sum, s1, y_filter_0_3, 1); + sum = vmlal_lane_s16(sum, s2, y_filter_0_3, 2); + sum = vmlal_lane_s16(sum, s3, y_filter_0_3, 3); + sum = vmlal_lane_s16(sum, s4, y_filter_4_7, 0); + sum = vmlal_lane_s16(sum, s5, y_filter_4_7, 1); + sum = vmlal_lane_s16(sum, s6, y_filter_4_7, 2); + sum = vmlal_lane_s16(sum, s7, y_filter_4_7, 3); + + return vqrshrun_n_s32(sum, COMPOUND_ROUND1_BITS); +} + +static INLINE uint16x8_t highbd_convolve8_8_y( + const int16x8_t s0, const int16x8_t s1, const int16x8_t s2, + const int16x8_t s3, const int16x8_t s4, const int16x8_t s5, + const int16x8_t s6, const int16x8_t s7, const int16x8_t y_filter) { + const int16x4_t y_filter_0_3 = vget_low_s16(y_filter); + const int16x4_t y_filter_4_7 = vget_high_s16(y_filter); + + int32x4_t sum0 = vmull_lane_s16(vget_low_s16(s0), y_filter_0_3, 0); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s1), y_filter_0_3, 1); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s2), y_filter_0_3, 2); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s3), y_filter_0_3, 3); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s4), y_filter_4_7, 0); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s5), y_filter_4_7, 1); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s6), y_filter_4_7, 2); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s7), y_filter_4_7, 3); + + int32x4_t sum1 = vmull_lane_s16(vget_high_s16(s0), y_filter_0_3, 0); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s1), y_filter_0_3, 1); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s2), y_filter_0_3, 2); + sum1 = 
vmlal_lane_s16(sum1, vget_high_s16(s3), y_filter_0_3, 3); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s4), y_filter_4_7, 0); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s5), y_filter_4_7, 1); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s6), y_filter_4_7, 2); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s7), y_filter_4_7, 3); + + return vcombine_u16(vqrshrun_n_s32(sum0, COMPOUND_ROUND1_BITS), + vqrshrun_n_s32(sum1, COMPOUND_ROUND1_BITS)); +} + +static INLINE void highbd_convolve_y_sr_8tap_neon( + const uint16_t *src_ptr, int src_stride, uint16_t *dst_ptr, int dst_stride, + int w, int h, const int16_t *y_filter_ptr, int bd) { + const uint16x8_t max = vdupq_n_u16((1 << bd) - 1); + const int16x8_t y_filter = vld1q_s16(y_filter_ptr); + + if (w == 4) { + const int16_t *s = (const int16_t *)src_ptr; + uint16_t *d = dst_ptr; + + int16x4_t s0, s1, s2, s3, s4, s5, s6; + load_s16_4x7(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6); + s += 7 * src_stride; + + do { + int16x4_t s7, s8, s9, s10; + load_s16_4x4(s, src_stride, &s7, &s8, &s9, &s10); + + uint16x4_t d0 = + highbd_convolve8_4_y(s0, s1, s2, s3, s4, s5, s6, s7, y_filter); + uint16x4_t d1 = + highbd_convolve8_4_y(s1, s2, s3, s4, s5, s6, s7, s8, y_filter); + uint16x4_t d2 = + highbd_convolve8_4_y(s2, s3, s4, s5, s6, s7, s8, s9, y_filter); + uint16x4_t d3 = + highbd_convolve8_4_y(s3, s4, s5, s6, s7, s8, s9, s10, y_filter); + + d0 = vmin_u16(d0, vget_low_u16(max)); + d1 = vmin_u16(d1, vget_low_u16(max)); + d2 = vmin_u16(d2, vget_low_u16(max)); + d3 = vmin_u16(d3, vget_low_u16(max)); + + store_u16_4x4(d, dst_stride, d0, d1, d2, d3); + + s0 = s4; + s1 = s5; + s2 = s6; + s3 = s7; + s4 = s8; + s5 = s9; + s6 = s10; + s += 4 * src_stride; + d += 4 * dst_stride; + h -= 4; + } while (h != 0); + } else { + do { + int height = h; + const int16_t *s = (const int16_t *)src_ptr; + uint16_t *d = dst_ptr; + + int16x8_t s0, s1, s2, s3, s4, s5, s6; + load_s16_8x7(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6); + s += 7 * src_stride; + + do { + int16x8_t s7, s8, s9, s10; + load_s16_8x4(s, src_stride, &s7, &s8, &s9, &s10); + + uint16x8_t d0 = + highbd_convolve8_8_y(s0, s1, s2, s3, s4, s5, s6, s7, y_filter); + uint16x8_t d1 = + highbd_convolve8_8_y(s1, s2, s3, s4, s5, s6, s7, s8, y_filter); + uint16x8_t d2 = + highbd_convolve8_8_y(s2, s3, s4, s5, s6, s7, s8, s9, y_filter); + uint16x8_t d3 = + highbd_convolve8_8_y(s3, s4, s5, s6, s7, s8, s9, s10, y_filter); + + d0 = vminq_u16(d0, max); + d1 = vminq_u16(d1, max); + d2 = vminq_u16(d2, max); + d3 = vminq_u16(d3, max); + + store_u16_8x4(d, dst_stride, d0, d1, d2, d3); + + s0 = s4; + s1 = s5; + s2 = s6; + s3 = s7; + s4 = s8; + s5 = s9; + s6 = s10; + s += 4 * src_stride; + d += 4 * dst_stride; + height -= 4; + } while (height != 0); + src_ptr += 8; + dst_ptr += 8; + w -= 8; + } while (w != 0); + } +} + +static INLINE uint16x4_t highbd_convolve12_4_y( + const int16x4_t s0, const int16x4_t s1, const int16x4_t s2, + const int16x4_t s3, const int16x4_t s4, const int16x4_t s5, + const int16x4_t s6, const int16x4_t s7, const int16x4_t s8, + const int16x4_t s9, const int16x4_t s10, const int16x4_t s11, + const int16x8_t y_filter_0_7, const int16x4_t y_filter_8_11) { + const int16x4_t y_filter_0_3 = vget_low_s16(y_filter_0_7); + const int16x4_t y_filter_4_7 = vget_high_s16(y_filter_0_7); + + int32x4_t sum = vmull_lane_s16(s0, y_filter_0_3, 0); + sum = vmlal_lane_s16(sum, s1, y_filter_0_3, 1); + sum = vmlal_lane_s16(sum, s2, y_filter_0_3, 2); + sum = vmlal_lane_s16(sum, s3, y_filter_0_3, 3); + sum = vmlal_lane_s16(sum, s4, 
y_filter_4_7, 0); + sum = vmlal_lane_s16(sum, s5, y_filter_4_7, 1); + sum = vmlal_lane_s16(sum, s6, y_filter_4_7, 2); + sum = vmlal_lane_s16(sum, s7, y_filter_4_7, 3); + sum = vmlal_lane_s16(sum, s8, y_filter_8_11, 0); + sum = vmlal_lane_s16(sum, s9, y_filter_8_11, 1); + sum = vmlal_lane_s16(sum, s10, y_filter_8_11, 2); + sum = vmlal_lane_s16(sum, s11, y_filter_8_11, 3); + + return vqrshrun_n_s32(sum, COMPOUND_ROUND1_BITS); +} + +static INLINE uint16x8_t highbd_convolve12_8_y( + const int16x8_t s0, const int16x8_t s1, const int16x8_t s2, + const int16x8_t s3, const int16x8_t s4, const int16x8_t s5, + const int16x8_t s6, const int16x8_t s7, const int16x8_t s8, + const int16x8_t s9, const int16x8_t s10, const int16x8_t s11, + const int16x8_t y_filter_0_7, const int16x4_t y_filter_8_11) { + const int16x4_t y_filter_0_3 = vget_low_s16(y_filter_0_7); + const int16x4_t y_filter_4_7 = vget_high_s16(y_filter_0_7); + + int32x4_t sum0 = vmull_lane_s16(vget_low_s16(s0), y_filter_0_3, 0); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s1), y_filter_0_3, 1); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s2), y_filter_0_3, 2); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s3), y_filter_0_3, 3); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s4), y_filter_4_7, 0); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s5), y_filter_4_7, 1); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s6), y_filter_4_7, 2); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s7), y_filter_4_7, 3); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s8), y_filter_8_11, 0); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s9), y_filter_8_11, 1); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s10), y_filter_8_11, 2); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s11), y_filter_8_11, 3); + + int32x4_t sum1 = vmull_lane_s16(vget_high_s16(s0), y_filter_0_3, 0); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s1), y_filter_0_3, 1); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s2), y_filter_0_3, 2); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s3), y_filter_0_3, 3); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s4), y_filter_4_7, 0); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s5), y_filter_4_7, 1); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s6), y_filter_4_7, 2); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s7), y_filter_4_7, 3); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s8), y_filter_8_11, 0); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s9), y_filter_8_11, 1); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s10), y_filter_8_11, 2); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s11), y_filter_8_11, 3); + + return vcombine_u16(vqrshrun_n_s32(sum0, COMPOUND_ROUND1_BITS), + vqrshrun_n_s32(sum1, COMPOUND_ROUND1_BITS)); +} + +static INLINE void highbd_convolve_y_sr_12tap_neon( + const uint16_t *src_ptr, int src_stride, uint16_t *dst_ptr, int dst_stride, + int w, int h, const int16_t *y_filter_ptr, int bd) { + const uint16x8_t max = vdupq_n_u16((1 << bd) - 1); + const int16x8_t y_filter_0_7 = vld1q_s16(y_filter_ptr); + const int16x4_t y_filter_8_11 = vld1_s16(y_filter_ptr + 8); + + if (w == 4) { + const int16_t *s = (const int16_t *)src_ptr; + uint16_t *d = dst_ptr; + + int16x4_t s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10; + load_s16_4x11(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6, &s7, &s8, + &s9, &s10); + s += 11 * src_stride; + + do { + int16x4_t s11, s12, s13, s14; + load_s16_4x4(s, src_stride, &s11, &s12, &s13, &s14); + + uint16x4_t d0 = + highbd_convolve12_4_y(s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, + s11, y_filter_0_7, y_filter_8_11); + uint16x4_t d1 = + highbd_convolve12_4_y(s1, s2, 
s3, s4, s5, s6, s7, s8, s9, s10, s11, + s12, y_filter_0_7, y_filter_8_11); + uint16x4_t d2 = + highbd_convolve12_4_y(s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, + s13, y_filter_0_7, y_filter_8_11); + uint16x4_t d3 = + highbd_convolve12_4_y(s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, + s14, y_filter_0_7, y_filter_8_11); + + d0 = vmin_u16(d0, vget_low_u16(max)); + d1 = vmin_u16(d1, vget_low_u16(max)); + d2 = vmin_u16(d2, vget_low_u16(max)); + d3 = vmin_u16(d3, vget_low_u16(max)); + + store_u16_4x4(d, dst_stride, d0, d1, d2, d3); + + s0 = s4; + s1 = s5; + s2 = s6; + s3 = s7; + s4 = s8; + s5 = s9; + s6 = s10; + s7 = s11; + s8 = s12; + s9 = s13; + s10 = s14; + s += 4 * src_stride; + d += 4 * dst_stride; + h -= 4; + } while (h != 0); + } else { + do { + int height = h; + const int16_t *s = (const int16_t *)src_ptr; + uint16_t *d = dst_ptr; + + int16x8_t s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10; + load_s16_8x11(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6, &s7, &s8, + &s9, &s10); + s += 11 * src_stride; + + do { + int16x8_t s11, s12, s13, s14; + load_s16_8x4(s, src_stride, &s11, &s12, &s13, &s14); + + uint16x8_t d0 = + highbd_convolve12_8_y(s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, + s11, y_filter_0_7, y_filter_8_11); + uint16x8_t d1 = + highbd_convolve12_8_y(s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, + s12, y_filter_0_7, y_filter_8_11); + uint16x8_t d2 = + highbd_convolve12_8_y(s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, + s13, y_filter_0_7, y_filter_8_11); + uint16x8_t d3 = + highbd_convolve12_8_y(s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, + s13, s14, y_filter_0_7, y_filter_8_11); + + d0 = vminq_u16(d0, max); + d1 = vminq_u16(d1, max); + d2 = vminq_u16(d2, max); + d3 = vminq_u16(d3, max); + + store_u16_8x4(d, dst_stride, d0, d1, d2, d3); + + s0 = s4; + s1 = s5; + s2 = s6; + s3 = s7; + s4 = s8; + s5 = s9; + s6 = s10; + s7 = s11; + s8 = s12; + s9 = s13; + s10 = s14; + s += 4 * src_stride; + d += 4 * dst_stride; + height -= 4; + } while (height != 0); + + src_ptr += 8; + dst_ptr += 8; + w -= 8; + } while (w != 0); + } +} + +void av1_highbd_convolve_y_sr_neon(const uint16_t *src, int src_stride, + uint16_t *dst, int dst_stride, int w, int h, + const InterpFilterParams *filter_params_y, + const int subpel_y_qn, int bd) { + if (w == 2 || h == 2) { + av1_highbd_convolve_y_sr_c(src, src_stride, dst, dst_stride, w, h, + filter_params_y, subpel_y_qn, bd); + return; + } + const int y_filter_taps = get_filter_tap(filter_params_y, subpel_y_qn); + const int vert_offset = filter_params_y->taps / 2 - 1; + const int16_t *y_filter_ptr = av1_get_interp_filter_subpel_kernel( + filter_params_y, subpel_y_qn & SUBPEL_MASK); + + src -= vert_offset * src_stride; + + if (y_filter_taps > 8) { + highbd_convolve_y_sr_12tap_neon(src, src_stride, dst, dst_stride, w, h, + y_filter_ptr, bd); + return; + } + if (y_filter_taps < 8) { + highbd_convolve_y_sr_6tap_neon(src, src_stride, dst, dst_stride, w, h, + y_filter_ptr, bd); + return; + } + + highbd_convolve_y_sr_8tap_neon(src, src_stride, dst, dst_stride, w, h, + y_filter_ptr, bd); +} + +static INLINE uint16x8_t highbd_convolve6_8_x(const int16x8_t s[6], + const int16x8_t x_filter, + const int32x4_t offset) { + // Values at indices 0 and 7 of y_filter are zero. 
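+  // (The kernel here is the horizontal x_filter; as in the vertical helpers,
+  // the 8-tap array holds a 6-tap filter, so taps 0 and 7 are skipped.)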
+ const int16x4_t x_filter_0_3 = vget_low_s16(x_filter); + const int16x4_t x_filter_4_7 = vget_high_s16(x_filter); + + int32x4_t sum0 = offset; + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s[0]), x_filter_0_3, 1); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s[1]), x_filter_0_3, 2); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s[2]), x_filter_0_3, 3); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s[3]), x_filter_4_7, 0); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s[4]), x_filter_4_7, 1); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s[5]), x_filter_4_7, 2); + + int32x4_t sum1 = offset; + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s[0]), x_filter_0_3, 1); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s[1]), x_filter_0_3, 2); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s[2]), x_filter_0_3, 3); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s[3]), x_filter_4_7, 0); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s[4]), x_filter_4_7, 1); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s[5]), x_filter_4_7, 2); + + return vcombine_u16(vqrshrun_n_s32(sum0, FILTER_BITS), + vqrshrun_n_s32(sum1, FILTER_BITS)); +} + +static INLINE void highbd_convolve_x_sr_6tap_neon( + const uint16_t *src_ptr, int src_stride, uint16_t *dst_ptr, int dst_stride, + int w, int h, const int16_t *x_filter_ptr, ConvolveParams *conv_params, + int bd) { + const int16x8_t x_filter = vld1q_s16(x_filter_ptr); + const uint16x8_t max = vdupq_n_u16((1 << bd) - 1); + // This shim allows to do only one rounding shift instead of two. + const int32x4_t offset = vdupq_n_s32(1 << (conv_params->round_0 - 1)); + + int height = h; + + do { + int width = w; + const int16_t *s = (const int16_t *)src_ptr; + uint16_t *d = dst_ptr; + + do { + int16x8_t s0[6], s1[6], s2[6], s3[6]; + load_s16_8x6(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3], + &s0[4], &s0[5]); + load_s16_8x6(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3], + &s1[4], &s1[5]); + load_s16_8x6(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3], + &s2[4], &s2[5]); + load_s16_8x6(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3], + &s3[4], &s3[5]); + + uint16x8_t d0 = highbd_convolve6_8_x(s0, x_filter, offset); + uint16x8_t d1 = highbd_convolve6_8_x(s1, x_filter, offset); + uint16x8_t d2 = highbd_convolve6_8_x(s2, x_filter, offset); + uint16x8_t d3 = highbd_convolve6_8_x(s3, x_filter, offset); + + d0 = vminq_u16(d0, max); + d1 = vminq_u16(d1, max); + d2 = vminq_u16(d2, max); + d3 = vminq_u16(d3, max); + + store_u16_8x4(d, dst_stride, d0, d1, d2, d3); + + s += 8; + d += 8; + width -= 8; + } while (width != 0); + + src_ptr += 4 * src_stride; + dst_ptr += 4 * dst_stride; + height -= 4; + } while (height != 0); +} + +static INLINE uint16x4_t highbd_convolve4_4_x(const int16x4_t s[4], + const int16x4_t x_filter, + const int32x4_t offset) { + int32x4_t sum = offset; + sum = vmlal_lane_s16(sum, s[0], x_filter, 0); + sum = vmlal_lane_s16(sum, s[1], x_filter, 1); + sum = vmlal_lane_s16(sum, s[2], x_filter, 2); + sum = vmlal_lane_s16(sum, s[3], x_filter, 3); + + return vqrshrun_n_s32(sum, FILTER_BITS); +} + +static INLINE uint16x8_t highbd_convolve8_8_x(const int16x8_t s[8], + const int16x8_t x_filter, + const int32x4_t offset) { + const int16x4_t x_filter_0_3 = vget_low_s16(x_filter); + const int16x4_t x_filter_4_7 = vget_high_s16(x_filter); + + int32x4_t sum0 = offset; + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s[0]), x_filter_0_3, 0); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s[1]), x_filter_0_3, 1); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s[2]), x_filter_0_3, 2); + sum0 = vmlal_lane_s16(sum0, 
vget_low_s16(s[3]), x_filter_0_3, 3); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s[4]), x_filter_4_7, 0); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s[5]), x_filter_4_7, 1); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s[6]), x_filter_4_7, 2); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s[7]), x_filter_4_7, 3); + + int32x4_t sum1 = offset; + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s[0]), x_filter_0_3, 0); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s[1]), x_filter_0_3, 1); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s[2]), x_filter_0_3, 2); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s[3]), x_filter_0_3, 3); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s[4]), x_filter_4_7, 0); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s[5]), x_filter_4_7, 1); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s[6]), x_filter_4_7, 2); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s[7]), x_filter_4_7, 3); + + return vcombine_u16(vqrshrun_n_s32(sum0, FILTER_BITS), + vqrshrun_n_s32(sum1, FILTER_BITS)); +} + +static INLINE void highbd_convolve_x_sr_neon(const uint16_t *src_ptr, + int src_stride, uint16_t *dst_ptr, + int dst_stride, int w, int h, + const int16_t *x_filter_ptr, + ConvolveParams *conv_params, + int bd) { + const uint16x8_t max = vdupq_n_u16((1 << bd) - 1); + // This shim allows to do only one rounding shift instead of two. + const int32x4_t offset = vdupq_n_s32(1 << (conv_params->round_0 - 1)); + + if (w == 4) { + // 4-tap filters are used for blocks having width == 4. + const int16x4_t x_filter = vld1_s16(x_filter_ptr + 2); + const int16_t *s = (const int16_t *)(src_ptr + 2); + uint16_t *d = dst_ptr; + + do { + int16x4_t s0[4], s1[4], s2[4], s3[4]; + load_s16_4x4(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3]); + load_s16_4x4(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3]); + load_s16_4x4(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3]); + load_s16_4x4(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3]); + + uint16x4_t d0 = highbd_convolve4_4_x(s0, x_filter, offset); + uint16x4_t d1 = highbd_convolve4_4_x(s1, x_filter, offset); + uint16x4_t d2 = highbd_convolve4_4_x(s2, x_filter, offset); + uint16x4_t d3 = highbd_convolve4_4_x(s3, x_filter, offset); + + d0 = vmin_u16(d0, vget_low_u16(max)); + d1 = vmin_u16(d1, vget_low_u16(max)); + d2 = vmin_u16(d2, vget_low_u16(max)); + d3 = vmin_u16(d3, vget_low_u16(max)); + + store_u16_4x4(d, dst_stride, d0, d1, d2, d3); + + s += 4 * src_stride; + d += 4 * dst_stride; + h -= 4; + } while (h != 0); + } else { + const int16x8_t x_filter = vld1q_s16(x_filter_ptr); + int height = h; + + do { + int width = w; + const int16_t *s = (const int16_t *)src_ptr; + uint16_t *d = dst_ptr; + + do { + int16x8_t s0[8], s1[8], s2[8], s3[8]; + load_s16_8x8(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3], + &s0[4], &s0[5], &s0[6], &s0[7]); + load_s16_8x8(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3], + &s1[4], &s1[5], &s1[6], &s1[7]); + load_s16_8x8(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3], + &s2[4], &s2[5], &s2[6], &s2[7]); + load_s16_8x8(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3], + &s3[4], &s3[5], &s3[6], &s3[7]); + + uint16x8_t d0 = highbd_convolve8_8_x(s0, x_filter, offset); + uint16x8_t d1 = highbd_convolve8_8_x(s1, x_filter, offset); + uint16x8_t d2 = highbd_convolve8_8_x(s2, x_filter, offset); + uint16x8_t d3 = highbd_convolve8_8_x(s3, x_filter, offset); + + d0 = vminq_u16(d0, max); + d1 = vminq_u16(d1, max); + d2 = vminq_u16(d2, max); + d3 = vminq_u16(d3, max); + + store_u16_8x4(d, dst_stride, d0, d1, d2, d3); + + s += 
8; + d += 8; + width -= 8; + } while (width != 0); + src_ptr += 4 * src_stride; + dst_ptr += 4 * dst_stride; + height -= 4; + } while (height != 0); + } +} + +static INLINE uint16x4_t highbd_convolve12_4_x(const int16x4_t s[12], + const int16x8_t x_filter_0_7, + const int16x4_t x_filter_8_11, + const int32x4_t offset) { + const int16x4_t x_filter_0_3 = vget_low_s16(x_filter_0_7); + const int16x4_t x_filter_4_7 = vget_high_s16(x_filter_0_7); + + int32x4_t sum = offset; + sum = vmlal_lane_s16(sum, s[0], x_filter_0_3, 0); + sum = vmlal_lane_s16(sum, s[1], x_filter_0_3, 1); + sum = vmlal_lane_s16(sum, s[2], x_filter_0_3, 2); + sum = vmlal_lane_s16(sum, s[3], x_filter_0_3, 3); + sum = vmlal_lane_s16(sum, s[4], x_filter_4_7, 0); + sum = vmlal_lane_s16(sum, s[5], x_filter_4_7, 1); + sum = vmlal_lane_s16(sum, s[6], x_filter_4_7, 2); + sum = vmlal_lane_s16(sum, s[7], x_filter_4_7, 3); + sum = vmlal_lane_s16(sum, s[8], x_filter_8_11, 0); + sum = vmlal_lane_s16(sum, s[9], x_filter_8_11, 1); + sum = vmlal_lane_s16(sum, s[10], x_filter_8_11, 2); + sum = vmlal_lane_s16(sum, s[11], x_filter_8_11, 3); + + return vqrshrun_n_s32(sum, FILTER_BITS); +} + +static INLINE uint16x8_t highbd_convolve12_8_x(const int16x8_t s[12], + const int16x8_t x_filter_0_7, + const int16x4_t x_filter_8_11, + const int32x4_t offset) { + const int16x4_t x_filter_0_3 = vget_low_s16(x_filter_0_7); + const int16x4_t x_filter_4_7 = vget_high_s16(x_filter_0_7); + + int32x4_t sum0 = offset; + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s[0]), x_filter_0_3, 0); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s[1]), x_filter_0_3, 1); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s[2]), x_filter_0_3, 2); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s[3]), x_filter_0_3, 3); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s[4]), x_filter_4_7, 0); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s[5]), x_filter_4_7, 1); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s[6]), x_filter_4_7, 2); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s[7]), x_filter_4_7, 3); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s[8]), x_filter_8_11, 0); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s[9]), x_filter_8_11, 1); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s[10]), x_filter_8_11, 2); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s[11]), x_filter_8_11, 3); + + int32x4_t sum1 = offset; + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s[0]), x_filter_0_3, 0); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s[1]), x_filter_0_3, 1); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s[2]), x_filter_0_3, 2); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s[3]), x_filter_0_3, 3); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s[4]), x_filter_4_7, 0); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s[5]), x_filter_4_7, 1); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s[6]), x_filter_4_7, 2); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s[7]), x_filter_4_7, 3); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s[8]), x_filter_8_11, 0); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s[9]), x_filter_8_11, 1); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s[10]), x_filter_8_11, 2); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s[11]), x_filter_8_11, 3); + + return vcombine_u16(vqrshrun_n_s32(sum0, FILTER_BITS), + vqrshrun_n_s32(sum1, FILTER_BITS)); +} + +static INLINE void highbd_convolve_x_sr_12tap_neon( + const uint16_t *src_ptr, int src_stride, uint16_t *dst_ptr, int dst_stride, + int w, int h, const int16_t *x_filter_ptr, ConvolveParams *conv_params, + int bd) { + const uint16x8_t max = vdupq_n_u16((1 << bd) - 1); + // This shim allows to do 
only one rounding shift instead of two. + const int32x4_t offset = vdupq_n_s32(1 << (conv_params->round_0 - 1)); + const int16x8_t x_filter_0_7 = vld1q_s16(x_filter_ptr); + const int16x4_t x_filter_8_11 = vld1_s16(x_filter_ptr + 8); + + if (w == 4) { + const int16_t *s = (const int16_t *)src_ptr; + uint16_t *d = dst_ptr; + + do { + int16x4_t s0[12], s1[12], s2[12], s3[12]; + load_s16_4x12(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3], + &s0[4], &s0[5], &s0[6], &s0[7], &s0[8], &s0[9], &s0[10], + &s0[11]); + load_s16_4x12(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3], + &s1[4], &s1[5], &s1[6], &s1[7], &s1[8], &s1[9], &s1[10], + &s1[11]); + load_s16_4x12(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3], + &s2[4], &s2[5], &s2[6], &s2[7], &s2[8], &s2[9], &s2[10], + &s2[11]); + load_s16_4x12(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3], + &s3[4], &s3[5], &s3[6], &s3[7], &s3[8], &s3[9], &s3[10], + &s3[11]); + + uint16x4_t d0 = + highbd_convolve12_4_x(s0, x_filter_0_7, x_filter_8_11, offset); + uint16x4_t d1 = + highbd_convolve12_4_x(s1, x_filter_0_7, x_filter_8_11, offset); + uint16x4_t d2 = + highbd_convolve12_4_x(s2, x_filter_0_7, x_filter_8_11, offset); + uint16x4_t d3 = + highbd_convolve12_4_x(s3, x_filter_0_7, x_filter_8_11, offset); + + d0 = vmin_u16(d0, vget_low_u16(max)); + d1 = vmin_u16(d1, vget_low_u16(max)); + d2 = vmin_u16(d2, vget_low_u16(max)); + d3 = vmin_u16(d3, vget_low_u16(max)); + + store_u16_4x4(d, dst_stride, d0, d1, d2, d3); + + s += 4 * src_stride; + d += 4 * dst_stride; + h -= 4; + } while (h != 0); + } else { + int height = h; + + do { + int width = w; + const int16_t *s = (const int16_t *)src_ptr; + uint16_t *d = dst_ptr; + + do { + int16x8_t s0[12], s1[12], s2[12], s3[12]; + load_s16_8x12(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3], + &s0[4], &s0[5], &s0[6], &s0[7], &s0[8], &s0[9], &s0[10], + &s0[11]); + load_s16_8x12(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3], + &s1[4], &s1[5], &s1[6], &s1[7], &s1[8], &s1[9], &s1[10], + &s1[11]); + load_s16_8x12(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3], + &s2[4], &s2[5], &s2[6], &s2[7], &s2[8], &s2[9], &s2[10], + &s2[11]); + load_s16_8x12(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3], + &s3[4], &s3[5], &s3[6], &s3[7], &s3[8], &s3[9], &s3[10], + &s3[11]); + + uint16x8_t d0 = + highbd_convolve12_8_x(s0, x_filter_0_7, x_filter_8_11, offset); + uint16x8_t d1 = + highbd_convolve12_8_x(s1, x_filter_0_7, x_filter_8_11, offset); + uint16x8_t d2 = + highbd_convolve12_8_x(s2, x_filter_0_7, x_filter_8_11, offset); + uint16x8_t d3 = + highbd_convolve12_8_x(s3, x_filter_0_7, x_filter_8_11, offset); + + d0 = vminq_u16(d0, max); + d1 = vminq_u16(d1, max); + d2 = vminq_u16(d2, max); + d3 = vminq_u16(d3, max); + + store_u16_8x4(d, dst_stride, d0, d1, d2, d3); + + s += 8; + d += 8; + width -= 8; + } while (width != 0); + src_ptr += 4 * src_stride; + dst_ptr += 4 * dst_stride; + height -= 4; + } while (height != 0); + } +} + +void av1_highbd_convolve_x_sr_neon(const uint16_t *src, int src_stride, + uint16_t *dst, int dst_stride, int w, int h, + const InterpFilterParams *filter_params_x, + const int subpel_x_qn, + ConvolveParams *conv_params, int bd) { + if (w == 2 || h == 2) { + av1_highbd_convolve_x_sr_c(src, src_stride, dst, dst_stride, w, h, + filter_params_x, subpel_x_qn, conv_params, bd); + return; + } + const int x_filter_taps = get_filter_tap(filter_params_x, subpel_x_qn); + const int horiz_offset = filter_params_x->taps / 2 - 1; + const int16_t *x_filter_ptr = 
av1_get_interp_filter_subpel_kernel( + filter_params_x, subpel_x_qn & SUBPEL_MASK); + + src -= horiz_offset; + + if (x_filter_taps > 8) { + highbd_convolve_x_sr_12tap_neon(src, src_stride, dst, dst_stride, w, h, + x_filter_ptr, conv_params, bd); + return; + } + if (x_filter_taps <= 6 && w != 4) { + highbd_convolve_x_sr_6tap_neon(src + 1, src_stride, dst, dst_stride, w, h, + x_filter_ptr, conv_params, bd); + return; + } + + highbd_convolve_x_sr_neon(src, src_stride, dst, dst_stride, w, h, + x_filter_ptr, conv_params, bd); +} + +static INLINE uint16x4_t highbd_convolve6_4_2d_v( + const int16x4_t s0, const int16x4_t s1, const int16x4_t s2, + const int16x4_t s3, const int16x4_t s4, const int16x4_t s5, + const int16x8_t y_filter, const int32x4_t round_shift, + const int32x4_t offset) { + // Values at indices 0 and 7 of y_filter are zero. + const int16x4_t y_filter_0_3 = vget_low_s16(y_filter); + const int16x4_t y_filter_4_7 = vget_high_s16(y_filter); + + int32x4_t sum = vmlal_lane_s16(offset, s0, y_filter_0_3, 1); + sum = vmlal_lane_s16(sum, s1, y_filter_0_3, 2); + sum = vmlal_lane_s16(sum, s2, y_filter_0_3, 3); + sum = vmlal_lane_s16(sum, s3, y_filter_4_7, 0); + sum = vmlal_lane_s16(sum, s4, y_filter_4_7, 1); + sum = vmlal_lane_s16(sum, s5, y_filter_4_7, 2); + + sum = vshlq_s32(sum, round_shift); + return vqmovun_s32(sum); +} + +static INLINE uint16x8_t highbd_convolve6_8_2d_v( + const int16x8_t s0, const int16x8_t s1, const int16x8_t s2, + const int16x8_t s3, const int16x8_t s4, const int16x8_t s5, + const int16x8_t y_filter, const int32x4_t round_shift, + const int32x4_t offset) { + // Values at indices 0 and 7 of y_filter are zero. + const int16x4_t y_filter_0_3 = vget_low_s16(y_filter); + const int16x4_t y_filter_4_7 = vget_high_s16(y_filter); + + int32x4_t sum0 = vmlal_lane_s16(offset, vget_low_s16(s0), y_filter_0_3, 1); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s1), y_filter_0_3, 2); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s2), y_filter_0_3, 3); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s3), y_filter_4_7, 0); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s4), y_filter_4_7, 1); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s5), y_filter_4_7, 2); + + int32x4_t sum1 = vmlal_lane_s16(offset, vget_high_s16(s0), y_filter_0_3, 1); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s1), y_filter_0_3, 2); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s2), y_filter_0_3, 3); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s3), y_filter_4_7, 0); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s4), y_filter_4_7, 1); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s5), y_filter_4_7, 2); + + sum0 = vshlq_s32(sum0, round_shift); + sum1 = vshlq_s32(sum1, round_shift); + + return vcombine_u16(vqmovun_s32(sum0), vqmovun_s32(sum1)); +} + +static INLINE void highbd_convolve_2d_sr_vert_6tap_neon( + const uint16_t *src_ptr, int src_stride, uint16_t *dst_ptr, int dst_stride, + int w, int h, const int16_t *y_filter_ptr, ConvolveParams *conv_params, + int bd, const int offset) { + const uint16x8_t max = vdupq_n_u16((1 << bd) - 1); + const int16x8_t y_filter = vld1q_s16(y_filter_ptr); + const int32x4_t offset_s32 = vdupq_n_s32(offset); + const int round1_shift = conv_params->round_1; + const int32x4_t round1_shift_s32 = vdupq_n_s32(-round1_shift); + + if (w == 4) { + const int16_t *s = (const int16_t *)src_ptr; + uint16_t *d = dst_ptr; + int16x4_t s0, s1, s2, s3, s4; + load_s16_4x5(s, src_stride, &s0, &s1, &s2, &s3, &s4); + s += 5 * src_stride; + + do { + int16x4_t s5, s6, s7, s8; + load_s16_4x4(s, src_stride, &s5, &s6, &s7, &s8); + + 
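+      // Four output rows are produced from the nine-row window s0..s8; the
+      // last five rows are then reused as the start of the next window so
+      // only four new rows are loaded per iteration.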
uint16x4_t d0 = highbd_convolve6_4_2d_v(s0, s1, s2, s3, s4, s5, y_filter, + round1_shift_s32, offset_s32); + uint16x4_t d1 = highbd_convolve6_4_2d_v(s1, s2, s3, s4, s5, s6, y_filter, + round1_shift_s32, offset_s32); + uint16x4_t d2 = highbd_convolve6_4_2d_v(s2, s3, s4, s5, s6, s7, y_filter, + round1_shift_s32, offset_s32); + uint16x4_t d3 = highbd_convolve6_4_2d_v(s3, s4, s5, s6, s7, s8, y_filter, + round1_shift_s32, offset_s32); + + d0 = vmin_u16(d0, vget_low_u16(max)); + d1 = vmin_u16(d1, vget_low_u16(max)); + d2 = vmin_u16(d2, vget_low_u16(max)); + d3 = vmin_u16(d3, vget_low_u16(max)); + + store_u16_4x4(d, dst_stride, d0, d1, d2, d3); + + s0 = s4; + s1 = s5; + s2 = s6; + s3 = s7; + s4 = s8; + s += 4 * src_stride; + d += 4 * dst_stride; + h -= 4; + } while (h != 0); + } else { + do { + int height = h; + const int16_t *s = (const int16_t *)src_ptr; + uint16_t *d = dst_ptr; + int16x8_t s0, s1, s2, s3, s4; + load_s16_8x5(s, src_stride, &s0, &s1, &s2, &s3, &s4); + s += 5 * src_stride; + + do { + int16x8_t s5, s6, s7, s8; + load_s16_8x4(s, src_stride, &s5, &s6, &s7, &s8); + + uint16x8_t d0 = highbd_convolve6_8_2d_v( + s0, s1, s2, s3, s4, s5, y_filter, round1_shift_s32, offset_s32); + uint16x8_t d1 = highbd_convolve6_8_2d_v( + s1, s2, s3, s4, s5, s6, y_filter, round1_shift_s32, offset_s32); + uint16x8_t d2 = highbd_convolve6_8_2d_v( + s2, s3, s4, s5, s6, s7, y_filter, round1_shift_s32, offset_s32); + uint16x8_t d3 = highbd_convolve6_8_2d_v( + s3, s4, s5, s6, s7, s8, y_filter, round1_shift_s32, offset_s32); + + d0 = vminq_u16(d0, max); + d1 = vminq_u16(d1, max); + d2 = vminq_u16(d2, max); + d3 = vminq_u16(d3, max); + + store_u16_8x4(d, dst_stride, d0, d1, d2, d3); + + s0 = s4; + s1 = s5; + s2 = s6; + s3 = s7; + s4 = s8; + s += 4 * src_stride; + d += 4 * dst_stride; + height -= 4; + } while (height != 0); + src_ptr += 8; + dst_ptr += 8; + w -= 8; + } while (w != 0); + } +} + +static INLINE uint16x4_t highbd_convolve8_4_2d_v( + const int16x4_t s0, const int16x4_t s1, const int16x4_t s2, + const int16x4_t s3, const int16x4_t s4, const int16x4_t s5, + const int16x4_t s6, const int16x4_t s7, const int16x8_t y_filter, + const int32x4_t round_shift, const int32x4_t offset) { + const int16x4_t y_filter_lo = vget_low_s16(y_filter); + const int16x4_t y_filter_hi = vget_high_s16(y_filter); + + int32x4_t sum = vmlal_lane_s16(offset, s0, y_filter_lo, 0); + sum = vmlal_lane_s16(sum, s1, y_filter_lo, 1); + sum = vmlal_lane_s16(sum, s2, y_filter_lo, 2); + sum = vmlal_lane_s16(sum, s3, y_filter_lo, 3); + sum = vmlal_lane_s16(sum, s4, y_filter_hi, 0); + sum = vmlal_lane_s16(sum, s5, y_filter_hi, 1); + sum = vmlal_lane_s16(sum, s6, y_filter_hi, 2); + sum = vmlal_lane_s16(sum, s7, y_filter_hi, 3); + + sum = vshlq_s32(sum, round_shift); + return vqmovun_s32(sum); +} + +static INLINE uint16x8_t highbd_convolve8_8_2d_v( + const int16x8_t s0, const int16x8_t s1, const int16x8_t s2, + const int16x8_t s3, const int16x8_t s4, const int16x8_t s5, + const int16x8_t s6, const int16x8_t s7, const int16x8_t y_filter, + const int32x4_t round_shift, const int32x4_t offset) { + const int16x4_t y_filter_lo = vget_low_s16(y_filter); + const int16x4_t y_filter_hi = vget_high_s16(y_filter); + + int32x4_t sum0 = vmlal_lane_s16(offset, vget_low_s16(s0), y_filter_lo, 0); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s1), y_filter_lo, 1); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s2), y_filter_lo, 2); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s3), y_filter_lo, 3); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s4), y_filter_hi, 0); + sum0 = 
vmlal_lane_s16(sum0, vget_low_s16(s5), y_filter_hi, 1); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s6), y_filter_hi, 2); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s7), y_filter_hi, 3); + + int32x4_t sum1 = vmlal_lane_s16(offset, vget_high_s16(s0), y_filter_lo, 0); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s1), y_filter_lo, 1); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s2), y_filter_lo, 2); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s3), y_filter_lo, 3); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s4), y_filter_hi, 0); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s5), y_filter_hi, 1); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s6), y_filter_hi, 2); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s7), y_filter_hi, 3); + + sum0 = vshlq_s32(sum0, round_shift); + sum1 = vshlq_s32(sum1, round_shift); + + return vcombine_u16(vqmovun_s32(sum0), vqmovun_s32(sum1)); +} + +static INLINE void highbd_convolve_2d_sr_vert_8tap_neon( + const uint16_t *src_ptr, int src_stride, uint16_t *dst_ptr, int dst_stride, + int w, int h, const int16_t *y_filter_ptr, ConvolveParams *conv_params, + int bd, const int offset) { + const uint16x8_t max = vdupq_n_u16((1 << bd) - 1); + const int16x8_t y_filter = vld1q_s16(y_filter_ptr); + const int32x4_t offset_s32 = vdupq_n_s32(offset); + const int round1_shift = conv_params->round_1; + const int32x4_t round1_shift_s32 = vdupq_n_s32(-round1_shift); + + if (w == 4) { + const int16_t *s = (const int16_t *)src_ptr; + uint16_t *d = dst_ptr; + + int16x4_t s0, s1, s2, s3, s4, s5, s6; + load_s16_4x7(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6); + s += 7 * src_stride; + + do { + int16x4_t s7, s8, s9, s10; + load_s16_4x4(s, src_stride, &s7, &s8, &s9, &s10); + + uint16x4_t d0 = + highbd_convolve8_4_2d_v(s0, s1, s2, s3, s4, s5, s6, s7, y_filter, + round1_shift_s32, offset_s32); + uint16x4_t d1 = + highbd_convolve8_4_2d_v(s1, s2, s3, s4, s5, s6, s7, s8, y_filter, + round1_shift_s32, offset_s32); + uint16x4_t d2 = + highbd_convolve8_4_2d_v(s2, s3, s4, s5, s6, s7, s8, s9, y_filter, + round1_shift_s32, offset_s32); + uint16x4_t d3 = + highbd_convolve8_4_2d_v(s3, s4, s5, s6, s7, s8, s9, s10, y_filter, + round1_shift_s32, offset_s32); + + d0 = vmin_u16(d0, vget_low_u16(max)); + d1 = vmin_u16(d1, vget_low_u16(max)); + d2 = vmin_u16(d2, vget_low_u16(max)); + d3 = vmin_u16(d3, vget_low_u16(max)); + + store_u16_4x4(d, dst_stride, d0, d1, d2, d3); + + s0 = s4; + s1 = s5; + s2 = s6; + s3 = s7; + s4 = s8; + s5 = s9; + s6 = s10; + s += 4 * src_stride; + d += 4 * dst_stride; + h -= 4; + } while (h != 0); + } else { + do { + int height = h; + const int16_t *s = (const int16_t *)src_ptr; + uint16_t *d = dst_ptr; + + int16x8_t s0, s1, s2, s3, s4, s5, s6; + load_s16_8x7(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6); + s += 7 * src_stride; + + do { + int16x8_t s7, s8, s9, s10; + load_s16_8x4(s, src_stride, &s7, &s8, &s9, &s10); + + uint16x8_t d0 = + highbd_convolve8_8_2d_v(s0, s1, s2, s3, s4, s5, s6, s7, y_filter, + round1_shift_s32, offset_s32); + uint16x8_t d1 = + highbd_convolve8_8_2d_v(s1, s2, s3, s4, s5, s6, s7, s8, y_filter, + round1_shift_s32, offset_s32); + uint16x8_t d2 = + highbd_convolve8_8_2d_v(s2, s3, s4, s5, s6, s7, s8, s9, y_filter, + round1_shift_s32, offset_s32); + uint16x8_t d3 = + highbd_convolve8_8_2d_v(s3, s4, s5, s6, s7, s8, s9, s10, y_filter, + round1_shift_s32, offset_s32); + + d0 = vminq_u16(d0, max); + d1 = vminq_u16(d1, max); + d2 = vminq_u16(d2, max); + d3 = vminq_u16(d3, max); + + store_u16_8x4(d, dst_stride, d0, d1, d2, d3); + + s0 = s4; + s1 = s5; + s2 = s6; + s3 
= s7; + s4 = s8; + s5 = s9; + s6 = s10; + s += 4 * src_stride; + d += 4 * dst_stride; + height -= 4; + } while (height != 0); + src_ptr += 8; + dst_ptr += 8; + w -= 8; + } while (w != 0); + } +} + +static INLINE uint16x4_t highbd_convolve12_4_2d_v( + const int16x4_t s0, const int16x4_t s1, const int16x4_t s2, + const int16x4_t s3, const int16x4_t s4, const int16x4_t s5, + const int16x4_t s6, const int16x4_t s7, const int16x4_t s8, + const int16x4_t s9, const int16x4_t s10, const int16x4_t s11, + const int16x8_t y_filter_0_7, const int16x4_t y_filter_8_11, + const int32x4_t round_shift, const int32x4_t offset) { + const int16x4_t y_filter_0_3 = vget_low_s16(y_filter_0_7); + const int16x4_t y_filter_4_7 = vget_high_s16(y_filter_0_7); + + int32x4_t sum = vmlal_lane_s16(offset, s0, y_filter_0_3, 0); + sum = vmlal_lane_s16(sum, s1, y_filter_0_3, 1); + sum = vmlal_lane_s16(sum, s2, y_filter_0_3, 2); + sum = vmlal_lane_s16(sum, s3, y_filter_0_3, 3); + sum = vmlal_lane_s16(sum, s4, y_filter_4_7, 0); + sum = vmlal_lane_s16(sum, s5, y_filter_4_7, 1); + sum = vmlal_lane_s16(sum, s6, y_filter_4_7, 2); + sum = vmlal_lane_s16(sum, s7, y_filter_4_7, 3); + sum = vmlal_lane_s16(sum, s8, y_filter_8_11, 0); + sum = vmlal_lane_s16(sum, s9, y_filter_8_11, 1); + sum = vmlal_lane_s16(sum, s10, y_filter_8_11, 2); + sum = vmlal_lane_s16(sum, s11, y_filter_8_11, 3); + + sum = vshlq_s32(sum, round_shift); + return vqmovun_s32(sum); +} + +static INLINE uint16x8_t highbd_convolve12_8_2d_v( + const int16x8_t s0, const int16x8_t s1, const int16x8_t s2, + const int16x8_t s3, const int16x8_t s4, const int16x8_t s5, + const int16x8_t s6, const int16x8_t s7, const int16x8_t s8, + const int16x8_t s9, const int16x8_t s10, const int16x8_t s11, + const int16x8_t y_filter_0_7, const int16x4_t y_filter_8_11, + const int32x4_t round_shift, const int32x4_t offset) { + const int16x4_t y_filter_0_3 = vget_low_s16(y_filter_0_7); + const int16x4_t y_filter_4_7 = vget_high_s16(y_filter_0_7); + + int32x4_t sum0 = vmlal_lane_s16(offset, vget_low_s16(s0), y_filter_0_3, 0); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s1), y_filter_0_3, 1); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s2), y_filter_0_3, 2); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s3), y_filter_0_3, 3); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s4), y_filter_4_7, 0); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s5), y_filter_4_7, 1); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s6), y_filter_4_7, 2); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s7), y_filter_4_7, 3); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s8), y_filter_8_11, 0); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s9), y_filter_8_11, 1); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s10), y_filter_8_11, 2); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s11), y_filter_8_11, 3); + + int32x4_t sum1 = vmlal_lane_s16(offset, vget_high_s16(s0), y_filter_0_3, 0); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s1), y_filter_0_3, 1); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s2), y_filter_0_3, 2); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s3), y_filter_0_3, 3); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s4), y_filter_4_7, 0); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s5), y_filter_4_7, 1); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s6), y_filter_4_7, 2); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s7), y_filter_4_7, 3); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s8), y_filter_8_11, 0); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s9), y_filter_8_11, 1); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s10), y_filter_8_11, 2); + sum1 = 
vmlal_lane_s16(sum1, vget_high_s16(s11), y_filter_8_11, 3); + + sum0 = vshlq_s32(sum0, round_shift); + sum1 = vshlq_s32(sum1, round_shift); + + return vcombine_u16(vqmovun_s32(sum0), vqmovun_s32(sum1)); +} + +static INLINE void highbd_convolve_2d_sr_vert_12tap_neon( + const uint16_t *src_ptr, int src_stride, uint16_t *dst_ptr, int dst_stride, + int w, int h, const int16_t *y_filter_ptr, ConvolveParams *conv_params, + const int bd, const int offset) { + const uint16x8_t max = vdupq_n_u16((1 << bd) - 1); + const int16x8_t y_filter_0_7 = vld1q_s16(y_filter_ptr); + const int16x4_t y_filter_8_11 = vld1_s16(y_filter_ptr + 8); + const int32x4_t offset_s32 = vdupq_n_s32(offset); + const int round1_shift = conv_params->round_1; + const int32x4_t round1_shift_s32 = vdupq_n_s32(-round1_shift); + + if (w == 4) { + const int16_t *s = (const int16_t *)src_ptr; + uint16_t *d = dst_ptr; + + int16x4_t s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10; + load_s16_4x11(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6, &s7, &s8, + &s9, &s10); + s += 11 * src_stride; + + do { + int16x4_t s11, s12, s13, s14; + load_s16_4x4(s, src_stride, &s11, &s12, &s13, &s14); + + uint16x4_t d0 = highbd_convolve12_4_2d_v( + s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, y_filter_0_7, + y_filter_8_11, round1_shift_s32, offset_s32); + uint16x4_t d1 = highbd_convolve12_4_2d_v( + s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, y_filter_0_7, + y_filter_8_11, round1_shift_s32, offset_s32); + uint16x4_t d2 = highbd_convolve12_4_2d_v( + s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, y_filter_0_7, + y_filter_8_11, round1_shift_s32, offset_s32); + uint16x4_t d3 = highbd_convolve12_4_2d_v( + s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, y_filter_0_7, + y_filter_8_11, round1_shift_s32, offset_s32); + + d0 = vmin_u16(d0, vget_low_u16(max)); + d1 = vmin_u16(d1, vget_low_u16(max)); + d2 = vmin_u16(d2, vget_low_u16(max)); + d3 = vmin_u16(d3, vget_low_u16(max)); + + store_u16_4x4(d, dst_stride, d0, d1, d2, d3); + + s0 = s4; + s1 = s5; + s2 = s6; + s3 = s7; + s4 = s8; + s5 = s9; + s6 = s10; + s7 = s11; + s8 = s12; + s9 = s13; + s10 = s14; + s += 4 * src_stride; + d += 4 * dst_stride; + h -= 4; + } while (h != 0); + } else { + do { + int height = h; + const int16_t *s = (const int16_t *)src_ptr; + uint16_t *d = dst_ptr; + + int16x8_t s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10; + load_s16_8x11(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6, &s7, &s8, + &s9, &s10); + s += 11 * src_stride; + + do { + int16x8_t s11, s12, s13, s14; + load_s16_8x4(s, src_stride, &s11, &s12, &s13, &s14); + + uint16x8_t d0 = highbd_convolve12_8_2d_v( + s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, y_filter_0_7, + y_filter_8_11, round1_shift_s32, offset_s32); + uint16x8_t d1 = highbd_convolve12_8_2d_v( + s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, y_filter_0_7, + y_filter_8_11, round1_shift_s32, offset_s32); + uint16x8_t d2 = highbd_convolve12_8_2d_v( + s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, y_filter_0_7, + y_filter_8_11, round1_shift_s32, offset_s32); + uint16x8_t d3 = highbd_convolve12_8_2d_v( + s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, y_filter_0_7, + y_filter_8_11, round1_shift_s32, offset_s32); + + d0 = vminq_u16(d0, max); + d1 = vminq_u16(d1, max); + d2 = vminq_u16(d2, max); + d3 = vminq_u16(d3, max); + + store_u16_8x4(d, dst_stride, d0, d1, d2, d3); + + s0 = s4; + s1 = s5; + s2 = s6; + s3 = s7; + s4 = s8; + s5 = s9; + s6 = s10; + s7 = s11; + s8 = s12; + s9 = s13; + s10 = s14; + s += 4 * src_stride; + d += 4 * 
dst_stride; + height -= 4; + } while (height != 0); + + src_ptr += 8; + dst_ptr += 8; + w -= 8; + } while (w != 0); + } +} + +static INLINE uint16x8_t highbd_convolve6_8_2d_h(const int16x8_t s[6], + const int16x8_t x_filter, + const int32x4_t shift_s32, + const int32x4_t offset) { + // Values at indices 0 and 7 of x_filter are zero. + const int16x4_t x_filter_0_3 = vget_low_s16(x_filter); + const int16x4_t x_filter_4_7 = vget_high_s16(x_filter); + + int32x4_t sum0 = vmlal_lane_s16(offset, vget_low_s16(s[0]), x_filter_0_3, 1); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s[1]), x_filter_0_3, 2); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s[2]), x_filter_0_3, 3); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s[3]), x_filter_4_7, 0); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s[4]), x_filter_4_7, 1); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s[5]), x_filter_4_7, 2); + + int32x4_t sum1 = vmlal_lane_s16(offset, vget_high_s16(s[0]), x_filter_0_3, 1); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s[1]), x_filter_0_3, 2); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s[2]), x_filter_0_3, 3); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s[3]), x_filter_4_7, 0); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s[4]), x_filter_4_7, 1); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s[5]), x_filter_4_7, 2); + + sum0 = vqrshlq_s32(sum0, shift_s32); + sum1 = vqrshlq_s32(sum1, shift_s32); + + return vcombine_u16(vqmovun_s32(sum0), vqmovun_s32(sum1)); +} + +static INLINE void highbd_convolve_2d_sr_horiz_6tap_neon( + const uint16_t *src_ptr, int src_stride, uint16_t *dst_ptr, int dst_stride, + int w, int h, const int16_t *x_filter_ptr, ConvolveParams *conv_params, + const int offset) { + // The smallest block height processed by the SIMD functions is 4, and the + // horizontal convolution needs to process an extra (filter_taps/2 - 1) lines + // for the vertical convolution.
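+ // For example, a 4-tap filter needs 4 / 2 - 1 = 1 extra line, giving the
+ // minimum intermediate height of 4 + 1 = 5 checked below.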
+ assert(h >= 5); + const int32x4_t shift_s32 = vdupq_n_s32(-conv_params->round_0); + const int32x4_t offset_s32 = vdupq_n_s32(offset); + + const int16x8_t x_filter = vld1q_s16(x_filter_ptr); + int height = h; + + do { + int width = w; + const int16_t *s = (const int16_t *)src_ptr; + uint16_t *d = dst_ptr; + + do { + int16x8_t s0[6], s1[6], s2[6], s3[6]; + load_s16_8x6(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3], + &s0[4], &s0[5]); + load_s16_8x6(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3], + &s1[4], &s1[5]); + load_s16_8x6(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3], + &s2[4], &s2[5]); + load_s16_8x6(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3], + &s3[4], &s3[5]); + + uint16x8_t d0 = + highbd_convolve6_8_2d_h(s0, x_filter, shift_s32, offset_s32); + uint16x8_t d1 = + highbd_convolve6_8_2d_h(s1, x_filter, shift_s32, offset_s32); + uint16x8_t d2 = + highbd_convolve6_8_2d_h(s2, x_filter, shift_s32, offset_s32); + uint16x8_t d3 = + highbd_convolve6_8_2d_h(s3, x_filter, shift_s32, offset_s32); + + store_u16_8x4(d, dst_stride, d0, d1, d2, d3); + + s += 8; + d += 8; + width -= 8; + } while (width != 0); + src_ptr += 4 * src_stride; + dst_ptr += 4 * dst_stride; + height -= 4; + } while (height > 4); + do { + int width = w; + const int16_t *s = (const int16_t *)src_ptr; + uint16_t *d = dst_ptr; + + do { + int16x8_t s0[6]; + load_s16_8x6(s, 1, &s0[0], &s0[1], &s0[2], &s0[3], &s0[4], &s0[5]); + + uint16x8_t d0 = + highbd_convolve6_8_2d_h(s0, x_filter, shift_s32, offset_s32); + vst1q_u16(d, d0); + + s += 8; + d += 8; + width -= 8; + } while (width != 0); + src_ptr += src_stride; + dst_ptr += dst_stride; + } while (--height != 0); +} + +static INLINE uint16x4_t highbd_convolve4_4_2d_h(const int16x4_t s[4], + const int16x4_t x_filter, + const int32x4_t shift_s32, + const int32x4_t offset) { + int32x4_t sum = vmlal_lane_s16(offset, s[0], x_filter, 0); + sum = vmlal_lane_s16(sum, s[1], x_filter, 1); + sum = vmlal_lane_s16(sum, s[2], x_filter, 2); + sum = vmlal_lane_s16(sum, s[3], x_filter, 3); + + sum = vqrshlq_s32(sum, shift_s32); + return vqmovun_s32(sum); +} + +static INLINE uint16x8_t highbd_convolve8_8_2d_h(const int16x8_t s[8], + const int16x8_t x_filter, + const int32x4_t shift_s32, + const int32x4_t offset) { + const int16x4_t x_filter_0_3 = vget_low_s16(x_filter); + const int16x4_t x_filter_4_7 = vget_high_s16(x_filter); + + int32x4_t sum0 = vmlal_lane_s16(offset, vget_low_s16(s[0]), x_filter_0_3, 0); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s[1]), x_filter_0_3, 1); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s[2]), x_filter_0_3, 2); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s[3]), x_filter_0_3, 3); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s[4]), x_filter_4_7, 0); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s[5]), x_filter_4_7, 1); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s[6]), x_filter_4_7, 2); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s[7]), x_filter_4_7, 3); + + int32x4_t sum1 = vmlal_lane_s16(offset, vget_high_s16(s[0]), x_filter_0_3, 0); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s[1]), x_filter_0_3, 1); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s[2]), x_filter_0_3, 2); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s[3]), x_filter_0_3, 3); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s[4]), x_filter_4_7, 0); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s[5]), x_filter_4_7, 1); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s[6]), x_filter_4_7, 2); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s[7]), x_filter_4_7, 3); + + sum0 = vqrshlq_s32(sum0, 
shift_s32); + sum1 = vqrshlq_s32(sum1, shift_s32); + + return vcombine_u16(vqmovun_s32(sum0), vqmovun_s32(sum1)); +} + +static INLINE void highbd_convolve_2d_sr_horiz_neon( + const uint16_t *src_ptr, int src_stride, uint16_t *dst_ptr, int dst_stride, + int w, int h, const int16_t *x_filter_ptr, ConvolveParams *conv_params, + const int offset) { + // The smallest block height processed by the SIMD functions is 4, and the + // horizontal convolution needs to process an extra (filter_taps/2 - 1) lines + // for the vertical convolution. + assert(h >= 5); + const int32x4_t shift_s32 = vdupq_n_s32(-conv_params->round_0); + const int32x4_t offset_s32 = vdupq_n_s32(offset); + + if (w == 4) { + // 4-tap filters are used for blocks having width <= 4. + const int16x4_t x_filter = vld1_s16(x_filter_ptr + 2); + const int16_t *s = (const int16_t *)(src_ptr + 1); + uint16_t *d = dst_ptr; + + do { + int16x4_t s0[4], s1[4], s2[4], s3[4]; + load_s16_4x4(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3]); + load_s16_4x4(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3]); + load_s16_4x4(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3]); + load_s16_4x4(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3]); + + uint16x4_t d0 = + highbd_convolve4_4_2d_h(s0, x_filter, shift_s32, offset_s32); + uint16x4_t d1 = + highbd_convolve4_4_2d_h(s1, x_filter, shift_s32, offset_s32); + uint16x4_t d2 = + highbd_convolve4_4_2d_h(s2, x_filter, shift_s32, offset_s32); + uint16x4_t d3 = + highbd_convolve4_4_2d_h(s3, x_filter, shift_s32, offset_s32); + + store_u16_4x4(d, dst_stride, d0, d1, d2, d3); + + s += 4 * src_stride; + d += 4 * dst_stride; + h -= 4; + } while (h > 4); + + do { + int16x4_t s0[4]; + load_s16_4x4(s, 1, &s0[0], &s0[1], &s0[2], &s0[3]); + + uint16x4_t d0 = + highbd_convolve4_4_2d_h(s0, x_filter, shift_s32, offset_s32); + + vst1_u16(d, d0); + + s += src_stride; + d += dst_stride; + } while (--h != 0); + } else { + const int16x8_t x_filter = vld1q_s16(x_filter_ptr); + int height = h; + + do { + int width = w; + const int16_t *s = (const int16_t *)src_ptr; + uint16_t *d = dst_ptr; + + do { + int16x8_t s0[8], s1[8], s2[8], s3[8]; + load_s16_8x8(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3], + &s0[4], &s0[5], &s0[6], &s0[7]); + load_s16_8x8(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3], + &s1[4], &s1[5], &s1[6], &s1[7]); + load_s16_8x8(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3], + &s2[4], &s2[5], &s2[6], &s2[7]); + load_s16_8x8(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3], + &s3[4], &s3[5], &s3[6], &s3[7]); + + uint16x8_t d0 = + highbd_convolve8_8_2d_h(s0, x_filter, shift_s32, offset_s32); + uint16x8_t d1 = + highbd_convolve8_8_2d_h(s1, x_filter, shift_s32, offset_s32); + uint16x8_t d2 = + highbd_convolve8_8_2d_h(s2, x_filter, shift_s32, offset_s32); + uint16x8_t d3 = + highbd_convolve8_8_2d_h(s3, x_filter, shift_s32, offset_s32); + + store_u16_8x4(d, dst_stride, d0, d1, d2, d3); + + s += 8; + d += 8; + width -= 8; + } while (width != 0); + src_ptr += 4 * src_stride; + dst_ptr += 4 * dst_stride; + height -= 4; + } while (height > 4); + + do { + int width = w; + const int16_t *s = (const int16_t *)src_ptr; + uint16_t *d = dst_ptr; + + do { + int16x8_t s0[8]; + load_s16_8x8(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3], + &s0[4], &s0[5], &s0[6], &s0[7]); + + uint16x8_t d0 = + highbd_convolve8_8_2d_h(s0, x_filter, shift_s32, offset_s32); + vst1q_u16(d, d0); + + s += 8; + d += 8; + width -= 8; + } while (width != 0); + src_ptr += src_stride; + dst_ptr += 
dst_stride; + } while (--height != 0); + } +} + +static INLINE uint16x4_t highbd_convolve12_4_2d_h(const int16x4_t s[12], + const int16x8_t x_filter_0_7, + const int16x4_t x_filter_8_11, + const int32x4_t shift_s32, + const int32x4_t offset) { + const int16x4_t x_filter_0_3 = vget_low_s16(x_filter_0_7); + const int16x4_t x_filter_4_7 = vget_high_s16(x_filter_0_7); + + int32x4_t sum = vmlal_lane_s16(offset, s[0], x_filter_0_3, 0); + sum = vmlal_lane_s16(sum, s[1], x_filter_0_3, 1); + sum = vmlal_lane_s16(sum, s[2], x_filter_0_3, 2); + sum = vmlal_lane_s16(sum, s[3], x_filter_0_3, 3); + sum = vmlal_lane_s16(sum, s[4], x_filter_4_7, 0); + sum = vmlal_lane_s16(sum, s[5], x_filter_4_7, 1); + sum = vmlal_lane_s16(sum, s[6], x_filter_4_7, 2); + sum = vmlal_lane_s16(sum, s[7], x_filter_4_7, 3); + sum = vmlal_lane_s16(sum, s[8], x_filter_8_11, 0); + sum = vmlal_lane_s16(sum, s[9], x_filter_8_11, 1); + sum = vmlal_lane_s16(sum, s[10], x_filter_8_11, 2); + sum = vmlal_lane_s16(sum, s[11], x_filter_8_11, 3); + + sum = vqrshlq_s32(sum, shift_s32); + return vqmovun_s32(sum); +} + +static INLINE uint16x8_t highbd_convolve12_8_2d_h(const int16x8_t s[12], + const int16x8_t x_filter_0_7, + const int16x4_t x_filter_8_11, + const int32x4_t shift_s32, + const int32x4_t offset) { + const int16x4_t x_filter_0_3 = vget_low_s16(x_filter_0_7); + const int16x4_t x_filter_4_7 = vget_high_s16(x_filter_0_7); + + int32x4_t sum0 = vmlal_lane_s16(offset, vget_low_s16(s[0]), x_filter_0_3, 0); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s[1]), x_filter_0_3, 1); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s[2]), x_filter_0_3, 2); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s[3]), x_filter_0_3, 3); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s[4]), x_filter_4_7, 0); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s[5]), x_filter_4_7, 1); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s[6]), x_filter_4_7, 2); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s[7]), x_filter_4_7, 3); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s[8]), x_filter_8_11, 0); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s[9]), x_filter_8_11, 1); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s[10]), x_filter_8_11, 2); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s[11]), x_filter_8_11, 3); + + int32x4_t sum1 = vmlal_lane_s16(offset, vget_high_s16(s[0]), x_filter_0_3, 0); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s[1]), x_filter_0_3, 1); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s[2]), x_filter_0_3, 2); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s[3]), x_filter_0_3, 3); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s[4]), x_filter_4_7, 0); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s[5]), x_filter_4_7, 1); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s[6]), x_filter_4_7, 2); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s[7]), x_filter_4_7, 3); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s[8]), x_filter_8_11, 0); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s[9]), x_filter_8_11, 1); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s[10]), x_filter_8_11, 2); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s[11]), x_filter_8_11, 3); + + sum0 = vqrshlq_s32(sum0, shift_s32); + sum1 = vqrshlq_s32(sum1, shift_s32); + + return vcombine_u16(vqmovun_s32(sum0), vqmovun_s32(sum1)); +} + +static INLINE void highbd_convolve_2d_sr_horiz_12tap_neon( + const uint16_t *src_ptr, int src_stride, uint16_t *dst_ptr, int dst_stride, + int w, int h, const int16_t *x_filter_ptr, ConvolveParams *conv_params, + const int offset) { + // The smallest block height processed by the SIMD functions is 4, and the + // 
horizontal convolution needs to process an extra (filter_taps/2 - 1) lines + // for the vertical convolution. + assert(h >= 5); + const int32x4_t shift_s32 = vdupq_n_s32(-conv_params->round_0); + const int16x8_t x_filter_0_7 = vld1q_s16(x_filter_ptr); + const int16x4_t x_filter_8_11 = vld1_s16(x_filter_ptr + 8); + const int32x4_t offset_s32 = vdupq_n_s32(offset); + + if (w == 4) { + const int16_t *s = (const int16_t *)src_ptr; + uint16_t *d = dst_ptr; + + do { + int16x4_t s0[12], s1[12], s2[12], s3[12]; + load_s16_4x12(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3], + &s0[4], &s0[5], &s0[6], &s0[7], &s0[8], &s0[9], &s0[10], + &s0[11]); + load_s16_4x12(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3], + &s1[4], &s1[5], &s1[6], &s1[7], &s1[8], &s1[9], &s1[10], + &s1[11]); + load_s16_4x12(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3], + &s2[4], &s2[5], &s2[6], &s2[7], &s2[8], &s2[9], &s2[10], + &s2[11]); + load_s16_4x12(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3], + &s3[4], &s3[5], &s3[6], &s3[7], &s3[8], &s3[9], &s3[10], + &s3[11]); + + uint16x4_t d0 = highbd_convolve12_4_2d_h(s0, x_filter_0_7, x_filter_8_11, + shift_s32, offset_s32); + uint16x4_t d1 = highbd_convolve12_4_2d_h(s1, x_filter_0_7, x_filter_8_11, + shift_s32, offset_s32); + uint16x4_t d2 = highbd_convolve12_4_2d_h(s2, x_filter_0_7, x_filter_8_11, + shift_s32, offset_s32); + uint16x4_t d3 = highbd_convolve12_4_2d_h(s3, x_filter_0_7, x_filter_8_11, + shift_s32, offset_s32); + + store_u16_4x4(d, dst_stride, d0, d1, d2, d3); + + s += 4 * src_stride; + d += 4 * dst_stride; + h -= 4; + } while (h > 4); + + do { + int16x4_t s0[12]; + load_s16_4x12(s, 1, &s0[0], &s0[1], &s0[2], &s0[3], &s0[4], &s0[5], + &s0[6], &s0[7], &s0[8], &s0[9], &s0[10], &s0[11]); + + uint16x4_t d0 = highbd_convolve12_4_2d_h(s0, x_filter_0_7, x_filter_8_11, + shift_s32, offset_s32); + + vst1_u16(d, d0); + + s += src_stride; + d += dst_stride; + } while (--h != 0); + } else { + int height = h; + + do { + int width = w; + const int16_t *s = (const int16_t *)src_ptr; + uint16_t *d = dst_ptr; + + do { + int16x8_t s0[12], s1[12], s2[12], s3[12]; + load_s16_8x12(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3], + &s0[4], &s0[5], &s0[6], &s0[7], &s0[8], &s0[9], &s0[10], + &s0[11]); + load_s16_8x12(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3], + &s1[4], &s1[5], &s1[6], &s1[7], &s1[8], &s1[9], &s1[10], + &s1[11]); + load_s16_8x12(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3], + &s2[4], &s2[5], &s2[6], &s2[7], &s2[8], &s2[9], &s2[10], + &s2[11]); + load_s16_8x12(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3], + &s3[4], &s3[5], &s3[6], &s3[7], &s3[8], &s3[9], &s3[10], + &s3[11]); + + uint16x8_t d0 = highbd_convolve12_8_2d_h( + s0, x_filter_0_7, x_filter_8_11, shift_s32, offset_s32); + uint16x8_t d1 = highbd_convolve12_8_2d_h( + s1, x_filter_0_7, x_filter_8_11, shift_s32, offset_s32); + uint16x8_t d2 = highbd_convolve12_8_2d_h( + s2, x_filter_0_7, x_filter_8_11, shift_s32, offset_s32); + uint16x8_t d3 = highbd_convolve12_8_2d_h( + s3, x_filter_0_7, x_filter_8_11, shift_s32, offset_s32); + + store_u16_8x4(d, dst_stride, d0, d1, d2, d3); + + s += 8; + d += 8; + width -= 8; + } while (width != 0); + src_ptr += 4 * src_stride; + dst_ptr += 4 * dst_stride; + height -= 4; + } while (height > 4); + + do { + int width = w; + const int16_t *s = (const int16_t *)src_ptr; + uint16_t *d = dst_ptr; + + do { + int16x8_t s0[12]; + load_s16_8x12(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3], + &s0[4], &s0[5], &s0[6], &s0[7], 
&s0[8], &s0[9], &s0[10], + &s0[11]); + + uint16x8_t d0 = highbd_convolve12_8_2d_h( + s0, x_filter_0_7, x_filter_8_11, shift_s32, offset_s32); + vst1q_u16(d, d0); + + s += 8; + d += 8; + width -= 8; + } while (width > 0); + src_ptr += src_stride; + dst_ptr += dst_stride; + } while (--height != 0); + } +} + +void av1_highbd_convolve_2d_sr_neon(const uint16_t *src, int src_stride, + uint16_t *dst, int dst_stride, int w, int h, + const InterpFilterParams *filter_params_x, + const InterpFilterParams *filter_params_y, + const int subpel_x_qn, + const int subpel_y_qn, + ConvolveParams *conv_params, int bd) { + if (w == 2 || h == 2) { + av1_highbd_convolve_2d_sr_c(src, src_stride, dst, dst_stride, w, h, + filter_params_x, filter_params_y, subpel_x_qn, + subpel_y_qn, conv_params, bd); + return; + } + DECLARE_ALIGNED(16, uint16_t, + im_block[(MAX_SB_SIZE + MAX_FILTER_TAP) * MAX_SB_SIZE]); + const int x_filter_taps = get_filter_tap(filter_params_x, subpel_x_qn); + const int clamped_x_taps = x_filter_taps < 6 ? 6 : x_filter_taps; + + const int y_filter_taps = get_filter_tap(filter_params_y, subpel_y_qn); + const int clamped_y_taps = y_filter_taps < 6 ? 6 : y_filter_taps; + const int im_h = h + clamped_y_taps - 1; + const int im_stride = MAX_SB_SIZE; + const int vert_offset = clamped_y_taps / 2 - 1; + const int horiz_offset = clamped_x_taps / 2 - 1; + const int x_offset_initial = (1 << (bd + FILTER_BITS - 1)); + const int y_offset_bits = bd + 2 * FILTER_BITS - conv_params->round_0; + // The extra shim of (1 << (conv_params->round_1 - 1)) allows us to do a + // simple shift left instead of a rounding saturating shift left. + const int y_offset = + (1 << (conv_params->round_1 - 1)) - (1 << (y_offset_bits - 1)); + + const uint16_t *src_ptr = src - vert_offset * src_stride - horiz_offset; + + const int16_t *x_filter_ptr = av1_get_interp_filter_subpel_kernel( + filter_params_x, subpel_x_qn & SUBPEL_MASK); + const int16_t *y_filter_ptr = av1_get_interp_filter_subpel_kernel( + filter_params_y, subpel_y_qn & SUBPEL_MASK); + + if (x_filter_taps > 8) { + highbd_convolve_2d_sr_horiz_12tap_neon(src_ptr, src_stride, im_block, + im_stride, w, im_h, x_filter_ptr, + conv_params, x_offset_initial); + + highbd_convolve_2d_sr_vert_12tap_neon(im_block, im_stride, dst, dst_stride, + w, h, y_filter_ptr, conv_params, bd, + y_offset); + return; + } + if (x_filter_taps <= 6 && w != 4) { + highbd_convolve_2d_sr_horiz_6tap_neon(src_ptr, src_stride, im_block, + im_stride, w, im_h, x_filter_ptr, + conv_params, x_offset_initial); + } else { + highbd_convolve_2d_sr_horiz_neon(src_ptr, src_stride, im_block, im_stride, + w, im_h, x_filter_ptr, conv_params, + x_offset_initial); + } + + if (y_filter_taps <= 6) { + highbd_convolve_2d_sr_vert_6tap_neon(im_block, im_stride, dst, dst_stride, + w, h, y_filter_ptr, conv_params, bd, + y_offset); + } else { + highbd_convolve_2d_sr_vert_8tap_neon(im_block, im_stride, dst, dst_stride, + w, h, y_filter_ptr, conv_params, bd, + y_offset); + } +} + +// Filter used is [64, 64]. 
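+// With FILTER_BITS == 7, the [64, 64] tap pair reduces to a rounding average:
+// (64 * s0 + 64 * s1 + (1 << 6)) >> 7 == (s0 + s1 + 1) >> 1, which is exactly
+// what the vrhadd_u16/vrhaddq_u16 instructions below compute.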
+void av1_highbd_convolve_x_sr_intrabc_neon( + const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride, int w, + int h, const InterpFilterParams *filter_params_x, const int subpel_x_qn, + ConvolveParams *conv_params, int bd) { + assert(subpel_x_qn == 8); + assert(filter_params_x->taps == 2); + assert((conv_params->round_0 + conv_params->round_1) == 2 * FILTER_BITS); + (void)filter_params_x; + (void)subpel_x_qn; + (void)conv_params; + (void)bd; + + if (w <= 4) { + do { + uint16x4_t s0 = vld1_u16(src); + uint16x4_t s1 = vld1_u16(src + 1); + + uint16x4_t d0 = vrhadd_u16(s0, s1); + + if (w == 2) { + store_u16_2x1(dst, d0); + } else { + vst1_u16(dst, d0); + } + + src += src_stride; + dst += dst_stride; + } while (--h != 0); + } else { + do { + const uint16_t *src_ptr = src; + uint16_t *dst_ptr = dst; + int width = w; + + do { + uint16x8_t s0 = vld1q_u16(src_ptr); + uint16x8_t s1 = vld1q_u16(src_ptr + 1); + + uint16x8_t d0 = vrhaddq_u16(s0, s1); + + vst1q_u16(dst_ptr, d0); + + src_ptr += 8; + dst_ptr += 8; + width -= 8; + } while (width != 0); + src += src_stride; + dst += dst_stride; + } while (--h != 0); + } +} + +// Filter used is [64, 64]. +void av1_highbd_convolve_y_sr_intrabc_neon( + const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride, int w, + int h, const InterpFilterParams *filter_params_y, const int subpel_y_qn, + int bd) { + assert(subpel_y_qn == 8); + assert(filter_params_y->taps == 2); + (void)filter_params_y; + (void)subpel_y_qn; + (void)bd; + + if (w <= 4) { + do { + uint16x4_t s0 = vld1_u16(src); + uint16x4_t s1 = vld1_u16(src + src_stride); + + uint16x4_t d0 = vrhadd_u16(s0, s1); + + if (w == 2) { + store_u16_2x1(dst, d0); + } else { + vst1_u16(dst, d0); + } + + src += src_stride; + dst += dst_stride; + } while (--h != 0); + } else { + do { + const uint16_t *src_ptr = src; + uint16_t *dst_ptr = dst; + int height = h; + + do { + uint16x8_t s0 = vld1q_u16(src_ptr); + uint16x8_t s1 = vld1q_u16(src_ptr + src_stride); + + uint16x8_t d0 = vrhaddq_u16(s0, s1); + + vst1q_u16(dst_ptr, d0); + + src_ptr += src_stride; + dst_ptr += dst_stride; + } while (--height != 0); + src += 8; + dst += 8; + w -= 8; + } while (w != 0); + } +} + +// Both horizontal and vertical passes use the same 2-tap filter: [64, 64]. +void av1_highbd_convolve_2d_sr_intrabc_neon( + const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride, int w, + int h, const InterpFilterParams *filter_params_x, + const InterpFilterParams *filter_params_y, const int subpel_x_qn, + const int subpel_y_qn, ConvolveParams *conv_params, int bd) { + assert(subpel_x_qn == 8); + assert(subpel_y_qn == 8); + assert(filter_params_x->taps == 2 && filter_params_y->taps == 2); + assert((conv_params->round_0 + conv_params->round_1) == 2 * FILTER_BITS); + assert(w <= MAX_SB_SIZE && h <= MAX_SB_SIZE); + (void)filter_params_x; + (void)subpel_x_qn; + (void)filter_params_y; + (void)subpel_y_qn; + (void)conv_params; + (void)bd; + + DECLARE_ALIGNED(16, uint16_t, + im_block[(MAX_SB_SIZE + MAX_FILTER_TAP - 1) * MAX_SB_SIZE]); + int im_h = h + 1; + int im_stride = MAX_SB_SIZE; + + uint16x8_t vert_offset = vdupq_n_u16(1); + + uint16_t *im = im_block; + + // Horizontal filter. + if (w <= 4) { + do { + uint16x4_t s0 = vld1_u16(src); + uint16x4_t s1 = vld1_u16(src + 1); + + uint16x4_t d0 = vadd_u16(s0, s1); + + // Safe to store the whole vector, the im buffer is big enough. 
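+ // The intermediate buffer holds the raw sums: for 12-bit input the maximum
+ // is 2 * ((1 << 12) - 1) = 8190, which fits in uint16_t, so no rounding or
+ // clipping is needed here. All rounding is deferred to the vertical pass,
+ // where (((a + b) >> 1) + 1) >> 1 == (a + b + 2) >> 2, the correctly
+ // rounded result of applying [64, 64] in both directions.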
+ vst1_u16(im, d0); + + src += src_stride; + im += im_stride; + } while (--im_h != 0); + } else { + do { + const uint16_t *src_ptr = src; + uint16_t *im_ptr = im; + int width = w; + + do { + uint16x8_t s0 = vld1q_u16(src_ptr); + uint16x8_t s1 = vld1q_u16(src_ptr + 1); + + uint16x8_t d0 = vaddq_u16(s0, s1); + + vst1q_u16(im_ptr, d0); + + src_ptr += 8; + im_ptr += 8; + width -= 8; + } while (width != 0); + src += src_stride; + im += im_stride; + } while (--im_h != 0); + } + + im = im_block; + + // Vertical filter. + if (w <= 4) { + do { + uint16x4_t s0 = vld1_u16(im); + uint16x4_t s1 = vld1_u16(im + im_stride); + + uint16x4_t d0 = vhadd_u16(s0, s1); + d0 = vhadd_u16(d0, vget_low_u16(vert_offset)); + + if (w == 2) { + store_u16_2x1(dst, d0); + } else { + vst1_u16(dst, d0); + } + + im += im_stride; + dst += dst_stride; + } while (--h != 0); + } else { + do { + uint16_t *im_ptr = im; + uint16_t *dst_ptr = dst; + int height = h; + + do { + uint16x8_t s0 = vld1q_u16(im_ptr); + uint16x8_t s1 = vld1q_u16(im_ptr + im_stride); + + uint16x8_t d0 = vhaddq_u16(s0, s1); + d0 = vhaddq_u16(d0, vert_offset); + + vst1q_u16(dst_ptr, d0); + + im_ptr += im_stride; + dst_ptr += dst_stride; + } while (--height != 0); + im += 8; + dst += 8; + w -= 8; + } while (w != 0); + } +} diff --git a/third_party/aom/av1/common/arm/highbd_convolve_neon.h b/third_party/aom/av1/common/arm/highbd_convolve_neon.h new file mode 100644 index 0000000000..08b2bda4e5 --- /dev/null +++ b/third_party/aom/av1/common/arm/highbd_convolve_neon.h @@ -0,0 +1,148 @@ +/* + * Copyright (c) 2023, Alliance for Open Media. All rights reserved + * + * This source code is subject to the terms of the BSD 2 Clause License and + * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License + * was not distributed with this source code in the LICENSE file, you can + * obtain it at www.aomedia.org/license/software. If the Alliance for Open + * Media Patent License 1.0 was not distributed with this source code in the + * PATENTS file, you can obtain it at www.aomedia.org/license/patent. 
+ */ + +#ifndef AOM_AV1_COMMON_ARM_HIGHBD_CONVOLVE_NEON_H_ +#define AOM_AV1_COMMON_ARM_HIGHBD_CONVOLVE_NEON_H_ + +#include <arm_neon.h> + +#include "aom_dsp/arm/mem_neon.h" +#include "aom_dsp/arm/transpose_neon.h" +#include "av1/common/convolve.h" + +static INLINE int32x4_t highbd_convolve8_4_s32( + const int16x4_t s0, const int16x4_t s1, const int16x4_t s2, + const int16x4_t s3, const int16x4_t s4, const int16x4_t s5, + const int16x4_t s6, const int16x4_t s7, const int16x8_t y_filter, + const int32x4_t offset) { + const int16x4_t y_filter_lo = vget_low_s16(y_filter); + const int16x4_t y_filter_hi = vget_high_s16(y_filter); + + int32x4_t sum = vmlal_lane_s16(offset, s0, y_filter_lo, 0); + sum = vmlal_lane_s16(sum, s1, y_filter_lo, 1); + sum = vmlal_lane_s16(sum, s2, y_filter_lo, 2); + sum = vmlal_lane_s16(sum, s3, y_filter_lo, 3); + sum = vmlal_lane_s16(sum, s4, y_filter_hi, 0); + sum = vmlal_lane_s16(sum, s5, y_filter_hi, 1); + sum = vmlal_lane_s16(sum, s6, y_filter_hi, 2); + sum = vmlal_lane_s16(sum, s7, y_filter_hi, 3); + + return sum; +} + +static INLINE uint16x4_t highbd_convolve8_4_sr_s32_s16( + const int16x4_t s0, const int16x4_t s1, const int16x4_t s2, + const int16x4_t s3, const int16x4_t s4, const int16x4_t s5, + const int16x4_t s6, const int16x4_t s7, const int16x8_t y_filter, + const int32x4_t shift_s32, const int32x4_t offset) { + int32x4_t sum = + highbd_convolve8_4_s32(s0, s1, s2, s3, s4, s5, s6, s7, y_filter, offset); + + sum = vqrshlq_s32(sum, shift_s32); + return vqmovun_s32(sum); +} + +// Like above but also perform round shifting and subtract correction term +static INLINE uint16x4_t highbd_convolve8_4_srsub_s32_s16( + const int16x4_t s0, const int16x4_t s1, const int16x4_t s2, + const int16x4_t s3, const int16x4_t s4, const int16x4_t s5, + const int16x4_t s6, const int16x4_t s7, const int16x8_t y_filter, + const int32x4_t round_shift, const int32x4_t offset, + const int32x4_t correction) { + int32x4_t sum = + highbd_convolve8_4_s32(s0, s1, s2, s3, s4, s5, s6, s7, y_filter, offset); + + sum = vsubq_s32(vqrshlq_s32(sum, round_shift), correction); + return vqmovun_s32(sum); +} + +static INLINE void highbd_convolve8_8_s32( + const int16x8_t s0, const int16x8_t s1, const int16x8_t s2, + const int16x8_t s3, const int16x8_t s4, const int16x8_t s5, + const int16x8_t s6, const int16x8_t s7, const int16x8_t y_filter, + const int32x4_t offset, int32x4_t *sum0, int32x4_t *sum1) { + const int16x4_t y_filter_lo = vget_low_s16(y_filter); + const int16x4_t y_filter_hi = vget_high_s16(y_filter); + + *sum0 = vmlal_lane_s16(offset, vget_low_s16(s0), y_filter_lo, 0); + *sum0 = vmlal_lane_s16(*sum0, vget_low_s16(s1), y_filter_lo, 1); + *sum0 = vmlal_lane_s16(*sum0, vget_low_s16(s2), y_filter_lo, 2); + *sum0 = vmlal_lane_s16(*sum0, vget_low_s16(s3), y_filter_lo, 3); + *sum0 = vmlal_lane_s16(*sum0, vget_low_s16(s4), y_filter_hi, 0); + *sum0 = vmlal_lane_s16(*sum0, vget_low_s16(s5), y_filter_hi, 1); + *sum0 = vmlal_lane_s16(*sum0, vget_low_s16(s6), y_filter_hi, 2); + *sum0 = vmlal_lane_s16(*sum0, vget_low_s16(s7), y_filter_hi, 3); + + *sum1 = vmlal_lane_s16(offset, vget_high_s16(s0), y_filter_lo, 0); + *sum1 = vmlal_lane_s16(*sum1, vget_high_s16(s1), y_filter_lo, 1); + *sum1 = vmlal_lane_s16(*sum1, vget_high_s16(s2), y_filter_lo, 2); + *sum1 = vmlal_lane_s16(*sum1, vget_high_s16(s3), y_filter_lo, 3); + *sum1 = vmlal_lane_s16(*sum1, vget_high_s16(s4), y_filter_hi, 0); + *sum1 = vmlal_lane_s16(*sum1, vget_high_s16(s5), y_filter_hi, 1); + *sum1 = vmlal_lane_s16(*sum1, vget_high_s16(s6), 
y_filter_hi, 2); + *sum1 = vmlal_lane_s16(*sum1, vget_high_s16(s7), y_filter_hi, 3); +} + +// Like above but also perform round shifting and subtract correction term +static INLINE uint16x8_t highbd_convolve8_8_srsub_s32_s16( + const int16x8_t s0, const int16x8_t s1, const int16x8_t s2, + const int16x8_t s3, const int16x8_t s4, const int16x8_t s5, + const int16x8_t s6, const int16x8_t s7, const int16x8_t y_filter, + const int32x4_t round_shift, const int32x4_t offset, + const int32x4_t correction) { + int32x4_t sum0; + int32x4_t sum1; + highbd_convolve8_8_s32(s0, s1, s2, s3, s4, s5, s6, s7, y_filter, offset, + &sum0, &sum1); + + sum0 = vsubq_s32(vqrshlq_s32(sum0, round_shift), correction); + sum1 = vsubq_s32(vqrshlq_s32(sum1, round_shift), correction); + + return vcombine_u16(vqmovun_s32(sum0), vqmovun_s32(sum1)); +} + +static INLINE int32x4_t highbd_convolve8_2d_scale_horiz4x8_s32( + const int16x8_t s0, const int16x8_t s1, const int16x8_t s2, + const int16x8_t s3, const int16x4_t *filters_lo, + const int16x4_t *filters_hi, const int32x4_t offset) { + int16x4_t s_lo[] = { vget_low_s16(s0), vget_low_s16(s1), vget_low_s16(s2), + vget_low_s16(s3) }; + int16x4_t s_hi[] = { vget_high_s16(s0), vget_high_s16(s1), vget_high_s16(s2), + vget_high_s16(s3) }; + + transpose_array_inplace_u16_4x4((uint16x4_t *)s_lo); + transpose_array_inplace_u16_4x4((uint16x4_t *)s_hi); + + int32x4_t sum = vmlal_s16(offset, s_lo[0], filters_lo[0]); + sum = vmlal_s16(sum, s_lo[1], filters_lo[1]); + sum = vmlal_s16(sum, s_lo[2], filters_lo[2]); + sum = vmlal_s16(sum, s_lo[3], filters_lo[3]); + sum = vmlal_s16(sum, s_hi[0], filters_hi[0]); + sum = vmlal_s16(sum, s_hi[1], filters_hi[1]); + sum = vmlal_s16(sum, s_hi[2], filters_hi[2]); + sum = vmlal_s16(sum, s_hi[3], filters_hi[3]); + + return sum; +} + +static INLINE uint16x4_t highbd_convolve8_2d_scale_horiz4x8_s32_s16( + const int16x8_t s0, const int16x8_t s1, const int16x8_t s2, + const int16x8_t s3, const int16x4_t *filters_lo, + const int16x4_t *filters_hi, const int32x4_t shift_s32, + const int32x4_t offset) { + int32x4_t sum = highbd_convolve8_2d_scale_horiz4x8_s32( + s0, s1, s2, s3, filters_lo, filters_hi, offset); + + sum = vqrshlq_s32(sum, shift_s32); + return vqmovun_s32(sum); +} + +#endif // AOM_AV1_COMMON_ARM_HIGHBD_CONVOLVE_NEON_H_ diff --git a/third_party/aom/av1/common/arm/highbd_convolve_scale_neon.c b/third_party/aom/av1/common/arm/highbd_convolve_scale_neon.c new file mode 100644 index 0000000000..702c651536 --- /dev/null +++ b/third_party/aom/av1/common/arm/highbd_convolve_scale_neon.c @@ -0,0 +1,552 @@ +/* + * Copyright (c) 2023, Alliance for Open Media. All rights reserved + * + * This source code is subject to the terms of the BSD 2 Clause License and + * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License + * was not distributed with this source code in the LICENSE file, you can + * obtain it at www.aomedia.org/license/software. If the Alliance for Open + * Media Patent License 1.0 was not distributed with this source code in the + * PATENTS file, you can obtain it at www.aomedia.org/license/patent. 
+ */ + +#include <assert.h> +#include <arm_neon.h> + +#include "config/aom_config.h" +#include "config/av1_rtcd.h" + +#include "aom_dsp/aom_dsp_common.h" +#include "aom_dsp/arm/mem_neon.h" +#include "aom_dsp/arm/transpose_neon.h" +#include "aom_ports/mem.h" +#include "av1/common/convolve.h" +#include "av1/common/filter.h" +#include "av1/common/arm/highbd_convolve_neon.h" + +static INLINE void highbd_dist_wtd_comp_avg_neon( + const uint16_t *src_ptr, int src_stride, uint16_t *dst_ptr, int dst_stride, + int w, int h, ConvolveParams *conv_params, const int round_bits, + const int offset, const int bd) { + CONV_BUF_TYPE *ref_ptr = conv_params->dst; + const int ref_stride = conv_params->dst_stride; + const int32x4_t round_shift = vdupq_n_s32(-round_bits); + const uint32x4_t offset_vec = vdupq_n_u32(offset); + const uint16x8_t max = vdupq_n_u16((1 << bd) - 1); + uint16x4_t fwd_offset = vdup_n_u16(conv_params->fwd_offset); + uint16x4_t bck_offset = vdup_n_u16(conv_params->bck_offset); + + // Weighted averaging + if (w <= 4) { + do { + const uint16x4_t src = vld1_u16(src_ptr); + const uint16x4_t ref = vld1_u16(ref_ptr); + + uint32x4_t wtd_avg = vmull_u16(ref, fwd_offset); + wtd_avg = vmlal_u16(wtd_avg, src, bck_offset); + wtd_avg = vshrq_n_u32(wtd_avg, DIST_PRECISION_BITS); + int32x4_t d0 = vreinterpretq_s32_u32(vsubq_u32(wtd_avg, offset_vec)); + d0 = vqrshlq_s32(d0, round_shift); + + uint16x4_t d0_u16 = vqmovun_s32(d0); + d0_u16 = vmin_u16(d0_u16, vget_low_u16(max)); + + if (w == 2) { + store_u16_2x1(dst_ptr, d0_u16); + } else { + vst1_u16(dst_ptr, d0_u16); + } + + src_ptr += src_stride; + dst_ptr += dst_stride; + ref_ptr += ref_stride; + } while (--h != 0); + } else { + do { + int width = w; + const uint16_t *src = src_ptr; + const uint16_t *ref = ref_ptr; + uint16_t *dst = dst_ptr; + do { + const uint16x8_t s = vld1q_u16(src); + const uint16x8_t r = vld1q_u16(ref); + + uint32x4_t wtd_avg0 = vmull_u16(vget_low_u16(r), fwd_offset); + wtd_avg0 = vmlal_u16(wtd_avg0, vget_low_u16(s), bck_offset); + wtd_avg0 = vshrq_n_u32(wtd_avg0, DIST_PRECISION_BITS); + int32x4_t d0 = vreinterpretq_s32_u32(vsubq_u32(wtd_avg0, offset_vec)); + d0 = vqrshlq_s32(d0, round_shift); + + uint32x4_t wtd_avg1 = vmull_u16(vget_high_u16(r), fwd_offset); + wtd_avg1 = vmlal_u16(wtd_avg1, vget_high_u16(s), bck_offset); + wtd_avg1 = vshrq_n_u32(wtd_avg1, DIST_PRECISION_BITS); + int32x4_t d1 = vreinterpretq_s32_u32(vsubq_u32(wtd_avg1, offset_vec)); + d1 = vqrshlq_s32(d1, round_shift); + + uint16x8_t d01 = vcombine_u16(vqmovun_s32(d0), vqmovun_s32(d1)); + d01 = vminq_u16(d01, max); + vst1q_u16(dst, d01); + + src += 8; + ref += 8; + dst += 8; + width -= 8; + } while (width != 0); + src_ptr += src_stride; + dst_ptr += dst_stride; + ref_ptr += ref_stride; + } while (--h != 0); + } +} + +static INLINE void highbd_comp_avg_neon(const uint16_t *src_ptr, int src_stride, + uint16_t *dst_ptr, int dst_stride, + int w, int h, + ConvolveParams *conv_params, + const int round_bits, const int offset, + const int bd) { + CONV_BUF_TYPE *ref_ptr = conv_params->dst; + const int ref_stride = conv_params->dst_stride; + const int32x4_t round_shift = vdupq_n_s32(-round_bits); + const uint16x4_t offset_vec = vdup_n_u16(offset); + const uint16x8_t max = vdupq_n_u16((1 << bd) - 1); + + if (w <= 4) { + do { + const uint16x4_t src = vld1_u16(src_ptr); + const uint16x4_t ref = vld1_u16(ref_ptr); + + uint16x4_t avg = vhadd_u16(src, ref); + int32x4_t d0 = vreinterpretq_s32_u32(vsubl_u16(avg, offset_vec)); + d0 = vqrshlq_s32(d0, round_shift); + + uint16x4_t 
d0_u16 = vqmovun_s32(d0); + d0_u16 = vmin_u16(d0_u16, vget_low_u16(max)); + + if (w == 2) { + store_u16_2x1(dst_ptr, d0_u16); + } else { + vst1_u16(dst_ptr, d0_u16); + } + + src_ptr += src_stride; + ref_ptr += ref_stride; + dst_ptr += dst_stride; + } while (--h != 0); + } else { + do { + int width = w; + const uint16_t *src = src_ptr; + const uint16_t *ref = ref_ptr; + uint16_t *dst = dst_ptr; + do { + const uint16x8_t s = vld1q_u16(src); + const uint16x8_t r = vld1q_u16(ref); + + uint16x8_t avg = vhaddq_u16(s, r); + int32x4_t d0_lo = + vreinterpretq_s32_u32(vsubl_u16(vget_low_u16(avg), offset_vec)); + int32x4_t d0_hi = + vreinterpretq_s32_u32(vsubl_u16(vget_high_u16(avg), offset_vec)); + d0_lo = vqrshlq_s32(d0_lo, round_shift); + d0_hi = vqrshlq_s32(d0_hi, round_shift); + + uint16x8_t d0 = vcombine_u16(vqmovun_s32(d0_lo), vqmovun_s32(d0_hi)); + d0 = vminq_u16(d0, max); + vst1q_u16(dst, d0); + + src += 8; + ref += 8; + dst += 8; + width -= 8; + } while (width != 0); + + src_ptr += src_stride; + ref_ptr += ref_stride; + dst_ptr += dst_stride; + } while (--h != 0); + } +} + +static INLINE void highbd_convolve_2d_x_scale_8tap_neon( + const uint16_t *src_ptr, int src_stride, uint16_t *dst_ptr, int dst_stride, + int w, int h, const int subpel_x_qn, const int x_step_qn, + const InterpFilterParams *filter_params, ConvolveParams *conv_params, + const int offset) { + static const uint32_t kIdx[4] = { 0, 1, 2, 3 }; + const uint32x4_t idx = vld1q_u32(kIdx); + const uint32x4_t subpel_mask = vdupq_n_u32(SCALE_SUBPEL_MASK); + const int32x4_t shift_s32 = vdupq_n_s32(-conv_params->round_0); + const int32x4_t offset_s32 = vdupq_n_s32(offset); + + if (w <= 4) { + int height = h; + uint16_t *d = dst_ptr; + + do { + int x_qn = subpel_x_qn; + + // Load 4 src vectors at a time, they might be the same, but we have to + // calculate the indices anyway. Doing it in SIMD and then storing the + // indices is faster than having to calculate the expression + // &src_ptr[((x_qn + 0*x_step_qn) >> SCALE_SUBPEL_BITS)] 4 times + // Ideally this should be a gather using the indices, but NEON does not + // have that, so have to emulate + const uint32x4_t xqn_idx = vmlaq_n_u32(vdupq_n_u32(x_qn), idx, x_step_qn); + // We have to multiply x2 to get the actual pointer as sizeof(uint16_t) = + // 2 + const uint32x4_t src_idx_u32 = + vshlq_n_u32(vshrq_n_u32(xqn_idx, SCALE_SUBPEL_BITS), 1); +#if AOM_ARCH_AARCH64 + uint64x2_t src4[2]; + src4[0] = vaddw_u32(vdupq_n_u64((const uint64_t)src_ptr), + vget_low_u32(src_idx_u32)); + src4[1] = vaddw_u32(vdupq_n_u64((const uint64_t)src_ptr), + vget_high_u32(src_idx_u32)); + int16_t *src4_ptr[4]; + uint64_t *tmp_ptr = (uint64_t *)&src4_ptr; + vst1q_u64(tmp_ptr, src4[0]); + vst1q_u64(tmp_ptr + 2, src4[1]); +#else + uint32x4_t src4; + src4 = vaddq_u32(vdupq_n_u32((const uint32_t)src_ptr), src_idx_u32); + int16_t *src4_ptr[4]; + uint32_t *tmp_ptr = (uint32_t *)&src4_ptr; + vst1q_u32(tmp_ptr, src4); +#endif // AOM_ARCH_AARCH64 + // Same for the filter vectors + const int32x4_t filter_idx_s32 = vreinterpretq_s32_u32( + vshrq_n_u32(vandq_u32(xqn_idx, subpel_mask), SCALE_EXTRA_BITS)); + int32_t x_filter4_idx[4]; + vst1q_s32(x_filter4_idx, filter_idx_s32); + const int16_t *x_filter4_ptr[4]; + + // Load source + int16x8_t s0 = vld1q_s16(src4_ptr[0]); + int16x8_t s1 = vld1q_s16(src4_ptr[1]); + int16x8_t s2 = vld1q_s16(src4_ptr[2]); + int16x8_t s3 = vld1q_s16(src4_ptr[3]); + + // We could easily do this using SIMD as well instead of calling the + // inline function 4 times. 
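+ // Equivalent scalar form of the address and filter-index computation above,
+ // for reference:
+ //   src4_ptr[i] = (int16_t *)(src_ptr + ((x_qn + i * x_step_qn) >> SCALE_SUBPEL_BITS));
+ //   x_filter4_idx[i] = ((x_qn + i * x_step_qn) & SCALE_SUBPEL_MASK) >> SCALE_EXTRA_BITS;
+ //   x_filter4_ptr[i] = av1_get_interp_filter_subpel_kernel(filter_params, x_filter4_idx[i]);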
+ x_filter4_ptr[0] = + av1_get_interp_filter_subpel_kernel(filter_params, x_filter4_idx[0]); + x_filter4_ptr[1] = + av1_get_interp_filter_subpel_kernel(filter_params, x_filter4_idx[1]); + x_filter4_ptr[2] = + av1_get_interp_filter_subpel_kernel(filter_params, x_filter4_idx[2]); + x_filter4_ptr[3] = + av1_get_interp_filter_subpel_kernel(filter_params, x_filter4_idx[3]); + + // Actually load the filters + const int16x8_t x_filter0 = vld1q_s16(x_filter4_ptr[0]); + const int16x8_t x_filter1 = vld1q_s16(x_filter4_ptr[1]); + const int16x8_t x_filter2 = vld1q_s16(x_filter4_ptr[2]); + const int16x8_t x_filter3 = vld1q_s16(x_filter4_ptr[3]); + + // Group low and high parts and transpose + int16x4_t filters_lo[] = { vget_low_s16(x_filter0), + vget_low_s16(x_filter1), + vget_low_s16(x_filter2), + vget_low_s16(x_filter3) }; + int16x4_t filters_hi[] = { vget_high_s16(x_filter0), + vget_high_s16(x_filter1), + vget_high_s16(x_filter2), + vget_high_s16(x_filter3) }; + transpose_array_inplace_u16_4x4((uint16x4_t *)filters_lo); + transpose_array_inplace_u16_4x4((uint16x4_t *)filters_hi); + + // Run the 2D Scale convolution + uint16x4_t d0 = highbd_convolve8_2d_scale_horiz4x8_s32_s16( + s0, s1, s2, s3, filters_lo, filters_hi, shift_s32, offset_s32); + + if (w == 2) { + store_u16_2x1(d, d0); + } else { + vst1_u16(d, d0); + } + + src_ptr += src_stride; + d += dst_stride; + height--; + } while (height > 0); + } else { + int height = h; + + do { + int width = w; + int x_qn = subpel_x_qn; + uint16_t *d = dst_ptr; + const uint16_t *s = src_ptr; + + do { + // Load 4 src vectors at a time, they might be the same, but we have to + // calculate the indices anyway. Doing it in SIMD and then storing the + // indices is faster than having to calculate the expression + // &src_ptr[((x_qn + 0*x_step_qn) >> SCALE_SUBPEL_BITS)] 4 times + // Ideally this should be a gather using the indices, but NEON does not + // have that, so have to emulate + const uint32x4_t xqn_idx = + vmlaq_n_u32(vdupq_n_u32(x_qn), idx, x_step_qn); + // We have to multiply x2 to get the actual pointer as sizeof(uint16_t) + // = 2 + const uint32x4_t src_idx_u32 = + vshlq_n_u32(vshrq_n_u32(xqn_idx, SCALE_SUBPEL_BITS), 1); +#if AOM_ARCH_AARCH64 + uint64x2_t src4[2]; + src4[0] = vaddw_u32(vdupq_n_u64((const uint64_t)s), + vget_low_u32(src_idx_u32)); + src4[1] = vaddw_u32(vdupq_n_u64((const uint64_t)s), + vget_high_u32(src_idx_u32)); + int16_t *src4_ptr[4]; + uint64_t *tmp_ptr = (uint64_t *)&src4_ptr; + vst1q_u64(tmp_ptr, src4[0]); + vst1q_u64(tmp_ptr + 2, src4[1]); +#else + uint32x4_t src4; + src4 = vaddq_u32(vdupq_n_u32((const uint32_t)s), src_idx_u32); + int16_t *src4_ptr[4]; + uint32_t *tmp_ptr = (uint32_t *)&src4_ptr; + vst1q_u32(tmp_ptr, src4); +#endif // AOM_ARCH_AARCH64 + // Same for the filter vectors + const int32x4_t filter_idx_s32 = vreinterpretq_s32_u32( + vshrq_n_u32(vandq_u32(xqn_idx, subpel_mask), SCALE_EXTRA_BITS)); + int32_t x_filter4_idx[4]; + vst1q_s32(x_filter4_idx, filter_idx_s32); + const int16_t *x_filter4_ptr[4]; + + // Load source + int16x8_t s0 = vld1q_s16(src4_ptr[0]); + int16x8_t s1 = vld1q_s16(src4_ptr[1]); + int16x8_t s2 = vld1q_s16(src4_ptr[2]); + int16x8_t s3 = vld1q_s16(src4_ptr[3]); + + // We could easily do this using SIMD as well instead of calling the + // inline function 4 times. 
+ x_filter4_ptr[0] = av1_get_interp_filter_subpel_kernel( + filter_params, x_filter4_idx[0]); + x_filter4_ptr[1] = av1_get_interp_filter_subpel_kernel( + filter_params, x_filter4_idx[1]); + x_filter4_ptr[2] = av1_get_interp_filter_subpel_kernel( + filter_params, x_filter4_idx[2]); + x_filter4_ptr[3] = av1_get_interp_filter_subpel_kernel( + filter_params, x_filter4_idx[3]); + + // Actually load the filters + const int16x8_t x_filter0 = vld1q_s16(x_filter4_ptr[0]); + const int16x8_t x_filter1 = vld1q_s16(x_filter4_ptr[1]); + const int16x8_t x_filter2 = vld1q_s16(x_filter4_ptr[2]); + const int16x8_t x_filter3 = vld1q_s16(x_filter4_ptr[3]); + + // Group low and high parts and transpose + int16x4_t filters_lo[] = { vget_low_s16(x_filter0), + vget_low_s16(x_filter1), + vget_low_s16(x_filter2), + vget_low_s16(x_filter3) }; + int16x4_t filters_hi[] = { vget_high_s16(x_filter0), + vget_high_s16(x_filter1), + vget_high_s16(x_filter2), + vget_high_s16(x_filter3) }; + transpose_array_inplace_u16_4x4((uint16x4_t *)filters_lo); + transpose_array_inplace_u16_4x4((uint16x4_t *)filters_hi); + + // Run the 2D Scale X convolution + uint16x4_t d0 = highbd_convolve8_2d_scale_horiz4x8_s32_s16( + s0, s1, s2, s3, filters_lo, filters_hi, shift_s32, offset_s32); + + vst1_u16(d, d0); + + x_qn += 4 * x_step_qn; + d += 4; + width -= 4; + } while (width > 0); + + src_ptr += src_stride; + dst_ptr += dst_stride; + height--; + } while (height > 0); + } +} + +static INLINE void highbd_convolve_2d_y_scale_8tap_neon( + const uint16_t *src_ptr, int src_stride, uint16_t *dst_ptr, int dst_stride, + int w, int h, const int subpel_y_qn, const int y_step_qn, + const InterpFilterParams *filter_params, const int round1_bits, + const int offset) { + const int32x4_t offset_s32 = vdupq_n_s32(1 << offset); + + const int32x4_t round1_shift_s32 = vdupq_n_s32(-round1_bits); + if (w <= 4) { + int height = h; + uint16_t *d = dst_ptr; + int y_qn = subpel_y_qn; + + do { + const int16_t *s = + (const int16_t *)&src_ptr[(y_qn >> SCALE_SUBPEL_BITS) * src_stride]; + + int16x4_t s0, s1, s2, s3, s4, s5, s6, s7; + load_s16_4x8(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6, &s7); + + const int y_filter_idx = (y_qn & SCALE_SUBPEL_MASK) >> SCALE_EXTRA_BITS; + const int16_t *y_filter_ptr = + av1_get_interp_filter_subpel_kernel(filter_params, y_filter_idx); + const int16x8_t y_filter = vld1q_s16(y_filter_ptr); + + uint16x4_t d0 = highbd_convolve8_4_srsub_s32_s16( + s0, s1, s2, s3, s4, s5, s6, s7, y_filter, round1_shift_s32, + offset_s32, vdupq_n_s32(0)); + + if (w == 2) { + store_u16_2x1(d, d0); + } else { + vst1_u16(d, d0); + } + + y_qn += y_step_qn; + d += dst_stride; + height--; + } while (height > 0); + } else { + int width = w; + + do { + int height = h; + int y_qn = subpel_y_qn; + + uint16_t *d = dst_ptr; + + do { + const int16_t *s = + (const int16_t *)&src_ptr[(y_qn >> SCALE_SUBPEL_BITS) * src_stride]; + int16x8_t s0, s1, s2, s3, s4, s5, s6, s7; + load_s16_8x8(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6, &s7); + + const int y_filter_idx = (y_qn & SCALE_SUBPEL_MASK) >> SCALE_EXTRA_BITS; + const int16_t *y_filter_ptr = + av1_get_interp_filter_subpel_kernel(filter_params, y_filter_idx); + const int16x8_t y_filter = vld1q_s16(y_filter_ptr); + + uint16x8_t d0 = highbd_convolve8_8_srsub_s32_s16( + s0, s1, s2, s3, s4, s5, s6, s7, y_filter, round1_shift_s32, + offset_s32, vdupq_n_s32(0)); + vst1q_u16(d, d0); + + y_qn += y_step_qn; + d += dst_stride; + height--; + } while (height > 0); + src_ptr += 8; + dst_ptr += 8; + width -= 8; + } while (width 
> 0); + } +} + +static INLINE void highbd_convolve_correct_offset_neon( + const uint16_t *src_ptr, int src_stride, uint16_t *dst_ptr, int dst_stride, + int w, int h, const int round_bits, const int offset, const int bd) { + const int32x4_t round_shift_s32 = vdupq_n_s32(-round_bits); + const int16x4_t offset_s16 = vdup_n_s16(offset); + const uint16x8_t max = vdupq_n_u16((1 << bd) - 1); + + if (w <= 4) { + for (int y = 0; y < h; ++y) { + const int16x4_t s = vld1_s16((const int16_t *)src_ptr + y * src_stride); + const int32x4_t d0 = + vqrshlq_s32(vsubl_s16(s, offset_s16), round_shift_s32); + uint16x4_t d = vqmovun_s32(d0); + d = vmin_u16(d, vget_low_u16(max)); + if (w == 2) { + store_u16_2x1(dst_ptr + y * dst_stride, d); + } else { + vst1_u16(dst_ptr + y * dst_stride, d); + } + } + } else { + for (int y = 0; y < h; ++y) { + for (int x = 0; x < w; x += 8) { + // Subtract round offset and convolve round + const int16x8_t s = + vld1q_s16((const int16_t *)src_ptr + y * src_stride + x); + const int32x4_t d0 = vqrshlq_s32(vsubl_s16(vget_low_s16(s), offset_s16), + round_shift_s32); + const int32x4_t d1 = vqrshlq_s32( + vsubl_s16(vget_high_s16(s), offset_s16), round_shift_s32); + uint16x8_t d01 = vcombine_u16(vqmovun_s32(d0), vqmovun_s32(d1)); + d01 = vminq_u16(d01, max); + vst1q_u16(dst_ptr + y * dst_stride + x, d01); + } + } + } +} + +void av1_highbd_convolve_2d_scale_neon( + const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride, int w, + int h, const InterpFilterParams *filter_params_x, + const InterpFilterParams *filter_params_y, const int subpel_x_qn, + const int x_step_qn, const int subpel_y_qn, const int y_step_qn, + ConvolveParams *conv_params, int bd) { + uint16_t *im_block = (uint16_t *)aom_memalign( + 16, 2 * sizeof(uint16_t) * MAX_SB_SIZE * (MAX_SB_SIZE + MAX_FILTER_TAP)); + if (!im_block) return; + uint16_t *im_block2 = (uint16_t *)aom_memalign( + 16, 2 * sizeof(uint16_t) * MAX_SB_SIZE * (MAX_SB_SIZE + MAX_FILTER_TAP)); + if (!im_block2) { + aom_free(im_block); // free the first block and return. 
+ return; + } + + int im_h = (((h - 1) * y_step_qn + subpel_y_qn) >> SCALE_SUBPEL_BITS) + + filter_params_y->taps; + const int im_stride = MAX_SB_SIZE; + const int bits = + FILTER_BITS * 2 - conv_params->round_0 - conv_params->round_1; + assert(bits >= 0); + + const int vert_offset = filter_params_y->taps / 2 - 1; + const int horiz_offset = filter_params_x->taps / 2 - 1; + const int x_offset_bits = (1 << (bd + FILTER_BITS - 1)); + const int y_offset_bits = bd + 2 * FILTER_BITS - conv_params->round_0; + const int y_offset_correction = + ((1 << (y_offset_bits - conv_params->round_1)) + + (1 << (y_offset_bits - conv_params->round_1 - 1))); + + CONV_BUF_TYPE *dst16 = conv_params->dst; + const int dst16_stride = conv_params->dst_stride; + + const uint16_t *src_ptr = src - vert_offset * src_stride - horiz_offset; + + highbd_convolve_2d_x_scale_8tap_neon( + src_ptr, src_stride, im_block, im_stride, w, im_h, subpel_x_qn, x_step_qn, + filter_params_x, conv_params, x_offset_bits); + if (conv_params->is_compound && !conv_params->do_average) { + highbd_convolve_2d_y_scale_8tap_neon( + im_block, im_stride, dst16, dst16_stride, w, h, subpel_y_qn, y_step_qn, + filter_params_y, conv_params->round_1, y_offset_bits); + } else { + highbd_convolve_2d_y_scale_8tap_neon( + im_block, im_stride, im_block2, im_stride, w, h, subpel_y_qn, y_step_qn, + filter_params_y, conv_params->round_1, y_offset_bits); + } + + // Do the compound averaging outside the loop, avoids branching within the + // main loop + if (conv_params->is_compound) { + if (conv_params->do_average) { + if (conv_params->use_dist_wtd_comp_avg) { + highbd_dist_wtd_comp_avg_neon(im_block2, im_stride, dst, dst_stride, w, + h, conv_params, bits, y_offset_correction, + bd); + } else { + highbd_comp_avg_neon(im_block2, im_stride, dst, dst_stride, w, h, + conv_params, bits, y_offset_correction, bd); + } + } + } else { + highbd_convolve_correct_offset_neon(im_block2, im_stride, dst, dst_stride, + w, h, bits, y_offset_correction, bd); + } + aom_free(im_block); + aom_free(im_block2); +} diff --git a/third_party/aom/av1/common/arm/highbd_inv_txfm_neon.c b/third_party/aom/av1/common/arm/highbd_inv_txfm_neon.c new file mode 100644 index 0000000000..84bc8fd963 --- /dev/null +++ b/third_party/aom/av1/common/arm/highbd_inv_txfm_neon.c @@ -0,0 +1,5994 @@ +/* + * Copyright (c) 2020, Alliance for Open Media. All rights reserved + * + * This source code is subject to the terms of the BSD 2 Clause License and + * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License + * was not distributed with this source code in the LICENSE file, you can + * obtain it at www.aomedia.org/license/software. If the Alliance for Open + * Media Patent License 1.0 was not distributed with this source code in the + * PATENTS file, you can obtain it at www.aomedia.org/license/patent. 
+ */ + +#include <arm_neon.h> +#include <assert.h> + +#include "av1/common/av1_inv_txfm1d_cfg.h" +#include "av1/common/idct.h" +#include "config/aom_config.h" +#include "config/av1_rtcd.h" + +#if AOM_ARCH_AARCH64 +#define TRANSPOSE_4X4(x0, x1, x2, x3, y0, y1, y2, y3) \ + do { \ + int32x4x2_t swap_low = vtrnq_s32(x0, x1); \ + int32x4x2_t swap_high = vtrnq_s32(x2, x3); \ + y0 = vreinterpretq_s32_s64( \ + vzip1q_s64(vreinterpretq_s64_s32(swap_low.val[0]), \ + vreinterpretq_s64_s32(swap_high.val[0]))); \ + y1 = vreinterpretq_s32_s64( \ + vzip1q_s64(vreinterpretq_s64_s32(swap_low.val[1]), \ + vreinterpretq_s64_s32(swap_high.val[1]))); \ + y2 = vreinterpretq_s32_s64( \ + vzip2q_s64(vreinterpretq_s64_s32(swap_low.val[0]), \ + vreinterpretq_s64_s32(swap_high.val[0]))); \ + y3 = vreinterpretq_s32_s64( \ + vzip2q_s64(vreinterpretq_s64_s32(swap_low.val[1]), \ + vreinterpretq_s64_s32(swap_high.val[1]))); \ + } while (0) +#else +#define TRANSPOSE_4X4(x0, x1, x2, x3, y0, y1, y2, y3) \ + do { \ + int32x4x2_t swap_low = vtrnq_s32(x0, x1); \ + int32x4x2_t swap_high = vtrnq_s32(x2, x3); \ + y0 = vextq_s32(vextq_s32(swap_low.val[0], swap_low.val[0], 2), \ + swap_high.val[0], 2); \ + y1 = vextq_s32(vextq_s32(swap_low.val[1], swap_low.val[1], 2), \ + swap_high.val[1], 2); \ + y2 = vextq_s32(swap_low.val[0], \ + vextq_s32(swap_high.val[0], swap_high.val[0], 2), 2); \ + y3 = vextq_s32(swap_low.val[1], \ + vextq_s32(swap_high.val[1], swap_high.val[1], 2), 2); \ + } while (0) +#endif // AOM_ARCH_AARCH64 + +static INLINE void transpose_4x4(const int32x4_t *in, int32x4_t *out) { + TRANSPOSE_4X4(in[0], in[1], in[2], in[3], out[0], out[1], out[2], out[3]); +} + +static INLINE void transpose_8x8(const int32x4_t *in, int32x4_t *out) { + TRANSPOSE_4X4(in[0], in[2], in[4], in[6], out[0], out[2], out[4], out[6]); + TRANSPOSE_4X4(in[1], in[3], in[5], in[7], out[8], out[10], out[12], out[14]); + TRANSPOSE_4X4(in[8], in[10], in[12], in[14], out[1], out[3], out[5], out[7]); + TRANSPOSE_4X4(in[9], in[11], in[13], in[15], out[9], out[11], out[13], + out[15]); +} + +static INLINE void round_shift_array_32_neon(int32x4_t *input, + int32x4_t *output, const int size, + const int bit) { + const int32x4_t v_bit = vdupq_n_s32(-bit); + for (int i = 0; i < size; i++) { + output[i] = vrshlq_s32(input[i], v_bit); + } +} + +static INLINE void round_shift_rect_array_32_neon(int32x4_t *input, + int32x4_t *output, + const int size) { + for (int i = 0; i < size; i++) { + const int32x4_t r0 = vmulq_n_s32(input[i], NewInvSqrt2); + output[i] = vrshrq_n_s32(r0, NewSqrt2Bits); + } +} + +static INLINE int32x4_t half_btf_neon_r(const int32_t *n0, const int32x4_t *w0, + const int32_t *n1, const int32x4_t *w1, + const int32x4_t *v_bit, + const int32x4_t *rnding) { + int32x4_t x; + x = vmlaq_n_s32(*rnding, *w0, *n0); + x = vmlaq_n_s32(x, *w1, *n1); + x = vshlq_s32(x, *v_bit); + return x; +} + +static INLINE int32x4_t half_btf_neon_mode11_r( + const int32_t *n0, const int32x4_t *w0, const int32_t *n1, + const int32x4_t *w1, const int32x4_t *v_bit, const int32x4_t *rnding) { + int32x4_t x; + x = vmlaq_n_s32(*rnding, *w0, -*n0); + x = vmlaq_n_s32(x, *w1, -*n1); + x = vshlq_s32(x, *v_bit); + return x; +} + +static INLINE int32x4_t half_btf_neon_mode01_r( + const int32_t *n0, const int32x4_t *w0, const int32_t *n1, + const int32x4_t *w1, const int32x4_t *v_bit, const int32x4_t *rnding) { + int32x4_t x; + x = vmlaq_n_s32(*rnding, *w0, *n0); + x = vmlsq_n_s32(x, *w1, *n1); + x = vshlq_s32(x, *v_bit); + return x; +} + +static INLINE int32x4_t 
half_btf_neon_mode10_r( + const int32_t *n0, const int32x4_t *w0, const int32_t *n1, + const int32x4_t *w1, const int32x4_t *v_bit, const int32x4_t *rnding) { + int32x4_t x; + x = vmlaq_n_s32(*rnding, *w1, *n1); + x = vmlsq_n_s32(x, *w0, *n0); + x = vshlq_s32(x, *v_bit); + return x; +} + +static INLINE int32x4_t half_btf_0_neon_r(const int32_t *n0, + const int32x4_t *w0, + const int32x4_t *v_bit, + const int32x4_t *rnding) { + int32x4_t x; + x = vmlaq_n_s32(*rnding, *w0, *n0); + x = vshlq_s32(x, *v_bit); + return x; +} + +static INLINE int32x4_t half_btf_0_m_neon_r(const int32_t *n0, + const int32x4_t *w0, + const int32x4_t *v_bit, + const int32x4_t *rnding) { + int32x4_t x; + x = vmlaq_n_s32(*rnding, *w0, -*n0); + x = vshlq_s32(x, *v_bit); + return x; +} + +static INLINE void flip_buf_neon(int32x4_t *in, int32x4_t *out, int size) { + for (int i = 0; i < size; ++i) { + out[size - i - 1] = in[i]; + } +} + +typedef void (*fwd_transform_1d_neon)(int32x4_t *in, int32x4_t *out, int bit, + const int num_cols); + +typedef void (*transform_1d_neon)(int32x4_t *in, int32x4_t *out, int32_t bit, + int32_t do_cols, int32_t bd, + int32_t out_shift); + +static INLINE uint16x8_t highbd_clamp_u16(uint16x8_t *u, const uint16x8_t *min, + const uint16x8_t *max) { + int16x8_t clamped; + clamped = vminq_s16(vreinterpretq_s16_u16(*u), vreinterpretq_s16_u16(*max)); + clamped = vmaxq_s16(clamped, vreinterpretq_s16_u16(*min)); + return vreinterpretq_u16_s16(clamped); +} + +static INLINE void round_shift_4x4(int32x4_t *in, int shift) { + if (shift != 0) { + const int32x4_t v_shift = vdupq_n_s32(-shift); + in[0] = vrshlq_s32(in[0], v_shift); + in[1] = vrshlq_s32(in[1], v_shift); + in[2] = vrshlq_s32(in[2], v_shift); + in[3] = vrshlq_s32(in[3], v_shift); + } +} + +static void round_shift_8x8(int32x4_t *in, int shift) { + assert(shift != 0); + const int32x4_t v_shift = vdupq_n_s32(-shift); + in[0] = vrshlq_s32(in[0], v_shift); + in[1] = vrshlq_s32(in[1], v_shift); + in[2] = vrshlq_s32(in[2], v_shift); + in[3] = vrshlq_s32(in[3], v_shift); + in[4] = vrshlq_s32(in[4], v_shift); + in[5] = vrshlq_s32(in[5], v_shift); + in[6] = vrshlq_s32(in[6], v_shift); + in[7] = vrshlq_s32(in[7], v_shift); + in[8] = vrshlq_s32(in[8], v_shift); + in[9] = vrshlq_s32(in[9], v_shift); + in[10] = vrshlq_s32(in[10], v_shift); + in[11] = vrshlq_s32(in[11], v_shift); + in[12] = vrshlq_s32(in[12], v_shift); + in[13] = vrshlq_s32(in[13], v_shift); + in[14] = vrshlq_s32(in[14], v_shift); + in[15] = vrshlq_s32(in[15], v_shift); +} + +static void highbd_clamp_s32_neon(int32x4_t *in, int32x4_t *out, + const int32x4_t *clamp_lo, + const int32x4_t *clamp_hi, int size) { + int32x4_t a0, a1; + for (int i = 0; i < size; i += 4) { + a0 = vmaxq_s32(in[i], *clamp_lo); + out[i] = vminq_s32(a0, *clamp_hi); + + a1 = vmaxq_s32(in[i + 1], *clamp_lo); + out[i + 1] = vminq_s32(a1, *clamp_hi); + + a0 = vmaxq_s32(in[i + 2], *clamp_lo); + out[i + 2] = vminq_s32(a0, *clamp_hi); + + a1 = vmaxq_s32(in[i + 3], *clamp_lo); + out[i + 3] = vminq_s32(a1, *clamp_hi); + } +} + +static INLINE uint16x8_t highbd_get_recon_8x8_neon(const uint16x8_t pred, + int32x4_t res0, + int32x4_t res1, + const int bd) { + const uint16x8_t v_zero = vdupq_n_u16(0); + int32x4_t min_clip_val = vreinterpretq_s32_u16(v_zero); + int32x4_t max_clip_val = vdupq_n_s32((1 << bd) - 1); + uint16x8x2_t x; + x.val[0] = vreinterpretq_u16_s32( + vaddw_s16(res0, vreinterpret_s16_u16(vget_low_u16(pred)))); + x.val[1] = vreinterpretq_u16_s32( + vaddw_s16(res1, vreinterpret_s16_u16(vget_high_u16(pred)))); + x.val[0] 
= vreinterpretq_u16_s32( + vmaxq_s32(vreinterpretq_s32_u16(x.val[0]), min_clip_val)); + x.val[0] = vreinterpretq_u16_s32( + vminq_s32(vreinterpretq_s32_u16(x.val[0]), max_clip_val)); + x.val[1] = vreinterpretq_u16_s32( + vmaxq_s32(vreinterpretq_s32_u16(x.val[1]), min_clip_val)); + x.val[1] = vreinterpretq_u16_s32( + vminq_s32(vreinterpretq_s32_u16(x.val[1]), max_clip_val)); + uint16x8_t res = vcombine_u16(vqmovn_u32(vreinterpretq_u32_u16(x.val[0])), + vqmovn_u32(vreinterpretq_u32_u16(x.val[1]))); + return res; +} + +static INLINE uint16x4_t highbd_get_recon_4xn_neon(uint16x4_t pred, + int32x4_t res0, + const int bd) { + uint16x4_t x0_ = vreinterpret_u16_s16( + vmovn_s32(vaddw_s16(res0, vreinterpret_s16_u16(pred)))); + uint16x8_t x0 = vcombine_u16(x0_, x0_); + const uint16x8_t vmin = vdupq_n_u16(0); + const uint16x8_t vmax = vdupq_n_u16((1 << bd) - 1); + x0 = highbd_clamp_u16(&x0, &vmin, &vmax); + return vget_low_u16(x0); +} + +static INLINE void highbd_write_buffer_4xn_neon(int32x4_t *in, uint16_t *output, + int stride, int flipud, + int height, const int bd) { + int j = flipud ? (height - 1) : 0; + const int step = flipud ? -1 : 1; + for (int i = 0; i < height; ++i, j += step) { + uint16x4_t v = vld1_u16(output + i * stride); + uint16x4_t u = highbd_get_recon_4xn_neon(v, in[j], bd); + + vst1_u16(output + i * stride, u); + } +} + +static INLINE void highbd_write_buffer_8xn_neon(int32x4_t *in, uint16_t *output, + int stride, int flipud, + int height, const int bd) { + int j = flipud ? (height - 1) : 0; + const int step = flipud ? -1 : 1; + for (int i = 0; i < height; ++i, j += step) { + uint16x8_t v = vld1q_u16(output + i * stride); + uint16x8_t u = highbd_get_recon_8x8_neon(v, in[j], in[j + height], bd); + + vst1q_u16(output + i * stride, u); + } +} + +static INLINE void load_buffer_32bit_input(const int32_t *in, int stride, + int32x4_t *out, int out_size) { + for (int i = 0; i < out_size; ++i) { + out[i] = vld1q_s32(in + i * stride); + } +} + +static INLINE void load_buffer_4x4(const int32_t *coeff, int32x4_t *in) { + in[0] = vld1q_s32(coeff + 0); + in[1] = vld1q_s32(coeff + 4); + in[2] = vld1q_s32(coeff + 8); + in[3] = vld1q_s32(coeff + 12); +} + +static void addsub_neon(const int32x4_t in0, const int32x4_t in1, + int32x4_t *out0, int32x4_t *out1, + const int32x4_t *clamp_lo, const int32x4_t *clamp_hi) { + int32x4_t a0 = vaddq_s32(in0, in1); + int32x4_t a1 = vsubq_s32(in0, in1); + + a0 = vmaxq_s32(a0, *clamp_lo); + a0 = vminq_s32(a0, *clamp_hi); + a1 = vmaxq_s32(a1, *clamp_lo); + a1 = vminq_s32(a1, *clamp_hi); + + *out0 = a0; + *out1 = a1; +} + +static void shift_and_clamp_neon(int32x4_t *in0, int32x4_t *in1, + const int32x4_t *clamp_lo, + const int32x4_t *clamp_hi, + const int32x4_t *v_shift) { + int32x4_t in0_w_offset = vrshlq_s32(*in0, *v_shift); + int32x4_t in1_w_offset = vrshlq_s32(*in1, *v_shift); + + in0_w_offset = vmaxq_s32(in0_w_offset, *clamp_lo); + in0_w_offset = vminq_s32(in0_w_offset, *clamp_hi); + in1_w_offset = vmaxq_s32(in1_w_offset, *clamp_lo); + in1_w_offset = vminq_s32(in1_w_offset, *clamp_hi); + + *in0 = in0_w_offset; + *in1 = in1_w_offset; +} + +static INLINE void idct32_stage4_neon(int32x4_t *bf1, const int32_t *cospi, + const int32x4_t *v_bit, + const int32x4_t *rnding) { + int32x4_t temp1, temp2; + temp1 = half_btf_neon_mode10_r(&cospi[8], &bf1[17], &cospi[56], &bf1[30], + v_bit, rnding); + bf1[30] = + half_btf_neon_r(&cospi[56], &bf1[17], &cospi[8], &bf1[30], v_bit, rnding); + bf1[17] = temp1; + + temp2 = half_btf_neon_mode11_r(&cospi[56], &bf1[18], &cospi[8], 
&bf1[29], + v_bit, rnding); + bf1[29] = half_btf_neon_mode10_r(&cospi[8], &bf1[18], &cospi[56], &bf1[29], + v_bit, rnding); + bf1[18] = temp2; + + temp1 = half_btf_neon_mode10_r(&cospi[40], &bf1[21], &cospi[24], &bf1[26], + v_bit, rnding); + bf1[26] = half_btf_neon_r(&cospi[24], &bf1[21], &cospi[40], &bf1[26], v_bit, + rnding); + bf1[21] = temp1; + + temp2 = half_btf_neon_mode11_r(&cospi[24], &bf1[22], &cospi[40], &bf1[25], + v_bit, rnding); + bf1[25] = half_btf_neon_mode10_r(&cospi[40], &bf1[22], &cospi[24], &bf1[25], + v_bit, rnding); + bf1[22] = temp2; +} + +static INLINE void idct32_stage5_neon(int32x4_t *bf1, const int32_t *cospi, + const int32x4_t *clamp_lo, + const int32x4_t *clamp_hi, + const int32x4_t *v_bit, + const int32x4_t *rnding) { + int32x4_t temp1, temp2; + temp1 = half_btf_neon_mode10_r(&cospi[16], &bf1[9], &cospi[48], &bf1[14], + v_bit, rnding); + bf1[14] = + half_btf_neon_r(&cospi[48], &bf1[9], &cospi[16], &bf1[14], v_bit, rnding); + bf1[9] = temp1; + + temp2 = half_btf_neon_mode11_r(&cospi[48], &bf1[10], &cospi[16], &bf1[13], + v_bit, rnding); + bf1[13] = half_btf_neon_mode10_r(&cospi[16], &bf1[10], &cospi[48], &bf1[13], + v_bit, rnding); + bf1[10] = temp2; + + addsub_neon(bf1[16], bf1[19], bf1 + 16, bf1 + 19, clamp_lo, clamp_hi); + addsub_neon(bf1[17], bf1[18], bf1 + 17, bf1 + 18, clamp_lo, clamp_hi); + addsub_neon(bf1[23], bf1[20], bf1 + 23, bf1 + 20, clamp_lo, clamp_hi); + addsub_neon(bf1[22], bf1[21], bf1 + 22, bf1 + 21, clamp_lo, clamp_hi); + addsub_neon(bf1[24], bf1[27], bf1 + 24, bf1 + 27, clamp_lo, clamp_hi); + addsub_neon(bf1[25], bf1[26], bf1 + 25, bf1 + 26, clamp_lo, clamp_hi); + addsub_neon(bf1[31], bf1[28], bf1 + 31, bf1 + 28, clamp_lo, clamp_hi); + addsub_neon(bf1[30], bf1[29], bf1 + 30, bf1 + 29, clamp_lo, clamp_hi); +} + +static INLINE void idct32_stage6_neon(int32x4_t *bf1, const int32_t *cospi, + const int32x4_t *clamp_lo, + const int32x4_t *clamp_hi, + const int32x4_t *v_bit, + const int32x4_t *rnding) { + int32x4_t temp1, temp2; + temp1 = half_btf_neon_mode10_r(&cospi[32], &bf1[5], &cospi[32], &bf1[6], + v_bit, rnding); + bf1[6] = + half_btf_neon_r(&cospi[32], &bf1[5], &cospi[32], &bf1[6], v_bit, rnding); + bf1[5] = temp1; + + addsub_neon(bf1[8], bf1[11], bf1 + 8, bf1 + 11, clamp_lo, clamp_hi); + addsub_neon(bf1[9], bf1[10], bf1 + 9, bf1 + 10, clamp_lo, clamp_hi); + addsub_neon(bf1[15], bf1[12], bf1 + 15, bf1 + 12, clamp_lo, clamp_hi); + addsub_neon(bf1[14], bf1[13], bf1 + 14, bf1 + 13, clamp_lo, clamp_hi); + + temp1 = half_btf_neon_mode10_r(&cospi[16], &bf1[18], &cospi[48], &bf1[29], + v_bit, rnding); + bf1[29] = half_btf_neon_r(&cospi[48], &bf1[18], &cospi[16], &bf1[29], v_bit, + rnding); + bf1[18] = temp1; + temp2 = half_btf_neon_mode10_r(&cospi[16], &bf1[19], &cospi[48], &bf1[28], + v_bit, rnding); + bf1[28] = half_btf_neon_r(&cospi[48], &bf1[19], &cospi[16], &bf1[28], v_bit, + rnding); + bf1[19] = temp2; + temp1 = half_btf_neon_mode11_r(&cospi[48], &bf1[20], &cospi[16], &bf1[27], + v_bit, rnding); + bf1[27] = half_btf_neon_mode10_r(&cospi[16], &bf1[20], &cospi[48], &bf1[27], + v_bit, rnding); + bf1[20] = temp1; + temp2 = half_btf_neon_mode11_r(&cospi[48], &bf1[21], &cospi[16], &bf1[26], + v_bit, rnding); + bf1[26] = half_btf_neon_mode10_r(&cospi[16], &bf1[21], &cospi[48], &bf1[26], + v_bit, rnding); + bf1[21] = temp2; +} + +static INLINE void idct32_stage7_neon(int32x4_t *bf1, const int32_t *cospi, + const int32x4_t *clamp_lo, + const int32x4_t *clamp_hi, + const int32x4_t *v_bit, + const int32x4_t *rnding) { + int32x4_t temp1, temp2; + 
addsub_neon(bf1[0], bf1[7], bf1 + 0, bf1 + 7, clamp_lo, clamp_hi); + addsub_neon(bf1[1], bf1[6], bf1 + 1, bf1 + 6, clamp_lo, clamp_hi); + addsub_neon(bf1[2], bf1[5], bf1 + 2, bf1 + 5, clamp_lo, clamp_hi); + addsub_neon(bf1[3], bf1[4], bf1 + 3, bf1 + 4, clamp_lo, clamp_hi); + temp1 = half_btf_neon_mode10_r(&cospi[32], &bf1[10], &cospi[32], &bf1[13], + v_bit, rnding); + bf1[13] = half_btf_neon_r(&cospi[32], &bf1[10], &cospi[32], &bf1[13], v_bit, + rnding); + bf1[10] = temp1; + temp2 = half_btf_neon_mode10_r(&cospi[32], &bf1[11], &cospi[32], &bf1[12], + v_bit, rnding); + bf1[12] = half_btf_neon_r(&cospi[32], &bf1[11], &cospi[32], &bf1[12], v_bit, + rnding); + bf1[11] = temp2; + + addsub_neon(bf1[16], bf1[23], bf1 + 16, bf1 + 23, clamp_lo, clamp_hi); + addsub_neon(bf1[17], bf1[22], bf1 + 17, bf1 + 22, clamp_lo, clamp_hi); + addsub_neon(bf1[18], bf1[21], bf1 + 18, bf1 + 21, clamp_lo, clamp_hi); + addsub_neon(bf1[19], bf1[20], bf1 + 19, bf1 + 20, clamp_lo, clamp_hi); + addsub_neon(bf1[31], bf1[24], bf1 + 31, bf1 + 24, clamp_lo, clamp_hi); + addsub_neon(bf1[30], bf1[25], bf1 + 30, bf1 + 25, clamp_lo, clamp_hi); + addsub_neon(bf1[29], bf1[26], bf1 + 29, bf1 + 26, clamp_lo, clamp_hi); + addsub_neon(bf1[28], bf1[27], bf1 + 28, bf1 + 27, clamp_lo, clamp_hi); +} + +static INLINE void idct32_stage8_neon(int32x4_t *bf1, const int32_t *cospi, + const int32x4_t *clamp_lo, + const int32x4_t *clamp_hi, + const int32x4_t *v_bit, + const int32x4_t *rnding) { + int32x4_t temp1, temp2; + addsub_neon(bf1[0], bf1[15], bf1 + 0, bf1 + 15, clamp_lo, clamp_hi); + addsub_neon(bf1[1], bf1[14], bf1 + 1, bf1 + 14, clamp_lo, clamp_hi); + addsub_neon(bf1[2], bf1[13], bf1 + 2, bf1 + 13, clamp_lo, clamp_hi); + addsub_neon(bf1[3], bf1[12], bf1 + 3, bf1 + 12, clamp_lo, clamp_hi); + addsub_neon(bf1[4], bf1[11], bf1 + 4, bf1 + 11, clamp_lo, clamp_hi); + addsub_neon(bf1[5], bf1[10], bf1 + 5, bf1 + 10, clamp_lo, clamp_hi); + addsub_neon(bf1[6], bf1[9], bf1 + 6, bf1 + 9, clamp_lo, clamp_hi); + addsub_neon(bf1[7], bf1[8], bf1 + 7, bf1 + 8, clamp_lo, clamp_hi); + temp1 = half_btf_neon_mode10_r(&cospi[32], &bf1[20], &cospi[32], &bf1[27], + v_bit, rnding); + bf1[27] = half_btf_neon_r(&cospi[32], &bf1[20], &cospi[32], &bf1[27], v_bit, + rnding); + bf1[20] = temp1; + temp2 = half_btf_neon_mode10_r(&cospi[32], &bf1[21], &cospi[32], &bf1[26], + v_bit, rnding); + bf1[26] = half_btf_neon_r(&cospi[32], &bf1[21], &cospi[32], &bf1[26], v_bit, + rnding); + bf1[21] = temp2; + temp1 = half_btf_neon_mode10_r(&cospi[32], &bf1[22], &cospi[32], &bf1[25], + v_bit, rnding); + bf1[25] = half_btf_neon_r(&cospi[32], &bf1[22], &cospi[32], &bf1[25], v_bit, + rnding); + bf1[22] = temp1; + temp2 = half_btf_neon_mode10_r(&cospi[32], &bf1[23], &cospi[32], &bf1[24], + v_bit, rnding); + bf1[24] = half_btf_neon_r(&cospi[32], &bf1[23], &cospi[32], &bf1[24], v_bit, + rnding); + bf1[23] = temp2; +} + +static INLINE void idct32_stage9_neon(int32x4_t *bf1, int32x4_t *out, + const int do_cols, const int bd, + const int out_shift, + const int32x4_t *clamp_lo, + const int32x4_t *clamp_hi) { + addsub_neon(bf1[0], bf1[31], out + 0, out + 31, clamp_lo, clamp_hi); + addsub_neon(bf1[1], bf1[30], out + 1, out + 30, clamp_lo, clamp_hi); + addsub_neon(bf1[2], bf1[29], out + 2, out + 29, clamp_lo, clamp_hi); + addsub_neon(bf1[3], bf1[28], out + 3, out + 28, clamp_lo, clamp_hi); + addsub_neon(bf1[4], bf1[27], out + 4, out + 27, clamp_lo, clamp_hi); + addsub_neon(bf1[5], bf1[26], out + 5, out + 26, clamp_lo, clamp_hi); + addsub_neon(bf1[6], bf1[25], out + 6, out + 25, clamp_lo, 
clamp_hi); + addsub_neon(bf1[7], bf1[24], out + 7, out + 24, clamp_lo, clamp_hi); + addsub_neon(bf1[8], bf1[23], out + 8, out + 23, clamp_lo, clamp_hi); + addsub_neon(bf1[9], bf1[22], out + 9, out + 22, clamp_lo, clamp_hi); + addsub_neon(bf1[10], bf1[21], out + 10, out + 21, clamp_lo, clamp_hi); + addsub_neon(bf1[11], bf1[20], out + 11, out + 20, clamp_lo, clamp_hi); + addsub_neon(bf1[12], bf1[19], out + 12, out + 19, clamp_lo, clamp_hi); + addsub_neon(bf1[13], bf1[18], out + 13, out + 18, clamp_lo, clamp_hi); + addsub_neon(bf1[14], bf1[17], out + 14, out + 17, clamp_lo, clamp_hi); + addsub_neon(bf1[15], bf1[16], out + 15, out + 16, clamp_lo, clamp_hi); + + if (!do_cols) { + const int log_range_out = AOMMAX(16, bd + 6); + const int32x4_t clamp_lo_out = vdupq_n_s32(-(1 << (log_range_out - 1))); + const int32x4_t clamp_hi_out = vdupq_n_s32((1 << (log_range_out - 1)) - 1); + for (int i = 0; i < 32; i += 8) { + round_shift_4x4(out + i, out_shift); + round_shift_4x4(out + i + 4, out_shift); + } + highbd_clamp_s32_neon(out, out, &clamp_lo_out, &clamp_hi_out, 32); + } +} + +static void neg_shift_neon(const int32x4_t *in0, const int32x4_t *in1, + int32x4_t *out0, int32x4_t *out1, + const int32x4_t *clamp_lo, const int32x4_t *clamp_hi, + const int32x4_t *v_shift, int32x4_t *offset) { + int32x4_t a0 = vaddq_s32(*offset, *in0); + int32x4_t a1 = vsubq_s32(*offset, *in1); + + a0 = vshlq_s32(a0, *v_shift); + a1 = vshlq_s32(a1, *v_shift); + + a0 = vmaxq_s32(a0, *clamp_lo); + a0 = vminq_s32(a0, *clamp_hi); + a1 = vmaxq_s32(a1, *clamp_lo); + a1 = vminq_s32(a1, *clamp_hi); + + *out0 = a0; + *out1 = a1; +} + +static void idct4x4_neon(int32x4_t *in, int32x4_t *out, int bit, int do_cols, + int bd, int out_shift) { + const int32_t *cospi = cospi_arr(bit); + int log_range = AOMMAX(16, bd + (do_cols ? 
6 : 8)); + int32x4_t clamp_lo = vdupq_n_s32(-(1 << (log_range - 1))); + int32x4_t clamp_hi = vdupq_n_s32((1 << (log_range - 1)) - 1); + int32x4_t rnding = vdupq_n_s32(1 << (bit - 1)); + + int32x4_t u0, u1, u2, u3; + int32x4_t v0, v1, v2, v3, x, y; + + // Stage 0-1-2 + + u0 = in[0]; + u1 = in[1]; + u2 = in[2]; + u3 = in[3]; + + const int32x4_t v_bit = vdupq_n_s32(-bit); + + x = vmlaq_n_s32(rnding, u0, cospi[32]); + y = vmulq_n_s32(u2, cospi[32]); + v0 = vaddq_s32(x, y); + v0 = vshlq_s32(v0, v_bit); + + v1 = vsubq_s32(x, y); + v1 = vshlq_s32(v1, v_bit); + + x = vmlaq_n_s32(rnding, u1, cospi[48]); + v2 = vmlsq_n_s32(x, u3, cospi[16]); + v2 = vshlq_s32(v2, v_bit); + + x = vmlaq_n_s32(rnding, u1, cospi[16]); + v3 = vmlaq_n_s32(x, u3, cospi[48]); + v3 = vshlq_s32(v3, v_bit); + // Stage 3 + addsub_neon(v0, v3, out + 0, out + 3, &clamp_lo, &clamp_hi); + addsub_neon(v1, v2, out + 1, out + 2, &clamp_lo, &clamp_hi); + + if (!do_cols) { + log_range = AOMMAX(16, bd + 6); + clamp_lo = vdupq_n_s32(-(1 << (log_range - 1))); + clamp_hi = vdupq_n_s32((1 << (log_range - 1)) - 1); + const int32x4_t v_shift = vdupq_n_s32(-out_shift); + shift_and_clamp_neon(out + 0, out + 3, &clamp_lo, &clamp_hi, &v_shift); + shift_and_clamp_neon(out + 1, out + 2, &clamp_lo, &clamp_hi, &v_shift); + } +} + +static void iadst4x4_neon(int32x4_t *in, int32x4_t *out, int bit, int do_cols, + int bd, int out_shift) { + const int32_t *sinpi = sinpi_arr(bit); + const int32x4_t zero = vdupq_n_s32(0); + int64x2_t rnding = vdupq_n_s64(1ll << (bit + 4 - 1)); + const int32x2_t mul = vdup_n_s32(1 << 4); + int32x4_t t; + int32x4_t s0, s1, s2, s3, s4, s5, s6, s7; + int32x4_t x0, x1, x2, x3; + int32x4_t u0, u1, u2, u3; + + x0 = in[0]; + x1 = in[1]; + x2 = in[2]; + x3 = in[3]; + + s0 = vmulq_n_s32(x0, sinpi[1]); + s1 = vmulq_n_s32(x0, sinpi[2]); + s2 = vmulq_n_s32(x1, sinpi[3]); + s3 = vmulq_n_s32(x2, sinpi[4]); + s4 = vmulq_n_s32(x2, sinpi[1]); + s5 = vmulq_n_s32(x3, sinpi[2]); + s6 = vmulq_n_s32(x3, sinpi[4]); + t = vsubq_s32(x0, x2); + s7 = vaddq_s32(t, x3); + + t = vaddq_s32(s0, s3); + s0 = vaddq_s32(t, s5); + t = vsubq_s32(s1, s4); + s1 = vsubq_s32(t, s6); + s3 = s2; + s2 = vmulq_n_s32(s7, sinpi[3]); + + u0 = vaddq_s32(s0, s3); + u1 = vaddq_s32(s1, s3); + u2 = s2; + t = vaddq_s32(s0, s1); + u3 = vsubq_s32(t, s3); + + // u0 + int32x4x2_t u0x; + u0x.val[0] = vreinterpretq_s32_s64( + vmull_s32(vmovn_s64(vreinterpretq_s64_s32(u0)), mul)); + u0x.val[0] = vreinterpretq_s32_s64( + vaddq_s64(vreinterpretq_s64_s32(u0x.val[0]), rnding)); + + u0 = vextq_s32(u0, zero, 1); + u0x.val[1] = vreinterpretq_s32_s64( + vmull_s32(vmovn_s64(vreinterpretq_s64_s32(u0)), mul)); + u0x.val[1] = vreinterpretq_s32_s64( + vaddq_s64(vreinterpretq_s64_s32(u0x.val[1]), rnding)); + + u0x.val[0] = vreinterpretq_s32_s16(vextq_s16( + vreinterpretq_s16_s32(u0x.val[0]), vreinterpretq_s16_s32(zero), 1)); + u0x.val[1] = vreinterpretq_s32_s16(vextq_s16( + vreinterpretq_s16_s32(u0x.val[1]), vreinterpretq_s16_s32(zero), 1)); + + u0x = vzipq_s32(u0x.val[0], u0x.val[1]); +#if AOM_ARCH_AARCH64 + u0 = vreinterpretq_s32_s64(vzip1q_s64(vreinterpretq_s64_s32(u0x.val[0]), + vreinterpretq_s64_s32(u0x.val[1]))); +#else + u0 = vcombine_s32(vget_low_s32(u0x.val[0]), vget_low_s32(u0x.val[1])); +#endif // AOM_ARCH_AARCH64 + // u1 + int32x4x2_t u1x; + u1x.val[0] = vreinterpretq_s32_s64( + vmull_s32(vmovn_s64(vreinterpretq_s64_s32(u1)), mul)); + u1x.val[0] = vreinterpretq_s32_s64( + vaddq_s64(vreinterpretq_s64_s32(u1x.val[0]), rnding)); + + u1 = vextq_s32(u1, zero, 1); + u1x.val[1] = 
vreinterpretq_s32_s64( + vmull_s32(vmovn_s64(vreinterpretq_s64_s32(u1)), mul)); + u1x.val[1] = vreinterpretq_s32_s64( + vaddq_s64(vreinterpretq_s64_s32(u1x.val[1]), rnding)); + + u1x.val[0] = vreinterpretq_s32_s16(vextq_s16( + vreinterpretq_s16_s32(u1x.val[0]), vreinterpretq_s16_s32(zero), 1)); + u1x.val[1] = vreinterpretq_s32_s16(vextq_s16( + vreinterpretq_s16_s32(u1x.val[1]), vreinterpretq_s16_s32(zero), 1)); + + u1x = vzipq_s32(u1x.val[0], u1x.val[1]); +#if AOM_ARCH_AARCH64 + u1 = vreinterpretq_s32_s64(vzip1q_s64(vreinterpretq_s64_s32(u1x.val[0]), + vreinterpretq_s64_s32(u1x.val[1]))); +#else + u1 = vcombine_s32(vget_low_s32(u1x.val[0]), vget_low_s32(u1x.val[1])); +#endif // AOM_ARCH_AARCH64 + + // u2 + int32x4x2_t u2x; + u2x.val[0] = vreinterpretq_s32_s64( + vmull_s32(vmovn_s64(vreinterpretq_s64_s32(u2)), mul)); + u2x.val[0] = vreinterpretq_s32_s64( + vaddq_s64(vreinterpretq_s64_s32(u2x.val[0]), rnding)); + + u2 = vextq_s32(u2, zero, 1); + u2x.val[1] = vreinterpretq_s32_s64( + vmull_s32(vmovn_s64(vreinterpretq_s64_s32(u2)), mul)); + u2x.val[1] = vreinterpretq_s32_s64( + vaddq_s64(vreinterpretq_s64_s32(u2x.val[1]), rnding)); + + u2x.val[0] = vreinterpretq_s32_s16(vextq_s16( + vreinterpretq_s16_s32(u2x.val[0]), vreinterpretq_s16_s32(zero), 1)); + u2x.val[1] = vreinterpretq_s32_s16(vextq_s16( + vreinterpretq_s16_s32(u2x.val[1]), vreinterpretq_s16_s32(zero), 1)); + + u2x = vzipq_s32(u2x.val[0], u2x.val[1]); +#if AOM_ARCH_AARCH64 + u2 = vreinterpretq_s32_s64(vzip1q_s64(vreinterpretq_s64_s32(u2x.val[0]), + vreinterpretq_s64_s32(u2x.val[1]))); +#else + u2 = vcombine_s32(vget_low_s32(u2x.val[0]), vget_low_s32(u2x.val[1])); +#endif // AOM_ARCH_AARCH64 + + // u3 + int32x4x2_t u3x; + u3x.val[0] = vreinterpretq_s32_s64( + vmull_s32(vmovn_s64(vreinterpretq_s64_s32(u3)), mul)); + u3x.val[0] = vreinterpretq_s32_s64( + vaddq_s64(vreinterpretq_s64_s32(u3x.val[0]), rnding)); + + u3 = vextq_s32(u3, zero, 1); + u3x.val[1] = vreinterpretq_s32_s64( + vmull_s32(vmovn_s64(vreinterpretq_s64_s32(u3)), mul)); + u3x.val[1] = vreinterpretq_s32_s64( + vaddq_s64(vreinterpretq_s64_s32(u3x.val[1]), rnding)); + + u3x.val[0] = vreinterpretq_s32_s16(vextq_s16( + vreinterpretq_s16_s32(u3x.val[0]), vreinterpretq_s16_s32(zero), 1)); + u3x.val[1] = vreinterpretq_s32_s16(vextq_s16( + vreinterpretq_s16_s32(u3x.val[1]), vreinterpretq_s16_s32(zero), 1)); + + u3x = vzipq_s32(u3x.val[0], u3x.val[1]); +#if AOM_ARCH_AARCH64 + u3 = vreinterpretq_s32_s64(vzip1q_s64(vreinterpretq_s64_s32(u3x.val[0]), + vreinterpretq_s64_s32(u3x.val[1]))); +#else + u3 = vcombine_s32(vget_low_s32(u3x.val[0]), vget_low_s32(u3x.val[1])); +#endif // AOM_ARCH_AARCH64 + + out[0] = u0; + out[1] = u1; + out[2] = u2; + out[3] = u3; + + if (!do_cols) { + const int log_range = AOMMAX(16, bd + 6); + const int32x4_t clamp_lo = vdupq_n_s32(-(1 << (log_range - 1))); + const int32x4_t clamp_hi = vdupq_n_s32((1 << (log_range - 1)) - 1); + round_shift_4x4(out, out_shift); + highbd_clamp_s32_neon(out, out, &clamp_lo, &clamp_hi, 4); + } +} + +static void write_buffer_4x4(int32x4_t *in, uint16_t *output, int stride, + int fliplr, int flipud, int shift, int bd) { + uint32x4_t u0, u1, u2, u3; + uint16x4_t v0, v1, v2, v3; + round_shift_4x4(in, shift); + + v0 = vld1_u16(output + 0 * stride); + v1 = vld1_u16(output + 1 * stride); + v2 = vld1_u16(output + 2 * stride); + v3 = vld1_u16(output + 3 * stride); + + if (fliplr) { + u0 = vrev64q_u32(vreinterpretq_u32_s32(in[0])); + in[0] = vreinterpretq_s32_u32(vextq_u32(u0, u0, 2)); + u0 = vrev64q_u32(vreinterpretq_u32_s32(in[1])); + 
in[1] = vreinterpretq_s32_u32(vextq_u32(u0, u0, 2)); + u0 = vrev64q_u32(vreinterpretq_u32_s32(in[2])); + in[2] = vreinterpretq_s32_u32(vextq_u32(u0, u0, 2)); + u0 = vrev64q_u32(vreinterpretq_u32_s32(in[3])); + in[3] = vreinterpretq_s32_u32(vextq_u32(u0, u0, 2)); + } + + if (flipud) { + u0 = vaddw_u16(vreinterpretq_u32_s32(in[3]), v0); + u1 = vaddw_u16(vreinterpretq_u32_s32(in[2]), v1); + u2 = vaddw_u16(vreinterpretq_u32_s32(in[1]), v2); + u3 = vaddw_u16(vreinterpretq_u32_s32(in[0]), v3); + } else { + u0 = vaddw_u16(vreinterpretq_u32_s32(in[0]), v0); + u1 = vaddw_u16(vreinterpretq_u32_s32(in[1]), v1); + u2 = vaddw_u16(vreinterpretq_u32_s32(in[2]), v2); + u3 = vaddw_u16(vreinterpretq_u32_s32(in[3]), v3); + } + + uint16x8_t u4 = vcombine_u16(vqmovn_u32(u0), vqmovn_u32(u1)); + uint16x8_t u5 = vcombine_u16(vqmovn_u32(u2), vqmovn_u32(u3)); + const uint16x8_t vmin = vdupq_n_u16(0); + const uint16x8_t vmax = vdupq_n_u16((1 << bd) - 1); + u4 = highbd_clamp_u16(&u4, &vmin, &vmax); + u5 = highbd_clamp_u16(&u5, &vmin, &vmax); + + vst1_u16(output + 0 * stride, vget_low_u16(u4)); + vst1_u16(output + 1 * stride, vget_high_u16(u4)); + vst1_u16(output + 2 * stride, vget_low_u16(u5)); + vst1_u16(output + 3 * stride, vget_high_u16(u5)); +} + +static void iidentity4_neon(int32x4_t *in, int32x4_t *out, int bit, int do_cols, + int bd, int out_shift) { + (void)bit; + int32x4_t zero = vdupq_n_s32(0); + int32x2_t fact = vdup_n_s32(NewSqrt2); + int32x4x2_t a0; + const int64x2_t rnding = vdupq_n_s64(1 << (NewSqrt2Bits - 1)); + + for (int i = 0; i < 4; i++) { + a0.val[0] = vreinterpretq_s32_s64( + vmlal_s32(rnding, vmovn_s64(vreinterpretq_s64_s32(in[i])), fact)); + a0.val[0] = vreinterpretq_s32_s64( + vshrq_n_s64(vreinterpretq_s64_s32(a0.val[0]), NewSqrt2Bits)); + a0.val[1] = vextq_s32(in[i], zero, 1); + a0.val[1] = vreinterpretq_s32_s64( + vmlal_s32(rnding, vmovn_s64(vreinterpretq_s64_s32(a0.val[1])), fact)); + a0.val[1] = vreinterpretq_s32_s64( + vshrq_n_s64(vreinterpretq_s64_s32(a0.val[1]), NewSqrt2Bits)); + + a0 = vzipq_s32(a0.val[0], a0.val[1]); +#if AOM_ARCH_AARCH64 + out[i] = vreinterpretq_s32_s64(vzip1q_s64( + vreinterpretq_s64_s32(a0.val[0]), vreinterpretq_s64_s32(a0.val[1]))); +#else + out[i] = vextq_s32(vextq_s32(a0.val[0], a0.val[0], 2), a0.val[1], 2); +#endif + } + if (!do_cols) { + const int log_range = AOMMAX(16, bd + 6); + const int32x4_t clamp_lo = vdupq_n_s32(-(1 << (log_range - 1))); + const int32x4_t clamp_hi = vdupq_n_s32((1 << (log_range - 1)) - 1); + round_shift_4x4(out, out_shift); + highbd_clamp_s32_neon(out, out, &clamp_lo, &clamp_hi, 4); + } +} + +void av1_inv_txfm2d_add_4x4_neon(const int32_t *input, uint16_t *output, + int stride, TX_TYPE tx_type, int bd) { + int32x4_t in[4]; + + const int8_t *shift = av1_inv_txfm_shift_ls[TX_4X4]; + + switch (tx_type) { + case DCT_DCT: + load_buffer_4x4(input, in); + idct4x4_neon(in, in, INV_COS_BIT, 0, bd, 0); + transpose_4x4(in, in); + idct4x4_neon(in, in, INV_COS_BIT, 1, bd, 0); + write_buffer_4x4(in, output, stride, 0, 0, -shift[1], bd); + break; + case ADST_DCT: + load_buffer_4x4(input, in); + idct4x4_neon(in, in, INV_COS_BIT, 0, bd, 0); + transpose_4x4(in, in); + iadst4x4_neon(in, in, INV_COS_BIT, 1, bd, 0); + write_buffer_4x4(in, output, stride, 0, 0, -shift[1], bd); + break; + case DCT_ADST: + load_buffer_4x4(input, in); + iadst4x4_neon(in, in, INV_COS_BIT, 0, bd, 0); + transpose_4x4(in, in); + idct4x4_neon(in, in, INV_COS_BIT, 1, bd, 0); + write_buffer_4x4(in, output, stride, 0, 0, -shift[1], bd); + break; + case ADST_ADST: + 
load_buffer_4x4(input, in); + iadst4x4_neon(in, in, INV_COS_BIT, 0, bd, 0); + transpose_4x4(in, in); + iadst4x4_neon(in, in, INV_COS_BIT, 1, bd, 0); + write_buffer_4x4(in, output, stride, 0, 0, -shift[1], bd); + break; + case FLIPADST_DCT: + load_buffer_4x4(input, in); + idct4x4_neon(in, in, INV_COS_BIT, 0, bd, 0); + transpose_4x4(in, in); + iadst4x4_neon(in, in, INV_COS_BIT, 1, bd, 0); + write_buffer_4x4(in, output, stride, 0, 1, -shift[1], bd); + break; + case DCT_FLIPADST: + load_buffer_4x4(input, in); + iadst4x4_neon(in, in, INV_COS_BIT, 0, bd, 0); + transpose_4x4(in, in); + idct4x4_neon(in, in, INV_COS_BIT, 1, bd, 0); + write_buffer_4x4(in, output, stride, 1, 0, -shift[1], bd); + break; + case FLIPADST_FLIPADST: + load_buffer_4x4(input, in); + iadst4x4_neon(in, in, INV_COS_BIT, 0, bd, 0); + transpose_4x4(in, in); + iadst4x4_neon(in, in, INV_COS_BIT, 1, bd, 0); + write_buffer_4x4(in, output, stride, 1, 1, -shift[1], bd); + break; + case ADST_FLIPADST: + load_buffer_4x4(input, in); + iadst4x4_neon(in, in, INV_COS_BIT, 0, bd, 0); + transpose_4x4(in, in); + iadst4x4_neon(in, in, INV_COS_BIT, 1, bd, 0); + write_buffer_4x4(in, output, stride, 1, 0, -shift[1], bd); + break; + case FLIPADST_ADST: + load_buffer_4x4(input, in); + iadst4x4_neon(in, in, INV_COS_BIT, 0, bd, 0); + transpose_4x4(in, in); + iadst4x4_neon(in, in, INV_COS_BIT, 1, bd, 0); + write_buffer_4x4(in, output, stride, 0, 1, -shift[1], bd); + break; + case IDTX: + load_buffer_4x4(input, in); + iidentity4_neon(in, in, INV_COS_BIT, 0, bd, 0); + transpose_4x4(in, in); + iidentity4_neon(in, in, INV_COS_BIT, 1, bd, 0); + write_buffer_4x4(in, output, stride, 0, 0, -shift[1], bd); + break; + case V_DCT: + load_buffer_4x4(input, in); + iidentity4_neon(in, in, INV_COS_BIT, 0, bd, 0); + transpose_4x4(in, in); + idct4x4_neon(in, in, INV_COS_BIT, 1, bd, 0); + write_buffer_4x4(in, output, stride, 0, 0, -shift[1], bd); + break; + case H_DCT: + load_buffer_4x4(input, in); + idct4x4_neon(in, in, INV_COS_BIT, 0, bd, 0); + transpose_4x4(in, in); + iidentity4_neon(in, in, INV_COS_BIT, 1, bd, 0); + write_buffer_4x4(in, output, stride, 0, 0, -shift[1], bd); + break; + case V_ADST: + load_buffer_4x4(input, in); + iidentity4_neon(in, in, INV_COS_BIT, 0, bd, 0); + transpose_4x4(in, in); + iadst4x4_neon(in, in, INV_COS_BIT, 1, bd, 0); + write_buffer_4x4(in, output, stride, 0, 0, -shift[1], bd); + break; + case H_ADST: + load_buffer_4x4(input, in); + iadst4x4_neon(in, in, INV_COS_BIT, 0, bd, 0); + transpose_4x4(in, in); + iidentity4_neon(in, in, INV_COS_BIT, 1, bd, 0); + write_buffer_4x4(in, output, stride, 0, 0, -shift[1], bd); + break; + case V_FLIPADST: + load_buffer_4x4(input, in); + iidentity4_neon(in, in, INV_COS_BIT, 0, bd, 0); + transpose_4x4(in, in); + iadst4x4_neon(in, in, INV_COS_BIT, 1, bd, 0); + write_buffer_4x4(in, output, stride, 0, 1, -shift[1], bd); + break; + case H_FLIPADST: + load_buffer_4x4(input, in); + iadst4x4_neon(in, in, INV_COS_BIT, 0, bd, 0); + transpose_4x4(in, in); + iidentity4_neon(in, in, INV_COS_BIT, 1, bd, 0); + write_buffer_4x4(in, output, stride, 1, 0, -shift[1], bd); + break; + default: assert(0); + } +} + +// 8x8 +static void load_buffer_8x8(const int32_t *coeff, int32x4_t *in) { + in[0] = vld1q_s32(coeff + 0); + in[1] = vld1q_s32(coeff + 4); + in[2] = vld1q_s32(coeff + 8); + in[3] = vld1q_s32(coeff + 12); + in[4] = vld1q_s32(coeff + 16); + in[5] = vld1q_s32(coeff + 20); + in[6] = vld1q_s32(coeff + 24); + in[7] = vld1q_s32(coeff + 28); + in[8] = vld1q_s32(coeff + 32); + in[9] = vld1q_s32(coeff + 36); + in[10] = 
vld1q_s32(coeff + 40); + in[11] = vld1q_s32(coeff + 44); + in[12] = vld1q_s32(coeff + 48); + in[13] = vld1q_s32(coeff + 52); + in[14] = vld1q_s32(coeff + 56); + in[15] = vld1q_s32(coeff + 60); +} + +static void idct8x8_neon(int32x4_t *in, int32x4_t *out, int bit, int do_cols, + int bd, int out_shift) { + const int32_t *cospi = cospi_arr(bit); + const int log_range = AOMMAX(16, bd + (do_cols ? 6 : 8)); + const int32x4_t clamp_lo = vdupq_n_s32(-(1 << (log_range - 1))); + const int32x4_t clamp_hi = vdupq_n_s32((1 << (log_range - 1)) - 1); + int32x4_t u0, u1, u2, u3, u4, u5, u6, u7; + int32x4_t v0, v1, v2, v3, v4, v5, v6, v7; + int32x4_t x, y; + int col; + const int32x4_t rnding = vdupq_n_s32(1 << (bit - 1)); + const int32x4_t v_bit = vdupq_n_s32(-bit); + // Note: + // Even column: 0, 2, ..., 14 + // Odd column: 1, 3, ..., 15 + // one even column plus one odd column constructs one row (8 coeffs) + // total we have 8 rows (8x8). + for (col = 0; col < 2; ++col) { + // stage 0 + // stage 1 + // stage 2 + u0 = in[0 * 2 + col]; + u1 = in[4 * 2 + col]; + u2 = in[2 * 2 + col]; + u3 = in[6 * 2 + col]; + + x = vmulq_n_s32(in[1 * 2 + col], cospi[56]); + u4 = vmlaq_n_s32(x, in[7 * 2 + col], -cospi[8]); + u4 = vaddq_s32(u4, rnding); + u4 = vshlq_s32(u4, v_bit); + + x = vmulq_n_s32(in[1 * 2 + col], cospi[8]); + u7 = vmlaq_n_s32(x, in[7 * 2 + col], cospi[56]); + u7 = vaddq_s32(u7, rnding); + u7 = vshlq_s32(u7, v_bit); + + x = vmulq_n_s32(in[5 * 2 + col], cospi[24]); + u5 = vmlaq_n_s32(x, in[3 * 2 + col], -cospi[40]); + u5 = vaddq_s32(u5, rnding); + u5 = vshlq_s32(u5, v_bit); + + x = vmulq_n_s32(in[5 * 2 + col], cospi[40]); + u6 = vmlaq_n_s32(x, in[3 * 2 + col], cospi[24]); + u6 = vaddq_s32(u6, rnding); + u6 = vshlq_s32(u6, v_bit); + + // stage 3 + x = vmulq_n_s32(u0, cospi[32]); + y = vmulq_n_s32(u1, cospi[32]); + v0 = vaddq_s32(x, y); + v0 = vaddq_s32(v0, rnding); + v0 = vshlq_s32(v0, v_bit); + + v1 = vsubq_s32(x, y); + v1 = vaddq_s32(v1, rnding); + v1 = vshlq_s32(v1, v_bit); + + x = vmulq_n_s32(u2, cospi[48]); + v2 = vmlaq_n_s32(x, u3, -cospi[16]); + v2 = vaddq_s32(v2, rnding); + v2 = vshlq_s32(v2, v_bit); + + x = vmulq_n_s32(u2, cospi[16]); + v3 = vmlaq_n_s32(x, u3, cospi[48]); + v3 = vaddq_s32(v3, rnding); + v3 = vshlq_s32(v3, v_bit); + + addsub_neon(u4, u5, &v4, &v5, &clamp_lo, &clamp_hi); + addsub_neon(u7, u6, &v7, &v6, &clamp_lo, &clamp_hi); + + // stage 4 + addsub_neon(v0, v3, &u0, &u3, &clamp_lo, &clamp_hi); + addsub_neon(v1, v2, &u1, &u2, &clamp_lo, &clamp_hi); + u4 = v4; + u7 = v7; + + x = vmulq_n_s32(v5, cospi[32]); + y = vmulq_n_s32(v6, cospi[32]); + u6 = vaddq_s32(y, x); + u6 = vaddq_s32(u6, rnding); + u6 = vshlq_s32(u6, v_bit); + + u5 = vsubq_s32(y, x); + u5 = vaddq_s32(u5, rnding); + u5 = vshlq_s32(u5, v_bit); + + // stage 5 + addsub_neon(u0, u7, out + 0 * 2 + col, out + 7 * 2 + col, &clamp_lo, + &clamp_hi); + addsub_neon(u1, u6, out + 1 * 2 + col, out + 6 * 2 + col, &clamp_lo, + &clamp_hi); + addsub_neon(u2, u5, out + 2 * 2 + col, out + 5 * 2 + col, &clamp_lo, + &clamp_hi); + addsub_neon(u3, u4, out + 3 * 2 + col, out + 4 * 2 + col, &clamp_lo, + &clamp_hi); + } + + if (!do_cols) { + const int log_range_out = AOMMAX(16, bd + 6); + const int32x4_t clamp_lo_out = vdupq_n_s32(-(1 << (log_range_out - 1))); + const int32x4_t clamp_hi_out = vdupq_n_s32((1 << (log_range_out - 1)) - 1); + round_shift_8x8(out, out_shift); + highbd_clamp_s32_neon(out, out, &clamp_lo_out, &clamp_hi_out, 16); + } +} + +static void iadst8x8_neon(int32x4_t *in, int32x4_t *out, int bit, int do_cols, + int bd, int 
out_shift) { + const int32_t *cospi = cospi_arr(bit); + const int32x4_t kZero = vdupq_n_s32(0); + const int log_range = AOMMAX(16, bd + (do_cols ? 6 : 8)); + const int32x4_t clamp_lo = vdupq_n_s32(-(1 << (log_range - 1))); + const int32x4_t clamp_hi = vdupq_n_s32((1 << (log_range - 1)) - 1); + int32x4_t u[8], v[8], x; + const int32x4_t v_bit = vdupq_n_s32(-bit); + const int32x4_t rnding = vdupq_n_s32(1 << (bit - 1)); + // stage 0-1-2 + // (1) + u[0] = vmlaq_n_s32(rnding, in[14], cospi[4]); + u[0] = vmlaq_n_s32(u[0], in[0], cospi[60]); + u[0] = vshlq_s32(u[0], v_bit); + + u[1] = vmlaq_n_s32(rnding, in[14], cospi[60]); + u[1] = vmlsq_n_s32(u[1], in[0], cospi[4]); + u[1] = vshlq_s32(u[1], v_bit); + + // (2) + u[2] = vmlaq_n_s32(rnding, in[10], cospi[20]); + u[2] = vmlaq_n_s32(u[2], in[4], cospi[44]); + u[2] = vshlq_s32(u[2], v_bit); + + u[3] = vmlaq_n_s32(rnding, in[10], cospi[44]); + u[3] = vmlsq_n_s32(u[3], in[4], cospi[20]); + u[3] = vshlq_s32(u[3], v_bit); + + // (3) + u[4] = vmlaq_n_s32(rnding, in[6], cospi[36]); + u[4] = vmlaq_n_s32(u[4], in[8], cospi[28]); + u[4] = vshlq_s32(u[4], v_bit); + + u[5] = vmlaq_n_s32(rnding, in[6], cospi[28]); + u[5] = vmlsq_n_s32(u[5], in[8], cospi[36]); + u[5] = vshlq_s32(u[5], v_bit); + + // (4) + u[6] = vmlaq_n_s32(rnding, in[2], cospi[52]); + u[6] = vmlaq_n_s32(u[6], in[12], cospi[12]); + u[6] = vshlq_s32(u[6], v_bit); + + u[7] = vmlaq_n_s32(rnding, in[2], cospi[12]); + u[7] = vmlsq_n_s32(u[7], in[12], cospi[52]); + u[7] = vshlq_s32(u[7], v_bit); + + // stage 3 + addsub_neon(u[0], u[4], &v[0], &v[4], &clamp_lo, &clamp_hi); + addsub_neon(u[1], u[5], &v[1], &v[5], &clamp_lo, &clamp_hi); + addsub_neon(u[2], u[6], &v[2], &v[6], &clamp_lo, &clamp_hi); + addsub_neon(u[3], u[7], &v[3], &v[7], &clamp_lo, &clamp_hi); + + // stage 4 + u[0] = v[0]; + u[1] = v[1]; + u[2] = v[2]; + u[3] = v[3]; + + u[4] = vmlaq_n_s32(rnding, v[4], cospi[16]); + u[4] = vmlaq_n_s32(u[4], v[5], cospi[48]); + u[4] = vshlq_s32(u[4], v_bit); + + u[5] = vmlaq_n_s32(rnding, v[4], cospi[48]); + u[5] = vmlsq_n_s32(u[5], v[5], cospi[16]); + u[5] = vshlq_s32(u[5], v_bit); + + u[6] = vmlaq_n_s32(rnding, v[7], cospi[16]); + u[6] = vmlsq_n_s32(u[6], v[6], cospi[48]); + u[6] = vshlq_s32(u[6], v_bit); + + u[7] = vmlaq_n_s32(rnding, v[7], cospi[48]); + u[7] = vmlaq_n_s32(u[7], v[6], cospi[16]); + u[7] = vshlq_s32(u[7], v_bit); + + // stage 5 + addsub_neon(u[0], u[2], &v[0], &v[2], &clamp_lo, &clamp_hi); + addsub_neon(u[1], u[3], &v[1], &v[3], &clamp_lo, &clamp_hi); + addsub_neon(u[4], u[6], &v[4], &v[6], &clamp_lo, &clamp_hi); + addsub_neon(u[5], u[7], &v[5], &v[7], &clamp_lo, &clamp_hi); + + // stage 6 + u[0] = v[0]; + u[1] = v[1]; + u[4] = v[4]; + u[5] = v[5]; + + v[0] = vmlaq_n_s32(rnding, v[2], cospi[32]); + x = vmulq_n_s32(v[3], cospi[32]); + u[2] = vaddq_s32(v[0], x); + u[2] = vshlq_s32(u[2], v_bit); + + u[3] = vsubq_s32(v[0], x); + u[3] = vshlq_s32(u[3], v_bit); + + v[0] = vmlaq_n_s32(rnding, v[6], cospi[32]); + x = vmulq_n_s32(v[7], cospi[32]); + u[6] = vaddq_s32(v[0], x); + u[6] = vshlq_s32(u[6], v_bit); + + u[7] = vsubq_s32(v[0], x); + u[7] = vshlq_s32(u[7], v_bit); + + // stage 7 + if (do_cols) { + out[0] = u[0]; + out[2] = vsubq_s32(kZero, u[4]); + out[4] = u[6]; + out[6] = vsubq_s32(kZero, u[2]); + out[8] = u[3]; + out[10] = vsubq_s32(kZero, u[7]); + out[12] = u[5]; + out[14] = vsubq_s32(kZero, u[1]); + } else { + const int log_range_out = AOMMAX(16, bd + 6); + const int32x4_t clamp_lo_out = vdupq_n_s32(-(1 << (log_range_out - 1))); + const int32x4_t clamp_hi_out = vdupq_n_s32((1 << 
(log_range_out - 1)) - 1); + const int32x4_t v_shift = vdupq_n_s32(-out_shift); + int32x4_t offset = vdupq_n_s32((1 << out_shift) >> 1); + neg_shift_neon(&u[0], &u[4], out + 0, out + 2, &clamp_lo_out, &clamp_hi_out, + &v_shift, &offset); + neg_shift_neon(&u[6], &u[2], out + 4, out + 6, &clamp_lo_out, &clamp_hi_out, + &v_shift, &offset); + neg_shift_neon(&u[3], &u[7], out + 8, out + 10, &clamp_lo_out, + &clamp_hi_out, &v_shift, &offset); + neg_shift_neon(&u[5], &u[1], out + 12, out + 14, &clamp_lo_out, + &clamp_hi_out, &v_shift, &offset); + } + + // Odd 8 points: 1, 3, ..., 15 + // stage 0 + // stage 1 + // stage 2 + // (1) + u[0] = vmlaq_n_s32(rnding, in[15], cospi[4]); + u[0] = vmlaq_n_s32(u[0], in[1], cospi[60]); + u[0] = vshlq_s32(u[0], v_bit); + + u[1] = vmlaq_n_s32(rnding, in[15], cospi[60]); + u[1] = vmlsq_n_s32(u[1], in[1], cospi[4]); + u[1] = vshlq_s32(u[1], v_bit); + + // (2) + u[2] = vmlaq_n_s32(rnding, in[11], cospi[20]); + u[2] = vmlaq_n_s32(u[2], in[5], cospi[44]); + u[2] = vshlq_s32(u[2], v_bit); + + u[3] = vmlaq_n_s32(rnding, in[11], cospi[44]); + u[3] = vmlsq_n_s32(u[3], in[5], cospi[20]); + u[3] = vshlq_s32(u[3], v_bit); + + // (3) + u[4] = vmlaq_n_s32(rnding, in[7], cospi[36]); + u[4] = vmlaq_n_s32(u[4], in[9], cospi[28]); + u[4] = vshlq_s32(u[4], v_bit); + + u[5] = vmlaq_n_s32(rnding, in[7], cospi[28]); + u[5] = vmlsq_n_s32(u[5], in[9], cospi[36]); + u[5] = vshlq_s32(u[5], v_bit); + + // (4) + u[6] = vmlaq_n_s32(rnding, in[3], cospi[52]); + u[6] = vmlaq_n_s32(u[6], in[13], cospi[12]); + u[6] = vshlq_s32(u[6], v_bit); + + u[7] = vmlaq_n_s32(rnding, in[3], cospi[12]); + u[7] = vmlsq_n_s32(u[7], in[13], cospi[52]); + u[7] = vshlq_s32(u[7], v_bit); + + // stage 3 + addsub_neon(u[0], u[4], &v[0], &v[4], &clamp_lo, &clamp_hi); + addsub_neon(u[1], u[5], &v[1], &v[5], &clamp_lo, &clamp_hi); + addsub_neon(u[2], u[6], &v[2], &v[6], &clamp_lo, &clamp_hi); + addsub_neon(u[3], u[7], &v[3], &v[7], &clamp_lo, &clamp_hi); + + // stage 4 + u[0] = v[0]; + u[1] = v[1]; + u[2] = v[2]; + u[3] = v[3]; + + u[4] = vmlaq_n_s32(rnding, v[4], cospi[16]); + u[4] = vmlaq_n_s32(u[4], v[5], cospi[48]); + u[4] = vshlq_s32(u[4], v_bit); + + u[5] = vmlaq_n_s32(rnding, v[4], cospi[48]); + u[5] = vmlsq_n_s32(u[5], v[5], cospi[16]); + u[5] = vshlq_s32(u[5], v_bit); + + u[6] = vmlaq_n_s32(rnding, v[7], cospi[16]); + u[6] = vmlsq_n_s32(u[6], v[6], cospi[48]); + u[6] = vshlq_s32(u[6], v_bit); + + u[7] = vmlaq_n_s32(rnding, v[6], cospi[16]); + u[7] = vmlaq_n_s32(u[7], v[7], cospi[48]); + u[7] = vshlq_s32(u[7], v_bit); + + // stage 5 + addsub_neon(u[0], u[2], &v[0], &v[2], &clamp_lo, &clamp_hi); + addsub_neon(u[1], u[3], &v[1], &v[3], &clamp_lo, &clamp_hi); + addsub_neon(u[4], u[6], &v[4], &v[6], &clamp_lo, &clamp_hi); + addsub_neon(u[5], u[7], &v[5], &v[7], &clamp_lo, &clamp_hi); + + // stage 6 + u[0] = v[0]; + u[1] = v[1]; + u[4] = v[4]; + u[5] = v[5]; + + v[0] = vmlaq_n_s32(rnding, v[2], cospi[32]); + x = vmulq_n_s32(v[3], cospi[32]); + u[2] = vaddq_s32(v[0], x); + u[2] = vshlq_s32(u[2], v_bit); + + u[3] = vsubq_s32(v[0], x); + u[3] = vshlq_s32(u[3], v_bit); + + v[0] = vmlaq_n_s32(rnding, v[6], cospi[32]); + x = vmulq_n_s32(v[7], cospi[32]); + u[6] = vaddq_s32(v[0], x); + u[6] = vshlq_s32(u[6], v_bit); + + u[7] = vsubq_s32(v[0], x); + u[7] = vshlq_s32(u[7], v_bit); + + // stage 7 + if (do_cols) { + out[1] = u[0]; + out[3] = vsubq_s32(kZero, u[4]); + out[5] = u[6]; + out[7] = vsubq_s32(kZero, u[2]); + out[9] = u[3]; + out[11] = vsubq_s32(kZero, u[7]); + out[13] = u[5]; + out[15] = vsubq_s32(kZero, u[1]); + } 
else { + const int log_range_out = AOMMAX(16, bd + 6); + const int32x4_t clamp_lo_out = vdupq_n_s32(-(1 << (log_range_out - 1))); + const int32x4_t clamp_hi_out = vdupq_n_s32((1 << (log_range_out - 1)) - 1); + const int32x4_t v_shift = vdupq_n_s32(-out_shift); + int32x4_t offset = vdupq_n_s32((1 << out_shift) >> 1); + neg_shift_neon(&u[0], &u[4], out + 1, out + 3, &clamp_lo_out, &clamp_hi_out, + &v_shift, &offset); + neg_shift_neon(&u[6], &u[2], out + 5, out + 7, &clamp_lo_out, &clamp_hi_out, + &v_shift, &offset); + neg_shift_neon(&u[3], &u[7], out + 9, out + 11, &clamp_lo_out, + &clamp_hi_out, &v_shift, &offset); + neg_shift_neon(&u[5], &u[1], out + 13, out + 15, &clamp_lo_out, + &clamp_hi_out, &v_shift, &offset); + } +} + +static void iidentity8_neon(int32x4_t *in, int32x4_t *out, int bit, int do_cols, + int bd, int out_shift) { + (void)bit; + out[0] = vaddq_s32(in[0], in[0]); + out[1] = vaddq_s32(in[1], in[1]); + out[2] = vaddq_s32(in[2], in[2]); + out[3] = vaddq_s32(in[3], in[3]); + out[4] = vaddq_s32(in[4], in[4]); + out[5] = vaddq_s32(in[5], in[5]); + out[6] = vaddq_s32(in[6], in[6]); + out[7] = vaddq_s32(in[7], in[7]); + + if (!do_cols) { + const int log_range = AOMMAX(16, bd + 6); + const int32x4_t clamp_lo = vdupq_n_s32(-(1 << (log_range - 1))); + const int32x4_t clamp_hi = vdupq_n_s32((1 << (log_range - 1)) - 1); + round_shift_4x4(out, out_shift); + round_shift_4x4(out + 4, out_shift); + highbd_clamp_s32_neon(out, out, &clamp_lo, &clamp_hi, 8); + } +} + +static uint16x8_t get_recon_8x8(const uint16x8_t pred, int32x4_t res_lo, + int32x4_t res_hi, int fliplr, int bd) { + uint16x8x2_t x; + + if (fliplr) { + res_lo = vrev64q_s32(res_lo); + res_lo = vextq_s32(res_lo, res_lo, 2); + res_hi = vrev64q_s32(res_hi); + res_hi = vextq_s32(res_hi, res_hi, 2); + x.val[0] = vreinterpretq_u16_s32( + vaddw_s16(res_hi, vreinterpret_s16_u16(vget_low_u16(pred)))); + x.val[1] = vreinterpretq_u16_s32( + vaddw_s16(res_lo, vreinterpret_s16_u16(vget_high_u16(pred)))); + + } else { + x.val[0] = vreinterpretq_u16_s32( + vaddw_s16(res_lo, vreinterpret_s16_u16(vget_low_u16(pred)))); + x.val[1] = vreinterpretq_u16_s32( + vaddw_s16(res_hi, vreinterpret_s16_u16(vget_high_u16(pred)))); + } + + uint16x8_t x2 = vcombine_u16(vqmovn_u32(vreinterpretq_u32_u16(x.val[0])), + vqmovn_u32(vreinterpretq_u32_u16(x.val[1]))); + const uint16x8_t vmin = vdupq_n_u16(0); + const uint16x8_t vmax = vdupq_n_u16((1 << bd) - 1); + return highbd_clamp_u16(&x2, &vmin, &vmax); +} + +static void write_buffer_8x8(int32x4_t *in, uint16_t *output, int stride, + int fliplr, int flipud, int shift, int bd) { + uint16x8_t u0, u1, u2, u3, u4, u5, u6, u7; + uint16x8_t v0, v1, v2, v3, v4, v5, v6, v7; + round_shift_8x8(in, shift); + + v0 = vld1q_u16(output + 0 * stride); + v1 = vld1q_u16(output + 1 * stride); + v2 = vld1q_u16(output + 2 * stride); + v3 = vld1q_u16(output + 3 * stride); + v4 = vld1q_u16(output + 4 * stride); + v5 = vld1q_u16(output + 5 * stride); + v6 = vld1q_u16(output + 6 * stride); + v7 = vld1q_u16(output + 7 * stride); + + if (flipud) { + u0 = get_recon_8x8(v0, in[14], in[15], fliplr, bd); + u1 = get_recon_8x8(v1, in[12], in[13], fliplr, bd); + u2 = get_recon_8x8(v2, in[10], in[11], fliplr, bd); + u3 = get_recon_8x8(v3, in[8], in[9], fliplr, bd); + u4 = get_recon_8x8(v4, in[6], in[7], fliplr, bd); + u5 = get_recon_8x8(v5, in[4], in[5], fliplr, bd); + u6 = get_recon_8x8(v6, in[2], in[3], fliplr, bd); + u7 = get_recon_8x8(v7, in[0], in[1], fliplr, bd); + } else { + u0 = get_recon_8x8(v0, in[0], in[1], fliplr, bd); + u1 = 
get_recon_8x8(v1, in[2], in[3], fliplr, bd); + u2 = get_recon_8x8(v2, in[4], in[5], fliplr, bd); + u3 = get_recon_8x8(v3, in[6], in[7], fliplr, bd); + u4 = get_recon_8x8(v4, in[8], in[9], fliplr, bd); + u5 = get_recon_8x8(v5, in[10], in[11], fliplr, bd); + u6 = get_recon_8x8(v6, in[12], in[13], fliplr, bd); + u7 = get_recon_8x8(v7, in[14], in[15], fliplr, bd); + } + + vst1q_u16(output + 0 * stride, u0); + vst1q_u16(output + 1 * stride, u1); + vst1q_u16(output + 2 * stride, u2); + vst1q_u16(output + 3 * stride, u3); + vst1q_u16(output + 4 * stride, u4); + vst1q_u16(output + 5 * stride, u5); + vst1q_u16(output + 6 * stride, u6); + vst1q_u16(output + 7 * stride, u7); +} + +void av1_inv_txfm2d_add_8x8_neon(const int32_t *input, uint16_t *output, + int stride, TX_TYPE tx_type, int bd) { + int32x4_t in[16], out[16]; + const int8_t *shift = av1_inv_txfm_shift_ls[TX_8X8]; + + switch (tx_type) { + case DCT_DCT: + load_buffer_8x8(input, in); + idct8x8_neon(in, out, INV_COS_BIT, 0, bd, -shift[0]); + transpose_8x8(out, in); + idct8x8_neon(in, out, INV_COS_BIT, 1, bd, 0); + write_buffer_8x8(out, output, stride, 0, 0, -shift[1], bd); + break; + case DCT_ADST: + load_buffer_8x8(input, in); + iadst8x8_neon(in, out, INV_COS_BIT, 0, bd, -shift[0]); + transpose_8x8(out, in); + idct8x8_neon(in, out, INV_COS_BIT, 1, bd, 0); + write_buffer_8x8(out, output, stride, 0, 0, -shift[1], bd); + break; + case ADST_DCT: + load_buffer_8x8(input, in); + idct8x8_neon(in, out, INV_COS_BIT, 0, bd, -shift[0]); + transpose_8x8(out, in); + iadst8x8_neon(in, out, INV_COS_BIT, 1, bd, 0); + write_buffer_8x8(out, output, stride, 0, 0, -shift[1], bd); + break; + case ADST_ADST: + load_buffer_8x8(input, in); + iadst8x8_neon(in, out, INV_COS_BIT, 0, bd, -shift[0]); + transpose_8x8(out, in); + iadst8x8_neon(in, out, INV_COS_BIT, 1, bd, 0); + write_buffer_8x8(out, output, stride, 0, 0, -shift[1], bd); + break; + case FLIPADST_DCT: + load_buffer_8x8(input, in); + idct8x8_neon(in, out, INV_COS_BIT, 0, bd, -shift[0]); + transpose_8x8(out, in); + iadst8x8_neon(in, out, INV_COS_BIT, 1, bd, 0); + write_buffer_8x8(out, output, stride, 0, 1, -shift[1], bd); + break; + case DCT_FLIPADST: + load_buffer_8x8(input, in); + iadst8x8_neon(in, out, INV_COS_BIT, 0, bd, -shift[0]); + transpose_8x8(out, in); + idct8x8_neon(in, out, INV_COS_BIT, 1, bd, 0); + write_buffer_8x8(out, output, stride, 1, 0, -shift[1], bd); + break; + case ADST_FLIPADST: + load_buffer_8x8(input, in); + iadst8x8_neon(in, out, INV_COS_BIT, 0, bd, -shift[0]); + transpose_8x8(out, in); + iadst8x8_neon(in, out, INV_COS_BIT, 1, bd, 0); + write_buffer_8x8(out, output, stride, 1, 0, -shift[1], bd); + break; + case FLIPADST_FLIPADST: + load_buffer_8x8(input, in); + iadst8x8_neon(in, out, INV_COS_BIT, 0, bd, -shift[0]); + transpose_8x8(out, in); + iadst8x8_neon(in, out, INV_COS_BIT, 1, bd, 0); + write_buffer_8x8(out, output, stride, 1, 1, -shift[1], bd); + break; + case FLIPADST_ADST: + load_buffer_8x8(input, in); + iadst8x8_neon(in, out, INV_COS_BIT, 0, bd, -shift[0]); + transpose_8x8(out, in); + iadst8x8_neon(in, out, INV_COS_BIT, 1, bd, 0); + write_buffer_8x8(out, output, stride, 0, 1, -shift[1], bd); + break; + default: assert(0); + } +} + +static void idct8x8_low1_neon(int32x4_t *in, int32x4_t *out, int bit, + int do_cols, int bd, int out_shift) { + const int32_t *cospi = cospi_arr(bit); + const int log_range = AOMMAX(16, bd + (do_cols ? 
6 : 8)); + int32x4_t clamp_lo = vdupq_n_s32(-(1 << (log_range - 1))); + int32x4_t clamp_hi = vdupq_n_s32((1 << (log_range - 1)) - 1); + int32x4_t x; + const int32x4_t v_bit = vdupq_n_s32(-bit); + const int32x4_t rnding = vdupq_n_s32(1 << (bit - 1)); + // stage 0-1-2-3 + x = vmulq_n_s32(in[0], cospi[32]); + x = vaddq_s32(vshlq_s32(x, v_bit), rnding); + + // stage 4-5 + if (!do_cols) { + const int log_range_out = AOMMAX(16, bd + 6); + clamp_lo = vdupq_n_s32(-(1 << (log_range_out - 1))); + clamp_hi = vdupq_n_s32((1 << (log_range_out - 1)) - 1); + + int32x4_t offset = vdupq_n_s32((1 << out_shift) >> 1); + x = vaddq_s32(x, offset); + x = vshlq_s32(x, vdupq_n_s32(-out_shift)); + } + + x = vmaxq_s32(x, clamp_lo); + x = vminq_s32(x, clamp_hi); + out[0] = x; + out[1] = x; + out[2] = x; + out[3] = x; + out[4] = x; + out[5] = x; + out[6] = x; + out[7] = x; +} + +static void idct8x8_new_neon(int32x4_t *in, int32x4_t *out, int bit, + int do_cols, int bd, int out_shift) { + const int32_t *cospi = cospi_arr(bit); + const int log_range = AOMMAX(16, bd + (do_cols ? 6 : 8)); + const int32x4_t clamp_lo = vdupq_n_s32(-(1 << (log_range - 1))); + const int32x4_t clamp_hi = vdupq_n_s32((1 << (log_range - 1)) - 1); + int32x4_t u0, u1, u2, u3, u4, u5, u6, u7; + int32x4_t v0, v1, v2, v3, v4, v5, v6, v7; + int32x4_t x, y; + const int32x4_t v_bit = vdupq_n_s32(-bit); + const int32x4_t rnding = vdupq_n_s32(1 << (bit - 1)); + + // stage 0 + // stage 1 + // stage 2 + u0 = in[0]; + u1 = in[4]; + u2 = in[2]; + u3 = in[6]; + + x = vmlaq_n_s32(rnding, in[1], cospi[56]); + u4 = vmlaq_n_s32(x, in[7], -cospi[8]); + u4 = vshlq_s32(u4, v_bit); + + x = vmlaq_n_s32(rnding, in[1], cospi[8]); + u7 = vmlaq_n_s32(x, in[7], cospi[56]); + u7 = vshlq_s32(u7, v_bit); + + x = vmlaq_n_s32(rnding, in[5], cospi[24]); + u5 = vmlaq_n_s32(x, in[3], -cospi[40]); + u5 = vshlq_s32(u5, v_bit); + + x = vmlaq_n_s32(rnding, in[5], cospi[40]); + u6 = vmlaq_n_s32(x, in[3], cospi[24]); + u6 = vshlq_s32(u6, v_bit); + + // stage 3 + x = vmlaq_n_s32(rnding, u0, cospi[32]); + y = vmulq_n_s32(u1, cospi[32]); + v0 = vaddq_s32(x, y); + v0 = vshlq_s32(v0, v_bit); + + v1 = vsubq_s32(x, y); + v1 = vshlq_s32(v1, v_bit); + + x = vmlaq_n_s32(rnding, u2, cospi[48]); + v2 = vmlaq_n_s32(x, u3, -cospi[16]); + v2 = vshlq_s32(v2, v_bit); + + x = vmlaq_n_s32(rnding, u2, cospi[16]); + v3 = vmlaq_n_s32(x, u3, cospi[48]); + v3 = vshlq_s32(v3, v_bit); + + addsub_neon(u4, u5, &v4, &v5, &clamp_lo, &clamp_hi); + addsub_neon(u7, u6, &v7, &v6, &clamp_lo, &clamp_hi); + + // stage 4 + addsub_neon(v0, v3, &u0, &u3, &clamp_lo, &clamp_hi); + addsub_neon(v1, v2, &u1, &u2, &clamp_lo, &clamp_hi); + u4 = v4; + u7 = v7; + + x = vmulq_n_s32(v5, cospi[32]); + y = vmlaq_n_s32(rnding, v6, cospi[32]); + u6 = vaddq_s32(y, x); + u6 = vshlq_s32(u6, v_bit); + + u5 = vsubq_s32(y, x); + u5 = vshlq_s32(u5, v_bit); + + // stage 5 + addsub_neon(u0, u7, out + 0, out + 7, &clamp_lo, &clamp_hi); + addsub_neon(u1, u6, out + 1, out + 6, &clamp_lo, &clamp_hi); + addsub_neon(u2, u5, out + 2, out + 5, &clamp_lo, &clamp_hi); + addsub_neon(u3, u4, out + 3, out + 4, &clamp_lo, &clamp_hi); + + if (!do_cols) { + const int log_range_out = AOMMAX(16, bd + 6); + const int32x4_t clamp_lo_out = vdupq_n_s32(-(1 << (log_range_out - 1))); + const int32x4_t clamp_hi_out = vdupq_n_s32((1 << (log_range_out - 1)) - 1); + round_shift_4x4(out, out_shift); + round_shift_4x4(out + 4, out_shift); + highbd_clamp_s32_neon(out, out, &clamp_lo_out, &clamp_hi_out, 8); + } +} + +static void iadst8x8_low1_neon(int32x4_t *in, int32x4_t *out, 
int bit, + int do_cols, int bd, int out_shift) { + const int32_t *cospi = cospi_arr(bit); + int32x4_t u[8], x; + const int32x4_t v_bit = vdupq_n_s32(-bit); + const int32x4_t rnding = vdupq_n_s32(1 << (bit - 1)); + // stage 0-2 + + u[0] = vmlaq_n_s32(rnding, in[0], cospi[60]); + u[0] = vshlq_s32(u[0], v_bit); + + u[1] = vmlaq_n_s32(rnding, in[0], cospi[4]); + u[1] = vshlq_s32(vnegq_s32(u[1]), v_bit); + + // stage 3-4 + int32x4_t temp1, temp2; + temp1 = vmlaq_n_s32(rnding, u[0], cospi[16]); + temp1 = vmlaq_n_s32(temp1, u[1], cospi[48]); + temp1 = vshlq_s32(temp1, v_bit); + u[4] = temp1; + + temp2 = vmlaq_n_s32(rnding, u[0], cospi[48]); + u[5] = vmlsq_n_s32(temp2, u[1], cospi[16]); + u[5] = vshlq_s32(u[5], v_bit); + + // stage 5-6 + temp1 = vmlaq_n_s32(rnding, u[0], cospi[32]); + x = vmulq_n_s32(u[1], cospi[32]); + u[2] = vaddq_s32(temp1, x); + u[2] = vshlq_s32(u[2], v_bit); + + u[3] = vsubq_s32(temp1, x); + u[3] = vshlq_s32(u[3], v_bit); + + temp1 = vmlaq_n_s32(rnding, u[4], cospi[32]); + x = vmulq_n_s32(u[5], cospi[32]); + u[6] = vaddq_s32(temp1, x); + u[6] = vshlq_s32(u[6], v_bit); + + u[7] = vsubq_s32(temp1, x); + u[7] = vshlq_s32(u[7], v_bit); + + // stage 7 + if (do_cols) { + out[0] = u[0]; + out[1] = vnegq_s32(u[4]); + out[2] = u[6]; + out[3] = vnegq_s32(u[2]); + out[4] = u[3]; + out[5] = vnegq_s32(u[7]); + out[6] = u[5]; + out[7] = vnegq_s32(u[1]); + } else { + const int log_range_out = AOMMAX(16, bd + 6); + const int32x4_t clamp_lo_out = vdupq_n_s32(-(1 << (log_range_out - 1))); + const int32x4_t clamp_hi_out = vdupq_n_s32((1 << (log_range_out - 1)) - 1); + const int32x4_t v_shift = vdupq_n_s32(-out_shift); + int32x4_t offset = vdupq_n_s32((1 << out_shift) >> 1); + neg_shift_neon(&u[0], &u[4], out + 0, out + 1, &clamp_lo_out, &clamp_hi_out, + &v_shift, &offset); + neg_shift_neon(&u[6], &u[2], out + 2, out + 3, &clamp_lo_out, &clamp_hi_out, + &v_shift, &offset); + neg_shift_neon(&u[3], &u[7], out + 4, out + 5, &clamp_lo_out, &clamp_hi_out, + &v_shift, &offset); + neg_shift_neon(&u[5], &u[1], out + 6, out + 7, &clamp_lo_out, &clamp_hi_out, + &v_shift, &offset); + } +} + +static void iadst8x8_new_neon(int32x4_t *in, int32x4_t *out, int bit, + int do_cols, int bd, int out_shift) { + const int32_t *cospi = cospi_arr(bit); + // const int32x4_t rnding = vdupq_n_s32(1 << (bit - 1)); + const int log_range = AOMMAX(16, bd + (do_cols ? 
6 : 8)); + const int32x4_t clamp_lo = vdupq_n_s32(-(1 << (log_range - 1))); + const int32x4_t clamp_hi = vdupq_n_s32((1 << (log_range - 1)) - 1); + int32x4_t u[8], v[8], x; + const int32x4_t v_bit = vdupq_n_s32(-bit); + const int32x4_t rnding = vdupq_n_s32(1 << (bit - 1)); + // stage 0-2 + + u[0] = vmlaq_n_s32(rnding, in[7], cospi[4]); + u[0] = vmlaq_n_s32(u[0], in[0], cospi[60]); + u[0] = vshlq_s32(u[0], v_bit); + + u[1] = vmlaq_n_s32(rnding, in[7], cospi[60]); + u[1] = vmlsq_n_s32(u[1], in[0], cospi[4]); + u[1] = vshlq_s32(u[1], v_bit); + + // (2) + u[2] = vmlaq_n_s32(rnding, in[5], cospi[20]); + u[2] = vmlaq_n_s32(u[2], in[2], cospi[44]); + u[2] = vshlq_s32(u[2], v_bit); + + u[3] = vmlaq_n_s32(rnding, in[5], cospi[44]); + u[3] = vmlsq_n_s32(u[3], in[2], cospi[20]); + u[3] = vshlq_s32(u[3], v_bit); + + // (3) + u[4] = vmlaq_n_s32(rnding, in[3], cospi[36]); + u[4] = vmlaq_n_s32(u[4], in[4], cospi[28]); + u[4] = vshlq_s32(u[4], v_bit); + + u[5] = vmlaq_n_s32(rnding, in[3], cospi[28]); + u[5] = vmlsq_n_s32(u[5], in[4], cospi[36]); + u[5] = vshlq_s32(u[5], v_bit); + + // (4) + u[6] = vmulq_n_s32(in[1], cospi[52]); + u[6] = vmlaq_n_s32(u[6], in[6], cospi[12]); + u[6] = vaddq_s32(u[6], rnding); + u[6] = vshlq_s32(u[6], v_bit); + + u[7] = vmulq_n_s32(in[1], cospi[12]); + u[7] = vmlsq_n_s32(u[7], in[6], cospi[52]); + u[7] = vaddq_s32(u[7], rnding); + u[7] = vshlq_s32(u[7], v_bit); + + // stage 3 + addsub_neon(u[0], u[4], &v[0], &v[4], &clamp_lo, &clamp_hi); + addsub_neon(u[1], u[5], &v[1], &v[5], &clamp_lo, &clamp_hi); + addsub_neon(u[2], u[6], &v[2], &v[6], &clamp_lo, &clamp_hi); + addsub_neon(u[3], u[7], &v[3], &v[7], &clamp_lo, &clamp_hi); + + // stage 4 + u[0] = v[0]; + u[1] = v[1]; + u[2] = v[2]; + u[3] = v[3]; + + u[4] = vmlaq_n_s32(rnding, v[4], cospi[16]); + u[4] = vmlaq_n_s32(u[4], v[5], cospi[48]); + u[4] = vshlq_s32(u[4], v_bit); + + u[5] = vmlaq_n_s32(rnding, v[4], cospi[48]); + u[5] = vmlsq_n_s32(u[5], v[5], cospi[16]); + u[5] = vshlq_s32(u[5], v_bit); + + u[6] = vmlsq_n_s32(rnding, v[6], cospi[48]); + u[6] = vmlaq_n_s32(u[6], v[7], cospi[16]); + u[6] = vshlq_s32(u[6], v_bit); + + u[7] = vmlaq_n_s32(rnding, v[6], cospi[16]); + u[7] = vmlaq_n_s32(u[7], v[7], cospi[48]); + u[7] = vshlq_s32(u[7], v_bit); + + // stage 5 + addsub_neon(u[0], u[2], &v[0], &v[2], &clamp_lo, &clamp_hi); + addsub_neon(u[1], u[3], &v[1], &v[3], &clamp_lo, &clamp_hi); + addsub_neon(u[4], u[6], &v[4], &v[6], &clamp_lo, &clamp_hi); + addsub_neon(u[5], u[7], &v[5], &v[7], &clamp_lo, &clamp_hi); + + // stage 6 + u[0] = v[0]; + u[1] = v[1]; + u[4] = v[4]; + u[5] = v[5]; + + v[0] = vmlaq_n_s32(rnding, v[2], cospi[32]); + x = vmulq_n_s32(v[3], cospi[32]); + u[2] = vaddq_s32(v[0], x); + u[2] = vshlq_s32(u[2], v_bit); + + u[3] = vsubq_s32(v[0], x); + u[3] = vshlq_s32(u[3], v_bit); + + v[0] = vmlaq_n_s32(rnding, v[6], cospi[32]); + x = vmulq_n_s32(v[7], cospi[32]); + u[6] = vaddq_s32(v[0], x); + u[6] = vshlq_s32(u[6], v_bit); + + u[7] = vsubq_s32(v[0], x); + u[7] = vshlq_s32(u[7], v_bit); + + // stage 7 + if (do_cols) { + out[0] = u[0]; + out[1] = vnegq_s32(u[4]); + out[2] = u[6]; + out[3] = vnegq_s32(u[2]); + out[4] = u[3]; + out[5] = vnegq_s32(u[7]); + out[6] = u[5]; + out[7] = vnegq_s32(u[1]); + } else { + const int log_range_out = AOMMAX(16, bd + 6); + const int32x4_t clamp_lo_out = vdupq_n_s32(-(1 << (log_range_out - 1))); + const int32x4_t clamp_hi_out = vdupq_n_s32((1 << (log_range_out - 1)) - 1); + const int32x4_t v_shift = vdupq_n_s32(-out_shift); + int32x4_t offset = vdupq_n_s32((1 << out_shift) >> 1); + 
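// neg_shift_neon is not shown here; from the do_cols branch above it appears + // to store its first input and the negation of its second (matching the + // explicit vnegq_s32 ordering), then apply the rounding offset, the right + // shift by out_shift and the clamp to [clamp_lo_out, clamp_hi_out]. + 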
neg_shift_neon(&u[0], &u[4], out + 0, out + 1, &clamp_lo_out, &clamp_hi_out, + &v_shift, &offset); + neg_shift_neon(&u[6], &u[2], out + 2, out + 3, &clamp_lo_out, &clamp_hi_out, + &v_shift, &offset); + neg_shift_neon(&u[3], &u[7], out + 4, out + 5, &clamp_lo_out, &clamp_hi_out, + &v_shift, &offset); + neg_shift_neon(&u[5], &u[1], out + 6, out + 7, &clamp_lo_out, &clamp_hi_out, + &v_shift, &offset); + } +} + +static void idct16x16_low1_neon(int32x4_t *in, int32x4_t *out, int bit, + int do_cols, int bd, int out_shift) { + const int32_t *cospi = cospi_arr(bit); + int log_range = AOMMAX(16, bd + (do_cols ? 6 : 8)); + int32x4_t clamp_lo = vdupq_n_s32(-(1 << (log_range - 1))); + int32x4_t clamp_hi = vdupq_n_s32((1 << (log_range - 1)) - 1); + const int32x4_t v_bit = vdupq_n_s32(-bit); + const int32x4_t rnding = vdupq_n_s32(1 << (bit - 1)); + // stage 0-4 + in[0] = vmlaq_n_s32(rnding, in[0], cospi[32]); + in[0] = vshlq_s32(in[0], v_bit); + + // stage 5-7 + if (!do_cols) { + log_range = AOMMAX(16, bd + 6); + clamp_lo = vdupq_n_s32(-(1 << (log_range - 1))); + clamp_hi = vdupq_n_s32((1 << (log_range - 1)) - 1); + if (out_shift != 0) { + int32x4_t offset = vdupq_n_s32((1 << out_shift) >> 1); + in[0] = vaddq_s32(in[0], offset); + in[0] = vshlq_s32(in[0], vdupq_n_s32(-out_shift)); + } + } + + in[0] = vmaxq_s32(in[0], clamp_lo); + in[0] = vminq_s32(in[0], clamp_hi); + out[0] = in[0]; + out[1] = in[0]; + out[2] = in[0]; + out[3] = in[0]; + out[4] = in[0]; + out[5] = in[0]; + out[6] = in[0]; + out[7] = in[0]; + out[8] = in[0]; + out[9] = in[0]; + out[10] = in[0]; + out[11] = in[0]; + out[12] = in[0]; + out[13] = in[0]; + out[14] = in[0]; + out[15] = in[0]; +} + +static void idct16x16_low8_neon(int32x4_t *in, int32x4_t *out, int bit, + int do_cols, int bd, int out_shift) { + const int32_t *cospi = cospi_arr(bit); + const int log_range = AOMMAX(16, bd + (do_cols ? 
6 : 8)); + const int32x4_t clamp_lo = vdupq_n_s32(-(1 << (log_range - 1))); + const int32x4_t clamp_hi = vdupq_n_s32((1 << (log_range - 1)) - 1); + const int32x4_t v_bit = vdupq_n_s32(-bit); + const int32x4_t rnding = vdupq_n_s32(1 << (bit - 1)); + int32x4_t u[16], x, y; + // stage 0-1 + u[0] = in[0]; + u[2] = in[4]; + u[4] = in[2]; + u[6] = in[6]; + u[8] = in[1]; + u[10] = in[5]; + u[12] = in[3]; + u[14] = in[7]; + + // stage 2 + u[15] = half_btf_0_neon_r(&cospi[4], &u[8], &v_bit, &rnding); + u[8] = half_btf_0_neon_r(&cospi[60], &u[8], &v_bit, &rnding); + + u[9] = half_btf_0_m_neon_r(&cospi[36], &u[14], &v_bit, &rnding); + u[14] = half_btf_0_neon_r(&cospi[28], &u[14], &v_bit, &rnding); + + u[13] = half_btf_0_neon_r(&cospi[20], &u[10], &v_bit, &rnding); + u[10] = half_btf_0_neon_r(&cospi[44], &u[10], &v_bit, &rnding); + + u[11] = half_btf_0_m_neon_r(&cospi[52], &u[12], &v_bit, &rnding); + u[12] = half_btf_0_neon_r(&cospi[12], &u[12], &v_bit, &rnding); + + // stage 3 + u[7] = half_btf_0_neon_r(&cospi[8], &u[4], &v_bit, &rnding); + u[4] = half_btf_0_neon_r(&cospi[56], &u[4], &v_bit, &rnding); + u[5] = half_btf_0_m_neon_r(&cospi[40], &u[6], &v_bit, &rnding); + u[6] = half_btf_0_neon_r(&cospi[24], &u[6], &v_bit, &rnding); + + addsub_neon(u[8], u[9], &u[8], &u[9], &clamp_lo, &clamp_hi); + addsub_neon(u[11], u[10], &u[11], &u[10], &clamp_lo, &clamp_hi); + addsub_neon(u[12], u[13], &u[12], &u[13], &clamp_lo, &clamp_hi); + addsub_neon(u[15], u[14], &u[15], &u[14], &clamp_lo, &clamp_hi); + + // stage 4 + x = vmlaq_n_s32(rnding, u[0], cospi[32]); + u[0] = vshlq_s32(x, v_bit); + u[1] = u[0]; + + u[3] = half_btf_0_neon_r(&cospi[16], &u[2], &v_bit, &rnding); + u[2] = half_btf_0_neon_r(&cospi[48], &u[2], &v_bit, &rnding); + + addsub_neon(u[4], u[5], &u[4], &u[5], &clamp_lo, &clamp_hi); + addsub_neon(u[7], u[6], &u[7], &u[6], &clamp_lo, &clamp_hi); + + x = half_btf_neon_mode10_r(&cospi[16], &u[9], &cospi[48], &u[14], &v_bit, + &rnding); + u[14] = + half_btf_neon_r(&cospi[48], &u[9], &cospi[16], &u[14], &v_bit, &rnding); + u[9] = x; + y = half_btf_neon_mode11_r(&cospi[48], &u[10], &cospi[16], &u[13], &v_bit, + &rnding); + u[13] = half_btf_neon_mode10_r(&cospi[16], &u[10], &cospi[48], &u[13], &v_bit, + &rnding); + u[10] = y; + + // stage 5 + addsub_neon(u[0], u[3], &u[0], &u[3], &clamp_lo, &clamp_hi); + addsub_neon(u[1], u[2], &u[1], &u[2], &clamp_lo, &clamp_hi); + + x = vmulq_n_s32(u[5], cospi[32]); + y = vmlaq_n_s32(rnding, u[6], cospi[32]); + u[5] = vsubq_s32(y, x); + u[5] = vshlq_s32(u[5], v_bit); + + u[6] = vaddq_s32(y, x); + u[6] = vshlq_s32(u[6], v_bit); + + addsub_neon(u[8], u[11], &u[8], &u[11], &clamp_lo, &clamp_hi); + addsub_neon(u[9], u[10], &u[9], &u[10], &clamp_lo, &clamp_hi); + addsub_neon(u[15], u[12], &u[15], &u[12], &clamp_lo, &clamp_hi); + addsub_neon(u[14], u[13], &u[14], &u[13], &clamp_lo, &clamp_hi); + + // stage 6 + addsub_neon(u[0], u[7], &u[0], &u[7], &clamp_lo, &clamp_hi); + addsub_neon(u[1], u[6], &u[1], &u[6], &clamp_lo, &clamp_hi); + addsub_neon(u[2], u[5], &u[2], &u[5], &clamp_lo, &clamp_hi); + addsub_neon(u[3], u[4], &u[3], &u[4], &clamp_lo, &clamp_hi); + + x = vmulq_n_s32(u[10], cospi[32]); + y = vmlaq_n_s32(rnding, u[13], cospi[32]); + u[10] = vsubq_s32(y, x); + u[10] = vshlq_s32(u[10], v_bit); + + u[13] = vaddq_s32(x, y); + u[13] = vshlq_s32(u[13], v_bit); + + x = vmulq_n_s32(u[11], cospi[32]); + y = vmlaq_n_s32(rnding, u[12], cospi[32]); + u[11] = vsubq_s32(y, x); + u[11] = vshlq_s32(u[11], v_bit); + + u[12] = vaddq_s32(x, y); + u[12] = vshlq_s32(u[12], v_bit); + // stage 
7 + addsub_neon(u[0], u[15], out + 0, out + 15, &clamp_lo, &clamp_hi); + addsub_neon(u[1], u[14], out + 1, out + 14, &clamp_lo, &clamp_hi); + addsub_neon(u[2], u[13], out + 2, out + 13, &clamp_lo, &clamp_hi); + addsub_neon(u[3], u[12], out + 3, out + 12, &clamp_lo, &clamp_hi); + addsub_neon(u[4], u[11], out + 4, out + 11, &clamp_lo, &clamp_hi); + addsub_neon(u[5], u[10], out + 5, out + 10, &clamp_lo, &clamp_hi); + addsub_neon(u[6], u[9], out + 6, out + 9, &clamp_lo, &clamp_hi); + addsub_neon(u[7], u[8], out + 7, out + 8, &clamp_lo, &clamp_hi); + + if (!do_cols) { + const int log_range_out = AOMMAX(16, bd + 6); + const int32x4_t clamp_lo_out = vdupq_n_s32(-(1 << (log_range_out - 1))); + const int32x4_t clamp_hi_out = vdupq_n_s32((1 << (log_range_out - 1)) - 1); + round_shift_8x8(out, out_shift); + highbd_clamp_s32_neon(out, out, &clamp_lo_out, &clamp_hi_out, 16); + } +} + +static void iadst16x16_low1_neon(int32x4_t *in, int32x4_t *out, int bit, + int do_cols, int bd, int out_shift) { + const int32_t *cospi = cospi_arr(bit); + int32x4_t v[16], x, y, temp1, temp2; + const int32x4_t v_bit = vdupq_n_s32(-bit); + const int32x4_t rnding = vdupq_n_s32(1 << (bit - 1)); + // stage 0 + // stage 1 + // stage 2 + v[0] = vmlaq_n_s32(rnding, in[0], cospi[62]); + v[0] = vshlq_s32(v[0], v_bit); + + v[1] = vmlsq_n_s32(rnding, in[0], cospi[2]); + v[1] = vshlq_s32(v[1], v_bit); + + // stage 3 + v[8] = v[0]; + v[9] = v[1]; + + // stage 4 + temp1 = vmlaq_n_s32(rnding, v[8], cospi[8]); + temp1 = vmlaq_n_s32(temp1, v[9], cospi[56]); + temp1 = vshlq_s32(temp1, v_bit); + + temp2 = vmlaq_n_s32(rnding, v[8], cospi[56]); + temp2 = vmlsq_n_s32(temp2, v[9], cospi[8]); + temp2 = vshlq_s32(temp2, v_bit); + v[8] = temp1; + v[9] = temp2; + + // stage 5 + v[4] = v[0]; + v[5] = v[1]; + v[12] = v[8]; + v[13] = v[9]; + + // stage 6 + temp1 = vmlaq_n_s32(rnding, v[4], cospi[16]); + temp1 = vmlaq_n_s32(temp1, v[5], cospi[48]); + temp1 = vshlq_s32(temp1, v_bit); + + temp2 = vmlaq_n_s32(rnding, v[4], cospi[48]); + temp2 = vmlsq_n_s32(temp2, v[5], cospi[16]); + temp2 = vshlq_s32(temp2, v_bit); + v[4] = temp1; + v[5] = temp2; + + temp1 = vmlaq_n_s32(rnding, v[12], cospi[16]); + temp1 = vmlaq_n_s32(temp1, v[13], cospi[48]); + temp1 = vshlq_s32(temp1, v_bit); + + temp2 = vmlaq_n_s32(rnding, v[12], cospi[48]); + temp2 = vmlsq_n_s32(temp2, v[13], cospi[16]); + temp2 = vshlq_s32(temp2, v_bit); + v[12] = temp1; + v[13] = temp2; + + // stage 7 + v[2] = v[0]; + v[3] = v[1]; + v[6] = v[4]; + v[7] = v[5]; + v[10] = v[8]; + v[11] = v[9]; + v[14] = v[12]; + v[15] = v[13]; + + // stage 8 + y = vmlaq_n_s32(rnding, v[2], cospi[32]); + x = vmulq_n_s32(v[3], cospi[32]); + v[2] = vaddq_s32(y, x); + v[2] = vshlq_s32(v[2], v_bit); + + v[3] = vsubq_s32(y, x); + v[3] = vshlq_s32(v[3], v_bit); + + y = vmlaq_n_s32(rnding, v[6], cospi[32]); + x = vmulq_n_s32(v[7], cospi[32]); + v[6] = vaddq_s32(y, x); + v[6] = vshlq_s32(v[6], v_bit); + + v[7] = vsubq_s32(y, x); + v[7] = vshlq_s32(v[7], v_bit); + + y = vmlaq_n_s32(rnding, v[10], cospi[32]); + x = vmulq_n_s32(v[11], cospi[32]); + v[10] = vaddq_s32(y, x); + v[10] = vshlq_s32(v[10], v_bit); + + v[11] = vsubq_s32(y, x); + v[11] = vshlq_s32(v[11], v_bit); + + y = vmlaq_n_s32(rnding, v[14], cospi[32]); + x = vmulq_n_s32(v[15], cospi[32]); + v[14] = vaddq_s32(y, x); + v[14] = vshlq_s32(v[14], v_bit); + + v[15] = vsubq_s32(y, x); + v[15] = vshlq_s32(v[15], v_bit); + + // stage 9 + if (do_cols) { + out[0] = v[0]; + out[1] = vnegq_s32(v[8]); + out[2] = v[12]; + out[3] = vnegq_s32(v[4]); + out[4] = v[6]; + out[5] = 
vnegq_s32(v[14]); + out[6] = v[10]; + out[7] = vnegq_s32(v[2]); + out[8] = v[3]; + out[9] = vnegq_s32(v[11]); + out[10] = v[15]; + out[11] = vnegq_s32(v[7]); + out[12] = v[5]; + out[13] = vnegq_s32(v[13]); + out[14] = v[9]; + out[15] = vnegq_s32(v[1]); + } else { + const int log_range_out = AOMMAX(16, bd + 6); + const int32x4_t clamp_lo_out = vdupq_n_s32(-(1 << (log_range_out - 1))); + const int32x4_t clamp_hi_out = vdupq_n_s32((1 << (log_range_out - 1)) - 1); + const int32x4_t v_shift = vdupq_n_s32(-out_shift); + int32x4_t offset = vdupq_n_s32((1 << out_shift) >> 1); + neg_shift_neon(&v[0], &v[8], out + 0, out + 1, &clamp_lo_out, &clamp_hi_out, + &v_shift, &offset); + neg_shift_neon(&v[12], &v[4], out + 2, out + 3, &clamp_lo_out, + &clamp_hi_out, &v_shift, &offset); + neg_shift_neon(&v[6], &v[14], out + 4, out + 5, &clamp_lo_out, + &clamp_hi_out, &v_shift, &offset); + neg_shift_neon(&v[10], &v[2], out + 6, out + 7, &clamp_lo_out, + &clamp_hi_out, &v_shift, &offset); + neg_shift_neon(&v[3], &v[11], out + 8, out + 9, &clamp_lo_out, + &clamp_hi_out, &v_shift, &offset); + neg_shift_neon(&v[15], &v[7], out + 10, out + 11, &clamp_lo_out, + &clamp_hi_out, &v_shift, &offset); + neg_shift_neon(&v[5], &v[13], out + 12, out + 13, &clamp_lo_out, + &clamp_hi_out, &v_shift, &offset); + neg_shift_neon(&v[9], &v[1], out + 14, out + 15, &clamp_lo_out, + &clamp_hi_out, &v_shift, &offset); + } +} + +static void iadst16x16_low8_neon(int32x4_t *in, int32x4_t *out, int bit, + int do_cols, int bd, int out_shift) { + const int32_t *cospi = cospi_arr(bit); + const int log_range = AOMMAX(16, bd + (do_cols ? 6 : 8)); + const int32x4_t clamp_lo = vdupq_n_s32(-(1 << (log_range - 1))); + const int32x4_t clamp_hi = vdupq_n_s32((1 << (log_range - 1)) - 1); + int32x4_t zero = vdupq_n_s32(0); + int32x4_t u[16], x, y; + const int32x4_t v_bit = vdupq_n_s32(-bit); + const int32x4_t rnding = vdupq_n_s32(1 << (bit - 1)); + // stage 0-2 + u[0] = vmlaq_n_s32(rnding, in[0], cospi[62]); + u[0] = vshlq_s32(u[0], v_bit); + + u[1] = vmlsq_n_s32(rnding, in[0], cospi[2]); + u[1] = vshlq_s32(u[1], v_bit); + + u[2] = vmlaq_n_s32(rnding, in[2], cospi[54]); + u[2] = vshlq_s32(u[2], v_bit); + + u[3] = vmlsq_n_s32(rnding, in[2], cospi[10]); + u[3] = vshlq_s32(u[3], v_bit); + + u[4] = vmlaq_n_s32(rnding, in[4], cospi[46]); + u[4] = vshlq_s32(u[4], v_bit); + + u[5] = vmlsq_n_s32(rnding, in[4], cospi[18]); + u[5] = vshlq_s32(u[5], v_bit); + + u[6] = vmlaq_n_s32(rnding, in[6], cospi[38]); + u[6] = vshlq_s32(u[6], v_bit); + + u[7] = vmlsq_n_s32(rnding, in[6], cospi[26]); + u[7] = vshlq_s32(u[7], v_bit); + + u[8] = vmlaq_n_s32(rnding, in[7], cospi[34]); + u[8] = vshlq_s32(u[8], v_bit); + + u[9] = vmlaq_n_s32(rnding, in[7], cospi[30]); + u[9] = vshlq_s32(u[9], v_bit); + + u[10] = vmlaq_n_s32(rnding, in[5], cospi[42]); + u[10] = vshlq_s32(u[10], v_bit); + + u[11] = vmlaq_n_s32(rnding, in[5], cospi[22]); + u[11] = vshlq_s32(u[11], v_bit); + + u[12] = vmlaq_n_s32(rnding, in[3], cospi[50]); + u[12] = vshlq_s32(u[12], v_bit); + + u[13] = vmlaq_n_s32(rnding, in[3], cospi[14]); + u[13] = vshlq_s32(u[13], v_bit); + + u[14] = vmlaq_n_s32(rnding, in[1], cospi[58]); + u[14] = vshlq_s32(u[14], v_bit); + + u[15] = vmlaq_n_s32(rnding, in[1], cospi[6]); + u[15] = vshlq_s32(u[15], v_bit); + + // stage 3 + addsub_neon(u[0], u[8], &u[0], &u[8], &clamp_lo, &clamp_hi); + addsub_neon(u[1], u[9], &u[1], &u[9], &clamp_lo, &clamp_hi); + addsub_neon(u[2], u[10], &u[2], &u[10], &clamp_lo, &clamp_hi); + addsub_neon(u[3], u[11], &u[3], &u[11], &clamp_lo, &clamp_hi); + 
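// stage 3 (continued): butterfly add/sub of the pairs u[i], u[i+8]; + // addsub_neon presumably clamps both results to [clamp_lo, clamp_hi]. + 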
addsub_neon(u[4], u[12], &u[4], &u[12], &clamp_lo, &clamp_hi); + addsub_neon(u[5], u[13], &u[5], &u[13], &clamp_lo, &clamp_hi); + addsub_neon(u[6], u[14], &u[6], &u[14], &clamp_lo, &clamp_hi); + addsub_neon(u[7], u[15], &u[7], &u[15], &clamp_lo, &clamp_hi); + + // stage 4 + y = vmlaq_n_s32(rnding, u[8], cospi[56]); + u[8] = vmlaq_n_s32(rnding, u[8], cospi[8]); + u[8] = vmlaq_n_s32(u[8], u[9], cospi[56]); + u[8] = vshlq_s32(u[8], v_bit); + + u[9] = vmlsq_n_s32(y, u[9], cospi[8]); + u[9] = vshlq_s32(u[9], v_bit); + + y = vmlaq_n_s32(rnding, u[10], cospi[24]); + u[10] = vmlaq_n_s32(rnding, u[10], cospi[40]); + u[10] = vmlaq_n_s32(u[10], u[11], cospi[24]); + u[10] = vshlq_s32(u[10], v_bit); + + u[11] = vmlsq_n_s32(y, u[11], cospi[40]); + u[11] = vshlq_s32(u[11], v_bit); + + y = vmlaq_n_s32(rnding, u[12], cospi[8]); + u[12] = vmlsq_n_s32(rnding, u[12], cospi[56]); + u[12] = vmlaq_n_s32(u[12], u[13], cospi[8]); + u[12] = vshlq_s32(u[12], v_bit); + + u[13] = vmlaq_n_s32(y, u[13], cospi[56]); + u[13] = vshlq_s32(u[13], v_bit); + + y = vmlaq_n_s32(rnding, u[14], cospi[40]); + u[14] = vmlsq_n_s32(rnding, u[14], cospi[24]); + u[14] = vmlaq_n_s32(u[14], u[15], cospi[40]); + u[14] = vshlq_s32(u[14], v_bit); + + u[15] = vmlaq_n_s32(y, u[15], cospi[24]); + u[15] = vshlq_s32(u[15], v_bit); + + // stage 5 + addsub_neon(u[0], u[4], &u[0], &u[4], &clamp_lo, &clamp_hi); + addsub_neon(u[1], u[5], &u[1], &u[5], &clamp_lo, &clamp_hi); + addsub_neon(u[2], u[6], &u[2], &u[6], &clamp_lo, &clamp_hi); + addsub_neon(u[3], u[7], &u[3], &u[7], &clamp_lo, &clamp_hi); + addsub_neon(u[8], u[12], &u[8], &u[12], &clamp_lo, &clamp_hi); + addsub_neon(u[9], u[13], &u[9], &u[13], &clamp_lo, &clamp_hi); + addsub_neon(u[10], u[14], &u[10], &u[14], &clamp_lo, &clamp_hi); + addsub_neon(u[11], u[15], &u[11], &u[15], &clamp_lo, &clamp_hi); + + // stage 6 + y = vmlaq_n_s32(rnding, u[4], cospi[48]); + u[4] = vmlaq_n_s32(rnding, u[4], cospi[16]); + u[4] = vmlaq_n_s32(u[4], u[5], cospi[48]); + u[4] = vshlq_s32(u[4], v_bit); + + u[5] = vmlsq_n_s32(y, u[5], cospi[16]); + u[5] = vshlq_s32(u[5], v_bit); + + y = vmlaq_n_s32(rnding, u[6], cospi[16]); + u[6] = vmlsq_n_s32(rnding, u[6], cospi[48]); + u[6] = vmlaq_n_s32(u[6], u[7], cospi[16]); + u[6] = vshlq_s32(u[6], v_bit); + + u[7] = vmlaq_n_s32(y, u[7], cospi[48]); + u[7] = vshlq_s32(u[7], v_bit); + + y = vmlaq_n_s32(rnding, u[12], cospi[48]); + u[12] = vmulq_n_s32(u[12], cospi[16]); + u[12] = vmlaq_n_s32(u[12], u[13], cospi[48]); + u[12] = vshlq_s32(u[12], v_bit); + + u[13] = vmlsq_n_s32(y, u[13], cospi[16]); + u[13] = vshlq_s32(u[13], v_bit); + + y = vmlaq_n_s32(rnding, u[14], cospi[16]); + u[14] = vmlsq_n_s32(rnding, u[14], cospi[48]); + u[14] = vmlaq_n_s32(u[14], u[15], cospi[16]); + u[14] = vshlq_s32(u[14], v_bit); + + u[15] = vmlaq_n_s32(y, u[15], cospi[48]); + u[15] = vshlq_s32(u[15], v_bit); + + // stage 7 + addsub_neon(u[0], u[2], &u[0], &u[2], &clamp_lo, &clamp_hi); + addsub_neon(u[1], u[3], &u[1], &u[3], &clamp_lo, &clamp_hi); + addsub_neon(u[4], u[6], &u[4], &u[6], &clamp_lo, &clamp_hi); + addsub_neon(u[5], u[7], &u[5], &u[7], &clamp_lo, &clamp_hi); + addsub_neon(u[8], u[10], &u[8], &u[10], &clamp_lo, &clamp_hi); + addsub_neon(u[9], u[11], &u[9], &u[11], &clamp_lo, &clamp_hi); + addsub_neon(u[12], u[14], &u[12], &u[14], &clamp_lo, &clamp_hi); + addsub_neon(u[13], u[15], &u[13], &u[15], &clamp_lo, &clamp_hi); + + // stage 8 + y = vmlaq_n_s32(rnding, u[2], cospi[32]); + x = vmulq_n_s32(u[3], cospi[32]); + u[2] = vaddq_s32(y, x); + u[2] = vshlq_s32(u[2], v_bit); + + u[3] = 
vsubq_s32(y, x); + u[3] = vshlq_s32(u[3], v_bit); + y = vmlaq_n_s32(rnding, u[6], cospi[32]); + x = vmulq_n_s32(u[7], cospi[32]); + u[6] = vaddq_s32(y, x); + u[6] = vshlq_s32(u[6], v_bit); + + u[7] = vsubq_s32(y, x); + u[7] = vshlq_s32(u[7], v_bit); + + y = vmlaq_n_s32(rnding, u[10], cospi[32]); + x = vmulq_n_s32(u[11], cospi[32]); + u[10] = vaddq_s32(y, x); + u[10] = vshlq_s32(u[10], v_bit); + + u[11] = vsubq_s32(y, x); + u[11] = vshlq_s32(u[11], v_bit); + + y = vmlaq_n_s32(rnding, u[14], cospi[32]); + x = vmulq_n_s32(u[15], cospi[32]); + u[14] = vaddq_s32(y, x); + u[14] = vshlq_s32(u[14], v_bit); + + u[15] = vsubq_s32(y, x); + u[15] = vshlq_s32(u[15], v_bit); + + // stage 9 + if (do_cols) { + out[0] = u[0]; + out[1] = vsubq_s32(zero, u[8]); + out[2] = u[12]; + out[3] = vsubq_s32(zero, u[4]); + out[4] = u[6]; + out[5] = vsubq_s32(zero, u[14]); + out[6] = u[10]; + out[7] = vsubq_s32(zero, u[2]); + out[8] = u[3]; + out[9] = vsubq_s32(zero, u[11]); + out[10] = u[15]; + out[11] = vsubq_s32(zero, u[7]); + out[12] = u[5]; + out[13] = vsubq_s32(zero, u[13]); + out[14] = u[9]; + out[15] = vsubq_s32(zero, u[1]); + } else { + const int log_range_out = AOMMAX(16, bd + 6); + const int32x4_t clamp_lo_out = vdupq_n_s32(-(1 << (log_range_out - 1))); + const int32x4_t clamp_hi_out = vdupq_n_s32((1 << (log_range_out - 1)) - 1); + const int32x4_t v_shift = vdupq_n_s32(-out_shift); + int32x4_t offset = vdupq_n_s32((1 << out_shift) >> 1); + neg_shift_neon(&u[0], &u[8], out + 0, out + 1, &clamp_lo_out, &clamp_hi_out, + &v_shift, &offset); + neg_shift_neon(&u[12], &u[4], out + 2, out + 3, &clamp_lo_out, + &clamp_hi_out, &v_shift, &offset); + neg_shift_neon(&u[6], &u[14], out + 4, out + 5, &clamp_lo_out, + &clamp_hi_out, &v_shift, &offset); + neg_shift_neon(&u[10], &u[2], out + 6, out + 7, &clamp_lo_out, + &clamp_hi_out, &v_shift, &offset); + neg_shift_neon(&u[3], &u[11], out + 8, out + 9, &clamp_lo_out, + &clamp_hi_out, &v_shift, &offset); + neg_shift_neon(&u[15], &u[7], out + 10, out + 11, &clamp_lo_out, + &clamp_hi_out, &v_shift, &offset); + neg_shift_neon(&u[5], &u[13], out + 12, out + 13, &clamp_lo_out, + &clamp_hi_out, &v_shift, &offset); + neg_shift_neon(&u[9], &u[1], out + 14, out + 15, &clamp_lo_out, + &clamp_hi_out, &v_shift, &offset); + } +} + +static void idct16x16_neon(int32x4_t *in, int32x4_t *out, int bit, int do_cols, + int bd, int out_shift) { + const int32_t *cospi = cospi_arr(bit); + const int log_range = AOMMAX(16, bd + (do_cols ? 
6 : 8)); + const int32x4_t clamp_lo = vdupq_n_s32(-(1 << (log_range - 1))); + const int32x4_t clamp_hi = vdupq_n_s32((1 << (log_range - 1)) - 1); + int32x4_t u[16], v[16], x, y; + const int32x4_t v_bit = vdupq_n_s32(-bit); + const int32x4_t rnding = vdupq_n_s32(1 << (bit - 1)); + + { + // stage 0-1 + u[0] = in[0]; + u[1] = in[8]; + u[2] = in[4]; + u[3] = in[12]; + u[4] = in[2]; + u[5] = in[10]; + u[6] = in[6]; + u[7] = in[14]; + u[8] = in[1]; + u[9] = in[9]; + u[10] = in[5]; + u[11] = in[13]; + u[12] = in[3]; + u[13] = in[11]; + u[14] = in[7]; + u[15] = in[15]; + + // stage 2 + v[0] = u[0]; + v[1] = u[1]; + v[2] = u[2]; + v[3] = u[3]; + v[4] = u[4]; + v[5] = u[5]; + v[6] = u[6]; + v[7] = u[7]; + + v[8] = half_btf_neon_mode01_r(&cospi[60], &u[8], &cospi[4], &u[15], &v_bit, + &rnding); + v[9] = half_btf_neon_mode01_r(&cospi[28], &u[9], &cospi[36], &u[14], &v_bit, + &rnding); + v[10] = half_btf_neon_mode01_r(&cospi[44], &u[10], &cospi[20], &u[13], + &v_bit, &rnding); + v[11] = half_btf_neon_mode01_r(&cospi[12], &u[11], &cospi[52], &u[12], + &v_bit, &rnding); + v[12] = half_btf_neon_r(&cospi[52], &u[11], &cospi[12], &u[12], &v_bit, + &rnding); + v[13] = half_btf_neon_r(&cospi[20], &u[10], &cospi[44], &u[13], &v_bit, + &rnding); + v[14] = + half_btf_neon_r(&cospi[36], &u[9], &cospi[28], &u[14], &v_bit, &rnding); + v[15] = + half_btf_neon_r(&cospi[4], &u[8], &cospi[60], &u[15], &v_bit, &rnding); + + // stage 3 + u[0] = v[0]; + u[1] = v[1]; + u[2] = v[2]; + u[3] = v[3]; + u[4] = half_btf_neon_mode01_r(&cospi[56], &v[4], &cospi[8], &v[7], &v_bit, + &rnding); + u[5] = half_btf_neon_mode01_r(&cospi[24], &v[5], &cospi[40], &v[6], &v_bit, + &rnding); + u[6] = + half_btf_neon_r(&cospi[40], &v[5], &cospi[24], &v[6], &v_bit, &rnding); + u[7] = + half_btf_neon_r(&cospi[8], &v[4], &cospi[56], &v[7], &v_bit, &rnding); + addsub_neon(v[8], v[9], &u[8], &u[9], &clamp_lo, &clamp_hi); + addsub_neon(v[11], v[10], &u[11], &u[10], &clamp_lo, &clamp_hi); + addsub_neon(v[12], v[13], &u[12], &u[13], &clamp_lo, &clamp_hi); + addsub_neon(v[15], v[14], &u[15], &u[14], &clamp_lo, &clamp_hi); + + // stage 4 + x = vmlaq_n_s32(rnding, u[0], cospi[32]); + y = vmulq_n_s32(u[1], cospi[32]); + v[0] = vaddq_s32(x, y); + v[0] = vshlq_s32(v[0], v_bit); + + v[1] = vsubq_s32(x, y); + v[1] = vshlq_s32(v[1], v_bit); + + v[2] = half_btf_neon_mode01_r(&cospi[48], &u[2], &cospi[16], &u[3], &v_bit, + &rnding); + v[3] = + half_btf_neon_r(&cospi[16], &u[2], &cospi[48], &u[3], &v_bit, &rnding); + addsub_neon(u[4], u[5], &v[4], &v[5], &clamp_lo, &clamp_hi); + addsub_neon(u[7], u[6], &v[7], &v[6], &clamp_lo, &clamp_hi); + v[8] = u[8]; + v[9] = half_btf_neon_mode10_r(&cospi[16], &u[9], &cospi[48], &u[14], &v_bit, + &rnding); + v[10] = half_btf_neon_mode11_r(&cospi[48], &u[10], &cospi[16], &u[13], + &v_bit, &rnding); + v[11] = u[11]; + v[12] = u[12]; + v[13] = half_btf_neon_mode10_r(&cospi[16], &u[10], &cospi[48], &u[13], + &v_bit, &rnding); + v[14] = + half_btf_neon_r(&cospi[48], &u[9], &cospi[16], &u[14], &v_bit, &rnding); + v[15] = u[15]; + + // stage 5 + addsub_neon(v[0], v[3], &u[0], &u[3], &clamp_lo, &clamp_hi); + addsub_neon(v[1], v[2], &u[1], &u[2], &clamp_lo, &clamp_hi); + u[4] = v[4]; + + x = vmulq_n_s32(v[5], cospi[32]); + y = vmlaq_n_s32(rnding, v[6], cospi[32]); + u[5] = vsubq_s32(y, x); + u[5] = vshlq_s32(u[5], v_bit); + + u[6] = vaddq_s32(y, x); + u[6] = vshlq_s32(u[6], v_bit); + + u[7] = v[7]; + addsub_neon(v[8], v[11], &u[8], &u[11], &clamp_lo, &clamp_hi); + addsub_neon(v[9], v[10], &u[9], &u[10], &clamp_lo, &clamp_hi); + 
addsub_neon(v[15], v[12], &u[15], &u[12], &clamp_lo, &clamp_hi); + addsub_neon(v[14], v[13], &u[14], &u[13], &clamp_lo, &clamp_hi); + + // stage 6 + addsub_neon(u[0], u[7], &v[0], &v[7], &clamp_lo, &clamp_hi); + addsub_neon(u[1], u[6], &v[1], &v[6], &clamp_lo, &clamp_hi); + addsub_neon(u[2], u[5], &v[2], &v[5], &clamp_lo, &clamp_hi); + addsub_neon(u[3], u[4], &v[3], &v[4], &clamp_lo, &clamp_hi); + v[8] = u[8]; + v[9] = u[9]; + + x = vmulq_n_s32(u[10], cospi[32]); + y = vmlaq_n_s32(rnding, u[13], cospi[32]); + v[10] = vsubq_s32(y, x); + v[10] = vshlq_s32(v[10], v_bit); + + v[13] = vaddq_s32(x, y); + v[13] = vshlq_s32(v[13], v_bit); + + x = vmulq_n_s32(u[11], cospi[32]); + y = vmlaq_n_s32(rnding, u[12], cospi[32]); + v[11] = vsubq_s32(y, x); + v[11] = vshlq_s32(v[11], v_bit); + + v[12] = vaddq_s32(x, y); + v[12] = vshlq_s32(v[12], v_bit); + + v[14] = u[14]; + v[15] = u[15]; + + // stage 7 + addsub_neon(v[0], v[15], out + 0, out + 15, &clamp_lo, &clamp_hi); + addsub_neon(v[1], v[14], out + 1, out + 14, &clamp_lo, &clamp_hi); + addsub_neon(v[2], v[13], out + 2, out + 13, &clamp_lo, &clamp_hi); + addsub_neon(v[3], v[12], out + 3, out + 12, &clamp_lo, &clamp_hi); + addsub_neon(v[4], v[11], out + 4, out + 11, &clamp_lo, &clamp_hi); + addsub_neon(v[5], v[10], out + 5, out + 10, &clamp_lo, &clamp_hi); + addsub_neon(v[6], v[9], out + 6, out + 9, &clamp_lo, &clamp_hi); + addsub_neon(v[7], v[8], out + 7, out + 8, &clamp_lo, &clamp_hi); + + if (!do_cols) { + const int log_range_out = AOMMAX(16, bd + 6); + const int32x4_t clamp_lo_out = vdupq_n_s32(-(1 << (log_range_out - 1))); + const int32x4_t clamp_hi_out = + vdupq_n_s32((1 << (log_range_out - 1)) - 1); + round_shift_8x8(out, out_shift); + highbd_clamp_s32_neon(out, out, &clamp_lo_out, &clamp_hi_out, 16); + } + } +} + +static void iadst16x16_neon(int32x4_t *in, int32x4_t *out, int bit, int do_cols, + int bd, int out_shift) { + const int32_t *cospi = cospi_arr(bit); + const int log_range = AOMMAX(16, bd + (do_cols ? 
6 : 8)); + const int32x4_t clamp_lo = vdupq_n_s32(-(1 << (log_range - 1))); + const int32x4_t clamp_hi = vdupq_n_s32((1 << (log_range - 1)) - 1); + const int32x4_t zero = vdupq_n_s32(0); + const int32x4_t v_bit = vdupq_n_s32(-bit); + const int32x4_t rnding = vdupq_n_s32(1 << (bit - 1)); + int32x4_t u[16], v[16], x, y; + // Calculate the column 0, 1, 2, 3 + // stage 0 + // stage 1 + // stage 2 + v[0] = vmlaq_n_s32(rnding, in[15], cospi[2]); + v[0] = vmlaq_n_s32(v[0], in[0], cospi[62]); + v[0] = vshlq_s32(v[0], v_bit); + + v[1] = vmlaq_n_s32(rnding, in[15], cospi[62]); + v[1] = vmlsq_n_s32(v[1], in[0], cospi[2]); + v[1] = vshlq_s32(v[1], v_bit); + + v[2] = vmlaq_n_s32(rnding, in[13], cospi[10]); + v[2] = vmlaq_n_s32(v[2], in[2], cospi[54]); + v[2] = vshlq_s32(v[2], v_bit); + + v[3] = vmlaq_n_s32(rnding, in[13], cospi[54]); + v[3] = vmlsq_n_s32(v[3], in[2], cospi[10]); + v[3] = vshlq_s32(v[3], v_bit); + + v[4] = vmlaq_n_s32(rnding, in[11], cospi[18]); + v[4] = vmlaq_n_s32(v[4], in[4], cospi[46]); + v[4] = vshlq_s32(v[4], v_bit); + + v[5] = vmlaq_n_s32(rnding, in[11], cospi[46]); + v[5] = vmlsq_n_s32(v[5], in[4], cospi[18]); + v[5] = vshlq_s32(v[5], v_bit); + + v[6] = vmlaq_n_s32(rnding, in[9], cospi[26]); + v[6] = vmlaq_n_s32(v[6], in[6], cospi[38]); + v[6] = vshlq_s32(v[6], v_bit); + + v[7] = vmlaq_n_s32(rnding, in[9], cospi[38]); + v[7] = vmlsq_n_s32(v[7], in[6], cospi[26]); + v[7] = vshlq_s32(v[7], v_bit); + + v[8] = vmlaq_n_s32(rnding, in[7], cospi[34]); + v[8] = vmlaq_n_s32(v[8], in[8], cospi[30]); + v[8] = vshlq_s32(v[8], v_bit); + + v[9] = vmlaq_n_s32(rnding, in[7], cospi[30]); + v[9] = vmlsq_n_s32(v[9], in[8], cospi[34]); + v[9] = vshlq_s32(v[9], v_bit); + + v[10] = vmlaq_n_s32(rnding, in[5], cospi[42]); + v[10] = vmlaq_n_s32(v[10], in[10], cospi[22]); + v[10] = vshlq_s32(v[10], v_bit); + + v[11] = vmlaq_n_s32(rnding, in[5], cospi[22]); + v[11] = vmlsq_n_s32(v[11], in[10], cospi[42]); + v[11] = vshlq_s32(v[11], v_bit); + + v[12] = vmlaq_n_s32(rnding, in[3], cospi[50]); + v[12] = vmlaq_n_s32(v[12], in[12], cospi[14]); + v[12] = vshlq_s32(v[12], v_bit); + + v[13] = vmlaq_n_s32(rnding, in[3], cospi[14]); + v[13] = vmlsq_n_s32(v[13], in[12], cospi[50]); + v[13] = vshlq_s32(v[13], v_bit); + + v[14] = vmlaq_n_s32(rnding, in[1], cospi[58]); + v[14] = vmlaq_n_s32(v[14], in[14], cospi[6]); + v[14] = vshlq_s32(v[14], v_bit); + + v[15] = vmlaq_n_s32(rnding, in[1], cospi[6]); + v[15] = vmlsq_n_s32(v[15], in[14], cospi[58]); + v[15] = vshlq_s32(v[15], v_bit); + + // stage 3 + addsub_neon(v[0], v[8], &u[0], &u[8], &clamp_lo, &clamp_hi); + addsub_neon(v[1], v[9], &u[1], &u[9], &clamp_lo, &clamp_hi); + addsub_neon(v[2], v[10], &u[2], &u[10], &clamp_lo, &clamp_hi); + addsub_neon(v[3], v[11], &u[3], &u[11], &clamp_lo, &clamp_hi); + addsub_neon(v[4], v[12], &u[4], &u[12], &clamp_lo, &clamp_hi); + addsub_neon(v[5], v[13], &u[5], &u[13], &clamp_lo, &clamp_hi); + addsub_neon(v[6], v[14], &u[6], &u[14], &clamp_lo, &clamp_hi); + addsub_neon(v[7], v[15], &u[7], &u[15], &clamp_lo, &clamp_hi); + + // stage 4 + v[0] = u[0]; + v[1] = u[1]; + v[2] = u[2]; + v[3] = u[3]; + v[4] = u[4]; + v[5] = u[5]; + v[6] = u[6]; + v[7] = u[7]; + + v[8] = vmlaq_n_s32(rnding, u[8], cospi[8]); + v[8] = vmlaq_n_s32(v[8], u[9], cospi[56]); + v[8] = vshlq_s32(v[8], v_bit); + + v[9] = vmlaq_n_s32(rnding, u[8], cospi[56]); + v[9] = vmlsq_n_s32(v[9], u[9], cospi[8]); + v[9] = vshlq_s32(v[9], v_bit); + + v[10] = vmlaq_n_s32(rnding, u[10], cospi[40]); + v[10] = vmlaq_n_s32(v[10], u[11], cospi[24]); + v[10] = vshlq_s32(v[10], v_bit); + + 
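// v[11] below completes the stage-4 rotation pair started with v[10] above: + // v[10] = u[10]*cospi[40]+u[11]*cospi[24], v[11] = u[10]*cospi[24]-u[11]*cospi[40], + // each seeded with rnding and right-shifted by the transform bit. + 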
v[11] = vmlaq_n_s32(rnding, u[10], cospi[24]); + v[11] = vmlsq_n_s32(v[11], u[11], cospi[40]); + v[11] = vshlq_s32(v[11], v_bit); + + v[12] = vmlaq_n_s32(rnding, u[12], -cospi[56]); + v[12] = vmlaq_n_s32(v[12], u[13], cospi[8]); + v[12] = vshlq_s32(v[12], v_bit); + + v[13] = vmlaq_n_s32(rnding, u[12], cospi[8]); + v[13] = vmlsq_n_s32(v[13], u[13], -cospi[56]); + v[13] = vshlq_s32(v[13], v_bit); + + v[14] = vmlaq_n_s32(rnding, u[14], -cospi[24]); + v[14] = vmlaq_n_s32(v[14], u[15], cospi[40]); + v[14] = vshlq_s32(v[14], v_bit); + + v[15] = vmlaq_n_s32(rnding, u[14], cospi[40]); + v[15] = vmlsq_n_s32(v[15], u[15], -cospi[24]); + v[15] = vshlq_s32(v[15], v_bit); + + // stage 5 + addsub_neon(v[0], v[4], &u[0], &u[4], &clamp_lo, &clamp_hi); + addsub_neon(v[1], v[5], &u[1], &u[5], &clamp_lo, &clamp_hi); + addsub_neon(v[2], v[6], &u[2], &u[6], &clamp_lo, &clamp_hi); + addsub_neon(v[3], v[7], &u[3], &u[7], &clamp_lo, &clamp_hi); + addsub_neon(v[8], v[12], &u[8], &u[12], &clamp_lo, &clamp_hi); + addsub_neon(v[9], v[13], &u[9], &u[13], &clamp_lo, &clamp_hi); + addsub_neon(v[10], v[14], &u[10], &u[14], &clamp_lo, &clamp_hi); + addsub_neon(v[11], v[15], &u[11], &u[15], &clamp_lo, &clamp_hi); + + // stage 6 + v[0] = u[0]; + v[1] = u[1]; + v[2] = u[2]; + v[3] = u[3]; + + v[4] = vmlaq_n_s32(rnding, u[4], cospi[16]); + v[4] = vmlaq_n_s32(v[4], u[5], cospi[48]); + v[4] = vshlq_s32(v[4], v_bit); + + v[5] = vmlaq_n_s32(rnding, u[4], cospi[48]); + v[5] = vmlsq_n_s32(v[5], u[5], cospi[16]); + v[5] = vshlq_s32(v[5], v_bit); + + v[6] = vmlaq_n_s32(rnding, u[6], -cospi[48]); + v[6] = vmlaq_n_s32(v[6], u[7], cospi[16]); + v[6] = vshlq_s32(v[6], v_bit); + + v[7] = vmlaq_n_s32(rnding, u[6], cospi[16]); + v[7] = vmlsq_n_s32(v[7], u[7], -cospi[48]); + v[7] = vshlq_s32(v[7], v_bit); + + v[8] = u[8]; + v[9] = u[9]; + v[10] = u[10]; + v[11] = u[11]; + + v[12] = vmlaq_n_s32(rnding, u[12], cospi[16]); + v[12] = vmlaq_n_s32(v[12], u[13], cospi[48]); + v[12] = vshlq_s32(v[12], v_bit); + + v[13] = vmlaq_n_s32(rnding, u[12], cospi[48]); + v[13] = vmlsq_n_s32(v[13], u[13], cospi[16]); + v[13] = vshlq_s32(v[13], v_bit); + + v[14] = vmlaq_n_s32(rnding, u[14], -cospi[48]); + v[14] = vmlaq_n_s32(v[14], u[15], cospi[16]); + v[14] = vshlq_s32(v[14], v_bit); + + v[15] = vmlaq_n_s32(rnding, u[14], cospi[16]); + v[15] = vmlsq_n_s32(v[15], u[15], -cospi[48]); + v[15] = vshlq_s32(v[15], v_bit); + + // stage 7 + addsub_neon(v[0], v[2], &u[0], &u[2], &clamp_lo, &clamp_hi); + addsub_neon(v[1], v[3], &u[1], &u[3], &clamp_lo, &clamp_hi); + addsub_neon(v[4], v[6], &u[4], &u[6], &clamp_lo, &clamp_hi); + addsub_neon(v[5], v[7], &u[5], &u[7], &clamp_lo, &clamp_hi); + addsub_neon(v[8], v[10], &u[8], &u[10], &clamp_lo, &clamp_hi); + addsub_neon(v[9], v[11], &u[9], &u[11], &clamp_lo, &clamp_hi); + addsub_neon(v[12], v[14], &u[12], &u[14], &clamp_lo, &clamp_hi); + addsub_neon(v[13], v[15], &u[13], &u[15], &clamp_lo, &clamp_hi); + + // stage 8 + v[0] = u[0]; + v[1] = u[1]; + + y = vmlaq_n_s32(rnding, u[2], cospi[32]); + x = vmulq_n_s32(u[3], cospi[32]); + v[2] = vaddq_s32(y, x); + v[2] = vshlq_s32(v[2], v_bit); + + v[3] = vsubq_s32(y, x); + v[3] = vshlq_s32(v[3], v_bit); + + v[4] = u[4]; + v[5] = u[5]; + + y = vmlaq_n_s32(rnding, u[6], cospi[32]); + x = vmulq_n_s32(u[7], cospi[32]); + v[6] = vaddq_s32(y, x); + v[6] = vshlq_s32(v[6], v_bit); + + v[7] = vsubq_s32(y, x); + v[7] = vshlq_s32(v[7], v_bit); + + v[8] = u[8]; + v[9] = u[9]; + + y = vmlaq_n_s32(rnding, u[10], cospi[32]); + x = vmulq_n_s32(u[11], cospi[32]); + v[10] = vaddq_s32(y, x); + v[10] = 
vshlq_s32(v[10], v_bit); + + v[11] = vsubq_s32(y, x); + v[11] = vshlq_s32(v[11], v_bit); + + v[12] = u[12]; + v[13] = u[13]; + + y = vmlaq_n_s32(rnding, u[14], cospi[32]); + x = vmulq_n_s32(u[15], cospi[32]); + v[14] = vaddq_s32(y, x); + v[14] = vshlq_s32(v[14], v_bit); + + v[15] = vsubq_s32(y, x); + v[15] = vshlq_s32(v[15], v_bit); + + // stage 9 + if (do_cols) { + out[0] = v[0]; + out[1] = vsubq_s32(zero, v[8]); + out[2] = v[12]; + out[3] = vsubq_s32(zero, v[4]); + out[4] = v[6]; + out[5] = vsubq_s32(zero, v[14]); + out[6] = v[10]; + out[7] = vsubq_s32(zero, v[2]); + out[8] = v[3]; + out[9] = vsubq_s32(zero, v[11]); + out[10] = v[15]; + out[11] = vsubq_s32(zero, v[7]); + out[12] = v[5]; + out[13] = vsubq_s32(zero, v[13]); + out[14] = v[9]; + out[15] = vsubq_s32(zero, v[1]); + } else { + const int log_range_out = AOMMAX(16, bd + 6); + const int32x4_t clamp_lo_out = vdupq_n_s32(-(1 << (log_range_out - 1))); + const int32x4_t clamp_hi_out = vdupq_n_s32((1 << (log_range_out - 1)) - 1); + const int32x4_t v_shift = vdupq_n_s32(-out_shift); + int32x4_t offset = vdupq_n_s32((1 << out_shift) >> 1); + neg_shift_neon(&v[0], &v[8], out + 0, out + 1, &clamp_lo_out, &clamp_hi_out, + &v_shift, &offset); + neg_shift_neon(&v[12], &v[4], out + 2, out + 3, &clamp_lo_out, + &clamp_hi_out, &v_shift, &offset); + neg_shift_neon(&v[6], &v[14], out + 4, out + 5, &clamp_lo_out, + &clamp_hi_out, &v_shift, &offset); + neg_shift_neon(&v[10], &v[2], out + 6, out + 7, &clamp_lo_out, + &clamp_hi_out, &v_shift, &offset); + neg_shift_neon(&v[3], &v[11], out + 8, out + 9, &clamp_lo_out, + &clamp_hi_out, &v_shift, &offset); + neg_shift_neon(&v[15], &v[7], out + 10, out + 11, &clamp_lo_out, + &clamp_hi_out, &v_shift, &offset); + neg_shift_neon(&v[5], &v[13], out + 12, out + 13, &clamp_lo_out, + &clamp_hi_out, &v_shift, &offset); + neg_shift_neon(&v[9], &v[1], out + 14, out + 15, &clamp_lo_out, + &clamp_hi_out, &v_shift, &offset); + } +} + +static void iidentity16_neon(int32x4_t *in, int32x4_t *out, int bit, + int do_cols, int bd, int out_shift) { + (void)bit; + int32x2_t fact = vdup_n_s32(2 * NewSqrt2); + int32x4x2_t a0; + int32x4_t zero = vdupq_n_s32(0); + const int64x2_t rnding = vdupq_n_s64(1 << (NewSqrt2Bits - 1)); + for (int i = 0; i < 16; i++) { + a0.val[0] = vreinterpretq_s32_s64( + vmlal_s32(rnding, vmovn_s64(vreinterpretq_s64_s32(in[i])), fact)); + a0.val[0] = vreinterpretq_s32_s64( + vshrq_n_s64(vreinterpretq_s64_s32(a0.val[0]), NewSqrt2Bits)); + a0.val[1] = vextq_s32(in[i], zero, 1); + a0.val[1] = vreinterpretq_s32_s64( + vmlal_s32(rnding, vmovn_s64(vreinterpretq_s64_s32(a0.val[1])), fact)); + a0.val[1] = vreinterpretq_s32_s64( + vshrq_n_s64(vreinterpretq_s64_s32(a0.val[1]), NewSqrt2Bits)); + a0 = vzipq_s32(a0.val[0], a0.val[1]); +#if AOM_ARCH_AARCH64 + out[i] = vreinterpretq_s32_s64(vzip1q_s64( + vreinterpretq_s64_s32(a0.val[0]), vreinterpretq_s64_s32(a0.val[1]))); +#else + out[i] = vextq_s32(vextq_s32(a0.val[0], a0.val[0], 2), a0.val[1], 2); +#endif + } + + if (!do_cols) { + const int log_range = AOMMAX(16, bd + 6); + const int32x4_t clamp_lo = vdupq_n_s32(-(1 << (log_range - 1))); + const int32x4_t clamp_hi = vdupq_n_s32((1 << (log_range - 1)) - 1); + round_shift_8x8(out, out_shift); + highbd_clamp_s32_neon(out, out, &clamp_lo, &clamp_hi, 16); + } +} + +static INLINE void idct64_stage8_neon(int32x4_t *u, const int32_t *cospi, + const int32x4_t *clamp_lo, + const int32x4_t *clamp_hi, + const int32x4_t *v_bit, + const int32x4_t *rnding) { + int i; + int32x4_t temp1, temp2, temp3, temp4; + temp1 = 
half_btf_neon_mode10_r(&cospi[32], &u[10], &cospi[32], &u[13], v_bit, + rnding); + u[13] = + half_btf_neon_r(&cospi[32], &u[10], &cospi[32], &u[13], v_bit, rnding); + u[10] = temp1; + temp2 = half_btf_neon_mode10_r(&cospi[32], &u[11], &cospi[32], &u[12], v_bit, + rnding); + u[12] = + half_btf_neon_r(&cospi[32], &u[11], &cospi[32], &u[12], v_bit, rnding); + u[11] = temp2; + + for (i = 16; i < 20; ++i) { + addsub_neon(u[i], u[i ^ 7], &u[i], &u[i ^ 7], clamp_lo, clamp_hi); + addsub_neon(u[i ^ 15], u[i ^ 8], &u[i ^ 15], &u[i ^ 8], clamp_lo, clamp_hi); + } + + temp1 = half_btf_neon_mode10_r(&cospi[16], &u[36], &cospi[48], &u[59], v_bit, + rnding); + temp2 = half_btf_neon_mode10_r(&cospi[16], &u[37], &cospi[48], &u[58], v_bit, + rnding); + temp3 = half_btf_neon_mode10_r(&cospi[16], &u[38], &cospi[48], &u[57], v_bit, + rnding); + temp4 = half_btf_neon_mode10_r(&cospi[16], &u[39], &cospi[48], &u[56], v_bit, + rnding); + u[56] = + half_btf_neon_r(&cospi[48], &u[39], &cospi[16], &u[56], v_bit, rnding); + u[57] = + half_btf_neon_r(&cospi[48], &u[38], &cospi[16], &u[57], v_bit, rnding); + u[58] = + half_btf_neon_r(&cospi[48], &u[37], &cospi[16], &u[58], v_bit, rnding); + u[59] = + half_btf_neon_r(&cospi[48], &u[36], &cospi[16], &u[59], v_bit, rnding); + u[36] = temp1; + u[37] = temp2; + u[38] = temp3; + u[39] = temp4; + + temp1 = half_btf_neon_mode11_r(&cospi[48], &u[40], &cospi[16], &u[55], v_bit, + rnding); + temp2 = half_btf_neon_mode11_r(&cospi[48], &u[41], &cospi[16], &u[54], v_bit, + rnding); + temp3 = half_btf_neon_mode11_r(&cospi[48], &u[42], &cospi[16], &u[53], v_bit, + rnding); + temp4 = half_btf_neon_mode11_r(&cospi[48], &u[43], &cospi[16], &u[52], v_bit, + rnding); + u[52] = half_btf_neon_mode10_r(&cospi[16], &u[43], &cospi[48], &u[52], v_bit, + rnding); + u[53] = half_btf_neon_mode10_r(&cospi[16], &u[42], &cospi[48], &u[53], v_bit, + rnding); + u[54] = half_btf_neon_mode10_r(&cospi[16], &u[41], &cospi[48], &u[54], v_bit, + rnding); + u[55] = half_btf_neon_mode10_r(&cospi[16], &u[40], &cospi[48], &u[55], v_bit, + rnding); + u[40] = temp1; + u[41] = temp2; + u[42] = temp3; + u[43] = temp4; +} + +static INLINE void idct64_stage9_neon(int32x4_t *u, const int32_t *cospi, + const int32x4_t *clamp_lo, + const int32x4_t *clamp_hi, + const int32x4_t *v_bit, + const int32x4_t *rnding) { + int i; + int32x4_t temp1, temp2, temp3, temp4; + for (i = 0; i < 8; ++i) { + addsub_neon(u[i], u[15 - i], &u[i], &u[15 - i], clamp_lo, clamp_hi); + } + temp1 = half_btf_neon_mode10_r(&cospi[32], &u[20], &cospi[32], &u[27], v_bit, + rnding); + temp2 = half_btf_neon_mode10_r(&cospi[32], &u[21], &cospi[32], &u[26], v_bit, + rnding); + temp3 = half_btf_neon_mode10_r(&cospi[32], &u[22], &cospi[32], &u[25], v_bit, + rnding); + temp4 = half_btf_neon_mode10_r(&cospi[32], &u[23], &cospi[32], &u[24], v_bit, + rnding); + u[24] = + half_btf_neon_r(&cospi[32], &u[23], &cospi[32], &u[24], v_bit, rnding); + u[25] = + half_btf_neon_r(&cospi[32], &u[22], &cospi[32], &u[25], v_bit, rnding); + u[26] = + half_btf_neon_r(&cospi[32], &u[21], &cospi[32], &u[26], v_bit, rnding); + u[27] = + half_btf_neon_r(&cospi[32], &u[20], &cospi[32], &u[27], v_bit, rnding); + u[20] = temp1; + u[21] = temp2; + u[22] = temp3; + u[23] = temp4; + for (i = 32; i < 40; i++) { + addsub_neon(u[i], u[i ^ 15], &u[i], &u[i ^ 15], clamp_lo, clamp_hi); + } + + for (i = 48; i < 56; i++) { + addsub_neon(u[i ^ 15], u[i], &u[i ^ 15], &u[i], clamp_lo, clamp_hi); + } +} + +static INLINE void idct64_stage10_neon(int32x4_t *u, const int32_t *cospi, + const int32x4_t 
*clamp_lo, + const int32x4_t *clamp_hi, + const int32x4_t *v_bit, + const int32x4_t *rnding) { + int32x4_t temp1, temp2, temp3, temp4; + for (int i = 0; i < 16; i++) { + addsub_neon(u[i], u[31 - i], &u[i], &u[31 - i], clamp_lo, clamp_hi); + } + temp1 = half_btf_neon_mode10_r(&cospi[32], &u[40], &cospi[32], &u[55], v_bit, + rnding); + temp2 = half_btf_neon_mode10_r(&cospi[32], &u[41], &cospi[32], &u[54], v_bit, + rnding); + temp3 = half_btf_neon_mode10_r(&cospi[32], &u[42], &cospi[32], &u[53], v_bit, + rnding); + temp4 = half_btf_neon_mode10_r(&cospi[32], &u[43], &cospi[32], &u[52], v_bit, + rnding); + u[52] = + half_btf_neon_r(&cospi[32], &u[43], &cospi[32], &u[52], v_bit, rnding); + u[53] = + half_btf_neon_r(&cospi[32], &u[42], &cospi[32], &u[53], v_bit, rnding); + u[54] = + half_btf_neon_r(&cospi[32], &u[41], &cospi[32], &u[54], v_bit, rnding); + u[55] = + half_btf_neon_r(&cospi[32], &u[40], &cospi[32], &u[55], v_bit, rnding); + u[40] = temp1; + u[41] = temp2; + u[42] = temp3; + u[43] = temp4; + + temp1 = half_btf_neon_mode10_r(&cospi[32], &u[44], &cospi[32], &u[51], v_bit, + rnding); + temp2 = half_btf_neon_mode10_r(&cospi[32], &u[45], &cospi[32], &u[50], v_bit, + rnding); + temp3 = half_btf_neon_mode10_r(&cospi[32], &u[46], &cospi[32], &u[49], v_bit, + rnding); + temp4 = half_btf_neon_mode10_r(&cospi[32], &u[47], &cospi[32], &u[48], v_bit, + rnding); + u[48] = + half_btf_neon_r(&cospi[32], &u[47], &cospi[32], &u[48], v_bit, rnding); + u[49] = + half_btf_neon_r(&cospi[32], &u[46], &cospi[32], &u[49], v_bit, rnding); + u[50] = + half_btf_neon_r(&cospi[32], &u[45], &cospi[32], &u[50], v_bit, rnding); + u[51] = + half_btf_neon_r(&cospi[32], &u[44], &cospi[32], &u[51], v_bit, rnding); + u[44] = temp1; + u[45] = temp2; + u[46] = temp3; + u[47] = temp4; +} + +static INLINE void idct64_stage11_neon(int32x4_t *u, int32x4_t *out, + int do_cols, int bd, int out_shift, + const int32x4_t *clamp_lo, + const int32x4_t *clamp_hi) { + for (int i = 0; i < 32; i++) { + addsub_neon(u[i], u[63 - i], out + i, out + 63 - i, clamp_lo, clamp_hi); + } + + if (!do_cols) { + const int log_range_out = AOMMAX(16, bd + 6); + const int32x4_t clamp_lo_out = vdupq_n_s32(-(1 << (log_range_out - 1))); + const int32x4_t clamp_hi_out = vdupq_n_s32((1 << (log_range_out - 1)) - 1); + for (int i = 0; i < 64; i += 4) { + round_shift_4x4(out + i, out_shift); + highbd_clamp_s32_neon(out + i, out + i, &clamp_lo_out, &clamp_hi_out, 4); + } + } +} + +static void idct64x64_low1_neon(int32x4_t *in, int32x4_t *out, int bit, + int do_cols, int bd, int out_shift) { + const int32_t *cospi = cospi_arr(bit); + const int log_range = AOMMAX(16, bd + (do_cols ? 
6 : 8)); + int32x4_t clamp_lo = vdupq_n_s32(-(1 << (log_range - 1))); + int32x4_t clamp_hi = vdupq_n_s32((1 << (log_range - 1)) - 1); + + const int32x4_t v_bit = vdupq_n_s32(-bit); + const int32x4_t rnding = vdupq_n_s32(1 << (bit - 1)); + { + int32x4_t x; + + // stage 1 + // stage 2 + // stage 3 + // stage 4 + // stage 5 + // stage 6 + x = half_btf_0_neon_r(&cospi[32], &in[0], &v_bit, &rnding); + + // stage 8 + // stage 9 + // stage 10 + // stage 11 + if (!do_cols) { + const int log_range_out = AOMMAX(16, bd + 6); + clamp_lo = vdupq_n_s32(-(1 << (log_range_out - 1))); + clamp_hi = vdupq_n_s32((1 << (log_range_out - 1)) - 1); + if (out_shift != 0) { + int32x4_t offset = vdupq_n_s32((1 << out_shift) >> 1); + x = vaddq_s32(x, offset); + x = vshlq_s32(x, vdupq_n_s32(-out_shift)); + } + } + x = vmaxq_s32(x, clamp_lo); + x = vminq_s32(x, clamp_hi); + out[0] = x; + out[1] = x; + out[2] = x; + out[3] = x; + out[4] = x; + out[5] = x; + out[6] = x; + out[7] = x; + out[8] = x; + out[9] = x; + out[10] = x; + out[11] = x; + out[12] = x; + out[13] = x; + out[14] = x; + out[15] = x; + out[16] = x; + out[17] = x; + out[18] = x; + out[19] = x; + out[20] = x; + out[21] = x; + out[22] = x; + out[23] = x; + out[24] = x; + out[25] = x; + out[26] = x; + out[27] = x; + out[28] = x; + out[29] = x; + out[30] = x; + out[31] = x; + out[32] = x; + out[33] = x; + out[34] = x; + out[35] = x; + out[36] = x; + out[37] = x; + out[38] = x; + out[39] = x; + out[40] = x; + out[41] = x; + out[42] = x; + out[43] = x; + out[44] = x; + out[45] = x; + out[46] = x; + out[47] = x; + out[48] = x; + out[49] = x; + out[50] = x; + out[51] = x; + out[52] = x; + out[53] = x; + out[54] = x; + out[55] = x; + out[56] = x; + out[57] = x; + out[58] = x; + out[59] = x; + out[60] = x; + out[61] = x; + out[62] = x; + out[63] = x; + } +} + +static void idct64x64_low8_neon(int32x4_t *in, int32x4_t *out, int bit, + int do_cols, int bd, int out_shift) { + int i, j; + const int32_t *cospi = cospi_arr(bit); + const int log_range = AOMMAX(16, bd + (do_cols ? 
6 : 8)); + const int32x4_t clamp_lo = vdupq_n_s32(-(1 << (log_range - 1))); + const int32x4_t clamp_hi = vdupq_n_s32((1 << (log_range - 1)) - 1); + const int32x4_t v_bit = vdupq_n_s32(-bit); + const int32x4_t rnding = vdupq_n_s32(1 << (bit - 1)); + { + int32x4_t u[64]; + + // stage 1 + u[0] = in[0]; + u[8] = in[4]; + u[16] = in[2]; + u[24] = in[6]; + u[32] = in[1]; + u[40] = in[5]; + u[48] = in[3]; + u[56] = in[7]; + + // stage 2 + u[63] = half_btf_0_neon_r(&cospi[1], &u[32], &v_bit, &rnding); + u[32] = half_btf_0_neon_r(&cospi[63], &u[32], &v_bit, &rnding); + u[39] = half_btf_0_m_neon_r(&cospi[57], &u[56], &v_bit, &rnding); + u[56] = half_btf_0_neon_r(&cospi[7], &u[56], &v_bit, &rnding); + u[55] = half_btf_0_neon_r(&cospi[5], &u[40], &v_bit, &rnding); + u[40] = half_btf_0_neon_r(&cospi[59], &u[40], &v_bit, &rnding); + u[47] = half_btf_0_m_neon_r(&cospi[61], &u[48], &v_bit, &rnding); + u[48] = half_btf_0_neon_r(&cospi[3], &u[48], &v_bit, &rnding); + + // stage 3 + u[31] = half_btf_0_neon_r(&cospi[2], &u[16], &v_bit, &rnding); + u[16] = half_btf_0_neon_r(&cospi[62], &u[16], &v_bit, &rnding); + u[23] = half_btf_0_m_neon_r(&cospi[58], &u[24], &v_bit, &rnding); + u[24] = half_btf_0_neon_r(&cospi[6], &u[24], &v_bit, &rnding); + u[33] = u[32]; + u[38] = u[39]; + u[41] = u[40]; + u[46] = u[47]; + u[49] = u[48]; + u[54] = u[55]; + u[57] = u[56]; + u[62] = u[63]; + + // stage 4 + int32x4_t temp1, temp2; + u[15] = half_btf_0_neon_r(&cospi[4], &u[8], &v_bit, &rnding); + u[8] = half_btf_0_neon_r(&cospi[60], &u[8], &v_bit, &rnding); + u[17] = u[16]; + u[22] = u[23]; + u[25] = u[24]; + u[30] = u[31]; + + temp1 = half_btf_neon_mode10_r(&cospi[4], &u[33], &cospi[60], &u[62], + &v_bit, &rnding); + u[62] = + half_btf_neon_r(&cospi[60], &u[33], &cospi[4], &u[62], &v_bit, &rnding); + u[33] = temp1; + + temp2 = half_btf_neon_mode10_r(&cospi[36], &u[38], &cospi[28], &u[57], + &v_bit, &rnding); + u[38] = half_btf_neon_mode11_r(&cospi[28], &u[38], &cospi[36], &u[57], + &v_bit, &rnding); + u[57] = temp2; + + temp1 = half_btf_neon_mode10_r(&cospi[20], &u[41], &cospi[44], &u[54], + &v_bit, &rnding); + u[54] = half_btf_neon_r(&cospi[44], &u[41], &cospi[20], &u[54], &v_bit, + &rnding); + u[41] = temp1; + + temp2 = half_btf_neon_mode11_r(&cospi[12], &u[46], &cospi[52], &u[49], + &v_bit, &rnding); + u[49] = half_btf_neon_mode10_r(&cospi[52], &u[46], &cospi[12], &u[49], + &v_bit, &rnding); + u[46] = temp2; + + // stage 5 + u[9] = u[8]; + u[14] = u[15]; + + temp1 = half_btf_neon_mode10_r(&cospi[8], &u[17], &cospi[56], &u[30], + &v_bit, &rnding); + u[30] = + half_btf_neon_r(&cospi[56], &u[17], &cospi[8], &u[30], &v_bit, &rnding); + u[17] = temp1; + + temp2 = half_btf_neon_mode11_r(&cospi[24], &u[22], &cospi[40], &u[25], + &v_bit, &rnding); + u[25] = half_btf_neon_mode10_r(&cospi[40], &u[22], &cospi[24], &u[25], + &v_bit, &rnding); + u[22] = temp2; + + u[35] = u[32]; + u[34] = u[33]; + u[36] = u[39]; + u[37] = u[38]; + u[43] = u[40]; + u[42] = u[41]; + u[44] = u[47]; + u[45] = u[46]; + u[51] = u[48]; + u[50] = u[49]; + u[52] = u[55]; + u[53] = u[54]; + u[59] = u[56]; + u[58] = u[57]; + u[60] = u[63]; + u[61] = u[62]; + + // stage 6 + temp1 = half_btf_0_neon_r(&cospi[32], &u[0], &v_bit, &rnding); + u[1] = half_btf_0_neon_r(&cospi[32], &u[0], &v_bit, &rnding); + u[0] = temp1; + + temp2 = half_btf_neon_mode10_r(&cospi[16], &u[9], &cospi[48], &u[14], + &v_bit, &rnding); + u[14] = + half_btf_neon_r(&cospi[48], &u[9], &cospi[16], &u[14], &v_bit, &rnding); + u[9] = temp2; + u[19] = u[16]; + u[18] = u[17]; + u[20] = u[23]; + u[21] 
= u[22]; + u[27] = u[24]; + u[26] = u[25]; + u[28] = u[31]; + u[29] = u[30]; + + temp1 = half_btf_neon_mode10_r(&cospi[8], &u[34], &cospi[56], &u[61], + &v_bit, &rnding); + u[61] = + half_btf_neon_r(&cospi[56], &u[34], &cospi[8], &u[61], &v_bit, &rnding); + u[34] = temp1; + temp2 = half_btf_neon_mode10_r(&cospi[8], &u[35], &cospi[56], &u[60], + &v_bit, &rnding); + u[60] = + half_btf_neon_r(&cospi[56], &u[35], &cospi[8], &u[60], &v_bit, &rnding); + u[35] = temp2; + temp1 = half_btf_neon_mode11_r(&cospi[56], &u[36], &cospi[8], &u[59], + &v_bit, &rnding); + u[59] = half_btf_neon_mode10_r(&cospi[8], &u[36], &cospi[56], &u[59], + &v_bit, &rnding); + u[36] = temp1; + temp2 = half_btf_neon_mode11_r(&cospi[56], &u[37], &cospi[8], &u[58], + &v_bit, &rnding); + u[58] = half_btf_neon_mode10_r(&cospi[8], &u[37], &cospi[56], &u[58], + &v_bit, &rnding); + u[37] = temp2; + temp1 = half_btf_neon_mode10_r(&cospi[40], &u[42], &cospi[24], &u[53], + &v_bit, &rnding); + u[53] = half_btf_neon_r(&cospi[24], &u[42], &cospi[40], &u[53], &v_bit, + &rnding); + u[42] = temp1; + temp2 = half_btf_neon_mode10_r(&cospi[40], &u[43], &cospi[24], &u[52], + &v_bit, &rnding); + u[52] = half_btf_neon_r(&cospi[24], &u[43], &cospi[40], &u[52], &v_bit, + &rnding); + u[43] = temp2; + temp1 = half_btf_neon_mode11_r(&cospi[24], &u[44], &cospi[40], &u[51], + &v_bit, &rnding); + u[51] = half_btf_neon_mode10_r(&cospi[40], &u[44], &cospi[24], &u[51], + &v_bit, &rnding); + u[44] = temp1; + temp2 = half_btf_neon_mode11_r(&cospi[24], &u[45], &cospi[40], &u[50], + &v_bit, &rnding); + u[50] = half_btf_neon_mode10_r(&cospi[40], &u[45], &cospi[24], &u[50], + &v_bit, &rnding); + u[45] = temp2; + + // stage 7 + u[3] = u[0]; + u[2] = u[1]; + u[11] = u[8]; + u[10] = u[9]; + u[12] = u[15]; + u[13] = u[14]; + + temp1 = half_btf_neon_mode10_r(&cospi[16], &u[18], &cospi[48], &u[29], + &v_bit, &rnding); + u[29] = half_btf_neon_r(&cospi[48], &u[18], &cospi[16], &u[29], &v_bit, + &rnding); + u[18] = temp1; + temp2 = half_btf_neon_mode10_r(&cospi[16], &u[19], &cospi[48], &u[28], + &v_bit, &rnding); + u[28] = half_btf_neon_r(&cospi[48], &u[19], &cospi[16], &u[28], &v_bit, + &rnding); + u[19] = temp2; + temp1 = half_btf_neon_mode11_r(&cospi[48], &u[20], &cospi[16], &u[27], + &v_bit, &rnding); + u[27] = half_btf_neon_mode10_r(&cospi[16], &u[20], &cospi[48], &u[27], + &v_bit, &rnding); + u[20] = temp1; + temp2 = half_btf_neon_mode11_r(&cospi[48], &u[21], &cospi[16], &u[26], + &v_bit, &rnding); + u[26] = half_btf_neon_mode10_r(&cospi[16], &u[21], &cospi[48], &u[26], + &v_bit, &rnding); + u[21] = temp2; + for (i = 32; i < 64; i += 16) { + for (j = i; j < i + 4; j++) { + addsub_neon(u[j], u[j ^ 7], &u[j], &u[j ^ 7], &clamp_lo, &clamp_hi); + addsub_neon(u[j ^ 15], u[j ^ 8], &u[j ^ 15], &u[j ^ 8], &clamp_lo, + &clamp_hi); + } + } + + // stage 8 + u[7] = u[0]; + u[6] = u[1]; + u[5] = u[2]; + u[4] = u[3]; + u[9] = u[9]; + + idct64_stage8_neon(u, cospi, &clamp_lo, &clamp_hi, &v_bit, &rnding); + + // stage 9 + idct64_stage9_neon(u, cospi, &clamp_lo, &clamp_hi, &v_bit, &rnding); + + // stage 10 + idct64_stage10_neon(u, cospi, &clamp_lo, &clamp_hi, &v_bit, &rnding); + + // stage 11 + idct64_stage11_neon(u, out, do_cols, bd, out_shift, &clamp_lo, &clamp_hi); + } +} + +static void idct64x64_low16_neon(int32x4_t *in, int32x4_t *out, int bit, + int do_cols, int bd, int out_shift) { + int i, j; + const int32_t *cospi = cospi_arr(bit); + const int log_range = AOMMAX(16, bd + (do_cols ? 
6 : 8)); + const int32x4_t clamp_lo = vdupq_n_s32(-(1 << (log_range - 1))); + const int32x4_t clamp_hi = vdupq_n_s32((1 << (log_range - 1)) - 1); + const int32x4_t v_bit = vdupq_n_s32(-bit); + const int32x4_t rnding = vdupq_n_s32(1 << (bit - 1)); + + { + int32x4_t u[64]; + int32x4_t tmp1, tmp2, tmp3, tmp4; + // stage 1 + u[0] = in[0]; + u[32] = in[1]; + u[36] = in[9]; + u[40] = in[5]; + u[44] = in[13]; + u[48] = in[3]; + u[52] = in[11]; + u[56] = in[7]; + u[60] = in[15]; + u[16] = in[2]; + u[20] = in[10]; + u[24] = in[6]; + u[28] = in[14]; + u[4] = in[8]; + u[8] = in[4]; + u[12] = in[12]; + + // stage 2 + u[63] = half_btf_0_neon_r(&cospi[1], &u[32], &v_bit, &rnding); + u[32] = half_btf_0_neon_r(&cospi[63], &u[32], &v_bit, &rnding); + u[35] = half_btf_0_m_neon_r(&cospi[49], &u[60], &v_bit, &rnding); + u[60] = half_btf_0_neon_r(&cospi[15], &u[60], &v_bit, &rnding); + u[59] = half_btf_0_neon_r(&cospi[9], &u[36], &v_bit, &rnding); + u[36] = half_btf_0_neon_r(&cospi[55], &u[36], &v_bit, &rnding); + u[39] = half_btf_0_m_neon_r(&cospi[57], &u[56], &v_bit, &rnding); + u[56] = half_btf_0_neon_r(&cospi[7], &u[56], &v_bit, &rnding); + u[55] = half_btf_0_neon_r(&cospi[5], &u[40], &v_bit, &rnding); + u[40] = half_btf_0_neon_r(&cospi[59], &u[40], &v_bit, &rnding); + u[43] = half_btf_0_m_neon_r(&cospi[53], &u[52], &v_bit, &rnding); + u[52] = half_btf_0_neon_r(&cospi[11], &u[52], &v_bit, &rnding); + u[47] = half_btf_0_m_neon_r(&cospi[61], &u[48], &v_bit, &rnding); + u[48] = half_btf_0_neon_r(&cospi[3], &u[48], &v_bit, &rnding); + u[51] = half_btf_0_neon_r(&cospi[13], &u[44], &v_bit, &rnding); + u[44] = half_btf_0_neon_r(&cospi[51], &u[44], &v_bit, &rnding); + + // stage 3 + u[31] = half_btf_0_neon_r(&cospi[2], &u[16], &v_bit, &rnding); + u[16] = half_btf_0_neon_r(&cospi[62], &u[16], &v_bit, &rnding); + u[19] = half_btf_0_m_neon_r(&cospi[50], &u[28], &v_bit, &rnding); + u[28] = half_btf_0_neon_r(&cospi[14], &u[28], &v_bit, &rnding); + u[27] = half_btf_0_neon_r(&cospi[10], &u[20], &v_bit, &rnding); + u[20] = half_btf_0_neon_r(&cospi[54], &u[20], &v_bit, &rnding); + u[23] = half_btf_0_m_neon_r(&cospi[58], &u[24], &v_bit, &rnding); + u[24] = half_btf_0_neon_r(&cospi[6], &u[24], &v_bit, &rnding); + u[33] = u[32]; + u[34] = u[35]; + u[37] = u[36]; + u[38] = u[39]; + u[41] = u[40]; + u[42] = u[43]; + u[45] = u[44]; + u[46] = u[47]; + u[49] = u[48]; + u[50] = u[51]; + u[53] = u[52]; + u[54] = u[55]; + u[57] = u[56]; + u[58] = u[59]; + u[61] = u[60]; + u[62] = u[63]; + + // stage 4 + u[15] = half_btf_0_neon_r(&cospi[4], &u[8], &v_bit, &rnding); + u[8] = half_btf_0_neon_r(&cospi[60], &u[8], &v_bit, &rnding); + u[11] = half_btf_0_m_neon_r(&cospi[52], &u[12], &v_bit, &rnding); + u[12] = half_btf_0_neon_r(&cospi[12], &u[12], &v_bit, &rnding); + + u[17] = u[16]; + u[18] = u[19]; + u[21] = u[20]; + u[22] = u[23]; + u[25] = u[24]; + u[26] = u[27]; + u[29] = u[28]; + u[30] = u[31]; + + tmp1 = half_btf_neon_mode10_r(&cospi[4], &u[33], &cospi[60], &u[62], &v_bit, + &rnding); + tmp2 = half_btf_neon_mode11_r(&cospi[60], &u[34], &cospi[4], &u[61], &v_bit, + &rnding); + tmp3 = half_btf_neon_mode10_r(&cospi[36], &u[37], &cospi[28], &u[58], + &v_bit, &rnding); + tmp4 = half_btf_neon_mode11_r(&cospi[28], &u[38], &cospi[36], &u[57], + &v_bit, &rnding); + u[57] = half_btf_neon_mode10_r(&cospi[36], &u[38], &cospi[28], &u[57], + &v_bit, &rnding); + u[58] = half_btf_neon_r(&cospi[28], &u[37], &cospi[36], &u[58], &v_bit, + &rnding); + u[61] = half_btf_neon_mode10_r(&cospi[4], &u[34], &cospi[60], &u[61], + &v_bit, &rnding); + u[62] = + 
half_btf_neon_r(&cospi[60], &u[33], &cospi[4], &u[62], &v_bit, &rnding); + u[33] = tmp1; + u[34] = tmp2; + u[37] = tmp3; + u[38] = tmp4; + + tmp1 = half_btf_neon_mode10_r(&cospi[20], &u[41], &cospi[44], &u[54], + &v_bit, &rnding); + tmp2 = half_btf_neon_mode11_r(&cospi[44], &u[42], &cospi[20], &u[53], + &v_bit, &rnding); + tmp3 = half_btf_neon_r(&cospi[52], &u[45], &cospi[12], &u[50], &v_bit, + &rnding); + tmp4 = half_btf_neon_mode11_r(&cospi[12], &u[46], &cospi[52], &u[49], + &v_bit, &rnding); + u[49] = half_btf_neon_mode10_r(&cospi[52], &u[46], &cospi[12], &u[49], + &v_bit, &rnding); + u[50] = half_btf_neon_r(&cospi[12], &u[45], &cospi[52], &u[50], &v_bit, + &rnding); + u[53] = half_btf_neon_mode10_r(&cospi[20], &u[42], &cospi[44], &u[53], + &v_bit, &rnding); + u[54] = half_btf_neon_r(&cospi[44], &u[41], &cospi[20], &u[54], &v_bit, + &rnding); + u[41] = tmp1; + u[42] = tmp2; + u[45] = tmp3; + u[46] = tmp4; + + // stage 5 + u[7] = half_btf_0_neon_r(&cospi[8], &u[4], &v_bit, &rnding); + u[4] = half_btf_0_neon_r(&cospi[56], &u[4], &v_bit, &rnding); + + u[9] = u[8]; + u[10] = u[11]; + u[13] = u[12]; + u[14] = u[15]; + + tmp1 = half_btf_neon_mode10_r(&cospi[8], &u[17], &cospi[56], &u[30], &v_bit, + &rnding); + tmp2 = half_btf_neon_mode11_r(&cospi[56], &u[18], &cospi[8], &u[29], &v_bit, + &rnding); + tmp3 = half_btf_neon_mode10_r(&cospi[40], &u[21], &cospi[24], &u[26], + &v_bit, &rnding); + tmp4 = half_btf_neon_mode11_r(&cospi[24], &u[22], &cospi[40], &u[25], + &v_bit, &rnding); + u[25] = half_btf_neon_mode10_r(&cospi[40], &u[22], &cospi[24], &u[25], + &v_bit, &rnding); + u[26] = half_btf_neon_r(&cospi[24], &u[21], &cospi[40], &u[26], &v_bit, + &rnding); + u[29] = half_btf_neon_mode10_r(&cospi[8], &u[18], &cospi[56], &u[29], + &v_bit, &rnding); + u[30] = + half_btf_neon_r(&cospi[56], &u[17], &cospi[8], &u[30], &v_bit, &rnding); + u[17] = tmp1; + u[18] = tmp2; + u[21] = tmp3; + u[22] = tmp4; + + for (i = 32; i < 64; i += 8) { + addsub_neon(u[i + 0], u[i + 3], &u[i + 0], &u[i + 3], &clamp_lo, + &clamp_hi); + addsub_neon(u[i + 1], u[i + 2], &u[i + 1], &u[i + 2], &clamp_lo, + &clamp_hi); + + addsub_neon(u[i + 7], u[i + 4], &u[i + 7], &u[i + 4], &clamp_lo, + &clamp_hi); + addsub_neon(u[i + 6], u[i + 5], &u[i + 6], &u[i + 5], &clamp_lo, + &clamp_hi); + } + + // stage 6 + tmp1 = half_btf_0_neon_r(&cospi[32], &u[0], &v_bit, &rnding); + u[1] = half_btf_0_neon_r(&cospi[32], &u[0], &v_bit, &rnding); + u[0] = tmp1; + u[5] = u[4]; + u[6] = u[7]; + + tmp1 = half_btf_neon_mode10_r(&cospi[16], &u[9], &cospi[48], &u[14], &v_bit, + &rnding); + u[14] = + half_btf_neon_r(&cospi[48], &u[9], &cospi[16], &u[14], &v_bit, &rnding); + u[9] = tmp1; + tmp2 = half_btf_neon_mode01_r(&cospi[48], &u[10], &cospi[16], &u[13], + &v_bit, &rnding); + u[13] = half_btf_neon_mode10_r(&cospi[16], &u[10], &cospi[48], &u[13], + &v_bit, &rnding); + u[10] = tmp2; + + for (i = 16; i < 32; i += 8) { + addsub_neon(u[i + 0], u[i + 3], &u[i + 0], &u[i + 3], &clamp_lo, + &clamp_hi); + addsub_neon(u[i + 1], u[i + 2], &u[i + 1], &u[i + 2], &clamp_lo, + &clamp_hi); + + addsub_neon(u[i + 7], u[i + 4], &u[i + 7], &u[i + 4], &clamp_lo, + &clamp_hi); + addsub_neon(u[i + 6], u[i + 5], &u[i + 6], &u[i + 5], &clamp_lo, + &clamp_hi); + } + + tmp1 = half_btf_neon_mode10_r(&cospi[8], &u[34], &cospi[56], &u[61], &v_bit, + &rnding); + tmp2 = half_btf_neon_mode10_r(&cospi[8], &u[35], &cospi[56], &u[60], &v_bit, + &rnding); + tmp3 = half_btf_neon_mode11_r(&cospi[56], &u[36], &cospi[8], &u[59], &v_bit, + &rnding); + tmp4 = half_btf_neon_mode11_r(&cospi[56], 
&u[37], &cospi[8], &u[58], &v_bit, + &rnding); + u[58] = half_btf_neon_mode10_r(&cospi[8], &u[37], &cospi[56], &u[58], + &v_bit, &rnding); + u[59] = half_btf_neon_mode10_r(&cospi[8], &u[36], &cospi[56], &u[59], + &v_bit, &rnding); + u[60] = + half_btf_neon_r(&cospi[56], &u[35], &cospi[8], &u[60], &v_bit, &rnding); + u[61] = + half_btf_neon_r(&cospi[56], &u[34], &cospi[8], &u[61], &v_bit, &rnding); + u[34] = tmp1; + u[35] = tmp2; + u[36] = tmp3; + u[37] = tmp4; + + tmp1 = half_btf_neon_mode10_r(&cospi[40], &u[42], &cospi[24], &u[53], + &v_bit, &rnding); + tmp2 = half_btf_neon_mode10_r(&cospi[40], &u[43], &cospi[24], &u[52], + &v_bit, &rnding); + tmp3 = half_btf_neon_mode11_r(&cospi[24], &u[44], &cospi[40], &u[51], + &v_bit, &rnding); + tmp4 = half_btf_neon_mode11_r(&cospi[24], &u[45], &cospi[40], &u[50], + &v_bit, &rnding); + u[50] = half_btf_neon_mode10_r(&cospi[40], &u[45], &cospi[24], &u[50], + &v_bit, &rnding); + u[51] = half_btf_neon_mode10_r(&cospi[40], &u[44], &cospi[24], &u[51], + &v_bit, &rnding); + u[52] = half_btf_neon_r(&cospi[24], &u[43], &cospi[40], &u[52], &v_bit, + &rnding); + u[53] = half_btf_neon_r(&cospi[24], &u[42], &cospi[40], &u[53], &v_bit, + &rnding); + u[42] = tmp1; + u[43] = tmp2; + u[44] = tmp3; + u[45] = tmp4; + + // stage 7 + u[3] = u[0]; + u[2] = u[1]; + tmp1 = half_btf_neon_mode10_r(&cospi[32], &u[5], &cospi[32], &u[6], &v_bit, + &rnding); + u[6] = + half_btf_neon_r(&cospi[32], &u[5], &cospi[32], &u[6], &v_bit, &rnding); + u[5] = tmp1; + addsub_neon(u[8], u[11], &u[8], &u[11], &clamp_lo, &clamp_hi); + addsub_neon(u[9], u[10], &u[9], &u[10], &clamp_lo, &clamp_hi); + addsub_neon(u[15], u[12], &u[15], &u[12], &clamp_lo, &clamp_hi); + addsub_neon(u[14], u[13], &u[14], &u[13], &clamp_lo, &clamp_hi); + + tmp1 = half_btf_neon_mode10_r(&cospi[16], &u[18], &cospi[48], &u[29], + &v_bit, &rnding); + tmp2 = half_btf_neon_mode10_r(&cospi[16], &u[19], &cospi[48], &u[28], + &v_bit, &rnding); + tmp3 = half_btf_neon_mode11_r(&cospi[48], &u[20], &cospi[16], &u[27], + &v_bit, &rnding); + tmp4 = half_btf_neon_mode11_r(&cospi[48], &u[21], &cospi[16], &u[26], + &v_bit, &rnding); + u[26] = half_btf_neon_mode10_r(&cospi[16], &u[21], &cospi[48], &u[26], + &v_bit, &rnding); + u[27] = half_btf_neon_mode10_r(&cospi[16], &u[20], &cospi[48], &u[27], + &v_bit, &rnding); + u[28] = half_btf_neon_r(&cospi[48], &u[19], &cospi[16], &u[28], &v_bit, + &rnding); + u[29] = half_btf_neon_r(&cospi[48], &u[18], &cospi[16], &u[29], &v_bit, + &rnding); + u[18] = tmp1; + u[19] = tmp2; + u[20] = tmp3; + u[21] = tmp4; + + for (i = 32; i < 64; i += 16) { + for (j = i; j < i + 4; j++) { + addsub_neon(u[j], u[j ^ 7], &u[j], &u[j ^ 7], &clamp_lo, &clamp_hi); + addsub_neon(u[j ^ 15], u[j ^ 8], &u[j ^ 15], &u[j ^ 8], &clamp_lo, + &clamp_hi); + } + } + + // stage 8 + for (i = 0; i < 4; ++i) { + addsub_neon(u[i], u[7 - i], &u[i], &u[7 - i], &clamp_lo, &clamp_hi); + } + + idct64_stage8_neon(u, cospi, &clamp_lo, &clamp_hi, &v_bit, &rnding); + + // stage 9 + idct64_stage9_neon(u, cospi, &clamp_lo, &clamp_hi, &v_bit, &rnding); + + // stage 10 + idct64_stage10_neon(u, cospi, &clamp_lo, &clamp_hi, &v_bit, &rnding); + + // stage 11 + idct64_stage11_neon(u, out, do_cols, bd, out_shift, &clamp_lo, &clamp_hi); + } +} + +static void idct64x64_neon(int32x4_t *in, int32x4_t *out, int bit, int do_cols, + int bd, int out_shift) { + int i, j; + const int32_t *cospi = cospi_arr(bit); + const int32x4_t v_bit = vdupq_n_s32(-bit); + const int32x4_t rnding = vdupq_n_s32(1 << (bit - 1)); + + const int log_range = AOMMAX(16, bd + 
(do_cols ? 6 : 8)); + const int32x4_t clamp_lo = vdupq_n_s32(-(1 << (log_range - 1))); + const int32x4_t clamp_hi = vdupq_n_s32((1 << (log_range - 1)) - 1); + + { + int32x4_t u[64], v[64]; + + // stage 1 + u[32] = in[1]; + u[34] = in[17]; + u[36] = in[9]; + u[38] = in[25]; + u[40] = in[5]; + u[42] = in[21]; + u[44] = in[13]; + u[46] = in[29]; + u[48] = in[3]; + u[50] = in[19]; + u[52] = in[11]; + u[54] = in[27]; + u[56] = in[7]; + u[58] = in[23]; + u[60] = in[15]; + u[62] = in[31]; + + v[16] = in[2]; + v[18] = in[18]; + v[20] = in[10]; + v[22] = in[26]; + v[24] = in[6]; + v[26] = in[22]; + v[28] = in[14]; + v[30] = in[30]; + + u[8] = in[4]; + u[10] = in[20]; + u[12] = in[12]; + u[14] = in[28]; + + v[4] = in[8]; + v[6] = in[24]; + + u[0] = in[0]; + u[2] = in[16]; + + // stage 2 + v[32] = half_btf_0_neon_r(&cospi[63], &u[32], &v_bit, &rnding); + v[33] = half_btf_0_m_neon_r(&cospi[33], &u[62], &v_bit, &rnding); + v[34] = half_btf_0_neon_r(&cospi[47], &u[34], &v_bit, &rnding); + v[35] = half_btf_0_m_neon_r(&cospi[49], &u[60], &v_bit, &rnding); + v[36] = half_btf_0_neon_r(&cospi[55], &u[36], &v_bit, &rnding); + v[37] = half_btf_0_m_neon_r(&cospi[41], &u[58], &v_bit, &rnding); + v[38] = half_btf_0_neon_r(&cospi[39], &u[38], &v_bit, &rnding); + v[39] = half_btf_0_m_neon_r(&cospi[57], &u[56], &v_bit, &rnding); + v[40] = half_btf_0_neon_r(&cospi[59], &u[40], &v_bit, &rnding); + v[41] = half_btf_0_m_neon_r(&cospi[37], &u[54], &v_bit, &rnding); + v[42] = half_btf_0_neon_r(&cospi[43], &u[42], &v_bit, &rnding); + v[43] = half_btf_0_m_neon_r(&cospi[53], &u[52], &v_bit, &rnding); + v[44] = half_btf_0_neon_r(&cospi[51], &u[44], &v_bit, &rnding); + v[45] = half_btf_0_m_neon_r(&cospi[45], &u[50], &v_bit, &rnding); + v[46] = half_btf_0_neon_r(&cospi[35], &u[46], &v_bit, &rnding); + v[47] = half_btf_0_m_neon_r(&cospi[61], &u[48], &v_bit, &rnding); + v[48] = half_btf_0_neon_r(&cospi[3], &u[48], &v_bit, &rnding); + v[49] = half_btf_0_neon_r(&cospi[29], &u[46], &v_bit, &rnding); + v[50] = half_btf_0_neon_r(&cospi[19], &u[50], &v_bit, &rnding); + v[51] = half_btf_0_neon_r(&cospi[13], &u[44], &v_bit, &rnding); + v[52] = half_btf_0_neon_r(&cospi[11], &u[52], &v_bit, &rnding); + v[53] = half_btf_0_neon_r(&cospi[21], &u[42], &v_bit, &rnding); + v[54] = half_btf_0_neon_r(&cospi[27], &u[54], &v_bit, &rnding); + v[55] = half_btf_0_neon_r(&cospi[5], &u[40], &v_bit, &rnding); + v[56] = half_btf_0_neon_r(&cospi[7], &u[56], &v_bit, &rnding); + v[57] = half_btf_0_neon_r(&cospi[25], &u[38], &v_bit, &rnding); + v[58] = half_btf_0_neon_r(&cospi[23], &u[58], &v_bit, &rnding); + v[59] = half_btf_0_neon_r(&cospi[9], &u[36], &v_bit, &rnding); + v[60] = half_btf_0_neon_r(&cospi[15], &u[60], &v_bit, &rnding); + v[61] = half_btf_0_neon_r(&cospi[17], &u[34], &v_bit, &rnding); + v[62] = half_btf_0_neon_r(&cospi[31], &u[62], &v_bit, &rnding); + v[63] = half_btf_0_neon_r(&cospi[1], &u[32], &v_bit, &rnding); + + // stage 3 + u[16] = half_btf_0_neon_r(&cospi[62], &v[16], &v_bit, &rnding); + u[17] = half_btf_0_m_neon_r(&cospi[34], &v[30], &v_bit, &rnding); + u[18] = half_btf_0_neon_r(&cospi[46], &v[18], &v_bit, &rnding); + u[19] = half_btf_0_m_neon_r(&cospi[50], &v[28], &v_bit, &rnding); + u[20] = half_btf_0_neon_r(&cospi[54], &v[20], &v_bit, &rnding); + u[21] = half_btf_0_m_neon_r(&cospi[42], &v[26], &v_bit, &rnding); + u[22] = half_btf_0_neon_r(&cospi[38], &v[22], &v_bit, &rnding); + u[23] = half_btf_0_m_neon_r(&cospi[58], &v[24], &v_bit, &rnding); + u[24] = half_btf_0_neon_r(&cospi[6], &v[24], &v_bit, &rnding); + u[25] = 
half_btf_0_neon_r(&cospi[26], &v[22], &v_bit, &rnding); + u[26] = half_btf_0_neon_r(&cospi[22], &v[26], &v_bit, &rnding); + u[27] = half_btf_0_neon_r(&cospi[10], &v[20], &v_bit, &rnding); + u[28] = half_btf_0_neon_r(&cospi[14], &v[28], &v_bit, &rnding); + u[29] = half_btf_0_neon_r(&cospi[18], &v[18], &v_bit, &rnding); + u[30] = half_btf_0_neon_r(&cospi[30], &v[30], &v_bit, &rnding); + u[31] = half_btf_0_neon_r(&cospi[2], &v[16], &v_bit, &rnding); + + for (i = 32; i < 64; i += 4) { + addsub_neon(v[i + 0], v[i + 1], &u[i + 0], &u[i + 1], &clamp_lo, + &clamp_hi); + addsub_neon(v[i + 3], v[i + 2], &u[i + 3], &u[i + 2], &clamp_lo, + &clamp_hi); + } + + // stage 4 + v[8] = half_btf_0_neon_r(&cospi[60], &u[8], &v_bit, &rnding); + v[9] = half_btf_0_m_neon_r(&cospi[36], &u[14], &v_bit, &rnding); + v[10] = half_btf_0_neon_r(&cospi[44], &u[10], &v_bit, &rnding); + v[11] = half_btf_0_m_neon_r(&cospi[52], &u[12], &v_bit, &rnding); + v[12] = half_btf_0_neon_r(&cospi[12], &u[12], &v_bit, &rnding); + v[13] = half_btf_0_neon_r(&cospi[20], &u[10], &v_bit, &rnding); + v[14] = half_btf_0_neon_r(&cospi[28], &u[14], &v_bit, &rnding); + v[15] = half_btf_0_neon_r(&cospi[4], &u[8], &v_bit, &rnding); + + for (i = 16; i < 32; i += 4) { + addsub_neon(u[i + 0], u[i + 1], &v[i + 0], &v[i + 1], &clamp_lo, + &clamp_hi); + addsub_neon(u[i + 3], u[i + 2], &v[i + 3], &v[i + 2], &clamp_lo, + &clamp_hi); + } + + for (i = 32; i < 64; i += 4) { + v[i + 0] = u[i + 0]; + v[i + 3] = u[i + 3]; + } + + v[33] = half_btf_neon_mode10_r(&cospi[4], &u[33], &cospi[60], &u[62], + &v_bit, &rnding); + v[34] = half_btf_neon_mode11_r(&cospi[60], &u[34], &cospi[4], &u[61], + &v_bit, &rnding); + v[37] = half_btf_neon_mode10_r(&cospi[36], &u[37], &cospi[28], &u[58], + &v_bit, &rnding); + v[38] = half_btf_neon_mode11_r(&cospi[28], &u[38], &cospi[36], &u[57], + &v_bit, &rnding); + v[41] = half_btf_neon_mode10_r(&cospi[20], &u[41], &cospi[44], &u[54], + &v_bit, &rnding); + v[42] = half_btf_neon_mode11_r(&cospi[44], &u[42], &cospi[20], &u[53], + &v_bit, &rnding); + v[45] = half_btf_neon_mode10_r(&cospi[52], &u[45], &cospi[12], &u[50], + &v_bit, &rnding); + v[46] = half_btf_neon_mode11_r(&cospi[12], &u[46], &cospi[52], &u[49], + &v_bit, &rnding); + v[49] = half_btf_neon_mode10_r(&cospi[52], &u[46], &cospi[12], &u[49], + &v_bit, &rnding); + v[50] = half_btf_neon_r(&cospi[12], &u[45], &cospi[52], &u[50], &v_bit, + &rnding); + v[53] = half_btf_neon_mode10_r(&cospi[20], &u[42], &cospi[44], &u[53], + &v_bit, &rnding); + v[54] = half_btf_neon_r(&cospi[44], &u[41], &cospi[20], &u[54], &v_bit, + &rnding); + v[57] = half_btf_neon_mode10_r(&cospi[36], &u[38], &cospi[28], &u[57], + &v_bit, &rnding); + v[58] = half_btf_neon_r(&cospi[28], &u[37], &cospi[36], &u[58], &v_bit, + &rnding); + v[61] = half_btf_neon_mode10_r(&cospi[4], &u[34], &cospi[60], &u[61], + &v_bit, &rnding); + v[62] = + half_btf_neon_r(&cospi[60], &u[33], &cospi[4], &u[62], &v_bit, &rnding); + + // stage 5 + u[4] = half_btf_0_neon_r(&cospi[56], &v[4], &v_bit, &rnding); + u[5] = half_btf_0_m_neon_r(&cospi[40], &v[6], &v_bit, &rnding); + u[6] = half_btf_0_neon_r(&cospi[24], &v[6], &v_bit, &rnding); + u[7] = half_btf_0_neon_r(&cospi[8], &v[4], &v_bit, &rnding); + + for (i = 8; i < 16; i += 4) { + addsub_neon(v[i + 0], v[i + 1], &u[i + 0], &u[i + 1], &clamp_lo, + &clamp_hi); + addsub_neon(v[i + 3], v[i + 2], &u[i + 3], &u[i + 2], &clamp_lo, + &clamp_hi); + } + + for (i = 16; i < 32; i += 4) { + u[i + 0] = v[i + 0]; + u[i + 3] = v[i + 3]; + } + + u[17] = half_btf_neon_mode10_r(&cospi[8], &v[17], 
&cospi[56], &v[30], + &v_bit, &rnding); + u[18] = half_btf_neon_mode11_r(&cospi[56], &v[18], &cospi[8], &v[29], + &v_bit, &rnding); + u[21] = half_btf_neon_mode10_r(&cospi[40], &v[21], &cospi[24], &v[26], + &v_bit, &rnding); + u[22] = half_btf_neon_mode11_r(&cospi[24], &v[22], &cospi[40], &v[25], + &v_bit, &rnding); + u[25] = half_btf_neon_mode10_r(&cospi[40], &v[22], &cospi[24], &v[25], + &v_bit, &rnding); + u[26] = half_btf_neon_r(&cospi[24], &v[21], &cospi[40], &v[26], &v_bit, + &rnding); + u[29] = half_btf_neon_mode10_r(&cospi[8], &v[18], &cospi[56], &v[29], + &v_bit, &rnding); + u[30] = + half_btf_neon_r(&cospi[56], &v[17], &cospi[8], &v[30], &v_bit, &rnding); + + for (i = 32; i < 64; i += 8) { + addsub_neon(v[i + 0], v[i + 3], &u[i + 0], &u[i + 3], &clamp_lo, + &clamp_hi); + addsub_neon(v[i + 1], v[i + 2], &u[i + 1], &u[i + 2], &clamp_lo, + &clamp_hi); + + addsub_neon(v[i + 7], v[i + 4], &u[i + 7], &u[i + 4], &clamp_lo, + &clamp_hi); + addsub_neon(v[i + 6], v[i + 5], &u[i + 6], &u[i + 5], &clamp_lo, + &clamp_hi); + } + + // stage 6 + v[0] = half_btf_0_neon_r(&cospi[32], &u[0], &v_bit, &rnding); + v[1] = half_btf_0_neon_r(&cospi[32], &u[0], &v_bit, &rnding); + v[2] = half_btf_0_neon_r(&cospi[48], &u[2], &v_bit, &rnding); + v[3] = half_btf_0_neon_r(&cospi[16], &u[2], &v_bit, &rnding); + + addsub_neon(u[4], u[5], &v[4], &v[5], &clamp_lo, &clamp_hi); + addsub_neon(u[7], u[6], &v[7], &v[6], &clamp_lo, &clamp_hi); + + for (i = 8; i < 16; i += 4) { + v[i + 0] = u[i + 0]; + v[i + 3] = u[i + 3]; + } + + v[9] = half_btf_neon_mode10_r(&cospi[16], &u[9], &cospi[48], &u[14], &v_bit, + &rnding); + v[10] = half_btf_neon_mode11_r(&cospi[48], &u[10], &cospi[16], &u[13], + &v_bit, &rnding); + v[13] = half_btf_neon_mode10_r(&cospi[16], &u[10], &cospi[48], &u[13], + &v_bit, &rnding); + v[14] = + half_btf_neon_r(&cospi[48], &u[9], &cospi[16], &u[14], &v_bit, &rnding); + + for (i = 16; i < 32; i += 8) { + addsub_neon(u[i + 0], u[i + 3], &v[i + 0], &v[i + 3], &clamp_lo, + &clamp_hi); + addsub_neon(u[i + 1], u[i + 2], &v[i + 1], &v[i + 2], &clamp_lo, + &clamp_hi); + + addsub_neon(u[i + 7], u[i + 4], &v[i + 7], &v[i + 4], &clamp_lo, + &clamp_hi); + addsub_neon(u[i + 6], u[i + 5], &v[i + 6], &v[i + 5], &clamp_lo, + &clamp_hi); + } + + for (i = 32; i < 64; i += 8) { + v[i + 0] = u[i + 0]; + v[i + 1] = u[i + 1]; + v[i + 6] = u[i + 6]; + v[i + 7] = u[i + 7]; + } + + v[34] = half_btf_neon_mode10_r(&cospi[8], &u[34], &cospi[56], &u[61], + &v_bit, &rnding); + v[35] = half_btf_neon_mode10_r(&cospi[8], &u[35], &cospi[56], &u[60], + &v_bit, &rnding); + v[36] = half_btf_neon_mode11_r(&cospi[56], &u[36], &cospi[8], &u[59], + &v_bit, &rnding); + v[37] = half_btf_neon_mode11_r(&cospi[56], &u[37], &cospi[8], &u[58], + &v_bit, &rnding); + v[42] = half_btf_neon_mode10_r(&cospi[40], &u[42], &cospi[24], &u[53], + &v_bit, &rnding); + v[43] = half_btf_neon_mode10_r(&cospi[40], &u[43], &cospi[24], &u[52], + &v_bit, &rnding); + v[44] = half_btf_neon_mode11_r(&cospi[24], &u[44], &cospi[40], &u[51], + &v_bit, &rnding); + v[45] = half_btf_neon_mode11_r(&cospi[24], &u[45], &cospi[40], &u[50], + &v_bit, &rnding); + v[50] = half_btf_neon_mode10_r(&cospi[40], &u[45], &cospi[24], &u[50], + &v_bit, &rnding); + v[51] = half_btf_neon_mode10_r(&cospi[40], &u[44], &cospi[24], &u[51], + &v_bit, &rnding); + v[52] = half_btf_neon_r(&cospi[24], &u[43], &cospi[40], &u[52], &v_bit, + &rnding); + v[53] = half_btf_neon_r(&cospi[24], &u[42], &cospi[40], &u[53], &v_bit, + &rnding); + v[58] = half_btf_neon_mode10_r(&cospi[8], &u[37], &cospi[56], &u[58], 
+ &v_bit, &rnding); + v[59] = half_btf_neon_mode10_r(&cospi[8], &u[36], &cospi[56], &u[59], + &v_bit, &rnding); + v[60] = + half_btf_neon_r(&cospi[56], &u[35], &cospi[8], &u[60], &v_bit, &rnding); + v[61] = + half_btf_neon_r(&cospi[56], &u[34], &cospi[8], &u[61], &v_bit, &rnding); + + // stage 7 + addsub_neon(v[0], v[3], &u[0], &u[3], &clamp_lo, &clamp_hi); + addsub_neon(v[1], v[2], &u[1], &u[2], &clamp_lo, &clamp_hi); + + u[4] = v[4]; + u[7] = v[7]; + u[5] = half_btf_neon_mode10_r(&cospi[32], &v[5], &cospi[32], &v[6], &v_bit, + &rnding); + u[6] = + half_btf_neon_r(&cospi[32], &v[5], &cospi[32], &v[6], &v_bit, &rnding); + + addsub_neon(v[8], v[11], &u[8], &u[11], &clamp_lo, &clamp_hi); + addsub_neon(v[9], v[10], &u[9], &u[10], &clamp_lo, &clamp_hi); + addsub_neon(v[15], v[12], &u[15], &u[12], &clamp_lo, &clamp_hi); + addsub_neon(v[14], v[13], &u[14], &u[13], &clamp_lo, &clamp_hi); + + for (i = 16; i < 32; i += 8) { + u[i + 0] = v[i + 0]; + u[i + 1] = v[i + 1]; + u[i + 6] = v[i + 6]; + u[i + 7] = v[i + 7]; + } + + u[18] = half_btf_neon_mode10_r(&cospi[16], &v[18], &cospi[48], &v[29], + &v_bit, &rnding); + u[19] = half_btf_neon_mode10_r(&cospi[16], &v[19], &cospi[48], &v[28], + &v_bit, &rnding); + u[20] = half_btf_neon_mode11_r(&cospi[48], &v[20], &cospi[16], &v[27], + &v_bit, &rnding); + u[21] = half_btf_neon_mode11_r(&cospi[48], &v[21], &cospi[16], &v[26], + &v_bit, &rnding); + u[26] = half_btf_neon_mode10_r(&cospi[16], &v[21], &cospi[48], &v[26], + &v_bit, &rnding); + u[27] = half_btf_neon_mode10_r(&cospi[16], &v[20], &cospi[48], &v[27], + &v_bit, &rnding); + u[28] = half_btf_neon_r(&cospi[48], &v[19], &cospi[16], &v[28], &v_bit, + &rnding); + u[29] = half_btf_neon_r(&cospi[48], &v[18], &cospi[16], &v[29], &v_bit, + &rnding); + + for (i = 32; i < 64; i += 16) { + for (j = i; j < i + 4; j++) { + addsub_neon(v[j], v[j ^ 7], &u[j], &u[j ^ 7], &clamp_lo, &clamp_hi); + addsub_neon(v[j ^ 15], v[j ^ 8], &u[j ^ 15], &u[j ^ 8], &clamp_lo, + &clamp_hi); + } + } + + // stage 8 + for (i = 0; i < 4; ++i) { + addsub_neon(u[i], u[7 - i], &v[i], &v[7 - i], &clamp_lo, &clamp_hi); + } + + v[8] = u[8]; + v[9] = u[9]; + v[14] = u[14]; + v[15] = u[15]; + + v[10] = half_btf_neon_mode10_r(&cospi[32], &u[10], &cospi[32], &u[13], + &v_bit, &rnding); + v[11] = half_btf_neon_mode10_r(&cospi[32], &u[11], &cospi[32], &u[12], + &v_bit, &rnding); + v[12] = half_btf_neon_r(&cospi[32], &u[11], &cospi[32], &u[12], &v_bit, + &rnding); + v[13] = half_btf_neon_r(&cospi[32], &u[10], &cospi[32], &u[13], &v_bit, + &rnding); + + for (i = 16; i < 20; ++i) { + addsub_neon(u[i], u[i ^ 7], &v[i], &v[i ^ 7], &clamp_lo, &clamp_hi); + addsub_neon(u[i ^ 15], u[i ^ 8], &v[i ^ 15], &v[i ^ 8], &clamp_lo, + &clamp_hi); + } + + for (i = 32; i < 36; ++i) { + v[i] = u[i]; + v[i + 12] = u[i + 12]; + v[i + 16] = u[i + 16]; + v[i + 28] = u[i + 28]; + } + + v[36] = half_btf_neon_mode10_r(&cospi[16], &u[36], &cospi[48], &u[59], + &v_bit, &rnding); + v[37] = half_btf_neon_mode10_r(&cospi[16], &u[37], &cospi[48], &u[58], + &v_bit, &rnding); + v[38] = half_btf_neon_mode10_r(&cospi[16], &u[38], &cospi[48], &u[57], + &v_bit, &rnding); + v[39] = half_btf_neon_mode10_r(&cospi[16], &u[39], &cospi[48], &u[56], + &v_bit, &rnding); + v[40] = half_btf_neon_mode11_r(&cospi[48], &u[40], &cospi[16], &u[55], + &v_bit, &rnding); + v[41] = half_btf_neon_mode11_r(&cospi[48], &u[41], &cospi[16], &u[54], + &v_bit, &rnding); + v[42] = half_btf_neon_mode11_r(&cospi[48], &u[42], &cospi[16], &u[53], + &v_bit, &rnding); + v[43] = half_btf_neon_mode11_r(&cospi[48], &u[43], 
&cospi[16], &u[52], + &v_bit, &rnding); + v[52] = half_btf_neon_mode10_r(&cospi[16], &u[43], &cospi[48], &u[52], + &v_bit, &rnding); + v[53] = half_btf_neon_mode10_r(&cospi[16], &u[42], &cospi[48], &u[53], + &v_bit, &rnding); + v[54] = half_btf_neon_mode10_r(&cospi[16], &u[41], &cospi[48], &u[54], + &v_bit, &rnding); + v[55] = half_btf_neon_mode10_r(&cospi[16], &u[40], &cospi[48], &u[55], + &v_bit, &rnding); + v[56] = half_btf_neon_r(&cospi[48], &u[39], &cospi[16], &u[56], &v_bit, + &rnding); + v[57] = half_btf_neon_r(&cospi[48], &u[38], &cospi[16], &u[57], &v_bit, + &rnding); + v[58] = half_btf_neon_r(&cospi[48], &u[37], &cospi[16], &u[58], &v_bit, + &rnding); + v[59] = half_btf_neon_r(&cospi[48], &u[36], &cospi[16], &u[59], &v_bit, + &rnding); + + // stage 9 + for (i = 0; i < 8; ++i) { + addsub_neon(v[i], v[15 - i], &u[i], &u[15 - i], &clamp_lo, &clamp_hi); + } + + for (i = 16; i < 20; ++i) { + u[i] = v[i]; + u[i + 12] = v[i + 12]; + } + + u[20] = half_btf_neon_mode10_r(&cospi[32], &v[20], &cospi[32], &v[27], + &v_bit, &rnding); + u[21] = half_btf_neon_mode10_r(&cospi[32], &v[21], &cospi[32], &v[26], + &v_bit, &rnding); + u[22] = half_btf_neon_mode10_r(&cospi[32], &v[22], &cospi[32], &v[25], + &v_bit, &rnding); + u[23] = half_btf_neon_mode10_r(&cospi[32], &v[23], &cospi[32], &v[24], + &v_bit, &rnding); + u[24] = half_btf_neon_r(&cospi[32], &v[23], &cospi[32], &v[24], &v_bit, + &rnding); + u[25] = half_btf_neon_r(&cospi[32], &v[22], &cospi[32], &v[25], &v_bit, + &rnding); + u[26] = half_btf_neon_r(&cospi[32], &v[21], &cospi[32], &v[26], &v_bit, + &rnding); + u[27] = half_btf_neon_r(&cospi[32], &v[20], &cospi[32], &v[27], &v_bit, + &rnding); + + for (i = 32; i < 40; i++) { + addsub_neon(v[i], v[i ^ 15], &u[i], &u[i ^ 15], &clamp_lo, &clamp_hi); + } + + for (i = 48; i < 56; i++) { + addsub_neon(v[i ^ 15], v[i], &u[i ^ 15], &u[i], &clamp_lo, &clamp_hi); + } + + // stage 10 + for (i = 0; i < 16; i++) { + addsub_neon(u[i], u[31 - i], &v[i], &v[31 - i], &clamp_lo, &clamp_hi); + } + + for (i = 32; i < 40; i++) v[i] = u[i]; + + v[40] = half_btf_neon_mode10_r(&cospi[32], &u[40], &cospi[32], &u[55], + &v_bit, &rnding); + v[41] = half_btf_neon_mode10_r(&cospi[32], &u[41], &cospi[32], &u[54], + &v_bit, &rnding); + v[42] = half_btf_neon_mode10_r(&cospi[32], &u[42], &cospi[32], &u[53], + &v_bit, &rnding); + v[43] = half_btf_neon_mode10_r(&cospi[32], &u[43], &cospi[32], &u[52], + &v_bit, &rnding); + v[44] = half_btf_neon_mode10_r(&cospi[32], &u[44], &cospi[32], &u[51], + &v_bit, &rnding); + v[45] = half_btf_neon_mode10_r(&cospi[32], &u[45], &cospi[32], &u[50], + &v_bit, &rnding); + v[46] = half_btf_neon_mode10_r(&cospi[32], &u[46], &cospi[32], &u[49], + &v_bit, &rnding); + v[47] = half_btf_neon_mode10_r(&cospi[32], &u[47], &cospi[32], &u[48], + &v_bit, &rnding); + v[48] = half_btf_neon_r(&cospi[32], &u[47], &cospi[32], &u[48], &v_bit, + &rnding); + v[49] = half_btf_neon_r(&cospi[32], &u[46], &cospi[32], &u[49], &v_bit, + &rnding); + v[50] = half_btf_neon_r(&cospi[32], &u[45], &cospi[32], &u[50], &v_bit, + &rnding); + v[51] = half_btf_neon_r(&cospi[32], &u[44], &cospi[32], &u[51], &v_bit, + &rnding); + v[52] = half_btf_neon_r(&cospi[32], &u[43], &cospi[32], &u[52], &v_bit, + &rnding); + v[53] = half_btf_neon_r(&cospi[32], &u[42], &cospi[32], &u[53], &v_bit, + &rnding); + v[54] = half_btf_neon_r(&cospi[32], &u[41], &cospi[32], &u[54], &v_bit, + &rnding); + v[55] = half_btf_neon_r(&cospi[32], &u[40], &cospi[32], &u[55], &v_bit, + &rnding); + + for (i = 56; i < 64; i++) v[i] = u[i]; + + // stage 11 + for (i 
= 0; i < 32; i++) { + addsub_neon(v[i], v[63 - i], &out[(i)], &out[(63 - i)], &clamp_lo, + &clamp_hi); + } + + if (!do_cols) { + const int log_range_out = AOMMAX(16, bd + 6); + const int32x4_t clamp_lo_out = vdupq_n_s32(-(1 << (log_range_out - 1))); + const int32x4_t clamp_hi_out = + vdupq_n_s32((1 << (log_range_out - 1)) - 1); + for (i = 0; i < 64; i += 4) { + round_shift_4x4(out + i, out_shift); + highbd_clamp_s32_neon(out + i, out + i, &clamp_lo_out, &clamp_hi_out, + 4); + } + } + } +} + +static void idct32x32_low1_neon(int32x4_t *in, int32x4_t *out, int bit, + int do_cols, int bd, int out_shift) { + const int32_t *cospi = cospi_arr(bit); + const int log_range = AOMMAX(16, bd + (do_cols ? 6 : 8)); + int32x4_t clamp_lo = vdupq_n_s32(-(1 << (log_range - 1))); + int32x4_t clamp_hi = vdupq_n_s32((1 << (log_range - 1)) - 1); + int32x4_t bf1; + const int32x4_t v_bit = vdupq_n_s32(-bit); + const int32x4_t rnding = vdupq_n_s32(1 << (bit - 1)); + // stage 0-1 + bf1 = in[0]; + + // stage 2-5 + bf1 = half_btf_0_neon_r(&cospi[32], &bf1, &v_bit, &rnding); + + // stage 6-9 + if (do_cols) { + bf1 = vmaxq_s32(bf1, clamp_lo); + bf1 = vminq_s32(bf1, clamp_hi); + } else { + const int log_range_out = AOMMAX(16, bd + 6); + clamp_lo = vdupq_n_s32(-(1 << (log_range_out - 1))); + clamp_hi = vdupq_n_s32((1 << (log_range_out - 1)) - 1); + if (out_shift != 0) { + bf1 = vrshlq_s32(bf1, vdupq_n_s32(-out_shift)); + } + } + + bf1 = vmaxq_s32(bf1, clamp_lo); + bf1 = vminq_s32(bf1, clamp_hi); + + for (int i = 0; i < 32; i++) out[i] = bf1; +} + +static void idct32x32_low8_neon(int32x4_t *in, int32x4_t *out, int bit, + int do_cols, int bd, int out_shift) { + const int32_t *cospi = cospi_arr(bit); + const int log_range = AOMMAX(16, bd + (do_cols ? 6 : 8)); + const int32x4_t clamp_lo = vdupq_n_s32(-(1 << (log_range - 1))); + const int32x4_t clamp_hi = vdupq_n_s32((1 << (log_range - 1)) - 1); + int32x4_t bf1[32]; + const int32x4_t v_bit = vdupq_n_s32(-bit); + const int32x4_t rnding = vdupq_n_s32(1 << (bit - 1)); + + // stage 0-1 + bf1[0] = in[0]; + bf1[4] = in[4]; + bf1[8] = in[2]; + bf1[12] = in[6]; + bf1[16] = in[1]; + bf1[20] = in[5]; + bf1[24] = in[3]; + bf1[28] = in[7]; + + // stage 2 + bf1[31] = half_btf_0_neon_r(&cospi[2], &bf1[16], &v_bit, &rnding); + bf1[16] = half_btf_0_neon_r(&cospi[62], &bf1[16], &v_bit, &rnding); + bf1[19] = half_btf_0_m_neon_r(&cospi[50], &bf1[28], &v_bit, &rnding); + bf1[28] = half_btf_0_neon_r(&cospi[14], &bf1[28], &v_bit, &rnding); + bf1[27] = half_btf_0_neon_r(&cospi[10], &bf1[20], &v_bit, &rnding); + bf1[20] = half_btf_0_neon_r(&cospi[54], &bf1[20], &v_bit, &rnding); + bf1[23] = half_btf_0_m_neon_r(&cospi[58], &bf1[24], &v_bit, &rnding); + bf1[24] = half_btf_0_neon_r(&cospi[6], &bf1[24], &v_bit, &rnding); + + // stage 3 + bf1[15] = half_btf_0_neon_r(&cospi[4], &bf1[8], &v_bit, &rnding); + bf1[8] = half_btf_0_neon_r(&cospi[60], &bf1[8], &v_bit, &rnding); + + bf1[11] = half_btf_0_m_neon_r(&cospi[52], &bf1[12], &v_bit, &rnding); + bf1[12] = half_btf_0_neon_r(&cospi[12], &bf1[12], &v_bit, &rnding); + bf1[17] = bf1[16]; + bf1[18] = bf1[19]; + bf1[21] = bf1[20]; + bf1[22] = bf1[23]; + bf1[25] = bf1[24]; + bf1[26] = bf1[27]; + bf1[29] = bf1[28]; + bf1[30] = bf1[31]; + + // stage 4 : + bf1[7] = half_btf_0_neon_r(&cospi[8], &bf1[4], &v_bit, &rnding); + bf1[4] = half_btf_0_neon_r(&cospi[56], &bf1[4], &v_bit, &rnding); + + bf1[9] = bf1[8]; + bf1[10] = bf1[11]; + bf1[13] = bf1[12]; + bf1[14] = bf1[15]; + + idct32_stage4_neon(bf1, cospi, &v_bit, &rnding); + + // stage 5 + bf1[0] = 
half_btf_0_neon_r(&cospi[32], &bf1[0], &v_bit, &rnding); + bf1[1] = bf1[0]; + bf1[5] = bf1[4]; + bf1[6] = bf1[7]; + + idct32_stage5_neon(bf1, cospi, &clamp_lo, &clamp_hi, &v_bit, &rnding); + + // stage 6 + bf1[3] = bf1[0]; + bf1[2] = bf1[1]; + + idct32_stage6_neon(bf1, cospi, &clamp_lo, &clamp_hi, &v_bit, &rnding); + + // stage 7 + idct32_stage7_neon(bf1, cospi, &clamp_lo, &clamp_hi, &v_bit, &rnding); + + // stage 8 + idct32_stage8_neon(bf1, cospi, &clamp_lo, &clamp_hi, &v_bit, &rnding); + + // stage 9 + idct32_stage9_neon(bf1, out, do_cols, bd, out_shift, &clamp_lo, &clamp_hi); +} + +static void idct32x32_low16_neon(int32x4_t *in, int32x4_t *out, int bit, + int do_cols, int bd, int out_shift) { + const int32_t *cospi = cospi_arr(bit); + const int log_range = AOMMAX(16, bd + (do_cols ? 6 : 8)); + const int32x4_t clamp_lo = vdupq_n_s32(-(1 << (log_range - 1))); + const int32x4_t clamp_hi = vdupq_n_s32((1 << (log_range - 1)) - 1); + int32x4_t bf1[32]; + const int32x4_t v_bit = vdupq_n_s32(-bit); + const int32x4_t rnding = vdupq_n_s32(1 << (bit - 1)); + + // stage 0-1 + + bf1[0] = in[0]; + bf1[2] = in[8]; + bf1[4] = in[4]; + bf1[6] = in[12]; + bf1[8] = in[2]; + bf1[10] = in[10]; + bf1[12] = in[6]; + bf1[14] = in[14]; + bf1[16] = in[1]; + bf1[18] = in[9]; + bf1[20] = in[5]; + bf1[22] = in[13]; + bf1[24] = in[3]; + bf1[26] = in[11]; + bf1[28] = in[7]; + bf1[30] = in[15]; + + // stage 2 + bf1[31] = half_btf_0_neon_r(&cospi[2], &bf1[16], &v_bit, &rnding); + bf1[16] = half_btf_0_neon_r(&cospi[62], &bf1[16], &v_bit, &rnding); + bf1[17] = half_btf_0_m_neon_r(&cospi[34], &bf1[30], &v_bit, &rnding); + bf1[30] = half_btf_0_neon_r(&cospi[30], &bf1[30], &v_bit, &rnding); + bf1[29] = half_btf_0_neon_r(&cospi[18], &bf1[18], &v_bit, &rnding); + bf1[18] = half_btf_0_neon_r(&cospi[46], &bf1[18], &v_bit, &rnding); + bf1[19] = half_btf_0_m_neon_r(&cospi[50], &bf1[28], &v_bit, &rnding); + bf1[28] = half_btf_0_neon_r(&cospi[14], &bf1[28], &v_bit, &rnding); + bf1[27] = half_btf_0_neon_r(&cospi[10], &bf1[20], &v_bit, &rnding); + bf1[20] = half_btf_0_neon_r(&cospi[54], &bf1[20], &v_bit, &rnding); + bf1[21] = half_btf_0_m_neon_r(&cospi[42], &bf1[26], &v_bit, &rnding); + bf1[26] = half_btf_0_neon_r(&cospi[22], &bf1[26], &v_bit, &rnding); + bf1[25] = half_btf_0_neon_r(&cospi[26], &bf1[22], &v_bit, &rnding); + bf1[22] = half_btf_0_neon_r(&cospi[38], &bf1[22], &v_bit, &rnding); + bf1[23] = half_btf_0_m_neon_r(&cospi[58], &bf1[24], &v_bit, &rnding); + bf1[24] = half_btf_0_neon_r(&cospi[6], &bf1[24], &v_bit, &rnding); + + // stage 3 + bf1[15] = half_btf_0_neon_r(&cospi[4], &bf1[8], &v_bit, &rnding); + bf1[8] = half_btf_0_neon_r(&cospi[60], &bf1[8], &v_bit, &rnding); + bf1[9] = half_btf_0_m_neon_r(&cospi[36], &bf1[14], &v_bit, &rnding); + bf1[14] = half_btf_0_neon_r(&cospi[28], &bf1[14], &v_bit, &rnding); + bf1[13] = half_btf_0_neon_r(&cospi[20], &bf1[10], &v_bit, &rnding); + bf1[10] = half_btf_0_neon_r(&cospi[44], &bf1[10], &v_bit, &rnding); + bf1[11] = half_btf_0_m_neon_r(&cospi[52], &bf1[12], &v_bit, &rnding); + bf1[12] = half_btf_0_neon_r(&cospi[12], &bf1[12], &v_bit, &rnding); + + addsub_neon(bf1[16], bf1[17], bf1 + 16, bf1 + 17, &clamp_lo, &clamp_hi); + addsub_neon(bf1[19], bf1[18], bf1 + 19, bf1 + 18, &clamp_lo, &clamp_hi); + addsub_neon(bf1[20], bf1[21], bf1 + 20, bf1 + 21, &clamp_lo, &clamp_hi); + addsub_neon(bf1[23], bf1[22], bf1 + 23, bf1 + 22, &clamp_lo, &clamp_hi); + addsub_neon(bf1[24], bf1[25], bf1 + 24, bf1 + 25, &clamp_lo, &clamp_hi); + addsub_neon(bf1[27], bf1[26], bf1 + 27, bf1 + 26, &clamp_lo, 
&clamp_hi); + addsub_neon(bf1[28], bf1[29], bf1 + 28, bf1 + 29, &clamp_lo, &clamp_hi); + addsub_neon(bf1[31], bf1[30], bf1 + 31, bf1 + 30, &clamp_lo, &clamp_hi); + // stage 4 + bf1[7] = half_btf_0_neon_r(&cospi[8], &bf1[4], &v_bit, &rnding); + bf1[4] = half_btf_0_neon_r(&cospi[56], &bf1[4], &v_bit, &rnding); + bf1[5] = half_btf_0_m_neon_r(&cospi[40], &bf1[6], &v_bit, &rnding); + bf1[6] = half_btf_0_neon_r(&cospi[24], &bf1[6], &v_bit, &rnding); + + addsub_neon(bf1[8], bf1[9], bf1 + 8, bf1 + 9, &clamp_lo, &clamp_hi); + addsub_neon(bf1[11], bf1[10], bf1 + 11, bf1 + 10, &clamp_lo, &clamp_hi); + addsub_neon(bf1[12], bf1[13], bf1 + 12, bf1 + 13, &clamp_lo, &clamp_hi); + addsub_neon(bf1[15], bf1[14], bf1 + 15, bf1 + 14, &clamp_lo, &clamp_hi); + + idct32_stage4_neon(bf1, cospi, &v_bit, &rnding); + + // stage 5 + bf1[0] = half_btf_0_neon_r(&cospi[32], &bf1[0], &v_bit, &rnding); + bf1[1] = bf1[0]; + bf1[3] = half_btf_0_neon_r(&cospi[16], &bf1[2], &v_bit, &rnding); + bf1[2] = half_btf_0_neon_r(&cospi[48], &bf1[2], &v_bit, &rnding); + + addsub_neon(bf1[4], bf1[5], bf1 + 4, bf1 + 5, &clamp_lo, &clamp_hi); + addsub_neon(bf1[7], bf1[6], bf1 + 7, bf1 + 6, &clamp_lo, &clamp_hi); + + idct32_stage5_neon(bf1, cospi, &clamp_lo, &clamp_hi, &v_bit, &rnding); + + // stage 6 + addsub_neon(bf1[0], bf1[3], bf1 + 0, bf1 + 3, &clamp_lo, &clamp_hi); + addsub_neon(bf1[1], bf1[2], bf1 + 1, bf1 + 2, &clamp_lo, &clamp_hi); + + idct32_stage6_neon(bf1, cospi, &clamp_lo, &clamp_hi, &v_bit, &rnding); + + // stage 7 + idct32_stage7_neon(bf1, cospi, &clamp_lo, &clamp_hi, &v_bit, &rnding); + + // stage 8 + idct32_stage8_neon(bf1, cospi, &clamp_lo, &clamp_hi, &v_bit, &rnding); + // stage 9 + idct32_stage9_neon(bf1, out, do_cols, bd, out_shift, &clamp_lo, &clamp_hi); +} + +static void idct32x32_neon(int32x4_t *in, int32x4_t *out, int bit, int do_cols, + int bd, int out_shift) { + const int32_t *cospi = cospi_arr(bit); + const int log_range = AOMMAX(16, bd + (do_cols ? 
6 : 8)); + const int32x4_t clamp_lo = vdupq_n_s32(-(1 << (log_range - 1))); + const int32x4_t clamp_hi = vdupq_n_s32((1 << (log_range - 1)) - 1); + int32x4_t bf1[32], bf0[32]; + const int32x4_t v_bit = vdupq_n_s32(-bit); + const int32x4_t rnding = vdupq_n_s32(1 << (bit - 1)); + // stage 0 + // stage 1 + bf1[0] = in[0]; + bf1[1] = in[16]; + bf1[2] = in[8]; + bf1[3] = in[24]; + bf1[4] = in[4]; + bf1[5] = in[20]; + bf1[6] = in[12]; + bf1[7] = in[28]; + bf1[8] = in[2]; + bf1[9] = in[18]; + bf1[10] = in[10]; + bf1[11] = in[26]; + bf1[12] = in[6]; + bf1[13] = in[22]; + bf1[14] = in[14]; + bf1[15] = in[30]; + bf1[16] = in[1]; + bf1[17] = in[17]; + bf1[18] = in[9]; + bf1[19] = in[25]; + bf1[20] = in[5]; + bf1[21] = in[21]; + bf1[22] = in[13]; + bf1[23] = in[29]; + bf1[24] = in[3]; + bf1[25] = in[19]; + bf1[26] = in[11]; + bf1[27] = in[27]; + bf1[28] = in[7]; + bf1[29] = in[23]; + bf1[30] = in[15]; + bf1[31] = in[31]; + + // stage 2 + for (int i = 0; i < 16; i++) bf0[i] = bf1[i]; + + bf0[16] = half_btf_neon_mode01_r(&cospi[62], &bf1[16], &cospi[2], &bf1[31], + &v_bit, &rnding); + bf0[17] = half_btf_neon_mode01_r(&cospi[30], &bf1[17], &cospi[34], &bf1[30], + &v_bit, &rnding); + bf0[18] = half_btf_neon_mode01_r(&cospi[46], &bf1[18], &cospi[18], &bf1[29], + &v_bit, &rnding); + bf0[19] = half_btf_neon_mode01_r(&cospi[14], &bf1[19], &cospi[50], &bf1[28], + &v_bit, &rnding); + bf0[20] = half_btf_neon_mode01_r(&cospi[54], &bf1[20], &cospi[10], &bf1[27], + &v_bit, &rnding); + bf0[21] = half_btf_neon_mode01_r(&cospi[22], &bf1[21], &cospi[42], &bf1[26], + &v_bit, &rnding); + bf0[22] = half_btf_neon_mode01_r(&cospi[38], &bf1[22], &cospi[26], &bf1[25], + &v_bit, &rnding); + bf0[23] = half_btf_neon_mode01_r(&cospi[6], &bf1[23], &cospi[58], &bf1[24], + &v_bit, &rnding); + bf0[24] = half_btf_neon_r(&cospi[58], &bf1[23], &cospi[6], &bf1[24], &v_bit, + &rnding); + bf0[25] = half_btf_neon_r(&cospi[26], &bf1[22], &cospi[38], &bf1[25], &v_bit, + &rnding); + bf0[26] = half_btf_neon_r(&cospi[42], &bf1[21], &cospi[22], &bf1[26], &v_bit, + &rnding); + bf0[27] = half_btf_neon_r(&cospi[10], &bf1[20], &cospi[54], &bf1[27], &v_bit, + &rnding); + bf0[28] = half_btf_neon_r(&cospi[50], &bf1[19], &cospi[14], &bf1[28], &v_bit, + &rnding); + bf0[29] = half_btf_neon_r(&cospi[18], &bf1[18], &cospi[46], &bf1[29], &v_bit, + &rnding); + bf0[30] = half_btf_neon_r(&cospi[34], &bf1[17], &cospi[30], &bf1[30], &v_bit, + &rnding); + bf0[31] = half_btf_neon_r(&cospi[2], &bf1[16], &cospi[62], &bf1[31], &v_bit, + &rnding); + + // stage 3 + for (int i = 0; i < 8; i++) bf1[i] = bf0[i]; + + bf1[8] = half_btf_neon_mode01_r(&cospi[60], &bf0[8], &cospi[4], &bf0[15], + &v_bit, &rnding); + bf1[9] = half_btf_neon_mode01_r(&cospi[28], &bf0[9], &cospi[36], &bf0[14], + &v_bit, &rnding); + bf1[10] = half_btf_neon_mode01_r(&cospi[44], &bf0[10], &cospi[20], &bf0[13], + &v_bit, &rnding); + bf1[11] = half_btf_neon_mode01_r(&cospi[12], &bf0[11], &cospi[52], &bf0[12], + &v_bit, &rnding); + bf1[12] = half_btf_neon_r(&cospi[52], &bf0[11], &cospi[12], &bf0[12], &v_bit, + &rnding); + bf1[13] = half_btf_neon_r(&cospi[20], &bf0[10], &cospi[44], &bf0[13], &v_bit, + &rnding); + bf1[14] = half_btf_neon_r(&cospi[36], &bf0[9], &cospi[28], &bf0[14], &v_bit, + &rnding); + bf1[15] = half_btf_neon_r(&cospi[4], &bf0[8], &cospi[60], &bf0[15], &v_bit, + &rnding); + + addsub_neon(bf0[16], bf0[17], bf1 + 16, bf1 + 17, &clamp_lo, &clamp_hi); + addsub_neon(bf0[19], bf0[18], bf1 + 19, bf1 + 18, &clamp_lo, &clamp_hi); + addsub_neon(bf0[20], bf0[21], bf1 + 20, bf1 + 21, &clamp_lo, 
&clamp_hi); + addsub_neon(bf0[23], bf0[22], bf1 + 23, bf1 + 22, &clamp_lo, &clamp_hi); + addsub_neon(bf0[24], bf0[25], bf1 + 24, bf1 + 25, &clamp_lo, &clamp_hi); + addsub_neon(bf0[27], bf0[26], bf1 + 27, bf1 + 26, &clamp_lo, &clamp_hi); + addsub_neon(bf0[28], bf0[29], bf1 + 28, bf1 + 29, &clamp_lo, &clamp_hi); + addsub_neon(bf0[31], bf0[30], bf1 + 31, bf1 + 30, &clamp_lo, &clamp_hi); + + // stage 4 + bf0[0] = bf1[0]; + bf0[1] = bf1[1]; + bf0[2] = bf1[2]; + bf0[3] = bf1[3]; + bf0[4] = half_btf_neon_mode01_r(&cospi[56], &bf1[4], &cospi[8], &bf1[7], + &v_bit, &rnding); + bf0[5] = half_btf_neon_mode01_r(&cospi[24], &bf1[5], &cospi[40], &bf1[6], + &v_bit, &rnding); + bf0[6] = half_btf_neon_r(&cospi[40], &bf1[5], &cospi[24], &bf1[6], &v_bit, + &rnding); + bf0[7] = + half_btf_neon_r(&cospi[8], &bf1[4], &cospi[56], &bf1[7], &v_bit, &rnding); + + addsub_neon(bf1[8], bf1[9], bf0 + 8, bf0 + 9, &clamp_lo, &clamp_hi); + addsub_neon(bf1[11], bf1[10], bf0 + 11, bf0 + 10, &clamp_lo, &clamp_hi); + addsub_neon(bf1[12], bf1[13], bf0 + 12, bf0 + 13, &clamp_lo, &clamp_hi); + addsub_neon(bf1[15], bf1[14], bf0 + 15, bf0 + 14, &clamp_lo, &clamp_hi); + + bf0[16] = bf1[16]; + bf0[17] = half_btf_neon_mode10_r(&cospi[8], &bf1[17], &cospi[56], &bf1[30], + &v_bit, &rnding); + bf0[18] = half_btf_neon_mode11_r(&cospi[56], &bf1[18], &cospi[8], &bf1[29], + &v_bit, &rnding); + bf0[19] = bf1[19]; + bf0[20] = bf1[20]; + bf0[21] = half_btf_neon_mode10_r(&cospi[40], &bf1[21], &cospi[24], &bf1[26], + &v_bit, &rnding); + bf0[22] = half_btf_neon_mode11_r(&cospi[24], &bf1[22], &cospi[40], &bf1[25], + &v_bit, &rnding); + bf0[23] = bf1[23]; + bf0[24] = bf1[24]; + bf0[25] = half_btf_neon_mode10_r(&cospi[40], &bf1[22], &cospi[24], &bf1[25], + &v_bit, &rnding); + bf0[26] = half_btf_neon_r(&cospi[24], &bf1[21], &cospi[40], &bf1[26], &v_bit, + &rnding); + bf0[27] = bf1[27]; + bf0[28] = bf1[28]; + bf0[29] = half_btf_neon_mode10_r(&cospi[8], &bf1[18], &cospi[56], &bf1[29], + &v_bit, &rnding); + bf0[30] = half_btf_neon_r(&cospi[56], &bf1[17], &cospi[8], &bf1[30], &v_bit, + &rnding); + bf0[31] = bf1[31]; + + // stage 5 + bf1[0] = half_btf_neon_r(&cospi[32], &bf0[0], &cospi[32], &bf0[1], &v_bit, + &rnding); + bf1[1] = half_btf_neon_mode01_r(&cospi[32], &bf0[0], &cospi[32], &bf0[1], + &v_bit, &rnding); + bf1[2] = half_btf_neon_mode01_r(&cospi[48], &bf0[2], &cospi[16], &bf0[3], + &v_bit, &rnding); + bf1[3] = half_btf_neon_r(&cospi[16], &bf0[2], &cospi[48], &bf0[3], &v_bit, + &rnding); + addsub_neon(bf0[4], bf0[5], bf1 + 4, bf1 + 5, &clamp_lo, &clamp_hi); + addsub_neon(bf0[7], bf0[6], bf1 + 7, bf1 + 6, &clamp_lo, &clamp_hi); + bf1[8] = bf0[8]; + bf1[9] = half_btf_neon_mode10_r(&cospi[16], &bf0[9], &cospi[48], &bf0[14], + &v_bit, &rnding); + bf1[10] = half_btf_neon_mode11_r(&cospi[48], &bf0[10], &cospi[16], &bf0[13], + &v_bit, &rnding); + bf1[11] = bf0[11]; + bf1[12] = bf0[12]; + bf1[13] = half_btf_neon_mode10_r(&cospi[16], &bf0[10], &cospi[48], &bf0[13], + &v_bit, &rnding); + bf1[14] = half_btf_neon_r(&cospi[48], &bf0[9], &cospi[16], &bf0[14], &v_bit, + &rnding); + bf1[15] = bf0[15]; + addsub_neon(bf0[16], bf0[19], bf1 + 16, bf1 + 19, &clamp_lo, &clamp_hi); + addsub_neon(bf0[17], bf0[18], bf1 + 17, bf1 + 18, &clamp_lo, &clamp_hi); + addsub_neon(bf0[23], bf0[20], bf1 + 23, bf1 + 20, &clamp_lo, &clamp_hi); + addsub_neon(bf0[22], bf0[21], bf1 + 22, bf1 + 21, &clamp_lo, &clamp_hi); + addsub_neon(bf0[24], bf0[27], bf1 + 24, bf1 + 27, &clamp_lo, &clamp_hi); + addsub_neon(bf0[25], bf0[26], bf1 + 25, bf1 + 26, &clamp_lo, &clamp_hi); + 
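// The half_btf_neon_mode*_r helpers used in the stages above appear to mirror
+ // the reference half_btf() butterflies, with the mode digits selecting which
+ // cospi term is negated (mode10: the first, mode01: the second, mode11: both).
+ // Each addsub_neon(a, b, &s, &d, &lo, &hi) call is the clamped butterfly pair
+ // s = clamp(a + b), d = clamp(a - b). +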
addsub_neon(bf0[31], bf0[28], bf1 + 31, bf1 + 28, &clamp_lo, &clamp_hi); + addsub_neon(bf0[30], bf0[29], bf1 + 30, bf1 + 29, &clamp_lo, &clamp_hi); + + // stage 6 + addsub_neon(bf1[0], bf1[3], bf0 + 0, bf0 + 3, &clamp_lo, &clamp_hi); + addsub_neon(bf1[1], bf1[2], bf0 + 1, bf0 + 2, &clamp_lo, &clamp_hi); + bf0[4] = bf1[4]; + bf0[5] = half_btf_neon_mode10_r(&cospi[32], &bf1[5], &cospi[32], &bf1[6], + &v_bit, &rnding); + bf0[6] = half_btf_neon_r(&cospi[32], &bf1[5], &cospi[32], &bf1[6], &v_bit, + &rnding); + bf0[7] = bf1[7]; + addsub_neon(bf1[8], bf1[11], bf0 + 8, bf0 + 11, &clamp_lo, &clamp_hi); + addsub_neon(bf1[9], bf1[10], bf0 + 9, bf0 + 10, &clamp_lo, &clamp_hi); + addsub_neon(bf1[15], bf1[12], bf0 + 15, bf0 + 12, &clamp_lo, &clamp_hi); + addsub_neon(bf1[14], bf1[13], bf0 + 14, bf0 + 13, &clamp_lo, &clamp_hi); + bf0[16] = bf1[16]; + bf0[17] = bf1[17]; + bf0[18] = half_btf_neon_mode10_r(&cospi[16], &bf1[18], &cospi[48], &bf1[29], + &v_bit, &rnding); + bf0[19] = half_btf_neon_mode10_r(&cospi[16], &bf1[19], &cospi[48], &bf1[28], + &v_bit, &rnding); + bf0[20] = half_btf_neon_mode11_r(&cospi[48], &bf1[20], &cospi[16], &bf1[27], + &v_bit, &rnding); + bf0[21] = half_btf_neon_mode11_r(&cospi[48], &bf1[21], &cospi[16], &bf1[26], + &v_bit, &rnding); + bf0[22] = bf1[22]; + bf0[23] = bf1[23]; + bf0[24] = bf1[24]; + bf0[25] = bf1[25]; + bf0[26] = half_btf_neon_mode10_r(&cospi[16], &bf1[21], &cospi[48], &bf1[26], + &v_bit, &rnding); + bf0[27] = half_btf_neon_mode10_r(&cospi[16], &bf1[20], &cospi[48], &bf1[27], + &v_bit, &rnding); + bf0[28] = half_btf_neon_r(&cospi[48], &bf1[19], &cospi[16], &bf1[28], &v_bit, + &rnding); + bf0[29] = half_btf_neon_r(&cospi[48], &bf1[18], &cospi[16], &bf1[29], &v_bit, + &rnding); + bf0[30] = bf1[30]; + bf0[31] = bf1[31]; + + // stage 7 + addsub_neon(bf0[0], bf0[7], bf1 + 0, bf1 + 7, &clamp_lo, &clamp_hi); + addsub_neon(bf0[1], bf0[6], bf1 + 1, bf1 + 6, &clamp_lo, &clamp_hi); + addsub_neon(bf0[2], bf0[5], bf1 + 2, bf1 + 5, &clamp_lo, &clamp_hi); + addsub_neon(bf0[3], bf0[4], bf1 + 3, bf1 + 4, &clamp_lo, &clamp_hi); + bf1[8] = bf0[8]; + bf1[9] = bf0[9]; + bf1[10] = half_btf_neon_mode10_r(&cospi[32], &bf0[10], &cospi[32], &bf0[13], + &v_bit, &rnding); + bf1[11] = half_btf_neon_mode10_r(&cospi[32], &bf0[11], &cospi[32], &bf0[12], + &v_bit, &rnding); + bf1[12] = half_btf_neon_r(&cospi[32], &bf0[11], &cospi[32], &bf0[12], &v_bit, + &rnding); + bf1[13] = half_btf_neon_r(&cospi[32], &bf0[10], &cospi[32], &bf0[13], &v_bit, + &rnding); + bf1[14] = bf0[14]; + bf1[15] = bf0[15]; + addsub_neon(bf0[16], bf0[23], bf1 + 16, bf1 + 23, &clamp_lo, &clamp_hi); + addsub_neon(bf0[17], bf0[22], bf1 + 17, bf1 + 22, &clamp_lo, &clamp_hi); + addsub_neon(bf0[18], bf0[21], bf1 + 18, bf1 + 21, &clamp_lo, &clamp_hi); + addsub_neon(bf0[19], bf0[20], bf1 + 19, bf1 + 20, &clamp_lo, &clamp_hi); + addsub_neon(bf0[31], bf0[24], bf1 + 31, bf1 + 24, &clamp_lo, &clamp_hi); + addsub_neon(bf0[30], bf0[25], bf1 + 30, bf1 + 25, &clamp_lo, &clamp_hi); + addsub_neon(bf0[29], bf0[26], bf1 + 29, bf1 + 26, &clamp_lo, &clamp_hi); + addsub_neon(bf0[28], bf0[27], bf1 + 28, bf1 + 27, &clamp_lo, &clamp_hi); + + // stage 8 + addsub_neon(bf1[0], bf1[15], bf0 + 0, bf0 + 15, &clamp_lo, &clamp_hi); + addsub_neon(bf1[1], bf1[14], bf0 + 1, bf0 + 14, &clamp_lo, &clamp_hi); + addsub_neon(bf1[2], bf1[13], bf0 + 2, bf0 + 13, &clamp_lo, &clamp_hi); + addsub_neon(bf1[3], bf1[12], bf0 + 3, bf0 + 12, &clamp_lo, &clamp_hi); + addsub_neon(bf1[4], bf1[11], bf0 + 4, bf0 + 11, &clamp_lo, &clamp_hi); + addsub_neon(bf1[5], bf1[10], bf0 + 5, bf0 
+ 10, &clamp_lo, &clamp_hi); + addsub_neon(bf1[6], bf1[9], bf0 + 6, bf0 + 9, &clamp_lo, &clamp_hi); + addsub_neon(bf1[7], bf1[8], bf0 + 7, bf0 + 8, &clamp_lo, &clamp_hi); + bf0[16] = bf1[16]; + bf0[17] = bf1[17]; + bf0[18] = bf1[18]; + bf0[19] = bf1[19]; + bf0[20] = half_btf_neon_mode10_r(&cospi[32], &bf1[20], &cospi[32], &bf1[27], + &v_bit, &rnding); + bf0[21] = half_btf_neon_mode10_r(&cospi[32], &bf1[21], &cospi[32], &bf1[26], + &v_bit, &rnding); + bf0[22] = half_btf_neon_mode10_r(&cospi[32], &bf1[22], &cospi[32], &bf1[25], + &v_bit, &rnding); + bf0[23] = half_btf_neon_mode10_r(&cospi[32], &bf1[23], &cospi[32], &bf1[24], + &v_bit, &rnding); + bf0[24] = half_btf_neon_r(&cospi[32], &bf1[23], &cospi[32], &bf1[24], &v_bit, + &rnding); + bf0[25] = half_btf_neon_r(&cospi[32], &bf1[22], &cospi[32], &bf1[25], &v_bit, + &rnding); + bf0[26] = half_btf_neon_r(&cospi[32], &bf1[21], &cospi[32], &bf1[26], &v_bit, + &rnding); + bf0[27] = half_btf_neon_r(&cospi[32], &bf1[20], &cospi[32], &bf1[27], &v_bit, + &rnding); + bf0[28] = bf1[28]; + bf0[29] = bf1[29]; + bf0[30] = bf1[30]; + bf0[31] = bf1[31]; + + // stage 9 + addsub_neon(bf0[0], bf0[31], out + 0, out + 31, &clamp_lo, &clamp_hi); + addsub_neon(bf0[1], bf0[30], out + 1, out + 30, &clamp_lo, &clamp_hi); + addsub_neon(bf0[2], bf0[29], out + 2, out + 29, &clamp_lo, &clamp_hi); + addsub_neon(bf0[3], bf0[28], out + 3, out + 28, &clamp_lo, &clamp_hi); + addsub_neon(bf0[4], bf0[27], out + 4, out + 27, &clamp_lo, &clamp_hi); + addsub_neon(bf0[5], bf0[26], out + 5, out + 26, &clamp_lo, &clamp_hi); + addsub_neon(bf0[6], bf0[25], out + 6, out + 25, &clamp_lo, &clamp_hi); + addsub_neon(bf0[7], bf0[24], out + 7, out + 24, &clamp_lo, &clamp_hi); + addsub_neon(bf0[8], bf0[23], out + 8, out + 23, &clamp_lo, &clamp_hi); + addsub_neon(bf0[9], bf0[22], out + 9, out + 22, &clamp_lo, &clamp_hi); + addsub_neon(bf0[10], bf0[21], out + 10, out + 21, &clamp_lo, &clamp_hi); + addsub_neon(bf0[11], bf0[20], out + 11, out + 20, &clamp_lo, &clamp_hi); + addsub_neon(bf0[12], bf0[19], out + 12, out + 19, &clamp_lo, &clamp_hi); + addsub_neon(bf0[13], bf0[18], out + 13, out + 18, &clamp_lo, &clamp_hi); + addsub_neon(bf0[14], bf0[17], out + 14, out + 17, &clamp_lo, &clamp_hi); + addsub_neon(bf0[15], bf0[16], out + 15, out + 16, &clamp_lo, &clamp_hi); + + if (!do_cols) { + const int log_range_out = AOMMAX(16, bd + 6); + const int32x4_t clamp_lo_out = vdupq_n_s32(-(1 << (log_range_out - 1))); + const int32x4_t clamp_hi_out = vdupq_n_s32((1 << (log_range_out - 1)) - 1); + round_shift_8x8(out, out_shift); + round_shift_8x8(out + 16, out_shift); + highbd_clamp_s32_neon(out, out, &clamp_lo_out, &clamp_hi_out, 32); + } +} + +static void iidentity32_neon(int32x4_t *in, int32x4_t *out, int bit, + int do_cols, int bd, int out_shift) { + (void)bit; + for (int i = 0; i < 32; i += 16) { + out[i] = vshlq_n_s32(in[i], 2); + out[i + 1] = vshlq_n_s32(in[i + 1], 2); + out[i + 2] = vshlq_n_s32(in[i + 2], 2); + out[i + 3] = vshlq_n_s32(in[i + 3], 2); + out[i + 4] = vshlq_n_s32(in[i + 4], 2); + out[i + 5] = vshlq_n_s32(in[i + 5], 2); + out[i + 6] = vshlq_n_s32(in[i + 6], 2); + out[i + 7] = vshlq_n_s32(in[i + 7], 2); + out[i + 8] = vshlq_n_s32(in[i + 8], 2); + out[i + 9] = vshlq_n_s32(in[i + 9], 2); + out[i + 10] = vshlq_n_s32(in[i + 10], 2); + out[i + 11] = vshlq_n_s32(in[i + 11], 2); + out[i + 12] = vshlq_n_s32(in[i + 12], 2); + out[i + 13] = vshlq_n_s32(in[i + 13], 2); + out[i + 14] = vshlq_n_s32(in[i + 14], 2); + out[i + 15] = vshlq_n_s32(in[i + 15], 2); + } + + if (!do_cols) { + const int 
log_range_out = AOMMAX(16, bd + 6); + const int32x4_t clamp_lo_out = vdupq_n_s32(-(1 << (log_range_out - 1))); + const int32x4_t clamp_hi_out = vdupq_n_s32((1 << (log_range_out - 1)) - 1); + round_shift_8x8(out, out_shift); + round_shift_8x8(out + 16, out_shift); + highbd_clamp_s32_neon(out, out, &clamp_lo_out, &clamp_hi_out, 32); + } +} + +// 1D itx types +typedef enum ATTRIBUTE_PACKED { + IDCT_1D, + IADST_1D, + IFLIPADST_1D = IADST_1D, + IIDENTITY_1D, + ITX_TYPES_1D, +} ITX_TYPE_1D; + +static const ITX_TYPE_1D vitx_1d_tab[TX_TYPES] = { + IDCT_1D, IADST_1D, IDCT_1D, IADST_1D, + IFLIPADST_1D, IDCT_1D, IFLIPADST_1D, IADST_1D, + IFLIPADST_1D, IIDENTITY_1D, IDCT_1D, IIDENTITY_1D, + IADST_1D, IIDENTITY_1D, IFLIPADST_1D, IIDENTITY_1D, +}; +static const ITX_TYPE_1D hitx_1d_tab[TX_TYPES] = { + IDCT_1D, IDCT_1D, IADST_1D, IADST_1D, + IDCT_1D, IFLIPADST_1D, IFLIPADST_1D, IFLIPADST_1D, + IADST_1D, IIDENTITY_1D, IIDENTITY_1D, IDCT_1D, + IIDENTITY_1D, IADST_1D, IIDENTITY_1D, IFLIPADST_1D, +}; + +static const transform_1d_neon + highbd_txfm_all_1d_zeros_w8_arr[TX_SIZES][ITX_TYPES_1D][4] = { + { + { idct4x4_neon, NULL, NULL, NULL }, + { iadst4x4_neon, NULL, NULL, NULL }, + { iidentity4_neon, iidentity4_neon, iidentity4_neon, NULL }, + }, + { { idct8x8_low1_neon, idct8x8_new_neon, NULL, NULL }, + { iadst8x8_low1_neon, iadst8x8_new_neon, NULL, NULL }, + { iidentity8_neon, iidentity8_neon, NULL, NULL } }, + { + { idct16x16_low1_neon, idct16x16_low8_neon, idct16x16_neon, NULL }, + { iadst16x16_low1_neon, iadst16x16_low8_neon, iadst16x16_neon, NULL }, + { iidentity16_neon, NULL, iidentity16_neon, NULL }, + }, + { { idct32x32_low1_neon, idct32x32_low8_neon, idct32x32_low16_neon, + idct32x32_neon }, + { NULL, NULL, NULL, NULL }, + { iidentity32_neon, NULL, NULL, NULL } }, + { { idct64x64_low1_neon, idct64x64_low8_neon, idct64x64_low16_neon, + idct64x64_neon }, + { NULL, NULL, NULL, NULL }, + { NULL, NULL, NULL, NULL } } + }; + +void av1_inv_txfm2d_add_4x8_neon(const tran_low_t *input, uint16_t *output, + int stride, TX_TYPE tx_type, const int bd) { + TX_SIZE tx_size = TX_4X8; + int32x4_t buf1[32] = { vdupq_n_s32(0) }; + + const int8_t *shift = av1_inv_txfm_shift_ls[tx_size]; + const int txw_idx = get_txw_idx(tx_size); + const int txh_idx = get_txh_idx(tx_size); + const int txfm_size_col = tx_size_wide[tx_size]; + const int txfm_size_row = tx_size_high[tx_size]; + const transform_1d_neon row_txfm = + highbd_txfm_all_1d_zeros_w8_arr[txw_idx][hitx_1d_tab[tx_type]][0]; + const transform_1d_neon col_txfm = + highbd_txfm_all_1d_zeros_w8_arr[txh_idx][vitx_1d_tab[tx_type]][1]; + const int input_stride = AOMMIN(32, txfm_size_row); + + assert(col_txfm != NULL); + assert(row_txfm != NULL); + int ud_flip, lr_flip; + get_flip_cfg(tx_type, &ud_flip, &lr_flip); + + // 1st stage: column transform + int32x4_t buf0[8]; + load_buffer_32bit_input(input, input_stride, buf0, txfm_size_col); + load_buffer_32bit_input(input + 4, input_stride, buf0 + 4, txfm_size_col); + round_shift_rect_array_32_neon(buf0, buf0, txfm_size_row); + row_txfm(buf0, buf0, INV_COS_BIT, 0, bd, -shift[0]); + row_txfm(buf0 + 4, buf0 + 4, INV_COS_BIT, 0, bd, -shift[0]); + + if (lr_flip) { + TRANSPOSE_4X4(buf0[3], buf0[2], buf0[1], buf0[0], buf1[0], buf1[1], buf1[2], + buf1[3]); + + TRANSPOSE_4X4(buf0[7], buf0[6], buf0[5], buf0[4], buf1[4], buf1[5], buf1[6], + buf1[7]); + } else { + TRANSPOSE_4X4(buf0[0], buf0[1], buf0[2], buf0[3], buf1[0], buf1[1], buf1[2], + buf1[3]); + + TRANSPOSE_4X4(buf0[4], buf0[5], buf0[6], buf0[7], buf1[4], buf1[5], buf1[6], + buf1[7]); 
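+ // Both branches transpose the two 4x4 tiles of row-pass output into column
+ // order for the vertical pass below; the lr_flip branch feeds the rows in
+ // reverse to mirror the block horizontally.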
+ } + + // 2nd stage: column transform + col_txfm(buf1, buf1, INV_COS_BIT, 1, bd, 0); + + round_shift_array_32_neon(buf1, buf1, txfm_size_row, -shift[1]); + + // write to buffer + highbd_write_buffer_4xn_neon(buf1, output, stride, ud_flip, txfm_size_row, + bd); +} + +void av1_inv_txfm2d_add_8x4_neon(const int32_t *input, uint16_t *output, + int stride, TX_TYPE tx_type, const int bd) { + TX_SIZE tx_size = TX_8X4; + int32x4_t buf1[8]; + const int8_t *shift = av1_inv_txfm_shift_ls[tx_size]; + const int txw_idx = get_txw_idx(tx_size); + const int txh_idx = get_txh_idx(tx_size); + const int txfm_size_col = tx_size_wide[tx_size]; + const int txfm_size_row = tx_size_high[tx_size]; + const transform_1d_neon row_txfm = + highbd_txfm_all_1d_zeros_w8_arr[txw_idx][hitx_1d_tab[tx_type]][1]; + const transform_1d_neon col_txfm = + highbd_txfm_all_1d_zeros_w8_arr[txh_idx][vitx_1d_tab[tx_type]][0]; + + assert(col_txfm != NULL); + assert(row_txfm != NULL); + int ud_flip, lr_flip; + get_flip_cfg(tx_type, &ud_flip, &lr_flip); + + // 1st stage: column transform + int32x4_t buf0[8]; + const int32_t *input_row = input; + load_buffer_32bit_input(input_row, 4, buf0, txfm_size_col); + + round_shift_rect_array_32_neon(buf0, buf0, txfm_size_col); + row_txfm(buf0, buf0, INV_COS_BIT, 0, bd, -shift[0]); + + int32x4_t *buf1_ptr; + if (lr_flip) { + flip_buf_neon(buf0, buf1, txfm_size_col); + buf1_ptr = buf1; + } else { + buf1_ptr = buf0; + } + + // 2nd stage: column transform + for (int i = 0; i < 2; i++) { + int32x4_t *buf1_cur = buf1_ptr + i * txfm_size_row; + transpose_4x4(buf1_cur, buf1_cur); + col_txfm(buf1_cur, buf1_cur, INV_COS_BIT, 1, bd, 0); + } + round_shift_array_32_neon(buf1_ptr, buf1_ptr, txfm_size_col, -shift[1]); + // write to buffer + highbd_write_buffer_8xn_neon(buf1_ptr, output, stride, ud_flip, txfm_size_row, + bd); +} + +void av1_inv_txfm2d_add_4x16_neon(const int32_t *input, uint16_t *output, + int stride, TX_TYPE tx_type, const int bd) { + TX_SIZE tx_size = TX_4X16; + int32x4_t buf1[16]; + const int8_t *shift = av1_inv_txfm_shift_ls[tx_size]; + const int txw_idx = get_txw_idx(tx_size); + const int txh_idx = get_txh_idx(tx_size); + const int txfm_size_col = tx_size_wide[tx_size]; + const int txfm_size_row = tx_size_high[tx_size]; + const int buf_size_h_div8 = txfm_size_row >> 2; + const transform_1d_neon row_txfm = + highbd_txfm_all_1d_zeros_w8_arr[txw_idx][hitx_1d_tab[tx_type]][0]; + const transform_1d_neon col_txfm = + highbd_txfm_all_1d_zeros_w8_arr[txh_idx][vitx_1d_tab[tx_type]][2]; + const int input_stride = AOMMIN(32, txfm_size_row); + + assert(col_txfm != NULL); + assert(row_txfm != NULL); + int ud_flip, lr_flip; + get_flip_cfg(tx_type, &ud_flip, &lr_flip); + + // 1st stage: column transform + int32x4_t buf0[16]; + for (int i = 0; i < (txfm_size_row >> 2); i++) { + const int32_t *input_row = input + i * 4; + int32x4_t *buf0_cur = buf0 + i * 4; + load_buffer_32bit_input(input_row, input_stride, buf0_cur, txfm_size_col); + row_txfm(buf0 + (i << 2), buf0 + (i << 2), INV_COS_BIT, 0, bd, -shift[0]); + } + + if (lr_flip) { + for (int j = 0; j < buf_size_h_div8; ++j) { + TRANSPOSE_4X4(buf0[4 * j + 3], buf0[4 * j + 2], buf0[4 * j + 1], + buf0[4 * j], buf1[4 * j], buf1[4 * j + 1], buf1[4 * j + 2], + buf1[4 * j + 3]); + } + } else { + for (int j = 0; j < buf_size_h_div8; ++j) { + TRANSPOSE_4X4(buf0[4 * j], buf0[4 * j + 1], buf0[4 * j + 2], + buf0[4 * j + 3], buf1[4 * j], buf1[4 * j + 1], + buf1[4 * j + 2], buf1[4 * j + 3]); + } + } + + // 2nd stage: column transform + col_txfm(buf1, buf1, INV_COS_BIT, 1, 
bd, 0); + + round_shift_array_32_neon(buf1, buf1, txfm_size_row, -shift[1]); + + // write to buffer + highbd_write_buffer_4xn_neon(buf1, output, stride, ud_flip, txfm_size_row, + bd); +} + +void av1_inv_txfm2d_add_16x4_neon(const int32_t *input, uint16_t *output, + int stride, TX_TYPE tx_type, const int bd) { + TX_SIZE tx_size = TX_16X4; + int32x4_t buf1[16]; + const int8_t *shift = av1_inv_txfm_shift_ls[tx_size]; + const int txw_idx = get_txw_idx(tx_size); + const int txh_idx = get_txh_idx(tx_size); + const int txfm_size_col = tx_size_wide[tx_size]; + const int txfm_size_row = tx_size_high[tx_size]; + const int buf_size_w_div8 = txfm_size_col >> 2; + const transform_1d_neon row_txfm = + highbd_txfm_all_1d_zeros_w8_arr[txw_idx][hitx_1d_tab[tx_type]][2]; + const transform_1d_neon col_txfm = + highbd_txfm_all_1d_zeros_w8_arr[txh_idx][vitx_1d_tab[tx_type]][0]; + + assert(col_txfm != NULL); + assert(row_txfm != NULL); + int ud_flip, lr_flip; + get_flip_cfg(tx_type, &ud_flip, &lr_flip); + + // 1st stage: column transform + int32x4_t buf0[16]; + const int32_t *input_row = input; + load_buffer_32bit_input(input_row, 4, buf0, txfm_size_col); + + row_txfm(buf0, buf0, INV_COS_BIT, 0, bd, -shift[0]); + + int32x4_t *buf1_ptr; + if (lr_flip) { + flip_buf_neon(buf0, buf1, txfm_size_col); + buf1_ptr = buf1; + } else { + buf1_ptr = buf0; + } + + // 2nd stage: column transform + for (int i = 0; i < buf_size_w_div8; i++) { + int32x4_t *buf1_cur = buf1_ptr + i * txfm_size_row; + transpose_4x4(buf1_cur, buf1_cur); + col_txfm(buf1_cur, buf1_cur, INV_COS_BIT, 1, bd, 0); + } + round_shift_array_32_neon(buf1_ptr, buf1_ptr, txfm_size_col, -shift[1]); + + // write to buffer + for (int i = 0; i < (txfm_size_col >> 3); i++) { + highbd_write_buffer_8xn_neon(buf1_ptr + i * txfm_size_row * 2, + output + 8 * i, stride, ud_flip, txfm_size_row, + bd); + } +} + +static void highbd_inv_txfm2d_add_4x16_neon(const int32_t *input, + uint16_t *output, int stride, + TX_TYPE tx_type, int eob, + const int bd) { + (void)eob; + TX_SIZE tx_size = TX_4X16; + int32x4_t buf1[16]; + const int8_t *shift = av1_inv_txfm_shift_ls[tx_size]; + const int txw_idx = get_txw_idx(tx_size); + const int txh_idx = get_txh_idx(tx_size); + const int txfm_size_col = tx_size_wide[tx_size]; + const int txfm_size_row = tx_size_high[tx_size]; + const int buf_size_h_div8 = txfm_size_row >> 2; + const transform_1d_neon row_txfm = + highbd_txfm_all_1d_zeros_w8_arr[txw_idx][hitx_1d_tab[tx_type]][0]; + const transform_1d_neon col_txfm = + highbd_txfm_all_1d_zeros_w8_arr[txh_idx][vitx_1d_tab[tx_type]][2]; + const int input_stride = AOMMIN(32, txfm_size_col); + + assert(col_txfm != NULL); + assert(row_txfm != NULL); + int ud_flip, lr_flip; + get_flip_cfg(tx_type, &ud_flip, &lr_flip); + + // 1st stage: column transform + int32x4_t buf0[16]; + const int32_t *input_row = input; + int32x4_t *buf0_cur = buf0; + load_buffer_32bit_input(input_row, input_stride, buf0_cur, txfm_size_row); + for (int i = 0; i < (txfm_size_row >> 2); i++) { + row_txfm(buf0 + (i << 2), buf0 + (i << 2), INV_COS_BIT, 0, bd, -shift[0]); + } + + if (lr_flip) { + for (int j = 0; j < buf_size_h_div8; ++j) { + TRANSPOSE_4X4(buf0[4 * j + 3], buf0[4 * j + 2], buf0[4 * j + 1], + buf0[4 * j], buf1[4 * j], buf1[4 * j + 1], buf1[4 * j + 2], + buf1[4 * j + 3]); + } + } else { + for (int j = 0; j < buf_size_h_div8; ++j) { + TRANSPOSE_4X4(buf0[4 * j], buf0[4 * j + 1], buf0[4 * j + 2], + buf0[4 * j + 3], buf1[4 * j], buf1[4 * j + 1], + buf1[4 * j + 2], buf1[4 * j + 3]); + } + } + + // 2nd stage: column 
transform + col_txfm(buf1, buf1, INV_COS_BIT, 1, bd, 0); + + round_shift_array_32_neon(buf1, buf1, txfm_size_row, -shift[1]); + + // write to buffer + highbd_write_buffer_4xn_neon(buf1, output, stride, ud_flip, txfm_size_row, + bd); +} + +static void highbd_inv_txfm2d_add_16x4_neon(const int32_t *input, + uint16_t *output, int stride, + TX_TYPE tx_type, int eob, + const int bd) { + (void)eob; + TX_SIZE tx_size = TX_16X4; + int32x4_t buf1[16]; + const int8_t *shift = av1_inv_txfm_shift_ls[tx_size]; + const int txw_idx = get_txw_idx(tx_size); + const int txh_idx = get_txh_idx(tx_size); + const int txfm_size_col = tx_size_wide[tx_size]; + const int txfm_size_row = tx_size_high[tx_size]; + const int buf_size_w_div8 = txfm_size_col >> 2; + const transform_1d_neon row_txfm = + highbd_txfm_all_1d_zeros_w8_arr[txw_idx][hitx_1d_tab[tx_type]][2]; + const transform_1d_neon col_txfm = + highbd_txfm_all_1d_zeros_w8_arr[txh_idx][vitx_1d_tab[tx_type]][0]; + + assert(col_txfm != NULL); + assert(row_txfm != NULL); + int ud_flip, lr_flip; + get_flip_cfg(tx_type, &ud_flip, &lr_flip); + + // 1st stage: column transform + int32x4_t buf0[16]; + const int32_t *input_row = input; + load_buffer_32bit_input(input_row, 4, buf0, txfm_size_col); + + for (int j = 0; j < buf_size_w_div8; j++) { + TRANSPOSE_4X4(buf0[j], buf0[j + 4], buf0[j + 8], buf0[j + 12], buf1[4 * j], + buf1[4 * j + 1], buf1[4 * j + 2], buf1[4 * j + 3]); + } + row_txfm(buf1, buf0, INV_COS_BIT, 0, bd, -shift[0]); + + int32x4_t *buf1_ptr; + if (lr_flip) { + flip_buf_neon(buf0, buf1, txfm_size_col); + buf1_ptr = buf1; + } else { + buf1_ptr = buf0; + } + + // 2nd stage: column transform + for (int i = 0; i < buf_size_w_div8; i++) { + col_txfm(buf1_ptr + i * txfm_size_row, buf1_ptr + i * txfm_size_row, + INV_COS_BIT, 1, bd, 0); + } + round_shift_array_32_neon(buf1_ptr, buf1_ptr, txfm_size_col, -shift[1]); + + // write to buffer + for (int i = 0; i < (txfm_size_col >> 3); i++) { + highbd_write_buffer_8xn_neon(buf1_ptr + i * txfm_size_row * 2, + output + 8 * i, stride, ud_flip, txfm_size_row, + bd); + } +} + +static const int lowbd_txfm_all_1d_zeros_idx[32] = { + 0, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, +}; + +// Transform block width in log2 for eob (size of 64 map to 32) +static const int tx_size_wide_log2_eob[TX_SIZES_ALL] = { + 2, 3, 4, 5, 5, 2, 3, 3, 4, 4, 5, 5, 5, 2, 4, 3, 5, 4, 5, +}; + +DECLARE_ALIGNED(16, static const int16_t, av1_eob_to_eobxy_8x8_default[8]) = { + 0x0707, 0x0707, 0x0707, 0x0707, 0x0707, 0x0707, 0x0707, 0x0707, +}; + +DECLARE_ALIGNED(16, static const int16_t, + av1_eob_to_eobxy_16x16_default[16]) = { + 0x0707, 0x0707, 0x0f0f, 0x0f0f, 0x0f0f, 0x0f0f, 0x0f0f, 0x0f0f, + 0x0f0f, 0x0f0f, 0x0f0f, 0x0f0f, 0x0f0f, 0x0f0f, 0x0f0f, 0x0f0f, +}; + +DECLARE_ALIGNED(16, static const int16_t, + av1_eob_to_eobxy_32x32_default[32]) = { + 0x0707, 0x0f0f, 0x0f0f, 0x0f0f, 0x1f1f, 0x1f1f, 0x1f1f, 0x1f1f, + 0x1f1f, 0x1f1f, 0x1f1f, 0x1f1f, 0x1f1f, 0x1f1f, 0x1f1f, 0x1f1f, + 0x1f1f, 0x1f1f, 0x1f1f, 0x1f1f, 0x1f1f, 0x1f1f, 0x1f1f, 0x1f1f, + 0x1f1f, 0x1f1f, 0x1f1f, 0x1f1f, 0x1f1f, 0x1f1f, 0x1f1f, 0x1f1f, +}; + +DECLARE_ALIGNED(16, static const int16_t, av1_eob_to_eobxy_8x16_default[16]) = { + 0x0707, 0x0707, 0x0707, 0x0707, 0x0707, 0x0f07, 0x0f07, 0x0f07, + 0x0f07, 0x0f07, 0x0f07, 0x0f07, 0x0f07, 0x0f07, 0x0f07, 0x0f07, +}; + +DECLARE_ALIGNED(16, static const int16_t, av1_eob_to_eobxy_16x8_default[8]) = { + 0x0707, 0x0707, 0x070f, 0x070f, 0x070f, 0x070f, 0x070f, 0x070f, +}; + +DECLARE_ALIGNED(16, 
static const int16_t, + av1_eob_to_eobxy_16x32_default[32]) = { + 0x0707, 0x0707, 0x0f0f, 0x0f0f, 0x0f0f, 0x0f0f, 0x0f0f, 0x0f0f, + 0x0f0f, 0x1f0f, 0x1f0f, 0x1f0f, 0x1f0f, 0x1f0f, 0x1f0f, 0x1f0f, + 0x1f0f, 0x1f0f, 0x1f0f, 0x1f0f, 0x1f0f, 0x1f0f, 0x1f0f, 0x1f0f, + 0x1f0f, 0x1f0f, 0x1f0f, 0x1f0f, 0x1f0f, 0x1f0f, 0x1f0f, 0x1f0f, +}; + +DECLARE_ALIGNED(16, static const int16_t, + av1_eob_to_eobxy_32x16_default[16]) = { + 0x0707, 0x0f0f, 0x0f0f, 0x0f0f, 0x0f1f, 0x0f1f, 0x0f1f, 0x0f1f, + 0x0f1f, 0x0f1f, 0x0f1f, 0x0f1f, 0x0f1f, 0x0f1f, 0x0f1f, 0x0f1f, +}; + +DECLARE_ALIGNED(16, static const int16_t, av1_eob_to_eobxy_8x32_default[32]) = { + 0x0707, 0x0707, 0x0707, 0x0707, 0x0707, 0x0f07, 0x0f07, 0x0f07, + 0x0f07, 0x0f07, 0x0f07, 0x0f07, 0x0f07, 0x1f07, 0x1f07, 0x1f07, + 0x1f07, 0x1f07, 0x1f07, 0x1f07, 0x1f07, 0x1f07, 0x1f07, 0x1f07, + 0x1f07, 0x1f07, 0x1f07, 0x1f07, 0x1f07, 0x1f07, 0x1f07, 0x1f07, +}; + +DECLARE_ALIGNED(16, static const int16_t, av1_eob_to_eobxy_32x8_default[8]) = { + 0x0707, 0x070f, 0x070f, 0x071f, 0x071f, 0x071f, 0x071f, 0x071f, +}; + +DECLARE_ALIGNED(16, static const int16_t *, + av1_eob_to_eobxy_default[TX_SIZES_ALL]) = { + NULL, + av1_eob_to_eobxy_8x8_default, + av1_eob_to_eobxy_16x16_default, + av1_eob_to_eobxy_32x32_default, + av1_eob_to_eobxy_32x32_default, + NULL, + NULL, + av1_eob_to_eobxy_8x16_default, + av1_eob_to_eobxy_16x8_default, + av1_eob_to_eobxy_16x32_default, + av1_eob_to_eobxy_32x16_default, + av1_eob_to_eobxy_32x32_default, + av1_eob_to_eobxy_32x32_default, + NULL, + NULL, + av1_eob_to_eobxy_8x32_default, + av1_eob_to_eobxy_32x8_default, + av1_eob_to_eobxy_16x32_default, + av1_eob_to_eobxy_32x16_default, +}; + +static INLINE void highbd_get_eobx_eoby_scan_default(int *eobx, int *eoby, + TX_SIZE tx_size, int eob) { + if (eob == 1) { + *eobx = 0; + *eoby = 0; + return; + } + + const int tx_w_log2 = tx_size_wide_log2_eob[tx_size]; + const int eob_row = (eob - 1) >> tx_w_log2; + const int eobxy = av1_eob_to_eobxy_default[tx_size][eob_row]; + *eobx = eobxy & 0xFF; + *eoby = eobxy >> 8; +} + +static INLINE void get_eobx_eoby_scan_default(int *eobx, int *eoby, + TX_SIZE tx_size) { + if (tx_size == 2) { + *eoby = 15, *eobx = 15; + } else if (tx_size == 3) { + *eoby = 31, *eobx = 31; + } else if (tx_size == 4) { + *eoby = 31, *eobx = 31; + } else if (tx_size == 7) { + *eoby = 15, *eobx = 7; + } else if (tx_size == 8) { + *eoby = 7, *eobx = 15; + } else if (tx_size == 9) { + *eoby = 31, *eobx = 15; + } else if (tx_size == 10) { + *eoby = 15, *eobx = 31; + } else if (tx_size == 11) { + *eoby = 31, *eobx = 31; + } else if (tx_size == 12) { + *eoby = 31, *eobx = 31; + } else if (tx_size == 15) { + *eoby = 31, *eobx = 7; + } else if (tx_size == 16) { + *eoby = 7, *eobx = 31; + } else if (tx_size == 17) { + *eoby = 31, *eobx = 15; + } else if (tx_size == 18) { + *eoby = 15, *eobx = 31; + } else { + *eoby = 0, *eobx = 0; + } +} + +static INLINE void get_eobx_eoby_scan_v_identity(int *eobx, int *eoby, + TX_SIZE tx_size) { + const int txfm_size_row = tx_size_high[tx_size]; + *eoby = AOMMIN(32, txfm_size_row) - 1; + *eobx = 0; +} + +static INLINE void get_eobx_eoby_scan_h_identity(int *eobx, int *eoby, + TX_SIZE tx_size) { + const int txfm_size_col = tx_size_wide[tx_size]; + *eobx = AOMMIN(32, txfm_size_col) - 1; + *eoby = 0; +} + +static void inv_txfm2d_add_h_identity_neon(const int32_t *input, + uint16_t *output, int stride, + TX_TYPE tx_type, TX_SIZE tx_size, + const int bd) { + int32x4_t buf1[64]; + int eobx, eoby; + get_eobx_eoby_scan_v_identity(&eobx, &eoby, tx_size); + 
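// Worked example (illustrative; values taken from the tables defined above):
// each av1_eob_to_eobxy_* entry packs the last nonzero column and row for a
// given eob into one 16-bit value -- eobx in the low byte, eoby in the high
// byte -- indexed by the row of the last nonzero coefficient. For
// highbd_get_eobx_eoby_scan_default() with TX_16X16 and eob = 20:
//
//   tx_w_log2 = tx_size_wide_log2_eob[TX_16X16] = 4
//   eob_row   = (eob - 1) >> tx_w_log2 = 19 >> 4 = 1
//   eobxy     = av1_eob_to_eobxy_16x16_default[1] = 0x0707
//   eobx      = eobxy & 0xFF = 7,  eoby = eobxy >> 8 = 7
//
// The resulting eobx/eoby then index lowbd_txfm_all_1d_zeros_idx[], which
// selects the 1D transform variant that only processes the nonzero
// coefficient range. (The bare integers tested in
// get_eobx_eoby_scan_default() appear to be TX_SIZE enum values, e.g.
// 2 == TX_16X16 and 7 == TX_8X16, with each reported dimension clamped to 32.)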
const int8_t *shift = av1_inv_txfm_shift_ls[tx_size]; + const int txw_idx = get_txw_idx(tx_size); + const int txh_idx = get_txh_idx(tx_size); + const int txfm_size_col = tx_size_wide[tx_size]; + const int txfm_size_row = tx_size_high[tx_size]; + const int buf_size_w = AOMMIN(32, txfm_size_col); + const int buf_size_w_div4 = buf_size_w >> 2; + const int buf_size_h_div8 = (eoby + 8) >> 3; + const int row_max = AOMMIN(32, txfm_size_row); + const int input_stride = row_max; + const int rect_type = get_rect_tx_log_ratio(txfm_size_col, txfm_size_row); + const int fun_idx = lowbd_txfm_all_1d_zeros_idx[eoby]; + const transform_1d_neon row_txfm = + highbd_txfm_all_1d_zeros_w8_arr[txw_idx][hitx_1d_tab[tx_type]][0]; + assert(row_txfm != NULL); + const transform_1d_neon col_txfm = + highbd_txfm_all_1d_zeros_w8_arr[txh_idx][vitx_1d_tab[tx_type]][fun_idx]; + assert(col_txfm != NULL); + int ud_flip, lr_flip; + get_flip_cfg(tx_type, &ud_flip, &lr_flip); + + for (int i = 0; i < (buf_size_h_div8 << 1); ++i) { + int32x4_t buf0[16]; + load_buffer_32bit_input(input + i * 4, input_stride, buf0, buf_size_w); + if (rect_type == 1 || rect_type == -1) { + round_shift_rect_array_32_neon(buf0, buf0, buf_size_w); + } + row_txfm(buf0, buf0, INV_COS_BIT, 0, bd, -shift[0]); + + int32x4_t *_buf1 = buf1 + i * 4; + + for (int j = 0; j < buf_size_w_div4; ++j) { + int32x4_t *buf0_cur = buf0 + j * 4; + TRANSPOSE_4X4(buf0_cur[0], buf0_cur[1], buf0_cur[2], buf0_cur[3], + buf0_cur[0], buf0_cur[1], buf0_cur[2], buf0_cur[3]); + _buf1[j * txfm_size_row + 0] = buf0_cur[0]; + _buf1[j * txfm_size_row + 1] = buf0_cur[1]; + _buf1[j * txfm_size_row + 2] = buf0_cur[2]; + _buf1[j * txfm_size_row + 3] = buf0_cur[3]; + } + } + for (int i = 0; i < buf_size_w_div4; i++) { + col_txfm(buf1 + i * txfm_size_row, buf1 + i * txfm_size_row, INV_COS_BIT, 1, + bd, 0); + + round_shift_array_32_neon(buf1 + i * txfm_size_row, + buf1 + i * txfm_size_row, txfm_size_row, + -shift[1]); + } + + // write to buffer + for (int i = 0; i < (txfm_size_col >> 3); i++) { + highbd_write_buffer_8xn_neon(buf1 + i * txfm_size_row * 2, output + 8 * i, + stride, ud_flip, txfm_size_row, bd); + } +} + +static void inv_txfm2d_add_v_identity_neon(const int32_t *input, + uint16_t *output, int stride, + TX_TYPE tx_type, TX_SIZE tx_size, + const int bd) { + int32x4_t buf1[64]; + int eobx, eoby; + get_eobx_eoby_scan_h_identity(&eobx, &eoby, tx_size); + const int8_t *shift = av1_inv_txfm_shift_ls[tx_size]; + const int txw_idx = get_txw_idx(tx_size); + const int txh_idx = get_txh_idx(tx_size); + const int txfm_size_col = tx_size_wide[tx_size]; + const int txfm_size_row = tx_size_high[tx_size]; + const int buf_size_w_div4 = AOMMIN(32, txfm_size_col) >> 2; + const int row_max = AOMMIN(32, txfm_size_row); + const int input_stride = row_max; + const int buf_size_nonzero_w_div8 = (eobx + 8) >> 3; + const int buf_size_nonzero_w = buf_size_nonzero_w_div8 << 3; + const int rect_type = get_rect_tx_log_ratio(txfm_size_col, txfm_size_row); + const int fun_idx = lowbd_txfm_all_1d_zeros_idx[eobx]; + const transform_1d_neon row_txfm = + highbd_txfm_all_1d_zeros_w8_arr[txw_idx][hitx_1d_tab[tx_type]][fun_idx]; + assert(row_txfm != NULL); + const transform_1d_neon col_txfm = + highbd_txfm_all_1d_zeros_w8_arr[txh_idx][vitx_1d_tab[tx_type]][0]; + assert(col_txfm != NULL); + int ud_flip, lr_flip; + get_flip_cfg(tx_type, &ud_flip, &lr_flip); + + for (int i = 0; i < (row_max >> 2); ++i) { + int32x4_t buf0[16]; + load_buffer_32bit_input(input + i * 4, input_stride, buf0, + buf_size_nonzero_w); + if 
(rect_type == 1 || rect_type == -1) { + round_shift_rect_array_32_neon(buf0, buf0, buf_size_nonzero_w); + } + row_txfm(buf0, buf0, INV_COS_BIT, 0, bd, -shift[0]); + + int32x4_t *_buf1 = buf1 + i * 4; + if (lr_flip) { + for (int j = 0; j < buf_size_w_div4; ++j) { + TRANSPOSE_4X4(buf0[4 * j + 3], buf0[4 * j + 2], buf0[4 * j + 1], + buf0[4 * j], + _buf1[txfm_size_row * (buf_size_w_div4 - 1 - j) + 0], + _buf1[txfm_size_row * (buf_size_w_div4 - 1 - j) + 1], + _buf1[txfm_size_row * (buf_size_w_div4 - 1 - j) + 2], + _buf1[txfm_size_row * (buf_size_w_div4 - 1 - j) + 3]); + } + } else { + for (int j = 0; j < buf_size_w_div4; ++j) { + TRANSPOSE_4X4( + buf0[j * 4 + 0], buf0[j * 4 + 1], buf0[j * 4 + 2], buf0[j * 4 + 3], + _buf1[j * txfm_size_row + 0], _buf1[j * txfm_size_row + 1], + _buf1[j * txfm_size_row + 2], _buf1[j * txfm_size_row + 3]); + } + } + } + for (int i = 0; i < buf_size_w_div4; i++) { + col_txfm(buf1 + i * txfm_size_row, buf1 + i * txfm_size_row, INV_COS_BIT, 1, + bd, 0); + + round_shift_array_32_neon(buf1 + i * txfm_size_row, + buf1 + i * txfm_size_row, txfm_size_row, + -shift[1]); + } + + // write to buffer + { + for (int i = 0; i < (txfm_size_col >> 3); i++) { + highbd_write_buffer_8xn_neon(buf1 + i * txfm_size_row * 2, output + 8 * i, + stride, ud_flip, txfm_size_row, bd); + } + } +} + +static void inv_txfm2d_add_idtx_neon(const int32_t *input, uint16_t *output, + int stride, TX_TYPE tx_type, + TX_SIZE tx_size, const int bd) { + int32x4_t buf1[64 * 4]; + const int8_t *shift = av1_inv_txfm_shift_ls[tx_size]; + const int txw_idx = get_txw_idx(tx_size); + const int txh_idx = get_txh_idx(tx_size); + const int txfm_size_col = tx_size_wide[tx_size]; + const int txfm_size_row = tx_size_high[tx_size]; + const int row_max = AOMMIN(32, txfm_size_row); + const int input_stride = row_max; + const int buf_size_w = AOMMIN(32, txfm_size_col); + const int buf_size_w_div4 = buf_size_w >> 2; + const int rect_type = get_rect_tx_log_ratio(txfm_size_col, txfm_size_row); + const transform_1d_neon row_txfm = + highbd_txfm_all_1d_zeros_w8_arr[txw_idx][hitx_1d_tab[tx_type]][0]; + assert(row_txfm != NULL); + const transform_1d_neon col_txfm = + highbd_txfm_all_1d_zeros_w8_arr[txh_idx][vitx_1d_tab[tx_type]][0]; + assert(col_txfm != NULL); + for (int i = 0; i < (row_max >> 2); ++i) { + int32x4_t buf0[32]; + load_buffer_32bit_input(input + i * 4, input_stride, buf0, buf_size_w); + if (rect_type == 1 || rect_type == -1) { + round_shift_rect_array_32_neon(buf0, buf0, buf_size_w); + } + row_txfm(buf0, buf0, INV_COS_BIT, 0, bd, -shift[0]); + + int32x4_t *_buf1 = buf1 + i * 4; + for (int j = 0; j < buf_size_w_div4; ++j) { + int32x4_t *buf0_cur = buf0 + j * 4; + TRANSPOSE_4X4(buf0_cur[0], buf0_cur[1], buf0_cur[2], buf0_cur[3], + buf0_cur[0], buf0_cur[1], buf0_cur[2], buf0_cur[3]); + _buf1[j * txfm_size_row + 0] = buf0_cur[0]; + _buf1[j * txfm_size_row + 1] = buf0_cur[1]; + _buf1[j * txfm_size_row + 2] = buf0_cur[2]; + _buf1[j * txfm_size_row + 3] = buf0_cur[3]; + } + } + for (int i = 0; i < buf_size_w_div4; i++) { + col_txfm(buf1 + i * txfm_size_row, buf1 + i * txfm_size_row, INV_COS_BIT, 1, + bd, 0); + + round_shift_array_32_neon(buf1 + i * txfm_size_row, + buf1 + i * txfm_size_row, txfm_size_row, + -shift[1]); + } + + // write to buffer + { + for (int i = 0; i < (txfm_size_col >> 3); i++) { + highbd_write_buffer_8xn_neon(buf1 + i * txfm_size_row * 2, output + 8 * i, + stride, 0, txfm_size_row, bd); + } + } +} + +static void inv_txfm2d_add_no_identity_neon(const int32_t *input, + uint16_t *output, int stride, + 
TX_TYPE tx_type, TX_SIZE tx_size, + const int bd) { + int32x4_t buf1[64 * 16]; + int eobx, eoby; + get_eobx_eoby_scan_default(&eobx, &eoby, tx_size); + const int8_t *shift = av1_inv_txfm_shift_ls[tx_size]; + const int txw_idx = get_txw_idx(tx_size); + const int txh_idx = get_txh_idx(tx_size); + const int txfm_size_col = tx_size_wide[tx_size]; + const int txfm_size_row = tx_size_high[tx_size]; + const int buf_size_w_div4 = txfm_size_col >> 2; + const int buf_size_nonzero_w = (eobx + 8) >> 3 << 3; + const int buf_size_nonzero_h_div8 = (eoby + 8) >> 3; + const int input_stride = AOMMIN(32, txfm_size_row); + const int rect_type = get_rect_tx_log_ratio(txfm_size_col, txfm_size_row); + + const int fun_idx_x = lowbd_txfm_all_1d_zeros_idx[eobx]; + const int fun_idx_y = lowbd_txfm_all_1d_zeros_idx[eoby]; + const transform_1d_neon row_txfm = + highbd_txfm_all_1d_zeros_w8_arr[txw_idx][hitx_1d_tab[tx_type]][fun_idx_x]; + const transform_1d_neon col_txfm = + highbd_txfm_all_1d_zeros_w8_arr[txh_idx][vitx_1d_tab[tx_type]][fun_idx_y]; + + assert(col_txfm != NULL); + assert(row_txfm != NULL); + int ud_flip, lr_flip; + get_flip_cfg(tx_type, &ud_flip, &lr_flip); + // 1st stage: column transform + for (int i = 0; i < buf_size_nonzero_h_div8 << 1; i++) { + int32x4_t buf0[64]; + load_buffer_32bit_input(input + i * 4, input_stride, buf0, + buf_size_nonzero_w); + if (rect_type == 1 || rect_type == -1) { + round_shift_rect_array_32_neon(buf0, buf0, buf_size_nonzero_w); + } + row_txfm(buf0, buf0, INV_COS_BIT, 0, bd, -shift[0]); + + int32x4_t *_buf1 = &buf1[i * 4]; + + if (lr_flip) { + for (int j = 0; j < buf_size_w_div4; ++j) { + TRANSPOSE_4X4(buf0[4 * j + 3], buf0[4 * j + 2], buf0[4 * j + 1], + buf0[4 * j], + _buf1[txfm_size_row * (buf_size_w_div4 - 1 - j) + 0], + _buf1[txfm_size_row * (buf_size_w_div4 - 1 - j) + 1], + _buf1[txfm_size_row * (buf_size_w_div4 - 1 - j) + 2], + _buf1[txfm_size_row * (buf_size_w_div4 - 1 - j) + 3]); + } + } else { + for (int j = 0; j < buf_size_w_div4; ++j) { + TRANSPOSE_4X4( + buf0[j * 4 + 0], buf0[j * 4 + 1], buf0[j * 4 + 2], buf0[j * 4 + 3], + _buf1[j * txfm_size_row + 0], _buf1[j * txfm_size_row + 1], + _buf1[j * txfm_size_row + 2], _buf1[j * txfm_size_row + 3]); + } + } + } + // 2nd stage: column transform + for (int i = 0; i < buf_size_w_div4; i++) { + col_txfm(buf1 + i * txfm_size_row, buf1 + i * txfm_size_row, INV_COS_BIT, 1, + bd, 0); + + round_shift_array_32_neon(buf1 + i * txfm_size_row, + buf1 + i * txfm_size_row, txfm_size_row, + -shift[1]); + } + + // write to buffer + { + for (int i = 0; i < (txfm_size_col >> 3); i++) { + highbd_write_buffer_8xn_neon(buf1 + i * txfm_size_row * 2, output + 8 * i, + stride, ud_flip, txfm_size_row, bd); + } + } +} + +static void highbd_inv_txfm2d_add_no_identity_neon(const int32_t *input, + uint16_t *output, int stride, + TX_TYPE tx_type, + TX_SIZE tx_size, int eob, + const int bd) { + int32x4_t buf1[64 * 16]; + int eobx, eoby; + highbd_get_eobx_eoby_scan_default(&eobx, &eoby, tx_size, eob); + const int8_t *shift = av1_inv_txfm_shift_ls[tx_size]; + const int txw_idx = get_txw_idx(tx_size); + const int txh_idx = get_txh_idx(tx_size); + const int txfm_size_col = tx_size_wide[tx_size]; + const int txfm_size_row = tx_size_high[tx_size]; + const int buf_size_w_div8 = txfm_size_col >> 2; + const int buf_size_nonzero_w_div8 = (eobx + 8) >> 3; + const int buf_size_nonzero_h_div8 = (eoby + 8) >> 3; + const int input_stride = AOMMIN(32, txfm_size_col); + const int rect_type = get_rect_tx_log_ratio(txfm_size_col, txfm_size_row); + + const int 
fun_idx_x = lowbd_txfm_all_1d_zeros_idx[eobx]; + const int fun_idx_y = lowbd_txfm_all_1d_zeros_idx[eoby]; + const transform_1d_neon row_txfm = + highbd_txfm_all_1d_zeros_w8_arr[txw_idx][hitx_1d_tab[tx_type]][fun_idx_x]; + const transform_1d_neon col_txfm = + highbd_txfm_all_1d_zeros_w8_arr[txh_idx][vitx_1d_tab[tx_type]][fun_idx_y]; + + assert(col_txfm != NULL); + assert(row_txfm != NULL); + int ud_flip, lr_flip; + get_flip_cfg(tx_type, &ud_flip, &lr_flip); + // 1st stage: column transform + for (int i = 0; i < buf_size_nonzero_h_div8 << 1; i++) { + int32x4_t buf0[64]; + const int32_t *input_row = input + i * input_stride * 4; + for (int j = 0; j < buf_size_nonzero_w_div8 << 1; ++j) { + int32x4_t *buf0_cur = &buf0[j * 4]; + load_buffer_32bit_input(input_row + j * 4, input_stride, buf0_cur, 4); + + TRANSPOSE_4X4(buf0_cur[0], buf0_cur[1], buf0_cur[2], buf0_cur[3], + buf0_cur[0], buf0_cur[1], buf0_cur[2], buf0_cur[3]); + } + if (rect_type == 1 || rect_type == -1) { + round_shift_rect_array_32_neon(buf0, buf0, buf_size_nonzero_w_div8 << 3); + } + row_txfm(buf0, buf0, INV_COS_BIT, 0, bd, -shift[0]); + + int32x4_t *_buf1 = &buf1[i * 4]; + + if (lr_flip) { + for (int j = 0; j < buf_size_w_div8; ++j) { + TRANSPOSE_4X4(buf0[4 * j + 3], buf0[4 * j + 2], buf0[4 * j + 1], + buf0[4 * j], + _buf1[txfm_size_row * (buf_size_w_div8 - 1 - j) + 0], + _buf1[txfm_size_row * (buf_size_w_div8 - 1 - j) + 1], + _buf1[txfm_size_row * (buf_size_w_div8 - 1 - j) + 2], + _buf1[txfm_size_row * (buf_size_w_div8 - 1 - j) + 3]); + } + } else { + for (int j = 0; j < buf_size_w_div8; ++j) { + TRANSPOSE_4X4( + buf0[j * 4 + 0], buf0[j * 4 + 1], buf0[j * 4 + 2], buf0[j * 4 + 3], + _buf1[j * txfm_size_row + 0], _buf1[j * txfm_size_row + 1], + _buf1[j * txfm_size_row + 2], _buf1[j * txfm_size_row + 3]); + } + } + } + // 2nd stage: column transform + for (int i = 0; i < buf_size_w_div8; i++) { + col_txfm(buf1 + i * txfm_size_row, buf1 + i * txfm_size_row, INV_COS_BIT, 1, + bd, 0); + + round_shift_array_32_neon(buf1 + i * txfm_size_row, + buf1 + i * txfm_size_row, txfm_size_row, + -shift[1]); + } + + // write to buffer + { + for (int i = 0; i < (txfm_size_col >> 3); i++) { + highbd_write_buffer_8xn_neon(buf1 + i * txfm_size_row * 2, output + 8 * i, + stride, ud_flip, txfm_size_row, bd); + } + } +} + +static void highbd_inv_txfm2d_add_universe_neon(const int32_t *input, + uint8_t *output, int stride, + TX_TYPE tx_type, + TX_SIZE tx_size, int eob, + const int bd) { + switch (tx_type) { + case DCT_DCT: + case ADST_DCT: + case DCT_ADST: + case ADST_ADST: + case FLIPADST_DCT: + case DCT_FLIPADST: + case FLIPADST_FLIPADST: + case ADST_FLIPADST: + case FLIPADST_ADST: + highbd_inv_txfm2d_add_no_identity_neon(input, CONVERT_TO_SHORTPTR(output), + stride, tx_type, tx_size, eob, bd); + break; + case V_DCT: + case V_ADST: + case V_FLIPADST: + inv_txfm2d_add_h_identity_neon(input, CONVERT_TO_SHORTPTR(output), stride, + tx_type, tx_size, bd); + break; + case H_DCT: + case H_ADST: + case H_FLIPADST: + inv_txfm2d_add_v_identity_neon(input, CONVERT_TO_SHORTPTR(output), stride, + tx_type, tx_size, bd); + break; + case IDTX: + inv_txfm2d_add_idtx_neon(input, CONVERT_TO_SHORTPTR(output), stride, + tx_type, tx_size, bd); + break; + default: assert(0); break; + } +} + +static void inv_txfm2d_add_universe_neon(const int32_t *input, uint8_t *output, + int stride, TX_TYPE tx_type, + TX_SIZE tx_size, const int bd) { + switch (tx_type) { + case DCT_DCT: + case ADST_DCT: + case DCT_ADST: + case ADST_ADST: + case FLIPADST_DCT: + case DCT_FLIPADST: + case 
FLIPADST_FLIPADST: + case ADST_FLIPADST: + case FLIPADST_ADST: + inv_txfm2d_add_no_identity_neon(input, CONVERT_TO_SHORTPTR(output), + stride, tx_type, tx_size, bd); + break; + case V_DCT: + case V_ADST: + case V_FLIPADST: + inv_txfm2d_add_h_identity_neon(input, CONVERT_TO_SHORTPTR(output), stride, + tx_type, tx_size, bd); + break; + case H_DCT: + case H_ADST: + case H_FLIPADST: + inv_txfm2d_add_v_identity_neon(input, CONVERT_TO_SHORTPTR(output), stride, + tx_type, tx_size, bd); + break; + case IDTX: + inv_txfm2d_add_idtx_neon(input, CONVERT_TO_SHORTPTR(output), stride, + tx_type, tx_size, bd); + break; + default: assert(0); break; + } +} + +void av1_highbd_inv_txfm_add_8x8_neon(const tran_low_t *input, uint8_t *dest, + int stride, const TxfmParam *txfm_param) { + int bd = txfm_param->bd; + const TX_TYPE tx_type = txfm_param->tx_type; + const int32_t *src = cast_to_int32(input); + switch (tx_type) { + case IDTX: + case H_DCT: + case H_ADST: + case H_FLIPADST: + case V_DCT: + case V_ADST: + case V_FLIPADST: + highbd_inv_txfm2d_add_universe_neon(input, dest, stride, tx_type, + txfm_param->tx_size, txfm_param->eob, + bd); + break; + default: + av1_inv_txfm2d_add_8x8_neon(src, CONVERT_TO_SHORTPTR(dest), stride, + tx_type, bd); + break; + } +} + +void av1_highbd_inv_txfm_add_4x4_neon(const tran_low_t *input, uint8_t *dest, + int stride, const TxfmParam *txfm_param) { + assert(av1_ext_tx_used[txfm_param->tx_set_type][txfm_param->tx_type]); + int eob = txfm_param->eob; + int bd = txfm_param->bd; + int lossless = txfm_param->lossless; + const int32_t *src = cast_to_int32(input); + const TX_TYPE tx_type = txfm_param->tx_type; + if (lossless) { + assert(tx_type == DCT_DCT); + av1_highbd_iwht4x4_add(input, dest, stride, eob, bd); + return; + } + av1_inv_txfm2d_add_4x4_neon(src, CONVERT_TO_SHORTPTR(dest), stride, tx_type, + bd); +} + +void av1_highbd_inv_txfm_add_4x8_neon(const tran_low_t *input, uint8_t *dest, + int stride, const TxfmParam *txfm_param) { + av1_inv_txfm2d_add_4x8_neon(input, CONVERT_TO_SHORTPTR(dest), stride, + txfm_param->tx_type, txfm_param->bd); +} + +void av1_highbd_inv_txfm_add_8x4_neon(const tran_low_t *input, uint8_t *dest, + int stride, const TxfmParam *txfm_param) { + av1_inv_txfm2d_add_8x4_neon(input, CONVERT_TO_SHORTPTR(dest), stride, + txfm_param->tx_type, txfm_param->bd); +} + +void av1_inv_txfm2d_add_8x16_neon(const tran_low_t *input, uint16_t *dest, + int stride, TX_TYPE tx_type, const int bd) { + inv_txfm2d_add_universe_neon(input, (uint8_t *)dest, stride, tx_type, TX_8X16, + bd); +} + +void av1_highbd_inv_txfm_add_4x16_neon(const tran_low_t *input, uint8_t *dest, + int stride, + const TxfmParam *txfm_param) { + int bd = txfm_param->bd; + const TX_TYPE tx_type = txfm_param->tx_type; + int eob = txfm_param->eob; + highbd_inv_txfm2d_add_4x16_neon(input, CONVERT_TO_SHORTPTR(dest), stride, + tx_type, eob, bd); +} + +void av1_highbd_inv_txfm_add_16x4_neon(const tran_low_t *input, uint8_t *dest, + int stride, + const TxfmParam *txfm_param) { + int bd = txfm_param->bd; + const TX_TYPE tx_type = txfm_param->tx_type; + int eob = txfm_param->eob; + highbd_inv_txfm2d_add_16x4_neon(input, CONVERT_TO_SHORTPTR(dest), stride, + tx_type, eob, bd); +} + +void av1_highbd_inv_txfm_add_8x16_neon(const tran_low_t *input, uint8_t *dest, + int stride, + const TxfmParam *txfm_param) { + highbd_inv_txfm2d_add_universe_neon(input, dest, stride, txfm_param->tx_type, + TX_8X16, txfm_param->eob, txfm_param->bd); +} + +void av1_highbd_inv_txfm_add_16x8_neon(const tran_low_t *input, uint8_t *dest, + 
int stride, + const TxfmParam *txfm_param) { + highbd_inv_txfm2d_add_universe_neon(input, dest, stride, txfm_param->tx_type, + TX_16X8, txfm_param->eob, txfm_param->bd); +} + +void av1_inv_txfm2d_add_16x8_neon(const tran_low_t *input, uint16_t *dest, + int stride, TX_TYPE tx_type, const int bd) { + inv_txfm2d_add_universe_neon(input, (uint8_t *)dest, stride, tx_type, TX_16X8, + bd); +} + +void av1_highbd_inv_txfm_add_16x32_neon(const tran_low_t *input, uint8_t *dest, + int stride, + const TxfmParam *txfm_param) { + highbd_inv_txfm2d_add_universe_neon(input, dest, stride, txfm_param->tx_type, + TX_16X32, txfm_param->eob, + txfm_param->bd); +} + +void av1_inv_txfm2d_add_16x32_neon(const tran_low_t *input, uint16_t *dest, + int stride, TX_TYPE tx_type, const int bd) { + inv_txfm2d_add_universe_neon(input, (uint8_t *)dest, stride, tx_type, + TX_16X32, bd); +} + +void av1_highbd_inv_txfm_add_32x16_neon(const tran_low_t *input, uint8_t *dest, + int stride, + const TxfmParam *txfm_param) { + highbd_inv_txfm2d_add_universe_neon(input, dest, stride, txfm_param->tx_type, + TX_32X16, txfm_param->eob, + txfm_param->bd); +} + +void av1_inv_txfm2d_add_32x16_neon(const tran_low_t *input, uint16_t *dest, + int stride, TX_TYPE tx_type, const int bd) { + inv_txfm2d_add_universe_neon(input, (uint8_t *)dest, stride, tx_type, + TX_32X16, bd); +} + +void av1_highbd_inv_txfm_add_32x32_neon(const tran_low_t *input, uint8_t *dest, + int stride, + const TxfmParam *txfm_param) { + highbd_inv_txfm2d_add_universe_neon(input, dest, stride, txfm_param->tx_type, + TX_32X32, txfm_param->eob, + txfm_param->bd); +} + +void av1_inv_txfm2d_add_32x32_neon(const tran_low_t *input, uint16_t *dest, + int stride, TX_TYPE tx_type, const int bd) { + inv_txfm2d_add_universe_neon(input, (uint8_t *)dest, stride, tx_type, + TX_32X32, bd); +} + +void av1_highbd_inv_txfm_add_64x64_neon(const tran_low_t *input, uint8_t *dest, + int stride, + const TxfmParam *txfm_param) { + highbd_inv_txfm2d_add_universe_neon(input, dest, stride, txfm_param->tx_type, + TX_64X64, txfm_param->eob, + txfm_param->bd); +} + +void av1_inv_txfm2d_add_64x64_neon(const tran_low_t *input, uint16_t *dest, + int stride, TX_TYPE tx_type, const int bd) { + inv_txfm2d_add_universe_neon(input, (uint8_t *)dest, stride, tx_type, + TX_64X64, bd); +} + +void av1_highbd_inv_txfm_add_32x64_neon(const tran_low_t *input, uint8_t *dest, + int stride, + const TxfmParam *txfm_param) { + highbd_inv_txfm2d_add_universe_neon(input, dest, stride, txfm_param->tx_type, + TX_32X64, txfm_param->eob, + txfm_param->bd); +} + +void av1_inv_txfm2d_add_32x64_neon(const tran_low_t *input, uint16_t *dest, + int stride, TX_TYPE tx_type, const int bd) { + inv_txfm2d_add_universe_neon(input, (uint8_t *)dest, stride, tx_type, + TX_32X64, bd); +} + +void av1_highbd_inv_txfm_add_64x32_neon(const tran_low_t *input, uint8_t *dest, + int stride, + const TxfmParam *txfm_param) { + highbd_inv_txfm2d_add_universe_neon(input, dest, stride, txfm_param->tx_type, + TX_64X32, txfm_param->eob, + txfm_param->bd); +} + +void av1_inv_txfm2d_add_64x32_neon(const tran_low_t *input, uint16_t *dest, + int stride, TX_TYPE tx_type, const int bd) { + inv_txfm2d_add_universe_neon(input, (uint8_t *)dest, stride, tx_type, + TX_64X32, bd); +} + +void av1_highbd_inv_txfm_add_64x16_neon(const tran_low_t *input, uint8_t *dest, + int stride, + const TxfmParam *txfm_param) { + highbd_inv_txfm2d_add_universe_neon(input, dest, stride, txfm_param->tx_type, + TX_64X16, txfm_param->eob, + txfm_param->bd); +} + +void 
av1_inv_txfm2d_add_64x16_neon(const tran_low_t *input, uint16_t *dest, + int stride, TX_TYPE tx_type, const int bd) { + inv_txfm2d_add_universe_neon(input, (uint8_t *)dest, stride, tx_type, + TX_64X16, bd); +} + +void av1_highbd_inv_txfm_add_16x64_neon(const tran_low_t *input, uint8_t *dest, + int stride, + const TxfmParam *txfm_param) { + highbd_inv_txfm2d_add_universe_neon(input, dest, stride, txfm_param->tx_type, + TX_16X64, txfm_param->eob, + txfm_param->bd); +} + +void av1_inv_txfm2d_add_16x64_neon(const tran_low_t *input, uint16_t *dest, + int stride, TX_TYPE tx_type, const int bd) { + inv_txfm2d_add_universe_neon(input, (uint8_t *)dest, stride, tx_type, + TX_16X64, bd); +} + +void av1_highbd_inv_txfm_add_16x16_neon(const tran_low_t *input, uint8_t *dest, + int stride, + const TxfmParam *txfm_param) { + highbd_inv_txfm2d_add_universe_neon(input, dest, stride, txfm_param->tx_type, + TX_16X16, txfm_param->eob, + txfm_param->bd); +} + +void av1_inv_txfm2d_add_16x16_neon(const tran_low_t *input, uint16_t *dest, + int stride, TX_TYPE tx_type, const int bd) { + inv_txfm2d_add_universe_neon(input, (uint8_t *)dest, stride, tx_type, + TX_16X16, bd); +} + +void av1_highbd_inv_txfm_add_32x8_neon(const tran_low_t *input, uint8_t *dest, + int stride, + const TxfmParam *txfm_param) { + highbd_inv_txfm2d_add_universe_neon(input, dest, stride, txfm_param->tx_type, + TX_32X8, txfm_param->eob, txfm_param->bd); +} + +void av1_inv_txfm2d_add_32x8_neon(const tran_low_t *input, uint16_t *dest, + int stride, TX_TYPE tx_type, const int bd) { + inv_txfm2d_add_universe_neon(input, (uint8_t *)dest, stride, tx_type, TX_32X8, + bd); +} + +void av1_highbd_inv_txfm_add_8x32_neon(const tran_low_t *input, uint8_t *dest, + int stride, + const TxfmParam *txfm_param) { + highbd_inv_txfm2d_add_universe_neon(input, dest, stride, txfm_param->tx_type, + TX_8X32, txfm_param->eob, txfm_param->bd); +} + +void av1_inv_txfm2d_add_8x32_neon(const tran_low_t *input, uint16_t *dest, + int stride, TX_TYPE tx_type, const int bd) { + inv_txfm2d_add_universe_neon(input, (uint8_t *)dest, stride, tx_type, TX_8X32, + bd); +} + +void av1_highbd_inv_txfm_add_neon(const tran_low_t *input, uint8_t *dest, + int stride, const TxfmParam *txfm_param) { + const TX_SIZE tx_size = txfm_param->tx_size; + + TX_TYPE tx_type = txfm_param->tx_type; + int bd = txfm_param->bd; + switch (tx_size) { + case TX_8X8: + av1_highbd_inv_txfm_add_8x8_neon(input, dest, stride, txfm_param); + break; + case TX_4X8: + av1_inv_txfm2d_add_4x8_neon(input, CONVERT_TO_SHORTPTR(dest), stride, + txfm_param->tx_type, txfm_param->bd); + break; + case TX_8X4: + av1_inv_txfm2d_add_8x4_neon(input, CONVERT_TO_SHORTPTR(dest), stride, + txfm_param->tx_type, txfm_param->bd); + break; + case TX_4X4: + av1_highbd_inv_txfm_add_4x4_neon(input, dest, stride, txfm_param); + break; + case TX_16X4: + av1_inv_txfm2d_add_16x4_neon(input, CONVERT_TO_SHORTPTR(dest), stride, + txfm_param->tx_type, txfm_param->bd); + break; + case TX_4X16: + av1_inv_txfm2d_add_4x16_neon(input, CONVERT_TO_SHORTPTR(dest), stride, + txfm_param->tx_type, txfm_param->bd); + break; + case TX_8X16: + av1_inv_txfm2d_add_8x16_neon(input, (uint16_t *)dest, stride, tx_type, + bd); + break; + case TX_16X8: + av1_inv_txfm2d_add_16x8_neon(input, (uint16_t *)dest, stride, tx_type, + bd); + break; + case TX_16X32: + av1_inv_txfm2d_add_16x32_neon(input, (uint16_t *)dest, stride, tx_type, + bd); + break; + case TX_32X16: + av1_inv_txfm2d_add_32x16_neon(input, (uint16_t *)dest, stride, tx_type, + bd); + break; + case TX_16X16: + 
av1_inv_txfm2d_add_16x16_neon(input, (uint16_t *)dest, stride, tx_type, + bd); + break; + case TX_32X32: + av1_inv_txfm2d_add_32x32_neon(input, (uint16_t *)dest, stride, tx_type, + bd); + break; + case TX_64X64: + av1_inv_txfm2d_add_64x64_neon(input, (uint16_t *)dest, stride, tx_type, + bd); + break; + case TX_32X64: + av1_inv_txfm2d_add_32x64_neon(input, (uint16_t *)dest, stride, tx_type, + bd); + break; + case TX_64X32: + av1_inv_txfm2d_add_64x32_neon(input, (uint16_t *)dest, stride, tx_type, + bd); + break; + case TX_16X64: + av1_inv_txfm2d_add_16x64_neon(input, (uint16_t *)dest, stride, tx_type, + bd); + break; + case TX_64X16: + av1_inv_txfm2d_add_64x16_neon(input, (uint16_t *)dest, stride, tx_type, + bd); + break; + case TX_32X8: + av1_inv_txfm2d_add_32x8_neon(input, (uint16_t *)dest, stride, tx_type, + bd); + break; + case TX_8X32: + av1_inv_txfm2d_add_8x32_neon(input, (uint16_t *)dest, stride, tx_type, + bd); + break; + } +} diff --git a/third_party/aom/av1/common/arm/highbd_reconinter_neon.c b/third_party/aom/av1/common/arm/highbd_reconinter_neon.c new file mode 100644 index 0000000000..da7f6c57d0 --- /dev/null +++ b/third_party/aom/av1/common/arm/highbd_reconinter_neon.c @@ -0,0 +1,327 @@ +/* + * + * Copyright (c) 2023, Alliance for Open Media. All rights reserved + * + * This source code is subject to the terms of the BSD 2 Clause License and + * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License + * was not distributed with this source code in the LICENSE file, you can + * obtain it at www.aomedia.org/license/software. If the Alliance for Open + * Media Patent License 1.0 was not distributed with this source code in the + * PATENTS file, you can obtain it at www.aomedia.org/license/patent. + */ + +#include <arm_neon.h> +#include <assert.h> +#include <stdbool.h> + +#include "aom_dsp/arm/mem_neon.h" +#include "aom_dsp/blend.h" +#include "aom_ports/mem.h" +#include "config/av1_rtcd.h" + +static INLINE void diffwtd_mask_highbd_neon(uint8_t *mask, bool inverse, + const uint16_t *src0, + int src0_stride, + const uint16_t *src1, + int src1_stride, int h, int w, + const unsigned int bd) { + assert(DIFF_FACTOR > 0); + uint8x16_t max_alpha = vdupq_n_u8(AOM_BLEND_A64_MAX_ALPHA); + uint8x16_t mask_base = vdupq_n_u8(38); + uint8x16_t mask_diff = vdupq_n_u8(AOM_BLEND_A64_MAX_ALPHA - 38); + + if (bd == 8) { + if (w >= 16) { + do { + uint8_t *mask_ptr = mask; + const uint16_t *src0_ptr = src0; + const uint16_t *src1_ptr = src1; + int width = w; + do { + uint16x8_t s0_lo = vld1q_u16(src0_ptr); + uint16x8_t s0_hi = vld1q_u16(src0_ptr + 8); + uint16x8_t s1_lo = vld1q_u16(src1_ptr); + uint16x8_t s1_hi = vld1q_u16(src1_ptr + 8); + + uint16x8_t diff_lo_u16 = vabdq_u16(s0_lo, s1_lo); + uint16x8_t diff_hi_u16 = vabdq_u16(s0_hi, s1_hi); + uint8x8_t diff_lo_u8 = vshrn_n_u16(diff_lo_u16, DIFF_FACTOR_LOG2); + uint8x8_t diff_hi_u8 = vshrn_n_u16(diff_hi_u16, DIFF_FACTOR_LOG2); + uint8x16_t diff = vcombine_u8(diff_lo_u8, diff_hi_u8); + + uint8x16_t m; + if (inverse) { + m = vqsubq_u8(mask_diff, diff); + } else { + m = vminq_u8(vaddq_u8(diff, mask_base), max_alpha); + } + + vst1q_u8(mask_ptr, m); + + src0_ptr += 16; + src1_ptr += 16; + mask_ptr += 16; + width -= 16; + } while (width != 0); + mask += w; + src0 += src0_stride; + src1 += src1_stride; + } while (--h != 0); + } else if (w == 8) { + do { + uint8_t *mask_ptr = mask; + const uint16_t *src0_ptr = src0; + const uint16_t *src1_ptr = src1; + int width = w; + do { + uint16x8_t s0 = vld1q_u16(src0_ptr); + uint16x8_t s1 = 
vld1q_u16(src1_ptr); + + uint16x8_t diff_u16 = vabdq_u16(s0, s1); + uint8x8_t diff_u8 = vshrn_n_u16(diff_u16, DIFF_FACTOR_LOG2); + uint8x8_t m; + if (inverse) { + m = vqsub_u8(vget_low_u8(mask_diff), diff_u8); + } else { + m = vmin_u8(vadd_u8(diff_u8, vget_low_u8(mask_base)), + vget_low_u8(max_alpha)); + } + + vst1_u8(mask_ptr, m); + + src0_ptr += 8; + src1_ptr += 8; + mask_ptr += 8; + width -= 8; + } while (width != 0); + mask += w; + src0 += src0_stride; + src1 += src1_stride; + } while (--h != 0); + } else if (w == 4) { + do { + uint16x8_t s0 = load_unaligned_u16_4x2(src0, src0_stride); + uint16x8_t s1 = load_unaligned_u16_4x2(src1, src1_stride); + + uint16x8_t diff_u16 = vabdq_u16(s0, s1); + uint8x8_t diff_u8 = vshrn_n_u16(diff_u16, DIFF_FACTOR_LOG2); + uint8x8_t m; + if (inverse) { + m = vqsub_u8(vget_low_u8(mask_diff), diff_u8); + } else { + m = vmin_u8(vadd_u8(diff_u8, vget_low_u8(mask_base)), + vget_low_u8(max_alpha)); + } + + store_u8x4_strided_x2(mask, w, m); + + src0 += 2 * src0_stride; + src1 += 2 * src1_stride; + mask += 2 * w; + h -= 2; + } while (h != 0); + } + } else if (bd == 10) { + if (w >= 16) { + do { + uint8_t *mask_ptr = mask; + const uint16_t *src0_ptr = src0; + const uint16_t *src1_ptr = src1; + int width = w; + do { + uint16x8_t s0_lo = vld1q_u16(src0_ptr); + uint16x8_t s0_hi = vld1q_u16(src0_ptr + 8); + uint16x8_t s1_lo = vld1q_u16(src1_ptr); + uint16x8_t s1_hi = vld1q_u16(src1_ptr + 8); + + uint16x8_t diff_lo_u16 = vabdq_u16(s0_lo, s1_lo); + uint16x8_t diff_hi_u16 = vabdq_u16(s0_hi, s1_hi); + uint8x8_t diff_lo_u8 = vshrn_n_u16(diff_lo_u16, 2 + DIFF_FACTOR_LOG2); + uint8x8_t diff_hi_u8 = vshrn_n_u16(diff_hi_u16, 2 + DIFF_FACTOR_LOG2); + uint8x16_t diff = vcombine_u8(diff_lo_u8, diff_hi_u8); + + uint8x16_t m; + if (inverse) { + m = vqsubq_u8(mask_diff, diff); + } else { + m = vminq_u8(vaddq_u8(diff, mask_base), max_alpha); + } + + vst1q_u8(mask_ptr, m); + + src0_ptr += 16; + src1_ptr += 16; + mask_ptr += 16; + width -= 16; + } while (width != 0); + mask += w; + src0 += src0_stride; + src1 += src1_stride; + } while (--h != 0); + } else if (w == 8) { + do { + uint8_t *mask_ptr = mask; + const uint16_t *src0_ptr = src0; + const uint16_t *src1_ptr = src1; + int width = w; + do { + uint16x8_t s0 = vld1q_u16(src0_ptr); + uint16x8_t s1 = vld1q_u16(src1_ptr); + + uint16x8_t diff_u16 = vabdq_u16(s0, s1); + uint8x8_t diff_u8 = vshrn_n_u16(diff_u16, 2 + DIFF_FACTOR_LOG2); + uint8x8_t m; + if (inverse) { + m = vqsub_u8(vget_low_u8(mask_diff), diff_u8); + } else { + m = vmin_u8(vadd_u8(diff_u8, vget_low_u8(mask_base)), + vget_low_u8(max_alpha)); + } + + vst1_u8(mask_ptr, m); + + src0_ptr += 8; + src1_ptr += 8; + mask_ptr += 8; + width -= 8; + } while (width != 0); + mask += w; + src0 += src0_stride; + src1 += src1_stride; + } while (--h != 0); + } else if (w == 4) { + do { + uint16x8_t s0 = load_unaligned_u16_4x2(src0, src0_stride); + uint16x8_t s1 = load_unaligned_u16_4x2(src1, src1_stride); + + uint16x8_t diff_u16 = vabdq_u16(s0, s1); + uint8x8_t diff_u8 = vshrn_n_u16(diff_u16, 2 + DIFF_FACTOR_LOG2); + uint8x8_t m; + if (inverse) { + m = vqsub_u8(vget_low_u8(mask_diff), diff_u8); + } else { + m = vmin_u8(vadd_u8(diff_u8, vget_low_u8(mask_base)), + vget_low_u8(max_alpha)); + } + + store_u8x4_strided_x2(mask, w, m); + + src0 += 2 * src0_stride; + src1 += 2 * src1_stride; + mask += 2 * w; + h -= 2; + } while (h != 0); + } + } else { + assert(bd == 12); + if (w >= 16) { + do { + uint8_t *mask_ptr = mask; + const uint16_t *src0_ptr = src0; + const uint16_t *src1_ptr = src1; + int 
width = w; + do { + uint16x8_t s0_lo = vld1q_u16(src0_ptr); + uint16x8_t s0_hi = vld1q_u16(src0_ptr + 8); + uint16x8_t s1_lo = vld1q_u16(src1_ptr); + uint16x8_t s1_hi = vld1q_u16(src1_ptr + 8); + + uint16x8_t diff_lo_u16 = vabdq_u16(s0_lo, s1_lo); + uint16x8_t diff_hi_u16 = vabdq_u16(s0_hi, s1_hi); + uint8x8_t diff_lo_u8 = vshrn_n_u16(diff_lo_u16, 4 + DIFF_FACTOR_LOG2); + uint8x8_t diff_hi_u8 = vshrn_n_u16(diff_hi_u16, 4 + DIFF_FACTOR_LOG2); + uint8x16_t diff = vcombine_u8(diff_lo_u8, diff_hi_u8); + + uint8x16_t m; + if (inverse) { + m = vqsubq_u8(mask_diff, diff); + } else { + m = vminq_u8(vaddq_u8(diff, mask_base), max_alpha); + } + + vst1q_u8(mask_ptr, m); + + src0_ptr += 16; + src1_ptr += 16; + mask_ptr += 16; + width -= 16; + } while (width != 0); + mask += w; + src0 += src0_stride; + src1 += src1_stride; + } while (--h != 0); + } else if (w == 8) { + do { + uint8_t *mask_ptr = mask; + const uint16_t *src0_ptr = src0; + const uint16_t *src1_ptr = src1; + int width = w; + do { + uint16x8_t s0 = vld1q_u16(src0_ptr); + uint16x8_t s1 = vld1q_u16(src1_ptr); + + uint16x8_t diff_u16 = vabdq_u16(s0, s1); + uint8x8_t diff_u8 = vshrn_n_u16(diff_u16, 4 + DIFF_FACTOR_LOG2); + uint8x8_t m; + if (inverse) { + m = vqsub_u8(vget_low_u8(mask_diff), diff_u8); + } else { + m = vmin_u8(vadd_u8(diff_u8, vget_low_u8(mask_base)), + vget_low_u8(max_alpha)); + } + + vst1_u8(mask_ptr, m); + + src0_ptr += 8; + src1_ptr += 8; + mask_ptr += 8; + width -= 8; + } while (width != 0); + mask += w; + src0 += src0_stride; + src1 += src1_stride; + } while (--h != 0); + } else if (w == 4) { + do { + uint16x8_t s0 = load_unaligned_u16_4x2(src0, src0_stride); + uint16x8_t s1 = load_unaligned_u16_4x2(src1, src1_stride); + + uint16x8_t diff_u16 = vabdq_u16(s0, s1); + uint8x8_t diff_u8 = vshrn_n_u16(diff_u16, 4 + DIFF_FACTOR_LOG2); + uint8x8_t m; + if (inverse) { + m = vqsub_u8(vget_low_u8(mask_diff), diff_u8); + } else { + m = vmin_u8(vadd_u8(diff_u8, vget_low_u8(mask_base)), + vget_low_u8(max_alpha)); + } + + store_u8x4_strided_x2(mask, w, m); + + src0 += 2 * src0_stride; + src1 += 2 * src1_stride; + mask += 2 * w; + h -= 2; + } while (h != 0); + } + } +} + +void av1_build_compound_diffwtd_mask_highbd_neon( + uint8_t *mask, DIFFWTD_MASK_TYPE mask_type, const uint8_t *src0, + int src0_stride, const uint8_t *src1, int src1_stride, int h, int w, + int bd) { + assert(h % 4 == 0); + assert(w % 4 == 0); + assert(mask_type == DIFFWTD_38_INV || mask_type == DIFFWTD_38); + + if (mask_type == DIFFWTD_38) { + diffwtd_mask_highbd_neon(mask, /*inverse=*/false, CONVERT_TO_SHORTPTR(src0), + src0_stride, CONVERT_TO_SHORTPTR(src1), + src1_stride, h, w, bd); + } else { // mask_type == DIFFWTD_38_INV + diffwtd_mask_highbd_neon(mask, /*inverse=*/true, CONVERT_TO_SHORTPTR(src0), + src0_stride, CONVERT_TO_SHORTPTR(src1), + src1_stride, h, w, bd); + } +} diff --git a/third_party/aom/av1/common/arm/highbd_reconintra_neon.c b/third_party/aom/av1/common/arm/highbd_reconintra_neon.c new file mode 100644 index 0000000000..170491b504 --- /dev/null +++ b/third_party/aom/av1/common/arm/highbd_reconintra_neon.c @@ -0,0 +1,241 @@ +/* + * Copyright (c) 2023, Alliance for Open Media. All rights reserved + * + * This source code is subject to the terms of the BSD 2 Clause License and + * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License + * was not distributed with this source code in the LICENSE file, you can + * obtain it at www.aomedia.org/license/software. 
If the Alliance for Open + * Media Patent License 1.0 was not distributed with this source code in the + * PATENTS file, you can obtain it at www.aomedia.org/license/patent. + */ + +#include <arm_neon.h> +#include <assert.h> + +#include "aom_dsp/arm/sum_neon.h" + +#define MAX_UPSAMPLE_SZ 16 + +void av1_highbd_filter_intra_edge_neon(uint16_t *p, int sz, int strength) { + if (!strength) return; + assert(sz >= 0 && sz <= 129); + + DECLARE_ALIGNED(16, static const uint16_t, + idx[8]) = { 0, 1, 2, 3, 4, 5, 6, 7 }; + const uint16x8_t index = vld1q_u16(idx); + + uint16_t edge[160]; // Max value of sz + enough padding for vector accesses. + memcpy(edge + 1, p, sz * sizeof(*p)); + + // Populate extra space appropriately. + edge[0] = edge[1]; + edge[sz + 1] = edge[sz]; + edge[sz + 2] = edge[sz]; + + // Don't overwrite first pixel. + uint16_t *dst = p + 1; + sz--; + + if (strength == 1) { // Filter: {4, 8, 4}. + const uint16_t *src = edge + 1; + + while (sz >= 8) { + uint16x8_t s0 = vld1q_u16(src); + uint16x8_t s1 = vld1q_u16(src + 1); + uint16x8_t s2 = vld1q_u16(src + 2); + + // Make use of the identity: + // (4*a + 8*b + 4*c) >> 4 == (a + (b << 1) + c) >> 2 + uint16x8_t t0 = vaddq_u16(s0, s2); + uint16x8_t t1 = vaddq_u16(s1, s1); + uint16x8_t sum = vaddq_u16(t0, t1); + uint16x8_t res = vrshrq_n_u16(sum, 2); + + vst1q_u16(dst, res); + + src += 8; + dst += 8; + sz -= 8; + } + + if (sz > 0) { // Handle sz < 8 to avoid modifying out-of-bounds values. + uint16x8_t s0 = vld1q_u16(src); + uint16x8_t s1 = vld1q_u16(src + 1); + uint16x8_t s2 = vld1q_u16(src + 2); + + // Make use of the identity: + // (4*a + 8*b + 4*c) >> 4 == (a + (b << 1) + c) >> 2 + uint16x8_t t0 = vaddq_u16(s0, s2); + uint16x8_t t1 = vaddq_u16(s1, s1); + uint16x8_t sum = vaddq_u16(t0, t1); + uint16x8_t res = vrshrq_n_u16(sum, 2); + + // Mask off out-of-bounds indices. + uint16x8_t current_dst = vld1q_u16(dst); + uint16x8_t mask = vcgtq_u16(vdupq_n_u16(sz), index); + res = vbslq_u16(mask, res, current_dst); + + vst1q_u16(dst, res); + } + } else if (strength == 2) { // Filter: {5, 6, 5}. + const uint16_t *src = edge + 1; + + const uint16x8x3_t filter = { { vdupq_n_u16(5), vdupq_n_u16(6), + vdupq_n_u16(5) } }; + while (sz >= 8) { + uint16x8_t s0 = vld1q_u16(src); + uint16x8_t s1 = vld1q_u16(src + 1); + uint16x8_t s2 = vld1q_u16(src + 2); + + uint16x8_t accum = vmulq_u16(s0, filter.val[0]); + accum = vmlaq_u16(accum, s1, filter.val[1]); + accum = vmlaq_u16(accum, s2, filter.val[2]); + uint16x8_t res = vrshrq_n_u16(accum, 4); + + vst1q_u16(dst, res); + + src += 8; + dst += 8; + sz -= 8; + } + + if (sz > 0) { // Handle sz < 8 to avoid modifying out-of-bounds values. + uint16x8_t s0 = vld1q_u16(src); + uint16x8_t s1 = vld1q_u16(src + 1); + uint16x8_t s2 = vld1q_u16(src + 2); + + uint16x8_t accum = vmulq_u16(s0, filter.val[0]); + accum = vmlaq_u16(accum, s1, filter.val[1]); + accum = vmlaq_u16(accum, s2, filter.val[2]); + uint16x8_t res = vrshrq_n_u16(accum, 4); + + // Mask off out-of-bounds indices. + uint16x8_t current_dst = vld1q_u16(dst); + uint16x8_t mask = vcgtq_u16(vdupq_n_u16(sz), index); + res = vbslq_u16(mask, res, current_dst); + + vst1q_u16(dst, res); + } + } else { // Filter {2, 4, 4, 4, 2}. 
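// The same rewrite used for the {4, 8, 4} filter above applies here: factoring
// the common 2 out of the {2, 4, 4, 4, 2} taps leaves only additions and a
// smaller rounding shift, and with round-to-nearest the two forms are exactly
// equivalent:
//   (2*a + 4*b + 4*c + 4*d + 2*e + 8) >> 4
//     == (2*(a + 2*(b + c + d) + e) + 8) >> 4
//     == (a + 2*(b + c + d) + e + 4) >> 3
// which is what the vaddq_u16() / vrshrq_n_u16(sum, 3) sequence below computes.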
+ const uint16_t *src = edge; + + while (sz >= 8) { + uint16x8_t s0 = vld1q_u16(src); + uint16x8_t s1 = vld1q_u16(src + 1); + uint16x8_t s2 = vld1q_u16(src + 2); + uint16x8_t s3 = vld1q_u16(src + 3); + uint16x8_t s4 = vld1q_u16(src + 4); + + // Make use of the identity: + // (2*a + 4*b + 4*c + 4*d + 2*e) >> 4 == (a + ((b + c + d) << 1) + e) >> 3 + uint16x8_t t0 = vaddq_u16(s0, s4); + uint16x8_t t1 = vaddq_u16(s1, s2); + t1 = vaddq_u16(t1, s3); + t1 = vaddq_u16(t1, t1); + uint16x8_t sum = vaddq_u16(t0, t1); + uint16x8_t res = vrshrq_n_u16(sum, 3); + + vst1q_u16(dst, res); + + src += 8; + dst += 8; + sz -= 8; + } + + if (sz > 0) { // Handle sz < 8 to avoid modifying out-of-bounds values. + uint16x8_t s0 = vld1q_u16(src); + uint16x8_t s1 = vld1q_u16(src + 1); + uint16x8_t s2 = vld1q_u16(src + 2); + uint16x8_t s3 = vld1q_u16(src + 3); + uint16x8_t s4 = vld1q_u16(src + 4); + + // Make use of the identity: + // (2*a + 4*b + 4*c + 4*d + 2*e) >> 4 == (a + ((b + c + d) << 1) + e) >> 3 + uint16x8_t t0 = vaddq_u16(s0, s4); + uint16x8_t t1 = vaddq_u16(s1, s2); + t1 = vaddq_u16(t1, s3); + t1 = vaddq_u16(t1, t1); + uint16x8_t sum = vaddq_u16(t0, t1); + uint16x8_t res = vrshrq_n_u16(sum, 3); + + // Mask off out-of-bounds indices. + uint16x8_t current_dst = vld1q_u16(dst); + uint16x8_t mask = vcgtq_u16(vdupq_n_u16(sz), index); + res = vbslq_u16(mask, res, current_dst); + + vst1q_u16(dst, res); + } + } +} + +void av1_highbd_upsample_intra_edge_neon(uint16_t *p, int sz, int bd) { + if (!sz) return; + + assert(sz <= MAX_UPSAMPLE_SZ); + + uint16_t edge[MAX_UPSAMPLE_SZ + 3]; + const uint16_t *src = edge; + + // Copy p[-1..(sz-1)] and pad out both ends. + edge[0] = p[-1]; + edge[1] = p[-1]; + memcpy(edge + 2, p, sz * 2); + edge[sz + 2] = p[sz - 1]; + p[-2] = p[-1]; + + uint16x8_t pixel_val_max = vdupq_n_u16((1 << bd) - 1); + + uint16_t *dst = p - 1; + + if (bd == 12) { + do { + uint16x8_t s0 = vld1q_u16(src); + uint16x8_t s1 = vld1q_u16(src + 1); + uint16x8_t s2 = vld1q_u16(src + 2); + uint16x8_t s3 = vld1q_u16(src + 3); + + uint16x8_t t0 = vaddq_u16(s1, s2); + uint16x8_t t1 = vaddq_u16(s0, s3); + uint32x4_t acc0 = vmull_n_u16(vget_low_u16(t0), 9); + acc0 = vqsubq_u32(acc0, vmovl_u16(vget_low_u16(t1))); + uint32x4_t acc1 = vmull_n_u16(vget_high_u16(t0), 9); + acc1 = vqsubq_u32(acc1, vmovl_u16(vget_high_u16(t1))); + + uint16x8x2_t res; + res.val[0] = vcombine_u16(vrshrn_n_u32(acc0, 4), vrshrn_n_u32(acc1, 4)); + // Clamp pixel values at bitdepth maximum. + res.val[0] = vminq_u16(res.val[0], pixel_val_max); + res.val[1] = s2; + + vst2q_u16(dst, res); + + src += 8; + dst += 16; + sz -= 8; + } while (sz > 0); + } else { // Bit depth is 8 or 10. + do { + uint16x8_t s0 = vld1q_u16(src); + uint16x8_t s1 = vld1q_u16(src + 1); + uint16x8_t s2 = vld1q_u16(src + 2); + uint16x8_t s3 = vld1q_u16(src + 3); + + uint16x8_t t0 = vaddq_u16(s0, s3); + uint16x8_t t1 = vaddq_u16(s1, s2); + t1 = vmulq_n_u16(t1, 9); + t1 = vqsubq_u16(t1, t0); + + uint16x8x2_t res; + res.val[0] = vrshrq_n_u16(t1, 4); + // Clamp pixel values at bitdepth maximum. + res.val[0] = vminq_u16(res.val[0], pixel_val_max); + res.val[1] = s2; + + vst2q_u16(dst, res); + + src += 8; + dst += 16; + sz -= 8; + } while (sz > 0); + } +} diff --git a/third_party/aom/av1/common/arm/highbd_warp_plane_neon.c b/third_party/aom/av1/common/arm/highbd_warp_plane_neon.c new file mode 100644 index 0000000000..c6f1e3ad92 --- /dev/null +++ b/third_party/aom/av1/common/arm/highbd_warp_plane_neon.c @@ -0,0 +1,317 @@ +/* + * Copyright (c) 2023, Alliance for Open Media. 
All rights reserved + * + * This source code is subject to the terms of the BSD 2 Clause License and + * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License + * was not distributed with this source code in the LICENSE file, you can + * obtain it at www.aomedia.org/license/software. If the Alliance for Open + * Media Patent License 1.0 was not distributed with this source code in the + * PATENTS file, you can obtain it at www.aomedia.org/license/patent. + */ + +#include <arm_neon.h> +#include <assert.h> +#include <stdbool.h> + +#include "aom_dsp/aom_dsp_common.h" +#include "aom_dsp/arm/mem_neon.h" +#include "aom_dsp/arm/sum_neon.h" +#include "aom_dsp/arm/transpose_neon.h" +#include "aom_ports/mem.h" +#include "av1/common/scale.h" +#include "av1/common/warped_motion.h" +#include "config/av1_rtcd.h" +#include "highbd_warp_plane_neon.h" + +static INLINE int16x8_t highbd_horizontal_filter_4x1_f4(uint16x8x2_t in, int bd, + int sx, int alpha) { + int16x8_t f[4]; + load_filters_4(f, sx, alpha); + + int16x8_t rv0 = vextq_s16(vreinterpretq_s16_u16(in.val[0]), + vreinterpretq_s16_u16(in.val[1]), 0); + int16x8_t rv1 = vextq_s16(vreinterpretq_s16_u16(in.val[0]), + vreinterpretq_s16_u16(in.val[1]), 1); + int16x8_t rv2 = vextq_s16(vreinterpretq_s16_u16(in.val[0]), + vreinterpretq_s16_u16(in.val[1]), 2); + int16x8_t rv3 = vextq_s16(vreinterpretq_s16_u16(in.val[0]), + vreinterpretq_s16_u16(in.val[1]), 3); + + int32x4_t m0 = vmull_s16(vget_low_s16(f[0]), vget_low_s16(rv0)); + m0 = vmlal_s16(m0, vget_high_s16(f[0]), vget_high_s16(rv0)); + int32x4_t m1 = vmull_s16(vget_low_s16(f[1]), vget_low_s16(rv1)); + m1 = vmlal_s16(m1, vget_high_s16(f[1]), vget_high_s16(rv1)); + int32x4_t m2 = vmull_s16(vget_low_s16(f[2]), vget_low_s16(rv2)); + m2 = vmlal_s16(m2, vget_high_s16(f[2]), vget_high_s16(rv2)); + int32x4_t m3 = vmull_s16(vget_low_s16(f[3]), vget_low_s16(rv3)); + m3 = vmlal_s16(m3, vget_high_s16(f[3]), vget_high_s16(rv3)); + + int32x4_t m0123[] = { m0, m1, m2, m3 }; + + const int round0 = (bd == 12) ? 
ROUND0_BITS + 2 : ROUND0_BITS; + const int offset_bits_horiz = bd + FILTER_BITS - 1; + + int32x4_t res = horizontal_add_4d_s32x4(m0123); + res = vaddq_s32(res, vdupq_n_s32(1 << offset_bits_horiz)); + res = vrshlq_s32(res, vdupq_n_s32(-round0)); + return vcombine_s16(vmovn_s32(res), vdup_n_s16(0)); +} + +static INLINE int16x8_t highbd_horizontal_filter_8x1_f8(uint16x8x2_t in, int bd, + int sx, int alpha) { + int16x8_t f[8]; + load_filters_8(f, sx, alpha); + + int16x8_t rv0 = vextq_s16(vreinterpretq_s16_u16(in.val[0]), + vreinterpretq_s16_u16(in.val[1]), 0); + int16x8_t rv1 = vextq_s16(vreinterpretq_s16_u16(in.val[0]), + vreinterpretq_s16_u16(in.val[1]), 1); + int16x8_t rv2 = vextq_s16(vreinterpretq_s16_u16(in.val[0]), + vreinterpretq_s16_u16(in.val[1]), 2); + int16x8_t rv3 = vextq_s16(vreinterpretq_s16_u16(in.val[0]), + vreinterpretq_s16_u16(in.val[1]), 3); + int16x8_t rv4 = vextq_s16(vreinterpretq_s16_u16(in.val[0]), + vreinterpretq_s16_u16(in.val[1]), 4); + int16x8_t rv5 = vextq_s16(vreinterpretq_s16_u16(in.val[0]), + vreinterpretq_s16_u16(in.val[1]), 5); + int16x8_t rv6 = vextq_s16(vreinterpretq_s16_u16(in.val[0]), + vreinterpretq_s16_u16(in.val[1]), 6); + int16x8_t rv7 = vextq_s16(vreinterpretq_s16_u16(in.val[0]), + vreinterpretq_s16_u16(in.val[1]), 7); + + int32x4_t m0 = vmull_s16(vget_low_s16(f[0]), vget_low_s16(rv0)); + m0 = vmlal_s16(m0, vget_high_s16(f[0]), vget_high_s16(rv0)); + int32x4_t m1 = vmull_s16(vget_low_s16(f[1]), vget_low_s16(rv1)); + m1 = vmlal_s16(m1, vget_high_s16(f[1]), vget_high_s16(rv1)); + int32x4_t m2 = vmull_s16(vget_low_s16(f[2]), vget_low_s16(rv2)); + m2 = vmlal_s16(m2, vget_high_s16(f[2]), vget_high_s16(rv2)); + int32x4_t m3 = vmull_s16(vget_low_s16(f[3]), vget_low_s16(rv3)); + m3 = vmlal_s16(m3, vget_high_s16(f[3]), vget_high_s16(rv3)); + int32x4_t m4 = vmull_s16(vget_low_s16(f[4]), vget_low_s16(rv4)); + m4 = vmlal_s16(m4, vget_high_s16(f[4]), vget_high_s16(rv4)); + int32x4_t m5 = vmull_s16(vget_low_s16(f[5]), vget_low_s16(rv5)); + m5 = vmlal_s16(m5, vget_high_s16(f[5]), vget_high_s16(rv5)); + int32x4_t m6 = vmull_s16(vget_low_s16(f[6]), vget_low_s16(rv6)); + m6 = vmlal_s16(m6, vget_high_s16(f[6]), vget_high_s16(rv6)); + int32x4_t m7 = vmull_s16(vget_low_s16(f[7]), vget_low_s16(rv7)); + m7 = vmlal_s16(m7, vget_high_s16(f[7]), vget_high_s16(rv7)); + + int32x4_t m0123[] = { m0, m1, m2, m3 }; + int32x4_t m4567[] = { m4, m5, m6, m7 }; + + const int round0 = (bd == 12) ? 
ROUND0_BITS + 2 : ROUND0_BITS; + const int offset_bits_horiz = bd + FILTER_BITS - 1; + + int32x4_t res0 = horizontal_add_4d_s32x4(m0123); + int32x4_t res1 = horizontal_add_4d_s32x4(m4567); + res0 = vaddq_s32(res0, vdupq_n_s32(1 << offset_bits_horiz)); + res1 = vaddq_s32(res1, vdupq_n_s32(1 << offset_bits_horiz)); + res0 = vrshlq_s32(res0, vdupq_n_s32(-round0)); + res1 = vrshlq_s32(res1, vdupq_n_s32(-round0)); + return vcombine_s16(vmovn_s32(res0), vmovn_s32(res1)); +} + +static INLINE int16x8_t highbd_horizontal_filter_4x1_f1(uint16x8x2_t in, int bd, + int sx) { + int16x8_t f = load_filters_1(sx); + + int16x8_t rv0 = vextq_s16(vreinterpretq_s16_u16(in.val[0]), + vreinterpretq_s16_u16(in.val[1]), 0); + int16x8_t rv1 = vextq_s16(vreinterpretq_s16_u16(in.val[0]), + vreinterpretq_s16_u16(in.val[1]), 1); + int16x8_t rv2 = vextq_s16(vreinterpretq_s16_u16(in.val[0]), + vreinterpretq_s16_u16(in.val[1]), 2); + int16x8_t rv3 = vextq_s16(vreinterpretq_s16_u16(in.val[0]), + vreinterpretq_s16_u16(in.val[1]), 3); + + int32x4_t m0 = vmull_s16(vget_low_s16(f), vget_low_s16(rv0)); + m0 = vmlal_s16(m0, vget_high_s16(f), vget_high_s16(rv0)); + int32x4_t m1 = vmull_s16(vget_low_s16(f), vget_low_s16(rv1)); + m1 = vmlal_s16(m1, vget_high_s16(f), vget_high_s16(rv1)); + int32x4_t m2 = vmull_s16(vget_low_s16(f), vget_low_s16(rv2)); + m2 = vmlal_s16(m2, vget_high_s16(f), vget_high_s16(rv2)); + int32x4_t m3 = vmull_s16(vget_low_s16(f), vget_low_s16(rv3)); + m3 = vmlal_s16(m3, vget_high_s16(f), vget_high_s16(rv3)); + + int32x4_t m0123[] = { m0, m1, m2, m3 }; + + const int round0 = (bd == 12) ? ROUND0_BITS + 2 : ROUND0_BITS; + const int offset_bits_horiz = bd + FILTER_BITS - 1; + + int32x4_t res = horizontal_add_4d_s32x4(m0123); + res = vaddq_s32(res, vdupq_n_s32(1 << offset_bits_horiz)); + res = vrshlq_s32(res, vdupq_n_s32(-round0)); + return vcombine_s16(vmovn_s32(res), vdup_n_s16(0)); +} + +static INLINE int16x8_t highbd_horizontal_filter_8x1_f1(uint16x8x2_t in, int bd, + int sx) { + int16x8_t f = load_filters_1(sx); + + int16x8_t rv0 = vextq_s16(vreinterpretq_s16_u16(in.val[0]), + vreinterpretq_s16_u16(in.val[1]), 0); + int16x8_t rv1 = vextq_s16(vreinterpretq_s16_u16(in.val[0]), + vreinterpretq_s16_u16(in.val[1]), 1); + int16x8_t rv2 = vextq_s16(vreinterpretq_s16_u16(in.val[0]), + vreinterpretq_s16_u16(in.val[1]), 2); + int16x8_t rv3 = vextq_s16(vreinterpretq_s16_u16(in.val[0]), + vreinterpretq_s16_u16(in.val[1]), 3); + int16x8_t rv4 = vextq_s16(vreinterpretq_s16_u16(in.val[0]), + vreinterpretq_s16_u16(in.val[1]), 4); + int16x8_t rv5 = vextq_s16(vreinterpretq_s16_u16(in.val[0]), + vreinterpretq_s16_u16(in.val[1]), 5); + int16x8_t rv6 = vextq_s16(vreinterpretq_s16_u16(in.val[0]), + vreinterpretq_s16_u16(in.val[1]), 6); + int16x8_t rv7 = vextq_s16(vreinterpretq_s16_u16(in.val[0]), + vreinterpretq_s16_u16(in.val[1]), 7); + + int32x4_t m0 = vmull_s16(vget_low_s16(f), vget_low_s16(rv0)); + m0 = vmlal_s16(m0, vget_high_s16(f), vget_high_s16(rv0)); + int32x4_t m1 = vmull_s16(vget_low_s16(f), vget_low_s16(rv1)); + m1 = vmlal_s16(m1, vget_high_s16(f), vget_high_s16(rv1)); + int32x4_t m2 = vmull_s16(vget_low_s16(f), vget_low_s16(rv2)); + m2 = vmlal_s16(m2, vget_high_s16(f), vget_high_s16(rv2)); + int32x4_t m3 = vmull_s16(vget_low_s16(f), vget_low_s16(rv3)); + m3 = vmlal_s16(m3, vget_high_s16(f), vget_high_s16(rv3)); + int32x4_t m4 = vmull_s16(vget_low_s16(f), vget_low_s16(rv4)); + m4 = vmlal_s16(m4, vget_high_s16(f), vget_high_s16(rv4)); + int32x4_t m5 = vmull_s16(vget_low_s16(f), vget_low_s16(rv5)); + m5 = vmlal_s16(m5, 
vget_high_s16(f), vget_high_s16(rv5)); + int32x4_t m6 = vmull_s16(vget_low_s16(f), vget_low_s16(rv6)); + m6 = vmlal_s16(m6, vget_high_s16(f), vget_high_s16(rv6)); + int32x4_t m7 = vmull_s16(vget_low_s16(f), vget_low_s16(rv7)); + m7 = vmlal_s16(m7, vget_high_s16(f), vget_high_s16(rv7)); + + int32x4_t m0123[] = { m0, m1, m2, m3 }; + int32x4_t m4567[] = { m4, m5, m6, m7 }; + + const int round0 = (bd == 12) ? ROUND0_BITS + 2 : ROUND0_BITS; + const int offset_bits_horiz = bd + FILTER_BITS - 1; + + int32x4_t res0 = horizontal_add_4d_s32x4(m0123); + int32x4_t res1 = horizontal_add_4d_s32x4(m4567); + res0 = vaddq_s32(res0, vdupq_n_s32(1 << offset_bits_horiz)); + res1 = vaddq_s32(res1, vdupq_n_s32(1 << offset_bits_horiz)); + res0 = vrshlq_s32(res0, vdupq_n_s32(-round0)); + res1 = vrshlq_s32(res1, vdupq_n_s32(-round0)); + return vcombine_s16(vmovn_s32(res0), vmovn_s32(res1)); +} + +static INLINE int32x4_t vertical_filter_4x1_f1(const int16x8_t *tmp, int sy) { + const int16x8_t f = load_filters_1(sy); + const int16x4_t f0123 = vget_low_s16(f); + const int16x4_t f4567 = vget_high_s16(f); + + int32x4_t m0123 = vmull_lane_s16(vget_low_s16(tmp[0]), f0123, 0); + m0123 = vmlal_lane_s16(m0123, vget_low_s16(tmp[1]), f0123, 1); + m0123 = vmlal_lane_s16(m0123, vget_low_s16(tmp[2]), f0123, 2); + m0123 = vmlal_lane_s16(m0123, vget_low_s16(tmp[3]), f0123, 3); + m0123 = vmlal_lane_s16(m0123, vget_low_s16(tmp[4]), f4567, 0); + m0123 = vmlal_lane_s16(m0123, vget_low_s16(tmp[5]), f4567, 1); + m0123 = vmlal_lane_s16(m0123, vget_low_s16(tmp[6]), f4567, 2); + m0123 = vmlal_lane_s16(m0123, vget_low_s16(tmp[7]), f4567, 3); + return m0123; +} + +static INLINE int32x4x2_t vertical_filter_8x1_f1(const int16x8_t *tmp, int sy) { + const int16x8_t f = load_filters_1(sy); + const int16x4_t f0123 = vget_low_s16(f); + const int16x4_t f4567 = vget_high_s16(f); + + int32x4_t m0123 = vmull_lane_s16(vget_low_s16(tmp[0]), f0123, 0); + m0123 = vmlal_lane_s16(m0123, vget_low_s16(tmp[1]), f0123, 1); + m0123 = vmlal_lane_s16(m0123, vget_low_s16(tmp[2]), f0123, 2); + m0123 = vmlal_lane_s16(m0123, vget_low_s16(tmp[3]), f0123, 3); + m0123 = vmlal_lane_s16(m0123, vget_low_s16(tmp[4]), f4567, 0); + m0123 = vmlal_lane_s16(m0123, vget_low_s16(tmp[5]), f4567, 1); + m0123 = vmlal_lane_s16(m0123, vget_low_s16(tmp[6]), f4567, 2); + m0123 = vmlal_lane_s16(m0123, vget_low_s16(tmp[7]), f4567, 3); + + int32x4_t m4567 = vmull_lane_s16(vget_high_s16(tmp[0]), f0123, 0); + m4567 = vmlal_lane_s16(m4567, vget_high_s16(tmp[1]), f0123, 1); + m4567 = vmlal_lane_s16(m4567, vget_high_s16(tmp[2]), f0123, 2); + m4567 = vmlal_lane_s16(m4567, vget_high_s16(tmp[3]), f0123, 3); + m4567 = vmlal_lane_s16(m4567, vget_high_s16(tmp[4]), f4567, 0); + m4567 = vmlal_lane_s16(m4567, vget_high_s16(tmp[5]), f4567, 1); + m4567 = vmlal_lane_s16(m4567, vget_high_s16(tmp[6]), f4567, 2); + m4567 = vmlal_lane_s16(m4567, vget_high_s16(tmp[7]), f4567, 3); + return (int32x4x2_t){ { m0123, m4567 } }; +} + +static INLINE int32x4_t vertical_filter_4x1_f4(const int16x8_t *tmp, int sy, + int gamma) { + int16x8_t s0, s1, s2, s3; + transpose_elems_s16_4x8( + vget_low_s16(tmp[0]), vget_low_s16(tmp[1]), vget_low_s16(tmp[2]), + vget_low_s16(tmp[3]), vget_low_s16(tmp[4]), vget_low_s16(tmp[5]), + vget_low_s16(tmp[6]), vget_low_s16(tmp[7]), &s0, &s1, &s2, &s3); + + int16x8_t f[4]; + load_filters_4(f, sy, gamma); + + int32x4_t m0 = vmull_s16(vget_low_s16(s0), vget_low_s16(f[0])); + m0 = vmlal_s16(m0, vget_high_s16(s0), vget_high_s16(f[0])); + int32x4_t m1 = vmull_s16(vget_low_s16(s1), 
vget_low_s16(f[1])); + m1 = vmlal_s16(m1, vget_high_s16(s1), vget_high_s16(f[1])); + int32x4_t m2 = vmull_s16(vget_low_s16(s2), vget_low_s16(f[2])); + m2 = vmlal_s16(m2, vget_high_s16(s2), vget_high_s16(f[2])); + int32x4_t m3 = vmull_s16(vget_low_s16(s3), vget_low_s16(f[3])); + m3 = vmlal_s16(m3, vget_high_s16(s3), vget_high_s16(f[3])); + + int32x4_t m0123[] = { m0, m1, m2, m3 }; + return horizontal_add_4d_s32x4(m0123); +} + +static INLINE int32x4x2_t vertical_filter_8x1_f8(const int16x8_t *tmp, int sy, + int gamma) { + int16x8_t s0 = tmp[0]; + int16x8_t s1 = tmp[1]; + int16x8_t s2 = tmp[2]; + int16x8_t s3 = tmp[3]; + int16x8_t s4 = tmp[4]; + int16x8_t s5 = tmp[5]; + int16x8_t s6 = tmp[6]; + int16x8_t s7 = tmp[7]; + transpose_elems_inplace_s16_8x8(&s0, &s1, &s2, &s3, &s4, &s5, &s6, &s7); + + int16x8_t f[8]; + load_filters_8(f, sy, gamma); + + int32x4_t m0 = vmull_s16(vget_low_s16(s0), vget_low_s16(f[0])); + m0 = vmlal_s16(m0, vget_high_s16(s0), vget_high_s16(f[0])); + int32x4_t m1 = vmull_s16(vget_low_s16(s1), vget_low_s16(f[1])); + m1 = vmlal_s16(m1, vget_high_s16(s1), vget_high_s16(f[1])); + int32x4_t m2 = vmull_s16(vget_low_s16(s2), vget_low_s16(f[2])); + m2 = vmlal_s16(m2, vget_high_s16(s2), vget_high_s16(f[2])); + int32x4_t m3 = vmull_s16(vget_low_s16(s3), vget_low_s16(f[3])); + m3 = vmlal_s16(m3, vget_high_s16(s3), vget_high_s16(f[3])); + int32x4_t m4 = vmull_s16(vget_low_s16(s4), vget_low_s16(f[4])); + m4 = vmlal_s16(m4, vget_high_s16(s4), vget_high_s16(f[4])); + int32x4_t m5 = vmull_s16(vget_low_s16(s5), vget_low_s16(f[5])); + m5 = vmlal_s16(m5, vget_high_s16(s5), vget_high_s16(f[5])); + int32x4_t m6 = vmull_s16(vget_low_s16(s6), vget_low_s16(f[6])); + m6 = vmlal_s16(m6, vget_high_s16(s6), vget_high_s16(f[6])); + int32x4_t m7 = vmull_s16(vget_low_s16(s7), vget_low_s16(f[7])); + m7 = vmlal_s16(m7, vget_high_s16(s7), vget_high_s16(f[7])); + + int32x4_t m0123[] = { m0, m1, m2, m3 }; + int32x4_t m4567[] = { m4, m5, m6, m7 }; + + int32x4x2_t ret; + ret.val[0] = horizontal_add_4d_s32x4(m0123); + ret.val[1] = horizontal_add_4d_s32x4(m4567); + return ret; +} + +void av1_highbd_warp_affine_neon(const int32_t *mat, const uint16_t *ref, + int width, int height, int stride, + uint16_t *pred, int p_col, int p_row, + int p_width, int p_height, int p_stride, + int subsampling_x, int subsampling_y, int bd, + ConvolveParams *conv_params, int16_t alpha, + int16_t beta, int16_t gamma, int16_t delta) { + highbd_warp_affine_common(mat, ref, width, height, stride, pred, p_col, p_row, + p_width, p_height, p_stride, subsampling_x, + subsampling_y, bd, conv_params, alpha, beta, gamma, + delta); +} diff --git a/third_party/aom/av1/common/arm/highbd_warp_plane_neon.h b/third_party/aom/av1/common/arm/highbd_warp_plane_neon.h new file mode 100644 index 0000000000..3b8982898e --- /dev/null +++ b/third_party/aom/av1/common/arm/highbd_warp_plane_neon.h @@ -0,0 +1,424 @@ +/* + * Copyright (c) 2023, Alliance for Open Media. All rights reserved + * + * This source code is subject to the terms of the BSD 2 Clause License and + * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License + * was not distributed with this source code in the LICENSE file, you can + * obtain it at www.aomedia.org/license/software. If the Alliance for Open + * Media Patent License 1.0 was not distributed with this source code in the + * PATENTS file, you can obtain it at www.aomedia.org/license/patent. 
+ */ +#ifndef AOM_AV1_COMMON_ARM_HIGHBD_WARP_PLANE_NEON_H_ +#define AOM_AV1_COMMON_ARM_HIGHBD_WARP_PLANE_NEON_H_ + +#include <arm_neon.h> +#include <assert.h> +#include <stdbool.h> + +#include "aom_dsp/aom_dsp_common.h" +#include "aom_dsp/arm/mem_neon.h" +#include "aom_dsp/arm/sum_neon.h" +#include "aom_ports/mem.h" +#include "av1/common/scale.h" +#include "av1/common/warped_motion.h" +#include "config/av1_rtcd.h" + +static INLINE int16x8_t highbd_horizontal_filter_4x1_f4(uint16x8x2_t in, int bd, + int sx, int alpha); + +static INLINE int16x8_t highbd_horizontal_filter_8x1_f8(uint16x8x2_t in, int bd, + int sx, int alpha); + +static INLINE int16x8_t highbd_horizontal_filter_4x1_f1(uint16x8x2_t in, int bd, + int sx); + +static INLINE int16x8_t highbd_horizontal_filter_8x1_f1(uint16x8x2_t in, int bd, + int sx); + +static INLINE int32x4_t vertical_filter_4x1_f1(const int16x8_t *tmp, int sy); + +static INLINE int32x4x2_t vertical_filter_8x1_f1(const int16x8_t *tmp, int sy); + +static INLINE int32x4_t vertical_filter_4x1_f4(const int16x8_t *tmp, int sy, + int gamma); + +static INLINE int32x4x2_t vertical_filter_8x1_f8(const int16x8_t *tmp, int sy, + int gamma); + +static INLINE int16x8_t load_filters_1(int ofs) { + const int ofs0 = ROUND_POWER_OF_TWO(ofs, WARPEDDIFF_PREC_BITS); + + const int16_t *base = + (int16_t *)av1_warped_filter + WARPEDPIXEL_PREC_SHIFTS * 8; + return vld1q_s16(base + ofs0 * 8); +} + +static INLINE void load_filters_4(int16x8_t out[], int ofs, int stride) { + const int ofs0 = ROUND_POWER_OF_TWO(ofs + stride * 0, WARPEDDIFF_PREC_BITS); + const int ofs1 = ROUND_POWER_OF_TWO(ofs + stride * 1, WARPEDDIFF_PREC_BITS); + const int ofs2 = ROUND_POWER_OF_TWO(ofs + stride * 2, WARPEDDIFF_PREC_BITS); + const int ofs3 = ROUND_POWER_OF_TWO(ofs + stride * 3, WARPEDDIFF_PREC_BITS); + + const int16_t *base = + (int16_t *)av1_warped_filter + WARPEDPIXEL_PREC_SHIFTS * 8; + out[0] = vld1q_s16(base + ofs0 * 8); + out[1] = vld1q_s16(base + ofs1 * 8); + out[2] = vld1q_s16(base + ofs2 * 8); + out[3] = vld1q_s16(base + ofs3 * 8); +} + +static INLINE void load_filters_8(int16x8_t out[], int ofs, int stride) { + const int ofs0 = ROUND_POWER_OF_TWO(ofs + stride * 0, WARPEDDIFF_PREC_BITS); + const int ofs1 = ROUND_POWER_OF_TWO(ofs + stride * 1, WARPEDDIFF_PREC_BITS); + const int ofs2 = ROUND_POWER_OF_TWO(ofs + stride * 2, WARPEDDIFF_PREC_BITS); + const int ofs3 = ROUND_POWER_OF_TWO(ofs + stride * 3, WARPEDDIFF_PREC_BITS); + const int ofs4 = ROUND_POWER_OF_TWO(ofs + stride * 4, WARPEDDIFF_PREC_BITS); + const int ofs5 = ROUND_POWER_OF_TWO(ofs + stride * 5, WARPEDDIFF_PREC_BITS); + const int ofs6 = ROUND_POWER_OF_TWO(ofs + stride * 6, WARPEDDIFF_PREC_BITS); + const int ofs7 = ROUND_POWER_OF_TWO(ofs + stride * 7, WARPEDDIFF_PREC_BITS); + + const int16_t *base = + (int16_t *)av1_warped_filter + WARPEDPIXEL_PREC_SHIFTS * 8; + out[0] = vld1q_s16(base + ofs0 * 8); + out[1] = vld1q_s16(base + ofs1 * 8); + out[2] = vld1q_s16(base + ofs2 * 8); + out[3] = vld1q_s16(base + ofs3 * 8); + out[4] = vld1q_s16(base + ofs4 * 8); + out[5] = vld1q_s16(base + ofs5 * 8); + out[6] = vld1q_s16(base + ofs6 * 8); + out[7] = vld1q_s16(base + ofs7 * 8); +} + +static INLINE uint16x4_t clip_pixel_highbd_vec(int32x4_t val, int bd) { + const int limit = (1 << bd) - 1; + return vqmovun_s32(vminq_s32(val, vdupq_n_s32(limit))); +} + +static INLINE void warp_affine_horizontal(const uint16_t *ref, int width, + int height, int stride, int p_width, + int16_t alpha, int16_t beta, int iy4, + int sx4, int ix4, int16x8_t tmp[], + int bd) { + 
const int round0 = (bd == 12) ? ROUND0_BITS + 2 : ROUND0_BITS; + + if (ix4 <= -7) { + for (int k = 0; k < 15; ++k) { + int iy = clamp(iy4 + k - 7, 0, height - 1); + int32_t dup_val = (1 << (bd + FILTER_BITS - round0 - 1)) + + ref[iy * stride] * (1 << (FILTER_BITS - round0)); + tmp[k] = vdupq_n_s16(dup_val); + } + return; + } else if (ix4 >= width + 6) { + for (int k = 0; k < 15; ++k) { + int iy = clamp(iy4 + k - 7, 0, height - 1); + int32_t dup_val = + (1 << (bd + FILTER_BITS - round0 - 1)) + + ref[iy * stride + (width - 1)] * (1 << (FILTER_BITS - round0)); + tmp[k] = vdupq_n_s16(dup_val); + } + return; + } + + static const uint16_t kIotaArr[] = { 0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15 }; + const uint16x8_t indx0 = vld1q_u16(kIotaArr); + const uint16x8_t indx1 = vld1q_u16(kIotaArr + 8); + + const int out_of_boundary_left = -(ix4 - 6); + const int out_of_boundary_right = (ix4 + 8) - width; + +#define APPLY_HORIZONTAL_SHIFT(fn, ...) \ + do { \ + if (out_of_boundary_left >= 0 || out_of_boundary_right >= 0) { \ + for (int k = 0; k < 15; ++k) { \ + const int iy = clamp(iy4 + k - 7, 0, height - 1); \ + uint16x8x2_t src_1 = vld1q_u16_x2(ref + iy * stride + ix4 - 7); \ + \ + if (out_of_boundary_left >= 0) { \ + uint16x8_t cmp_vec = vdupq_n_u16(out_of_boundary_left); \ + uint16x8_t vec_dup = vdupq_n_u16(ref[iy * stride]); \ + uint16x8_t mask0 = vcleq_u16(indx0, cmp_vec); \ + uint16x8_t mask1 = vcleq_u16(indx1, cmp_vec); \ + src_1.val[0] = vbslq_u16(mask0, vec_dup, src_1.val[0]); \ + src_1.val[1] = vbslq_u16(mask1, vec_dup, src_1.val[1]); \ + } \ + if (out_of_boundary_right >= 0) { \ + uint16x8_t cmp_vec = vdupq_n_u16(15 - out_of_boundary_right); \ + uint16x8_t vec_dup = vdupq_n_u16(ref[iy * stride + width - 1]); \ + uint16x8_t mask0 = vcgeq_u16(indx0, cmp_vec); \ + uint16x8_t mask1 = vcgeq_u16(indx1, cmp_vec); \ + src_1.val[0] = vbslq_u16(mask0, vec_dup, src_1.val[0]); \ + src_1.val[1] = vbslq_u16(mask1, vec_dup, src_1.val[1]); \ + } \ + tmp[k] = (fn)(src_1, __VA_ARGS__); \ + } \ + } else { \ + for (int k = 0; k < 15; ++k) { \ + const int iy = clamp(iy4 + k - 7, 0, height - 1); \ + uint16x8x2_t src_1 = vld1q_u16_x2(ref + iy * stride + ix4 - 7); \ + tmp[k] = (fn)(src_1, __VA_ARGS__); \ + } \ + } \ + } while (0) + + if (p_width == 4) { + if (beta == 0) { + if (alpha == 0) { + APPLY_HORIZONTAL_SHIFT(highbd_horizontal_filter_4x1_f1, bd, sx4); + } else { + APPLY_HORIZONTAL_SHIFT(highbd_horizontal_filter_4x1_f4, bd, sx4, alpha); + } + } else { + if (alpha == 0) { + APPLY_HORIZONTAL_SHIFT(highbd_horizontal_filter_4x1_f1, bd, + (sx4 + beta * (k - 3))); + } else { + APPLY_HORIZONTAL_SHIFT(highbd_horizontal_filter_4x1_f4, bd, + (sx4 + beta * (k - 3)), alpha); + } + } + } else { + if (beta == 0) { + if (alpha == 0) { + APPLY_HORIZONTAL_SHIFT(highbd_horizontal_filter_8x1_f1, bd, sx4); + } else { + APPLY_HORIZONTAL_SHIFT(highbd_horizontal_filter_8x1_f8, bd, sx4, alpha); + } + } else { + if (alpha == 0) { + APPLY_HORIZONTAL_SHIFT(highbd_horizontal_filter_8x1_f1, bd, + (sx4 + beta * (k - 3))); + } else { + APPLY_HORIZONTAL_SHIFT(highbd_horizontal_filter_8x1_f8, bd, + (sx4 + beta * (k - 3)), alpha); + } + } + } +} + +static INLINE void highbd_vertical_filter_4x1_f4( + uint16_t *pred, int p_stride, int bd, uint16_t *dst, int dst_stride, + bool is_compound, bool do_average, bool use_dist_wtd_comp_avg, int fwd, + int bwd, int16_t gamma, const int16x8_t *tmp, int i, int sy, int j) { + int32x4_t sum0 = gamma == 0 ? 
vertical_filter_4x1_f1(tmp, sy) + : vertical_filter_4x1_f4(tmp, sy, gamma); + + const int round0 = (bd == 12) ? ROUND0_BITS + 2 : ROUND0_BITS; + const int offset_bits_vert = bd + 2 * FILTER_BITS - round0; + + sum0 = vaddq_s32(sum0, vdupq_n_s32(1 << offset_bits_vert)); + + uint16_t *dst16 = &pred[i * p_stride + j]; + + if (!is_compound) { + const int reduce_bits_vert = 2 * FILTER_BITS - round0; + sum0 = vrshlq_s32(sum0, vdupq_n_s32(-reduce_bits_vert)); + + const int res_sub_const = (1 << (bd - 1)) + (1 << bd); + sum0 = vsubq_s32(sum0, vdupq_n_s32(res_sub_const)); + uint16x4_t res0 = clip_pixel_highbd_vec(sum0, bd); + vst1_u16(dst16, res0); + return; + } + + sum0 = vrshrq_n_s32(sum0, COMPOUND_ROUND1_BITS); + + uint16_t *p = &dst[i * dst_stride + j]; + + if (!do_average) { + vst1_u16(p, vqmovun_s32(sum0)); + return; + } + + uint16x4_t p0 = vld1_u16(p); + int32x4_t p_vec0 = vreinterpretq_s32_u32(vmovl_u16(p0)); + if (use_dist_wtd_comp_avg) { + p_vec0 = vmulq_n_s32(p_vec0, fwd); + p_vec0 = vmlaq_n_s32(p_vec0, sum0, bwd); + p_vec0 = vshrq_n_s32(p_vec0, DIST_PRECISION_BITS); + } else { + p_vec0 = vhaddq_s32(p_vec0, sum0); + } + + const int offset_bits = bd + 2 * FILTER_BITS - round0; + const int round1 = COMPOUND_ROUND1_BITS; + const int res_sub_const = + (1 << (offset_bits - round1)) + (1 << (offset_bits - round1 - 1)); + const int round_bits = 2 * FILTER_BITS - round0 - round1; + + p_vec0 = vsubq_s32(p_vec0, vdupq_n_s32(res_sub_const)); + p_vec0 = vrshlq_s32(p_vec0, vdupq_n_s32(-round_bits)); + uint16x4_t res0 = clip_pixel_highbd_vec(p_vec0, bd); + vst1_u16(dst16, res0); +} + +static INLINE void highbd_vertical_filter_8x1_f8( + uint16_t *pred, int p_stride, int bd, uint16_t *dst, int dst_stride, + bool is_compound, bool do_average, bool use_dist_wtd_comp_avg, int fwd, + int bwd, int16_t gamma, const int16x8_t *tmp, int i, int sy, int j) { + int32x4x2_t sums = gamma == 0 ? vertical_filter_8x1_f1(tmp, sy) + : vertical_filter_8x1_f8(tmp, sy, gamma); + int32x4_t sum0 = sums.val[0]; + int32x4_t sum1 = sums.val[1]; + + const int round0 = (bd == 12) ? 
ROUND0_BITS + 2 : ROUND0_BITS; + const int offset_bits_vert = bd + 2 * FILTER_BITS - round0; + + sum0 = vaddq_s32(sum0, vdupq_n_s32(1 << offset_bits_vert)); + sum1 = vaddq_s32(sum1, vdupq_n_s32(1 << offset_bits_vert)); + + uint16_t *dst16 = &pred[i * p_stride + j]; + + if (!is_compound) { + const int reduce_bits_vert = 2 * FILTER_BITS - round0; + sum0 = vrshlq_s32(sum0, vdupq_n_s32(-reduce_bits_vert)); + sum1 = vrshlq_s32(sum1, vdupq_n_s32(-reduce_bits_vert)); + + const int res_sub_const = (1 << (bd - 1)) + (1 << bd); + sum0 = vsubq_s32(sum0, vdupq_n_s32(res_sub_const)); + sum1 = vsubq_s32(sum1, vdupq_n_s32(res_sub_const)); + uint16x4_t res0 = clip_pixel_highbd_vec(sum0, bd); + uint16x4_t res1 = clip_pixel_highbd_vec(sum1, bd); + vst1_u16(dst16, res0); + vst1_u16(dst16 + 4, res1); + return; + } + + sum0 = vrshrq_n_s32(sum0, COMPOUND_ROUND1_BITS); + sum1 = vrshrq_n_s32(sum1, COMPOUND_ROUND1_BITS); + + uint16_t *p = &dst[i * dst_stride + j]; + + if (!do_average) { + vst1_u16(p, vqmovun_s32(sum0)); + vst1_u16(p + 4, vqmovun_s32(sum1)); + return; + } + + uint16x8_t p0 = vld1q_u16(p); + int32x4_t p_vec0 = vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(p0))); + int32x4_t p_vec1 = vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(p0))); + if (use_dist_wtd_comp_avg) { + p_vec0 = vmulq_n_s32(p_vec0, fwd); + p_vec1 = vmulq_n_s32(p_vec1, fwd); + p_vec0 = vmlaq_n_s32(p_vec0, sum0, bwd); + p_vec1 = vmlaq_n_s32(p_vec1, sum1, bwd); + p_vec0 = vshrq_n_s32(p_vec0, DIST_PRECISION_BITS); + p_vec1 = vshrq_n_s32(p_vec1, DIST_PRECISION_BITS); + } else { + p_vec0 = vhaddq_s32(p_vec0, sum0); + p_vec1 = vhaddq_s32(p_vec1, sum1); + } + + const int offset_bits = bd + 2 * FILTER_BITS - round0; + const int round1 = COMPOUND_ROUND1_BITS; + const int res_sub_const = + (1 << (offset_bits - round1)) + (1 << (offset_bits - round1 - 1)); + const int round_bits = 2 * FILTER_BITS - round0 - round1; + + p_vec0 = vsubq_s32(p_vec0, vdupq_n_s32(res_sub_const)); + p_vec1 = vsubq_s32(p_vec1, vdupq_n_s32(res_sub_const)); + + p_vec0 = vrshlq_s32(p_vec0, vdupq_n_s32(-round_bits)); + p_vec1 = vrshlq_s32(p_vec1, vdupq_n_s32(-round_bits)); + uint16x4_t res0 = clip_pixel_highbd_vec(p_vec0, bd); + uint16x4_t res1 = clip_pixel_highbd_vec(p_vec1, bd); + vst1_u16(dst16, res0); + vst1_u16(dst16 + 4, res1); +} + +static INLINE void warp_affine_vertical( + uint16_t *pred, int p_width, int p_height, int p_stride, int bd, + uint16_t *dst, int dst_stride, bool is_compound, bool do_average, + bool use_dist_wtd_comp_avg, int fwd, int bwd, int16_t gamma, int16_t delta, + const int16x8_t *tmp, int i, int sy4, int j) { + int limit_height = p_height > 4 ? 
8 : 4; + + if (p_width > 4) { + // p_width == 8 + for (int k = 0; k < limit_height; ++k) { + int sy = sy4 + delta * k; + highbd_vertical_filter_8x1_f8( + pred, p_stride, bd, dst, dst_stride, is_compound, do_average, + use_dist_wtd_comp_avg, fwd, bwd, gamma, tmp + k, i + k, sy, j); + } + } else { + // p_width == 4 + for (int k = 0; k < limit_height; ++k) { + int sy = sy4 + delta * k; + highbd_vertical_filter_4x1_f4( + pred, p_stride, bd, dst, dst_stride, is_compound, do_average, + use_dist_wtd_comp_avg, fwd, bwd, gamma, tmp + k, i + k, sy, j); + } + } +} + +static INLINE void highbd_warp_affine_common( + const int32_t *mat, const uint16_t *ref, int width, int height, int stride, + uint16_t *pred, int p_col, int p_row, int p_width, int p_height, + int p_stride, int subsampling_x, int subsampling_y, int bd, + ConvolveParams *conv_params, int16_t alpha, int16_t beta, int16_t gamma, + int16_t delta) { + uint16_t *const dst = conv_params->dst; + const int dst_stride = conv_params->dst_stride; + const bool is_compound = conv_params->is_compound; + const bool do_average = conv_params->do_average; + const bool use_dist_wtd_comp_avg = conv_params->use_dist_wtd_comp_avg; + const int fwd = conv_params->fwd_offset; + const int bwd = conv_params->bck_offset; + + assert(IMPLIES(is_compound, dst != NULL)); + + for (int i = 0; i < p_height; i += 8) { + for (int j = 0; j < p_width; j += 8) { + // Calculate the center of this 8x8 block, + // project to luma coordinates (if in a subsampled chroma plane), + // apply the affine transformation, + // then convert back to the original coordinates (if necessary) + const int32_t src_x = (j + 4 + p_col) << subsampling_x; + const int32_t src_y = (i + 4 + p_row) << subsampling_y; + const int64_t dst_x = + (int64_t)mat[2] * src_x + (int64_t)mat[3] * src_y + (int64_t)mat[0]; + const int64_t dst_y = + (int64_t)mat[4] * src_x + (int64_t)mat[5] * src_y + (int64_t)mat[1]; + const int64_t x4 = dst_x >> subsampling_x; + const int64_t y4 = dst_y >> subsampling_y; + + const int32_t ix4 = (int32_t)(x4 >> WARPEDMODEL_PREC_BITS); + int32_t sx4 = x4 & ((1 << WARPEDMODEL_PREC_BITS) - 1); + const int32_t iy4 = (int32_t)(y4 >> WARPEDMODEL_PREC_BITS); + int32_t sy4 = y4 & ((1 << WARPEDMODEL_PREC_BITS) - 1); + + sx4 += alpha * (-4) + beta * (-4); + sy4 += gamma * (-4) + delta * (-4); + + sx4 &= ~((1 << WARP_PARAM_REDUCE_BITS) - 1); + sy4 &= ~((1 << WARP_PARAM_REDUCE_BITS) - 1); + + // Each horizontal filter result is formed by the sum of up to eight + // multiplications by filter values and then a shift. Although both the + // inputs and filters are loaded as int16, the input data is at most bd + // bits and the filters are at most 8 bits each. Additionally since we + // know all possible filter values we know that the sum of absolute + // filter values will fit in at most 9 bits. With this in mind we can + // conclude that the sum of each filter application will fit in bd + 9 + // bits. The shift following the summation is ROUND0_BITS (which is 3), + // +2 for 12-bit, which gives us a final storage of: + // bd == 8: ( 8 + 9) - 3 => 14 bits + // bd == 10: (10 + 9) - 3 => 16 bits + // bd == 12: (12 + 9) - 5 => 16 bits + // So it is safe to use int16x8_t as the intermediate storage type here. 
+ int16x8_t tmp[15]; + + warp_affine_horizontal(ref, width, height, stride, p_width, alpha, beta, + iy4, sx4, ix4, tmp, bd); + warp_affine_vertical(pred, p_width, p_height, p_stride, bd, dst, + dst_stride, is_compound, do_average, + use_dist_wtd_comp_avg, fwd, bwd, gamma, delta, tmp, + i, sy4, j); + } + } +} + +#endif // AOM_AV1_COMMON_ARM_HIGHBD_WARP_PLANE_NEON_H_ diff --git a/third_party/aom/av1/common/arm/highbd_wiener_convolve_neon.c b/third_party/aom/av1/common/arm/highbd_wiener_convolve_neon.c new file mode 100644 index 0000000000..a6bd6d38e4 --- /dev/null +++ b/third_party/aom/av1/common/arm/highbd_wiener_convolve_neon.c @@ -0,0 +1,403 @@ +/* + * Copyright (c) 2023, Alliance for Open Media. All rights reserved + * + * This source code is subject to the terms of the BSD 2 Clause License and + * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License + * was not distributed with this source code in the LICENSE file, you can + * obtain it at www.aomedia.org/license/software. If the Alliance for Open + * Media Patent License 1.0 was not distributed with this source code in the + * PATENTS file, you can obtain it at www.aomedia.org/license/patent. + */ + +#include <arm_neon.h> +#include <assert.h> + +#include "aom_dsp/arm/mem_neon.h" +#include "av1/common/convolve.h" +#include "config/aom_config.h" +#include "config/av1_rtcd.h" + +#define HBD_WIENER_5TAP_HORIZ(name, shift) \ + static INLINE uint16x8_t name##_wiener_convolve5_8_2d_h( \ + const int16x8_t s0, const int16x8_t s1, const int16x8_t s2, \ + const int16x8_t s3, const int16x8_t s4, const int16x4_t x_filter, \ + const int32x4_t round_vec, const uint16x8_t im_max_val) { \ + /* Wiener filter is symmetric so add mirrored source elements. */ \ + int16x8_t s04 = vaddq_s16(s0, s4); \ + int16x8_t s13 = vaddq_s16(s1, s3); \ + \ + /* x_filter[0] = 0. (5-tap filters are 0-padded to 7 taps.) 
*/ \ + int32x4_t sum_lo = \ + vmlal_lane_s16(round_vec, vget_low_s16(s04), x_filter, 1); \ + sum_lo = vmlal_lane_s16(sum_lo, vget_low_s16(s13), x_filter, 2); \ + sum_lo = vmlal_lane_s16(sum_lo, vget_low_s16(s2), x_filter, 3); \ + \ + int32x4_t sum_hi = \ + vmlal_lane_s16(round_vec, vget_high_s16(s04), x_filter, 1); \ + sum_hi = vmlal_lane_s16(sum_hi, vget_high_s16(s13), x_filter, 2); \ + sum_hi = vmlal_lane_s16(sum_hi, vget_high_s16(s2), x_filter, 3); \ + \ + uint16x4_t res_lo = vqrshrun_n_s32(sum_lo, shift); \ + uint16x4_t res_hi = vqrshrun_n_s32(sum_hi, shift); \ + \ + return vminq_u16(vcombine_u16(res_lo, res_hi), im_max_val); \ + } \ + \ + static INLINE void name##_convolve_add_src_5tap_horiz( \ + const uint16_t *src_ptr, ptrdiff_t src_stride, uint16_t *dst_ptr, \ + ptrdiff_t dst_stride, int w, int h, const int16x4_t x_filter, \ + const int32x4_t round_vec, const uint16x8_t im_max_val) { \ + do { \ + const int16_t *s = (int16_t *)src_ptr; \ + uint16_t *d = dst_ptr; \ + int width = w; \ + \ + do { \ + int16x8_t s0, s1, s2, s3, s4; \ + load_s16_8x5(s, 1, &s0, &s1, &s2, &s3, &s4); \ + \ + uint16x8_t d0 = name##_wiener_convolve5_8_2d_h( \ + s0, s1, s2, s3, s4, x_filter, round_vec, im_max_val); \ + \ + vst1q_u16(d, d0); \ + \ + s += 8; \ + d += 8; \ + width -= 8; \ + } while (width != 0); \ + src_ptr += src_stride; \ + dst_ptr += dst_stride; \ + } while (--h != 0); \ + } + +HBD_WIENER_5TAP_HORIZ(highbd, WIENER_ROUND0_BITS) +HBD_WIENER_5TAP_HORIZ(highbd_12, WIENER_ROUND0_BITS + 2) + +#undef HBD_WIENER_5TAP_HORIZ + +#define HBD_WIENER_7TAP_HORIZ(name, shift) \ + static INLINE uint16x8_t name##_wiener_convolve7_8_2d_h( \ + const int16x8_t s0, const int16x8_t s1, const int16x8_t s2, \ + const int16x8_t s3, const int16x8_t s4, const int16x8_t s5, \ + const int16x8_t s6, const int16x4_t x_filter, const int32x4_t round_vec, \ + const uint16x8_t im_max_val) { \ + /* Wiener filter is symmetric so add mirrored source elements. 
*/ \ + int16x8_t s06 = vaddq_s16(s0, s6); \ + int16x8_t s15 = vaddq_s16(s1, s5); \ + int16x8_t s24 = vaddq_s16(s2, s4); \ + \ + int32x4_t sum_lo = \ + vmlal_lane_s16(round_vec, vget_low_s16(s06), x_filter, 0); \ + sum_lo = vmlal_lane_s16(sum_lo, vget_low_s16(s15), x_filter, 1); \ + sum_lo = vmlal_lane_s16(sum_lo, vget_low_s16(s24), x_filter, 2); \ + sum_lo = vmlal_lane_s16(sum_lo, vget_low_s16(s3), x_filter, 3); \ + \ + int32x4_t sum_hi = \ + vmlal_lane_s16(round_vec, vget_high_s16(s06), x_filter, 0); \ + sum_hi = vmlal_lane_s16(sum_hi, vget_high_s16(s15), x_filter, 1); \ + sum_hi = vmlal_lane_s16(sum_hi, vget_high_s16(s24), x_filter, 2); \ + sum_hi = vmlal_lane_s16(sum_hi, vget_high_s16(s3), x_filter, 3); \ + \ + uint16x4_t res_lo = vqrshrun_n_s32(sum_lo, shift); \ + uint16x4_t res_hi = vqrshrun_n_s32(sum_hi, shift); \ + \ + return vminq_u16(vcombine_u16(res_lo, res_hi), im_max_val); \ + } \ + \ + static INLINE void name##_convolve_add_src_7tap_horiz( \ + const uint16_t *src_ptr, ptrdiff_t src_stride, uint16_t *dst_ptr, \ + ptrdiff_t dst_stride, int w, int h, const int16x4_t x_filter, \ + const int32x4_t round_vec, const uint16x8_t im_max_val) { \ + do { \ + const int16_t *s = (int16_t *)src_ptr; \ + uint16_t *d = dst_ptr; \ + int width = w; \ + \ + do { \ + int16x8_t s0, s1, s2, s3, s4, s5, s6; \ + load_s16_8x7(s, 1, &s0, &s1, &s2, &s3, &s4, &s5, &s6); \ + \ + uint16x8_t d0 = name##_wiener_convolve7_8_2d_h( \ + s0, s1, s2, s3, s4, s5, s6, x_filter, round_vec, im_max_val); \ + \ + vst1q_u16(d, d0); \ + \ + s += 8; \ + d += 8; \ + width -= 8; \ + } while (width != 0); \ + src_ptr += src_stride; \ + dst_ptr += dst_stride; \ + } while (--h != 0); \ + } + +HBD_WIENER_7TAP_HORIZ(highbd, WIENER_ROUND0_BITS) +HBD_WIENER_7TAP_HORIZ(highbd_12, WIENER_ROUND0_BITS + 2) + +#undef HBD_WIENER_7TAP_HORIZ + +#define HBD_WIENER_5TAP_VERT(name, shift) \ + static INLINE uint16x8_t name##_wiener_convolve5_8_2d_v( \ + const int16x8_t s0, const int16x8_t s1, const int16x8_t s2, \ + const int16x8_t s3, const int16x8_t s4, const int16x4_t y_filter, \ + const int32x4_t round_vec, const uint16x8_t res_max_val) { \ + const int32x2_t y_filter_lo = vget_low_s32(vmovl_s16(y_filter)); \ + const int32x2_t y_filter_hi = vget_high_s32(vmovl_s16(y_filter)); \ + /* Wiener filter is symmetric so add mirrored source elements. */ \ + int32x4_t s04_lo = vaddl_s16(vget_low_s16(s0), vget_low_s16(s4)); \ + int32x4_t s13_lo = vaddl_s16(vget_low_s16(s1), vget_low_s16(s3)); \ + \ + /* y_filter[0] = 0. (5-tap filters are 0-padded to 7 taps.) 
*/ \ + int32x4_t sum_lo = vmlaq_lane_s32(round_vec, s04_lo, y_filter_lo, 1); \ + sum_lo = vmlaq_lane_s32(sum_lo, s13_lo, y_filter_hi, 0); \ + sum_lo = \ + vmlaq_lane_s32(sum_lo, vmovl_s16(vget_low_s16(s2)), y_filter_hi, 1); \ + \ + int32x4_t s04_hi = vaddl_s16(vget_high_s16(s0), vget_high_s16(s4)); \ + int32x4_t s13_hi = vaddl_s16(vget_high_s16(s1), vget_high_s16(s3)); \ + \ + int32x4_t sum_hi = vmlaq_lane_s32(round_vec, s04_hi, y_filter_lo, 1); \ + sum_hi = vmlaq_lane_s32(sum_hi, s13_hi, y_filter_hi, 0); \ + sum_hi = \ + vmlaq_lane_s32(sum_hi, vmovl_s16(vget_high_s16(s2)), y_filter_hi, 1); \ + \ + uint16x4_t res_lo = vqrshrun_n_s32(sum_lo, shift); \ + uint16x4_t res_hi = vqrshrun_n_s32(sum_hi, shift); \ + \ + return vminq_u16(vcombine_u16(res_lo, res_hi), res_max_val); \ + } \ + \ + static INLINE void name##_convolve_add_src_5tap_vert( \ + const uint16_t *src_ptr, ptrdiff_t src_stride, uint16_t *dst_ptr, \ + ptrdiff_t dst_stride, int w, int h, const int16x4_t y_filter, \ + const int32x4_t round_vec, const uint16x8_t res_max_val) { \ + do { \ + const int16_t *s = (int16_t *)src_ptr; \ + uint16_t *d = dst_ptr; \ + int height = h; \ + \ + while (height > 3) { \ + int16x8_t s0, s1, s2, s3, s4, s5, s6, s7; \ + load_s16_8x8(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6, &s7); \ + \ + uint16x8_t d0 = name##_wiener_convolve5_8_2d_v( \ + s0, s1, s2, s3, s4, y_filter, round_vec, res_max_val); \ + uint16x8_t d1 = name##_wiener_convolve5_8_2d_v( \ + s1, s2, s3, s4, s5, y_filter, round_vec, res_max_val); \ + uint16x8_t d2 = name##_wiener_convolve5_8_2d_v( \ + s2, s3, s4, s5, s6, y_filter, round_vec, res_max_val); \ + uint16x8_t d3 = name##_wiener_convolve5_8_2d_v( \ + s3, s4, s5, s6, s7, y_filter, round_vec, res_max_val); \ + \ + store_u16_8x4(d, dst_stride, d0, d1, d2, d3); \ + \ + s += 4 * src_stride; \ + d += 4 * dst_stride; \ + height -= 4; \ + } \ + \ + while (height-- != 0) { \ + int16x8_t s0, s1, s2, s3, s4; \ + load_s16_8x5(s, src_stride, &s0, &s1, &s2, &s3, &s4); \ + \ + uint16x8_t d0 = name##_wiener_convolve5_8_2d_v( \ + s0, s1, s2, s3, s4, y_filter, round_vec, res_max_val); \ + \ + vst1q_u16(d, d0); \ + \ + s += src_stride; \ + d += dst_stride; \ + } \ + \ + src_ptr += 8; \ + dst_ptr += 8; \ + w -= 8; \ + } while (w != 0); \ + } + +HBD_WIENER_5TAP_VERT(highbd, 2 * FILTER_BITS - WIENER_ROUND0_BITS) +HBD_WIENER_5TAP_VERT(highbd_12, 2 * FILTER_BITS - WIENER_ROUND0_BITS - 2) + +#undef HBD_WIENER_5TAP_VERT + +#define HBD_WIENER_7TAP_VERT(name, shift) \ + static INLINE uint16x8_t name##_wiener_convolve7_8_2d_v( \ + const int16x8_t s0, const int16x8_t s1, const int16x8_t s2, \ + const int16x8_t s3, const int16x8_t s4, const int16x8_t s5, \ + const int16x8_t s6, const int16x4_t y_filter, const int32x4_t round_vec, \ + const uint16x8_t res_max_val) { \ + const int32x2_t y_filter_lo = vget_low_s32(vmovl_s16(y_filter)); \ + const int32x2_t y_filter_hi = vget_high_s32(vmovl_s16(y_filter)); \ + /* Wiener filter is symmetric so add mirrored source elements. 
*/ \ + int32x4_t s06_lo = vaddl_s16(vget_low_s16(s0), vget_low_s16(s6)); \ + int32x4_t s15_lo = vaddl_s16(vget_low_s16(s1), vget_low_s16(s5)); \ + int32x4_t s24_lo = vaddl_s16(vget_low_s16(s2), vget_low_s16(s4)); \ + \ + int32x4_t sum_lo = vmlaq_lane_s32(round_vec, s06_lo, y_filter_lo, 0); \ + sum_lo = vmlaq_lane_s32(sum_lo, s15_lo, y_filter_lo, 1); \ + sum_lo = vmlaq_lane_s32(sum_lo, s24_lo, y_filter_hi, 0); \ + sum_lo = \ + vmlaq_lane_s32(sum_lo, vmovl_s16(vget_low_s16(s3)), y_filter_hi, 1); \ + \ + int32x4_t s06_hi = vaddl_s16(vget_high_s16(s0), vget_high_s16(s6)); \ + int32x4_t s15_hi = vaddl_s16(vget_high_s16(s1), vget_high_s16(s5)); \ + int32x4_t s24_hi = vaddl_s16(vget_high_s16(s2), vget_high_s16(s4)); \ + \ + int32x4_t sum_hi = vmlaq_lane_s32(round_vec, s06_hi, y_filter_lo, 0); \ + sum_hi = vmlaq_lane_s32(sum_hi, s15_hi, y_filter_lo, 1); \ + sum_hi = vmlaq_lane_s32(sum_hi, s24_hi, y_filter_hi, 0); \ + sum_hi = \ + vmlaq_lane_s32(sum_hi, vmovl_s16(vget_high_s16(s3)), y_filter_hi, 1); \ + \ + uint16x4_t res_lo = vqrshrun_n_s32(sum_lo, shift); \ + uint16x4_t res_hi = vqrshrun_n_s32(sum_hi, shift); \ + \ + return vminq_u16(vcombine_u16(res_lo, res_hi), res_max_val); \ + } \ + \ + static INLINE void name##_convolve_add_src_7tap_vert( \ + const uint16_t *src_ptr, ptrdiff_t src_stride, uint16_t *dst_ptr, \ + ptrdiff_t dst_stride, int w, int h, const int16x4_t y_filter, \ + const int32x4_t round_vec, const uint16x8_t res_max_val) { \ + do { \ + const int16_t *s = (int16_t *)src_ptr; \ + uint16_t *d = dst_ptr; \ + int height = h; \ + \ + while (height > 3) { \ + int16x8_t s0, s1, s2, s3, s4, s5, s6, s7, s8, s9; \ + load_s16_8x10(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6, &s7, \ + &s8, &s9); \ + \ + uint16x8_t d0 = name##_wiener_convolve7_8_2d_v( \ + s0, s1, s2, s3, s4, s5, s6, y_filter, round_vec, res_max_val); \ + uint16x8_t d1 = name##_wiener_convolve7_8_2d_v( \ + s1, s2, s3, s4, s5, s6, s7, y_filter, round_vec, res_max_val); \ + uint16x8_t d2 = name##_wiener_convolve7_8_2d_v( \ + s2, s3, s4, s5, s6, s7, s8, y_filter, round_vec, res_max_val); \ + uint16x8_t d3 = name##_wiener_convolve7_8_2d_v( \ + s3, s4, s5, s6, s7, s8, s9, y_filter, round_vec, res_max_val); \ + \ + store_u16_8x4(d, dst_stride, d0, d1, d2, d3); \ + \ + s += 4 * src_stride; \ + d += 4 * dst_stride; \ + height -= 4; \ + } \ + \ + while (height-- != 0) { \ + int16x8_t s0, s1, s2, s3, s4, s5, s6; \ + load_s16_8x7(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6); \ + \ + uint16x8_t d0 = name##_wiener_convolve7_8_2d_v( \ + s0, s1, s2, s3, s4, s5, s6, y_filter, round_vec, res_max_val); \ + \ + vst1q_u16(d, d0); \ + \ + s += src_stride; \ + d += dst_stride; \ + } \ + \ + src_ptr += 8; \ + dst_ptr += 8; \ + w -= 8; \ + } while (w != 0); \ + } + +HBD_WIENER_7TAP_VERT(highbd, 2 * FILTER_BITS - WIENER_ROUND0_BITS) +HBD_WIENER_7TAP_VERT(highbd_12, 2 * FILTER_BITS - WIENER_ROUND0_BITS - 2) + +#undef HBD_WIENER_7TAP_VERT + +static AOM_INLINE int get_wiener_filter_taps(const int16_t *filter) { + assert(filter[7] == 0); + if (filter[0] == 0 && filter[6] == 0) { + return WIENER_WIN_REDUCED; + } + return WIENER_WIN; +} + +void av1_highbd_wiener_convolve_add_src_neon( + const uint8_t *src8, ptrdiff_t src_stride, uint8_t *dst8, + ptrdiff_t dst_stride, const int16_t *x_filter, int x_step_q4, + const int16_t *y_filter, int y_step_q4, int w, int h, + const WienerConvolveParams *conv_params, int bd) { + (void)x_step_q4; + (void)y_step_q4; + + assert(w % 8 == 0); + assert(w <= MAX_SB_SIZE && h <= MAX_SB_SIZE); + assert(x_step_q4 == 16 && 
y_step_q4 == 16); + assert(x_filter[7] == 0 && y_filter[7] == 0); + + DECLARE_ALIGNED(16, uint16_t, + im_block[(MAX_SB_SIZE + WIENER_WIN - 1) * MAX_SB_SIZE]); + + const int x_filter_taps = get_wiener_filter_taps(x_filter); + const int y_filter_taps = get_wiener_filter_taps(y_filter); + int16x4_t x_filter_s16 = vld1_s16(x_filter); + int16x4_t y_filter_s16 = vld1_s16(y_filter); + // Add 128 to tap 3. (Needed for rounding.) + x_filter_s16 = vadd_s16(x_filter_s16, vcreate_s16(128ULL << 48)); + y_filter_s16 = vadd_s16(y_filter_s16, vcreate_s16(128ULL << 48)); + + const int im_stride = MAX_SB_SIZE; + const int im_h = h + y_filter_taps - 1; + const int horiz_offset = x_filter_taps / 2; + const int vert_offset = (y_filter_taps / 2) * (int)src_stride; + + const int extraprec_clamp_limit = + WIENER_CLAMP_LIMIT(conv_params->round_0, bd); + const uint16x8_t im_max_val = vdupq_n_u16(extraprec_clamp_limit - 1); + const int32x4_t horiz_round_vec = vdupq_n_s32(1 << (bd + FILTER_BITS - 1)); + + const uint16x8_t res_max_val = vdupq_n_u16((1 << bd) - 1); + const int32x4_t vert_round_vec = + vdupq_n_s32(-(1 << (bd + conv_params->round_1 - 1))); + + uint16_t *src = CONVERT_TO_SHORTPTR(src8); + uint16_t *dst = CONVERT_TO_SHORTPTR(dst8); + + if (bd == 12) { + if (x_filter_taps == WIENER_WIN_REDUCED) { + highbd_12_convolve_add_src_5tap_horiz( + src - horiz_offset - vert_offset, src_stride, im_block, im_stride, w, + im_h, x_filter_s16, horiz_round_vec, im_max_val); + } else { + highbd_12_convolve_add_src_7tap_horiz( + src - horiz_offset - vert_offset, src_stride, im_block, im_stride, w, + im_h, x_filter_s16, horiz_round_vec, im_max_val); + } + + if (y_filter_taps == WIENER_WIN_REDUCED) { + highbd_12_convolve_add_src_5tap_vert(im_block, im_stride, dst, dst_stride, + w, h, y_filter_s16, vert_round_vec, + res_max_val); + } else { + highbd_12_convolve_add_src_7tap_vert(im_block, im_stride, dst, dst_stride, + w, h, y_filter_s16, vert_round_vec, + res_max_val); + } + + } else { + if (x_filter_taps == WIENER_WIN_REDUCED) { + highbd_convolve_add_src_5tap_horiz( + src - horiz_offset - vert_offset, src_stride, im_block, im_stride, w, + im_h, x_filter_s16, horiz_round_vec, im_max_val); + } else { + highbd_convolve_add_src_7tap_horiz( + src - horiz_offset - vert_offset, src_stride, im_block, im_stride, w, + im_h, x_filter_s16, horiz_round_vec, im_max_val); + } + + if (y_filter_taps == WIENER_WIN_REDUCED) { + highbd_convolve_add_src_5tap_vert(im_block, im_stride, dst, dst_stride, w, + h, y_filter_s16, vert_round_vec, + res_max_val); + } else { + highbd_convolve_add_src_7tap_vert(im_block, im_stride, dst, dst_stride, w, + h, y_filter_s16, vert_round_vec, + res_max_val); + } + } +} diff --git a/third_party/aom/av1/common/arm/reconinter_neon.c b/third_party/aom/av1/common/arm/reconinter_neon.c new file mode 100644 index 0000000000..2b0274cc64 --- /dev/null +++ b/third_party/aom/av1/common/arm/reconinter_neon.c @@ -0,0 +1,217 @@ +/* + * + * Copyright (c) 2018, Alliance for Open Media. All rights reserved + * + * This source code is subject to the terms of the BSD 2 Clause License and + * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License + * was not distributed with this source code in the LICENSE file, you can + * obtain it at www.aomedia.org/license/software. If the Alliance for Open + * Media Patent License 1.0 was not distributed with this source code in the + * PATENTS file, you can obtain it at www.aomedia.org/license/patent. 
+ */ + +#include <arm_neon.h> +#include <assert.h> +#include <stdbool.h> + +#include "aom/aom_integer.h" +#include "aom_dsp/blend.h" +#include "aom_dsp/arm/mem_neon.h" +#include "aom_ports/mem.h" +#include "av1/common/blockd.h" +#include "config/av1_rtcd.h" + +static AOM_INLINE void diffwtd_mask_d16_neon( + uint8_t *mask, const bool inverse, const CONV_BUF_TYPE *src0, + int src0_stride, const CONV_BUF_TYPE *src1, int src1_stride, int h, int w, + ConvolveParams *conv_params, int bd) { + const int round = + 2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1 + (bd - 8); + const int16x8_t round_vec = vdupq_n_s16((int16_t)(-round)); + + if (w >= 16) { + int i = 0; + do { + int j = 0; + do { + uint16x8_t s0_lo = vld1q_u16(src0 + j); + uint16x8_t s1_lo = vld1q_u16(src1 + j); + uint16x8_t s0_hi = vld1q_u16(src0 + j + 8); + uint16x8_t s1_hi = vld1q_u16(src1 + j + 8); + + uint16x8_t diff_lo_u16 = vrshlq_u16(vabdq_u16(s0_lo, s1_lo), round_vec); + uint16x8_t diff_hi_u16 = vrshlq_u16(vabdq_u16(s0_hi, s1_hi), round_vec); + uint8x8_t diff_lo_u8 = vshrn_n_u16(diff_lo_u16, DIFF_FACTOR_LOG2); + uint8x8_t diff_hi_u8 = vshrn_n_u16(diff_hi_u16, DIFF_FACTOR_LOG2); + uint8x16_t diff = vcombine_u8(diff_lo_u8, diff_hi_u8); + + uint8x16_t m; + if (inverse) { + m = vqsubq_u8(vdupq_n_u8(64 - 38), diff); // Saturating to 0 + } else { + m = vminq_u8(vaddq_u8(diff, vdupq_n_u8(38)), vdupq_n_u8(64)); + } + + vst1q_u8(mask, m); + + mask += 16; + j += 16; + } while (j < w); + src0 += src0_stride; + src1 += src1_stride; + } while (++i < h); + } else if (w == 8) { + int i = 0; + do { + uint16x8_t s0 = vld1q_u16(src0); + uint16x8_t s1 = vld1q_u16(src1); + + uint16x8_t diff_u16 = vrshlq_u16(vabdq_u16(s0, s1), round_vec); + uint8x8_t diff_u8 = vshrn_n_u16(diff_u16, DIFF_FACTOR_LOG2); + uint8x8_t m; + if (inverse) { + m = vqsub_u8(vdup_n_u8(64 - 38), diff_u8); // Saturating to 0 + } else { + m = vmin_u8(vadd_u8(diff_u8, vdup_n_u8(38)), vdup_n_u8(64)); + } + + vst1_u8(mask, m); + + mask += 8; + src0 += src0_stride; + src1 += src1_stride; + } while (++i < h); + } else if (w == 4) { + int i = 0; + do { + uint16x8_t s0 = + vcombine_u16(vld1_u16(src0), vld1_u16(src0 + src0_stride)); + uint16x8_t s1 = + vcombine_u16(vld1_u16(src1), vld1_u16(src1 + src1_stride)); + + uint16x8_t diff_u16 = vrshlq_u16(vabdq_u16(s0, s1), round_vec); + uint8x8_t diff_u8 = vshrn_n_u16(diff_u16, DIFF_FACTOR_LOG2); + uint8x8_t m; + if (inverse) { + m = vqsub_u8(vdup_n_u8(64 - 38), diff_u8); // Saturating to 0 + } else { + m = vmin_u8(vadd_u8(diff_u8, vdup_n_u8(38)), vdup_n_u8(64)); + } + + vst1_u8(mask, m); + + mask += 8; + src0 += 2 * src0_stride; + src1 += 2 * src1_stride; + i += 2; + } while (i < h); + } +} + +void av1_build_compound_diffwtd_mask_d16_neon( + uint8_t *mask, DIFFWTD_MASK_TYPE mask_type, const CONV_BUF_TYPE *src0, + int src0_stride, const CONV_BUF_TYPE *src1, int src1_stride, int h, int w, + ConvolveParams *conv_params, int bd) { + assert(h >= 4); + assert(w >= 4); + assert((mask_type == DIFFWTD_38_INV) || (mask_type == DIFFWTD_38)); + + if (mask_type == DIFFWTD_38) { + diffwtd_mask_d16_neon(mask, /*inverse=*/false, src0, src0_stride, src1, + src1_stride, h, w, conv_params, bd); + } else { // mask_type == DIFFWTD_38_INV + diffwtd_mask_d16_neon(mask, /*inverse=*/true, src0, src0_stride, src1, + src1_stride, h, w, conv_params, bd); + } +} + +static AOM_INLINE void diffwtd_mask_neon(uint8_t *mask, const bool inverse, + const uint8_t *src0, int src0_stride, + const uint8_t *src1, int src1_stride, + int h, int w) { + if (w >= 16) { + int 
i = 0; + do { + int j = 0; + do { + uint8x16_t s0 = vld1q_u8(src0 + j); + uint8x16_t s1 = vld1q_u8(src1 + j); + + uint8x16_t diff = vshrq_n_u8(vabdq_u8(s0, s1), DIFF_FACTOR_LOG2); + uint8x16_t m; + if (inverse) { + m = vqsubq_u8(vdupq_n_u8(64 - 38), diff); // Saturating to 0 + } else { + m = vminq_u8(vaddq_u8(diff, vdupq_n_u8(38)), vdupq_n_u8(64)); + } + + vst1q_u8(mask, m); + + mask += 16; + j += 16; + } while (j < w); + src0 += src0_stride; + src1 += src1_stride; + } while (++i < h); + } else if (w == 8) { + int i = 0; + do { + uint8x16_t s0 = vcombine_u8(vld1_u8(src0), vld1_u8(src0 + src0_stride)); + uint8x16_t s1 = vcombine_u8(vld1_u8(src1), vld1_u8(src1 + src0_stride)); + + uint8x16_t diff = vshrq_n_u8(vabdq_u8(s0, s1), DIFF_FACTOR_LOG2); + uint8x16_t m; + if (inverse) { + m = vqsubq_u8(vdupq_n_u8(64 - 38), diff); // Saturating to 0 + } else { + m = vminq_u8(vaddq_u8(diff, vdupq_n_u8(38)), vdupq_n_u8(64)); + } + + vst1q_u8(mask, m); + + mask += 16; + src0 += 2 * src0_stride; + src1 += 2 * src1_stride; + i += 2; + } while (i < h); + } else if (w == 4) { + int i = 0; + do { + uint8x16_t s0 = load_unaligned_u8q(src0, src0_stride); + uint8x16_t s1 = load_unaligned_u8q(src1, src1_stride); + + uint8x16_t diff = vshrq_n_u8(vabdq_u8(s0, s1), DIFF_FACTOR_LOG2); + uint8x16_t m; + if (inverse) { + m = vqsubq_u8(vdupq_n_u8(64 - 38), diff); // Saturating to 0 + } else { + m = vminq_u8(vaddq_u8(diff, vdupq_n_u8(38)), vdupq_n_u8(64)); + } + + vst1q_u8(mask, m); + + mask += 16; + src0 += 4 * src0_stride; + src1 += 4 * src1_stride; + i += 4; + } while (i < h); + } +} + +void av1_build_compound_diffwtd_mask_neon(uint8_t *mask, + DIFFWTD_MASK_TYPE mask_type, + const uint8_t *src0, int src0_stride, + const uint8_t *src1, int src1_stride, + int h, int w) { + assert(h % 4 == 0); + assert(w % 4 == 0); + assert(mask_type == DIFFWTD_38_INV || mask_type == DIFFWTD_38); + + if (mask_type == DIFFWTD_38) { + diffwtd_mask_neon(mask, /*inverse=*/false, src0, src0_stride, src1, + src1_stride, h, w); + } else { // mask_type == DIFFWTD_38_INV + diffwtd_mask_neon(mask, /*inverse=*/true, src0, src0_stride, src1, + src1_stride, h, w); + } +} diff --git a/third_party/aom/av1/common/arm/reconintra_neon.c b/third_party/aom/av1/common/arm/reconintra_neon.c new file mode 100644 index 0000000000..3db39987a6 --- /dev/null +++ b/third_party/aom/av1/common/arm/reconintra_neon.c @@ -0,0 +1,392 @@ +/* + * Copyright (c) 2020, Alliance for Open Media. All rights reserved + * + * This source code is subject to the terms of the BSD 2 Clause License and + * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License + * was not distributed with this source code in the LICENSE file, you can + * obtain it at www.aomedia.org/license/software. If the Alliance for Open + * Media Patent License 1.0 was not distributed with this source code in the + * PATENTS file, you can obtain it at www.aomedia.org/license/patent. + */ + +#include <arm_neon.h> +#include <assert.h> + +#include "config/aom_config.h" + +#include "aom/aom_integer.h" +#include "aom_dsp/arm/mem_neon.h" +#include "aom_dsp/arm/sum_neon.h" + +#define MAX_UPSAMPLE_SZ 16 + +// These kernels are a transposed version of those defined in reconintra.c, +// with the absolute value of the negatives taken in the top row. 
+DECLARE_ALIGNED(16, const uint8_t, + av1_filter_intra_taps_neon[FILTER_INTRA_MODES][7][8]) = { + // clang-format off + { + { 6, 5, 3, 3, 4, 3, 3, 3 }, + { 10, 2, 1, 1, 6, 2, 2, 1 }, + { 0, 10, 1, 1, 0, 6, 2, 2 }, + { 0, 0, 10, 2, 0, 0, 6, 2 }, + { 0, 0, 0, 10, 0, 0, 0, 6 }, + { 12, 9, 7, 5, 2, 2, 2, 3 }, + { 0, 0, 0, 0, 12, 9, 7, 5 } + }, + { + { 10, 6, 4, 2, 10, 6, 4, 2 }, + { 16, 0, 0, 0, 16, 0, 0, 0 }, + { 0, 16, 0, 0, 0, 16, 0, 0 }, + { 0, 0, 16, 0, 0, 0, 16, 0 }, + { 0, 0, 0, 16, 0, 0, 0, 16 }, + { 10, 6, 4, 2, 0, 0, 0, 0 }, + { 0, 0, 0, 0, 10, 6, 4, 2 } + }, + { + { 8, 8, 8, 8, 4, 4, 4, 4 }, + { 8, 0, 0, 0, 4, 0, 0, 0 }, + { 0, 8, 0, 0, 0, 4, 0, 0 }, + { 0, 0, 8, 0, 0, 0, 4, 0 }, + { 0, 0, 0, 8, 0, 0, 0, 4 }, + { 16, 16, 16, 16, 0, 0, 0, 0 }, + { 0, 0, 0, 0, 16, 16, 16, 16 } + }, + { + { 2, 1, 1, 0, 1, 1, 1, 1 }, + { 8, 3, 2, 1, 4, 3, 2, 2 }, + { 0, 8, 3, 2, 0, 4, 3, 2 }, + { 0, 0, 8, 3, 0, 0, 4, 3 }, + { 0, 0, 0, 8, 0, 0, 0, 4 }, + { 10, 6, 4, 2, 3, 4, 4, 3 }, + { 0, 0, 0, 0, 10, 6, 4, 3 } + }, + { + { 12, 10, 9, 8, 10, 9, 8, 7 }, + { 14, 0, 0, 0, 12, 1, 0, 0 }, + { 0, 14, 0, 0, 0, 12, 0, 0 }, + { 0, 0, 14, 0, 0, 0, 12, 1 }, + { 0, 0, 0, 14, 0, 0, 0, 12 }, + { 14, 12, 11, 10, 0, 0, 1, 1 }, + { 0, 0, 0, 0, 14, 12, 11, 9 } + } + // clang-format on +}; + +#define FILTER_INTRA_SCALE_BITS 4 + +void av1_filter_intra_predictor_neon(uint8_t *dst, ptrdiff_t stride, + TX_SIZE tx_size, const uint8_t *above, + const uint8_t *left, int mode) { + const int width = tx_size_wide[tx_size]; + const int height = tx_size_high[tx_size]; + assert(width <= 32 && height <= 32); + + const uint8x8_t f0 = vld1_u8(av1_filter_intra_taps_neon[mode][0]); + const uint8x8_t f1 = vld1_u8(av1_filter_intra_taps_neon[mode][1]); + const uint8x8_t f2 = vld1_u8(av1_filter_intra_taps_neon[mode][2]); + const uint8x8_t f3 = vld1_u8(av1_filter_intra_taps_neon[mode][3]); + const uint8x8_t f4 = vld1_u8(av1_filter_intra_taps_neon[mode][4]); + const uint8x8_t f5 = vld1_u8(av1_filter_intra_taps_neon[mode][5]); + const uint8x8_t f6 = vld1_u8(av1_filter_intra_taps_neon[mode][6]); + + uint8_t buffer[33][33]; + // Populate the top row in the scratch buffer with data from above. + memcpy(buffer[0], &above[-1], (width + 1) * sizeof(uint8_t)); + // Populate the first column in the scratch buffer with data from the left. + int r = 0; + do { + buffer[r + 1][0] = left[r]; + } while (++r < height); + + // Computing 4 cols per iteration (instead of 8) for 8x<h> blocks is faster. + if (width <= 8) { + r = 1; + do { + int c = 1; + uint8x8_t s0 = vld1_dup_u8(&buffer[r - 1][c - 1]); + uint8x8_t s5 = vld1_dup_u8(&buffer[r + 0][c - 1]); + uint8x8_t s6 = vld1_dup_u8(&buffer[r + 1][c - 1]); + + do { + uint8x8_t s1234 = load_u8_4x1(&buffer[r - 1][c - 1] + 1); + uint8x8_t s1 = vdup_lane_u8(s1234, 0); + uint8x8_t s2 = vdup_lane_u8(s1234, 1); + uint8x8_t s3 = vdup_lane_u8(s1234, 2); + uint8x8_t s4 = vdup_lane_u8(s1234, 3); + + uint16x8_t sum = vmull_u8(s1, f1); + // First row of each filter has all negative values so subtract. + sum = vmlsl_u8(sum, s0, f0); + sum = vmlal_u8(sum, s2, f2); + sum = vmlal_u8(sum, s3, f3); + sum = vmlal_u8(sum, s4, f4); + sum = vmlal_u8(sum, s5, f5); + sum = vmlal_u8(sum, s6, f6); + + uint8x8_t res = + vqrshrun_n_s16(vreinterpretq_s16_u16(sum), FILTER_INTRA_SCALE_BITS); + + // Store buffer[r + 0][c] and buffer[r + 1][c]. 
+ store_u8x4_strided_x2(&buffer[r][c], 33, res); + + store_u8x4_strided_x2(dst + (r - 1) * stride + c - 1, stride, res); + + s0 = s4; + s5 = vdup_lane_u8(res, 3); + s6 = vdup_lane_u8(res, 7); + c += 4; + } while (c < width + 1); + + r += 2; + } while (r < height + 1); + } else { + r = 1; + do { + int c = 1; + uint8x8_t s0_lo = vld1_dup_u8(&buffer[r - 1][c - 1]); + uint8x8_t s5_lo = vld1_dup_u8(&buffer[r + 0][c - 1]); + uint8x8_t s6_lo = vld1_dup_u8(&buffer[r + 1][c - 1]); + + do { + uint8x8_t s1234 = vld1_u8(&buffer[r - 1][c - 1] + 1); + uint8x8_t s1_lo = vdup_lane_u8(s1234, 0); + uint8x8_t s2_lo = vdup_lane_u8(s1234, 1); + uint8x8_t s3_lo = vdup_lane_u8(s1234, 2); + uint8x8_t s4_lo = vdup_lane_u8(s1234, 3); + + uint16x8_t sum_lo = vmull_u8(s1_lo, f1); + // First row of each filter has all negative values so subtract. + sum_lo = vmlsl_u8(sum_lo, s0_lo, f0); + sum_lo = vmlal_u8(sum_lo, s2_lo, f2); + sum_lo = vmlal_u8(sum_lo, s3_lo, f3); + sum_lo = vmlal_u8(sum_lo, s4_lo, f4); + sum_lo = vmlal_u8(sum_lo, s5_lo, f5); + sum_lo = vmlal_u8(sum_lo, s6_lo, f6); + + uint8x8_t res_lo = vqrshrun_n_s16(vreinterpretq_s16_u16(sum_lo), + FILTER_INTRA_SCALE_BITS); + + uint8x8_t s0_hi = s4_lo; + uint8x8_t s1_hi = vdup_lane_u8(s1234, 4); + uint8x8_t s2_hi = vdup_lane_u8(s1234, 5); + uint8x8_t s3_hi = vdup_lane_u8(s1234, 6); + uint8x8_t s4_hi = vdup_lane_u8(s1234, 7); + uint8x8_t s5_hi = vdup_lane_u8(res_lo, 3); + uint8x8_t s6_hi = vdup_lane_u8(res_lo, 7); + + uint16x8_t sum_hi = vmull_u8(s1_hi, f1); + // First row of each filter has all negative values so subtract. + sum_hi = vmlsl_u8(sum_hi, s0_hi, f0); + sum_hi = vmlal_u8(sum_hi, s2_hi, f2); + sum_hi = vmlal_u8(sum_hi, s3_hi, f3); + sum_hi = vmlal_u8(sum_hi, s4_hi, f4); + sum_hi = vmlal_u8(sum_hi, s5_hi, f5); + sum_hi = vmlal_u8(sum_hi, s6_hi, f6); + + uint8x8_t res_hi = vqrshrun_n_s16(vreinterpretq_s16_u16(sum_hi), + FILTER_INTRA_SCALE_BITS); + + uint32x2x2_t res = + vzip_u32(vreinterpret_u32_u8(res_lo), vreinterpret_u32_u8(res_hi)); + + vst1_u8(&buffer[r + 0][c], vreinterpret_u8_u32(res.val[0])); + vst1_u8(&buffer[r + 1][c], vreinterpret_u8_u32(res.val[1])); + + vst1_u8(dst + (r - 1) * stride + c - 1, + vreinterpret_u8_u32(res.val[0])); + vst1_u8(dst + (r + 0) * stride + c - 1, + vreinterpret_u8_u32(res.val[1])); + + s0_lo = s4_hi; + s5_lo = vdup_lane_u8(res_hi, 3); + s6_lo = vdup_lane_u8(res_hi, 7); + c += 8; + } while (c < width + 1); + + r += 2; + } while (r < height + 1); + } +} + +void av1_filter_intra_edge_neon(uint8_t *p, int sz, int strength) { + if (!strength) return; + assert(sz >= 0 && sz <= 129); + + uint8_t edge[160]; // Max value of sz + enough padding for vector accesses. + memcpy(edge + 1, p, sz * sizeof(*p)); + + // Populate extra space appropriately. + edge[0] = edge[1]; + edge[sz + 1] = edge[sz]; + edge[sz + 2] = edge[sz]; + + // Don't overwrite first pixel. + uint8_t *dst = p + 1; + sz--; + + if (strength == 1) { // Filter: {4, 8, 4}. + const uint8_t *src = edge + 1; + + while (sz >= 8) { + uint8x8_t s0 = vld1_u8(src); + uint8x8_t s1 = vld1_u8(src + 1); + uint8x8_t s2 = vld1_u8(src + 2); + + // Make use of the identity: + // (4*a + 8*b + 4*c) >> 4 == (a + (b << 1) + c) >> 2 + uint16x8_t t0 = vaddl_u8(s0, s2); + uint16x8_t t1 = vaddl_u8(s1, s1); + uint16x8_t sum = vaddq_u16(t0, t1); + uint8x8_t res = vrshrn_n_u16(sum, 2); + + vst1_u8(dst, res); + + src += 8; + dst += 8; + sz -= 8; + } + + if (sz > 0) { // Handle sz < 8 to avoid modifying out-of-bounds values. 
+ uint8x8_t s0 = vld1_u8(src); + uint8x8_t s1 = vld1_u8(src + 1); + uint8x8_t s2 = vld1_u8(src + 2); + + uint16x8_t t0 = vaddl_u8(s0, s2); + uint16x8_t t1 = vaddl_u8(s1, s1); + uint16x8_t sum = vaddq_u16(t0, t1); + uint8x8_t res = vrshrn_n_u16(sum, 2); + + // Mask off out-of-bounds indices. + uint8x8_t current_dst = vld1_u8(dst); + uint8x8_t mask = vcgt_u8(vdup_n_u8(sz), vcreate_u8(0x0706050403020100)); + res = vbsl_u8(mask, res, current_dst); + + vst1_u8(dst, res); + } + } else if (strength == 2) { // Filter: {5, 6, 5}. + const uint8_t *src = edge + 1; + + const uint8x8x3_t filter = { { vdup_n_u8(5), vdup_n_u8(6), vdup_n_u8(5) } }; + + while (sz >= 8) { + uint8x8_t s0 = vld1_u8(src); + uint8x8_t s1 = vld1_u8(src + 1); + uint8x8_t s2 = vld1_u8(src + 2); + + uint16x8_t accum = vmull_u8(s0, filter.val[0]); + accum = vmlal_u8(accum, s1, filter.val[1]); + accum = vmlal_u8(accum, s2, filter.val[2]); + uint8x8_t res = vrshrn_n_u16(accum, 4); + + vst1_u8(dst, res); + + src += 8; + dst += 8; + sz -= 8; + } + + if (sz > 0) { // Handle sz < 8 to avoid modifying out-of-bounds values. + uint8x8_t s0 = vld1_u8(src); + uint8x8_t s1 = vld1_u8(src + 1); + uint8x8_t s2 = vld1_u8(src + 2); + + uint16x8_t accum = vmull_u8(s0, filter.val[0]); + accum = vmlal_u8(accum, s1, filter.val[1]); + accum = vmlal_u8(accum, s2, filter.val[2]); + uint8x8_t res = vrshrn_n_u16(accum, 4); + + // Mask off out-of-bounds indices. + uint8x8_t current_dst = vld1_u8(dst); + uint8x8_t mask = vcgt_u8(vdup_n_u8(sz), vcreate_u8(0x0706050403020100)); + res = vbsl_u8(mask, res, current_dst); + + vst1_u8(dst, res); + } + } else { // Filter {2, 4, 4, 4, 2}. + const uint8_t *src = edge; + + while (sz >= 8) { + uint8x8_t s0 = vld1_u8(src); + uint8x8_t s1 = vld1_u8(src + 1); + uint8x8_t s2 = vld1_u8(src + 2); + uint8x8_t s3 = vld1_u8(src + 3); + uint8x8_t s4 = vld1_u8(src + 4); + + // Make use of the identity: + // (2*a + 4*b + 4*c + 4*d + 2*e) >> 4 == (a + ((b + c + d) << 1) + e) >> 3 + uint16x8_t t0 = vaddl_u8(s0, s4); + uint16x8_t t1 = vaddl_u8(s1, s2); + t1 = vaddw_u8(t1, s3); + t1 = vaddq_u16(t1, t1); + uint16x8_t sum = vaddq_u16(t0, t1); + uint8x8_t res = vrshrn_n_u16(sum, 3); + + vst1_u8(dst, res); + + src += 8; + dst += 8; + sz -= 8; + } + + if (sz > 0) { // Handle sz < 8 to avoid modifying out-of-bounds values. + uint8x8_t s0 = vld1_u8(src); + uint8x8_t s1 = vld1_u8(src + 1); + uint8x8_t s2 = vld1_u8(src + 2); + uint8x8_t s3 = vld1_u8(src + 3); + uint8x8_t s4 = vld1_u8(src + 4); + + uint16x8_t t0 = vaddl_u8(s0, s4); + uint16x8_t t1 = vaddl_u8(s1, s2); + t1 = vaddw_u8(t1, s3); + t1 = vaddq_u16(t1, t1); + uint16x8_t sum = vaddq_u16(t0, t1); + uint8x8_t res = vrshrn_n_u16(sum, 3); + + // Mask off out-of-bounds indices. + uint8x8_t current_dst = vld1_u8(dst); + uint8x8_t mask = vcgt_u8(vdup_n_u8(sz), vcreate_u8(0x0706050403020100)); + res = vbsl_u8(mask, res, current_dst); + + vst1_u8(dst, res); + } + } +} + +void av1_upsample_intra_edge_neon(uint8_t *p, int sz) { + if (!sz) return; + + assert(sz <= MAX_UPSAMPLE_SZ); + + uint8_t edge[MAX_UPSAMPLE_SZ + 3]; + const uint8_t *src = edge; + + // Copy p[-1..(sz-1)] and pad out both ends. 
+ edge[0] = p[-1]; + edge[1] = p[-1]; + memcpy(edge + 2, p, sz); + edge[sz + 2] = p[sz - 1]; + p[-2] = p[-1]; + + uint8_t *dst = p - 1; + + do { + uint8x8_t s0 = vld1_u8(src); + uint8x8_t s1 = vld1_u8(src + 1); + uint8x8_t s2 = vld1_u8(src + 2); + uint8x8_t s3 = vld1_u8(src + 3); + + int16x8_t t0 = vreinterpretq_s16_u16(vaddl_u8(s0, s3)); + int16x8_t t1 = vreinterpretq_s16_u16(vaddl_u8(s1, s2)); + t1 = vmulq_n_s16(t1, 9); + t1 = vsubq_s16(t1, t0); + + uint8x8x2_t res = { { vqrshrun_n_s16(t1, 4), s2 } }; + + vst2_u8(dst, res); + + src += 8; + dst += 16; + sz -= 8; + } while (sz > 0); +} diff --git a/third_party/aom/av1/common/arm/resize_neon.c b/third_party/aom/av1/common/arm/resize_neon.c new file mode 100644 index 0000000000..b00ebd1fc2 --- /dev/null +++ b/third_party/aom/av1/common/arm/resize_neon.c @@ -0,0 +1,1178 @@ +/* + * + * Copyright (c) 2020, Alliance for Open Media. All rights reserved + * + * This source code is subject to the terms of the BSD 2 Clause License and + * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License + * was not distributed with this source code in the LICENSE file, you can + * obtain it at www.aomedia.org/license/software. If the Alliance for Open + * Media Patent License 1.0 was not distributed with this source code in the + * PATENTS file, you can obtain it at www.aomedia.org/license/patent. + */ +#include <arm_neon.h> +#include <assert.h> + +#include "aom_dsp/arm/mem_neon.h" +#include "aom_dsp/arm/transpose_neon.h" +#include "av1/common/resize.h" +#include "config/av1_rtcd.h" +#include "config/aom_scale_rtcd.h" + +static INLINE int16x4_t convolve8_4(const int16x4_t s0, const int16x4_t s1, + const int16x4_t s2, const int16x4_t s3, + const int16x4_t s4, const int16x4_t s5, + const int16x4_t s6, const int16x4_t s7, + const int16x8_t filter) { + const int16x4_t filter_lo = vget_low_s16(filter); + const int16x4_t filter_hi = vget_high_s16(filter); + + int16x4_t sum = vmul_lane_s16(s0, filter_lo, 0); + sum = vmla_lane_s16(sum, s1, filter_lo, 1); + sum = vmla_lane_s16(sum, s2, filter_lo, 2); + sum = vmla_lane_s16(sum, s5, filter_hi, 1); + sum = vmla_lane_s16(sum, s6, filter_hi, 2); + sum = vmla_lane_s16(sum, s7, filter_hi, 3); + sum = vqadd_s16(sum, vmul_lane_s16(s3, filter_lo, 3)); + sum = vqadd_s16(sum, vmul_lane_s16(s4, filter_hi, 0)); + return sum; +} + +static INLINE uint8x8_t convolve8_8(const int16x8_t s0, const int16x8_t s1, + const int16x8_t s2, const int16x8_t s3, + const int16x8_t s4, const int16x8_t s5, + const int16x8_t s6, const int16x8_t s7, + const int16x8_t filter) { + const int16x4_t filter_lo = vget_low_s16(filter); + const int16x4_t filter_hi = vget_high_s16(filter); + + int16x8_t sum = vmulq_lane_s16(s0, filter_lo, 0); + sum = vmlaq_lane_s16(sum, s1, filter_lo, 1); + sum = vmlaq_lane_s16(sum, s2, filter_lo, 2); + sum = vmlaq_lane_s16(sum, s5, filter_hi, 1); + sum = vmlaq_lane_s16(sum, s6, filter_hi, 2); + sum = vmlaq_lane_s16(sum, s7, filter_hi, 3); + sum = vqaddq_s16(sum, vmulq_lane_s16(s3, filter_lo, 3)); + sum = vqaddq_s16(sum, vmulq_lane_s16(s4, filter_hi, 0)); + return vqrshrun_n_s16(sum, 7); +} + +static INLINE uint8x8_t scale_filter_8(const uint8x8_t *const s, + const int16x8_t filter) { + int16x8_t ss0 = vreinterpretq_s16_u16(vmovl_u8(s[0])); + int16x8_t ss1 = vreinterpretq_s16_u16(vmovl_u8(s[1])); + int16x8_t ss2 = vreinterpretq_s16_u16(vmovl_u8(s[2])); + int16x8_t ss3 = vreinterpretq_s16_u16(vmovl_u8(s[3])); + int16x8_t ss4 = vreinterpretq_s16_u16(vmovl_u8(s[4])); + int16x8_t ss5 = 
vreinterpretq_s16_u16(vmovl_u8(s[5])); + int16x8_t ss6 = vreinterpretq_s16_u16(vmovl_u8(s[6])); + int16x8_t ss7 = vreinterpretq_s16_u16(vmovl_u8(s[7])); + + return convolve8_8(ss0, ss1, ss2, ss3, ss4, ss5, ss6, ss7, filter); +} + +static INLINE void scale_plane_2_to_1_phase_0(const uint8_t *src, + const int src_stride, + uint8_t *dst, + const int dst_stride, const int w, + const int h) { + const int max_width = (w + 15) & ~15; + int y = h; + + assert(w && h); + + do { + int x = max_width; + do { + const uint8x16x2_t s = vld2q_u8(src); + vst1q_u8(dst, s.val[0]); + src += 32; + dst += 16; + x -= 16; + } while (x); + src += 2 * (src_stride - max_width); + dst += dst_stride - max_width; + } while (--y); +} + +static INLINE void scale_plane_4_to_1_phase_0(const uint8_t *src, + const int src_stride, + uint8_t *dst, + const int dst_stride, const int w, + const int h) { + const int max_width = (w + 15) & ~15; + int y = h; + + assert(w && h); + + do { + int x = max_width; + do { + const uint8x16x4_t s = vld4q_u8(src); + vst1q_u8(dst, s.val[0]); + src += 64; + dst += 16; + x -= 16; + } while (x); + src += 4 * (src_stride - max_width); + dst += dst_stride - max_width; + } while (--y); +} + +static INLINE void scale_plane_bilinear_kernel( + const uint8x16_t in0, const uint8x16_t in1, const uint8x16_t in2, + const uint8x16_t in3, const uint8x8_t coef0, const uint8x8_t coef1, + uint8_t *const dst) { + const uint16x8_t h0 = vmull_u8(vget_low_u8(in0), coef0); + const uint16x8_t h1 = vmull_u8(vget_high_u8(in0), coef0); + const uint16x8_t h2 = vmull_u8(vget_low_u8(in2), coef0); + const uint16x8_t h3 = vmull_u8(vget_high_u8(in2), coef0); + const uint16x8_t h4 = vmlal_u8(h0, vget_low_u8(in1), coef1); + const uint16x8_t h5 = vmlal_u8(h1, vget_high_u8(in1), coef1); + const uint16x8_t h6 = vmlal_u8(h2, vget_low_u8(in3), coef1); + const uint16x8_t h7 = vmlal_u8(h3, vget_high_u8(in3), coef1); + + const uint8x8_t hor0 = vrshrn_n_u16(h4, 7); // temp: 00 01 02 03 04 05 06 07 + const uint8x8_t hor1 = vrshrn_n_u16(h5, 7); // temp: 08 09 0A 0B 0C 0D 0E 0F + const uint8x8_t hor2 = vrshrn_n_u16(h6, 7); // temp: 10 11 12 13 14 15 16 17 + const uint8x8_t hor3 = vrshrn_n_u16(h7, 7); // temp: 18 19 1A 1B 1C 1D 1E 1F + const uint16x8_t v0 = vmull_u8(hor0, coef0); + const uint16x8_t v1 = vmull_u8(hor1, coef0); + const uint16x8_t v2 = vmlal_u8(v0, hor2, coef1); + const uint16x8_t v3 = vmlal_u8(v1, hor3, coef1); + // dst: 0 1 2 3 4 5 6 7 8 9 A B C D E F + const uint8x16_t d = vcombine_u8(vrshrn_n_u16(v2, 7), vrshrn_n_u16(v3, 7)); + vst1q_u8(dst, d); +} + +static INLINE void scale_plane_2_to_1_bilinear( + const uint8_t *const src, const int src_stride, uint8_t *dst, + const int dst_stride, const int w, const int h, const int16_t c0, + const int16_t c1) { + const int max_width = (w + 15) & ~15; + const uint8_t *src0 = src; + const uint8_t *src1 = src + src_stride; + const uint8x8_t coef0 = vdup_n_u8(c0); + const uint8x8_t coef1 = vdup_n_u8(c1); + int y = h; + + assert(w && h); + + do { + int x = max_width; + do { + // 000 002 004 006 008 00A 00C 00E 010 012 014 016 018 01A 01C 01E + // 001 003 005 007 009 00B 00D 00F 011 013 015 017 019 01B 01D 01F + const uint8x16x2_t s0 = vld2q_u8(src0); + // 100 102 104 106 108 10A 10C 10E 110 112 114 116 118 11A 11C 11E + // 101 103 105 107 109 10B 10D 10F 111 113 115 117 119 11B 11D 11F + const uint8x16x2_t s1 = vld2q_u8(src1); + scale_plane_bilinear_kernel(s0.val[0], s0.val[1], s1.val[0], s1.val[1], + coef0, coef1, dst); + src0 += 32; + src1 += 32; + dst += 16; + x -= 16; + } while (x); + 
src0 += 2 * (src_stride - max_width); + src1 += 2 * (src_stride - max_width); + dst += dst_stride - max_width; + } while (--y); +} + +static INLINE void scale_plane_4_to_1_bilinear( + const uint8_t *const src, const int src_stride, uint8_t *dst, + const int dst_stride, const int w, const int h, const int16_t c0, + const int16_t c1) { + const int max_width = (w + 15) & ~15; + const uint8_t *src0 = src; + const uint8_t *src1 = src + src_stride; + const uint8x8_t coef0 = vdup_n_u8(c0); + const uint8x8_t coef1 = vdup_n_u8(c1); + int y = h; + + assert(w && h); + + do { + int x = max_width; + do { + // (*) -- useless + // 000 004 008 00C 010 014 018 01C 020 024 028 02C 030 034 038 03C + // 001 005 009 00D 011 015 019 01D 021 025 029 02D 031 035 039 03D + // 002 006 00A 00E 012 016 01A 01E 022 026 02A 02E 032 036 03A 03E (*) + // 003 007 00B 00F 013 017 01B 01F 023 027 02B 02F 033 037 03B 03F (*) + const uint8x16x4_t s0 = vld4q_u8(src0); + // 100 104 108 10C 110 114 118 11C 120 124 128 12C 130 134 138 13C + // 101 105 109 10D 111 115 119 11D 121 125 129 12D 131 135 139 13D + // 102 106 10A 10E 112 116 11A 11E 122 126 12A 12E 132 136 13A 13E (*) + // 103 107 10B 10F 113 117 11B 11F 123 127 12B 12F 133 137 13B 13F (*) + const uint8x16x4_t s1 = vld4q_u8(src1); + scale_plane_bilinear_kernel(s0.val[0], s0.val[1], s1.val[0], s1.val[1], + coef0, coef1, dst); + src0 += 64; + src1 += 64; + dst += 16; + x -= 16; + } while (x); + src0 += 4 * (src_stride - max_width); + src1 += 4 * (src_stride - max_width); + dst += dst_stride - max_width; + } while (--y); +} + +static void scale_plane_2_to_1_general(const uint8_t *src, const int src_stride, + uint8_t *dst, const int dst_stride, + const int w, const int h, + const int16_t *const coef, + uint8_t *const temp_buffer) { + const int width_hor = (w + 3) & ~3; + const int width_ver = (w + 7) & ~7; + const int height_hor = (2 * h + SUBPEL_TAPS - 2 + 7) & ~7; + const int height_ver = (h + 3) & ~3; + const int16x8_t filters = vld1q_s16(coef); + int x, y = height_hor; + uint8_t *t = temp_buffer; + uint8x8_t s[14], d[4]; + + assert(w && h); + + src -= (SUBPEL_TAPS / 2 - 1) * src_stride + SUBPEL_TAPS / 2 + 1; + + // horizontal 4x8 + // Note: processing 4x8 is about 20% faster than processing row by row using + // vld4_u8(). 
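+  // Explanatory note (not part of the original patch): the horizontal pass
+  // below loads 8 source rows, transposes them so each vector holds one
+  // source column, applies the 8-tap filter to every other column (one
+  // output per two input columns), then transposes back and writes 4 output
+  // columns x 8 rows into temp_buffer. The vertical pass afterwards runs the
+  // same 8-tap filter down the temp_buffer columns, producing one output row
+  // per two intermediate rows.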
+ do { + load_u8_8x8(src + 2, src_stride, &s[0], &s[1], &s[2], &s[3], &s[4], &s[5], + &s[6], &s[7]); + transpose_elems_inplace_u8_8x8(&s[0], &s[1], &s[2], &s[3], &s[4], &s[5], + &s[6], &s[7]); + x = width_hor; + + do { + src += 8; + load_u8_8x8(src, src_stride, &s[6], &s[7], &s[8], &s[9], &s[10], &s[11], + &s[12], &s[13]); + transpose_elems_inplace_u8_8x8(&s[6], &s[7], &s[8], &s[9], &s[10], &s[11], + &s[12], &s[13]); + + d[0] = scale_filter_8(&s[0], filters); // 00 10 20 30 40 50 60 70 + d[1] = scale_filter_8(&s[2], filters); // 01 11 21 31 41 51 61 71 + d[2] = scale_filter_8(&s[4], filters); // 02 12 22 32 42 52 62 72 + d[3] = scale_filter_8(&s[6], filters); // 03 13 23 33 43 53 63 73 + // 00 01 02 03 40 41 42 43 + // 10 11 12 13 50 51 52 53 + // 20 21 22 23 60 61 62 63 + // 30 31 32 33 70 71 72 73 + transpose_elems_inplace_u8_8x4(&d[0], &d[1], &d[2], &d[3]); + vst1_lane_u32((uint32_t *)(t + 0 * width_hor), vreinterpret_u32_u8(d[0]), + 0); + vst1_lane_u32((uint32_t *)(t + 1 * width_hor), vreinterpret_u32_u8(d[1]), + 0); + vst1_lane_u32((uint32_t *)(t + 2 * width_hor), vreinterpret_u32_u8(d[2]), + 0); + vst1_lane_u32((uint32_t *)(t + 3 * width_hor), vreinterpret_u32_u8(d[3]), + 0); + vst1_lane_u32((uint32_t *)(t + 4 * width_hor), vreinterpret_u32_u8(d[0]), + 1); + vst1_lane_u32((uint32_t *)(t + 5 * width_hor), vreinterpret_u32_u8(d[1]), + 1); + vst1_lane_u32((uint32_t *)(t + 6 * width_hor), vreinterpret_u32_u8(d[2]), + 1); + vst1_lane_u32((uint32_t *)(t + 7 * width_hor), vreinterpret_u32_u8(d[3]), + 1); + + s[0] = s[8]; + s[1] = s[9]; + s[2] = s[10]; + s[3] = s[11]; + s[4] = s[12]; + s[5] = s[13]; + + t += 4; + x -= 4; + } while (x); + src += 8 * src_stride - 2 * width_hor; + t += 7 * width_hor; + y -= 8; + } while (y); + + // vertical 8x4 + x = width_ver; + t = temp_buffer; + do { + load_u8_8x8(t, width_hor, &s[0], &s[1], &s[2], &s[3], &s[4], &s[5], &s[6], + &s[7]); + t += 6 * width_hor; + y = height_ver; + + do { + load_u8_8x8(t, width_hor, &s[6], &s[7], &s[8], &s[9], &s[10], &s[11], + &s[12], &s[13]); + t += 8 * width_hor; + + d[0] = scale_filter_8(&s[0], filters); // 00 01 02 03 04 05 06 07 + d[1] = scale_filter_8(&s[2], filters); // 10 11 12 13 14 15 16 17 + d[2] = scale_filter_8(&s[4], filters); // 20 21 22 23 24 25 26 27 + d[3] = scale_filter_8(&s[6], filters); // 30 31 32 33 34 35 36 37 + vst1_u8(dst + 0 * dst_stride, d[0]); + vst1_u8(dst + 1 * dst_stride, d[1]); + vst1_u8(dst + 2 * dst_stride, d[2]); + vst1_u8(dst + 3 * dst_stride, d[3]); + + s[0] = s[8]; + s[1] = s[9]; + s[2] = s[10]; + s[3] = s[11]; + s[4] = s[12]; + s[5] = s[13]; + + dst += 4 * dst_stride; + y -= 4; + } while (y); + t -= width_hor * (2 * height_ver + 6); + t += 8; + dst -= height_ver * dst_stride; + dst += 8; + x -= 8; + } while (x); +} + +static void scale_plane_4_to_1_general(const uint8_t *src, const int src_stride, + uint8_t *dst, const int dst_stride, + const int w, const int h, + const int16_t *const coef, + uint8_t *const temp_buffer) { + const int width_hor = (w + 1) & ~1; + const int width_ver = (w + 7) & ~7; + const int height_hor = (4 * h + SUBPEL_TAPS - 2 + 7) & ~7; + const int height_ver = (h + 1) & ~1; + const int16x8_t filters = vld1q_s16(coef); + int x, y = height_hor; + uint8_t *t = temp_buffer; + uint8x8_t s[12], d[2]; + + assert(w && h); + + src -= (SUBPEL_TAPS / 2 - 1) * src_stride + SUBPEL_TAPS / 2 + 3; + + // horizontal 2x8 + // Note: processing 2x8 is about 20% faster than processing row by row using + // vld4_u8(). 
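+  // Explanatory note (not part of the original patch): same
+  // transpose-filter-transpose scheme as the 2-to-1 path above, but only one
+  // column in four is kept: each pass over 8 source columns produces the two
+  // outputs d[0] (from s[0..7]) and d[1] (from s[4..11]), so t advances by 2.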
+ do { + load_u8_8x8(src + 4, src_stride, &s[0], &s[1], &s[2], &s[3], &s[4], &s[5], + &s[6], &s[7]); + transpose_elems_u8_4x8(s[0], s[1], s[2], s[3], s[4], s[5], s[6], s[7], + &s[0], &s[1], &s[2], &s[3]); + x = width_hor; + + do { + uint8x8x2_t dd; + src += 8; + load_u8_8x8(src, src_stride, &s[4], &s[5], &s[6], &s[7], &s[8], &s[9], + &s[10], &s[11]); + transpose_elems_inplace_u8_8x8(&s[4], &s[5], &s[6], &s[7], &s[8], &s[9], + &s[10], &s[11]); + + d[0] = scale_filter_8(&s[0], filters); // 00 10 20 30 40 50 60 70 + d[1] = scale_filter_8(&s[4], filters); // 01 11 21 31 41 51 61 71 + // dd.val[0]: 00 01 20 21 40 41 60 61 + // dd.val[1]: 10 11 30 31 50 51 70 71 + dd = vtrn_u8(d[0], d[1]); + vst1_lane_u16((uint16_t *)(t + 0 * width_hor), + vreinterpret_u16_u8(dd.val[0]), 0); + vst1_lane_u16((uint16_t *)(t + 1 * width_hor), + vreinterpret_u16_u8(dd.val[1]), 0); + vst1_lane_u16((uint16_t *)(t + 2 * width_hor), + vreinterpret_u16_u8(dd.val[0]), 1); + vst1_lane_u16((uint16_t *)(t + 3 * width_hor), + vreinterpret_u16_u8(dd.val[1]), 1); + vst1_lane_u16((uint16_t *)(t + 4 * width_hor), + vreinterpret_u16_u8(dd.val[0]), 2); + vst1_lane_u16((uint16_t *)(t + 5 * width_hor), + vreinterpret_u16_u8(dd.val[1]), 2); + vst1_lane_u16((uint16_t *)(t + 6 * width_hor), + vreinterpret_u16_u8(dd.val[0]), 3); + vst1_lane_u16((uint16_t *)(t + 7 * width_hor), + vreinterpret_u16_u8(dd.val[1]), 3); + + s[0] = s[8]; + s[1] = s[9]; + s[2] = s[10]; + s[3] = s[11]; + + t += 2; + x -= 2; + } while (x); + src += 8 * src_stride - 4 * width_hor; + t += 7 * width_hor; + y -= 8; + } while (y); + + // vertical 8x2 + x = width_ver; + t = temp_buffer; + do { + load_u8_8x4(t, width_hor, &s[0], &s[1], &s[2], &s[3]); + t += 4 * width_hor; + y = height_ver; + + do { + load_u8_8x8(t, width_hor, &s[4], &s[5], &s[6], &s[7], &s[8], &s[9], + &s[10], &s[11]); + t += 8 * width_hor; + + d[0] = scale_filter_8(&s[0], filters); // 00 01 02 03 04 05 06 07 + d[1] = scale_filter_8(&s[4], filters); // 10 11 12 13 14 15 16 17 + vst1_u8(dst + 0 * dst_stride, d[0]); + vst1_u8(dst + 1 * dst_stride, d[1]); + + s[0] = s[8]; + s[1] = s[9]; + s[2] = s[10]; + s[3] = s[11]; + + dst += 2 * dst_stride; + y -= 2; + } while (y); + t -= width_hor * (4 * height_ver + 4); + t += 8; + dst -= height_ver * dst_stride; + dst += 8; + x -= 8; + } while (x); +} + +static INLINE uint8x8_t scale_filter_bilinear(const uint8x8_t *const s, + const uint8x8_t *const coef) { + const uint16x8_t h0 = vmull_u8(s[0], coef[0]); + const uint16x8_t h1 = vmlal_u8(h0, s[1], coef[1]); + + return vrshrn_n_u16(h1, 7); +} + +// Notes for 4 to 3 scaling: +// +// 1. 6 rows are calculated in each horizontal inner loop, so width_hor must be +// multiple of 6, and no less than w. +// +// 2. 8 rows are calculated in each vertical inner loop, so width_ver must be +// multiple of 8, and no less than w. +// +// 3. 8 columns are calculated in each horizontal inner loop for further +// vertical scaling, so height_hor must be multiple of 8, and no less than +// 4 * h / 3. +// +// 4. 6 columns are calculated in each vertical inner loop, so height_ver must +// be multiple of 6, and no less than h. +// +// 5. The physical location of the last row of the 4 to 3 scaled frame is +// decided by phase_scaler, and are always less than 1 pixel below the last row +// of the original image. 
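+// Illustrative helper (a sketch added for this note, not part of the original
+// patch): how the 4:3 scalers below pick their source window and filter
+// phase. In 1/16th-pel units the output step is step_q4 = 16 * 4 / 3 = 21, so
+// output k of each group of 3 sits at phase_scaler + k * 21; the integer part
+// indexes the source taps s[] and the fractional part selects the
+// interpolation kernel.
+static INLINE void scale_4_to_3_offsets_sketch(int phase_scaler, int k,
+                                               int *src_offset, int *phase) {
+  const int step_q4 = 16 * 4 / 3;  // 21
+  const int pos_q4 = phase_scaler + k * step_q4;
+  *src_offset = pos_q4 >> 4;       // whole-pixel offset into s[]
+  *phase = pos_q4 & SUBPEL_MASK;   // kernel index, e.g. interp_kernel[phase]
+}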
+static void scale_plane_4_to_3_bilinear(const uint8_t *src, + const int src_stride, uint8_t *dst, + const int dst_stride, const int w, + const int h, const int phase_scaler, + uint8_t *const temp_buffer) { + static const int step_q4 = 16 * 4 / 3; + const int width_hor = (w + 5) - ((w + 5) % 6); + const int stride_hor = width_hor + 2; // store 2 extra pixels + const int width_ver = (w + 7) & ~7; + // We only need 1 extra row below because there are only 2 bilinear + // coefficients. + const int height_hor = (4 * h / 3 + 1 + 7) & ~7; + const int height_ver = (h + 5) - ((h + 5) % 6); + int x, y = height_hor; + uint8_t *t = temp_buffer; + uint8x8_t s[9], d[8], c[6]; + const InterpKernel *interp_kernel = + (const InterpKernel *)av1_interp_filter_params_list[BILINEAR].filter_ptr; + assert(w && h); + + c[0] = vdup_n_u8((uint8_t)interp_kernel[phase_scaler][3]); + c[1] = vdup_n_u8((uint8_t)interp_kernel[phase_scaler][4]); + c[2] = vdup_n_u8( + (uint8_t)interp_kernel[(phase_scaler + 1 * step_q4) & SUBPEL_MASK][3]); + c[3] = vdup_n_u8( + (uint8_t)interp_kernel[(phase_scaler + 1 * step_q4) & SUBPEL_MASK][4]); + c[4] = vdup_n_u8( + (uint8_t)interp_kernel[(phase_scaler + 2 * step_q4) & SUBPEL_MASK][3]); + c[5] = vdup_n_u8( + (uint8_t)interp_kernel[(phase_scaler + 2 * step_q4) & SUBPEL_MASK][4]); + + d[6] = vdup_n_u8(0); + d[7] = vdup_n_u8(0); + + // horizontal 6x8 + do { + load_u8_8x8(src, src_stride, &s[0], &s[1], &s[2], &s[3], &s[4], &s[5], + &s[6], &s[7]); + src += 1; + transpose_elems_inplace_u8_8x8(&s[0], &s[1], &s[2], &s[3], &s[4], &s[5], + &s[6], &s[7]); + x = width_hor; + + do { + load_u8_8x8(src, src_stride, &s[1], &s[2], &s[3], &s[4], &s[5], &s[6], + &s[7], &s[8]); + src += 8; + transpose_elems_inplace_u8_8x8(&s[1], &s[2], &s[3], &s[4], &s[5], &s[6], + &s[7], &s[8]); + + // 00 10 20 30 40 50 60 70 + // 01 11 21 31 41 51 61 71 + // 02 12 22 32 42 52 62 72 + // 03 13 23 33 43 53 63 73 + // 04 14 24 34 44 54 64 74 + // 05 15 25 35 45 55 65 75 + d[0] = scale_filter_bilinear(&s[0], &c[0]); + d[1] = + scale_filter_bilinear(&s[(phase_scaler + 1 * step_q4) >> 4], &c[2]); + d[2] = + scale_filter_bilinear(&s[(phase_scaler + 2 * step_q4) >> 4], &c[4]); + d[3] = scale_filter_bilinear(&s[4], &c[0]); + d[4] = scale_filter_bilinear(&s[4 + ((phase_scaler + 1 * step_q4) >> 4)], + &c[2]); + d[5] = scale_filter_bilinear(&s[4 + ((phase_scaler + 2 * step_q4) >> 4)], + &c[4]); + + // 00 01 02 03 04 05 xx xx + // 10 11 12 13 14 15 xx xx + // 20 21 22 23 24 25 xx xx + // 30 31 32 33 34 35 xx xx + // 40 41 42 43 44 45 xx xx + // 50 51 52 53 54 55 xx xx + // 60 61 62 63 64 65 xx xx + // 70 71 72 73 74 75 xx xx + transpose_elems_inplace_u8_8x8(&d[0], &d[1], &d[2], &d[3], &d[4], &d[5], + &d[6], &d[7]); + // store 2 extra pixels + vst1_u8(t + 0 * stride_hor, d[0]); + vst1_u8(t + 1 * stride_hor, d[1]); + vst1_u8(t + 2 * stride_hor, d[2]); + vst1_u8(t + 3 * stride_hor, d[3]); + vst1_u8(t + 4 * stride_hor, d[4]); + vst1_u8(t + 5 * stride_hor, d[5]); + vst1_u8(t + 6 * stride_hor, d[6]); + vst1_u8(t + 7 * stride_hor, d[7]); + + s[0] = s[8]; + + t += 6; + x -= 6; + } while (x); + src += 8 * src_stride - 4 * width_hor / 3 - 1; + t += 7 * stride_hor + 2; + y -= 8; + } while (y); + + // vertical 8x6 + x = width_ver; + t = temp_buffer; + do { + load_u8_8x8(t, stride_hor, &s[0], &s[1], &s[2], &s[3], &s[4], &s[5], &s[6], + &s[7]); + t += stride_hor; + y = height_ver; + + do { + load_u8_8x8(t, stride_hor, &s[1], &s[2], &s[3], &s[4], &s[5], &s[6], + &s[7], &s[8]); + t += 8 * stride_hor; + + d[0] = scale_filter_bilinear(&s[0], &c[0]); 
+ d[1] = + scale_filter_bilinear(&s[(phase_scaler + 1 * step_q4) >> 4], &c[2]); + d[2] = + scale_filter_bilinear(&s[(phase_scaler + 2 * step_q4) >> 4], &c[4]); + d[3] = scale_filter_bilinear(&s[4], &c[0]); + d[4] = scale_filter_bilinear(&s[4 + ((phase_scaler + 1 * step_q4) >> 4)], + &c[2]); + d[5] = scale_filter_bilinear(&s[4 + ((phase_scaler + 2 * step_q4) >> 4)], + &c[4]); + vst1_u8(dst + 0 * dst_stride, d[0]); + vst1_u8(dst + 1 * dst_stride, d[1]); + vst1_u8(dst + 2 * dst_stride, d[2]); + vst1_u8(dst + 3 * dst_stride, d[3]); + vst1_u8(dst + 4 * dst_stride, d[4]); + vst1_u8(dst + 5 * dst_stride, d[5]); + + s[0] = s[8]; + + dst += 6 * dst_stride; + y -= 6; + } while (y); + t -= stride_hor * (4 * height_ver / 3 + 1); + t += 8; + dst -= height_ver * dst_stride; + dst += 8; + x -= 8; + } while (x); +} + +static void scale_plane_4_to_3_general(const uint8_t *src, const int src_stride, + uint8_t *dst, const int dst_stride, + const int w, const int h, + const InterpKernel *const coef, + const int phase_scaler, + uint8_t *const temp_buffer) { + static const int step_q4 = 16 * 4 / 3; + const int width_hor = (w + 5) - ((w + 5) % 6); + const int stride_hor = width_hor + 2; // store 2 extra pixels + const int width_ver = (w + 7) & ~7; + // We need (SUBPEL_TAPS - 1) extra rows: (SUBPEL_TAPS / 2 - 1) extra rows + // above and (SUBPEL_TAPS / 2) extra rows below. + const int height_hor = (4 * h / 3 + SUBPEL_TAPS - 1 + 7) & ~7; + const int height_ver = (h + 5) - ((h + 5) % 6); + const int16x8_t filters0 = vld1q_s16( + (const int16_t *)&coef[(phase_scaler + 0 * step_q4) & SUBPEL_MASK]); + const int16x8_t filters1 = vld1q_s16( + (const int16_t *)&coef[(phase_scaler + 1 * step_q4) & SUBPEL_MASK]); + const int16x8_t filters2 = vld1q_s16( + (const int16_t *)&coef[(phase_scaler + 2 * step_q4) & SUBPEL_MASK]); + int x, y = height_hor; + uint8_t *t = temp_buffer; + uint8x8_t s[15], d[8]; + + assert(w && h); + + src -= (SUBPEL_TAPS / 2 - 1) * src_stride + SUBPEL_TAPS / 2; + d[6] = vdup_n_u8(0); + d[7] = vdup_n_u8(0); + + // horizontal 6x8 + do { + load_u8_8x8(src + 1, src_stride, &s[0], &s[1], &s[2], &s[3], &s[4], &s[5], + &s[6], &s[7]); + transpose_elems_inplace_u8_8x8(&s[0], &s[1], &s[2], &s[3], &s[4], &s[5], + &s[6], &s[7]); + x = width_hor; + + do { + src += 8; + load_u8_8x8(src, src_stride, &s[7], &s[8], &s[9], &s[10], &s[11], &s[12], + &s[13], &s[14]); + transpose_elems_inplace_u8_8x8(&s[7], &s[8], &s[9], &s[10], &s[11], + &s[12], &s[13], &s[14]); + + // 00 10 20 30 40 50 60 70 + // 01 11 21 31 41 51 61 71 + // 02 12 22 32 42 52 62 72 + // 03 13 23 33 43 53 63 73 + // 04 14 24 34 44 54 64 74 + // 05 15 25 35 45 55 65 75 + d[0] = scale_filter_8(&s[0], filters0); + d[1] = scale_filter_8(&s[(phase_scaler + 1 * step_q4) >> 4], filters1); + d[2] = scale_filter_8(&s[(phase_scaler + 2 * step_q4) >> 4], filters2); + d[3] = scale_filter_8(&s[4], filters0); + d[4] = + scale_filter_8(&s[4 + ((phase_scaler + 1 * step_q4) >> 4)], filters1); + d[5] = + scale_filter_8(&s[4 + ((phase_scaler + 2 * step_q4) >> 4)], filters2); + + // 00 01 02 03 04 05 xx xx + // 10 11 12 13 14 15 xx xx + // 20 21 22 23 24 25 xx xx + // 30 31 32 33 34 35 xx xx + // 40 41 42 43 44 45 xx xx + // 50 51 52 53 54 55 xx xx + // 60 61 62 63 64 65 xx xx + // 70 71 72 73 74 75 xx xx + transpose_elems_inplace_u8_8x8(&d[0], &d[1], &d[2], &d[3], &d[4], &d[5], + &d[6], &d[7]); + // store 2 extra pixels + vst1_u8(t + 0 * stride_hor, d[0]); + vst1_u8(t + 1 * stride_hor, d[1]); + vst1_u8(t + 2 * stride_hor, d[2]); + vst1_u8(t + 3 * stride_hor, d[3]); + 
vst1_u8(t + 4 * stride_hor, d[4]); + vst1_u8(t + 5 * stride_hor, d[5]); + vst1_u8(t + 6 * stride_hor, d[6]); + vst1_u8(t + 7 * stride_hor, d[7]); + + s[0] = s[8]; + s[1] = s[9]; + s[2] = s[10]; + s[3] = s[11]; + s[4] = s[12]; + s[5] = s[13]; + s[6] = s[14]; + + t += 6; + x -= 6; + } while (x); + src += 8 * src_stride - 4 * width_hor / 3; + t += 7 * stride_hor + 2; + y -= 8; + } while (y); + + // vertical 8x6 + x = width_ver; + t = temp_buffer; + do { + load_u8_8x8(t, stride_hor, &s[0], &s[1], &s[2], &s[3], &s[4], &s[5], &s[6], + &s[7]); + t += 7 * stride_hor; + y = height_ver; + + do { + load_u8_8x8(t, stride_hor, &s[7], &s[8], &s[9], &s[10], &s[11], &s[12], + &s[13], &s[14]); + t += 8 * stride_hor; + + d[0] = scale_filter_8(&s[0], filters0); + d[1] = scale_filter_8(&s[(phase_scaler + 1 * step_q4) >> 4], filters1); + d[2] = scale_filter_8(&s[(phase_scaler + 2 * step_q4) >> 4], filters2); + d[3] = scale_filter_8(&s[4], filters0); + d[4] = + scale_filter_8(&s[4 + ((phase_scaler + 1 * step_q4) >> 4)], filters1); + d[5] = + scale_filter_8(&s[4 + ((phase_scaler + 2 * step_q4) >> 4)], filters2); + vst1_u8(dst + 0 * dst_stride, d[0]); + vst1_u8(dst + 1 * dst_stride, d[1]); + vst1_u8(dst + 2 * dst_stride, d[2]); + vst1_u8(dst + 3 * dst_stride, d[3]); + vst1_u8(dst + 4 * dst_stride, d[4]); + vst1_u8(dst + 5 * dst_stride, d[5]); + + s[0] = s[8]; + s[1] = s[9]; + s[2] = s[10]; + s[3] = s[11]; + s[4] = s[12]; + s[5] = s[13]; + s[6] = s[14]; + + dst += 6 * dst_stride; + y -= 6; + } while (y); + t -= stride_hor * (4 * height_ver / 3 + 7); + t += 8; + dst -= height_ver * dst_stride; + dst += 8; + x -= 8; + } while (x); +} + +// There's SIMD optimizations for 1/4, 1/2 and 3/4 downscaling in NEON. +static INLINE bool has_normative_scaler_neon(const int src_width, + const int src_height, + const int dst_width, + const int dst_height) { + const bool has_normative_scaler = + (2 * dst_width == src_width && 2 * dst_height == src_height) || + (4 * dst_width == src_width && 4 * dst_height == src_height) || + (4 * dst_width == 3 * src_width && 4 * dst_height == 3 * src_height); + + return has_normative_scaler; +} + +void av1_resize_and_extend_frame_neon(const YV12_BUFFER_CONFIG *src, + YV12_BUFFER_CONFIG *dst, + const InterpFilter filter, + const int phase, const int num_planes) { + bool has_normative_scaler = + has_normative_scaler_neon(src->y_crop_width, src->y_crop_height, + dst->y_crop_width, dst->y_crop_height); + + if (num_planes > 1) { + has_normative_scaler = + has_normative_scaler && + has_normative_scaler_neon(src->uv_crop_width, src->uv_crop_height, + dst->uv_crop_width, dst->uv_crop_height); + } + + if (!has_normative_scaler) { + av1_resize_and_extend_frame_c(src, dst, filter, phase, num_planes); + return; + } + + // We use AOMMIN(num_planes, MAX_MB_PLANE) instead of num_planes to quiet + // the static analysis warnings. 
+ int malloc_failed = 0; + for (int i = 0; i < AOMMIN(num_planes, MAX_MB_PLANE); ++i) { + const int is_uv = i > 0; + const int src_w = src->crop_widths[is_uv]; + const int src_h = src->crop_heights[is_uv]; + const int dst_w = dst->crop_widths[is_uv]; + const int dst_h = dst->crop_heights[is_uv]; + const int dst_y_w = (dst->crop_widths[0] + 1) & ~1; + const int dst_y_h = (dst->crop_heights[0] + 1) & ~1; + + if (2 * dst_w == src_w && 2 * dst_h == src_h) { + if (phase == 0) { + scale_plane_2_to_1_phase_0(src->buffers[i], src->strides[is_uv], + dst->buffers[i], dst->strides[is_uv], dst_w, + dst_h); + } else if (filter == BILINEAR) { + const int16_t c0 = av1_bilinear_filters[phase][3]; + const int16_t c1 = av1_bilinear_filters[phase][4]; + scale_plane_2_to_1_bilinear(src->buffers[i], src->strides[is_uv], + dst->buffers[i], dst->strides[is_uv], dst_w, + dst_h, c0, c1); + } else { + const int buffer_stride = (dst_y_w + 3) & ~3; + const int buffer_height = (2 * dst_y_h + SUBPEL_TAPS - 2 + 7) & ~7; + uint8_t *const temp_buffer = + (uint8_t *)malloc(buffer_stride * buffer_height); + if (!temp_buffer) { + malloc_failed = 1; + break; + } + const InterpKernel *interp_kernel = + (const InterpKernel *)av1_interp_filter_params_list[filter] + .filter_ptr; + scale_plane_2_to_1_general(src->buffers[i], src->strides[is_uv], + dst->buffers[i], dst->strides[is_uv], dst_w, + dst_h, interp_kernel[phase], temp_buffer); + free(temp_buffer); + } + } else if (4 * dst_w == src_w && 4 * dst_h == src_h) { + if (phase == 0) { + scale_plane_4_to_1_phase_0(src->buffers[i], src->strides[is_uv], + dst->buffers[i], dst->strides[is_uv], dst_w, + dst_h); + } else if (filter == BILINEAR) { + const int16_t c0 = av1_bilinear_filters[phase][3]; + const int16_t c1 = av1_bilinear_filters[phase][4]; + scale_plane_4_to_1_bilinear(src->buffers[i], src->strides[is_uv], + dst->buffers[i], dst->strides[is_uv], dst_w, + dst_h, c0, c1); + } else { + const int buffer_stride = (dst_y_w + 1) & ~1; + const int buffer_height = (4 * dst_y_h + SUBPEL_TAPS - 2 + 7) & ~7; + uint8_t *const temp_buffer = + (uint8_t *)malloc(buffer_stride * buffer_height); + if (!temp_buffer) { + malloc_failed = 1; + break; + } + const InterpKernel *interp_kernel = + (const InterpKernel *)av1_interp_filter_params_list[filter] + .filter_ptr; + scale_plane_4_to_1_general(src->buffers[i], src->strides[is_uv], + dst->buffers[i], dst->strides[is_uv], dst_w, + dst_h, interp_kernel[phase], temp_buffer); + free(temp_buffer); + } + } else { + assert(4 * dst_w == 3 * src_w && 4 * dst_h == 3 * src_h); + // 4 to 3 + const int buffer_stride = (dst_y_w + 5) - ((dst_y_w + 5) % 6) + 2; + const int buffer_height = (4 * dst_y_h / 3 + SUBPEL_TAPS - 1 + 7) & ~7; + uint8_t *const temp_buffer = + (uint8_t *)malloc(buffer_stride * buffer_height); + if (!temp_buffer) { + malloc_failed = 1; + break; + } + if (filter == BILINEAR) { + scale_plane_4_to_3_bilinear(src->buffers[i], src->strides[is_uv], + dst->buffers[i], dst->strides[is_uv], dst_w, + dst_h, phase, temp_buffer); + } else { + const InterpKernel *interp_kernel = + (const InterpKernel *)av1_interp_filter_params_list[filter] + .filter_ptr; + scale_plane_4_to_3_general(src->buffers[i], src->strides[is_uv], + dst->buffers[i], dst->strides[is_uv], dst_w, + dst_h, interp_kernel, phase, temp_buffer); + } + free(temp_buffer); + } + } + + if (malloc_failed) { + av1_resize_and_extend_frame_c(src, dst, filter, phase, num_planes); + } else { + aom_extend_frame_borders(dst, num_planes); + } +} + +static INLINE void scaledconvolve_horiz_w4( + const 
uint8_t *src, const ptrdiff_t src_stride, uint8_t *dst, + const ptrdiff_t dst_stride, const InterpKernel *const x_filters, + const int x0_q4, const int x_step_q4, const int w, const int h) { + DECLARE_ALIGNED(16, uint8_t, temp[4 * 4]); + int x, y, z; + + src -= SUBPEL_TAPS / 2 - 1; + + y = h; + do { + int x_q4 = x0_q4; + x = 0; + do { + // process 4 src_x steps + for (z = 0; z < 4; ++z) { + const uint8_t *const src_x = &src[x_q4 >> SUBPEL_BITS]; + if (x_q4 & SUBPEL_MASK) { + const int16x8_t filters = vld1q_s16(x_filters[x_q4 & SUBPEL_MASK]); + uint8x8_t s[8], d; + int16x8_t ss[4]; + int16x4_t t[8], tt; + + load_u8_8x4(src_x, src_stride, &s[0], &s[1], &s[2], &s[3]); + transpose_elems_inplace_u8_8x4(&s[0], &s[1], &s[2], &s[3]); + + ss[0] = vreinterpretq_s16_u16(vmovl_u8(s[0])); + ss[1] = vreinterpretq_s16_u16(vmovl_u8(s[1])); + ss[2] = vreinterpretq_s16_u16(vmovl_u8(s[2])); + ss[3] = vreinterpretq_s16_u16(vmovl_u8(s[3])); + t[0] = vget_low_s16(ss[0]); + t[1] = vget_low_s16(ss[1]); + t[2] = vget_low_s16(ss[2]); + t[3] = vget_low_s16(ss[3]); + t[4] = vget_high_s16(ss[0]); + t[5] = vget_high_s16(ss[1]); + t[6] = vget_high_s16(ss[2]); + t[7] = vget_high_s16(ss[3]); + + tt = convolve8_4(t[0], t[1], t[2], t[3], t[4], t[5], t[6], t[7], + filters); + d = vqrshrun_n_s16(vcombine_s16(tt, tt), 7); + store_u8_4x1(&temp[4 * z], d); + } else { + int i; + for (i = 0; i < 4; ++i) { + temp[z * 4 + i] = src_x[i * src_stride + 3]; + } + } + x_q4 += x_step_q4; + } + + // transpose the 4x4 filters values back to dst + { + const uint8x8x4_t d4 = vld4_u8(temp); + store_u8_4x1(&dst[x + 0 * dst_stride], d4.val[0]); + store_u8_4x1(&dst[x + 1 * dst_stride], d4.val[1]); + store_u8_4x1(&dst[x + 2 * dst_stride], d4.val[2]); + store_u8_4x1(&dst[x + 3 * dst_stride], d4.val[3]); + } + x += 4; + } while (x < w); + + src += src_stride * 4; + dst += dst_stride * 4; + y -= 4; + } while (y > 0); +} + +static INLINE void scaledconvolve_horiz_w8( + const uint8_t *src, const ptrdiff_t src_stride, uint8_t *dst, + const ptrdiff_t dst_stride, const InterpKernel *const x_filters, + const int x0_q4, const int x_step_q4, const int w, const int h) { + DECLARE_ALIGNED(16, uint8_t, temp[8 * 8]); + int x, y, z; + src -= SUBPEL_TAPS / 2 - 1; + + // This function processes 8x8 areas. The intermediate height is not always + // a multiple of 8, so force it to be a multiple of 8 here. 
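+  // Example (note added for clarity, not in the original patch): an
+  // intermediate height of 135 is processed as (135 + 7) & ~7 = 136 rows
+  // here; the temp buffer in aom_scaled_2d_neon reserves (135 + 8) rows of
+  // 64 bytes, so the overshoot from this rounding stays within bounds.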
+ y = (h + 7) & ~7; + + do { + int x_q4 = x0_q4; + x = 0; + do { + uint8x8_t d[8]; + // process 8 src_x steps + for (z = 0; z < 8; ++z) { + const uint8_t *const src_x = &src[x_q4 >> SUBPEL_BITS]; + + if (x_q4 & SUBPEL_MASK) { + const int16x8_t filters = vld1q_s16(x_filters[x_q4 & SUBPEL_MASK]); + uint8x8_t s[8]; + load_u8_8x8(src_x, src_stride, &s[0], &s[1], &s[2], &s[3], &s[4], + &s[5], &s[6], &s[7]); + transpose_elems_inplace_u8_8x8(&s[0], &s[1], &s[2], &s[3], &s[4], + &s[5], &s[6], &s[7]); + d[0] = scale_filter_8(s, filters); + vst1_u8(&temp[8 * z], d[0]); + } else { + int i; + for (i = 0; i < 8; ++i) { + temp[z * 8 + i] = src_x[i * src_stride + 3]; + } + } + x_q4 += x_step_q4; + } + + // transpose the 8x8 filters values back to dst + load_u8_8x8(temp, 8, &d[0], &d[1], &d[2], &d[3], &d[4], &d[5], &d[6], + &d[7]); + transpose_elems_inplace_u8_8x8(&d[0], &d[1], &d[2], &d[3], &d[4], &d[5], + &d[6], &d[7]); + store_u8_8x8(dst + x, dst_stride, d[0], d[1], d[2], d[3], d[4], d[5], + d[6], d[7]); + x += 8; + } while (x < w); + + src += src_stride * 8; + dst += dst_stride * 8; + } while (y -= 8); +} + +static INLINE void scaledconvolve_vert_w4( + const uint8_t *src, const ptrdiff_t src_stride, uint8_t *dst, + const ptrdiff_t dst_stride, const InterpKernel *const y_filters, + const int y0_q4, const int y_step_q4, const int w, const int h) { + int y; + int y_q4 = y0_q4; + + src -= src_stride * (SUBPEL_TAPS / 2 - 1); + y = h; + do { + const unsigned char *src_y = &src[(y_q4 >> SUBPEL_BITS) * src_stride]; + + if (y_q4 & SUBPEL_MASK) { + const int16x8_t filters = vld1q_s16(y_filters[y_q4 & SUBPEL_MASK]); + uint8x8_t s[8], d; + int16x4_t t[8], tt; + + load_u8_8x8(src_y, src_stride, &s[0], &s[1], &s[2], &s[3], &s[4], &s[5], + &s[6], &s[7]); + t[0] = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(s[0]))); + t[1] = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(s[1]))); + t[2] = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(s[2]))); + t[3] = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(s[3]))); + t[4] = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(s[4]))); + t[5] = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(s[5]))); + t[6] = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(s[6]))); + t[7] = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(s[7]))); + + tt = convolve8_4(t[0], t[1], t[2], t[3], t[4], t[5], t[6], t[7], filters); + d = vqrshrun_n_s16(vcombine_s16(tt, tt), 7); + store_u8_4x1(dst, d); + } else { + memcpy(dst, &src_y[3 * src_stride], w); + } + + dst += dst_stride; + y_q4 += y_step_q4; + } while (--y); +} + +static INLINE void scaledconvolve_vert_w8( + const uint8_t *src, const ptrdiff_t src_stride, uint8_t *dst, + const ptrdiff_t dst_stride, const InterpKernel *const y_filters, + const int y0_q4, const int y_step_q4, const int w, const int h) { + int y; + int y_q4 = y0_q4; + + src -= src_stride * (SUBPEL_TAPS / 2 - 1); + y = h; + do { + const unsigned char *src_y = &src[(y_q4 >> SUBPEL_BITS) * src_stride]; + if (y_q4 & SUBPEL_MASK) { + const int16x8_t filters = vld1q_s16(y_filters[y_q4 & SUBPEL_MASK]); + uint8x8_t s[8], d; + load_u8_8x8(src_y, src_stride, &s[0], &s[1], &s[2], &s[3], &s[4], &s[5], + &s[6], &s[7]); + d = scale_filter_8(s, filters); + vst1_u8(dst, d); + } else { + memcpy(dst, &src_y[3 * src_stride], w); + } + dst += dst_stride; + y_q4 += y_step_q4; + } while (--y); +} + +static INLINE void scaledconvolve_vert_w16( + const uint8_t *src, const ptrdiff_t src_stride, uint8_t *dst, + const ptrdiff_t dst_stride, const InterpKernel *const y_filters, + const int y0_q4, const int y_step_q4, const int w, const 
int h) { + int x, y; + int y_q4 = y0_q4; + + src -= src_stride * (SUBPEL_TAPS / 2 - 1); + y = h; + do { + const unsigned char *src_y = &src[(y_q4 >> SUBPEL_BITS) * src_stride]; + if (y_q4 & SUBPEL_MASK) { + x = 0; + do { + const int16x8_t filters = vld1q_s16(y_filters[y_q4 & SUBPEL_MASK]); + uint8x16_t ss[8]; + uint8x8_t s[8], d[2]; + load_u8_16x8(src_y, src_stride, &ss[0], &ss[1], &ss[2], &ss[3], &ss[4], + &ss[5], &ss[6], &ss[7]); + s[0] = vget_low_u8(ss[0]); + s[1] = vget_low_u8(ss[1]); + s[2] = vget_low_u8(ss[2]); + s[3] = vget_low_u8(ss[3]); + s[4] = vget_low_u8(ss[4]); + s[5] = vget_low_u8(ss[5]); + s[6] = vget_low_u8(ss[6]); + s[7] = vget_low_u8(ss[7]); + d[0] = scale_filter_8(s, filters); + + s[0] = vget_high_u8(ss[0]); + s[1] = vget_high_u8(ss[1]); + s[2] = vget_high_u8(ss[2]); + s[3] = vget_high_u8(ss[3]); + s[4] = vget_high_u8(ss[4]); + s[5] = vget_high_u8(ss[5]); + s[6] = vget_high_u8(ss[6]); + s[7] = vget_high_u8(ss[7]); + d[1] = scale_filter_8(s, filters); + vst1q_u8(&dst[x], vcombine_u8(d[0], d[1])); + src_y += 16; + x += 16; + } while (x < w); + } else { + memcpy(dst, &src_y[3 * src_stride], w); + } + dst += dst_stride; + y_q4 += y_step_q4; + } while (--y); +} + +void aom_scaled_2d_neon(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, + ptrdiff_t dst_stride, const InterpKernel *filter, + int x0_q4, int x_step_q4, int y0_q4, int y_step_q4, + int w, int h) { + // Note: Fixed size intermediate buffer, temp, places limits on parameters. + // 2d filtering proceeds in 2 steps: + // (1) Interpolate horizontally into an intermediate buffer, temp. + // (2) Interpolate temp vertically to derive the sub-pixel result. + // Deriving the maximum number of rows in the temp buffer (135): + // --Smallest scaling factor is x1/2 ==> y_step_q4 = 32 (Normative). + // --Largest block size is 64x64 pixels. + // --64 rows in the downscaled frame span a distance of (64 - 1) * 32 in the + // original frame (in 1/16th pixel units). + // --Must round-up because block may be located at sub-pixel position. + // --Require an additional SUBPEL_TAPS rows for the 8-tap filter tails. + // --((64 - 1) * 32 + 15) >> 4 + 8 = 135. + // --Require an additional 8 rows for the horiz_w8 transpose tail. + // When calling in frame scaling function, the smallest scaling factor is x1/4 + // ==> y_step_q4 = 64. Since w and h are at most 16, the temp buffer is still + // big enough. 
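+  // Worked example of the bound above (added note, not in the original
+  // patch): with h = 64, y_step_q4 = 32 and the largest sub-pixel start
+  // y0_q4 = 15, the span is (64 - 1) * 32 + 15 = 2031 sixteenths, which
+  // rounds up to 127 source rows; adding SUBPEL_TAPS = 8 gives the 135-row
+  // budget, and the extra 8 rows in temp cover the horiz_w8 pass rounding
+  // the height up to a multiple of 8.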
+ DECLARE_ALIGNED(16, uint8_t, temp[(135 + 8) * 64]); + const int intermediate_height = + (((h - 1) * y_step_q4 + y0_q4) >> SUBPEL_BITS) + SUBPEL_TAPS; + + assert(w <= 64); + assert(h <= 64); + assert(y_step_q4 <= 32 || (y_step_q4 <= 64 && h <= 32)); + assert(x_step_q4 <= 64); + + if (w >= 8) { + scaledconvolve_horiz_w8(src - src_stride * (SUBPEL_TAPS / 2 - 1), + src_stride, temp, 64, filter, x0_q4, x_step_q4, w, + intermediate_height); + } else { + scaledconvolve_horiz_w4(src - src_stride * (SUBPEL_TAPS / 2 - 1), + src_stride, temp, 64, filter, x0_q4, x_step_q4, w, + intermediate_height); + } + + if (w >= 16) { + scaledconvolve_vert_w16(temp + 64 * (SUBPEL_TAPS / 2 - 1), 64, dst, + dst_stride, filter, y0_q4, y_step_q4, w, h); + } else if (w == 8) { + scaledconvolve_vert_w8(temp + 64 * (SUBPEL_TAPS / 2 - 1), 64, dst, + dst_stride, filter, y0_q4, y_step_q4, w, h); + } else { + scaledconvolve_vert_w4(temp + 64 * (SUBPEL_TAPS / 2 - 1), 64, dst, + dst_stride, filter, y0_q4, y_step_q4, w, h); + } +} diff --git a/third_party/aom/av1/common/arm/selfguided_neon.c b/third_party/aom/av1/common/arm/selfguided_neon.c new file mode 100644 index 0000000000..1d3a3cc038 --- /dev/null +++ b/third_party/aom/av1/common/arm/selfguided_neon.c @@ -0,0 +1,1595 @@ +/* + * Copyright (c) 2018, Alliance for Open Media. All rights reserved + * + * This source code is subject to the terms of the BSD 2 Clause License and + * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License + * was not distributed with this source code in the LICENSE file, you can + * obtain it at www.aomedia.org/license/software. If the Alliance for Open + * Media Patent License 1.0 was not distributed with this source code in the + * PATENTS file, you can obtain it at www.aomedia.org/license/patent. + */ + +#include <arm_neon.h> +#include <assert.h> + +#include "config/aom_config.h" +#include "config/av1_rtcd.h" + +#include "aom_dsp/aom_dsp_common.h" +#include "aom_dsp/txfm_common.h" +#include "aom_dsp/arm/mem_neon.h" +#include "aom_dsp/arm/transpose_neon.h" +#include "aom_mem/aom_mem.h" +#include "aom_ports/mem.h" +#include "av1/common/av1_common_int.h" +#include "av1/common/common.h" +#include "av1/common/resize.h" +#include "av1/common/restoration.h" + +// Constants used for right shift in final_filter calculation. 
+#define NB_EVEN 5 +#define NB_ODD 4 + +static INLINE void calc_ab_fast_internal_common( + uint32x4_t s0, uint32x4_t s1, uint32x4_t s2, uint32x4_t s3, uint32x4_t s4, + uint32x4_t s5, uint32x4_t s6, uint32x4_t s7, int32x4_t sr4, int32x4_t sr5, + int32x4_t sr6, int32x4_t sr7, uint32x4_t const_n_val, uint32x4_t s_vec, + uint32x4_t const_val, uint32x4_t one_by_n_minus_1_vec, + uint16x4_t sgrproj_sgr, int32_t *src1, uint16_t *dst_A16, int32_t *src2, + const int buf_stride) { + uint32x4_t q0, q1, q2, q3; + uint32x4_t p0, p1, p2, p3; + uint16x4_t d0, d1, d2, d3; + + s0 = vmulq_u32(s0, const_n_val); + s1 = vmulq_u32(s1, const_n_val); + s2 = vmulq_u32(s2, const_n_val); + s3 = vmulq_u32(s3, const_n_val); + + q0 = vmulq_u32(s4, s4); + q1 = vmulq_u32(s5, s5); + q2 = vmulq_u32(s6, s6); + q3 = vmulq_u32(s7, s7); + + p0 = vcleq_u32(q0, s0); + p1 = vcleq_u32(q1, s1); + p2 = vcleq_u32(q2, s2); + p3 = vcleq_u32(q3, s3); + + q0 = vsubq_u32(s0, q0); + q1 = vsubq_u32(s1, q1); + q2 = vsubq_u32(s2, q2); + q3 = vsubq_u32(s3, q3); + + p0 = vandq_u32(p0, q0); + p1 = vandq_u32(p1, q1); + p2 = vandq_u32(p2, q2); + p3 = vandq_u32(p3, q3); + + p0 = vmulq_u32(p0, s_vec); + p1 = vmulq_u32(p1, s_vec); + p2 = vmulq_u32(p2, s_vec); + p3 = vmulq_u32(p3, s_vec); + + p0 = vrshrq_n_u32(p0, SGRPROJ_MTABLE_BITS); + p1 = vrshrq_n_u32(p1, SGRPROJ_MTABLE_BITS); + p2 = vrshrq_n_u32(p2, SGRPROJ_MTABLE_BITS); + p3 = vrshrq_n_u32(p3, SGRPROJ_MTABLE_BITS); + + p0 = vminq_u32(p0, const_val); + p1 = vminq_u32(p1, const_val); + p2 = vminq_u32(p2, const_val); + p3 = vminq_u32(p3, const_val); + + { + store_u32_4x4((uint32_t *)src1, buf_stride, p0, p1, p2, p3); + + for (int x = 0; x < 4; x++) { + for (int y = 0; y < 4; y++) { + dst_A16[x * buf_stride + y] = av1_x_by_xplus1[src1[x * buf_stride + y]]; + } + } + load_u16_4x4(dst_A16, buf_stride, &d0, &d1, &d2, &d3); + } + p0 = vsubl_u16(sgrproj_sgr, d0); + p1 = vsubl_u16(sgrproj_sgr, d1); + p2 = vsubl_u16(sgrproj_sgr, d2); + p3 = vsubl_u16(sgrproj_sgr, d3); + + s4 = vmulq_u32(vreinterpretq_u32_s32(sr4), one_by_n_minus_1_vec); + s5 = vmulq_u32(vreinterpretq_u32_s32(sr5), one_by_n_minus_1_vec); + s6 = vmulq_u32(vreinterpretq_u32_s32(sr6), one_by_n_minus_1_vec); + s7 = vmulq_u32(vreinterpretq_u32_s32(sr7), one_by_n_minus_1_vec); + + s4 = vmulq_u32(s4, p0); + s5 = vmulq_u32(s5, p1); + s6 = vmulq_u32(s6, p2); + s7 = vmulq_u32(s7, p3); + + p0 = vrshrq_n_u32(s4, SGRPROJ_RECIP_BITS); + p1 = vrshrq_n_u32(s5, SGRPROJ_RECIP_BITS); + p2 = vrshrq_n_u32(s6, SGRPROJ_RECIP_BITS); + p3 = vrshrq_n_u32(s7, SGRPROJ_RECIP_BITS); + + store_s32_4x4(src2, buf_stride, vreinterpretq_s32_u32(p0), + vreinterpretq_s32_u32(p1), vreinterpretq_s32_u32(p2), + vreinterpretq_s32_u32(p3)); +} +static INLINE void calc_ab_internal_common( + uint32x4_t s0, uint32x4_t s1, uint32x4_t s2, uint32x4_t s3, uint32x4_t s4, + uint32x4_t s5, uint32x4_t s6, uint32x4_t s7, uint16x8_t s16_0, + uint16x8_t s16_1, uint16x8_t s16_2, uint16x8_t s16_3, uint16x8_t s16_4, + uint16x8_t s16_5, uint16x8_t s16_6, uint16x8_t s16_7, + uint32x4_t const_n_val, uint32x4_t s_vec, uint32x4_t const_val, + uint16x4_t one_by_n_minus_1_vec, uint16x8_t sgrproj_sgr, int32_t *src1, + uint16_t *dst_A16, int32_t *dst2, const int buf_stride) { + uint16x4_t d0, d1, d2, d3, d4, d5, d6, d7; + uint32x4_t q0, q1, q2, q3, q4, q5, q6, q7; + uint32x4_t p0, p1, p2, p3, p4, p5, p6, p7; + + s0 = vmulq_u32(s0, const_n_val); + s1 = vmulq_u32(s1, const_n_val); + s2 = vmulq_u32(s2, const_n_val); + s3 = vmulq_u32(s3, const_n_val); + s4 = vmulq_u32(s4, const_n_val); + s5 = vmulq_u32(s5, 
const_n_val); + s6 = vmulq_u32(s6, const_n_val); + s7 = vmulq_u32(s7, const_n_val); + + d0 = vget_low_u16(s16_4); + d1 = vget_low_u16(s16_5); + d2 = vget_low_u16(s16_6); + d3 = vget_low_u16(s16_7); + d4 = vget_high_u16(s16_4); + d5 = vget_high_u16(s16_5); + d6 = vget_high_u16(s16_6); + d7 = vget_high_u16(s16_7); + + q0 = vmull_u16(d0, d0); + q1 = vmull_u16(d1, d1); + q2 = vmull_u16(d2, d2); + q3 = vmull_u16(d3, d3); + q4 = vmull_u16(d4, d4); + q5 = vmull_u16(d5, d5); + q6 = vmull_u16(d6, d6); + q7 = vmull_u16(d7, d7); + + p0 = vcleq_u32(q0, s0); + p1 = vcleq_u32(q1, s1); + p2 = vcleq_u32(q2, s2); + p3 = vcleq_u32(q3, s3); + p4 = vcleq_u32(q4, s4); + p5 = vcleq_u32(q5, s5); + p6 = vcleq_u32(q6, s6); + p7 = vcleq_u32(q7, s7); + + q0 = vsubq_u32(s0, q0); + q1 = vsubq_u32(s1, q1); + q2 = vsubq_u32(s2, q2); + q3 = vsubq_u32(s3, q3); + q4 = vsubq_u32(s4, q4); + q5 = vsubq_u32(s5, q5); + q6 = vsubq_u32(s6, q6); + q7 = vsubq_u32(s7, q7); + + p0 = vandq_u32(p0, q0); + p1 = vandq_u32(p1, q1); + p2 = vandq_u32(p2, q2); + p3 = vandq_u32(p3, q3); + p4 = vandq_u32(p4, q4); + p5 = vandq_u32(p5, q5); + p6 = vandq_u32(p6, q6); + p7 = vandq_u32(p7, q7); + + p0 = vmulq_u32(p0, s_vec); + p1 = vmulq_u32(p1, s_vec); + p2 = vmulq_u32(p2, s_vec); + p3 = vmulq_u32(p3, s_vec); + p4 = vmulq_u32(p4, s_vec); + p5 = vmulq_u32(p5, s_vec); + p6 = vmulq_u32(p6, s_vec); + p7 = vmulq_u32(p7, s_vec); + + p0 = vrshrq_n_u32(p0, SGRPROJ_MTABLE_BITS); + p1 = vrshrq_n_u32(p1, SGRPROJ_MTABLE_BITS); + p2 = vrshrq_n_u32(p2, SGRPROJ_MTABLE_BITS); + p3 = vrshrq_n_u32(p3, SGRPROJ_MTABLE_BITS); + p4 = vrshrq_n_u32(p4, SGRPROJ_MTABLE_BITS); + p5 = vrshrq_n_u32(p5, SGRPROJ_MTABLE_BITS); + p6 = vrshrq_n_u32(p6, SGRPROJ_MTABLE_BITS); + p7 = vrshrq_n_u32(p7, SGRPROJ_MTABLE_BITS); + + p0 = vminq_u32(p0, const_val); + p1 = vminq_u32(p1, const_val); + p2 = vminq_u32(p2, const_val); + p3 = vminq_u32(p3, const_val); + p4 = vminq_u32(p4, const_val); + p5 = vminq_u32(p5, const_val); + p6 = vminq_u32(p6, const_val); + p7 = vminq_u32(p7, const_val); + + { + store_u32_4x4((uint32_t *)src1, buf_stride, p0, p1, p2, p3); + store_u32_4x4((uint32_t *)src1 + 4, buf_stride, p4, p5, p6, p7); + + for (int x = 0; x < 4; x++) { + for (int y = 0; y < 8; y++) { + dst_A16[x * buf_stride + y] = av1_x_by_xplus1[src1[x * buf_stride + y]]; + } + } + load_u16_8x4(dst_A16, buf_stride, &s16_4, &s16_5, &s16_6, &s16_7); + } + + s16_4 = vsubq_u16(sgrproj_sgr, s16_4); + s16_5 = vsubq_u16(sgrproj_sgr, s16_5); + s16_6 = vsubq_u16(sgrproj_sgr, s16_6); + s16_7 = vsubq_u16(sgrproj_sgr, s16_7); + + s0 = vmull_u16(vget_low_u16(s16_0), one_by_n_minus_1_vec); + s1 = vmull_u16(vget_low_u16(s16_1), one_by_n_minus_1_vec); + s2 = vmull_u16(vget_low_u16(s16_2), one_by_n_minus_1_vec); + s3 = vmull_u16(vget_low_u16(s16_3), one_by_n_minus_1_vec); + s4 = vmull_u16(vget_high_u16(s16_0), one_by_n_minus_1_vec); + s5 = vmull_u16(vget_high_u16(s16_1), one_by_n_minus_1_vec); + s6 = vmull_u16(vget_high_u16(s16_2), one_by_n_minus_1_vec); + s7 = vmull_u16(vget_high_u16(s16_3), one_by_n_minus_1_vec); + + s0 = vmulq_u32(s0, vmovl_u16(vget_low_u16(s16_4))); + s1 = vmulq_u32(s1, vmovl_u16(vget_low_u16(s16_5))); + s2 = vmulq_u32(s2, vmovl_u16(vget_low_u16(s16_6))); + s3 = vmulq_u32(s3, vmovl_u16(vget_low_u16(s16_7))); + s4 = vmulq_u32(s4, vmovl_u16(vget_high_u16(s16_4))); + s5 = vmulq_u32(s5, vmovl_u16(vget_high_u16(s16_5))); + s6 = vmulq_u32(s6, vmovl_u16(vget_high_u16(s16_6))); + s7 = vmulq_u32(s7, vmovl_u16(vget_high_u16(s16_7))); + + p0 = vrshrq_n_u32(s0, SGRPROJ_RECIP_BITS); + p1 = vrshrq_n_u32(s1, 
SGRPROJ_RECIP_BITS); + p2 = vrshrq_n_u32(s2, SGRPROJ_RECIP_BITS); + p3 = vrshrq_n_u32(s3, SGRPROJ_RECIP_BITS); + p4 = vrshrq_n_u32(s4, SGRPROJ_RECIP_BITS); + p5 = vrshrq_n_u32(s5, SGRPROJ_RECIP_BITS); + p6 = vrshrq_n_u32(s6, SGRPROJ_RECIP_BITS); + p7 = vrshrq_n_u32(s7, SGRPROJ_RECIP_BITS); + + store_s32_4x4(dst2, buf_stride, vreinterpretq_s32_u32(p0), + vreinterpretq_s32_u32(p1), vreinterpretq_s32_u32(p2), + vreinterpretq_s32_u32(p3)); + store_s32_4x4(dst2 + 4, buf_stride, vreinterpretq_s32_u32(p4), + vreinterpretq_s32_u32(p5), vreinterpretq_s32_u32(p6), + vreinterpretq_s32_u32(p7)); +} + +static INLINE void boxsum2_square_sum_calc( + int16x4_t t1, int16x4_t t2, int16x4_t t3, int16x4_t t4, int16x4_t t5, + int16x4_t t6, int16x4_t t7, int16x4_t t8, int16x4_t t9, int16x4_t t10, + int16x4_t t11, int32x4_t *r0, int32x4_t *r1, int32x4_t *r2, int32x4_t *r3) { + int32x4_t d1, d2, d3, d4, d5, d6, d7, d8, d9, d10, d11; + int32x4_t r12, r34, r67, r89, r1011; + int32x4_t r345, r6789, r789; + + d1 = vmull_s16(t1, t1); + d2 = vmull_s16(t2, t2); + d3 = vmull_s16(t3, t3); + d4 = vmull_s16(t4, t4); + d5 = vmull_s16(t5, t5); + d6 = vmull_s16(t6, t6); + d7 = vmull_s16(t7, t7); + d8 = vmull_s16(t8, t8); + d9 = vmull_s16(t9, t9); + d10 = vmull_s16(t10, t10); + d11 = vmull_s16(t11, t11); + + r12 = vaddq_s32(d1, d2); + r34 = vaddq_s32(d3, d4); + r67 = vaddq_s32(d6, d7); + r89 = vaddq_s32(d8, d9); + r1011 = vaddq_s32(d10, d11); + r345 = vaddq_s32(r34, d5); + r6789 = vaddq_s32(r67, r89); + r789 = vsubq_s32(r6789, d6); + *r0 = vaddq_s32(r12, r345); + *r1 = vaddq_s32(r67, r345); + *r2 = vaddq_s32(d5, r6789); + *r3 = vaddq_s32(r789, r1011); +} + +static INLINE void boxsum2(int16_t *src, const int src_stride, int16_t *dst16, + int32_t *dst32, int32_t *dst2, const int dst_stride, + const int width, const int height) { + assert(width > 2 * SGRPROJ_BORDER_HORZ); + assert(height > 2 * SGRPROJ_BORDER_VERT); + + int16_t *dst1_16_ptr, *src_ptr; + int32_t *dst2_ptr; + int h, w, count = 0; + const int dst_stride_2 = (dst_stride << 1); + const int dst_stride_8 = (dst_stride << 3); + + dst1_16_ptr = dst16; + dst2_ptr = dst2; + src_ptr = src; + w = width; + { + int16x8_t t1, t2, t3, t4, t5, t6, t7; + int16x8_t t8, t9, t10, t11, t12; + + int16x8_t q12345, q56789, q34567, q7891011; + int16x8_t q12, q34, q67, q89, q1011; + int16x8_t q345, q6789, q789; + + int32x4_t r12345, r56789, r34567, r7891011; + + do { + h = height; + dst1_16_ptr = dst16 + (count << 3); + dst2_ptr = dst2 + (count << 3); + src_ptr = src + (count << 3); + + dst1_16_ptr += dst_stride_2; + dst2_ptr += dst_stride_2; + do { + load_s16_8x4(src_ptr, src_stride, &t1, &t2, &t3, &t4); + src_ptr += 4 * src_stride; + load_s16_8x4(src_ptr, src_stride, &t5, &t6, &t7, &t8); + src_ptr += 4 * src_stride; + load_s16_8x4(src_ptr, src_stride, &t9, &t10, &t11, &t12); + + q12 = vaddq_s16(t1, t2); + q34 = vaddq_s16(t3, t4); + q67 = vaddq_s16(t6, t7); + q89 = vaddq_s16(t8, t9); + q1011 = vaddq_s16(t10, t11); + q345 = vaddq_s16(q34, t5); + q6789 = vaddq_s16(q67, q89); + q789 = vaddq_s16(q89, t7); + q12345 = vaddq_s16(q12, q345); + q34567 = vaddq_s16(q67, q345); + q56789 = vaddq_s16(t5, q6789); + q7891011 = vaddq_s16(q789, q1011); + + store_s16_8x4(dst1_16_ptr, dst_stride_2, q12345, q34567, q56789, + q7891011); + dst1_16_ptr += dst_stride_8; + + boxsum2_square_sum_calc( + vget_low_s16(t1), vget_low_s16(t2), vget_low_s16(t3), + vget_low_s16(t4), vget_low_s16(t5), vget_low_s16(t6), + vget_low_s16(t7), vget_low_s16(t8), vget_low_s16(t9), + vget_low_s16(t10), vget_low_s16(t11), &r12345, 
&r34567, &r56789, + &r7891011); + + store_s32_4x4(dst2_ptr, dst_stride_2, r12345, r34567, r56789, r7891011); + + boxsum2_square_sum_calc( + vget_high_s16(t1), vget_high_s16(t2), vget_high_s16(t3), + vget_high_s16(t4), vget_high_s16(t5), vget_high_s16(t6), + vget_high_s16(t7), vget_high_s16(t8), vget_high_s16(t9), + vget_high_s16(t10), vget_high_s16(t11), &r12345, &r34567, &r56789, + &r7891011); + + store_s32_4x4(dst2_ptr + 4, dst_stride_2, r12345, r34567, r56789, + r7891011); + dst2_ptr += (dst_stride_8); + h -= 8; + } while (h > 0); + w -= 8; + count++; + } while (w > 0); + + // memset needed for row pixels as 2nd stage of boxsum filter uses + // first 2 rows of dst16, dst2 buffer which is not filled in first stage. + for (int x = 0; x < 2; x++) { + memset(dst16 + x * dst_stride, 0, (width + 4) * sizeof(*dst16)); + memset(dst2 + x * dst_stride, 0, (width + 4) * sizeof(*dst2)); + } + + // memset needed for extra columns as 2nd stage of boxsum filter uses + // last 2 columns of dst16, dst2 buffer which is not filled in first stage. + for (int x = 2; x < height + 2; x++) { + int dst_offset = x * dst_stride + width + 2; + memset(dst16 + dst_offset, 0, 3 * sizeof(*dst16)); + memset(dst2 + dst_offset, 0, 3 * sizeof(*dst2)); + } + } + + { + int16x4_t s1, s2, s3, s4, s5, s6, s7, s8; + int32x4_t d1, d2, d3, d4, d5, d6, d7, d8; + int32x4_t q12345, q34567, q23456, q45678; + int32x4_t q23, q45, q67; + int32x4_t q2345, q4567; + + int32x4_t r12345, r34567, r23456, r45678; + int32x4_t r23, r45, r67; + int32x4_t r2345, r4567; + + int32_t *src2_ptr, *dst1_32_ptr; + int16_t *src1_ptr; + count = 0; + h = height; + do { + dst1_32_ptr = dst32 + count * dst_stride_8 + (dst_stride_2); + dst2_ptr = dst2 + count * dst_stride_8 + (dst_stride_2); + src1_ptr = dst16 + count * dst_stride_8 + (dst_stride_2); + src2_ptr = dst2 + count * dst_stride_8 + (dst_stride_2); + w = width; + + dst1_32_ptr += 2; + dst2_ptr += 2; + load_s16_4x4(src1_ptr, dst_stride_2, &s1, &s2, &s3, &s4); + transpose_elems_inplace_s16_4x4(&s1, &s2, &s3, &s4); + load_s32_4x4(src2_ptr, dst_stride_2, &d1, &d2, &d3, &d4); + transpose_elems_inplace_s32_4x4(&d1, &d2, &d3, &d4); + do { + src1_ptr += 4; + src2_ptr += 4; + load_s16_4x4(src1_ptr, dst_stride_2, &s5, &s6, &s7, &s8); + transpose_elems_inplace_s16_4x4(&s5, &s6, &s7, &s8); + load_s32_4x4(src2_ptr, dst_stride_2, &d5, &d6, &d7, &d8); + transpose_elems_inplace_s32_4x4(&d5, &d6, &d7, &d8); + q23 = vaddl_s16(s2, s3); + q45 = vaddl_s16(s4, s5); + q67 = vaddl_s16(s6, s7); + q2345 = vaddq_s32(q23, q45); + q4567 = vaddq_s32(q45, q67); + q12345 = vaddq_s32(vmovl_s16(s1), q2345); + q23456 = vaddq_s32(q2345, vmovl_s16(s6)); + q34567 = vaddq_s32(q4567, vmovl_s16(s3)); + q45678 = vaddq_s32(q4567, vmovl_s16(s8)); + + transpose_elems_inplace_s32_4x4(&q12345, &q23456, &q34567, &q45678); + store_s32_4x4(dst1_32_ptr, dst_stride_2, q12345, q23456, q34567, + q45678); + dst1_32_ptr += 4; + s1 = s5; + s2 = s6; + s3 = s7; + s4 = s8; + + r23 = vaddq_s32(d2, d3); + r45 = vaddq_s32(d4, d5); + r67 = vaddq_s32(d6, d7); + r2345 = vaddq_s32(r23, r45); + r4567 = vaddq_s32(r45, r67); + r12345 = vaddq_s32(d1, r2345); + r23456 = vaddq_s32(r2345, d6); + r34567 = vaddq_s32(r4567, d3); + r45678 = vaddq_s32(r4567, d8); + + transpose_elems_inplace_s32_4x4(&r12345, &r23456, &r34567, &r45678); + store_s32_4x4(dst2_ptr, dst_stride_2, r12345, r23456, r34567, r45678); + dst2_ptr += 4; + d1 = d5; + d2 = d6; + d3 = d7; + d4 = d8; + w -= 4; + } while (w > 0); + h -= 8; + count++; + } while (h > 0); + } +} + +static INLINE void 
calc_ab_internal_lbd(int32_t *A, uint16_t *A16, + uint16_t *B16, int32_t *B, + const int buf_stride, const int width, + const int height, const int r, + const int s, const int ht_inc) { + int32_t *src1, *dst2, count = 0; + uint16_t *dst_A16, *src2; + const uint32_t n = (2 * r + 1) * (2 * r + 1); + const uint32x4_t const_n_val = vdupq_n_u32(n); + const uint16x8_t sgrproj_sgr = vdupq_n_u16(SGRPROJ_SGR); + const uint16x4_t one_by_n_minus_1_vec = vdup_n_u16(av1_one_by_x[n - 1]); + const uint32x4_t const_val = vdupq_n_u32(255); + + uint16x8_t s16_0, s16_1, s16_2, s16_3, s16_4, s16_5, s16_6, s16_7; + + uint32x4_t s0, s1, s2, s3, s4, s5, s6, s7; + + const uint32x4_t s_vec = vdupq_n_u32(s); + int w, h = height; + + do { + dst_A16 = A16 + (count << 2) * buf_stride; + src1 = A + (count << 2) * buf_stride; + src2 = B16 + (count << 2) * buf_stride; + dst2 = B + (count << 2) * buf_stride; + w = width; + do { + load_u32_4x4((uint32_t *)src1, buf_stride, &s0, &s1, &s2, &s3); + load_u32_4x4((uint32_t *)src1 + 4, buf_stride, &s4, &s5, &s6, &s7); + load_u16_8x4(src2, buf_stride, &s16_0, &s16_1, &s16_2, &s16_3); + + s16_4 = s16_0; + s16_5 = s16_1; + s16_6 = s16_2; + s16_7 = s16_3; + + calc_ab_internal_common( + s0, s1, s2, s3, s4, s5, s6, s7, s16_0, s16_1, s16_2, s16_3, s16_4, + s16_5, s16_6, s16_7, const_n_val, s_vec, const_val, + one_by_n_minus_1_vec, sgrproj_sgr, src1, dst_A16, dst2, buf_stride); + + w -= 8; + dst2 += 8; + src1 += 8; + src2 += 8; + dst_A16 += 8; + } while (w > 0); + count++; + h -= (ht_inc * 4); + } while (h > 0); +} + +#if CONFIG_AV1_HIGHBITDEPTH +static INLINE void calc_ab_internal_hbd(int32_t *A, uint16_t *A16, + uint16_t *B16, int32_t *B, + const int buf_stride, const int width, + const int height, const int bit_depth, + const int r, const int s, + const int ht_inc) { + int32_t *src1, *dst2, count = 0; + uint16_t *dst_A16, *src2; + const uint32_t n = (2 * r + 1) * (2 * r + 1); + const int16x8_t bd_min_2_vec = vdupq_n_s16(-(bit_depth - 8)); + const int32x4_t bd_min_1_vec = vdupq_n_s32(-((bit_depth - 8) << 1)); + const uint32x4_t const_n_val = vdupq_n_u32(n); + const uint16x8_t sgrproj_sgr = vdupq_n_u16(SGRPROJ_SGR); + const uint16x4_t one_by_n_minus_1_vec = vdup_n_u16(av1_one_by_x[n - 1]); + const uint32x4_t const_val = vdupq_n_u32(255); + + int32x4_t sr0, sr1, sr2, sr3, sr4, sr5, sr6, sr7; + uint16x8_t s16_0, s16_1, s16_2, s16_3; + uint16x8_t s16_4, s16_5, s16_6, s16_7; + uint32x4_t s0, s1, s2, s3, s4, s5, s6, s7; + + const uint32x4_t s_vec = vdupq_n_u32(s); + int w, h = height; + + do { + src1 = A + (count << 2) * buf_stride; + src2 = B16 + (count << 2) * buf_stride; + dst2 = B + (count << 2) * buf_stride; + dst_A16 = A16 + (count << 2) * buf_stride; + w = width; + do { + load_s32_4x4(src1, buf_stride, &sr0, &sr1, &sr2, &sr3); + load_s32_4x4(src1 + 4, buf_stride, &sr4, &sr5, &sr6, &sr7); + load_u16_8x4(src2, buf_stride, &s16_0, &s16_1, &s16_2, &s16_3); + + s0 = vrshlq_u32(vreinterpretq_u32_s32(sr0), bd_min_1_vec); + s1 = vrshlq_u32(vreinterpretq_u32_s32(sr1), bd_min_1_vec); + s2 = vrshlq_u32(vreinterpretq_u32_s32(sr2), bd_min_1_vec); + s3 = vrshlq_u32(vreinterpretq_u32_s32(sr3), bd_min_1_vec); + s4 = vrshlq_u32(vreinterpretq_u32_s32(sr4), bd_min_1_vec); + s5 = vrshlq_u32(vreinterpretq_u32_s32(sr5), bd_min_1_vec); + s6 = vrshlq_u32(vreinterpretq_u32_s32(sr6), bd_min_1_vec); + s7 = vrshlq_u32(vreinterpretq_u32_s32(sr7), bd_min_1_vec); + + s16_4 = vrshlq_u16(s16_0, bd_min_2_vec); + s16_5 = vrshlq_u16(s16_1, bd_min_2_vec); + s16_6 = vrshlq_u16(s16_2, bd_min_2_vec); + s16_7 = 
vrshlq_u16(s16_3, bd_min_2_vec); + + calc_ab_internal_common( + s0, s1, s2, s3, s4, s5, s6, s7, s16_0, s16_1, s16_2, s16_3, s16_4, + s16_5, s16_6, s16_7, const_n_val, s_vec, const_val, + one_by_n_minus_1_vec, sgrproj_sgr, src1, dst_A16, dst2, buf_stride); + + w -= 8; + dst2 += 8; + src1 += 8; + src2 += 8; + dst_A16 += 8; + } while (w > 0); + count++; + h -= (ht_inc * 4); + } while (h > 0); +} +#endif // CONFIG_AV1_HIGHBITDEPTH + +static INLINE void calc_ab_fast_internal_lbd(int32_t *A, uint16_t *A16, + int32_t *B, const int buf_stride, + const int width, const int height, + const int r, const int s, + const int ht_inc) { + int32_t *src1, *src2, count = 0; + uint16_t *dst_A16; + const uint32_t n = (2 * r + 1) * (2 * r + 1); + const uint32x4_t const_n_val = vdupq_n_u32(n); + const uint16x4_t sgrproj_sgr = vdup_n_u16(SGRPROJ_SGR); + const uint32x4_t one_by_n_minus_1_vec = vdupq_n_u32(av1_one_by_x[n - 1]); + const uint32x4_t const_val = vdupq_n_u32(255); + + int32x4_t sr0, sr1, sr2, sr3, sr4, sr5, sr6, sr7; + uint32x4_t s0, s1, s2, s3, s4, s5, s6, s7; + + const uint32x4_t s_vec = vdupq_n_u32(s); + int w, h = height; + + do { + src1 = A + (count << 2) * buf_stride; + src2 = B + (count << 2) * buf_stride; + dst_A16 = A16 + (count << 2) * buf_stride; + w = width; + do { + load_s32_4x4(src1, buf_stride, &sr0, &sr1, &sr2, &sr3); + load_s32_4x4(src2, buf_stride, &sr4, &sr5, &sr6, &sr7); + + s0 = vreinterpretq_u32_s32(sr0); + s1 = vreinterpretq_u32_s32(sr1); + s2 = vreinterpretq_u32_s32(sr2); + s3 = vreinterpretq_u32_s32(sr3); + s4 = vreinterpretq_u32_s32(sr4); + s5 = vreinterpretq_u32_s32(sr5); + s6 = vreinterpretq_u32_s32(sr6); + s7 = vreinterpretq_u32_s32(sr7); + + calc_ab_fast_internal_common(s0, s1, s2, s3, s4, s5, s6, s7, sr4, sr5, + sr6, sr7, const_n_val, s_vec, const_val, + one_by_n_minus_1_vec, sgrproj_sgr, src1, + dst_A16, src2, buf_stride); + + w -= 4; + src1 += 4; + src2 += 4; + dst_A16 += 4; + } while (w > 0); + count++; + h -= (ht_inc * 4); + } while (h > 0); +} + +#if CONFIG_AV1_HIGHBITDEPTH +static INLINE void calc_ab_fast_internal_hbd(int32_t *A, uint16_t *A16, + int32_t *B, const int buf_stride, + const int width, const int height, + const int bit_depth, const int r, + const int s, const int ht_inc) { + int32_t *src1, *src2, count = 0; + uint16_t *dst_A16; + const uint32_t n = (2 * r + 1) * (2 * r + 1); + const int32x4_t bd_min_2_vec = vdupq_n_s32(-(bit_depth - 8)); + const int32x4_t bd_min_1_vec = vdupq_n_s32(-((bit_depth - 8) << 1)); + const uint32x4_t const_n_val = vdupq_n_u32(n); + const uint16x4_t sgrproj_sgr = vdup_n_u16(SGRPROJ_SGR); + const uint32x4_t one_by_n_minus_1_vec = vdupq_n_u32(av1_one_by_x[n - 1]); + const uint32x4_t const_val = vdupq_n_u32(255); + + int32x4_t sr0, sr1, sr2, sr3, sr4, sr5, sr6, sr7; + uint32x4_t s0, s1, s2, s3, s4, s5, s6, s7; + + const uint32x4_t s_vec = vdupq_n_u32(s); + int w, h = height; + + do { + src1 = A + (count << 2) * buf_stride; + src2 = B + (count << 2) * buf_stride; + dst_A16 = A16 + (count << 2) * buf_stride; + w = width; + do { + load_s32_4x4(src1, buf_stride, &sr0, &sr1, &sr2, &sr3); + load_s32_4x4(src2, buf_stride, &sr4, &sr5, &sr6, &sr7); + + s0 = vrshlq_u32(vreinterpretq_u32_s32(sr0), bd_min_1_vec); + s1 = vrshlq_u32(vreinterpretq_u32_s32(sr1), bd_min_1_vec); + s2 = vrshlq_u32(vreinterpretq_u32_s32(sr2), bd_min_1_vec); + s3 = vrshlq_u32(vreinterpretq_u32_s32(sr3), bd_min_1_vec); + s4 = vrshlq_u32(vreinterpretq_u32_s32(sr4), bd_min_2_vec); + s5 = vrshlq_u32(vreinterpretq_u32_s32(sr5), bd_min_2_vec); + s6 = 
vrshlq_u32(vreinterpretq_u32_s32(sr6), bd_min_2_vec); + s7 = vrshlq_u32(vreinterpretq_u32_s32(sr7), bd_min_2_vec); + + calc_ab_fast_internal_common(s0, s1, s2, s3, s4, s5, s6, s7, sr4, sr5, + sr6, sr7, const_n_val, s_vec, const_val, + one_by_n_minus_1_vec, sgrproj_sgr, src1, + dst_A16, src2, buf_stride); + + w -= 4; + src1 += 4; + src2 += 4; + dst_A16 += 4; + } while (w > 0); + count++; + h -= (ht_inc * 4); + } while (h > 0); +} +#endif // CONFIG_AV1_HIGHBITDEPTH + +static INLINE void boxsum1(int16_t *src, const int src_stride, uint16_t *dst1, + int32_t *dst2, const int dst_stride, const int width, + const int height) { + assert(width > 2 * SGRPROJ_BORDER_HORZ); + assert(height > 2 * SGRPROJ_BORDER_VERT); + + int16_t *src_ptr; + int32_t *dst2_ptr; + uint16_t *dst1_ptr; + int h, w, count = 0; + + w = width; + { + int16x8_t s1, s2, s3, s4, s5, s6, s7, s8; + int16x8_t q23, q34, q56, q234, q345, q456, q567; + int32x4_t r23, r56, r345, r456, r567, r78, r678; + int32x4_t r4_low, r4_high, r34_low, r34_high, r234_low, r234_high; + int32x4_t r2, r3, r5, r6, r7, r8; + int16x8_t q678, q78; + + do { + dst1_ptr = dst1 + (count << 3); + dst2_ptr = dst2 + (count << 3); + src_ptr = src + (count << 3); + h = height; + + load_s16_8x4(src_ptr, src_stride, &s1, &s2, &s3, &s4); + src_ptr += 4 * src_stride; + + q23 = vaddq_s16(s2, s3); + q234 = vaddq_s16(q23, s4); + q34 = vaddq_s16(s3, s4); + dst1_ptr += (dst_stride << 1); + + r2 = vmull_s16(vget_low_s16(s2), vget_low_s16(s2)); + r3 = vmull_s16(vget_low_s16(s3), vget_low_s16(s3)); + r4_low = vmull_s16(vget_low_s16(s4), vget_low_s16(s4)); + r23 = vaddq_s32(r2, r3); + r234_low = vaddq_s32(r23, r4_low); + r34_low = vaddq_s32(r3, r4_low); + + r2 = vmull_s16(vget_high_s16(s2), vget_high_s16(s2)); + r3 = vmull_s16(vget_high_s16(s3), vget_high_s16(s3)); + r4_high = vmull_s16(vget_high_s16(s4), vget_high_s16(s4)); + r23 = vaddq_s32(r2, r3); + r234_high = vaddq_s32(r23, r4_high); + r34_high = vaddq_s32(r3, r4_high); + + dst2_ptr += (dst_stride << 1); + + do { + load_s16_8x4(src_ptr, src_stride, &s5, &s6, &s7, &s8); + src_ptr += 4 * src_stride; + + q345 = vaddq_s16(s5, q34); + q56 = vaddq_s16(s5, s6); + q456 = vaddq_s16(s4, q56); + q567 = vaddq_s16(s7, q56); + q78 = vaddq_s16(s7, s8); + q678 = vaddq_s16(s6, q78); + + store_s16_8x4((int16_t *)dst1_ptr, dst_stride, q234, q345, q456, q567); + dst1_ptr += (dst_stride << 2); + + s4 = s8; + q34 = q78; + q234 = q678; + + r5 = vmull_s16(vget_low_s16(s5), vget_low_s16(s5)); + r6 = vmull_s16(vget_low_s16(s6), vget_low_s16(s6)); + r7 = vmull_s16(vget_low_s16(s7), vget_low_s16(s7)); + r8 = vmull_s16(vget_low_s16(s8), vget_low_s16(s8)); + + r345 = vaddq_s32(r5, r34_low); + r56 = vaddq_s32(r5, r6); + r456 = vaddq_s32(r4_low, r56); + r567 = vaddq_s32(r7, r56); + r78 = vaddq_s32(r7, r8); + r678 = vaddq_s32(r6, r78); + store_s32_4x4(dst2_ptr, dst_stride, r234_low, r345, r456, r567); + + r4_low = r8; + r34_low = r78; + r234_low = r678; + + r5 = vmull_s16(vget_high_s16(s5), vget_high_s16(s5)); + r6 = vmull_s16(vget_high_s16(s6), vget_high_s16(s6)); + r7 = vmull_s16(vget_high_s16(s7), vget_high_s16(s7)); + r8 = vmull_s16(vget_high_s16(s8), vget_high_s16(s8)); + + r345 = vaddq_s32(r5, r34_high); + r56 = vaddq_s32(r5, r6); + r456 = vaddq_s32(r4_high, r56); + r567 = vaddq_s32(r7, r56); + r78 = vaddq_s32(r7, r8); + r678 = vaddq_s32(r6, r78); + store_s32_4x4((dst2_ptr + 4), dst_stride, r234_high, r345, r456, r567); + dst2_ptr += (dst_stride << 2); + + r4_high = r8; + r34_high = r78; + r234_high = r678; + + h -= 4; + } while (h > 0); + w -= 8; + 
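+      // At this point dst1 holds the vertical 3-row pixel sums and dst2 the
+      // vertical 3-row sums of squares for this 8-wide strip. The
+      // transpose-based second pass below adds the matching 3-column sums,
+      // completing the (2r+1)x(2r+1) = 3x3 box sums that boxsum1 produces.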
count++; + } while (w > 0); + + // memset needed for row pixels as 2nd stage of boxsum filter uses + // first 2 rows of dst1, dst2 buffer which is not filled in first stage. + for (int x = 0; x < 2; x++) { + memset(dst1 + x * dst_stride, 0, (width + 4) * sizeof(*dst1)); + memset(dst2 + x * dst_stride, 0, (width + 4) * sizeof(*dst2)); + } + + // memset needed for extra columns as 2nd stage of boxsum filter uses + // last 2 columns of dst1, dst2 buffer which is not filled in first stage. + for (int x = 2; x < height + 2; x++) { + int dst_offset = x * dst_stride + width + 2; + memset(dst1 + dst_offset, 0, 3 * sizeof(*dst1)); + memset(dst2 + dst_offset, 0, 3 * sizeof(*dst2)); + } + } + + { + int16x4_t d1, d2, d3, d4, d5, d6, d7, d8; + int16x4_t q23, q34, q56, q234, q345, q456, q567; + int32x4_t r23, r56, r234, r345, r456, r567, r34, r78, r678; + int32x4_t r1, r2, r3, r4, r5, r6, r7, r8; + int16x4_t q678, q78; + + int32_t *src2_ptr; + uint16_t *src1_ptr; + count = 0; + h = height; + w = width; + do { + dst1_ptr = dst1 + (count << 2) * dst_stride; + dst2_ptr = dst2 + (count << 2) * dst_stride; + src1_ptr = dst1 + (count << 2) * dst_stride; + src2_ptr = dst2 + (count << 2) * dst_stride; + w = width; + + load_s16_4x4((int16_t *)src1_ptr, dst_stride, &d1, &d2, &d3, &d4); + transpose_elems_inplace_s16_4x4(&d1, &d2, &d3, &d4); + load_s32_4x4(src2_ptr, dst_stride, &r1, &r2, &r3, &r4); + transpose_elems_inplace_s32_4x4(&r1, &r2, &r3, &r4); + src1_ptr += 4; + src2_ptr += 4; + + q23 = vadd_s16(d2, d3); + q234 = vadd_s16(q23, d4); + q34 = vadd_s16(d3, d4); + dst1_ptr += 2; + r23 = vaddq_s32(r2, r3); + r234 = vaddq_s32(r23, r4); + r34 = vaddq_s32(r3, r4); + dst2_ptr += 2; + + do { + load_s16_4x4((int16_t *)src1_ptr, dst_stride, &d5, &d6, &d7, &d8); + transpose_elems_inplace_s16_4x4(&d5, &d6, &d7, &d8); + load_s32_4x4(src2_ptr, dst_stride, &r5, &r6, &r7, &r8); + transpose_elems_inplace_s32_4x4(&r5, &r6, &r7, &r8); + src1_ptr += 4; + src2_ptr += 4; + + q345 = vadd_s16(d5, q34); + q56 = vadd_s16(d5, d6); + q456 = vadd_s16(d4, q56); + q567 = vadd_s16(d7, q56); + q78 = vadd_s16(d7, d8); + q678 = vadd_s16(d6, q78); + transpose_elems_inplace_s16_4x4(&q234, &q345, &q456, &q567); + store_s16_4x4((int16_t *)dst1_ptr, dst_stride, q234, q345, q456, q567); + dst1_ptr += 4; + + d4 = d8; + q34 = q78; + q234 = q678; + + r345 = vaddq_s32(r5, r34); + r56 = vaddq_s32(r5, r6); + r456 = vaddq_s32(r4, r56); + r567 = vaddq_s32(r7, r56); + r78 = vaddq_s32(r7, r8); + r678 = vaddq_s32(r6, r78); + transpose_elems_inplace_s32_4x4(&r234, &r345, &r456, &r567); + store_s32_4x4(dst2_ptr, dst_stride, r234, r345, r456, r567); + dst2_ptr += 4; + + r4 = r8; + r34 = r78; + r234 = r678; + w -= 4; + } while (w > 0); + h -= 4; + count++; + } while (h > 0); + } +} + +static INLINE int32x4_t cross_sum_inp_s32(int32_t *buf, int buf_stride) { + int32x4_t xtr, xt, xtl, xl, x, xr, xbr, xb, xbl; + int32x4_t fours, threes, res; + + xtl = vld1q_s32(buf - buf_stride - 1); + xt = vld1q_s32(buf - buf_stride); + xtr = vld1q_s32(buf - buf_stride + 1); + xl = vld1q_s32(buf - 1); + x = vld1q_s32(buf); + xr = vld1q_s32(buf + 1); + xbl = vld1q_s32(buf + buf_stride - 1); + xb = vld1q_s32(buf + buf_stride); + xbr = vld1q_s32(buf + buf_stride + 1); + + fours = vaddq_s32(xl, vaddq_s32(xt, vaddq_s32(xr, vaddq_s32(xb, x)))); + threes = vaddq_s32(xtl, vaddq_s32(xtr, vaddq_s32(xbr, xbl))); + res = vsubq_s32(vshlq_n_s32(vaddq_s32(fours, threes), 2), threes); + return res; +} + +static INLINE void cross_sum_inp_u16(uint16_t *buf, int buf_stride, + int32x4_t *a0, int32x4_t 
*a1) { + uint16x8_t xtr, xt, xtl, xl, x, xr, xbr, xb, xbl; + uint16x8_t r0, r1; + + xtl = vld1q_u16(buf - buf_stride - 1); + xt = vld1q_u16(buf - buf_stride); + xtr = vld1q_u16(buf - buf_stride + 1); + xl = vld1q_u16(buf - 1); + x = vld1q_u16(buf); + xr = vld1q_u16(buf + 1); + xbl = vld1q_u16(buf + buf_stride - 1); + xb = vld1q_u16(buf + buf_stride); + xbr = vld1q_u16(buf + buf_stride + 1); + + xb = vaddq_u16(xb, x); + xt = vaddq_u16(xt, xr); + xl = vaddq_u16(xl, xb); + xl = vaddq_u16(xl, xt); + + r0 = vshlq_n_u16(xl, 2); + + xbl = vaddq_u16(xbl, xbr); + xtl = vaddq_u16(xtl, xtr); + xtl = vaddq_u16(xtl, xbl); + + r1 = vshlq_n_u16(xtl, 2); + r1 = vsubq_u16(r1, xtl); + + *a0 = vreinterpretq_s32_u32( + vaddq_u32(vmovl_u16(vget_low_u16(r0)), vmovl_u16(vget_low_u16(r1)))); + *a1 = vreinterpretq_s32_u32( + vaddq_u32(vmovl_u16(vget_high_u16(r0)), vmovl_u16(vget_high_u16(r1)))); +} + +static INLINE int32x4_t cross_sum_fast_even_row(int32_t *buf, int buf_stride) { + int32x4_t xtr, xt, xtl, xbr, xb, xbl; + int32x4_t fives, sixes, fives_plus_sixes; + + xtl = vld1q_s32(buf - buf_stride - 1); + xt = vld1q_s32(buf - buf_stride); + xtr = vld1q_s32(buf - buf_stride + 1); + xbl = vld1q_s32(buf + buf_stride - 1); + xb = vld1q_s32(buf + buf_stride); + xbr = vld1q_s32(buf + buf_stride + 1); + + fives = vaddq_s32(xtl, vaddq_s32(xtr, vaddq_s32(xbr, xbl))); + sixes = vaddq_s32(xt, xb); + fives_plus_sixes = vaddq_s32(fives, sixes); + + return vaddq_s32( + vaddq_s32(vshlq_n_s32(fives_plus_sixes, 2), fives_plus_sixes), sixes); +} + +static INLINE void cross_sum_fast_even_row_inp16(uint16_t *buf, int buf_stride, + int32x4_t *a0, int32x4_t *a1) { + uint16x8_t xtr, xt, xtl, xbr, xb, xbl, xb0; + + xtl = vld1q_u16(buf - buf_stride - 1); + xt = vld1q_u16(buf - buf_stride); + xtr = vld1q_u16(buf - buf_stride + 1); + xbl = vld1q_u16(buf + buf_stride - 1); + xb = vld1q_u16(buf + buf_stride); + xbr = vld1q_u16(buf + buf_stride + 1); + + xbr = vaddq_u16(xbr, xbl); + xtr = vaddq_u16(xtr, xtl); + xbr = vaddq_u16(xbr, xtr); + xtl = vshlq_n_u16(xbr, 2); + xbr = vaddq_u16(xtl, xbr); + + xb = vaddq_u16(xb, xt); + xb0 = vshlq_n_u16(xb, 1); + xb = vshlq_n_u16(xb, 2); + xb = vaddq_u16(xb, xb0); + + *a0 = vreinterpretq_s32_u32( + vaddq_u32(vmovl_u16(vget_low_u16(xbr)), vmovl_u16(vget_low_u16(xb)))); + *a1 = vreinterpretq_s32_u32( + vaddq_u32(vmovl_u16(vget_high_u16(xbr)), vmovl_u16(vget_high_u16(xb)))); +} + +static INLINE int32x4_t cross_sum_fast_odd_row(int32_t *buf) { + int32x4_t xl, x, xr; + int32x4_t fives, sixes, fives_plus_sixes; + + xl = vld1q_s32(buf - 1); + x = vld1q_s32(buf); + xr = vld1q_s32(buf + 1); + fives = vaddq_s32(xl, xr); + sixes = x; + fives_plus_sixes = vaddq_s32(fives, sixes); + + return vaddq_s32( + vaddq_s32(vshlq_n_s32(fives_plus_sixes, 2), fives_plus_sixes), sixes); +} + +static INLINE void cross_sum_fast_odd_row_inp16(uint16_t *buf, int32x4_t *a0, + int32x4_t *a1) { + uint16x8_t xl, x, xr; + uint16x8_t x0; + + xl = vld1q_u16(buf - 1); + x = vld1q_u16(buf); + xr = vld1q_u16(buf + 1); + xl = vaddq_u16(xl, xr); + x0 = vshlq_n_u16(xl, 2); + xl = vaddq_u16(xl, x0); + + x0 = vshlq_n_u16(x, 1); + x = vshlq_n_u16(x, 2); + x = vaddq_u16(x, x0); + + *a0 = vreinterpretq_s32_u32( + vaddq_u32(vmovl_u16(vget_low_u16(xl)), vmovl_u16(vget_low_u16(x)))); + *a1 = vreinterpretq_s32_u32( + vaddq_u32(vmovl_u16(vget_high_u16(xl)), vmovl_u16(vget_high_u16(x)))); +} + +static void final_filter_fast_internal(uint16_t *A, int32_t *B, + const int buf_stride, int16_t *src, + const int src_stride, int32_t *dst, + const int dst_stride, 
const int width, + const int height) { + int16x8_t s0; + int32_t *B_tmp, *dst_ptr; + uint16_t *A_tmp; + int16_t *src_ptr; + int32x4_t a_res0, a_res1, b_res0, b_res1; + int w, h, count = 0; + assert(SGRPROJ_SGR_BITS == 8); + assert(SGRPROJ_RST_BITS == 4); + + A_tmp = A; + B_tmp = B; + src_ptr = src; + dst_ptr = dst; + h = height; + do { + A_tmp = (A + count * buf_stride); + B_tmp = (B + count * buf_stride); + src_ptr = (src + count * src_stride); + dst_ptr = (dst + count * dst_stride); + w = width; + if (!(count & 1)) { + do { + s0 = vld1q_s16(src_ptr); + cross_sum_fast_even_row_inp16(A_tmp, buf_stride, &a_res0, &a_res1); + a_res0 = vmulq_s32(vmovl_s16(vget_low_s16(s0)), a_res0); + a_res1 = vmulq_s32(vmovl_s16(vget_high_s16(s0)), a_res1); + + b_res0 = cross_sum_fast_even_row(B_tmp, buf_stride); + b_res1 = cross_sum_fast_even_row(B_tmp + 4, buf_stride); + a_res0 = vaddq_s32(a_res0, b_res0); + a_res1 = vaddq_s32(a_res1, b_res1); + + a_res0 = + vrshrq_n_s32(a_res0, SGRPROJ_SGR_BITS + NB_EVEN - SGRPROJ_RST_BITS); + a_res1 = + vrshrq_n_s32(a_res1, SGRPROJ_SGR_BITS + NB_EVEN - SGRPROJ_RST_BITS); + + vst1q_s32(dst_ptr, a_res0); + vst1q_s32(dst_ptr + 4, a_res1); + + A_tmp += 8; + B_tmp += 8; + src_ptr += 8; + dst_ptr += 8; + w -= 8; + } while (w > 0); + } else { + do { + s0 = vld1q_s16(src_ptr); + cross_sum_fast_odd_row_inp16(A_tmp, &a_res0, &a_res1); + a_res0 = vmulq_s32(vmovl_s16(vget_low_s16(s0)), a_res0); + a_res1 = vmulq_s32(vmovl_s16(vget_high_s16(s0)), a_res1); + + b_res0 = cross_sum_fast_odd_row(B_tmp); + b_res1 = cross_sum_fast_odd_row(B_tmp + 4); + a_res0 = vaddq_s32(a_res0, b_res0); + a_res1 = vaddq_s32(a_res1, b_res1); + + a_res0 = + vrshrq_n_s32(a_res0, SGRPROJ_SGR_BITS + NB_ODD - SGRPROJ_RST_BITS); + a_res1 = + vrshrq_n_s32(a_res1, SGRPROJ_SGR_BITS + NB_ODD - SGRPROJ_RST_BITS); + + vst1q_s32(dst_ptr, a_res0); + vst1q_s32(dst_ptr + 4, a_res1); + + A_tmp += 8; + B_tmp += 8; + src_ptr += 8; + dst_ptr += 8; + w -= 8; + } while (w > 0); + } + count++; + h -= 1; + } while (h > 0); +} + +void final_filter_internal(uint16_t *A, int32_t *B, const int buf_stride, + int16_t *src, const int src_stride, int32_t *dst, + const int dst_stride, const int width, + const int height) { + int16x8_t s0; + int32_t *B_tmp, *dst_ptr; + uint16_t *A_tmp; + int16_t *src_ptr; + int32x4_t a_res0, a_res1, b_res0, b_res1; + int w, h, count = 0; + + assert(SGRPROJ_SGR_BITS == 8); + assert(SGRPROJ_RST_BITS == 4); + h = height; + + do { + A_tmp = (A + count * buf_stride); + B_tmp = (B + count * buf_stride); + src_ptr = (src + count * src_stride); + dst_ptr = (dst + count * dst_stride); + w = width; + do { + s0 = vld1q_s16(src_ptr); + cross_sum_inp_u16(A_tmp, buf_stride, &a_res0, &a_res1); + a_res0 = vmulq_s32(vmovl_s16(vget_low_s16(s0)), a_res0); + a_res1 = vmulq_s32(vmovl_s16(vget_high_s16(s0)), a_res1); + + b_res0 = cross_sum_inp_s32(B_tmp, buf_stride); + b_res1 = cross_sum_inp_s32(B_tmp + 4, buf_stride); + a_res0 = vaddq_s32(a_res0, b_res0); + a_res1 = vaddq_s32(a_res1, b_res1); + + a_res0 = + vrshrq_n_s32(a_res0, SGRPROJ_SGR_BITS + NB_EVEN - SGRPROJ_RST_BITS); + a_res1 = + vrshrq_n_s32(a_res1, SGRPROJ_SGR_BITS + NB_EVEN - SGRPROJ_RST_BITS); + vst1q_s32(dst_ptr, a_res0); + vst1q_s32(dst_ptr + 4, a_res1); + + A_tmp += 8; + B_tmp += 8; + src_ptr += 8; + dst_ptr += 8; + w -= 8; + } while (w > 0); + count++; + h -= 1; + } while (h > 0); +} + +static INLINE void restoration_fast_internal(uint16_t *dgd16, int width, + int height, int dgd_stride, + int32_t *dst, int dst_stride, + int bit_depth, int sgr_params_idx, + int 
radius_idx) { + const sgr_params_type *const params = &av1_sgr_params[sgr_params_idx]; + const int r = params->r[radius_idx]; + const int width_ext = width + 2 * SGRPROJ_BORDER_HORZ; + const int height_ext = height + 2 * SGRPROJ_BORDER_VERT; + + const int buf_stride = ((width_ext + 3) & ~3) + 16; + int32_t A_[RESTORATION_PROC_UNIT_PELS]; + uint16_t A16_[RESTORATION_PROC_UNIT_PELS]; + int32_t B_[RESTORATION_PROC_UNIT_PELS]; + int32_t *square_sum_buf = A_; + int32_t *sum_buf = B_; + uint16_t *tmp16_buf = A16_; + + assert(r <= MAX_RADIUS && "Need MAX_RADIUS >= r"); + assert(r <= SGRPROJ_BORDER_VERT - 1 && r <= SGRPROJ_BORDER_HORZ - 1 && + "Need SGRPROJ_BORDER_* >= r+1"); + + assert(radius_idx == 0); + assert(r == 2); + + // input(dgd16) is 16bit. + // sum of pixels 1st stage output will be in 16bit(tmp16_buf). End output is + // kept in 32bit [sum_buf]. sum of squares output is kept in 32bit + // buffer(square_sum_buf). + boxsum2((int16_t *)(dgd16 - dgd_stride * SGRPROJ_BORDER_VERT - + SGRPROJ_BORDER_HORZ), + dgd_stride, (int16_t *)tmp16_buf, sum_buf, square_sum_buf, buf_stride, + width_ext, height_ext); + + square_sum_buf += SGRPROJ_BORDER_VERT * buf_stride + SGRPROJ_BORDER_HORZ; + sum_buf += SGRPROJ_BORDER_VERT * buf_stride + SGRPROJ_BORDER_HORZ; + tmp16_buf += SGRPROJ_BORDER_VERT * buf_stride + SGRPROJ_BORDER_HORZ; + + // Calculation of a, b. a output is in 16bit tmp_buf which is in range of + // [1, 256] for all bit depths. b output is kept in 32bit buffer. + +#if CONFIG_AV1_HIGHBITDEPTH + if (bit_depth > 8) { + calc_ab_fast_internal_hbd( + (square_sum_buf - buf_stride - 1), (tmp16_buf - buf_stride - 1), + (sum_buf - buf_stride - 1), buf_stride * 2, width + 2, height + 2, + bit_depth, r, params->s[radius_idx], 2); + } else { + calc_ab_fast_internal_lbd( + (square_sum_buf - buf_stride - 1), (tmp16_buf - buf_stride - 1), + (sum_buf - buf_stride - 1), buf_stride * 2, width + 2, height + 2, r, + params->s[radius_idx], 2); + } +#else + (void)bit_depth; + calc_ab_fast_internal_lbd((square_sum_buf - buf_stride - 1), + (tmp16_buf - buf_stride - 1), + (sum_buf - buf_stride - 1), buf_stride * 2, + width + 2, height + 2, r, params->s[radius_idx], 2); +#endif + final_filter_fast_internal(tmp16_buf, sum_buf, buf_stride, (int16_t *)dgd16, + dgd_stride, dst, dst_stride, width, height); +} + +static INLINE void restoration_internal(uint16_t *dgd16, int width, int height, + int dgd_stride, int32_t *dst, + int dst_stride, int bit_depth, + int sgr_params_idx, int radius_idx) { + const sgr_params_type *const params = &av1_sgr_params[sgr_params_idx]; + const int r = params->r[radius_idx]; + const int width_ext = width + 2 * SGRPROJ_BORDER_HORZ; + const int height_ext = height + 2 * SGRPROJ_BORDER_VERT; + + int buf_stride = ((width_ext + 3) & ~3) + 16; + int32_t A_[RESTORATION_PROC_UNIT_PELS]; + uint16_t A16_[RESTORATION_PROC_UNIT_PELS]; + uint16_t B16_[RESTORATION_PROC_UNIT_PELS]; + int32_t B_[RESTORATION_PROC_UNIT_PELS]; + int32_t *square_sum_buf = A_; + uint16_t *sum_buf = B16_; + uint16_t *A16 = A16_; + int32_t *B = B_; + + assert(r <= MAX_RADIUS && "Need MAX_RADIUS >= r"); + assert(r <= SGRPROJ_BORDER_VERT - 1 && r <= SGRPROJ_BORDER_HORZ - 1 && + "Need SGRPROJ_BORDER_* >= r+1"); + + assert(radius_idx == 1); + assert(r == 1); + + // input(dgd16) is 16bit. + // sum of pixels output will be in 16bit(sum_buf). + // sum of squares output is kept in 32bit buffer(square_sum_buf). 
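+  // For reference, a rough scalar sketch of the per-pixel math that the
+  // boxsum/calc_ab pipeline implements (hedged; av1_x_by_xplus1 and the exact
+  // rounding constants below are taken from the libaom C reference, not from
+  // this file; for bit depths above 8 the sums are first scaled back towards
+  // an 8-bit range, as the bd_min_* shifts in calc_ab_internal_hbd do):
+  //   n = (2 * r + 1) * (2 * r + 1);
+  //   p = AOMMAX(0, sum_sq * n - sum * sum);   // scaled local variance
+  //   z = ROUND_POWER_OF_TWO(p * s, SGRPROJ_MTGEN_BITS);
+  //   a = av1_x_by_xplus1[AOMMIN(z, 255)];     // the "A" output, in [1, 256]
+  //   b = ROUND_POWER_OF_TWO((SGRPROJ_SGR - a) * sum * av1_one_by_x[n - 1],
+  //                          SGRPROJ_RECIP_BITS);  // the "B" output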
+ boxsum1((int16_t *)(dgd16 - dgd_stride * SGRPROJ_BORDER_VERT - + SGRPROJ_BORDER_HORZ), + dgd_stride, sum_buf, square_sum_buf, buf_stride, width_ext, + height_ext); + + square_sum_buf += SGRPROJ_BORDER_VERT * buf_stride + SGRPROJ_BORDER_HORZ; + B += SGRPROJ_BORDER_VERT * buf_stride + SGRPROJ_BORDER_HORZ; + A16 += SGRPROJ_BORDER_VERT * buf_stride + SGRPROJ_BORDER_HORZ; + sum_buf += SGRPROJ_BORDER_VERT * buf_stride + SGRPROJ_BORDER_HORZ; + +#if CONFIG_AV1_HIGHBITDEPTH + // Calculation of a, b. a output is in 16bit tmp_buf which is in range of + // [1, 256] for all bit depths. b output is kept in 32bit buffer. + if (bit_depth > 8) { + calc_ab_internal_hbd((square_sum_buf - buf_stride - 1), + (A16 - buf_stride - 1), (sum_buf - buf_stride - 1), + (B - buf_stride - 1), buf_stride, width + 2, + height + 2, bit_depth, r, params->s[radius_idx], 1); + } else { + calc_ab_internal_lbd((square_sum_buf - buf_stride - 1), + (A16 - buf_stride - 1), (sum_buf - buf_stride - 1), + (B - buf_stride - 1), buf_stride, width + 2, + height + 2, r, params->s[radius_idx], 1); + } +#else + (void)bit_depth; + calc_ab_internal_lbd((square_sum_buf - buf_stride - 1), + (A16 - buf_stride - 1), (sum_buf - buf_stride - 1), + (B - buf_stride - 1), buf_stride, width + 2, height + 2, + r, params->s[radius_idx], 1); +#endif + final_filter_internal(A16, B, buf_stride, (int16_t *)dgd16, dgd_stride, dst, + dst_stride, width, height); +} + +static INLINE void src_convert_u8_to_u16(const uint8_t *src, + const int src_stride, uint16_t *dst, + const int dst_stride, const int width, + const int height) { + const uint8_t *src_ptr; + uint16_t *dst_ptr; + int h, w, count = 0; + + uint8x8_t t1, t2, t3, t4; + uint16x8_t s1, s2, s3, s4; + h = height; + do { + src_ptr = src + (count << 2) * src_stride; + dst_ptr = dst + (count << 2) * dst_stride; + w = width; + if (w >= 7) { + do { + load_u8_8x4(src_ptr, src_stride, &t1, &t2, &t3, &t4); + s1 = vmovl_u8(t1); + s2 = vmovl_u8(t2); + s3 = vmovl_u8(t3); + s4 = vmovl_u8(t4); + store_u16_8x4(dst_ptr, dst_stride, s1, s2, s3, s4); + + src_ptr += 8; + dst_ptr += 8; + w -= 8; + } while (w > 7); + } + + for (int y = 0; y < w; y++) { + dst_ptr[y] = src_ptr[y]; + dst_ptr[y + 1 * dst_stride] = src_ptr[y + 1 * src_stride]; + dst_ptr[y + 2 * dst_stride] = src_ptr[y + 2 * src_stride]; + dst_ptr[y + 3 * dst_stride] = src_ptr[y + 3 * src_stride]; + } + count++; + h -= 4; + } while (h > 3); + + src_ptr = src + (count << 2) * src_stride; + dst_ptr = dst + (count << 2) * dst_stride; + for (int x = 0; x < h; x++) { + for (int y = 0; y < width; y++) { + dst_ptr[y + x * dst_stride] = src_ptr[y + x * src_stride]; + } + } + + // memset uninitialized rows of src buffer as they are needed for the + // boxsum filter calculation. 
+ for (int x = height; x < height + 5; x++) + memset(dst + x * dst_stride, 0, (width + 2) * sizeof(*dst)); +} + +#if CONFIG_AV1_HIGHBITDEPTH +static INLINE void src_convert_hbd_copy(const uint16_t *src, int src_stride, + uint16_t *dst, const int dst_stride, + int width, int height) { + const uint16_t *src_ptr; + uint16_t *dst_ptr; + int h, w, count = 0; + uint16x8_t s1, s2, s3, s4; + + h = height; + do { + src_ptr = src + (count << 2) * src_stride; + dst_ptr = dst + (count << 2) * dst_stride; + w = width; + do { + load_u16_8x4(src_ptr, src_stride, &s1, &s2, &s3, &s4); + store_u16_8x4(dst_ptr, dst_stride, s1, s2, s3, s4); + src_ptr += 8; + dst_ptr += 8; + w -= 8; + } while (w > 7); + + for (int y = 0; y < w; y++) { + dst_ptr[y] = src_ptr[y]; + dst_ptr[y + 1 * dst_stride] = src_ptr[y + 1 * src_stride]; + dst_ptr[y + 2 * dst_stride] = src_ptr[y + 2 * src_stride]; + dst_ptr[y + 3 * dst_stride] = src_ptr[y + 3 * src_stride]; + } + count++; + h -= 4; + } while (h > 3); + + src_ptr = src + (count << 2) * src_stride; + dst_ptr = dst + (count << 2) * dst_stride; + + for (int x = 0; x < h; x++) { + memcpy((dst_ptr + x * dst_stride), (src_ptr + x * src_stride), + sizeof(uint16_t) * width); + } + // memset uninitialized rows of src buffer as they are needed for the + // boxsum filter calculation. + for (int x = height; x < height + 5; x++) + memset(dst + x * dst_stride, 0, (width + 2) * sizeof(*dst)); +} +#endif // CONFIG_AV1_HIGHBITDEPTH + +int av1_selfguided_restoration_neon(const uint8_t *dat8, int width, int height, + int stride, int32_t *flt0, int32_t *flt1, + int flt_stride, int sgr_params_idx, + int bit_depth, int highbd) { + const sgr_params_type *const params = &av1_sgr_params[sgr_params_idx]; + assert(!(params->r[0] == 0 && params->r[1] == 0)); + + uint16_t dgd16_[RESTORATION_PROC_UNIT_PELS]; + const int dgd16_stride = width + 2 * SGRPROJ_BORDER_HORZ; + uint16_t *dgd16 = + dgd16_ + dgd16_stride * SGRPROJ_BORDER_VERT + SGRPROJ_BORDER_HORZ; + const int width_ext = width + 2 * SGRPROJ_BORDER_HORZ; + const int height_ext = height + 2 * SGRPROJ_BORDER_VERT; + const int dgd_stride = stride; + +#if CONFIG_AV1_HIGHBITDEPTH + if (highbd) { + const uint16_t *dgd16_tmp = CONVERT_TO_SHORTPTR(dat8); + src_convert_hbd_copy( + dgd16_tmp - SGRPROJ_BORDER_VERT * dgd_stride - SGRPROJ_BORDER_HORZ, + dgd_stride, + dgd16 - SGRPROJ_BORDER_VERT * dgd16_stride - SGRPROJ_BORDER_HORZ, + dgd16_stride, width_ext, height_ext); + } else { + src_convert_u8_to_u16( + dat8 - SGRPROJ_BORDER_VERT * dgd_stride - SGRPROJ_BORDER_HORZ, + dgd_stride, + dgd16 - SGRPROJ_BORDER_VERT * dgd16_stride - SGRPROJ_BORDER_HORZ, + dgd16_stride, width_ext, height_ext); + } +#else + (void)highbd; + src_convert_u8_to_u16( + dat8 - SGRPROJ_BORDER_VERT * dgd_stride - SGRPROJ_BORDER_HORZ, dgd_stride, + dgd16 - SGRPROJ_BORDER_VERT * dgd16_stride - SGRPROJ_BORDER_HORZ, + dgd16_stride, width_ext, height_ext); +#endif + + if (params->r[0] > 0) + restoration_fast_internal(dgd16, width, height, dgd16_stride, flt0, + flt_stride, bit_depth, sgr_params_idx, 0); + if (params->r[1] > 0) + restoration_internal(dgd16, width, height, dgd16_stride, flt1, flt_stride, + bit_depth, sgr_params_idx, 1); + return 0; +} + +int av1_apply_selfguided_restoration_neon(const uint8_t *dat8, int width, + int height, int stride, int eps, + const int *xqd, uint8_t *dst8, + int dst_stride, int32_t *tmpbuf, + int bit_depth, int highbd) { + int32_t *flt0 = tmpbuf; + int32_t *flt1 = flt0 + RESTORATION_UNITPELS_MAX; + assert(width * height <= RESTORATION_UNITPELS_MAX); + uint16_t 
dgd16_[RESTORATION_PROC_UNIT_PELS]; + const int dgd16_stride = width + 2 * SGRPROJ_BORDER_HORZ; + uint16_t *dgd16 = + dgd16_ + dgd16_stride * SGRPROJ_BORDER_VERT + SGRPROJ_BORDER_HORZ; + const int width_ext = width + 2 * SGRPROJ_BORDER_HORZ; + const int height_ext = height + 2 * SGRPROJ_BORDER_VERT; + const int dgd_stride = stride; + const sgr_params_type *const params = &av1_sgr_params[eps]; + int xq[2]; + + assert(!(params->r[0] == 0 && params->r[1] == 0)); + +#if CONFIG_AV1_HIGHBITDEPTH + if (highbd) { + const uint16_t *dgd16_tmp = CONVERT_TO_SHORTPTR(dat8); + src_convert_hbd_copy( + dgd16_tmp - SGRPROJ_BORDER_VERT * dgd_stride - SGRPROJ_BORDER_HORZ, + dgd_stride, + dgd16 - SGRPROJ_BORDER_VERT * dgd16_stride - SGRPROJ_BORDER_HORZ, + dgd16_stride, width_ext, height_ext); + } else { + src_convert_u8_to_u16( + dat8 - SGRPROJ_BORDER_VERT * dgd_stride - SGRPROJ_BORDER_HORZ, + dgd_stride, + dgd16 - SGRPROJ_BORDER_VERT * dgd16_stride - SGRPROJ_BORDER_HORZ, + dgd16_stride, width_ext, height_ext); + } +#else + (void)highbd; + src_convert_u8_to_u16( + dat8 - SGRPROJ_BORDER_VERT * dgd_stride - SGRPROJ_BORDER_HORZ, dgd_stride, + dgd16 - SGRPROJ_BORDER_VERT * dgd16_stride - SGRPROJ_BORDER_HORZ, + dgd16_stride, width_ext, height_ext); +#endif + if (params->r[0] > 0) + restoration_fast_internal(dgd16, width, height, dgd16_stride, flt0, width, + bit_depth, eps, 0); + if (params->r[1] > 0) + restoration_internal(dgd16, width, height, dgd16_stride, flt1, width, + bit_depth, eps, 1); + + av1_decode_xq(xqd, xq, params); + + { + int16_t *src_ptr; + uint8_t *dst_ptr; +#if CONFIG_AV1_HIGHBITDEPTH + uint16_t *dst16 = CONVERT_TO_SHORTPTR(dst8); + uint16_t *dst16_ptr; +#endif + int16x4_t d0, d4; + int16x8_t r0, s0; + uint16x8_t r4; + int32x4_t u0, u4, v0, v4, f00, f10; + uint8x8_t t0; + int count = 0, w = width, h = height, rc = 0; + + const int32x4_t xq0_vec = vdupq_n_s32(xq[0]); + const int32x4_t xq1_vec = vdupq_n_s32(xq[1]); + const int16x8_t zero = vdupq_n_s16(0); + const uint16x8_t max = vdupq_n_u16((1 << bit_depth) - 1); + src_ptr = (int16_t *)dgd16; + do { + w = width; + count = 0; + dst_ptr = dst8 + rc * dst_stride; +#if CONFIG_AV1_HIGHBITDEPTH + dst16_ptr = dst16 + rc * dst_stride; +#endif + do { + s0 = vld1q_s16(src_ptr + count); + + u0 = vshll_n_s16(vget_low_s16(s0), SGRPROJ_RST_BITS); + u4 = vshll_n_s16(vget_high_s16(s0), SGRPROJ_RST_BITS); + + v0 = vshlq_n_s32(u0, SGRPROJ_PRJ_BITS); + v4 = vshlq_n_s32(u4, SGRPROJ_PRJ_BITS); + + if (params->r[0] > 0) { + f00 = vld1q_s32(flt0 + count); + f10 = vld1q_s32(flt0 + count + 4); + + f00 = vsubq_s32(f00, u0); + f10 = vsubq_s32(f10, u4); + + v0 = vmlaq_s32(v0, xq0_vec, f00); + v4 = vmlaq_s32(v4, xq0_vec, f10); + } + + if (params->r[1] > 0) { + f00 = vld1q_s32(flt1 + count); + f10 = vld1q_s32(flt1 + count + 4); + + f00 = vsubq_s32(f00, u0); + f10 = vsubq_s32(f10, u4); + + v0 = vmlaq_s32(v0, xq1_vec, f00); + v4 = vmlaq_s32(v4, xq1_vec, f10); + } + + d0 = vqrshrn_n_s32(v0, SGRPROJ_PRJ_BITS + SGRPROJ_RST_BITS); + d4 = vqrshrn_n_s32(v4, SGRPROJ_PRJ_BITS + SGRPROJ_RST_BITS); + + r0 = vcombine_s16(d0, d4); + + r4 = vreinterpretq_u16_s16(vmaxq_s16(r0, zero)); + +#if CONFIG_AV1_HIGHBITDEPTH + if (highbd) { + r4 = vminq_u16(r4, max); + vst1q_u16(dst16_ptr, r4); + dst16_ptr += 8; + } else { + t0 = vqmovn_u16(r4); + vst1_u8(dst_ptr, t0); + dst_ptr += 8; + } +#else + (void)max; + t0 = vqmovn_u16(r4); + vst1_u8(dst_ptr, t0); + dst_ptr += 8; +#endif + w -= 8; + count += 8; + } while (w > 0); + + src_ptr += dgd16_stride; + flt1 += width; + flt0 += width; + rc++; + h--; + } 
while (h > 0); + } + return 0; +} diff --git a/third_party/aom/av1/common/arm/warp_plane_neon.c b/third_party/aom/av1/common/arm/warp_plane_neon.c new file mode 100644 index 0000000000..4723154398 --- /dev/null +++ b/third_party/aom/av1/common/arm/warp_plane_neon.c @@ -0,0 +1,276 @@ +/* + * Copyright (c) 2018, Alliance for Open Media. All rights reserved + * + * This source code is subject to the terms of the BSD 2 Clause License and + * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License + * was not distributed with this source code in the LICENSE file, you can + * obtain it at www.aomedia.org/license/software. If the Alliance for Open + * Media Patent License 1.0 was not distributed with this source code in the + * PATENTS file, you can obtain it at www.aomedia.org/license/patent. + */ + +#include "warp_plane_neon.h" + +static INLINE int16x8_t horizontal_filter_4x1_f4(const uint8x16_t in, int sx, + int alpha) { + const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1)); + + // Loading the 8 filter taps + int16x8_t f[4]; + load_filters_4(f, sx, alpha); + + int16x8_t in16_lo = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(in))); + int16x8_t in16_hi = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(in))); + + int16x8_t m0 = vmulq_s16(f[0], in16_lo); + int16x8_t m1 = vmulq_s16(f[1], vextq_s16(in16_lo, in16_hi, 1)); + int16x8_t m2 = vmulq_s16(f[2], vextq_s16(in16_lo, in16_hi, 2)); + int16x8_t m3 = vmulq_s16(f[3], vextq_s16(in16_lo, in16_hi, 3)); + + int32x4_t m0123_pairs[] = { vpaddlq_s16(m0), vpaddlq_s16(m1), vpaddlq_s16(m2), + vpaddlq_s16(m3) }; + + int32x4_t tmp_res_low = horizontal_add_4d_s32x4(m0123_pairs); + + tmp_res_low = vaddq_s32(tmp_res_low, add_const); + + uint16x8_t res = + vcombine_u16(vqrshrun_n_s32(tmp_res_low, ROUND0_BITS), vdup_n_u16(0)); + return vreinterpretq_s16_u16(res); +} + +static INLINE int16x8_t horizontal_filter_8x1_f8(const uint8x16_t in, int sx, + int alpha) { + const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1)); + + // Loading the 8 filter taps + int16x8_t f[8]; + load_filters_8(f, sx, alpha); + + int16x8_t in16_lo = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(in))); + int16x8_t in16_hi = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(in))); + + int16x8_t m0 = vmulq_s16(f[0], in16_lo); + int16x8_t m1 = vmulq_s16(f[1], vextq_s16(in16_lo, in16_hi, 1)); + int16x8_t m2 = vmulq_s16(f[2], vextq_s16(in16_lo, in16_hi, 2)); + int16x8_t m3 = vmulq_s16(f[3], vextq_s16(in16_lo, in16_hi, 3)); + int16x8_t m4 = vmulq_s16(f[4], vextq_s16(in16_lo, in16_hi, 4)); + int16x8_t m5 = vmulq_s16(f[5], vextq_s16(in16_lo, in16_hi, 5)); + int16x8_t m6 = vmulq_s16(f[6], vextq_s16(in16_lo, in16_hi, 6)); + int16x8_t m7 = vmulq_s16(f[7], vextq_s16(in16_lo, in16_hi, 7)); + + int32x4_t m0123_pairs[] = { vpaddlq_s16(m0), vpaddlq_s16(m1), vpaddlq_s16(m2), + vpaddlq_s16(m3) }; + int32x4_t m4567_pairs[] = { vpaddlq_s16(m4), vpaddlq_s16(m5), vpaddlq_s16(m6), + vpaddlq_s16(m7) }; + + int32x4_t tmp_res_low = horizontal_add_4d_s32x4(m0123_pairs); + int32x4_t tmp_res_high = horizontal_add_4d_s32x4(m4567_pairs); + + tmp_res_low = vaddq_s32(tmp_res_low, add_const); + tmp_res_high = vaddq_s32(tmp_res_high, add_const); + + uint16x8_t res = vcombine_u16(vqrshrun_n_s32(tmp_res_low, ROUND0_BITS), + vqrshrun_n_s32(tmp_res_high, ROUND0_BITS)); + return vreinterpretq_s16_u16(res); +} + +static INLINE int16x8_t horizontal_filter_4x1_f1(const uint8x16_t in, int sx) { + const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1)); + + int16x8_t f_s16 = + 
vld1q_s16((int16_t *)(av1_warped_filter + (sx >> WARPEDDIFF_PREC_BITS))); + + int16x8_t in16_lo = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(in))); + int16x8_t in16_hi = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(in))); + + int16x8_t m0 = vmulq_s16(f_s16, in16_lo); + int16x8_t m1 = vmulq_s16(f_s16, vextq_s16(in16_lo, in16_hi, 1)); + int16x8_t m2 = vmulq_s16(f_s16, vextq_s16(in16_lo, in16_hi, 2)); + int16x8_t m3 = vmulq_s16(f_s16, vextq_s16(in16_lo, in16_hi, 3)); + + int32x4_t m0123_pairs[] = { vpaddlq_s16(m0), vpaddlq_s16(m1), vpaddlq_s16(m2), + vpaddlq_s16(m3) }; + + int32x4_t tmp_res_low = horizontal_add_4d_s32x4(m0123_pairs); + + tmp_res_low = vaddq_s32(tmp_res_low, add_const); + + uint16x8_t res = + vcombine_u16(vqrshrun_n_s32(tmp_res_low, ROUND0_BITS), vdup_n_u16(0)); + return vreinterpretq_s16_u16(res); +} + +static INLINE int16x8_t horizontal_filter_8x1_f1(const uint8x16_t in, int sx) { + const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1)); + + int16x8_t f_s16 = + vld1q_s16((int16_t *)(av1_warped_filter + (sx >> WARPEDDIFF_PREC_BITS))); + + int16x8_t in16_lo = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(in))); + int16x8_t in16_hi = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(in))); + + int16x8_t m0 = vmulq_s16(f_s16, in16_lo); + int16x8_t m1 = vmulq_s16(f_s16, vextq_s16(in16_lo, in16_hi, 1)); + int16x8_t m2 = vmulq_s16(f_s16, vextq_s16(in16_lo, in16_hi, 2)); + int16x8_t m3 = vmulq_s16(f_s16, vextq_s16(in16_lo, in16_hi, 3)); + int16x8_t m4 = vmulq_s16(f_s16, vextq_s16(in16_lo, in16_hi, 4)); + int16x8_t m5 = vmulq_s16(f_s16, vextq_s16(in16_lo, in16_hi, 5)); + int16x8_t m6 = vmulq_s16(f_s16, vextq_s16(in16_lo, in16_hi, 6)); + int16x8_t m7 = vmulq_s16(f_s16, vextq_s16(in16_lo, in16_hi, 7)); + + int32x4_t m0123_pairs[] = { vpaddlq_s16(m0), vpaddlq_s16(m1), vpaddlq_s16(m2), + vpaddlq_s16(m3) }; + int32x4_t m4567_pairs[] = { vpaddlq_s16(m4), vpaddlq_s16(m5), vpaddlq_s16(m6), + vpaddlq_s16(m7) }; + + int32x4_t tmp_res_low = horizontal_add_4d_s32x4(m0123_pairs); + int32x4_t tmp_res_high = horizontal_add_4d_s32x4(m4567_pairs); + + tmp_res_low = vaddq_s32(tmp_res_low, add_const); + tmp_res_high = vaddq_s32(tmp_res_high, add_const); + + uint16x8_t res = vcombine_u16(vqrshrun_n_s32(tmp_res_low, ROUND0_BITS), + vqrshrun_n_s32(tmp_res_high, ROUND0_BITS)); + return vreinterpretq_s16_u16(res); +} + +static INLINE void vertical_filter_4x1_f1(const int16x8_t *src, int32x4_t *res, + int sy) { + int16x4_t s0 = vget_low_s16(src[0]); + int16x4_t s1 = vget_low_s16(src[1]); + int16x4_t s2 = vget_low_s16(src[2]); + int16x4_t s3 = vget_low_s16(src[3]); + int16x4_t s4 = vget_low_s16(src[4]); + int16x4_t s5 = vget_low_s16(src[5]); + int16x4_t s6 = vget_low_s16(src[6]); + int16x4_t s7 = vget_low_s16(src[7]); + + int16x8_t f = + vld1q_s16((int16_t *)(av1_warped_filter + (sy >> WARPEDDIFF_PREC_BITS))); + + int32x4_t m0123 = vmull_lane_s16(s0, vget_low_s16(f), 0); + m0123 = vmlal_lane_s16(m0123, s1, vget_low_s16(f), 1); + m0123 = vmlal_lane_s16(m0123, s2, vget_low_s16(f), 2); + m0123 = vmlal_lane_s16(m0123, s3, vget_low_s16(f), 3); + m0123 = vmlal_lane_s16(m0123, s4, vget_high_s16(f), 0); + m0123 = vmlal_lane_s16(m0123, s5, vget_high_s16(f), 1); + m0123 = vmlal_lane_s16(m0123, s6, vget_high_s16(f), 2); + m0123 = vmlal_lane_s16(m0123, s7, vget_high_s16(f), 3); + + *res = m0123; +} + +static INLINE void vertical_filter_4x1_f4(const int16x8_t *src, int32x4_t *res, + int sy, int gamma) { + int16x8_t s0, s1, s2, s3; + transpose_elems_s16_4x8( + vget_low_s16(src[0]), vget_low_s16(src[1]), 
vget_low_s16(src[2]), + vget_low_s16(src[3]), vget_low_s16(src[4]), vget_low_s16(src[5]), + vget_low_s16(src[6]), vget_low_s16(src[7]), &s0, &s1, &s2, &s3); + + int16x8_t f[4]; + load_filters_4(f, sy, gamma); + + int32x4_t m0 = vmull_s16(vget_low_s16(s0), vget_low_s16(f[0])); + m0 = vmlal_s16(m0, vget_high_s16(s0), vget_high_s16(f[0])); + int32x4_t m1 = vmull_s16(vget_low_s16(s1), vget_low_s16(f[1])); + m1 = vmlal_s16(m1, vget_high_s16(s1), vget_high_s16(f[1])); + int32x4_t m2 = vmull_s16(vget_low_s16(s2), vget_low_s16(f[2])); + m2 = vmlal_s16(m2, vget_high_s16(s2), vget_high_s16(f[2])); + int32x4_t m3 = vmull_s16(vget_low_s16(s3), vget_low_s16(f[3])); + m3 = vmlal_s16(m3, vget_high_s16(s3), vget_high_s16(f[3])); + + int32x4_t m0123_pairs[] = { m0, m1, m2, m3 }; + + *res = horizontal_add_4d_s32x4(m0123_pairs); +} + +static INLINE void vertical_filter_8x1_f1(const int16x8_t *src, + int32x4_t *res_low, + int32x4_t *res_high, int sy) { + int16x8_t s0 = src[0]; + int16x8_t s1 = src[1]; + int16x8_t s2 = src[2]; + int16x8_t s3 = src[3]; + int16x8_t s4 = src[4]; + int16x8_t s5 = src[5]; + int16x8_t s6 = src[6]; + int16x8_t s7 = src[7]; + + int16x8_t f = + vld1q_s16((int16_t *)(av1_warped_filter + (sy >> WARPEDDIFF_PREC_BITS))); + + int32x4_t m0123 = vmull_lane_s16(vget_low_s16(s0), vget_low_s16(f), 0); + m0123 = vmlal_lane_s16(m0123, vget_low_s16(s1), vget_low_s16(f), 1); + m0123 = vmlal_lane_s16(m0123, vget_low_s16(s2), vget_low_s16(f), 2); + m0123 = vmlal_lane_s16(m0123, vget_low_s16(s3), vget_low_s16(f), 3); + m0123 = vmlal_lane_s16(m0123, vget_low_s16(s4), vget_high_s16(f), 0); + m0123 = vmlal_lane_s16(m0123, vget_low_s16(s5), vget_high_s16(f), 1); + m0123 = vmlal_lane_s16(m0123, vget_low_s16(s6), vget_high_s16(f), 2); + m0123 = vmlal_lane_s16(m0123, vget_low_s16(s7), vget_high_s16(f), 3); + + int32x4_t m4567 = vmull_lane_s16(vget_high_s16(s0), vget_low_s16(f), 0); + m4567 = vmlal_lane_s16(m4567, vget_high_s16(s1), vget_low_s16(f), 1); + m4567 = vmlal_lane_s16(m4567, vget_high_s16(s2), vget_low_s16(f), 2); + m4567 = vmlal_lane_s16(m4567, vget_high_s16(s3), vget_low_s16(f), 3); + m4567 = vmlal_lane_s16(m4567, vget_high_s16(s4), vget_high_s16(f), 0); + m4567 = vmlal_lane_s16(m4567, vget_high_s16(s5), vget_high_s16(f), 1); + m4567 = vmlal_lane_s16(m4567, vget_high_s16(s6), vget_high_s16(f), 2); + m4567 = vmlal_lane_s16(m4567, vget_high_s16(s7), vget_high_s16(f), 3); + + *res_low = m0123; + *res_high = m4567; +} + +static INLINE void vertical_filter_8x1_f8(const int16x8_t *src, + int32x4_t *res_low, + int32x4_t *res_high, int sy, + int gamma) { + int16x8_t s0 = src[0]; + int16x8_t s1 = src[1]; + int16x8_t s2 = src[2]; + int16x8_t s3 = src[3]; + int16x8_t s4 = src[4]; + int16x8_t s5 = src[5]; + int16x8_t s6 = src[6]; + int16x8_t s7 = src[7]; + transpose_elems_inplace_s16_8x8(&s0, &s1, &s2, &s3, &s4, &s5, &s6, &s7); + + int16x8_t f[8]; + load_filters_8(f, sy, gamma); + + int32x4_t m0 = vmull_s16(vget_low_s16(s0), vget_low_s16(f[0])); + m0 = vmlal_s16(m0, vget_high_s16(s0), vget_high_s16(f[0])); + int32x4_t m1 = vmull_s16(vget_low_s16(s1), vget_low_s16(f[1])); + m1 = vmlal_s16(m1, vget_high_s16(s1), vget_high_s16(f[1])); + int32x4_t m2 = vmull_s16(vget_low_s16(s2), vget_low_s16(f[2])); + m2 = vmlal_s16(m2, vget_high_s16(s2), vget_high_s16(f[2])); + int32x4_t m3 = vmull_s16(vget_low_s16(s3), vget_low_s16(f[3])); + m3 = vmlal_s16(m3, vget_high_s16(s3), vget_high_s16(f[3])); + int32x4_t m4 = vmull_s16(vget_low_s16(s4), vget_low_s16(f[4])); + m4 = vmlal_s16(m4, vget_high_s16(s4), vget_high_s16(f[4])); + 
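+  // Each of m0..m7 holds the four partial products of the 8-tap vertical dot
+  // product for one output pixel of the transposed tile (the filters step by
+  // gamma per pixel); the horizontal_add_4d_s32x4 calls at the end reduce
+  // m0..m3 and m4..m7 to one 32-bit result per pixel.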
int32x4_t m5 = vmull_s16(vget_low_s16(s5), vget_low_s16(f[5])); + m5 = vmlal_s16(m5, vget_high_s16(s5), vget_high_s16(f[5])); + int32x4_t m6 = vmull_s16(vget_low_s16(s6), vget_low_s16(f[6])); + m6 = vmlal_s16(m6, vget_high_s16(s6), vget_high_s16(f[6])); + int32x4_t m7 = vmull_s16(vget_low_s16(s7), vget_low_s16(f[7])); + m7 = vmlal_s16(m7, vget_high_s16(s7), vget_high_s16(f[7])); + + int32x4_t m0123_pairs[] = { m0, m1, m2, m3 }; + int32x4_t m4567_pairs[] = { m4, m5, m6, m7 }; + + *res_low = horizontal_add_4d_s32x4(m0123_pairs); + *res_high = horizontal_add_4d_s32x4(m4567_pairs); +} + +void av1_warp_affine_neon(const int32_t *mat, const uint8_t *ref, int width, + int height, int stride, uint8_t *pred, int p_col, + int p_row, int p_width, int p_height, int p_stride, + int subsampling_x, int subsampling_y, + ConvolveParams *conv_params, int16_t alpha, + int16_t beta, int16_t gamma, int16_t delta) { + av1_warp_affine_common(mat, ref, width, height, stride, pred, p_col, p_row, + p_width, p_height, p_stride, subsampling_x, + subsampling_y, conv_params, alpha, beta, gamma, delta); +} diff --git a/third_party/aom/av1/common/arm/warp_plane_neon.h b/third_party/aom/av1/common/arm/warp_plane_neon.h new file mode 100644 index 0000000000..5afd72f4ab --- /dev/null +++ b/third_party/aom/av1/common/arm/warp_plane_neon.h @@ -0,0 +1,367 @@ +/* + * Copyright (c) 2023, Alliance for Open Media. All rights reserved + * + * This source code is subject to the terms of the BSD 2 Clause License and + * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License + * was not distributed with this source code in the LICENSE file, you can + * obtain it at www.aomedia.org/license/software. If the Alliance for Open + * Media Patent License 1.0 was not distributed with this source code in the + * PATENTS file, you can obtain it at www.aomedia.org/license/patent. 
+ */ +#ifndef AOM_AV1_COMMON_ARM_WARP_PLANE_NEON_H_ +#define AOM_AV1_COMMON_ARM_WARP_PLANE_NEON_H_ + +#include <assert.h> +#include <arm_neon.h> +#include <memory.h> +#include <math.h> + +#include "aom_dsp/aom_dsp_common.h" +#include "aom_dsp/arm/sum_neon.h" +#include "aom_dsp/arm/transpose_neon.h" +#include "aom_ports/mem.h" +#include "config/av1_rtcd.h" +#include "av1/common/warped_motion.h" +#include "av1/common/scale.h" + +static INLINE int16x8_t horizontal_filter_4x1_f4(const uint8x16_t in, int sx, + int alpha); + +static INLINE int16x8_t horizontal_filter_8x1_f8(const uint8x16_t in, int sx, + int alpha); + +static INLINE int16x8_t horizontal_filter_4x1_f1(const uint8x16_t in, int sx); + +static INLINE int16x8_t horizontal_filter_8x1_f1(const uint8x16_t in, int sx); + +static INLINE void vertical_filter_4x1_f1(const int16x8_t *src, int32x4_t *res, + int sy); + +static INLINE void vertical_filter_4x1_f4(const int16x8_t *src, int32x4_t *res, + int sy, int gamma); + +static INLINE void vertical_filter_8x1_f1(const int16x8_t *src, + int32x4_t *res_low, + int32x4_t *res_high, int sy); + +static INLINE void vertical_filter_8x1_f8(const int16x8_t *src, + int32x4_t *res_low, + int32x4_t *res_high, int sy, + int gamma); + +static INLINE void load_filters_4(int16x8_t out[], int offset, int stride) { + out[0] = vld1q_s16((int16_t *)(av1_warped_filter + ((offset + 0 * stride) >> + WARPEDDIFF_PREC_BITS))); + out[1] = vld1q_s16((int16_t *)(av1_warped_filter + ((offset + 1 * stride) >> + WARPEDDIFF_PREC_BITS))); + out[2] = vld1q_s16((int16_t *)(av1_warped_filter + ((offset + 2 * stride) >> + WARPEDDIFF_PREC_BITS))); + out[3] = vld1q_s16((int16_t *)(av1_warped_filter + ((offset + 3 * stride) >> + WARPEDDIFF_PREC_BITS))); +} + +static INLINE void load_filters_8(int16x8_t out[], int offset, int stride) { + out[0] = vld1q_s16((int16_t *)(av1_warped_filter + ((offset + 0 * stride) >> + WARPEDDIFF_PREC_BITS))); + out[1] = vld1q_s16((int16_t *)(av1_warped_filter + ((offset + 1 * stride) >> + WARPEDDIFF_PREC_BITS))); + out[2] = vld1q_s16((int16_t *)(av1_warped_filter + ((offset + 2 * stride) >> + WARPEDDIFF_PREC_BITS))); + out[3] = vld1q_s16((int16_t *)(av1_warped_filter + ((offset + 3 * stride) >> + WARPEDDIFF_PREC_BITS))); + out[4] = vld1q_s16((int16_t *)(av1_warped_filter + ((offset + 4 * stride) >> + WARPEDDIFF_PREC_BITS))); + out[5] = vld1q_s16((int16_t *)(av1_warped_filter + ((offset + 5 * stride) >> + WARPEDDIFF_PREC_BITS))); + out[6] = vld1q_s16((int16_t *)(av1_warped_filter + ((offset + 6 * stride) >> + WARPEDDIFF_PREC_BITS))); + out[7] = vld1q_s16((int16_t *)(av1_warped_filter + ((offset + 7 * stride) >> + WARPEDDIFF_PREC_BITS))); +} + +static INLINE int clamp_iy(int iy, int height) { + return clamp(iy, 0, height - 1); +} + +static INLINE void warp_affine_horizontal(const uint8_t *ref, int width, + int height, int stride, int p_width, + int p_height, int16_t alpha, + int16_t beta, const int64_t x4, + const int64_t y4, const int i, + int16x8_t tmp[]) { + const int bd = 8; + const int reduce_bits_horiz = ROUND0_BITS; + const int height_limit = AOMMIN(8, p_height - i) + 7; + + int32_t ix4 = (int32_t)(x4 >> WARPEDMODEL_PREC_BITS); + int32_t iy4 = (int32_t)(y4 >> WARPEDMODEL_PREC_BITS); + + int32_t sx4 = x4 & ((1 << WARPEDMODEL_PREC_BITS) - 1); + sx4 += alpha * (-4) + beta * (-4) + (1 << (WARPEDDIFF_PREC_BITS - 1)) + + (WARPEDPIXEL_PREC_SHIFTS << WARPEDDIFF_PREC_BITS); + sx4 &= ~((1 << WARP_PARAM_REDUCE_BITS) - 1); + + if (ix4 <= -7) { + for (int k = 0; k < height_limit; ++k) { + int iy = clamp_iy(iy4 
+ k - 7, height); + int16_t dup_val = + (1 << (bd + FILTER_BITS - reduce_bits_horiz - 1)) + + ref[iy * stride] * (1 << (FILTER_BITS - reduce_bits_horiz)); + tmp[k] = vdupq_n_s16(dup_val); + } + return; + } else if (ix4 >= width + 6) { + for (int k = 0; k < height_limit; ++k) { + int iy = clamp_iy(iy4 + k - 7, height); + int16_t dup_val = (1 << (bd + FILTER_BITS - reduce_bits_horiz - 1)) + + ref[iy * stride + (width - 1)] * + (1 << (FILTER_BITS - reduce_bits_horiz)); + tmp[k] = vdupq_n_s16(dup_val); + } + return; + } + + static const uint8_t kIotaArr[] = { 0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15 }; + const uint8x16_t indx = vld1q_u8(kIotaArr); + + const int out_of_boundary_left = -(ix4 - 6); + const int out_of_boundary_right = (ix4 + 8) - width; + +#define APPLY_HORIZONTAL_SHIFT(fn, ...) \ + do { \ + if (out_of_boundary_left >= 0 || out_of_boundary_right >= 0) { \ + for (int k = 0; k < height_limit; ++k) { \ + const int iy = clamp_iy(iy4 + k - 7, height); \ + const uint8_t *src = ref + iy * stride + ix4 - 7; \ + uint8x16_t src_1 = vld1q_u8(src); \ + \ + if (out_of_boundary_left >= 0) { \ + int limit = out_of_boundary_left + 1; \ + uint8x16_t cmp_vec = vdupq_n_u8(out_of_boundary_left); \ + uint8x16_t vec_dup = vdupq_n_u8(*(src + limit)); \ + uint8x16_t mask_val = vcleq_u8(indx, cmp_vec); \ + src_1 = vbslq_u8(mask_val, vec_dup, src_1); \ + } \ + if (out_of_boundary_right >= 0) { \ + int limit = 15 - (out_of_boundary_right + 1); \ + uint8x16_t cmp_vec = vdupq_n_u8(15 - out_of_boundary_right); \ + uint8x16_t vec_dup = vdupq_n_u8(*(src + limit)); \ + uint8x16_t mask_val = vcgeq_u8(indx, cmp_vec); \ + src_1 = vbslq_u8(mask_val, vec_dup, src_1); \ + } \ + tmp[k] = (fn)(src_1, __VA_ARGS__); \ + } \ + } else { \ + for (int k = 0; k < height_limit; ++k) { \ + const int iy = clamp_iy(iy4 + k - 7, height); \ + const uint8_t *src = ref + iy * stride + ix4 - 7; \ + uint8x16_t src_1 = vld1q_u8(src); \ + tmp[k] = (fn)(src_1, __VA_ARGS__); \ + } \ + } \ + } while (0) + + if (p_width == 4) { + if (beta == 0) { + if (alpha == 0) { + APPLY_HORIZONTAL_SHIFT(horizontal_filter_4x1_f1, sx4); + } else { + APPLY_HORIZONTAL_SHIFT(horizontal_filter_4x1_f4, sx4, alpha); + } + } else { + if (alpha == 0) { + APPLY_HORIZONTAL_SHIFT(horizontal_filter_4x1_f1, + (sx4 + beta * (k - 3))); + } else { + APPLY_HORIZONTAL_SHIFT(horizontal_filter_4x1_f4, (sx4 + beta * (k - 3)), + alpha); + } + } + } else { + if (beta == 0) { + if (alpha == 0) { + APPLY_HORIZONTAL_SHIFT(horizontal_filter_8x1_f1, sx4); + } else { + APPLY_HORIZONTAL_SHIFT(horizontal_filter_8x1_f8, sx4, alpha); + } + } else { + if (alpha == 0) { + APPLY_HORIZONTAL_SHIFT(horizontal_filter_8x1_f1, + (sx4 + beta * (k - 3))); + } else { + APPLY_HORIZONTAL_SHIFT(horizontal_filter_8x1_f8, (sx4 + beta * (k - 3)), + alpha); + } + } + } +} + +static INLINE void warp_affine_vertical( + uint8_t *pred, int p_width, int p_height, int p_stride, int is_compound, + uint16_t *dst, int dst_stride, int do_average, int use_dist_wtd_comp_avg, + int16_t gamma, int16_t delta, const int64_t y4, const int i, const int j, + int16x8_t tmp[], const int fwd, const int bwd) { + const int bd = 8; + const int reduce_bits_horiz = ROUND0_BITS; + const int offset_bits_vert = bd + 2 * FILTER_BITS - reduce_bits_horiz; + int add_const_vert; + if (is_compound) { + add_const_vert = + (1 << offset_bits_vert) + (1 << (COMPOUND_ROUND1_BITS - 1)); + } else { + add_const_vert = + (1 << offset_bits_vert) + (1 << (2 * FILTER_BITS - ROUND0_BITS - 1)); + } + const int sub_constant = (1 << (bd - 1)) + 
(1 << bd); + + const int offset_bits = bd + 2 * FILTER_BITS - ROUND0_BITS; + const int res_sub_const = + (1 << (2 * FILTER_BITS - ROUND0_BITS - COMPOUND_ROUND1_BITS - 1)) - + (1 << (offset_bits - COMPOUND_ROUND1_BITS)) - + (1 << (offset_bits - COMPOUND_ROUND1_BITS - 1)); + + int32_t sy4 = y4 & ((1 << WARPEDMODEL_PREC_BITS) - 1); + sy4 += gamma * (-4) + delta * (-4) + (1 << (WARPEDDIFF_PREC_BITS - 1)) + + (WARPEDPIXEL_PREC_SHIFTS << WARPEDDIFF_PREC_BITS); + sy4 &= ~((1 << WARP_PARAM_REDUCE_BITS) - 1); + + if (p_width > 4) { + for (int k = -4; k < AOMMIN(4, p_height - i - 4); ++k) { + int sy = sy4 + delta * (k + 4); + const int16x8_t *v_src = tmp + (k + 4); + + int32x4_t res_lo, res_hi; + if (gamma == 0) { + vertical_filter_8x1_f1(v_src, &res_lo, &res_hi, sy); + } else { + vertical_filter_8x1_f8(v_src, &res_lo, &res_hi, sy, gamma); + } + + res_lo = vaddq_s32(res_lo, vdupq_n_s32(add_const_vert)); + res_hi = vaddq_s32(res_hi, vdupq_n_s32(add_const_vert)); + + if (is_compound) { + uint16_t *const p = (uint16_t *)&dst[(i + k + 4) * dst_stride + j]; + int16x8_t res_s16 = + vcombine_s16(vshrn_n_s32(res_lo, COMPOUND_ROUND1_BITS), + vshrn_n_s32(res_hi, COMPOUND_ROUND1_BITS)); + if (do_average) { + int16x8_t tmp16 = vreinterpretq_s16_u16(vld1q_u16(p)); + if (use_dist_wtd_comp_avg) { + int32x4_t tmp32_lo = vmull_n_s16(vget_low_s16(tmp16), fwd); + int32x4_t tmp32_hi = vmull_n_s16(vget_high_s16(tmp16), fwd); + tmp32_lo = vmlal_n_s16(tmp32_lo, vget_low_s16(res_s16), bwd); + tmp32_hi = vmlal_n_s16(tmp32_hi, vget_high_s16(res_s16), bwd); + tmp16 = vcombine_s16(vshrn_n_s32(tmp32_lo, DIST_PRECISION_BITS), + vshrn_n_s32(tmp32_hi, DIST_PRECISION_BITS)); + } else { + tmp16 = vhaddq_s16(tmp16, res_s16); + } + int16x8_t res = vaddq_s16(tmp16, vdupq_n_s16(res_sub_const)); + uint8x8_t res8 = vqshrun_n_s16( + res, 2 * FILTER_BITS - ROUND0_BITS - COMPOUND_ROUND1_BITS); + vst1_u8(&pred[(i + k + 4) * p_stride + j], res8); + } else { + vst1q_u16(p, vreinterpretq_u16_s16(res_s16)); + } + } else { + int16x8_t res16 = + vcombine_s16(vshrn_n_s32(res_lo, 2 * FILTER_BITS - ROUND0_BITS), + vshrn_n_s32(res_hi, 2 * FILTER_BITS - ROUND0_BITS)); + res16 = vsubq_s16(res16, vdupq_n_s16(sub_constant)); + + uint8_t *const p = (uint8_t *)&pred[(i + k + 4) * p_stride + j]; + vst1_u8(p, vqmovun_s16(res16)); + } + } + } else { + // p_width == 4 + for (int k = -4; k < AOMMIN(4, p_height - i - 4); ++k) { + int sy = sy4 + delta * (k + 4); + const int16x8_t *v_src = tmp + (k + 4); + + int32x4_t res_lo; + if (gamma == 0) { + vertical_filter_4x1_f1(v_src, &res_lo, sy); + } else { + vertical_filter_4x1_f4(v_src, &res_lo, sy, gamma); + } + + res_lo = vaddq_s32(res_lo, vdupq_n_s32(add_const_vert)); + + if (is_compound) { + uint16_t *const p = (uint16_t *)&dst[(i + k + 4) * dst_stride + j]; + + int16x4_t res_lo_s16 = vshrn_n_s32(res_lo, COMPOUND_ROUND1_BITS); + if (do_average) { + uint8_t *const dst8 = &pred[(i + k + 4) * p_stride + j]; + int16x4_t tmp16_lo = vreinterpret_s16_u16(vld1_u16(p)); + if (use_dist_wtd_comp_avg) { + int32x4_t tmp32_lo = vmull_n_s16(tmp16_lo, fwd); + tmp32_lo = vmlal_n_s16(tmp32_lo, res_lo_s16, bwd); + tmp16_lo = vshrn_n_s32(tmp32_lo, DIST_PRECISION_BITS); + } else { + tmp16_lo = vhadd_s16(tmp16_lo, res_lo_s16); + } + int16x4_t res = vadd_s16(tmp16_lo, vdup_n_s16(res_sub_const)); + uint8x8_t res8 = vqshrun_n_s16( + vcombine_s16(res, vdup_n_s16(0)), + 2 * FILTER_BITS - ROUND0_BITS - COMPOUND_ROUND1_BITS); + vst1_lane_u32((uint32_t *)dst8, vreinterpret_u32_u8(res8), 0); + } else { + uint16x4_t res_u16_low = 
vreinterpret_u16_s16(res_lo_s16); + vst1_u16(p, res_u16_low); + } + } else { + int16x4_t res16 = vshrn_n_s32(res_lo, 2 * FILTER_BITS - ROUND0_BITS); + res16 = vsub_s16(res16, vdup_n_s16(sub_constant)); + + uint8_t *const p = (uint8_t *)&pred[(i + k + 4) * p_stride + j]; + uint8x8_t val = vqmovun_s16(vcombine_s16(res16, vdup_n_s16(0))); + vst1_lane_u32((uint32_t *)p, vreinterpret_u32_u8(val), 0); + } + } + } +} + +static INLINE void av1_warp_affine_common( + const int32_t *mat, const uint8_t *ref, int width, int height, int stride, + uint8_t *pred, int p_col, int p_row, int p_width, int p_height, + int p_stride, int subsampling_x, int subsampling_y, + ConvolveParams *conv_params, int16_t alpha, int16_t beta, int16_t gamma, + int16_t delta) { + const int w0 = conv_params->fwd_offset; + const int w1 = conv_params->bck_offset; + const int is_compound = conv_params->is_compound; + uint16_t *const dst = conv_params->dst; + const int dst_stride = conv_params->dst_stride; + const int do_average = conv_params->do_average; + const int use_dist_wtd_comp_avg = conv_params->use_dist_wtd_comp_avg; + + assert(IMPLIES(is_compound, dst != NULL)); + assert(IMPLIES(do_average, is_compound)); + + for (int i = 0; i < p_height; i += 8) { + for (int j = 0; j < p_width; j += 8) { + const int32_t src_x = (p_col + j + 4) << subsampling_x; + const int32_t src_y = (p_row + i + 4) << subsampling_y; + const int64_t dst_x = + (int64_t)mat[2] * src_x + (int64_t)mat[3] * src_y + (int64_t)mat[0]; + const int64_t dst_y = + (int64_t)mat[4] * src_x + (int64_t)mat[5] * src_y + (int64_t)mat[1]; + + const int64_t x4 = dst_x >> subsampling_x; + const int64_t y4 = dst_y >> subsampling_y; + + int16x8_t tmp[15]; + warp_affine_horizontal(ref, width, height, stride, p_width, p_height, + alpha, beta, x4, y4, i, tmp); + warp_affine_vertical(pred, p_width, p_height, p_stride, is_compound, dst, + dst_stride, do_average, use_dist_wtd_comp_avg, gamma, + delta, y4, i, j, tmp, w0, w1); + } + } +} + +#endif // AOM_AV1_COMMON_ARM_WARP_PLANE_NEON_H_ diff --git a/third_party/aom/av1/common/arm/warp_plane_neon_i8mm.c b/third_party/aom/av1/common/arm/warp_plane_neon_i8mm.c new file mode 100644 index 0000000000..39e3ad99f4 --- /dev/null +++ b/third_party/aom/av1/common/arm/warp_plane_neon_i8mm.c @@ -0,0 +1,291 @@ +/* + * Copyright (c) 2023, Alliance for Open Media. All rights reserved + * + * This source code is subject to the terms of the BSD 2 Clause License and + * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License + * was not distributed with this source code in the LICENSE file, you can + * obtain it at www.aomedia.org/license/software. If the Alliance for Open + * Media Patent License 1.0 was not distributed with this source code in the + * PATENTS file, you can obtain it at www.aomedia.org/license/patent. 
+ */ + +#include "warp_plane_neon.h" + +DECLARE_ALIGNED(16, static const uint8_t, usdot_permute_idx[48]) = { + 0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6, + 4, 5, 6, 7, 5, 6, 7, 8, 6, 7, 8, 9, 7, 8, 9, 10, + 8, 9, 10, 11, 9, 10, 11, 12, 10, 11, 12, 13, 11, 12, 13, 14 +}; + +static INLINE int16x8_t horizontal_filter_4x1_f4(const uint8x16_t in, int sx, + int alpha) { + const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1)); + + // Loading the 8 filter taps + int16x8_t f[4]; + load_filters_4(f, sx, alpha); + + int8x16_t f01_u8 = vcombine_s8(vmovn_s16(f[0]), vmovn_s16(f[1])); + int8x16_t f23_u8 = vcombine_s8(vmovn_s16(f[2]), vmovn_s16(f[3])); + + uint8x8_t in0 = vget_low_u8(in); + uint8x8_t in1 = vget_low_u8(vextq_u8(in, in, 1)); + uint8x8_t in2 = vget_low_u8(vextq_u8(in, in, 2)); + uint8x8_t in3 = vget_low_u8(vextq_u8(in, in, 3)); + + int32x4_t m01 = vusdotq_s32(vdupq_n_s32(0), vcombine_u8(in0, in1), f01_u8); + int32x4_t m23 = vusdotq_s32(vdupq_n_s32(0), vcombine_u8(in2, in3), f23_u8); + + int32x4_t tmp_res_low = vpaddq_s32(m01, m23); + + tmp_res_low = vaddq_s32(tmp_res_low, add_const); + + uint16x8_t res = + vcombine_u16(vqrshrun_n_s32(tmp_res_low, ROUND0_BITS), vdup_n_u16(0)); + return vreinterpretq_s16_u16(res); +} + +static INLINE int16x8_t horizontal_filter_8x1_f8(const uint8x16_t in, int sx, + int alpha) { + const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1)); + + // Loading the 8 filter taps + int16x8_t f[8]; + load_filters_8(f, sx, alpha); + + int8x16_t f01_u8 = vcombine_s8(vmovn_s16(f[0]), vmovn_s16(f[1])); + int8x16_t f23_u8 = vcombine_s8(vmovn_s16(f[2]), vmovn_s16(f[3])); + int8x16_t f45_u8 = vcombine_s8(vmovn_s16(f[4]), vmovn_s16(f[5])); + int8x16_t f67_u8 = vcombine_s8(vmovn_s16(f[6]), vmovn_s16(f[7])); + + uint8x8_t in0 = vget_low_u8(in); + uint8x8_t in1 = vget_low_u8(vextq_u8(in, in, 1)); + uint8x8_t in2 = vget_low_u8(vextq_u8(in, in, 2)); + uint8x8_t in3 = vget_low_u8(vextq_u8(in, in, 3)); + uint8x8_t in4 = vget_low_u8(vextq_u8(in, in, 4)); + uint8x8_t in5 = vget_low_u8(vextq_u8(in, in, 5)); + uint8x8_t in6 = vget_low_u8(vextq_u8(in, in, 6)); + uint8x8_t in7 = vget_low_u8(vextq_u8(in, in, 7)); + + int32x4_t m01 = vusdotq_s32(vdupq_n_s32(0), vcombine_u8(in0, in1), f01_u8); + int32x4_t m23 = vusdotq_s32(vdupq_n_s32(0), vcombine_u8(in2, in3), f23_u8); + int32x4_t m45 = vusdotq_s32(vdupq_n_s32(0), vcombine_u8(in4, in5), f45_u8); + int32x4_t m67 = vusdotq_s32(vdupq_n_s32(0), vcombine_u8(in6, in7), f67_u8); + + int32x4_t tmp_res_low = vpaddq_s32(m01, m23); + int32x4_t tmp_res_high = vpaddq_s32(m45, m67); + + tmp_res_low = vaddq_s32(tmp_res_low, add_const); + tmp_res_high = vaddq_s32(tmp_res_high, add_const); + + uint16x8_t res = vcombine_u16(vqrshrun_n_s32(tmp_res_low, ROUND0_BITS), + vqrshrun_n_s32(tmp_res_high, ROUND0_BITS)); + return vreinterpretq_s16_u16(res); +} + +static INLINE int16x8_t horizontal_filter_4x1_f1(const uint8x16_t in, int sx) { + const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1)); + + int16x8_t f_s16 = + vld1q_s16((int16_t *)(av1_warped_filter + (sx >> WARPEDDIFF_PREC_BITS))); + + int8x16_t f_s8 = vcombine_s8(vmovn_s16(f_s16), vmovn_s16(f_s16)); + + uint8x16_t perm0 = vld1q_u8(&usdot_permute_idx[0]); + uint8x16_t perm1 = vld1q_u8(&usdot_permute_idx[16]); + + // Permute samples ready for dot product. 
+ // { 0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6 } + // { 4, 5, 6, 7, 5, 6, 7, 8, 6, 7, 8, 9, 7, 8, 9, 10 } + uint8x16_t in_0123 = vqtbl1q_u8(in, perm0); + uint8x16_t in_4567 = vqtbl1q_u8(in, perm1); + + int32x4_t m0123 = vusdotq_laneq_s32(vdupq_n_s32(0), in_0123, f_s8, 0); + m0123 = vusdotq_laneq_s32(m0123, in_4567, f_s8, 1); + + int32x4_t tmp_res_low = m0123; + + tmp_res_low = vaddq_s32(tmp_res_low, add_const); + + uint16x8_t res = + vcombine_u16(vqrshrun_n_s32(tmp_res_low, ROUND0_BITS), vdup_n_u16(0)); + return vreinterpretq_s16_u16(res); +} + +static INLINE int16x8_t horizontal_filter_8x1_f1(const uint8x16_t in, int sx) { + const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1)); + + int16x8_t f_s16 = + vld1q_s16((int16_t *)(av1_warped_filter + (sx >> WARPEDDIFF_PREC_BITS))); + + int8x16_t f_s8 = vcombine_s8(vmovn_s16(f_s16), vmovn_s16(f_s16)); + + uint8x16_t perm0 = vld1q_u8(&usdot_permute_idx[0]); + uint8x16_t perm1 = vld1q_u8(&usdot_permute_idx[16]); + uint8x16_t perm2 = vld1q_u8(&usdot_permute_idx[32]); + + // Permute samples ready for dot product. + // { 0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6 } + // { 4, 5, 6, 7, 5, 6, 7, 8, 6, 7, 8, 9, 7, 8, 9, 10 } + // { 8, 9, 10, 11, 9, 10, 11, 12, 10, 11, 12, 13, 11, 12, 13, 14 } + uint8x16_t in_0123 = vqtbl1q_u8(in, perm0); + uint8x16_t in_4567 = vqtbl1q_u8(in, perm1); + uint8x16_t in_89ab = vqtbl1q_u8(in, perm2); + + int32x4_t m0123 = vusdotq_laneq_s32(vdupq_n_s32(0), in_0123, f_s8, 0); + m0123 = vusdotq_laneq_s32(m0123, in_4567, f_s8, 1); + + int32x4_t m4567 = vusdotq_laneq_s32(vdupq_n_s32(0), in_4567, f_s8, 0); + m4567 = vusdotq_laneq_s32(m4567, in_89ab, f_s8, 1); + + int32x4_t tmp_res_low = m0123; + int32x4_t tmp_res_high = m4567; + + tmp_res_low = vaddq_s32(tmp_res_low, add_const); + tmp_res_high = vaddq_s32(tmp_res_high, add_const); + + uint16x8_t res = vcombine_u16(vqrshrun_n_s32(tmp_res_low, ROUND0_BITS), + vqrshrun_n_s32(tmp_res_high, ROUND0_BITS)); + return vreinterpretq_s16_u16(res); +} + +static INLINE void vertical_filter_4x1_f1(const int16x8_t *src, int32x4_t *res, + int sy) { + int16x4_t s0 = vget_low_s16(src[0]); + int16x4_t s1 = vget_low_s16(src[1]); + int16x4_t s2 = vget_low_s16(src[2]); + int16x4_t s3 = vget_low_s16(src[3]); + int16x4_t s4 = vget_low_s16(src[4]); + int16x4_t s5 = vget_low_s16(src[5]); + int16x4_t s6 = vget_low_s16(src[6]); + int16x4_t s7 = vget_low_s16(src[7]); + + int16x8_t f = + vld1q_s16((int16_t *)(av1_warped_filter + (sy >> WARPEDDIFF_PREC_BITS))); + + int32x4_t m0123 = vmull_lane_s16(s0, vget_low_s16(f), 0); + m0123 = vmlal_lane_s16(m0123, s1, vget_low_s16(f), 1); + m0123 = vmlal_lane_s16(m0123, s2, vget_low_s16(f), 2); + m0123 = vmlal_lane_s16(m0123, s3, vget_low_s16(f), 3); + m0123 = vmlal_lane_s16(m0123, s4, vget_high_s16(f), 0); + m0123 = vmlal_lane_s16(m0123, s5, vget_high_s16(f), 1); + m0123 = vmlal_lane_s16(m0123, s6, vget_high_s16(f), 2); + m0123 = vmlal_lane_s16(m0123, s7, vget_high_s16(f), 3); + + *res = m0123; +} + +static INLINE void vertical_filter_4x1_f4(const int16x8_t *src, int32x4_t *res, + int sy, int gamma) { + int16x8_t s0, s1, s2, s3; + transpose_elems_s16_4x8( + vget_low_s16(src[0]), vget_low_s16(src[1]), vget_low_s16(src[2]), + vget_low_s16(src[3]), vget_low_s16(src[4]), vget_low_s16(src[5]), + vget_low_s16(src[6]), vget_low_s16(src[7]), &s0, &s1, &s2, &s3); + + int16x8_t f[4]; + load_filters_4(f, sy, gamma); + + int32x4_t m0 = vmull_s16(vget_low_s16(s0), vget_low_s16(f[0])); + m0 = vmlal_s16(m0, vget_high_s16(s0), vget_high_s16(f[0])); + int32x4_t m1 = 
vmull_s16(vget_low_s16(s1), vget_low_s16(f[1])); + m1 = vmlal_s16(m1, vget_high_s16(s1), vget_high_s16(f[1])); + int32x4_t m2 = vmull_s16(vget_low_s16(s2), vget_low_s16(f[2])); + m2 = vmlal_s16(m2, vget_high_s16(s2), vget_high_s16(f[2])); + int32x4_t m3 = vmull_s16(vget_low_s16(s3), vget_low_s16(f[3])); + m3 = vmlal_s16(m3, vget_high_s16(s3), vget_high_s16(f[3])); + + int32x4_t m0123_pairs[] = { m0, m1, m2, m3 }; + + *res = horizontal_add_4d_s32x4(m0123_pairs); +} + +static INLINE void vertical_filter_8x1_f1(const int16x8_t *src, + int32x4_t *res_low, + int32x4_t *res_high, int sy) { + int16x8_t s0 = src[0]; + int16x8_t s1 = src[1]; + int16x8_t s2 = src[2]; + int16x8_t s3 = src[3]; + int16x8_t s4 = src[4]; + int16x8_t s5 = src[5]; + int16x8_t s6 = src[6]; + int16x8_t s7 = src[7]; + + int16x8_t f = + vld1q_s16((int16_t *)(av1_warped_filter + (sy >> WARPEDDIFF_PREC_BITS))); + + int32x4_t m0123 = vmull_lane_s16(vget_low_s16(s0), vget_low_s16(f), 0); + m0123 = vmlal_lane_s16(m0123, vget_low_s16(s1), vget_low_s16(f), 1); + m0123 = vmlal_lane_s16(m0123, vget_low_s16(s2), vget_low_s16(f), 2); + m0123 = vmlal_lane_s16(m0123, vget_low_s16(s3), vget_low_s16(f), 3); + m0123 = vmlal_lane_s16(m0123, vget_low_s16(s4), vget_high_s16(f), 0); + m0123 = vmlal_lane_s16(m0123, vget_low_s16(s5), vget_high_s16(f), 1); + m0123 = vmlal_lane_s16(m0123, vget_low_s16(s6), vget_high_s16(f), 2); + m0123 = vmlal_lane_s16(m0123, vget_low_s16(s7), vget_high_s16(f), 3); + + int32x4_t m4567 = vmull_lane_s16(vget_high_s16(s0), vget_low_s16(f), 0); + m4567 = vmlal_lane_s16(m4567, vget_high_s16(s1), vget_low_s16(f), 1); + m4567 = vmlal_lane_s16(m4567, vget_high_s16(s2), vget_low_s16(f), 2); + m4567 = vmlal_lane_s16(m4567, vget_high_s16(s3), vget_low_s16(f), 3); + m4567 = vmlal_lane_s16(m4567, vget_high_s16(s4), vget_high_s16(f), 0); + m4567 = vmlal_lane_s16(m4567, vget_high_s16(s5), vget_high_s16(f), 1); + m4567 = vmlal_lane_s16(m4567, vget_high_s16(s6), vget_high_s16(f), 2); + m4567 = vmlal_lane_s16(m4567, vget_high_s16(s7), vget_high_s16(f), 3); + + *res_low = m0123; + *res_high = m4567; +} + +static INLINE void vertical_filter_8x1_f8(const int16x8_t *src, + int32x4_t *res_low, + int32x4_t *res_high, int sy, + int gamma) { + int16x8_t s0 = src[0]; + int16x8_t s1 = src[1]; + int16x8_t s2 = src[2]; + int16x8_t s3 = src[3]; + int16x8_t s4 = src[4]; + int16x8_t s5 = src[5]; + int16x8_t s6 = src[6]; + int16x8_t s7 = src[7]; + transpose_elems_inplace_s16_8x8(&s0, &s1, &s2, &s3, &s4, &s5, &s6, &s7); + + int16x8_t f[8]; + load_filters_8(f, sy, gamma); + + int32x4_t m0 = vmull_s16(vget_low_s16(s0), vget_low_s16(f[0])); + m0 = vmlal_s16(m0, vget_high_s16(s0), vget_high_s16(f[0])); + int32x4_t m1 = vmull_s16(vget_low_s16(s1), vget_low_s16(f[1])); + m1 = vmlal_s16(m1, vget_high_s16(s1), vget_high_s16(f[1])); + int32x4_t m2 = vmull_s16(vget_low_s16(s2), vget_low_s16(f[2])); + m2 = vmlal_s16(m2, vget_high_s16(s2), vget_high_s16(f[2])); + int32x4_t m3 = vmull_s16(vget_low_s16(s3), vget_low_s16(f[3])); + m3 = vmlal_s16(m3, vget_high_s16(s3), vget_high_s16(f[3])); + int32x4_t m4 = vmull_s16(vget_low_s16(s4), vget_low_s16(f[4])); + m4 = vmlal_s16(m4, vget_high_s16(s4), vget_high_s16(f[4])); + int32x4_t m5 = vmull_s16(vget_low_s16(s5), vget_low_s16(f[5])); + m5 = vmlal_s16(m5, vget_high_s16(s5), vget_high_s16(f[5])); + int32x4_t m6 = vmull_s16(vget_low_s16(s6), vget_low_s16(f[6])); + m6 = vmlal_s16(m6, vget_high_s16(s6), vget_high_s16(f[6])); + int32x4_t m7 = vmull_s16(vget_low_s16(s7), vget_low_s16(f[7])); + m7 = vmlal_s16(m7, 
vget_high_s16(s7), vget_high_s16(f[7])); + + int32x4_t m0123_pairs[] = { m0, m1, m2, m3 }; + int32x4_t m4567_pairs[] = { m4, m5, m6, m7 }; + + *res_low = horizontal_add_4d_s32x4(m0123_pairs); + *res_high = horizontal_add_4d_s32x4(m4567_pairs); +} + +void av1_warp_affine_neon_i8mm(const int32_t *mat, const uint8_t *ref, + int width, int height, int stride, uint8_t *pred, + int p_col, int p_row, int p_width, int p_height, + int p_stride, int subsampling_x, + int subsampling_y, ConvolveParams *conv_params, + int16_t alpha, int16_t beta, int16_t gamma, + int16_t delta) { + av1_warp_affine_common(mat, ref, width, height, stride, pred, p_col, p_row, + p_width, p_height, p_stride, subsampling_x, + subsampling_y, conv_params, alpha, beta, gamma, delta); +} diff --git a/third_party/aom/av1/common/arm/warp_plane_sve.c b/third_party/aom/av1/common/arm/warp_plane_sve.c new file mode 100644 index 0000000000..8a4bf5747b --- /dev/null +++ b/third_party/aom/av1/common/arm/warp_plane_sve.c @@ -0,0 +1,284 @@ +/* + * Copyright (c) 2023, Alliance for Open Media. All rights reserved + * + * This source code is subject to the terms of the BSD 2 Clause License and + * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License + * was not distributed with this source code in the LICENSE file, you can + * obtain it at www.aomedia.org/license/software. If the Alliance for Open + * Media Patent License 1.0 was not distributed with this source code in the + * PATENTS file, you can obtain it at www.aomedia.org/license/patent. + */ + +#include <arm_neon.h> + +#include "aom_dsp/arm/dot_sve.h" +#include "warp_plane_neon.h" + +DECLARE_ALIGNED(16, static const uint8_t, usdot_permute_idx[48]) = { + 0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6, + 4, 5, 6, 7, 5, 6, 7, 8, 6, 7, 8, 9, 7, 8, 9, 10, + 8, 9, 10, 11, 9, 10, 11, 12, 10, 11, 12, 13, 11, 12, 13, 14 +}; + +static INLINE int16x8_t horizontal_filter_4x1_f4(const uint8x16_t in, int sx, + int alpha) { + const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1)); + + // Loading the 8 filter taps + int16x8_t f[4]; + load_filters_4(f, sx, alpha); + + int8x16_t f01_u8 = vcombine_s8(vmovn_s16(f[0]), vmovn_s16(f[1])); + int8x16_t f23_u8 = vcombine_s8(vmovn_s16(f[2]), vmovn_s16(f[3])); + + uint8x8_t in0 = vget_low_u8(in); + uint8x8_t in1 = vget_low_u8(vextq_u8(in, in, 1)); + uint8x8_t in2 = vget_low_u8(vextq_u8(in, in, 2)); + uint8x8_t in3 = vget_low_u8(vextq_u8(in, in, 3)); + + int32x4_t m01 = vusdotq_s32(vdupq_n_s32(0), vcombine_u8(in0, in1), f01_u8); + int32x4_t m23 = vusdotq_s32(vdupq_n_s32(0), vcombine_u8(in2, in3), f23_u8); + + int32x4_t tmp_res_low = vpaddq_s32(m01, m23); + + tmp_res_low = vaddq_s32(tmp_res_low, add_const); + + uint16x8_t res = + vcombine_u16(vqrshrun_n_s32(tmp_res_low, ROUND0_BITS), vdup_n_u16(0)); + return vreinterpretq_s16_u16(res); +} + +static INLINE int16x8_t horizontal_filter_8x1_f8(const uint8x16_t in, int sx, + int alpha) { + const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1)); + + // Loading the 8 filter taps + int16x8_t f[8]; + load_filters_8(f, sx, alpha); + + int8x16_t f01_u8 = vcombine_s8(vmovn_s16(f[0]), vmovn_s16(f[1])); + int8x16_t f23_u8 = vcombine_s8(vmovn_s16(f[2]), vmovn_s16(f[3])); + int8x16_t f45_u8 = vcombine_s8(vmovn_s16(f[4]), vmovn_s16(f[5])); + int8x16_t f67_u8 = vcombine_s8(vmovn_s16(f[6]), vmovn_s16(f[7])); + + uint8x8_t in0 = vget_low_u8(in); + uint8x8_t in1 = vget_low_u8(vextq_u8(in, in, 1)); + uint8x8_t in2 = vget_low_u8(vextq_u8(in, in, 2)); + uint8x8_t in3 = 
vget_low_u8(vextq_u8(in, in, 3)); + uint8x8_t in4 = vget_low_u8(vextq_u8(in, in, 4)); + uint8x8_t in5 = vget_low_u8(vextq_u8(in, in, 5)); + uint8x8_t in6 = vget_low_u8(vextq_u8(in, in, 6)); + uint8x8_t in7 = vget_low_u8(vextq_u8(in, in, 7)); + + int32x4_t m01 = vusdotq_s32(vdupq_n_s32(0), vcombine_u8(in0, in1), f01_u8); + int32x4_t m23 = vusdotq_s32(vdupq_n_s32(0), vcombine_u8(in2, in3), f23_u8); + int32x4_t m45 = vusdotq_s32(vdupq_n_s32(0), vcombine_u8(in4, in5), f45_u8); + int32x4_t m67 = vusdotq_s32(vdupq_n_s32(0), vcombine_u8(in6, in7), f67_u8); + + int32x4_t tmp_res_low = vpaddq_s32(m01, m23); + int32x4_t tmp_res_high = vpaddq_s32(m45, m67); + + tmp_res_low = vaddq_s32(tmp_res_low, add_const); + tmp_res_high = vaddq_s32(tmp_res_high, add_const); + + uint16x8_t res = vcombine_u16(vqrshrun_n_s32(tmp_res_low, ROUND0_BITS), + vqrshrun_n_s32(tmp_res_high, ROUND0_BITS)); + return vreinterpretq_s16_u16(res); +} + +static INLINE int16x8_t horizontal_filter_4x1_f1(const uint8x16_t in, int sx) { + const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1)); + + int16x8_t f_s16 = + vld1q_s16((int16_t *)(av1_warped_filter + (sx >> WARPEDDIFF_PREC_BITS))); + + int8x16_t f_s8 = vcombine_s8(vmovn_s16(f_s16), vmovn_s16(f_s16)); + + uint8x16_t perm0 = vld1q_u8(&usdot_permute_idx[0]); + uint8x16_t perm1 = vld1q_u8(&usdot_permute_idx[16]); + + // Permute samples ready for dot product. + // { 0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6 } + // { 4, 5, 6, 7, 5, 6, 7, 8, 6, 7, 8, 9, 7, 8, 9, 10 } + uint8x16_t in_0123 = vqtbl1q_u8(in, perm0); + uint8x16_t in_4567 = vqtbl1q_u8(in, perm1); + + int32x4_t m0123 = vusdotq_laneq_s32(vdupq_n_s32(0), in_0123, f_s8, 0); + m0123 = vusdotq_laneq_s32(m0123, in_4567, f_s8, 1); + + int32x4_t tmp_res_low = m0123; + + tmp_res_low = vaddq_s32(tmp_res_low, add_const); + + uint16x8_t res = + vcombine_u16(vqrshrun_n_s32(tmp_res_low, ROUND0_BITS), vdup_n_u16(0)); + return vreinterpretq_s16_u16(res); +} + +static INLINE int16x8_t horizontal_filter_8x1_f1(const uint8x16_t in, int sx) { + const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1)); + + int16x8_t f_s16 = + vld1q_s16((int16_t *)(av1_warped_filter + (sx >> WARPEDDIFF_PREC_BITS))); + + int8x16_t f_s8 = vcombine_s8(vmovn_s16(f_s16), vmovn_s16(f_s16)); + + uint8x16_t perm0 = vld1q_u8(&usdot_permute_idx[0]); + uint8x16_t perm1 = vld1q_u8(&usdot_permute_idx[16]); + uint8x16_t perm2 = vld1q_u8(&usdot_permute_idx[32]); + + // Permute samples ready for dot product. 
+ // { 0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6 } + // { 4, 5, 6, 7, 5, 6, 7, 8, 6, 7, 8, 9, 7, 8, 9, 10 } + // { 8, 9, 10, 11, 9, 10, 11, 12, 10, 11, 12, 13, 11, 12, 13, 14 } + uint8x16_t in_0123 = vqtbl1q_u8(in, perm0); + uint8x16_t in_4567 = vqtbl1q_u8(in, perm1); + uint8x16_t in_89ab = vqtbl1q_u8(in, perm2); + + int32x4_t m0123 = vusdotq_laneq_s32(vdupq_n_s32(0), in_0123, f_s8, 0); + m0123 = vusdotq_laneq_s32(m0123, in_4567, f_s8, 1); + + int32x4_t m4567 = vusdotq_laneq_s32(vdupq_n_s32(0), in_4567, f_s8, 0); + m4567 = vusdotq_laneq_s32(m4567, in_89ab, f_s8, 1); + + int32x4_t tmp_res_low = m0123; + int32x4_t tmp_res_high = m4567; + + tmp_res_low = vaddq_s32(tmp_res_low, add_const); + tmp_res_high = vaddq_s32(tmp_res_high, add_const); + + uint16x8_t res = vcombine_u16(vqrshrun_n_s32(tmp_res_low, ROUND0_BITS), + vqrshrun_n_s32(tmp_res_high, ROUND0_BITS)); + return vreinterpretq_s16_u16(res); +} + +static INLINE void vertical_filter_4x1_f1(const int16x8_t *src, int32x4_t *res, + int sy) { + int16x4_t s0 = vget_low_s16(src[0]); + int16x4_t s1 = vget_low_s16(src[1]); + int16x4_t s2 = vget_low_s16(src[2]); + int16x4_t s3 = vget_low_s16(src[3]); + int16x4_t s4 = vget_low_s16(src[4]); + int16x4_t s5 = vget_low_s16(src[5]); + int16x4_t s6 = vget_low_s16(src[6]); + int16x4_t s7 = vget_low_s16(src[7]); + + int16x8_t f = + vld1q_s16((int16_t *)(av1_warped_filter + (sy >> WARPEDDIFF_PREC_BITS))); + + int32x4_t m0123 = vmull_lane_s16(s0, vget_low_s16(f), 0); + m0123 = vmlal_lane_s16(m0123, s1, vget_low_s16(f), 1); + m0123 = vmlal_lane_s16(m0123, s2, vget_low_s16(f), 2); + m0123 = vmlal_lane_s16(m0123, s3, vget_low_s16(f), 3); + m0123 = vmlal_lane_s16(m0123, s4, vget_high_s16(f), 0); + m0123 = vmlal_lane_s16(m0123, s5, vget_high_s16(f), 1); + m0123 = vmlal_lane_s16(m0123, s6, vget_high_s16(f), 2); + m0123 = vmlal_lane_s16(m0123, s7, vget_high_s16(f), 3); + + *res = m0123; +} + +static INLINE void vertical_filter_4x1_f4(const int16x8_t *src, int32x4_t *res, + int sy, int gamma) { + int16x8_t s0, s1, s2, s3; + transpose_elems_s16_4x8( + vget_low_s16(src[0]), vget_low_s16(src[1]), vget_low_s16(src[2]), + vget_low_s16(src[3]), vget_low_s16(src[4]), vget_low_s16(src[5]), + vget_low_s16(src[6]), vget_low_s16(src[7]), &s0, &s1, &s2, &s3); + + int16x8_t f[4]; + load_filters_4(f, sy, gamma); + + int64x2_t m0 = aom_sdotq_s16(vdupq_n_s64(0), s0, f[0]); + int64x2_t m1 = aom_sdotq_s16(vdupq_n_s64(0), s1, f[1]); + int64x2_t m2 = aom_sdotq_s16(vdupq_n_s64(0), s2, f[2]); + int64x2_t m3 = aom_sdotq_s16(vdupq_n_s64(0), s3, f[3]); + + int64x2_t m01 = vpaddq_s64(m0, m1); + int64x2_t m23 = vpaddq_s64(m2, m3); + + *res = vcombine_s32(vmovn_s64(m01), vmovn_s64(m23)); +} + +static INLINE void vertical_filter_8x1_f1(const int16x8_t *src, + int32x4_t *res_low, + int32x4_t *res_high, int sy) { + int16x8_t s0 = src[0]; + int16x8_t s1 = src[1]; + int16x8_t s2 = src[2]; + int16x8_t s3 = src[3]; + int16x8_t s4 = src[4]; + int16x8_t s5 = src[5]; + int16x8_t s6 = src[6]; + int16x8_t s7 = src[7]; + + int16x8_t f = + vld1q_s16((int16_t *)(av1_warped_filter + (sy >> WARPEDDIFF_PREC_BITS))); + + int32x4_t m0123 = vmull_lane_s16(vget_low_s16(s0), vget_low_s16(f), 0); + m0123 = vmlal_lane_s16(m0123, vget_low_s16(s1), vget_low_s16(f), 1); + m0123 = vmlal_lane_s16(m0123, vget_low_s16(s2), vget_low_s16(f), 2); + m0123 = vmlal_lane_s16(m0123, vget_low_s16(s3), vget_low_s16(f), 3); + m0123 = vmlal_lane_s16(m0123, vget_low_s16(s4), vget_high_s16(f), 0); + m0123 = vmlal_lane_s16(m0123, vget_low_s16(s5), vget_high_s16(f), 1); + m0123 = 
vmlal_lane_s16(m0123, vget_low_s16(s6), vget_high_s16(f), 2); + m0123 = vmlal_lane_s16(m0123, vget_low_s16(s7), vget_high_s16(f), 3); + + int32x4_t m4567 = vmull_lane_s16(vget_high_s16(s0), vget_low_s16(f), 0); + m4567 = vmlal_lane_s16(m4567, vget_high_s16(s1), vget_low_s16(f), 1); + m4567 = vmlal_lane_s16(m4567, vget_high_s16(s2), vget_low_s16(f), 2); + m4567 = vmlal_lane_s16(m4567, vget_high_s16(s3), vget_low_s16(f), 3); + m4567 = vmlal_lane_s16(m4567, vget_high_s16(s4), vget_high_s16(f), 0); + m4567 = vmlal_lane_s16(m4567, vget_high_s16(s5), vget_high_s16(f), 1); + m4567 = vmlal_lane_s16(m4567, vget_high_s16(s6), vget_high_s16(f), 2); + m4567 = vmlal_lane_s16(m4567, vget_high_s16(s7), vget_high_s16(f), 3); + + *res_low = m0123; + *res_high = m4567; +} + +static INLINE void vertical_filter_8x1_f8(const int16x8_t *src, + int32x4_t *res_low, + int32x4_t *res_high, int sy, + int gamma) { + int16x8_t s0 = src[0]; + int16x8_t s1 = src[1]; + int16x8_t s2 = src[2]; + int16x8_t s3 = src[3]; + int16x8_t s4 = src[4]; + int16x8_t s5 = src[5]; + int16x8_t s6 = src[6]; + int16x8_t s7 = src[7]; + transpose_elems_inplace_s16_8x8(&s0, &s1, &s2, &s3, &s4, &s5, &s6, &s7); + + int16x8_t f[8]; + load_filters_8(f, sy, gamma); + + int64x2_t m0 = aom_sdotq_s16(vdupq_n_s64(0), s0, f[0]); + int64x2_t m1 = aom_sdotq_s16(vdupq_n_s64(0), s1, f[1]); + int64x2_t m2 = aom_sdotq_s16(vdupq_n_s64(0), s2, f[2]); + int64x2_t m3 = aom_sdotq_s16(vdupq_n_s64(0), s3, f[3]); + int64x2_t m4 = aom_sdotq_s16(vdupq_n_s64(0), s4, f[4]); + int64x2_t m5 = aom_sdotq_s16(vdupq_n_s64(0), s5, f[5]); + int64x2_t m6 = aom_sdotq_s16(vdupq_n_s64(0), s6, f[6]); + int64x2_t m7 = aom_sdotq_s16(vdupq_n_s64(0), s7, f[7]); + + int64x2_t m01 = vpaddq_s64(m0, m1); + int64x2_t m23 = vpaddq_s64(m2, m3); + int64x2_t m45 = vpaddq_s64(m4, m5); + int64x2_t m67 = vpaddq_s64(m6, m7); + + *res_low = vcombine_s32(vmovn_s64(m01), vmovn_s64(m23)); + *res_high = vcombine_s32(vmovn_s64(m45), vmovn_s64(m67)); +} + +void av1_warp_affine_sve(const int32_t *mat, const uint8_t *ref, int width, + int height, int stride, uint8_t *pred, int p_col, + int p_row, int p_width, int p_height, int p_stride, + int subsampling_x, int subsampling_y, + ConvolveParams *conv_params, int16_t alpha, + int16_t beta, int16_t gamma, int16_t delta) { + av1_warp_affine_common(mat, ref, width, height, stride, pred, p_col, p_row, + p_width, p_height, p_stride, subsampling_x, + subsampling_y, conv_params, alpha, beta, gamma, delta); +} diff --git a/third_party/aom/av1/common/arm/wiener_convolve_neon.c b/third_party/aom/av1/common/arm/wiener_convolve_neon.c new file mode 100644 index 0000000000..6440c16adb --- /dev/null +++ b/third_party/aom/av1/common/arm/wiener_convolve_neon.c @@ -0,0 +1,348 @@ +/* + * Copyright (c) 2018, Alliance for Open Media. All rights reserved + * + * This source code is subject to the terms of the BSD 2 Clause License and + * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License + * was not distributed with this source code in the LICENSE file, you can + * obtain it at www.aomedia.org/license/software. If the Alliance for Open + * Media Patent License 1.0 was not distributed with this source code in the + * PATENTS file, you can obtain it at www.aomedia.org/license/patent. 
+ */ + +#include <arm_neon.h> +#include <assert.h> + +#include "config/aom_config.h" +#include "config/av1_rtcd.h" + +#include "aom_dsp/arm/mem_neon.h" +#include "aom_dsp/arm/transpose_neon.h" +#include "aom_dsp/txfm_common.h" +#include "aom_ports/mem.h" +#include "av1/common/common.h" +#include "av1/common/restoration.h" + +static INLINE uint16x8_t wiener_convolve5_8_2d_h( + const uint8x8_t t0, const uint8x8_t t1, const uint8x8_t t2, + const uint8x8_t t3, const uint8x8_t t4, const int16x4_t x_filter, + const int32x4_t round_vec, const uint16x8_t im_max_val) { + // Since the Wiener filter is symmetric about the middle tap (tap 2) add + // mirrored source elements before multiplying filter coefficients. + int16x8_t s04 = vreinterpretq_s16_u16(vaddl_u8(t0, t4)); + int16x8_t s13 = vreinterpretq_s16_u16(vaddl_u8(t1, t3)); + int16x8_t s2 = vreinterpretq_s16_u16(vmovl_u8(t2)); + + // x_filter[0] = 0. (5-tap filters are 0-padded to 7 taps.) + int32x4_t sum_lo = vmlal_lane_s16(round_vec, vget_low_s16(s04), x_filter, 1); + sum_lo = vmlal_lane_s16(sum_lo, vget_low_s16(s13), x_filter, 2); + sum_lo = vmlal_lane_s16(sum_lo, vget_low_s16(s2), x_filter, 3); + + int32x4_t sum_hi = vmlal_lane_s16(round_vec, vget_high_s16(s04), x_filter, 1); + sum_hi = vmlal_lane_s16(sum_hi, vget_high_s16(s13), x_filter, 2); + sum_hi = vmlal_lane_s16(sum_hi, vget_high_s16(s2), x_filter, 3); + + uint16x8_t res = vcombine_u16(vqrshrun_n_s32(sum_lo, WIENER_ROUND0_BITS), + vqrshrun_n_s32(sum_hi, WIENER_ROUND0_BITS)); + + return vminq_u16(res, im_max_val); +} + +static INLINE void convolve_add_src_horiz_5tap_neon( + const uint8_t *src_ptr, ptrdiff_t src_stride, uint16_t *dst_ptr, + ptrdiff_t dst_stride, int w, int h, const int16x4_t x_filter, + const int32x4_t round_vec, const uint16x8_t im_max_val) { + do { + const uint8_t *s = src_ptr; + uint16_t *d = dst_ptr; + int width = w; + + do { + uint8x8_t s0, s1, s2, s3, s4; + load_u8_8x5(s, 1, &s0, &s1, &s2, &s3, &s4); + + uint16x8_t d0 = wiener_convolve5_8_2d_h(s0, s1, s2, s3, s4, x_filter, + round_vec, im_max_val); + + vst1q_u16(d, d0); + + s += 8; + d += 8; + width -= 8; + } while (width != 0); + src_ptr += src_stride; + dst_ptr += dst_stride; + } while (--h != 0); +} + +static INLINE uint16x8_t wiener_convolve7_8_2d_h( + const uint8x8_t t0, const uint8x8_t t1, const uint8x8_t t2, + const uint8x8_t t3, const uint8x8_t t4, const uint8x8_t t5, + const uint8x8_t t6, const int16x4_t x_filter, const int32x4_t round_vec, + const uint16x8_t im_max_val) { + // Since the Wiener filter is symmetric about the middle tap (tap 3) add + // mirrored source elements before multiplying by filter coefficients. 
+ int16x8_t s06 = vreinterpretq_s16_u16(vaddl_u8(t0, t6)); + int16x8_t s15 = vreinterpretq_s16_u16(vaddl_u8(t1, t5)); + int16x8_t s24 = vreinterpretq_s16_u16(vaddl_u8(t2, t4)); + int16x8_t s3 = vreinterpretq_s16_u16(vmovl_u8(t3)); + + int32x4_t sum_lo = vmlal_lane_s16(round_vec, vget_low_s16(s06), x_filter, 0); + sum_lo = vmlal_lane_s16(sum_lo, vget_low_s16(s15), x_filter, 1); + sum_lo = vmlal_lane_s16(sum_lo, vget_low_s16(s24), x_filter, 2); + sum_lo = vmlal_lane_s16(sum_lo, vget_low_s16(s3), x_filter, 3); + + int32x4_t sum_hi = vmlal_lane_s16(round_vec, vget_high_s16(s06), x_filter, 0); + sum_hi = vmlal_lane_s16(sum_hi, vget_high_s16(s15), x_filter, 1); + sum_hi = vmlal_lane_s16(sum_hi, vget_high_s16(s24), x_filter, 2); + sum_hi = vmlal_lane_s16(sum_hi, vget_high_s16(s3), x_filter, 3); + + uint16x8_t res = vcombine_u16(vqrshrun_n_s32(sum_lo, WIENER_ROUND0_BITS), + vqrshrun_n_s32(sum_hi, WIENER_ROUND0_BITS)); + + return vminq_u16(res, im_max_val); +} + +static INLINE void convolve_add_src_horiz_7tap_neon( + const uint8_t *src_ptr, ptrdiff_t src_stride, uint16_t *dst_ptr, + ptrdiff_t dst_stride, int w, int h, const int16x4_t x_filter, + const int32x4_t round_vec, const uint16x8_t im_max_val) { + do { + const uint8_t *s = src_ptr; + uint16_t *d = dst_ptr; + int width = w; + + do { + uint8x8_t s0, s1, s2, s3, s4, s5, s6; + load_u8_8x7(s, 1, &s0, &s1, &s2, &s3, &s4, &s5, &s6); + + uint16x8_t d0 = wiener_convolve7_8_2d_h(s0, s1, s2, s3, s4, s5, s6, + x_filter, round_vec, im_max_val); + + vst1q_u16(d, d0); + + s += 8; + d += 8; + width -= 8; + } while (width != 0); + src_ptr += src_stride; + dst_ptr += dst_stride; + } while (--h != 0); +} + +static INLINE uint8x8_t wiener_convolve5_8_2d_v( + const int16x8_t s0, const int16x8_t s1, const int16x8_t s2, + const int16x8_t s3, const int16x8_t s4, const int16x4_t y_filter, + const int32x4_t round_vec) { + // Since the Wiener filter is symmetric about the middle tap (tap 2) add + // mirrored source elements before multiplying by filter coefficients. 
+ int16x8_t s04 = vaddq_s16(s0, s4); + int16x8_t s13 = vaddq_s16(s1, s3); + + int32x4_t sum_lo = vmlal_lane_s16(round_vec, vget_low_s16(s04), y_filter, 1); + sum_lo = vmlal_lane_s16(sum_lo, vget_low_s16(s13), y_filter, 2); + sum_lo = vmlal_lane_s16(sum_lo, vget_low_s16(s2), y_filter, 3); + + int32x4_t sum_hi = vmlal_lane_s16(round_vec, vget_high_s16(s04), y_filter, 1); + sum_hi = vmlal_lane_s16(sum_hi, vget_high_s16(s13), y_filter, 2); + sum_hi = vmlal_lane_s16(sum_hi, vget_high_s16(s2), y_filter, 3); + + int16x4_t res_lo = vshrn_n_s32(sum_lo, 2 * FILTER_BITS - WIENER_ROUND0_BITS); + int16x4_t res_hi = vshrn_n_s32(sum_hi, 2 * FILTER_BITS - WIENER_ROUND0_BITS); + + return vqmovun_s16(vcombine_s16(res_lo, res_hi)); +} + +static INLINE void convolve_add_src_vert_5tap_neon( + const uint16_t *src, ptrdiff_t src_stride, uint8_t *dst, + ptrdiff_t dst_stride, int w, int h, const int16x4_t y_filter, + const int32x4_t round_vec) { + do { + const int16_t *s = (int16_t *)src; + uint8_t *d = dst; + int height = h; + + while (height > 3) { + int16x8_t s0, s1, s2, s3, s4, s5, s6, s7; + load_s16_8x8(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6, &s7); + + uint8x8_t d0 = + wiener_convolve5_8_2d_v(s0, s1, s2, s3, s4, y_filter, round_vec); + uint8x8_t d1 = + wiener_convolve5_8_2d_v(s1, s2, s3, s4, s5, y_filter, round_vec); + uint8x8_t d2 = + wiener_convolve5_8_2d_v(s2, s3, s4, s5, s6, y_filter, round_vec); + uint8x8_t d3 = + wiener_convolve5_8_2d_v(s3, s4, s5, s6, s7, y_filter, round_vec); + + store_u8_8x4(d, dst_stride, d0, d1, d2, d3); + + s += 4 * src_stride; + d += 4 * dst_stride; + height -= 4; + } + + while (height-- != 0) { + int16x8_t s0, s1, s2, s3, s4; + load_s16_8x5(s, src_stride, &s0, &s1, &s2, &s3, &s4); + + uint8x8_t d0 = + wiener_convolve5_8_2d_v(s0, s1, s2, s3, s4, y_filter, round_vec); + + vst1_u8(d, d0); + + d += dst_stride; + s += src_stride; + } + + src += 8; + dst += 8; + w -= 8; + } while (w != 0); +} + +static INLINE uint8x8_t wiener_convolve7_8_2d_v( + const int16x8_t s0, const int16x8_t s1, const int16x8_t s2, + const int16x8_t s3, const int16x8_t s4, const int16x8_t s5, + const int16x8_t s6, const int16x4_t y_filter, const int32x4_t round_vec) { + // Since the Wiener filter is symmetric about the middle tap (tap 3) add + // mirrored source elements before multiplying by filter coefficients. 
+ int16x8_t s06 = vaddq_s16(s0, s6); + int16x8_t s15 = vaddq_s16(s1, s5); + int16x8_t s24 = vaddq_s16(s2, s4); + + int32x4_t sum_lo = vmlal_lane_s16(round_vec, vget_low_s16(s06), y_filter, 0); + sum_lo = vmlal_lane_s16(sum_lo, vget_low_s16(s15), y_filter, 1); + sum_lo = vmlal_lane_s16(sum_lo, vget_low_s16(s24), y_filter, 2); + sum_lo = vmlal_lane_s16(sum_lo, vget_low_s16(s3), y_filter, 3); + + int32x4_t sum_hi = vmlal_lane_s16(round_vec, vget_high_s16(s06), y_filter, 0); + sum_hi = vmlal_lane_s16(sum_hi, vget_high_s16(s15), y_filter, 1); + sum_hi = vmlal_lane_s16(sum_hi, vget_high_s16(s24), y_filter, 2); + sum_hi = vmlal_lane_s16(sum_hi, vget_high_s16(s3), y_filter, 3); + + int16x4_t res_lo = vshrn_n_s32(sum_lo, 2 * FILTER_BITS - WIENER_ROUND0_BITS); + int16x4_t res_hi = vshrn_n_s32(sum_hi, 2 * FILTER_BITS - WIENER_ROUND0_BITS); + + return vqmovun_s16(vcombine_s16(res_lo, res_hi)); +} + +static INLINE void convolve_add_src_vert_7tap_neon( + const uint16_t *src, ptrdiff_t src_stride, uint8_t *dst, + ptrdiff_t dst_stride, int w, int h, const int16x4_t y_filter, + const int32x4_t round_vec) { + do { + const int16_t *s = (int16_t *)src; + uint8_t *d = dst; + int height = h; + + while (height > 3) { + int16x8_t s0, s1, s2, s3, s4, s5, s6, s7, s8, s9; + load_s16_8x10(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6, &s7, &s8, + &s9); + + uint8x8_t d0 = wiener_convolve7_8_2d_v(s0, s1, s2, s3, s4, s5, s6, + y_filter, round_vec); + uint8x8_t d1 = wiener_convolve7_8_2d_v(s1, s2, s3, s4, s5, s6, s7, + y_filter, round_vec); + uint8x8_t d2 = wiener_convolve7_8_2d_v(s2, s3, s4, s5, s6, s7, s8, + y_filter, round_vec); + uint8x8_t d3 = wiener_convolve7_8_2d_v(s3, s4, s5, s6, s7, s8, s9, + y_filter, round_vec); + + store_u8_8x4(d, dst_stride, d0, d1, d2, d3); + + s += 4 * src_stride; + d += 4 * dst_stride; + height -= 4; + } + + while (height-- != 0) { + int16x8_t s0, s1, s2, s3, s4, s5, s6; + load_s16_8x7(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6); + + uint8x8_t d0 = wiener_convolve7_8_2d_v(s0, s1, s2, s3, s4, s5, s6, + y_filter, round_vec); + + vst1_u8(d, d0); + + d += dst_stride; + s += src_stride; + } + + src += 8; + dst += 8; + w -= 8; + } while (w != 0); +} + +static AOM_INLINE int get_wiener_filter_taps(const int16_t *filter) { + assert(filter[7] == 0); + if (filter[0] == 0 && filter[6] == 0) { + return WIENER_WIN_REDUCED; + } + return WIENER_WIN; +} + +// Wiener filter 2D +// Apply horizontal filter and store in a temporary buffer. When applying +// vertical filter, overwrite the original pixel values. +void av1_wiener_convolve_add_src_neon(const uint8_t *src, ptrdiff_t src_stride, + uint8_t *dst, ptrdiff_t dst_stride, + const int16_t *x_filter, int x_step_q4, + const int16_t *y_filter, int y_step_q4, + int w, int h, + const WienerConvolveParams *conv_params) { + (void)x_step_q4; + (void)y_step_q4; + (void)conv_params; + + assert(w % 8 == 0); + assert(w <= MAX_SB_SIZE && h <= MAX_SB_SIZE); + assert(x_step_q4 == 16 && y_step_q4 == 16); + assert(x_filter[7] == 0 && y_filter[7] == 0); + // For bd == 8, assert horizontal filtering output will not exceed 15-bit: + assert(8 + 1 + FILTER_BITS - conv_params->round_0 <= 15); + + DECLARE_ALIGNED(16, uint16_t, + im_block[(MAX_SB_SIZE + WIENER_WIN - 1) * MAX_SB_SIZE]); + + const int x_filter_taps = get_wiener_filter_taps(x_filter); + const int y_filter_taps = get_wiener_filter_taps(y_filter); + int16x4_t x_filter_s16 = vld1_s16(x_filter); + int16x4_t y_filter_s16 = vld1_s16(y_filter); + // Add 128 to tap 3. (Needed for rounding.) 
+  x_filter_s16 = vadd_s16(x_filter_s16, vcreate_s16(128ULL << 48));
+  y_filter_s16 = vadd_s16(y_filter_s16, vcreate_s16(128ULL << 48));
+
+  const int im_stride = MAX_SB_SIZE;
+  const int im_h = h + y_filter_taps - 1;
+  const int horiz_offset = x_filter_taps / 2;
+  const int vert_offset = (y_filter_taps / 2) * (int)src_stride;
+
+  const int bd = 8;
+  const uint16x8_t im_max_val =
+      vdupq_n_u16((1 << (bd + 1 + FILTER_BITS - WIENER_ROUND0_BITS)) - 1);
+  const int32x4_t horiz_round_vec = vdupq_n_s32(1 << (bd + FILTER_BITS - 1));
+
+  const int32x4_t vert_round_vec =
+      vdupq_n_s32((1 << (2 * FILTER_BITS - WIENER_ROUND0_BITS - 1)) -
+                  (1 << (bd + (2 * FILTER_BITS - WIENER_ROUND0_BITS) - 1)));
+
+  if (x_filter_taps == WIENER_WIN_REDUCED) {
+    convolve_add_src_horiz_5tap_neon(src - horiz_offset - vert_offset,
+                                     src_stride, im_block, im_stride, w, im_h,
+                                     x_filter_s16, horiz_round_vec, im_max_val);
+  } else {
+    convolve_add_src_horiz_7tap_neon(src - horiz_offset - vert_offset,
+                                     src_stride, im_block, im_stride, w, im_h,
+                                     x_filter_s16, horiz_round_vec, im_max_val);
+  }
+
+  if (y_filter_taps == WIENER_WIN_REDUCED) {
+    convolve_add_src_vert_5tap_neon(im_block, im_stride, dst, dst_stride, w, h,
+                                    y_filter_s16, vert_round_vec);
+  } else {
+    convolve_add_src_vert_7tap_neon(im_block, im_stride, dst, dst_stride, w, h,
+                                    y_filter_s16, vert_round_vec);
+  }
+}
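For reference, a scalar sketch of what wiener_convolve7_8_2d_h computes for one output sample may help when reading the NEON kernels above. This is an illustrative, hypothetical helper (wiener7_h_scalar is not part of the patch); it assumes bd == 8, that FILTER_BITS and WIENER_ROUND0_BITS carry aom's usual values of 7 and 3, and that filter[3] has already had the 128 bias added, as av1_wiener_convolve_add_src_neon does before filtering.

/* Hypothetical scalar reference for one output of wiener_convolve7_8_2d_h.
 * Assumptions (not taken from the patch): bd == 8, FILTER_BITS == 7,
 * WIENER_ROUND0_BITS == 3, and filter[3] already includes the +128 bias
 * added by av1_wiener_convolve_add_src_neon. */
#include <stdint.h>

#define FILTER_BITS 7        /* assumed aom value */
#define WIENER_ROUND0_BITS 3 /* assumed aom value */

static uint16_t wiener7_h_scalar(const uint8_t *src, const int16_t *filter) {
  const int bd = 8;
  /* Same rounding offset as horiz_round_vec in the NEON code. */
  int32_t sum = 1 << (bd + FILTER_BITS - 1);
  /* Fold mirrored taps before multiplying, as vaddl_u8 does above:
   * f0*(s0+s6) + f1*(s1+s5) + f2*(s2+s4) + f3*s3. */
  sum += filter[0] * (src[0] + src[6]);
  sum += filter[1] * (src[1] + src[5]);
  sum += filter[2] * (src[2] + src[4]);
  sum += filter[3] * src[3];
  /* Matches vqrshrun_n_s32(sum, WIENER_ROUND0_BITS) followed by
   * vminq_u16(res, im_max_val). */
  int32_t res = (sum + (1 << (WIENER_ROUND0_BITS - 1))) >> WIENER_ROUND0_BITS;
  const int32_t im_max =
      (1 << (bd + 1 + FILTER_BITS - WIENER_ROUND0_BITS)) - 1;
  if (res < 0) res = 0;
  if (res > im_max) res = im_max;
  return (uint16_t)res;
}

Folding the mirrored taps keeps the cost at four multiplies per sample instead of seven, which is why the NEON horizontal and vertical kernels pre-add t0+t6, t1+t5 and t2+t4 with vaddl_u8/vaddq_s16 before the vmlal_lane_s16 chain.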