path: root/third_party/aom/av1/common/arm
Diffstat
-rw-r--r--  third_party/aom/av1/common/arm/highbd_compound_convolve_neon.c   532
-rw-r--r--  third_party/aom/av1/common/arm/highbd_compound_convolve_neon.h   293
-rw-r--r--  third_party/aom/av1/common/arm/highbd_compound_convolve_sve2.c  1555
-rw-r--r--  third_party/aom/av1/common/arm/highbd_convolve_sve2.c           1720
-rw-r--r--  third_party/aom/av1/common/arm/highbd_convolve_sve2.h             97
-rw-r--r--  third_party/aom/av1/common/arm/highbd_warp_plane_neon.c           30
-rw-r--r--  third_party/aom/av1/common/arm/highbd_warp_plane_neon.h           60
-rw-r--r--  third_party/aom/av1/common/arm/highbd_warp_plane_sve.c            32
-rw-r--r--  third_party/aom/av1/common/arm/warp_plane_neon.c                  38
-rw-r--r--  third_party/aom/av1/common/arm/warp_plane_neon.h                  60
-rw-r--r--  third_party/aom/av1/common/arm/warp_plane_neon_i8mm.c             38
-rw-r--r--  third_party/aom/av1/common/arm/warp_plane_sve.c                   40
12 files changed, 4070 insertions, 425 deletions
diff --git a/third_party/aom/av1/common/arm/highbd_compound_convolve_neon.c b/third_party/aom/av1/common/arm/highbd_compound_convolve_neon.c
index fc03a2ee04..9247ded6bf 100644
--- a/third_party/aom/av1/common/arm/highbd_compound_convolve_neon.c
+++ b/third_party/aom/av1/common/arm/highbd_compound_convolve_neon.c
@@ -20,266 +20,9 @@
#include "aom_ports/mem.h"
#include "av1/common/convolve.h"
#include "av1/common/filter.h"
+#include "av1/common/arm/highbd_compound_convolve_neon.h"
#include "av1/common/arm/highbd_convolve_neon.h"
-#define ROUND_SHIFT 2 * FILTER_BITS - ROUND0_BITS - COMPOUND_ROUND1_BITS
-
-static INLINE void highbd_12_comp_avg_neon(const uint16_t *src_ptr,
- int src_stride, uint16_t *dst_ptr,
- int dst_stride, int w, int h,
- ConvolveParams *conv_params,
- const int offset, const int bd) {
- CONV_BUF_TYPE *ref_ptr = conv_params->dst;
- const int ref_stride = conv_params->dst_stride;
- const uint16x4_t offset_vec = vdup_n_u16(offset);
- const uint16x8_t max = vdupq_n_u16((1 << bd) - 1);
-
- if (w == 4) {
- do {
- const uint16x4_t src = vld1_u16(src_ptr);
- const uint16x4_t ref = vld1_u16(ref_ptr);
-
- uint16x4_t avg = vhadd_u16(src, ref);
- int32x4_t d0 = vreinterpretq_s32_u32(vsubl_u16(avg, offset_vec));
-
- uint16x4_t d0_u16 = vqrshrun_n_s32(d0, ROUND_SHIFT - 2);
- d0_u16 = vmin_u16(d0_u16, vget_low_u16(max));
-
- vst1_u16(dst_ptr, d0_u16);
-
- src_ptr += src_stride;
- ref_ptr += ref_stride;
- dst_ptr += dst_stride;
- } while (--h != 0);
- } else {
- do {
- int width = w;
- const uint16_t *src = src_ptr;
- const uint16_t *ref = ref_ptr;
- uint16_t *dst = dst_ptr;
- do {
- const uint16x8_t s = vld1q_u16(src);
- const uint16x8_t r = vld1q_u16(ref);
-
- uint16x8_t avg = vhaddq_u16(s, r);
- int32x4_t d0_lo =
- vreinterpretq_s32_u32(vsubl_u16(vget_low_u16(avg), offset_vec));
- int32x4_t d0_hi =
- vreinterpretq_s32_u32(vsubl_u16(vget_high_u16(avg), offset_vec));
-
- uint16x8_t d0 = vcombine_u16(vqrshrun_n_s32(d0_lo, ROUND_SHIFT - 2),
- vqrshrun_n_s32(d0_hi, ROUND_SHIFT - 2));
- d0 = vminq_u16(d0, max);
- vst1q_u16(dst, d0);
-
- src += 8;
- ref += 8;
- dst += 8;
- width -= 8;
- } while (width != 0);
-
- src_ptr += src_stride;
- ref_ptr += ref_stride;
- dst_ptr += dst_stride;
- } while (--h != 0);
- }
-}
-
-static INLINE void highbd_comp_avg_neon(const uint16_t *src_ptr, int src_stride,
- uint16_t *dst_ptr, int dst_stride,
- int w, int h,
- ConvolveParams *conv_params,
- const int offset, const int bd) {
- CONV_BUF_TYPE *ref_ptr = conv_params->dst;
- const int ref_stride = conv_params->dst_stride;
- const uint16x4_t offset_vec = vdup_n_u16(offset);
- const uint16x8_t max = vdupq_n_u16((1 << bd) - 1);
-
- if (w == 4) {
- do {
- const uint16x4_t src = vld1_u16(src_ptr);
- const uint16x4_t ref = vld1_u16(ref_ptr);
-
- uint16x4_t avg = vhadd_u16(src, ref);
- int32x4_t d0 = vreinterpretq_s32_u32(vsubl_u16(avg, offset_vec));
-
- uint16x4_t d0_u16 = vqrshrun_n_s32(d0, ROUND_SHIFT);
- d0_u16 = vmin_u16(d0_u16, vget_low_u16(max));
-
- vst1_u16(dst_ptr, d0_u16);
-
- src_ptr += src_stride;
- ref_ptr += ref_stride;
- dst_ptr += dst_stride;
- } while (--h != 0);
- } else {
- do {
- int width = w;
- const uint16_t *src = src_ptr;
- const uint16_t *ref = ref_ptr;
- uint16_t *dst = dst_ptr;
- do {
- const uint16x8_t s = vld1q_u16(src);
- const uint16x8_t r = vld1q_u16(ref);
-
- uint16x8_t avg = vhaddq_u16(s, r);
- int32x4_t d0_lo =
- vreinterpretq_s32_u32(vsubl_u16(vget_low_u16(avg), offset_vec));
- int32x4_t d0_hi =
- vreinterpretq_s32_u32(vsubl_u16(vget_high_u16(avg), offset_vec));
-
- uint16x8_t d0 = vcombine_u16(vqrshrun_n_s32(d0_lo, ROUND_SHIFT),
- vqrshrun_n_s32(d0_hi, ROUND_SHIFT));
- d0 = vminq_u16(d0, max);
- vst1q_u16(dst, d0);
-
- src += 8;
- ref += 8;
- dst += 8;
- width -= 8;
- } while (width != 0);
-
- src_ptr += src_stride;
- ref_ptr += ref_stride;
- dst_ptr += dst_stride;
- } while (--h != 0);
- }
-}
-
-static INLINE void highbd_12_dist_wtd_comp_avg_neon(
- const uint16_t *src_ptr, int src_stride, uint16_t *dst_ptr, int dst_stride,
- int w, int h, ConvolveParams *conv_params, const int offset, const int bd) {
- CONV_BUF_TYPE *ref_ptr = conv_params->dst;
- const int ref_stride = conv_params->dst_stride;
- const uint32x4_t offset_vec = vdupq_n_u32(offset);
- const uint16x8_t max = vdupq_n_u16((1 << bd) - 1);
- uint16x4_t fwd_offset = vdup_n_u16(conv_params->fwd_offset);
- uint16x4_t bck_offset = vdup_n_u16(conv_params->bck_offset);
-
- // Weighted averaging
- if (w == 4) {
- do {
- const uint16x4_t src = vld1_u16(src_ptr);
- const uint16x4_t ref = vld1_u16(ref_ptr);
-
- uint32x4_t wtd_avg = vmull_u16(ref, fwd_offset);
- wtd_avg = vmlal_u16(wtd_avg, src, bck_offset);
- wtd_avg = vshrq_n_u32(wtd_avg, DIST_PRECISION_BITS);
- int32x4_t d0 = vreinterpretq_s32_u32(vsubq_u32(wtd_avg, offset_vec));
-
- uint16x4_t d0_u16 = vqrshrun_n_s32(d0, ROUND_SHIFT - 2);
- d0_u16 = vmin_u16(d0_u16, vget_low_u16(max));
-
- vst1_u16(dst_ptr, d0_u16);
-
- src_ptr += src_stride;
- dst_ptr += dst_stride;
- ref_ptr += ref_stride;
- } while (--h != 0);
- } else {
- do {
- int width = w;
- const uint16_t *src = src_ptr;
- const uint16_t *ref = ref_ptr;
- uint16_t *dst = dst_ptr;
- do {
- const uint16x8_t s = vld1q_u16(src);
- const uint16x8_t r = vld1q_u16(ref);
-
- uint32x4_t wtd_avg0 = vmull_u16(vget_low_u16(r), fwd_offset);
- wtd_avg0 = vmlal_u16(wtd_avg0, vget_low_u16(s), bck_offset);
- wtd_avg0 = vshrq_n_u32(wtd_avg0, DIST_PRECISION_BITS);
- int32x4_t d0 = vreinterpretq_s32_u32(vsubq_u32(wtd_avg0, offset_vec));
-
- uint32x4_t wtd_avg1 = vmull_u16(vget_high_u16(r), fwd_offset);
- wtd_avg1 = vmlal_u16(wtd_avg1, vget_high_u16(s), bck_offset);
- wtd_avg1 = vshrq_n_u32(wtd_avg1, DIST_PRECISION_BITS);
- int32x4_t d1 = vreinterpretq_s32_u32(vsubq_u32(wtd_avg1, offset_vec));
-
- uint16x8_t d01 = vcombine_u16(vqrshrun_n_s32(d0, ROUND_SHIFT - 2),
- vqrshrun_n_s32(d1, ROUND_SHIFT - 2));
- d01 = vminq_u16(d01, max);
- vst1q_u16(dst, d01);
-
- src += 8;
- ref += 8;
- dst += 8;
- width -= 8;
- } while (width != 0);
- src_ptr += src_stride;
- dst_ptr += dst_stride;
- ref_ptr += ref_stride;
- } while (--h != 0);
- }
-}
-
-static INLINE void highbd_dist_wtd_comp_avg_neon(
- const uint16_t *src_ptr, int src_stride, uint16_t *dst_ptr, int dst_stride,
- int w, int h, ConvolveParams *conv_params, const int offset, const int bd) {
- CONV_BUF_TYPE *ref_ptr = conv_params->dst;
- const int ref_stride = conv_params->dst_stride;
- const uint32x4_t offset_vec = vdupq_n_u32(offset);
- const uint16x8_t max = vdupq_n_u16((1 << bd) - 1);
- uint16x4_t fwd_offset = vdup_n_u16(conv_params->fwd_offset);
- uint16x4_t bck_offset = vdup_n_u16(conv_params->bck_offset);
-
- // Weighted averaging
- if (w == 4) {
- do {
- const uint16x4_t src = vld1_u16(src_ptr);
- const uint16x4_t ref = vld1_u16(ref_ptr);
-
- uint32x4_t wtd_avg = vmull_u16(ref, fwd_offset);
- wtd_avg = vmlal_u16(wtd_avg, src, bck_offset);
- wtd_avg = vshrq_n_u32(wtd_avg, DIST_PRECISION_BITS);
- int32x4_t d0 = vreinterpretq_s32_u32(vsubq_u32(wtd_avg, offset_vec));
-
- uint16x4_t d0_u16 = vqrshrun_n_s32(d0, ROUND_SHIFT);
- d0_u16 = vmin_u16(d0_u16, vget_low_u16(max));
-
- vst1_u16(dst_ptr, d0_u16);
-
- src_ptr += src_stride;
- dst_ptr += dst_stride;
- ref_ptr += ref_stride;
- } while (--h != 0);
- } else {
- do {
- int width = w;
- const uint16_t *src = src_ptr;
- const uint16_t *ref = ref_ptr;
- uint16_t *dst = dst_ptr;
- do {
- const uint16x8_t s = vld1q_u16(src);
- const uint16x8_t r = vld1q_u16(ref);
-
- uint32x4_t wtd_avg0 = vmull_u16(vget_low_u16(r), fwd_offset);
- wtd_avg0 = vmlal_u16(wtd_avg0, vget_low_u16(s), bck_offset);
- wtd_avg0 = vshrq_n_u32(wtd_avg0, DIST_PRECISION_BITS);
- int32x4_t d0 = vreinterpretq_s32_u32(vsubq_u32(wtd_avg0, offset_vec));
-
- uint32x4_t wtd_avg1 = vmull_u16(vget_high_u16(r), fwd_offset);
- wtd_avg1 = vmlal_u16(wtd_avg1, vget_high_u16(s), bck_offset);
- wtd_avg1 = vshrq_n_u32(wtd_avg1, DIST_PRECISION_BITS);
- int32x4_t d1 = vreinterpretq_s32_u32(vsubq_u32(wtd_avg1, offset_vec));
-
- uint16x8_t d01 = vcombine_u16(vqrshrun_n_s32(d0, ROUND_SHIFT),
- vqrshrun_n_s32(d1, ROUND_SHIFT));
- d01 = vminq_u16(d01, max);
- vst1q_u16(dst, d01);
-
- src += 8;
- ref += 8;
- dst += 8;
- width -= 8;
- } while (width != 0);
- src_ptr += src_stride;
- dst_ptr += dst_stride;
- ref_ptr += ref_stride;
- } while (--h != 0);
- }
-}
-
static INLINE uint16x4_t highbd_12_convolve6_4(
const int16x4_t s0, const int16x4_t s1, const int16x4_t s2,
const int16x4_t s3, const int16x4_t s4, const int16x4_t s5,
@@ -743,9 +486,6 @@ void av1_highbd_dist_wtd_convolve_x_neon(
const int im_stride = MAX_SB_SIZE;
const int horiz_offset = filter_params_x->taps / 2 - 1;
assert(FILTER_BITS == COMPOUND_ROUND1_BITS);
- const int offset_bits = bd + 2 * FILTER_BITS - conv_params->round_0;
- const int offset_avg = (1 << (offset_bits - conv_params->round_1)) +
- (1 << (offset_bits - conv_params->round_1 - 1));
const int offset_convolve = (1 << (conv_params->round_0 - 1)) +
(1 << (bd + FILTER_BITS)) +
(1 << (bd + FILTER_BITS - 1));
@@ -768,10 +508,10 @@ void av1_highbd_dist_wtd_convolve_x_neon(
}
if (conv_params->use_dist_wtd_comp_avg) {
highbd_12_dist_wtd_comp_avg_neon(im_block, im_stride, dst, dst_stride,
- w, h, conv_params, offset_avg, bd);
+ w, h, conv_params);
} else {
highbd_12_comp_avg_neon(im_block, im_stride, dst, dst_stride, w, h,
- conv_params, offset_avg, bd);
+ conv_params);
}
} else {
if (x_filter_taps <= 6 && w != 4) {
@@ -795,10 +535,10 @@ void av1_highbd_dist_wtd_convolve_x_neon(
}
if (conv_params->use_dist_wtd_comp_avg) {
highbd_dist_wtd_comp_avg_neon(im_block, im_stride, dst, dst_stride, w,
- h, conv_params, offset_avg, bd);
+ h, conv_params, bd);
} else {
highbd_comp_avg_neon(im_block, im_stride, dst, dst_stride, w, h,
- conv_params, offset_avg, bd);
+ conv_params, bd);
}
} else {
if (x_filter_taps <= 6 && w != 4) {
@@ -971,6 +711,212 @@ static INLINE void highbd_dist_wtd_convolve_y_6tap_neon(
}
}
+static INLINE uint16x4_t highbd_12_convolve4_4(
+ const int16x4_t s0, const int16x4_t s1, const int16x4_t s2,
+ const int16x4_t s3, const int16x4_t filter, const int32x4_t offset) {
+ int32x4_t sum = vmlal_lane_s16(offset, s0, filter, 0);
+ sum = vmlal_lane_s16(sum, s1, filter, 1);
+ sum = vmlal_lane_s16(sum, s2, filter, 2);
+ sum = vmlal_lane_s16(sum, s3, filter, 3);
+
+ return vqshrun_n_s32(sum, ROUND0_BITS + 2);
+}
+
+static INLINE uint16x8_t highbd_12_convolve4_8(
+ const int16x8_t s0, const int16x8_t s1, const int16x8_t s2,
+ const int16x8_t s3, const int16x4_t filter, const int32x4_t offset) {
+ int32x4_t sum0 = vmlal_lane_s16(offset, vget_low_s16(s0), filter, 0);
+ sum0 = vmlal_lane_s16(sum0, vget_low_s16(s1), filter, 1);
+ sum0 = vmlal_lane_s16(sum0, vget_low_s16(s2), filter, 2);
+ sum0 = vmlal_lane_s16(sum0, vget_low_s16(s3), filter, 3);
+
+ int32x4_t sum1 = vmlal_lane_s16(offset, vget_high_s16(s0), filter, 0);
+ sum1 = vmlal_lane_s16(sum1, vget_high_s16(s1), filter, 1);
+ sum1 = vmlal_lane_s16(sum1, vget_high_s16(s2), filter, 2);
+ sum1 = vmlal_lane_s16(sum1, vget_high_s16(s3), filter, 3);
+
+ return vcombine_u16(vqshrun_n_s32(sum0, ROUND0_BITS + 2),
+ vqshrun_n_s32(sum1, ROUND0_BITS + 2));
+}
+
+static INLINE void highbd_12_dist_wtd_convolve_y_4tap_neon(
+ const uint16_t *src_ptr, int src_stride, uint16_t *dst_ptr, int dst_stride,
+ int w, int h, const int16_t *y_filter_ptr, const int offset) {
+ const int16x4_t y_filter = vld1_s16(y_filter_ptr + 2);
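+  // 4-tap kernels keep their non-zero coefficients at positions 2..5 of the
+  // 8-tap filter array, hence the +2 when loading the filter above.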
+ const int32x4_t offset_vec = vdupq_n_s32(offset);
+
+ if (w == 4) {
+ const int16_t *s = (const int16_t *)src_ptr;
+ uint16_t *d = dst_ptr;
+
+ int16x4_t s0, s1, s2;
+ load_s16_4x3(s, src_stride, &s0, &s1, &s2);
+ s += 3 * src_stride;
+
+ do {
+ int16x4_t s3, s4, s5, s6;
+ load_s16_4x4(s, src_stride, &s3, &s4, &s5, &s6);
+
+ uint16x4_t d0 =
+ highbd_12_convolve4_4(s0, s1, s2, s3, y_filter, offset_vec);
+ uint16x4_t d1 =
+ highbd_12_convolve4_4(s1, s2, s3, s4, y_filter, offset_vec);
+ uint16x4_t d2 =
+ highbd_12_convolve4_4(s2, s3, s4, s5, y_filter, offset_vec);
+ uint16x4_t d3 =
+ highbd_12_convolve4_4(s3, s4, s5, s6, y_filter, offset_vec);
+
+ store_u16_4x4(d, dst_stride, d0, d1, d2, d3);
+
+ s0 = s4;
+ s1 = s5;
+ s2 = s6;
+
+ s += 4 * src_stride;
+ d += 4 * dst_stride;
+ h -= 4;
+ } while (h != 0);
+ } else {
+ do {
+ int height = h;
+ const int16_t *s = (const int16_t *)src_ptr;
+ uint16_t *d = dst_ptr;
+
+ int16x8_t s0, s1, s2;
+ load_s16_8x3(s, src_stride, &s0, &s1, &s2);
+ s += 3 * src_stride;
+
+ do {
+ int16x8_t s3, s4, s5, s6;
+ load_s16_8x4(s, src_stride, &s3, &s4, &s5, &s6);
+
+ uint16x8_t d0 =
+ highbd_12_convolve4_8(s0, s1, s2, s3, y_filter, offset_vec);
+ uint16x8_t d1 =
+ highbd_12_convolve4_8(s1, s2, s3, s4, y_filter, offset_vec);
+ uint16x8_t d2 =
+ highbd_12_convolve4_8(s2, s3, s4, s5, y_filter, offset_vec);
+ uint16x8_t d3 =
+ highbd_12_convolve4_8(s3, s4, s5, s6, y_filter, offset_vec);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ s0 = s4;
+ s1 = s5;
+ s2 = s6;
+
+ s += 4 * src_stride;
+ d += 4 * dst_stride;
+ height -= 4;
+ } while (height != 0);
+ src_ptr += 8;
+ dst_ptr += 8;
+ w -= 8;
+ } while (w != 0);
+ }
+}
+
+static INLINE uint16x4_t highbd_convolve4_4(
+ const int16x4_t s0, const int16x4_t s1, const int16x4_t s2,
+ const int16x4_t s3, const int16x4_t filter, const int32x4_t offset) {
+ int32x4_t sum = vmlal_lane_s16(offset, s0, filter, 0);
+ sum = vmlal_lane_s16(sum, s1, filter, 1);
+ sum = vmlal_lane_s16(sum, s2, filter, 2);
+ sum = vmlal_lane_s16(sum, s3, filter, 3);
+
+ return vqshrun_n_s32(sum, ROUND0_BITS);
+}
+
+static INLINE uint16x8_t highbd_convolve4_8(
+ const int16x8_t s0, const int16x8_t s1, const int16x8_t s2,
+ const int16x8_t s3, const int16x4_t filter, const int32x4_t offset) {
+ int32x4_t sum0 = vmlal_lane_s16(offset, vget_low_s16(s0), filter, 0);
+ sum0 = vmlal_lane_s16(sum0, vget_low_s16(s1), filter, 1);
+ sum0 = vmlal_lane_s16(sum0, vget_low_s16(s2), filter, 2);
+ sum0 = vmlal_lane_s16(sum0, vget_low_s16(s3), filter, 3);
+
+ int32x4_t sum1 = vmlal_lane_s16(offset, vget_high_s16(s0), filter, 0);
+ sum1 = vmlal_lane_s16(sum1, vget_high_s16(s1), filter, 1);
+ sum1 = vmlal_lane_s16(sum1, vget_high_s16(s2), filter, 2);
+ sum1 = vmlal_lane_s16(sum1, vget_high_s16(s3), filter, 3);
+
+ return vcombine_u16(vqshrun_n_s32(sum0, ROUND0_BITS),
+ vqshrun_n_s32(sum1, ROUND0_BITS));
+}
+
+static INLINE void highbd_dist_wtd_convolve_y_4tap_neon(
+ const uint16_t *src_ptr, int src_stride, uint16_t *dst_ptr, int dst_stride,
+ int w, int h, const int16_t *y_filter_ptr, const int offset) {
+ const int16x4_t y_filter = vld1_s16(y_filter_ptr + 2);
+ const int32x4_t offset_vec = vdupq_n_s32(offset);
+
+ if (w == 4) {
+ const int16_t *s = (const int16_t *)src_ptr;
+ uint16_t *d = dst_ptr;
+
+ int16x4_t s0, s1, s2;
+ load_s16_4x3(s, src_stride, &s0, &s1, &s2);
+ s += 3 * src_stride;
+
+ do {
+ int16x4_t s3, s4, s5, s6;
+ load_s16_4x4(s, src_stride, &s3, &s4, &s5, &s6);
+
+ uint16x4_t d0 = highbd_convolve4_4(s0, s1, s2, s3, y_filter, offset_vec);
+ uint16x4_t d1 = highbd_convolve4_4(s1, s2, s3, s4, y_filter, offset_vec);
+ uint16x4_t d2 = highbd_convolve4_4(s2, s3, s4, s5, y_filter, offset_vec);
+ uint16x4_t d3 = highbd_convolve4_4(s3, s4, s5, s6, y_filter, offset_vec);
+
+ store_u16_4x4(d, dst_stride, d0, d1, d2, d3);
+
+ s0 = s4;
+ s1 = s5;
+ s2 = s6;
+
+ s += 4 * src_stride;
+ d += 4 * dst_stride;
+ h -= 4;
+ } while (h != 0);
+ } else {
+ do {
+ int height = h;
+ const int16_t *s = (const int16_t *)src_ptr;
+ uint16_t *d = dst_ptr;
+
+ int16x8_t s0, s1, s2;
+ load_s16_8x3(s, src_stride, &s0, &s1, &s2);
+ s += 3 * src_stride;
+
+ do {
+ int16x8_t s3, s4, s5, s6;
+ load_s16_8x4(s, src_stride, &s3, &s4, &s5, &s6);
+
+ uint16x8_t d0 =
+ highbd_convolve4_8(s0, s1, s2, s3, y_filter, offset_vec);
+ uint16x8_t d1 =
+ highbd_convolve4_8(s1, s2, s3, s4, y_filter, offset_vec);
+ uint16x8_t d2 =
+ highbd_convolve4_8(s2, s3, s4, s5, y_filter, offset_vec);
+ uint16x8_t d3 =
+ highbd_convolve4_8(s3, s4, s5, s6, y_filter, offset_vec);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ s0 = s4;
+ s1 = s5;
+ s2 = s6;
+
+ s += 4 * src_stride;
+ d += 4 * dst_stride;
+ height -= 4;
+ } while (height != 0);
+ src_ptr += 8;
+ dst_ptr += 8;
+ w -= 8;
+ } while (w != 0);
+ }
+}
+
static INLINE void highbd_12_dist_wtd_convolve_y_8tap_neon(
const uint16_t *src_ptr, int src_stride, uint16_t *dst_ptr, int dst_stride,
int w, int h, const int16_t *y_filter_ptr, const int offset) {
@@ -1148,9 +1094,6 @@ void av1_highbd_dist_wtd_convolve_y_neon(
const int im_stride = MAX_SB_SIZE;
const int vert_offset = filter_params_y->taps / 2 - 1;
assert(FILTER_BITS == COMPOUND_ROUND1_BITS);
- const int offset_bits = bd + 2 * FILTER_BITS - conv_params->round_0;
- const int round_offset_avg = (1 << (offset_bits - conv_params->round_1)) +
- (1 << (offset_bits - conv_params->round_1 - 1));
const int round_offset_conv = (1 << (conv_params->round_0 - 1)) +
(1 << (bd + FILTER_BITS)) +
(1 << (bd + FILTER_BITS - 1));
@@ -1162,7 +1105,11 @@ void av1_highbd_dist_wtd_convolve_y_neon(
if (bd == 12) {
if (conv_params->do_average) {
- if (y_filter_taps <= 6) {
+ if (y_filter_taps <= 4) {
+ highbd_12_dist_wtd_convolve_y_4tap_neon(
+ src + 2 * src_stride, src_stride, im_block, im_stride, w, h,
+ y_filter_ptr, round_offset_conv);
+ } else if (y_filter_taps == 6) {
highbd_12_dist_wtd_convolve_y_6tap_neon(
src + src_stride, src_stride, im_block, im_stride, w, h,
y_filter_ptr, round_offset_conv);
@@ -1173,14 +1120,17 @@ void av1_highbd_dist_wtd_convolve_y_neon(
}
if (conv_params->use_dist_wtd_comp_avg) {
highbd_12_dist_wtd_comp_avg_neon(im_block, im_stride, dst, dst_stride,
- w, h, conv_params, round_offset_avg,
- bd);
+ w, h, conv_params);
} else {
highbd_12_comp_avg_neon(im_block, im_stride, dst, dst_stride, w, h,
- conv_params, round_offset_avg, bd);
+ conv_params);
}
} else {
- if (y_filter_taps <= 6) {
+ if (y_filter_taps <= 4) {
+ highbd_12_dist_wtd_convolve_y_4tap_neon(
+ src + 2 * src_stride, src_stride, dst16, dst16_stride, w, h,
+ y_filter_ptr, round_offset_conv);
+ } else if (y_filter_taps == 6) {
highbd_12_dist_wtd_convolve_y_6tap_neon(
src + src_stride, src_stride, dst16, dst16_stride, w, h,
y_filter_ptr, round_offset_conv);
@@ -1192,7 +1142,11 @@ void av1_highbd_dist_wtd_convolve_y_neon(
}
} else {
if (conv_params->do_average) {
- if (y_filter_taps <= 6) {
+ if (y_filter_taps <= 4) {
+ highbd_dist_wtd_convolve_y_4tap_neon(src + 2 * src_stride, src_stride,
+ im_block, im_stride, w, h,
+ y_filter_ptr, round_offset_conv);
+ } else if (y_filter_taps == 6) {
highbd_dist_wtd_convolve_y_6tap_neon(src + src_stride, src_stride,
im_block, im_stride, w, h,
y_filter_ptr, round_offset_conv);
@@ -1203,13 +1157,17 @@ void av1_highbd_dist_wtd_convolve_y_neon(
}
if (conv_params->use_dist_wtd_comp_avg) {
highbd_dist_wtd_comp_avg_neon(im_block, im_stride, dst, dst_stride, w,
- h, conv_params, round_offset_avg, bd);
+ h, conv_params, bd);
} else {
highbd_comp_avg_neon(im_block, im_stride, dst, dst_stride, w, h,
- conv_params, round_offset_avg, bd);
+ conv_params, bd);
}
} else {
- if (y_filter_taps <= 6) {
+ if (y_filter_taps <= 4) {
+ highbd_dist_wtd_convolve_y_4tap_neon(src + 2 * src_stride, src_stride,
+ dst16, dst16_stride, w, h,
+ y_filter_ptr, round_offset_conv);
+ } else if (y_filter_taps == 6) {
highbd_dist_wtd_convolve_y_6tap_neon(src + src_stride, src_stride,
dst16, dst16_stride, w, h,
y_filter_ptr, round_offset_conv);
@@ -1285,18 +1243,18 @@ void av1_highbd_dist_wtd_convolve_2d_copy_neon(const uint16_t *src,
if (conv_params->use_dist_wtd_comp_avg) {
if (bd == 12) {
highbd_12_dist_wtd_comp_avg_neon(im_block, im_stride, dst, dst_stride,
- w, h, conv_params, round_offset, bd);
+ w, h, conv_params);
} else {
highbd_dist_wtd_comp_avg_neon(im_block, im_stride, dst, dst_stride, w,
- h, conv_params, round_offset, bd);
+ h, conv_params, bd);
}
} else {
if (bd == 12) {
highbd_12_comp_avg_neon(im_block, im_stride, dst, dst_stride, w, h,
- conv_params, round_offset, bd);
+ conv_params);
} else {
highbd_comp_avg_neon(im_block, im_stride, dst, dst_stride, w, h,
- conv_params, round_offset, bd);
+ conv_params, bd);
}
}
}
@@ -1949,9 +1907,6 @@ void av1_highbd_dist_wtd_convolve_2d_neon(
(1 << (bd + FILTER_BITS - 1)) + (1 << (conv_params->round_0 - 1));
const int y_offset_bits = bd + 2 * FILTER_BITS - conv_params->round_0;
const int round_offset_conv_y = (1 << y_offset_bits);
- const int round_offset_avg =
- ((1 << (y_offset_bits - conv_params->round_1)) +
- (1 << (y_offset_bits - conv_params->round_1 - 1)));
const uint16_t *src_ptr = src - vert_offset * src_stride - horiz_offset;
@@ -2012,19 +1967,18 @@ void av1_highbd_dist_wtd_convolve_2d_neon(
if (conv_params->use_dist_wtd_comp_avg) {
if (bd == 12) {
highbd_12_dist_wtd_comp_avg_neon(im_block2, im_stride, dst, dst_stride,
- w, h, conv_params, round_offset_avg,
- bd);
+ w, h, conv_params);
} else {
highbd_dist_wtd_comp_avg_neon(im_block2, im_stride, dst, dst_stride, w,
- h, conv_params, round_offset_avg, bd);
+ h, conv_params, bd);
}
} else {
if (bd == 12) {
highbd_12_comp_avg_neon(im_block2, im_stride, dst, dst_stride, w, h,
- conv_params, round_offset_avg, bd);
+ conv_params);
} else {
highbd_comp_avg_neon(im_block2, im_stride, dst, dst_stride, w, h,
- conv_params, round_offset_avg, bd);
+ conv_params, bd);
}
}
}
diff --git a/third_party/aom/av1/common/arm/highbd_compound_convolve_neon.h b/third_party/aom/av1/common/arm/highbd_compound_convolve_neon.h
new file mode 100644
index 0000000000..c9344f3adf
--- /dev/null
+++ b/third_party/aom/av1/common/arm/highbd_compound_convolve_neon.h
@@ -0,0 +1,293 @@
+/*
+ * Copyright (c) 2024, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <assert.h>
+#include <arm_neon.h>
+
+#include "config/aom_config.h"
+#include "config/av1_rtcd.h"
+
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_dsp/arm/mem_neon.h"
+#include "aom_ports/mem.h"
+
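+// Shift used to scale the intermediate compound-prediction values back down
+// to pixel precision after (weighted) averaging. The 12-bit paths below use
+// ROUND_SHIFT - 2 since their round_0 stage is two bits larger.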
+#define ROUND_SHIFT 2 * FILTER_BITS - ROUND0_BITS - COMPOUND_ROUND1_BITS
+
+static INLINE void highbd_12_comp_avg_neon(const uint16_t *src_ptr,
+ int src_stride, uint16_t *dst_ptr,
+ int dst_stride, int w, int h,
+ ConvolveParams *conv_params) {
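+  // Offset added to every intermediate value during compound filtering,
+  // reconstructed here for bd == 12 (where round_0 == ROUND0_BITS + 2) so it
+  // can be subtracted again before the final rounding shift.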
+ const int offset_bits = 12 + 2 * FILTER_BITS - ROUND0_BITS - 2;
+ const int offset = (1 << (offset_bits - COMPOUND_ROUND1_BITS)) +
+ (1 << (offset_bits - COMPOUND_ROUND1_BITS - 1));
+
+ CONV_BUF_TYPE *ref_ptr = conv_params->dst;
+ const int ref_stride = conv_params->dst_stride;
+ const uint16x4_t offset_vec = vdup_n_u16((uint16_t)offset);
+ const uint16x8_t max = vdupq_n_u16((1 << 12) - 1);
+
+ if (w == 4) {
+ do {
+ const uint16x4_t src = vld1_u16(src_ptr);
+ const uint16x4_t ref = vld1_u16(ref_ptr);
+
+ uint16x4_t avg = vhadd_u16(src, ref);
+ int32x4_t d0 = vreinterpretq_s32_u32(vsubl_u16(avg, offset_vec));
+
+ uint16x4_t d0_u16 = vqrshrun_n_s32(d0, ROUND_SHIFT - 2);
+ d0_u16 = vmin_u16(d0_u16, vget_low_u16(max));
+
+ vst1_u16(dst_ptr, d0_u16);
+
+ src_ptr += src_stride;
+ ref_ptr += ref_stride;
+ dst_ptr += dst_stride;
+ } while (--h != 0);
+ } else {
+ do {
+ int width = w;
+ const uint16_t *src = src_ptr;
+ const uint16_t *ref = ref_ptr;
+ uint16_t *dst = dst_ptr;
+ do {
+ const uint16x8_t s = vld1q_u16(src);
+ const uint16x8_t r = vld1q_u16(ref);
+
+ uint16x8_t avg = vhaddq_u16(s, r);
+ int32x4_t d0_lo =
+ vreinterpretq_s32_u32(vsubl_u16(vget_low_u16(avg), offset_vec));
+ int32x4_t d0_hi =
+ vreinterpretq_s32_u32(vsubl_u16(vget_high_u16(avg), offset_vec));
+
+ uint16x8_t d0 = vcombine_u16(vqrshrun_n_s32(d0_lo, ROUND_SHIFT - 2),
+ vqrshrun_n_s32(d0_hi, ROUND_SHIFT - 2));
+ d0 = vminq_u16(d0, max);
+ vst1q_u16(dst, d0);
+
+ src += 8;
+ ref += 8;
+ dst += 8;
+ width -= 8;
+ } while (width != 0);
+
+ src_ptr += src_stride;
+ ref_ptr += ref_stride;
+ dst_ptr += dst_stride;
+ } while (--h != 0);
+ }
+}
+
+static INLINE void highbd_comp_avg_neon(const uint16_t *src_ptr, int src_stride,
+ uint16_t *dst_ptr, int dst_stride,
+ int w, int h,
+ ConvolveParams *conv_params,
+ const int bd) {
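+  // Same compound-prediction offset as in the 12-bit variant above, derived
+  // from the true bit depth since round_0 == ROUND0_BITS here.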
+ const int offset_bits = bd + 2 * FILTER_BITS - ROUND0_BITS;
+ const int offset = (1 << (offset_bits - COMPOUND_ROUND1_BITS)) +
+ (1 << (offset_bits - COMPOUND_ROUND1_BITS - 1));
+
+ CONV_BUF_TYPE *ref_ptr = conv_params->dst;
+ const int ref_stride = conv_params->dst_stride;
+ const uint16x4_t offset_vec = vdup_n_u16((uint16_t)offset);
+ const uint16x8_t max = vdupq_n_u16((1 << bd) - 1);
+
+ if (w == 4) {
+ do {
+ const uint16x4_t src = vld1_u16(src_ptr);
+ const uint16x4_t ref = vld1_u16(ref_ptr);
+
+ uint16x4_t avg = vhadd_u16(src, ref);
+ int32x4_t d0 = vreinterpretq_s32_u32(vsubl_u16(avg, offset_vec));
+
+ uint16x4_t d0_u16 = vqrshrun_n_s32(d0, ROUND_SHIFT);
+ d0_u16 = vmin_u16(d0_u16, vget_low_u16(max));
+
+ vst1_u16(dst_ptr, d0_u16);
+
+ src_ptr += src_stride;
+ ref_ptr += ref_stride;
+ dst_ptr += dst_stride;
+ } while (--h != 0);
+ } else {
+ do {
+ int width = w;
+ const uint16_t *src = src_ptr;
+ const uint16_t *ref = ref_ptr;
+ uint16_t *dst = dst_ptr;
+ do {
+ const uint16x8_t s = vld1q_u16(src);
+ const uint16x8_t r = vld1q_u16(ref);
+
+ uint16x8_t avg = vhaddq_u16(s, r);
+ int32x4_t d0_lo =
+ vreinterpretq_s32_u32(vsubl_u16(vget_low_u16(avg), offset_vec));
+ int32x4_t d0_hi =
+ vreinterpretq_s32_u32(vsubl_u16(vget_high_u16(avg), offset_vec));
+
+ uint16x8_t d0 = vcombine_u16(vqrshrun_n_s32(d0_lo, ROUND_SHIFT),
+ vqrshrun_n_s32(d0_hi, ROUND_SHIFT));
+ d0 = vminq_u16(d0, max);
+ vst1q_u16(dst, d0);
+
+ src += 8;
+ ref += 8;
+ dst += 8;
+ width -= 8;
+ } while (width != 0);
+
+ src_ptr += src_stride;
+ ref_ptr += ref_stride;
+ dst_ptr += dst_stride;
+ } while (--h != 0);
+ }
+}
+
+static INLINE void highbd_12_dist_wtd_comp_avg_neon(
+ const uint16_t *src_ptr, int src_stride, uint16_t *dst_ptr, int dst_stride,
+ int w, int h, ConvolveParams *conv_params) {
+ const int offset_bits = 12 + 2 * FILTER_BITS - ROUND0_BITS - 2;
+ const int offset = (1 << (offset_bits - COMPOUND_ROUND1_BITS)) +
+ (1 << (offset_bits - COMPOUND_ROUND1_BITS - 1));
+
+ CONV_BUF_TYPE *ref_ptr = conv_params->dst;
+ const int ref_stride = conv_params->dst_stride;
+ const uint32x4_t offset_vec = vdupq_n_u32(offset);
+ const uint16x8_t max = vdupq_n_u16((1 << 12) - 1);
+ uint16x4_t fwd_offset = vdup_n_u16(conv_params->fwd_offset);
+ uint16x4_t bck_offset = vdup_n_u16(conv_params->bck_offset);
+
+ // Weighted averaging
+ if (w == 4) {
+ do {
+ const uint16x4_t src = vld1_u16(src_ptr);
+ const uint16x4_t ref = vld1_u16(ref_ptr);
+
+ uint32x4_t wtd_avg = vmull_u16(ref, fwd_offset);
+ wtd_avg = vmlal_u16(wtd_avg, src, bck_offset);
+ wtd_avg = vshrq_n_u32(wtd_avg, DIST_PRECISION_BITS);
+ int32x4_t d0 = vreinterpretq_s32_u32(vsubq_u32(wtd_avg, offset_vec));
+
+ uint16x4_t d0_u16 = vqrshrun_n_s32(d0, ROUND_SHIFT - 2);
+ d0_u16 = vmin_u16(d0_u16, vget_low_u16(max));
+
+ vst1_u16(dst_ptr, d0_u16);
+
+ src_ptr += src_stride;
+ dst_ptr += dst_stride;
+ ref_ptr += ref_stride;
+ } while (--h != 0);
+ } else {
+ do {
+ int width = w;
+ const uint16_t *src = src_ptr;
+ const uint16_t *ref = ref_ptr;
+ uint16_t *dst = dst_ptr;
+ do {
+ const uint16x8_t s = vld1q_u16(src);
+ const uint16x8_t r = vld1q_u16(ref);
+
+ uint32x4_t wtd_avg0 = vmull_u16(vget_low_u16(r), fwd_offset);
+ wtd_avg0 = vmlal_u16(wtd_avg0, vget_low_u16(s), bck_offset);
+ wtd_avg0 = vshrq_n_u32(wtd_avg0, DIST_PRECISION_BITS);
+ int32x4_t d0 = vreinterpretq_s32_u32(vsubq_u32(wtd_avg0, offset_vec));
+
+ uint32x4_t wtd_avg1 = vmull_u16(vget_high_u16(r), fwd_offset);
+ wtd_avg1 = vmlal_u16(wtd_avg1, vget_high_u16(s), bck_offset);
+ wtd_avg1 = vshrq_n_u32(wtd_avg1, DIST_PRECISION_BITS);
+ int32x4_t d1 = vreinterpretq_s32_u32(vsubq_u32(wtd_avg1, offset_vec));
+
+ uint16x8_t d01 = vcombine_u16(vqrshrun_n_s32(d0, ROUND_SHIFT - 2),
+ vqrshrun_n_s32(d1, ROUND_SHIFT - 2));
+ d01 = vminq_u16(d01, max);
+ vst1q_u16(dst, d01);
+
+ src += 8;
+ ref += 8;
+ dst += 8;
+ width -= 8;
+ } while (width != 0);
+ src_ptr += src_stride;
+ dst_ptr += dst_stride;
+ ref_ptr += ref_stride;
+ } while (--h != 0);
+ }
+}
+
+static INLINE void highbd_dist_wtd_comp_avg_neon(
+ const uint16_t *src_ptr, int src_stride, uint16_t *dst_ptr, int dst_stride,
+ int w, int h, ConvolveParams *conv_params, const int bd) {
+ const int offset_bits = bd + 2 * FILTER_BITS - ROUND0_BITS;
+ const int offset = (1 << (offset_bits - COMPOUND_ROUND1_BITS)) +
+ (1 << (offset_bits - COMPOUND_ROUND1_BITS - 1));
+
+ CONV_BUF_TYPE *ref_ptr = conv_params->dst;
+ const int ref_stride = conv_params->dst_stride;
+ const uint32x4_t offset_vec = vdupq_n_u32(offset);
+ const uint16x8_t max = vdupq_n_u16((1 << bd) - 1);
+ uint16x4_t fwd_offset = vdup_n_u16(conv_params->fwd_offset);
+ uint16x4_t bck_offset = vdup_n_u16(conv_params->bck_offset);
+
+ // Weighted averaging
+ if (w == 4) {
+ do {
+ const uint16x4_t src = vld1_u16(src_ptr);
+ const uint16x4_t ref = vld1_u16(ref_ptr);
+
+ uint32x4_t wtd_avg = vmull_u16(ref, fwd_offset);
+ wtd_avg = vmlal_u16(wtd_avg, src, bck_offset);
+ wtd_avg = vshrq_n_u32(wtd_avg, DIST_PRECISION_BITS);
+ int32x4_t d0 = vreinterpretq_s32_u32(vsubq_u32(wtd_avg, offset_vec));
+
+ uint16x4_t d0_u16 = vqrshrun_n_s32(d0, ROUND_SHIFT);
+ d0_u16 = vmin_u16(d0_u16, vget_low_u16(max));
+
+ vst1_u16(dst_ptr, d0_u16);
+
+ src_ptr += src_stride;
+ dst_ptr += dst_stride;
+ ref_ptr += ref_stride;
+ } while (--h != 0);
+ } else {
+ do {
+ int width = w;
+ const uint16_t *src = src_ptr;
+ const uint16_t *ref = ref_ptr;
+ uint16_t *dst = dst_ptr;
+ do {
+ const uint16x8_t s = vld1q_u16(src);
+ const uint16x8_t r = vld1q_u16(ref);
+
+ uint32x4_t wtd_avg0 = vmull_u16(vget_low_u16(r), fwd_offset);
+ wtd_avg0 = vmlal_u16(wtd_avg0, vget_low_u16(s), bck_offset);
+ wtd_avg0 = vshrq_n_u32(wtd_avg0, DIST_PRECISION_BITS);
+ int32x4_t d0 = vreinterpretq_s32_u32(vsubq_u32(wtd_avg0, offset_vec));
+
+ uint32x4_t wtd_avg1 = vmull_u16(vget_high_u16(r), fwd_offset);
+ wtd_avg1 = vmlal_u16(wtd_avg1, vget_high_u16(s), bck_offset);
+ wtd_avg1 = vshrq_n_u32(wtd_avg1, DIST_PRECISION_BITS);
+ int32x4_t d1 = vreinterpretq_s32_u32(vsubq_u32(wtd_avg1, offset_vec));
+
+ uint16x8_t d01 = vcombine_u16(vqrshrun_n_s32(d0, ROUND_SHIFT),
+ vqrshrun_n_s32(d1, ROUND_SHIFT));
+ d01 = vminq_u16(d01, max);
+ vst1q_u16(dst, d01);
+
+ src += 8;
+ ref += 8;
+ dst += 8;
+ width -= 8;
+ } while (width != 0);
+ src_ptr += src_stride;
+ dst_ptr += dst_stride;
+ ref_ptr += ref_stride;
+ } while (--h != 0);
+ }
+}
diff --git a/third_party/aom/av1/common/arm/highbd_compound_convolve_sve2.c b/third_party/aom/av1/common/arm/highbd_compound_convolve_sve2.c
new file mode 100644
index 0000000000..1d6c9b4faf
--- /dev/null
+++ b/third_party/aom/av1/common/arm/highbd_compound_convolve_sve2.c
@@ -0,0 +1,1555 @@
+/*
+ * Copyright (c) 2024, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <assert.h>
+#include <arm_neon.h>
+
+#include "config/aom_config.h"
+#include "config/av1_rtcd.h"
+
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_dsp/arm/aom_neon_sve_bridge.h"
+#include "aom_dsp/arm/aom_neon_sve2_bridge.h"
+#include "aom_dsp/arm/mem_neon.h"
+#include "aom_ports/mem.h"
+#include "av1/common/convolve.h"
+#include "av1/common/filter.h"
+#include "av1/common/filter.h"
+#include "av1/common/arm/highbd_compound_convolve_neon.h"
+#include "av1/common/arm/highbd_convolve_neon.h"
+#include "av1/common/arm/highbd_convolve_sve2.h"
+
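+// Permute indices that form the overlapping 4-sample windows for a 4-tap
+// horizontal convolution. Only the first two rows are loaded in the
+// width == 4 paths below, selecting samples {0..3, 1..4} and {2..5, 3..6}.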
+DECLARE_ALIGNED(16, static const uint16_t, kDotProdTbl[32]) = {
+ 0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6,
+ 4, 5, 6, 7, 5, 6, 7, 0, 6, 7, 0, 1, 7, 0, 1, 2,
+};
+
+static INLINE uint16x8_t highbd_12_convolve8_8_x(int16x8_t s0[8],
+ int16x8_t filter,
+ int64x2_t offset) {
+ int64x2_t sum[8];
+ sum[0] = aom_sdotq_s16(offset, s0[0], filter);
+ sum[1] = aom_sdotq_s16(offset, s0[1], filter);
+ sum[2] = aom_sdotq_s16(offset, s0[2], filter);
+ sum[3] = aom_sdotq_s16(offset, s0[3], filter);
+ sum[4] = aom_sdotq_s16(offset, s0[4], filter);
+ sum[5] = aom_sdotq_s16(offset, s0[5], filter);
+ sum[6] = aom_sdotq_s16(offset, s0[6], filter);
+ sum[7] = aom_sdotq_s16(offset, s0[7], filter);
+
+ sum[0] = vpaddq_s64(sum[0], sum[1]);
+ sum[2] = vpaddq_s64(sum[2], sum[3]);
+ sum[4] = vpaddq_s64(sum[4], sum[5]);
+ sum[6] = vpaddq_s64(sum[6], sum[7]);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum[0]), vmovn_s64(sum[2]));
+ int32x4_t sum4567 = vcombine_s32(vmovn_s64(sum[4]), vmovn_s64(sum[6]));
+
+ return vcombine_u16(vqrshrun_n_s32(sum0123, ROUND0_BITS + 2),
+ vqrshrun_n_s32(sum4567, ROUND0_BITS + 2));
+}
+
+static INLINE void highbd_12_dist_wtd_convolve_x_8tap_sve2(
+ const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride,
+ int width, int height, const int16_t *x_filter_ptr) {
+ const int64x1_t offset_vec =
+ vcreate_s64((1 << (12 + FILTER_BITS)) + (1 << (12 + FILTER_BITS - 1)));
+ const int64x2_t offset_lo = vcombine_s64(offset_vec, vdup_n_s64(0));
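+  // The offset lives only in lane 0 so that, after the pairwise additions in
+  // highbd_12_convolve8_8_x, each output sum receives it exactly once.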
+
+ const int16x8_t filter = vld1q_s16(x_filter_ptr);
+
+ do {
+ const int16_t *s = (const int16_t *)src;
+ uint16_t *d = dst;
+ int w = width;
+
+ do {
+ int16x8_t s0[8], s1[8], s2[8], s3[8];
+ load_s16_8x8(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3],
+ &s0[4], &s0[5], &s0[6], &s0[7]);
+ load_s16_8x8(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3],
+ &s1[4], &s1[5], &s1[6], &s1[7]);
+ load_s16_8x8(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3],
+ &s2[4], &s2[5], &s2[6], &s2[7]);
+ load_s16_8x8(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3],
+ &s3[4], &s3[5], &s3[6], &s3[7]);
+
+ uint16x8_t d0 = highbd_12_convolve8_8_x(s0, filter, offset_lo);
+ uint16x8_t d1 = highbd_12_convolve8_8_x(s1, filter, offset_lo);
+ uint16x8_t d2 = highbd_12_convolve8_8_x(s2, filter, offset_lo);
+ uint16x8_t d3 = highbd_12_convolve8_8_x(s3, filter, offset_lo);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ s += 8;
+ d += 8;
+ w -= 8;
+ } while (w != 0);
+ src += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height != 0);
+}
+
+static INLINE uint16x8_t highbd_convolve8_8_x(int16x8_t s0[8], int16x8_t filter,
+ int64x2_t offset) {
+ int64x2_t sum[8];
+ sum[0] = aom_sdotq_s16(offset, s0[0], filter);
+ sum[1] = aom_sdotq_s16(offset, s0[1], filter);
+ sum[2] = aom_sdotq_s16(offset, s0[2], filter);
+ sum[3] = aom_sdotq_s16(offset, s0[3], filter);
+ sum[4] = aom_sdotq_s16(offset, s0[4], filter);
+ sum[5] = aom_sdotq_s16(offset, s0[5], filter);
+ sum[6] = aom_sdotq_s16(offset, s0[6], filter);
+ sum[7] = aom_sdotq_s16(offset, s0[7], filter);
+
+ sum[0] = vpaddq_s64(sum[0], sum[1]);
+ sum[2] = vpaddq_s64(sum[2], sum[3]);
+ sum[4] = vpaddq_s64(sum[4], sum[5]);
+ sum[6] = vpaddq_s64(sum[6], sum[7]);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum[0]), vmovn_s64(sum[2]));
+ int32x4_t sum4567 = vcombine_s32(vmovn_s64(sum[4]), vmovn_s64(sum[6]));
+
+ return vcombine_u16(vqrshrun_n_s32(sum0123, ROUND0_BITS),
+ vqrshrun_n_s32(sum4567, ROUND0_BITS));
+}
+
+static INLINE void highbd_dist_wtd_convolve_x_8tap_sve2(
+ const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride,
+ int width, int height, const int16_t *x_filter_ptr, const int bd) {
+ const int64x1_t offset_vec =
+ vcreate_s64((1 << (bd + FILTER_BITS)) + (1 << (bd + FILTER_BITS - 1)));
+ const int64x2_t offset_lo = vcombine_s64(offset_vec, vdup_n_s64(0));
+
+ const int16x8_t filter = vld1q_s16(x_filter_ptr);
+
+ do {
+ const int16_t *s = (const int16_t *)src;
+ uint16_t *d = dst;
+ int w = width;
+
+ do {
+ int16x8_t s0[8], s1[8], s2[8], s3[8];
+ load_s16_8x8(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3],
+ &s0[4], &s0[5], &s0[6], &s0[7]);
+ load_s16_8x8(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3],
+ &s1[4], &s1[5], &s1[6], &s1[7]);
+ load_s16_8x8(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3],
+ &s2[4], &s2[5], &s2[6], &s2[7]);
+ load_s16_8x8(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3],
+ &s3[4], &s3[5], &s3[6], &s3[7]);
+
+ uint16x8_t d0 = highbd_convolve8_8_x(s0, filter, offset_lo);
+ uint16x8_t d1 = highbd_convolve8_8_x(s1, filter, offset_lo);
+ uint16x8_t d2 = highbd_convolve8_8_x(s2, filter, offset_lo);
+ uint16x8_t d3 = highbd_convolve8_8_x(s3, filter, offset_lo);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ s += 8;
+ d += 8;
+ w -= 8;
+ } while (w != 0);
+ src += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height != 0);
+}
+
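+// The pairwise 64-bit dot products in the convolve4_8 helpers produce outputs
+// in (0, 4, 1, 5, 2, 6, 3, 7) order; this table restores linear order with a
+// final TBL.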
+// clang-format off
+DECLARE_ALIGNED(16, static const uint16_t, kDeinterleaveTbl[8]) = {
+ 0, 2, 4, 6, 1, 3, 5, 7,
+};
+// clang-format on
+
+static INLINE uint16x4_t highbd_12_convolve4_4_x(int16x8_t s0, int16x8_t filter,
+ int64x2_t offset,
+ uint16x8x2_t permute_tbl) {
+ int16x8_t permuted_samples0 = aom_tbl_s16(s0, permute_tbl.val[0]);
+ int16x8_t permuted_samples1 = aom_tbl_s16(s0, permute_tbl.val[1]);
+
+ int64x2_t sum01 = aom_svdot_lane_s16(offset, permuted_samples0, filter, 0);
+ int64x2_t sum23 = aom_svdot_lane_s16(offset, permuted_samples1, filter, 0);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+
+ return vqrshrun_n_s32(sum0123, ROUND0_BITS + 2);
+}
+
+static INLINE uint16x8_t highbd_12_convolve4_8_x(int16x8_t s0[4],
+ int16x8_t filter,
+ int64x2_t offset,
+ uint16x8_t tbl) {
+ int64x2_t sum04 = aom_svdot_lane_s16(offset, s0[0], filter, 0);
+ int64x2_t sum15 = aom_svdot_lane_s16(offset, s0[1], filter, 0);
+ int64x2_t sum26 = aom_svdot_lane_s16(offset, s0[2], filter, 0);
+ int64x2_t sum37 = aom_svdot_lane_s16(offset, s0[3], filter, 0);
+
+ int32x4_t sum0415 = vcombine_s32(vmovn_s64(sum04), vmovn_s64(sum15));
+ int32x4_t sum2637 = vcombine_s32(vmovn_s64(sum26), vmovn_s64(sum37));
+
+ uint16x8_t res = vcombine_u16(vqrshrun_n_s32(sum0415, ROUND0_BITS + 2),
+ vqrshrun_n_s32(sum2637, ROUND0_BITS + 2));
+ return aom_tbl_u16(res, tbl);
+}
+
+static INLINE void highbd_12_dist_wtd_convolve_x_4tap_sve2(
+ const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride,
+ int width, int height, const int16_t *x_filter_ptr) {
+ const int64x2_t offset =
+ vdupq_n_s64((1 << (12 + FILTER_BITS)) + (1 << (12 + FILTER_BITS - 1)));
+
+ const int16x4_t x_filter = vld1_s16(x_filter_ptr + 2);
+ const int16x8_t filter = vcombine_s16(x_filter, vdup_n_s16(0));
+
+ if (width == 4) {
+ uint16x8x2_t permute_tbl = vld1q_u16_x2(kDotProdTbl);
+
+ const int16_t *s = (const int16_t *)(src);
+
+ do {
+ int16x8_t s0, s1, s2, s3;
+ load_s16_8x4(s, src_stride, &s0, &s1, &s2, &s3);
+
+ uint16x4_t d0 = highbd_12_convolve4_4_x(s0, filter, offset, permute_tbl);
+ uint16x4_t d1 = highbd_12_convolve4_4_x(s1, filter, offset, permute_tbl);
+ uint16x4_t d2 = highbd_12_convolve4_4_x(s2, filter, offset, permute_tbl);
+ uint16x4_t d3 = highbd_12_convolve4_4_x(s3, filter, offset, permute_tbl);
+
+ store_u16_4x4(dst, dst_stride, d0, d1, d2, d3);
+
+ s += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height != 0);
+ } else {
+ uint16x8_t idx = vld1q_u16(kDeinterleaveTbl);
+
+ do {
+ const int16_t *s = (const int16_t *)(src);
+ uint16_t *d = dst;
+ int w = width;
+
+ do {
+ int16x8_t s0[4], s1[4], s2[4], s3[4];
+ load_s16_8x4(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3]);
+ load_s16_8x4(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3]);
+ load_s16_8x4(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3]);
+ load_s16_8x4(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3]);
+
+ uint16x8_t d0 = highbd_12_convolve4_8_x(s0, filter, offset, idx);
+ uint16x8_t d1 = highbd_12_convolve4_8_x(s1, filter, offset, idx);
+ uint16x8_t d2 = highbd_12_convolve4_8_x(s2, filter, offset, idx);
+ uint16x8_t d3 = highbd_12_convolve4_8_x(s3, filter, offset, idx);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ s += 8;
+ d += 8;
+ w -= 8;
+ } while (w != 0);
+ src += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height != 0);
+ }
+}
+
+static INLINE uint16x4_t highbd_convolve4_4_x(int16x8_t s0, int16x8_t filter,
+ int64x2_t offset,
+ uint16x8x2_t permute_tbl) {
+ int16x8_t permuted_samples0 = aom_tbl_s16(s0, permute_tbl.val[0]);
+ int16x8_t permuted_samples1 = aom_tbl_s16(s0, permute_tbl.val[1]);
+
+ int64x2_t sum01 = aom_svdot_lane_s16(offset, permuted_samples0, filter, 0);
+ int64x2_t sum23 = aom_svdot_lane_s16(offset, permuted_samples1, filter, 0);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+
+ return vqrshrun_n_s32(sum0123, ROUND0_BITS);
+}
+
+static INLINE uint16x8_t highbd_convolve4_8_x(int16x8_t s0[4], int16x8_t filter,
+ int64x2_t offset,
+ uint16x8_t tbl) {
+ int64x2_t sum04 = aom_svdot_lane_s16(offset, s0[0], filter, 0);
+ int64x2_t sum15 = aom_svdot_lane_s16(offset, s0[1], filter, 0);
+ int64x2_t sum26 = aom_svdot_lane_s16(offset, s0[2], filter, 0);
+ int64x2_t sum37 = aom_svdot_lane_s16(offset, s0[3], filter, 0);
+
+ int32x4_t sum0415 = vcombine_s32(vmovn_s64(sum04), vmovn_s64(sum15));
+ int32x4_t sum2637 = vcombine_s32(vmovn_s64(sum26), vmovn_s64(sum37));
+
+ uint16x8_t res = vcombine_u16(vqrshrun_n_s32(sum0415, ROUND0_BITS),
+ vqrshrun_n_s32(sum2637, ROUND0_BITS));
+ return aom_tbl_u16(res, tbl);
+}
+
+static INLINE void highbd_dist_wtd_convolve_x_4tap_sve2(
+ const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride,
+ int width, int height, const int16_t *x_filter_ptr, const int bd) {
+ const int64x2_t offset =
+ vdupq_n_s64((1 << (bd + FILTER_BITS)) + (1 << (bd + FILTER_BITS - 1)));
+
+ const int16x4_t x_filter = vld1_s16(x_filter_ptr + 2);
+ const int16x8_t filter = vcombine_s16(x_filter, vdup_n_s16(0));
+
+ if (width == 4) {
+ uint16x8x2_t permute_tbl = vld1q_u16_x2(kDotProdTbl);
+
+ const int16_t *s = (const int16_t *)(src);
+
+ do {
+ int16x8_t s0, s1, s2, s3;
+ load_s16_8x4(s, src_stride, &s0, &s1, &s2, &s3);
+
+ uint16x4_t d0 = highbd_convolve4_4_x(s0, filter, offset, permute_tbl);
+ uint16x4_t d1 = highbd_convolve4_4_x(s1, filter, offset, permute_tbl);
+ uint16x4_t d2 = highbd_convolve4_4_x(s2, filter, offset, permute_tbl);
+ uint16x4_t d3 = highbd_convolve4_4_x(s3, filter, offset, permute_tbl);
+
+ store_u16_4x4(dst, dst_stride, d0, d1, d2, d3);
+
+ s += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height != 0);
+ } else {
+ uint16x8_t idx = vld1q_u16(kDeinterleaveTbl);
+
+ do {
+ const int16_t *s = (const int16_t *)(src);
+ uint16_t *d = dst;
+ int w = width;
+
+ do {
+ int16x8_t s0[4], s1[4], s2[4], s3[4];
+ load_s16_8x4(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3]);
+ load_s16_8x4(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3]);
+ load_s16_8x4(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3]);
+ load_s16_8x4(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3]);
+
+ uint16x8_t d0 = highbd_convolve4_8_x(s0, filter, offset, idx);
+ uint16x8_t d1 = highbd_convolve4_8_x(s1, filter, offset, idx);
+ uint16x8_t d2 = highbd_convolve4_8_x(s2, filter, offset, idx);
+ uint16x8_t d3 = highbd_convolve4_8_x(s3, filter, offset, idx);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ s += 8;
+ d += 8;
+ w -= 8;
+ } while (w != 0);
+ src += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height != 0);
+ }
+}
+
+void av1_highbd_dist_wtd_convolve_x_sve2(
+ const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride, int w,
+ int h, const InterpFilterParams *filter_params_x, const int subpel_x_qn,
+ ConvolveParams *conv_params, int bd) {
+ DECLARE_ALIGNED(16, uint16_t,
+ im_block[(MAX_SB_SIZE + MAX_FILTER_TAP) * MAX_SB_SIZE]);
+ CONV_BUF_TYPE *dst16 = conv_params->dst;
+ const int x_filter_taps = get_filter_tap(filter_params_x, subpel_x_qn);
+
+ if (x_filter_taps == 6) {
+ av1_highbd_dist_wtd_convolve_x_neon(src, src_stride, dst, dst_stride, w, h,
+ filter_params_x, subpel_x_qn,
+ conv_params, bd);
+ return;
+ }
+
+ int dst16_stride = conv_params->dst_stride;
+ const int im_stride = MAX_SB_SIZE;
+ const int horiz_offset = filter_params_x->taps / 2 - 1;
+ assert(FILTER_BITS == COMPOUND_ROUND1_BITS);
+
+ const int16_t *x_filter_ptr = av1_get_interp_filter_subpel_kernel(
+ filter_params_x, subpel_x_qn & SUBPEL_MASK);
+
+ src -= horiz_offset;
+
+ if (bd == 12) {
+ if (conv_params->do_average) {
+ if (x_filter_taps <= 4) {
+ highbd_12_dist_wtd_convolve_x_4tap_sve2(src + 2, src_stride, im_block,
+ im_stride, w, h, x_filter_ptr);
+ } else {
+ highbd_12_dist_wtd_convolve_x_8tap_sve2(src, src_stride, im_block,
+ im_stride, w, h, x_filter_ptr);
+ }
+
+ if (conv_params->use_dist_wtd_comp_avg) {
+ highbd_12_dist_wtd_comp_avg_neon(im_block, im_stride, dst, dst_stride,
+ w, h, conv_params);
+
+ } else {
+ highbd_12_comp_avg_neon(im_block, im_stride, dst, dst_stride, w, h,
+ conv_params);
+ }
+ } else {
+ if (x_filter_taps <= 4) {
+ highbd_12_dist_wtd_convolve_x_4tap_sve2(
+ src + 2, src_stride, dst16, dst16_stride, w, h, x_filter_ptr);
+ } else {
+ highbd_12_dist_wtd_convolve_x_8tap_sve2(
+ src, src_stride, dst16, dst16_stride, w, h, x_filter_ptr);
+ }
+ }
+ } else {
+ if (conv_params->do_average) {
+ if (x_filter_taps <= 4) {
+ highbd_dist_wtd_convolve_x_4tap_sve2(src + 2, src_stride, im_block,
+ im_stride, w, h, x_filter_ptr, bd);
+ } else {
+ highbd_dist_wtd_convolve_x_8tap_sve2(src, src_stride, im_block,
+ im_stride, w, h, x_filter_ptr, bd);
+ }
+
+ if (conv_params->use_dist_wtd_comp_avg) {
+ highbd_dist_wtd_comp_avg_neon(im_block, im_stride, dst, dst_stride, w,
+ h, conv_params, bd);
+ } else {
+ highbd_comp_avg_neon(im_block, im_stride, dst, dst_stride, w, h,
+ conv_params, bd);
+ }
+ } else {
+ if (x_filter_taps <= 4) {
+ highbd_dist_wtd_convolve_x_4tap_sve2(
+ src + 2, src_stride, dst16, dst16_stride, w, h, x_filter_ptr, bd);
+ } else {
+ highbd_dist_wtd_convolve_x_8tap_sve2(
+ src, src_stride, dst16, dst16_stride, w, h, x_filter_ptr, bd);
+ }
+ }
+ }
+}
+
+static INLINE uint16x4_t highbd_12_convolve8_4_y(int16x8_t samples_lo[2],
+ int16x8_t samples_hi[2],
+ int16x8_t filter,
+ int64x2_t offset) {
+ int64x2_t sum01 = aom_svdot_lane_s16(offset, samples_lo[0], filter, 0);
+ sum01 = aom_svdot_lane_s16(sum01, samples_hi[0], filter, 1);
+
+ int64x2_t sum23 = aom_svdot_lane_s16(offset, samples_lo[1], filter, 0);
+ sum23 = aom_svdot_lane_s16(sum23, samples_hi[1], filter, 1);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+
+ return vqrshrun_n_s32(sum0123, ROUND0_BITS + 2);
+}
+
+static INLINE uint16x8_t highbd_12_convolve8_8_y(int16x8_t samples_lo[4],
+ int16x8_t samples_hi[4],
+ int16x8_t filter,
+ int64x2_t offset) {
+ int64x2_t sum01 = aom_svdot_lane_s16(offset, samples_lo[0], filter, 0);
+ sum01 = aom_svdot_lane_s16(sum01, samples_hi[0], filter, 1);
+
+ int64x2_t sum23 = aom_svdot_lane_s16(offset, samples_lo[1], filter, 0);
+ sum23 = aom_svdot_lane_s16(sum23, samples_hi[1], filter, 1);
+
+ int64x2_t sum45 = aom_svdot_lane_s16(offset, samples_lo[2], filter, 0);
+ sum45 = aom_svdot_lane_s16(sum45, samples_hi[2], filter, 1);
+
+ int64x2_t sum67 = aom_svdot_lane_s16(offset, samples_lo[3], filter, 0);
+ sum67 = aom_svdot_lane_s16(sum67, samples_hi[3], filter, 1);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+ int32x4_t sum4567 = vcombine_s32(vmovn_s64(sum45), vmovn_s64(sum67));
+
+ return vcombine_u16(vqrshrun_n_s32(sum0123, ROUND0_BITS + 2),
+ vqrshrun_n_s32(sum4567, ROUND0_BITS + 2));
+}
+
+static INLINE void highbd_12_dist_wtd_convolve_y_8tap_sve2(
+ const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride,
+ int width, int height, const int16_t *y_filter_ptr) {
+ const int64x2_t offset =
+ vdupq_n_s64((1 << (12 + FILTER_BITS)) + (1 << (12 + FILTER_BITS - 1)));
+ const int16x8_t y_filter = vld1q_s16(y_filter_ptr);
+
+ uint16x8x3_t merge_block_tbl = vld1q_u16_x3(kDotProdMergeBlockTbl);
+ // Scale indices by size of the true vector length to avoid reading from an
+ // 'undefined' portion of a vector on a system with SVE vectors > 128-bit.
+ uint16x8_t correction0 =
+ vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000000000000ULL));
+ merge_block_tbl.val[0] = vaddq_u16(merge_block_tbl.val[0], correction0);
+ uint16x8_t correction1 =
+ vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000100000000ULL));
+ merge_block_tbl.val[1] = vaddq_u16(merge_block_tbl.val[1], correction1);
+
+ uint16x8_t correction2 =
+ vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000100010000ULL));
+ merge_block_tbl.val[2] = vaddq_u16(merge_block_tbl.val[2], correction2);
+
+ if (width == 4) {
+ int16_t *s = (int16_t *)src;
+ int16x4_t s0, s1, s2, s3, s4, s5, s6;
+ load_s16_4x7(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6);
+ s += 7 * src_stride;
+
+ // This operation combines a conventional transpose and the sample permute
+ // required before computing the dot product.
+ int16x8_t s0123[2], s1234[2], s2345[2], s3456[2];
+ transpose_concat_4x4(s0, s1, s2, s3, s0123);
+ transpose_concat_4x4(s1, s2, s3, s4, s1234);
+ transpose_concat_4x4(s2, s3, s4, s5, s2345);
+ transpose_concat_4x4(s3, s4, s5, s6, s3456);
+
+ do {
+ int16x4_t s7, s8, s9, s10;
+ load_s16_4x4(s, src_stride, &s7, &s8, &s9, &s10);
+
+ int16x8_t s4567[2], s5678[2], s6789[2], s789A[2];
+ // Transpose and shuffle the 4 lines that were loaded.
+ transpose_concat_4x4(s7, s8, s9, s10, s789A);
+
+ // Merge new data into block from previous iteration.
+ aom_tbl2x2_s16(s3456, s789A, merge_block_tbl.val[0], s4567);
+ aom_tbl2x2_s16(s3456, s789A, merge_block_tbl.val[1], s5678);
+ aom_tbl2x2_s16(s3456, s789A, merge_block_tbl.val[2], s6789);
+
+ uint16x4_t d0 = highbd_12_convolve8_4_y(s0123, s4567, y_filter, offset);
+ uint16x4_t d1 = highbd_12_convolve8_4_y(s1234, s5678, y_filter, offset);
+ uint16x4_t d2 = highbd_12_convolve8_4_y(s2345, s6789, y_filter, offset);
+ uint16x4_t d3 = highbd_12_convolve8_4_y(s3456, s789A, y_filter, offset);
+
+ store_u16_4x4(dst, dst_stride, d0, d1, d2, d3);
+
+ // Prepare block for next iteration - re-using as much as possible.
+ // Shuffle everything up four rows.
+ s0123[0] = s4567[0];
+ s0123[1] = s4567[1];
+ s1234[0] = s5678[0];
+ s1234[1] = s5678[1];
+ s2345[0] = s6789[0];
+ s2345[1] = s6789[1];
+ s3456[0] = s789A[0];
+ s3456[1] = s789A[1];
+
+ s += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height != 0);
+ } else {
+ do {
+ int h = height;
+ int16_t *s = (int16_t *)src;
+ uint16_t *d = dst;
+
+ int16x8_t s0, s1, s2, s3, s4, s5, s6;
+ load_s16_8x7(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6);
+ s += 7 * src_stride;
+
+ // This operation combines a conventional transpose and the sample permute
+ // required before computing the dot product.
+ int16x8_t s0123[4], s1234[4], s2345[4], s3456[4];
+ transpose_concat_8x4(s0, s1, s2, s3, s0123);
+ transpose_concat_8x4(s1, s2, s3, s4, s1234);
+ transpose_concat_8x4(s2, s3, s4, s5, s2345);
+ transpose_concat_8x4(s3, s4, s5, s6, s3456);
+
+ do {
+ int16x8_t s7, s8, s9, s10;
+ load_s16_8x4(s, src_stride, &s7, &s8, &s9, &s10);
+ int16x8_t s4567[4], s5678[4], s6789[4], s789A[4];
+
+ // Transpose and shuffle the 4 lines that were loaded.
+ transpose_concat_8x4(s7, s8, s9, s10, s789A);
+
+ // Merge new data into block from previous iteration.
+ aom_tbl2x4_s16(s3456, s789A, merge_block_tbl.val[0], s4567);
+ aom_tbl2x4_s16(s3456, s789A, merge_block_tbl.val[1], s5678);
+ aom_tbl2x4_s16(s3456, s789A, merge_block_tbl.val[2], s6789);
+
+ uint16x8_t d0 = highbd_12_convolve8_8_y(s0123, s4567, y_filter, offset);
+ uint16x8_t d1 = highbd_12_convolve8_8_y(s1234, s5678, y_filter, offset);
+ uint16x8_t d2 = highbd_12_convolve8_8_y(s2345, s6789, y_filter, offset);
+ uint16x8_t d3 = highbd_12_convolve8_8_y(s3456, s789A, y_filter, offset);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ // Prepare block for next iteration - re-using as much as possible.
+ // Shuffle everything up four rows.
+ s0123[0] = s4567[0];
+ s0123[1] = s4567[1];
+ s0123[2] = s4567[2];
+ s0123[3] = s4567[3];
+ s1234[0] = s5678[0];
+ s1234[1] = s5678[1];
+ s1234[2] = s5678[2];
+ s1234[3] = s5678[3];
+ s2345[0] = s6789[0];
+ s2345[1] = s6789[1];
+ s2345[2] = s6789[2];
+ s2345[3] = s6789[3];
+ s3456[0] = s789A[0];
+ s3456[1] = s789A[1];
+ s3456[2] = s789A[2];
+ s3456[3] = s789A[3];
+
+ s += 4 * src_stride;
+ d += 4 * dst_stride;
+ h -= 4;
+ } while (h != 0);
+ src += 8;
+ dst += 8;
+ width -= 8;
+ } while (width != 0);
+ }
+}
+
+static INLINE uint16x4_t highbd_convolve8_4_y(int16x8_t samples_lo[2],
+ int16x8_t samples_hi[2],
+ int16x8_t filter,
+ int64x2_t offset) {
+ int64x2_t sum01 = aom_svdot_lane_s16(offset, samples_lo[0], filter, 0);
+ sum01 = aom_svdot_lane_s16(sum01, samples_hi[0], filter, 1);
+
+ int64x2_t sum23 = aom_svdot_lane_s16(offset, samples_lo[1], filter, 0);
+ sum23 = aom_svdot_lane_s16(sum23, samples_hi[1], filter, 1);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+
+ return vqrshrun_n_s32(sum0123, ROUND0_BITS);
+}
+
+static INLINE uint16x8_t highbd_convolve8_8_y(int16x8_t samples_lo[4],
+ int16x8_t samples_hi[4],
+ int16x8_t filter,
+ int64x2_t offset) {
+ int64x2_t sum01 = aom_svdot_lane_s16(offset, samples_lo[0], filter, 0);
+ sum01 = aom_svdot_lane_s16(sum01, samples_hi[0], filter, 1);
+
+ int64x2_t sum23 = aom_svdot_lane_s16(offset, samples_lo[1], filter, 0);
+ sum23 = aom_svdot_lane_s16(sum23, samples_hi[1], filter, 1);
+
+ int64x2_t sum45 = aom_svdot_lane_s16(offset, samples_lo[2], filter, 0);
+ sum45 = aom_svdot_lane_s16(sum45, samples_hi[2], filter, 1);
+
+ int64x2_t sum67 = aom_svdot_lane_s16(offset, samples_lo[3], filter, 0);
+ sum67 = aom_svdot_lane_s16(sum67, samples_hi[3], filter, 1);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+ int32x4_t sum4567 = vcombine_s32(vmovn_s64(sum45), vmovn_s64(sum67));
+
+ return vcombine_u16(vqrshrun_n_s32(sum0123, ROUND0_BITS),
+ vqrshrun_n_s32(sum4567, ROUND0_BITS));
+}
+
+static INLINE void highbd_dist_wtd_convolve_y_8tap_sve2(
+ const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride,
+ int width, int height, const int16_t *y_filter_ptr, const int bd) {
+ const int64x2_t offset =
+ vdupq_n_s64((1 << (bd + FILTER_BITS)) + (1 << (bd + FILTER_BITS - 1)));
+ const int16x8_t y_filter = vld1q_s16(y_filter_ptr);
+
+ uint16x8x3_t merge_block_tbl = vld1q_u16_x3(kDotProdMergeBlockTbl);
+ // Scale indices by size of the true vector length to avoid reading from an
+ // 'undefined' portion of a vector on a system with SVE vectors > 128-bit.
+ uint16x8_t correction0 =
+ vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000000000000ULL));
+ merge_block_tbl.val[0] = vaddq_u16(merge_block_tbl.val[0], correction0);
+ uint16x8_t correction1 =
+ vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000100000000ULL));
+ merge_block_tbl.val[1] = vaddq_u16(merge_block_tbl.val[1], correction1);
+
+ uint16x8_t correction2 =
+ vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000100010000ULL));
+ merge_block_tbl.val[2] = vaddq_u16(merge_block_tbl.val[2], correction2);
+
+ if (width == 4) {
+ int16_t *s = (int16_t *)src;
+ int16x4_t s0, s1, s2, s3, s4, s5, s6;
+ load_s16_4x7(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6);
+ s += 7 * src_stride;
+
+ // This operation combines a conventional transpose and the sample permute
+ // required before computing the dot product.
+ int16x8_t s0123[2], s1234[2], s2345[2], s3456[2];
+ transpose_concat_4x4(s0, s1, s2, s3, s0123);
+ transpose_concat_4x4(s1, s2, s3, s4, s1234);
+ transpose_concat_4x4(s2, s3, s4, s5, s2345);
+ transpose_concat_4x4(s3, s4, s5, s6, s3456);
+
+ do {
+ int16x4_t s7, s8, s9, s10;
+ load_s16_4x4(s, src_stride, &s7, &s8, &s9, &s10);
+
+ int16x8_t s4567[2], s5678[2], s6789[2], s789A[2];
+ // Transpose and shuffle the 4 lines that were loaded.
+ transpose_concat_4x4(s7, s8, s9, s10, s789A);
+
+ // Merge new data into block from previous iteration.
+ aom_tbl2x2_s16(s3456, s789A, merge_block_tbl.val[0], s4567);
+ aom_tbl2x2_s16(s3456, s789A, merge_block_tbl.val[1], s5678);
+ aom_tbl2x2_s16(s3456, s789A, merge_block_tbl.val[2], s6789);
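+      // For example, each four-row group of s4567 takes rows 4-6 from s3456
+      // and row 7 from s789A.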
+
+ uint16x4_t d0 = highbd_convolve8_4_y(s0123, s4567, y_filter, offset);
+ uint16x4_t d1 = highbd_convolve8_4_y(s1234, s5678, y_filter, offset);
+ uint16x4_t d2 = highbd_convolve8_4_y(s2345, s6789, y_filter, offset);
+ uint16x4_t d3 = highbd_convolve8_4_y(s3456, s789A, y_filter, offset);
+
+ store_u16_4x4(dst, dst_stride, d0, d1, d2, d3);
+
+ // Prepare block for next iteration - re-using as much as possible.
+ // Shuffle everything up four rows.
+ s0123[0] = s4567[0];
+ s0123[1] = s4567[1];
+ s1234[0] = s5678[0];
+ s1234[1] = s5678[1];
+ s2345[0] = s6789[0];
+ s2345[1] = s6789[1];
+ s3456[0] = s789A[0];
+ s3456[1] = s789A[1];
+
+ s += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height != 0);
+ } else {
+ do {
+ int h = height;
+ int16_t *s = (int16_t *)src;
+ uint16_t *d = dst;
+
+ int16x8_t s0, s1, s2, s3, s4, s5, s6;
+ load_s16_8x7(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6);
+ s += 7 * src_stride;
+
+      // This operation combines a conventional transpose and the sample
+      // permute required before computing the dot product.
+ int16x8_t s0123[4], s1234[4], s2345[4], s3456[4];
+ transpose_concat_8x4(s0, s1, s2, s3, s0123);
+ transpose_concat_8x4(s1, s2, s3, s4, s1234);
+ transpose_concat_8x4(s2, s3, s4, s5, s2345);
+ transpose_concat_8x4(s3, s4, s5, s6, s3456);
+
+ do {
+ int16x8_t s7, s8, s9, s10;
+ load_s16_8x4(s, src_stride, &s7, &s8, &s9, &s10);
+ int16x8_t s4567[4], s5678[4], s6789[4], s789A[4];
+
+ // Transpose and shuffle the 4 lines that were loaded.
+ transpose_concat_8x4(s7, s8, s9, s10, s789A);
+
+ // Merge new data into block from previous iteration.
+ aom_tbl2x4_s16(s3456, s789A, merge_block_tbl.val[0], s4567);
+ aom_tbl2x4_s16(s3456, s789A, merge_block_tbl.val[1], s5678);
+ aom_tbl2x4_s16(s3456, s789A, merge_block_tbl.val[2], s6789);
+
+ uint16x8_t d0 = highbd_convolve8_8_y(s0123, s4567, y_filter, offset);
+ uint16x8_t d1 = highbd_convolve8_8_y(s1234, s5678, y_filter, offset);
+ uint16x8_t d2 = highbd_convolve8_8_y(s2345, s6789, y_filter, offset);
+ uint16x8_t d3 = highbd_convolve8_8_y(s3456, s789A, y_filter, offset);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ // Prepare block for next iteration - re-using as much as possible.
+ // Shuffle everything up four rows.
+ s0123[0] = s4567[0];
+ s0123[1] = s4567[1];
+ s0123[2] = s4567[2];
+ s0123[3] = s4567[3];
+ s1234[0] = s5678[0];
+ s1234[1] = s5678[1];
+ s1234[2] = s5678[2];
+ s1234[3] = s5678[3];
+ s2345[0] = s6789[0];
+ s2345[1] = s6789[1];
+ s2345[2] = s6789[2];
+ s2345[3] = s6789[3];
+ s3456[0] = s789A[0];
+ s3456[1] = s789A[1];
+ s3456[2] = s789A[2];
+ s3456[3] = s789A[3];
+
+ s += 4 * src_stride;
+ d += 4 * dst_stride;
+ h -= 4;
+ } while (h != 0);
+ src += 8;
+ dst += 8;
+ width -= 8;
+ } while (width != 0);
+ }
+}
+
+void av1_highbd_dist_wtd_convolve_y_sve2(
+ const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride, int w,
+ int h, const InterpFilterParams *filter_params_y, const int subpel_y_qn,
+ ConvolveParams *conv_params, int bd) {
+ DECLARE_ALIGNED(16, uint16_t,
+ im_block[(MAX_SB_SIZE + MAX_FILTER_TAP) * MAX_SB_SIZE]);
+ CONV_BUF_TYPE *dst16 = conv_params->dst;
+ const int y_filter_taps = get_filter_tap(filter_params_y, subpel_y_qn);
+
+ if (y_filter_taps != 8) {
+ av1_highbd_dist_wtd_convolve_y_neon(src, src_stride, dst, dst_stride, w, h,
+ filter_params_y, subpel_y_qn,
+ conv_params, bd);
+ return;
+ }
+
+ int dst16_stride = conv_params->dst_stride;
+ const int im_stride = MAX_SB_SIZE;
+ const int vert_offset = filter_params_y->taps / 2 - 1;
+ assert(FILTER_BITS == COMPOUND_ROUND1_BITS);
+
+ const int16_t *y_filter_ptr = av1_get_interp_filter_subpel_kernel(
+ filter_params_y, subpel_y_qn & SUBPEL_MASK);
+
+ src -= vert_offset * src_stride;
+
+ if (bd == 12) {
+ if (conv_params->do_average) {
+ highbd_12_dist_wtd_convolve_y_8tap_sve2(src, src_stride, im_block,
+ im_stride, w, h, y_filter_ptr);
+ if (conv_params->use_dist_wtd_comp_avg) {
+ highbd_12_dist_wtd_comp_avg_neon(im_block, im_stride, dst, dst_stride,
+ w, h, conv_params);
+ } else {
+ highbd_12_comp_avg_neon(im_block, im_stride, dst, dst_stride, w, h,
+ conv_params);
+ }
+ } else {
+ highbd_12_dist_wtd_convolve_y_8tap_sve2(src, src_stride, dst16,
+ dst16_stride, w, h, y_filter_ptr);
+ }
+ } else {
+ if (conv_params->do_average) {
+ highbd_dist_wtd_convolve_y_8tap_sve2(src, src_stride, im_block, im_stride,
+ w, h, y_filter_ptr, bd);
+ if (conv_params->use_dist_wtd_comp_avg) {
+ highbd_dist_wtd_comp_avg_neon(im_block, im_stride, dst, dst_stride, w,
+ h, conv_params, bd);
+ } else {
+ highbd_comp_avg_neon(im_block, im_stride, dst, dst_stride, w, h,
+ conv_params, bd);
+ }
+ } else {
+ highbd_dist_wtd_convolve_y_8tap_sve2(src, src_stride, dst16, dst16_stride,
+ w, h, y_filter_ptr, bd);
+ }
+ }
+}
+
+static INLINE void highbd_12_dist_wtd_convolve_2d_horiz_8tap_sve2(
+ const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride,
+ int width, int height, const int16_t *x_filter_ptr) {
+ const int64x2_t offset = vdupq_n_s64(1 << (12 + FILTER_BITS - 2));
+ const int16x8_t filter = vld1q_s16(x_filter_ptr);
+
+  // Only 8-tap and 4-tap vertical convolutions are used here, so we know that
+  // im_h % 4 == 3. This lets us run the loop over the whole block 4 rows at a
+  // time and then process the last 3 rows separately.
+
+ do {
+ const int16_t *s = (const int16_t *)src;
+ uint16_t *d = dst;
+ int w = width;
+
+ do {
+ int16x8_t s0[8], s1[8], s2[8], s3[8];
+ load_s16_8x8(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3],
+ &s0[4], &s0[5], &s0[6], &s0[7]);
+ load_s16_8x8(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3],
+ &s1[4], &s1[5], &s1[6], &s1[7]);
+ load_s16_8x8(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3],
+ &s2[4], &s2[5], &s2[6], &s2[7]);
+ load_s16_8x8(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3],
+ &s3[4], &s3[5], &s3[6], &s3[7]);
+
+ uint16x8_t d0 = highbd_12_convolve8_8_x(s0, filter, offset);
+ uint16x8_t d1 = highbd_12_convolve8_8_x(s1, filter, offset);
+ uint16x8_t d2 = highbd_12_convolve8_8_x(s2, filter, offset);
+ uint16x8_t d3 = highbd_12_convolve8_8_x(s3, filter, offset);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ s += 8;
+ d += 8;
+ w -= 8;
+ } while (w != 0);
+ src += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height > 4);
+
+ // Process final 3 rows.
+ const int16_t *s = (const int16_t *)src;
+ do {
+ int16x8_t s0[8], s1[8], s2[8];
+ load_s16_8x8(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3], &s0[4],
+ &s0[5], &s0[6], &s0[7]);
+ load_s16_8x8(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3], &s1[4],
+ &s1[5], &s1[6], &s1[7]);
+ load_s16_8x8(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3], &s2[4],
+ &s2[5], &s2[6], &s2[7]);
+
+ uint16x8_t d0 = highbd_12_convolve8_8_x(s0, filter, offset);
+ uint16x8_t d1 = highbd_12_convolve8_8_x(s1, filter, offset);
+ uint16x8_t d2 = highbd_12_convolve8_8_x(s2, filter, offset);
+
+ store_u16_8x3(dst, dst_stride, d0, d1, d2);
+ s += 8;
+ dst += 8;
+ width -= 8;
+ } while (width != 0);
+}
+
+static INLINE void highbd_dist_wtd_convolve_2d_horiz_8tap_sve2(
+ const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride,
+ int width, int height, const int16_t *x_filter_ptr, const int bd) {
+ const int64x2_t offset = vdupq_n_s64(1 << (bd + FILTER_BITS - 2));
+ const int16x8_t filter = vld1q_s16(x_filter_ptr);
+
+  // Only 8-tap and 4-tap vertical convolutions are used here, so we know that
+  // im_h % 4 == 3. This lets us run the loop over the whole block 4 rows at a
+  // time and then process the last 3 rows separately.
+
+ do {
+ const int16_t *s = (const int16_t *)src;
+ uint16_t *d = dst;
+ int w = width;
+
+ do {
+ int16x8_t s0[8], s1[8], s2[8], s3[8];
+ load_s16_8x8(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3],
+ &s0[4], &s0[5], &s0[6], &s0[7]);
+ load_s16_8x8(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3],
+ &s1[4], &s1[5], &s1[6], &s1[7]);
+ load_s16_8x8(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3],
+ &s2[4], &s2[5], &s2[6], &s2[7]);
+ load_s16_8x8(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3],
+ &s3[4], &s3[5], &s3[6], &s3[7]);
+
+ uint16x8_t d0 = highbd_convolve8_8_x(s0, filter, offset);
+ uint16x8_t d1 = highbd_convolve8_8_x(s1, filter, offset);
+ uint16x8_t d2 = highbd_convolve8_8_x(s2, filter, offset);
+ uint16x8_t d3 = highbd_convolve8_8_x(s3, filter, offset);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ s += 8;
+ d += 8;
+ w -= 8;
+ } while (w != 0);
+ src += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height > 4);
+
+ // Process final 3 rows.
+ const int16_t *s = (const int16_t *)src;
+ do {
+ int16x8_t s0[8], s1[8], s2[8];
+ load_s16_8x8(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3], &s0[4],
+ &s0[5], &s0[6], &s0[7]);
+ load_s16_8x8(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3], &s1[4],
+ &s1[5], &s1[6], &s1[7]);
+ load_s16_8x8(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3], &s2[4],
+ &s2[5], &s2[6], &s2[7]);
+
+ uint16x8_t d0 = highbd_convolve8_8_x(s0, filter, offset);
+ uint16x8_t d1 = highbd_convolve8_8_x(s1, filter, offset);
+ uint16x8_t d2 = highbd_convolve8_8_x(s2, filter, offset);
+
+ store_u16_8x3(dst, dst_stride, d0, d1, d2);
+ s += 8;
+ dst += 8;
+ width -= 8;
+ } while (width != 0);
+}
+
+static INLINE void highbd_12_dist_wtd_convolve_2d_horiz_4tap_sve2(
+ const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride,
+ int width, int height, const int16_t *x_filter_ptr) {
+ const int64x2_t offset = vdupq_n_s64(1 << (12 + FILTER_BITS - 1));
+ const int16x4_t x_filter = vld1_s16(x_filter_ptr + 2);
+ const int16x8_t filter = vcombine_s16(x_filter, vdup_n_s16(0));
+
+  // Only 8-tap and 4-tap vertical convolutions are used here, so we know that
+  // im_h % 4 == 3. This lets us run the loop over the whole block 4 rows at a
+  // time and then process the last 3 rows separately.
+
+ if (width == 4) {
+ uint16x8x2_t permute_tbl = vld1q_u16_x2(kDotProdTbl);
+
+ const int16_t *s = (const int16_t *)(src);
+
+ do {
+ int16x8_t s0, s1, s2, s3;
+ load_s16_8x4(s, src_stride, &s0, &s1, &s2, &s3);
+
+ uint16x4_t d0 = highbd_12_convolve4_4_x(s0, filter, offset, permute_tbl);
+ uint16x4_t d1 = highbd_12_convolve4_4_x(s1, filter, offset, permute_tbl);
+ uint16x4_t d2 = highbd_12_convolve4_4_x(s2, filter, offset, permute_tbl);
+ uint16x4_t d3 = highbd_12_convolve4_4_x(s3, filter, offset, permute_tbl);
+
+ store_u16_4x4(dst, dst_stride, d0, d1, d2, d3);
+
+ s += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height > 4);
+
+ // Process final 3 rows.
+ int16x8_t s0, s1, s2;
+ load_s16_8x3(s, src_stride, &s0, &s1, &s2);
+
+ uint16x4_t d0 = highbd_12_convolve4_4_x(s0, filter, offset, permute_tbl);
+ uint16x4_t d1 = highbd_12_convolve4_4_x(s1, filter, offset, permute_tbl);
+ uint16x4_t d2 = highbd_12_convolve4_4_x(s2, filter, offset, permute_tbl);
+
+ store_u16_4x3(dst, dst_stride, d0, d1, d2);
+
+ } else {
+ uint16x8_t idx = vld1q_u16(kDeinterleaveTbl);
+
+ do {
+ const int16_t *s = (const int16_t *)(src);
+ uint16_t *d = dst;
+ int w = width;
+
+ do {
+ int16x8_t s0[4], s1[4], s2[4], s3[4];
+ load_s16_8x4(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3]);
+ load_s16_8x4(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3]);
+ load_s16_8x4(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3]);
+ load_s16_8x4(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3]);
+
+ uint16x8_t d0 = highbd_12_convolve4_8_x(s0, filter, offset, idx);
+ uint16x8_t d1 = highbd_12_convolve4_8_x(s1, filter, offset, idx);
+ uint16x8_t d2 = highbd_12_convolve4_8_x(s2, filter, offset, idx);
+ uint16x8_t d3 = highbd_12_convolve4_8_x(s3, filter, offset, idx);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ s += 8;
+ d += 8;
+ w -= 8;
+ } while (w != 0);
+ src += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height > 4);
+
+ // Process final 3 rows.
+ const int16_t *s = (const int16_t *)(src);
+
+ do {
+ int16x8_t s0[4], s1[4], s2[4];
+ load_s16_8x4(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3]);
+ load_s16_8x4(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3]);
+ load_s16_8x4(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3]);
+
+ uint16x8_t d0 = highbd_12_convolve4_8_x(s0, filter, offset, idx);
+ uint16x8_t d1 = highbd_12_convolve4_8_x(s1, filter, offset, idx);
+ uint16x8_t d2 = highbd_12_convolve4_8_x(s2, filter, offset, idx);
+
+ store_u16_8x3(dst, dst_stride, d0, d1, d2);
+
+ s += 8;
+ dst += 8;
+ width -= 8;
+ } while (width != 0);
+ }
+}
+
+static INLINE void highbd_dist_wtd_convolve_2d_horiz_4tap_sve2(
+ const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride,
+ int width, int height, const int16_t *x_filter_ptr, const int bd) {
+ const int64x2_t offset = vdupq_n_s64(1 << (bd + FILTER_BITS - 1));
+ const int16x4_t x_filter = vld1_s16(x_filter_ptr + 2);
+ const int16x8_t filter = vcombine_s16(x_filter, vdup_n_s16(0));
+
+  // Only 8-tap and 4-tap vertical convolutions are used here, so we know that
+  // im_h % 4 == 3. This lets us run the loop over the whole block 4 rows at a
+  // time and then process the last 3 rows separately.
+
+ if (width == 4) {
+ uint16x8x2_t permute_tbl = vld1q_u16_x2(kDotProdTbl);
+
+ const int16_t *s = (const int16_t *)(src);
+
+ do {
+ int16x8_t s0, s1, s2, s3;
+ load_s16_8x4(s, src_stride, &s0, &s1, &s2, &s3);
+
+ uint16x4_t d0 = highbd_convolve4_4_x(s0, filter, offset, permute_tbl);
+ uint16x4_t d1 = highbd_convolve4_4_x(s1, filter, offset, permute_tbl);
+ uint16x4_t d2 = highbd_convolve4_4_x(s2, filter, offset, permute_tbl);
+ uint16x4_t d3 = highbd_convolve4_4_x(s3, filter, offset, permute_tbl);
+
+ store_u16_4x4(dst, dst_stride, d0, d1, d2, d3);
+
+ s += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height > 4);
+
+ // Process final 3 rows.
+ int16x8_t s0, s1, s2;
+ load_s16_8x3(s, src_stride, &s0, &s1, &s2);
+
+ uint16x4_t d0 = highbd_convolve4_4_x(s0, filter, offset, permute_tbl);
+ uint16x4_t d1 = highbd_convolve4_4_x(s1, filter, offset, permute_tbl);
+ uint16x4_t d2 = highbd_convolve4_4_x(s2, filter, offset, permute_tbl);
+
+ store_u16_4x3(dst, dst_stride, d0, d1, d2);
+ } else {
+ uint16x8_t idx = vld1q_u16(kDeinterleaveTbl);
+
+ do {
+ const int16_t *s = (const int16_t *)(src);
+ uint16_t *d = dst;
+ int w = width;
+
+ do {
+ int16x8_t s0[4], s1[4], s2[4], s3[4];
+ load_s16_8x4(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3]);
+ load_s16_8x4(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3]);
+ load_s16_8x4(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3]);
+ load_s16_8x4(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3]);
+
+ uint16x8_t d0 = highbd_convolve4_8_x(s0, filter, offset, idx);
+ uint16x8_t d1 = highbd_convolve4_8_x(s1, filter, offset, idx);
+ uint16x8_t d2 = highbd_convolve4_8_x(s2, filter, offset, idx);
+ uint16x8_t d3 = highbd_convolve4_8_x(s3, filter, offset, idx);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ s += 8;
+ d += 8;
+ w -= 8;
+ } while (w != 0);
+ src += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height > 4);
+
+ // Process final 3 rows.
+ const int16_t *s = (const int16_t *)(src);
+
+ do {
+ int16x8_t s0[4], s1[4], s2[4];
+ load_s16_8x4(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3]);
+ load_s16_8x4(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3]);
+ load_s16_8x4(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3]);
+
+ uint16x8_t d0 = highbd_convolve4_8_x(s0, filter, offset, idx);
+ uint16x8_t d1 = highbd_convolve4_8_x(s1, filter, offset, idx);
+ uint16x8_t d2 = highbd_convolve4_8_x(s2, filter, offset, idx);
+
+ store_u16_8x3(dst, dst_stride, d0, d1, d2);
+
+ s += 8;
+ dst += 8;
+ width -= 8;
+ } while (width != 0);
+ }
+}
+
+static INLINE uint16x4_t highbd_convolve8_4_2d_v(int16x8_t samples_lo[2],
+ int16x8_t samples_hi[2],
+ int16x8_t filter,
+ int64x2_t offset) {
+ int64x2_t sum01 = aom_svdot_lane_s16(offset, samples_lo[0], filter, 0);
+ sum01 = aom_svdot_lane_s16(sum01, samples_hi[0], filter, 1);
+
+ int64x2_t sum23 = aom_svdot_lane_s16(offset, samples_lo[1], filter, 0);
+ sum23 = aom_svdot_lane_s16(sum23, samples_hi[1], filter, 1);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+
+ return vqrshrun_n_s32(sum0123, COMPOUND_ROUND1_BITS);
+}
+
+static INLINE uint16x8_t highbd_convolve8_8_2d_v(int16x8_t samples_lo[4],
+ int16x8_t samples_hi[4],
+ int16x8_t filter,
+ int64x2_t offset) {
+ int64x2_t sum01 = aom_svdot_lane_s16(offset, samples_lo[0], filter, 0);
+ sum01 = aom_svdot_lane_s16(sum01, samples_hi[0], filter, 1);
+
+ int64x2_t sum23 = aom_svdot_lane_s16(offset, samples_lo[1], filter, 0);
+ sum23 = aom_svdot_lane_s16(sum23, samples_hi[1], filter, 1);
+
+ int64x2_t sum45 = aom_svdot_lane_s16(offset, samples_lo[2], filter, 0);
+ sum45 = aom_svdot_lane_s16(sum45, samples_hi[2], filter, 1);
+
+ int64x2_t sum67 = aom_svdot_lane_s16(offset, samples_lo[3], filter, 0);
+ sum67 = aom_svdot_lane_s16(sum67, samples_hi[3], filter, 1);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+ int32x4_t sum4567 = vcombine_s32(vmovn_s64(sum45), vmovn_s64(sum67));
+
+ return vcombine_u16(vqrshrun_n_s32(sum0123, COMPOUND_ROUND1_BITS),
+ vqrshrun_n_s32(sum4567, COMPOUND_ROUND1_BITS));
+}
+
+static INLINE void highbd_dist_wtd_convolve_2d_vert_8tap_sve2(
+ const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride,
+ int width, int height, const int16_t *y_filter_ptr, int offset) {
+ const int16x8_t y_filter = vld1q_s16(y_filter_ptr);
+ const int64x2_t offset_s64 = vdupq_n_s64(offset);
+
+ uint16x8x3_t merge_block_tbl = vld1q_u16_x3(kDotProdMergeBlockTbl);
+  // Scale indices by the true vector length to avoid reading from an
+  // 'undefined' portion of a vector on a system with SVE vectors > 128-bit.
+ uint16x8_t correction0 =
+ vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000000000000ULL));
+ merge_block_tbl.val[0] = vaddq_u16(merge_block_tbl.val[0], correction0);
+
+ uint16x8_t correction1 =
+ vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000100000000ULL));
+ merge_block_tbl.val[1] = vaddq_u16(merge_block_tbl.val[1], correction1);
+
+ uint16x8_t correction2 =
+ vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000100010000ULL));
+ merge_block_tbl.val[2] = vaddq_u16(merge_block_tbl.val[2], correction2);
+
+ if (width == 4) {
+ int16_t *s = (int16_t *)src;
+ int16x4_t s0, s1, s2, s3, s4, s5, s6;
+ load_s16_4x7(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6);
+ s += 7 * src_stride;
+
+ // This operation combines a conventional transpose and the sample permute
+ // required before computing the dot product.
+ int16x8_t s0123[2], s1234[2], s2345[2], s3456[2];
+ transpose_concat_4x4(s0, s1, s2, s3, s0123);
+ transpose_concat_4x4(s1, s2, s3, s4, s1234);
+ transpose_concat_4x4(s2, s3, s4, s5, s2345);
+ transpose_concat_4x4(s3, s4, s5, s6, s3456);
+
+ do {
+ int16x4_t s7, s8, s9, s10;
+ load_s16_4x4(s, src_stride, &s7, &s8, &s9, &s10);
+
+ int16x8_t s4567[2], s5678[2], s6789[2], s789A[2];
+ // Transpose and shuffle the 4 lines that were loaded.
+ transpose_concat_4x4(s7, s8, s9, s10, s789A);
+
+ // Merge new data into block from previous iteration.
+ aom_tbl2x2_s16(s3456, s789A, merge_block_tbl.val[0], s4567);
+ aom_tbl2x2_s16(s3456, s789A, merge_block_tbl.val[1], s5678);
+ aom_tbl2x2_s16(s3456, s789A, merge_block_tbl.val[2], s6789);
+
+ uint16x4_t d0 =
+ highbd_convolve8_4_2d_v(s0123, s4567, y_filter, offset_s64);
+ uint16x4_t d1 =
+ highbd_convolve8_4_2d_v(s1234, s5678, y_filter, offset_s64);
+ uint16x4_t d2 =
+ highbd_convolve8_4_2d_v(s2345, s6789, y_filter, offset_s64);
+ uint16x4_t d3 =
+ highbd_convolve8_4_2d_v(s3456, s789A, y_filter, offset_s64);
+
+ store_u16_4x4(dst, dst_stride, d0, d1, d2, d3);
+
+ // Prepare block for next iteration - re-using as much as possible.
+ // Shuffle everything up four rows.
+ s0123[0] = s4567[0];
+ s0123[1] = s4567[1];
+ s1234[0] = s5678[0];
+ s1234[1] = s5678[1];
+ s2345[0] = s6789[0];
+ s2345[1] = s6789[1];
+ s3456[0] = s789A[0];
+ s3456[1] = s789A[1];
+
+ s += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height != 0);
+ } else {
+ do {
+ int h = height;
+ int16_t *s = (int16_t *)src;
+ uint16_t *d = dst;
+
+ int16x8_t s0, s1, s2, s3, s4, s5, s6;
+ load_s16_8x7(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6);
+ s += 7 * src_stride;
+
+      // This operation combines a conventional transpose and the sample
+      // permute required before computing the dot product.
+ int16x8_t s0123[4], s1234[4], s2345[4], s3456[4];
+ transpose_concat_8x4(s0, s1, s2, s3, s0123);
+ transpose_concat_8x4(s1, s2, s3, s4, s1234);
+ transpose_concat_8x4(s2, s3, s4, s5, s2345);
+ transpose_concat_8x4(s3, s4, s5, s6, s3456);
+
+ do {
+ int16x8_t s7, s8, s9, s10;
+ load_s16_8x4(s, src_stride, &s7, &s8, &s9, &s10);
+ int16x8_t s4567[4], s5678[4], s6789[4], s789A[4];
+
+ // Transpose and shuffle the 4 lines that were loaded.
+ transpose_concat_8x4(s7, s8, s9, s10, s789A);
+
+ // Merge new data into block from previous iteration.
+ aom_tbl2x4_s16(s3456, s789A, merge_block_tbl.val[0], s4567);
+ aom_tbl2x4_s16(s3456, s789A, merge_block_tbl.val[1], s5678);
+ aom_tbl2x4_s16(s3456, s789A, merge_block_tbl.val[2], s6789);
+
+ uint16x8_t d0 =
+ highbd_convolve8_8_2d_v(s0123, s4567, y_filter, offset_s64);
+ uint16x8_t d1 =
+ highbd_convolve8_8_2d_v(s1234, s5678, y_filter, offset_s64);
+ uint16x8_t d2 =
+ highbd_convolve8_8_2d_v(s2345, s6789, y_filter, offset_s64);
+ uint16x8_t d3 =
+ highbd_convolve8_8_2d_v(s3456, s789A, y_filter, offset_s64);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ // Prepare block for next iteration - re-using as much as possible.
+ // Shuffle everything up four rows.
+ s0123[0] = s4567[0];
+ s0123[1] = s4567[1];
+ s0123[2] = s4567[2];
+ s0123[3] = s4567[3];
+ s1234[0] = s5678[0];
+ s1234[1] = s5678[1];
+ s1234[2] = s5678[2];
+ s1234[3] = s5678[3];
+ s2345[0] = s6789[0];
+ s2345[1] = s6789[1];
+ s2345[2] = s6789[2];
+ s2345[3] = s6789[3];
+ s3456[0] = s789A[0];
+ s3456[1] = s789A[1];
+ s3456[2] = s789A[2];
+ s3456[3] = s789A[3];
+
+ s += 4 * src_stride;
+ d += 4 * dst_stride;
+ h -= 4;
+ } while (h != 0);
+ src += 8;
+ dst += 8;
+ width -= 8;
+ } while (width != 0);
+ }
+}
+
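+// The 4-tap vertical helpers below use plain Neon widening multiply-accumulate
+// instructions (one vmlal_lane per tap) rather than the SVE2 dot-product
+// scheme used by the 8-tap path.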
+static INLINE uint16x4_t highbd_convolve4_4_2d_v(
+ const int16x4_t s0, const int16x4_t s1, const int16x4_t s2,
+ const int16x4_t s3, const int16x4_t filter, const int32x4_t offset) {
+ int32x4_t sum = vmlal_lane_s16(offset, s0, filter, 0);
+ sum = vmlal_lane_s16(sum, s1, filter, 1);
+ sum = vmlal_lane_s16(sum, s2, filter, 2);
+ sum = vmlal_lane_s16(sum, s3, filter, 3);
+
+ return vqrshrun_n_s32(sum, COMPOUND_ROUND1_BITS);
+}
+
+static INLINE uint16x8_t highbd_convolve4_8_2d_v(
+ const int16x8_t s0, const int16x8_t s1, const int16x8_t s2,
+ const int16x8_t s3, const int16x4_t filter, const int32x4_t offset) {
+ int32x4_t sum0 = vmlal_lane_s16(offset, vget_low_s16(s0), filter, 0);
+ sum0 = vmlal_lane_s16(sum0, vget_low_s16(s1), filter, 1);
+ sum0 = vmlal_lane_s16(sum0, vget_low_s16(s2), filter, 2);
+ sum0 = vmlal_lane_s16(sum0, vget_low_s16(s3), filter, 3);
+
+ int32x4_t sum1 = vmlal_lane_s16(offset, vget_high_s16(s0), filter, 0);
+ sum1 = vmlal_lane_s16(sum1, vget_high_s16(s1), filter, 1);
+ sum1 = vmlal_lane_s16(sum1, vget_high_s16(s2), filter, 2);
+ sum1 = vmlal_lane_s16(sum1, vget_high_s16(s3), filter, 3);
+
+ return vcombine_u16(vqrshrun_n_s32(sum0, COMPOUND_ROUND1_BITS),
+ vqrshrun_n_s32(sum1, COMPOUND_ROUND1_BITS));
+}
+
+static INLINE void highbd_dist_wtd_convolve_2d_vert_4tap_neon(
+ const uint16_t *src_ptr, int src_stride, uint16_t *dst_ptr, int dst_stride,
+ int w, int h, const int16_t *y_filter_ptr, const int offset) {
+ const int16x4_t y_filter = vld1_s16(y_filter_ptr + 2);
+ const int32x4_t offset_vec = vdupq_n_s32(offset);
+
+ if (w == 4) {
+ const int16_t *s = (const int16_t *)src_ptr;
+ uint16_t *d = dst_ptr;
+
+ int16x4_t s0, s1, s2;
+ load_s16_4x3(s, src_stride, &s0, &s1, &s2);
+ s += 3 * src_stride;
+
+ do {
+ int16x4_t s3, s4, s5, s6;
+ load_s16_4x4(s, src_stride, &s3, &s4, &s5, &s6);
+
+ uint16x4_t d0 =
+ highbd_convolve4_4_2d_v(s0, s1, s2, s3, y_filter, offset_vec);
+ uint16x4_t d1 =
+ highbd_convolve4_4_2d_v(s1, s2, s3, s4, y_filter, offset_vec);
+ uint16x4_t d2 =
+ highbd_convolve4_4_2d_v(s2, s3, s4, s5, y_filter, offset_vec);
+ uint16x4_t d3 =
+ highbd_convolve4_4_2d_v(s3, s4, s5, s6, y_filter, offset_vec);
+
+ store_u16_4x4(d, dst_stride, d0, d1, d2, d3);
+
+ s0 = s4;
+ s1 = s5;
+ s2 = s6;
+
+ s += 4 * src_stride;
+ d += 4 * dst_stride;
+ h -= 4;
+ } while (h != 0);
+ } else {
+ do {
+ int height = h;
+ const int16_t *s = (const int16_t *)src_ptr;
+ uint16_t *d = dst_ptr;
+
+ int16x8_t s0, s1, s2;
+ load_s16_8x3(s, src_stride, &s0, &s1, &s2);
+ s += 3 * src_stride;
+
+ do {
+ int16x8_t s3, s4, s5, s6;
+ load_s16_8x4(s, src_stride, &s3, &s4, &s5, &s6);
+
+ uint16x8_t d0 =
+ highbd_convolve4_8_2d_v(s0, s1, s2, s3, y_filter, offset_vec);
+ uint16x8_t d1 =
+ highbd_convolve4_8_2d_v(s1, s2, s3, s4, y_filter, offset_vec);
+ uint16x8_t d2 =
+ highbd_convolve4_8_2d_v(s2, s3, s4, s5, y_filter, offset_vec);
+ uint16x8_t d3 =
+ highbd_convolve4_8_2d_v(s3, s4, s5, s6, y_filter, offset_vec);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ s0 = s4;
+ s1 = s5;
+ s2 = s6;
+
+ s += 4 * src_stride;
+ d += 4 * dst_stride;
+ height -= 4;
+ } while (height != 0);
+ src_ptr += 8;
+ dst_ptr += 8;
+ w -= 8;
+ } while (w != 0);
+ }
+}
+
+void av1_highbd_dist_wtd_convolve_2d_sve2(
+ const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride, int w,
+ int h, const InterpFilterParams *filter_params_x,
+ const InterpFilterParams *filter_params_y, const int subpel_x_qn,
+ const int subpel_y_qn, ConvolveParams *conv_params, int bd) {
+ DECLARE_ALIGNED(16, uint16_t,
+ im_block[(MAX_SB_SIZE + MAX_FILTER_TAP) * MAX_SB_SIZE]);
+ DECLARE_ALIGNED(16, uint16_t,
+ im_block2[(MAX_SB_SIZE + MAX_FILTER_TAP) * MAX_SB_SIZE]);
+
+ CONV_BUF_TYPE *dst16 = conv_params->dst;
+ int dst16_stride = conv_params->dst_stride;
+ const int x_filter_taps = get_filter_tap(filter_params_x, subpel_x_qn);
+ const int clamped_x_taps = x_filter_taps < 4 ? 4 : x_filter_taps;
+
+ const int y_filter_taps = get_filter_tap(filter_params_y, subpel_y_qn);
+ const int clamped_y_taps = y_filter_taps < 4 ? 4 : y_filter_taps;
+
+ if (x_filter_taps == 6 || y_filter_taps == 6) {
+ av1_highbd_dist_wtd_convolve_2d_neon(
+ src, src_stride, dst, dst_stride, w, h, filter_params_x,
+ filter_params_y, subpel_x_qn, subpel_y_qn, conv_params, bd);
+ return;
+ }
+
+ const int im_h = h + clamped_y_taps - 1;
+ const int im_stride = MAX_SB_SIZE;
+ const int vert_offset = clamped_y_taps / 2 - 1;
+ const int horiz_offset = clamped_x_taps / 2 - 1;
+ const int y_offset_bits = bd + 2 * FILTER_BITS - conv_params->round_0;
+ const int round_offset_conv_y = (1 << y_offset_bits);
+
+ const uint16_t *src_ptr = src - vert_offset * src_stride - horiz_offset;
+
+ const int16_t *x_filter_ptr = av1_get_interp_filter_subpel_kernel(
+ filter_params_x, subpel_x_qn & SUBPEL_MASK);
+ const int16_t *y_filter_ptr = av1_get_interp_filter_subpel_kernel(
+ filter_params_y, subpel_y_qn & SUBPEL_MASK);
+
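+  // Horizontal pass: filter im_h = h + clamped_y_taps - 1 rows into im_block.
+  // The vertical pass below consumes im_block and writes either to im_block2,
+  // which is then averaged against the contents of conv_params->dst, or
+  // straight to conv_params->dst.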
+ if (bd == 12) {
+ if (x_filter_taps <= 4) {
+ highbd_12_dist_wtd_convolve_2d_horiz_4tap_sve2(
+ src_ptr, src_stride, im_block, im_stride, w, im_h, x_filter_ptr);
+ } else {
+ highbd_12_dist_wtd_convolve_2d_horiz_8tap_sve2(
+ src_ptr, src_stride, im_block, im_stride, w, im_h, x_filter_ptr);
+ }
+ } else {
+ if (x_filter_taps <= 4) {
+ highbd_dist_wtd_convolve_2d_horiz_4tap_sve2(
+ src_ptr, src_stride, im_block, im_stride, w, im_h, x_filter_ptr, bd);
+ } else {
+ highbd_dist_wtd_convolve_2d_horiz_8tap_sve2(
+ src_ptr, src_stride, im_block, im_stride, w, im_h, x_filter_ptr, bd);
+ }
+ }
+
+ if (conv_params->do_average) {
+ if (y_filter_taps <= 4) {
+ highbd_dist_wtd_convolve_2d_vert_4tap_neon(im_block, im_stride, im_block2,
+ im_stride, w, h, y_filter_ptr,
+ round_offset_conv_y);
+ } else {
+ highbd_dist_wtd_convolve_2d_vert_8tap_sve2(im_block, im_stride, im_block2,
+ im_stride, w, h, y_filter_ptr,
+ round_offset_conv_y);
+ }
+ if (conv_params->use_dist_wtd_comp_avg) {
+ if (bd == 12) {
+ highbd_12_dist_wtd_comp_avg_neon(im_block2, im_stride, dst, dst_stride,
+ w, h, conv_params);
+
+ } else {
+ highbd_dist_wtd_comp_avg_neon(im_block2, im_stride, dst, dst_stride, w,
+ h, conv_params, bd);
+ }
+ } else {
+ if (bd == 12) {
+ highbd_12_comp_avg_neon(im_block2, im_stride, dst, dst_stride, w, h,
+ conv_params);
+
+ } else {
+ highbd_comp_avg_neon(im_block2, im_stride, dst, dst_stride, w, h,
+ conv_params, bd);
+ }
+ }
+ } else {
+ if (y_filter_taps <= 4) {
+ highbd_dist_wtd_convolve_2d_vert_4tap_neon(
+ im_block, im_stride, dst16, dst16_stride, w, h, y_filter_ptr,
+ round_offset_conv_y);
+ } else {
+ highbd_dist_wtd_convolve_2d_vert_8tap_sve2(
+ im_block, im_stride, dst16, dst16_stride, w, h, y_filter_ptr,
+ round_offset_conv_y);
+ }
+ }
+}
diff --git a/third_party/aom/av1/common/arm/highbd_convolve_sve2.c b/third_party/aom/av1/common/arm/highbd_convolve_sve2.c
new file mode 100644
index 0000000000..82eb12fcea
--- /dev/null
+++ b/third_party/aom/av1/common/arm/highbd_convolve_sve2.c
@@ -0,0 +1,1720 @@
+/*
+ * Copyright (c) 2024, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <assert.h>
+#include <arm_neon.h>
+
+#include "config/aom_config.h"
+#include "config/av1_rtcd.h"
+
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_dsp/arm/aom_neon_sve_bridge.h"
+#include "aom_dsp/arm/aom_neon_sve2_bridge.h"
+#include "aom_dsp/arm/mem_neon.h"
+#include "aom_ports/mem.h"
+#include "av1/common/convolve.h"
+#include "av1/common/filter.h"
+#include "av1/common/arm/highbd_convolve_sve2.h"
+
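+// Each group of four indices below selects four consecutive 16-bit samples,
+// with successive groups starting one sample later. The entries in the last
+// two rows that wrap past 7 are biased by svcnth() before use, so that with
+// the two-vector TBL they select samples from the following source vector
+// regardless of the SVE vector length.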
+DECLARE_ALIGNED(16, static const uint16_t, kDotProdTbl[32]) = {
+ 0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6,
+ 4, 5, 6, 7, 5, 6, 7, 0, 6, 7, 0, 1, 7, 0, 1, 2,
+};
+
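+// For the 12-tap filters the taps are split across two vectors: lane 0 of
+// filter_0_7 holds taps 0-3, lane 1 holds taps 4-7 and lane 1 of filter_4_11
+// holds taps 8-11, so three indexed dot products per 64-bit accumulator apply
+// the full filter.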
+static INLINE uint16x4_t convolve12_4_x(
+ int16x8_t s0, int16x8_t s1, int16x8_t filter_0_7, int16x8_t filter_4_11,
+ const int64x2_t offset, uint16x8x4_t permute_tbl, uint16x4_t max) {
+ int16x8_t permuted_samples[6];
+ permuted_samples[0] = aom_tbl_s16(s0, permute_tbl.val[0]);
+ permuted_samples[1] = aom_tbl_s16(s0, permute_tbl.val[1]);
+ permuted_samples[2] = aom_tbl2_s16(s0, s1, permute_tbl.val[2]);
+ permuted_samples[3] = aom_tbl2_s16(s0, s1, permute_tbl.val[3]);
+ permuted_samples[4] = aom_tbl_s16(s1, permute_tbl.val[0]);
+ permuted_samples[5] = aom_tbl_s16(s1, permute_tbl.val[1]);
+
+ int64x2_t sum01 =
+ aom_svdot_lane_s16(offset, permuted_samples[0], filter_0_7, 0);
+ sum01 = aom_svdot_lane_s16(sum01, permuted_samples[2], filter_0_7, 1);
+ sum01 = aom_svdot_lane_s16(sum01, permuted_samples[4], filter_4_11, 1);
+
+ int64x2_t sum23 =
+ aom_svdot_lane_s16(offset, permuted_samples[1], filter_0_7, 0);
+ sum23 = aom_svdot_lane_s16(sum23, permuted_samples[3], filter_0_7, 1);
+ sum23 = aom_svdot_lane_s16(sum23, permuted_samples[5], filter_4_11, 1);
+
+ int32x4_t res0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+ uint16x4_t res = vqrshrun_n_s32(res0123, FILTER_BITS);
+
+ return vmin_u16(res, max);
+}
+
+static INLINE uint16x8_t convolve12_8_x(int16x8_t s0, int16x8_t s1,
+ int16x8_t s2, int16x8_t filter_0_7,
+ int16x8_t filter_4_11, int64x2_t offset,
+ uint16x8x4_t permute_tbl,
+ uint16x8_t max) {
+ int16x8_t permuted_samples[8];
+ permuted_samples[0] = aom_tbl_s16(s0, permute_tbl.val[0]);
+ permuted_samples[1] = aom_tbl_s16(s0, permute_tbl.val[1]);
+ permuted_samples[2] = aom_tbl2_s16(s0, s1, permute_tbl.val[2]);
+ permuted_samples[3] = aom_tbl2_s16(s0, s1, permute_tbl.val[3]);
+ permuted_samples[4] = aom_tbl_s16(s1, permute_tbl.val[0]);
+ permuted_samples[5] = aom_tbl_s16(s1, permute_tbl.val[1]);
+ permuted_samples[6] = aom_tbl2_s16(s1, s2, permute_tbl.val[2]);
+ permuted_samples[7] = aom_tbl2_s16(s1, s2, permute_tbl.val[3]);
+
+ int64x2_t sum01 =
+ aom_svdot_lane_s16(offset, permuted_samples[0], filter_0_7, 0);
+ sum01 = aom_svdot_lane_s16(sum01, permuted_samples[2], filter_0_7, 1);
+ sum01 = aom_svdot_lane_s16(sum01, permuted_samples[4], filter_4_11, 1);
+
+ int64x2_t sum23 =
+ aom_svdot_lane_s16(offset, permuted_samples[1], filter_0_7, 0);
+ sum23 = aom_svdot_lane_s16(sum23, permuted_samples[3], filter_0_7, 1);
+ sum23 = aom_svdot_lane_s16(sum23, permuted_samples[5], filter_4_11, 1);
+
+ int64x2_t sum45 =
+ aom_svdot_lane_s16(offset, permuted_samples[2], filter_0_7, 0);
+ sum45 = aom_svdot_lane_s16(sum45, permuted_samples[4], filter_0_7, 1);
+ sum45 = aom_svdot_lane_s16(sum45, permuted_samples[6], filter_4_11, 1);
+
+ int64x2_t sum67 =
+ aom_svdot_lane_s16(offset, permuted_samples[3], filter_0_7, 0);
+ sum67 = aom_svdot_lane_s16(sum67, permuted_samples[5], filter_0_7, 1);
+ sum67 = aom_svdot_lane_s16(sum67, permuted_samples[7], filter_4_11, 1);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+ int32x4_t sum4567 = vcombine_s32(vmovn_s64(sum45), vmovn_s64(sum67));
+
+ uint16x8_t res = vcombine_u16(vqrshrun_n_s32(sum0123, FILTER_BITS),
+ vqrshrun_n_s32(sum4567, FILTER_BITS));
+
+ return vminq_u16(res, max);
+}
+
+static INLINE void highbd_convolve_x_sr_12tap_sve2(
+ const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride,
+ int width, int height, const int16_t *y_filter_ptr,
+ ConvolveParams *conv_params, int bd) {
+  // This shim allows us to do only one rounding shift instead of two.
+ const int64x2_t offset = vdupq_n_s64(1 << (conv_params->round_0 - 1));
+
+ const int16x8_t y_filter_0_7 = vld1q_s16(y_filter_ptr);
+ const int16x8_t y_filter_4_11 = vld1q_s16(y_filter_ptr + 4);
+
+ uint16x8x4_t permute_tbl = vld1q_u16_x4(kDotProdTbl);
+  // Scale indices by the true vector length to avoid reading from an
+  // 'undefined' portion of a vector on a system with SVE vectors > 128-bit.
+ uint16x8_t correction0 = vreinterpretq_u16_u64(vcombine_u64(
+ vdup_n_u64(0), vdup_n_u64(svcnth() * 0x0001000000000000ULL)));
+ permute_tbl.val[2] = vaddq_u16(permute_tbl.val[2], correction0);
+
+ uint16x8_t correction1 = vreinterpretq_u16_u64(
+ vcombine_u64(vdup_n_u64(svcnth() * 0x0001000100000000ULL),
+ vdup_n_u64(svcnth() * 0x0001000100010000ULL)));
+ permute_tbl.val[3] = vaddq_u16(permute_tbl.val[3], correction1);
+
+ if (width == 4) {
+ const uint16x4_t max = vdup_n_u16((1 << bd) - 1);
+ const int16_t *s = (const int16_t *)src;
+
+ do {
+ int16x8_t s0, s1, s2, s3, s4, s5, s6, s7;
+ load_s16_8x4(s, src_stride, &s0, &s2, &s4, &s6);
+ load_s16_8x4(s + 8, src_stride, &s1, &s3, &s5, &s7);
+
+ uint16x4_t d0 = convolve12_4_x(s0, s1, y_filter_0_7, y_filter_4_11,
+ offset, permute_tbl, max);
+ uint16x4_t d1 = convolve12_4_x(s2, s3, y_filter_0_7, y_filter_4_11,
+ offset, permute_tbl, max);
+ uint16x4_t d2 = convolve12_4_x(s4, s5, y_filter_0_7, y_filter_4_11,
+ offset, permute_tbl, max);
+ uint16x4_t d3 = convolve12_4_x(s6, s7, y_filter_0_7, y_filter_4_11,
+ offset, permute_tbl, max);
+
+ store_u16_4x4(dst, dst_stride, d0, d1, d2, d3);
+
+ s += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height != 0);
+ } else {
+ const uint16x8_t max = vdupq_n_u16((1 << bd) - 1);
+
+ do {
+ const int16_t *s = (const int16_t *)src;
+ uint16_t *d = dst;
+ int w = width;
+
+ do {
+ int16x8_t s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11;
+ load_s16_8x4(s, src_stride, &s0, &s3, &s6, &s9);
+ load_s16_8x4(s + 8, src_stride, &s1, &s4, &s7, &s10);
+ load_s16_8x4(s + 16, src_stride, &s2, &s5, &s8, &s11);
+
+ uint16x8_t d0 = convolve12_8_x(s0, s1, s2, y_filter_0_7, y_filter_4_11,
+ offset, permute_tbl, max);
+ uint16x8_t d1 = convolve12_8_x(s3, s4, s5, y_filter_0_7, y_filter_4_11,
+ offset, permute_tbl, max);
+ uint16x8_t d2 = convolve12_8_x(s6, s7, s8, y_filter_0_7, y_filter_4_11,
+ offset, permute_tbl, max);
+ uint16x8_t d3 = convolve12_8_x(s9, s10, s11, y_filter_0_7,
+ y_filter_4_11, offset, permute_tbl, max);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ s += 8;
+ d += 8;
+ w -= 8;
+ } while (w != 0);
+ src += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height != 0);
+ }
+}
+
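+// Each aom_sdotq_s16 call below produces two partial sums, one per 64-bit
+// half of the sample vector; the pairwise adds then combine them into a full
+// 8-tap sum per output pixel. The caller passes the rounding offset in the
+// low 64-bit lane only, so it is counted exactly once after the pairwise add.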
+static INLINE uint16x8_t convolve8_8_x(int16x8_t s0[8], int16x8_t filter,
+ int64x2_t offset, uint16x8_t max) {
+ int64x2_t sum[8];
+ sum[0] = aom_sdotq_s16(offset, s0[0], filter);
+ sum[1] = aom_sdotq_s16(offset, s0[1], filter);
+ sum[2] = aom_sdotq_s16(offset, s0[2], filter);
+ sum[3] = aom_sdotq_s16(offset, s0[3], filter);
+ sum[4] = aom_sdotq_s16(offset, s0[4], filter);
+ sum[5] = aom_sdotq_s16(offset, s0[5], filter);
+ sum[6] = aom_sdotq_s16(offset, s0[6], filter);
+ sum[7] = aom_sdotq_s16(offset, s0[7], filter);
+
+ sum[0] = vpaddq_s64(sum[0], sum[1]);
+ sum[2] = vpaddq_s64(sum[2], sum[3]);
+ sum[4] = vpaddq_s64(sum[4], sum[5]);
+ sum[6] = vpaddq_s64(sum[6], sum[7]);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum[0]), vmovn_s64(sum[2]));
+ int32x4_t sum4567 = vcombine_s32(vmovn_s64(sum[4]), vmovn_s64(sum[6]));
+
+ uint16x8_t res = vcombine_u16(vqrshrun_n_s32(sum0123, FILTER_BITS),
+ vqrshrun_n_s32(sum4567, FILTER_BITS));
+
+ return vminq_u16(res, max);
+}
+
+static INLINE void highbd_convolve_x_sr_8tap_sve2(
+ const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride,
+ int width, int height, const int16_t *y_filter_ptr,
+ ConvolveParams *conv_params, int bd) {
+ const uint16x8_t max = vdupq_n_u16((1 << bd) - 1);
+  // This shim allows us to do only one rounding shift instead of two.
+ const int64_t offset = 1 << (conv_params->round_0 - 1);
+ const int64x2_t offset_lo = vcombine_s64((int64x1_t)(offset), vdup_n_s64(0));
+
+ const int16x8_t filter = vld1q_s16(y_filter_ptr);
+
+ do {
+ const int16_t *s = (const int16_t *)src;
+ uint16_t *d = dst;
+ int w = width;
+
+ do {
+ int16x8_t s0[8], s1[8], s2[8], s3[8];
+ load_s16_8x8(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3],
+ &s0[4], &s0[5], &s0[6], &s0[7]);
+ load_s16_8x8(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3],
+ &s1[4], &s1[5], &s1[6], &s1[7]);
+ load_s16_8x8(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3],
+ &s2[4], &s2[5], &s2[6], &s2[7]);
+ load_s16_8x8(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3],
+ &s3[4], &s3[5], &s3[6], &s3[7]);
+
+ uint16x8_t d0 = convolve8_8_x(s0, filter, offset_lo, max);
+ uint16x8_t d1 = convolve8_8_x(s1, filter, offset_lo, max);
+ uint16x8_t d2 = convolve8_8_x(s2, filter, offset_lo, max);
+ uint16x8_t d3 = convolve8_8_x(s3, filter, offset_lo, max);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ s += 8;
+ d += 8;
+ w -= 8;
+ } while (w != 0);
+ src += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height != 0);
+}
+
+// clang-format off
+DECLARE_ALIGNED(16, static const uint16_t, kDeinterleaveTbl[8]) = {
+ 0, 2, 4, 6, 1, 3, 5, 7,
+};
+// clang-format on
+
+static INLINE uint16x4_t convolve4_4_x(int16x8_t s0, int16x8_t filter,
+ int64x2_t offset,
+ uint16x8x2_t permute_tbl,
+ uint16x4_t max) {
+ int16x8_t permuted_samples0 = aom_tbl_s16(s0, permute_tbl.val[0]);
+ int16x8_t permuted_samples1 = aom_tbl_s16(s0, permute_tbl.val[1]);
+
+ int64x2_t sum01 = aom_svdot_lane_s16(offset, permuted_samples0, filter, 0);
+ int64x2_t sum23 = aom_svdot_lane_s16(offset, permuted_samples1, filter, 0);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+ uint16x4_t res = vqrshrun_n_s32(sum0123, FILTER_BITS);
+
+ return vmin_u16(res, max);
+}
+
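+// With a 4-tap filter each input vector yields results for output pixels i
+// and i + 4 (one per 64-bit half), so the narrowed results arrive in the
+// order 0, 4, 1, 5, 2, 6, 3, 7 and kDeinterleaveTbl restores the natural
+// ordering.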
+static INLINE uint16x8_t convolve4_8_x(int16x8_t s0[4], int16x8_t filter,
+ int64x2_t offset, uint16x8_t tbl,
+ uint16x8_t max) {
+ int64x2_t sum04 = aom_svdot_lane_s16(offset, s0[0], filter, 0);
+ int64x2_t sum15 = aom_svdot_lane_s16(offset, s0[1], filter, 0);
+ int64x2_t sum26 = aom_svdot_lane_s16(offset, s0[2], filter, 0);
+ int64x2_t sum37 = aom_svdot_lane_s16(offset, s0[3], filter, 0);
+
+ int32x4_t sum0415 = vcombine_s32(vmovn_s64(sum04), vmovn_s64(sum15));
+ int32x4_t sum2637 = vcombine_s32(vmovn_s64(sum26), vmovn_s64(sum37));
+
+ uint16x8_t res = vcombine_u16(vqrshrun_n_s32(sum0415, FILTER_BITS),
+ vqrshrun_n_s32(sum2637, FILTER_BITS));
+ res = aom_tbl_u16(res, tbl);
+
+ return vminq_u16(res, max);
+}
+
+static INLINE void highbd_convolve_x_sr_4tap_sve2(
+ const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride,
+ int width, int height, const int16_t *x_filter_ptr,
+ ConvolveParams *conv_params, int bd) {
+  // This shim allows us to do only one rounding shift instead of two.
+ const int64x2_t offset = vdupq_n_s64(1 << (conv_params->round_0 - 1));
+
+ const int16x4_t x_filter = vld1_s16(x_filter_ptr + 2);
+ const int16x8_t filter = vcombine_s16(x_filter, vdup_n_s16(0));
+
+ if (width == 4) {
+ const uint16x4_t max = vdup_n_u16((1 << bd) - 1);
+ uint16x8x2_t permute_tbl = vld1q_u16_x2(kDotProdTbl);
+
+ const int16_t *s = (const int16_t *)(src);
+
+ do {
+ int16x8_t s0, s1, s2, s3;
+ load_s16_8x4(s, src_stride, &s0, &s1, &s2, &s3);
+
+ uint16x4_t d0 = convolve4_4_x(s0, filter, offset, permute_tbl, max);
+ uint16x4_t d1 = convolve4_4_x(s1, filter, offset, permute_tbl, max);
+ uint16x4_t d2 = convolve4_4_x(s2, filter, offset, permute_tbl, max);
+ uint16x4_t d3 = convolve4_4_x(s3, filter, offset, permute_tbl, max);
+
+ store_u16_4x4(dst, dst_stride, d0, d1, d2, d3);
+
+ s += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height != 0);
+ } else {
+ const uint16x8_t max = vdupq_n_u16((1 << bd) - 1);
+ uint16x8_t idx = vld1q_u16(kDeinterleaveTbl);
+
+ do {
+ const int16_t *s = (const int16_t *)(src);
+ uint16_t *d = dst;
+ int w = width;
+
+ do {
+ int16x8_t s0[4], s1[4], s2[4], s3[4];
+ load_s16_8x4(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3]);
+ load_s16_8x4(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3]);
+ load_s16_8x4(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3]);
+ load_s16_8x4(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3]);
+
+ uint16x8_t d0 = convolve4_8_x(s0, filter, offset, idx, max);
+ uint16x8_t d1 = convolve4_8_x(s1, filter, offset, idx, max);
+ uint16x8_t d2 = convolve4_8_x(s2, filter, offset, idx, max);
+ uint16x8_t d3 = convolve4_8_x(s3, filter, offset, idx, max);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ s += 8;
+ d += 8;
+ w -= 8;
+ } while (w != 0);
+ src += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height != 0);
+ }
+}
+
+void av1_highbd_convolve_x_sr_sve2(const uint16_t *src, int src_stride,
+ uint16_t *dst, int dst_stride, int w, int h,
+ const InterpFilterParams *filter_params_x,
+ const int subpel_x_qn,
+ ConvolveParams *conv_params, int bd) {
+ if (w == 2 || h == 2) {
+ av1_highbd_convolve_x_sr_c(src, src_stride, dst, dst_stride, w, h,
+ filter_params_x, subpel_x_qn, conv_params, bd);
+ return;
+ }
+
+ const int x_filter_taps = get_filter_tap(filter_params_x, subpel_x_qn);
+
+ if (x_filter_taps == 6) {
+ av1_highbd_convolve_x_sr_neon(src, src_stride, dst, dst_stride, w, h,
+ filter_params_x, subpel_x_qn, conv_params,
+ bd);
+ return;
+ }
+
+ const int horiz_offset = filter_params_x->taps / 2 - 1;
+ const int16_t *x_filter_ptr = av1_get_interp_filter_subpel_kernel(
+ filter_params_x, subpel_x_qn & SUBPEL_MASK);
+
+ src -= horiz_offset;
+
+ if (x_filter_taps == 12) {
+ highbd_convolve_x_sr_12tap_sve2(src, src_stride, dst, dst_stride, w, h,
+ x_filter_ptr, conv_params, bd);
+ return;
+ }
+
+ if (x_filter_taps == 8) {
+ highbd_convolve_x_sr_8tap_sve2(src, src_stride, dst, dst_stride, w, h,
+ x_filter_ptr, conv_params, bd);
+ return;
+ }
+
+ highbd_convolve_x_sr_4tap_sve2(src + 2, src_stride, dst, dst_stride, w, h,
+ x_filter_ptr, conv_params, bd);
+}
+
+static INLINE uint16x4_t highbd_convolve12_4_y(int16x8_t s0[2], int16x8_t s1[2],
+ int16x8_t s2[2],
+ int16x8_t filter_0_7,
+ int16x8_t filter_4_11,
+ uint16x4_t max) {
+ int64x2_t sum[2];
+
+ sum[0] = aom_svdot_lane_s16(vdupq_n_s64(0), s0[0], filter_0_7, 0);
+ sum[0] = aom_svdot_lane_s16(sum[0], s1[0], filter_0_7, 1);
+ sum[0] = aom_svdot_lane_s16(sum[0], s2[0], filter_4_11, 1);
+
+ sum[1] = aom_svdot_lane_s16(vdupq_n_s64(0), s0[1], filter_0_7, 0);
+ sum[1] = aom_svdot_lane_s16(sum[1], s1[1], filter_0_7, 1);
+ sum[1] = aom_svdot_lane_s16(sum[1], s2[1], filter_4_11, 1);
+
+ int32x4_t res_s32 = vcombine_s32(vmovn_s64(sum[0]), vmovn_s64(sum[1]));
+
+ uint16x4_t res = vqrshrun_n_s32(res_s32, FILTER_BITS);
+
+ return vmin_u16(res, max);
+}
+
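+// Note that the 12-tap vertical path below steps through the block four
+// columns at a time, unlike the eight-column steps used by the 8-tap path.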
+static INLINE void highbd_convolve_y_sr_12tap_sve2(
+ const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride,
+ int width, int height, const int16_t *y_filter_ptr, int bd) {
+ const int16x8_t y_filter_0_7 = vld1q_s16(y_filter_ptr);
+ const int16x8_t y_filter_4_11 = vld1q_s16(y_filter_ptr + 4);
+
+ uint16x8x3_t merge_block_tbl = vld1q_u16_x3(kDotProdMergeBlockTbl);
+  // Scale indices by the true vector length to avoid reading from an
+  // 'undefined' portion of a vector on a system with SVE vectors > 128-bit.
+ uint16x8_t correction0 =
+ vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000000000000ULL));
+ merge_block_tbl.val[0] = vaddq_u16(merge_block_tbl.val[0], correction0);
+
+ uint16x8_t correction1 =
+ vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000100000000ULL));
+ merge_block_tbl.val[1] = vaddq_u16(merge_block_tbl.val[1], correction1);
+
+ uint16x8_t correction2 =
+ vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000100010000ULL));
+ merge_block_tbl.val[2] = vaddq_u16(merge_block_tbl.val[2], correction2);
+
+ const uint16x4_t max = vdup_n_u16((1 << bd) - 1);
+
+ do {
+ int16_t *s = (int16_t *)src;
+ uint16_t *d = dst;
+ int h = height;
+
+ int16x4_t s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA;
+ load_s16_4x11(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6, &s7, &s8,
+ &s9, &sA);
+ s += 11 * src_stride;
+
+ int16x8_t s0123[2], s1234[2], s2345[2], s3456[2], s4567[2], s5678[2],
+ s6789[2], s789A[2];
+ transpose_concat_4x4(s0, s1, s2, s3, s0123);
+ transpose_concat_4x4(s1, s2, s3, s4, s1234);
+ transpose_concat_4x4(s2, s3, s4, s5, s2345);
+ transpose_concat_4x4(s3, s4, s5, s6, s3456);
+ transpose_concat_4x4(s4, s5, s6, s7, s4567);
+ transpose_concat_4x4(s5, s6, s7, s8, s5678);
+ transpose_concat_4x4(s6, s7, s8, s9, s6789);
+ transpose_concat_4x4(s7, s8, s9, sA, s789A);
+
+ do {
+ int16x4_t sB, sC, sD, sE;
+ load_s16_4x4(s, src_stride, &sB, &sC, &sD, &sE);
+
+ int16x8_t s89AB[2], s9ABC[2], sABCD[2], sBCDE[2];
+ transpose_concat_4x4(sB, sC, sD, sE, sBCDE);
+
+ // Use the above transpose and reuse data from the previous loop to get
+ // the rest.
+ aom_tbl2x2_s16(s789A, sBCDE, merge_block_tbl.val[0], s89AB);
+ aom_tbl2x2_s16(s789A, sBCDE, merge_block_tbl.val[1], s9ABC);
+ aom_tbl2x2_s16(s789A, sBCDE, merge_block_tbl.val[2], sABCD);
+
+ uint16x4_t d0 = highbd_convolve12_4_y(s0123, s4567, s89AB, y_filter_0_7,
+ y_filter_4_11, max);
+ uint16x4_t d1 = highbd_convolve12_4_y(s1234, s5678, s9ABC, y_filter_0_7,
+ y_filter_4_11, max);
+ uint16x4_t d2 = highbd_convolve12_4_y(s2345, s6789, sABCD, y_filter_0_7,
+ y_filter_4_11, max);
+ uint16x4_t d3 = highbd_convolve12_4_y(s3456, s789A, sBCDE, y_filter_0_7,
+ y_filter_4_11, max);
+
+ store_u16_4x4(d, dst_stride, d0, d1, d2, d3);
+
+ // Prepare block for next iteration - re-using as much as possible.
+ // Shuffle everything up four rows.
+ s0123[0] = s4567[0];
+ s0123[1] = s4567[1];
+ s1234[0] = s5678[0];
+ s1234[1] = s5678[1];
+ s2345[0] = s6789[0];
+ s2345[1] = s6789[1];
+ s3456[0] = s789A[0];
+ s3456[1] = s789A[1];
+ s4567[0] = s89AB[0];
+ s4567[1] = s89AB[1];
+ s5678[0] = s9ABC[0];
+ s5678[1] = s9ABC[1];
+ s6789[0] = sABCD[0];
+ s6789[1] = sABCD[1];
+ s789A[0] = sBCDE[0];
+ s789A[1] = sBCDE[1];
+
+ s += 4 * src_stride;
+ d += 4 * dst_stride;
+ h -= 4;
+ } while (h != 0);
+ src += 4;
+ dst += 4;
+ width -= 4;
+ } while (width != 0);
+}
+
+static INLINE uint16x4_t highbd_convolve8_4_y(int16x8_t samples_lo[2],
+ int16x8_t samples_hi[2],
+ int16x8_t filter,
+ uint16x4_t max) {
+ int64x2_t sum01 =
+ aom_svdot_lane_s16(vdupq_n_s64(0), samples_lo[0], filter, 0);
+ sum01 = aom_svdot_lane_s16(sum01, samples_hi[0], filter, 1);
+
+ int64x2_t sum23 =
+ aom_svdot_lane_s16(vdupq_n_s64(0), samples_lo[1], filter, 0);
+ sum23 = aom_svdot_lane_s16(sum23, samples_hi[1], filter, 1);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+ uint16x4_t res = vqrshrun_n_s32(sum0123, FILTER_BITS);
+ return vmin_u16(res, max);
+}
+
+static INLINE uint16x8_t highbd_convolve8_8_y(int16x8_t samples_lo[4],
+ int16x8_t samples_hi[4],
+ int16x8_t filter,
+ uint16x8_t max) {
+ int64x2_t sum01 =
+ aom_svdot_lane_s16(vdupq_n_s64(0), samples_lo[0], filter, 0);
+ sum01 = aom_svdot_lane_s16(sum01, samples_hi[0], filter, 1);
+
+ int64x2_t sum23 =
+ aom_svdot_lane_s16(vdupq_n_s64(0), samples_lo[1], filter, 0);
+ sum23 = aom_svdot_lane_s16(sum23, samples_hi[1], filter, 1);
+
+ int64x2_t sum45 =
+ aom_svdot_lane_s16(vdupq_n_s64(0), samples_lo[2], filter, 0);
+ sum45 = aom_svdot_lane_s16(sum45, samples_hi[2], filter, 1);
+
+ int64x2_t sum67 =
+ aom_svdot_lane_s16(vdupq_n_s64(0), samples_lo[3], filter, 0);
+ sum67 = aom_svdot_lane_s16(sum67, samples_hi[3], filter, 1);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+ int32x4_t sum4567 = vcombine_s32(vmovn_s64(sum45), vmovn_s64(sum67));
+ uint16x8_t res = vcombine_u16(vqrshrun_n_s32(sum0123, FILTER_BITS),
+ vqrshrun_n_s32(sum4567, FILTER_BITS));
+ return vminq_u16(res, max);
+}
+
+void highbd_convolve_y_sr_8tap_sve2(const uint16_t *src, ptrdiff_t src_stride,
+ uint16_t *dst, ptrdiff_t dst_stride,
+ int width, int height,
+ const int16_t *filter_y, int bd) {
+  assert(width >= 4 && height >= 4);
+
+ const int16x8_t y_filter = vld1q_s16(filter_y);
+
+ uint16x8x3_t merge_block_tbl = vld1q_u16_x3(kDotProdMergeBlockTbl);
+  // Scale indices by the true vector length to avoid reading from an
+  // 'undefined' portion of a vector on a system with SVE vectors > 128-bit.
+ uint16x8_t correction0 =
+ vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000000000000ULL));
+ merge_block_tbl.val[0] = vaddq_u16(merge_block_tbl.val[0], correction0);
+
+ uint16x8_t correction1 =
+ vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000100000000ULL));
+ merge_block_tbl.val[1] = vaddq_u16(merge_block_tbl.val[1], correction1);
+
+ uint16x8_t correction2 =
+ vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000100010000ULL));
+ merge_block_tbl.val[2] = vaddq_u16(merge_block_tbl.val[2], correction2);
+
+ if (width == 4) {
+ const uint16x4_t max = vdup_n_u16((1 << bd) - 1);
+ int16_t *s = (int16_t *)src;
+
+ int16x4_t s0, s1, s2, s3, s4, s5, s6;
+ load_s16_4x7(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6);
+ s += 7 * src_stride;
+
+ // This operation combines a conventional transpose and the sample permute
+ // required before computing the dot product.
+ int16x8_t s0123[2], s1234[2], s2345[2], s3456[2];
+ transpose_concat_4x4(s0, s1, s2, s3, s0123);
+ transpose_concat_4x4(s1, s2, s3, s4, s1234);
+ transpose_concat_4x4(s2, s3, s4, s5, s2345);
+ transpose_concat_4x4(s3, s4, s5, s6, s3456);
+
+ do {
+ int16x4_t s7, s8, s9, s10;
+ load_s16_4x4(s, src_stride, &s7, &s8, &s9, &s10);
+
+ int16x8_t s4567[2], s5678[2], s6789[2], s789A[2];
+ // Transpose and shuffle the 4 lines that were loaded.
+ transpose_concat_4x4(s7, s8, s9, s10, s789A);
+
+ // Merge new data into block from previous iteration.
+ aom_tbl2x2_s16(s3456, s789A, merge_block_tbl.val[0], s4567);
+ aom_tbl2x2_s16(s3456, s789A, merge_block_tbl.val[1], s5678);
+ aom_tbl2x2_s16(s3456, s789A, merge_block_tbl.val[2], s6789);
+
+ uint16x4_t d0 = highbd_convolve8_4_y(s0123, s4567, y_filter, max);
+ uint16x4_t d1 = highbd_convolve8_4_y(s1234, s5678, y_filter, max);
+ uint16x4_t d2 = highbd_convolve8_4_y(s2345, s6789, y_filter, max);
+ uint16x4_t d3 = highbd_convolve8_4_y(s3456, s789A, y_filter, max);
+
+ store_u16_4x4(dst, dst_stride, d0, d1, d2, d3);
+
+ // Prepare block for next iteration - re-using as much as possible.
+ // Shuffle everything up four rows.
+ s0123[0] = s4567[0];
+ s0123[1] = s4567[1];
+ s1234[0] = s5678[0];
+ s1234[1] = s5678[1];
+ s2345[0] = s6789[0];
+ s2345[1] = s6789[1];
+ s3456[0] = s789A[0];
+ s3456[1] = s789A[1];
+ s += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height != 0);
+ } else {
+ const uint16x8_t max = vdupq_n_u16((1 << bd) - 1);
+
+ do {
+ int h = height;
+ int16_t *s = (int16_t *)src;
+ uint16_t *d = dst;
+
+ int16x8_t s0, s1, s2, s3, s4, s5, s6;
+ load_s16_8x7(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6);
+ s += 7 * src_stride;
+
+      // This operation combines a conventional transpose and the sample
+      // permute required before computing the dot product.
+ int16x8_t s0123[4], s1234[4], s2345[4], s3456[4];
+ transpose_concat_8x4(s0, s1, s2, s3, s0123);
+ transpose_concat_8x4(s1, s2, s3, s4, s1234);
+ transpose_concat_8x4(s2, s3, s4, s5, s2345);
+ transpose_concat_8x4(s3, s4, s5, s6, s3456);
+
+ do {
+ int16x8_t s7, s8, s9, s10;
+ load_s16_8x4(s, src_stride, &s7, &s8, &s9, &s10);
+
+ int16x8_t s4567[4], s5678[4], s6789[4], s789A[4];
+ // Transpose and shuffle the 4 lines that were loaded.
+ transpose_concat_8x4(s7, s8, s9, s10, s789A);
+
+ // Merge new data into block from previous iteration.
+ aom_tbl2x4_s16(s3456, s789A, merge_block_tbl.val[0], s4567);
+ aom_tbl2x4_s16(s3456, s789A, merge_block_tbl.val[1], s5678);
+ aom_tbl2x4_s16(s3456, s789A, merge_block_tbl.val[2], s6789);
+
+ uint16x8_t d0 = highbd_convolve8_8_y(s0123, s4567, y_filter, max);
+ uint16x8_t d1 = highbd_convolve8_8_y(s1234, s5678, y_filter, max);
+ uint16x8_t d2 = highbd_convolve8_8_y(s2345, s6789, y_filter, max);
+ uint16x8_t d3 = highbd_convolve8_8_y(s3456, s789A, y_filter, max);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ // Prepare block for next iteration - re-using as much as possible.
+ // Shuffle everything up four rows.
+ s0123[0] = s4567[0];
+ s0123[1] = s4567[1];
+ s0123[2] = s4567[2];
+ s0123[3] = s4567[3];
+ s1234[0] = s5678[0];
+ s1234[1] = s5678[1];
+ s1234[2] = s5678[2];
+ s1234[3] = s5678[3];
+ s2345[0] = s6789[0];
+ s2345[1] = s6789[1];
+ s2345[2] = s6789[2];
+ s2345[3] = s6789[3];
+ s3456[0] = s789A[0];
+ s3456[1] = s789A[1];
+ s3456[2] = s789A[2];
+ s3456[3] = s789A[3];
+
+ s += 4 * src_stride;
+ d += 4 * dst_stride;
+ h -= 4;
+ } while (h != 0);
+ src += 8;
+ dst += 8;
+ width -= 8;
+ } while (width != 0);
+ }
+}
+
+static INLINE uint16x4_t highbd_convolve4_4_y(int16x8_t samples[2],
+ int16x8_t filter,
+ uint16x4_t max) {
+ int64x2_t sum01 = aom_svdot_lane_s16(vdupq_n_s64(0), samples[0], filter, 0);
+ int64x2_t sum23 = aom_svdot_lane_s16(vdupq_n_s64(0), samples[1], filter, 0);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+ uint16x4_t res = vqrshrun_n_s32(sum0123, FILTER_BITS);
+ return vmin_u16(res, max);
+}
+
+static INLINE uint16x8_t highbd_convolve4_8_y(int16x8_t samples[4],
+ int16x8_t filter,
+ uint16x8_t max) {
+ int64x2_t sum01 = aom_svdot_lane_s16(vdupq_n_s64(0), samples[0], filter, 0);
+ int64x2_t sum23 = aom_svdot_lane_s16(vdupq_n_s64(0), samples[1], filter, 0);
+ int64x2_t sum45 = aom_svdot_lane_s16(vdupq_n_s64(0), samples[2], filter, 0);
+ int64x2_t sum67 = aom_svdot_lane_s16(vdupq_n_s64(0), samples[3], filter, 0);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+ int32x4_t sum4567 = vcombine_s32(vmovn_s64(sum45), vmovn_s64(sum67));
+ uint16x8_t res = vcombine_u16(vqrshrun_n_s32(sum0123, FILTER_BITS),
+ vqrshrun_n_s32(sum4567, FILTER_BITS));
+ return vminq_u16(res, max);
+}
+
+void highbd_convolve_y_sr_4tap_sve2(const uint16_t *src, ptrdiff_t src_stride,
+ uint16_t *dst, ptrdiff_t dst_stride,
+ int width, int height,
+ const int16_t *filter_y, int bd) {
+  assert(width >= 4 && height >= 4);
+
+ const int16x8_t y_filter =
+ vcombine_s16(vld1_s16(filter_y + 2), vdup_n_s16(0));
+
+ if (width == 4) {
+ const uint16x4_t max = vdup_n_u16((1 << bd) - 1);
+ int16_t *s = (int16_t *)src;
+
+ int16x4_t s0, s1, s2;
+ load_s16_4x3(s, src_stride, &s0, &s1, &s2);
+ s += 3 * src_stride;
+
+ do {
+ int16x4_t s3, s4, s5, s6;
+ load_s16_4x4(s, src_stride, &s3, &s4, &s5, &s6);
+
+      // This operation combines a conventional transpose and the sample
+      // permute required before computing the dot product.
+ int16x8_t s0123[2], s1234[2], s2345[2], s3456[2];
+ transpose_concat_4x4(s0, s1, s2, s3, s0123);
+ transpose_concat_4x4(s1, s2, s3, s4, s1234);
+ transpose_concat_4x4(s2, s3, s4, s5, s2345);
+ transpose_concat_4x4(s3, s4, s5, s6, s3456);
+
+ uint16x4_t d0 = highbd_convolve4_4_y(s0123, y_filter, max);
+ uint16x4_t d1 = highbd_convolve4_4_y(s1234, y_filter, max);
+ uint16x4_t d2 = highbd_convolve4_4_y(s2345, y_filter, max);
+ uint16x4_t d3 = highbd_convolve4_4_y(s3456, y_filter, max);
+
+ store_u16_4x4(dst, dst_stride, d0, d1, d2, d3);
+
+ // Shuffle everything up four rows.
+ s0 = s4;
+ s1 = s5;
+ s2 = s6;
+
+ s += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height != 0);
+ } else {
+ const uint16x8_t max = vdupq_n_u16((1 << bd) - 1);
+
+ do {
+ int h = height;
+ int16_t *s = (int16_t *)src;
+ uint16_t *d = dst;
+
+ int16x8_t s0, s1, s2;
+ load_s16_8x3(s, src_stride, &s0, &s1, &s2);
+ s += 3 * src_stride;
+
+ do {
+ int16x8_t s3, s4, s5, s6;
+ load_s16_8x4(s, src_stride, &s3, &s4, &s5, &s6);
+
+ // This operation combines a conventional transpose and the sample
+ // permute required before computing the dot product.
+ int16x8_t s0123[4], s1234[4], s2345[4], s3456[4];
+ transpose_concat_8x4(s0, s1, s2, s3, s0123);
+ transpose_concat_8x4(s1, s2, s3, s4, s1234);
+ transpose_concat_8x4(s2, s3, s4, s5, s2345);
+ transpose_concat_8x4(s3, s4, s5, s6, s3456);
+
+ uint16x8_t d0 = highbd_convolve4_8_y(s0123, y_filter, max);
+ uint16x8_t d1 = highbd_convolve4_8_y(s1234, y_filter, max);
+ uint16x8_t d2 = highbd_convolve4_8_y(s2345, y_filter, max);
+ uint16x8_t d3 = highbd_convolve4_8_y(s3456, y_filter, max);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ // Shuffle everything up four rows.
+ s0 = s4;
+ s1 = s5;
+ s2 = s6;
+
+ s += 4 * src_stride;
+ d += 4 * dst_stride;
+ h -= 4;
+ } while (h != 0);
+ src += 8;
+ dst += 8;
+ width -= 8;
+ } while (width != 0);
+ }
+}
+
+void av1_highbd_convolve_y_sr_sve2(const uint16_t *src, int src_stride,
+ uint16_t *dst, int dst_stride, int w, int h,
+ const InterpFilterParams *filter_params_y,
+ const int subpel_y_qn, int bd) {
+ if (w == 2 || h == 2) {
+ av1_highbd_convolve_y_sr_c(src, src_stride, dst, dst_stride, w, h,
+ filter_params_y, subpel_y_qn, bd);
+ return;
+ }
+ const int y_filter_taps = get_filter_tap(filter_params_y, subpel_y_qn);
+
+ if (y_filter_taps == 6) {
+ av1_highbd_convolve_y_sr_neon(src, src_stride, dst, dst_stride, w, h,
+ filter_params_y, subpel_y_qn, bd);
+ return;
+ }
+
+ const int vert_offset = filter_params_y->taps / 2 - 1;
+ const int16_t *y_filter_ptr = av1_get_interp_filter_subpel_kernel(
+ filter_params_y, subpel_y_qn & SUBPEL_MASK);
+
+ src -= vert_offset * src_stride;
+
+ if (y_filter_taps > 8) {
+ highbd_convolve_y_sr_12tap_sve2(src, src_stride, dst, dst_stride, w, h,
+ y_filter_ptr, bd);
+ return;
+ }
+
+ if (y_filter_taps == 4) {
+ highbd_convolve_y_sr_4tap_sve2(src + 2 * src_stride, src_stride, dst,
+ dst_stride, w, h, y_filter_ptr, bd);
+ return;
+ }
+
+ highbd_convolve_y_sr_8tap_sve2(src, src_stride, dst, dst_stride, w, h,
+ y_filter_ptr, bd);
+}
+
+static INLINE uint16x4_t convolve12_4_2d_h(
+ int16x8_t s0, int16x8_t s1, int16x8_t filter_0_7, int16x8_t filter_4_11,
+ const int64x2_t offset, int32x4_t shift, uint16x8x4_t permute_tbl) {
+ int16x8_t permuted_samples[6];
+ permuted_samples[0] = aom_tbl_s16(s0, permute_tbl.val[0]);
+ permuted_samples[1] = aom_tbl_s16(s0, permute_tbl.val[1]);
+ permuted_samples[2] = aom_tbl2_s16(s0, s1, permute_tbl.val[2]);
+ permuted_samples[3] = aom_tbl2_s16(s0, s1, permute_tbl.val[3]);
+ permuted_samples[4] = aom_tbl_s16(s1, permute_tbl.val[0]);
+ permuted_samples[5] = aom_tbl_s16(s1, permute_tbl.val[1]);
+
+ int64x2_t sum01 =
+ aom_svdot_lane_s16(offset, permuted_samples[0], filter_0_7, 0);
+ sum01 = aom_svdot_lane_s16(sum01, permuted_samples[2], filter_0_7, 1);
+ sum01 = aom_svdot_lane_s16(sum01, permuted_samples[4], filter_4_11, 1);
+
+ int64x2_t sum23 =
+ aom_svdot_lane_s16(offset, permuted_samples[1], filter_0_7, 0);
+ sum23 = aom_svdot_lane_s16(sum23, permuted_samples[3], filter_0_7, 1);
+ sum23 = aom_svdot_lane_s16(sum23, permuted_samples[5], filter_4_11, 1);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+ sum0123 = vqrshlq_s32(sum0123, shift);
+ return vqmovun_s32(sum0123);
+}
+
+static INLINE uint16x8_t convolve12_8_2d_h(int16x8_t s0, int16x8_t s1,
+ int16x8_t s2, int16x8_t filter_0_7,
+ int16x8_t filter_4_11,
+ int64x2_t offset, int32x4_t shift,
+ uint16x8x4_t permute_tbl) {
+ int16x8_t permuted_samples[8];
+ permuted_samples[0] = aom_tbl_s16(s0, permute_tbl.val[0]);
+ permuted_samples[1] = aom_tbl_s16(s0, permute_tbl.val[1]);
+ permuted_samples[2] = aom_tbl2_s16(s0, s1, permute_tbl.val[2]);
+ permuted_samples[3] = aom_tbl2_s16(s0, s1, permute_tbl.val[3]);
+ permuted_samples[4] = aom_tbl_s16(s1, permute_tbl.val[0]);
+ permuted_samples[5] = aom_tbl_s16(s1, permute_tbl.val[1]);
+ permuted_samples[6] = aom_tbl2_s16(s1, s2, permute_tbl.val[2]);
+ permuted_samples[7] = aom_tbl2_s16(s1, s2, permute_tbl.val[3]);
+
+ int64x2_t sum01 =
+ aom_svdot_lane_s16(offset, permuted_samples[0], filter_0_7, 0);
+ sum01 = aom_svdot_lane_s16(sum01, permuted_samples[2], filter_0_7, 1);
+ sum01 = aom_svdot_lane_s16(sum01, permuted_samples[4], filter_4_11, 1);
+
+ int64x2_t sum23 =
+ aom_svdot_lane_s16(offset, permuted_samples[1], filter_0_7, 0);
+ sum23 = aom_svdot_lane_s16(sum23, permuted_samples[3], filter_0_7, 1);
+ sum23 = aom_svdot_lane_s16(sum23, permuted_samples[5], filter_4_11, 1);
+
+ int64x2_t sum45 =
+ aom_svdot_lane_s16(offset, permuted_samples[2], filter_0_7, 0);
+ sum45 = aom_svdot_lane_s16(sum45, permuted_samples[4], filter_0_7, 1);
+ sum45 = aom_svdot_lane_s16(sum45, permuted_samples[6], filter_4_11, 1);
+
+ int64x2_t sum67 =
+ aom_svdot_lane_s16(offset, permuted_samples[3], filter_0_7, 0);
+ sum67 = aom_svdot_lane_s16(sum67, permuted_samples[5], filter_0_7, 1);
+ sum67 = aom_svdot_lane_s16(sum67, permuted_samples[7], filter_4_11, 1);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+ int32x4_t sum4567 = vcombine_s32(vmovn_s64(sum45), vmovn_s64(sum67));
+
+ sum0123 = vqrshlq_s32(sum0123, shift);
+ sum4567 = vqrshlq_s32(sum4567, shift);
+
+ return vcombine_u16(vqmovun_s32(sum0123), vqmovun_s32(sum4567));
+}
+
+static INLINE void highbd_convolve_2d_sr_horiz_12tap_sve2(
+ const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride,
+ int width, int height, const int16_t *y_filter_ptr,
+ ConvolveParams *conv_params, const int x_offset) {
+ const int64x2_t offset = vdupq_n_s64(x_offset);
+ const int32x4_t shift = vdupq_n_s32(-conv_params->round_0);
+
+ const int16x8_t y_filter_0_7 = vld1q_s16(y_filter_ptr);
+ const int16x8_t y_filter_4_11 = vld1q_s16(y_filter_ptr + 4);
+
+ uint16x8x4_t permute_tbl = vld1q_u16_x4(kDotProdTbl);
+ // Scale indices by size of the true vector length to avoid reading from an
+ // 'undefined' portion of a vector on a system with SVE vectors > 128-bit.
+ uint16x8_t correction0 = vreinterpretq_u16_u64(vcombine_u64(
+ vdup_n_u64(0), vdup_n_u64(svcnth() * 0x0001000000000000ULL)));
+ permute_tbl.val[2] = vaddq_u16(permute_tbl.val[2], correction0);
+
+ uint16x8_t correction1 = vreinterpretq_u16_u64(
+ vcombine_u64(vdup_n_u64(svcnth() * 0x0001000100000000ULL),
+ vdup_n_u64(svcnth() * 0x0001000100010000ULL)));
+ permute_tbl.val[3] = vaddq_u16(permute_tbl.val[3], correction1);
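+  // For example, on a 256-bit SVE implementation svcnth() == 16, so an index
+  // meant to select element n of the second TBL2 operand must be n + 16
+  // rather than the n + 8 a fixed 128-bit table would encode. The corrections
+  // above add svcnth() to exactly the table lanes that refer to the second
+  // operand.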
+
+ if (width == 4) {
+ const int16_t *s = (const int16_t *)src;
+
+ do {
+ int16x8_t s0, s1, s2, s3, s4, s5, s6, s7;
+ load_s16_8x4(s, src_stride, &s0, &s2, &s4, &s6);
+ load_s16_8x4(s + 8, src_stride, &s1, &s3, &s5, &s7);
+
+ uint16x4_t d0 = convolve12_4_2d_h(s0, s1, y_filter_0_7, y_filter_4_11,
+ offset, shift, permute_tbl);
+ uint16x4_t d1 = convolve12_4_2d_h(s2, s3, y_filter_0_7, y_filter_4_11,
+ offset, shift, permute_tbl);
+ uint16x4_t d2 = convolve12_4_2d_h(s4, s5, y_filter_0_7, y_filter_4_11,
+ offset, shift, permute_tbl);
+ uint16x4_t d3 = convolve12_4_2d_h(s6, s7, y_filter_0_7, y_filter_4_11,
+ offset, shift, permute_tbl);
+
+ store_u16_4x4(dst, dst_stride, d0, d1, d2, d3);
+
+ dst += 4 * dst_stride;
+ s += 4 * src_stride;
+ height -= 4;
+ } while (height > 0);
+ } else {
+ do {
+ const int16_t *s = (const int16_t *)src;
+ uint16_t *d = dst;
+ int w = width;
+
+ do {
+ int16x8_t s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11;
+ load_s16_8x4(s, src_stride, &s0, &s3, &s6, &s9);
+ load_s16_8x4(s + 8, src_stride, &s1, &s4, &s7, &s10);
+ load_s16_8x4(s + 16, src_stride, &s2, &s5, &s8, &s11);
+
+ uint16x8_t d0 =
+ convolve12_8_2d_h(s0, s1, s2, y_filter_0_7, y_filter_4_11, offset,
+ shift, permute_tbl);
+ uint16x8_t d1 =
+ convolve12_8_2d_h(s3, s4, s5, y_filter_0_7, y_filter_4_11, offset,
+ shift, permute_tbl);
+ uint16x8_t d2 =
+ convolve12_8_2d_h(s6, s7, s8, y_filter_0_7, y_filter_4_11, offset,
+ shift, permute_tbl);
+ uint16x8_t d3 =
+ convolve12_8_2d_h(s9, s10, s11, y_filter_0_7, y_filter_4_11, offset,
+ shift, permute_tbl);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ s += 8;
+ d += 8;
+ w -= 8;
+ } while (w != 0);
+ src += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height > 0);
+ }
+}
+
+static INLINE uint16x8_t convolve8_8_2d_h(int16x8_t s0[8], int16x8_t filter,
+ int64x2_t offset, int32x4_t shift) {
+ int64x2_t sum[8];
+ sum[0] = aom_sdotq_s16(offset, s0[0], filter);
+ sum[1] = aom_sdotq_s16(offset, s0[1], filter);
+ sum[2] = aom_sdotq_s16(offset, s0[2], filter);
+ sum[3] = aom_sdotq_s16(offset, s0[3], filter);
+ sum[4] = aom_sdotq_s16(offset, s0[4], filter);
+ sum[5] = aom_sdotq_s16(offset, s0[5], filter);
+ sum[6] = aom_sdotq_s16(offset, s0[6], filter);
+ sum[7] = aom_sdotq_s16(offset, s0[7], filter);
+
+ sum[0] = vpaddq_s64(sum[0], sum[1]);
+ sum[2] = vpaddq_s64(sum[2], sum[3]);
+ sum[4] = vpaddq_s64(sum[4], sum[5]);
+ sum[6] = vpaddq_s64(sum[6], sum[7]);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum[0]), vmovn_s64(sum[2]));
+ int32x4_t sum4567 = vcombine_s32(vmovn_s64(sum[4]), vmovn_s64(sum[6]));
+
+ sum0123 = vqrshlq_s32(sum0123, shift);
+ sum4567 = vqrshlq_s32(sum4567, shift);
+
+ return vcombine_u16(vqmovun_s32(sum0123), vqmovun_s32(sum4567));
+}
+
+static INLINE void highbd_convolve_2d_sr_horiz_8tap_sve2(
+ const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride,
+ int width, int height, const int16_t *y_filter_ptr,
+ ConvolveParams *conv_params, const int x_offset) {
+ const int64x2_t offset = vdupq_n_s64(x_offset);
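+  // aom_sdotq_s16() accumulates into both 64-bit lanes and the lanes are
+  // pairwise-added inside convolve8_8_2d_h(), so an offset held in both lanes
+  // would be counted twice per output pixel. Keeping it in the low lane only
+  // means each pixel receives the offset exactly once.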
+ const int64x2_t offset_lo = vcombine_s64(vget_low_s64(offset), vdup_n_s64(0));
+ const int32x4_t shift = vdupq_n_s32(-conv_params->round_0);
+
+ const int16x8_t filter = vld1q_s16(y_filter_ptr);
+
+ do {
+ const int16_t *s = (const int16_t *)src;
+ uint16_t *d = dst;
+ int w = width;
+
+ do {
+ int16x8_t s0[8], s1[8], s2[8], s3[8];
+ load_s16_8x8(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3],
+ &s0[4], &s0[5], &s0[6], &s0[7]);
+ load_s16_8x8(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3],
+ &s1[4], &s1[5], &s1[6], &s1[7]);
+ load_s16_8x8(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3],
+ &s2[4], &s2[5], &s2[6], &s2[7]);
+ load_s16_8x8(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3],
+ &s3[4], &s3[5], &s3[6], &s3[7]);
+
+ uint16x8_t d0 = convolve8_8_2d_h(s0, filter, offset_lo, shift);
+ uint16x8_t d1 = convolve8_8_2d_h(s1, filter, offset_lo, shift);
+ uint16x8_t d2 = convolve8_8_2d_h(s2, filter, offset_lo, shift);
+ uint16x8_t d3 = convolve8_8_2d_h(s3, filter, offset_lo, shift);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ s += 8;
+ d += 8;
+ w -= 8;
+ } while (w != 0);
+ src += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height > 0);
+}
+
+static INLINE uint16x4_t convolve4_4_2d_h(int16x8_t s0, int16x8_t filter,
+ int64x2_t offset, int32x4_t shift,
+ uint16x8x2_t permute_tbl) {
+ int16x8_t permuted_samples0 = aom_tbl_s16(s0, permute_tbl.val[0]);
+ int16x8_t permuted_samples1 = aom_tbl_s16(s0, permute_tbl.val[1]);
+
+ int64x2_t sum01 = aom_svdot_lane_s16(offset, permuted_samples0, filter, 0);
+ int64x2_t sum23 = aom_svdot_lane_s16(offset, permuted_samples1, filter, 0);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+ sum0123 = vqrshlq_s32(sum0123, shift);
+ return vqmovun_s32(sum0123);
+}
+
+static INLINE uint16x8_t convolve4_8_2d_h(int16x8_t s0[8], int16x8_t filter,
+ int64x2_t offset, int32x4_t shift,
+ uint16x8_t tbl) {
+ int64x2_t sum04 = aom_svdot_lane_s16(offset, s0[0], filter, 0);
+ int64x2_t sum15 = aom_svdot_lane_s16(offset, s0[1], filter, 0);
+ int64x2_t sum26 = aom_svdot_lane_s16(offset, s0[2], filter, 0);
+ int64x2_t sum37 = aom_svdot_lane_s16(offset, s0[3], filter, 0);
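+  // With a 4-tap filter in lanes 0-3, each 64-bit lane above already holds a
+  // complete output pixel: s0[n] starts at sample n, so its two lanes give
+  // pixels n and n + 4. The results therefore arrive in the order
+  // 0 4 1 5 2 6 3 7, and the final aom_tbl_u16() below restores the natural
+  // ordering.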
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum04), vmovn_s64(sum15));
+ int32x4_t sum4567 = vcombine_s32(vmovn_s64(sum26), vmovn_s64(sum37));
+
+ sum0123 = vqrshlq_s32(sum0123, shift);
+ sum4567 = vqrshlq_s32(sum4567, shift);
+
+ uint16x8_t res = vcombine_u16(vqmovun_s32(sum0123), vqmovun_s32(sum4567));
+ return aom_tbl_u16(res, tbl);
+}
+
+static INLINE void highbd_convolve_2d_sr_horiz_4tap_sve2(
+ const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride,
+ int width, int height, const int16_t *x_filter_ptr,
+ ConvolveParams *conv_params, const int x_offset) {
+ const int64x2_t offset = vdupq_n_s64(x_offset);
+ const int32x4_t shift = vdupq_n_s32(-conv_params->round_0);
+
+ const int16x4_t x_filter = vld1_s16(x_filter_ptr + 2);
+ const int16x8_t filter = vcombine_s16(x_filter, vdup_n_s16(0));
+
+ if (width == 4) {
+ const int16_t *s = (const int16_t *)(src);
+
+ uint16x8x2_t permute_tbl = vld1q_u16_x2(kDotProdTbl);
+
+ do {
+ int16x8_t s0, s1, s2, s3;
+ load_s16_8x4(s, src_stride, &s0, &s1, &s2, &s3);
+
+ uint16x4_t d0 = convolve4_4_2d_h(s0, filter, offset, shift, permute_tbl);
+ uint16x4_t d1 = convolve4_4_2d_h(s1, filter, offset, shift, permute_tbl);
+ uint16x4_t d2 = convolve4_4_2d_h(s2, filter, offset, shift, permute_tbl);
+ uint16x4_t d3 = convolve4_4_2d_h(s3, filter, offset, shift, permute_tbl);
+
+ store_u16_4x4(dst, dst_stride, d0, d1, d2, d3);
+
+ s += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height > 0);
+ } else {
+ uint16x8_t idx = vld1q_u16(kDeinterleaveTbl);
+
+ do {
+ const int16_t *s = (const int16_t *)(src);
+ uint16_t *d = dst;
+ int w = width;
+
+ do {
+ int16x8_t s0[8], s1[8], s2[8], s3[8];
+ load_s16_8x8(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3],
+ &s0[4], &s0[5], &s0[6], &s0[7]);
+ load_s16_8x8(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3],
+ &s1[4], &s1[5], &s1[6], &s1[7]);
+ load_s16_8x8(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3],
+ &s2[4], &s2[5], &s2[6], &s2[7]);
+ load_s16_8x8(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3],
+ &s3[4], &s3[5], &s3[6], &s3[7]);
+
+ uint16x8_t d0 = convolve4_8_2d_h(s0, filter, offset, shift, idx);
+ uint16x8_t d1 = convolve4_8_2d_h(s1, filter, offset, shift, idx);
+ uint16x8_t d2 = convolve4_8_2d_h(s2, filter, offset, shift, idx);
+ uint16x8_t d3 = convolve4_8_2d_h(s3, filter, offset, shift, idx);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ s += 8;
+ d += 8;
+ w -= 8;
+ } while (w != 0);
+ src += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height > 0);
+ }
+}
+
+static INLINE uint16x4_t highbd_convolve12_4_2d_v(
+ int16x8_t s0[2], int16x8_t s1[2], int16x8_t s2[2], int16x8_t filter_0_7,
+ int16x8_t filter_4_11, int32x4_t shift, int64x2_t offset, uint16x4_t max) {
+ int64x2_t sum01 = aom_svdot_lane_s16(offset, s0[0], filter_0_7, 0);
+ sum01 = aom_svdot_lane_s16(sum01, s1[0], filter_0_7, 1);
+ sum01 = aom_svdot_lane_s16(sum01, s2[0], filter_4_11, 1);
+
+ int64x2_t sum23 = aom_svdot_lane_s16(offset, s0[1], filter_0_7, 0);
+ sum23 = aom_svdot_lane_s16(sum23, s1[1], filter_0_7, 1);
+ sum23 = aom_svdot_lane_s16(sum23, s2[1], filter_4_11, 1);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+ sum0123 = vshlq_s32(sum0123, shift);
+
+ uint16x4_t res = vqmovun_s32(sum0123);
+
+ return vmin_u16(res, max);
+}
+
+static INLINE void highbd_convolve_2d_sr_vert_12tap_sve2(
+ const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride,
+ int width, int height, const int16_t *y_filter_ptr,
+ ConvolveParams *conv_params, int bd, const int y_offset) {
+ const int64x2_t offset = vdupq_n_s64(y_offset);
+ const int32x4_t shift = vdupq_n_s32(-conv_params->round_1);
+
+ const int16x8_t y_filter_0_7 = vld1q_s16(y_filter_ptr);
+ const int16x8_t y_filter_4_11 = vld1q_s16(y_filter_ptr + 4);
+
+ uint16x8x3_t merge_block_tbl = vld1q_u16_x3(kDotProdMergeBlockTbl);
+ // Scale indices by size of the true vector length to avoid reading from an
+ // 'undefined' portion of a vector on a system with SVE vectors > 128-bit.
+ uint16x8_t correction0 =
+ vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000000000000ULL));
+ merge_block_tbl.val[0] = vaddq_u16(merge_block_tbl.val[0], correction0);
+
+ uint16x8_t correction1 =
+ vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000100000000ULL));
+ merge_block_tbl.val[1] = vaddq_u16(merge_block_tbl.val[1], correction1);
+
+ uint16x8_t correction2 =
+ vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000100010000ULL));
+ merge_block_tbl.val[2] = vaddq_u16(merge_block_tbl.val[2], correction2);
+
+ const uint16x4_t max = vdup_n_u16((1 << bd) - 1);
+
+ do {
+ int16_t *s = (int16_t *)src;
+ uint16_t *d = (uint16_t *)dst;
+ int h = height;
+
+ int16x4_t s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA;
+ load_s16_4x11(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6, &s7, &s8,
+ &s9, &sA);
+ s += 11 * src_stride;
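+    // A 12-tap filter needs 11 rows of context before the loop below; each
+    // iteration then loads 4 further rows and produces 4 rows of output.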
+
+ int16x8_t s0123[2], s1234[2], s2345[2], s3456[2], s4567[2], s5678[2],
+ s6789[2], s789A[2];
+ // This operation combines a conventional transpose and the sample permute
+ // required before computing the dot product.
+ transpose_concat_4x4(s0, s1, s2, s3, s0123);
+ transpose_concat_4x4(s1, s2, s3, s4, s1234);
+ transpose_concat_4x4(s2, s3, s4, s5, s2345);
+ transpose_concat_4x4(s3, s4, s5, s6, s3456);
+ transpose_concat_4x4(s4, s5, s6, s7, s4567);
+ transpose_concat_4x4(s5, s6, s7, s8, s5678);
+ transpose_concat_4x4(s6, s7, s8, s9, s6789);
+ transpose_concat_4x4(s7, s8, s9, sA, s789A);
+
+ do {
+ int16x4_t sB, sC, sD, sE;
+ load_s16_4x4(s, src_stride, &sB, &sC, &sD, &sE);
+
+ int16x8_t s89AB[2], s9ABC[2], sABCD[2], sBCDE[2];
+ transpose_concat_4x4(sB, sC, sD, sE, sBCDE);
+
+ // Use the above transpose and reuse data from the previous loop to get
+ // the rest.
+ aom_tbl2x2_s16(s789A, sBCDE, merge_block_tbl.val[0], s89AB);
+ aom_tbl2x2_s16(s789A, sBCDE, merge_block_tbl.val[1], s9ABC);
+ aom_tbl2x2_s16(s789A, sBCDE, merge_block_tbl.val[2], sABCD);
+
+ uint16x4_t d0 = highbd_convolve12_4_2d_v(
+ s0123, s4567, s89AB, y_filter_0_7, y_filter_4_11, shift, offset, max);
+ uint16x4_t d1 = highbd_convolve12_4_2d_v(
+ s1234, s5678, s9ABC, y_filter_0_7, y_filter_4_11, shift, offset, max);
+ uint16x4_t d2 = highbd_convolve12_4_2d_v(
+ s2345, s6789, sABCD, y_filter_0_7, y_filter_4_11, shift, offset, max);
+ uint16x4_t d3 = highbd_convolve12_4_2d_v(
+ s3456, s789A, sBCDE, y_filter_0_7, y_filter_4_11, shift, offset, max);
+
+ store_u16_4x4(d, dst_stride, d0, d1, d2, d3);
+
+ // Prepare block for next iteration - re-using as much as possible.
+ // Shuffle everything up four rows.
+ s0123[0] = s4567[0];
+ s0123[1] = s4567[1];
+ s1234[0] = s5678[0];
+ s1234[1] = s5678[1];
+ s2345[0] = s6789[0];
+ s2345[1] = s6789[1];
+ s3456[0] = s789A[0];
+ s3456[1] = s789A[1];
+ s4567[0] = s89AB[0];
+ s4567[1] = s89AB[1];
+ s5678[0] = s9ABC[0];
+ s5678[1] = s9ABC[1];
+ s6789[0] = sABCD[0];
+ s6789[1] = sABCD[1];
+ s789A[0] = sBCDE[0];
+ s789A[1] = sBCDE[1];
+
+ s += 4 * src_stride;
+ d += 4 * dst_stride;
+ h -= 4;
+ } while (h != 0);
+ src += 4;
+ dst += 4;
+ width -= 4;
+ } while (width != 0);
+}
+
+static INLINE uint16x4_t highbd_convolve8_4_2d_v(
+ int16x8_t samples_lo[2], int16x8_t samples_hi[2], int16x8_t filter,
+ int32x4_t shift, int64x2_t offset, uint16x4_t max) {
+ int64x2_t sum01 = aom_svdot_lane_s16(offset, samples_lo[0], filter, 0);
+ sum01 = aom_svdot_lane_s16(sum01, samples_hi[0], filter, 1);
+
+ int64x2_t sum23 = aom_svdot_lane_s16(offset, samples_lo[1], filter, 0);
+ sum23 = aom_svdot_lane_s16(sum23, samples_hi[1], filter, 1);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+ sum0123 = vshlq_s32(sum0123, shift);
+
+ uint16x4_t res = vqmovun_s32(sum0123);
+ return vmin_u16(res, max);
+}
+
+static INLINE uint16x8_t highbd_convolve8_8_2d_v(
+ int16x8_t samples_lo[4], int16x8_t samples_hi[4], int16x8_t filter,
+ int32x4_t shift, int64x2_t offset, uint16x8_t max) {
+ int64x2_t sum01 = aom_svdot_lane_s16(offset, samples_lo[0], filter, 0);
+ sum01 = aom_svdot_lane_s16(sum01, samples_hi[0], filter, 1);
+
+ int64x2_t sum23 = aom_svdot_lane_s16(offset, samples_lo[1], filter, 0);
+ sum23 = aom_svdot_lane_s16(sum23, samples_hi[1], filter, 1);
+
+ int64x2_t sum45 = aom_svdot_lane_s16(offset, samples_lo[2], filter, 0);
+ sum45 = aom_svdot_lane_s16(sum45, samples_hi[2], filter, 1);
+
+ int64x2_t sum67 = aom_svdot_lane_s16(offset, samples_lo[3], filter, 0);
+ sum67 = aom_svdot_lane_s16(sum67, samples_hi[3], filter, 1);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+ int32x4_t sum4567 = vcombine_s32(vmovn_s64(sum45), vmovn_s64(sum67));
+
+ sum0123 = vshlq_s32(sum0123, shift);
+ sum4567 = vshlq_s32(sum4567, shift);
+
+ uint16x8_t res = vcombine_u16(vqmovun_s32(sum0123), vqmovun_s32(sum4567));
+ return vminq_u16(res, max);
+}
+
+void highbd_convolve_2d_sr_vert_8tap_sve2(const uint16_t *src,
+ ptrdiff_t src_stride, uint16_t *dst,
+ ptrdiff_t dst_stride, int width,
+ int height, const int16_t *filter_y,
+ ConvolveParams *conv_params, int bd,
+ const int y_offset) {
+  assert(width >= 4 && height >= 4);
+ const int64x2_t offset = vdupq_n_s64(y_offset);
+ const int32x4_t shift = vdupq_n_s32(-conv_params->round_1);
+ const int16x8_t y_filter = vld1q_s16(filter_y);
+
+ uint16x8x3_t merge_block_tbl = vld1q_u16_x3(kDotProdMergeBlockTbl);
+ // Scale indices by size of the true vector length to avoid reading from an
+ // 'undefined' portion of a vector on a system with SVE vectors > 128-bit.
+ uint16x8_t correction0 =
+ vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000000000000ULL));
+ merge_block_tbl.val[0] = vaddq_u16(merge_block_tbl.val[0], correction0);
+
+ uint16x8_t correction1 =
+ vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000100000000ULL));
+ merge_block_tbl.val[1] = vaddq_u16(merge_block_tbl.val[1], correction1);
+
+ uint16x8_t correction2 =
+ vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000100010000ULL));
+ merge_block_tbl.val[2] = vaddq_u16(merge_block_tbl.val[2], correction2);
+
+ if (width == 4) {
+ const uint16x4_t max = vdup_n_u16((1 << bd) - 1);
+ int16_t *s = (int16_t *)src;
+
+ int16x4_t s0, s1, s2, s3, s4, s5, s6;
+ load_s16_4x7(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6);
+ s += 7 * src_stride;
+
+ // This operation combines a conventional transpose and the sample permute
+ // required before computing the dot product.
+ int16x8_t s0123[2], s1234[2], s2345[2], s3456[2];
+ transpose_concat_4x4(s0, s1, s2, s3, s0123);
+ transpose_concat_4x4(s1, s2, s3, s4, s1234);
+ transpose_concat_4x4(s2, s3, s4, s5, s2345);
+ transpose_concat_4x4(s3, s4, s5, s6, s3456);
+
+ do {
+ int16x4_t s7, s8, s9, s10;
+ load_s16_4x4(s, src_stride, &s7, &s8, &s9, &s10);
+
+ int16x8_t s4567[2], s5678[2], s6789[2], s789A[2];
+ // Transpose and shuffle the 4 lines that were loaded.
+ transpose_concat_4x4(s7, s8, s9, s10, s789A);
+
+ // Merge new data into block from previous iteration.
+ aom_tbl2x2_s16(s3456, s789A, merge_block_tbl.val[0], s4567);
+ aom_tbl2x2_s16(s3456, s789A, merge_block_tbl.val[1], s5678);
+ aom_tbl2x2_s16(s3456, s789A, merge_block_tbl.val[2], s6789);
+
+ uint16x4_t d0 =
+ highbd_convolve8_4_2d_v(s0123, s4567, y_filter, shift, offset, max);
+ uint16x4_t d1 =
+ highbd_convolve8_4_2d_v(s1234, s5678, y_filter, shift, offset, max);
+ uint16x4_t d2 =
+ highbd_convolve8_4_2d_v(s2345, s6789, y_filter, shift, offset, max);
+ uint16x4_t d3 =
+ highbd_convolve8_4_2d_v(s3456, s789A, y_filter, shift, offset, max);
+
+ store_u16_4x4(dst, dst_stride, d0, d1, d2, d3);
+
+ // Prepare block for next iteration - re-using as much as possible.
+ // Shuffle everything up four rows.
+ s0123[0] = s4567[0];
+ s0123[1] = s4567[1];
+ s1234[0] = s5678[0];
+ s1234[1] = s5678[1];
+ s2345[0] = s6789[0];
+ s2345[1] = s6789[1];
+ s3456[0] = s789A[0];
+ s3456[1] = s789A[1];
+
+ s += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height != 0);
+ } else {
+ const uint16x8_t max = vdupq_n_u16((1 << bd) - 1);
+
+ do {
+ int h = height;
+ int16_t *s = (int16_t *)src;
+ uint16_t *d = dst;
+
+ int16x8_t s0, s1, s2, s3, s4, s5, s6;
+ load_s16_8x7(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6);
+ s += 7 * src_stride;
+
+ // This operation combines a conventional transpose and the sample permute
+ // required before computing the dot product.
+ int16x8_t s0123[4], s1234[4], s2345[4], s3456[4];
+ transpose_concat_8x4(s0, s1, s2, s3, s0123);
+ transpose_concat_8x4(s1, s2, s3, s4, s1234);
+ transpose_concat_8x4(s2, s3, s4, s5, s2345);
+ transpose_concat_8x4(s3, s4, s5, s6, s3456);
+
+ do {
+ int16x8_t s7, s8, s9, s10;
+ load_s16_8x4(s, src_stride, &s7, &s8, &s9, &s10);
+
+ int16x8_t s4567[4], s5678[4], s6789[4], s789A[4];
+ // Transpose and shuffle the 4 lines that were loaded.
+ transpose_concat_8x4(s7, s8, s9, s10, s789A);
+
+ // Merge new data into block from previous iteration.
+ aom_tbl2x4_s16(s3456, s789A, merge_block_tbl.val[0], s4567);
+ aom_tbl2x4_s16(s3456, s789A, merge_block_tbl.val[1], s5678);
+ aom_tbl2x4_s16(s3456, s789A, merge_block_tbl.val[2], s6789);
+
+ uint16x8_t d0 =
+ highbd_convolve8_8_2d_v(s0123, s4567, y_filter, shift, offset, max);
+ uint16x8_t d1 =
+ highbd_convolve8_8_2d_v(s1234, s5678, y_filter, shift, offset, max);
+ uint16x8_t d2 =
+ highbd_convolve8_8_2d_v(s2345, s6789, y_filter, shift, offset, max);
+ uint16x8_t d3 =
+ highbd_convolve8_8_2d_v(s3456, s789A, y_filter, shift, offset, max);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ // Prepare block for next iteration - re-using as much as possible.
+ // Shuffle everything up four rows.
+ s0123[0] = s4567[0];
+ s0123[1] = s4567[1];
+ s0123[2] = s4567[2];
+ s0123[3] = s4567[3];
+ s1234[0] = s5678[0];
+ s1234[1] = s5678[1];
+ s1234[2] = s5678[2];
+ s1234[3] = s5678[3];
+ s2345[0] = s6789[0];
+ s2345[1] = s6789[1];
+ s2345[2] = s6789[2];
+ s2345[3] = s6789[3];
+ s3456[0] = s789A[0];
+ s3456[1] = s789A[1];
+ s3456[2] = s789A[2];
+ s3456[3] = s789A[3];
+
+ s += 4 * src_stride;
+ d += 4 * dst_stride;
+ h -= 4;
+ } while (h != 0);
+ src += 8;
+ dst += 8;
+ width -= 8;
+ } while (width != 0);
+ }
+}
+
+static INLINE uint16x4_t highbd_convolve4_4_2d_v(int16x8_t samples[2],
+ int16x8_t filter,
+ int32x4_t shift,
+ int64x2_t offset,
+ uint16x4_t max) {
+ int64x2_t sum01 = aom_svdot_lane_s16(offset, samples[0], filter, 0);
+ int64x2_t sum23 = aom_svdot_lane_s16(offset, samples[1], filter, 0);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+ sum0123 = vshlq_s32(sum0123, shift);
+
+ uint16x4_t res = vqmovun_s32(sum0123);
+ return vmin_u16(res, max);
+}
+
+static INLINE uint16x8_t highbd_convolve4_8_2d_v(int16x8_t samples[4],
+ int16x8_t filter,
+ int32x4_t shift,
+ int64x2_t offset,
+ uint16x8_t max) {
+ int64x2_t sum01 = aom_svdot_lane_s16(offset, samples[0], filter, 0);
+ int64x2_t sum23 = aom_svdot_lane_s16(offset, samples[1], filter, 0);
+ int64x2_t sum45 = aom_svdot_lane_s16(offset, samples[2], filter, 0);
+ int64x2_t sum67 = aom_svdot_lane_s16(offset, samples[3], filter, 0);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+ int32x4_t sum4567 = vcombine_s32(vmovn_s64(sum45), vmovn_s64(sum67));
+
+ sum0123 = vshlq_s32(sum0123, shift);
+ sum4567 = vshlq_s32(sum4567, shift);
+
+ uint16x8_t res = vcombine_u16(vqmovun_s32(sum0123), vqmovun_s32(sum4567));
+ return vminq_u16(res, max);
+}
+
+void highbd_convolve_2d_sr_vert_4tap_sve2(const uint16_t *src,
+ ptrdiff_t src_stride, uint16_t *dst,
+ ptrdiff_t dst_stride, int width,
+ int height, const int16_t *filter_y,
+ ConvolveParams *conv_params, int bd,
+ const int y_offset) {
+  assert(width >= 4 && height >= 4);
+ const int64x2_t offset = vdupq_n_s64(y_offset);
+ const int32x4_t shift = vdupq_n_s32(-conv_params->round_1);
+
+ const int16x8_t y_filter =
+ vcombine_s16(vld1_s16(filter_y + 2), vdup_n_s16(0));
+
+ if (width == 4) {
+ const uint16x4_t max = vdup_n_u16((1 << bd) - 1);
+ int16_t *s = (int16_t *)(src);
+
+ int16x4_t s0, s1, s2;
+ load_s16_4x3(s, src_stride, &s0, &s1, &s2);
+ s += 3 * src_stride;
+
+ do {
+ int16x4_t s3, s4, s5, s6;
+ load_s16_4x4(s, src_stride, &s3, &s4, &s5, &s6);
+
+ // This operation combines a conventional transpose and the sample permute
+ // required before computing the dot product.
+ int16x8_t s0123[2], s1234[2], s2345[2], s3456[2];
+ transpose_concat_4x4(s0, s1, s2, s3, s0123);
+ transpose_concat_4x4(s1, s2, s3, s4, s1234);
+ transpose_concat_4x4(s2, s3, s4, s5, s2345);
+ transpose_concat_4x4(s3, s4, s5, s6, s3456);
+
+ uint16x4_t d0 =
+ highbd_convolve4_4_2d_v(s0123, y_filter, shift, offset, max);
+ uint16x4_t d1 =
+ highbd_convolve4_4_2d_v(s1234, y_filter, shift, offset, max);
+ uint16x4_t d2 =
+ highbd_convolve4_4_2d_v(s2345, y_filter, shift, offset, max);
+ uint16x4_t d3 =
+ highbd_convolve4_4_2d_v(s3456, y_filter, shift, offset, max);
+
+ store_u16_4x4(dst, dst_stride, d0, d1, d2, d3);
+
+ // Shuffle everything up four rows.
+ s0 = s4;
+ s1 = s5;
+ s2 = s6;
+
+ s += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height != 0);
+ } else {
+ const uint16x8_t max = vdupq_n_u16((1 << bd) - 1);
+
+ do {
+ int h = height;
+ int16_t *s = (int16_t *)(src);
+ uint16_t *d = dst;
+
+ int16x8_t s0, s1, s2;
+ load_s16_8x3(s, src_stride, &s0, &s1, &s2);
+ s += 3 * src_stride;
+
+ do {
+ int16x8_t s3, s4, s5, s6;
+ load_s16_8x4(s, src_stride, &s3, &s4, &s5, &s6);
+
+ // This operation combines a conventional transpose and the sample
+ // permute required before computing the dot product.
+ int16x8_t s0123[4], s1234[4], s2345[4], s3456[4];
+ transpose_concat_8x4(s0, s1, s2, s3, s0123);
+ transpose_concat_8x4(s1, s2, s3, s4, s1234);
+ transpose_concat_8x4(s2, s3, s4, s5, s2345);
+ transpose_concat_8x4(s3, s4, s5, s6, s3456);
+
+ uint16x8_t d0 =
+ highbd_convolve4_8_2d_v(s0123, y_filter, shift, offset, max);
+ uint16x8_t d1 =
+ highbd_convolve4_8_2d_v(s1234, y_filter, shift, offset, max);
+ uint16x8_t d2 =
+ highbd_convolve4_8_2d_v(s2345, y_filter, shift, offset, max);
+ uint16x8_t d3 =
+ highbd_convolve4_8_2d_v(s3456, y_filter, shift, offset, max);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ // Shuffle everything up four rows.
+ s0 = s4;
+ s1 = s5;
+ s2 = s6;
+
+ s += 4 * src_stride;
+ d += 4 * dst_stride;
+ h -= 4;
+ } while (h != 0);
+ src += 8;
+ dst += 8;
+ width -= 8;
+ } while (width != 0);
+ }
+}
+
+void av1_highbd_convolve_2d_sr_sve2(const uint16_t *src, int src_stride,
+ uint16_t *dst, int dst_stride, int w, int h,
+ const InterpFilterParams *filter_params_x,
+ const InterpFilterParams *filter_params_y,
+ const int subpel_x_qn,
+ const int subpel_y_qn,
+ ConvolveParams *conv_params, int bd) {
+ if (w == 2 || h == 2) {
+ av1_highbd_convolve_2d_sr_c(src, src_stride, dst, dst_stride, w, h,
+ filter_params_x, filter_params_y, subpel_x_qn,
+ subpel_y_qn, conv_params, bd);
+ return;
+ }
+
+ DECLARE_ALIGNED(16, uint16_t,
+ im_block[(MAX_SB_SIZE + MAX_FILTER_TAP) * MAX_SB_SIZE]);
+ const int x_filter_taps = get_filter_tap(filter_params_x, subpel_x_qn);
+ const int y_filter_taps = get_filter_tap(filter_params_y, subpel_y_qn);
+
+ if (x_filter_taps == 6 || y_filter_taps == 6) {
+ av1_highbd_convolve_2d_sr_neon(src, src_stride, dst, dst_stride, w, h,
+ filter_params_x, filter_params_y,
+ subpel_x_qn, subpel_y_qn, conv_params, bd);
+ return;
+ }
+
+ const int clamped_x_taps = x_filter_taps < 4 ? 4 : x_filter_taps;
+ const int clamped_y_taps = y_filter_taps < 4 ? 4 : y_filter_taps;
+
+ const int im_stride = MAX_SB_SIZE;
+ const int vert_offset = clamped_y_taps / 2 - 1;
+ const int horiz_offset = clamped_x_taps / 2 - 1;
+ const int x_offset = (1 << (bd + FILTER_BITS - 1));
+ const int y_offset_bits = bd + 2 * FILTER_BITS - conv_params->round_0;
+  // The extra shim of (1 << (conv_params->round_1 - 1)) allows us to use a
+  // plain shift (vshlq by -round_1) instead of a rounding saturating shift.
+ const int y_offset =
+ (1 << (conv_params->round_1 - 1)) - (1 << (y_offset_bits - 1));
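+  // The (1 << (y_offset_bits - 1)) term cancels the x_offset added in the
+  // horizontal pass: after the round_0 shift and the vertical filter's DC
+  // gain of (1 << FILTER_BITS), that offset grows to exactly
+  // 1 << (y_offset_bits - 1).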
+
+ const uint16_t *src_ptr = src - vert_offset * src_stride - horiz_offset;
+
+ const int16_t *x_filter_ptr = av1_get_interp_filter_subpel_kernel(
+ filter_params_x, subpel_x_qn & SUBPEL_MASK);
+ const int16_t *y_filter_ptr = av1_get_interp_filter_subpel_kernel(
+ filter_params_y, subpel_y_qn & SUBPEL_MASK);
+ const int im_h = h + clamped_y_taps - 1;
+
+ if (x_filter_taps > 8) {
+ highbd_convolve_2d_sr_horiz_12tap_sve2(src_ptr, src_stride, im_block,
+ im_stride, w, im_h, x_filter_ptr,
+ conv_params, x_offset);
+
+ highbd_convolve_2d_sr_vert_12tap_sve2(im_block, im_stride, dst, dst_stride,
+ w, h, y_filter_ptr, conv_params, bd,
+ y_offset);
+ return;
+ }
+
+ if (x_filter_taps <= 4) {
+ highbd_convolve_2d_sr_horiz_4tap_sve2(src_ptr, src_stride, im_block,
+ im_stride, w, im_h, x_filter_ptr,
+ conv_params, x_offset);
+ } else {
+ highbd_convolve_2d_sr_horiz_8tap_sve2(src_ptr, src_stride, im_block,
+ im_stride, w, im_h, x_filter_ptr,
+ conv_params, x_offset);
+ }
+
+ if (y_filter_taps <= 4) {
+ highbd_convolve_2d_sr_vert_4tap_sve2(im_block, im_stride, dst, dst_stride,
+ w, h, y_filter_ptr, conv_params, bd,
+ y_offset);
+ } else {
+ highbd_convolve_2d_sr_vert_8tap_sve2(im_block, im_stride, dst, dst_stride,
+ w, h, y_filter_ptr, conv_params, bd,
+ y_offset);
+ }
+}
diff --git a/third_party/aom/av1/common/arm/highbd_convolve_sve2.h b/third_party/aom/av1/common/arm/highbd_convolve_sve2.h
new file mode 100644
index 0000000000..05e23deef4
--- /dev/null
+++ b/third_party/aom/av1/common/arm/highbd_convolve_sve2.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2023, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#ifndef AOM_AV1_COMMON_ARM_HIGHBD_CONVOLVE_SVE2_H_
+#define AOM_AV1_COMMON_ARM_HIGHBD_CONVOLVE_SVE2_H_
+
+#include <arm_neon.h>
+
+#include "aom_dsp/arm/aom_neon_sve2_bridge.h"
+
+// clang-format off
+DECLARE_ALIGNED(16, static const uint16_t, kDotProdMergeBlockTbl[24]) = {
+ // Shift left and insert new last column in transposed 4x4 block.
+ 1, 2, 3, 0, 5, 6, 7, 4,
+ // Shift left and insert two new columns in transposed 4x4 block.
+ 2, 3, 0, 1, 6, 7, 4, 5,
+ // Shift left and insert three new columns in transposed 4x4 block.
+ 3, 0, 1, 2, 7, 4, 5, 6,
+};
+// clang-format on
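+// For example, in the callers s3456[0] holds { 30 40 50 60 | 31 41 51 61 }
+// and s789A[0] holds { 70 80 90 A0 | 71 81 91 A1 } (row then column). After
+// the runtime correction applied by the callers (which makes the second TBL2
+// operand's elements start at index svcnth()), row 0 of this table produces
+// { 40 50 60 70 | 41 51 61 71 }: the block shifted left one column with the
+// new last column inserted.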
+
+static INLINE void transpose_concat_4x4(int16x4_t s0, int16x4_t s1,
+ int16x4_t s2, int16x4_t s3,
+ int16x8_t res[2]) {
+ // Transpose 16-bit elements and concatenate result rows as follows:
+ // s0: 00, 01, 02, 03
+ // s1: 10, 11, 12, 13
+ // s2: 20, 21, 22, 23
+ // s3: 30, 31, 32, 33
+ //
+ // res[0]: 00 10 20 30 01 11 21 31
+ // res[1]: 02 12 22 32 03 13 23 33
+
+ int16x8_t s0q = vcombine_s16(s0, vdup_n_s16(0));
+ int16x8_t s1q = vcombine_s16(s1, vdup_n_s16(0));
+ int16x8_t s2q = vcombine_s16(s2, vdup_n_s16(0));
+ int16x8_t s3q = vcombine_s16(s3, vdup_n_s16(0));
+
+ int32x4_t s01 = vreinterpretq_s32_s16(vzip1q_s16(s0q, s1q));
+ int32x4_t s23 = vreinterpretq_s32_s16(vzip1q_s16(s2q, s3q));
+
+ int32x4x2_t s0123 = vzipq_s32(s01, s23);
+
+ res[0] = vreinterpretq_s16_s32(s0123.val[0]);
+ res[1] = vreinterpretq_s16_s32(s0123.val[1]);
+}
+
+static INLINE void transpose_concat_8x4(int16x8_t s0, int16x8_t s1,
+ int16x8_t s2, int16x8_t s3,
+ int16x8_t res[4]) {
+ // Transpose 16-bit elements and concatenate result rows as follows:
+ // s0: 00, 01, 02, 03, 04, 05, 06, 07
+ // s1: 10, 11, 12, 13, 14, 15, 16, 17
+ // s2: 20, 21, 22, 23, 24, 25, 26, 27
+ // s3: 30, 31, 32, 33, 34, 35, 36, 37
+ //
+ // res[0]: 00 10 20 30 01 11 21 31
+ // res[1]: 02 12 22 32 03 13 23 33
+ // res[2]: 04 14 24 34 05 15 25 35
+ // res[3]: 06 16 26 36 07 17 27 37
+
+ int16x8x2_t tr01_16 = vzipq_s16(s0, s1);
+ int16x8x2_t tr23_16 = vzipq_s16(s2, s3);
+ int32x4x2_t tr01_32 = vzipq_s32(vreinterpretq_s32_s16(tr01_16.val[0]),
+ vreinterpretq_s32_s16(tr23_16.val[0]));
+ int32x4x2_t tr23_32 = vzipq_s32(vreinterpretq_s32_s16(tr01_16.val[1]),
+ vreinterpretq_s32_s16(tr23_16.val[1]));
+
+ res[0] = vreinterpretq_s16_s32(tr01_32.val[0]);
+ res[1] = vreinterpretq_s16_s32(tr01_32.val[1]);
+ res[2] = vreinterpretq_s16_s32(tr23_32.val[0]);
+ res[3] = vreinterpretq_s16_s32(tr23_32.val[1]);
+}
+
+static INLINE void aom_tbl2x4_s16(int16x8_t t0[4], int16x8_t t1[4],
+ uint16x8_t tbl, int16x8_t res[4]) {
+ res[0] = aom_tbl2_s16(t0[0], t1[0], tbl);
+ res[1] = aom_tbl2_s16(t0[1], t1[1], tbl);
+ res[2] = aom_tbl2_s16(t0[2], t1[2], tbl);
+ res[3] = aom_tbl2_s16(t0[3], t1[3], tbl);
+}
+
+static INLINE void aom_tbl2x2_s16(int16x8_t t0[2], int16x8_t t1[2],
+ uint16x8_t tbl, int16x8_t res[2]) {
+ res[0] = aom_tbl2_s16(t0[0], t1[0], tbl);
+ res[1] = aom_tbl2_s16(t0[1], t1[1], tbl);
+}
+
+#endif // AOM_AV1_COMMON_ARM_HIGHBD_CONVOLVE_SVE2_H_
diff --git a/third_party/aom/av1/common/arm/highbd_warp_plane_neon.c b/third_party/aom/av1/common/arm/highbd_warp_plane_neon.c
index c6f1e3ad92..89647bc921 100644
--- a/third_party/aom/av1/common/arm/highbd_warp_plane_neon.c
+++ b/third_party/aom/av1/common/arm/highbd_warp_plane_neon.c
@@ -23,8 +23,8 @@
#include "config/av1_rtcd.h"
#include "highbd_warp_plane_neon.h"
-static INLINE int16x8_t highbd_horizontal_filter_4x1_f4(uint16x8x2_t in, int bd,
- int sx, int alpha) {
+static AOM_FORCE_INLINE int16x8_t
+highbd_horizontal_filter_4x1_f4(uint16x8x2_t in, int bd, int sx, int alpha) {
int16x8_t f[4];
load_filters_4(f, sx, alpha);
@@ -57,8 +57,8 @@ static INLINE int16x8_t highbd_horizontal_filter_4x1_f4(uint16x8x2_t in, int bd,
return vcombine_s16(vmovn_s32(res), vdup_n_s16(0));
}
-static INLINE int16x8_t highbd_horizontal_filter_8x1_f8(uint16x8x2_t in, int bd,
- int sx, int alpha) {
+static AOM_FORCE_INLINE int16x8_t
+highbd_horizontal_filter_8x1_f8(uint16x8x2_t in, int bd, int sx, int alpha) {
int16x8_t f[8];
load_filters_8(f, sx, alpha);
@@ -111,8 +111,8 @@ static INLINE int16x8_t highbd_horizontal_filter_8x1_f8(uint16x8x2_t in, int bd,
return vcombine_s16(vmovn_s32(res0), vmovn_s32(res1));
}
-static INLINE int16x8_t highbd_horizontal_filter_4x1_f1(uint16x8x2_t in, int bd,
- int sx) {
+static AOM_FORCE_INLINE int16x8_t
+highbd_horizontal_filter_4x1_f1(uint16x8x2_t in, int bd, int sx) {
int16x8_t f = load_filters_1(sx);
int16x8_t rv0 = vextq_s16(vreinterpretq_s16_u16(in.val[0]),
@@ -144,8 +144,8 @@ static INLINE int16x8_t highbd_horizontal_filter_4x1_f1(uint16x8x2_t in, int bd,
return vcombine_s16(vmovn_s32(res), vdup_n_s16(0));
}
-static INLINE int16x8_t highbd_horizontal_filter_8x1_f1(uint16x8x2_t in, int bd,
- int sx) {
+static AOM_FORCE_INLINE int16x8_t
+highbd_horizontal_filter_8x1_f1(uint16x8x2_t in, int bd, int sx) {
int16x8_t f = load_filters_1(sx);
int16x8_t rv0 = vextq_s16(vreinterpretq_s16_u16(in.val[0]),
@@ -197,7 +197,8 @@ static INLINE int16x8_t highbd_horizontal_filter_8x1_f1(uint16x8x2_t in, int bd,
return vcombine_s16(vmovn_s32(res0), vmovn_s32(res1));
}
-static INLINE int32x4_t vertical_filter_4x1_f1(const int16x8_t *tmp, int sy) {
+static AOM_FORCE_INLINE int32x4_t vertical_filter_4x1_f1(const int16x8_t *tmp,
+ int sy) {
const int16x8_t f = load_filters_1(sy);
const int16x4_t f0123 = vget_low_s16(f);
const int16x4_t f4567 = vget_high_s16(f);
@@ -213,7 +214,8 @@ static INLINE int32x4_t vertical_filter_4x1_f1(const int16x8_t *tmp, int sy) {
return m0123;
}
-static INLINE int32x4x2_t vertical_filter_8x1_f1(const int16x8_t *tmp, int sy) {
+static AOM_FORCE_INLINE int32x4x2_t vertical_filter_8x1_f1(const int16x8_t *tmp,
+ int sy) {
const int16x8_t f = load_filters_1(sy);
const int16x4_t f0123 = vget_low_s16(f);
const int16x4_t f4567 = vget_high_s16(f);
@@ -238,8 +240,8 @@ static INLINE int32x4x2_t vertical_filter_8x1_f1(const int16x8_t *tmp, int sy) {
return (int32x4x2_t){ { m0123, m4567 } };
}
-static INLINE int32x4_t vertical_filter_4x1_f4(const int16x8_t *tmp, int sy,
- int gamma) {
+static AOM_FORCE_INLINE int32x4_t vertical_filter_4x1_f4(const int16x8_t *tmp,
+ int sy, int gamma) {
int16x8_t s0, s1, s2, s3;
transpose_elems_s16_4x8(
vget_low_s16(tmp[0]), vget_low_s16(tmp[1]), vget_low_s16(tmp[2]),
@@ -262,8 +264,8 @@ static INLINE int32x4_t vertical_filter_4x1_f4(const int16x8_t *tmp, int sy,
return horizontal_add_4d_s32x4(m0123);
}
-static INLINE int32x4x2_t vertical_filter_8x1_f8(const int16x8_t *tmp, int sy,
- int gamma) {
+static AOM_FORCE_INLINE int32x4x2_t vertical_filter_8x1_f8(const int16x8_t *tmp,
+ int sy, int gamma) {
int16x8_t s0 = tmp[0];
int16x8_t s1 = tmp[1];
int16x8_t s2 = tmp[2];
diff --git a/third_party/aom/av1/common/arm/highbd_warp_plane_neon.h b/third_party/aom/av1/common/arm/highbd_warp_plane_neon.h
index 3b8982898e..48af4a707b 100644
--- a/third_party/aom/av1/common/arm/highbd_warp_plane_neon.h
+++ b/third_party/aom/av1/common/arm/highbd_warp_plane_neon.h
@@ -23,29 +23,31 @@
#include "av1/common/warped_motion.h"
#include "config/av1_rtcd.h"
-static INLINE int16x8_t highbd_horizontal_filter_4x1_f4(uint16x8x2_t in, int bd,
- int sx, int alpha);
+static AOM_FORCE_INLINE int16x8_t
+highbd_horizontal_filter_4x1_f4(uint16x8x2_t in, int bd, int sx, int alpha);
-static INLINE int16x8_t highbd_horizontal_filter_8x1_f8(uint16x8x2_t in, int bd,
- int sx, int alpha);
+static AOM_FORCE_INLINE int16x8_t
+highbd_horizontal_filter_8x1_f8(uint16x8x2_t in, int bd, int sx, int alpha);
-static INLINE int16x8_t highbd_horizontal_filter_4x1_f1(uint16x8x2_t in, int bd,
- int sx);
+static AOM_FORCE_INLINE int16x8_t
+highbd_horizontal_filter_4x1_f1(uint16x8x2_t in, int bd, int sx);
-static INLINE int16x8_t highbd_horizontal_filter_8x1_f1(uint16x8x2_t in, int bd,
- int sx);
+static AOM_FORCE_INLINE int16x8_t
+highbd_horizontal_filter_8x1_f1(uint16x8x2_t in, int bd, int sx);
-static INLINE int32x4_t vertical_filter_4x1_f1(const int16x8_t *tmp, int sy);
+static AOM_FORCE_INLINE int32x4_t vertical_filter_4x1_f1(const int16x8_t *tmp,
+ int sy);
-static INLINE int32x4x2_t vertical_filter_8x1_f1(const int16x8_t *tmp, int sy);
+static AOM_FORCE_INLINE int32x4x2_t vertical_filter_8x1_f1(const int16x8_t *tmp,
+ int sy);
-static INLINE int32x4_t vertical_filter_4x1_f4(const int16x8_t *tmp, int sy,
- int gamma);
+static AOM_FORCE_INLINE int32x4_t vertical_filter_4x1_f4(const int16x8_t *tmp,
+ int sy, int gamma);
-static INLINE int32x4x2_t vertical_filter_8x1_f8(const int16x8_t *tmp, int sy,
- int gamma);
+static AOM_FORCE_INLINE int32x4x2_t vertical_filter_8x1_f8(const int16x8_t *tmp,
+ int sy, int gamma);
-static INLINE int16x8_t load_filters_1(int ofs) {
+static AOM_FORCE_INLINE int16x8_t load_filters_1(int ofs) {
const int ofs0 = ROUND_POWER_OF_TWO(ofs, WARPEDDIFF_PREC_BITS);
const int16_t *base =
@@ -53,7 +55,8 @@ static INLINE int16x8_t load_filters_1(int ofs) {
return vld1q_s16(base + ofs0 * 8);
}
-static INLINE void load_filters_4(int16x8_t out[], int ofs, int stride) {
+static AOM_FORCE_INLINE void load_filters_4(int16x8_t out[], int ofs,
+ int stride) {
const int ofs0 = ROUND_POWER_OF_TWO(ofs + stride * 0, WARPEDDIFF_PREC_BITS);
const int ofs1 = ROUND_POWER_OF_TWO(ofs + stride * 1, WARPEDDIFF_PREC_BITS);
const int ofs2 = ROUND_POWER_OF_TWO(ofs + stride * 2, WARPEDDIFF_PREC_BITS);
@@ -67,7 +70,8 @@ static INLINE void load_filters_4(int16x8_t out[], int ofs, int stride) {
out[3] = vld1q_s16(base + ofs3 * 8);
}
-static INLINE void load_filters_8(int16x8_t out[], int ofs, int stride) {
+static AOM_FORCE_INLINE void load_filters_8(int16x8_t out[], int ofs,
+ int stride) {
const int ofs0 = ROUND_POWER_OF_TWO(ofs + stride * 0, WARPEDDIFF_PREC_BITS);
const int ofs1 = ROUND_POWER_OF_TWO(ofs + stride * 1, WARPEDDIFF_PREC_BITS);
const int ofs2 = ROUND_POWER_OF_TWO(ofs + stride * 2, WARPEDDIFF_PREC_BITS);
@@ -89,16 +93,18 @@ static INLINE void load_filters_8(int16x8_t out[], int ofs, int stride) {
out[7] = vld1q_s16(base + ofs7 * 8);
}
-static INLINE uint16x4_t clip_pixel_highbd_vec(int32x4_t val, int bd) {
+static AOM_FORCE_INLINE uint16x4_t clip_pixel_highbd_vec(int32x4_t val,
+ int bd) {
const int limit = (1 << bd) - 1;
return vqmovun_s32(vminq_s32(val, vdupq_n_s32(limit)));
}
-static INLINE void warp_affine_horizontal(const uint16_t *ref, int width,
- int height, int stride, int p_width,
- int16_t alpha, int16_t beta, int iy4,
- int sx4, int ix4, int16x8_t tmp[],
- int bd) {
+static AOM_FORCE_INLINE void warp_affine_horizontal(const uint16_t *ref,
+ int width, int height,
+ int stride, int p_width,
+ int16_t alpha, int16_t beta,
+ int iy4, int sx4, int ix4,
+ int16x8_t tmp[], int bd) {
const int round0 = (bd == 12) ? ROUND0_BITS + 2 : ROUND0_BITS;
if (ix4 <= -7) {
@@ -197,7 +203,7 @@ static INLINE void warp_affine_horizontal(const uint16_t *ref, int width,
}
}
-static INLINE void highbd_vertical_filter_4x1_f4(
+static AOM_FORCE_INLINE void highbd_vertical_filter_4x1_f4(
uint16_t *pred, int p_stride, int bd, uint16_t *dst, int dst_stride,
bool is_compound, bool do_average, bool use_dist_wtd_comp_avg, int fwd,
int bwd, int16_t gamma, const int16x8_t *tmp, int i, int sy, int j) {
@@ -253,7 +259,7 @@ static INLINE void highbd_vertical_filter_4x1_f4(
vst1_u16(dst16, res0);
}
-static INLINE void highbd_vertical_filter_8x1_f8(
+static AOM_FORCE_INLINE void highbd_vertical_filter_8x1_f8(
uint16_t *pred, int p_stride, int bd, uint16_t *dst, int dst_stride,
bool is_compound, bool do_average, bool use_dist_wtd_comp_avg, int fwd,
int bwd, int16_t gamma, const int16x8_t *tmp, int i, int sy, int j) {
@@ -328,7 +334,7 @@ static INLINE void highbd_vertical_filter_8x1_f8(
vst1_u16(dst16 + 4, res1);
}
-static INLINE void warp_affine_vertical(
+static AOM_FORCE_INLINE void warp_affine_vertical(
uint16_t *pred, int p_width, int p_height, int p_stride, int bd,
uint16_t *dst, int dst_stride, bool is_compound, bool do_average,
bool use_dist_wtd_comp_avg, int fwd, int bwd, int16_t gamma, int16_t delta,
@@ -354,7 +360,7 @@ static INLINE void warp_affine_vertical(
}
}
-static INLINE void highbd_warp_affine_common(
+static AOM_FORCE_INLINE void highbd_warp_affine_common(
const int32_t *mat, const uint16_t *ref, int width, int height, int stride,
uint16_t *pred, int p_col, int p_row, int p_width, int p_height,
int p_stride, int subsampling_x, int subsampling_y, int bd,
diff --git a/third_party/aom/av1/common/arm/highbd_warp_plane_sve.c b/third_party/aom/av1/common/arm/highbd_warp_plane_sve.c
index 7a14f21846..87e033fd00 100644
--- a/third_party/aom/av1/common/arm/highbd_warp_plane_sve.c
+++ b/third_party/aom/av1/common/arm/highbd_warp_plane_sve.c
@@ -15,7 +15,7 @@
#include <arm_neon_sve_bridge.h>
#include "aom_dsp/aom_dsp_common.h"
-#include "aom_dsp/arm/dot_sve.h"
+#include "aom_dsp/arm/aom_neon_sve_bridge.h"
#include "aom_dsp/arm/mem_neon.h"
#include "aom_dsp/arm/transpose_neon.h"
#include "aom_ports/mem.h"
@@ -24,8 +24,8 @@
#include "config/av1_rtcd.h"
#include "highbd_warp_plane_neon.h"
-static INLINE int16x8_t highbd_horizontal_filter_4x1_f4(uint16x8x2_t in, int bd,
- int sx, int alpha) {
+static AOM_FORCE_INLINE int16x8_t
+highbd_horizontal_filter_4x1_f4(uint16x8x2_t in, int bd, int sx, int alpha) {
int16x8_t f[4];
load_filters_4(f, sx, alpha);
@@ -55,8 +55,8 @@ static INLINE int16x8_t highbd_horizontal_filter_4x1_f4(uint16x8x2_t in, int bd,
return vcombine_s16(vmovn_s32(res), vdup_n_s16(0));
}
-static INLINE int16x8_t highbd_horizontal_filter_8x1_f8(uint16x8x2_t in, int bd,
- int sx, int alpha) {
+static AOM_FORCE_INLINE int16x8_t
+highbd_horizontal_filter_8x1_f8(uint16x8x2_t in, int bd, int sx, int alpha) {
int16x8_t f[8];
load_filters_8(f, sx, alpha);
@@ -103,8 +103,8 @@ static INLINE int16x8_t highbd_horizontal_filter_8x1_f8(uint16x8x2_t in, int bd,
return vcombine_s16(vmovn_s32(res0), vmovn_s32(res1));
}
-static INLINE int16x8_t highbd_horizontal_filter_4x1_f1(uint16x8x2_t in, int bd,
- int sx) {
+static AOM_FORCE_INLINE int16x8_t
+highbd_horizontal_filter_4x1_f1(uint16x8x2_t in, int bd, int sx) {
int16x8_t f = load_filters_1(sx);
int16x8_t rv0 = vextq_s16(vreinterpretq_s16_u16(in.val[0]),
@@ -133,8 +133,8 @@ static INLINE int16x8_t highbd_horizontal_filter_4x1_f1(uint16x8x2_t in, int bd,
return vcombine_s16(vmovn_s32(res), vdup_n_s16(0));
}
-static INLINE int16x8_t highbd_horizontal_filter_8x1_f1(uint16x8x2_t in, int bd,
- int sx) {
+static AOM_FORCE_INLINE int16x8_t
+highbd_horizontal_filter_8x1_f1(uint16x8x2_t in, int bd, int sx) {
int16x8_t f = load_filters_1(sx);
int16x8_t rv0 = vextq_s16(vreinterpretq_s16_u16(in.val[0]),
@@ -180,7 +180,8 @@ static INLINE int16x8_t highbd_horizontal_filter_8x1_f1(uint16x8x2_t in, int bd,
return vcombine_s16(vmovn_s32(res0), vmovn_s32(res1));
}
-static INLINE int32x4_t vertical_filter_4x1_f1(const int16x8_t *tmp, int sy) {
+static AOM_FORCE_INLINE int32x4_t vertical_filter_4x1_f1(const int16x8_t *tmp,
+ int sy) {
const int16x8_t f = load_filters_1(sy);
const int16x4_t f0123 = vget_low_s16(f);
const int16x4_t f4567 = vget_high_s16(f);
@@ -197,7 +198,8 @@ static INLINE int32x4_t vertical_filter_4x1_f1(const int16x8_t *tmp, int sy) {
return m0123;
}
-static INLINE int32x4x2_t vertical_filter_8x1_f1(const int16x8_t *tmp, int sy) {
+static AOM_FORCE_INLINE int32x4x2_t vertical_filter_8x1_f1(const int16x8_t *tmp,
+ int sy) {
const int16x8_t f = load_filters_1(sy);
const int16x4_t f0123 = vget_low_s16(f);
const int16x4_t f4567 = vget_high_s16(f);
@@ -223,8 +225,8 @@ static INLINE int32x4x2_t vertical_filter_8x1_f1(const int16x8_t *tmp, int sy) {
return (int32x4x2_t){ { m0123, m4567 } };
}
-static INLINE int32x4_t vertical_filter_4x1_f4(const int16x8_t *tmp, int sy,
- int gamma) {
+static AOM_FORCE_INLINE int32x4_t vertical_filter_4x1_f4(const int16x8_t *tmp,
+ int sy, int gamma) {
int16x8_t s0, s1, s2, s3;
transpose_elems_s16_4x8(
vget_low_s16(tmp[0]), vget_low_s16(tmp[1]), vget_low_s16(tmp[2]),
@@ -244,8 +246,8 @@ static INLINE int32x4_t vertical_filter_4x1_f4(const int16x8_t *tmp, int sy,
return vcombine_s32(vmovn_s64(m01), vmovn_s64(m23));
}
-static INLINE int32x4x2_t vertical_filter_8x1_f8(const int16x8_t *tmp, int sy,
- int gamma) {
+static AOM_FORCE_INLINE int32x4x2_t vertical_filter_8x1_f8(const int16x8_t *tmp,
+ int sy, int gamma) {
int16x8_t s0 = tmp[0];
int16x8_t s1 = tmp[1];
int16x8_t s2 = tmp[2];
diff --git a/third_party/aom/av1/common/arm/warp_plane_neon.c b/third_party/aom/av1/common/arm/warp_plane_neon.c
index 4723154398..546aa2965b 100644
--- a/third_party/aom/av1/common/arm/warp_plane_neon.c
+++ b/third_party/aom/av1/common/arm/warp_plane_neon.c
@@ -11,8 +11,8 @@
#include "warp_plane_neon.h"
-static INLINE int16x8_t horizontal_filter_4x1_f4(const uint8x16_t in, int sx,
- int alpha) {
+static AOM_FORCE_INLINE int16x8_t horizontal_filter_4x1_f4(const uint8x16_t in,
+ int sx, int alpha) {
const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1));
// Loading the 8 filter taps
@@ -39,8 +39,8 @@ static INLINE int16x8_t horizontal_filter_4x1_f4(const uint8x16_t in, int sx,
return vreinterpretq_s16_u16(res);
}
-static INLINE int16x8_t horizontal_filter_8x1_f8(const uint8x16_t in, int sx,
- int alpha) {
+static AOM_FORCE_INLINE int16x8_t horizontal_filter_8x1_f8(const uint8x16_t in,
+ int sx, int alpha) {
const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1));
// Loading the 8 filter taps
@@ -75,7 +75,8 @@ static INLINE int16x8_t horizontal_filter_8x1_f8(const uint8x16_t in, int sx,
return vreinterpretq_s16_u16(res);
}
-static INLINE int16x8_t horizontal_filter_4x1_f1(const uint8x16_t in, int sx) {
+static AOM_FORCE_INLINE int16x8_t horizontal_filter_4x1_f1(const uint8x16_t in,
+ int sx) {
const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1));
int16x8_t f_s16 =
@@ -101,7 +102,8 @@ static INLINE int16x8_t horizontal_filter_4x1_f1(const uint8x16_t in, int sx) {
return vreinterpretq_s16_u16(res);
}
-static INLINE int16x8_t horizontal_filter_8x1_f1(const uint8x16_t in, int sx) {
+static AOM_FORCE_INLINE int16x8_t horizontal_filter_8x1_f1(const uint8x16_t in,
+ int sx) {
const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1));
int16x8_t f_s16 =
@@ -135,8 +137,8 @@ static INLINE int16x8_t horizontal_filter_8x1_f1(const uint8x16_t in, int sx) {
return vreinterpretq_s16_u16(res);
}
-static INLINE void vertical_filter_4x1_f1(const int16x8_t *src, int32x4_t *res,
- int sy) {
+static AOM_FORCE_INLINE void vertical_filter_4x1_f1(const int16x8_t *src,
+ int32x4_t *res, int sy) {
int16x4_t s0 = vget_low_s16(src[0]);
int16x4_t s1 = vget_low_s16(src[1]);
int16x4_t s2 = vget_low_s16(src[2]);
@@ -161,8 +163,9 @@ static INLINE void vertical_filter_4x1_f1(const int16x8_t *src, int32x4_t *res,
*res = m0123;
}
-static INLINE void vertical_filter_4x1_f4(const int16x8_t *src, int32x4_t *res,
- int sy, int gamma) {
+static AOM_FORCE_INLINE void vertical_filter_4x1_f4(const int16x8_t *src,
+ int32x4_t *res, int sy,
+ int gamma) {
int16x8_t s0, s1, s2, s3;
transpose_elems_s16_4x8(
vget_low_s16(src[0]), vget_low_s16(src[1]), vget_low_s16(src[2]),
@@ -186,9 +189,10 @@ static INLINE void vertical_filter_4x1_f4(const int16x8_t *src, int32x4_t *res,
*res = horizontal_add_4d_s32x4(m0123_pairs);
}
-static INLINE void vertical_filter_8x1_f1(const int16x8_t *src,
- int32x4_t *res_low,
- int32x4_t *res_high, int sy) {
+static AOM_FORCE_INLINE void vertical_filter_8x1_f1(const int16x8_t *src,
+ int32x4_t *res_low,
+ int32x4_t *res_high,
+ int sy) {
int16x8_t s0 = src[0];
int16x8_t s1 = src[1];
int16x8_t s2 = src[2];
@@ -223,10 +227,10 @@ static INLINE void vertical_filter_8x1_f1(const int16x8_t *src,
*res_high = m4567;
}
-static INLINE void vertical_filter_8x1_f8(const int16x8_t *src,
- int32x4_t *res_low,
- int32x4_t *res_high, int sy,
- int gamma) {
+static AOM_FORCE_INLINE void vertical_filter_8x1_f8(const int16x8_t *src,
+ int32x4_t *res_low,
+ int32x4_t *res_high, int sy,
+ int gamma) {
int16x8_t s0 = src[0];
int16x8_t s1 = src[1];
int16x8_t s2 = src[2];
diff --git a/third_party/aom/av1/common/arm/warp_plane_neon.h b/third_party/aom/av1/common/arm/warp_plane_neon.h
index 5afd72f4ab..eece007ef3 100644
--- a/third_party/aom/av1/common/arm/warp_plane_neon.h
+++ b/third_party/aom/av1/common/arm/warp_plane_neon.h
@@ -24,32 +24,37 @@
#include "av1/common/warped_motion.h"
#include "av1/common/scale.h"
-static INLINE int16x8_t horizontal_filter_4x1_f4(const uint8x16_t in, int sx,
- int alpha);
+static AOM_FORCE_INLINE int16x8_t horizontal_filter_4x1_f4(const uint8x16_t in,
+ int sx, int alpha);
-static INLINE int16x8_t horizontal_filter_8x1_f8(const uint8x16_t in, int sx,
- int alpha);
+static AOM_FORCE_INLINE int16x8_t horizontal_filter_8x1_f8(const uint8x16_t in,
+ int sx, int alpha);
-static INLINE int16x8_t horizontal_filter_4x1_f1(const uint8x16_t in, int sx);
+static AOM_FORCE_INLINE int16x8_t horizontal_filter_4x1_f1(const uint8x16_t in,
+ int sx);
-static INLINE int16x8_t horizontal_filter_8x1_f1(const uint8x16_t in, int sx);
+static AOM_FORCE_INLINE int16x8_t horizontal_filter_8x1_f1(const uint8x16_t in,
+ int sx);
-static INLINE void vertical_filter_4x1_f1(const int16x8_t *src, int32x4_t *res,
- int sy);
+static AOM_FORCE_INLINE void vertical_filter_4x1_f1(const int16x8_t *src,
+ int32x4_t *res, int sy);
-static INLINE void vertical_filter_4x1_f4(const int16x8_t *src, int32x4_t *res,
- int sy, int gamma);
+static AOM_FORCE_INLINE void vertical_filter_4x1_f4(const int16x8_t *src,
+ int32x4_t *res, int sy,
+ int gamma);
-static INLINE void vertical_filter_8x1_f1(const int16x8_t *src,
- int32x4_t *res_low,
- int32x4_t *res_high, int sy);
+static AOM_FORCE_INLINE void vertical_filter_8x1_f1(const int16x8_t *src,
+ int32x4_t *res_low,
+ int32x4_t *res_high,
+ int sy);
-static INLINE void vertical_filter_8x1_f8(const int16x8_t *src,
- int32x4_t *res_low,
- int32x4_t *res_high, int sy,
- int gamma);
+static AOM_FORCE_INLINE void vertical_filter_8x1_f8(const int16x8_t *src,
+ int32x4_t *res_low,
+ int32x4_t *res_high, int sy,
+ int gamma);
-static INLINE void load_filters_4(int16x8_t out[], int offset, int stride) {
+static AOM_FORCE_INLINE void load_filters_4(int16x8_t out[], int offset,
+ int stride) {
out[0] = vld1q_s16((int16_t *)(av1_warped_filter + ((offset + 0 * stride) >>
WARPEDDIFF_PREC_BITS)));
out[1] = vld1q_s16((int16_t *)(av1_warped_filter + ((offset + 1 * stride) >>
@@ -60,7 +65,8 @@ static INLINE void load_filters_4(int16x8_t out[], int offset, int stride) {
WARPEDDIFF_PREC_BITS)));
}
-static INLINE void load_filters_8(int16x8_t out[], int offset, int stride) {
+static AOM_FORCE_INLINE void load_filters_8(int16x8_t out[], int offset,
+ int stride) {
out[0] = vld1q_s16((int16_t *)(av1_warped_filter + ((offset + 0 * stride) >>
WARPEDDIFF_PREC_BITS)));
out[1] = vld1q_s16((int16_t *)(av1_warped_filter + ((offset + 1 * stride) >>
@@ -79,16 +85,14 @@ static INLINE void load_filters_8(int16x8_t out[], int offset, int stride) {
WARPEDDIFF_PREC_BITS)));
}
-static INLINE int clamp_iy(int iy, int height) {
+static AOM_FORCE_INLINE int clamp_iy(int iy, int height) {
return clamp(iy, 0, height - 1);
}
-static INLINE void warp_affine_horizontal(const uint8_t *ref, int width,
- int height, int stride, int p_width,
- int p_height, int16_t alpha,
- int16_t beta, const int64_t x4,
- const int64_t y4, const int i,
- int16x8_t tmp[]) {
+static AOM_FORCE_INLINE void warp_affine_horizontal(
+ const uint8_t *ref, int width, int height, int stride, int p_width,
+ int p_height, int16_t alpha, int16_t beta, const int64_t x4,
+ const int64_t y4, const int i, int16x8_t tmp[]) {
const int bd = 8;
const int reduce_bits_horiz = ROUND0_BITS;
const int height_limit = AOMMIN(8, p_height - i) + 7;
@@ -197,7 +201,7 @@ static INLINE void warp_affine_horizontal(const uint8_t *ref, int width,
}
}
-static INLINE void warp_affine_vertical(
+static AOM_FORCE_INLINE void warp_affine_vertical(
uint8_t *pred, int p_width, int p_height, int p_stride, int is_compound,
uint16_t *dst, int dst_stride, int do_average, int use_dist_wtd_comp_avg,
int16_t gamma, int16_t delta, const int64_t y4, const int i, const int j,
@@ -325,7 +329,7 @@ static INLINE void warp_affine_vertical(
}
}
-static INLINE void av1_warp_affine_common(
+static AOM_FORCE_INLINE void av1_warp_affine_common(
const int32_t *mat, const uint8_t *ref, int width, int height, int stride,
uint8_t *pred, int p_col, int p_row, int p_width, int p_height,
int p_stride, int subsampling_x, int subsampling_y,
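For reference, the load_filters_4()/load_filters_8() helpers declared above index av1_warped_filter in fixed point: row k of the output uses table row (offset + k * stride) >> WARPEDDIFF_PREC_BITS. A scalar sketch of the same lookup (illustrative only; the helper name and the 8x8 output layout here are assumptions, not libaom code; av1_warped_filter and WARPEDDIFF_PREC_BITS come from av1/common/warped_motion.h):

#include <string.h>

/* Scalar equivalent of one load_filters_8() call: copy the eight 8-tap
 * filter rows selected by the fixed-point offset/stride pair. */
static void load_filters_scalar(int16_t out[8][8], int offset, int stride) {
  for (int k = 0; k < 8; ++k) {
    const int row = (offset + k * stride) >> WARPEDDIFF_PREC_BITS;
    memcpy(out[k], av1_warped_filter[row], sizeof(out[k]));
  }
}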
diff --git a/third_party/aom/av1/common/arm/warp_plane_neon_i8mm.c b/third_party/aom/av1/common/arm/warp_plane_neon_i8mm.c
index 39e3ad99f4..22a1be17b5 100644
--- a/third_party/aom/av1/common/arm/warp_plane_neon_i8mm.c
+++ b/third_party/aom/av1/common/arm/warp_plane_neon_i8mm.c
@@ -17,8 +17,8 @@ DECLARE_ALIGNED(16, static const uint8_t, usdot_permute_idx[48]) = {
8, 9, 10, 11, 9, 10, 11, 12, 10, 11, 12, 13, 11, 12, 13, 14
};
-static INLINE int16x8_t horizontal_filter_4x1_f4(const uint8x16_t in, int sx,
- int alpha) {
+static AOM_FORCE_INLINE int16x8_t horizontal_filter_4x1_f4(const uint8x16_t in,
+ int sx, int alpha) {
const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1));
// Loading the 8 filter taps
@@ -45,8 +45,8 @@ static INLINE int16x8_t horizontal_filter_4x1_f4(const uint8x16_t in, int sx,
return vreinterpretq_s16_u16(res);
}
-static INLINE int16x8_t horizontal_filter_8x1_f8(const uint8x16_t in, int sx,
- int alpha) {
+static AOM_FORCE_INLINE int16x8_t horizontal_filter_8x1_f8(const uint8x16_t in,
+ int sx, int alpha) {
const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1));
// Loading the 8 filter taps
@@ -83,7 +83,8 @@ static INLINE int16x8_t horizontal_filter_8x1_f8(const uint8x16_t in, int sx,
return vreinterpretq_s16_u16(res);
}
-static INLINE int16x8_t horizontal_filter_4x1_f1(const uint8x16_t in, int sx) {
+static AOM_FORCE_INLINE int16x8_t horizontal_filter_4x1_f1(const uint8x16_t in,
+ int sx) {
const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1));
int16x8_t f_s16 =
@@ -112,7 +113,8 @@ static INLINE int16x8_t horizontal_filter_4x1_f1(const uint8x16_t in, int sx) {
return vreinterpretq_s16_u16(res);
}
-static INLINE int16x8_t horizontal_filter_8x1_f1(const uint8x16_t in, int sx) {
+static AOM_FORCE_INLINE int16x8_t horizontal_filter_8x1_f1(const uint8x16_t in,
+ int sx) {
const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1));
int16x8_t f_s16 =
@@ -149,8 +151,8 @@ static INLINE int16x8_t horizontal_filter_8x1_f1(const uint8x16_t in, int sx) {
return vreinterpretq_s16_u16(res);
}
-static INLINE void vertical_filter_4x1_f1(const int16x8_t *src, int32x4_t *res,
- int sy) {
+static AOM_FORCE_INLINE void vertical_filter_4x1_f1(const int16x8_t *src,
+ int32x4_t *res, int sy) {
int16x4_t s0 = vget_low_s16(src[0]);
int16x4_t s1 = vget_low_s16(src[1]);
int16x4_t s2 = vget_low_s16(src[2]);
@@ -175,8 +177,9 @@ static INLINE void vertical_filter_4x1_f1(const int16x8_t *src, int32x4_t *res,
*res = m0123;
}
-static INLINE void vertical_filter_4x1_f4(const int16x8_t *src, int32x4_t *res,
- int sy, int gamma) {
+static AOM_FORCE_INLINE void vertical_filter_4x1_f4(const int16x8_t *src,
+ int32x4_t *res, int sy,
+ int gamma) {
int16x8_t s0, s1, s2, s3;
transpose_elems_s16_4x8(
vget_low_s16(src[0]), vget_low_s16(src[1]), vget_low_s16(src[2]),
@@ -200,9 +203,10 @@ static INLINE void vertical_filter_4x1_f4(const int16x8_t *src, int32x4_t *res,
*res = horizontal_add_4d_s32x4(m0123_pairs);
}
-static INLINE void vertical_filter_8x1_f1(const int16x8_t *src,
- int32x4_t *res_low,
- int32x4_t *res_high, int sy) {
+static AOM_FORCE_INLINE void vertical_filter_8x1_f1(const int16x8_t *src,
+ int32x4_t *res_low,
+ int32x4_t *res_high,
+ int sy) {
int16x8_t s0 = src[0];
int16x8_t s1 = src[1];
int16x8_t s2 = src[2];
@@ -237,10 +241,10 @@ static INLINE void vertical_filter_8x1_f1(const int16x8_t *src,
*res_high = m4567;
}
-static INLINE void vertical_filter_8x1_f8(const int16x8_t *src,
- int32x4_t *res_low,
- int32x4_t *res_high, int sy,
- int gamma) {
+static AOM_FORCE_INLINE void vertical_filter_8x1_f8(const int16x8_t *src,
+ int32x4_t *res_low,
+ int32x4_t *res_high, int sy,
+ int gamma) {
int16x8_t s0 = src[0];
int16x8_t s1 = src[1];
int16x8_t s2 = src[2];
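All of the vertical_filter_*_f* variants above compute the same thing per output column: an 8-tap dot product between a column of horizontally filtered samples and the taps selected by sy (plus k * gamma when the taps vary per column); the plain NEON, i8mm, and SVE versions differ only in how that dot product is vectorised. A scalar reference for one output value (sketch only, not the libaom code):

/* 8-tap vertical dot product: s[k] is the horizontally filtered sample from
 * row k, f[k] the corresponding tap loaded via load_filters_4()/_8(). */
static int32_t vertical_tap8_scalar(const int16_t s[8], const int16_t f[8]) {
  int32_t acc = 0;
  for (int k = 0; k < 8; ++k) acc += (int32_t)s[k] * f[k];
  return acc;
}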
diff --git a/third_party/aom/av1/common/arm/warp_plane_sve.c b/third_party/aom/av1/common/arm/warp_plane_sve.c
index 8a4bf5747b..c70b066174 100644
--- a/third_party/aom/av1/common/arm/warp_plane_sve.c
+++ b/third_party/aom/av1/common/arm/warp_plane_sve.c
@@ -11,7 +11,7 @@
#include <arm_neon.h>
-#include "aom_dsp/arm/dot_sve.h"
+#include "aom_dsp/arm/aom_neon_sve_bridge.h"
#include "warp_plane_neon.h"
DECLARE_ALIGNED(16, static const uint8_t, usdot_permute_idx[48]) = {
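Besides the AOM_FORCE_INLINE change applied throughout, this file also renames its include: the SVE dot-product helpers previously pulled in via dot_sve.h now come from aom_neon_sve_bridge.h. The vertical filters below accumulate into 64-bit lanes and narrow with vmovn_s64, consistent with a bridge-style s16 dot product; a sketch of that idiom follows (the aom_sdotq_s16 name and signature are assumptions about the bridge header, not verified here):

/* Assumed bridge idiom: each 64-bit lane accumulates a 4-product partial
 * sum, so the two lanes together cover all 8 taps; narrowing to s32 feeds
 * the rest of the NEON pipeline. */
static int32x2_t dot8_s16_example(int16x8_t samples, int16x8_t taps) {
  int64x2_t acc = vdupq_n_s64(0);
  acc = aom_sdotq_s16(acc, samples, taps);  /* assumed bridge helper */
  return vmovn_s64(acc);
}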
@@ -20,8 +20,8 @@ DECLARE_ALIGNED(16, static const uint8_t, usdot_permute_idx[48]) = {
8, 9, 10, 11, 9, 10, 11, 12, 10, 11, 12, 13, 11, 12, 13, 14
};
-static INLINE int16x8_t horizontal_filter_4x1_f4(const uint8x16_t in, int sx,
- int alpha) {
+static AOM_FORCE_INLINE int16x8_t horizontal_filter_4x1_f4(const uint8x16_t in,
+ int sx, int alpha) {
const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1));
// Loading the 8 filter taps
@@ -48,8 +48,8 @@ static INLINE int16x8_t horizontal_filter_4x1_f4(const uint8x16_t in, int sx,
return vreinterpretq_s16_u16(res);
}
-static INLINE int16x8_t horizontal_filter_8x1_f8(const uint8x16_t in, int sx,
- int alpha) {
+static AOM_FORCE_INLINE int16x8_t horizontal_filter_8x1_f8(const uint8x16_t in,
+ int sx, int alpha) {
const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1));
// Loading the 8 filter taps
@@ -86,7 +86,8 @@ static INLINE int16x8_t horizontal_filter_8x1_f8(const uint8x16_t in, int sx,
return vreinterpretq_s16_u16(res);
}
-static INLINE int16x8_t horizontal_filter_4x1_f1(const uint8x16_t in, int sx) {
+static AOM_FORCE_INLINE int16x8_t horizontal_filter_4x1_f1(const uint8x16_t in,
+ int sx) {
const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1));
int16x8_t f_s16 =
@@ -115,7 +116,8 @@ static INLINE int16x8_t horizontal_filter_4x1_f1(const uint8x16_t in, int sx) {
return vreinterpretq_s16_u16(res);
}
-static INLINE int16x8_t horizontal_filter_8x1_f1(const uint8x16_t in, int sx) {
+static AOM_FORCE_INLINE int16x8_t horizontal_filter_8x1_f1(const uint8x16_t in,
+ int sx) {
const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1));
int16x8_t f_s16 =
@@ -152,8 +154,8 @@ static INLINE int16x8_t horizontal_filter_8x1_f1(const uint8x16_t in, int sx) {
return vreinterpretq_s16_u16(res);
}
-static INLINE void vertical_filter_4x1_f1(const int16x8_t *src, int32x4_t *res,
- int sy) {
+static AOM_FORCE_INLINE void vertical_filter_4x1_f1(const int16x8_t *src,
+ int32x4_t *res, int sy) {
int16x4_t s0 = vget_low_s16(src[0]);
int16x4_t s1 = vget_low_s16(src[1]);
int16x4_t s2 = vget_low_s16(src[2]);
@@ -178,8 +180,9 @@ static INLINE void vertical_filter_4x1_f1(const int16x8_t *src, int32x4_t *res,
*res = m0123;
}
-static INLINE void vertical_filter_4x1_f4(const int16x8_t *src, int32x4_t *res,
- int sy, int gamma) {
+static AOM_FORCE_INLINE void vertical_filter_4x1_f4(const int16x8_t *src,
+ int32x4_t *res, int sy,
+ int gamma) {
int16x8_t s0, s1, s2, s3;
transpose_elems_s16_4x8(
vget_low_s16(src[0]), vget_low_s16(src[1]), vget_low_s16(src[2]),
@@ -200,9 +203,10 @@ static INLINE void vertical_filter_4x1_f4(const int16x8_t *src, int32x4_t *res,
*res = vcombine_s32(vmovn_s64(m01), vmovn_s64(m23));
}
-static INLINE void vertical_filter_8x1_f1(const int16x8_t *src,
- int32x4_t *res_low,
- int32x4_t *res_high, int sy) {
+static AOM_FORCE_INLINE void vertical_filter_8x1_f1(const int16x8_t *src,
+ int32x4_t *res_low,
+ int32x4_t *res_high,
+ int sy) {
int16x8_t s0 = src[0];
int16x8_t s1 = src[1];
int16x8_t s2 = src[2];
@@ -237,10 +241,10 @@ static INLINE void vertical_filter_8x1_f1(const int16x8_t *src,
*res_high = m4567;
}
-static INLINE void vertical_filter_8x1_f8(const int16x8_t *src,
- int32x4_t *res_low,
- int32x4_t *res_high, int sy,
- int gamma) {
+static AOM_FORCE_INLINE void vertical_filter_8x1_f8(const int16x8_t *src,
+ int32x4_t *res_low,
+ int32x4_t *res_high, int sy,
+ int gamma) {
int16x8_t s0 = src[0];
int16x8_t s1 = src[1];
int16x8_t s2 = src[2];