Diffstat (limited to 'third_party/aom/av1/common')
-rw-r--r--  third_party/aom/av1/common/alloccommon.c | 6
-rw-r--r--  third_party/aom/av1/common/arm/highbd_compound_convolve_neon.c | 532
-rw-r--r--  third_party/aom/av1/common/arm/highbd_compound_convolve_neon.h | 293
-rw-r--r--  third_party/aom/av1/common/arm/highbd_compound_convolve_sve2.c | 1555
-rw-r--r--  third_party/aom/av1/common/arm/highbd_convolve_sve2.c | 1720
-rw-r--r--  third_party/aom/av1/common/arm/highbd_convolve_sve2.h | 97
-rw-r--r--  third_party/aom/av1/common/arm/highbd_warp_plane_neon.c | 30
-rw-r--r--  third_party/aom/av1/common/arm/highbd_warp_plane_neon.h | 60
-rw-r--r--  third_party/aom/av1/common/arm/highbd_warp_plane_sve.c | 32
-rw-r--r--  third_party/aom/av1/common/arm/warp_plane_neon.c | 38
-rw-r--r--  third_party/aom/av1/common/arm/warp_plane_neon.h | 60
-rw-r--r--  third_party/aom/av1/common/arm/warp_plane_neon_i8mm.c | 38
-rw-r--r--  third_party/aom/av1/common/arm/warp_plane_sve.c | 40
-rw-r--r--  third_party/aom/av1/common/av1_common_int.h | 2
-rw-r--r--  third_party/aom/av1/common/av1_rtcd_defs.pl | 54
-rw-r--r--  third_party/aom/av1/common/cdef.c | 13
-rw-r--r--  third_party/aom/av1/common/entropymode.h | 9
-rw-r--r--  third_party/aom/av1/common/quant_common.c | 18
-rw-r--r--  third_party/aom/av1/common/reconintra.c | 6
-rw-r--r--  third_party/aom/av1/common/resize.c | 52
-rw-r--r--  third_party/aom/av1/common/resize.h | 44
-rw-r--r--  third_party/aom/av1/common/restoration.c | 35
-rw-r--r--  third_party/aom/av1/common/thread_common.c | 7
-rw-r--r--  third_party/aom/av1/common/thread_common.h | 1
-rw-r--r--  third_party/aom/av1/common/tile_common.c | 61
-rw-r--r--  third_party/aom/av1/common/tile_common.h | 15
-rw-r--r--  third_party/aom/av1/common/x86/cdef_block_sse2.c | 40
-rw-r--r--  third_party/aom/av1/common/x86/cdef_block_ssse3.c | 11
-rw-r--r--  third_party/aom/av1/common/x86/convolve_2d_avx2.c | 18
-rw-r--r--  third_party/aom/av1/common/x86/convolve_2d_sse2.c | 17
-rw-r--r--  third_party/aom/av1/common/x86/convolve_sse2.c | 26
-rw-r--r--  third_party/aom/av1/common/x86/jnt_convolve_sse2.c | 229
32 files changed, 4280 insertions, 879 deletions
diff --git a/third_party/aom/av1/common/alloccommon.c b/third_party/aom/av1/common/alloccommon.c
index 2a9a8beb40..e9a38c4a60 100644
--- a/third_party/aom/av1/common/alloccommon.c
+++ b/third_party/aom/av1/common/alloccommon.c
@@ -13,6 +13,8 @@
#include "config/aom_config.h"
#include "aom_mem/aom_mem.h"
+#include "aom_scale/yv12config.h"
+#include "aom_util/aom_pthread.h"
#include "av1/common/alloccommon.h"
#include "av1/common/av1_common_int.h"
@@ -20,6 +22,8 @@
#include "av1/common/cdef_block.h"
#include "av1/common/entropymode.h"
#include "av1/common/entropymv.h"
+#include "av1/common/enums.h"
+#include "av1/common/restoration.h"
#include "av1/common/thread_common.h"
int av1_get_MBs(int width, int height) {
@@ -200,7 +204,7 @@ void av1_alloc_cdef_buffers(AV1_COMMON *const cm,
const int is_num_workers_changed =
cdef_info->allocated_num_workers != num_workers;
const int is_cdef_enabled =
- cm->seq_params->enable_cdef && !cm->tiles.large_scale;
+ cm->seq_params->enable_cdef && !cm->tiles.single_tile_decoding;
// num-bufs=3 represents ping-pong buffers for top linebuf,
// followed by bottom linebuf.
diff --git a/third_party/aom/av1/common/arm/highbd_compound_convolve_neon.c b/third_party/aom/av1/common/arm/highbd_compound_convolve_neon.c
index fc03a2ee04..9247ded6bf 100644
--- a/third_party/aom/av1/common/arm/highbd_compound_convolve_neon.c
+++ b/third_party/aom/av1/common/arm/highbd_compound_convolve_neon.c
@@ -20,266 +20,9 @@
#include "aom_ports/mem.h"
#include "av1/common/convolve.h"
#include "av1/common/filter.h"
+#include "av1/common/arm/highbd_compound_convolve_neon.h"
#include "av1/common/arm/highbd_convolve_neon.h"
-#define ROUND_SHIFT 2 * FILTER_BITS - ROUND0_BITS - COMPOUND_ROUND1_BITS
-
-static INLINE void highbd_12_comp_avg_neon(const uint16_t *src_ptr,
- int src_stride, uint16_t *dst_ptr,
- int dst_stride, int w, int h,
- ConvolveParams *conv_params,
- const int offset, const int bd) {
- CONV_BUF_TYPE *ref_ptr = conv_params->dst;
- const int ref_stride = conv_params->dst_stride;
- const uint16x4_t offset_vec = vdup_n_u16(offset);
- const uint16x8_t max = vdupq_n_u16((1 << bd) - 1);
-
- if (w == 4) {
- do {
- const uint16x4_t src = vld1_u16(src_ptr);
- const uint16x4_t ref = vld1_u16(ref_ptr);
-
- uint16x4_t avg = vhadd_u16(src, ref);
- int32x4_t d0 = vreinterpretq_s32_u32(vsubl_u16(avg, offset_vec));
-
- uint16x4_t d0_u16 = vqrshrun_n_s32(d0, ROUND_SHIFT - 2);
- d0_u16 = vmin_u16(d0_u16, vget_low_u16(max));
-
- vst1_u16(dst_ptr, d0_u16);
-
- src_ptr += src_stride;
- ref_ptr += ref_stride;
- dst_ptr += dst_stride;
- } while (--h != 0);
- } else {
- do {
- int width = w;
- const uint16_t *src = src_ptr;
- const uint16_t *ref = ref_ptr;
- uint16_t *dst = dst_ptr;
- do {
- const uint16x8_t s = vld1q_u16(src);
- const uint16x8_t r = vld1q_u16(ref);
-
- uint16x8_t avg = vhaddq_u16(s, r);
- int32x4_t d0_lo =
- vreinterpretq_s32_u32(vsubl_u16(vget_low_u16(avg), offset_vec));
- int32x4_t d0_hi =
- vreinterpretq_s32_u32(vsubl_u16(vget_high_u16(avg), offset_vec));
-
- uint16x8_t d0 = vcombine_u16(vqrshrun_n_s32(d0_lo, ROUND_SHIFT - 2),
- vqrshrun_n_s32(d0_hi, ROUND_SHIFT - 2));
- d0 = vminq_u16(d0, max);
- vst1q_u16(dst, d0);
-
- src += 8;
- ref += 8;
- dst += 8;
- width -= 8;
- } while (width != 0);
-
- src_ptr += src_stride;
- ref_ptr += ref_stride;
- dst_ptr += dst_stride;
- } while (--h != 0);
- }
-}
-
-static INLINE void highbd_comp_avg_neon(const uint16_t *src_ptr, int src_stride,
- uint16_t *dst_ptr, int dst_stride,
- int w, int h,
- ConvolveParams *conv_params,
- const int offset, const int bd) {
- CONV_BUF_TYPE *ref_ptr = conv_params->dst;
- const int ref_stride = conv_params->dst_stride;
- const uint16x4_t offset_vec = vdup_n_u16(offset);
- const uint16x8_t max = vdupq_n_u16((1 << bd) - 1);
-
- if (w == 4) {
- do {
- const uint16x4_t src = vld1_u16(src_ptr);
- const uint16x4_t ref = vld1_u16(ref_ptr);
-
- uint16x4_t avg = vhadd_u16(src, ref);
- int32x4_t d0 = vreinterpretq_s32_u32(vsubl_u16(avg, offset_vec));
-
- uint16x4_t d0_u16 = vqrshrun_n_s32(d0, ROUND_SHIFT);
- d0_u16 = vmin_u16(d0_u16, vget_low_u16(max));
-
- vst1_u16(dst_ptr, d0_u16);
-
- src_ptr += src_stride;
- ref_ptr += ref_stride;
- dst_ptr += dst_stride;
- } while (--h != 0);
- } else {
- do {
- int width = w;
- const uint16_t *src = src_ptr;
- const uint16_t *ref = ref_ptr;
- uint16_t *dst = dst_ptr;
- do {
- const uint16x8_t s = vld1q_u16(src);
- const uint16x8_t r = vld1q_u16(ref);
-
- uint16x8_t avg = vhaddq_u16(s, r);
- int32x4_t d0_lo =
- vreinterpretq_s32_u32(vsubl_u16(vget_low_u16(avg), offset_vec));
- int32x4_t d0_hi =
- vreinterpretq_s32_u32(vsubl_u16(vget_high_u16(avg), offset_vec));
-
- uint16x8_t d0 = vcombine_u16(vqrshrun_n_s32(d0_lo, ROUND_SHIFT),
- vqrshrun_n_s32(d0_hi, ROUND_SHIFT));
- d0 = vminq_u16(d0, max);
- vst1q_u16(dst, d0);
-
- src += 8;
- ref += 8;
- dst += 8;
- width -= 8;
- } while (width != 0);
-
- src_ptr += src_stride;
- ref_ptr += ref_stride;
- dst_ptr += dst_stride;
- } while (--h != 0);
- }
-}
-
-static INLINE void highbd_12_dist_wtd_comp_avg_neon(
- const uint16_t *src_ptr, int src_stride, uint16_t *dst_ptr, int dst_stride,
- int w, int h, ConvolveParams *conv_params, const int offset, const int bd) {
- CONV_BUF_TYPE *ref_ptr = conv_params->dst;
- const int ref_stride = conv_params->dst_stride;
- const uint32x4_t offset_vec = vdupq_n_u32(offset);
- const uint16x8_t max = vdupq_n_u16((1 << bd) - 1);
- uint16x4_t fwd_offset = vdup_n_u16(conv_params->fwd_offset);
- uint16x4_t bck_offset = vdup_n_u16(conv_params->bck_offset);
-
- // Weighted averaging
- if (w == 4) {
- do {
- const uint16x4_t src = vld1_u16(src_ptr);
- const uint16x4_t ref = vld1_u16(ref_ptr);
-
- uint32x4_t wtd_avg = vmull_u16(ref, fwd_offset);
- wtd_avg = vmlal_u16(wtd_avg, src, bck_offset);
- wtd_avg = vshrq_n_u32(wtd_avg, DIST_PRECISION_BITS);
- int32x4_t d0 = vreinterpretq_s32_u32(vsubq_u32(wtd_avg, offset_vec));
-
- uint16x4_t d0_u16 = vqrshrun_n_s32(d0, ROUND_SHIFT - 2);
- d0_u16 = vmin_u16(d0_u16, vget_low_u16(max));
-
- vst1_u16(dst_ptr, d0_u16);
-
- src_ptr += src_stride;
- dst_ptr += dst_stride;
- ref_ptr += ref_stride;
- } while (--h != 0);
- } else {
- do {
- int width = w;
- const uint16_t *src = src_ptr;
- const uint16_t *ref = ref_ptr;
- uint16_t *dst = dst_ptr;
- do {
- const uint16x8_t s = vld1q_u16(src);
- const uint16x8_t r = vld1q_u16(ref);
-
- uint32x4_t wtd_avg0 = vmull_u16(vget_low_u16(r), fwd_offset);
- wtd_avg0 = vmlal_u16(wtd_avg0, vget_low_u16(s), bck_offset);
- wtd_avg0 = vshrq_n_u32(wtd_avg0, DIST_PRECISION_BITS);
- int32x4_t d0 = vreinterpretq_s32_u32(vsubq_u32(wtd_avg0, offset_vec));
-
- uint32x4_t wtd_avg1 = vmull_u16(vget_high_u16(r), fwd_offset);
- wtd_avg1 = vmlal_u16(wtd_avg1, vget_high_u16(s), bck_offset);
- wtd_avg1 = vshrq_n_u32(wtd_avg1, DIST_PRECISION_BITS);
- int32x4_t d1 = vreinterpretq_s32_u32(vsubq_u32(wtd_avg1, offset_vec));
-
- uint16x8_t d01 = vcombine_u16(vqrshrun_n_s32(d0, ROUND_SHIFT - 2),
- vqrshrun_n_s32(d1, ROUND_SHIFT - 2));
- d01 = vminq_u16(d01, max);
- vst1q_u16(dst, d01);
-
- src += 8;
- ref += 8;
- dst += 8;
- width -= 8;
- } while (width != 0);
- src_ptr += src_stride;
- dst_ptr += dst_stride;
- ref_ptr += ref_stride;
- } while (--h != 0);
- }
-}
-
-static INLINE void highbd_dist_wtd_comp_avg_neon(
- const uint16_t *src_ptr, int src_stride, uint16_t *dst_ptr, int dst_stride,
- int w, int h, ConvolveParams *conv_params, const int offset, const int bd) {
- CONV_BUF_TYPE *ref_ptr = conv_params->dst;
- const int ref_stride = conv_params->dst_stride;
- const uint32x4_t offset_vec = vdupq_n_u32(offset);
- const uint16x8_t max = vdupq_n_u16((1 << bd) - 1);
- uint16x4_t fwd_offset = vdup_n_u16(conv_params->fwd_offset);
- uint16x4_t bck_offset = vdup_n_u16(conv_params->bck_offset);
-
- // Weighted averaging
- if (w == 4) {
- do {
- const uint16x4_t src = vld1_u16(src_ptr);
- const uint16x4_t ref = vld1_u16(ref_ptr);
-
- uint32x4_t wtd_avg = vmull_u16(ref, fwd_offset);
- wtd_avg = vmlal_u16(wtd_avg, src, bck_offset);
- wtd_avg = vshrq_n_u32(wtd_avg, DIST_PRECISION_BITS);
- int32x4_t d0 = vreinterpretq_s32_u32(vsubq_u32(wtd_avg, offset_vec));
-
- uint16x4_t d0_u16 = vqrshrun_n_s32(d0, ROUND_SHIFT);
- d0_u16 = vmin_u16(d0_u16, vget_low_u16(max));
-
- vst1_u16(dst_ptr, d0_u16);
-
- src_ptr += src_stride;
- dst_ptr += dst_stride;
- ref_ptr += ref_stride;
- } while (--h != 0);
- } else {
- do {
- int width = w;
- const uint16_t *src = src_ptr;
- const uint16_t *ref = ref_ptr;
- uint16_t *dst = dst_ptr;
- do {
- const uint16x8_t s = vld1q_u16(src);
- const uint16x8_t r = vld1q_u16(ref);
-
- uint32x4_t wtd_avg0 = vmull_u16(vget_low_u16(r), fwd_offset);
- wtd_avg0 = vmlal_u16(wtd_avg0, vget_low_u16(s), bck_offset);
- wtd_avg0 = vshrq_n_u32(wtd_avg0, DIST_PRECISION_BITS);
- int32x4_t d0 = vreinterpretq_s32_u32(vsubq_u32(wtd_avg0, offset_vec));
-
- uint32x4_t wtd_avg1 = vmull_u16(vget_high_u16(r), fwd_offset);
- wtd_avg1 = vmlal_u16(wtd_avg1, vget_high_u16(s), bck_offset);
- wtd_avg1 = vshrq_n_u32(wtd_avg1, DIST_PRECISION_BITS);
- int32x4_t d1 = vreinterpretq_s32_u32(vsubq_u32(wtd_avg1, offset_vec));
-
- uint16x8_t d01 = vcombine_u16(vqrshrun_n_s32(d0, ROUND_SHIFT),
- vqrshrun_n_s32(d1, ROUND_SHIFT));
- d01 = vminq_u16(d01, max);
- vst1q_u16(dst, d01);
-
- src += 8;
- ref += 8;
- dst += 8;
- width -= 8;
- } while (width != 0);
- src_ptr += src_stride;
- dst_ptr += dst_stride;
- ref_ptr += ref_stride;
- } while (--h != 0);
- }
-}
-
static INLINE uint16x4_t highbd_12_convolve6_4(
const int16x4_t s0, const int16x4_t s1, const int16x4_t s2,
const int16x4_t s3, const int16x4_t s4, const int16x4_t s5,
@@ -743,9 +486,6 @@ void av1_highbd_dist_wtd_convolve_x_neon(
const int im_stride = MAX_SB_SIZE;
const int horiz_offset = filter_params_x->taps / 2 - 1;
assert(FILTER_BITS == COMPOUND_ROUND1_BITS);
- const int offset_bits = bd + 2 * FILTER_BITS - conv_params->round_0;
- const int offset_avg = (1 << (offset_bits - conv_params->round_1)) +
- (1 << (offset_bits - conv_params->round_1 - 1));
const int offset_convolve = (1 << (conv_params->round_0 - 1)) +
(1 << (bd + FILTER_BITS)) +
(1 << (bd + FILTER_BITS - 1));
@@ -768,10 +508,10 @@ void av1_highbd_dist_wtd_convolve_x_neon(
}
if (conv_params->use_dist_wtd_comp_avg) {
highbd_12_dist_wtd_comp_avg_neon(im_block, im_stride, dst, dst_stride,
- w, h, conv_params, offset_avg, bd);
+ w, h, conv_params);
} else {
highbd_12_comp_avg_neon(im_block, im_stride, dst, dst_stride, w, h,
- conv_params, offset_avg, bd);
+ conv_params);
}
} else {
if (x_filter_taps <= 6 && w != 4) {
@@ -795,10 +535,10 @@ void av1_highbd_dist_wtd_convolve_x_neon(
}
if (conv_params->use_dist_wtd_comp_avg) {
highbd_dist_wtd_comp_avg_neon(im_block, im_stride, dst, dst_stride, w,
- h, conv_params, offset_avg, bd);
+ h, conv_params, bd);
} else {
highbd_comp_avg_neon(im_block, im_stride, dst, dst_stride, w, h,
- conv_params, offset_avg, bd);
+ conv_params, bd);
}
} else {
if (x_filter_taps <= 6 && w != 4) {
@@ -971,6 +711,212 @@ static INLINE void highbd_dist_wtd_convolve_y_6tap_neon(
}
}
+static INLINE uint16x4_t highbd_12_convolve4_4(
+ const int16x4_t s0, const int16x4_t s1, const int16x4_t s2,
+ const int16x4_t s3, const int16x4_t filter, const int32x4_t offset) {
+ int32x4_t sum = vmlal_lane_s16(offset, s0, filter, 0);
+ sum = vmlal_lane_s16(sum, s1, filter, 1);
+ sum = vmlal_lane_s16(sum, s2, filter, 2);
+ sum = vmlal_lane_s16(sum, s3, filter, 3);
+
+ return vqshrun_n_s32(sum, ROUND0_BITS + 2);
+}
+
+static INLINE uint16x8_t highbd_12_convolve4_8(
+ const int16x8_t s0, const int16x8_t s1, const int16x8_t s2,
+ const int16x8_t s3, const int16x4_t filter, const int32x4_t offset) {
+ int32x4_t sum0 = vmlal_lane_s16(offset, vget_low_s16(s0), filter, 0);
+ sum0 = vmlal_lane_s16(sum0, vget_low_s16(s1), filter, 1);
+ sum0 = vmlal_lane_s16(sum0, vget_low_s16(s2), filter, 2);
+ sum0 = vmlal_lane_s16(sum0, vget_low_s16(s3), filter, 3);
+
+ int32x4_t sum1 = vmlal_lane_s16(offset, vget_high_s16(s0), filter, 0);
+ sum1 = vmlal_lane_s16(sum1, vget_high_s16(s1), filter, 1);
+ sum1 = vmlal_lane_s16(sum1, vget_high_s16(s2), filter, 2);
+ sum1 = vmlal_lane_s16(sum1, vget_high_s16(s3), filter, 3);
+
+ return vcombine_u16(vqshrun_n_s32(sum0, ROUND0_BITS + 2),
+ vqshrun_n_s32(sum1, ROUND0_BITS + 2));
+}
+
+static INLINE void highbd_12_dist_wtd_convolve_y_4tap_neon(
+ const uint16_t *src_ptr, int src_stride, uint16_t *dst_ptr, int dst_stride,
+ int w, int h, const int16_t *y_filter_ptr, const int offset) {
+ const int16x4_t y_filter = vld1_s16(y_filter_ptr + 2);
+ const int32x4_t offset_vec = vdupq_n_s32(offset);
+
+ if (w == 4) {
+ const int16_t *s = (const int16_t *)src_ptr;
+ uint16_t *d = dst_ptr;
+
+ int16x4_t s0, s1, s2;
+ load_s16_4x3(s, src_stride, &s0, &s1, &s2);
+ s += 3 * src_stride;
+
+ do {
+ int16x4_t s3, s4, s5, s6;
+ load_s16_4x4(s, src_stride, &s3, &s4, &s5, &s6);
+
+ uint16x4_t d0 =
+ highbd_12_convolve4_4(s0, s1, s2, s3, y_filter, offset_vec);
+ uint16x4_t d1 =
+ highbd_12_convolve4_4(s1, s2, s3, s4, y_filter, offset_vec);
+ uint16x4_t d2 =
+ highbd_12_convolve4_4(s2, s3, s4, s5, y_filter, offset_vec);
+ uint16x4_t d3 =
+ highbd_12_convolve4_4(s3, s4, s5, s6, y_filter, offset_vec);
+
+ store_u16_4x4(d, dst_stride, d0, d1, d2, d3);
+
+ s0 = s4;
+ s1 = s5;
+ s2 = s6;
+
+ s += 4 * src_stride;
+ d += 4 * dst_stride;
+ h -= 4;
+ } while (h != 0);
+ } else {
+ do {
+ int height = h;
+ const int16_t *s = (const int16_t *)src_ptr;
+ uint16_t *d = dst_ptr;
+
+ int16x8_t s0, s1, s2;
+ load_s16_8x3(s, src_stride, &s0, &s1, &s2);
+ s += 3 * src_stride;
+
+ do {
+ int16x8_t s3, s4, s5, s6;
+ load_s16_8x4(s, src_stride, &s3, &s4, &s5, &s6);
+
+ uint16x8_t d0 =
+ highbd_12_convolve4_8(s0, s1, s2, s3, y_filter, offset_vec);
+ uint16x8_t d1 =
+ highbd_12_convolve4_8(s1, s2, s3, s4, y_filter, offset_vec);
+ uint16x8_t d2 =
+ highbd_12_convolve4_8(s2, s3, s4, s5, y_filter, offset_vec);
+ uint16x8_t d3 =
+ highbd_12_convolve4_8(s3, s4, s5, s6, y_filter, offset_vec);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ s0 = s4;
+ s1 = s5;
+ s2 = s6;
+
+ s += 4 * src_stride;
+ d += 4 * dst_stride;
+ height -= 4;
+ } while (height != 0);
+ src_ptr += 8;
+ dst_ptr += 8;
+ w -= 8;
+ } while (w != 0);
+ }
+}
+
+static INLINE uint16x4_t highbd_convolve4_4(
+ const int16x4_t s0, const int16x4_t s1, const int16x4_t s2,
+ const int16x4_t s3, const int16x4_t filter, const int32x4_t offset) {
+ int32x4_t sum = vmlal_lane_s16(offset, s0, filter, 0);
+ sum = vmlal_lane_s16(sum, s1, filter, 1);
+ sum = vmlal_lane_s16(sum, s2, filter, 2);
+ sum = vmlal_lane_s16(sum, s3, filter, 3);
+
+ return vqshrun_n_s32(sum, ROUND0_BITS);
+}
+
+static INLINE uint16x8_t highbd_convolve4_8(
+ const int16x8_t s0, const int16x8_t s1, const int16x8_t s2,
+ const int16x8_t s3, const int16x4_t filter, const int32x4_t offset) {
+ int32x4_t sum0 = vmlal_lane_s16(offset, vget_low_s16(s0), filter, 0);
+ sum0 = vmlal_lane_s16(sum0, vget_low_s16(s1), filter, 1);
+ sum0 = vmlal_lane_s16(sum0, vget_low_s16(s2), filter, 2);
+ sum0 = vmlal_lane_s16(sum0, vget_low_s16(s3), filter, 3);
+
+ int32x4_t sum1 = vmlal_lane_s16(offset, vget_high_s16(s0), filter, 0);
+ sum1 = vmlal_lane_s16(sum1, vget_high_s16(s1), filter, 1);
+ sum1 = vmlal_lane_s16(sum1, vget_high_s16(s2), filter, 2);
+ sum1 = vmlal_lane_s16(sum1, vget_high_s16(s3), filter, 3);
+
+ return vcombine_u16(vqshrun_n_s32(sum0, ROUND0_BITS),
+ vqshrun_n_s32(sum1, ROUND0_BITS));
+}
+
+static INLINE void highbd_dist_wtd_convolve_y_4tap_neon(
+ const uint16_t *src_ptr, int src_stride, uint16_t *dst_ptr, int dst_stride,
+ int w, int h, const int16_t *y_filter_ptr, const int offset) {
+ const int16x4_t y_filter = vld1_s16(y_filter_ptr + 2);
+ const int32x4_t offset_vec = vdupq_n_s32(offset);
+
+ if (w == 4) {
+ const int16_t *s = (const int16_t *)src_ptr;
+ uint16_t *d = dst_ptr;
+
+ int16x4_t s0, s1, s2;
+ load_s16_4x3(s, src_stride, &s0, &s1, &s2);
+ s += 3 * src_stride;
+
+ do {
+ int16x4_t s3, s4, s5, s6;
+ load_s16_4x4(s, src_stride, &s3, &s4, &s5, &s6);
+
+ uint16x4_t d0 = highbd_convolve4_4(s0, s1, s2, s3, y_filter, offset_vec);
+ uint16x4_t d1 = highbd_convolve4_4(s1, s2, s3, s4, y_filter, offset_vec);
+ uint16x4_t d2 = highbd_convolve4_4(s2, s3, s4, s5, y_filter, offset_vec);
+ uint16x4_t d3 = highbd_convolve4_4(s3, s4, s5, s6, y_filter, offset_vec);
+
+ store_u16_4x4(d, dst_stride, d0, d1, d2, d3);
+
+ s0 = s4;
+ s1 = s5;
+ s2 = s6;
+
+ s += 4 * src_stride;
+ d += 4 * dst_stride;
+ h -= 4;
+ } while (h != 0);
+ } else {
+ do {
+ int height = h;
+ const int16_t *s = (const int16_t *)src_ptr;
+ uint16_t *d = dst_ptr;
+
+ int16x8_t s0, s1, s2;
+ load_s16_8x3(s, src_stride, &s0, &s1, &s2);
+ s += 3 * src_stride;
+
+ do {
+ int16x8_t s3, s4, s5, s6;
+ load_s16_8x4(s, src_stride, &s3, &s4, &s5, &s6);
+
+ uint16x8_t d0 =
+ highbd_convolve4_8(s0, s1, s2, s3, y_filter, offset_vec);
+ uint16x8_t d1 =
+ highbd_convolve4_8(s1, s2, s3, s4, y_filter, offset_vec);
+ uint16x8_t d2 =
+ highbd_convolve4_8(s2, s3, s4, s5, y_filter, offset_vec);
+ uint16x8_t d3 =
+ highbd_convolve4_8(s3, s4, s5, s6, y_filter, offset_vec);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ s0 = s4;
+ s1 = s5;
+ s2 = s6;
+
+ s += 4 * src_stride;
+ d += 4 * dst_stride;
+ height -= 4;
+ } while (height != 0);
+ src_ptr += 8;
+ dst_ptr += 8;
+ w -= 8;
+ } while (w != 0);
+ }
+}
+
static INLINE void highbd_12_dist_wtd_convolve_y_8tap_neon(
const uint16_t *src_ptr, int src_stride, uint16_t *dst_ptr, int dst_stride,
int w, int h, const int16_t *y_filter_ptr, const int offset) {
@@ -1148,9 +1094,6 @@ void av1_highbd_dist_wtd_convolve_y_neon(
const int im_stride = MAX_SB_SIZE;
const int vert_offset = filter_params_y->taps / 2 - 1;
assert(FILTER_BITS == COMPOUND_ROUND1_BITS);
- const int offset_bits = bd + 2 * FILTER_BITS - conv_params->round_0;
- const int round_offset_avg = (1 << (offset_bits - conv_params->round_1)) +
- (1 << (offset_bits - conv_params->round_1 - 1));
const int round_offset_conv = (1 << (conv_params->round_0 - 1)) +
(1 << (bd + FILTER_BITS)) +
(1 << (bd + FILTER_BITS - 1));
@@ -1162,7 +1105,11 @@ void av1_highbd_dist_wtd_convolve_y_neon(
if (bd == 12) {
if (conv_params->do_average) {
- if (y_filter_taps <= 6) {
+ if (y_filter_taps <= 4) {
+ highbd_12_dist_wtd_convolve_y_4tap_neon(
+ src + 2 * src_stride, src_stride, im_block, im_stride, w, h,
+ y_filter_ptr, round_offset_conv);
+ } else if (y_filter_taps == 6) {
highbd_12_dist_wtd_convolve_y_6tap_neon(
src + src_stride, src_stride, im_block, im_stride, w, h,
y_filter_ptr, round_offset_conv);
@@ -1173,14 +1120,17 @@ void av1_highbd_dist_wtd_convolve_y_neon(
}
if (conv_params->use_dist_wtd_comp_avg) {
highbd_12_dist_wtd_comp_avg_neon(im_block, im_stride, dst, dst_stride,
- w, h, conv_params, round_offset_avg,
- bd);
+ w, h, conv_params);
} else {
highbd_12_comp_avg_neon(im_block, im_stride, dst, dst_stride, w, h,
- conv_params, round_offset_avg, bd);
+ conv_params);
}
} else {
- if (y_filter_taps <= 6) {
+ if (y_filter_taps <= 4) {
+ highbd_12_dist_wtd_convolve_y_4tap_neon(
+ src + 2 * src_stride, src_stride, dst16, dst16_stride, w, h,
+ y_filter_ptr, round_offset_conv);
+ } else if (y_filter_taps == 6) {
highbd_12_dist_wtd_convolve_y_6tap_neon(
src + src_stride, src_stride, dst16, dst16_stride, w, h,
y_filter_ptr, round_offset_conv);
@@ -1192,7 +1142,11 @@ void av1_highbd_dist_wtd_convolve_y_neon(
}
} else {
if (conv_params->do_average) {
- if (y_filter_taps <= 6) {
+ if (y_filter_taps <= 4) {
+ highbd_dist_wtd_convolve_y_4tap_neon(src + 2 * src_stride, src_stride,
+ im_block, im_stride, w, h,
+ y_filter_ptr, round_offset_conv);
+ } else if (y_filter_taps == 6) {
highbd_dist_wtd_convolve_y_6tap_neon(src + src_stride, src_stride,
im_block, im_stride, w, h,
y_filter_ptr, round_offset_conv);
@@ -1203,13 +1157,17 @@ void av1_highbd_dist_wtd_convolve_y_neon(
}
if (conv_params->use_dist_wtd_comp_avg) {
highbd_dist_wtd_comp_avg_neon(im_block, im_stride, dst, dst_stride, w,
- h, conv_params, round_offset_avg, bd);
+ h, conv_params, bd);
} else {
highbd_comp_avg_neon(im_block, im_stride, dst, dst_stride, w, h,
- conv_params, round_offset_avg, bd);
+ conv_params, bd);
}
} else {
- if (y_filter_taps <= 6) {
+ if (y_filter_taps <= 4) {
+ highbd_dist_wtd_convolve_y_4tap_neon(src + 2 * src_stride, src_stride,
+ dst16, dst16_stride, w, h,
+ y_filter_ptr, round_offset_conv);
+ } else if (y_filter_taps == 6) {
highbd_dist_wtd_convolve_y_6tap_neon(src + src_stride, src_stride,
dst16, dst16_stride, w, h,
y_filter_ptr, round_offset_conv);
@@ -1285,18 +1243,18 @@ void av1_highbd_dist_wtd_convolve_2d_copy_neon(const uint16_t *src,
if (conv_params->use_dist_wtd_comp_avg) {
if (bd == 12) {
highbd_12_dist_wtd_comp_avg_neon(im_block, im_stride, dst, dst_stride,
- w, h, conv_params, round_offset, bd);
+ w, h, conv_params);
} else {
highbd_dist_wtd_comp_avg_neon(im_block, im_stride, dst, dst_stride, w,
- h, conv_params, round_offset, bd);
+ h, conv_params, bd);
}
} else {
if (bd == 12) {
highbd_12_comp_avg_neon(im_block, im_stride, dst, dst_stride, w, h,
- conv_params, round_offset, bd);
+ conv_params);
} else {
highbd_comp_avg_neon(im_block, im_stride, dst, dst_stride, w, h,
- conv_params, round_offset, bd);
+ conv_params, bd);
}
}
}
@@ -1949,9 +1907,6 @@ void av1_highbd_dist_wtd_convolve_2d_neon(
(1 << (bd + FILTER_BITS - 1)) + (1 << (conv_params->round_0 - 1));
const int y_offset_bits = bd + 2 * FILTER_BITS - conv_params->round_0;
const int round_offset_conv_y = (1 << y_offset_bits);
- const int round_offset_avg =
- ((1 << (y_offset_bits - conv_params->round_1)) +
- (1 << (y_offset_bits - conv_params->round_1 - 1)));
const uint16_t *src_ptr = src - vert_offset * src_stride - horiz_offset;
@@ -2012,19 +1967,18 @@ void av1_highbd_dist_wtd_convolve_2d_neon(
if (conv_params->use_dist_wtd_comp_avg) {
if (bd == 12) {
highbd_12_dist_wtd_comp_avg_neon(im_block2, im_stride, dst, dst_stride,
- w, h, conv_params, round_offset_avg,
- bd);
+ w, h, conv_params);
} else {
highbd_dist_wtd_comp_avg_neon(im_block2, im_stride, dst, dst_stride, w,
- h, conv_params, round_offset_avg, bd);
+ h, conv_params, bd);
}
} else {
if (bd == 12) {
highbd_12_comp_avg_neon(im_block2, im_stride, dst, dst_stride, w, h,
- conv_params, round_offset_avg, bd);
+ conv_params);
} else {
highbd_comp_avg_neon(im_block2, im_stride, dst, dst_stride, w, h,
- conv_params, round_offset_avg, bd);
+ conv_params, bd);
}
}
}
diff --git a/third_party/aom/av1/common/arm/highbd_compound_convolve_neon.h b/third_party/aom/av1/common/arm/highbd_compound_convolve_neon.h
new file mode 100644
index 0000000000..c9344f3adf
--- /dev/null
+++ b/third_party/aom/av1/common/arm/highbd_compound_convolve_neon.h
@@ -0,0 +1,293 @@
+/*
+ * Copyright (c) 2024, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <assert.h>
+#include <arm_neon.h>
+
+#include "config/aom_config.h"
+#include "config/av1_rtcd.h"
+
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_dsp/arm/mem_neon.h"
+#include "aom_ports/mem.h"
+
+#define ROUND_SHIFT 2 * FILTER_BITS - ROUND0_BITS - COMPOUND_ROUND1_BITS
+
+static INLINE void highbd_12_comp_avg_neon(const uint16_t *src_ptr,
+ int src_stride, uint16_t *dst_ptr,
+ int dst_stride, int w, int h,
+ ConvolveParams *conv_params) {
+ const int offset_bits = 12 + 2 * FILTER_BITS - ROUND0_BITS - 2;
+ const int offset = (1 << (offset_bits - COMPOUND_ROUND1_BITS)) +
+ (1 << (offset_bits - COMPOUND_ROUND1_BITS - 1));
+
+ CONV_BUF_TYPE *ref_ptr = conv_params->dst;
+ const int ref_stride = conv_params->dst_stride;
+ const uint16x4_t offset_vec = vdup_n_u16((uint16_t)offset);
+ const uint16x8_t max = vdupq_n_u16((1 << 12) - 1);
+
+ if (w == 4) {
+ do {
+ const uint16x4_t src = vld1_u16(src_ptr);
+ const uint16x4_t ref = vld1_u16(ref_ptr);
+
+ uint16x4_t avg = vhadd_u16(src, ref);
+ int32x4_t d0 = vreinterpretq_s32_u32(vsubl_u16(avg, offset_vec));
+
+ uint16x4_t d0_u16 = vqrshrun_n_s32(d0, ROUND_SHIFT - 2);
+ d0_u16 = vmin_u16(d0_u16, vget_low_u16(max));
+
+ vst1_u16(dst_ptr, d0_u16);
+
+ src_ptr += src_stride;
+ ref_ptr += ref_stride;
+ dst_ptr += dst_stride;
+ } while (--h != 0);
+ } else {
+ do {
+ int width = w;
+ const uint16_t *src = src_ptr;
+ const uint16_t *ref = ref_ptr;
+ uint16_t *dst = dst_ptr;
+ do {
+ const uint16x8_t s = vld1q_u16(src);
+ const uint16x8_t r = vld1q_u16(ref);
+
+ uint16x8_t avg = vhaddq_u16(s, r);
+ int32x4_t d0_lo =
+ vreinterpretq_s32_u32(vsubl_u16(vget_low_u16(avg), offset_vec));
+ int32x4_t d0_hi =
+ vreinterpretq_s32_u32(vsubl_u16(vget_high_u16(avg), offset_vec));
+
+ uint16x8_t d0 = vcombine_u16(vqrshrun_n_s32(d0_lo, ROUND_SHIFT - 2),
+ vqrshrun_n_s32(d0_hi, ROUND_SHIFT - 2));
+ d0 = vminq_u16(d0, max);
+ vst1q_u16(dst, d0);
+
+ src += 8;
+ ref += 8;
+ dst += 8;
+ width -= 8;
+ } while (width != 0);
+
+ src_ptr += src_stride;
+ ref_ptr += ref_stride;
+ dst_ptr += dst_stride;
+ } while (--h != 0);
+ }
+}
+
+static INLINE void highbd_comp_avg_neon(const uint16_t *src_ptr, int src_stride,
+ uint16_t *dst_ptr, int dst_stride,
+ int w, int h,
+ ConvolveParams *conv_params,
+ const int bd) {
+ const int offset_bits = bd + 2 * FILTER_BITS - ROUND0_BITS;
+ const int offset = (1 << (offset_bits - COMPOUND_ROUND1_BITS)) +
+ (1 << (offset_bits - COMPOUND_ROUND1_BITS - 1));
+
+ CONV_BUF_TYPE *ref_ptr = conv_params->dst;
+ const int ref_stride = conv_params->dst_stride;
+ const uint16x4_t offset_vec = vdup_n_u16((uint16_t)offset);
+ const uint16x8_t max = vdupq_n_u16((1 << bd) - 1);
+
+ if (w == 4) {
+ do {
+ const uint16x4_t src = vld1_u16(src_ptr);
+ const uint16x4_t ref = vld1_u16(ref_ptr);
+
+ uint16x4_t avg = vhadd_u16(src, ref);
+ int32x4_t d0 = vreinterpretq_s32_u32(vsubl_u16(avg, offset_vec));
+
+ uint16x4_t d0_u16 = vqrshrun_n_s32(d0, ROUND_SHIFT);
+ d0_u16 = vmin_u16(d0_u16, vget_low_u16(max));
+
+ vst1_u16(dst_ptr, d0_u16);
+
+ src_ptr += src_stride;
+ ref_ptr += ref_stride;
+ dst_ptr += dst_stride;
+ } while (--h != 0);
+ } else {
+ do {
+ int width = w;
+ const uint16_t *src = src_ptr;
+ const uint16_t *ref = ref_ptr;
+ uint16_t *dst = dst_ptr;
+ do {
+ const uint16x8_t s = vld1q_u16(src);
+ const uint16x8_t r = vld1q_u16(ref);
+
+ uint16x8_t avg = vhaddq_u16(s, r);
+ int32x4_t d0_lo =
+ vreinterpretq_s32_u32(vsubl_u16(vget_low_u16(avg), offset_vec));
+ int32x4_t d0_hi =
+ vreinterpretq_s32_u32(vsubl_u16(vget_high_u16(avg), offset_vec));
+
+ uint16x8_t d0 = vcombine_u16(vqrshrun_n_s32(d0_lo, ROUND_SHIFT),
+ vqrshrun_n_s32(d0_hi, ROUND_SHIFT));
+ d0 = vminq_u16(d0, max);
+ vst1q_u16(dst, d0);
+
+ src += 8;
+ ref += 8;
+ dst += 8;
+ width -= 8;
+ } while (width != 0);
+
+ src_ptr += src_stride;
+ ref_ptr += ref_stride;
+ dst_ptr += dst_stride;
+ } while (--h != 0);
+ }
+}
+
+static INLINE void highbd_12_dist_wtd_comp_avg_neon(
+ const uint16_t *src_ptr, int src_stride, uint16_t *dst_ptr, int dst_stride,
+ int w, int h, ConvolveParams *conv_params) {
+ const int offset_bits = 12 + 2 * FILTER_BITS - ROUND0_BITS - 2;
+ const int offset = (1 << (offset_bits - COMPOUND_ROUND1_BITS)) +
+ (1 << (offset_bits - COMPOUND_ROUND1_BITS - 1));
+
+ CONV_BUF_TYPE *ref_ptr = conv_params->dst;
+ const int ref_stride = conv_params->dst_stride;
+ const uint32x4_t offset_vec = vdupq_n_u32(offset);
+ const uint16x8_t max = vdupq_n_u16((1 << 12) - 1);
+ uint16x4_t fwd_offset = vdup_n_u16(conv_params->fwd_offset);
+ uint16x4_t bck_offset = vdup_n_u16(conv_params->bck_offset);
+
+ // Weighted averaging
+ if (w == 4) {
+ do {
+ const uint16x4_t src = vld1_u16(src_ptr);
+ const uint16x4_t ref = vld1_u16(ref_ptr);
+
+ uint32x4_t wtd_avg = vmull_u16(ref, fwd_offset);
+ wtd_avg = vmlal_u16(wtd_avg, src, bck_offset);
+ wtd_avg = vshrq_n_u32(wtd_avg, DIST_PRECISION_BITS);
+ int32x4_t d0 = vreinterpretq_s32_u32(vsubq_u32(wtd_avg, offset_vec));
+
+ uint16x4_t d0_u16 = vqrshrun_n_s32(d0, ROUND_SHIFT - 2);
+ d0_u16 = vmin_u16(d0_u16, vget_low_u16(max));
+
+ vst1_u16(dst_ptr, d0_u16);
+
+ src_ptr += src_stride;
+ dst_ptr += dst_stride;
+ ref_ptr += ref_stride;
+ } while (--h != 0);
+ } else {
+ do {
+ int width = w;
+ const uint16_t *src = src_ptr;
+ const uint16_t *ref = ref_ptr;
+ uint16_t *dst = dst_ptr;
+ do {
+ const uint16x8_t s = vld1q_u16(src);
+ const uint16x8_t r = vld1q_u16(ref);
+
+ uint32x4_t wtd_avg0 = vmull_u16(vget_low_u16(r), fwd_offset);
+ wtd_avg0 = vmlal_u16(wtd_avg0, vget_low_u16(s), bck_offset);
+ wtd_avg0 = vshrq_n_u32(wtd_avg0, DIST_PRECISION_BITS);
+ int32x4_t d0 = vreinterpretq_s32_u32(vsubq_u32(wtd_avg0, offset_vec));
+
+ uint32x4_t wtd_avg1 = vmull_u16(vget_high_u16(r), fwd_offset);
+ wtd_avg1 = vmlal_u16(wtd_avg1, vget_high_u16(s), bck_offset);
+ wtd_avg1 = vshrq_n_u32(wtd_avg1, DIST_PRECISION_BITS);
+ int32x4_t d1 = vreinterpretq_s32_u32(vsubq_u32(wtd_avg1, offset_vec));
+
+ uint16x8_t d01 = vcombine_u16(vqrshrun_n_s32(d0, ROUND_SHIFT - 2),
+ vqrshrun_n_s32(d1, ROUND_SHIFT - 2));
+ d01 = vminq_u16(d01, max);
+ vst1q_u16(dst, d01);
+
+ src += 8;
+ ref += 8;
+ dst += 8;
+ width -= 8;
+ } while (width != 0);
+ src_ptr += src_stride;
+ dst_ptr += dst_stride;
+ ref_ptr += ref_stride;
+ } while (--h != 0);
+ }
+}
+
+static INLINE void highbd_dist_wtd_comp_avg_neon(
+ const uint16_t *src_ptr, int src_stride, uint16_t *dst_ptr, int dst_stride,
+ int w, int h, ConvolveParams *conv_params, const int bd) {
+ const int offset_bits = bd + 2 * FILTER_BITS - ROUND0_BITS;
+ const int offset = (1 << (offset_bits - COMPOUND_ROUND1_BITS)) +
+ (1 << (offset_bits - COMPOUND_ROUND1_BITS - 1));
+
+ CONV_BUF_TYPE *ref_ptr = conv_params->dst;
+ const int ref_stride = conv_params->dst_stride;
+ const uint32x4_t offset_vec = vdupq_n_u32(offset);
+ const uint16x8_t max = vdupq_n_u16((1 << bd) - 1);
+ uint16x4_t fwd_offset = vdup_n_u16(conv_params->fwd_offset);
+ uint16x4_t bck_offset = vdup_n_u16(conv_params->bck_offset);
+
+ // Weighted averaging
+ if (w == 4) {
+ do {
+ const uint16x4_t src = vld1_u16(src_ptr);
+ const uint16x4_t ref = vld1_u16(ref_ptr);
+
+ uint32x4_t wtd_avg = vmull_u16(ref, fwd_offset);
+ wtd_avg = vmlal_u16(wtd_avg, src, bck_offset);
+ wtd_avg = vshrq_n_u32(wtd_avg, DIST_PRECISION_BITS);
+ int32x4_t d0 = vreinterpretq_s32_u32(vsubq_u32(wtd_avg, offset_vec));
+
+ uint16x4_t d0_u16 = vqrshrun_n_s32(d0, ROUND_SHIFT);
+ d0_u16 = vmin_u16(d0_u16, vget_low_u16(max));
+
+ vst1_u16(dst_ptr, d0_u16);
+
+ src_ptr += src_stride;
+ dst_ptr += dst_stride;
+ ref_ptr += ref_stride;
+ } while (--h != 0);
+ } else {
+ do {
+ int width = w;
+ const uint16_t *src = src_ptr;
+ const uint16_t *ref = ref_ptr;
+ uint16_t *dst = dst_ptr;
+ do {
+ const uint16x8_t s = vld1q_u16(src);
+ const uint16x8_t r = vld1q_u16(ref);
+
+ uint32x4_t wtd_avg0 = vmull_u16(vget_low_u16(r), fwd_offset);
+ wtd_avg0 = vmlal_u16(wtd_avg0, vget_low_u16(s), bck_offset);
+ wtd_avg0 = vshrq_n_u32(wtd_avg0, DIST_PRECISION_BITS);
+ int32x4_t d0 = vreinterpretq_s32_u32(vsubq_u32(wtd_avg0, offset_vec));
+
+ uint32x4_t wtd_avg1 = vmull_u16(vget_high_u16(r), fwd_offset);
+ wtd_avg1 = vmlal_u16(wtd_avg1, vget_high_u16(s), bck_offset);
+ wtd_avg1 = vshrq_n_u32(wtd_avg1, DIST_PRECISION_BITS);
+ int32x4_t d1 = vreinterpretq_s32_u32(vsubq_u32(wtd_avg1, offset_vec));
+
+ uint16x8_t d01 = vcombine_u16(vqrshrun_n_s32(d0, ROUND_SHIFT),
+ vqrshrun_n_s32(d1, ROUND_SHIFT));
+ d01 = vminq_u16(d01, max);
+ vst1q_u16(dst, d01);
+
+ src += 8;
+ ref += 8;
+ dst += 8;
+ width -= 8;
+ } while (width != 0);
+ src_ptr += src_stride;
+ dst_ptr += dst_stride;
+ ref_ptr += ref_stride;
+ } while (--h != 0);
+ }
+}
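
The round offsets that each caller previously computed (offset_avg / round_offset_avg) are now derived inside the comp-avg helpers defined in the new header above, so their signatures lose the offset (and, for the 12-bit variants, the bd) parameter. A minimal standalone sketch of that arithmetic, assuming the standard AV1 constants FILTER_BITS = 7, ROUND0_BITS = 3 and COMPOUND_ROUND1_BITS = 7 (so ROUND_SHIFT evaluates to 4); illustrative code, not part of libaom:

#include <stdio.h>

// Mirrors the offset computation in highbd_comp_avg_neon() and the
// highbd_12_* variants above; the 12-bit path pre-shifts by two extra bits,
// hence the "- 2" on its offset_bits.
static int comp_avg_offset(int bd, int is_12bit_path) {
  const int kFilterBits = 7, kRound0Bits = 3, kCompoundRound1Bits = 7;
  const int offset_bits =
      bd + 2 * kFilterBits - kRound0Bits - (is_12bit_path ? 2 : 0);
  return (1 << (offset_bits - kCompoundRound1Bits)) +
         (1 << (offset_bits - kCompoundRound1Bits - 1));
}

int main(void) {
  // Both the bd == 10 path and the dedicated 12-bit path end up with
  // offset_bits == 21, i.e. an offset of (1 << 14) + (1 << 13) = 24576.
  printf("%d %d\n", comp_avg_offset(10, 0), comp_avg_offset(12, 1));
  return 0;
}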
diff --git a/third_party/aom/av1/common/arm/highbd_compound_convolve_sve2.c b/third_party/aom/av1/common/arm/highbd_compound_convolve_sve2.c
new file mode 100644
index 0000000000..1d6c9b4faf
--- /dev/null
+++ b/third_party/aom/av1/common/arm/highbd_compound_convolve_sve2.c
@@ -0,0 +1,1555 @@
+/*
+ * Copyright (c) 2024, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <assert.h>
+#include <arm_neon.h>
+
+#include "config/aom_config.h"
+#include "config/av1_rtcd.h"
+
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_dsp/arm/aom_neon_sve_bridge.h"
+#include "aom_dsp/arm/aom_neon_sve2_bridge.h"
+#include "aom_dsp/arm/mem_neon.h"
+#include "aom_ports/mem.h"
+#include "av1/common/convolve.h"
+#include "av1/common/filter.h"
+#include "av1/common/filter.h"
+#include "av1/common/arm/highbd_compound_convolve_neon.h"
+#include "av1/common/arm/highbd_convolve_neon.h"
+#include "av1/common/arm/highbd_convolve_sve2.h"
+
+DECLARE_ALIGNED(16, static const uint16_t, kDotProdTbl[32]) = {
+ 0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6,
+ 4, 5, 6, 7, 5, 6, 7, 0, 6, 7, 0, 1, 7, 0, 1, 2,
+};
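
kDotProdTbl drives the aom_tbl_s16 permutes in the 4-tap horizontal kernels below: each group of four indices selects one sliding 4-sample window (samples 0-3, 1-4, 2-5, ...), so every 64-bit lane of the SVE dot product yields one output pixel. A hedged scalar sketch of what one permuted window contributes (hypothetical helper, not libaom code):

#include <stdint.h>

// Output pixel x of a 4-tap horizontal filter: dot product of the 4-sample
// window starting at x with the middle four filter taps, plus the offset.
// The result is later narrowed with a rounding shift (ROUND0_BITS, or
// ROUND0_BITS + 2 on the 12-bit path).
static int64_t convolve4_scalar(const int16_t *src, const int16_t *filter4,
                                int x, int64_t offset) {
  int64_t sum = offset;
  for (int k = 0; k < 4; ++k) sum += (int64_t)src[x + k] * filter4[k];
  return sum;
}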
+
+static INLINE uint16x8_t highbd_12_convolve8_8_x(int16x8_t s0[8],
+ int16x8_t filter,
+ int64x2_t offset) {
+ int64x2_t sum[8];
+ sum[0] = aom_sdotq_s16(offset, s0[0], filter);
+ sum[1] = aom_sdotq_s16(offset, s0[1], filter);
+ sum[2] = aom_sdotq_s16(offset, s0[2], filter);
+ sum[3] = aom_sdotq_s16(offset, s0[3], filter);
+ sum[4] = aom_sdotq_s16(offset, s0[4], filter);
+ sum[5] = aom_sdotq_s16(offset, s0[5], filter);
+ sum[6] = aom_sdotq_s16(offset, s0[6], filter);
+ sum[7] = aom_sdotq_s16(offset, s0[7], filter);
+
+ sum[0] = vpaddq_s64(sum[0], sum[1]);
+ sum[2] = vpaddq_s64(sum[2], sum[3]);
+ sum[4] = vpaddq_s64(sum[4], sum[5]);
+ sum[6] = vpaddq_s64(sum[6], sum[7]);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum[0]), vmovn_s64(sum[2]));
+ int32x4_t sum4567 = vcombine_s32(vmovn_s64(sum[4]), vmovn_s64(sum[6]));
+
+ return vcombine_u16(vqrshrun_n_s32(sum0123, ROUND0_BITS + 2),
+ vqrshrun_n_s32(sum4567, ROUND0_BITS + 2));
+}
+
+static INLINE void highbd_12_dist_wtd_convolve_x_8tap_sve2(
+ const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride,
+ int width, int height, const int16_t *x_filter_ptr) {
+ const int64x1_t offset_vec =
+ vcreate_s64((1 << (12 + FILTER_BITS)) + (1 << (12 + FILTER_BITS - 1)));
+ const int64x2_t offset_lo = vcombine_s64(offset_vec, vdup_n_s64(0));
+
+ const int16x8_t filter = vld1q_s16(x_filter_ptr);
+
+ do {
+ const int16_t *s = (const int16_t *)src;
+ uint16_t *d = dst;
+ int w = width;
+
+ do {
+ int16x8_t s0[8], s1[8], s2[8], s3[8];
+ load_s16_8x8(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3],
+ &s0[4], &s0[5], &s0[6], &s0[7]);
+ load_s16_8x8(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3],
+ &s1[4], &s1[5], &s1[6], &s1[7]);
+ load_s16_8x8(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3],
+ &s2[4], &s2[5], &s2[6], &s2[7]);
+ load_s16_8x8(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3],
+ &s3[4], &s3[5], &s3[6], &s3[7]);
+
+ uint16x8_t d0 = highbd_12_convolve8_8_x(s0, filter, offset_lo);
+ uint16x8_t d1 = highbd_12_convolve8_8_x(s1, filter, offset_lo);
+ uint16x8_t d2 = highbd_12_convolve8_8_x(s2, filter, offset_lo);
+ uint16x8_t d3 = highbd_12_convolve8_8_x(s3, filter, offset_lo);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ s += 8;
+ d += 8;
+ w -= 8;
+ } while (w != 0);
+ src += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height != 0);
+}
+
+static INLINE uint16x8_t highbd_convolve8_8_x(int16x8_t s0[8], int16x8_t filter,
+ int64x2_t offset) {
+ int64x2_t sum[8];
+ sum[0] = aom_sdotq_s16(offset, s0[0], filter);
+ sum[1] = aom_sdotq_s16(offset, s0[1], filter);
+ sum[2] = aom_sdotq_s16(offset, s0[2], filter);
+ sum[3] = aom_sdotq_s16(offset, s0[3], filter);
+ sum[4] = aom_sdotq_s16(offset, s0[4], filter);
+ sum[5] = aom_sdotq_s16(offset, s0[5], filter);
+ sum[6] = aom_sdotq_s16(offset, s0[6], filter);
+ sum[7] = aom_sdotq_s16(offset, s0[7], filter);
+
+ sum[0] = vpaddq_s64(sum[0], sum[1]);
+ sum[2] = vpaddq_s64(sum[2], sum[3]);
+ sum[4] = vpaddq_s64(sum[4], sum[5]);
+ sum[6] = vpaddq_s64(sum[6], sum[7]);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum[0]), vmovn_s64(sum[2]));
+ int32x4_t sum4567 = vcombine_s32(vmovn_s64(sum[4]), vmovn_s64(sum[6]));
+
+ return vcombine_u16(vqrshrun_n_s32(sum0123, ROUND0_BITS),
+ vqrshrun_n_s32(sum4567, ROUND0_BITS));
+}
+
+static INLINE void highbd_dist_wtd_convolve_x_8tap_sve2(
+ const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride,
+ int width, int height, const int16_t *x_filter_ptr, const int bd) {
+ const int64x1_t offset_vec =
+ vcreate_s64((1 << (bd + FILTER_BITS)) + (1 << (bd + FILTER_BITS - 1)));
+ const int64x2_t offset_lo = vcombine_s64(offset_vec, vdup_n_s64(0));
+
+ const int16x8_t filter = vld1q_s16(x_filter_ptr);
+
+ do {
+ const int16_t *s = (const int16_t *)src;
+ uint16_t *d = dst;
+ int w = width;
+
+ do {
+ int16x8_t s0[8], s1[8], s2[8], s3[8];
+ load_s16_8x8(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3],
+ &s0[4], &s0[5], &s0[6], &s0[7]);
+ load_s16_8x8(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3],
+ &s1[4], &s1[5], &s1[6], &s1[7]);
+ load_s16_8x8(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3],
+ &s2[4], &s2[5], &s2[6], &s2[7]);
+ load_s16_8x8(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3],
+ &s3[4], &s3[5], &s3[6], &s3[7]);
+
+ uint16x8_t d0 = highbd_convolve8_8_x(s0, filter, offset_lo);
+ uint16x8_t d1 = highbd_convolve8_8_x(s1, filter, offset_lo);
+ uint16x8_t d2 = highbd_convolve8_8_x(s2, filter, offset_lo);
+ uint16x8_t d3 = highbd_convolve8_8_x(s3, filter, offset_lo);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ s += 8;
+ d += 8;
+ w -= 8;
+ } while (w != 0);
+ src += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height != 0);
+}
+
+// clang-format off
+DECLARE_ALIGNED(16, static const uint16_t, kDeinterleaveTbl[8]) = {
+ 0, 2, 4, 6, 1, 3, 5, 7,
+};
+// clang-format on
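
In the 8-wide 4-tap path below, each aom_svdot_lane_s16 call produces two non-adjacent outputs, pixels x and x + 4, in its two 64-bit lanes, so after narrowing the results sit in the order 0, 4, 1, 5, 2, 6, 3, 7. kDeinterleaveTbl restores natural order. An illustrative sketch of that final aom_tbl_u16 step (assumed semantics: out[i] = res[tbl[i]]):

#include <stdint.h>

// res = { p0, p4, p1, p5, p2, p6, p3, p7 }  ->  out = { p0, p1, ..., p7 }
static void deinterleave8(const uint16_t res[8], uint16_t out[8]) {
  static const int tbl[8] = { 0, 2, 4, 6, 1, 3, 5, 7 };
  for (int i = 0; i < 8; ++i) out[i] = res[tbl[i]];
}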
+
+static INLINE uint16x4_t highbd_12_convolve4_4_x(int16x8_t s0, int16x8_t filter,
+ int64x2_t offset,
+ uint16x8x2_t permute_tbl) {
+ int16x8_t permuted_samples0 = aom_tbl_s16(s0, permute_tbl.val[0]);
+ int16x8_t permuted_samples1 = aom_tbl_s16(s0, permute_tbl.val[1]);
+
+ int64x2_t sum01 = aom_svdot_lane_s16(offset, permuted_samples0, filter, 0);
+ int64x2_t sum23 = aom_svdot_lane_s16(offset, permuted_samples1, filter, 0);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+
+ return vqrshrun_n_s32(sum0123, ROUND0_BITS + 2);
+}
+
+static INLINE uint16x8_t highbd_12_convolve4_8_x(int16x8_t s0[4],
+ int16x8_t filter,
+ int64x2_t offset,
+ uint16x8_t tbl) {
+ int64x2_t sum04 = aom_svdot_lane_s16(offset, s0[0], filter, 0);
+ int64x2_t sum15 = aom_svdot_lane_s16(offset, s0[1], filter, 0);
+ int64x2_t sum26 = aom_svdot_lane_s16(offset, s0[2], filter, 0);
+ int64x2_t sum37 = aom_svdot_lane_s16(offset, s0[3], filter, 0);
+
+ int32x4_t sum0415 = vcombine_s32(vmovn_s64(sum04), vmovn_s64(sum15));
+ int32x4_t sum2637 = vcombine_s32(vmovn_s64(sum26), vmovn_s64(sum37));
+
+ uint16x8_t res = vcombine_u16(vqrshrun_n_s32(sum0415, ROUND0_BITS + 2),
+ vqrshrun_n_s32(sum2637, ROUND0_BITS + 2));
+ return aom_tbl_u16(res, tbl);
+}
+
+static INLINE void highbd_12_dist_wtd_convolve_x_4tap_sve2(
+ const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride,
+ int width, int height, const int16_t *x_filter_ptr) {
+ const int64x2_t offset =
+ vdupq_n_s64((1 << (12 + FILTER_BITS)) + (1 << (12 + FILTER_BITS - 1)));
+
+ const int16x4_t x_filter = vld1_s16(x_filter_ptr + 2);
+ const int16x8_t filter = vcombine_s16(x_filter, vdup_n_s16(0));
+
+ if (width == 4) {
+ uint16x8x2_t permute_tbl = vld1q_u16_x2(kDotProdTbl);
+
+ const int16_t *s = (const int16_t *)(src);
+
+ do {
+ int16x8_t s0, s1, s2, s3;
+ load_s16_8x4(s, src_stride, &s0, &s1, &s2, &s3);
+
+ uint16x4_t d0 = highbd_12_convolve4_4_x(s0, filter, offset, permute_tbl);
+ uint16x4_t d1 = highbd_12_convolve4_4_x(s1, filter, offset, permute_tbl);
+ uint16x4_t d2 = highbd_12_convolve4_4_x(s2, filter, offset, permute_tbl);
+ uint16x4_t d3 = highbd_12_convolve4_4_x(s3, filter, offset, permute_tbl);
+
+ store_u16_4x4(dst, dst_stride, d0, d1, d2, d3);
+
+ s += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height != 0);
+ } else {
+ uint16x8_t idx = vld1q_u16(kDeinterleaveTbl);
+
+ do {
+ const int16_t *s = (const int16_t *)(src);
+ uint16_t *d = dst;
+ int w = width;
+
+ do {
+ int16x8_t s0[4], s1[4], s2[4], s3[4];
+ load_s16_8x4(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3]);
+ load_s16_8x4(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3]);
+ load_s16_8x4(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3]);
+ load_s16_8x4(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3]);
+
+ uint16x8_t d0 = highbd_12_convolve4_8_x(s0, filter, offset, idx);
+ uint16x8_t d1 = highbd_12_convolve4_8_x(s1, filter, offset, idx);
+ uint16x8_t d2 = highbd_12_convolve4_8_x(s2, filter, offset, idx);
+ uint16x8_t d3 = highbd_12_convolve4_8_x(s3, filter, offset, idx);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ s += 8;
+ d += 8;
+ w -= 8;
+ } while (w != 0);
+ src += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height != 0);
+ }
+}
+
+static INLINE uint16x4_t highbd_convolve4_4_x(int16x8_t s0, int16x8_t filter,
+ int64x2_t offset,
+ uint16x8x2_t permute_tbl) {
+ int16x8_t permuted_samples0 = aom_tbl_s16(s0, permute_tbl.val[0]);
+ int16x8_t permuted_samples1 = aom_tbl_s16(s0, permute_tbl.val[1]);
+
+ int64x2_t sum01 = aom_svdot_lane_s16(offset, permuted_samples0, filter, 0);
+ int64x2_t sum23 = aom_svdot_lane_s16(offset, permuted_samples1, filter, 0);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+
+ return vqrshrun_n_s32(sum0123, ROUND0_BITS);
+}
+
+static INLINE uint16x8_t highbd_convolve4_8_x(int16x8_t s0[4], int16x8_t filter,
+ int64x2_t offset,
+ uint16x8_t tbl) {
+ int64x2_t sum04 = aom_svdot_lane_s16(offset, s0[0], filter, 0);
+ int64x2_t sum15 = aom_svdot_lane_s16(offset, s0[1], filter, 0);
+ int64x2_t sum26 = aom_svdot_lane_s16(offset, s0[2], filter, 0);
+ int64x2_t sum37 = aom_svdot_lane_s16(offset, s0[3], filter, 0);
+
+ int32x4_t sum0415 = vcombine_s32(vmovn_s64(sum04), vmovn_s64(sum15));
+ int32x4_t sum2637 = vcombine_s32(vmovn_s64(sum26), vmovn_s64(sum37));
+
+ uint16x8_t res = vcombine_u16(vqrshrun_n_s32(sum0415, ROUND0_BITS),
+ vqrshrun_n_s32(sum2637, ROUND0_BITS));
+ return aom_tbl_u16(res, tbl);
+}
+
+static INLINE void highbd_dist_wtd_convolve_x_4tap_sve2(
+ const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride,
+ int width, int height, const int16_t *x_filter_ptr, const int bd) {
+ const int64x2_t offset =
+ vdupq_n_s64((1 << (bd + FILTER_BITS)) + (1 << (bd + FILTER_BITS - 1)));
+
+ const int16x4_t x_filter = vld1_s16(x_filter_ptr + 2);
+ const int16x8_t filter = vcombine_s16(x_filter, vdup_n_s16(0));
+
+ if (width == 4) {
+ uint16x8x2_t permute_tbl = vld1q_u16_x2(kDotProdTbl);
+
+ const int16_t *s = (const int16_t *)(src);
+
+ do {
+ int16x8_t s0, s1, s2, s3;
+ load_s16_8x4(s, src_stride, &s0, &s1, &s2, &s3);
+
+ uint16x4_t d0 = highbd_convolve4_4_x(s0, filter, offset, permute_tbl);
+ uint16x4_t d1 = highbd_convolve4_4_x(s1, filter, offset, permute_tbl);
+ uint16x4_t d2 = highbd_convolve4_4_x(s2, filter, offset, permute_tbl);
+ uint16x4_t d3 = highbd_convolve4_4_x(s3, filter, offset, permute_tbl);
+
+ store_u16_4x4(dst, dst_stride, d0, d1, d2, d3);
+
+ s += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height != 0);
+ } else {
+ uint16x8_t idx = vld1q_u16(kDeinterleaveTbl);
+
+ do {
+ const int16_t *s = (const int16_t *)(src);
+ uint16_t *d = dst;
+ int w = width;
+
+ do {
+ int16x8_t s0[4], s1[4], s2[4], s3[4];
+ load_s16_8x4(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3]);
+ load_s16_8x4(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3]);
+ load_s16_8x4(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3]);
+ load_s16_8x4(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3]);
+
+ uint16x8_t d0 = highbd_convolve4_8_x(s0, filter, offset, idx);
+ uint16x8_t d1 = highbd_convolve4_8_x(s1, filter, offset, idx);
+ uint16x8_t d2 = highbd_convolve4_8_x(s2, filter, offset, idx);
+ uint16x8_t d3 = highbd_convolve4_8_x(s3, filter, offset, idx);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ s += 8;
+ d += 8;
+ w -= 8;
+ } while (w != 0);
+ src += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height != 0);
+ }
+}
+
+void av1_highbd_dist_wtd_convolve_x_sve2(
+ const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride, int w,
+ int h, const InterpFilterParams *filter_params_x, const int subpel_x_qn,
+ ConvolveParams *conv_params, int bd) {
+ DECLARE_ALIGNED(16, uint16_t,
+ im_block[(MAX_SB_SIZE + MAX_FILTER_TAP) * MAX_SB_SIZE]);
+ CONV_BUF_TYPE *dst16 = conv_params->dst;
+ const int x_filter_taps = get_filter_tap(filter_params_x, subpel_x_qn);
+
+ if (x_filter_taps == 6) {
+ av1_highbd_dist_wtd_convolve_x_neon(src, src_stride, dst, dst_stride, w, h,
+ filter_params_x, subpel_x_qn,
+ conv_params, bd);
+ return;
+ }
+
+ int dst16_stride = conv_params->dst_stride;
+ const int im_stride = MAX_SB_SIZE;
+ const int horiz_offset = filter_params_x->taps / 2 - 1;
+ assert(FILTER_BITS == COMPOUND_ROUND1_BITS);
+
+ const int16_t *x_filter_ptr = av1_get_interp_filter_subpel_kernel(
+ filter_params_x, subpel_x_qn & SUBPEL_MASK);
+
+ src -= horiz_offset;
+
+ if (bd == 12) {
+ if (conv_params->do_average) {
+ if (x_filter_taps <= 4) {
+ highbd_12_dist_wtd_convolve_x_4tap_sve2(src + 2, src_stride, im_block,
+ im_stride, w, h, x_filter_ptr);
+ } else {
+ highbd_12_dist_wtd_convolve_x_8tap_sve2(src, src_stride, im_block,
+ im_stride, w, h, x_filter_ptr);
+ }
+
+ if (conv_params->use_dist_wtd_comp_avg) {
+ highbd_12_dist_wtd_comp_avg_neon(im_block, im_stride, dst, dst_stride,
+ w, h, conv_params);
+
+ } else {
+ highbd_12_comp_avg_neon(im_block, im_stride, dst, dst_stride, w, h,
+ conv_params);
+ }
+ } else {
+ if (x_filter_taps <= 4) {
+ highbd_12_dist_wtd_convolve_x_4tap_sve2(
+ src + 2, src_stride, dst16, dst16_stride, w, h, x_filter_ptr);
+ } else {
+ highbd_12_dist_wtd_convolve_x_8tap_sve2(
+ src, src_stride, dst16, dst16_stride, w, h, x_filter_ptr);
+ }
+ }
+ } else {
+ if (conv_params->do_average) {
+ if (x_filter_taps <= 4) {
+ highbd_dist_wtd_convolve_x_4tap_sve2(src + 2, src_stride, im_block,
+ im_stride, w, h, x_filter_ptr, bd);
+ } else {
+ highbd_dist_wtd_convolve_x_8tap_sve2(src, src_stride, im_block,
+ im_stride, w, h, x_filter_ptr, bd);
+ }
+
+ if (conv_params->use_dist_wtd_comp_avg) {
+ highbd_dist_wtd_comp_avg_neon(im_block, im_stride, dst, dst_stride, w,
+ h, conv_params, bd);
+ } else {
+ highbd_comp_avg_neon(im_block, im_stride, dst, dst_stride, w, h,
+ conv_params, bd);
+ }
+ } else {
+ if (x_filter_taps <= 4) {
+ highbd_dist_wtd_convolve_x_4tap_sve2(
+ src + 2, src_stride, dst16, dst16_stride, w, h, x_filter_ptr, bd);
+ } else {
+ highbd_dist_wtd_convolve_x_8tap_sve2(
+ src, src_stride, dst16, dst16_stride, w, h, x_filter_ptr, bd);
+ }
+ }
+ }
+}
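
The dispatcher above falls back to the NEON implementation for 6-tap filters and otherwise selects the 4-tap or 8-tap SVE2 kernel. Sub-8-tap kernels returned by av1_get_interp_filter_subpel_kernel are stored in an 8-entry array with zero outer taps, which is why the 4-tap paths read x_filter_ptr + 2 and start the source two columns later (src + 2 here, src + 2 * src_stride in the vertical functions). A small sketch of that equivalence, using a hypothetical filt8 array:

#include <stdint.h>

// An 8-tap kernel whose outer taps are zero reduces to its middle four taps
// applied two samples further in; the difference below is zero whenever
// filt8[0], filt8[1], filt8[6] and filt8[7] are all zero.
static int32_t conv8_minus_conv4(const int16_t *src, const int16_t filt8[8],
                                 int x) {
  int32_t sum8 = 0, sum4 = 0;
  for (int k = 0; k < 8; ++k) sum8 += src[x + k] * filt8[k];
  for (int k = 0; k < 4; ++k) sum4 += src[x + 2 + k] * filt8[2 + k];
  return sum8 - sum4;
}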
+
+static INLINE uint16x4_t highbd_12_convolve8_4_y(int16x8_t samples_lo[2],
+ int16x8_t samples_hi[2],
+ int16x8_t filter,
+ int64x2_t offset) {
+ int64x2_t sum01 = aom_svdot_lane_s16(offset, samples_lo[0], filter, 0);
+ sum01 = aom_svdot_lane_s16(sum01, samples_hi[0], filter, 1);
+
+ int64x2_t sum23 = aom_svdot_lane_s16(offset, samples_lo[1], filter, 0);
+ sum23 = aom_svdot_lane_s16(sum23, samples_hi[1], filter, 1);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+
+ return vqrshrun_n_s32(sum0123, ROUND0_BITS + 2);
+}
+
+static INLINE uint16x8_t highbd_12_convolve8_8_y(int16x8_t samples_lo[4],
+ int16x8_t samples_hi[4],
+ int16x8_t filter,
+ int64x2_t offset) {
+ int64x2_t sum01 = aom_svdot_lane_s16(offset, samples_lo[0], filter, 0);
+ sum01 = aom_svdot_lane_s16(sum01, samples_hi[0], filter, 1);
+
+ int64x2_t sum23 = aom_svdot_lane_s16(offset, samples_lo[1], filter, 0);
+ sum23 = aom_svdot_lane_s16(sum23, samples_hi[1], filter, 1);
+
+ int64x2_t sum45 = aom_svdot_lane_s16(offset, samples_lo[2], filter, 0);
+ sum45 = aom_svdot_lane_s16(sum45, samples_hi[2], filter, 1);
+
+ int64x2_t sum67 = aom_svdot_lane_s16(offset, samples_lo[3], filter, 0);
+ sum67 = aom_svdot_lane_s16(sum67, samples_hi[3], filter, 1);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+ int32x4_t sum4567 = vcombine_s32(vmovn_s64(sum45), vmovn_s64(sum67));
+
+ return vcombine_u16(vqrshrun_n_s32(sum0123, ROUND0_BITS + 2),
+ vqrshrun_n_s32(sum4567, ROUND0_BITS + 2));
+}
+
+static INLINE void highbd_12_dist_wtd_convolve_y_8tap_sve2(
+ const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride,
+ int width, int height, const int16_t *y_filter_ptr) {
+ const int64x2_t offset =
+ vdupq_n_s64((1 << (12 + FILTER_BITS)) + (1 << (12 + FILTER_BITS - 1)));
+ const int16x8_t y_filter = vld1q_s16(y_filter_ptr);
+
+ uint16x8x3_t merge_block_tbl = vld1q_u16_x3(kDotProdMergeBlockTbl);
+ // Scale indices by size of the true vector length to avoid reading from an
+ // 'undefined' portion of a vector on a system with SVE vectors > 128-bit.
+ uint16x8_t correction0 =
+ vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000000000000ULL));
+ merge_block_tbl.val[0] = vaddq_u16(merge_block_tbl.val[0], correction0);
+ uint16x8_t correction1 =
+ vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000100000000ULL));
+ merge_block_tbl.val[1] = vaddq_u16(merge_block_tbl.val[1], correction1);
+
+ uint16x8_t correction2 =
+ vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000100010000ULL));
+ merge_block_tbl.val[2] = vaddq_u16(merge_block_tbl.val[2], correction2);
+
+ if (width == 4) {
+ int16_t *s = (int16_t *)src;
+ int16x4_t s0, s1, s2, s3, s4, s5, s6;
+ load_s16_4x7(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6);
+ s += 7 * src_stride;
+
+ // This operation combines a conventional transpose and the sample permute
+ // required before computing the dot product.
+ int16x8_t s0123[2], s1234[2], s2345[2], s3456[2];
+ transpose_concat_4x4(s0, s1, s2, s3, s0123);
+ transpose_concat_4x4(s1, s2, s3, s4, s1234);
+ transpose_concat_4x4(s2, s3, s4, s5, s2345);
+ transpose_concat_4x4(s3, s4, s5, s6, s3456);
+
+ do {
+ int16x4_t s7, s8, s9, s10;
+ load_s16_4x4(s, src_stride, &s7, &s8, &s9, &s10);
+
+ int16x8_t s4567[2], s5678[2], s6789[2], s789A[2];
+ // Transpose and shuffle the 4 lines that were loaded.
+ transpose_concat_4x4(s7, s8, s9, s10, s789A);
+
+ // Merge new data into block from previous iteration.
+ aom_tbl2x2_s16(s3456, s789A, merge_block_tbl.val[0], s4567);
+ aom_tbl2x2_s16(s3456, s789A, merge_block_tbl.val[1], s5678);
+ aom_tbl2x2_s16(s3456, s789A, merge_block_tbl.val[2], s6789);
+
+ uint16x4_t d0 = highbd_12_convolve8_4_y(s0123, s4567, y_filter, offset);
+ uint16x4_t d1 = highbd_12_convolve8_4_y(s1234, s5678, y_filter, offset);
+ uint16x4_t d2 = highbd_12_convolve8_4_y(s2345, s6789, y_filter, offset);
+ uint16x4_t d3 = highbd_12_convolve8_4_y(s3456, s789A, y_filter, offset);
+
+ store_u16_4x4(dst, dst_stride, d0, d1, d2, d3);
+
+ // Prepare block for next iteration - re-using as much as possible.
+ // Shuffle everything up four rows.
+ s0123[0] = s4567[0];
+ s0123[1] = s4567[1];
+ s1234[0] = s5678[0];
+ s1234[1] = s5678[1];
+ s2345[0] = s6789[0];
+ s2345[1] = s6789[1];
+ s3456[0] = s789A[0];
+ s3456[1] = s789A[1];
+
+ s += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height != 0);
+ } else {
+ do {
+ int h = height;
+ int16_t *s = (int16_t *)src;
+ uint16_t *d = dst;
+
+ int16x8_t s0, s1, s2, s3, s4, s5, s6;
+ load_s16_8x7(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6);
+ s += 7 * src_stride;
+
+ // This operation combines a conventional transpose and the sample permute
+ // required before computing the dot product.
+ int16x8_t s0123[4], s1234[4], s2345[4], s3456[4];
+ transpose_concat_8x4(s0, s1, s2, s3, s0123);
+ transpose_concat_8x4(s1, s2, s3, s4, s1234);
+ transpose_concat_8x4(s2, s3, s4, s5, s2345);
+ transpose_concat_8x4(s3, s4, s5, s6, s3456);
+
+ do {
+ int16x8_t s7, s8, s9, s10;
+ load_s16_8x4(s, src_stride, &s7, &s8, &s9, &s10);
+ int16x8_t s4567[4], s5678[4], s6789[4], s789A[4];
+
+ // Transpose and shuffle the 4 lines that were loaded.
+ transpose_concat_8x4(s7, s8, s9, s10, s789A);
+
+ // Merge new data into block from previous iteration.
+ aom_tbl2x4_s16(s3456, s789A, merge_block_tbl.val[0], s4567);
+ aom_tbl2x4_s16(s3456, s789A, merge_block_tbl.val[1], s5678);
+ aom_tbl2x4_s16(s3456, s789A, merge_block_tbl.val[2], s6789);
+
+ uint16x8_t d0 = highbd_12_convolve8_8_y(s0123, s4567, y_filter, offset);
+ uint16x8_t d1 = highbd_12_convolve8_8_y(s1234, s5678, y_filter, offset);
+ uint16x8_t d2 = highbd_12_convolve8_8_y(s2345, s6789, y_filter, offset);
+ uint16x8_t d3 = highbd_12_convolve8_8_y(s3456, s789A, y_filter, offset);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ // Prepare block for next iteration - re-using as much as possible.
+ // Shuffle everything up four rows.
+ s0123[0] = s4567[0];
+ s0123[1] = s4567[1];
+ s0123[2] = s4567[2];
+ s0123[3] = s4567[3];
+ s1234[0] = s5678[0];
+ s1234[1] = s5678[1];
+ s1234[2] = s5678[2];
+ s1234[3] = s5678[3];
+ s2345[0] = s6789[0];
+ s2345[1] = s6789[1];
+ s2345[2] = s6789[2];
+ s2345[3] = s6789[3];
+ s3456[0] = s789A[0];
+ s3456[1] = s789A[1];
+ s3456[2] = s789A[2];
+ s3456[3] = s789A[3];
+
+ s += 4 * src_stride;
+ d += 4 * dst_stride;
+ h -= 4;
+ } while (h != 0);
+ src += 8;
+ dst += 8;
+ width -= 8;
+ } while (width != 0);
+ }
+}
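
The vertical kernels above work on four rows per iteration: incoming rows are transposed into per-column sample vectors, and merge_block_tbl assembles the intermediate row windows by table-selecting across the previous block and the newly loaded one. The svcnth() corrections exist because aom_tbl2x*_s16 index the concatenation of two full-length SVE vectors, so entries meant to pick from the second (new) vector must be biased by the real element count rather than a hard-coded 8. A rough model of that lookup (assumed semantics, with svcnth() == 8 on a 128-bit implementation):

#include <stdint.h>

// svtbl2-style lookup over the concatenation of two vectors of svcnth_val
// 16-bit elements: indices below svcnth_val read the old block, the rest
// read the new block.
static int16_t tbl2_element(const int16_t *old_block, const int16_t *new_block,
                            unsigned idx, unsigned svcnth_val) {
  return idx < svcnth_val ? old_block[idx] : new_block[idx - svcnth_val];
}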
+
+static INLINE uint16x4_t highbd_convolve8_4_y(int16x8_t samples_lo[2],
+ int16x8_t samples_hi[2],
+ int16x8_t filter,
+ int64x2_t offset) {
+ int64x2_t sum01 = aom_svdot_lane_s16(offset, samples_lo[0], filter, 0);
+ sum01 = aom_svdot_lane_s16(sum01, samples_hi[0], filter, 1);
+
+ int64x2_t sum23 = aom_svdot_lane_s16(offset, samples_lo[1], filter, 0);
+ sum23 = aom_svdot_lane_s16(sum23, samples_hi[1], filter, 1);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+
+ return vqrshrun_n_s32(sum0123, ROUND0_BITS);
+}
+
+static INLINE uint16x8_t highbd_convolve8_8_y(int16x8_t samples_lo[4],
+ int16x8_t samples_hi[4],
+ int16x8_t filter,
+ int64x2_t offset) {
+ int64x2_t sum01 = aom_svdot_lane_s16(offset, samples_lo[0], filter, 0);
+ sum01 = aom_svdot_lane_s16(sum01, samples_hi[0], filter, 1);
+
+ int64x2_t sum23 = aom_svdot_lane_s16(offset, samples_lo[1], filter, 0);
+ sum23 = aom_svdot_lane_s16(sum23, samples_hi[1], filter, 1);
+
+ int64x2_t sum45 = aom_svdot_lane_s16(offset, samples_lo[2], filter, 0);
+ sum45 = aom_svdot_lane_s16(sum45, samples_hi[2], filter, 1);
+
+ int64x2_t sum67 = aom_svdot_lane_s16(offset, samples_lo[3], filter, 0);
+ sum67 = aom_svdot_lane_s16(sum67, samples_hi[3], filter, 1);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+ int32x4_t sum4567 = vcombine_s32(vmovn_s64(sum45), vmovn_s64(sum67));
+
+ return vcombine_u16(vqrshrun_n_s32(sum0123, ROUND0_BITS),
+ vqrshrun_n_s32(sum4567, ROUND0_BITS));
+}
+
+static INLINE void highbd_dist_wtd_convolve_y_8tap_sve2(
+ const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride,
+ int width, int height, const int16_t *y_filter_ptr, const int bd) {
+ const int64x2_t offset =
+ vdupq_n_s64((1 << (bd + FILTER_BITS)) + (1 << (bd + FILTER_BITS - 1)));
+ const int16x8_t y_filter = vld1q_s16(y_filter_ptr);
+
+ uint16x8x3_t merge_block_tbl = vld1q_u16_x3(kDotProdMergeBlockTbl);
+ // Scale indices by size of the true vector length to avoid reading from an
+ // 'undefined' portion of a vector on a system with SVE vectors > 128-bit.
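+ // svcnth() is the number of 16-bit lanes in one SVE vector; each 64-bit
+ // constant below adds that count to exactly the table indices that must
+ // select elements from the second operand of the two-vector TBL.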
+ uint16x8_t correction0 =
+ vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000000000000ULL));
+ merge_block_tbl.val[0] = vaddq_u16(merge_block_tbl.val[0], correction0);
+ uint16x8_t correction1 =
+ vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000100000000ULL));
+ merge_block_tbl.val[1] = vaddq_u16(merge_block_tbl.val[1], correction1);
+
+ uint16x8_t correction2 =
+ vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000100010000ULL));
+ merge_block_tbl.val[2] = vaddq_u16(merge_block_tbl.val[2], correction2);
+
+ if (width == 4) {
+ int16_t *s = (int16_t *)src;
+ int16x4_t s0, s1, s2, s3, s4, s5, s6;
+ load_s16_4x7(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6);
+ s += 7 * src_stride;
+
+ // This operation combines a conventional transpose and the sample permute
+ // required before computing the dot product.
+ int16x8_t s0123[2], s1234[2], s2345[2], s3456[2];
+ transpose_concat_4x4(s0, s1, s2, s3, s0123);
+ transpose_concat_4x4(s1, s2, s3, s4, s1234);
+ transpose_concat_4x4(s2, s3, s4, s5, s2345);
+ transpose_concat_4x4(s3, s4, s5, s6, s3456);
+
+ do {
+ int16x4_t s7, s8, s9, s10;
+ load_s16_4x4(s, src_stride, &s7, &s8, &s9, &s10);
+
+ int16x8_t s4567[2], s5678[2], s6789[2], s789A[2];
+ // Transpose and shuffle the 4 lines that were loaded.
+ transpose_concat_4x4(s7, s8, s9, s10, s789A);
+
+ // Merge new data into block from previous iteration.
+ aom_tbl2x2_s16(s3456, s789A, merge_block_tbl.val[0], s4567);
+ aom_tbl2x2_s16(s3456, s789A, merge_block_tbl.val[1], s5678);
+ aom_tbl2x2_s16(s3456, s789A, merge_block_tbl.val[2], s6789);
+
+ uint16x4_t d0 = highbd_convolve8_4_y(s0123, s4567, y_filter, offset);
+ uint16x4_t d1 = highbd_convolve8_4_y(s1234, s5678, y_filter, offset);
+ uint16x4_t d2 = highbd_convolve8_4_y(s2345, s6789, y_filter, offset);
+ uint16x4_t d3 = highbd_convolve8_4_y(s3456, s789A, y_filter, offset);
+
+ store_u16_4x4(dst, dst_stride, d0, d1, d2, d3);
+
+ // Prepare block for next iteration - re-using as much as possible.
+ // Shuffle everything up four rows.
+ s0123[0] = s4567[0];
+ s0123[1] = s4567[1];
+ s1234[0] = s5678[0];
+ s1234[1] = s5678[1];
+ s2345[0] = s6789[0];
+ s2345[1] = s6789[1];
+ s3456[0] = s789A[0];
+ s3456[1] = s789A[1];
+
+ s += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height != 0);
+ } else {
+ do {
+ int h = height;
+ int16_t *s = (int16_t *)src;
+ uint16_t *d = dst;
+
+ int16x8_t s0, s1, s2, s3, s4, s5, s6;
+ load_s16_8x7(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6);
+ s += 7 * src_stride;
+
+ // This operation combines a conventional transpose and the sample permute
+ // required before computing the dot product.
+ int16x8_t s0123[4], s1234[4], s2345[4], s3456[4];
+ transpose_concat_8x4(s0, s1, s2, s3, s0123);
+ transpose_concat_8x4(s1, s2, s3, s4, s1234);
+ transpose_concat_8x4(s2, s3, s4, s5, s2345);
+ transpose_concat_8x4(s3, s4, s5, s6, s3456);
+
+ do {
+ int16x8_t s7, s8, s9, s10;
+ load_s16_8x4(s, src_stride, &s7, &s8, &s9, &s10);
+ int16x8_t s4567[4], s5678[4], s6789[4], s789A[4];
+
+ // Transpose and shuffle the 4 lines that were loaded.
+ transpose_concat_8x4(s7, s8, s9, s10, s789A);
+
+ // Merge new data into block from previous iteration.
+ aom_tbl2x4_s16(s3456, s789A, merge_block_tbl.val[0], s4567);
+ aom_tbl2x4_s16(s3456, s789A, merge_block_tbl.val[1], s5678);
+ aom_tbl2x4_s16(s3456, s789A, merge_block_tbl.val[2], s6789);
+
+ uint16x8_t d0 = highbd_convolve8_8_y(s0123, s4567, y_filter, offset);
+ uint16x8_t d1 = highbd_convolve8_8_y(s1234, s5678, y_filter, offset);
+ uint16x8_t d2 = highbd_convolve8_8_y(s2345, s6789, y_filter, offset);
+ uint16x8_t d3 = highbd_convolve8_8_y(s3456, s789A, y_filter, offset);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ // Prepare block for next iteration - re-using as much as possible.
+ // Shuffle everything up four rows.
+ s0123[0] = s4567[0];
+ s0123[1] = s4567[1];
+ s0123[2] = s4567[2];
+ s0123[3] = s4567[3];
+ s1234[0] = s5678[0];
+ s1234[1] = s5678[1];
+ s1234[2] = s5678[2];
+ s1234[3] = s5678[3];
+ s2345[0] = s6789[0];
+ s2345[1] = s6789[1];
+ s2345[2] = s6789[2];
+ s2345[3] = s6789[3];
+ s3456[0] = s789A[0];
+ s3456[1] = s789A[1];
+ s3456[2] = s789A[2];
+ s3456[3] = s789A[3];
+
+ s += 4 * src_stride;
+ d += 4 * dst_stride;
+ h -= 4;
+ } while (h != 0);
+ src += 8;
+ dst += 8;
+ width -= 8;
+ } while (width != 0);
+ }
+}
+
+void av1_highbd_dist_wtd_convolve_y_sve2(
+ const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride, int w,
+ int h, const InterpFilterParams *filter_params_y, const int subpel_y_qn,
+ ConvolveParams *conv_params, int bd) {
+ DECLARE_ALIGNED(16, uint16_t,
+ im_block[(MAX_SB_SIZE + MAX_FILTER_TAP) * MAX_SB_SIZE]);
+ CONV_BUF_TYPE *dst16 = conv_params->dst;
+ const int y_filter_taps = get_filter_tap(filter_params_y, subpel_y_qn);
+
+ if (y_filter_taps != 8) {
+ av1_highbd_dist_wtd_convolve_y_neon(src, src_stride, dst, dst_stride, w, h,
+ filter_params_y, subpel_y_qn,
+ conv_params, bd);
+ return;
+ }
+
+ int dst16_stride = conv_params->dst_stride;
+ const int im_stride = MAX_SB_SIZE;
+ const int vert_offset = filter_params_y->taps / 2 - 1;
+ assert(FILTER_BITS == COMPOUND_ROUND1_BITS);
+
+ const int16_t *y_filter_ptr = av1_get_interp_filter_subpel_kernel(
+ filter_params_y, subpel_y_qn & SUBPEL_MASK);
+
+ src -= vert_offset * src_stride;
+
+ if (bd == 12) {
+ if (conv_params->do_average) {
+ highbd_12_dist_wtd_convolve_y_8tap_sve2(src, src_stride, im_block,
+ im_stride, w, h, y_filter_ptr);
+ if (conv_params->use_dist_wtd_comp_avg) {
+ highbd_12_dist_wtd_comp_avg_neon(im_block, im_stride, dst, dst_stride,
+ w, h, conv_params);
+ } else {
+ highbd_12_comp_avg_neon(im_block, im_stride, dst, dst_stride, w, h,
+ conv_params);
+ }
+ } else {
+ highbd_12_dist_wtd_convolve_y_8tap_sve2(src, src_stride, dst16,
+ dst16_stride, w, h, y_filter_ptr);
+ }
+ } else {
+ if (conv_params->do_average) {
+ highbd_dist_wtd_convolve_y_8tap_sve2(src, src_stride, im_block, im_stride,
+ w, h, y_filter_ptr, bd);
+ if (conv_params->use_dist_wtd_comp_avg) {
+ highbd_dist_wtd_comp_avg_neon(im_block, im_stride, dst, dst_stride, w,
+ h, conv_params, bd);
+ } else {
+ highbd_comp_avg_neon(im_block, im_stride, dst, dst_stride, w, h,
+ conv_params, bd);
+ }
+ } else {
+ highbd_dist_wtd_convolve_y_8tap_sve2(src, src_stride, dst16, dst16_stride,
+ w, h, y_filter_ptr, bd);
+ }
+ }
+}
+
+static INLINE void highbd_12_dist_wtd_convolve_2d_horiz_8tap_sve2(
+ const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride,
+ int width, int height, const int16_t *x_filter_ptr) {
+ const int64x2_t offset = vdupq_n_s64(1 << (12 + FILTER_BITS - 2));
+ const int16x8_t filter = vld1q_s16(x_filter_ptr);
+
+ // We are only doing 8-tap and 4-tap vertical convolutions, therefore we know
+ // that im_h % 4 = 3, so we can do the loop across the whole block 4 rows at
+ // a time and then process the last 3 rows separately.
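+ // (im_h = h + taps - 1, with h a multiple of 4 and taps either 4 or 8.)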
+
+ do {
+ const int16_t *s = (const int16_t *)src;
+ uint16_t *d = dst;
+ int w = width;
+
+ do {
+ int16x8_t s0[8], s1[8], s2[8], s3[8];
+ load_s16_8x8(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3],
+ &s0[4], &s0[5], &s0[6], &s0[7]);
+ load_s16_8x8(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3],
+ &s1[4], &s1[5], &s1[6], &s1[7]);
+ load_s16_8x8(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3],
+ &s2[4], &s2[5], &s2[6], &s2[7]);
+ load_s16_8x8(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3],
+ &s3[4], &s3[5], &s3[6], &s3[7]);
+
+ uint16x8_t d0 = highbd_12_convolve8_8_x(s0, filter, offset);
+ uint16x8_t d1 = highbd_12_convolve8_8_x(s1, filter, offset);
+ uint16x8_t d2 = highbd_12_convolve8_8_x(s2, filter, offset);
+ uint16x8_t d3 = highbd_12_convolve8_8_x(s3, filter, offset);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ s += 8;
+ d += 8;
+ w -= 8;
+ } while (w != 0);
+ src += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height > 4);
+
+ // Process final 3 rows.
+ const int16_t *s = (const int16_t *)src;
+ do {
+ int16x8_t s0[8], s1[8], s2[8];
+ load_s16_8x8(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3], &s0[4],
+ &s0[5], &s0[6], &s0[7]);
+ load_s16_8x8(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3], &s1[4],
+ &s1[5], &s1[6], &s1[7]);
+ load_s16_8x8(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3], &s2[4],
+ &s2[5], &s2[6], &s2[7]);
+
+ uint16x8_t d0 = highbd_12_convolve8_8_x(s0, filter, offset);
+ uint16x8_t d1 = highbd_12_convolve8_8_x(s1, filter, offset);
+ uint16x8_t d2 = highbd_12_convolve8_8_x(s2, filter, offset);
+
+ store_u16_8x3(dst, dst_stride, d0, d1, d2);
+ s += 8;
+ dst += 8;
+ width -= 8;
+ } while (width != 0);
+}
+
+static INLINE void highbd_dist_wtd_convolve_2d_horiz_8tap_sve2(
+ const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride,
+ int width, int height, const int16_t *x_filter_ptr, const int bd) {
+ const int64x2_t offset = vdupq_n_s64(1 << (bd + FILTER_BITS - 2));
+ const int16x8_t filter = vld1q_s16(x_filter_ptr);
+
+ // We are only doing 8-tap and 4-tap vertical convolutions, therefore we know
+ // that im_h % 4 = 3, so we can do the loop across the whole block 4 rows at
+ // a time and then process the last 3 rows separately.
+
+ do {
+ const int16_t *s = (const int16_t *)src;
+ uint16_t *d = dst;
+ int w = width;
+
+ do {
+ int16x8_t s0[8], s1[8], s2[8], s3[8];
+ load_s16_8x8(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3],
+ &s0[4], &s0[5], &s0[6], &s0[7]);
+ load_s16_8x8(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3],
+ &s1[4], &s1[5], &s1[6], &s1[7]);
+ load_s16_8x8(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3],
+ &s2[4], &s2[5], &s2[6], &s2[7]);
+ load_s16_8x8(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3],
+ &s3[4], &s3[5], &s3[6], &s3[7]);
+
+ uint16x8_t d0 = highbd_convolve8_8_x(s0, filter, offset);
+ uint16x8_t d1 = highbd_convolve8_8_x(s1, filter, offset);
+ uint16x8_t d2 = highbd_convolve8_8_x(s2, filter, offset);
+ uint16x8_t d3 = highbd_convolve8_8_x(s3, filter, offset);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ s += 8;
+ d += 8;
+ w -= 8;
+ } while (w != 0);
+ src += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height > 4);
+
+ // Process final 3 rows.
+ const int16_t *s = (const int16_t *)src;
+ do {
+ int16x8_t s0[8], s1[8], s2[8];
+ load_s16_8x8(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3], &s0[4],
+ &s0[5], &s0[6], &s0[7]);
+ load_s16_8x8(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3], &s1[4],
+ &s1[5], &s1[6], &s1[7]);
+ load_s16_8x8(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3], &s2[4],
+ &s2[5], &s2[6], &s2[7]);
+
+ uint16x8_t d0 = highbd_convolve8_8_x(s0, filter, offset);
+ uint16x8_t d1 = highbd_convolve8_8_x(s1, filter, offset);
+ uint16x8_t d2 = highbd_convolve8_8_x(s2, filter, offset);
+
+ store_u16_8x3(dst, dst_stride, d0, d1, d2);
+ s += 8;
+ dst += 8;
+ width -= 8;
+ } while (width != 0);
+}
+
+static INLINE void highbd_12_dist_wtd_convolve_2d_horiz_4tap_sve2(
+ const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride,
+ int width, int height, const int16_t *x_filter_ptr) {
+ const int64x2_t offset = vdupq_n_s64(1 << (12 + FILTER_BITS - 1));
+ const int16x4_t x_filter = vld1_s16(x_filter_ptr + 2);
+ const int16x8_t filter = vcombine_s16(x_filter, vdup_n_s16(0));
+
+ // We are only doing 8-tap and 4-tap vertical convolutions, therefore we know
+ // that im_h % 4 = 3, so we can do the loop across the whole block 4 rows at
+ // a time and then process the last 3 rows separately.
+
+ if (width == 4) {
+ uint16x8x2_t permute_tbl = vld1q_u16_x2(kDotProdTbl);
+
+ const int16_t *s = (const int16_t *)(src);
+
+ do {
+ int16x8_t s0, s1, s2, s3;
+ load_s16_8x4(s, src_stride, &s0, &s1, &s2, &s3);
+
+ uint16x4_t d0 = highbd_12_convolve4_4_x(s0, filter, offset, permute_tbl);
+ uint16x4_t d1 = highbd_12_convolve4_4_x(s1, filter, offset, permute_tbl);
+ uint16x4_t d2 = highbd_12_convolve4_4_x(s2, filter, offset, permute_tbl);
+ uint16x4_t d3 = highbd_12_convolve4_4_x(s3, filter, offset, permute_tbl);
+
+ store_u16_4x4(dst, dst_stride, d0, d1, d2, d3);
+
+ s += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height > 4);
+
+ // Process final 3 rows.
+ int16x8_t s0, s1, s2;
+ load_s16_8x3(s, src_stride, &s0, &s1, &s2);
+
+ uint16x4_t d0 = highbd_12_convolve4_4_x(s0, filter, offset, permute_tbl);
+ uint16x4_t d1 = highbd_12_convolve4_4_x(s1, filter, offset, permute_tbl);
+ uint16x4_t d2 = highbd_12_convolve4_4_x(s2, filter, offset, permute_tbl);
+
+ store_u16_4x3(dst, dst_stride, d0, d1, d2);
+
+ } else {
+ uint16x8_t idx = vld1q_u16(kDeinterleaveTbl);
+
+ do {
+ const int16_t *s = (const int16_t *)(src);
+ uint16_t *d = dst;
+ int w = width;
+
+ do {
+ int16x8_t s0[4], s1[4], s2[4], s3[4];
+ load_s16_8x4(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3]);
+ load_s16_8x4(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3]);
+ load_s16_8x4(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3]);
+ load_s16_8x4(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3]);
+
+ uint16x8_t d0 = highbd_12_convolve4_8_x(s0, filter, offset, idx);
+ uint16x8_t d1 = highbd_12_convolve4_8_x(s1, filter, offset, idx);
+ uint16x8_t d2 = highbd_12_convolve4_8_x(s2, filter, offset, idx);
+ uint16x8_t d3 = highbd_12_convolve4_8_x(s3, filter, offset, idx);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ s += 8;
+ d += 8;
+ w -= 8;
+ } while (w != 0);
+ src += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height > 4);
+
+ // Process final 3 rows.
+ const int16_t *s = (const int16_t *)(src);
+
+ do {
+ int16x8_t s0[4], s1[4], s2[4];
+ load_s16_8x4(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3]);
+ load_s16_8x4(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3]);
+ load_s16_8x4(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3]);
+
+ uint16x8_t d0 = highbd_12_convolve4_8_x(s0, filter, offset, idx);
+ uint16x8_t d1 = highbd_12_convolve4_8_x(s1, filter, offset, idx);
+ uint16x8_t d2 = highbd_12_convolve4_8_x(s2, filter, offset, idx);
+
+ store_u16_8x3(dst, dst_stride, d0, d1, d2);
+
+ s += 8;
+ dst += 8;
+ width -= 8;
+ } while (width != 0);
+ }
+}
+
+static INLINE void highbd_dist_wtd_convolve_2d_horiz_4tap_sve2(
+ const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride,
+ int width, int height, const int16_t *x_filter_ptr, const int bd) {
+ const int64x2_t offset = vdupq_n_s64(1 << (bd + FILTER_BITS - 1));
+ const int16x4_t x_filter = vld1_s16(x_filter_ptr + 2);
+ const int16x8_t filter = vcombine_s16(x_filter, vdup_n_s16(0));
+
+ // We are only doing 8-tap and 4-tap vertical convolutions, therefore we know
+ // that im_h % 4 = 3, so we can do the loop across the whole block 4 rows at
+ // a time and then process the last 3 rows separately.
+
+ if (width == 4) {
+ uint16x8x2_t permute_tbl = vld1q_u16_x2(kDotProdTbl);
+
+ const int16_t *s = (const int16_t *)(src);
+
+ do {
+ int16x8_t s0, s1, s2, s3;
+ load_s16_8x4(s, src_stride, &s0, &s1, &s2, &s3);
+
+ uint16x4_t d0 = highbd_convolve4_4_x(s0, filter, offset, permute_tbl);
+ uint16x4_t d1 = highbd_convolve4_4_x(s1, filter, offset, permute_tbl);
+ uint16x4_t d2 = highbd_convolve4_4_x(s2, filter, offset, permute_tbl);
+ uint16x4_t d3 = highbd_convolve4_4_x(s3, filter, offset, permute_tbl);
+
+ store_u16_4x4(dst, dst_stride, d0, d1, d2, d3);
+
+ s += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height > 4);
+
+ // Process final 3 rows.
+ int16x8_t s0, s1, s2;
+ load_s16_8x3(s, src_stride, &s0, &s1, &s2);
+
+ uint16x4_t d0 = highbd_convolve4_4_x(s0, filter, offset, permute_tbl);
+ uint16x4_t d1 = highbd_convolve4_4_x(s1, filter, offset, permute_tbl);
+ uint16x4_t d2 = highbd_convolve4_4_x(s2, filter, offset, permute_tbl);
+
+ store_u16_4x3(dst, dst_stride, d0, d1, d2);
+ } else {
+ uint16x8_t idx = vld1q_u16(kDeinterleaveTbl);
+
+ do {
+ const int16_t *s = (const int16_t *)(src);
+ uint16_t *d = dst;
+ int w = width;
+
+ do {
+ int16x8_t s0[4], s1[4], s2[4], s3[4];
+ load_s16_8x4(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3]);
+ load_s16_8x4(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3]);
+ load_s16_8x4(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3]);
+ load_s16_8x4(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3]);
+
+ uint16x8_t d0 = highbd_convolve4_8_x(s0, filter, offset, idx);
+ uint16x8_t d1 = highbd_convolve4_8_x(s1, filter, offset, idx);
+ uint16x8_t d2 = highbd_convolve4_8_x(s2, filter, offset, idx);
+ uint16x8_t d3 = highbd_convolve4_8_x(s3, filter, offset, idx);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ s += 8;
+ d += 8;
+ w -= 8;
+ } while (w != 0);
+ src += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height > 4);
+
+ // Process final 3 rows.
+ const int16_t *s = (const int16_t *)(src);
+
+ do {
+ int16x8_t s0[4], s1[4], s2[4];
+ load_s16_8x4(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3]);
+ load_s16_8x4(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3]);
+ load_s16_8x4(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3]);
+
+ uint16x8_t d0 = highbd_convolve4_8_x(s0, filter, offset, idx);
+ uint16x8_t d1 = highbd_convolve4_8_x(s1, filter, offset, idx);
+ uint16x8_t d2 = highbd_convolve4_8_x(s2, filter, offset, idx);
+
+ store_u16_8x3(dst, dst_stride, d0, d1, d2);
+
+ s += 8;
+ dst += 8;
+ width -= 8;
+ } while (width != 0);
+ }
+}
+
+static INLINE uint16x4_t highbd_convolve8_4_2d_v(int16x8_t samples_lo[2],
+ int16x8_t samples_hi[2],
+ int16x8_t filter,
+ int64x2_t offset) {
+ int64x2_t sum01 = aom_svdot_lane_s16(offset, samples_lo[0], filter, 0);
+ sum01 = aom_svdot_lane_s16(sum01, samples_hi[0], filter, 1);
+
+ int64x2_t sum23 = aom_svdot_lane_s16(offset, samples_lo[1], filter, 0);
+ sum23 = aom_svdot_lane_s16(sum23, samples_hi[1], filter, 1);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+
+ return vqrshrun_n_s32(sum0123, COMPOUND_ROUND1_BITS);
+}
+
+static INLINE uint16x8_t highbd_convolve8_8_2d_v(int16x8_t samples_lo[4],
+ int16x8_t samples_hi[4],
+ int16x8_t filter,
+ int64x2_t offset) {
+ int64x2_t sum01 = aom_svdot_lane_s16(offset, samples_lo[0], filter, 0);
+ sum01 = aom_svdot_lane_s16(sum01, samples_hi[0], filter, 1);
+
+ int64x2_t sum23 = aom_svdot_lane_s16(offset, samples_lo[1], filter, 0);
+ sum23 = aom_svdot_lane_s16(sum23, samples_hi[1], filter, 1);
+
+ int64x2_t sum45 = aom_svdot_lane_s16(offset, samples_lo[2], filter, 0);
+ sum45 = aom_svdot_lane_s16(sum45, samples_hi[2], filter, 1);
+
+ int64x2_t sum67 = aom_svdot_lane_s16(offset, samples_lo[3], filter, 0);
+ sum67 = aom_svdot_lane_s16(sum67, samples_hi[3], filter, 1);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+ int32x4_t sum4567 = vcombine_s32(vmovn_s64(sum45), vmovn_s64(sum67));
+
+ return vcombine_u16(vqrshrun_n_s32(sum0123, COMPOUND_ROUND1_BITS),
+ vqrshrun_n_s32(sum4567, COMPOUND_ROUND1_BITS));
+}
+
+static INLINE void highbd_dist_wtd_convolve_2d_vert_8tap_sve2(
+ const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride,
+ int width, int height, const int16_t *y_filter_ptr, int offset) {
+ const int16x8_t y_filter = vld1q_s16(y_filter_ptr);
+ const int64x2_t offset_s64 = vdupq_n_s64(offset);
+
+ uint16x8x3_t merge_block_tbl = vld1q_u16_x3(kDotProdMergeBlockTbl);
+ // Scale indices by size of the true vector length to avoid reading from an
+ // 'undefined' portion of a vector on a system with SVE vectors > 128-bit.
+ uint16x8_t correction0 =
+ vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000000000000ULL));
+ merge_block_tbl.val[0] = vaddq_u16(merge_block_tbl.val[0], correction0);
+
+ uint16x8_t correction1 =
+ vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000100000000ULL));
+ merge_block_tbl.val[1] = vaddq_u16(merge_block_tbl.val[1], correction1);
+
+ uint16x8_t correction2 =
+ vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000100010000ULL));
+ merge_block_tbl.val[2] = vaddq_u16(merge_block_tbl.val[2], correction2);
+
+ if (width == 4) {
+ int16_t *s = (int16_t *)src;
+ int16x4_t s0, s1, s2, s3, s4, s5, s6;
+ load_s16_4x7(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6);
+ s += 7 * src_stride;
+
+ // This operation combines a conventional transpose and the sample permute
+ // required before computing the dot product.
+ int16x8_t s0123[2], s1234[2], s2345[2], s3456[2];
+ transpose_concat_4x4(s0, s1, s2, s3, s0123);
+ transpose_concat_4x4(s1, s2, s3, s4, s1234);
+ transpose_concat_4x4(s2, s3, s4, s5, s2345);
+ transpose_concat_4x4(s3, s4, s5, s6, s3456);
+
+ do {
+ int16x4_t s7, s8, s9, s10;
+ load_s16_4x4(s, src_stride, &s7, &s8, &s9, &s10);
+
+ int16x8_t s4567[2], s5678[2], s6789[2], s789A[2];
+ // Transpose and shuffle the 4 lines that were loaded.
+ transpose_concat_4x4(s7, s8, s9, s10, s789A);
+
+ // Merge new data into block from previous iteration.
+ aom_tbl2x2_s16(s3456, s789A, merge_block_tbl.val[0], s4567);
+ aom_tbl2x2_s16(s3456, s789A, merge_block_tbl.val[1], s5678);
+ aom_tbl2x2_s16(s3456, s789A, merge_block_tbl.val[2], s6789);
+
+ uint16x4_t d0 =
+ highbd_convolve8_4_2d_v(s0123, s4567, y_filter, offset_s64);
+ uint16x4_t d1 =
+ highbd_convolve8_4_2d_v(s1234, s5678, y_filter, offset_s64);
+ uint16x4_t d2 =
+ highbd_convolve8_4_2d_v(s2345, s6789, y_filter, offset_s64);
+ uint16x4_t d3 =
+ highbd_convolve8_4_2d_v(s3456, s789A, y_filter, offset_s64);
+
+ store_u16_4x4(dst, dst_stride, d0, d1, d2, d3);
+
+ // Prepare block for next iteration - re-using as much as possible.
+ // Shuffle everything up four rows.
+ s0123[0] = s4567[0];
+ s0123[1] = s4567[1];
+ s1234[0] = s5678[0];
+ s1234[1] = s5678[1];
+ s2345[0] = s6789[0];
+ s2345[1] = s6789[1];
+ s3456[0] = s789A[0];
+ s3456[1] = s789A[1];
+
+ s += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height != 0);
+ } else {
+ do {
+ int h = height;
+ int16_t *s = (int16_t *)src;
+ uint16_t *d = dst;
+
+ int16x8_t s0, s1, s2, s3, s4, s5, s6;
+ load_s16_8x7(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6);
+ s += 7 * src_stride;
+
+ // This operation combines a conventional transpose and the sample permute
+ // required before computing the dot product.
+ int16x8_t s0123[4], s1234[4], s2345[4], s3456[4];
+ transpose_concat_8x4(s0, s1, s2, s3, s0123);
+ transpose_concat_8x4(s1, s2, s3, s4, s1234);
+ transpose_concat_8x4(s2, s3, s4, s5, s2345);
+ transpose_concat_8x4(s3, s4, s5, s6, s3456);
+
+ do {
+ int16x8_t s7, s8, s9, s10;
+ load_s16_8x4(s, src_stride, &s7, &s8, &s9, &s10);
+ int16x8_t s4567[4], s5678[4], s6789[4], s789A[4];
+
+ // Transpose and shuffle the 4 lines that were loaded.
+ transpose_concat_8x4(s7, s8, s9, s10, s789A);
+
+ // Merge new data into block from previous iteration.
+ aom_tbl2x4_s16(s3456, s789A, merge_block_tbl.val[0], s4567);
+ aom_tbl2x4_s16(s3456, s789A, merge_block_tbl.val[1], s5678);
+ aom_tbl2x4_s16(s3456, s789A, merge_block_tbl.val[2], s6789);
+
+ uint16x8_t d0 =
+ highbd_convolve8_8_2d_v(s0123, s4567, y_filter, offset_s64);
+ uint16x8_t d1 =
+ highbd_convolve8_8_2d_v(s1234, s5678, y_filter, offset_s64);
+ uint16x8_t d2 =
+ highbd_convolve8_8_2d_v(s2345, s6789, y_filter, offset_s64);
+ uint16x8_t d3 =
+ highbd_convolve8_8_2d_v(s3456, s789A, y_filter, offset_s64);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ // Prepare block for next iteration - re-using as much as possible.
+ // Shuffle everything up four rows.
+ s0123[0] = s4567[0];
+ s0123[1] = s4567[1];
+ s0123[2] = s4567[2];
+ s0123[3] = s4567[3];
+ s1234[0] = s5678[0];
+ s1234[1] = s5678[1];
+ s1234[2] = s5678[2];
+ s1234[3] = s5678[3];
+ s2345[0] = s6789[0];
+ s2345[1] = s6789[1];
+ s2345[2] = s6789[2];
+ s2345[3] = s6789[3];
+ s3456[0] = s789A[0];
+ s3456[1] = s789A[1];
+ s3456[2] = s789A[2];
+ s3456[3] = s789A[3];
+
+ s += 4 * src_stride;
+ d += 4 * dst_stride;
+ h -= 4;
+ } while (h != 0);
+ src += 8;
+ dst += 8;
+ width -= 8;
+ } while (width != 0);
+ }
+}
+
+static INLINE uint16x4_t highbd_convolve4_4_2d_v(
+ const int16x4_t s0, const int16x4_t s1, const int16x4_t s2,
+ const int16x4_t s3, const int16x4_t filter, const int32x4_t offset) {
+ int32x4_t sum = vmlal_lane_s16(offset, s0, filter, 0);
+ sum = vmlal_lane_s16(sum, s1, filter, 1);
+ sum = vmlal_lane_s16(sum, s2, filter, 2);
+ sum = vmlal_lane_s16(sum, s3, filter, 3);
+
+ return vqrshrun_n_s32(sum, COMPOUND_ROUND1_BITS);
+}
+
+static INLINE uint16x8_t highbd_convolve4_8_2d_v(
+ const int16x8_t s0, const int16x8_t s1, const int16x8_t s2,
+ const int16x8_t s3, const int16x4_t filter, const int32x4_t offset) {
+ int32x4_t sum0 = vmlal_lane_s16(offset, vget_low_s16(s0), filter, 0);
+ sum0 = vmlal_lane_s16(sum0, vget_low_s16(s1), filter, 1);
+ sum0 = vmlal_lane_s16(sum0, vget_low_s16(s2), filter, 2);
+ sum0 = vmlal_lane_s16(sum0, vget_low_s16(s3), filter, 3);
+
+ int32x4_t sum1 = vmlal_lane_s16(offset, vget_high_s16(s0), filter, 0);
+ sum1 = vmlal_lane_s16(sum1, vget_high_s16(s1), filter, 1);
+ sum1 = vmlal_lane_s16(sum1, vget_high_s16(s2), filter, 2);
+ sum1 = vmlal_lane_s16(sum1, vget_high_s16(s3), filter, 3);
+
+ return vcombine_u16(vqrshrun_n_s32(sum0, COMPOUND_ROUND1_BITS),
+ vqrshrun_n_s32(sum1, COMPOUND_ROUND1_BITS));
+}
+
+static INLINE void highbd_dist_wtd_convolve_2d_vert_4tap_neon(
+ const uint16_t *src_ptr, int src_stride, uint16_t *dst_ptr, int dst_stride,
+ int w, int h, const int16_t *y_filter_ptr, const int offset) {
+ const int16x4_t y_filter = vld1_s16(y_filter_ptr + 2);
+ const int32x4_t offset_vec = vdupq_n_s32(offset);
+
+ if (w == 4) {
+ const int16_t *s = (const int16_t *)src_ptr;
+ uint16_t *d = dst_ptr;
+
+ int16x4_t s0, s1, s2;
+ load_s16_4x3(s, src_stride, &s0, &s1, &s2);
+ s += 3 * src_stride;
+
+ do {
+ int16x4_t s3, s4, s5, s6;
+ load_s16_4x4(s, src_stride, &s3, &s4, &s5, &s6);
+
+ uint16x4_t d0 =
+ highbd_convolve4_4_2d_v(s0, s1, s2, s3, y_filter, offset_vec);
+ uint16x4_t d1 =
+ highbd_convolve4_4_2d_v(s1, s2, s3, s4, y_filter, offset_vec);
+ uint16x4_t d2 =
+ highbd_convolve4_4_2d_v(s2, s3, s4, s5, y_filter, offset_vec);
+ uint16x4_t d3 =
+ highbd_convolve4_4_2d_v(s3, s4, s5, s6, y_filter, offset_vec);
+
+ store_u16_4x4(d, dst_stride, d0, d1, d2, d3);
+
+ s0 = s4;
+ s1 = s5;
+ s2 = s6;
+
+ s += 4 * src_stride;
+ d += 4 * dst_stride;
+ h -= 4;
+ } while (h != 0);
+ } else {
+ do {
+ int height = h;
+ const int16_t *s = (const int16_t *)src_ptr;
+ uint16_t *d = dst_ptr;
+
+ int16x8_t s0, s1, s2;
+ load_s16_8x3(s, src_stride, &s0, &s1, &s2);
+ s += 3 * src_stride;
+
+ do {
+ int16x8_t s3, s4, s5, s6;
+ load_s16_8x4(s, src_stride, &s3, &s4, &s5, &s6);
+
+ uint16x8_t d0 =
+ highbd_convolve4_8_2d_v(s0, s1, s2, s3, y_filter, offset_vec);
+ uint16x8_t d1 =
+ highbd_convolve4_8_2d_v(s1, s2, s3, s4, y_filter, offset_vec);
+ uint16x8_t d2 =
+ highbd_convolve4_8_2d_v(s2, s3, s4, s5, y_filter, offset_vec);
+ uint16x8_t d3 =
+ highbd_convolve4_8_2d_v(s3, s4, s5, s6, y_filter, offset_vec);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ s0 = s4;
+ s1 = s5;
+ s2 = s6;
+
+ s += 4 * src_stride;
+ d += 4 * dst_stride;
+ height -= 4;
+ } while (height != 0);
+ src_ptr += 8;
+ dst_ptr += 8;
+ w -= 8;
+ } while (w != 0);
+ }
+}
+
+void av1_highbd_dist_wtd_convolve_2d_sve2(
+ const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride, int w,
+ int h, const InterpFilterParams *filter_params_x,
+ const InterpFilterParams *filter_params_y, const int subpel_x_qn,
+ const int subpel_y_qn, ConvolveParams *conv_params, int bd) {
+ DECLARE_ALIGNED(16, uint16_t,
+ im_block[(MAX_SB_SIZE + MAX_FILTER_TAP) * MAX_SB_SIZE]);
+ DECLARE_ALIGNED(16, uint16_t,
+ im_block2[(MAX_SB_SIZE + MAX_FILTER_TAP) * MAX_SB_SIZE]);
+
+ CONV_BUF_TYPE *dst16 = conv_params->dst;
+ int dst16_stride = conv_params->dst_stride;
+ const int x_filter_taps = get_filter_tap(filter_params_x, subpel_x_qn);
+ const int clamped_x_taps = x_filter_taps < 4 ? 4 : x_filter_taps;
+
+ const int y_filter_taps = get_filter_tap(filter_params_y, subpel_y_qn);
+ const int clamped_y_taps = y_filter_taps < 4 ? 4 : y_filter_taps;
+
+ if (x_filter_taps == 6 || y_filter_taps == 6) {
+ av1_highbd_dist_wtd_convolve_2d_neon(
+ src, src_stride, dst, dst_stride, w, h, filter_params_x,
+ filter_params_y, subpel_x_qn, subpel_y_qn, conv_params, bd);
+ return;
+ }
+
+ const int im_h = h + clamped_y_taps - 1;
+ const int im_stride = MAX_SB_SIZE;
+ const int vert_offset = clamped_y_taps / 2 - 1;
+ const int horiz_offset = clamped_x_taps / 2 - 1;
+ const int y_offset_bits = bd + 2 * FILTER_BITS - conv_params->round_0;
+ const int round_offset_conv_y = (1 << y_offset_bits);
+
+ const uint16_t *src_ptr = src - vert_offset * src_stride - horiz_offset;
+
+ const int16_t *x_filter_ptr = av1_get_interp_filter_subpel_kernel(
+ filter_params_x, subpel_x_qn & SUBPEL_MASK);
+ const int16_t *y_filter_ptr = av1_get_interp_filter_subpel_kernel(
+ filter_params_y, subpel_y_qn & SUBPEL_MASK);
+
+ if (bd == 12) {
+ if (x_filter_taps <= 4) {
+ highbd_12_dist_wtd_convolve_2d_horiz_4tap_sve2(
+ src_ptr, src_stride, im_block, im_stride, w, im_h, x_filter_ptr);
+ } else {
+ highbd_12_dist_wtd_convolve_2d_horiz_8tap_sve2(
+ src_ptr, src_stride, im_block, im_stride, w, im_h, x_filter_ptr);
+ }
+ } else {
+ if (x_filter_taps <= 4) {
+ highbd_dist_wtd_convolve_2d_horiz_4tap_sve2(
+ src_ptr, src_stride, im_block, im_stride, w, im_h, x_filter_ptr, bd);
+ } else {
+ highbd_dist_wtd_convolve_2d_horiz_8tap_sve2(
+ src_ptr, src_stride, im_block, im_stride, w, im_h, x_filter_ptr, bd);
+ }
+ }
+
+ if (conv_params->do_average) {
+ if (y_filter_taps <= 4) {
+ highbd_dist_wtd_convolve_2d_vert_4tap_neon(im_block, im_stride, im_block2,
+ im_stride, w, h, y_filter_ptr,
+ round_offset_conv_y);
+ } else {
+ highbd_dist_wtd_convolve_2d_vert_8tap_sve2(im_block, im_stride, im_block2,
+ im_stride, w, h, y_filter_ptr,
+ round_offset_conv_y);
+ }
+ if (conv_params->use_dist_wtd_comp_avg) {
+ if (bd == 12) {
+ highbd_12_dist_wtd_comp_avg_neon(im_block2, im_stride, dst, dst_stride,
+ w, h, conv_params);
+
+ } else {
+ highbd_dist_wtd_comp_avg_neon(im_block2, im_stride, dst, dst_stride, w,
+ h, conv_params, bd);
+ }
+ } else {
+ if (bd == 12) {
+ highbd_12_comp_avg_neon(im_block2, im_stride, dst, dst_stride, w, h,
+ conv_params);
+
+ } else {
+ highbd_comp_avg_neon(im_block2, im_stride, dst, dst_stride, w, h,
+ conv_params, bd);
+ }
+ }
+ } else {
+ if (y_filter_taps <= 4) {
+ highbd_dist_wtd_convolve_2d_vert_4tap_neon(
+ im_block, im_stride, dst16, dst16_stride, w, h, y_filter_ptr,
+ round_offset_conv_y);
+ } else {
+ highbd_dist_wtd_convolve_2d_vert_8tap_sve2(
+ im_block, im_stride, dst16, dst16_stride, w, h, y_filter_ptr,
+ round_offset_conv_y);
+ }
+ }
+}
diff --git a/third_party/aom/av1/common/arm/highbd_convolve_sve2.c b/third_party/aom/av1/common/arm/highbd_convolve_sve2.c
new file mode 100644
index 0000000000..82eb12fcea
--- /dev/null
+++ b/third_party/aom/av1/common/arm/highbd_convolve_sve2.c
@@ -0,0 +1,1720 @@
+/*
+ * Copyright (c) 2024, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <assert.h>
+#include <arm_neon.h>
+
+#include "config/aom_config.h"
+#include "config/av1_rtcd.h"
+
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_dsp/arm/aom_neon_sve_bridge.h"
+#include "aom_dsp/arm/aom_neon_sve2_bridge.h"
+#include "aom_dsp/arm/mem_neon.h"
+#include "aom_ports/mem.h"
+#include "av1/common/convolve.h"
+#include "av1/common/filter.h"
+#include "av1/common/arm/highbd_convolve_sve2.h"
+
+DECLARE_ALIGNED(16, static const uint16_t, kDotProdTbl[32]) = {
+ 0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6,
+ 4, 5, 6, 7, 5, 6, 7, 0, 6, 7, 0, 1, 7, 0, 1, 2,
+};
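+
+// The first 16 entries gather overlapping 4-sample windows from a single
+// vector; the second 16 wrap around and are corrected (in the 12-tap path
+// below) so that the wrapped entries index the second operand of a
+// two-vector TBL.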
+
+static INLINE uint16x4_t convolve12_4_x(
+ int16x8_t s0, int16x8_t s1, int16x8_t filter_0_7, int16x8_t filter_4_11,
+ const int64x2_t offset, uint16x8x4_t permute_tbl, uint16x4_t max) {
+ int16x8_t permuted_samples[6];
+ permuted_samples[0] = aom_tbl_s16(s0, permute_tbl.val[0]);
+ permuted_samples[1] = aom_tbl_s16(s0, permute_tbl.val[1]);
+ permuted_samples[2] = aom_tbl2_s16(s0, s1, permute_tbl.val[2]);
+ permuted_samples[3] = aom_tbl2_s16(s0, s1, permute_tbl.val[3]);
+ permuted_samples[4] = aom_tbl_s16(s1, permute_tbl.val[0]);
+ permuted_samples[5] = aom_tbl_s16(s1, permute_tbl.val[1]);
+
+ int64x2_t sum01 =
+ aom_svdot_lane_s16(offset, permuted_samples[0], filter_0_7, 0);
+ sum01 = aom_svdot_lane_s16(sum01, permuted_samples[2], filter_0_7, 1);
+ sum01 = aom_svdot_lane_s16(sum01, permuted_samples[4], filter_4_11, 1);
+
+ int64x2_t sum23 =
+ aom_svdot_lane_s16(offset, permuted_samples[1], filter_0_7, 0);
+ sum23 = aom_svdot_lane_s16(sum23, permuted_samples[3], filter_0_7, 1);
+ sum23 = aom_svdot_lane_s16(sum23, permuted_samples[5], filter_4_11, 1);
+
+ int32x4_t res0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+ uint16x4_t res = vqrshrun_n_s32(res0123, FILTER_BITS);
+
+ return vmin_u16(res, max);
+}
+
+static INLINE uint16x8_t convolve12_8_x(int16x8_t s0, int16x8_t s1,
+ int16x8_t s2, int16x8_t filter_0_7,
+ int16x8_t filter_4_11, int64x2_t offset,
+ uint16x8x4_t permute_tbl,
+ uint16x8_t max) {
+ int16x8_t permuted_samples[8];
+ permuted_samples[0] = aom_tbl_s16(s0, permute_tbl.val[0]);
+ permuted_samples[1] = aom_tbl_s16(s0, permute_tbl.val[1]);
+ permuted_samples[2] = aom_tbl2_s16(s0, s1, permute_tbl.val[2]);
+ permuted_samples[3] = aom_tbl2_s16(s0, s1, permute_tbl.val[3]);
+ permuted_samples[4] = aom_tbl_s16(s1, permute_tbl.val[0]);
+ permuted_samples[5] = aom_tbl_s16(s1, permute_tbl.val[1]);
+ permuted_samples[6] = aom_tbl2_s16(s1, s2, permute_tbl.val[2]);
+ permuted_samples[7] = aom_tbl2_s16(s1, s2, permute_tbl.val[3]);
+
+ int64x2_t sum01 =
+ aom_svdot_lane_s16(offset, permuted_samples[0], filter_0_7, 0);
+ sum01 = aom_svdot_lane_s16(sum01, permuted_samples[2], filter_0_7, 1);
+ sum01 = aom_svdot_lane_s16(sum01, permuted_samples[4], filter_4_11, 1);
+
+ int64x2_t sum23 =
+ aom_svdot_lane_s16(offset, permuted_samples[1], filter_0_7, 0);
+ sum23 = aom_svdot_lane_s16(sum23, permuted_samples[3], filter_0_7, 1);
+ sum23 = aom_svdot_lane_s16(sum23, permuted_samples[5], filter_4_11, 1);
+
+ int64x2_t sum45 =
+ aom_svdot_lane_s16(offset, permuted_samples[2], filter_0_7, 0);
+ sum45 = aom_svdot_lane_s16(sum45, permuted_samples[4], filter_0_7, 1);
+ sum45 = aom_svdot_lane_s16(sum45, permuted_samples[6], filter_4_11, 1);
+
+ int64x2_t sum67 =
+ aom_svdot_lane_s16(offset, permuted_samples[3], filter_0_7, 0);
+ sum67 = aom_svdot_lane_s16(sum67, permuted_samples[5], filter_0_7, 1);
+ sum67 = aom_svdot_lane_s16(sum67, permuted_samples[7], filter_4_11, 1);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+ int32x4_t sum4567 = vcombine_s32(vmovn_s64(sum45), vmovn_s64(sum67));
+
+ uint16x8_t res = vcombine_u16(vqrshrun_n_s32(sum0123, FILTER_BITS),
+ vqrshrun_n_s32(sum4567, FILTER_BITS));
+
+ return vminq_u16(res, max);
+}
+
+static INLINE void highbd_convolve_x_sr_12tap_sve2(
+ const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride,
+ int width, int height, const int16_t *y_filter_ptr,
+ ConvolveParams *conv_params, int bd) {
+ // This shim allows us to do only one rounding shift instead of two.
+ const int64x2_t offset = vdupq_n_s64(1 << (conv_params->round_0 - 1));
+
+ const int16x8_t y_filter_0_7 = vld1q_s16(y_filter_ptr);
+ const int16x8_t y_filter_4_11 = vld1q_s16(y_filter_ptr + 4);
+
+ uint16x8x4_t permute_tbl = vld1q_u16_x4(kDotProdTbl);
+ // Scale indices by size of the true vector length to avoid reading from an
+ // 'undefined' portion of a vector on a system with SVE vectors > 128-bit.
+ uint16x8_t correction0 = vreinterpretq_u16_u64(vcombine_u64(
+ vdup_n_u64(0), vdup_n_u64(svcnth() * 0x0001000000000000ULL)));
+ permute_tbl.val[2] = vaddq_u16(permute_tbl.val[2], correction0);
+
+ uint16x8_t correction1 = vreinterpretq_u16_u64(
+ vcombine_u64(vdup_n_u64(svcnth() * 0x0001000100000000ULL),
+ vdup_n_u64(svcnth() * 0x0001000100010000ULL)));
+ permute_tbl.val[3] = vaddq_u16(permute_tbl.val[3], correction1);
+
+ if (width == 4) {
+ const uint16x4_t max = vdup_n_u16((1 << bd) - 1);
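+ // vqrshrun_n_s32 only saturates to 16 bits, so results are also clamped to
+ // the bit-depth maximum.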
+ const int16_t *s = (const int16_t *)src;
+
+ do {
+ int16x8_t s0, s1, s2, s3, s4, s5, s6, s7;
+ load_s16_8x4(s, src_stride, &s0, &s2, &s4, &s6);
+ load_s16_8x4(s + 8, src_stride, &s1, &s3, &s5, &s7);
+
+ uint16x4_t d0 = convolve12_4_x(s0, s1, y_filter_0_7, y_filter_4_11,
+ offset, permute_tbl, max);
+ uint16x4_t d1 = convolve12_4_x(s2, s3, y_filter_0_7, y_filter_4_11,
+ offset, permute_tbl, max);
+ uint16x4_t d2 = convolve12_4_x(s4, s5, y_filter_0_7, y_filter_4_11,
+ offset, permute_tbl, max);
+ uint16x4_t d3 = convolve12_4_x(s6, s7, y_filter_0_7, y_filter_4_11,
+ offset, permute_tbl, max);
+
+ store_u16_4x4(dst, dst_stride, d0, d1, d2, d3);
+
+ s += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height != 0);
+ } else {
+ const uint16x8_t max = vdupq_n_u16((1 << bd) - 1);
+
+ do {
+ const int16_t *s = (const int16_t *)src;
+ uint16_t *d = dst;
+ int w = width;
+
+ do {
+ int16x8_t s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11;
+ load_s16_8x4(s, src_stride, &s0, &s3, &s6, &s9);
+ load_s16_8x4(s + 8, src_stride, &s1, &s4, &s7, &s10);
+ load_s16_8x4(s + 16, src_stride, &s2, &s5, &s8, &s11);
+
+ uint16x8_t d0 = convolve12_8_x(s0, s1, s2, y_filter_0_7, y_filter_4_11,
+ offset, permute_tbl, max);
+ uint16x8_t d1 = convolve12_8_x(s3, s4, s5, y_filter_0_7, y_filter_4_11,
+ offset, permute_tbl, max);
+ uint16x8_t d2 = convolve12_8_x(s6, s7, s8, y_filter_0_7, y_filter_4_11,
+ offset, permute_tbl, max);
+ uint16x8_t d3 = convolve12_8_x(s9, s10, s11, y_filter_0_7,
+ y_filter_4_11, offset, permute_tbl, max);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ s += 8;
+ d += 8;
+ w -= 8;
+ } while (w != 0);
+ src += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height != 0);
+ }
+}
+
+static INLINE uint16x8_t convolve8_8_x(int16x8_t s0[8], int16x8_t filter,
+ int64x2_t offset, uint16x8_t max) {
+ int64x2_t sum[8];
+ sum[0] = aom_sdotq_s16(offset, s0[0], filter);
+ sum[1] = aom_sdotq_s16(offset, s0[1], filter);
+ sum[2] = aom_sdotq_s16(offset, s0[2], filter);
+ sum[3] = aom_sdotq_s16(offset, s0[3], filter);
+ sum[4] = aom_sdotq_s16(offset, s0[4], filter);
+ sum[5] = aom_sdotq_s16(offset, s0[5], filter);
+ sum[6] = aom_sdotq_s16(offset, s0[6], filter);
+ sum[7] = aom_sdotq_s16(offset, s0[7], filter);
+
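+ // Each sum[i] holds the tap 0-3 and tap 4-7 partial sums for output pixel
+ // i; the pairwise adds below fold them into a single 8-tap sum per pixel.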
+ sum[0] = vpaddq_s64(sum[0], sum[1]);
+ sum[2] = vpaddq_s64(sum[2], sum[3]);
+ sum[4] = vpaddq_s64(sum[4], sum[5]);
+ sum[6] = vpaddq_s64(sum[6], sum[7]);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum[0]), vmovn_s64(sum[2]));
+ int32x4_t sum4567 = vcombine_s32(vmovn_s64(sum[4]), vmovn_s64(sum[6]));
+
+ uint16x8_t res = vcombine_u16(vqrshrun_n_s32(sum0123, FILTER_BITS),
+ vqrshrun_n_s32(sum4567, FILTER_BITS));
+
+ return vminq_u16(res, max);
+}
+
+static INLINE void highbd_convolve_x_sr_8tap_sve2(
+ const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride,
+ int width, int height, const int16_t *y_filter_ptr,
+ ConvolveParams *conv_params, int bd) {
+ const uint16x8_t max = vdupq_n_u16((1 << bd) - 1);
+ // This shim allows us to do only one rounding shift instead of two.
+ const int64_t offset = 1 << (conv_params->round_0 - 1);
+ const int64x2_t offset_lo = vcombine_s64((int64x1_t)(offset), vdup_n_s64(0));
+
+ const int16x8_t filter = vld1q_s16(y_filter_ptr);
+
+ do {
+ const int16_t *s = (const int16_t *)src;
+ uint16_t *d = dst;
+ int w = width;
+
+ do {
+ int16x8_t s0[8], s1[8], s2[8], s3[8];
+ load_s16_8x8(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3],
+ &s0[4], &s0[5], &s0[6], &s0[7]);
+ load_s16_8x8(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3],
+ &s1[4], &s1[5], &s1[6], &s1[7]);
+ load_s16_8x8(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3],
+ &s2[4], &s2[5], &s2[6], &s2[7]);
+ load_s16_8x8(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3],
+ &s3[4], &s3[5], &s3[6], &s3[7]);
+
+ uint16x8_t d0 = convolve8_8_x(s0, filter, offset_lo, max);
+ uint16x8_t d1 = convolve8_8_x(s1, filter, offset_lo, max);
+ uint16x8_t d2 = convolve8_8_x(s2, filter, offset_lo, max);
+ uint16x8_t d3 = convolve8_8_x(s3, filter, offset_lo, max);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ s += 8;
+ d += 8;
+ w -= 8;
+ } while (w != 0);
+ src += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height != 0);
+}
+
+// clang-format off
+DECLARE_ALIGNED(16, static const uint16_t, kDeinterleaveTbl[8]) = {
+ 0, 2, 4, 6, 1, 3, 5, 7,
+};
+// clang-format on
+
+static INLINE uint16x4_t convolve4_4_x(int16x8_t s0, int16x8_t filter,
+ int64x2_t offset,
+ uint16x8x2_t permute_tbl,
+ uint16x4_t max) {
+ int16x8_t permuted_samples0 = aom_tbl_s16(s0, permute_tbl.val[0]);
+ int16x8_t permuted_samples1 = aom_tbl_s16(s0, permute_tbl.val[1]);
+
+ int64x2_t sum01 = aom_svdot_lane_s16(offset, permuted_samples0, filter, 0);
+ int64x2_t sum23 = aom_svdot_lane_s16(offset, permuted_samples1, filter, 0);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+ uint16x4_t res = vqrshrun_n_s32(sum0123, FILTER_BITS);
+
+ return vmin_u16(res, max);
+}
+
+static INLINE uint16x8_t convolve4_8_x(int16x8_t s0[4], int16x8_t filter,
+ int64x2_t offset, uint16x8_t tbl,
+ uint16x8_t max) {
+ int64x2_t sum04 = aom_svdot_lane_s16(offset, s0[0], filter, 0);
+ int64x2_t sum15 = aom_svdot_lane_s16(offset, s0[1], filter, 0);
+ int64x2_t sum26 = aom_svdot_lane_s16(offset, s0[2], filter, 0);
+ int64x2_t sum37 = aom_svdot_lane_s16(offset, s0[3], filter, 0);
+
+ int32x4_t sum0415 = vcombine_s32(vmovn_s64(sum04), vmovn_s64(sum15));
+ int32x4_t sum2637 = vcombine_s32(vmovn_s64(sum26), vmovn_s64(sum37));
+
+ uint16x8_t res = vcombine_u16(vqrshrun_n_s32(sum0415, FILTER_BITS),
+ vqrshrun_n_s32(sum2637, FILTER_BITS));
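+ // The dot products above produce results in the order 0, 4, 1, 5, 2, 6, 3,
+ // 7; the table lookup below restores ascending output order.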
+ res = aom_tbl_u16(res, tbl);
+
+ return vminq_u16(res, max);
+}
+
+static INLINE void highbd_convolve_x_sr_4tap_sve2(
+ const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride,
+ int width, int height, const int16_t *x_filter_ptr,
+ ConvolveParams *conv_params, int bd) {
+ // This shim allows us to do only one rounding shift instead of two.
+ const int64x2_t offset = vdupq_n_s64(1 << (conv_params->round_0 - 1));
+
+ const int16x4_t x_filter = vld1_s16(x_filter_ptr + 2);
+ const int16x8_t filter = vcombine_s16(x_filter, vdup_n_s16(0));
+
+ if (width == 4) {
+ const uint16x4_t max = vdup_n_u16((1 << bd) - 1);
+ uint16x8x2_t permute_tbl = vld1q_u16_x2(kDotProdTbl);
+
+ const int16_t *s = (const int16_t *)(src);
+
+ do {
+ int16x8_t s0, s1, s2, s3;
+ load_s16_8x4(s, src_stride, &s0, &s1, &s2, &s3);
+
+ uint16x4_t d0 = convolve4_4_x(s0, filter, offset, permute_tbl, max);
+ uint16x4_t d1 = convolve4_4_x(s1, filter, offset, permute_tbl, max);
+ uint16x4_t d2 = convolve4_4_x(s2, filter, offset, permute_tbl, max);
+ uint16x4_t d3 = convolve4_4_x(s3, filter, offset, permute_tbl, max);
+
+ store_u16_4x4(dst, dst_stride, d0, d1, d2, d3);
+
+ s += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height != 0);
+ } else {
+ const uint16x8_t max = vdupq_n_u16((1 << bd) - 1);
+ uint16x8_t idx = vld1q_u16(kDeinterleaveTbl);
+
+ do {
+ const int16_t *s = (const int16_t *)(src);
+ uint16_t *d = dst;
+ int w = width;
+
+ do {
+ int16x8_t s0[4], s1[4], s2[4], s3[4];
+ load_s16_8x4(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3]);
+ load_s16_8x4(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3]);
+ load_s16_8x4(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3]);
+ load_s16_8x4(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3]);
+
+ uint16x8_t d0 = convolve4_8_x(s0, filter, offset, idx, max);
+ uint16x8_t d1 = convolve4_8_x(s1, filter, offset, idx, max);
+ uint16x8_t d2 = convolve4_8_x(s2, filter, offset, idx, max);
+ uint16x8_t d3 = convolve4_8_x(s3, filter, offset, idx, max);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ s += 8;
+ d += 8;
+ w -= 8;
+ } while (w != 0);
+ src += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height != 0);
+ }
+}
+
+void av1_highbd_convolve_x_sr_sve2(const uint16_t *src, int src_stride,
+ uint16_t *dst, int dst_stride, int w, int h,
+ const InterpFilterParams *filter_params_x,
+ const int subpel_x_qn,
+ ConvolveParams *conv_params, int bd) {
+ if (w == 2 || h == 2) {
+ av1_highbd_convolve_x_sr_c(src, src_stride, dst, dst_stride, w, h,
+ filter_params_x, subpel_x_qn, conv_params, bd);
+ return;
+ }
+
+ const int x_filter_taps = get_filter_tap(filter_params_x, subpel_x_qn);
+
+ if (x_filter_taps == 6) {
+ av1_highbd_convolve_x_sr_neon(src, src_stride, dst, dst_stride, w, h,
+ filter_params_x, subpel_x_qn, conv_params,
+ bd);
+ return;
+ }
+
+ const int horiz_offset = filter_params_x->taps / 2 - 1;
+ const int16_t *x_filter_ptr = av1_get_interp_filter_subpel_kernel(
+ filter_params_x, subpel_x_qn & SUBPEL_MASK);
+
+ src -= horiz_offset;
+
+ if (x_filter_taps == 12) {
+ highbd_convolve_x_sr_12tap_sve2(src, src_stride, dst, dst_stride, w, h,
+ x_filter_ptr, conv_params, bd);
+ return;
+ }
+
+ if (x_filter_taps == 8) {
+ highbd_convolve_x_sr_8tap_sve2(src, src_stride, dst, dst_stride, w, h,
+ x_filter_ptr, conv_params, bd);
+ return;
+ }
+
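+ // The 4-tap kernel occupies x_filter_ptr[2..5] of the 8-entry kernel, so
+ // step the source forward by two columns to undo the surplus of the 8-tap
+ // horiz_offset applied above.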
+ highbd_convolve_x_sr_4tap_sve2(src + 2, src_stride, dst, dst_stride, w, h,
+ x_filter_ptr, conv_params, bd);
+}
+
+static INLINE uint16x4_t highbd_convolve12_4_y(int16x8_t s0[2], int16x8_t s1[2],
+ int16x8_t s2[2],
+ int16x8_t filter_0_7,
+ int16x8_t filter_4_11,
+ uint16x4_t max) {
+ int64x2_t sum[2];
+
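+ // Accumulate taps 0-3 and 4-7 from lanes 0 and 1 of filter_0_7, and taps
+ // 8-11 from lane 1 of filter_4_11.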
+ sum[0] = aom_svdot_lane_s16(vdupq_n_s64(0), s0[0], filter_0_7, 0);
+ sum[0] = aom_svdot_lane_s16(sum[0], s1[0], filter_0_7, 1);
+ sum[0] = aom_svdot_lane_s16(sum[0], s2[0], filter_4_11, 1);
+
+ sum[1] = aom_svdot_lane_s16(vdupq_n_s64(0), s0[1], filter_0_7, 0);
+ sum[1] = aom_svdot_lane_s16(sum[1], s1[1], filter_0_7, 1);
+ sum[1] = aom_svdot_lane_s16(sum[1], s2[1], filter_4_11, 1);
+
+ int32x4_t res_s32 = vcombine_s32(vmovn_s64(sum[0]), vmovn_s64(sum[1]));
+
+ uint16x4_t res = vqrshrun_n_s32(res_s32, FILTER_BITS);
+
+ return vmin_u16(res, max);
+}
+
+static INLINE void highbd_convolve_y_sr_12tap_sve2(
+ const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride,
+ int width, int height, const int16_t *y_filter_ptr, int bd) {
+ const int16x8_t y_filter_0_7 = vld1q_s16(y_filter_ptr);
+ const int16x8_t y_filter_4_11 = vld1q_s16(y_filter_ptr + 4);
+
+ uint16x8x3_t merge_block_tbl = vld1q_u16_x3(kDotProdMergeBlockTbl);
+ // Scale indices by size of the true vector length to avoid reading from an
+ // 'undefined' portion of a vector on a system with SVE vectors > 128-bit.
+ uint16x8_t correction0 =
+ vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000000000000ULL));
+ merge_block_tbl.val[0] = vaddq_u16(merge_block_tbl.val[0], correction0);
+
+ uint16x8_t correction1 =
+ vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000100000000ULL));
+ merge_block_tbl.val[1] = vaddq_u16(merge_block_tbl.val[1], correction1);
+
+ uint16x8_t correction2 =
+ vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000100010000ULL));
+ merge_block_tbl.val[2] = vaddq_u16(merge_block_tbl.val[2], correction2);
+
+ const uint16x4_t max = vdup_n_u16((1 << bd) - 1);
+
+ do {
+ int16_t *s = (int16_t *)src;
+ uint16_t *d = dst;
+ int h = height;
+
+ int16x4_t s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA;
+ load_s16_4x11(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6, &s7, &s8,
+ &s9, &sA);
+ s += 11 * src_stride;
+
+ int16x8_t s0123[2], s1234[2], s2345[2], s3456[2], s4567[2], s5678[2],
+ s6789[2], s789A[2];
+ transpose_concat_4x4(s0, s1, s2, s3, s0123);
+ transpose_concat_4x4(s1, s2, s3, s4, s1234);
+ transpose_concat_4x4(s2, s3, s4, s5, s2345);
+ transpose_concat_4x4(s3, s4, s5, s6, s3456);
+ transpose_concat_4x4(s4, s5, s6, s7, s4567);
+ transpose_concat_4x4(s5, s6, s7, s8, s5678);
+ transpose_concat_4x4(s6, s7, s8, s9, s6789);
+ transpose_concat_4x4(s7, s8, s9, sA, s789A);
+
+ do {
+ int16x4_t sB, sC, sD, sE;
+ load_s16_4x4(s, src_stride, &sB, &sC, &sD, &sE);
+
+ int16x8_t s89AB[2], s9ABC[2], sABCD[2], sBCDE[2];
+ transpose_concat_4x4(sB, sC, sD, sE, sBCDE);
+
+ // Use the above transpose and reuse data from the previous loop to get
+ // the rest.
+ aom_tbl2x2_s16(s789A, sBCDE, merge_block_tbl.val[0], s89AB);
+ aom_tbl2x2_s16(s789A, sBCDE, merge_block_tbl.val[1], s9ABC);
+ aom_tbl2x2_s16(s789A, sBCDE, merge_block_tbl.val[2], sABCD);
+
+ uint16x4_t d0 = highbd_convolve12_4_y(s0123, s4567, s89AB, y_filter_0_7,
+ y_filter_4_11, max);
+ uint16x4_t d1 = highbd_convolve12_4_y(s1234, s5678, s9ABC, y_filter_0_7,
+ y_filter_4_11, max);
+ uint16x4_t d2 = highbd_convolve12_4_y(s2345, s6789, sABCD, y_filter_0_7,
+ y_filter_4_11, max);
+ uint16x4_t d3 = highbd_convolve12_4_y(s3456, s789A, sBCDE, y_filter_0_7,
+ y_filter_4_11, max);
+
+ store_u16_4x4(d, dst_stride, d0, d1, d2, d3);
+
+ // Prepare block for next iteration - re-using as much as possible.
+ // Shuffle everything up four rows.
+ s0123[0] = s4567[0];
+ s0123[1] = s4567[1];
+ s1234[0] = s5678[0];
+ s1234[1] = s5678[1];
+ s2345[0] = s6789[0];
+ s2345[1] = s6789[1];
+ s3456[0] = s789A[0];
+ s3456[1] = s789A[1];
+ s4567[0] = s89AB[0];
+ s4567[1] = s89AB[1];
+ s5678[0] = s9ABC[0];
+ s5678[1] = s9ABC[1];
+ s6789[0] = sABCD[0];
+ s6789[1] = sABCD[1];
+ s789A[0] = sBCDE[0];
+ s789A[1] = sBCDE[1];
+
+ s += 4 * src_stride;
+ d += 4 * dst_stride;
+ h -= 4;
+ } while (h != 0);
+ src += 4;
+ dst += 4;
+ width -= 4;
+ } while (width != 0);
+}
+
+static INLINE uint16x4_t highbd_convolve8_4_y(int16x8_t samples_lo[2],
+ int16x8_t samples_hi[2],
+ int16x8_t filter,
+ uint16x4_t max) {
+ int64x2_t sum01 =
+ aom_svdot_lane_s16(vdupq_n_s64(0), samples_lo[0], filter, 0);
+ sum01 = aom_svdot_lane_s16(sum01, samples_hi[0], filter, 1);
+
+ int64x2_t sum23 =
+ aom_svdot_lane_s16(vdupq_n_s64(0), samples_lo[1], filter, 0);
+ sum23 = aom_svdot_lane_s16(sum23, samples_hi[1], filter, 1);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+ uint16x4_t res = vqrshrun_n_s32(sum0123, FILTER_BITS);
+ return vmin_u16(res, max);
+}
+
+static INLINE uint16x8_t highbd_convolve8_8_y(int16x8_t samples_lo[4],
+ int16x8_t samples_hi[4],
+ int16x8_t filter,
+ uint16x8_t max) {
+ int64x2_t sum01 =
+ aom_svdot_lane_s16(vdupq_n_s64(0), samples_lo[0], filter, 0);
+ sum01 = aom_svdot_lane_s16(sum01, samples_hi[0], filter, 1);
+
+ int64x2_t sum23 =
+ aom_svdot_lane_s16(vdupq_n_s64(0), samples_lo[1], filter, 0);
+ sum23 = aom_svdot_lane_s16(sum23, samples_hi[1], filter, 1);
+
+ int64x2_t sum45 =
+ aom_svdot_lane_s16(vdupq_n_s64(0), samples_lo[2], filter, 0);
+ sum45 = aom_svdot_lane_s16(sum45, samples_hi[2], filter, 1);
+
+ int64x2_t sum67 =
+ aom_svdot_lane_s16(vdupq_n_s64(0), samples_lo[3], filter, 0);
+ sum67 = aom_svdot_lane_s16(sum67, samples_hi[3], filter, 1);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+ int32x4_t sum4567 = vcombine_s32(vmovn_s64(sum45), vmovn_s64(sum67));
+ uint16x8_t res = vcombine_u16(vqrshrun_n_s32(sum0123, FILTER_BITS),
+ vqrshrun_n_s32(sum4567, FILTER_BITS));
+ return vminq_u16(res, max);
+}
+
+void highbd_convolve_y_sr_8tap_sve2(const uint16_t *src, ptrdiff_t src_stride,
+ uint16_t *dst, ptrdiff_t dst_stride,
+ int width, int height,
+ const int16_t *filter_y, int bd) {
+ assert(width >= 4 && height >= 4);
+
+ const int16x8_t y_filter = vld1q_s16(filter_y);
+
+ uint16x8x3_t merge_block_tbl = vld1q_u16_x3(kDotProdMergeBlockTbl);
+ // Scale indices by size of the true vector length to avoid reading from an
+ // 'undefined' portion of a vector on a system with SVE vectors > 128-bit.
+ uint16x8_t correction0 =
+ vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000000000000ULL));
+ merge_block_tbl.val[0] = vaddq_u16(merge_block_tbl.val[0], correction0);
+
+ uint16x8_t correction1 =
+ vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000100000000ULL));
+ merge_block_tbl.val[1] = vaddq_u16(merge_block_tbl.val[1], correction1);
+
+ uint16x8_t correction2 =
+ vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000100010000ULL));
+ merge_block_tbl.val[2] = vaddq_u16(merge_block_tbl.val[2], correction2);
+
+ if (width == 4) {
+ const uint16x4_t max = vdup_n_u16((1 << bd) - 1);
+ int16_t *s = (int16_t *)src;
+
+ int16x4_t s0, s1, s2, s3, s4, s5, s6;
+ load_s16_4x7(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6);
+ s += 7 * src_stride;
+
+ // This operation combines a conventional transpose and the sample permute
+ // required before computing the dot product.
+ int16x8_t s0123[2], s1234[2], s2345[2], s3456[2];
+ transpose_concat_4x4(s0, s1, s2, s3, s0123);
+ transpose_concat_4x4(s1, s2, s3, s4, s1234);
+ transpose_concat_4x4(s2, s3, s4, s5, s2345);
+ transpose_concat_4x4(s3, s4, s5, s6, s3456);
+
+ do {
+ int16x4_t s7, s8, s9, s10;
+ load_s16_4x4(s, src_stride, &s7, &s8, &s9, &s10);
+
+ int16x8_t s4567[2], s5678[2], s6789[2], s789A[2];
+ // Transpose and shuffle the 4 lines that were loaded.
+ transpose_concat_4x4(s7, s8, s9, s10, s789A);
+
+ // Merge new data into block from previous iteration.
+ aom_tbl2x2_s16(s3456, s789A, merge_block_tbl.val[0], s4567);
+ aom_tbl2x2_s16(s3456, s789A, merge_block_tbl.val[1], s5678);
+ aom_tbl2x2_s16(s3456, s789A, merge_block_tbl.val[2], s6789);
+
+ uint16x4_t d0 = highbd_convolve8_4_y(s0123, s4567, y_filter, max);
+ uint16x4_t d1 = highbd_convolve8_4_y(s1234, s5678, y_filter, max);
+ uint16x4_t d2 = highbd_convolve8_4_y(s2345, s6789, y_filter, max);
+ uint16x4_t d3 = highbd_convolve8_4_y(s3456, s789A, y_filter, max);
+
+ store_u16_4x4(dst, dst_stride, d0, d1, d2, d3);
+
+ // Prepare block for next iteration - re-using as much as possible.
+ // Shuffle everything up four rows.
+ s0123[0] = s4567[0];
+ s0123[1] = s4567[1];
+ s1234[0] = s5678[0];
+ s1234[1] = s5678[1];
+ s2345[0] = s6789[0];
+ s2345[1] = s6789[1];
+ s3456[0] = s789A[0];
+ s3456[1] = s789A[1];
+ s += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height != 0);
+ } else {
+ const uint16x8_t max = vdupq_n_u16((1 << bd) - 1);
+
+ do {
+ int h = height;
+ int16_t *s = (int16_t *)src;
+ uint16_t *d = dst;
+
+ int16x8_t s0, s1, s2, s3, s4, s5, s6;
+ load_s16_8x7(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6);
+ s += 7 * src_stride;
+
+ // This operation combines a conventional transpose and the sample permute
+ // required before computing the dot product.
+ int16x8_t s0123[4], s1234[4], s2345[4], s3456[4];
+ transpose_concat_8x4(s0, s1, s2, s3, s0123);
+ transpose_concat_8x4(s1, s2, s3, s4, s1234);
+ transpose_concat_8x4(s2, s3, s4, s5, s2345);
+ transpose_concat_8x4(s3, s4, s5, s6, s3456);
+
+ do {
+ int16x8_t s7, s8, s9, s10;
+ load_s16_8x4(s, src_stride, &s7, &s8, &s9, &s10);
+
+ int16x8_t s4567[4], s5678[4], s6789[4], s789A[4];
+ // Transpose and shuffle the 4 lines that were loaded.
+ transpose_concat_8x4(s7, s8, s9, s10, s789A);
+
+ // Merge new data into block from previous iteration.
+ aom_tbl2x4_s16(s3456, s789A, merge_block_tbl.val[0], s4567);
+ aom_tbl2x4_s16(s3456, s789A, merge_block_tbl.val[1], s5678);
+ aom_tbl2x4_s16(s3456, s789A, merge_block_tbl.val[2], s6789);
+
+ uint16x8_t d0 = highbd_convolve8_8_y(s0123, s4567, y_filter, max);
+ uint16x8_t d1 = highbd_convolve8_8_y(s1234, s5678, y_filter, max);
+ uint16x8_t d2 = highbd_convolve8_8_y(s2345, s6789, y_filter, max);
+ uint16x8_t d3 = highbd_convolve8_8_y(s3456, s789A, y_filter, max);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ // Prepare block for next iteration - re-using as much as possible.
+ // Shuffle everything up four rows.
+ s0123[0] = s4567[0];
+ s0123[1] = s4567[1];
+ s0123[2] = s4567[2];
+ s0123[3] = s4567[3];
+ s1234[0] = s5678[0];
+ s1234[1] = s5678[1];
+ s1234[2] = s5678[2];
+ s1234[3] = s5678[3];
+ s2345[0] = s6789[0];
+ s2345[1] = s6789[1];
+ s2345[2] = s6789[2];
+ s2345[3] = s6789[3];
+ s3456[0] = s789A[0];
+ s3456[1] = s789A[1];
+ s3456[2] = s789A[2];
+ s3456[3] = s789A[3];
+
+ s += 4 * src_stride;
+ d += 4 * dst_stride;
+ h -= 4;
+ } while (h != 0);
+ src += 8;
+ dst += 8;
+ width -= 8;
+ } while (width != 0);
+ }
+}
+
+static INLINE uint16x4_t highbd_convolve4_4_y(int16x8_t samples[2],
+ int16x8_t filter,
+ uint16x4_t max) {
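+  // Each 64-bit lane of samples[] holds four consecutive rows of one output
+  // column, so a single 4-tap dot product per lane produces one pixel.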
+ int64x2_t sum01 = aom_svdot_lane_s16(vdupq_n_s64(0), samples[0], filter, 0);
+ int64x2_t sum23 = aom_svdot_lane_s16(vdupq_n_s64(0), samples[1], filter, 0);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+ uint16x4_t res = vqrshrun_n_s32(sum0123, FILTER_BITS);
+ return vmin_u16(res, max);
+}
+
+static INLINE uint16x8_t highbd_convolve4_8_y(int16x8_t samples[4],
+ int16x8_t filter,
+ uint16x8_t max) {
+ int64x2_t sum01 = aom_svdot_lane_s16(vdupq_n_s64(0), samples[0], filter, 0);
+ int64x2_t sum23 = aom_svdot_lane_s16(vdupq_n_s64(0), samples[1], filter, 0);
+ int64x2_t sum45 = aom_svdot_lane_s16(vdupq_n_s64(0), samples[2], filter, 0);
+ int64x2_t sum67 = aom_svdot_lane_s16(vdupq_n_s64(0), samples[3], filter, 0);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+ int32x4_t sum4567 = vcombine_s32(vmovn_s64(sum45), vmovn_s64(sum67));
+ uint16x8_t res = vcombine_u16(vqrshrun_n_s32(sum0123, FILTER_BITS),
+ vqrshrun_n_s32(sum4567, FILTER_BITS));
+ return vminq_u16(res, max);
+}
+
+void highbd_convolve_y_sr_4tap_sve2(const uint16_t *src, ptrdiff_t src_stride,
+ uint16_t *dst, ptrdiff_t dst_stride,
+ int width, int height,
+ const int16_t *filter_y, int bd) {
+  assert(width >= 4 && height >= 4);
+
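+  // The 4-tap kernels store their non-zero coefficients in positions 2-5 of
+  // the 8-tap filter array, so load the middle four taps and zero-pad the
+  // upper half of the vector.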
+ const int16x8_t y_filter =
+ vcombine_s16(vld1_s16(filter_y + 2), vdup_n_s16(0));
+
+ if (width == 4) {
+ const uint16x4_t max = vdup_n_u16((1 << bd) - 1);
+ int16_t *s = (int16_t *)src;
+
+ int16x4_t s0, s1, s2;
+ load_s16_4x3(s, src_stride, &s0, &s1, &s2);
+ s += 3 * src_stride;
+
+ do {
+ int16x4_t s3, s4, s5, s6;
+ load_s16_4x4(s, src_stride, &s3, &s4, &s5, &s6);
+
+ // This operation combines a conventional transpose and the sample permute
+ // required before computing the dot product.
+ int16x8_t s0123[2], s1234[2], s2345[2], s3456[2];
+ transpose_concat_4x4(s0, s1, s2, s3, s0123);
+ transpose_concat_4x4(s1, s2, s3, s4, s1234);
+ transpose_concat_4x4(s2, s3, s4, s5, s2345);
+ transpose_concat_4x4(s3, s4, s5, s6, s3456);
+
+ uint16x4_t d0 = highbd_convolve4_4_y(s0123, y_filter, max);
+ uint16x4_t d1 = highbd_convolve4_4_y(s1234, y_filter, max);
+ uint16x4_t d2 = highbd_convolve4_4_y(s2345, y_filter, max);
+ uint16x4_t d3 = highbd_convolve4_4_y(s3456, y_filter, max);
+
+ store_u16_4x4(dst, dst_stride, d0, d1, d2, d3);
+
+ // Shuffle everything up four rows.
+ s0 = s4;
+ s1 = s5;
+ s2 = s6;
+
+ s += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height != 0);
+ } else {
+ const uint16x8_t max = vdupq_n_u16((1 << bd) - 1);
+
+ do {
+ int h = height;
+ int16_t *s = (int16_t *)src;
+ uint16_t *d = dst;
+
+ int16x8_t s0, s1, s2;
+ load_s16_8x3(s, src_stride, &s0, &s1, &s2);
+ s += 3 * src_stride;
+
+ do {
+ int16x8_t s3, s4, s5, s6;
+ load_s16_8x4(s, src_stride, &s3, &s4, &s5, &s6);
+
+ // This operation combines a conventional transpose and the sample
+ // permute required before computing the dot product.
+ int16x8_t s0123[4], s1234[4], s2345[4], s3456[4];
+ transpose_concat_8x4(s0, s1, s2, s3, s0123);
+ transpose_concat_8x4(s1, s2, s3, s4, s1234);
+ transpose_concat_8x4(s2, s3, s4, s5, s2345);
+ transpose_concat_8x4(s3, s4, s5, s6, s3456);
+
+ uint16x8_t d0 = highbd_convolve4_8_y(s0123, y_filter, max);
+ uint16x8_t d1 = highbd_convolve4_8_y(s1234, y_filter, max);
+ uint16x8_t d2 = highbd_convolve4_8_y(s2345, y_filter, max);
+ uint16x8_t d3 = highbd_convolve4_8_y(s3456, y_filter, max);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ // Shuffle everything up four rows.
+ s0 = s4;
+ s1 = s5;
+ s2 = s6;
+
+ s += 4 * src_stride;
+ d += 4 * dst_stride;
+ h -= 4;
+ } while (h != 0);
+ src += 8;
+ dst += 8;
+ width -= 8;
+ } while (width != 0);
+ }
+}
+
+void av1_highbd_convolve_y_sr_sve2(const uint16_t *src, int src_stride,
+ uint16_t *dst, int dst_stride, int w, int h,
+ const InterpFilterParams *filter_params_y,
+ const int subpel_y_qn, int bd) {
+ if (w == 2 || h == 2) {
+ av1_highbd_convolve_y_sr_c(src, src_stride, dst, dst_stride, w, h,
+ filter_params_y, subpel_y_qn, bd);
+ return;
+ }
+ const int y_filter_taps = get_filter_tap(filter_params_y, subpel_y_qn);
+
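+  // The SVE2 paths below handle 4, 8 and 12 taps; defer 6-tap filters to the
+  // Neon implementation.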
+ if (y_filter_taps == 6) {
+ av1_highbd_convolve_y_sr_neon(src, src_stride, dst, dst_stride, w, h,
+ filter_params_y, subpel_y_qn, bd);
+ return;
+ }
+
+ const int vert_offset = filter_params_y->taps / 2 - 1;
+ const int16_t *y_filter_ptr = av1_get_interp_filter_subpel_kernel(
+ filter_params_y, subpel_y_qn & SUBPEL_MASK);
+
+ src -= vert_offset * src_stride;
+
+ if (y_filter_taps > 8) {
+ highbd_convolve_y_sr_12tap_sve2(src, src_stride, dst, dst_stride, w, h,
+ y_filter_ptr, bd);
+ return;
+ }
+
+ if (y_filter_taps == 4) {
+ highbd_convolve_y_sr_4tap_sve2(src + 2 * src_stride, src_stride, dst,
+ dst_stride, w, h, y_filter_ptr, bd);
+ return;
+ }
+
+ highbd_convolve_y_sr_8tap_sve2(src, src_stride, dst, dst_stride, w, h,
+ y_filter_ptr, bd);
+}
+
+static INLINE uint16x4_t convolve12_4_2d_h(
+ int16x8_t s0, int16x8_t s1, int16x8_t filter_0_7, int16x8_t filter_4_11,
+ const int64x2_t offset, int32x4_t shift, uint16x8x4_t permute_tbl) {
+ int16x8_t permuted_samples[6];
+ permuted_samples[0] = aom_tbl_s16(s0, permute_tbl.val[0]);
+ permuted_samples[1] = aom_tbl_s16(s0, permute_tbl.val[1]);
+ permuted_samples[2] = aom_tbl2_s16(s0, s1, permute_tbl.val[2]);
+ permuted_samples[3] = aom_tbl2_s16(s0, s1, permute_tbl.val[3]);
+ permuted_samples[4] = aom_tbl_s16(s1, permute_tbl.val[0]);
+ permuted_samples[5] = aom_tbl_s16(s1, permute_tbl.val[1]);
+
+ int64x2_t sum01 =
+ aom_svdot_lane_s16(offset, permuted_samples[0], filter_0_7, 0);
+ sum01 = aom_svdot_lane_s16(sum01, permuted_samples[2], filter_0_7, 1);
+ sum01 = aom_svdot_lane_s16(sum01, permuted_samples[4], filter_4_11, 1);
+
+ int64x2_t sum23 =
+ aom_svdot_lane_s16(offset, permuted_samples[1], filter_0_7, 0);
+ sum23 = aom_svdot_lane_s16(sum23, permuted_samples[3], filter_0_7, 1);
+ sum23 = aom_svdot_lane_s16(sum23, permuted_samples[5], filter_4_11, 1);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+ sum0123 = vqrshlq_s32(sum0123, shift);
+ return vqmovun_s32(sum0123);
+}
+
+static INLINE uint16x8_t convolve12_8_2d_h(int16x8_t s0, int16x8_t s1,
+ int16x8_t s2, int16x8_t filter_0_7,
+ int16x8_t filter_4_11,
+ int64x2_t offset, int32x4_t shift,
+ uint16x8x4_t permute_tbl) {
+ int16x8_t permuted_samples[8];
+ permuted_samples[0] = aom_tbl_s16(s0, permute_tbl.val[0]);
+ permuted_samples[1] = aom_tbl_s16(s0, permute_tbl.val[1]);
+ permuted_samples[2] = aom_tbl2_s16(s0, s1, permute_tbl.val[2]);
+ permuted_samples[3] = aom_tbl2_s16(s0, s1, permute_tbl.val[3]);
+ permuted_samples[4] = aom_tbl_s16(s1, permute_tbl.val[0]);
+ permuted_samples[5] = aom_tbl_s16(s1, permute_tbl.val[1]);
+ permuted_samples[6] = aom_tbl2_s16(s1, s2, permute_tbl.val[2]);
+ permuted_samples[7] = aom_tbl2_s16(s1, s2, permute_tbl.val[3]);
+
+ int64x2_t sum01 =
+ aom_svdot_lane_s16(offset, permuted_samples[0], filter_0_7, 0);
+ sum01 = aom_svdot_lane_s16(sum01, permuted_samples[2], filter_0_7, 1);
+ sum01 = aom_svdot_lane_s16(sum01, permuted_samples[4], filter_4_11, 1);
+
+ int64x2_t sum23 =
+ aom_svdot_lane_s16(offset, permuted_samples[1], filter_0_7, 0);
+ sum23 = aom_svdot_lane_s16(sum23, permuted_samples[3], filter_0_7, 1);
+ sum23 = aom_svdot_lane_s16(sum23, permuted_samples[5], filter_4_11, 1);
+
+ int64x2_t sum45 =
+ aom_svdot_lane_s16(offset, permuted_samples[2], filter_0_7, 0);
+ sum45 = aom_svdot_lane_s16(sum45, permuted_samples[4], filter_0_7, 1);
+ sum45 = aom_svdot_lane_s16(sum45, permuted_samples[6], filter_4_11, 1);
+
+ int64x2_t sum67 =
+ aom_svdot_lane_s16(offset, permuted_samples[3], filter_0_7, 0);
+ sum67 = aom_svdot_lane_s16(sum67, permuted_samples[5], filter_0_7, 1);
+ sum67 = aom_svdot_lane_s16(sum67, permuted_samples[7], filter_4_11, 1);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+ int32x4_t sum4567 = vcombine_s32(vmovn_s64(sum45), vmovn_s64(sum67));
+
+ sum0123 = vqrshlq_s32(sum0123, shift);
+ sum4567 = vqrshlq_s32(sum4567, shift);
+
+ return vcombine_u16(vqmovun_s32(sum0123), vqmovun_s32(sum4567));
+}
+
+static INLINE void highbd_convolve_2d_sr_horiz_12tap_sve2(
+ const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride,
+ int width, int height, const int16_t *y_filter_ptr,
+ ConvolveParams *conv_params, const int x_offset) {
+ const int64x2_t offset = vdupq_n_s64(x_offset);
+ const int32x4_t shift = vdupq_n_s32(-conv_params->round_0);
+
+ const int16x8_t y_filter_0_7 = vld1q_s16(y_filter_ptr);
+ const int16x8_t y_filter_4_11 = vld1q_s16(y_filter_ptr + 4);
+
+ uint16x8x4_t permute_tbl = vld1q_u16_x4(kDotProdTbl);
+ // Scale indices by size of the true vector length to avoid reading from an
+ // 'undefined' portion of a vector on a system with SVE vectors > 128-bit.
+ uint16x8_t correction0 = vreinterpretq_u16_u64(vcombine_u64(
+ vdup_n_u64(0), vdup_n_u64(svcnth() * 0x0001000000000000ULL)));
+ permute_tbl.val[2] = vaddq_u16(permute_tbl.val[2], correction0);
+
+ uint16x8_t correction1 = vreinterpretq_u16_u64(
+ vcombine_u64(vdup_n_u64(svcnth() * 0x0001000100000000ULL),
+ vdup_n_u64(svcnth() * 0x0001000100010000ULL)));
+ permute_tbl.val[3] = vaddq_u16(permute_tbl.val[3], correction1);
+
+ if (width == 4) {
+ const int16_t *s = (const int16_t *)src;
+
+ do {
+ int16x8_t s0, s1, s2, s3, s4, s5, s6, s7;
+ load_s16_8x4(s, src_stride, &s0, &s2, &s4, &s6);
+ load_s16_8x4(s + 8, src_stride, &s1, &s3, &s5, &s7);
+
+ uint16x4_t d0 = convolve12_4_2d_h(s0, s1, y_filter_0_7, y_filter_4_11,
+ offset, shift, permute_tbl);
+ uint16x4_t d1 = convolve12_4_2d_h(s2, s3, y_filter_0_7, y_filter_4_11,
+ offset, shift, permute_tbl);
+ uint16x4_t d2 = convolve12_4_2d_h(s4, s5, y_filter_0_7, y_filter_4_11,
+ offset, shift, permute_tbl);
+ uint16x4_t d3 = convolve12_4_2d_h(s6, s7, y_filter_0_7, y_filter_4_11,
+ offset, shift, permute_tbl);
+
+ store_u16_4x4(dst, dst_stride, d0, d1, d2, d3);
+
+ dst += 4 * dst_stride;
+ s += 4 * src_stride;
+ height -= 4;
+ } while (height > 0);
+ } else {
+ do {
+ const int16_t *s = (const int16_t *)src;
+ uint16_t *d = dst;
+ int w = width;
+
+ do {
+ int16x8_t s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11;
+ load_s16_8x4(s, src_stride, &s0, &s3, &s6, &s9);
+ load_s16_8x4(s + 8, src_stride, &s1, &s4, &s7, &s10);
+ load_s16_8x4(s + 16, src_stride, &s2, &s5, &s8, &s11);
+
+ uint16x8_t d0 =
+ convolve12_8_2d_h(s0, s1, s2, y_filter_0_7, y_filter_4_11, offset,
+ shift, permute_tbl);
+ uint16x8_t d1 =
+ convolve12_8_2d_h(s3, s4, s5, y_filter_0_7, y_filter_4_11, offset,
+ shift, permute_tbl);
+ uint16x8_t d2 =
+ convolve12_8_2d_h(s6, s7, s8, y_filter_0_7, y_filter_4_11, offset,
+ shift, permute_tbl);
+ uint16x8_t d3 =
+ convolve12_8_2d_h(s9, s10, s11, y_filter_0_7, y_filter_4_11, offset,
+ shift, permute_tbl);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ s += 8;
+ d += 8;
+ w -= 8;
+ } while (w != 0);
+ src += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height > 0);
+ }
+}
+
+static INLINE uint16x8_t convolve8_8_2d_h(int16x8_t s0[8], int16x8_t filter,
+ int64x2_t offset, int32x4_t shift) {
+ int64x2_t sum[8];
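+
+  // Each dot product accumulates one output pixel split across the two
+  // 64-bit lanes (taps 0-3 in lane 0, taps 4-7 in lane 1); the pairwise adds
+  // below combine the lanes and pack two pixels per vector.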
+ sum[0] = aom_sdotq_s16(offset, s0[0], filter);
+ sum[1] = aom_sdotq_s16(offset, s0[1], filter);
+ sum[2] = aom_sdotq_s16(offset, s0[2], filter);
+ sum[3] = aom_sdotq_s16(offset, s0[3], filter);
+ sum[4] = aom_sdotq_s16(offset, s0[4], filter);
+ sum[5] = aom_sdotq_s16(offset, s0[5], filter);
+ sum[6] = aom_sdotq_s16(offset, s0[6], filter);
+ sum[7] = aom_sdotq_s16(offset, s0[7], filter);
+
+ sum[0] = vpaddq_s64(sum[0], sum[1]);
+ sum[2] = vpaddq_s64(sum[2], sum[3]);
+ sum[4] = vpaddq_s64(sum[4], sum[5]);
+ sum[6] = vpaddq_s64(sum[6], sum[7]);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum[0]), vmovn_s64(sum[2]));
+ int32x4_t sum4567 = vcombine_s32(vmovn_s64(sum[4]), vmovn_s64(sum[6]));
+
+ sum0123 = vqrshlq_s32(sum0123, shift);
+ sum4567 = vqrshlq_s32(sum4567, shift);
+
+ return vcombine_u16(vqmovun_s32(sum0123), vqmovun_s32(sum4567));
+}
+
+static INLINE void highbd_convolve_2d_sr_horiz_8tap_sve2(
+ const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride,
+ int width, int height, const int16_t *y_filter_ptr,
+ ConvolveParams *conv_params, const int x_offset) {
+ const int64x2_t offset = vdupq_n_s64(x_offset);
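+  // Keep the offset in lane 0 only so that it is added exactly once per
+  // pixel by the pairwise add in convolve8_8_2d_h.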
+ const int64x2_t offset_lo = vcombine_s64(vget_low_s64(offset), vdup_n_s64(0));
+ const int32x4_t shift = vdupq_n_s32(-conv_params->round_0);
+
+ const int16x8_t filter = vld1q_s16(y_filter_ptr);
+
+ do {
+ const int16_t *s = (const int16_t *)src;
+ uint16_t *d = dst;
+ int w = width;
+
+ do {
+ int16x8_t s0[8], s1[8], s2[8], s3[8];
+ load_s16_8x8(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3],
+ &s0[4], &s0[5], &s0[6], &s0[7]);
+ load_s16_8x8(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3],
+ &s1[4], &s1[5], &s1[6], &s1[7]);
+ load_s16_8x8(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3],
+ &s2[4], &s2[5], &s2[6], &s2[7]);
+ load_s16_8x8(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3],
+ &s3[4], &s3[5], &s3[6], &s3[7]);
+
+ uint16x8_t d0 = convolve8_8_2d_h(s0, filter, offset_lo, shift);
+ uint16x8_t d1 = convolve8_8_2d_h(s1, filter, offset_lo, shift);
+ uint16x8_t d2 = convolve8_8_2d_h(s2, filter, offset_lo, shift);
+ uint16x8_t d3 = convolve8_8_2d_h(s3, filter, offset_lo, shift);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ s += 8;
+ d += 8;
+ w -= 8;
+ } while (w != 0);
+ src += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height > 0);
+}
+
+static INLINE uint16x4_t convolve4_4_2d_h(int16x8_t s0, int16x8_t filter,
+ int64x2_t offset, int32x4_t shift,
+ uint16x8x2_t permute_tbl) {
+ int16x8_t permuted_samples0 = aom_tbl_s16(s0, permute_tbl.val[0]);
+ int16x8_t permuted_samples1 = aom_tbl_s16(s0, permute_tbl.val[1]);
+
+ int64x2_t sum01 = aom_svdot_lane_s16(offset, permuted_samples0, filter, 0);
+ int64x2_t sum23 = aom_svdot_lane_s16(offset, permuted_samples1, filter, 0);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+ sum0123 = vqrshlq_s32(sum0123, shift);
+ return vqmovun_s32(sum0123);
+}
+
+static INLINE uint16x8_t convolve4_8_2d_h(int16x8_t s0[8], int16x8_t filter,
+ int64x2_t offset, int32x4_t shift,
+ uint16x8_t tbl) {
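+  // With the 4-tap filter in lane 0, each dot product produces two output
+  // pixels four columns apart (e.g. sum04 holds pixels 0 and 4); the final
+  // table lookup restores pixel order.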
+ int64x2_t sum04 = aom_svdot_lane_s16(offset, s0[0], filter, 0);
+ int64x2_t sum15 = aom_svdot_lane_s16(offset, s0[1], filter, 0);
+ int64x2_t sum26 = aom_svdot_lane_s16(offset, s0[2], filter, 0);
+ int64x2_t sum37 = aom_svdot_lane_s16(offset, s0[3], filter, 0);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum04), vmovn_s64(sum15));
+ int32x4_t sum4567 = vcombine_s32(vmovn_s64(sum26), vmovn_s64(sum37));
+
+ sum0123 = vqrshlq_s32(sum0123, shift);
+ sum4567 = vqrshlq_s32(sum4567, shift);
+
+ uint16x8_t res = vcombine_u16(vqmovun_s32(sum0123), vqmovun_s32(sum4567));
+ return aom_tbl_u16(res, tbl);
+}
+
+static INLINE void highbd_convolve_2d_sr_horiz_4tap_sve2(
+ const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride,
+ int width, int height, const int16_t *x_filter_ptr,
+ ConvolveParams *conv_params, const int x_offset) {
+ const int64x2_t offset = vdupq_n_s64(x_offset);
+ const int32x4_t shift = vdupq_n_s32(-conv_params->round_0);
+
+ const int16x4_t x_filter = vld1_s16(x_filter_ptr + 2);
+ const int16x8_t filter = vcombine_s16(x_filter, vdup_n_s16(0));
+
+ if (width == 4) {
+ const int16_t *s = (const int16_t *)(src);
+
+ uint16x8x2_t permute_tbl = vld1q_u16_x2(kDotProdTbl);
+
+ do {
+ int16x8_t s0, s1, s2, s3;
+ load_s16_8x4(s, src_stride, &s0, &s1, &s2, &s3);
+
+ uint16x4_t d0 = convolve4_4_2d_h(s0, filter, offset, shift, permute_tbl);
+ uint16x4_t d1 = convolve4_4_2d_h(s1, filter, offset, shift, permute_tbl);
+ uint16x4_t d2 = convolve4_4_2d_h(s2, filter, offset, shift, permute_tbl);
+ uint16x4_t d3 = convolve4_4_2d_h(s3, filter, offset, shift, permute_tbl);
+
+ store_u16_4x4(dst, dst_stride, d0, d1, d2, d3);
+
+ s += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height > 0);
+ } else {
+ uint16x8_t idx = vld1q_u16(kDeinterleaveTbl);
+
+ do {
+ const int16_t *s = (const int16_t *)(src);
+ uint16_t *d = dst;
+ int w = width;
+
+ do {
+ int16x8_t s0[8], s1[8], s2[8], s3[8];
+ load_s16_8x8(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3],
+ &s0[4], &s0[5], &s0[6], &s0[7]);
+ load_s16_8x8(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3],
+ &s1[4], &s1[5], &s1[6], &s1[7]);
+ load_s16_8x8(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3],
+ &s2[4], &s2[5], &s2[6], &s2[7]);
+ load_s16_8x8(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3],
+ &s3[4], &s3[5], &s3[6], &s3[7]);
+
+ uint16x8_t d0 = convolve4_8_2d_h(s0, filter, offset, shift, idx);
+ uint16x8_t d1 = convolve4_8_2d_h(s1, filter, offset, shift, idx);
+ uint16x8_t d2 = convolve4_8_2d_h(s2, filter, offset, shift, idx);
+ uint16x8_t d3 = convolve4_8_2d_h(s3, filter, offset, shift, idx);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ s += 8;
+ d += 8;
+ w -= 8;
+ } while (w != 0);
+ src += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height > 0);
+ }
+}
+
+static INLINE uint16x4_t highbd_convolve12_4_2d_v(
+ int16x8_t s0[2], int16x8_t s1[2], int16x8_t s2[2], int16x8_t filter_0_7,
+ int16x8_t filter_4_11, int32x4_t shift, int64x2_t offset, uint16x4_t max) {
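+  // Apply the 12-tap filter as three 4-tap dot products: taps 0-3 from
+  // filter_0_7 lane 0, taps 4-7 from filter_0_7 lane 1 and taps 8-11 from
+  // filter_4_11 lane 1.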
+ int64x2_t sum01 = aom_svdot_lane_s16(offset, s0[0], filter_0_7, 0);
+ sum01 = aom_svdot_lane_s16(sum01, s1[0], filter_0_7, 1);
+ sum01 = aom_svdot_lane_s16(sum01, s2[0], filter_4_11, 1);
+
+ int64x2_t sum23 = aom_svdot_lane_s16(offset, s0[1], filter_0_7, 0);
+ sum23 = aom_svdot_lane_s16(sum23, s1[1], filter_0_7, 1);
+ sum23 = aom_svdot_lane_s16(sum23, s2[1], filter_4_11, 1);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+ sum0123 = vshlq_s32(sum0123, shift);
+
+ uint16x4_t res = vqmovun_s32(sum0123);
+
+ return vmin_u16(res, max);
+}
+
+static INLINE void highbd_convolve_2d_sr_vert_12tap_sve2(
+ const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride,
+ int width, int height, const int16_t *y_filter_ptr,
+ ConvolveParams *conv_params, int bd, const int y_offset) {
+ const int64x2_t offset = vdupq_n_s64(y_offset);
+ const int32x4_t shift = vdupq_n_s32(-conv_params->round_1);
+
+ const int16x8_t y_filter_0_7 = vld1q_s16(y_filter_ptr);
+ const int16x8_t y_filter_4_11 = vld1q_s16(y_filter_ptr + 4);
+
+ uint16x8x3_t merge_block_tbl = vld1q_u16_x3(kDotProdMergeBlockTbl);
+ // Scale indices by size of the true vector length to avoid reading from an
+ // 'undefined' portion of a vector on a system with SVE vectors > 128-bit.
+ uint16x8_t correction0 =
+ vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000000000000ULL));
+ merge_block_tbl.val[0] = vaddq_u16(merge_block_tbl.val[0], correction0);
+
+ uint16x8_t correction1 =
+ vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000100000000ULL));
+ merge_block_tbl.val[1] = vaddq_u16(merge_block_tbl.val[1], correction1);
+
+ uint16x8_t correction2 =
+ vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000100010000ULL));
+ merge_block_tbl.val[2] = vaddq_u16(merge_block_tbl.val[2], correction2);
+
+ const uint16x4_t max = vdup_n_u16((1 << bd) - 1);
+
+ do {
+ int16_t *s = (int16_t *)src;
+ uint16_t *d = (uint16_t *)dst;
+ int h = height;
+
+ int16x4_t s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA;
+ load_s16_4x11(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6, &s7, &s8,
+ &s9, &sA);
+ s += 11 * src_stride;
+
+ int16x8_t s0123[2], s1234[2], s2345[2], s3456[2], s4567[2], s5678[2],
+ s6789[2], s789A[2];
+ // This operation combines a conventional transpose and the sample permute
+ // required before computing the dot product.
+ transpose_concat_4x4(s0, s1, s2, s3, s0123);
+ transpose_concat_4x4(s1, s2, s3, s4, s1234);
+ transpose_concat_4x4(s2, s3, s4, s5, s2345);
+ transpose_concat_4x4(s3, s4, s5, s6, s3456);
+ transpose_concat_4x4(s4, s5, s6, s7, s4567);
+ transpose_concat_4x4(s5, s6, s7, s8, s5678);
+ transpose_concat_4x4(s6, s7, s8, s9, s6789);
+ transpose_concat_4x4(s7, s8, s9, sA, s789A);
+
+ do {
+ int16x4_t sB, sC, sD, sE;
+ load_s16_4x4(s, src_stride, &sB, &sC, &sD, &sE);
+
+ int16x8_t s89AB[2], s9ABC[2], sABCD[2], sBCDE[2];
+ transpose_concat_4x4(sB, sC, sD, sE, sBCDE);
+
+ // Use the above transpose and reuse data from the previous loop to get
+ // the rest.
+ aom_tbl2x2_s16(s789A, sBCDE, merge_block_tbl.val[0], s89AB);
+ aom_tbl2x2_s16(s789A, sBCDE, merge_block_tbl.val[1], s9ABC);
+ aom_tbl2x2_s16(s789A, sBCDE, merge_block_tbl.val[2], sABCD);
+
+ uint16x4_t d0 = highbd_convolve12_4_2d_v(
+ s0123, s4567, s89AB, y_filter_0_7, y_filter_4_11, shift, offset, max);
+ uint16x4_t d1 = highbd_convolve12_4_2d_v(
+ s1234, s5678, s9ABC, y_filter_0_7, y_filter_4_11, shift, offset, max);
+ uint16x4_t d2 = highbd_convolve12_4_2d_v(
+ s2345, s6789, sABCD, y_filter_0_7, y_filter_4_11, shift, offset, max);
+ uint16x4_t d3 = highbd_convolve12_4_2d_v(
+ s3456, s789A, sBCDE, y_filter_0_7, y_filter_4_11, shift, offset, max);
+
+ store_u16_4x4(d, dst_stride, d0, d1, d2, d3);
+
+ // Prepare block for next iteration - re-using as much as possible.
+ // Shuffle everything up four rows.
+ s0123[0] = s4567[0];
+ s0123[1] = s4567[1];
+ s1234[0] = s5678[0];
+ s1234[1] = s5678[1];
+ s2345[0] = s6789[0];
+ s2345[1] = s6789[1];
+ s3456[0] = s789A[0];
+ s3456[1] = s789A[1];
+ s4567[0] = s89AB[0];
+ s4567[1] = s89AB[1];
+ s5678[0] = s9ABC[0];
+ s5678[1] = s9ABC[1];
+ s6789[0] = sABCD[0];
+ s6789[1] = sABCD[1];
+ s789A[0] = sBCDE[0];
+ s789A[1] = sBCDE[1];
+
+ s += 4 * src_stride;
+ d += 4 * dst_stride;
+ h -= 4;
+ } while (h != 0);
+ src += 4;
+ dst += 4;
+ width -= 4;
+ } while (width != 0);
+}
+
+static INLINE uint16x4_t highbd_convolve8_4_2d_v(
+ int16x8_t samples_lo[2], int16x8_t samples_hi[2], int16x8_t filter,
+ int32x4_t shift, int64x2_t offset, uint16x4_t max) {
+ int64x2_t sum01 = aom_svdot_lane_s16(offset, samples_lo[0], filter, 0);
+ sum01 = aom_svdot_lane_s16(sum01, samples_hi[0], filter, 1);
+
+ int64x2_t sum23 = aom_svdot_lane_s16(offset, samples_lo[1], filter, 0);
+ sum23 = aom_svdot_lane_s16(sum23, samples_hi[1], filter, 1);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+ sum0123 = vshlq_s32(sum0123, shift);
+
+ uint16x4_t res = vqmovun_s32(sum0123);
+ return vmin_u16(res, max);
+}
+
+static INLINE uint16x8_t highbd_convolve8_8_2d_v(
+ int16x8_t samples_lo[4], int16x8_t samples_hi[4], int16x8_t filter,
+ int32x4_t shift, int64x2_t offset, uint16x8_t max) {
+ int64x2_t sum01 = aom_svdot_lane_s16(offset, samples_lo[0], filter, 0);
+ sum01 = aom_svdot_lane_s16(sum01, samples_hi[0], filter, 1);
+
+ int64x2_t sum23 = aom_svdot_lane_s16(offset, samples_lo[1], filter, 0);
+ sum23 = aom_svdot_lane_s16(sum23, samples_hi[1], filter, 1);
+
+ int64x2_t sum45 = aom_svdot_lane_s16(offset, samples_lo[2], filter, 0);
+ sum45 = aom_svdot_lane_s16(sum45, samples_hi[2], filter, 1);
+
+ int64x2_t sum67 = aom_svdot_lane_s16(offset, samples_lo[3], filter, 0);
+ sum67 = aom_svdot_lane_s16(sum67, samples_hi[3], filter, 1);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+ int32x4_t sum4567 = vcombine_s32(vmovn_s64(sum45), vmovn_s64(sum67));
+
+ sum0123 = vshlq_s32(sum0123, shift);
+ sum4567 = vshlq_s32(sum4567, shift);
+
+ uint16x8_t res = vcombine_u16(vqmovun_s32(sum0123), vqmovun_s32(sum4567));
+ return vminq_u16(res, max);
+}
+
+void highbd_convolve_2d_sr_vert_8tap_sve2(const uint16_t *src,
+ ptrdiff_t src_stride, uint16_t *dst,
+ ptrdiff_t dst_stride, int width,
+ int height, const int16_t *filter_y,
+ ConvolveParams *conv_params, int bd,
+ const int y_offset) {
+  assert(width >= 4 && height >= 4);
+ const int64x2_t offset = vdupq_n_s64(y_offset);
+ const int32x4_t shift = vdupq_n_s32(-conv_params->round_1);
+ const int16x8_t y_filter = vld1q_s16(filter_y);
+
+ uint16x8x3_t merge_block_tbl = vld1q_u16_x3(kDotProdMergeBlockTbl);
+ // Scale indices by size of the true vector length to avoid reading from an
+ // 'undefined' portion of a vector on a system with SVE vectors > 128-bit.
+ uint16x8_t correction0 =
+ vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000000000000ULL));
+ merge_block_tbl.val[0] = vaddq_u16(merge_block_tbl.val[0], correction0);
+
+ uint16x8_t correction1 =
+ vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000100000000ULL));
+ merge_block_tbl.val[1] = vaddq_u16(merge_block_tbl.val[1], correction1);
+
+ uint16x8_t correction2 =
+ vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000100010000ULL));
+ merge_block_tbl.val[2] = vaddq_u16(merge_block_tbl.val[2], correction2);
+
+ if (width == 4) {
+ const uint16x4_t max = vdup_n_u16((1 << bd) - 1);
+ int16_t *s = (int16_t *)src;
+
+ int16x4_t s0, s1, s2, s3, s4, s5, s6;
+ load_s16_4x7(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6);
+ s += 7 * src_stride;
+
+ // This operation combines a conventional transpose and the sample permute
+ // required before computing the dot product.
+ int16x8_t s0123[2], s1234[2], s2345[2], s3456[2];
+ transpose_concat_4x4(s0, s1, s2, s3, s0123);
+ transpose_concat_4x4(s1, s2, s3, s4, s1234);
+ transpose_concat_4x4(s2, s3, s4, s5, s2345);
+ transpose_concat_4x4(s3, s4, s5, s6, s3456);
+
+ do {
+ int16x4_t s7, s8, s9, s10;
+ load_s16_4x4(s, src_stride, &s7, &s8, &s9, &s10);
+
+ int16x8_t s4567[2], s5678[2], s6789[2], s789A[2];
+ // Transpose and shuffle the 4 lines that were loaded.
+ transpose_concat_4x4(s7, s8, s9, s10, s789A);
+
+ // Merge new data into block from previous iteration.
+ aom_tbl2x2_s16(s3456, s789A, merge_block_tbl.val[0], s4567);
+ aom_tbl2x2_s16(s3456, s789A, merge_block_tbl.val[1], s5678);
+ aom_tbl2x2_s16(s3456, s789A, merge_block_tbl.val[2], s6789);
+
+ uint16x4_t d0 =
+ highbd_convolve8_4_2d_v(s0123, s4567, y_filter, shift, offset, max);
+ uint16x4_t d1 =
+ highbd_convolve8_4_2d_v(s1234, s5678, y_filter, shift, offset, max);
+ uint16x4_t d2 =
+ highbd_convolve8_4_2d_v(s2345, s6789, y_filter, shift, offset, max);
+ uint16x4_t d3 =
+ highbd_convolve8_4_2d_v(s3456, s789A, y_filter, shift, offset, max);
+
+ store_u16_4x4(dst, dst_stride, d0, d1, d2, d3);
+
+ // Prepare block for next iteration - re-using as much as possible.
+ // Shuffle everything up four rows.
+ s0123[0] = s4567[0];
+ s0123[1] = s4567[1];
+ s1234[0] = s5678[0];
+ s1234[1] = s5678[1];
+ s2345[0] = s6789[0];
+ s2345[1] = s6789[1];
+ s3456[0] = s789A[0];
+ s3456[1] = s789A[1];
+
+ s += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height != 0);
+ } else {
+ const uint16x8_t max = vdupq_n_u16((1 << bd) - 1);
+
+ do {
+ int h = height;
+ int16_t *s = (int16_t *)src;
+ uint16_t *d = dst;
+
+ int16x8_t s0, s1, s2, s3, s4, s5, s6;
+ load_s16_8x7(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6);
+ s += 7 * src_stride;
+
+ // This operation combines a conventional transpose and the sample permute
+ // required before computing the dot product.
+ int16x8_t s0123[4], s1234[4], s2345[4], s3456[4];
+ transpose_concat_8x4(s0, s1, s2, s3, s0123);
+ transpose_concat_8x4(s1, s2, s3, s4, s1234);
+ transpose_concat_8x4(s2, s3, s4, s5, s2345);
+ transpose_concat_8x4(s3, s4, s5, s6, s3456);
+
+ do {
+ int16x8_t s7, s8, s9, s10;
+ load_s16_8x4(s, src_stride, &s7, &s8, &s9, &s10);
+
+ int16x8_t s4567[4], s5678[4], s6789[4], s789A[4];
+ // Transpose and shuffle the 4 lines that were loaded.
+ transpose_concat_8x4(s7, s8, s9, s10, s789A);
+
+ // Merge new data into block from previous iteration.
+ aom_tbl2x4_s16(s3456, s789A, merge_block_tbl.val[0], s4567);
+ aom_tbl2x4_s16(s3456, s789A, merge_block_tbl.val[1], s5678);
+ aom_tbl2x4_s16(s3456, s789A, merge_block_tbl.val[2], s6789);
+
+ uint16x8_t d0 =
+ highbd_convolve8_8_2d_v(s0123, s4567, y_filter, shift, offset, max);
+ uint16x8_t d1 =
+ highbd_convolve8_8_2d_v(s1234, s5678, y_filter, shift, offset, max);
+ uint16x8_t d2 =
+ highbd_convolve8_8_2d_v(s2345, s6789, y_filter, shift, offset, max);
+ uint16x8_t d3 =
+ highbd_convolve8_8_2d_v(s3456, s789A, y_filter, shift, offset, max);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ // Prepare block for next iteration - re-using as much as possible.
+ // Shuffle everything up four rows.
+ s0123[0] = s4567[0];
+ s0123[1] = s4567[1];
+ s0123[2] = s4567[2];
+ s0123[3] = s4567[3];
+ s1234[0] = s5678[0];
+ s1234[1] = s5678[1];
+ s1234[2] = s5678[2];
+ s1234[3] = s5678[3];
+ s2345[0] = s6789[0];
+ s2345[1] = s6789[1];
+ s2345[2] = s6789[2];
+ s2345[3] = s6789[3];
+ s3456[0] = s789A[0];
+ s3456[1] = s789A[1];
+ s3456[2] = s789A[2];
+ s3456[3] = s789A[3];
+
+ s += 4 * src_stride;
+ d += 4 * dst_stride;
+ h -= 4;
+ } while (h != 0);
+ src += 8;
+ dst += 8;
+ width -= 8;
+ } while (width != 0);
+ }
+}
+
+static INLINE uint16x4_t highbd_convolve4_4_2d_v(int16x8_t samples[2],
+ int16x8_t filter,
+ int32x4_t shift,
+ int64x2_t offset,
+ uint16x4_t max) {
+ int64x2_t sum01 = aom_svdot_lane_s16(offset, samples[0], filter, 0);
+ int64x2_t sum23 = aom_svdot_lane_s16(offset, samples[1], filter, 0);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+ sum0123 = vshlq_s32(sum0123, shift);
+
+ uint16x4_t res = vqmovun_s32(sum0123);
+ return vmin_u16(res, max);
+}
+
+static INLINE uint16x8_t highbd_convolve4_8_2d_v(int16x8_t samples[4],
+ int16x8_t filter,
+ int32x4_t shift,
+ int64x2_t offset,
+ uint16x8_t max) {
+ int64x2_t sum01 = aom_svdot_lane_s16(offset, samples[0], filter, 0);
+ int64x2_t sum23 = aom_svdot_lane_s16(offset, samples[1], filter, 0);
+ int64x2_t sum45 = aom_svdot_lane_s16(offset, samples[2], filter, 0);
+ int64x2_t sum67 = aom_svdot_lane_s16(offset, samples[3], filter, 0);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+ int32x4_t sum4567 = vcombine_s32(vmovn_s64(sum45), vmovn_s64(sum67));
+
+ sum0123 = vshlq_s32(sum0123, shift);
+ sum4567 = vshlq_s32(sum4567, shift);
+
+ uint16x8_t res = vcombine_u16(vqmovun_s32(sum0123), vqmovun_s32(sum4567));
+ return vminq_u16(res, max);
+}
+
+void highbd_convolve_2d_sr_vert_4tap_sve2(const uint16_t *src,
+ ptrdiff_t src_stride, uint16_t *dst,
+ ptrdiff_t dst_stride, int width,
+ int height, const int16_t *filter_y,
+ ConvolveParams *conv_params, int bd,
+ const int y_offset) {
+  assert(width >= 4 && height >= 4);
+ const int64x2_t offset = vdupq_n_s64(y_offset);
+ const int32x4_t shift = vdupq_n_s32(-conv_params->round_1);
+
+ const int16x8_t y_filter =
+ vcombine_s16(vld1_s16(filter_y + 2), vdup_n_s16(0));
+
+ if (width == 4) {
+ const uint16x4_t max = vdup_n_u16((1 << bd) - 1);
+ int16_t *s = (int16_t *)(src);
+
+ int16x4_t s0, s1, s2;
+ load_s16_4x3(s, src_stride, &s0, &s1, &s2);
+ s += 3 * src_stride;
+
+ do {
+ int16x4_t s3, s4, s5, s6;
+ load_s16_4x4(s, src_stride, &s3, &s4, &s5, &s6);
+
+ // This operation combines a conventional transpose and the sample permute
+ // required before computing the dot product.
+ int16x8_t s0123[2], s1234[2], s2345[2], s3456[2];
+ transpose_concat_4x4(s0, s1, s2, s3, s0123);
+ transpose_concat_4x4(s1, s2, s3, s4, s1234);
+ transpose_concat_4x4(s2, s3, s4, s5, s2345);
+ transpose_concat_4x4(s3, s4, s5, s6, s3456);
+
+ uint16x4_t d0 =
+ highbd_convolve4_4_2d_v(s0123, y_filter, shift, offset, max);
+ uint16x4_t d1 =
+ highbd_convolve4_4_2d_v(s1234, y_filter, shift, offset, max);
+ uint16x4_t d2 =
+ highbd_convolve4_4_2d_v(s2345, y_filter, shift, offset, max);
+ uint16x4_t d3 =
+ highbd_convolve4_4_2d_v(s3456, y_filter, shift, offset, max);
+
+ store_u16_4x4(dst, dst_stride, d0, d1, d2, d3);
+
+ // Shuffle everything up four rows.
+ s0 = s4;
+ s1 = s5;
+ s2 = s6;
+
+ s += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height != 0);
+ } else {
+ const uint16x8_t max = vdupq_n_u16((1 << bd) - 1);
+
+ do {
+ int h = height;
+ int16_t *s = (int16_t *)(src);
+ uint16_t *d = dst;
+
+ int16x8_t s0, s1, s2;
+ load_s16_8x3(s, src_stride, &s0, &s1, &s2);
+ s += 3 * src_stride;
+
+ do {
+ int16x8_t s3, s4, s5, s6;
+ load_s16_8x4(s, src_stride, &s3, &s4, &s5, &s6);
+
+ // This operation combines a conventional transpose and the sample
+ // permute required before computing the dot product.
+ int16x8_t s0123[4], s1234[4], s2345[4], s3456[4];
+ transpose_concat_8x4(s0, s1, s2, s3, s0123);
+ transpose_concat_8x4(s1, s2, s3, s4, s1234);
+ transpose_concat_8x4(s2, s3, s4, s5, s2345);
+ transpose_concat_8x4(s3, s4, s5, s6, s3456);
+
+ uint16x8_t d0 =
+ highbd_convolve4_8_2d_v(s0123, y_filter, shift, offset, max);
+ uint16x8_t d1 =
+ highbd_convolve4_8_2d_v(s1234, y_filter, shift, offset, max);
+ uint16x8_t d2 =
+ highbd_convolve4_8_2d_v(s2345, y_filter, shift, offset, max);
+ uint16x8_t d3 =
+ highbd_convolve4_8_2d_v(s3456, y_filter, shift, offset, max);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ // Shuffle everything up four rows.
+ s0 = s4;
+ s1 = s5;
+ s2 = s6;
+
+ s += 4 * src_stride;
+ d += 4 * dst_stride;
+ h -= 4;
+ } while (h != 0);
+ src += 8;
+ dst += 8;
+ width -= 8;
+ } while (width != 0);
+ }
+}
+
+void av1_highbd_convolve_2d_sr_sve2(const uint16_t *src, int src_stride,
+ uint16_t *dst, int dst_stride, int w, int h,
+ const InterpFilterParams *filter_params_x,
+ const InterpFilterParams *filter_params_y,
+ const int subpel_x_qn,
+ const int subpel_y_qn,
+ ConvolveParams *conv_params, int bd) {
+ if (w == 2 || h == 2) {
+ av1_highbd_convolve_2d_sr_c(src, src_stride, dst, dst_stride, w, h,
+ filter_params_x, filter_params_y, subpel_x_qn,
+ subpel_y_qn, conv_params, bd);
+ return;
+ }
+
+ DECLARE_ALIGNED(16, uint16_t,
+ im_block[(MAX_SB_SIZE + MAX_FILTER_TAP) * MAX_SB_SIZE]);
+ const int x_filter_taps = get_filter_tap(filter_params_x, subpel_x_qn);
+ const int y_filter_taps = get_filter_tap(filter_params_y, subpel_y_qn);
+
+ if (x_filter_taps == 6 || y_filter_taps == 6) {
+ av1_highbd_convolve_2d_sr_neon(src, src_stride, dst, dst_stride, w, h,
+ filter_params_x, filter_params_y,
+ subpel_x_qn, subpel_y_qn, conv_params, bd);
+ return;
+ }
+
+ const int clamped_x_taps = x_filter_taps < 4 ? 4 : x_filter_taps;
+ const int clamped_y_taps = y_filter_taps < 4 ? 4 : y_filter_taps;
+
+ const int im_stride = MAX_SB_SIZE;
+ const int vert_offset = clamped_y_taps / 2 - 1;
+ const int horiz_offset = clamped_x_taps / 2 - 1;
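+  // The horizontal offset keeps the intermediate im_block values non-negative
+  // so they fit in the uint16_t scratch buffer; it is cancelled again by the
+  // (1 << (y_offset_bits - 1)) term folded into y_offset below.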
+ const int x_offset = (1 << (bd + FILTER_BITS - 1));
+ const int y_offset_bits = bd + 2 * FILTER_BITS - conv_params->round_0;
+ // The extra shim of (1 << (conv_params->round_1 - 1)) allows us to do a
+ // simple shift left instead of a rounding saturating shift left.
+ const int y_offset =
+ (1 << (conv_params->round_1 - 1)) - (1 << (y_offset_bits - 1));
+
+ const uint16_t *src_ptr = src - vert_offset * src_stride - horiz_offset;
+
+ const int16_t *x_filter_ptr = av1_get_interp_filter_subpel_kernel(
+ filter_params_x, subpel_x_qn & SUBPEL_MASK);
+ const int16_t *y_filter_ptr = av1_get_interp_filter_subpel_kernel(
+ filter_params_y, subpel_y_qn & SUBPEL_MASK);
+ const int im_h = h + clamped_y_taps - 1;
+
+ if (x_filter_taps > 8) {
+ highbd_convolve_2d_sr_horiz_12tap_sve2(src_ptr, src_stride, im_block,
+ im_stride, w, im_h, x_filter_ptr,
+ conv_params, x_offset);
+
+ highbd_convolve_2d_sr_vert_12tap_sve2(im_block, im_stride, dst, dst_stride,
+ w, h, y_filter_ptr, conv_params, bd,
+ y_offset);
+ return;
+ }
+
+ if (x_filter_taps <= 4) {
+ highbd_convolve_2d_sr_horiz_4tap_sve2(src_ptr, src_stride, im_block,
+ im_stride, w, im_h, x_filter_ptr,
+ conv_params, x_offset);
+ } else {
+ highbd_convolve_2d_sr_horiz_8tap_sve2(src_ptr, src_stride, im_block,
+ im_stride, w, im_h, x_filter_ptr,
+ conv_params, x_offset);
+ }
+
+ if (y_filter_taps <= 4) {
+ highbd_convolve_2d_sr_vert_4tap_sve2(im_block, im_stride, dst, dst_stride,
+ w, h, y_filter_ptr, conv_params, bd,
+ y_offset);
+ } else {
+ highbd_convolve_2d_sr_vert_8tap_sve2(im_block, im_stride, dst, dst_stride,
+ w, h, y_filter_ptr, conv_params, bd,
+ y_offset);
+ }
+}
diff --git a/third_party/aom/av1/common/arm/highbd_convolve_sve2.h b/third_party/aom/av1/common/arm/highbd_convolve_sve2.h
new file mode 100644
index 0000000000..05e23deef4
--- /dev/null
+++ b/third_party/aom/av1/common/arm/highbd_convolve_sve2.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2023, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#ifndef AOM_AV1_COMMON_ARM_HIGHBD_CONVOLVE_SVE2_H_
+#define AOM_AV1_COMMON_ARM_HIGHBD_CONVOLVE_SVE2_H_
+
+#include <arm_neon.h>
+
+#include "aom_dsp/arm/aom_neon_sve2_bridge.h"
+
+// clang-format off
+DECLARE_ALIGNED(16, static const uint16_t, kDotProdMergeBlockTbl[24]) = {
+ // Shift left and insert new last column in transposed 4x4 block.
+ 1, 2, 3, 0, 5, 6, 7, 4,
+ // Shift left and insert two new columns in transposed 4x4 block.
+ 2, 3, 0, 1, 6, 7, 4, 5,
+ // Shift left and insert three new columns in transposed 4x4 block.
+ 3, 0, 1, 2, 7, 4, 5, 6,
+};
+// clang-format on
+
+static INLINE void transpose_concat_4x4(int16x4_t s0, int16x4_t s1,
+ int16x4_t s2, int16x4_t s3,
+ int16x8_t res[2]) {
+ // Transpose 16-bit elements and concatenate result rows as follows:
+ // s0: 00, 01, 02, 03
+ // s1: 10, 11, 12, 13
+ // s2: 20, 21, 22, 23
+ // s3: 30, 31, 32, 33
+ //
+ // res[0]: 00 10 20 30 01 11 21 31
+ // res[1]: 02 12 22 32 03 13 23 33
+
+ int16x8_t s0q = vcombine_s16(s0, vdup_n_s16(0));
+ int16x8_t s1q = vcombine_s16(s1, vdup_n_s16(0));
+ int16x8_t s2q = vcombine_s16(s2, vdup_n_s16(0));
+ int16x8_t s3q = vcombine_s16(s3, vdup_n_s16(0));
+
+ int32x4_t s01 = vreinterpretq_s32_s16(vzip1q_s16(s0q, s1q));
+ int32x4_t s23 = vreinterpretq_s32_s16(vzip1q_s16(s2q, s3q));
+
+ int32x4x2_t s0123 = vzipq_s32(s01, s23);
+
+ res[0] = vreinterpretq_s16_s32(s0123.val[0]);
+ res[1] = vreinterpretq_s16_s32(s0123.val[1]);
+}
+
+static INLINE void transpose_concat_8x4(int16x8_t s0, int16x8_t s1,
+ int16x8_t s2, int16x8_t s3,
+ int16x8_t res[4]) {
+ // Transpose 16-bit elements and concatenate result rows as follows:
+ // s0: 00, 01, 02, 03, 04, 05, 06, 07
+ // s1: 10, 11, 12, 13, 14, 15, 16, 17
+ // s2: 20, 21, 22, 23, 24, 25, 26, 27
+ // s3: 30, 31, 32, 33, 34, 35, 36, 37
+ //
+ // res[0]: 00 10 20 30 01 11 21 31
+ // res[1]: 02 12 22 32 03 13 23 33
+ // res[2]: 04 14 24 34 05 15 25 35
+ // res[3]: 06 16 26 36 07 17 27 37
+
+ int16x8x2_t tr01_16 = vzipq_s16(s0, s1);
+ int16x8x2_t tr23_16 = vzipq_s16(s2, s3);
+ int32x4x2_t tr01_32 = vzipq_s32(vreinterpretq_s32_s16(tr01_16.val[0]),
+ vreinterpretq_s32_s16(tr23_16.val[0]));
+ int32x4x2_t tr23_32 = vzipq_s32(vreinterpretq_s32_s16(tr01_16.val[1]),
+ vreinterpretq_s32_s16(tr23_16.val[1]));
+
+ res[0] = vreinterpretq_s16_s32(tr01_32.val[0]);
+ res[1] = vreinterpretq_s16_s32(tr01_32.val[1]);
+ res[2] = vreinterpretq_s16_s32(tr23_32.val[0]);
+ res[3] = vreinterpretq_s16_s32(tr23_32.val[1]);
+}
+
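+// Apply a single two-vector table lookup to each of the four register pairs
+// holding a transposed block.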
+static INLINE void aom_tbl2x4_s16(int16x8_t t0[4], int16x8_t t1[4],
+ uint16x8_t tbl, int16x8_t res[4]) {
+ res[0] = aom_tbl2_s16(t0[0], t1[0], tbl);
+ res[1] = aom_tbl2_s16(t0[1], t1[1], tbl);
+ res[2] = aom_tbl2_s16(t0[2], t1[2], tbl);
+ res[3] = aom_tbl2_s16(t0[3], t1[3], tbl);
+}
+
+static INLINE void aom_tbl2x2_s16(int16x8_t t0[2], int16x8_t t1[2],
+ uint16x8_t tbl, int16x8_t res[2]) {
+ res[0] = aom_tbl2_s16(t0[0], t1[0], tbl);
+ res[1] = aom_tbl2_s16(t0[1], t1[1], tbl);
+}
+
+#endif // AOM_AV1_COMMON_ARM_HIGHBD_CONVOLVE_SVE2_H_
diff --git a/third_party/aom/av1/common/arm/highbd_warp_plane_neon.c b/third_party/aom/av1/common/arm/highbd_warp_plane_neon.c
index c6f1e3ad92..89647bc921 100644
--- a/third_party/aom/av1/common/arm/highbd_warp_plane_neon.c
+++ b/third_party/aom/av1/common/arm/highbd_warp_plane_neon.c
@@ -23,8 +23,8 @@
#include "config/av1_rtcd.h"
#include "highbd_warp_plane_neon.h"
-static INLINE int16x8_t highbd_horizontal_filter_4x1_f4(uint16x8x2_t in, int bd,
- int sx, int alpha) {
+static AOM_FORCE_INLINE int16x8_t
+highbd_horizontal_filter_4x1_f4(uint16x8x2_t in, int bd, int sx, int alpha) {
int16x8_t f[4];
load_filters_4(f, sx, alpha);
@@ -57,8 +57,8 @@ static INLINE int16x8_t highbd_horizontal_filter_4x1_f4(uint16x8x2_t in, int bd,
return vcombine_s16(vmovn_s32(res), vdup_n_s16(0));
}
-static INLINE int16x8_t highbd_horizontal_filter_8x1_f8(uint16x8x2_t in, int bd,
- int sx, int alpha) {
+static AOM_FORCE_INLINE int16x8_t
+highbd_horizontal_filter_8x1_f8(uint16x8x2_t in, int bd, int sx, int alpha) {
int16x8_t f[8];
load_filters_8(f, sx, alpha);
@@ -111,8 +111,8 @@ static INLINE int16x8_t highbd_horizontal_filter_8x1_f8(uint16x8x2_t in, int bd,
return vcombine_s16(vmovn_s32(res0), vmovn_s32(res1));
}
-static INLINE int16x8_t highbd_horizontal_filter_4x1_f1(uint16x8x2_t in, int bd,
- int sx) {
+static AOM_FORCE_INLINE int16x8_t
+highbd_horizontal_filter_4x1_f1(uint16x8x2_t in, int bd, int sx) {
int16x8_t f = load_filters_1(sx);
int16x8_t rv0 = vextq_s16(vreinterpretq_s16_u16(in.val[0]),
@@ -144,8 +144,8 @@ static INLINE int16x8_t highbd_horizontal_filter_4x1_f1(uint16x8x2_t in, int bd,
return vcombine_s16(vmovn_s32(res), vdup_n_s16(0));
}
-static INLINE int16x8_t highbd_horizontal_filter_8x1_f1(uint16x8x2_t in, int bd,
- int sx) {
+static AOM_FORCE_INLINE int16x8_t
+highbd_horizontal_filter_8x1_f1(uint16x8x2_t in, int bd, int sx) {
int16x8_t f = load_filters_1(sx);
int16x8_t rv0 = vextq_s16(vreinterpretq_s16_u16(in.val[0]),
@@ -197,7 +197,8 @@ static INLINE int16x8_t highbd_horizontal_filter_8x1_f1(uint16x8x2_t in, int bd,
return vcombine_s16(vmovn_s32(res0), vmovn_s32(res1));
}
-static INLINE int32x4_t vertical_filter_4x1_f1(const int16x8_t *tmp, int sy) {
+static AOM_FORCE_INLINE int32x4_t vertical_filter_4x1_f1(const int16x8_t *tmp,
+ int sy) {
const int16x8_t f = load_filters_1(sy);
const int16x4_t f0123 = vget_low_s16(f);
const int16x4_t f4567 = vget_high_s16(f);
@@ -213,7 +214,8 @@ static INLINE int32x4_t vertical_filter_4x1_f1(const int16x8_t *tmp, int sy) {
return m0123;
}
-static INLINE int32x4x2_t vertical_filter_8x1_f1(const int16x8_t *tmp, int sy) {
+static AOM_FORCE_INLINE int32x4x2_t vertical_filter_8x1_f1(const int16x8_t *tmp,
+ int sy) {
const int16x8_t f = load_filters_1(sy);
const int16x4_t f0123 = vget_low_s16(f);
const int16x4_t f4567 = vget_high_s16(f);
@@ -238,8 +240,8 @@ static INLINE int32x4x2_t vertical_filter_8x1_f1(const int16x8_t *tmp, int sy) {
return (int32x4x2_t){ { m0123, m4567 } };
}
-static INLINE int32x4_t vertical_filter_4x1_f4(const int16x8_t *tmp, int sy,
- int gamma) {
+static AOM_FORCE_INLINE int32x4_t vertical_filter_4x1_f4(const int16x8_t *tmp,
+ int sy, int gamma) {
int16x8_t s0, s1, s2, s3;
transpose_elems_s16_4x8(
vget_low_s16(tmp[0]), vget_low_s16(tmp[1]), vget_low_s16(tmp[2]),
@@ -262,8 +264,8 @@ static INLINE int32x4_t vertical_filter_4x1_f4(const int16x8_t *tmp, int sy,
return horizontal_add_4d_s32x4(m0123);
}
-static INLINE int32x4x2_t vertical_filter_8x1_f8(const int16x8_t *tmp, int sy,
- int gamma) {
+static AOM_FORCE_INLINE int32x4x2_t vertical_filter_8x1_f8(const int16x8_t *tmp,
+ int sy, int gamma) {
int16x8_t s0 = tmp[0];
int16x8_t s1 = tmp[1];
int16x8_t s2 = tmp[2];
diff --git a/third_party/aom/av1/common/arm/highbd_warp_plane_neon.h b/third_party/aom/av1/common/arm/highbd_warp_plane_neon.h
index 3b8982898e..48af4a707b 100644
--- a/third_party/aom/av1/common/arm/highbd_warp_plane_neon.h
+++ b/third_party/aom/av1/common/arm/highbd_warp_plane_neon.h
@@ -23,29 +23,31 @@
#include "av1/common/warped_motion.h"
#include "config/av1_rtcd.h"
-static INLINE int16x8_t highbd_horizontal_filter_4x1_f4(uint16x8x2_t in, int bd,
- int sx, int alpha);
+static AOM_FORCE_INLINE int16x8_t
+highbd_horizontal_filter_4x1_f4(uint16x8x2_t in, int bd, int sx, int alpha);
-static INLINE int16x8_t highbd_horizontal_filter_8x1_f8(uint16x8x2_t in, int bd,
- int sx, int alpha);
+static AOM_FORCE_INLINE int16x8_t
+highbd_horizontal_filter_8x1_f8(uint16x8x2_t in, int bd, int sx, int alpha);
-static INLINE int16x8_t highbd_horizontal_filter_4x1_f1(uint16x8x2_t in, int bd,
- int sx);
+static AOM_FORCE_INLINE int16x8_t
+highbd_horizontal_filter_4x1_f1(uint16x8x2_t in, int bd, int sx);
-static INLINE int16x8_t highbd_horizontal_filter_8x1_f1(uint16x8x2_t in, int bd,
- int sx);
+static AOM_FORCE_INLINE int16x8_t
+highbd_horizontal_filter_8x1_f1(uint16x8x2_t in, int bd, int sx);
-static INLINE int32x4_t vertical_filter_4x1_f1(const int16x8_t *tmp, int sy);
+static AOM_FORCE_INLINE int32x4_t vertical_filter_4x1_f1(const int16x8_t *tmp,
+ int sy);
-static INLINE int32x4x2_t vertical_filter_8x1_f1(const int16x8_t *tmp, int sy);
+static AOM_FORCE_INLINE int32x4x2_t vertical_filter_8x1_f1(const int16x8_t *tmp,
+ int sy);
-static INLINE int32x4_t vertical_filter_4x1_f4(const int16x8_t *tmp, int sy,
- int gamma);
+static AOM_FORCE_INLINE int32x4_t vertical_filter_4x1_f4(const int16x8_t *tmp,
+ int sy, int gamma);
-static INLINE int32x4x2_t vertical_filter_8x1_f8(const int16x8_t *tmp, int sy,
- int gamma);
+static AOM_FORCE_INLINE int32x4x2_t vertical_filter_8x1_f8(const int16x8_t *tmp,
+ int sy, int gamma);
-static INLINE int16x8_t load_filters_1(int ofs) {
+static AOM_FORCE_INLINE int16x8_t load_filters_1(int ofs) {
const int ofs0 = ROUND_POWER_OF_TWO(ofs, WARPEDDIFF_PREC_BITS);
const int16_t *base =
@@ -53,7 +55,8 @@ static INLINE int16x8_t load_filters_1(int ofs) {
return vld1q_s16(base + ofs0 * 8);
}
-static INLINE void load_filters_4(int16x8_t out[], int ofs, int stride) {
+static AOM_FORCE_INLINE void load_filters_4(int16x8_t out[], int ofs,
+ int stride) {
const int ofs0 = ROUND_POWER_OF_TWO(ofs + stride * 0, WARPEDDIFF_PREC_BITS);
const int ofs1 = ROUND_POWER_OF_TWO(ofs + stride * 1, WARPEDDIFF_PREC_BITS);
const int ofs2 = ROUND_POWER_OF_TWO(ofs + stride * 2, WARPEDDIFF_PREC_BITS);
@@ -67,7 +70,8 @@ static INLINE void load_filters_4(int16x8_t out[], int ofs, int stride) {
out[3] = vld1q_s16(base + ofs3 * 8);
}
-static INLINE void load_filters_8(int16x8_t out[], int ofs, int stride) {
+static AOM_FORCE_INLINE void load_filters_8(int16x8_t out[], int ofs,
+ int stride) {
const int ofs0 = ROUND_POWER_OF_TWO(ofs + stride * 0, WARPEDDIFF_PREC_BITS);
const int ofs1 = ROUND_POWER_OF_TWO(ofs + stride * 1, WARPEDDIFF_PREC_BITS);
const int ofs2 = ROUND_POWER_OF_TWO(ofs + stride * 2, WARPEDDIFF_PREC_BITS);
@@ -89,16 +93,18 @@ static INLINE void load_filters_8(int16x8_t out[], int ofs, int stride) {
out[7] = vld1q_s16(base + ofs7 * 8);
}
-static INLINE uint16x4_t clip_pixel_highbd_vec(int32x4_t val, int bd) {
+static AOM_FORCE_INLINE uint16x4_t clip_pixel_highbd_vec(int32x4_t val,
+ int bd) {
const int limit = (1 << bd) - 1;
return vqmovun_s32(vminq_s32(val, vdupq_n_s32(limit)));
}
-static INLINE void warp_affine_horizontal(const uint16_t *ref, int width,
- int height, int stride, int p_width,
- int16_t alpha, int16_t beta, int iy4,
- int sx4, int ix4, int16x8_t tmp[],
- int bd) {
+static AOM_FORCE_INLINE void warp_affine_horizontal(const uint16_t *ref,
+ int width, int height,
+ int stride, int p_width,
+ int16_t alpha, int16_t beta,
+ int iy4, int sx4, int ix4,
+ int16x8_t tmp[], int bd) {
const int round0 = (bd == 12) ? ROUND0_BITS + 2 : ROUND0_BITS;
if (ix4 <= -7) {
@@ -197,7 +203,7 @@ static INLINE void warp_affine_horizontal(const uint16_t *ref, int width,
}
}
-static INLINE void highbd_vertical_filter_4x1_f4(
+static AOM_FORCE_INLINE void highbd_vertical_filter_4x1_f4(
uint16_t *pred, int p_stride, int bd, uint16_t *dst, int dst_stride,
bool is_compound, bool do_average, bool use_dist_wtd_comp_avg, int fwd,
int bwd, int16_t gamma, const int16x8_t *tmp, int i, int sy, int j) {
@@ -253,7 +259,7 @@ static INLINE void highbd_vertical_filter_4x1_f4(
vst1_u16(dst16, res0);
}
-static INLINE void highbd_vertical_filter_8x1_f8(
+static AOM_FORCE_INLINE void highbd_vertical_filter_8x1_f8(
uint16_t *pred, int p_stride, int bd, uint16_t *dst, int dst_stride,
bool is_compound, bool do_average, bool use_dist_wtd_comp_avg, int fwd,
int bwd, int16_t gamma, const int16x8_t *tmp, int i, int sy, int j) {
@@ -328,7 +334,7 @@ static INLINE void highbd_vertical_filter_8x1_f8(
vst1_u16(dst16 + 4, res1);
}
-static INLINE void warp_affine_vertical(
+static AOM_FORCE_INLINE void warp_affine_vertical(
uint16_t *pred, int p_width, int p_height, int p_stride, int bd,
uint16_t *dst, int dst_stride, bool is_compound, bool do_average,
bool use_dist_wtd_comp_avg, int fwd, int bwd, int16_t gamma, int16_t delta,
@@ -354,7 +360,7 @@ static INLINE void warp_affine_vertical(
}
}
-static INLINE void highbd_warp_affine_common(
+static AOM_FORCE_INLINE void highbd_warp_affine_common(
const int32_t *mat, const uint16_t *ref, int width, int height, int stride,
uint16_t *pred, int p_col, int p_row, int p_width, int p_height,
int p_stride, int subsampling_x, int subsampling_y, int bd,
diff --git a/third_party/aom/av1/common/arm/highbd_warp_plane_sve.c b/third_party/aom/av1/common/arm/highbd_warp_plane_sve.c
index 7a14f21846..87e033fd00 100644
--- a/third_party/aom/av1/common/arm/highbd_warp_plane_sve.c
+++ b/third_party/aom/av1/common/arm/highbd_warp_plane_sve.c
@@ -15,7 +15,7 @@
#include <arm_neon_sve_bridge.h>
#include "aom_dsp/aom_dsp_common.h"
-#include "aom_dsp/arm/dot_sve.h"
+#include "aom_dsp/arm/aom_neon_sve_bridge.h"
#include "aom_dsp/arm/mem_neon.h"
#include "aom_dsp/arm/transpose_neon.h"
#include "aom_ports/mem.h"
@@ -24,8 +24,8 @@
#include "config/av1_rtcd.h"
#include "highbd_warp_plane_neon.h"
-static INLINE int16x8_t highbd_horizontal_filter_4x1_f4(uint16x8x2_t in, int bd,
- int sx, int alpha) {
+static AOM_FORCE_INLINE int16x8_t
+highbd_horizontal_filter_4x1_f4(uint16x8x2_t in, int bd, int sx, int alpha) {
int16x8_t f[4];
load_filters_4(f, sx, alpha);
@@ -55,8 +55,8 @@ static INLINE int16x8_t highbd_horizontal_filter_4x1_f4(uint16x8x2_t in, int bd,
return vcombine_s16(vmovn_s32(res), vdup_n_s16(0));
}
-static INLINE int16x8_t highbd_horizontal_filter_8x1_f8(uint16x8x2_t in, int bd,
- int sx, int alpha) {
+static AOM_FORCE_INLINE int16x8_t
+highbd_horizontal_filter_8x1_f8(uint16x8x2_t in, int bd, int sx, int alpha) {
int16x8_t f[8];
load_filters_8(f, sx, alpha);
@@ -103,8 +103,8 @@ static INLINE int16x8_t highbd_horizontal_filter_8x1_f8(uint16x8x2_t in, int bd,
return vcombine_s16(vmovn_s32(res0), vmovn_s32(res1));
}
-static INLINE int16x8_t highbd_horizontal_filter_4x1_f1(uint16x8x2_t in, int bd,
- int sx) {
+static AOM_FORCE_INLINE int16x8_t
+highbd_horizontal_filter_4x1_f1(uint16x8x2_t in, int bd, int sx) {
int16x8_t f = load_filters_1(sx);
int16x8_t rv0 = vextq_s16(vreinterpretq_s16_u16(in.val[0]),
@@ -133,8 +133,8 @@ static INLINE int16x8_t highbd_horizontal_filter_4x1_f1(uint16x8x2_t in, int bd,
return vcombine_s16(vmovn_s32(res), vdup_n_s16(0));
}
-static INLINE int16x8_t highbd_horizontal_filter_8x1_f1(uint16x8x2_t in, int bd,
- int sx) {
+static AOM_FORCE_INLINE int16x8_t
+highbd_horizontal_filter_8x1_f1(uint16x8x2_t in, int bd, int sx) {
int16x8_t f = load_filters_1(sx);
int16x8_t rv0 = vextq_s16(vreinterpretq_s16_u16(in.val[0]),
@@ -180,7 +180,8 @@ static INLINE int16x8_t highbd_horizontal_filter_8x1_f1(uint16x8x2_t in, int bd,
return vcombine_s16(vmovn_s32(res0), vmovn_s32(res1));
}
-static INLINE int32x4_t vertical_filter_4x1_f1(const int16x8_t *tmp, int sy) {
+static AOM_FORCE_INLINE int32x4_t vertical_filter_4x1_f1(const int16x8_t *tmp,
+ int sy) {
const int16x8_t f = load_filters_1(sy);
const int16x4_t f0123 = vget_low_s16(f);
const int16x4_t f4567 = vget_high_s16(f);
@@ -197,7 +198,8 @@ static INLINE int32x4_t vertical_filter_4x1_f1(const int16x8_t *tmp, int sy) {
return m0123;
}
-static INLINE int32x4x2_t vertical_filter_8x1_f1(const int16x8_t *tmp, int sy) {
+static AOM_FORCE_INLINE int32x4x2_t vertical_filter_8x1_f1(const int16x8_t *tmp,
+ int sy) {
const int16x8_t f = load_filters_1(sy);
const int16x4_t f0123 = vget_low_s16(f);
const int16x4_t f4567 = vget_high_s16(f);
@@ -223,8 +225,8 @@ static INLINE int32x4x2_t vertical_filter_8x1_f1(const int16x8_t *tmp, int sy) {
return (int32x4x2_t){ { m0123, m4567 } };
}
-static INLINE int32x4_t vertical_filter_4x1_f4(const int16x8_t *tmp, int sy,
- int gamma) {
+static AOM_FORCE_INLINE int32x4_t vertical_filter_4x1_f4(const int16x8_t *tmp,
+ int sy, int gamma) {
int16x8_t s0, s1, s2, s3;
transpose_elems_s16_4x8(
vget_low_s16(tmp[0]), vget_low_s16(tmp[1]), vget_low_s16(tmp[2]),
@@ -244,8 +246,8 @@ static INLINE int32x4_t vertical_filter_4x1_f4(const int16x8_t *tmp, int sy,
return vcombine_s32(vmovn_s64(m01), vmovn_s64(m23));
}
-static INLINE int32x4x2_t vertical_filter_8x1_f8(const int16x8_t *tmp, int sy,
- int gamma) {
+static AOM_FORCE_INLINE int32x4x2_t vertical_filter_8x1_f8(const int16x8_t *tmp,
+ int sy, int gamma) {
int16x8_t s0 = tmp[0];
int16x8_t s1 = tmp[1];
int16x8_t s2 = tmp[2];
diff --git a/third_party/aom/av1/common/arm/warp_plane_neon.c b/third_party/aom/av1/common/arm/warp_plane_neon.c
index 4723154398..546aa2965b 100644
--- a/third_party/aom/av1/common/arm/warp_plane_neon.c
+++ b/third_party/aom/av1/common/arm/warp_plane_neon.c
@@ -11,8 +11,8 @@
#include "warp_plane_neon.h"
-static INLINE int16x8_t horizontal_filter_4x1_f4(const uint8x16_t in, int sx,
- int alpha) {
+static AOM_FORCE_INLINE int16x8_t horizontal_filter_4x1_f4(const uint8x16_t in,
+ int sx, int alpha) {
const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1));
// Loading the 8 filter taps
@@ -39,8 +39,8 @@ static INLINE int16x8_t horizontal_filter_4x1_f4(const uint8x16_t in, int sx,
return vreinterpretq_s16_u16(res);
}
-static INLINE int16x8_t horizontal_filter_8x1_f8(const uint8x16_t in, int sx,
- int alpha) {
+static AOM_FORCE_INLINE int16x8_t horizontal_filter_8x1_f8(const uint8x16_t in,
+ int sx, int alpha) {
const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1));
// Loading the 8 filter taps
@@ -75,7 +75,8 @@ static INLINE int16x8_t horizontal_filter_8x1_f8(const uint8x16_t in, int sx,
return vreinterpretq_s16_u16(res);
}
-static INLINE int16x8_t horizontal_filter_4x1_f1(const uint8x16_t in, int sx) {
+static AOM_FORCE_INLINE int16x8_t horizontal_filter_4x1_f1(const uint8x16_t in,
+ int sx) {
const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1));
int16x8_t f_s16 =
@@ -101,7 +102,8 @@ static INLINE int16x8_t horizontal_filter_4x1_f1(const uint8x16_t in, int sx) {
return vreinterpretq_s16_u16(res);
}
-static INLINE int16x8_t horizontal_filter_8x1_f1(const uint8x16_t in, int sx) {
+static AOM_FORCE_INLINE int16x8_t horizontal_filter_8x1_f1(const uint8x16_t in,
+ int sx) {
const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1));
int16x8_t f_s16 =
@@ -135,8 +137,8 @@ static INLINE int16x8_t horizontal_filter_8x1_f1(const uint8x16_t in, int sx) {
return vreinterpretq_s16_u16(res);
}
-static INLINE void vertical_filter_4x1_f1(const int16x8_t *src, int32x4_t *res,
- int sy) {
+static AOM_FORCE_INLINE void vertical_filter_4x1_f1(const int16x8_t *src,
+ int32x4_t *res, int sy) {
int16x4_t s0 = vget_low_s16(src[0]);
int16x4_t s1 = vget_low_s16(src[1]);
int16x4_t s2 = vget_low_s16(src[2]);
@@ -161,8 +163,9 @@ static INLINE void vertical_filter_4x1_f1(const int16x8_t *src, int32x4_t *res,
*res = m0123;
}
-static INLINE void vertical_filter_4x1_f4(const int16x8_t *src, int32x4_t *res,
- int sy, int gamma) {
+static AOM_FORCE_INLINE void vertical_filter_4x1_f4(const int16x8_t *src,
+ int32x4_t *res, int sy,
+ int gamma) {
int16x8_t s0, s1, s2, s3;
transpose_elems_s16_4x8(
vget_low_s16(src[0]), vget_low_s16(src[1]), vget_low_s16(src[2]),
@@ -186,9 +189,10 @@ static INLINE void vertical_filter_4x1_f4(const int16x8_t *src, int32x4_t *res,
*res = horizontal_add_4d_s32x4(m0123_pairs);
}
-static INLINE void vertical_filter_8x1_f1(const int16x8_t *src,
- int32x4_t *res_low,
- int32x4_t *res_high, int sy) {
+static AOM_FORCE_INLINE void vertical_filter_8x1_f1(const int16x8_t *src,
+ int32x4_t *res_low,
+ int32x4_t *res_high,
+ int sy) {
int16x8_t s0 = src[0];
int16x8_t s1 = src[1];
int16x8_t s2 = src[2];
@@ -223,10 +227,10 @@ static INLINE void vertical_filter_8x1_f1(const int16x8_t *src,
*res_high = m4567;
}
-static INLINE void vertical_filter_8x1_f8(const int16x8_t *src,
- int32x4_t *res_low,
- int32x4_t *res_high, int sy,
- int gamma) {
+static AOM_FORCE_INLINE void vertical_filter_8x1_f8(const int16x8_t *src,
+ int32x4_t *res_low,
+ int32x4_t *res_high, int sy,
+ int gamma) {
int16x8_t s0 = src[0];
int16x8_t s1 = src[1];
int16x8_t s2 = src[2];
diff --git a/third_party/aom/av1/common/arm/warp_plane_neon.h b/third_party/aom/av1/common/arm/warp_plane_neon.h
index 5afd72f4ab..eece007ef3 100644
--- a/third_party/aom/av1/common/arm/warp_plane_neon.h
+++ b/third_party/aom/av1/common/arm/warp_plane_neon.h
@@ -24,32 +24,37 @@
#include "av1/common/warped_motion.h"
#include "av1/common/scale.h"
-static INLINE int16x8_t horizontal_filter_4x1_f4(const uint8x16_t in, int sx,
- int alpha);
+static AOM_FORCE_INLINE int16x8_t horizontal_filter_4x1_f4(const uint8x16_t in,
+ int sx, int alpha);
-static INLINE int16x8_t horizontal_filter_8x1_f8(const uint8x16_t in, int sx,
- int alpha);
+static AOM_FORCE_INLINE int16x8_t horizontal_filter_8x1_f8(const uint8x16_t in,
+ int sx, int alpha);
-static INLINE int16x8_t horizontal_filter_4x1_f1(const uint8x16_t in, int sx);
+static AOM_FORCE_INLINE int16x8_t horizontal_filter_4x1_f1(const uint8x16_t in,
+ int sx);
-static INLINE int16x8_t horizontal_filter_8x1_f1(const uint8x16_t in, int sx);
+static AOM_FORCE_INLINE int16x8_t horizontal_filter_8x1_f1(const uint8x16_t in,
+ int sx);
-static INLINE void vertical_filter_4x1_f1(const int16x8_t *src, int32x4_t *res,
- int sy);
+static AOM_FORCE_INLINE void vertical_filter_4x1_f1(const int16x8_t *src,
+ int32x4_t *res, int sy);
-static INLINE void vertical_filter_4x1_f4(const int16x8_t *src, int32x4_t *res,
- int sy, int gamma);
+static AOM_FORCE_INLINE void vertical_filter_4x1_f4(const int16x8_t *src,
+ int32x4_t *res, int sy,
+ int gamma);
-static INLINE void vertical_filter_8x1_f1(const int16x8_t *src,
- int32x4_t *res_low,
- int32x4_t *res_high, int sy);
+static AOM_FORCE_INLINE void vertical_filter_8x1_f1(const int16x8_t *src,
+ int32x4_t *res_low,
+ int32x4_t *res_high,
+ int sy);
-static INLINE void vertical_filter_8x1_f8(const int16x8_t *src,
- int32x4_t *res_low,
- int32x4_t *res_high, int sy,
- int gamma);
+static AOM_FORCE_INLINE void vertical_filter_8x1_f8(const int16x8_t *src,
+ int32x4_t *res_low,
+ int32x4_t *res_high, int sy,
+ int gamma);
-static INLINE void load_filters_4(int16x8_t out[], int offset, int stride) {
+static AOM_FORCE_INLINE void load_filters_4(int16x8_t out[], int offset,
+ int stride) {
out[0] = vld1q_s16((int16_t *)(av1_warped_filter + ((offset + 0 * stride) >>
WARPEDDIFF_PREC_BITS)));
out[1] = vld1q_s16((int16_t *)(av1_warped_filter + ((offset + 1 * stride) >>
@@ -60,7 +65,8 @@ static INLINE void load_filters_4(int16x8_t out[], int offset, int stride) {
WARPEDDIFF_PREC_BITS)));
}
-static INLINE void load_filters_8(int16x8_t out[], int offset, int stride) {
+static AOM_FORCE_INLINE void load_filters_8(int16x8_t out[], int offset,
+ int stride) {
out[0] = vld1q_s16((int16_t *)(av1_warped_filter + ((offset + 0 * stride) >>
WARPEDDIFF_PREC_BITS)));
out[1] = vld1q_s16((int16_t *)(av1_warped_filter + ((offset + 1 * stride) >>
@@ -79,16 +85,14 @@ static INLINE void load_filters_8(int16x8_t out[], int offset, int stride) {
WARPEDDIFF_PREC_BITS)));
}
-static INLINE int clamp_iy(int iy, int height) {
+static AOM_FORCE_INLINE int clamp_iy(int iy, int height) {
return clamp(iy, 0, height - 1);
}
-static INLINE void warp_affine_horizontal(const uint8_t *ref, int width,
- int height, int stride, int p_width,
- int p_height, int16_t alpha,
- int16_t beta, const int64_t x4,
- const int64_t y4, const int i,
- int16x8_t tmp[]) {
+static AOM_FORCE_INLINE void warp_affine_horizontal(
+ const uint8_t *ref, int width, int height, int stride, int p_width,
+ int p_height, int16_t alpha, int16_t beta, const int64_t x4,
+ const int64_t y4, const int i, int16x8_t tmp[]) {
const int bd = 8;
const int reduce_bits_horiz = ROUND0_BITS;
const int height_limit = AOMMIN(8, p_height - i) + 7;
@@ -197,7 +201,7 @@ static INLINE void warp_affine_horizontal(const uint8_t *ref, int width,
}
}
-static INLINE void warp_affine_vertical(
+static AOM_FORCE_INLINE void warp_affine_vertical(
uint8_t *pred, int p_width, int p_height, int p_stride, int is_compound,
uint16_t *dst, int dst_stride, int do_average, int use_dist_wtd_comp_avg,
int16_t gamma, int16_t delta, const int64_t y4, const int i, const int j,
@@ -325,7 +329,7 @@ static INLINE void warp_affine_vertical(
}
}
-static INLINE void av1_warp_affine_common(
+static AOM_FORCE_INLINE void av1_warp_affine_common(
const int32_t *mat, const uint8_t *ref, int width, int height, int stride,
uint8_t *pred, int p_col, int p_row, int p_width, int p_height,
int p_stride, int subsampling_x, int subsampling_y,
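These warp-plane hunks swap INLINE for AOM_FORCE_INLINE so the small per-row filter helpers are reliably inlined into the hot loops rather than left to the compiler's inlining heuristics. A minimal sketch of how such a force-inline macro is typically defined (illustrative only; libaom's actual definition lives in its own headers and may differ in detail):

    #if defined(_MSC_VER)
    #define AOM_FORCE_INLINE __forceinline
    #else
    #define AOM_FORCE_INLINE inline __attribute__((always_inline))
    #endif

    /* Used exactly where INLINE was used before, e.g.: */
    static AOM_FORCE_INLINE int clamp_example(int v, int lo, int hi) {
      return v < lo ? lo : (v > hi ? hi : v);
    }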
diff --git a/third_party/aom/av1/common/arm/warp_plane_neon_i8mm.c b/third_party/aom/av1/common/arm/warp_plane_neon_i8mm.c
index 39e3ad99f4..22a1be17b5 100644
--- a/third_party/aom/av1/common/arm/warp_plane_neon_i8mm.c
+++ b/third_party/aom/av1/common/arm/warp_plane_neon_i8mm.c
@@ -17,8 +17,8 @@ DECLARE_ALIGNED(16, static const uint8_t, usdot_permute_idx[48]) = {
8, 9, 10, 11, 9, 10, 11, 12, 10, 11, 12, 13, 11, 12, 13, 14
};
-static INLINE int16x8_t horizontal_filter_4x1_f4(const uint8x16_t in, int sx,
- int alpha) {
+static AOM_FORCE_INLINE int16x8_t horizontal_filter_4x1_f4(const uint8x16_t in,
+ int sx, int alpha) {
const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1));
// Loading the 8 filter taps
@@ -45,8 +45,8 @@ static INLINE int16x8_t horizontal_filter_4x1_f4(const uint8x16_t in, int sx,
return vreinterpretq_s16_u16(res);
}
-static INLINE int16x8_t horizontal_filter_8x1_f8(const uint8x16_t in, int sx,
- int alpha) {
+static AOM_FORCE_INLINE int16x8_t horizontal_filter_8x1_f8(const uint8x16_t in,
+ int sx, int alpha) {
const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1));
// Loading the 8 filter taps
@@ -83,7 +83,8 @@ static INLINE int16x8_t horizontal_filter_8x1_f8(const uint8x16_t in, int sx,
return vreinterpretq_s16_u16(res);
}
-static INLINE int16x8_t horizontal_filter_4x1_f1(const uint8x16_t in, int sx) {
+static AOM_FORCE_INLINE int16x8_t horizontal_filter_4x1_f1(const uint8x16_t in,
+ int sx) {
const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1));
int16x8_t f_s16 =
@@ -112,7 +113,8 @@ static INLINE int16x8_t horizontal_filter_4x1_f1(const uint8x16_t in, int sx) {
return vreinterpretq_s16_u16(res);
}
-static INLINE int16x8_t horizontal_filter_8x1_f1(const uint8x16_t in, int sx) {
+static AOM_FORCE_INLINE int16x8_t horizontal_filter_8x1_f1(const uint8x16_t in,
+ int sx) {
const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1));
int16x8_t f_s16 =
@@ -149,8 +151,8 @@ static INLINE int16x8_t horizontal_filter_8x1_f1(const uint8x16_t in, int sx) {
return vreinterpretq_s16_u16(res);
}
-static INLINE void vertical_filter_4x1_f1(const int16x8_t *src, int32x4_t *res,
- int sy) {
+static AOM_FORCE_INLINE void vertical_filter_4x1_f1(const int16x8_t *src,
+ int32x4_t *res, int sy) {
int16x4_t s0 = vget_low_s16(src[0]);
int16x4_t s1 = vget_low_s16(src[1]);
int16x4_t s2 = vget_low_s16(src[2]);
@@ -175,8 +177,9 @@ static INLINE void vertical_filter_4x1_f1(const int16x8_t *src, int32x4_t *res,
*res = m0123;
}
-static INLINE void vertical_filter_4x1_f4(const int16x8_t *src, int32x4_t *res,
- int sy, int gamma) {
+static AOM_FORCE_INLINE void vertical_filter_4x1_f4(const int16x8_t *src,
+ int32x4_t *res, int sy,
+ int gamma) {
int16x8_t s0, s1, s2, s3;
transpose_elems_s16_4x8(
vget_low_s16(src[0]), vget_low_s16(src[1]), vget_low_s16(src[2]),
@@ -200,9 +203,10 @@ static INLINE void vertical_filter_4x1_f4(const int16x8_t *src, int32x4_t *res,
*res = horizontal_add_4d_s32x4(m0123_pairs);
}
-static INLINE void vertical_filter_8x1_f1(const int16x8_t *src,
- int32x4_t *res_low,
- int32x4_t *res_high, int sy) {
+static AOM_FORCE_INLINE void vertical_filter_8x1_f1(const int16x8_t *src,
+ int32x4_t *res_low,
+ int32x4_t *res_high,
+ int sy) {
int16x8_t s0 = src[0];
int16x8_t s1 = src[1];
int16x8_t s2 = src[2];
@@ -237,10 +241,10 @@ static INLINE void vertical_filter_8x1_f1(const int16x8_t *src,
*res_high = m4567;
}
-static INLINE void vertical_filter_8x1_f8(const int16x8_t *src,
- int32x4_t *res_low,
- int32x4_t *res_high, int sy,
- int gamma) {
+static AOM_FORCE_INLINE void vertical_filter_8x1_f8(const int16x8_t *src,
+ int32x4_t *res_low,
+ int32x4_t *res_high, int sy,
+ int gamma) {
int16x8_t s0 = src[0];
int16x8_t s1 = src[1];
int16x8_t s2 = src[2];
diff --git a/third_party/aom/av1/common/arm/warp_plane_sve.c b/third_party/aom/av1/common/arm/warp_plane_sve.c
index 8a4bf5747b..c70b066174 100644
--- a/third_party/aom/av1/common/arm/warp_plane_sve.c
+++ b/third_party/aom/av1/common/arm/warp_plane_sve.c
@@ -11,7 +11,7 @@
#include <arm_neon.h>
-#include "aom_dsp/arm/dot_sve.h"
+#include "aom_dsp/arm/aom_neon_sve_bridge.h"
#include "warp_plane_neon.h"
DECLARE_ALIGNED(16, static const uint8_t, usdot_permute_idx[48]) = {
@@ -20,8 +20,8 @@ DECLARE_ALIGNED(16, static const uint8_t, usdot_permute_idx[48]) = {
8, 9, 10, 11, 9, 10, 11, 12, 10, 11, 12, 13, 11, 12, 13, 14
};
-static INLINE int16x8_t horizontal_filter_4x1_f4(const uint8x16_t in, int sx,
- int alpha) {
+static AOM_FORCE_INLINE int16x8_t horizontal_filter_4x1_f4(const uint8x16_t in,
+ int sx, int alpha) {
const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1));
// Loading the 8 filter taps
@@ -48,8 +48,8 @@ static INLINE int16x8_t horizontal_filter_4x1_f4(const uint8x16_t in, int sx,
return vreinterpretq_s16_u16(res);
}
-static INLINE int16x8_t horizontal_filter_8x1_f8(const uint8x16_t in, int sx,
- int alpha) {
+static AOM_FORCE_INLINE int16x8_t horizontal_filter_8x1_f8(const uint8x16_t in,
+ int sx, int alpha) {
const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1));
// Loading the 8 filter taps
@@ -86,7 +86,8 @@ static INLINE int16x8_t horizontal_filter_8x1_f8(const uint8x16_t in, int sx,
return vreinterpretq_s16_u16(res);
}
-static INLINE int16x8_t horizontal_filter_4x1_f1(const uint8x16_t in, int sx) {
+static AOM_FORCE_INLINE int16x8_t horizontal_filter_4x1_f1(const uint8x16_t in,
+ int sx) {
const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1));
int16x8_t f_s16 =
@@ -115,7 +116,8 @@ static INLINE int16x8_t horizontal_filter_4x1_f1(const uint8x16_t in, int sx) {
return vreinterpretq_s16_u16(res);
}
-static INLINE int16x8_t horizontal_filter_8x1_f1(const uint8x16_t in, int sx) {
+static AOM_FORCE_INLINE int16x8_t horizontal_filter_8x1_f1(const uint8x16_t in,
+ int sx) {
const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1));
int16x8_t f_s16 =
@@ -152,8 +154,8 @@ static INLINE int16x8_t horizontal_filter_8x1_f1(const uint8x16_t in, int sx) {
return vreinterpretq_s16_u16(res);
}
-static INLINE void vertical_filter_4x1_f1(const int16x8_t *src, int32x4_t *res,
- int sy) {
+static AOM_FORCE_INLINE void vertical_filter_4x1_f1(const int16x8_t *src,
+ int32x4_t *res, int sy) {
int16x4_t s0 = vget_low_s16(src[0]);
int16x4_t s1 = vget_low_s16(src[1]);
int16x4_t s2 = vget_low_s16(src[2]);
@@ -178,8 +180,9 @@ static INLINE void vertical_filter_4x1_f1(const int16x8_t *src, int32x4_t *res,
*res = m0123;
}
-static INLINE void vertical_filter_4x1_f4(const int16x8_t *src, int32x4_t *res,
- int sy, int gamma) {
+static AOM_FORCE_INLINE void vertical_filter_4x1_f4(const int16x8_t *src,
+ int32x4_t *res, int sy,
+ int gamma) {
int16x8_t s0, s1, s2, s3;
transpose_elems_s16_4x8(
vget_low_s16(src[0]), vget_low_s16(src[1]), vget_low_s16(src[2]),
@@ -200,9 +203,10 @@ static INLINE void vertical_filter_4x1_f4(const int16x8_t *src, int32x4_t *res,
*res = vcombine_s32(vmovn_s64(m01), vmovn_s64(m23));
}
-static INLINE void vertical_filter_8x1_f1(const int16x8_t *src,
- int32x4_t *res_low,
- int32x4_t *res_high, int sy) {
+static AOM_FORCE_INLINE void vertical_filter_8x1_f1(const int16x8_t *src,
+ int32x4_t *res_low,
+ int32x4_t *res_high,
+ int sy) {
int16x8_t s0 = src[0];
int16x8_t s1 = src[1];
int16x8_t s2 = src[2];
@@ -237,10 +241,10 @@ static INLINE void vertical_filter_8x1_f1(const int16x8_t *src,
*res_high = m4567;
}
-static INLINE void vertical_filter_8x1_f8(const int16x8_t *src,
- int32x4_t *res_low,
- int32x4_t *res_high, int sy,
- int gamma) {
+static AOM_FORCE_INLINE void vertical_filter_8x1_f8(const int16x8_t *src,
+ int32x4_t *res_low,
+ int32x4_t *res_high, int sy,
+ int gamma) {
int16x8_t s0 = src[0];
int16x8_t s1 = src[1];
int16x8_t s2 = src[2];
diff --git a/third_party/aom/av1/common/av1_common_int.h b/third_party/aom/av1/common/av1_common_int.h
index 4c0cb99d2b..4e14c4a8be 100644
--- a/third_party/aom/av1/common/av1_common_int.h
+++ b/third_party/aom/av1/common/av1_common_int.h
@@ -17,7 +17,7 @@
#include "aom/internal/aom_codec_internal.h"
#include "aom_dsp/flow_estimation/corner_detect.h"
-#include "aom_util/aom_thread.h"
+#include "aom_util/aom_pthread.h"
#include "av1/common/alloccommon.h"
#include "av1/common/av1_loopfilter.h"
#include "av1/common/entropy.h"
diff --git a/third_party/aom/av1/common/av1_rtcd_defs.pl b/third_party/aom/av1/common/av1_rtcd_defs.pl
index ef999fbba2..c0831330d1 100644
--- a/third_party/aom/av1/common/av1_rtcd_defs.pl
+++ b/third_party/aom/av1/common/av1_rtcd_defs.pl
@@ -77,6 +77,16 @@ EOF
}
forward_decls qw/av1_common_forward_decls/;
+# Fallbacks for Valgrind support
+# For normal use, we require SSE4.1. However, 32-bit Valgrind does not support
+# SSE4.1, so we include fallbacks for some critical functions to improve
+# performance
+$sse2_x86 = $ssse3_x86 = '';
+if ($opts{arch} eq "x86") {
+ $sse2_x86 = 'sse2';
+ $ssse3_x86 = 'ssse3';
+}
+
# functions that are 64 bit only.
$mmx_x86_64 = $sse2_x86_64 = $ssse3_x86_64 = $avx_x86_64 = $avx2_x86_64 = '';
if ($opts{arch} eq "x86_64") {
@@ -345,7 +355,7 @@ if (aom_config("CONFIG_AV1_ENCODER") eq "yes") {
#fwd txfm
add_proto qw/void av1_lowbd_fwd_txfm/, "const int16_t *src_diff, tran_low_t *coeff, int diff_stride, TxfmParam *txfm_param";
- specialize qw/av1_lowbd_fwd_txfm sse2 sse4_1 avx2 neon/;
+ specialize qw/av1_lowbd_fwd_txfm sse4_1 avx2 neon/, $sse2_x86;
add_proto qw/void av1_fwd_txfm2d_4x8/, "const int16_t *input, int32_t *output, int stride, TX_TYPE tx_type, int bd";
specialize qw/av1_fwd_txfm2d_4x8 sse4_1 neon/;
@@ -436,9 +446,9 @@ if (aom_config("CONFIG_AV1_ENCODER") eq "yes") {
specialize qw/av1_txb_init_levels sse4_1 avx2 neon/;
add_proto qw/uint64_t av1_wedge_sse_from_residuals/, "const int16_t *r1, const int16_t *d, const uint8_t *m, int N";
- specialize qw/av1_wedge_sse_from_residuals sse2 avx2 neon/;
+ specialize qw/av1_wedge_sse_from_residuals sse2 avx2 neon sve/;
add_proto qw/int8_t av1_wedge_sign_from_residuals/, "const int16_t *ds, const uint8_t *m, int N, int64_t limit";
- specialize qw/av1_wedge_sign_from_residuals sse2 avx2 neon/;
+ specialize qw/av1_wedge_sign_from_residuals sse2 avx2 neon sve/;
add_proto qw/void av1_wedge_compute_delta_squares/, "int16_t *d, const int16_t *a, const int16_t *b, int N";
specialize qw/av1_wedge_compute_delta_squares sse2 avx2 neon/;
@@ -521,21 +531,21 @@ add_proto qw/void cdef_copy_rect8_16bit_to_16bit/, "uint16_t *dst, int dstride,
# structs as arguments, which makes the v256 type of the intrinsics
# hard to support, so optimizations for this target are disabled.
if ($opts{config} !~ /libs-x86-win32-vs.*/) {
- specialize qw/cdef_find_dir sse2 ssse3 sse4_1 avx2 neon/;
- specialize qw/cdef_find_dir_dual sse2 ssse3 sse4_1 avx2 neon/;
+ specialize qw/cdef_find_dir sse4_1 avx2 neon/, "$ssse3_x86";
+ specialize qw/cdef_find_dir_dual sse4_1 avx2 neon/, "$ssse3_x86";
- specialize qw/cdef_filter_8_0 sse2 ssse3 sse4_1 avx2 neon/;
- specialize qw/cdef_filter_8_1 sse2 ssse3 sse4_1 avx2 neon/;
- specialize qw/cdef_filter_8_2 sse2 ssse3 sse4_1 avx2 neon/;
- specialize qw/cdef_filter_8_3 sse2 ssse3 sse4_1 avx2 neon/;
+ specialize qw/cdef_filter_8_0 sse4_1 avx2 neon/, "$ssse3_x86";
+ specialize qw/cdef_filter_8_1 sse4_1 avx2 neon/, "$ssse3_x86";
+ specialize qw/cdef_filter_8_2 sse4_1 avx2 neon/, "$ssse3_x86";
+ specialize qw/cdef_filter_8_3 sse4_1 avx2 neon/, "$ssse3_x86";
- specialize qw/cdef_filter_16_0 sse2 ssse3 sse4_1 avx2 neon/;
- specialize qw/cdef_filter_16_1 sse2 ssse3 sse4_1 avx2 neon/;
- specialize qw/cdef_filter_16_2 sse2 ssse3 sse4_1 avx2 neon/;
- specialize qw/cdef_filter_16_3 sse2 ssse3 sse4_1 avx2 neon/;
+ specialize qw/cdef_filter_16_0 sse4_1 avx2 neon/, "$ssse3_x86";
+ specialize qw/cdef_filter_16_1 sse4_1 avx2 neon/, "$ssse3_x86";
+ specialize qw/cdef_filter_16_2 sse4_1 avx2 neon/, "$ssse3_x86";
+ specialize qw/cdef_filter_16_3 sse4_1 avx2 neon/, "$ssse3_x86";
- specialize qw/cdef_copy_rect8_8bit_to_16bit sse2 ssse3 sse4_1 avx2 neon/;
- specialize qw/cdef_copy_rect8_16bit_to_16bit sse2 ssse3 sse4_1 avx2 neon/;
+ specialize qw/cdef_copy_rect8_8bit_to_16bit sse4_1 avx2 neon/, "$ssse3_x86";
+ specialize qw/cdef_copy_rect8_16bit_to_16bit sse4_1 avx2 neon/, "$ssse3_x86";
}
# WARPED_MOTION / GLOBAL_MOTION functions
@@ -591,20 +601,20 @@ if(aom_config("CONFIG_AV1_HIGHBITDEPTH") eq "yes") {
specialize qw/av1_convolve_y_sr sse2 avx2 neon/;
specialize qw/av1_convolve_y_sr_intrabc neon/;
specialize qw/av1_convolve_2d_scale sse4_1/;
- specialize qw/av1_dist_wtd_convolve_2d sse2 ssse3 avx2 neon neon_dotprod neon_i8mm/;
+ specialize qw/av1_dist_wtd_convolve_2d ssse3 avx2 neon neon_dotprod neon_i8mm/;
specialize qw/av1_dist_wtd_convolve_2d_copy sse2 avx2 neon/;
specialize qw/av1_dist_wtd_convolve_x sse2 avx2 neon neon_dotprod neon_i8mm/;
specialize qw/av1_dist_wtd_convolve_y sse2 avx2 neon/;
if(aom_config("CONFIG_AV1_HIGHBITDEPTH") eq "yes") {
- specialize qw/av1_highbd_dist_wtd_convolve_2d sse4_1 avx2 neon/;
- specialize qw/av1_highbd_dist_wtd_convolve_x sse4_1 avx2 neon/;
- specialize qw/av1_highbd_dist_wtd_convolve_y sse4_1 avx2 neon/;
+ specialize qw/av1_highbd_dist_wtd_convolve_2d sse4_1 avx2 neon sve2/;
+ specialize qw/av1_highbd_dist_wtd_convolve_x sse4_1 avx2 neon sve2/;
+ specialize qw/av1_highbd_dist_wtd_convolve_y sse4_1 avx2 neon sve2/;
specialize qw/av1_highbd_dist_wtd_convolve_2d_copy sse4_1 avx2 neon/;
- specialize qw/av1_highbd_convolve_2d_sr ssse3 avx2 neon/;
+ specialize qw/av1_highbd_convolve_2d_sr ssse3 avx2 neon sve2/;
specialize qw/av1_highbd_convolve_2d_sr_intrabc neon/;
- specialize qw/av1_highbd_convolve_x_sr ssse3 avx2 neon/;
+ specialize qw/av1_highbd_convolve_x_sr ssse3 avx2 neon sve2/;
specialize qw/av1_highbd_convolve_x_sr_intrabc neon/;
- specialize qw/av1_highbd_convolve_y_sr ssse3 avx2 neon/;
+ specialize qw/av1_highbd_convolve_y_sr ssse3 avx2 neon sve2/;
specialize qw/av1_highbd_convolve_y_sr_intrabc neon/;
specialize qw/av1_highbd_convolve_2d_scale sse4_1 neon/;
}
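The specialize lines above only record which SIMD flavours exist for each function; the rtcd scripts turn them into runtime dispatch over CPU feature flags, which is why the 32-bit-only $sse2_x86/$ssse3_x86 variables can quietly drop SSE2/SSSE3 entries on other targets. A self-contained sketch of that dispatch shape (every name and flag below is invented for illustration, not the generated code):

    #define HAS_SSE4_1 0x1
    #define HAS_AVX2 0x2

    typedef void (*fwd_txfm_fn)(const short *src, int *coeff);

    static void fwd_txfm_c(const short *src, int *coeff) { (void)src; (void)coeff; }
    static void fwd_txfm_sse4_1(const short *src, int *coeff) { (void)src; (void)coeff; }
    static void fwd_txfm_avx2(const short *src, int *coeff) { (void)src; (void)coeff; }

    static fwd_txfm_fn fwd_txfm = fwd_txfm_c;

    static void setup_rtcd(int cpu_flags) {
      fwd_txfm = fwd_txfm_c;                        /* portable baseline */
      if (cpu_flags & HAS_SSE4_1) fwd_txfm = fwd_txfm_sse4_1;
      if (cpu_flags & HAS_AVX2) fwd_txfm = fwd_txfm_avx2;
    }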
diff --git a/third_party/aom/av1/common/cdef.c b/third_party/aom/av1/common/cdef.c
index 12e9545441..5cec940a8e 100644
--- a/third_party/aom/av1/common/cdef.c
+++ b/third_party/aom/av1/common/cdef.c
@@ -10,15 +10,19 @@
*/
#include <assert.h>
-#include <math.h>
+#include <stddef.h>
#include <string.h>
#include "config/aom_scale_rtcd.h"
#include "aom/aom_integer.h"
+#include "aom_util/aom_pthread.h"
#include "av1/common/av1_common_int.h"
#include "av1/common/cdef.h"
#include "av1/common/cdef_block.h"
+#include "av1/common/common.h"
+#include "av1/common/common_data.h"
+#include "av1/common/enums.h"
#include "av1/common/reconinter.h"
#include "av1/common/thread_common.h"
@@ -92,7 +96,7 @@ void av1_cdef_copy_sb8_16_lowbd(uint16_t *const dst, int dstride,
const uint8_t *src, int src_voffset,
int src_hoffset, int sstride, int vsize,
int hsize) {
- const uint8_t *base = &src[src_voffset * sstride + src_hoffset];
+ const uint8_t *base = &src[src_voffset * (ptrdiff_t)sstride + src_hoffset];
cdef_copy_rect8_8bit_to_16bit(dst, dstride, base, sstride, hsize, vsize);
}
@@ -101,7 +105,7 @@ void av1_cdef_copy_sb8_16_highbd(uint16_t *const dst, int dstride,
int src_hoffset, int sstride, int vsize,
int hsize) {
const uint16_t *base =
- &CONVERT_TO_SHORTPTR(src)[src_voffset * sstride + src_hoffset];
+ &CONVERT_TO_SHORTPTR(src)[src_voffset * (ptrdiff_t)sstride + src_hoffset];
cdef_copy_rect8_16bit_to_16bit(dst, dstride, base, sstride, hsize, vsize);
}
@@ -247,7 +251,8 @@ static void cdef_prepare_fb(const AV1_COMMON *const cm, CdefBlockInfo *fb_info,
static INLINE void cdef_filter_fb(CdefBlockInfo *const fb_info, int plane,
uint8_t use_highbitdepth) {
- int offset = fb_info->dst_stride * fb_info->roffset + fb_info->coffset;
+ ptrdiff_t offset =
+ (ptrdiff_t)fb_info->dst_stride * fb_info->roffset + fb_info->coffset;
if (use_highbitdepth) {
av1_cdef_filter_fb(
NULL, CONVERT_TO_SHORTPTR(fb_info->dst + offset), fb_info->dst_stride,
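The (ptrdiff_t) casts in this file widen the row-offset multiplication before it feeds pointer arithmetic, so the product cannot wrap in 32-bit int on very large planes. The pattern in isolation:

    #include <stddef.h>
    #include <stdint.h>

    static const uint8_t *row_ptr(const uint8_t *buf, int row, int stride) {
      /* row * stride alone is evaluated in int and can overflow for huge
         buffers; widening one operand first keeps the whole product in
         pointer-width arithmetic, matching the casts added above. */
      return buf + (ptrdiff_t)row * stride;
    }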
diff --git a/third_party/aom/av1/common/entropymode.h b/third_party/aom/av1/common/entropymode.h
index 09cd6bd1e9..028bd21ae3 100644
--- a/third_party/aom/av1/common/entropymode.h
+++ b/third_party/aom/av1/common/entropymode.h
@@ -12,6 +12,7 @@
#ifndef AOM_AV1_COMMON_ENTROPYMODE_H_
#define AOM_AV1_COMMON_ENTROPYMODE_H_
+#include "aom_ports/bitops.h"
#include "av1/common/entropy.h"
#include "av1/common/entropymv.h"
#include "av1/common/filter.h"
@@ -192,13 +193,7 @@ void av1_setup_past_independence(struct AV1Common *cm);
// Returns (int)ceil(log2(n)).
static INLINE int av1_ceil_log2(int n) {
if (n < 2) return 0;
- int i = 1;
- unsigned int p = 2;
- while (p < (unsigned int)n) {
- i++;
- p = p << 1;
- }
- return i;
+ return get_msb(n - 1) + 1;
}
// Returns the context for palette color index at row 'r' and column 'c',
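The rewritten av1_ceil_log2 leans on the identity ceil(log2(n)) == floor(log2(n - 1)) + 1 for n >= 2, with get_msb() (hence the newly included aom_ports/bitops.h) providing the floor(log2) part. A self-contained check, using a local helper as a stand-in for get_msb():

    static int msb(unsigned int v) {  /* stand-in for get_msb(): floor(log2(v)), v > 0 */
      int i = -1;
      while (v) { v >>= 1; ++i; }
      return i;
    }

    static int ceil_log2(int n) {
      if (n < 2) return 0;
      return msb((unsigned int)(n - 1)) + 1;  /* same result as the removed loop */
    }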
diff --git a/third_party/aom/av1/common/quant_common.c b/third_party/aom/av1/common/quant_common.c
index b0976287ef..58eb113370 100644
--- a/third_party/aom/av1/common/quant_common.c
+++ b/third_party/aom/av1/common/quant_common.c
@@ -9,10 +9,15 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
+#include "config/aom_config.h"
+
+#include "aom/aom_frame_buffer.h"
+#include "aom_scale/yv12config.h"
#include "av1/common/av1_common_int.h"
#include "av1/common/blockd.h"
#include "av1/common/common.h"
#include "av1/common/entropy.h"
+#include "av1/common/filter.h"
#include "av1/common/quant_common.h"
#include "av1/common/seg_common.h"
@@ -274,13 +279,16 @@ const qm_val_t *av1_get_qmatrix(const CommonQuantParams *quant_params,
: quant_params->gqmatrix[NUM_QM_LEVELS - 1][0][qm_tx_size];
}
+#if CONFIG_QUANT_MATRIX || CONFIG_AV1_DECODER
#define QM_TOTAL_SIZE 3344
// We only use wt_matrix_ref[q] and iwt_matrix_ref[q]
// for q = 0, ..., NUM_QM_LEVELS - 2.
static const qm_val_t wt_matrix_ref[NUM_QM_LEVELS - 1][2][QM_TOTAL_SIZE];
static const qm_val_t iwt_matrix_ref[NUM_QM_LEVELS - 1][2][QM_TOTAL_SIZE];
+#endif
void av1_qm_init(CommonQuantParams *quant_params, int num_planes) {
+#if CONFIG_QUANT_MATRIX || CONFIG_AV1_DECODER
for (int q = 0; q < NUM_QM_LEVELS; ++q) {
for (int c = 0; c < num_planes; ++c) {
int current = 0;
@@ -306,6 +314,10 @@ void av1_qm_init(CommonQuantParams *quant_params, int num_planes) {
}
}
}
+#else
+ (void)quant_params;
+ (void)num_planes;
+#endif // CONFIG_QUANT_MATRIX || CONFIG_AV1_DECODER
}
/* Provide 15 sets of quantization matrices for chroma and luma
@@ -320,6 +332,8 @@ void av1_qm_init(CommonQuantParams *quant_params, int num_planes) {
distances. Matrices for QM level 15 are omitted because they are
not used.
*/
+
+#if CONFIG_QUANT_MATRIX || CONFIG_AV1_DECODER
static const qm_val_t iwt_matrix_ref[NUM_QM_LEVELS - 1][2][QM_TOTAL_SIZE] = {
{
{ /* Luma */
@@ -12873,4 +12887,6 @@ static const qm_val_t wt_matrix_ref[NUM_QM_LEVELS - 1][2][QM_TOTAL_SIZE] = {
33, 33, 32, 32, 32, 32, 34, 33, 33, 33, 32, 32, 32, 32, 34, 33, 33, 33,
32, 32, 32, 32 },
},
-};
\ No newline at end of file
+};
+
+#endif // CONFIG_QUANT_MATRIX || CONFIG_AV1_DECODER
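With the matrices compiled out, av1_qm_init keeps its signature but gains an empty body, so the parameters are cast to void to keep unused-parameter warnings quiet. The same pattern in miniature (CONFIG_FEATURE is a made-up flag):

    void init_feature(int *table, int n) {
    #if CONFIG_FEATURE
      for (int i = 0; i < n; ++i) table[i] = i;
    #else
      (void)table;  /* silence -Wunused-parameter when the feature is disabled */
      (void)n;
    #endif
    }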
diff --git a/third_party/aom/av1/common/reconintra.c b/third_party/aom/av1/common/reconintra.c
index f68af18cb1..497863e117 100644
--- a/third_party/aom/av1/common/reconintra.c
+++ b/third_party/aom/av1/common/reconintra.c
@@ -1196,7 +1196,8 @@ static void build_directional_and_filter_intra_predictors(
const int need_right = p_angle < 90;
const int need_bottom = p_angle > 180;
if (p_angle != 90 && p_angle != 180) {
- const int ab_le = need_above_left ? 1 : 0;
+ assert(need_above_left);
+ const int ab_le = 1;
if (need_above && need_left && (txwpx + txhpx >= 24)) {
filter_intra_edge_corner(above_row, left_col);
}
@@ -1500,7 +1501,8 @@ static void highbd_build_directional_and_filter_intra_predictors(
const int need_right = p_angle < 90;
const int need_bottom = p_angle > 180;
if (p_angle != 90 && p_angle != 180) {
- const int ab_le = need_above_left ? 1 : 0;
+ assert(need_above_left);
+ const int ab_le = 1;
if (need_above && need_left && (txwpx + txhpx >= 24)) {
highbd_filter_intra_edge_corner(above_row, left_col);
}
diff --git a/third_party/aom/av1/common/resize.c b/third_party/aom/av1/common/resize.c
index 1b348836a5..441323ab1f 100644
--- a/third_party/aom/av1/common/resize.c
+++ b/third_party/aom/av1/common/resize.c
@@ -524,7 +524,7 @@ static void fill_arr_to_col(uint8_t *img, int stride, int len, uint8_t *arr) {
}
}
-bool av1_resize_plane(const uint8_t *const input, int height, int width,
+bool av1_resize_plane(const uint8_t *input, int height, int width,
int in_stride, uint8_t *output, int height2, int width2,
int out_stride) {
int i;
@@ -881,7 +881,7 @@ static void highbd_fill_arr_to_col(uint16_t *img, int stride, int len,
}
}
-void av1_highbd_resize_plane(const uint8_t *const input, int height, int width,
+void av1_highbd_resize_plane(const uint8_t *input, int height, int width,
int in_stride, uint8_t *output, int height2,
int width2, int out_stride, int bd) {
int i;
@@ -980,10 +980,9 @@ static bool highbd_upscale_normative_rect(const uint8_t *const input,
}
#endif // CONFIG_AV1_HIGHBITDEPTH
-void av1_resize_frame420(const uint8_t *const y, int y_stride,
- const uint8_t *const u, const uint8_t *const v,
- int uv_stride, int height, int width, uint8_t *oy,
- int oy_stride, uint8_t *ou, uint8_t *ov,
+void av1_resize_frame420(const uint8_t *y, int y_stride, const uint8_t *u,
+ const uint8_t *v, int uv_stride, int height, int width,
+ uint8_t *oy, int oy_stride, uint8_t *ou, uint8_t *ov,
int ouv_stride, int oheight, int owidth) {
if (!av1_resize_plane(y, height, width, y_stride, oy, oheight, owidth,
oy_stride))
@@ -996,10 +995,9 @@ void av1_resize_frame420(const uint8_t *const y, int y_stride,
abort();
}
-bool av1_resize_frame422(const uint8_t *const y, int y_stride,
- const uint8_t *const u, const uint8_t *const v,
- int uv_stride, int height, int width, uint8_t *oy,
- int oy_stride, uint8_t *ou, uint8_t *ov,
+bool av1_resize_frame422(const uint8_t *y, int y_stride, const uint8_t *u,
+ const uint8_t *v, int uv_stride, int height, int width,
+ uint8_t *oy, int oy_stride, uint8_t *ou, uint8_t *ov,
int ouv_stride, int oheight, int owidth) {
if (!av1_resize_plane(y, height, width, y_stride, oy, oheight, owidth,
oy_stride))
@@ -1013,10 +1011,9 @@ bool av1_resize_frame422(const uint8_t *const y, int y_stride,
return true;
}
-bool av1_resize_frame444(const uint8_t *const y, int y_stride,
- const uint8_t *const u, const uint8_t *const v,
- int uv_stride, int height, int width, uint8_t *oy,
- int oy_stride, uint8_t *ou, uint8_t *ov,
+bool av1_resize_frame444(const uint8_t *y, int y_stride, const uint8_t *u,
+ const uint8_t *v, int uv_stride, int height, int width,
+ uint8_t *oy, int oy_stride, uint8_t *ou, uint8_t *ov,
int ouv_stride, int oheight, int owidth) {
if (!av1_resize_plane(y, height, width, y_stride, oy, oheight, owidth,
oy_stride))
@@ -1031,8 +1028,8 @@ bool av1_resize_frame444(const uint8_t *const y, int y_stride,
}
#if CONFIG_AV1_HIGHBITDEPTH
-void av1_highbd_resize_frame420(const uint8_t *const y, int y_stride,
- const uint8_t *const u, const uint8_t *const v,
+void av1_highbd_resize_frame420(const uint8_t *y, int y_stride,
+ const uint8_t *u, const uint8_t *v,
int uv_stride, int height, int width,
uint8_t *oy, int oy_stride, uint8_t *ou,
uint8_t *ov, int ouv_stride, int oheight,
@@ -1045,8 +1042,8 @@ void av1_highbd_resize_frame420(const uint8_t *const y, int y_stride,
owidth / 2, ouv_stride, bd);
}
-void av1_highbd_resize_frame422(const uint8_t *const y, int y_stride,
- const uint8_t *const u, const uint8_t *const v,
+void av1_highbd_resize_frame422(const uint8_t *y, int y_stride,
+ const uint8_t *u, const uint8_t *v,
int uv_stride, int height, int width,
uint8_t *oy, int oy_stride, uint8_t *ou,
uint8_t *ov, int ouv_stride, int oheight,
@@ -1059,8 +1056,8 @@ void av1_highbd_resize_frame422(const uint8_t *const y, int y_stride,
owidth / 2, ouv_stride, bd);
}
-void av1_highbd_resize_frame444(const uint8_t *const y, int y_stride,
- const uint8_t *const u, const uint8_t *const v,
+void av1_highbd_resize_frame444(const uint8_t *y, int y_stride,
+ const uint8_t *u, const uint8_t *v,
int uv_stride, int height, int width,
uint8_t *oy, int oy_stride, uint8_t *ou,
uint8_t *ov, int ouv_stride, int oheight,
@@ -1126,7 +1123,7 @@ void av1_resize_and_extend_frame_c(const YV12_BUFFER_CONFIG *src,
bool av1_resize_and_extend_frame_nonnormative(const YV12_BUFFER_CONFIG *src,
YV12_BUFFER_CONFIG *dst, int bd,
- const int num_planes) {
+ int num_planes) {
// TODO(dkovalev): replace YV12_BUFFER_CONFIG with aom_image_t
// We use AOMMIN(num_planes, MAX_MB_PLANE) instead of num_planes to quiet
@@ -1246,8 +1243,7 @@ void av1_upscale_normative_and_extend_frame(const AV1_COMMON *cm,
YV12_BUFFER_CONFIG *av1_realloc_and_scale_if_required(
AV1_COMMON *cm, YV12_BUFFER_CONFIG *unscaled, YV12_BUFFER_CONFIG *scaled,
const InterpFilter filter, const int phase, const bool use_optimized_scaler,
- const bool for_psnr, const int border_in_pixels,
- const int num_pyramid_levels) {
+ const bool for_psnr, const int border_in_pixels, const bool alloc_pyramid) {
// If scaling is performed for the sole purpose of calculating PSNR, then our
// target dimensions are superres upscaled width/height. Otherwise our target
// dimensions are coded width/height.
@@ -1267,7 +1263,7 @@ YV12_BUFFER_CONFIG *av1_realloc_and_scale_if_required(
scaled, scaled_width, scaled_height, seq_params->subsampling_x,
seq_params->subsampling_y, seq_params->use_highbitdepth,
border_in_pixels, cm->features.byte_alignment, NULL, NULL, NULL,
- num_pyramid_levels, 0))
+ alloc_pyramid, 0))
aom_internal_error(cm->error, AOM_CODEC_MEM_ERROR,
"Failed to allocate scaled buffer");
@@ -1363,7 +1359,7 @@ static void copy_buffer_config(const YV12_BUFFER_CONFIG *const src,
// TODO(afergs): aom_ vs av1_ functions? Which can I use?
// Upscale decoded image.
void av1_superres_upscale(AV1_COMMON *cm, BufferPool *const pool,
- int num_pyramid_levels) {
+ bool alloc_pyramid) {
const int num_planes = av1_num_planes(cm);
if (!av1_superres_scaled(cm)) return;
const SequenceHeader *const seq_params = cm->seq_params;
@@ -1378,7 +1374,7 @@ void av1_superres_upscale(AV1_COMMON *cm, BufferPool *const pool,
if (aom_alloc_frame_buffer(
&copy_buffer, aligned_width, cm->height, seq_params->subsampling_x,
seq_params->subsampling_y, seq_params->use_highbitdepth,
- AOM_BORDER_IN_PIXELS, byte_alignment, 0, 0))
+ AOM_BORDER_IN_PIXELS, byte_alignment, false, 0))
aom_internal_error(cm->error, AOM_CODEC_MEM_ERROR,
"Failed to allocate copy buffer for superres upscaling");
@@ -1411,7 +1407,7 @@ void av1_superres_upscale(AV1_COMMON *cm, BufferPool *const pool,
cm->superres_upscaled_height, seq_params->subsampling_x,
seq_params->subsampling_y, seq_params->use_highbitdepth,
AOM_BORDER_IN_PIXELS, byte_alignment, fb, cb, cb_priv,
- num_pyramid_levels, 0)) {
+ alloc_pyramid, 0)) {
unlock_buffer_pool(pool);
aom_internal_error(
cm->error, AOM_CODEC_MEM_ERROR,
@@ -1428,7 +1424,7 @@ void av1_superres_upscale(AV1_COMMON *cm, BufferPool *const pool,
frame_to_show, cm->superres_upscaled_width,
cm->superres_upscaled_height, seq_params->subsampling_x,
seq_params->subsampling_y, seq_params->use_highbitdepth,
- AOM_BORDER_IN_PIXELS, byte_alignment, num_pyramid_levels, 0))
+ AOM_BORDER_IN_PIXELS, byte_alignment, alloc_pyramid, 0))
aom_internal_error(
cm->error, AOM_CODEC_MEM_ERROR,
"Failed to reallocate current frame buffer for superres upscaling");
diff --git a/third_party/aom/av1/common/resize.h b/third_party/aom/av1/common/resize.h
index 0ba3108f72..d573a538bf 100644
--- a/third_party/aom/av1/common/resize.h
+++ b/third_party/aom/av1/common/resize.h
@@ -20,44 +20,41 @@
extern "C" {
#endif
-bool av1_resize_plane(const uint8_t *const input, int height, int width,
+bool av1_resize_plane(const uint8_t *input, int height, int width,
int in_stride, uint8_t *output, int height2, int width2,
int out_stride);
// TODO(aomedia:3228): In libaom 4.0.0, remove av1_resize_frame420 from
// av1/exports_com and delete this function.
-void av1_resize_frame420(const uint8_t *const y, int y_stride,
- const uint8_t *const u, const uint8_t *const v,
- int uv_stride, int height, int width, uint8_t *oy,
- int oy_stride, uint8_t *ou, uint8_t *ov,
+void av1_resize_frame420(const uint8_t *y, int y_stride, const uint8_t *u,
+ const uint8_t *v, int uv_stride, int height, int width,
+ uint8_t *oy, int oy_stride, uint8_t *ou, uint8_t *ov,
int ouv_stride, int oheight, int owidth);
-bool av1_resize_frame422(const uint8_t *const y, int y_stride,
- const uint8_t *const u, const uint8_t *const v,
- int uv_stride, int height, int width, uint8_t *oy,
- int oy_stride, uint8_t *ou, uint8_t *ov,
+bool av1_resize_frame422(const uint8_t *y, int y_stride, const uint8_t *u,
+ const uint8_t *v, int uv_stride, int height, int width,
+ uint8_t *oy, int oy_stride, uint8_t *ou, uint8_t *ov,
int ouv_stride, int oheight, int owidth);
-bool av1_resize_frame444(const uint8_t *const y, int y_stride,
- const uint8_t *const u, const uint8_t *const v,
- int uv_stride, int height, int width, uint8_t *oy,
- int oy_stride, uint8_t *ou, uint8_t *ov,
+bool av1_resize_frame444(const uint8_t *y, int y_stride, const uint8_t *u,
+ const uint8_t *v, int uv_stride, int height, int width,
+ uint8_t *oy, int oy_stride, uint8_t *ou, uint8_t *ov,
int ouv_stride, int oheight, int owidth);
-void av1_highbd_resize_plane(const uint8_t *const input, int height, int width,
+void av1_highbd_resize_plane(const uint8_t *input, int height, int width,
int in_stride, uint8_t *output, int height2,
int width2, int out_stride, int bd);
-void av1_highbd_resize_frame420(const uint8_t *const y, int y_stride,
- const uint8_t *const u, const uint8_t *const v,
+void av1_highbd_resize_frame420(const uint8_t *y, int y_stride,
+ const uint8_t *u, const uint8_t *v,
int uv_stride, int height, int width,
uint8_t *oy, int oy_stride, uint8_t *ou,
uint8_t *ov, int ouv_stride, int oheight,
int owidth, int bd);
-void av1_highbd_resize_frame422(const uint8_t *const y, int y_stride,
- const uint8_t *const u, const uint8_t *const v,
+void av1_highbd_resize_frame422(const uint8_t *y, int y_stride,
+ const uint8_t *u, const uint8_t *v,
int uv_stride, int height, int width,
uint8_t *oy, int oy_stride, uint8_t *ou,
uint8_t *ov, int ouv_stride, int oheight,
int owidth, int bd);
-void av1_highbd_resize_frame444(const uint8_t *const y, int y_stride,
- const uint8_t *const u, const uint8_t *const v,
+void av1_highbd_resize_frame444(const uint8_t *y, int y_stride,
+ const uint8_t *u, const uint8_t *v,
int uv_stride, int height, int width,
uint8_t *oy, int oy_stride, uint8_t *ou,
uint8_t *ov, int ouv_stride, int oheight,
@@ -73,12 +70,11 @@ void av1_upscale_normative_and_extend_frame(const AV1_COMMON *cm,
YV12_BUFFER_CONFIG *av1_realloc_and_scale_if_required(
AV1_COMMON *cm, YV12_BUFFER_CONFIG *unscaled, YV12_BUFFER_CONFIG *scaled,
const InterpFilter filter, const int phase, const bool use_optimized_scaler,
- const bool for_psnr, const int border_in_pixels,
- const int num_pyramid_levels);
+ const bool for_psnr, const int border_in_pixels, const bool alloc_pyramid);
bool av1_resize_and_extend_frame_nonnormative(const YV12_BUFFER_CONFIG *src,
YV12_BUFFER_CONFIG *dst, int bd,
- const int num_planes);
+ int num_planes);
// Calculates the scaled dimensions from the given original dimensions and the
// resize scale denominator.
@@ -95,7 +91,7 @@ void av1_calculate_scaled_superres_size(int *width, int *height,
void av1_calculate_unscaled_superres_size(int *width, int *height, int denom);
void av1_superres_upscale(AV1_COMMON *cm, BufferPool *const pool,
- int num_pyramid_levels);
+ bool alloc_pyramid);
// Returns 1 if a superres upscaled frame is scaled and 0 otherwise.
static INLINE int av1_superres_scaled(const AV1_COMMON *cm) {
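Since the pyramid argument is now a plain on/off flag rather than a level count, callers just pass a bool. A hedged caller sketch, assuming the usual av1 headers are included and with the decision logic elided:

    static void upscale_if_needed(AV1_COMMON *cm, BufferPool *pool) {
      const bool need_pyramid = false;  /* encoder-side decision, hard-coded here */
      av1_superres_upscale(cm, pool, /*alloc_pyramid=*/need_pyramid);
    }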
diff --git a/third_party/aom/av1/common/restoration.c b/third_party/aom/av1/common/restoration.c
index 0be126fa65..335fdc8c2a 100644
--- a/third_party/aom/av1/common/restoration.c
+++ b/third_party/aom/av1/common/restoration.c
@@ -11,20 +11,24 @@
*/
#include <math.h>
+#include <stddef.h>
#include "config/aom_config.h"
-#include "config/aom_dsp_rtcd.h"
#include "config/aom_scale_rtcd.h"
+#include "aom/internal/aom_codec_internal.h"
#include "aom_mem/aom_mem.h"
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_mem/aom_mem.h"
+#include "aom_ports/mem.h"
+#include "aom_util/aom_pthread.h"
+
#include "av1/common/av1_common_int.h"
+#include "av1/common/convolve.h"
+#include "av1/common/enums.h"
#include "av1/common/resize.h"
#include "av1/common/restoration.h"
#include "av1/common/thread_common.h"
-#include "aom_dsp/aom_dsp_common.h"
-#include "aom_mem/aom_mem.h"
-
-#include "aom_ports/mem.h"
// The 's' values are calculated based on original 'r' and 'e' values in the
// spec using GenSgrprojVtable().
@@ -115,8 +119,9 @@ void av1_loop_restoration_precal(void) {
#endif
}
-static void extend_frame_lowbd(uint8_t *data, int width, int height, int stride,
- int border_horz, int border_vert) {
+static void extend_frame_lowbd(uint8_t *data, int width, int height,
+ ptrdiff_t stride, int border_horz,
+ int border_vert) {
uint8_t *data_p;
int i;
for (i = 0; i < height; ++i) {
@@ -136,7 +141,8 @@ static void extend_frame_lowbd(uint8_t *data, int width, int height, int stride,
#if CONFIG_AV1_HIGHBITDEPTH
static void extend_frame_highbd(uint16_t *data, int width, int height,
- int stride, int border_horz, int border_vert) {
+ ptrdiff_t stride, int border_horz,
+ int border_vert) {
uint16_t *data_p;
int i, j;
for (i = 0; i < height; ++i) {
@@ -988,8 +994,10 @@ void av1_loop_restoration_filter_unit(
int unit_h = limits->v_end - limits->v_start;
int unit_w = limits->h_end - limits->h_start;
- uint8_t *data8_tl = data8 + limits->v_start * stride + limits->h_start;
- uint8_t *dst8_tl = dst8 + limits->v_start * dst_stride + limits->h_start;
+ uint8_t *data8_tl =
+ data8 + limits->v_start * (ptrdiff_t)stride + limits->h_start;
+ uint8_t *dst8_tl =
+ dst8 + limits->v_start * (ptrdiff_t)dst_stride + limits->h_start;
if (unit_rtype == RESTORE_NONE) {
copy_rest_unit(unit_w, unit_h, data8_tl, stride, dst8_tl, dst_stride,
@@ -1074,7 +1082,8 @@ void av1_loop_restoration_filter_frame_init(AV1LrStruct *lr_ctxt,
if (aom_realloc_frame_buffer(
lr_ctxt->dst, frame_width, frame_height, seq_params->subsampling_x,
seq_params->subsampling_y, highbd, AOM_RESTORATION_FRAME_BORDER,
- cm->features.byte_alignment, NULL, NULL, NULL, 0, 0) != AOM_CODEC_OK)
+ cm->features.byte_alignment, NULL, NULL, NULL, false,
+ 0) != AOM_CODEC_OK)
aom_internal_error(cm->error, AOM_CODEC_MEM_ERROR,
"Failed to allocate restoration dst buffer");
@@ -1349,7 +1358,7 @@ static void save_deblock_boundary_lines(
const int is_uv = plane > 0;
const uint8_t *src_buf = REAL_PTR(use_highbd, frame->buffers[plane]);
const int src_stride = frame->strides[is_uv] << use_highbd;
- const uint8_t *src_rows = src_buf + row * src_stride;
+ const uint8_t *src_rows = src_buf + row * (ptrdiff_t)src_stride;
uint8_t *bdry_buf = is_above ? boundaries->stripe_boundary_above
: boundaries->stripe_boundary_below;
@@ -1404,7 +1413,7 @@ static void save_cdef_boundary_lines(const YV12_BUFFER_CONFIG *frame,
const int is_uv = plane > 0;
const uint8_t *src_buf = REAL_PTR(use_highbd, frame->buffers[plane]);
const int src_stride = frame->strides[is_uv] << use_highbd;
- const uint8_t *src_rows = src_buf + row * src_stride;
+ const uint8_t *src_rows = src_buf + row * (ptrdiff_t)src_stride;
uint8_t *bdry_buf = is_above ? boundaries->stripe_boundary_above
: boundaries->stripe_boundary_below;
diff --git a/third_party/aom/av1/common/thread_common.c b/third_party/aom/av1/common/thread_common.c
index 45695147ff..8a137cc9f7 100644
--- a/third_party/aom/av1/common/thread_common.c
+++ b/third_party/aom/av1/common/thread_common.c
@@ -14,12 +14,19 @@
#include "config/aom_scale_rtcd.h"
#include "aom_dsp/aom_dsp_common.h"
+#include "aom_dsp/txfm_common.h"
#include "aom_mem/aom_mem.h"
+#include "aom_util/aom_pthread.h"
+#include "aom_util/aom_thread.h"
#include "av1/common/av1_loopfilter.h"
+#include "av1/common/blockd.h"
+#include "av1/common/cdef.h"
#include "av1/common/entropymode.h"
+#include "av1/common/enums.h"
#include "av1/common/thread_common.h"
#include "av1/common/reconinter.h"
#include "av1/common/reconintra.h"
+#include "av1/common/restoration.h"
// Set up nsync by width.
static INLINE int get_sync_range(int width) {
diff --git a/third_party/aom/av1/common/thread_common.h b/third_party/aom/av1/common/thread_common.h
index 675687dc98..7e681f322b 100644
--- a/third_party/aom/av1/common/thread_common.h
+++ b/third_party/aom/av1/common/thread_common.h
@@ -16,6 +16,7 @@
#include "av1/common/av1_loopfilter.h"
#include "av1/common/cdef.h"
+#include "aom_util/aom_pthread.h"
#include "aom_util/aom_thread.h"
#ifdef __cplusplus
diff --git a/third_party/aom/av1/common/tile_common.c b/third_party/aom/av1/common/tile_common.c
index b964f259b8..45a189d69a 100644
--- a/third_party/aom/av1/common/tile_common.c
+++ b/third_party/aom/av1/common/tile_common.c
@@ -177,46 +177,16 @@ int av1_get_sb_cols_in_tile(const AV1_COMMON *cm, const TileInfo *tile) {
cm->seq_params->mib_size_log2);
}
-PixelRect av1_get_tile_rect(const TileInfo *tile_info, const AV1_COMMON *cm,
- int is_uv) {
- PixelRect r;
-
- // Calculate position in the Y plane
- r.left = tile_info->mi_col_start * MI_SIZE;
- r.right = tile_info->mi_col_end * MI_SIZE;
- r.top = tile_info->mi_row_start * MI_SIZE;
- r.bottom = tile_info->mi_row_end * MI_SIZE;
-
- // If upscaling is enabled, the tile limits need scaling to match the
- // upscaled frame where the restoration units live. To do this, scale up the
- // top-left and bottom-right of the tile.
- if (av1_superres_scaled(cm)) {
- av1_calculate_unscaled_superres_size(&r.left, &r.top,
- cm->superres_scale_denominator);
- av1_calculate_unscaled_superres_size(&r.right, &r.bottom,
- cm->superres_scale_denominator);
- }
-
- const int frame_w = cm->superres_upscaled_width;
- const int frame_h = cm->superres_upscaled_height;
-
- // Make sure we don't fall off the bottom-right of the frame.
- r.right = AOMMIN(r.right, frame_w);
- r.bottom = AOMMIN(r.bottom, frame_h);
-
- // Convert to coordinates in the appropriate plane
- const int ss_x = is_uv && cm->seq_params->subsampling_x;
- const int ss_y = is_uv && cm->seq_params->subsampling_y;
-
- r.left = ROUND_POWER_OF_TWO(r.left, ss_x);
- r.right = ROUND_POWER_OF_TWO(r.right, ss_x);
- r.top = ROUND_POWER_OF_TWO(r.top, ss_y);
- r.bottom = ROUND_POWER_OF_TWO(r.bottom, ss_y);
-
- return r;
-}
-
-void av1_get_uniform_tile_size(const AV1_COMMON *cm, int *w, int *h) {
+// Section 7.3.1 of the AV1 spec says, on pages 200-201:
+// It is a requirement of bitstream conformance that the following conditions
+// are met:
+// ...
+// * TileHeight is equal to (use_128x128_superblock ? 128 : 64) for all
+// tiles (i.e. the tile is exactly one superblock high)
+// * TileWidth is identical for all tiles and is an integer multiple of
+// TileHeight (i.e. the tile is an integer number of superblocks wide)
+// ...
+bool av1_get_uniform_tile_size(const AV1_COMMON *cm, int *w, int *h) {
const CommonTileParams *const tiles = &cm->tiles;
if (tiles->uniform_spacing) {
*w = tiles->width;
@@ -226,7 +196,10 @@ void av1_get_uniform_tile_size(const AV1_COMMON *cm, int *w, int *h) {
const int tile_width_sb =
tiles->col_start_sb[i + 1] - tiles->col_start_sb[i];
const int tile_w = tile_width_sb * cm->seq_params->mib_size;
- assert(i == 0 || tile_w == *w); // ensure all tiles have same dimension
+ // ensure all tiles have same dimension
+ if (i != 0 && tile_w != *w) {
+ return false;
+ }
*w = tile_w;
}
@@ -234,10 +207,14 @@ void av1_get_uniform_tile_size(const AV1_COMMON *cm, int *w, int *h) {
const int tile_height_sb =
tiles->row_start_sb[i + 1] - tiles->row_start_sb[i];
const int tile_h = tile_height_sb * cm->seq_params->mib_size;
- assert(i == 0 || tile_h == *h); // ensure all tiles have same dimension
+ // ensure all tiles have same dimension
+ if (i != 0 && tile_h != *h) {
+ return false;
+ }
*h = tile_h;
}
}
+ return true;
}
int av1_is_min_tile_width_satisfied(const AV1_COMMON *cm) {
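Because av1_get_uniform_tile_size can now fail instead of asserting, callers need to check its return value. A minimal hedged sketch (check_tile_list is an invented caller, assuming the usual av1 headers):

    static int check_tile_list(const struct AV1Common *cm) {
      int tile_w, tile_h;
      if (!av1_get_uniform_tile_size(cm, &tile_w, &tile_h)) {
        return 0;  /* tiles are not uniformly sized: reject the tile list */
      }
      /* tile_w / tile_h are safe to use from here on. */
      return 1;
    }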
diff --git a/third_party/aom/av1/common/tile_common.h b/third_party/aom/av1/common/tile_common.h
index 5383ae940b..12228c9e94 100644
--- a/third_party/aom/av1/common/tile_common.h
+++ b/third_party/aom/av1/common/tile_common.h
@@ -12,13 +12,14 @@
#ifndef AOM_AV1_COMMON_TILE_COMMON_H_
#define AOM_AV1_COMMON_TILE_COMMON_H_
+#include <stdbool.h>
+
+#include "config/aom_config.h"
+
#ifdef __cplusplus
extern "C" {
#endif
-#include "config/aom_config.h"
-#include "aom_dsp/rect.h"
-
struct AV1Common;
struct SequenceHeader;
struct CommonTileParams;
@@ -43,10 +44,6 @@ void av1_tile_set_col(TileInfo *tile, const struct AV1Common *cm, int col);
int av1_get_sb_rows_in_tile(const struct AV1Common *cm, const TileInfo *tile);
int av1_get_sb_cols_in_tile(const struct AV1Common *cm, const TileInfo *tile);
-// Return the pixel extents of the given tile
-PixelRect av1_get_tile_rect(const TileInfo *tile_info,
- const struct AV1Common *cm, int is_uv);
-
// Define tile maximum width and area
// There is no maximum height since height is limited by area and width limits
// The minimum tile width or height is fixed at one superblock
@@ -56,7 +53,9 @@ PixelRect av1_get_tile_rect(const TileInfo *tile_info,
#define MAX_TILE_AREA_LEVEL_7_AND_ABOVE (4096 * 4608)
#endif
-void av1_get_uniform_tile_size(const struct AV1Common *cm, int *w, int *h);
+// Gets the width and height (in units of MI_SIZE) of the tiles in a tile list.
+// Returns true on success, false on failure.
+bool av1_get_uniform_tile_size(const struct AV1Common *cm, int *w, int *h);
void av1_get_tile_limits(struct AV1Common *const cm);
void av1_calculate_tile_cols(const struct SequenceHeader *const seq_params,
int cm_mi_rows, int cm_mi_cols,
diff --git a/third_party/aom/av1/common/x86/cdef_block_sse2.c b/third_party/aom/av1/common/x86/cdef_block_sse2.c
deleted file mode 100644
index 5ab7ffa2ff..0000000000
--- a/third_party/aom/av1/common/x86/cdef_block_sse2.c
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2016, Alliance for Open Media. All rights reserved
- *
- * This source code is subject to the terms of the BSD 2 Clause License and
- * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
- * was not distributed with this source code in the LICENSE file, you can
- * obtain it at www.aomedia.org/license/software. If the Alliance for Open
- * Media Patent License 1.0 was not distributed with this source code in the
- * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
- */
-
-#include "aom_dsp/aom_simd.h"
-#define SIMD_FUNC(name) name##_sse2
-#include "av1/common/cdef_block_simd.h"
-
-void cdef_find_dir_dual_sse2(const uint16_t *img1, const uint16_t *img2,
- int stride, int32_t *var_out_1st,
- int32_t *var_out_2nd, int coeff_shift,
- int *out_dir_1st_8x8, int *out_dir_2nd_8x8) {
- // Process first 8x8.
- *out_dir_1st_8x8 = cdef_find_dir(img1, stride, var_out_1st, coeff_shift);
-
- // Process second 8x8.
- *out_dir_2nd_8x8 = cdef_find_dir(img2, stride, var_out_2nd, coeff_shift);
-}
-
-void cdef_copy_rect8_8bit_to_16bit_sse2(uint16_t *dst, int dstride,
- const uint8_t *src, int sstride,
- int width, int height) {
- int j = 0;
- for (int i = 0; i < height; i++) {
- for (j = 0; j < (width & ~0x7); j += 8) {
- v64 row = v64_load_unaligned(&src[i * sstride + j]);
- v128_store_unaligned(&dst[i * dstride + j], v128_unpack_u8_s16(row));
- }
- for (; j < width; j++) {
- dst[i * dstride + j] = src[i * sstride + j];
- }
- }
-}
diff --git a/third_party/aom/av1/common/x86/cdef_block_ssse3.c b/third_party/aom/av1/common/x86/cdef_block_ssse3.c
index 0fb36eb6e0..14eb6c9e31 100644
--- a/third_party/aom/av1/common/x86/cdef_block_ssse3.c
+++ b/third_party/aom/av1/common/x86/cdef_block_ssse3.c
@@ -9,6 +9,17 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
+// Include SSSE3 CDEF code only for 32-bit x86, to support Valgrind.
+// For normal use, we require SSE4.1, so cdef_*_sse4_1 will be used instead of
+// these functions. However, 32-bit Valgrind does not support SSE4.1, so we
+// include a fallback to SSSE3 to improve performance
+
+#include "config/aom_config.h"
+
+#if !AOM_ARCH_X86
+#error "cdef_block_ssse3.c is included for compatibility with 32-bit x86 only"
+#endif // !AOM_ARCH_X86
+
#include "aom_dsp/aom_simd.h"
#define SIMD_FUNC(name) name##_ssse3
#include "av1/common/cdef_block_simd.h"
diff --git a/third_party/aom/av1/common/x86/convolve_2d_avx2.c b/third_party/aom/av1/common/x86/convolve_2d_avx2.c
index 1b39a0a8d5..d4c1169cc3 100644
--- a/third_party/aom/av1/common/x86/convolve_2d_avx2.c
+++ b/third_party/aom/av1/common/x86/convolve_2d_avx2.c
@@ -21,13 +21,11 @@
#include "av1/common/convolve.h"
-void av1_convolve_2d_sr_general_avx2(const uint8_t *src, int src_stride,
- uint8_t *dst, int dst_stride, int w, int h,
- const InterpFilterParams *filter_params_x,
- const InterpFilterParams *filter_params_y,
- const int subpel_x_qn,
- const int subpel_y_qn,
- ConvolveParams *conv_params) {
+static void convolve_2d_sr_general_avx2(
+ const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, int w,
+ int h, const InterpFilterParams *filter_params_x,
+ const InterpFilterParams *filter_params_y, const int subpel_x_qn,
+ const int subpel_y_qn, ConvolveParams *conv_params) {
if (filter_params_x->taps > 8) {
const int bd = 8;
int im_stride = 8, i;
@@ -150,9 +148,9 @@ void av1_convolve_2d_sr_avx2(
const bool use_general = (tap_x == 12 || tap_y == 12);
if (use_general) {
- av1_convolve_2d_sr_general_avx2(src, src_stride, dst, dst_stride, w, h,
- filter_params_x, filter_params_y,
- subpel_x_q4, subpel_y_q4, conv_params);
+ convolve_2d_sr_general_avx2(src, src_stride, dst, dst_stride, w, h,
+ filter_params_x, filter_params_y, subpel_x_q4,
+ subpel_y_q4, conv_params);
} else {
av1_convolve_2d_sr_specialized_avx2(src, src_stride, dst, dst_stride, w, h,
filter_params_x, filter_params_y,
diff --git a/third_party/aom/av1/common/x86/convolve_2d_sse2.c b/third_party/aom/av1/common/x86/convolve_2d_sse2.c
index 1b85f37294..68971eacc1 100644
--- a/third_party/aom/av1/common/x86/convolve_2d_sse2.c
+++ b/third_party/aom/av1/common/x86/convolve_2d_sse2.c
@@ -19,12 +19,11 @@
#include "aom_dsp/x86/convolve_common_intrin.h"
#include "av1/common/convolve.h"
-void av1_convolve_2d_sr_12tap_sse2(const uint8_t *src, int src_stride,
- uint8_t *dst, int dst_stride, int w, int h,
- const InterpFilterParams *filter_params_x,
- const InterpFilterParams *filter_params_y,
- const int subpel_x_qn, const int subpel_y_qn,
- ConvolveParams *conv_params) {
+static void convolve_2d_sr_12tap_sse2(
+ const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, int w,
+ int h, const InterpFilterParams *filter_params_x,
+ const InterpFilterParams *filter_params_y, const int subpel_x_qn,
+ const int subpel_y_qn, ConvolveParams *conv_params) {
const int bd = 8;
DECLARE_ALIGNED(16, int16_t,
@@ -231,9 +230,9 @@ void av1_convolve_2d_sr_sse2(const uint8_t *src, int src_stride, uint8_t *dst,
filter_params_x, filter_params_y, subpel_x_qn,
subpel_y_qn, conv_params);
} else {
- av1_convolve_2d_sr_12tap_sse2(src, src_stride, dst, dst_stride, w, h,
- filter_params_x, filter_params_y,
- subpel_x_qn, subpel_y_qn, conv_params);
+ convolve_2d_sr_12tap_sse2(src, src_stride, dst, dst_stride, w, h,
+ filter_params_x, filter_params_y, subpel_x_qn,
+ subpel_y_qn, conv_params);
}
} else {
const int bd = 8;
diff --git a/third_party/aom/av1/common/x86/convolve_sse2.c b/third_party/aom/av1/common/x86/convolve_sse2.c
index 012e75c1ae..6383567a48 100644
--- a/third_party/aom/av1/common/x86/convolve_sse2.c
+++ b/third_party/aom/av1/common/x86/convolve_sse2.c
@@ -75,10 +75,10 @@ static INLINE __m128i convolve_hi_y(const __m128i *const s,
return convolve(ss, coeffs);
}
-void av1_convolve_y_sr_12tap_sse2(const uint8_t *src, int src_stride,
- uint8_t *dst, int dst_stride, int w, int h,
- const InterpFilterParams *filter_params_y,
- int subpel_y_qn) {
+static void convolve_y_sr_12tap_sse2(const uint8_t *src, int src_stride,
+ uint8_t *dst, int dst_stride, int w, int h,
+ const InterpFilterParams *filter_params_y,
+ int subpel_y_qn) {
const int fo_vert = filter_params_y->taps / 2 - 1;
const uint8_t *src_ptr = src - fo_vert * src_stride;
const __m128i round_const = _mm_set1_epi32((1 << FILTER_BITS) >> 1);
@@ -185,8 +185,8 @@ void av1_convolve_y_sr_sse2(const uint8_t *src, int src_stride, uint8_t *dst,
av1_convolve_y_sr_c(src, src_stride, dst, dst_stride, w, h,
filter_params_y, subpel_y_qn);
} else {
- av1_convolve_y_sr_12tap_sse2(src, src_stride, dst, dst_stride, w, h,
- filter_params_y, subpel_y_qn);
+ convolve_y_sr_12tap_sse2(src, src_stride, dst, dst_stride, w, h,
+ filter_params_y, subpel_y_qn);
}
} else {
const int fo_vert = filter_params_y->taps / 2 - 1;
@@ -337,11 +337,11 @@ void av1_convolve_y_sr_sse2(const uint8_t *src, int src_stride, uint8_t *dst,
}
}
-void av1_convolve_x_sr_12tap_sse2(const uint8_t *src, int src_stride,
- uint8_t *dst, int dst_stride, int w, int h,
- const InterpFilterParams *filter_params_x,
- int subpel_x_qn,
- ConvolveParams *conv_params) {
+static void convolve_x_sr_12tap_sse2(const uint8_t *src, int src_stride,
+ uint8_t *dst, int dst_stride, int w, int h,
+ const InterpFilterParams *filter_params_x,
+ int subpel_x_qn,
+ ConvolveParams *conv_params) {
const int fo_horiz = filter_params_x->taps / 2 - 1;
const uint8_t *src_ptr = src - fo_horiz;
const int bits = FILTER_BITS - conv_params->round_0;
@@ -402,8 +402,8 @@ void av1_convolve_x_sr_sse2(const uint8_t *src, int src_stride, uint8_t *dst,
av1_convolve_x_sr_c(src, src_stride, dst, dst_stride, w, h,
filter_params_x, subpel_x_qn, conv_params);
} else {
- av1_convolve_x_sr_12tap_sse2(src, src_stride, dst, dst_stride, w, h,
- filter_params_x, subpel_x_qn, conv_params);
+ convolve_x_sr_12tap_sse2(src, src_stride, dst, dst_stride, w, h,
+ filter_params_x, subpel_x_qn, conv_params);
}
} else {
const int fo_horiz = filter_params_x->taps / 2 - 1;
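Across these convolve files the 12-tap paths lose their av1_ prefix and become static, giving them internal linkage: they disappear from the library's exported symbol set and can be renamed or inlined freely. The pattern in isolation (names invented):

    /* Public entry point, declared in a header elsewhere in the library. */
    void av1_do_filter(int w);

    /* File-local worker: internal linkage, no exported-name prefix needed. */
    static void do_filter_12tap(int w) { (void)w; }

    void av1_do_filter(int w) {
      if (w > 8) do_filter_12tap(w);
    }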
diff --git a/third_party/aom/av1/common/x86/jnt_convolve_sse2.c b/third_party/aom/av1/common/x86/jnt_convolve_sse2.c
index 8c5d9918fb..d5d2db7455 100644
--- a/third_party/aom/av1/common/x86/jnt_convolve_sse2.c
+++ b/third_party/aom/av1/common/x86/jnt_convolve_sse2.c
@@ -375,232 +375,3 @@ void av1_dist_wtd_convolve_y_sse2(const uint8_t *src, int src_stride,
} while (j < w);
}
}
-
-void av1_dist_wtd_convolve_2d_sse2(const uint8_t *src, int src_stride,
- uint8_t *dst0, int dst_stride0, int w, int h,
- const InterpFilterParams *filter_params_x,
- const InterpFilterParams *filter_params_y,
- const int subpel_x_qn, const int subpel_y_qn,
- ConvolveParams *conv_params) {
- CONV_BUF_TYPE *dst = conv_params->dst;
- int dst_stride = conv_params->dst_stride;
- const int bd = 8;
-
- DECLARE_ALIGNED(16, int16_t,
- im_block[(MAX_SB_SIZE + MAX_FILTER_TAP - 1) * MAX_SB_SIZE]);
- int im_h = h + filter_params_y->taps - 1;
- int im_stride = MAX_SB_SIZE;
- int i, j;
- const int fo_vert = filter_params_y->taps / 2 - 1;
- const int fo_horiz = filter_params_x->taps / 2 - 1;
- const int do_average = conv_params->do_average;
- const int use_dist_wtd_comp_avg = conv_params->use_dist_wtd_comp_avg;
- const uint8_t *const src_ptr = src - fo_vert * src_stride - fo_horiz;
-
- const __m128i zero = _mm_setzero_si128();
-
- const int w0 = conv_params->fwd_offset;
- const int w1 = conv_params->bck_offset;
- const __m128i wt0 = _mm_set1_epi16(w0);
- const __m128i wt1 = _mm_set1_epi16(w1);
- const __m128i wt = _mm_unpacklo_epi16(wt0, wt1);
-
- const int offset_0 =
- bd + 2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1;
- const int offset = (1 << offset_0) + (1 << (offset_0 - 1));
- const __m128i offset_const = _mm_set1_epi16(offset);
- const int rounding_shift =
- 2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1;
- const __m128i rounding_const = _mm_set1_epi16((1 << rounding_shift) >> 1);
-
- /* Horizontal filter */
- {
- const int16_t *x_filter = av1_get_interp_filter_subpel_kernel(
- filter_params_x, subpel_x_qn & SUBPEL_MASK);
- const __m128i coeffs_x = _mm_loadu_si128((__m128i *)x_filter);
-
- // coeffs 0 1 0 1 2 3 2 3
- const __m128i tmp_0 = _mm_unpacklo_epi32(coeffs_x, coeffs_x);
- // coeffs 4 5 4 5 6 7 6 7
- const __m128i tmp_1 = _mm_unpackhi_epi32(coeffs_x, coeffs_x);
-
- // coeffs 0 1 0 1 0 1 0 1
- const __m128i coeff_01 = _mm_unpacklo_epi64(tmp_0, tmp_0);
- // coeffs 2 3 2 3 2 3 2 3
- const __m128i coeff_23 = _mm_unpackhi_epi64(tmp_0, tmp_0);
- // coeffs 4 5 4 5 4 5 4 5
- const __m128i coeff_45 = _mm_unpacklo_epi64(tmp_1, tmp_1);
- // coeffs 6 7 6 7 6 7 6 7
- const __m128i coeff_67 = _mm_unpackhi_epi64(tmp_1, tmp_1);
-
- const __m128i round_const = _mm_set1_epi32(
- ((1 << conv_params->round_0) >> 1) + (1 << (bd + FILTER_BITS - 1)));
- const __m128i round_shift = _mm_cvtsi32_si128(conv_params->round_0);
-
- for (i = 0; i < im_h; ++i) {
- for (j = 0; j < w; j += 8) {
- __m128i temp_lo, temp_hi;
- const __m128i data =
- _mm_loadu_si128((__m128i *)&src_ptr[i * src_stride + j]);
-
- const __m128i src_lo = _mm_unpacklo_epi8(data, zero);
- const __m128i src_hi = _mm_unpackhi_epi8(data, zero);
-
- // Filter even-index pixels
- const __m128i res_0 = _mm_madd_epi16(src_lo, coeff_01);
- temp_lo = _mm_srli_si128(src_lo, 4);
- temp_hi = _mm_slli_si128(src_hi, 12);
- const __m128i src_2 = _mm_or_si128(temp_hi, temp_lo);
- const __m128i res_2 = _mm_madd_epi16(src_2, coeff_23);
- temp_lo = _mm_srli_si128(src_lo, 8);
- temp_hi = _mm_slli_si128(src_hi, 8);
- const __m128i src_4 = _mm_or_si128(temp_hi, temp_lo);
- const __m128i res_4 = _mm_madd_epi16(src_4, coeff_45);
- temp_lo = _mm_srli_si128(src_lo, 12);
- temp_hi = _mm_slli_si128(src_hi, 4);
- const __m128i src_6 = _mm_or_si128(temp_hi, temp_lo);
- const __m128i res_6 = _mm_madd_epi16(src_6, coeff_67);
-
- __m128i res_even = _mm_add_epi32(_mm_add_epi32(res_0, res_4),
- _mm_add_epi32(res_2, res_6));
- res_even =
- _mm_sra_epi32(_mm_add_epi32(res_even, round_const), round_shift);
-
- // Filter odd-index pixels
- temp_lo = _mm_srli_si128(src_lo, 2);
- temp_hi = _mm_slli_si128(src_hi, 14);
- const __m128i src_1 = _mm_or_si128(temp_hi, temp_lo);
- const __m128i res_1 = _mm_madd_epi16(src_1, coeff_01);
- temp_lo = _mm_srli_si128(src_lo, 6);
- temp_hi = _mm_slli_si128(src_hi, 10);
- const __m128i src_3 = _mm_or_si128(temp_hi, temp_lo);
- const __m128i res_3 = _mm_madd_epi16(src_3, coeff_23);
- temp_lo = _mm_srli_si128(src_lo, 10);
- temp_hi = _mm_slli_si128(src_hi, 6);
- const __m128i src_5 = _mm_or_si128(temp_hi, temp_lo);
- const __m128i res_5 = _mm_madd_epi16(src_5, coeff_45);
- temp_lo = _mm_srli_si128(src_lo, 14);
- temp_hi = _mm_slli_si128(src_hi, 2);
- const __m128i src_7 = _mm_or_si128(temp_hi, temp_lo);
- const __m128i res_7 = _mm_madd_epi16(src_7, coeff_67);
-
- __m128i res_odd = _mm_add_epi32(_mm_add_epi32(res_1, res_5),
- _mm_add_epi32(res_3, res_7));
- res_odd =
- _mm_sra_epi32(_mm_add_epi32(res_odd, round_const), round_shift);
-
- // Pack in the column order 0, 2, 4, 6, 1, 3, 5, 7
- __m128i res = _mm_packs_epi32(res_even, res_odd);
- _mm_store_si128((__m128i *)&im_block[i * im_stride + j], res);
- }
- }
- }
-
- /* Vertical filter */
- {
- const int16_t *y_filter = av1_get_interp_filter_subpel_kernel(
- filter_params_y, subpel_y_qn & SUBPEL_MASK);
- const __m128i coeffs_y = _mm_loadu_si128((__m128i *)y_filter);
-
- // coeffs 0 1 0 1 2 3 2 3
- const __m128i tmp_0 = _mm_unpacklo_epi32(coeffs_y, coeffs_y);
- // coeffs 4 5 4 5 6 7 6 7
- const __m128i tmp_1 = _mm_unpackhi_epi32(coeffs_y, coeffs_y);
-
- // coeffs 0 1 0 1 0 1 0 1
- const __m128i coeff_01 = _mm_unpacklo_epi64(tmp_0, tmp_0);
- // coeffs 2 3 2 3 2 3 2 3
- const __m128i coeff_23 = _mm_unpackhi_epi64(tmp_0, tmp_0);
- // coeffs 4 5 4 5 4 5 4 5
- const __m128i coeff_45 = _mm_unpacklo_epi64(tmp_1, tmp_1);
- // coeffs 6 7 6 7 6 7 6 7
- const __m128i coeff_67 = _mm_unpackhi_epi64(tmp_1, tmp_1);
-
- const __m128i round_const = _mm_set1_epi32(
- ((1 << conv_params->round_1) >> 1) -
- (1 << (bd + 2 * FILTER_BITS - conv_params->round_0 - 1)));
- const __m128i round_shift = _mm_cvtsi32_si128(conv_params->round_1);
-
- for (i = 0; i < h; ++i) {
- for (j = 0; j < w; j += 8) {
- // Filter even-index pixels
- const int16_t *data = &im_block[i * im_stride + j];
- const __m128i src_0 =
- _mm_unpacklo_epi16(*(__m128i *)(data + 0 * im_stride),
- *(__m128i *)(data + 1 * im_stride));
- const __m128i src_2 =
- _mm_unpacklo_epi16(*(__m128i *)(data + 2 * im_stride),
- *(__m128i *)(data + 3 * im_stride));
- const __m128i src_4 =
- _mm_unpacklo_epi16(*(__m128i *)(data + 4 * im_stride),
- *(__m128i *)(data + 5 * im_stride));
- const __m128i src_6 =
- _mm_unpacklo_epi16(*(__m128i *)(data + 6 * im_stride),
- *(__m128i *)(data + 7 * im_stride));
-
- const __m128i res_0 = _mm_madd_epi16(src_0, coeff_01);
- const __m128i res_2 = _mm_madd_epi16(src_2, coeff_23);
- const __m128i res_4 = _mm_madd_epi16(src_4, coeff_45);
- const __m128i res_6 = _mm_madd_epi16(src_6, coeff_67);
-
- const __m128i res_even = _mm_add_epi32(_mm_add_epi32(res_0, res_2),
- _mm_add_epi32(res_4, res_6));
-
- // Filter odd-index pixels
- const __m128i src_1 =
- _mm_unpackhi_epi16(*(__m128i *)(data + 0 * im_stride),
- *(__m128i *)(data + 1 * im_stride));
- const __m128i src_3 =
- _mm_unpackhi_epi16(*(__m128i *)(data + 2 * im_stride),
- *(__m128i *)(data + 3 * im_stride));
- const __m128i src_5 =
- _mm_unpackhi_epi16(*(__m128i *)(data + 4 * im_stride),
- *(__m128i *)(data + 5 * im_stride));
- const __m128i src_7 =
- _mm_unpackhi_epi16(*(__m128i *)(data + 6 * im_stride),
- *(__m128i *)(data + 7 * im_stride));
-
- const __m128i res_1 = _mm_madd_epi16(src_1, coeff_01);
- const __m128i res_3 = _mm_madd_epi16(src_3, coeff_23);
- const __m128i res_5 = _mm_madd_epi16(src_5, coeff_45);
- const __m128i res_7 = _mm_madd_epi16(src_7, coeff_67);
-
- const __m128i res_odd = _mm_add_epi32(_mm_add_epi32(res_1, res_3),
- _mm_add_epi32(res_5, res_7));
-
- // Rearrange pixels back into the order 0 ... 7
- const __m128i res_lo = _mm_unpacklo_epi32(res_even, res_odd);
- const __m128i res_hi = _mm_unpackhi_epi32(res_even, res_odd);
-
- const __m128i res_lo_round =
- _mm_sra_epi32(_mm_add_epi32(res_lo, round_const), round_shift);
- const __m128i res_hi_round =
- _mm_sra_epi32(_mm_add_epi32(res_hi, round_const), round_shift);
-
- const __m128i res_16b = _mm_packs_epi32(res_lo_round, res_hi_round);
- const __m128i res_unsigned = _mm_add_epi16(res_16b, offset_const);
-
- // Accumulate values into the destination buffer
- if (do_average) {
- const __m128i data_ref_0 =
- _mm_loadu_si128((__m128i *)(&dst[i * dst_stride + j]));
-
- const __m128i comp_avg_res =
- comp_avg(&data_ref_0, &res_unsigned, &wt, use_dist_wtd_comp_avg);
-
- const __m128i round_result = convolve_rounding(
- &comp_avg_res, &offset_const, &rounding_const, rounding_shift);
-
- const __m128i res_8 = _mm_packus_epi16(round_result, round_result);
-
- if (w > 4)
- _mm_storel_epi64((__m128i *)(&dst0[i * dst_stride0 + j]), res_8);
- else
- *(int *)(&dst0[i * dst_stride0 + j]) = _mm_cvtsi128_si32(res_8);
- } else {
- _mm_store_si128((__m128i *)(&dst[i * dst_stride + j]), res_unsigned);
- }
- }
- }
- }
-}
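The function deleted above implemented the SSE2 path for the 2D distance-weighted compound convolution: a horizontal pass into the 16-bit im_block intermediate buffer, a vertical pass, addition of offset_const to keep the values unsigned, and then either a store into the CONV_BUF (conv_params->dst) or a blend with the first prediction via comp_avg. Below is a minimal scalar sketch of that final blend step, assuming the standard AV1 compound weighting in which fwd_offset and bck_offset sum to 1 << DIST_PRECISION_BITS (16); the helper names are illustrative, not libaom's API.

/*
 * Illustrative scalar sketch (not libaom code) of the do_average branch the
 * removed code vectorizes.  Both ref and res still carry the intermediate
 * offset, which is subtracted again before the final rounding shift, mirroring
 * offset_const / rounding_const / rounding_shift above.
 */
#include <stdint.h>

#define FILTER_BITS 7          /* assumption: standard AV1 filter precision */
#define DIST_PRECISION_BITS 4  /* assumption: compound weights sum to 16 */

static int round_shift(int value, int bits) {  /* local stand-in */
  return (value + (1 << (bits - 1))) >> bits;
}

static uint8_t clip_u8(int value) {  /* local stand-in */
  return (uint8_t)(value < 0 ? 0 : (value > 255 ? 255 : value));
}

/* ref: first prediction already in the CONV_BUF; res: second prediction. */
static uint8_t dist_wtd_blend(int ref, int res, int fwd_offset, int bck_offset,
                              int use_dist_wtd_comp_avg, int round_0,
                              int round_1, int bd) {
  int tmp;
  if (use_dist_wtd_comp_avg) {
    /* Weighted average; weights sum to 16, so >> 4 restores the range. */
    tmp = (ref * fwd_offset + res * bck_offset) >> DIST_PRECISION_BITS;
  } else {
    tmp = (ref + res) >> 1;  /* plain compound average */
  }
  /* Undo the unsigned-range offset, then apply the final rounding shift. */
  const int offset_0 = bd + 2 * FILTER_BITS - round_0 - round_1;
  const int offset = (1 << offset_0) + (1 << (offset_0 - 1));
  const int round_bits = 2 * FILTER_BITS - round_0 - round_1;
  return clip_u8(round_shift(tmp - offset, round_bits));
}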