From d8bbc7858622b6d9c278469aab701ca0b609cddf Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Wed, 15 May 2024 05:35:49 +0200 Subject: Merging upstream version 126.0. Signed-off-by: Daniel Baumann --- third_party/aom/av1/av1.cmake | 22 +- third_party/aom/av1/av1_cx_iface.c | 49 +- third_party/aom/av1/av1_dx_iface.c | 17 +- third_party/aom/av1/common/alloccommon.c | 6 +- .../av1/common/arm/highbd_compound_convolve_neon.c | 532 +++--- .../av1/common/arm/highbd_compound_convolve_neon.h | 293 ++++ .../av1/common/arm/highbd_compound_convolve_sve2.c | 1555 ++++++++++++++++++ .../aom/av1/common/arm/highbd_convolve_sve2.c | 1720 ++++++++++++++++++++ .../aom/av1/common/arm/highbd_convolve_sve2.h | 97 ++ .../aom/av1/common/arm/highbd_warp_plane_neon.c | 30 +- .../aom/av1/common/arm/highbd_warp_plane_neon.h | 60 +- .../aom/av1/common/arm/highbd_warp_plane_sve.c | 32 +- third_party/aom/av1/common/arm/warp_plane_neon.c | 38 +- third_party/aom/av1/common/arm/warp_plane_neon.h | 60 +- .../aom/av1/common/arm/warp_plane_neon_i8mm.c | 38 +- third_party/aom/av1/common/arm/warp_plane_sve.c | 40 +- third_party/aom/av1/common/av1_common_int.h | 2 +- third_party/aom/av1/common/av1_rtcd_defs.pl | 54 +- third_party/aom/av1/common/cdef.c | 13 +- third_party/aom/av1/common/entropymode.h | 9 +- third_party/aom/av1/common/quant_common.c | 18 +- third_party/aom/av1/common/reconintra.c | 6 +- third_party/aom/av1/common/resize.c | 52 +- third_party/aom/av1/common/resize.h | 44 +- third_party/aom/av1/common/restoration.c | 35 +- third_party/aom/av1/common/thread_common.c | 7 + third_party/aom/av1/common/thread_common.h | 1 + third_party/aom/av1/common/tile_common.c | 61 +- third_party/aom/av1/common/tile_common.h | 15 +- third_party/aom/av1/common/x86/cdef_block_sse2.c | 40 - third_party/aom/av1/common/x86/cdef_block_ssse3.c | 11 + third_party/aom/av1/common/x86/convolve_2d_avx2.c | 18 +- third_party/aom/av1/common/x86/convolve_2d_sse2.c | 17 +- third_party/aom/av1/common/x86/convolve_sse2.c | 26 +- third_party/aom/av1/common/x86/jnt_convolve_sse2.c | 229 --- third_party/aom/av1/decoder/decodeframe.c | 49 +- third_party/aom/av1/decoder/decodemv.h | 2 + third_party/aom/av1/decoder/decoder.c | 1 + third_party/aom/av1/decoder/dthread.h | 1 - third_party/aom/av1/decoder/obu.c | 41 +- third_party/aom/av1/encoder/allintra_vis.c | 4 +- third_party/aom/av1/encoder/aq_cyclicrefresh.c | 50 +- .../aom/av1/encoder/arm/neon/av1_error_sve.c | 2 +- .../arm/neon/temporal_filter_neon_dotprod.c | 58 +- .../aom/av1/encoder/arm/neon/wedge_utils_sve.c | 92 ++ .../aom/av1/encoder/av1_temporal_denoiser.c | 8 +- third_party/aom/av1/encoder/bitstream.c | 19 +- third_party/aom/av1/encoder/bitstream.h | 1 + third_party/aom/av1/encoder/block.h | 3 + third_party/aom/av1/encoder/cnn.c | 10 +- third_party/aom/av1/encoder/encode_strategy.c | 27 +- third_party/aom/av1/encoder/encodeframe.c | 20 +- third_party/aom/av1/encoder/encodeframe_utils.c | 6 + third_party/aom/av1/encoder/encoder.c | 94 +- third_party/aom/av1/encoder/encoder.h | 9 +- third_party/aom/av1/encoder/encoder_alloc.h | 3 +- third_party/aom/av1/encoder/encoder_utils.c | 20 +- third_party/aom/av1/encoder/encodetxb.c | 26 +- third_party/aom/av1/encoder/ethread.c | 8 +- third_party/aom/av1/encoder/firstpass.c | 1 + third_party/aom/av1/encoder/global_motion.c | 82 +- third_party/aom/av1/encoder/global_motion.h | 32 +- third_party/aom/av1/encoder/global_motion_facade.c | 47 +- third_party/aom/av1/encoder/k_means_template.h | 10 +- third_party/aom/av1/encoder/lookahead.c | 19 +- 
third_party/aom/av1/encoder/lookahead.h | 20 +- third_party/aom/av1/encoder/nonrd_pickmode.c | 7 +- third_party/aom/av1/encoder/palette.c | 2 +- third_party/aom/av1/encoder/palette.h | 2 +- third_party/aom/av1/encoder/partition_search.c | 48 +- third_party/aom/av1/encoder/partition_strategy.c | 2 +- third_party/aom/av1/encoder/pass2_strategy.c | 100 +- third_party/aom/av1/encoder/pickcdef.c | 2 +- third_party/aom/av1/encoder/picklpf.c | 21 +- third_party/aom/av1/encoder/pickrst.c | 111 +- third_party/aom/av1/encoder/ratectrl.c | 120 +- third_party/aom/av1/encoder/ratectrl.h | 3 + third_party/aom/av1/encoder/speed_features.c | 9 +- third_party/aom/av1/encoder/speed_features.h | 7 + third_party/aom/av1/encoder/superres_scale.c | 2 +- third_party/aom/av1/encoder/svc_layercontext.c | 12 +- third_party/aom/av1/encoder/svc_layercontext.h | 15 + third_party/aom/av1/encoder/temporal_filter.c | 21 +- third_party/aom/av1/encoder/temporal_filter.h | 2 + third_party/aom/av1/encoder/tpl_model.c | 3 +- third_party/aom/av1/encoder/tpl_model.h | 1 + third_party/aom/av1/encoder/tune_butteraugli.c | 10 +- third_party/aom/av1/encoder/tune_vmaf.c | 105 +- third_party/aom/av1/encoder/tune_vmaf.h | 6 +- third_party/aom/av1/encoder/tx_search.c | 23 +- .../aom/av1/encoder/x86/av1_fwd_txfm_sse2.c | 6 + third_party/aom/av1/encoder/x86/cnn_avx2.c | 2 +- 92 files changed, 5155 insertions(+), 1499 deletions(-) create mode 100644 third_party/aom/av1/common/arm/highbd_compound_convolve_neon.h create mode 100644 third_party/aom/av1/common/arm/highbd_compound_convolve_sve2.c create mode 100644 third_party/aom/av1/common/arm/highbd_convolve_sve2.c create mode 100644 third_party/aom/av1/common/arm/highbd_convolve_sve2.h delete mode 100644 third_party/aom/av1/common/x86/cdef_block_sse2.c create mode 100644 third_party/aom/av1/encoder/arm/neon/wedge_utils_sve.c diff --git a/third_party/aom/av1/av1.cmake b/third_party/aom/av1/av1.cmake index c66a748d40..32645f6065 100644 --- a/third_party/aom/av1/av1.cmake +++ b/third_party/aom/av1/av1.cmake @@ -262,7 +262,6 @@ list(APPEND AOM_AV1_ENCODER_SOURCES list(APPEND AOM_AV1_COMMON_INTRIN_SSE2 "${AOM_ROOT}/av1/common/x86/av1_txfm_sse2.h" - "${AOM_ROOT}/av1/common/x86/cdef_block_sse2.c" "${AOM_ROOT}/av1/common/x86/cfl_sse2.c" "${AOM_ROOT}/av1/common/x86/convolve_2d_sse2.c" "${AOM_ROOT}/av1/common/x86/convolve_sse2.c" @@ -272,11 +271,14 @@ list(APPEND AOM_AV1_COMMON_INTRIN_SSSE3 "${AOM_ROOT}/av1/common/x86/av1_inv_txfm_ssse3.c" "${AOM_ROOT}/av1/common/x86/av1_inv_txfm_ssse3.h" - "${AOM_ROOT}/av1/common/x86/cdef_block_ssse3.c" "${AOM_ROOT}/av1/common/x86/cfl_ssse3.c" "${AOM_ROOT}/av1/common/x86/jnt_convolve_ssse3.c" "${AOM_ROOT}/av1/common/x86/resize_ssse3.c") +# Fallbacks to support Valgrind on 32-bit x86 +list(APPEND AOM_AV1_COMMON_INTRIN_SSSE3_X86 + "${AOM_ROOT}/av1/common/x86/cdef_block_ssse3.c") + list(APPEND AOM_AV1_COMMON_INTRIN_SSE4_1 "${AOM_ROOT}/av1/common/x86/av1_convolve_horiz_rs_sse4.c" "${AOM_ROOT}/av1/common/x86/av1_convolve_scale_sse4.c" @@ -372,7 +374,8 @@ list(APPEND AOM_AV1_ENCODER_INTRIN_NEON_DOTPROD "${AOM_ROOT}/av1/encoder/arm/neon/temporal_filter_neon_dotprod.c") list(APPEND AOM_AV1_ENCODER_INTRIN_SVE - "${AOM_ROOT}/av1/encoder/arm/neon/av1_error_sve.c") + "${AOM_ROOT}/av1/encoder/arm/neon/av1_error_sve.c" + "${AOM_ROOT}/av1/encoder/arm/neon/wedge_utils_sve.c") list(APPEND AOM_AV1_ENCODER_INTRIN_ARM_CRC32 "${AOM_ROOT}/av1/encoder/arm/crc32/hash_arm_crc32.c") @@ -477,6 +480,10 @@
if(CONFIG_AV1_HIGHBITDEPTH) "${AOM_ROOT}/av1/common/arm/highbd_warp_plane_neon.c" "${AOM_ROOT}/av1/common/arm/highbd_wiener_convolve_neon.c") + list(APPEND AOM_AV1_COMMON_INTRIN_SVE2 + "${AOM_ROOT}/av1/common/arm/highbd_compound_convolve_sve2.c" + "${AOM_ROOT}/av1/common/arm/highbd_convolve_sve2.c") + list(APPEND AOM_AV1_ENCODER_INTRIN_SSE2 "${AOM_ROOT}/av1/encoder/x86/highbd_block_error_intrin_sse2.c" "${AOM_ROOT}/av1/encoder/x86/highbd_temporal_filter_sse2.c") @@ -605,6 +612,10 @@ function(setup_av1_targets) require_compiler_flag_nomsvc("-mssse3" NO) add_intrinsics_object_library("-mssse3" "ssse3" "aom_av1_common" "AOM_AV1_COMMON_INTRIN_SSSE3") + if(AOM_ARCH_X86) + add_intrinsics_object_library("-mssse3" "ssse3_x86" "aom_av1_common" + "AOM_AV1_COMMON_INTRIN_SSSE3_X86") + endif() if(CONFIG_AV1_DECODER) if(AOM_AV1_DECODER_INTRIN_SSSE3) @@ -703,6 +714,11 @@ function(setup_av1_targets) endif() endif() + if(HAVE_SVE2) + add_intrinsics_object_library("${AOM_SVE2_FLAG}" "sve2" "aom_av1_common" + "AOM_AV1_COMMON_INTRIN_SVE2") + endif() + if(HAVE_VSX) if(AOM_AV1_COMMON_INTRIN_VSX) add_intrinsics_object_library("-mvsx -maltivec" "vsx" "aom_av1_common" diff --git a/third_party/aom/av1/av1_cx_iface.c b/third_party/aom/av1/av1_cx_iface.c index 9214feb4e6..2b6b1504e6 100644 --- a/third_party/aom/av1/av1_cx_iface.c +++ b/third_party/aom/av1/av1_cx_iface.c @@ -9,22 +9,28 @@ * PATENTS file, you can obtain it at www.aomedia.org/license/patent. */ #include <limits.h> +#include <stdint.h> #include <stdlib.h> #include <string.h> -#include "aom_mem/aom_mem.h" #include "config/aom_config.h" #include "config/aom_version.h" -#include "aom_ports/mem_ops.h" - +#include "aom/aomcx.h" #include "aom/aom_encoder.h" +#include "aom/aom_external_partition.h" +#include "aom/aom_image.h" #include "aom/internal/aom_codec_internal.h" - #include "aom_dsp/flow_estimation/flow_estimation.h" +#include "aom_mem/aom_mem.h" +#include "aom_scale/yv12config.h" +#include "aom_util/aom_pthread.h" #include "av1/av1_cx_iface.h" #include "av1/av1_iface_common.h" +#include "av1/common/av1_common_int.h" +#include "av1/common/enums.h" +#include "av1/common/scale.h" #include "av1/encoder/bitstream.h" #include "av1/encoder/encoder.h" #include "av1/encoder/encoder_alloc.h" @@ -32,6 +38,7 @@ #include "av1/encoder/ethread.h" #include "av1/encoder/external_partition.h" #include "av1/encoder/firstpass.h" +#include "av1/encoder/lookahead.h" #include "av1/encoder/rc_utils.h" #include "av1/arg_defs.h" @@ -1836,6 +1843,11 @@ static aom_codec_err_t ctrl_set_enable_qm(aom_codec_alg_priv_t *ctx, va_list args) { struct av1_extracfg extra_cfg = ctx->extra_cfg; extra_cfg.enable_qm = CAST(AV1E_SET_ENABLE_QM, args); +#if !CONFIG_QUANT_MATRIX + if (extra_cfg.enable_qm) { + ERROR("QM can't be enabled with CONFIG_QUANT_MATRIX=0."); + } +#endif return update_extra_cfg(ctx, &extra_cfg); } static aom_codec_err_t ctrl_set_qm_y(aom_codec_alg_priv_t *ctx, va_list args) { @@ -3072,11 +3084,36 @@ static aom_codec_err_t encoder_encode(aom_codec_alg_priv_t *ctx, ctx->pts_offset = ptsvol; ctx->pts_offset_initialized = 1; } + if (ptsvol < ctx->pts_offset) { + aom_internal_error(&ppi->error, AOM_CODEC_INVALID_PARAM, + "pts is smaller than initial pts"); + } ptsvol -= ctx->pts_offset; + if (ptsvol > INT64_MAX / cpi_data.timestamp_ratio->num) { + aom_internal_error( + &ppi->error, AOM_CODEC_INVALID_PARAM, + "conversion of relative pts to ticks would overflow"); + } int64_t src_time_stamp = timebase_units_to_ticks(cpi_data.timestamp_ratio, ptsvol); +#if ULONG_MAX > INT64_MAX + if (duration > INT64_MAX) {
aom_internal_error(&ppi->error, AOM_CODEC_INVALID_PARAM, + "duration is too big"); + } +#endif + if (ptsvol > INT64_MAX - (int64_t)duration) { + aom_internal_error(&ppi->error, AOM_CODEC_INVALID_PARAM, + "relative pts + duration is too big"); + } + aom_codec_pts_t pts_end = ptsvol + (int64_t)duration; + if (pts_end > INT64_MAX / cpi_data.timestamp_ratio->num) { + aom_internal_error( + &ppi->error, AOM_CODEC_INVALID_PARAM, + "conversion of relative pts + duration to ticks would overflow"); + } int64_t src_end_time_stamp = - timebase_units_to_ticks(cpi_data.timestamp_ratio, ptsvol + duration); + timebase_units_to_ticks(cpi_data.timestamp_ratio, pts_end); YV12_BUFFER_CONFIG sd; res = image2yuvconfig(img, &sd); @@ -3110,7 +3147,7 @@ static aom_codec_err_t encoder_encode(aom_codec_alg_priv_t *ctx, subsampling_x, subsampling_y, use_highbitdepth, lag_in_frames, src_border_in_pixels, cpi->common.features.byte_alignment, ctx->num_lap_buffers, (cpi->oxcf.kf_cfg.key_freq_max == 0), - cpi->image_pyramid_levels); + cpi->alloc_pyramid); } if (!ppi->lookahead) aom_internal_error(&ppi->error, AOM_CODEC_MEM_ERROR, diff --git a/third_party/aom/av1/av1_dx_iface.c b/third_party/aom/av1/av1_dx_iface.c index 3d7e132ab8..1a2dea37b6 100644 --- a/third_party/aom/av1/av1_dx_iface.c +++ b/third_party/aom/av1/av1_dx_iface.c @@ -19,18 +19,23 @@ #include "aom/internal/aom_image_internal.h" #include "aom/aomdx.h" #include "aom/aom_decoder.h" +#include "aom/aom_image.h" #include "aom_dsp/bitreader_buffer.h" #include "aom_dsp/aom_dsp_common.h" +#include "aom_ports/mem.h" #include "aom_ports/mem_ops.h" +#include "aom_util/aom_pthread.h" #include "aom_util/aom_thread.h" #include "av1/common/alloccommon.h" +#include "av1/common/av1_common_int.h" #include "av1/common/frame_buffers.h" #include "av1/common/enums.h" #include "av1/common/obu_util.h" #include "av1/decoder/decoder.h" #include "av1/decoder/decodeframe.h" +#include "av1/decoder/dthread.h" #include "av1/decoder/grain_synthesis.h" #include "av1/decoder/obu.h" @@ -865,7 +870,9 @@ static aom_image_t *decoder_get_frame(aom_codec_alg_priv_t *ctx, if (pbi->ext_tile_debug && tiles->single_tile_decoding && pbi->dec_tile_row >= 0) { int tile_width, tile_height; - av1_get_uniform_tile_size(cm, &tile_width, &tile_height); + if (!av1_get_uniform_tile_size(cm, &tile_width, &tile_height)) { + return NULL; + } const int tile_row = AOMMIN(pbi->dec_tile_row, tiles->rows - 1); const int mi_row = tile_row * tile_height; const int ssy = ctx->img.y_chroma_shift; @@ -884,7 +891,9 @@ static aom_image_t *decoder_get_frame(aom_codec_alg_priv_t *ctx, if (pbi->ext_tile_debug && tiles->single_tile_decoding && pbi->dec_tile_col >= 0) { int tile_width, tile_height; - av1_get_uniform_tile_size(cm, &tile_width, &tile_height); + if (!av1_get_uniform_tile_size(cm, &tile_width, &tile_height)) { + return NULL; + } const int tile_col = AOMMIN(pbi->dec_tile_col, tiles->cols - 1); const int mi_col = tile_col * tile_width; const int ssx = ctx->img.x_chroma_shift; @@ -1428,7 +1437,9 @@ static aom_codec_err_t ctrl_get_tile_size(aom_codec_alg_priv_t *ctx, (FrameWorkerData *)worker->data1; const AV1_COMMON *const cm = &frame_worker_data->pbi->common; int tile_width, tile_height; - av1_get_uniform_tile_size(cm, &tile_width, &tile_height); + if (!av1_get_uniform_tile_size(cm, &tile_width, &tile_height)) { + return AOM_CODEC_CORRUPT_FRAME; + } *tile_size = ((tile_width * MI_SIZE) << 16) + tile_height * MI_SIZE; return AOM_CODEC_OK; } else { diff --git a/third_party/aom/av1/common/alloccommon.c 
b/third_party/aom/av1/common/alloccommon.c index 2a9a8beb40..e9a38c4a60 100644 --- a/third_party/aom/av1/common/alloccommon.c +++ b/third_party/aom/av1/common/alloccommon.c @@ -13,6 +13,8 @@ #include "config/aom_config.h" #include "aom_mem/aom_mem.h" +#include "aom_scale/yv12config.h" +#include "aom_util/aom_pthread.h" #include "av1/common/alloccommon.h" #include "av1/common/av1_common_int.h" @@ -20,6 +22,8 @@ #include "av1/common/cdef_block.h" #include "av1/common/entropymode.h" #include "av1/common/entropymv.h" +#include "av1/common/enums.h" +#include "av1/common/restoration.h" #include "av1/common/thread_common.h" int av1_get_MBs(int width, int height) { @@ -200,7 +204,7 @@ void av1_alloc_cdef_buffers(AV1_COMMON *const cm, const int is_num_workers_changed = cdef_info->allocated_num_workers != num_workers; const int is_cdef_enabled = - cm->seq_params->enable_cdef && !cm->tiles.large_scale; + cm->seq_params->enable_cdef && !cm->tiles.single_tile_decoding; // num-bufs=3 represents ping-pong buffers for top linebuf, // followed by bottom linebuf. diff --git a/third_party/aom/av1/common/arm/highbd_compound_convolve_neon.c b/third_party/aom/av1/common/arm/highbd_compound_convolve_neon.c index fc03a2ee04..9247ded6bf 100644 --- a/third_party/aom/av1/common/arm/highbd_compound_convolve_neon.c +++ b/third_party/aom/av1/common/arm/highbd_compound_convolve_neon.c @@ -20,266 +20,9 @@ #include "aom_ports/mem.h" #include "av1/common/convolve.h" #include "av1/common/filter.h" +#include "av1/common/arm/highbd_compound_convolve_neon.h" #include "av1/common/arm/highbd_convolve_neon.h" -#define ROUND_SHIFT 2 * FILTER_BITS - ROUND0_BITS - COMPOUND_ROUND1_BITS - -static INLINE void highbd_12_comp_avg_neon(const uint16_t *src_ptr, - int src_stride, uint16_t *dst_ptr, - int dst_stride, int w, int h, - ConvolveParams *conv_params, - const int offset, const int bd) { - CONV_BUF_TYPE *ref_ptr = conv_params->dst; - const int ref_stride = conv_params->dst_stride; - const uint16x4_t offset_vec = vdup_n_u16(offset); - const uint16x8_t max = vdupq_n_u16((1 << bd) - 1); - - if (w == 4) { - do { - const uint16x4_t src = vld1_u16(src_ptr); - const uint16x4_t ref = vld1_u16(ref_ptr); - - uint16x4_t avg = vhadd_u16(src, ref); - int32x4_t d0 = vreinterpretq_s32_u32(vsubl_u16(avg, offset_vec)); - - uint16x4_t d0_u16 = vqrshrun_n_s32(d0, ROUND_SHIFT - 2); - d0_u16 = vmin_u16(d0_u16, vget_low_u16(max)); - - vst1_u16(dst_ptr, d0_u16); - - src_ptr += src_stride; - ref_ptr += ref_stride; - dst_ptr += dst_stride; - } while (--h != 0); - } else { - do { - int width = w; - const uint16_t *src = src_ptr; - const uint16_t *ref = ref_ptr; - uint16_t *dst = dst_ptr; - do { - const uint16x8_t s = vld1q_u16(src); - const uint16x8_t r = vld1q_u16(ref); - - uint16x8_t avg = vhaddq_u16(s, r); - int32x4_t d0_lo = - vreinterpretq_s32_u32(vsubl_u16(vget_low_u16(avg), offset_vec)); - int32x4_t d0_hi = - vreinterpretq_s32_u32(vsubl_u16(vget_high_u16(avg), offset_vec)); - - uint16x8_t d0 = vcombine_u16(vqrshrun_n_s32(d0_lo, ROUND_SHIFT - 2), - vqrshrun_n_s32(d0_hi, ROUND_SHIFT - 2)); - d0 = vminq_u16(d0, max); - vst1q_u16(dst, d0); - - src += 8; - ref += 8; - dst += 8; - width -= 8; - } while (width != 0); - - src_ptr += src_stride; - ref_ptr += ref_stride; - dst_ptr += dst_stride; - } while (--h != 0); - } -} - -static INLINE void highbd_comp_avg_neon(const uint16_t *src_ptr, int src_stride, - uint16_t *dst_ptr, int dst_stride, - int w, int h, - ConvolveParams *conv_params, - const int offset, const int bd) { - CONV_BUF_TYPE *ref_ptr = 
conv_params->dst; - const int ref_stride = conv_params->dst_stride; - const uint16x4_t offset_vec = vdup_n_u16(offset); - const uint16x8_t max = vdupq_n_u16((1 << bd) - 1); - - if (w == 4) { - do { - const uint16x4_t src = vld1_u16(src_ptr); - const uint16x4_t ref = vld1_u16(ref_ptr); - - uint16x4_t avg = vhadd_u16(src, ref); - int32x4_t d0 = vreinterpretq_s32_u32(vsubl_u16(avg, offset_vec)); - - uint16x4_t d0_u16 = vqrshrun_n_s32(d0, ROUND_SHIFT); - d0_u16 = vmin_u16(d0_u16, vget_low_u16(max)); - - vst1_u16(dst_ptr, d0_u16); - - src_ptr += src_stride; - ref_ptr += ref_stride; - dst_ptr += dst_stride; - } while (--h != 0); - } else { - do { - int width = w; - const uint16_t *src = src_ptr; - const uint16_t *ref = ref_ptr; - uint16_t *dst = dst_ptr; - do { - const uint16x8_t s = vld1q_u16(src); - const uint16x8_t r = vld1q_u16(ref); - - uint16x8_t avg = vhaddq_u16(s, r); - int32x4_t d0_lo = - vreinterpretq_s32_u32(vsubl_u16(vget_low_u16(avg), offset_vec)); - int32x4_t d0_hi = - vreinterpretq_s32_u32(vsubl_u16(vget_high_u16(avg), offset_vec)); - - uint16x8_t d0 = vcombine_u16(vqrshrun_n_s32(d0_lo, ROUND_SHIFT), - vqrshrun_n_s32(d0_hi, ROUND_SHIFT)); - d0 = vminq_u16(d0, max); - vst1q_u16(dst, d0); - - src += 8; - ref += 8; - dst += 8; - width -= 8; - } while (width != 0); - - src_ptr += src_stride; - ref_ptr += ref_stride; - dst_ptr += dst_stride; - } while (--h != 0); - } -} - -static INLINE void highbd_12_dist_wtd_comp_avg_neon( - const uint16_t *src_ptr, int src_stride, uint16_t *dst_ptr, int dst_stride, - int w, int h, ConvolveParams *conv_params, const int offset, const int bd) { - CONV_BUF_TYPE *ref_ptr = conv_params->dst; - const int ref_stride = conv_params->dst_stride; - const uint32x4_t offset_vec = vdupq_n_u32(offset); - const uint16x8_t max = vdupq_n_u16((1 << bd) - 1); - uint16x4_t fwd_offset = vdup_n_u16(conv_params->fwd_offset); - uint16x4_t bck_offset = vdup_n_u16(conv_params->bck_offset); - - // Weighted averaging - if (w == 4) { - do { - const uint16x4_t src = vld1_u16(src_ptr); - const uint16x4_t ref = vld1_u16(ref_ptr); - - uint32x4_t wtd_avg = vmull_u16(ref, fwd_offset); - wtd_avg = vmlal_u16(wtd_avg, src, bck_offset); - wtd_avg = vshrq_n_u32(wtd_avg, DIST_PRECISION_BITS); - int32x4_t d0 = vreinterpretq_s32_u32(vsubq_u32(wtd_avg, offset_vec)); - - uint16x4_t d0_u16 = vqrshrun_n_s32(d0, ROUND_SHIFT - 2); - d0_u16 = vmin_u16(d0_u16, vget_low_u16(max)); - - vst1_u16(dst_ptr, d0_u16); - - src_ptr += src_stride; - dst_ptr += dst_stride; - ref_ptr += ref_stride; - } while (--h != 0); - } else { - do { - int width = w; - const uint16_t *src = src_ptr; - const uint16_t *ref = ref_ptr; - uint16_t *dst = dst_ptr; - do { - const uint16x8_t s = vld1q_u16(src); - const uint16x8_t r = vld1q_u16(ref); - - uint32x4_t wtd_avg0 = vmull_u16(vget_low_u16(r), fwd_offset); - wtd_avg0 = vmlal_u16(wtd_avg0, vget_low_u16(s), bck_offset); - wtd_avg0 = vshrq_n_u32(wtd_avg0, DIST_PRECISION_BITS); - int32x4_t d0 = vreinterpretq_s32_u32(vsubq_u32(wtd_avg0, offset_vec)); - - uint32x4_t wtd_avg1 = vmull_u16(vget_high_u16(r), fwd_offset); - wtd_avg1 = vmlal_u16(wtd_avg1, vget_high_u16(s), bck_offset); - wtd_avg1 = vshrq_n_u32(wtd_avg1, DIST_PRECISION_BITS); - int32x4_t d1 = vreinterpretq_s32_u32(vsubq_u32(wtd_avg1, offset_vec)); - - uint16x8_t d01 = vcombine_u16(vqrshrun_n_s32(d0, ROUND_SHIFT - 2), - vqrshrun_n_s32(d1, ROUND_SHIFT - 2)); - d01 = vminq_u16(d01, max); - vst1q_u16(dst, d01); - - src += 8; - ref += 8; - dst += 8; - width -= 8; - } while (width != 0); - src_ptr += src_stride; - dst_ptr += 
dst_stride; - ref_ptr += ref_stride; - } while (--h != 0); - } -} - -static INLINE void highbd_dist_wtd_comp_avg_neon( - const uint16_t *src_ptr, int src_stride, uint16_t *dst_ptr, int dst_stride, - int w, int h, ConvolveParams *conv_params, const int offset, const int bd) { - CONV_BUF_TYPE *ref_ptr = conv_params->dst; - const int ref_stride = conv_params->dst_stride; - const uint32x4_t offset_vec = vdupq_n_u32(offset); - const uint16x8_t max = vdupq_n_u16((1 << bd) - 1); - uint16x4_t fwd_offset = vdup_n_u16(conv_params->fwd_offset); - uint16x4_t bck_offset = vdup_n_u16(conv_params->bck_offset); - - // Weighted averaging - if (w == 4) { - do { - const uint16x4_t src = vld1_u16(src_ptr); - const uint16x4_t ref = vld1_u16(ref_ptr); - - uint32x4_t wtd_avg = vmull_u16(ref, fwd_offset); - wtd_avg = vmlal_u16(wtd_avg, src, bck_offset); - wtd_avg = vshrq_n_u32(wtd_avg, DIST_PRECISION_BITS); - int32x4_t d0 = vreinterpretq_s32_u32(vsubq_u32(wtd_avg, offset_vec)); - - uint16x4_t d0_u16 = vqrshrun_n_s32(d0, ROUND_SHIFT); - d0_u16 = vmin_u16(d0_u16, vget_low_u16(max)); - - vst1_u16(dst_ptr, d0_u16); - - src_ptr += src_stride; - dst_ptr += dst_stride; - ref_ptr += ref_stride; - } while (--h != 0); - } else { - do { - int width = w; - const uint16_t *src = src_ptr; - const uint16_t *ref = ref_ptr; - uint16_t *dst = dst_ptr; - do { - const uint16x8_t s = vld1q_u16(src); - const uint16x8_t r = vld1q_u16(ref); - - uint32x4_t wtd_avg0 = vmull_u16(vget_low_u16(r), fwd_offset); - wtd_avg0 = vmlal_u16(wtd_avg0, vget_low_u16(s), bck_offset); - wtd_avg0 = vshrq_n_u32(wtd_avg0, DIST_PRECISION_BITS); - int32x4_t d0 = vreinterpretq_s32_u32(vsubq_u32(wtd_avg0, offset_vec)); - - uint32x4_t wtd_avg1 = vmull_u16(vget_high_u16(r), fwd_offset); - wtd_avg1 = vmlal_u16(wtd_avg1, vget_high_u16(s), bck_offset); - wtd_avg1 = vshrq_n_u32(wtd_avg1, DIST_PRECISION_BITS); - int32x4_t d1 = vreinterpretq_s32_u32(vsubq_u32(wtd_avg1, offset_vec)); - - uint16x8_t d01 = vcombine_u16(vqrshrun_n_s32(d0, ROUND_SHIFT), - vqrshrun_n_s32(d1, ROUND_SHIFT)); - d01 = vminq_u16(d01, max); - vst1q_u16(dst, d01); - - src += 8; - ref += 8; - dst += 8; - width -= 8; - } while (width != 0); - src_ptr += src_stride; - dst_ptr += dst_stride; - ref_ptr += ref_stride; - } while (--h != 0); - } -} - static INLINE uint16x4_t highbd_12_convolve6_4( const int16x4_t s0, const int16x4_t s1, const int16x4_t s2, const int16x4_t s3, const int16x4_t s4, const int16x4_t s5, @@ -743,9 +486,6 @@ void av1_highbd_dist_wtd_convolve_x_neon( const int im_stride = MAX_SB_SIZE; const int horiz_offset = filter_params_x->taps / 2 - 1; assert(FILTER_BITS == COMPOUND_ROUND1_BITS); - const int offset_bits = bd + 2 * FILTER_BITS - conv_params->round_0; - const int offset_avg = (1 << (offset_bits - conv_params->round_1)) + - (1 << (offset_bits - conv_params->round_1 - 1)); const int offset_convolve = (1 << (conv_params->round_0 - 1)) + (1 << (bd + FILTER_BITS)) + (1 << (bd + FILTER_BITS - 1)); @@ -768,10 +508,10 @@ void av1_highbd_dist_wtd_convolve_x_neon( } if (conv_params->use_dist_wtd_comp_avg) { highbd_12_dist_wtd_comp_avg_neon(im_block, im_stride, dst, dst_stride, - w, h, conv_params, offset_avg, bd); + w, h, conv_params); } else { highbd_12_comp_avg_neon(im_block, im_stride, dst, dst_stride, w, h, - conv_params, offset_avg, bd); + conv_params); } } else { if (x_filter_taps <= 6 && w != 4) { @@ -795,10 +535,10 @@ void av1_highbd_dist_wtd_convolve_x_neon( } if (conv_params->use_dist_wtd_comp_avg) { highbd_dist_wtd_comp_avg_neon(im_block, im_stride, dst, dst_stride, w, - h, 
conv_params, offset_avg, bd); + h, conv_params, bd); } else { highbd_comp_avg_neon(im_block, im_stride, dst, dst_stride, w, h, - conv_params, offset_avg, bd); + conv_params, bd); } } else { if (x_filter_taps <= 6 && w != 4) { @@ -971,6 +711,212 @@ static INLINE void highbd_dist_wtd_convolve_y_6tap_neon( } } +static INLINE uint16x4_t highbd_12_convolve4_4( + const int16x4_t s0, const int16x4_t s1, const int16x4_t s2, + const int16x4_t s3, const int16x4_t filter, const int32x4_t offset) { + int32x4_t sum = vmlal_lane_s16(offset, s0, filter, 0); + sum = vmlal_lane_s16(sum, s1, filter, 1); + sum = vmlal_lane_s16(sum, s2, filter, 2); + sum = vmlal_lane_s16(sum, s3, filter, 3); + + return vqshrun_n_s32(sum, ROUND0_BITS + 2); +} + +static INLINE uint16x8_t highbd_12_convolve4_8( + const int16x8_t s0, const int16x8_t s1, const int16x8_t s2, + const int16x8_t s3, const int16x4_t filter, const int32x4_t offset) { + int32x4_t sum0 = vmlal_lane_s16(offset, vget_low_s16(s0), filter, 0); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s1), filter, 1); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s2), filter, 2); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s3), filter, 3); + + int32x4_t sum1 = vmlal_lane_s16(offset, vget_high_s16(s0), filter, 0); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s1), filter, 1); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s2), filter, 2); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s3), filter, 3); + + return vcombine_u16(vqshrun_n_s32(sum0, ROUND0_BITS + 2), + vqshrun_n_s32(sum1, ROUND0_BITS + 2)); +} + +static INLINE void highbd_12_dist_wtd_convolve_y_4tap_neon( + const uint16_t *src_ptr, int src_stride, uint16_t *dst_ptr, int dst_stride, + int w, int h, const int16_t *y_filter_ptr, const int offset) { + const int16x4_t y_filter = vld1_s16(y_filter_ptr + 2); + const int32x4_t offset_vec = vdupq_n_s32(offset); + + if (w == 4) { + const int16_t *s = (const int16_t *)src_ptr; + uint16_t *d = dst_ptr; + + int16x4_t s0, s1, s2; + load_s16_4x3(s, src_stride, &s0, &s1, &s2); + s += 3 * src_stride; + + do { + int16x4_t s3, s4, s5, s6; + load_s16_4x4(s, src_stride, &s3, &s4, &s5, &s6); + + uint16x4_t d0 = + highbd_12_convolve4_4(s0, s1, s2, s3, y_filter, offset_vec); + uint16x4_t d1 = + highbd_12_convolve4_4(s1, s2, s3, s4, y_filter, offset_vec); + uint16x4_t d2 = + highbd_12_convolve4_4(s2, s3, s4, s5, y_filter, offset_vec); + uint16x4_t d3 = + highbd_12_convolve4_4(s3, s4, s5, s6, y_filter, offset_vec); + + store_u16_4x4(d, dst_stride, d0, d1, d2, d3); + + s0 = s4; + s1 = s5; + s2 = s6; + + s += 4 * src_stride; + d += 4 * dst_stride; + h -= 4; + } while (h != 0); + } else { + do { + int height = h; + const int16_t *s = (const int16_t *)src_ptr; + uint16_t *d = dst_ptr; + + int16x8_t s0, s1, s2; + load_s16_8x3(s, src_stride, &s0, &s1, &s2); + s += 3 * src_stride; + + do { + int16x8_t s3, s4, s5, s6; + load_s16_8x4(s, src_stride, &s3, &s4, &s5, &s6); + + uint16x8_t d0 = + highbd_12_convolve4_8(s0, s1, s2, s3, y_filter, offset_vec); + uint16x8_t d1 = + highbd_12_convolve4_8(s1, s2, s3, s4, y_filter, offset_vec); + uint16x8_t d2 = + highbd_12_convolve4_8(s2, s3, s4, s5, y_filter, offset_vec); + uint16x8_t d3 = + highbd_12_convolve4_8(s3, s4, s5, s6, y_filter, offset_vec); + + store_u16_8x4(d, dst_stride, d0, d1, d2, d3); + + s0 = s4; + s1 = s5; + s2 = s6; + + s += 4 * src_stride; + d += 4 * dst_stride; + height -= 4; + } while (height != 0); + src_ptr += 8; + dst_ptr += 8; + w -= 8; + } while (w != 0); + } +} + +static INLINE uint16x4_t highbd_convolve4_4( + const int16x4_t s0, const int16x4_t 
s1, const int16x4_t s2, + const int16x4_t s3, const int16x4_t filter, const int32x4_t offset) { + int32x4_t sum = vmlal_lane_s16(offset, s0, filter, 0); + sum = vmlal_lane_s16(sum, s1, filter, 1); + sum = vmlal_lane_s16(sum, s2, filter, 2); + sum = vmlal_lane_s16(sum, s3, filter, 3); + + return vqshrun_n_s32(sum, ROUND0_BITS); +} + +static INLINE uint16x8_t highbd_convolve4_8( + const int16x8_t s0, const int16x8_t s1, const int16x8_t s2, + const int16x8_t s3, const int16x4_t filter, const int32x4_t offset) { + int32x4_t sum0 = vmlal_lane_s16(offset, vget_low_s16(s0), filter, 0); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s1), filter, 1); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s2), filter, 2); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s3), filter, 3); + + int32x4_t sum1 = vmlal_lane_s16(offset, vget_high_s16(s0), filter, 0); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s1), filter, 1); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s2), filter, 2); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s3), filter, 3); + + return vcombine_u16(vqshrun_n_s32(sum0, ROUND0_BITS), + vqshrun_n_s32(sum1, ROUND0_BITS)); +} + +static INLINE void highbd_dist_wtd_convolve_y_4tap_neon( + const uint16_t *src_ptr, int src_stride, uint16_t *dst_ptr, int dst_stride, + int w, int h, const int16_t *y_filter_ptr, const int offset) { + const int16x4_t y_filter = vld1_s16(y_filter_ptr + 2); + const int32x4_t offset_vec = vdupq_n_s32(offset); + + if (w == 4) { + const int16_t *s = (const int16_t *)src_ptr; + uint16_t *d = dst_ptr; + + int16x4_t s0, s1, s2; + load_s16_4x3(s, src_stride, &s0, &s1, &s2); + s += 3 * src_stride; + + do { + int16x4_t s3, s4, s5, s6; + load_s16_4x4(s, src_stride, &s3, &s4, &s5, &s6); + + uint16x4_t d0 = highbd_convolve4_4(s0, s1, s2, s3, y_filter, offset_vec); + uint16x4_t d1 = highbd_convolve4_4(s1, s2, s3, s4, y_filter, offset_vec); + uint16x4_t d2 = highbd_convolve4_4(s2, s3, s4, s5, y_filter, offset_vec); + uint16x4_t d3 = highbd_convolve4_4(s3, s4, s5, s6, y_filter, offset_vec); + + store_u16_4x4(d, dst_stride, d0, d1, d2, d3); + + s0 = s4; + s1 = s5; + s2 = s6; + + s += 4 * src_stride; + d += 4 * dst_stride; + h -= 4; + } while (h != 0); + } else { + do { + int height = h; + const int16_t *s = (const int16_t *)src_ptr; + uint16_t *d = dst_ptr; + + int16x8_t s0, s1, s2; + load_s16_8x3(s, src_stride, &s0, &s1, &s2); + s += 3 * src_stride; + + do { + int16x8_t s3, s4, s5, s6; + load_s16_8x4(s, src_stride, &s3, &s4, &s5, &s6); + + uint16x8_t d0 = + highbd_convolve4_8(s0, s1, s2, s3, y_filter, offset_vec); + uint16x8_t d1 = + highbd_convolve4_8(s1, s2, s3, s4, y_filter, offset_vec); + uint16x8_t d2 = + highbd_convolve4_8(s2, s3, s4, s5, y_filter, offset_vec); + uint16x8_t d3 = + highbd_convolve4_8(s3, s4, s5, s6, y_filter, offset_vec); + + store_u16_8x4(d, dst_stride, d0, d1, d2, d3); + + s0 = s4; + s1 = s5; + s2 = s6; + + s += 4 * src_stride; + d += 4 * dst_stride; + height -= 4; + } while (height != 0); + src_ptr += 8; + dst_ptr += 8; + w -= 8; + } while (w != 0); + } +} + static INLINE void highbd_12_dist_wtd_convolve_y_8tap_neon( const uint16_t *src_ptr, int src_stride, uint16_t *dst_ptr, int dst_stride, int w, int h, const int16_t *y_filter_ptr, const int offset) { @@ -1148,9 +1094,6 @@ void av1_highbd_dist_wtd_convolve_y_neon( const int im_stride = MAX_SB_SIZE; const int vert_offset = filter_params_y->taps / 2 - 1; assert(FILTER_BITS == COMPOUND_ROUND1_BITS); - const int offset_bits = bd + 2 * FILTER_BITS - conv_params->round_0; - const int round_offset_avg = (1 << (offset_bits - 
conv_params->round_1)) + - (1 << (offset_bits - conv_params->round_1 - 1)); const int round_offset_conv = (1 << (conv_params->round_0 - 1)) + (1 << (bd + FILTER_BITS)) + (1 << (bd + FILTER_BITS - 1)); @@ -1162,7 +1105,11 @@ void av1_highbd_dist_wtd_convolve_y_neon( if (bd == 12) { if (conv_params->do_average) { - if (y_filter_taps <= 6) { + if (y_filter_taps <= 4) { + highbd_12_dist_wtd_convolve_y_4tap_neon( + src + 2 * src_stride, src_stride, im_block, im_stride, w, h, + y_filter_ptr, round_offset_conv); + } else if (y_filter_taps == 6) { highbd_12_dist_wtd_convolve_y_6tap_neon( src + src_stride, src_stride, im_block, im_stride, w, h, y_filter_ptr, round_offset_conv); @@ -1173,14 +1120,17 @@ void av1_highbd_dist_wtd_convolve_y_neon( } if (conv_params->use_dist_wtd_comp_avg) { highbd_12_dist_wtd_comp_avg_neon(im_block, im_stride, dst, dst_stride, - w, h, conv_params, round_offset_avg, - bd); + w, h, conv_params); } else { highbd_12_comp_avg_neon(im_block, im_stride, dst, dst_stride, w, h, - conv_params, round_offset_avg, bd); + conv_params); } } else { - if (y_filter_taps <= 6) { + if (y_filter_taps <= 4) { + highbd_12_dist_wtd_convolve_y_4tap_neon( + src + 2 * src_stride, src_stride, dst16, dst16_stride, w, h, + y_filter_ptr, round_offset_conv); + } else if (y_filter_taps == 6) { highbd_12_dist_wtd_convolve_y_6tap_neon( src + src_stride, src_stride, dst16, dst16_stride, w, h, y_filter_ptr, round_offset_conv); @@ -1192,7 +1142,11 @@ void av1_highbd_dist_wtd_convolve_y_neon( } } else { if (conv_params->do_average) { - if (y_filter_taps <= 6) { + if (y_filter_taps <= 4) { + highbd_dist_wtd_convolve_y_4tap_neon(src + 2 * src_stride, src_stride, + im_block, im_stride, w, h, + y_filter_ptr, round_offset_conv); + } else if (y_filter_taps == 6) { highbd_dist_wtd_convolve_y_6tap_neon(src + src_stride, src_stride, im_block, im_stride, w, h, y_filter_ptr, round_offset_conv); @@ -1203,13 +1157,17 @@ void av1_highbd_dist_wtd_convolve_y_neon( } if (conv_params->use_dist_wtd_comp_avg) { highbd_dist_wtd_comp_avg_neon(im_block, im_stride, dst, dst_stride, w, - h, conv_params, round_offset_avg, bd); + h, conv_params, bd); } else { highbd_comp_avg_neon(im_block, im_stride, dst, dst_stride, w, h, - conv_params, round_offset_avg, bd); + conv_params, bd); } } else { - if (y_filter_taps <= 6) { + if (y_filter_taps <= 4) { + highbd_dist_wtd_convolve_y_4tap_neon(src + 2 * src_stride, src_stride, + dst16, dst16_stride, w, h, + y_filter_ptr, round_offset_conv); + } else if (y_filter_taps == 6) { highbd_dist_wtd_convolve_y_6tap_neon(src + src_stride, src_stride, dst16, dst16_stride, w, h, y_filter_ptr, round_offset_conv); @@ -1285,18 +1243,18 @@ void av1_highbd_dist_wtd_convolve_2d_copy_neon(const uint16_t *src, if (conv_params->use_dist_wtd_comp_avg) { if (bd == 12) { highbd_12_dist_wtd_comp_avg_neon(im_block, im_stride, dst, dst_stride, - w, h, conv_params, round_offset, bd); + w, h, conv_params); } else { highbd_dist_wtd_comp_avg_neon(im_block, im_stride, dst, dst_stride, w, - h, conv_params, round_offset, bd); + h, conv_params, bd); } } else { if (bd == 12) { highbd_12_comp_avg_neon(im_block, im_stride, dst, dst_stride, w, h, - conv_params, round_offset, bd); + conv_params); } else { highbd_comp_avg_neon(im_block, im_stride, dst, dst_stride, w, h, - conv_params, round_offset, bd); + conv_params, bd); } } } @@ -1949,9 +1907,6 @@ void av1_highbd_dist_wtd_convolve_2d_neon( (1 << (bd + FILTER_BITS - 1)) + (1 << (conv_params->round_0 - 1)); const int y_offset_bits = bd + 2 * FILTER_BITS - conv_params->round_0; const 
int round_offset_conv_y = (1 << y_offset_bits); - const int round_offset_avg = - ((1 << (y_offset_bits - conv_params->round_1)) + - (1 << (y_offset_bits - conv_params->round_1 - 1))); const uint16_t *src_ptr = src - vert_offset * src_stride - horiz_offset; @@ -2012,19 +1967,18 @@ void av1_highbd_dist_wtd_convolve_2d_neon( if (conv_params->use_dist_wtd_comp_avg) { if (bd == 12) { highbd_12_dist_wtd_comp_avg_neon(im_block2, im_stride, dst, dst_stride, - w, h, conv_params, round_offset_avg, - bd); + w, h, conv_params); } else { highbd_dist_wtd_comp_avg_neon(im_block2, im_stride, dst, dst_stride, w, - h, conv_params, round_offset_avg, bd); + h, conv_params, bd); } } else { if (bd == 12) { highbd_12_comp_avg_neon(im_block2, im_stride, dst, dst_stride, w, h, - conv_params, round_offset_avg, bd); + conv_params); } else { highbd_comp_avg_neon(im_block2, im_stride, dst, dst_stride, w, h, - conv_params, round_offset_avg, bd); + conv_params, bd); } } } diff --git a/third_party/aom/av1/common/arm/highbd_compound_convolve_neon.h b/third_party/aom/av1/common/arm/highbd_compound_convolve_neon.h new file mode 100644 index 0000000000..c9344f3adf --- /dev/null +++ b/third_party/aom/av1/common/arm/highbd_compound_convolve_neon.h @@ -0,0 +1,293 @@ +/* + * Copyright (c) 2024, Alliance for Open Media. All rights reserved + * + * This source code is subject to the terms of the BSD 2 Clause License and + * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License + * was not distributed with this source code in the LICENSE file, you can + * obtain it at www.aomedia.org/license/software. If the Alliance for Open + * Media Patent License 1.0 was not distributed with this source code in the + * PATENTS file, you can obtain it at www.aomedia.org/license/patent. 
+ */ + +#include <arm_neon.h> +#include <assert.h> + +#include "config/aom_config.h" +#include "config/av1_rtcd.h" + +#include "aom_dsp/aom_dsp_common.h" +#include "aom_dsp/arm/mem_neon.h" +#include "aom_ports/mem.h" + +#define ROUND_SHIFT 2 * FILTER_BITS - ROUND0_BITS - COMPOUND_ROUND1_BITS + +static INLINE void highbd_12_comp_avg_neon(const uint16_t *src_ptr, + int src_stride, uint16_t *dst_ptr, + int dst_stride, int w, int h, + ConvolveParams *conv_params) { + const int offset_bits = 12 + 2 * FILTER_BITS - ROUND0_BITS - 2; + const int offset = (1 << (offset_bits - COMPOUND_ROUND1_BITS)) + + (1 << (offset_bits - COMPOUND_ROUND1_BITS - 1)); + + CONV_BUF_TYPE *ref_ptr = conv_params->dst; + const int ref_stride = conv_params->dst_stride; + const uint16x4_t offset_vec = vdup_n_u16((uint16_t)offset); + const uint16x8_t max = vdupq_n_u16((1 << 12) - 1); + + if (w == 4) { + do { + const uint16x4_t src = vld1_u16(src_ptr); + const uint16x4_t ref = vld1_u16(ref_ptr); + + uint16x4_t avg = vhadd_u16(src, ref); + int32x4_t d0 = vreinterpretq_s32_u32(vsubl_u16(avg, offset_vec)); + + uint16x4_t d0_u16 = vqrshrun_n_s32(d0, ROUND_SHIFT - 2); + d0_u16 = vmin_u16(d0_u16, vget_low_u16(max)); + + vst1_u16(dst_ptr, d0_u16); + + src_ptr += src_stride; + ref_ptr += ref_stride; + dst_ptr += dst_stride; + } while (--h != 0); + } else { + do { + int width = w; + const uint16_t *src = src_ptr; + const uint16_t *ref = ref_ptr; + uint16_t *dst = dst_ptr; + do { + const uint16x8_t s = vld1q_u16(src); + const uint16x8_t r = vld1q_u16(ref); + + uint16x8_t avg = vhaddq_u16(s, r); + int32x4_t d0_lo = + vreinterpretq_s32_u32(vsubl_u16(vget_low_u16(avg), offset_vec)); + int32x4_t d0_hi = + vreinterpretq_s32_u32(vsubl_u16(vget_high_u16(avg), offset_vec)); + + uint16x8_t d0 = vcombine_u16(vqrshrun_n_s32(d0_lo, ROUND_SHIFT - 2), + vqrshrun_n_s32(d0_hi, ROUND_SHIFT - 2)); + d0 = vminq_u16(d0, max); + vst1q_u16(dst, d0); + + src += 8; + ref += 8; + dst += 8; + width -= 8; + } while (width != 0); + + src_ptr += src_stride; + ref_ptr += ref_stride; + dst_ptr += dst_stride; + } while (--h != 0); + } +} + +static INLINE void highbd_comp_avg_neon(const uint16_t *src_ptr, int src_stride, + uint16_t *dst_ptr, int dst_stride, + int w, int h, + ConvolveParams *conv_params, + const int bd) { + const int offset_bits = bd + 2 * FILTER_BITS - ROUND0_BITS; + const int offset = (1 << (offset_bits - COMPOUND_ROUND1_BITS)) + + (1 << (offset_bits - COMPOUND_ROUND1_BITS - 1)); + + CONV_BUF_TYPE *ref_ptr = conv_params->dst; + const int ref_stride = conv_params->dst_stride; + const uint16x4_t offset_vec = vdup_n_u16((uint16_t)offset); + const uint16x8_t max = vdupq_n_u16((1 << bd) - 1); + + if (w == 4) { + do { + const uint16x4_t src = vld1_u16(src_ptr); + const uint16x4_t ref = vld1_u16(ref_ptr); + + uint16x4_t avg = vhadd_u16(src, ref); + int32x4_t d0 = vreinterpretq_s32_u32(vsubl_u16(avg, offset_vec)); + + uint16x4_t d0_u16 = vqrshrun_n_s32(d0, ROUND_SHIFT); + d0_u16 = vmin_u16(d0_u16, vget_low_u16(max)); + + vst1_u16(dst_ptr, d0_u16); + + src_ptr += src_stride; + ref_ptr += ref_stride; + dst_ptr += dst_stride; + } while (--h != 0); + } else { + do { + int width = w; + const uint16_t *src = src_ptr; + const uint16_t *ref = ref_ptr; + uint16_t *dst = dst_ptr; + do { + const uint16x8_t s = vld1q_u16(src); + const uint16x8_t r = vld1q_u16(ref); + + uint16x8_t avg = vhaddq_u16(s, r); + int32x4_t d0_lo = + vreinterpretq_s32_u32(vsubl_u16(vget_low_u16(avg), offset_vec)); + int32x4_t d0_hi = + vreinterpretq_s32_u32(vsubl_u16(vget_high_u16(avg), offset_vec)); + + uint16x8_t d0
= vcombine_u16(vqrshrun_n_s32(d0_lo, ROUND_SHIFT), + vqrshrun_n_s32(d0_hi, ROUND_SHIFT)); + d0 = vminq_u16(d0, max); + vst1q_u16(dst, d0); + + src += 8; + ref += 8; + dst += 8; + width -= 8; + } while (width != 0); + + src_ptr += src_stride; + ref_ptr += ref_stride; + dst_ptr += dst_stride; + } while (--h != 0); + } +} + +static INLINE void highbd_12_dist_wtd_comp_avg_neon( + const uint16_t *src_ptr, int src_stride, uint16_t *dst_ptr, int dst_stride, + int w, int h, ConvolveParams *conv_params) { + const int offset_bits = 12 + 2 * FILTER_BITS - ROUND0_BITS - 2; + const int offset = (1 << (offset_bits - COMPOUND_ROUND1_BITS)) + + (1 << (offset_bits - COMPOUND_ROUND1_BITS - 1)); + + CONV_BUF_TYPE *ref_ptr = conv_params->dst; + const int ref_stride = conv_params->dst_stride; + const uint32x4_t offset_vec = vdupq_n_u32(offset); + const uint16x8_t max = vdupq_n_u16((1 << 12) - 1); + uint16x4_t fwd_offset = vdup_n_u16(conv_params->fwd_offset); + uint16x4_t bck_offset = vdup_n_u16(conv_params->bck_offset); + + // Weighted averaging + if (w == 4) { + do { + const uint16x4_t src = vld1_u16(src_ptr); + const uint16x4_t ref = vld1_u16(ref_ptr); + + uint32x4_t wtd_avg = vmull_u16(ref, fwd_offset); + wtd_avg = vmlal_u16(wtd_avg, src, bck_offset); + wtd_avg = vshrq_n_u32(wtd_avg, DIST_PRECISION_BITS); + int32x4_t d0 = vreinterpretq_s32_u32(vsubq_u32(wtd_avg, offset_vec)); + + uint16x4_t d0_u16 = vqrshrun_n_s32(d0, ROUND_SHIFT - 2); + d0_u16 = vmin_u16(d0_u16, vget_low_u16(max)); + + vst1_u16(dst_ptr, d0_u16); + + src_ptr += src_stride; + dst_ptr += dst_stride; + ref_ptr += ref_stride; + } while (--h != 0); + } else { + do { + int width = w; + const uint16_t *src = src_ptr; + const uint16_t *ref = ref_ptr; + uint16_t *dst = dst_ptr; + do { + const uint16x8_t s = vld1q_u16(src); + const uint16x8_t r = vld1q_u16(ref); + + uint32x4_t wtd_avg0 = vmull_u16(vget_low_u16(r), fwd_offset); + wtd_avg0 = vmlal_u16(wtd_avg0, vget_low_u16(s), bck_offset); + wtd_avg0 = vshrq_n_u32(wtd_avg0, DIST_PRECISION_BITS); + int32x4_t d0 = vreinterpretq_s32_u32(vsubq_u32(wtd_avg0, offset_vec)); + + uint32x4_t wtd_avg1 = vmull_u16(vget_high_u16(r), fwd_offset); + wtd_avg1 = vmlal_u16(wtd_avg1, vget_high_u16(s), bck_offset); + wtd_avg1 = vshrq_n_u32(wtd_avg1, DIST_PRECISION_BITS); + int32x4_t d1 = vreinterpretq_s32_u32(vsubq_u32(wtd_avg1, offset_vec)); + + uint16x8_t d01 = vcombine_u16(vqrshrun_n_s32(d0, ROUND_SHIFT - 2), + vqrshrun_n_s32(d1, ROUND_SHIFT - 2)); + d01 = vminq_u16(d01, max); + vst1q_u16(dst, d01); + + src += 8; + ref += 8; + dst += 8; + width -= 8; + } while (width != 0); + src_ptr += src_stride; + dst_ptr += dst_stride; + ref_ptr += ref_stride; + } while (--h != 0); + } +} + +static INLINE void highbd_dist_wtd_comp_avg_neon( + const uint16_t *src_ptr, int src_stride, uint16_t *dst_ptr, int dst_stride, + int w, int h, ConvolveParams *conv_params, const int bd) { + const int offset_bits = bd + 2 * FILTER_BITS - ROUND0_BITS; + const int offset = (1 << (offset_bits - COMPOUND_ROUND1_BITS)) + + (1 << (offset_bits - COMPOUND_ROUND1_BITS - 1)); + + CONV_BUF_TYPE *ref_ptr = conv_params->dst; + const int ref_stride = conv_params->dst_stride; + const uint32x4_t offset_vec = vdupq_n_u32(offset); + const uint16x8_t max = vdupq_n_u16((1 << bd) - 1); + uint16x4_t fwd_offset = vdup_n_u16(conv_params->fwd_offset); + uint16x4_t bck_offset = vdup_n_u16(conv_params->bck_offset); + + // Weighted averaging + if (w == 4) { + do { + const uint16x4_t src = vld1_u16(src_ptr); + const uint16x4_t ref = vld1_u16(ref_ptr); + + uint32x4_t 
wtd_avg = vmull_u16(ref, fwd_offset); + wtd_avg = vmlal_u16(wtd_avg, src, bck_offset); + wtd_avg = vshrq_n_u32(wtd_avg, DIST_PRECISION_BITS); + int32x4_t d0 = vreinterpretq_s32_u32(vsubq_u32(wtd_avg, offset_vec)); + + uint16x4_t d0_u16 = vqrshrun_n_s32(d0, ROUND_SHIFT); + d0_u16 = vmin_u16(d0_u16, vget_low_u16(max)); + + vst1_u16(dst_ptr, d0_u16); + + src_ptr += src_stride; + dst_ptr += dst_stride; + ref_ptr += ref_stride; + } while (--h != 0); + } else { + do { + int width = w; + const uint16_t *src = src_ptr; + const uint16_t *ref = ref_ptr; + uint16_t *dst = dst_ptr; + do { + const uint16x8_t s = vld1q_u16(src); + const uint16x8_t r = vld1q_u16(ref); + + uint32x4_t wtd_avg0 = vmull_u16(vget_low_u16(r), fwd_offset); + wtd_avg0 = vmlal_u16(wtd_avg0, vget_low_u16(s), bck_offset); + wtd_avg0 = vshrq_n_u32(wtd_avg0, DIST_PRECISION_BITS); + int32x4_t d0 = vreinterpretq_s32_u32(vsubq_u32(wtd_avg0, offset_vec)); + + uint32x4_t wtd_avg1 = vmull_u16(vget_high_u16(r), fwd_offset); + wtd_avg1 = vmlal_u16(wtd_avg1, vget_high_u16(s), bck_offset); + wtd_avg1 = vshrq_n_u32(wtd_avg1, DIST_PRECISION_BITS); + int32x4_t d1 = vreinterpretq_s32_u32(vsubq_u32(wtd_avg1, offset_vec)); + + uint16x8_t d01 = vcombine_u16(vqrshrun_n_s32(d0, ROUND_SHIFT), + vqrshrun_n_s32(d1, ROUND_SHIFT)); + d01 = vminq_u16(d01, max); + vst1q_u16(dst, d01); + + src += 8; + ref += 8; + dst += 8; + width -= 8; + } while (width != 0); + src_ptr += src_stride; + dst_ptr += dst_stride; + ref_ptr += ref_stride; + } while (--h != 0); + } +} diff --git a/third_party/aom/av1/common/arm/highbd_compound_convolve_sve2.c b/third_party/aom/av1/common/arm/highbd_compound_convolve_sve2.c new file mode 100644 index 0000000000..1d6c9b4faf --- /dev/null +++ b/third_party/aom/av1/common/arm/highbd_compound_convolve_sve2.c @@ -0,0 +1,1555 @@ +/* + * Copyright (c) 2024, Alliance for Open Media. All rights reserved + * + * This source code is subject to the terms of the BSD 2 Clause License and + * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License + * was not distributed with this source code in the LICENSE file, you can + * obtain it at www.aomedia.org/license/software. If the Alliance for Open + * Media Patent License 1.0 was not distributed with this source code in the + * PATENTS file, you can obtain it at www.aomedia.org/license/patent. 
+ */ + +#include <arm_neon.h> +#include <assert.h> + +#include "config/aom_config.h" +#include "config/av1_rtcd.h" + +#include "aom_dsp/aom_dsp_common.h" +#include "aom_dsp/arm/aom_neon_sve_bridge.h" +#include "aom_dsp/arm/aom_neon_sve2_bridge.h" +#include "aom_dsp/arm/mem_neon.h" +#include "aom_ports/mem.h" +#include "av1/common/convolve.h" +#include "av1/common/filter.h" +#include "av1/common/filter.h" +#include "av1/common/arm/highbd_compound_convolve_neon.h" +#include "av1/common/arm/highbd_convolve_neon.h" +#include "av1/common/arm/highbd_convolve_sve2.h" + +DECLARE_ALIGNED(16, static const uint16_t, kDotProdTbl[32]) = { + 0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6, + 4, 5, 6, 7, 5, 6, 7, 0, 6, 7, 0, 1, 7, 0, 1, 2, +}; + +static INLINE uint16x8_t highbd_12_convolve8_8_x(int16x8_t s0[8], + int16x8_t filter, + int64x2_t offset) { + int64x2_t sum[8]; + sum[0] = aom_sdotq_s16(offset, s0[0], filter); + sum[1] = aom_sdotq_s16(offset, s0[1], filter); + sum[2] = aom_sdotq_s16(offset, s0[2], filter); + sum[3] = aom_sdotq_s16(offset, s0[3], filter); + sum[4] = aom_sdotq_s16(offset, s0[4], filter); + sum[5] = aom_sdotq_s16(offset, s0[5], filter); + sum[6] = aom_sdotq_s16(offset, s0[6], filter); + sum[7] = aom_sdotq_s16(offset, s0[7], filter); + + sum[0] = vpaddq_s64(sum[0], sum[1]); + sum[2] = vpaddq_s64(sum[2], sum[3]); + sum[4] = vpaddq_s64(sum[4], sum[5]); + sum[6] = vpaddq_s64(sum[6], sum[7]); + + int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum[0]), vmovn_s64(sum[2])); + int32x4_t sum4567 = vcombine_s32(vmovn_s64(sum[4]), vmovn_s64(sum[6])); + + return vcombine_u16(vqrshrun_n_s32(sum0123, ROUND0_BITS + 2), + vqrshrun_n_s32(sum4567, ROUND0_BITS + 2)); +} + +static INLINE void highbd_12_dist_wtd_convolve_x_8tap_sve2( + const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride, + int width, int height, const int16_t *x_filter_ptr) { + const int64x1_t offset_vec = + vcreate_s64((1 << (12 + FILTER_BITS)) + (1 << (12 + FILTER_BITS - 1))); + const int64x2_t offset_lo = vcombine_s64(offset_vec, vdup_n_s64(0)); + + const int16x8_t filter = vld1q_s16(x_filter_ptr); + + do { + const int16_t *s = (const int16_t *)src; + uint16_t *d = dst; + int w = width; + + do { + int16x8_t s0[8], s1[8], s2[8], s3[8]; + load_s16_8x8(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3], + &s0[4], &s0[5], &s0[6], &s0[7]); + load_s16_8x8(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3], + &s1[4], &s1[5], &s1[6], &s1[7]); + load_s16_8x8(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3], + &s2[4], &s2[5], &s2[6], &s2[7]); + load_s16_8x8(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3], + &s3[4], &s3[5], &s3[6], &s3[7]); + + uint16x8_t d0 = highbd_12_convolve8_8_x(s0, filter, offset_lo); + uint16x8_t d1 = highbd_12_convolve8_8_x(s1, filter, offset_lo); + uint16x8_t d2 = highbd_12_convolve8_8_x(s2, filter, offset_lo); + uint16x8_t d3 = highbd_12_convolve8_8_x(s3, filter, offset_lo); + + store_u16_8x4(d, dst_stride, d0, d1, d2, d3); + + s += 8; + d += 8; + w -= 8; + } while (w != 0); + src += 4 * src_stride; + dst += 4 * dst_stride; + height -= 4; + } while (height != 0); +} + +static INLINE uint16x8_t highbd_convolve8_8_x(int16x8_t s0[8], int16x8_t filter, + int64x2_t offset) { + int64x2_t sum[8]; + sum[0] = aom_sdotq_s16(offset, s0[0], filter); + sum[1] = aom_sdotq_s16(offset, s0[1], filter); + sum[2] = aom_sdotq_s16(offset, s0[2], filter); + sum[3] = aom_sdotq_s16(offset, s0[3], filter); + sum[4] = aom_sdotq_s16(offset, s0[4], filter); + sum[5] = aom_sdotq_s16(offset, s0[5], filter); + sum[6] = aom_sdotq_s16(offset,
s0[6], filter); + sum[7] = aom_sdotq_s16(offset, s0[7], filter); + + sum[0] = vpaddq_s64(sum[0], sum[1]); + sum[2] = vpaddq_s64(sum[2], sum[3]); + sum[4] = vpaddq_s64(sum[4], sum[5]); + sum[6] = vpaddq_s64(sum[6], sum[7]); + + int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum[0]), vmovn_s64(sum[2])); + int32x4_t sum4567 = vcombine_s32(vmovn_s64(sum[4]), vmovn_s64(sum[6])); + + return vcombine_u16(vqrshrun_n_s32(sum0123, ROUND0_BITS), + vqrshrun_n_s32(sum4567, ROUND0_BITS)); +} + +static INLINE void highbd_dist_wtd_convolve_x_8tap_sve2( + const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride, + int width, int height, const int16_t *x_filter_ptr, const int bd) { + const int64x1_t offset_vec = + vcreate_s64((1 << (bd + FILTER_BITS)) + (1 << (bd + FILTER_BITS - 1))); + const int64x2_t offset_lo = vcombine_s64(offset_vec, vdup_n_s64(0)); + + const int16x8_t filter = vld1q_s16(x_filter_ptr); + + do { + const int16_t *s = (const int16_t *)src; + uint16_t *d = dst; + int w = width; + + do { + int16x8_t s0[8], s1[8], s2[8], s3[8]; + load_s16_8x8(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3], + &s0[4], &s0[5], &s0[6], &s0[7]); + load_s16_8x8(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3], + &s1[4], &s1[5], &s1[6], &s1[7]); + load_s16_8x8(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3], + &s2[4], &s2[5], &s2[6], &s2[7]); + load_s16_8x8(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3], + &s3[4], &s3[5], &s3[6], &s3[7]); + + uint16x8_t d0 = highbd_convolve8_8_x(s0, filter, offset_lo); + uint16x8_t d1 = highbd_convolve8_8_x(s1, filter, offset_lo); + uint16x8_t d2 = highbd_convolve8_8_x(s2, filter, offset_lo); + uint16x8_t d3 = highbd_convolve8_8_x(s3, filter, offset_lo); + + store_u16_8x4(d, dst_stride, d0, d1, d2, d3); + + s += 8; + d += 8; + w -= 8; + } while (w != 0); + src += 4 * src_stride; + dst += 4 * dst_stride; + height -= 4; + } while (height != 0); +} + +// clang-format off +DECLARE_ALIGNED(16, static const uint16_t, kDeinterleaveTbl[8]) = { + 0, 2, 4, 6, 1, 3, 5, 7, +}; +// clang-format on + +static INLINE uint16x4_t highbd_12_convolve4_4_x(int16x8_t s0, int16x8_t filter, + int64x2_t offset, + uint16x8x2_t permute_tbl) { + int16x8_t permuted_samples0 = aom_tbl_s16(s0, permute_tbl.val[0]); + int16x8_t permuted_samples1 = aom_tbl_s16(s0, permute_tbl.val[1]); + + int64x2_t sum01 = aom_svdot_lane_s16(offset, permuted_samples0, filter, 0); + int64x2_t sum23 = aom_svdot_lane_s16(offset, permuted_samples1, filter, 0); + + int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23)); + + return vqrshrun_n_s32(sum0123, ROUND0_BITS + 2); +} + +static INLINE uint16x8_t highbd_12_convolve4_8_x(int16x8_t s0[4], + int16x8_t filter, + int64x2_t offset, + uint16x8_t tbl) { + int64x2_t sum04 = aom_svdot_lane_s16(offset, s0[0], filter, 0); + int64x2_t sum15 = aom_svdot_lane_s16(offset, s0[1], filter, 0); + int64x2_t sum26 = aom_svdot_lane_s16(offset, s0[2], filter, 0); + int64x2_t sum37 = aom_svdot_lane_s16(offset, s0[3], filter, 0); + + int32x4_t sum0415 = vcombine_s32(vmovn_s64(sum04), vmovn_s64(sum15)); + int32x4_t sum2637 = vcombine_s32(vmovn_s64(sum26), vmovn_s64(sum37)); + + uint16x8_t res = vcombine_u16(vqrshrun_n_s32(sum0415, ROUND0_BITS + 2), + vqrshrun_n_s32(sum2637, ROUND0_BITS + 2)); + return aom_tbl_u16(res, tbl); +} + +static INLINE void highbd_12_dist_wtd_convolve_x_4tap_sve2( + const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride, + int width, int height, const int16_t *x_filter_ptr) { + const int64x2_t offset = + vdupq_n_s64((1 << (12 
+ FILTER_BITS)) + (1 << (12 + FILTER_BITS - 1))); + + const int16x4_t x_filter = vld1_s16(x_filter_ptr + 2); + const int16x8_t filter = vcombine_s16(x_filter, vdup_n_s16(0)); + + if (width == 4) { + uint16x8x2_t permute_tbl = vld1q_u16_x2(kDotProdTbl); + + const int16_t *s = (const int16_t *)(src); + + do { + int16x8_t s0, s1, s2, s3; + load_s16_8x4(s, src_stride, &s0, &s1, &s2, &s3); + + uint16x4_t d0 = highbd_12_convolve4_4_x(s0, filter, offset, permute_tbl); + uint16x4_t d1 = highbd_12_convolve4_4_x(s1, filter, offset, permute_tbl); + uint16x4_t d2 = highbd_12_convolve4_4_x(s2, filter, offset, permute_tbl); + uint16x4_t d3 = highbd_12_convolve4_4_x(s3, filter, offset, permute_tbl); + + store_u16_4x4(dst, dst_stride, d0, d1, d2, d3); + + s += 4 * src_stride; + dst += 4 * dst_stride; + height -= 4; + } while (height != 0); + } else { + uint16x8_t idx = vld1q_u16(kDeinterleaveTbl); + + do { + const int16_t *s = (const int16_t *)(src); + uint16_t *d = dst; + int w = width; + + do { + int16x8_t s0[4], s1[4], s2[4], s3[4]; + load_s16_8x4(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3]); + load_s16_8x4(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3]); + load_s16_8x4(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3]); + load_s16_8x4(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3]); + + uint16x8_t d0 = highbd_12_convolve4_8_x(s0, filter, offset, idx); + uint16x8_t d1 = highbd_12_convolve4_8_x(s1, filter, offset, idx); + uint16x8_t d2 = highbd_12_convolve4_8_x(s2, filter, offset, idx); + uint16x8_t d3 = highbd_12_convolve4_8_x(s3, filter, offset, idx); + + store_u16_8x4(d, dst_stride, d0, d1, d2, d3); + + s += 8; + d += 8; + w -= 8; + } while (w != 0); + src += 4 * src_stride; + dst += 4 * dst_stride; + height -= 4; + } while (height != 0); + } +} + +static INLINE uint16x4_t highbd_convolve4_4_x(int16x8_t s0, int16x8_t filter, + int64x2_t offset, + uint16x8x2_t permute_tbl) { + int16x8_t permuted_samples0 = aom_tbl_s16(s0, permute_tbl.val[0]); + int16x8_t permuted_samples1 = aom_tbl_s16(s0, permute_tbl.val[1]); + + int64x2_t sum01 = aom_svdot_lane_s16(offset, permuted_samples0, filter, 0); + int64x2_t sum23 = aom_svdot_lane_s16(offset, permuted_samples1, filter, 0); + + int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23)); + + return vqrshrun_n_s32(sum0123, ROUND0_BITS); +} + +static INLINE uint16x8_t highbd_convolve4_8_x(int16x8_t s0[4], int16x8_t filter, + int64x2_t offset, + uint16x8_t tbl) { + int64x2_t sum04 = aom_svdot_lane_s16(offset, s0[0], filter, 0); + int64x2_t sum15 = aom_svdot_lane_s16(offset, s0[1], filter, 0); + int64x2_t sum26 = aom_svdot_lane_s16(offset, s0[2], filter, 0); + int64x2_t sum37 = aom_svdot_lane_s16(offset, s0[3], filter, 0); + + int32x4_t sum0415 = vcombine_s32(vmovn_s64(sum04), vmovn_s64(sum15)); + int32x4_t sum2637 = vcombine_s32(vmovn_s64(sum26), vmovn_s64(sum37)); + + uint16x8_t res = vcombine_u16(vqrshrun_n_s32(sum0415, ROUND0_BITS), + vqrshrun_n_s32(sum2637, ROUND0_BITS)); + return aom_tbl_u16(res, tbl); +} + +static INLINE void highbd_dist_wtd_convolve_x_4tap_sve2( + const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride, + int width, int height, const int16_t *x_filter_ptr, const int bd) { + const int64x2_t offset = + vdupq_n_s64((1 << (bd + FILTER_BITS)) + (1 << (bd + FILTER_BITS - 1))); + + const int16x4_t x_filter = vld1_s16(x_filter_ptr + 2); + const int16x8_t filter = vcombine_s16(x_filter, vdup_n_s16(0)); + + if (width == 4) { + uint16x8x2_t permute_tbl = vld1q_u16_x2(kDotProdTbl); + + const int16_t *s = 
(const int16_t *)(src); + + do { + int16x8_t s0, s1, s2, s3; + load_s16_8x4(s, src_stride, &s0, &s1, &s2, &s3); + + uint16x4_t d0 = highbd_convolve4_4_x(s0, filter, offset, permute_tbl); + uint16x4_t d1 = highbd_convolve4_4_x(s1, filter, offset, permute_tbl); + uint16x4_t d2 = highbd_convolve4_4_x(s2, filter, offset, permute_tbl); + uint16x4_t d3 = highbd_convolve4_4_x(s3, filter, offset, permute_tbl); + + store_u16_4x4(dst, dst_stride, d0, d1, d2, d3); + + s += 4 * src_stride; + dst += 4 * dst_stride; + height -= 4; + } while (height != 0); + } else { + uint16x8_t idx = vld1q_u16(kDeinterleaveTbl); + + do { + const int16_t *s = (const int16_t *)(src); + uint16_t *d = dst; + int w = width; + + do { + int16x8_t s0[4], s1[4], s2[4], s3[4]; + load_s16_8x4(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3]); + load_s16_8x4(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3]); + load_s16_8x4(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3]); + load_s16_8x4(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3]); + + uint16x8_t d0 = highbd_convolve4_8_x(s0, filter, offset, idx); + uint16x8_t d1 = highbd_convolve4_8_x(s1, filter, offset, idx); + uint16x8_t d2 = highbd_convolve4_8_x(s2, filter, offset, idx); + uint16x8_t d3 = highbd_convolve4_8_x(s3, filter, offset, idx); + + store_u16_8x4(d, dst_stride, d0, d1, d2, d3); + + s += 8; + d += 8; + w -= 8; + } while (w != 0); + src += 4 * src_stride; + dst += 4 * dst_stride; + height -= 4; + } while (height != 0); + } +} + +void av1_highbd_dist_wtd_convolve_x_sve2( + const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride, int w, + int h, const InterpFilterParams *filter_params_x, const int subpel_x_qn, + ConvolveParams *conv_params, int bd) { + DECLARE_ALIGNED(16, uint16_t, + im_block[(MAX_SB_SIZE + MAX_FILTER_TAP) * MAX_SB_SIZE]); + CONV_BUF_TYPE *dst16 = conv_params->dst; + const int x_filter_taps = get_filter_tap(filter_params_x, subpel_x_qn); + + if (x_filter_taps == 6) { + av1_highbd_dist_wtd_convolve_x_neon(src, src_stride, dst, dst_stride, w, h, + filter_params_x, subpel_x_qn, + conv_params, bd); + return; + } + + int dst16_stride = conv_params->dst_stride; + const int im_stride = MAX_SB_SIZE; + const int horiz_offset = filter_params_x->taps / 2 - 1; + assert(FILTER_BITS == COMPOUND_ROUND1_BITS); + + const int16_t *x_filter_ptr = av1_get_interp_filter_subpel_kernel( + filter_params_x, subpel_x_qn & SUBPEL_MASK); + + src -= horiz_offset; + + if (bd == 12) { + if (conv_params->do_average) { + if (x_filter_taps <= 4) { + highbd_12_dist_wtd_convolve_x_4tap_sve2(src + 2, src_stride, im_block, + im_stride, w, h, x_filter_ptr); + } else { + highbd_12_dist_wtd_convolve_x_8tap_sve2(src, src_stride, im_block, + im_stride, w, h, x_filter_ptr); + } + + if (conv_params->use_dist_wtd_comp_avg) { + highbd_12_dist_wtd_comp_avg_neon(im_block, im_stride, dst, dst_stride, + w, h, conv_params); + + } else { + highbd_12_comp_avg_neon(im_block, im_stride, dst, dst_stride, w, h, + conv_params); + } + } else { + if (x_filter_taps <= 4) { + highbd_12_dist_wtd_convolve_x_4tap_sve2( + src + 2, src_stride, dst16, dst16_stride, w, h, x_filter_ptr); + } else { + highbd_12_dist_wtd_convolve_x_8tap_sve2( + src, src_stride, dst16, dst16_stride, w, h, x_filter_ptr); + } + } + } else { + if (conv_params->do_average) { + if (x_filter_taps <= 4) { + highbd_dist_wtd_convolve_x_4tap_sve2(src + 2, src_stride, im_block, + im_stride, w, h, x_filter_ptr, bd); + } else { + highbd_dist_wtd_convolve_x_8tap_sve2(src, src_stride, im_block, + im_stride, w, h, 
x_filter_ptr, bd); + } + + if (conv_params->use_dist_wtd_comp_avg) { + highbd_dist_wtd_comp_avg_neon(im_block, im_stride, dst, dst_stride, w, + h, conv_params, bd); + } else { + highbd_comp_avg_neon(im_block, im_stride, dst, dst_stride, w, h, + conv_params, bd); + } + } else { + if (x_filter_taps <= 4) { + highbd_dist_wtd_convolve_x_4tap_sve2( + src + 2, src_stride, dst16, dst16_stride, w, h, x_filter_ptr, bd); + } else { + highbd_dist_wtd_convolve_x_8tap_sve2( + src, src_stride, dst16, dst16_stride, w, h, x_filter_ptr, bd); + } + } + } +} + +static INLINE uint16x4_t highbd_12_convolve8_4_y(int16x8_t samples_lo[2], + int16x8_t samples_hi[2], + int16x8_t filter, + int64x2_t offset) { + int64x2_t sum01 = aom_svdot_lane_s16(offset, samples_lo[0], filter, 0); + sum01 = aom_svdot_lane_s16(sum01, samples_hi[0], filter, 1); + + int64x2_t sum23 = aom_svdot_lane_s16(offset, samples_lo[1], filter, 0); + sum23 = aom_svdot_lane_s16(sum23, samples_hi[1], filter, 1); + + int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23)); + + return vqrshrun_n_s32(sum0123, ROUND0_BITS + 2); +} + +static INLINE uint16x8_t highbd_12_convolve8_8_y(int16x8_t samples_lo[4], + int16x8_t samples_hi[4], + int16x8_t filter, + int64x2_t offset) { + int64x2_t sum01 = aom_svdot_lane_s16(offset, samples_lo[0], filter, 0); + sum01 = aom_svdot_lane_s16(sum01, samples_hi[0], filter, 1); + + int64x2_t sum23 = aom_svdot_lane_s16(offset, samples_lo[1], filter, 0); + sum23 = aom_svdot_lane_s16(sum23, samples_hi[1], filter, 1); + + int64x2_t sum45 = aom_svdot_lane_s16(offset, samples_lo[2], filter, 0); + sum45 = aom_svdot_lane_s16(sum45, samples_hi[2], filter, 1); + + int64x2_t sum67 = aom_svdot_lane_s16(offset, samples_lo[3], filter, 0); + sum67 = aom_svdot_lane_s16(sum67, samples_hi[3], filter, 1); + + int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23)); + int32x4_t sum4567 = vcombine_s32(vmovn_s64(sum45), vmovn_s64(sum67)); + + return vcombine_u16(vqrshrun_n_s32(sum0123, ROUND0_BITS + 2), + vqrshrun_n_s32(sum4567, ROUND0_BITS + 2)); +} + +static INLINE void highbd_12_dist_wtd_convolve_y_8tap_sve2( + const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride, + int width, int height, const int16_t *y_filter_ptr) { + const int64x2_t offset = + vdupq_n_s64((1 << (12 + FILTER_BITS)) + (1 << (12 + FILTER_BITS - 1))); + const int16x8_t y_filter = vld1q_s16(y_filter_ptr); + + uint16x8x3_t merge_block_tbl = vld1q_u16_x3(kDotProdMergeBlockTbl); + // Scale indices by size of the true vector length to avoid reading from an + // 'undefined' portion of a vector on a system with SVE vectors > 128-bit. + uint16x8_t correction0 = + vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000000000000ULL)); + merge_block_tbl.val[0] = vaddq_u16(merge_block_tbl.val[0], correction0); + uint16x8_t correction1 = + vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000100000000ULL)); + merge_block_tbl.val[1] = vaddq_u16(merge_block_tbl.val[1], correction1); + + uint16x8_t correction2 = + vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000100010000ULL)); + merge_block_tbl.val[2] = vaddq_u16(merge_block_tbl.val[2], correction2); + + if (width == 4) { + int16_t *s = (int16_t *)src; + int16x4_t s0, s1, s2, s3, s4, s5, s6; + load_s16_4x7(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6); + s += 7 * src_stride; + + // This operation combines a conventional transpose and the sample permute + // required before computing the dot product. 
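+ // As an illustration (assuming the layout implied by the helper name):
+ // given rows s0..s3 of four samples each, transpose_concat_4x4 packs
+ // s0123[0] = { s0[0], s1[0], s2[0], s3[0], s0[1], s1[1], s2[1], s3[1] }
+ // and the remaining two columns into s0123[1], so each group of four
+ // consecutive lanes holds the four rows of one output column, ready for
+ // a single 4-lane dot product against four filter taps.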
+ int16x8_t s0123[2], s1234[2], s2345[2], s3456[2]; + transpose_concat_4x4(s0, s1, s2, s3, s0123); + transpose_concat_4x4(s1, s2, s3, s4, s1234); + transpose_concat_4x4(s2, s3, s4, s5, s2345); + transpose_concat_4x4(s3, s4, s5, s6, s3456); + + do { + int16x4_t s7, s8, s9, s10; + load_s16_4x4(s, src_stride, &s7, &s8, &s9, &s10); + + int16x8_t s4567[2], s5678[2], s6789[2], s789A[2]; + // Transpose and shuffle the 4 lines that were loaded. + transpose_concat_4x4(s7, s8, s9, s10, s789A); + + // Merge new data into block from previous iteration. + aom_tbl2x2_s16(s3456, s789A, merge_block_tbl.val[0], s4567); + aom_tbl2x2_s16(s3456, s789A, merge_block_tbl.val[1], s5678); + aom_tbl2x2_s16(s3456, s789A, merge_block_tbl.val[2], s6789); + + uint16x4_t d0 = highbd_12_convolve8_4_y(s0123, s4567, y_filter, offset); + uint16x4_t d1 = highbd_12_convolve8_4_y(s1234, s5678, y_filter, offset); + uint16x4_t d2 = highbd_12_convolve8_4_y(s2345, s6789, y_filter, offset); + uint16x4_t d3 = highbd_12_convolve8_4_y(s3456, s789A, y_filter, offset); + + store_u16_4x4(dst, dst_stride, d0, d1, d2, d3); + + // Prepare block for next iteration - re-using as much as possible. + // Shuffle everything up four rows. + s0123[0] = s4567[0]; + s0123[1] = s4567[1]; + s1234[0] = s5678[0]; + s1234[1] = s5678[1]; + s2345[0] = s6789[0]; + s2345[1] = s6789[1]; + s3456[0] = s789A[0]; + s3456[1] = s789A[1]; + + s += 4 * src_stride; + dst += 4 * dst_stride; + height -= 4; + } while (height != 0); + } else { + do { + int h = height; + int16_t *s = (int16_t *)src; + uint16_t *d = dst; + + int16x8_t s0, s1, s2, s3, s4, s5, s6; + load_s16_8x7(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6); + s += 7 * src_stride; + + // This operation combines a conventional transpose and the sample permute + // required before computing the dot product. + int16x8_t s0123[4], s1234[4], s2345[4], s3456[4]; + transpose_concat_8x4(s0, s1, s2, s3, s0123); + transpose_concat_8x4(s1, s2, s3, s4, s1234); + transpose_concat_8x4(s2, s3, s4, s5, s2345); + transpose_concat_8x4(s3, s4, s5, s6, s3456); + + do { + int16x8_t s7, s8, s9, s10; + load_s16_8x4(s, src_stride, &s7, &s8, &s9, &s10); + int16x8_t s4567[4], s5678[4], s6789[4], s789A[4]; + + // Transpose and shuffle the 4 lines that were loaded. + transpose_concat_8x4(s7, s8, s9, s10, s789A); + + // Merge new data into block from previous iteration. + aom_tbl2x4_s16(s3456, s789A, merge_block_tbl.val[0], s4567); + aom_tbl2x4_s16(s3456, s789A, merge_block_tbl.val[1], s5678); + aom_tbl2x4_s16(s3456, s789A, merge_block_tbl.val[2], s6789); + + uint16x8_t d0 = highbd_12_convolve8_8_y(s0123, s4567, y_filter, offset); + uint16x8_t d1 = highbd_12_convolve8_8_y(s1234, s5678, y_filter, offset); + uint16x8_t d2 = highbd_12_convolve8_8_y(s2345, s6789, y_filter, offset); + uint16x8_t d3 = highbd_12_convolve8_8_y(s3456, s789A, y_filter, offset); + + store_u16_8x4(d, dst_stride, d0, d1, d2, d3); + + // Prepare block for next iteration - re-using as much as possible. + // Shuffle everything up four rows. 
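+ // (An 8-tap vertical filter needs 7 rows of history, so each iteration
+ // loads only 4 new rows and carries the other 3 forward in registers.)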
+ s0123[0] = s4567[0]; + s0123[1] = s4567[1]; + s0123[2] = s4567[2]; + s0123[3] = s4567[3]; + s1234[0] = s5678[0]; + s1234[1] = s5678[1]; + s1234[2] = s5678[2]; + s1234[3] = s5678[3]; + s2345[0] = s6789[0]; + s2345[1] = s6789[1]; + s2345[2] = s6789[2]; + s2345[3] = s6789[3]; + s3456[0] = s789A[0]; + s3456[1] = s789A[1]; + s3456[2] = s789A[2]; + s3456[3] = s789A[3]; + + s += 4 * src_stride; + d += 4 * dst_stride; + h -= 4; + } while (h != 0); + src += 8; + dst += 8; + width -= 8; + } while (width != 0); + } +} + +static INLINE uint16x4_t highbd_convolve8_4_y(int16x8_t samples_lo[2], + int16x8_t samples_hi[2], + int16x8_t filter, + int64x2_t offset) { + int64x2_t sum01 = aom_svdot_lane_s16(offset, samples_lo[0], filter, 0); + sum01 = aom_svdot_lane_s16(sum01, samples_hi[0], filter, 1); + + int64x2_t sum23 = aom_svdot_lane_s16(offset, samples_lo[1], filter, 0); + sum23 = aom_svdot_lane_s16(sum23, samples_hi[1], filter, 1); + + int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23)); + + return vqrshrun_n_s32(sum0123, ROUND0_BITS); +} + +static INLINE uint16x8_t highbd_convolve8_8_y(int16x8_t samples_lo[4], + int16x8_t samples_hi[4], + int16x8_t filter, + int64x2_t offset) { + int64x2_t sum01 = aom_svdot_lane_s16(offset, samples_lo[0], filter, 0); + sum01 = aom_svdot_lane_s16(sum01, samples_hi[0], filter, 1); + + int64x2_t sum23 = aom_svdot_lane_s16(offset, samples_lo[1], filter, 0); + sum23 = aom_svdot_lane_s16(sum23, samples_hi[1], filter, 1); + + int64x2_t sum45 = aom_svdot_lane_s16(offset, samples_lo[2], filter, 0); + sum45 = aom_svdot_lane_s16(sum45, samples_hi[2], filter, 1); + + int64x2_t sum67 = aom_svdot_lane_s16(offset, samples_lo[3], filter, 0); + sum67 = aom_svdot_lane_s16(sum67, samples_hi[3], filter, 1); + + int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23)); + int32x4_t sum4567 = vcombine_s32(vmovn_s64(sum45), vmovn_s64(sum67)); + + return vcombine_u16(vqrshrun_n_s32(sum0123, ROUND0_BITS), + vqrshrun_n_s32(sum4567, ROUND0_BITS)); +} + +static INLINE void highbd_dist_wtd_convolve_y_8tap_sve2( + const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride, + int width, int height, const int16_t *y_filter_ptr, const int bd) { + const int64x2_t offset = + vdupq_n_s64((1 << (bd + FILTER_BITS)) + (1 << (bd + FILTER_BITS - 1))); + const int16x8_t y_filter = vld1q_s16(y_filter_ptr); + + uint16x8x3_t merge_block_tbl = vld1q_u16_x3(kDotProdMergeBlockTbl); + // Scale indices by size of the true vector length to avoid reading from an + // 'undefined' portion of a vector on a system with SVE vectors > 128-bit. + uint16x8_t correction0 = + vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000000000000ULL)); + merge_block_tbl.val[0] = vaddq_u16(merge_block_tbl.val[0], correction0); + uint16x8_t correction1 = + vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000100000000ULL)); + merge_block_tbl.val[1] = vaddq_u16(merge_block_tbl.val[1], correction1); + + uint16x8_t correction2 = + vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000100010000ULL)); + merge_block_tbl.val[2] = vaddq_u16(merge_block_tbl.val[2], correction2); + + if (width == 4) { + int16_t *s = (int16_t *)src; + int16x4_t s0, s1, s2, s3, s4, s5, s6; + load_s16_4x7(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6); + s += 7 * src_stride; + + // This operation combines a conventional transpose and the sample permute + // required before computing the dot product. 
+ int16x8_t s0123[2], s1234[2], s2345[2], s3456[2]; + transpose_concat_4x4(s0, s1, s2, s3, s0123); + transpose_concat_4x4(s1, s2, s3, s4, s1234); + transpose_concat_4x4(s2, s3, s4, s5, s2345); + transpose_concat_4x4(s3, s4, s5, s6, s3456); + + do { + int16x4_t s7, s8, s9, s10; + load_s16_4x4(s, src_stride, &s7, &s8, &s9, &s10); + + int16x8_t s4567[2], s5678[2], s6789[2], s789A[2]; + // Transpose and shuffle the 4 lines that were loaded. + transpose_concat_4x4(s7, s8, s9, s10, s789A); + + // Merge new data into block from previous iteration. + aom_tbl2x2_s16(s3456, s789A, merge_block_tbl.val[0], s4567); + aom_tbl2x2_s16(s3456, s789A, merge_block_tbl.val[1], s5678); + aom_tbl2x2_s16(s3456, s789A, merge_block_tbl.val[2], s6789); + + uint16x4_t d0 = highbd_convolve8_4_y(s0123, s4567, y_filter, offset); + uint16x4_t d1 = highbd_convolve8_4_y(s1234, s5678, y_filter, offset); + uint16x4_t d2 = highbd_convolve8_4_y(s2345, s6789, y_filter, offset); + uint16x4_t d3 = highbd_convolve8_4_y(s3456, s789A, y_filter, offset); + + store_u16_4x4(dst, dst_stride, d0, d1, d2, d3); + + // Prepare block for next iteration - re-using as much as possible. + // Shuffle everything up four rows. + s0123[0] = s4567[0]; + s0123[1] = s4567[1]; + s1234[0] = s5678[0]; + s1234[1] = s5678[1]; + s2345[0] = s6789[0]; + s2345[1] = s6789[1]; + s3456[0] = s789A[0]; + s3456[1] = s789A[1]; + + s += 4 * src_stride; + dst += 4 * dst_stride; + height -= 4; + } while (height != 0); + } else { + do { + int h = height; + int16_t *s = (int16_t *)src; + uint16_t *d = dst; + + int16x8_t s0, s1, s2, s3, s4, s5, s6; + load_s16_8x7(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6); + s += 7 * src_stride; + + // This operation combines a conventional transpose and the sample permute + // required before computing the dot product. + int16x8_t s0123[4], s1234[4], s2345[4], s3456[4]; + transpose_concat_8x4(s0, s1, s2, s3, s0123); + transpose_concat_8x4(s1, s2, s3, s4, s1234); + transpose_concat_8x4(s2, s3, s4, s5, s2345); + transpose_concat_8x4(s3, s4, s5, s6, s3456); + + do { + int16x8_t s7, s8, s9, s10; + load_s16_8x4(s, src_stride, &s7, &s8, &s9, &s10); + int16x8_t s4567[4], s5678[4], s6789[4], s789A[4]; + + // Transpose and shuffle the 4 lines that were loaded. + transpose_concat_8x4(s7, s8, s9, s10, s789A); + + // Merge new data into block from previous iteration. + aom_tbl2x4_s16(s3456, s789A, merge_block_tbl.val[0], s4567); + aom_tbl2x4_s16(s3456, s789A, merge_block_tbl.val[1], s5678); + aom_tbl2x4_s16(s3456, s789A, merge_block_tbl.val[2], s6789); + + uint16x8_t d0 = highbd_convolve8_8_y(s0123, s4567, y_filter, offset); + uint16x8_t d1 = highbd_convolve8_8_y(s1234, s5678, y_filter, offset); + uint16x8_t d2 = highbd_convolve8_8_y(s2345, s6789, y_filter, offset); + uint16x8_t d3 = highbd_convolve8_8_y(s3456, s789A, y_filter, offset); + + store_u16_8x4(d, dst_stride, d0, d1, d2, d3); + + // Prepare block for next iteration - re-using as much as possible. + // Shuffle everything up four rows. 
+ s0123[0] = s4567[0]; + s0123[1] = s4567[1]; + s0123[2] = s4567[2]; + s0123[3] = s4567[3]; + s1234[0] = s5678[0]; + s1234[1] = s5678[1]; + s1234[2] = s5678[2]; + s1234[3] = s5678[3]; + s2345[0] = s6789[0]; + s2345[1] = s6789[1]; + s2345[2] = s6789[2]; + s2345[3] = s6789[3]; + s3456[0] = s789A[0]; + s3456[1] = s789A[1]; + s3456[2] = s789A[2]; + s3456[3] = s789A[3]; + + s += 4 * src_stride; + d += 4 * dst_stride; + h -= 4; + } while (h != 0); + src += 8; + dst += 8; + width -= 8; + } while (width != 0); + } +} + +void av1_highbd_dist_wtd_convolve_y_sve2( + const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride, int w, + int h, const InterpFilterParams *filter_params_y, const int subpel_y_qn, + ConvolveParams *conv_params, int bd) { + DECLARE_ALIGNED(16, uint16_t, + im_block[(MAX_SB_SIZE + MAX_FILTER_TAP) * MAX_SB_SIZE]); + CONV_BUF_TYPE *dst16 = conv_params->dst; + const int y_filter_taps = get_filter_tap(filter_params_y, subpel_y_qn); + + if (y_filter_taps != 8) { + av1_highbd_dist_wtd_convolve_y_neon(src, src_stride, dst, dst_stride, w, h, + filter_params_y, subpel_y_qn, + conv_params, bd); + return; + } + + int dst16_stride = conv_params->dst_stride; + const int im_stride = MAX_SB_SIZE; + const int vert_offset = filter_params_y->taps / 2 - 1; + assert(FILTER_BITS == COMPOUND_ROUND1_BITS); + + const int16_t *y_filter_ptr = av1_get_interp_filter_subpel_kernel( + filter_params_y, subpel_y_qn & SUBPEL_MASK); + + src -= vert_offset * src_stride; + + if (bd == 12) { + if (conv_params->do_average) { + highbd_12_dist_wtd_convolve_y_8tap_sve2(src, src_stride, im_block, + im_stride, w, h, y_filter_ptr); + if (conv_params->use_dist_wtd_comp_avg) { + highbd_12_dist_wtd_comp_avg_neon(im_block, im_stride, dst, dst_stride, + w, h, conv_params); + } else { + highbd_12_comp_avg_neon(im_block, im_stride, dst, dst_stride, w, h, + conv_params); + } + } else { + highbd_12_dist_wtd_convolve_y_8tap_sve2(src, src_stride, dst16, + dst16_stride, w, h, y_filter_ptr); + } + } else { + if (conv_params->do_average) { + highbd_dist_wtd_convolve_y_8tap_sve2(src, src_stride, im_block, im_stride, + w, h, y_filter_ptr, bd); + if (conv_params->use_dist_wtd_comp_avg) { + highbd_dist_wtd_comp_avg_neon(im_block, im_stride, dst, dst_stride, w, + h, conv_params, bd); + } else { + highbd_comp_avg_neon(im_block, im_stride, dst, dst_stride, w, h, + conv_params, bd); + } + } else { + highbd_dist_wtd_convolve_y_8tap_sve2(src, src_stride, dst16, dst16_stride, + w, h, y_filter_ptr, bd); + } + } +} + +static INLINE void highbd_12_dist_wtd_convolve_2d_horiz_8tap_sve2( + const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride, + int width, int height, const int16_t *x_filter_ptr) { + const int64x2_t offset = vdupq_n_s64(1 << (12 + FILTER_BITS - 2)); + const int16x8_t filter = vld1q_s16(x_filter_ptr); + + // We are only doing 8-tap and 4-tap vertical convolutions, therefore we know + // that im_h % 4 = 3, so we can do the loop across the whole block 4 rows at + // a time and then process the last 3 rows separately. 
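+ // (Sketch of that claim: the callers pass height = im_h = h + taps - 1,
+ // where h is a multiple of 4 and taps is either 4 or 8, so im_h % 4 == 3.)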
+ + do { + const int16_t *s = (const int16_t *)src; + uint16_t *d = dst; + int w = width; + + do { + int16x8_t s0[8], s1[8], s2[8], s3[8]; + load_s16_8x8(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3], + &s0[4], &s0[5], &s0[6], &s0[7]); + load_s16_8x8(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3], + &s1[4], &s1[5], &s1[6], &s1[7]); + load_s16_8x8(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3], + &s2[4], &s2[5], &s2[6], &s2[7]); + load_s16_8x8(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3], + &s3[4], &s3[5], &s3[6], &s3[7]); + + uint16x8_t d0 = highbd_12_convolve8_8_x(s0, filter, offset); + uint16x8_t d1 = highbd_12_convolve8_8_x(s1, filter, offset); + uint16x8_t d2 = highbd_12_convolve8_8_x(s2, filter, offset); + uint16x8_t d3 = highbd_12_convolve8_8_x(s3, filter, offset); + + store_u16_8x4(d, dst_stride, d0, d1, d2, d3); + + s += 8; + d += 8; + w -= 8; + } while (w != 0); + src += 4 * src_stride; + dst += 4 * dst_stride; + height -= 4; + } while (height > 4); + + // Process final 3 rows. + const int16_t *s = (const int16_t *)src; + do { + int16x8_t s0[8], s1[8], s2[8]; + load_s16_8x8(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3], &s0[4], + &s0[5], &s0[6], &s0[7]); + load_s16_8x8(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3], &s1[4], + &s1[5], &s1[6], &s1[7]); + load_s16_8x8(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3], &s2[4], + &s2[5], &s2[6], &s2[7]); + + uint16x8_t d0 = highbd_12_convolve8_8_x(s0, filter, offset); + uint16x8_t d1 = highbd_12_convolve8_8_x(s1, filter, offset); + uint16x8_t d2 = highbd_12_convolve8_8_x(s2, filter, offset); + + store_u16_8x3(dst, dst_stride, d0, d1, d2); + s += 8; + dst += 8; + width -= 8; + } while (width != 0); +} + +static INLINE void highbd_dist_wtd_convolve_2d_horiz_8tap_sve2( + const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride, + int width, int height, const int16_t *x_filter_ptr, const int bd) { + const int64x2_t offset = vdupq_n_s64(1 << (bd + FILTER_BITS - 2)); + const int16x8_t filter = vld1q_s16(x_filter_ptr); + + // We are only doing 8-tap and 4-tap vertical convolutions, therefore we know + // that im_h % 4 = 3, so we can do the loop across the whole block 4 rows at + // a time and then process the last 3 rows separately. + + do { + const int16_t *s = (const int16_t *)src; + uint16_t *d = dst; + int w = width; + + do { + int16x8_t s0[8], s1[8], s2[8], s3[8]; + load_s16_8x8(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3], + &s0[4], &s0[5], &s0[6], &s0[7]); + load_s16_8x8(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3], + &s1[4], &s1[5], &s1[6], &s1[7]); + load_s16_8x8(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3], + &s2[4], &s2[5], &s2[6], &s2[7]); + load_s16_8x8(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3], + &s3[4], &s3[5], &s3[6], &s3[7]); + + uint16x8_t d0 = highbd_convolve8_8_x(s0, filter, offset); + uint16x8_t d1 = highbd_convolve8_8_x(s1, filter, offset); + uint16x8_t d2 = highbd_convolve8_8_x(s2, filter, offset); + uint16x8_t d3 = highbd_convolve8_8_x(s3, filter, offset); + + store_u16_8x4(d, dst_stride, d0, d1, d2, d3); + + s += 8; + d += 8; + w -= 8; + } while (w != 0); + src += 4 * src_stride; + dst += 4 * dst_stride; + height -= 4; + } while (height > 4); + + // Process final 3 rows. 
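+ // (Since height entered the loop congruent to 3 mod 4, the loop above
+ // exits with exactly 3 rows left over.)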
+ const int16_t *s = (const int16_t *)src; + do { + int16x8_t s0[8], s1[8], s2[8]; + load_s16_8x8(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3], &s0[4], + &s0[5], &s0[6], &s0[7]); + load_s16_8x8(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3], &s1[4], + &s1[5], &s1[6], &s1[7]); + load_s16_8x8(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3], &s2[4], + &s2[5], &s2[6], &s2[7]); + + uint16x8_t d0 = highbd_convolve8_8_x(s0, filter, offset); + uint16x8_t d1 = highbd_convolve8_8_x(s1, filter, offset); + uint16x8_t d2 = highbd_convolve8_8_x(s2, filter, offset); + + store_u16_8x3(dst, dst_stride, d0, d1, d2); + s += 8; + dst += 8; + width -= 8; + } while (width != 0); +} + +static INLINE void highbd_12_dist_wtd_convolve_2d_horiz_4tap_sve2( + const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride, + int width, int height, const int16_t *x_filter_ptr) { + const int64x2_t offset = vdupq_n_s64(1 << (12 + FILTER_BITS - 1)); + const int16x4_t x_filter = vld1_s16(x_filter_ptr + 2); + const int16x8_t filter = vcombine_s16(x_filter, vdup_n_s16(0)); + + // We are only doing 8-tap and 4-tap vertical convolutions, therefore we know + // that im_h % 4 = 3, so we can do the loop across the whole block 4 rows at + // a time and then process the last 3 rows separately. + + if (width == 4) { + uint16x8x2_t permute_tbl = vld1q_u16_x2(kDotProdTbl); + + const int16_t *s = (const int16_t *)(src); + + do { + int16x8_t s0, s1, s2, s3; + load_s16_8x4(s, src_stride, &s0, &s1, &s2, &s3); + + uint16x4_t d0 = highbd_12_convolve4_4_x(s0, filter, offset, permute_tbl); + uint16x4_t d1 = highbd_12_convolve4_4_x(s1, filter, offset, permute_tbl); + uint16x4_t d2 = highbd_12_convolve4_4_x(s2, filter, offset, permute_tbl); + uint16x4_t d3 = highbd_12_convolve4_4_x(s3, filter, offset, permute_tbl); + + store_u16_4x4(dst, dst_stride, d0, d1, d2, d3); + + s += 4 * src_stride; + dst += 4 * dst_stride; + height -= 4; + } while (height > 4); + + // Process final 3 rows. + int16x8_t s0, s1, s2; + load_s16_8x3(s, src_stride, &s0, &s1, &s2); + + uint16x4_t d0 = highbd_12_convolve4_4_x(s0, filter, offset, permute_tbl); + uint16x4_t d1 = highbd_12_convolve4_4_x(s1, filter, offset, permute_tbl); + uint16x4_t d2 = highbd_12_convolve4_4_x(s2, filter, offset, permute_tbl); + + store_u16_4x3(dst, dst_stride, d0, d1, d2); + + } else { + uint16x8_t idx = vld1q_u16(kDeinterleaveTbl); + + do { + const int16_t *s = (const int16_t *)(src); + uint16_t *d = dst; + int w = width; + + do { + int16x8_t s0[4], s1[4], s2[4], s3[4]; + load_s16_8x4(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3]); + load_s16_8x4(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3]); + load_s16_8x4(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3]); + load_s16_8x4(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3]); + + uint16x8_t d0 = highbd_12_convolve4_8_x(s0, filter, offset, idx); + uint16x8_t d1 = highbd_12_convolve4_8_x(s1, filter, offset, idx); + uint16x8_t d2 = highbd_12_convolve4_8_x(s2, filter, offset, idx); + uint16x8_t d3 = highbd_12_convolve4_8_x(s3, filter, offset, idx); + + store_u16_8x4(d, dst_stride, d0, d1, d2, d3); + + s += 8; + d += 8; + w -= 8; + } while (w != 0); + src += 4 * src_stride; + dst += 4 * dst_stride; + height -= 4; + } while (height > 4); + + // Process final 3 rows. 
+ const int16_t *s = (const int16_t *)(src); + + do { + int16x8_t s0[4], s1[4], s2[4]; + load_s16_8x4(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3]); + load_s16_8x4(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3]); + load_s16_8x4(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3]); + + uint16x8_t d0 = highbd_12_convolve4_8_x(s0, filter, offset, idx); + uint16x8_t d1 = highbd_12_convolve4_8_x(s1, filter, offset, idx); + uint16x8_t d2 = highbd_12_convolve4_8_x(s2, filter, offset, idx); + + store_u16_8x3(dst, dst_stride, d0, d1, d2); + + s += 8; + dst += 8; + width -= 8; + } while (width != 0); + } +} + +static INLINE void highbd_dist_wtd_convolve_2d_horiz_4tap_sve2( + const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride, + int width, int height, const int16_t *x_filter_ptr, const int bd) { + const int64x2_t offset = vdupq_n_s64(1 << (bd + FILTER_BITS - 1)); + const int16x4_t x_filter = vld1_s16(x_filter_ptr + 2); + const int16x8_t filter = vcombine_s16(x_filter, vdup_n_s16(0)); + + // We are only doing 8-tap and 4-tap vertical convolutions, therefore we know + // that im_h % 4 = 3, so we can do the loop across the whole block 4 rows at + // a time and then process the last 3 rows separately. + + if (width == 4) { + uint16x8x2_t permute_tbl = vld1q_u16_x2(kDotProdTbl); + + const int16_t *s = (const int16_t *)(src); + + do { + int16x8_t s0, s1, s2, s3; + load_s16_8x4(s, src_stride, &s0, &s1, &s2, &s3); + + uint16x4_t d0 = highbd_convolve4_4_x(s0, filter, offset, permute_tbl); + uint16x4_t d1 = highbd_convolve4_4_x(s1, filter, offset, permute_tbl); + uint16x4_t d2 = highbd_convolve4_4_x(s2, filter, offset, permute_tbl); + uint16x4_t d3 = highbd_convolve4_4_x(s3, filter, offset, permute_tbl); + + store_u16_4x4(dst, dst_stride, d0, d1, d2, d3); + + s += 4 * src_stride; + dst += 4 * dst_stride; + height -= 4; + } while (height > 4); + + // Process final 3 rows. + int16x8_t s0, s1, s2; + load_s16_8x3(s, src_stride, &s0, &s1, &s2); + + uint16x4_t d0 = highbd_convolve4_4_x(s0, filter, offset, permute_tbl); + uint16x4_t d1 = highbd_convolve4_4_x(s1, filter, offset, permute_tbl); + uint16x4_t d2 = highbd_convolve4_4_x(s2, filter, offset, permute_tbl); + + store_u16_4x3(dst, dst_stride, d0, d1, d2); + } else { + uint16x8_t idx = vld1q_u16(kDeinterleaveTbl); + + do { + const int16_t *s = (const int16_t *)(src); + uint16_t *d = dst; + int w = width; + + do { + int16x8_t s0[4], s1[4], s2[4], s3[4]; + load_s16_8x4(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3]); + load_s16_8x4(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3]); + load_s16_8x4(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3]); + load_s16_8x4(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3]); + + uint16x8_t d0 = highbd_convolve4_8_x(s0, filter, offset, idx); + uint16x8_t d1 = highbd_convolve4_8_x(s1, filter, offset, idx); + uint16x8_t d2 = highbd_convolve4_8_x(s2, filter, offset, idx); + uint16x8_t d3 = highbd_convolve4_8_x(s3, filter, offset, idx); + + store_u16_8x4(d, dst_stride, d0, d1, d2, d3); + + s += 8; + d += 8; + w -= 8; + } while (w != 0); + src += 4 * src_stride; + dst += 4 * dst_stride; + height -= 4; + } while (height > 4); + + // Process final 3 rows. 
+ const int16_t *s = (const int16_t *)(src); + + do { + int16x8_t s0[4], s1[4], s2[4]; + load_s16_8x4(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3]); + load_s16_8x4(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3]); + load_s16_8x4(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3]); + + uint16x8_t d0 = highbd_convolve4_8_x(s0, filter, offset, idx); + uint16x8_t d1 = highbd_convolve4_8_x(s1, filter, offset, idx); + uint16x8_t d2 = highbd_convolve4_8_x(s2, filter, offset, idx); + + store_u16_8x3(dst, dst_stride, d0, d1, d2); + + s += 8; + dst += 8; + width -= 8; + } while (width != 0); + } +} + +static INLINE uint16x4_t highbd_convolve8_4_2d_v(int16x8_t samples_lo[2], + int16x8_t samples_hi[2], + int16x8_t filter, + int64x2_t offset) { + int64x2_t sum01 = aom_svdot_lane_s16(offset, samples_lo[0], filter, 0); + sum01 = aom_svdot_lane_s16(sum01, samples_hi[0], filter, 1); + + int64x2_t sum23 = aom_svdot_lane_s16(offset, samples_lo[1], filter, 0); + sum23 = aom_svdot_lane_s16(sum23, samples_hi[1], filter, 1); + + int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23)); + + return vqrshrun_n_s32(sum0123, COMPOUND_ROUND1_BITS); +} + +static INLINE uint16x8_t highbd_convolve8_8_2d_v(int16x8_t samples_lo[4], + int16x8_t samples_hi[4], + int16x8_t filter, + int64x2_t offset) { + int64x2_t sum01 = aom_svdot_lane_s16(offset, samples_lo[0], filter, 0); + sum01 = aom_svdot_lane_s16(sum01, samples_hi[0], filter, 1); + + int64x2_t sum23 = aom_svdot_lane_s16(offset, samples_lo[1], filter, 0); + sum23 = aom_svdot_lane_s16(sum23, samples_hi[1], filter, 1); + + int64x2_t sum45 = aom_svdot_lane_s16(offset, samples_lo[2], filter, 0); + sum45 = aom_svdot_lane_s16(sum45, samples_hi[2], filter, 1); + + int64x2_t sum67 = aom_svdot_lane_s16(offset, samples_lo[3], filter, 0); + sum67 = aom_svdot_lane_s16(sum67, samples_hi[3], filter, 1); + + int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23)); + int32x4_t sum4567 = vcombine_s32(vmovn_s64(sum45), vmovn_s64(sum67)); + + return vcombine_u16(vqrshrun_n_s32(sum0123, COMPOUND_ROUND1_BITS), + vqrshrun_n_s32(sum4567, COMPOUND_ROUND1_BITS)); +} + +static INLINE void highbd_dist_wtd_convolve_2d_vert_8tap_sve2( + const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride, + int width, int height, const int16_t *y_filter_ptr, int offset) { + const int16x8_t y_filter = vld1q_s16(y_filter_ptr); + const int64x2_t offset_s64 = vdupq_n_s64(offset); + + uint16x8x3_t merge_block_tbl = vld1q_u16_x3(kDotProdMergeBlockTbl); + // Scale indices by size of the true vector length to avoid reading from an + // 'undefined' portion of a vector on a system with SVE vectors > 128-bit. + uint16x8_t correction0 = + vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000000000000ULL)); + merge_block_tbl.val[0] = vaddq_u16(merge_block_tbl.val[0], correction0); + + uint16x8_t correction1 = + vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000100000000ULL)); + merge_block_tbl.val[1] = vaddq_u16(merge_block_tbl.val[1], correction1); + + uint16x8_t correction2 = + vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000100010000ULL)); + merge_block_tbl.val[2] = vaddq_u16(merge_block_tbl.val[2], correction2); + + if (width == 4) { + int16_t *s = (int16_t *)src; + int16x4_t s0, s1, s2, s3, s4, s5, s6; + load_s16_4x7(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6); + s += 7 * src_stride; + + // This operation combines a conventional transpose and the sample permute + // required before computing the dot product. 
+ int16x8_t s0123[2], s1234[2], s2345[2], s3456[2]; + transpose_concat_4x4(s0, s1, s2, s3, s0123); + transpose_concat_4x4(s1, s2, s3, s4, s1234); + transpose_concat_4x4(s2, s3, s4, s5, s2345); + transpose_concat_4x4(s3, s4, s5, s6, s3456); + + do { + int16x4_t s7, s8, s9, s10; + load_s16_4x4(s, src_stride, &s7, &s8, &s9, &s10); + + int16x8_t s4567[2], s5678[2], s6789[2], s789A[2]; + // Transpose and shuffle the 4 lines that were loaded. + transpose_concat_4x4(s7, s8, s9, s10, s789A); + + // Merge new data into block from previous iteration. + aom_tbl2x2_s16(s3456, s789A, merge_block_tbl.val[0], s4567); + aom_tbl2x2_s16(s3456, s789A, merge_block_tbl.val[1], s5678); + aom_tbl2x2_s16(s3456, s789A, merge_block_tbl.val[2], s6789); + + uint16x4_t d0 = + highbd_convolve8_4_2d_v(s0123, s4567, y_filter, offset_s64); + uint16x4_t d1 = + highbd_convolve8_4_2d_v(s1234, s5678, y_filter, offset_s64); + uint16x4_t d2 = + highbd_convolve8_4_2d_v(s2345, s6789, y_filter, offset_s64); + uint16x4_t d3 = + highbd_convolve8_4_2d_v(s3456, s789A, y_filter, offset_s64); + + store_u16_4x4(dst, dst_stride, d0, d1, d2, d3); + + // Prepare block for next iteration - re-using as much as possible. + // Shuffle everything up four rows. + s0123[0] = s4567[0]; + s0123[1] = s4567[1]; + s1234[0] = s5678[0]; + s1234[1] = s5678[1]; + s2345[0] = s6789[0]; + s2345[1] = s6789[1]; + s3456[0] = s789A[0]; + s3456[1] = s789A[1]; + + s += 4 * src_stride; + dst += 4 * dst_stride; + height -= 4; + } while (height != 0); + } else { + do { + int h = height; + int16_t *s = (int16_t *)src; + uint16_t *d = dst; + + int16x8_t s0, s1, s2, s3, s4, s5, s6; + load_s16_8x7(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6); + s += 7 * src_stride; + + // This operation combines a conventional transpose and the sample permute + // required before computing the dot product. + int16x8_t s0123[4], s1234[4], s2345[4], s3456[4]; + transpose_concat_8x4(s0, s1, s2, s3, s0123); + transpose_concat_8x4(s1, s2, s3, s4, s1234); + transpose_concat_8x4(s2, s3, s4, s5, s2345); + transpose_concat_8x4(s3, s4, s5, s6, s3456); + + do { + int16x8_t s7, s8, s9, s10; + load_s16_8x4(s, src_stride, &s7, &s8, &s9, &s10); + int16x8_t s4567[4], s5678[4], s6789[4], s789A[4]; + + // Transpose and shuffle the 4 lines that were loaded. + transpose_concat_8x4(s7, s8, s9, s10, s789A); + + // Merge new data into block from previous iteration. + aom_tbl2x4_s16(s3456, s789A, merge_block_tbl.val[0], s4567); + aom_tbl2x4_s16(s3456, s789A, merge_block_tbl.val[1], s5678); + aom_tbl2x4_s16(s3456, s789A, merge_block_tbl.val[2], s6789); + + uint16x8_t d0 = + highbd_convolve8_8_2d_v(s0123, s4567, y_filter, offset_s64); + uint16x8_t d1 = + highbd_convolve8_8_2d_v(s1234, s5678, y_filter, offset_s64); + uint16x8_t d2 = + highbd_convolve8_8_2d_v(s2345, s6789, y_filter, offset_s64); + uint16x8_t d3 = + highbd_convolve8_8_2d_v(s3456, s789A, y_filter, offset_s64); + + store_u16_8x4(d, dst_stride, d0, d1, d2, d3); + + // Prepare block for next iteration - re-using as much as possible. + // Shuffle everything up four rows. 
+ s0123[0] = s4567[0]; + s0123[1] = s4567[1]; + s0123[2] = s4567[2]; + s0123[3] = s4567[3]; + s1234[0] = s5678[0]; + s1234[1] = s5678[1]; + s1234[2] = s5678[2]; + s1234[3] = s5678[3]; + s2345[0] = s6789[0]; + s2345[1] = s6789[1]; + s2345[2] = s6789[2]; + s2345[3] = s6789[3]; + s3456[0] = s789A[0]; + s3456[1] = s789A[1]; + s3456[2] = s789A[2]; + s3456[3] = s789A[3]; + + s += 4 * src_stride; + d += 4 * dst_stride; + h -= 4; + } while (h != 0); + src += 8; + dst += 8; + width -= 8; + } while (width != 0); + } +} + +static INLINE uint16x4_t highbd_convolve4_4_2d_v( + const int16x4_t s0, const int16x4_t s1, const int16x4_t s2, + const int16x4_t s3, const int16x4_t filter, const int32x4_t offset) { + int32x4_t sum = vmlal_lane_s16(offset, s0, filter, 0); + sum = vmlal_lane_s16(sum, s1, filter, 1); + sum = vmlal_lane_s16(sum, s2, filter, 2); + sum = vmlal_lane_s16(sum, s3, filter, 3); + + return vqrshrun_n_s32(sum, COMPOUND_ROUND1_BITS); +} + +static INLINE uint16x8_t highbd_convolve4_8_2d_v( + const int16x8_t s0, const int16x8_t s1, const int16x8_t s2, + const int16x8_t s3, const int16x4_t filter, const int32x4_t offset) { + int32x4_t sum0 = vmlal_lane_s16(offset, vget_low_s16(s0), filter, 0); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s1), filter, 1); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s2), filter, 2); + sum0 = vmlal_lane_s16(sum0, vget_low_s16(s3), filter, 3); + + int32x4_t sum1 = vmlal_lane_s16(offset, vget_high_s16(s0), filter, 0); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s1), filter, 1); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s2), filter, 2); + sum1 = vmlal_lane_s16(sum1, vget_high_s16(s3), filter, 3); + + return vcombine_u16(vqrshrun_n_s32(sum0, COMPOUND_ROUND1_BITS), + vqrshrun_n_s32(sum1, COMPOUND_ROUND1_BITS)); +} + +static INLINE void highbd_dist_wtd_convolve_2d_vert_4tap_neon( + const uint16_t *src_ptr, int src_stride, uint16_t *dst_ptr, int dst_stride, + int w, int h, const int16_t *y_filter_ptr, const int offset) { + const int16x4_t y_filter = vld1_s16(y_filter_ptr + 2); + const int32x4_t offset_vec = vdupq_n_s32(offset); + + if (w == 4) { + const int16_t *s = (const int16_t *)src_ptr; + uint16_t *d = dst_ptr; + + int16x4_t s0, s1, s2; + load_s16_4x3(s, src_stride, &s0, &s1, &s2); + s += 3 * src_stride; + + do { + int16x4_t s3, s4, s5, s6; + load_s16_4x4(s, src_stride, &s3, &s4, &s5, &s6); + + uint16x4_t d0 = + highbd_convolve4_4_2d_v(s0, s1, s2, s3, y_filter, offset_vec); + uint16x4_t d1 = + highbd_convolve4_4_2d_v(s1, s2, s3, s4, y_filter, offset_vec); + uint16x4_t d2 = + highbd_convolve4_4_2d_v(s2, s3, s4, s5, y_filter, offset_vec); + uint16x4_t d3 = + highbd_convolve4_4_2d_v(s3, s4, s5, s6, y_filter, offset_vec); + + store_u16_4x4(d, dst_stride, d0, d1, d2, d3); + + s0 = s4; + s1 = s5; + s2 = s6; + + s += 4 * src_stride; + d += 4 * dst_stride; + h -= 4; + } while (h != 0); + } else { + do { + int height = h; + const int16_t *s = (const int16_t *)src_ptr; + uint16_t *d = dst_ptr; + + int16x8_t s0, s1, s2; + load_s16_8x3(s, src_stride, &s0, &s1, &s2); + s += 3 * src_stride; + + do { + int16x8_t s3, s4, s5, s6; + load_s16_8x4(s, src_stride, &s3, &s4, &s5, &s6); + + uint16x8_t d0 = + highbd_convolve4_8_2d_v(s0, s1, s2, s3, y_filter, offset_vec); + uint16x8_t d1 = + highbd_convolve4_8_2d_v(s1, s2, s3, s4, y_filter, offset_vec); + uint16x8_t d2 = + highbd_convolve4_8_2d_v(s2, s3, s4, s5, y_filter, offset_vec); + uint16x8_t d3 = + highbd_convolve4_8_2d_v(s3, s4, s5, s6, y_filter, offset_vec); + + store_u16_8x4(d, dst_stride, d0, d1, d2, d3); + + s0 = s4; + s1 = s5; + s2 
= s6; + + s += 4 * src_stride; + d += 4 * dst_stride; + height -= 4; + } while (height != 0); + src_ptr += 8; + dst_ptr += 8; + w -= 8; + } while (w != 0); + } +} + +void av1_highbd_dist_wtd_convolve_2d_sve2( + const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride, int w, + int h, const InterpFilterParams *filter_params_x, + const InterpFilterParams *filter_params_y, const int subpel_x_qn, + const int subpel_y_qn, ConvolveParams *conv_params, int bd) { + DECLARE_ALIGNED(16, uint16_t, + im_block[(MAX_SB_SIZE + MAX_FILTER_TAP) * MAX_SB_SIZE]); + DECLARE_ALIGNED(16, uint16_t, + im_block2[(MAX_SB_SIZE + MAX_FILTER_TAP) * MAX_SB_SIZE]); + + CONV_BUF_TYPE *dst16 = conv_params->dst; + int dst16_stride = conv_params->dst_stride; + const int x_filter_taps = get_filter_tap(filter_params_x, subpel_x_qn); + const int clamped_x_taps = x_filter_taps < 4 ? 4 : x_filter_taps; + + const int y_filter_taps = get_filter_tap(filter_params_y, subpel_y_qn); + const int clamped_y_taps = y_filter_taps < 4 ? 4 : y_filter_taps; + + if (x_filter_taps == 6 || y_filter_taps == 6) { + av1_highbd_dist_wtd_convolve_2d_neon( + src, src_stride, dst, dst_stride, w, h, filter_params_x, + filter_params_y, subpel_x_qn, subpel_y_qn, conv_params, bd); + return; + } + + const int im_h = h + clamped_y_taps - 1; + const int im_stride = MAX_SB_SIZE; + const int vert_offset = clamped_y_taps / 2 - 1; + const int horiz_offset = clamped_x_taps / 2 - 1; + const int y_offset_bits = bd + 2 * FILTER_BITS - conv_params->round_0; + const int round_offset_conv_y = (1 << y_offset_bits); + + const uint16_t *src_ptr = src - vert_offset * src_stride - horiz_offset; + + const int16_t *x_filter_ptr = av1_get_interp_filter_subpel_kernel( + filter_params_x, subpel_x_qn & SUBPEL_MASK); + const int16_t *y_filter_ptr = av1_get_interp_filter_subpel_kernel( + filter_params_y, subpel_y_qn & SUBPEL_MASK); + + if (bd == 12) { + if (x_filter_taps <= 4) { + highbd_12_dist_wtd_convolve_2d_horiz_4tap_sve2( + src_ptr, src_stride, im_block, im_stride, w, im_h, x_filter_ptr); + } else { + highbd_12_dist_wtd_convolve_2d_horiz_8tap_sve2( + src_ptr, src_stride, im_block, im_stride, w, im_h, x_filter_ptr); + } + } else { + if (x_filter_taps <= 4) { + highbd_dist_wtd_convolve_2d_horiz_4tap_sve2( + src_ptr, src_stride, im_block, im_stride, w, im_h, x_filter_ptr, bd); + } else { + highbd_dist_wtd_convolve_2d_horiz_8tap_sve2( + src_ptr, src_stride, im_block, im_stride, w, im_h, x_filter_ptr, bd); + } + } + + if (conv_params->do_average) { + if (y_filter_taps <= 4) { + highbd_dist_wtd_convolve_2d_vert_4tap_neon(im_block, im_stride, im_block2, + im_stride, w, h, y_filter_ptr, + round_offset_conv_y); + } else { + highbd_dist_wtd_convolve_2d_vert_8tap_sve2(im_block, im_stride, im_block2, + im_stride, w, h, y_filter_ptr, + round_offset_conv_y); + } + if (conv_params->use_dist_wtd_comp_avg) { + if (bd == 12) { + highbd_12_dist_wtd_comp_avg_neon(im_block2, im_stride, dst, dst_stride, + w, h, conv_params); + + } else { + highbd_dist_wtd_comp_avg_neon(im_block2, im_stride, dst, dst_stride, w, + h, conv_params, bd); + } + } else { + if (bd == 12) { + highbd_12_comp_avg_neon(im_block2, im_stride, dst, dst_stride, w, h, + conv_params); + + } else { + highbd_comp_avg_neon(im_block2, im_stride, dst, dst_stride, w, h, + conv_params, bd); + } + } + } else { + if (y_filter_taps <= 4) { + highbd_dist_wtd_convolve_2d_vert_4tap_neon( + im_block, im_stride, dst16, dst16_stride, w, h, y_filter_ptr, + round_offset_conv_y); + } else { + highbd_dist_wtd_convolve_2d_vert_8tap_sve2( + 
im_block, im_stride, dst16, dst16_stride, w, h, y_filter_ptr,
+ round_offset_conv_y);
+ }
+}
diff --git a/third_party/aom/av1/common/arm/highbd_convolve_sve2.c b/third_party/aom/av1/common/arm/highbd_convolve_sve2.c
new file mode 100644
index 0000000000..82eb12fcea
--- /dev/null
+++ b/third_party/aom/av1/common/arm/highbd_convolve_sve2.c
@@ -0,0 +1,1720 @@
+/*
+ * Copyright (c) 2024, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <assert.h>
+#include <arm_neon.h>
+
+#include "config/aom_config.h"
+#include "config/av1_rtcd.h"
+
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_dsp/arm/aom_neon_sve_bridge.h"
+#include "aom_dsp/arm/aom_neon_sve2_bridge.h"
+#include "aom_dsp/arm/mem_neon.h"
+#include "aom_ports/mem.h"
+#include "av1/common/convolve.h"
+#include "av1/common/filter.h"
+#include "av1/common/arm/highbd_convolve_sve2.h"
+
+DECLARE_ALIGNED(16, static const uint16_t, kDotProdTbl[32]) = {
+ 0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6,
+ 4, 5, 6, 7, 5, 6, 7, 0, 6, 7, 0, 1, 7, 0, 1, 2,
+};
+
+static INLINE uint16x4_t convolve12_4_x(
+ int16x8_t s0, int16x8_t s1, int16x8_t filter_0_7, int16x8_t filter_4_11,
+ const int64x2_t offset, uint16x8x4_t permute_tbl, uint16x4_t max) {
+ int16x8_t permuted_samples[6];
+ permuted_samples[0] = aom_tbl_s16(s0, permute_tbl.val[0]);
+ permuted_samples[1] = aom_tbl_s16(s0, permute_tbl.val[1]);
+ permuted_samples[2] = aom_tbl2_s16(s0, s1, permute_tbl.val[2]);
+ permuted_samples[3] = aom_tbl2_s16(s0, s1, permute_tbl.val[3]);
+ permuted_samples[4] = aom_tbl_s16(s1, permute_tbl.val[0]);
+ permuted_samples[5] = aom_tbl_s16(s1, permute_tbl.val[1]);
+
+ int64x2_t sum01 =
+ aom_svdot_lane_s16(offset, permuted_samples[0], filter_0_7, 0);
+ sum01 = aom_svdot_lane_s16(sum01, permuted_samples[2], filter_0_7, 1);
+ sum01 = aom_svdot_lane_s16(sum01, permuted_samples[4], filter_4_11, 1);
+
+ int64x2_t sum23 =
+ aom_svdot_lane_s16(offset, permuted_samples[1], filter_0_7, 0);
+ sum23 = aom_svdot_lane_s16(sum23, permuted_samples[3], filter_0_7, 1);
+ sum23 = aom_svdot_lane_s16(sum23, permuted_samples[5], filter_4_11, 1);
+
+ int32x4_t res0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+ uint16x4_t res = vqrshrun_n_s32(res0123, FILTER_BITS);
+
+ return vmin_u16(res, max);
+}
+
+static INLINE uint16x8_t convolve12_8_x(int16x8_t s0, int16x8_t s1,
+ int16x8_t s2, int16x8_t filter_0_7,
+ int16x8_t filter_4_11, int64x2_t offset,
+ uint16x8x4_t permute_tbl,
+ uint16x8_t max) {
+ int16x8_t permuted_samples[8];
+ permuted_samples[0] = aom_tbl_s16(s0, permute_tbl.val[0]);
+ permuted_samples[1] = aom_tbl_s16(s0, permute_tbl.val[1]);
+ permuted_samples[2] = aom_tbl2_s16(s0, s1, permute_tbl.val[2]);
+ permuted_samples[3] = aom_tbl2_s16(s0, s1, permute_tbl.val[3]);
+ permuted_samples[4] = aom_tbl_s16(s1, permute_tbl.val[0]);
+ permuted_samples[5] = aom_tbl_s16(s1, permute_tbl.val[1]);
+ permuted_samples[6] = aom_tbl2_s16(s1, s2, permute_tbl.val[2]);
+ permuted_samples[7] = aom_tbl2_s16(s1, s2, permute_tbl.val[3]);
+
+ int64x2_t sum01 =
+ aom_svdot_lane_s16(offset, permuted_samples[0], filter_0_7, 0);
+ sum01 =
aom_svdot_lane_s16(sum01, permuted_samples[2], filter_0_7, 1); + sum01 = aom_svdot_lane_s16(sum01, permuted_samples[4], filter_4_11, 1); + + int64x2_t sum23 = + aom_svdot_lane_s16(offset, permuted_samples[1], filter_0_7, 0); + sum23 = aom_svdot_lane_s16(sum23, permuted_samples[3], filter_0_7, 1); + sum23 = aom_svdot_lane_s16(sum23, permuted_samples[5], filter_4_11, 1); + + int64x2_t sum45 = + aom_svdot_lane_s16(offset, permuted_samples[2], filter_0_7, 0); + sum45 = aom_svdot_lane_s16(sum45, permuted_samples[4], filter_0_7, 1); + sum45 = aom_svdot_lane_s16(sum45, permuted_samples[6], filter_4_11, 1); + + int64x2_t sum67 = + aom_svdot_lane_s16(offset, permuted_samples[3], filter_0_7, 0); + sum67 = aom_svdot_lane_s16(sum67, permuted_samples[5], filter_0_7, 1); + sum67 = aom_svdot_lane_s16(sum67, permuted_samples[7], filter_4_11, 1); + + int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23)); + int32x4_t sum4567 = vcombine_s32(vmovn_s64(sum45), vmovn_s64(sum67)); + + uint16x8_t res = vcombine_u16(vqrshrun_n_s32(sum0123, FILTER_BITS), + vqrshrun_n_s32(sum4567, FILTER_BITS)); + + return vminq_u16(res, max); +} + +static INLINE void highbd_convolve_x_sr_12tap_sve2( + const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride, + int width, int height, const int16_t *y_filter_ptr, + ConvolveParams *conv_params, int bd) { + // This shim allows to do only one rounding shift instead of two. + const int64x2_t offset = vdupq_n_s64(1 << (conv_params->round_0 - 1)); + + const int16x8_t y_filter_0_7 = vld1q_s16(y_filter_ptr); + const int16x8_t y_filter_4_11 = vld1q_s16(y_filter_ptr + 4); + + uint16x8x4_t permute_tbl = vld1q_u16_x4(kDotProdTbl); + // Scale indices by size of the true vector length to avoid reading from an + // 'undefined' portion of a vector on a system with SVE vectors > 128-bit. 
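+ // (For example, on a 128-bit implementation svcnth() == 8, so the
+ // corrections below add 8 to the table entries that wrapped around,
+ // making them select elements from the second source vector of the
+ // two-vector TBL instead of re-reading the first.)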
+ uint16x8_t correction0 = vreinterpretq_u16_u64(vcombine_u64( + vdup_n_u64(0), vdup_n_u64(svcnth() * 0x0001000000000000ULL))); + permute_tbl.val[2] = vaddq_u16(permute_tbl.val[2], correction0); + + uint16x8_t correction1 = vreinterpretq_u16_u64( + vcombine_u64(vdup_n_u64(svcnth() * 0x0001000100000000ULL), + vdup_n_u64(svcnth() * 0x0001000100010000ULL))); + permute_tbl.val[3] = vaddq_u16(permute_tbl.val[3], correction1); + + if (width == 4) { + const uint16x4_t max = vdup_n_u16((1 << bd) - 1); + const int16_t *s = (const int16_t *)src; + + do { + int16x8_t s0, s1, s2, s3, s4, s5, s6, s7; + load_s16_8x4(s, src_stride, &s0, &s2, &s4, &s6); + load_s16_8x4(s + 8, src_stride, &s1, &s3, &s5, &s7); + + uint16x4_t d0 = convolve12_4_x(s0, s1, y_filter_0_7, y_filter_4_11, + offset, permute_tbl, max); + uint16x4_t d1 = convolve12_4_x(s2, s3, y_filter_0_7, y_filter_4_11, + offset, permute_tbl, max); + uint16x4_t d2 = convolve12_4_x(s4, s5, y_filter_0_7, y_filter_4_11, + offset, permute_tbl, max); + uint16x4_t d3 = convolve12_4_x(s6, s7, y_filter_0_7, y_filter_4_11, + offset, permute_tbl, max); + + store_u16_4x4(dst, dst_stride, d0, d1, d2, d3); + + s += 4 * src_stride; + dst += 4 * dst_stride; + height -= 4; + } while (height != 0); + } else { + const uint16x8_t max = vdupq_n_u16((1 << bd) - 1); + + do { + const int16_t *s = (const int16_t *)src; + uint16_t *d = dst; + int w = width; + + do { + int16x8_t s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11; + load_s16_8x4(s, src_stride, &s0, &s3, &s6, &s9); + load_s16_8x4(s + 8, src_stride, &s1, &s4, &s7, &s10); + load_s16_8x4(s + 16, src_stride, &s2, &s5, &s8, &s11); + + uint16x8_t d0 = convolve12_8_x(s0, s1, s2, y_filter_0_7, y_filter_4_11, + offset, permute_tbl, max); + uint16x8_t d1 = convolve12_8_x(s3, s4, s5, y_filter_0_7, y_filter_4_11, + offset, permute_tbl, max); + uint16x8_t d2 = convolve12_8_x(s6, s7, s8, y_filter_0_7, y_filter_4_11, + offset, permute_tbl, max); + uint16x8_t d3 = convolve12_8_x(s9, s10, s11, y_filter_0_7, + y_filter_4_11, offset, permute_tbl, max); + + store_u16_8x4(d, dst_stride, d0, d1, d2, d3); + + s += 8; + d += 8; + w -= 8; + } while (w != 0); + src += 4 * src_stride; + dst += 4 * dst_stride; + height -= 4; + } while (height != 0); + } +} + +static INLINE uint16x8_t convolve8_8_x(int16x8_t s0[8], int16x8_t filter, + int64x2_t offset, uint16x8_t max) { + int64x2_t sum[8]; + sum[0] = aom_sdotq_s16(offset, s0[0], filter); + sum[1] = aom_sdotq_s16(offset, s0[1], filter); + sum[2] = aom_sdotq_s16(offset, s0[2], filter); + sum[3] = aom_sdotq_s16(offset, s0[3], filter); + sum[4] = aom_sdotq_s16(offset, s0[4], filter); + sum[5] = aom_sdotq_s16(offset, s0[5], filter); + sum[6] = aom_sdotq_s16(offset, s0[6], filter); + sum[7] = aom_sdotq_s16(offset, s0[7], filter); + + sum[0] = vpaddq_s64(sum[0], sum[1]); + sum[2] = vpaddq_s64(sum[2], sum[3]); + sum[4] = vpaddq_s64(sum[4], sum[5]); + sum[6] = vpaddq_s64(sum[6], sum[7]); + + int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum[0]), vmovn_s64(sum[2])); + int32x4_t sum4567 = vcombine_s32(vmovn_s64(sum[4]), vmovn_s64(sum[6])); + + uint16x8_t res = vcombine_u16(vqrshrun_n_s32(sum0123, FILTER_BITS), + vqrshrun_n_s32(sum4567, FILTER_BITS)); + + return vminq_u16(res, max); +} + +static INLINE void highbd_convolve_x_sr_8tap_sve2( + const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride, + int width, int height, const int16_t *y_filter_ptr, + ConvolveParams *conv_params, int bd) { + const uint16x8_t max = vdupq_n_u16((1 << bd) - 1); + // This shim allows to do only one rounding shift 
instead of two. + const int64_t offset = 1 << (conv_params->round_0 - 1); + const int64x2_t offset_lo = vcombine_s64((int64x1_t)(offset), vdup_n_s64(0)); + + const int16x8_t filter = vld1q_s16(y_filter_ptr); + + do { + const int16_t *s = (const int16_t *)src; + uint16_t *d = dst; + int w = width; + + do { + int16x8_t s0[8], s1[8], s2[8], s3[8]; + load_s16_8x8(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3], + &s0[4], &s0[5], &s0[6], &s0[7]); + load_s16_8x8(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3], + &s1[4], &s1[5], &s1[6], &s1[7]); + load_s16_8x8(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3], + &s2[4], &s2[5], &s2[6], &s2[7]); + load_s16_8x8(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3], + &s3[4], &s3[5], &s3[6], &s3[7]); + + uint16x8_t d0 = convolve8_8_x(s0, filter, offset_lo, max); + uint16x8_t d1 = convolve8_8_x(s1, filter, offset_lo, max); + uint16x8_t d2 = convolve8_8_x(s2, filter, offset_lo, max); + uint16x8_t d3 = convolve8_8_x(s3, filter, offset_lo, max); + + store_u16_8x4(d, dst_stride, d0, d1, d2, d3); + + s += 8; + d += 8; + w -= 8; + } while (w != 0); + src += 4 * src_stride; + dst += 4 * dst_stride; + height -= 4; + } while (height != 0); +} + +// clang-format off +DECLARE_ALIGNED(16, static const uint16_t, kDeinterleaveTbl[8]) = { + 0, 2, 4, 6, 1, 3, 5, 7, +}; +// clang-format on + +static INLINE uint16x4_t convolve4_4_x(int16x8_t s0, int16x8_t filter, + int64x2_t offset, + uint16x8x2_t permute_tbl, + uint16x4_t max) { + int16x8_t permuted_samples0 = aom_tbl_s16(s0, permute_tbl.val[0]); + int16x8_t permuted_samples1 = aom_tbl_s16(s0, permute_tbl.val[1]); + + int64x2_t sum01 = aom_svdot_lane_s16(offset, permuted_samples0, filter, 0); + int64x2_t sum23 = aom_svdot_lane_s16(offset, permuted_samples1, filter, 0); + + int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23)); + uint16x4_t res = vqrshrun_n_s32(sum0123, FILTER_BITS); + + return vmin_u16(res, max); +} + +static INLINE uint16x8_t convolve4_8_x(int16x8_t s0[4], int16x8_t filter, + int64x2_t offset, uint16x8_t tbl, + uint16x8_t max) { + int64x2_t sum04 = aom_svdot_lane_s16(offset, s0[0], filter, 0); + int64x2_t sum15 = aom_svdot_lane_s16(offset, s0[1], filter, 0); + int64x2_t sum26 = aom_svdot_lane_s16(offset, s0[2], filter, 0); + int64x2_t sum37 = aom_svdot_lane_s16(offset, s0[3], filter, 0); + + int32x4_t sum0415 = vcombine_s32(vmovn_s64(sum04), vmovn_s64(sum15)); + int32x4_t sum2637 = vcombine_s32(vmovn_s64(sum26), vmovn_s64(sum37)); + + uint16x8_t res = vcombine_u16(vqrshrun_n_s32(sum0415, FILTER_BITS), + vqrshrun_n_s32(sum2637, FILTER_BITS)); + res = aom_tbl_u16(res, tbl); + + return vminq_u16(res, max); +} + +static INLINE void highbd_convolve_x_sr_4tap_sve2( + const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride, + int width, int height, const int16_t *x_filter_ptr, + ConvolveParams *conv_params, int bd) { + // This shim allows to do only one rounding shift instead of two. 
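+ // (A sketch of why this works: pre-adding the round_0 rounding constant
+ // 1 << (round_0 - 1) here, and then letting vqrshrun_n_s32 apply its own
+ // 1 << (FILTER_BITS - 1) rounding before shifting by FILTER_BITS, matches
+ // two successive rounding shifts by round_0 and FILTER_BITS - round_0.)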
+ const int64x2_t offset = vdupq_n_s64(1 << (conv_params->round_0 - 1)); + + const int16x4_t x_filter = vld1_s16(x_filter_ptr + 2); + const int16x8_t filter = vcombine_s16(x_filter, vdup_n_s16(0)); + + if (width == 4) { + const uint16x4_t max = vdup_n_u16((1 << bd) - 1); + uint16x8x2_t permute_tbl = vld1q_u16_x2(kDotProdTbl); + + const int16_t *s = (const int16_t *)(src); + + do { + int16x8_t s0, s1, s2, s3; + load_s16_8x4(s, src_stride, &s0, &s1, &s2, &s3); + + uint16x4_t d0 = convolve4_4_x(s0, filter, offset, permute_tbl, max); + uint16x4_t d1 = convolve4_4_x(s1, filter, offset, permute_tbl, max); + uint16x4_t d2 = convolve4_4_x(s2, filter, offset, permute_tbl, max); + uint16x4_t d3 = convolve4_4_x(s3, filter, offset, permute_tbl, max); + + store_u16_4x4(dst, dst_stride, d0, d1, d2, d3); + + s += 4 * src_stride; + dst += 4 * dst_stride; + height -= 4; + } while (height != 0); + } else { + const uint16x8_t max = vdupq_n_u16((1 << bd) - 1); + uint16x8_t idx = vld1q_u16(kDeinterleaveTbl); + + do { + const int16_t *s = (const int16_t *)(src); + uint16_t *d = dst; + int w = width; + + do { + int16x8_t s0[4], s1[4], s2[4], s3[4]; + load_s16_8x4(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3]); + load_s16_8x4(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3]); + load_s16_8x4(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3]); + load_s16_8x4(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3]); + + uint16x8_t d0 = convolve4_8_x(s0, filter, offset, idx, max); + uint16x8_t d1 = convolve4_8_x(s1, filter, offset, idx, max); + uint16x8_t d2 = convolve4_8_x(s2, filter, offset, idx, max); + uint16x8_t d3 = convolve4_8_x(s3, filter, offset, idx, max); + + store_u16_8x4(d, dst_stride, d0, d1, d2, d3); + + s += 8; + d += 8; + w -= 8; + } while (w != 0); + src += 4 * src_stride; + dst += 4 * dst_stride; + height -= 4; + } while (height != 0); + } +} + +void av1_highbd_convolve_x_sr_sve2(const uint16_t *src, int src_stride, + uint16_t *dst, int dst_stride, int w, int h, + const InterpFilterParams *filter_params_x, + const int subpel_x_qn, + ConvolveParams *conv_params, int bd) { + if (w == 2 || h == 2) { + av1_highbd_convolve_x_sr_c(src, src_stride, dst, dst_stride, w, h, + filter_params_x, subpel_x_qn, conv_params, bd); + return; + } + + const int x_filter_taps = get_filter_tap(filter_params_x, subpel_x_qn); + + if (x_filter_taps == 6) { + av1_highbd_convolve_x_sr_neon(src, src_stride, dst, dst_stride, w, h, + filter_params_x, subpel_x_qn, conv_params, + bd); + return; + } + + const int horiz_offset = filter_params_x->taps / 2 - 1; + const int16_t *x_filter_ptr = av1_get_interp_filter_subpel_kernel( + filter_params_x, subpel_x_qn & SUBPEL_MASK); + + src -= horiz_offset; + + if (x_filter_taps == 12) { + highbd_convolve_x_sr_12tap_sve2(src, src_stride, dst, dst_stride, w, h, + x_filter_ptr, conv_params, bd); + return; + } + + if (x_filter_taps == 8) { + highbd_convolve_x_sr_8tap_sve2(src, src_stride, dst, dst_stride, w, h, + x_filter_ptr, conv_params, bd); + return; + } + + highbd_convolve_x_sr_4tap_sve2(src + 2, src_stride, dst, dst_stride, w, h, + x_filter_ptr, conv_params, bd); +} + +static INLINE uint16x4_t highbd_convolve12_4_y(int16x8_t s0[2], int16x8_t s1[2], + int16x8_t s2[2], + int16x8_t filter_0_7, + int16x8_t filter_4_11, + uint16x4_t max) { + int64x2_t sum[2]; + + sum[0] = aom_svdot_lane_s16(vdupq_n_s64(0), s0[0], filter_0_7, 0); + sum[0] = aom_svdot_lane_s16(sum[0], s1[0], filter_0_7, 1); + sum[0] = aom_svdot_lane_s16(sum[0], s2[0], filter_4_11, 1); + + sum[1] = 
aom_svdot_lane_s16(vdupq_n_s64(0), s0[1], filter_0_7, 0); + sum[1] = aom_svdot_lane_s16(sum[1], s1[1], filter_0_7, 1); + sum[1] = aom_svdot_lane_s16(sum[1], s2[1], filter_4_11, 1); + + int32x4_t res_s32 = vcombine_s32(vmovn_s64(sum[0]), vmovn_s64(sum[1])); + + uint16x4_t res = vqrshrun_n_s32(res_s32, FILTER_BITS); + + return vmin_u16(res, max); +} + +static INLINE void highbd_convolve_y_sr_12tap_sve2( + const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride, + int width, int height, const int16_t *y_filter_ptr, int bd) { + const int16x8_t y_filter_0_7 = vld1q_s16(y_filter_ptr); + const int16x8_t y_filter_4_11 = vld1q_s16(y_filter_ptr + 4); + + uint16x8x3_t merge_block_tbl = vld1q_u16_x3(kDotProdMergeBlockTbl); + // Scale indices by size of the true vector length to avoid reading from an + // 'undefined' portion of a vector on a system with SVE vectors > 128-bit. + uint16x8_t correction0 = + vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000000000000ULL)); + merge_block_tbl.val[0] = vaddq_u16(merge_block_tbl.val[0], correction0); + + uint16x8_t correction1 = + vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000100000000ULL)); + merge_block_tbl.val[1] = vaddq_u16(merge_block_tbl.val[1], correction1); + + uint16x8_t correction2 = + vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000100010000ULL)); + merge_block_tbl.val[2] = vaddq_u16(merge_block_tbl.val[2], correction2); + + const uint16x4_t max = vdup_n_u16((1 << bd) - 1); + + do { + int16_t *s = (int16_t *)src; + uint16_t *d = dst; + int h = height; + + int16x4_t s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA; + load_s16_4x11(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6, &s7, &s8, + &s9, &sA); + s += 11 * src_stride; + + int16x8_t s0123[2], s1234[2], s2345[2], s3456[2], s4567[2], s5678[2], + s6789[2], s789A[2]; + transpose_concat_4x4(s0, s1, s2, s3, s0123); + transpose_concat_4x4(s1, s2, s3, s4, s1234); + transpose_concat_4x4(s2, s3, s4, s5, s2345); + transpose_concat_4x4(s3, s4, s5, s6, s3456); + transpose_concat_4x4(s4, s5, s6, s7, s4567); + transpose_concat_4x4(s5, s6, s7, s8, s5678); + transpose_concat_4x4(s6, s7, s8, s9, s6789); + transpose_concat_4x4(s7, s8, s9, sA, s789A); + + do { + int16x4_t sB, sC, sD, sE; + load_s16_4x4(s, src_stride, &sB, &sC, &sD, &sE); + + int16x8_t s89AB[2], s9ABC[2], sABCD[2], sBCDE[2]; + transpose_concat_4x4(sB, sC, sD, sE, sBCDE); + + // Use the above transpose and reuse data from the previous loop to get + // the rest. + aom_tbl2x2_s16(s789A, sBCDE, merge_block_tbl.val[0], s89AB); + aom_tbl2x2_s16(s789A, sBCDE, merge_block_tbl.val[1], s9ABC); + aom_tbl2x2_s16(s789A, sBCDE, merge_block_tbl.val[2], sABCD); + + uint16x4_t d0 = highbd_convolve12_4_y(s0123, s4567, s89AB, y_filter_0_7, + y_filter_4_11, max); + uint16x4_t d1 = highbd_convolve12_4_y(s1234, s5678, s9ABC, y_filter_0_7, + y_filter_4_11, max); + uint16x4_t d2 = highbd_convolve12_4_y(s2345, s6789, sABCD, y_filter_0_7, + y_filter_4_11, max); + uint16x4_t d3 = highbd_convolve12_4_y(s3456, s789A, sBCDE, y_filter_0_7, + y_filter_4_11, max); + + store_u16_4x4(d, dst_stride, d0, d1, d2, d3); + + // Prepare block for next iteration - re-using as much as possible. + // Shuffle everything up four rows. 
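+      // The renames below slide the sample window down four rows: of the
+      // eight transposed windows needed next iteration, four already exist
+      // and four were just built by the merge tables, so only the four new
+      // rows (sB..sE) are loaded from memory each time around the loop.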
+      s0123[0] = s4567[0];
+      s0123[1] = s4567[1];
+      s1234[0] = s5678[0];
+      s1234[1] = s5678[1];
+      s2345[0] = s6789[0];
+      s2345[1] = s6789[1];
+      s3456[0] = s789A[0];
+      s3456[1] = s789A[1];
+      s4567[0] = s89AB[0];
+      s4567[1] = s89AB[1];
+      s5678[0] = s9ABC[0];
+      s5678[1] = s9ABC[1];
+      s6789[0] = sABCD[0];
+      s6789[1] = sABCD[1];
+      s789A[0] = sBCDE[0];
+      s789A[1] = sBCDE[1];
+
+      s += 4 * src_stride;
+      d += 4 * dst_stride;
+      h -= 4;
+    } while (h != 0);
+    src += 4;
+    dst += 4;
+    width -= 4;
+  } while (width != 0);
+}
+
+static INLINE uint16x4_t highbd_convolve8_4_y(int16x8_t samples_lo[2],
+                                              int16x8_t samples_hi[2],
+                                              int16x8_t filter,
+                                              uint16x4_t max) {
+  int64x2_t sum01 =
+      aom_svdot_lane_s16(vdupq_n_s64(0), samples_lo[0], filter, 0);
+  sum01 = aom_svdot_lane_s16(sum01, samples_hi[0], filter, 1);
+
+  int64x2_t sum23 =
+      aom_svdot_lane_s16(vdupq_n_s64(0), samples_lo[1], filter, 0);
+  sum23 = aom_svdot_lane_s16(sum23, samples_hi[1], filter, 1);
+
+  int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+  uint16x4_t res = vqrshrun_n_s32(sum0123, FILTER_BITS);
+  return vmin_u16(res, max);
+}
+
+static INLINE uint16x8_t highbd_convolve8_8_y(int16x8_t samples_lo[4],
+                                              int16x8_t samples_hi[4],
+                                              int16x8_t filter,
+                                              uint16x8_t max) {
+  int64x2_t sum01 =
+      aom_svdot_lane_s16(vdupq_n_s64(0), samples_lo[0], filter, 0);
+  sum01 = aom_svdot_lane_s16(sum01, samples_hi[0], filter, 1);
+
+  int64x2_t sum23 =
+      aom_svdot_lane_s16(vdupq_n_s64(0), samples_lo[1], filter, 0);
+  sum23 = aom_svdot_lane_s16(sum23, samples_hi[1], filter, 1);
+
+  int64x2_t sum45 =
+      aom_svdot_lane_s16(vdupq_n_s64(0), samples_lo[2], filter, 0);
+  sum45 = aom_svdot_lane_s16(sum45, samples_hi[2], filter, 1);
+
+  int64x2_t sum67 =
+      aom_svdot_lane_s16(vdupq_n_s64(0), samples_lo[3], filter, 0);
+  sum67 = aom_svdot_lane_s16(sum67, samples_hi[3], filter, 1);
+
+  int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+  int32x4_t sum4567 = vcombine_s32(vmovn_s64(sum45), vmovn_s64(sum67));
+  uint16x8_t res = vcombine_u16(vqrshrun_n_s32(sum0123, FILTER_BITS),
+                                vqrshrun_n_s32(sum4567, FILTER_BITS));
+  return vminq_u16(res, max);
+}
+
+void highbd_convolve_y_sr_8tap_sve2(const uint16_t *src, ptrdiff_t src_stride,
+                                    uint16_t *dst, ptrdiff_t dst_stride,
+                                    int width, int height,
+                                    const int16_t *filter_y, int bd) {
+  assert(width >= 4 && height >= 4);
+
+  const int16x8_t y_filter = vld1q_s16(filter_y);
+
+  uint16x8x3_t merge_block_tbl = vld1q_u16_x3(kDotProdMergeBlockTbl);
+  // Scale indices by size of the true vector length to avoid reading from an
+  // 'undefined' portion of a vector on a system with SVE vectors > 128-bit.
+  uint16x8_t correction0 =
+      vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000000000000ULL));
+  merge_block_tbl.val[0] = vaddq_u16(merge_block_tbl.val[0], correction0);
+
+  uint16x8_t correction1 =
+      vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000100000000ULL));
+  merge_block_tbl.val[1] = vaddq_u16(merge_block_tbl.val[1], correction1);
+
+  uint16x8_t correction2 =
+      vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000100010000ULL));
+  merge_block_tbl.val[2] = vaddq_u16(merge_block_tbl.val[2], correction2);
+
+  if (width == 4) {
+    const uint16x4_t max = vdup_n_u16((1 << bd) - 1);
+    int16_t *s = (int16_t *)src;
+
+    int16x4_t s0, s1, s2, s3, s4, s5, s6;
+    load_s16_4x7(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6);
+    s += 7 * src_stride;
+
+    // This operation combines a conventional transpose and the sample permute
+    // required before computing the dot product.
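+    // After transpose_concat_4x4, each int16x8_t holds two columns of four
+    // rows each, e.g. s0123[0] = { col0(rows 0-3), col1(rows 0-3) }. Assuming
+    // aom_svdot_lane_s16 wraps the SVE svdot_lane_s64 operation, every 64-bit
+    // accumulator lane then gathers four sample-by-tap products at once:
+    // filter lane 0 covers taps 0-3 and lane 1 covers taps 4-7 in the
+    // highbd_convolve8_4_y helper above.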
+ int16x8_t s0123[2], s1234[2], s2345[2], s3456[2]; + transpose_concat_4x4(s0, s1, s2, s3, s0123); + transpose_concat_4x4(s1, s2, s3, s4, s1234); + transpose_concat_4x4(s2, s3, s4, s5, s2345); + transpose_concat_4x4(s3, s4, s5, s6, s3456); + + do { + int16x4_t s7, s8, s9, s10; + load_s16_4x4(s, src_stride, &s7, &s8, &s9, &s10); + + int16x8_t s4567[2], s5678[2], s6789[2], s789A[2]; + // Transpose and shuffle the 4 lines that were loaded. + transpose_concat_4x4(s7, s8, s9, s10, s789A); + + // Merge new data into block from previous iteration. + aom_tbl2x2_s16(s3456, s789A, merge_block_tbl.val[0], s4567); + aom_tbl2x2_s16(s3456, s789A, merge_block_tbl.val[1], s5678); + aom_tbl2x2_s16(s3456, s789A, merge_block_tbl.val[2], s6789); + + uint16x4_t d0 = highbd_convolve8_4_y(s0123, s4567, y_filter, max); + uint16x4_t d1 = highbd_convolve8_4_y(s1234, s5678, y_filter, max); + uint16x4_t d2 = highbd_convolve8_4_y(s2345, s6789, y_filter, max); + uint16x4_t d3 = highbd_convolve8_4_y(s3456, s789A, y_filter, max); + + store_u16_4x4(dst, dst_stride, d0, d1, d2, d3); + + // Prepare block for next iteration - re-using as much as possible. + // Shuffle everything up four rows. + s0123[0] = s4567[0]; + s0123[1] = s4567[1]; + s1234[0] = s5678[0]; + s1234[1] = s5678[1]; + s2345[0] = s6789[0]; + s2345[1] = s6789[1]; + s3456[0] = s789A[0]; + s3456[1] = s789A[1]; + s += 4 * src_stride; + dst += 4 * dst_stride; + height -= 4; + } while (height != 0); + } else { + const uint16x8_t max = vdupq_n_u16((1 << bd) - 1); + + do { + int h = height; + int16_t *s = (int16_t *)src; + uint16_t *d = dst; + + int16x8_t s0, s1, s2, s3, s4, s5, s6; + load_s16_8x7(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6); + s += 7 * src_stride; + + // This operation combines a conventional transpose and the sample permute + // required before computing the dot product. + int16x8_t s0123[4], s1234[4], s2345[4], s3456[4]; + transpose_concat_8x4(s0, s1, s2, s3, s0123); + transpose_concat_8x4(s1, s2, s3, s4, s1234); + transpose_concat_8x4(s2, s3, s4, s5, s2345); + transpose_concat_8x4(s3, s4, s5, s6, s3456); + + do { + int16x8_t s7, s8, s9, s10; + load_s16_8x4(s, src_stride, &s7, &s8, &s9, &s10); + + int16x8_t s4567[4], s5678[4], s6789[4], s789A[4]; + // Transpose and shuffle the 4 lines that were loaded. + transpose_concat_8x4(s7, s8, s9, s10, s789A); + + // Merge new data into block from previous iteration. + aom_tbl2x4_s16(s3456, s789A, merge_block_tbl.val[0], s4567); + aom_tbl2x4_s16(s3456, s789A, merge_block_tbl.val[1], s5678); + aom_tbl2x4_s16(s3456, s789A, merge_block_tbl.val[2], s6789); + + uint16x8_t d0 = highbd_convolve8_8_y(s0123, s4567, y_filter, max); + uint16x8_t d1 = highbd_convolve8_8_y(s1234, s5678, y_filter, max); + uint16x8_t d2 = highbd_convolve8_8_y(s2345, s6789, y_filter, max); + uint16x8_t d3 = highbd_convolve8_8_y(s3456, s789A, y_filter, max); + + store_u16_8x4(d, dst_stride, d0, d1, d2, d3); + + // Prepare block for next iteration - re-using as much as possible. + // Shuffle everything up four rows. 
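+        // With the slide below, each 8-wide iteration performs just one
+        // transpose_concat_8x4 (for s789A) and three merge lookups; the
+        // remaining windows are recycled by register renaming, so only four
+        // new rows are loaded per four rows of output.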
+      s0123[0] = s4567[0];
+      s0123[1] = s4567[1];
+      s0123[2] = s4567[2];
+      s0123[3] = s4567[3];
+      s1234[0] = s5678[0];
+      s1234[1] = s5678[1];
+      s1234[2] = s5678[2];
+      s1234[3] = s5678[3];
+      s2345[0] = s6789[0];
+      s2345[1] = s6789[1];
+      s2345[2] = s6789[2];
+      s2345[3] = s6789[3];
+      s3456[0] = s789A[0];
+      s3456[1] = s789A[1];
+      s3456[2] = s789A[2];
+      s3456[3] = s789A[3];
+
+      s += 4 * src_stride;
+      d += 4 * dst_stride;
+      h -= 4;
+    } while (h != 0);
+    src += 8;
+    dst += 8;
+    width -= 8;
+  } while (width != 0);
+}
+
+static INLINE uint16x4_t highbd_convolve4_4_y(int16x8_t samples[2],
+                                              int16x8_t filter,
+                                              uint16x4_t max) {
+  int64x2_t sum01 = aom_svdot_lane_s16(vdupq_n_s64(0), samples[0], filter, 0);
+  int64x2_t sum23 = aom_svdot_lane_s16(vdupq_n_s64(0), samples[1], filter, 0);
+
+  int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+  uint16x4_t res = vqrshrun_n_s32(sum0123, FILTER_BITS);
+  return vmin_u16(res, max);
+}
+
+static INLINE uint16x8_t highbd_convolve4_8_y(int16x8_t samples[4],
+                                              int16x8_t filter,
+                                              uint16x8_t max) {
+  int64x2_t sum01 = aom_svdot_lane_s16(vdupq_n_s64(0), samples[0], filter, 0);
+  int64x2_t sum23 = aom_svdot_lane_s16(vdupq_n_s64(0), samples[1], filter, 0);
+  int64x2_t sum45 = aom_svdot_lane_s16(vdupq_n_s64(0), samples[2], filter, 0);
+  int64x2_t sum67 = aom_svdot_lane_s16(vdupq_n_s64(0), samples[3], filter, 0);
+
+  int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+  int32x4_t sum4567 = vcombine_s32(vmovn_s64(sum45), vmovn_s64(sum67));
+  uint16x8_t res = vcombine_u16(vqrshrun_n_s32(sum0123, FILTER_BITS),
+                                vqrshrun_n_s32(sum4567, FILTER_BITS));
+  return vminq_u16(res, max);
+}
+
+void highbd_convolve_y_sr_4tap_sve2(const uint16_t *src, ptrdiff_t src_stride,
+                                    uint16_t *dst, ptrdiff_t dst_stride,
+                                    int width, int height,
+                                    const int16_t *filter_y, int bd) {
+  assert(width >= 4 && height >= 4);
+
+  const int16x8_t y_filter =
+      vcombine_s16(vld1_s16(filter_y + 2), vdup_n_s16(0));
+
+  if (width == 4) {
+    const uint16x4_t max = vdup_n_u16((1 << bd) - 1);
+    int16_t *s = (int16_t *)src;
+
+    int16x4_t s0, s1, s2;
+    load_s16_4x3(s, src_stride, &s0, &s1, &s2);
+    s += 3 * src_stride;
+
+    do {
+      int16x4_t s3, s4, s5, s6;
+      load_s16_4x4(s, src_stride, &s3, &s4, &s5, &s6);
+
+      // This operation combines a conventional transpose and the sample permute
+      // required before computing the dot product.
+      int16x8_t s0123[2], s1234[2], s2345[2], s3456[2];
+      transpose_concat_4x4(s0, s1, s2, s3, s0123);
+      transpose_concat_4x4(s1, s2, s3, s4, s1234);
+      transpose_concat_4x4(s2, s3, s4, s5, s2345);
+      transpose_concat_4x4(s3, s4, s5, s6, s3456);
+
+      uint16x4_t d0 = highbd_convolve4_4_y(s0123, y_filter, max);
+      uint16x4_t d1 = highbd_convolve4_4_y(s1234, y_filter, max);
+      uint16x4_t d2 = highbd_convolve4_4_y(s2345, y_filter, max);
+      uint16x4_t d3 = highbd_convolve4_4_y(s3456, y_filter, max);
+
+      store_u16_4x4(dst, dst_stride, d0, d1, d2, d3);
+
+      // Shuffle everything up four rows.
+      s0 = s4;
+      s1 = s5;
+      s2 = s6;
+
+      s += 4 * src_stride;
+      dst += 4 * dst_stride;
+      height -= 4;
+    } while (height != 0);
+  } else {
+    const uint16x8_t max = vdupq_n_u16((1 << bd) - 1);
+
+    do {
+      int h = height;
+      int16_t *s = (int16_t *)src;
+      uint16_t *d = dst;
+
+      int16x8_t s0, s1, s2;
+      load_s16_8x3(s, src_stride, &s0, &s1, &s2);
+      s += 3 * src_stride;
+
+      do {
+        int16x8_t s3, s4, s5, s6;
+        load_s16_8x4(s, src_stride, &s3, &s4, &s5, &s6);
+
+        // This operation combines a conventional transpose and the sample
+        // permute required before computing the dot product.
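+        // Unlike the 8- and 12-tap kernels there is no merge-table step here:
+        // with only four taps, just three rows survive between iterations,
+        // and re-transposing from the seven live rows is presumably cheaper
+        // than keeping the previous windows around to merge from.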
+ int16x8_t s0123[4], s1234[4], s2345[4], s3456[4]; + transpose_concat_8x4(s0, s1, s2, s3, s0123); + transpose_concat_8x4(s1, s2, s3, s4, s1234); + transpose_concat_8x4(s2, s3, s4, s5, s2345); + transpose_concat_8x4(s3, s4, s5, s6, s3456); + + uint16x8_t d0 = highbd_convolve4_8_y(s0123, y_filter, max); + uint16x8_t d1 = highbd_convolve4_8_y(s1234, y_filter, max); + uint16x8_t d2 = highbd_convolve4_8_y(s2345, y_filter, max); + uint16x8_t d3 = highbd_convolve4_8_y(s3456, y_filter, max); + + store_u16_8x4(d, dst_stride, d0, d1, d2, d3); + + // Shuffle everything up four rows. + s0 = s4; + s1 = s5; + s2 = s6; + + s += 4 * src_stride; + d += 4 * dst_stride; + h -= 4; + } while (h != 0); + src += 8; + dst += 8; + width -= 8; + } while (width != 0); + } +} + +void av1_highbd_convolve_y_sr_sve2(const uint16_t *src, int src_stride, + uint16_t *dst, int dst_stride, int w, int h, + const InterpFilterParams *filter_params_y, + const int subpel_y_qn, int bd) { + if (w == 2 || h == 2) { + av1_highbd_convolve_y_sr_c(src, src_stride, dst, dst_stride, w, h, + filter_params_y, subpel_y_qn, bd); + return; + } + const int y_filter_taps = get_filter_tap(filter_params_y, subpel_y_qn); + + if (y_filter_taps == 6) { + av1_highbd_convolve_y_sr_neon(src, src_stride, dst, dst_stride, w, h, + filter_params_y, subpel_y_qn, bd); + return; + } + + const int vert_offset = filter_params_y->taps / 2 - 1; + const int16_t *y_filter_ptr = av1_get_interp_filter_subpel_kernel( + filter_params_y, subpel_y_qn & SUBPEL_MASK); + + src -= vert_offset * src_stride; + + if (y_filter_taps > 8) { + highbd_convolve_y_sr_12tap_sve2(src, src_stride, dst, dst_stride, w, h, + y_filter_ptr, bd); + return; + } + + if (y_filter_taps == 4) { + highbd_convolve_y_sr_4tap_sve2(src + 2 * src_stride, src_stride, dst, + dst_stride, w, h, y_filter_ptr, bd); + return; + } + + highbd_convolve_y_sr_8tap_sve2(src, src_stride, dst, dst_stride, w, h, + y_filter_ptr, bd); +} + +static INLINE uint16x4_t convolve12_4_2d_h( + int16x8_t s0, int16x8_t s1, int16x8_t filter_0_7, int16x8_t filter_4_11, + const int64x2_t offset, int32x4_t shift, uint16x8x4_t permute_tbl) { + int16x8_t permuted_samples[6]; + permuted_samples[0] = aom_tbl_s16(s0, permute_tbl.val[0]); + permuted_samples[1] = aom_tbl_s16(s0, permute_tbl.val[1]); + permuted_samples[2] = aom_tbl2_s16(s0, s1, permute_tbl.val[2]); + permuted_samples[3] = aom_tbl2_s16(s0, s1, permute_tbl.val[3]); + permuted_samples[4] = aom_tbl_s16(s1, permute_tbl.val[0]); + permuted_samples[5] = aom_tbl_s16(s1, permute_tbl.val[1]); + + int64x2_t sum01 = + aom_svdot_lane_s16(offset, permuted_samples[0], filter_0_7, 0); + sum01 = aom_svdot_lane_s16(sum01, permuted_samples[2], filter_0_7, 1); + sum01 = aom_svdot_lane_s16(sum01, permuted_samples[4], filter_4_11, 1); + + int64x2_t sum23 = + aom_svdot_lane_s16(offset, permuted_samples[1], filter_0_7, 0); + sum23 = aom_svdot_lane_s16(sum23, permuted_samples[3], filter_0_7, 1); + sum23 = aom_svdot_lane_s16(sum23, permuted_samples[5], filter_4_11, 1); + + int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23)); + sum0123 = vqrshlq_s32(sum0123, shift); + return vqmovun_s32(sum0123); +} + +static INLINE uint16x8_t convolve12_8_2d_h(int16x8_t s0, int16x8_t s1, + int16x8_t s2, int16x8_t filter_0_7, + int16x8_t filter_4_11, + int64x2_t offset, int32x4_t shift, + uint16x8x4_t permute_tbl) { + int16x8_t permuted_samples[8]; + permuted_samples[0] = aom_tbl_s16(s0, permute_tbl.val[0]); + permuted_samples[1] = aom_tbl_s16(s0, permute_tbl.val[1]); + permuted_samples[2] = 
aom_tbl2_s16(s0, s1, permute_tbl.val[2]); + permuted_samples[3] = aom_tbl2_s16(s0, s1, permute_tbl.val[3]); + permuted_samples[4] = aom_tbl_s16(s1, permute_tbl.val[0]); + permuted_samples[5] = aom_tbl_s16(s1, permute_tbl.val[1]); + permuted_samples[6] = aom_tbl2_s16(s1, s2, permute_tbl.val[2]); + permuted_samples[7] = aom_tbl2_s16(s1, s2, permute_tbl.val[3]); + + int64x2_t sum01 = + aom_svdot_lane_s16(offset, permuted_samples[0], filter_0_7, 0); + sum01 = aom_svdot_lane_s16(sum01, permuted_samples[2], filter_0_7, 1); + sum01 = aom_svdot_lane_s16(sum01, permuted_samples[4], filter_4_11, 1); + + int64x2_t sum23 = + aom_svdot_lane_s16(offset, permuted_samples[1], filter_0_7, 0); + sum23 = aom_svdot_lane_s16(sum23, permuted_samples[3], filter_0_7, 1); + sum23 = aom_svdot_lane_s16(sum23, permuted_samples[5], filter_4_11, 1); + + int64x2_t sum45 = + aom_svdot_lane_s16(offset, permuted_samples[2], filter_0_7, 0); + sum45 = aom_svdot_lane_s16(sum45, permuted_samples[4], filter_0_7, 1); + sum45 = aom_svdot_lane_s16(sum45, permuted_samples[6], filter_4_11, 1); + + int64x2_t sum67 = + aom_svdot_lane_s16(offset, permuted_samples[3], filter_0_7, 0); + sum67 = aom_svdot_lane_s16(sum67, permuted_samples[5], filter_0_7, 1); + sum67 = aom_svdot_lane_s16(sum67, permuted_samples[7], filter_4_11, 1); + + int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23)); + int32x4_t sum4567 = vcombine_s32(vmovn_s64(sum45), vmovn_s64(sum67)); + + sum0123 = vqrshlq_s32(sum0123, shift); + sum4567 = vqrshlq_s32(sum4567, shift); + + return vcombine_u16(vqmovun_s32(sum0123), vqmovun_s32(sum4567)); +} + +static INLINE void highbd_convolve_2d_sr_horiz_12tap_sve2( + const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride, + int width, int height, const int16_t *y_filter_ptr, + ConvolveParams *conv_params, const int x_offset) { + const int64x2_t offset = vdupq_n_s64(x_offset); + const int32x4_t shift = vdupq_n_s32(-conv_params->round_0); + + const int16x8_t y_filter_0_7 = vld1q_s16(y_filter_ptr); + const int16x8_t y_filter_4_11 = vld1q_s16(y_filter_ptr + 4); + + uint16x8x4_t permute_tbl = vld1q_u16_x4(kDotProdTbl); + // Scale indices by size of the true vector length to avoid reading from an + // 'undefined' portion of a vector on a system with SVE vectors > 128-bit. 
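+  // A two-vector TBL treats its operands as one concatenated vector, so an
+  // entry that should select element i of the second operand must hold
+  // svcnth() + i rather than the NEON-style 8 + i: when the vector length
+  // exceeds 128 bits, the NEON data only occupies the low half of each SVE
+  // register. The constants below add svcnth() to exactly those lanes of the
+  // table that index into the second operand.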
+ uint16x8_t correction0 = vreinterpretq_u16_u64(vcombine_u64( + vdup_n_u64(0), vdup_n_u64(svcnth() * 0x0001000000000000ULL))); + permute_tbl.val[2] = vaddq_u16(permute_tbl.val[2], correction0); + + uint16x8_t correction1 = vreinterpretq_u16_u64( + vcombine_u64(vdup_n_u64(svcnth() * 0x0001000100000000ULL), + vdup_n_u64(svcnth() * 0x0001000100010000ULL))); + permute_tbl.val[3] = vaddq_u16(permute_tbl.val[3], correction1); + + if (width == 4) { + const int16_t *s = (const int16_t *)src; + + do { + int16x8_t s0, s1, s2, s3, s4, s5, s6, s7; + load_s16_8x4(s, src_stride, &s0, &s2, &s4, &s6); + load_s16_8x4(s + 8, src_stride, &s1, &s3, &s5, &s7); + + uint16x4_t d0 = convolve12_4_2d_h(s0, s1, y_filter_0_7, y_filter_4_11, + offset, shift, permute_tbl); + uint16x4_t d1 = convolve12_4_2d_h(s2, s3, y_filter_0_7, y_filter_4_11, + offset, shift, permute_tbl); + uint16x4_t d2 = convolve12_4_2d_h(s4, s5, y_filter_0_7, y_filter_4_11, + offset, shift, permute_tbl); + uint16x4_t d3 = convolve12_4_2d_h(s6, s7, y_filter_0_7, y_filter_4_11, + offset, shift, permute_tbl); + + store_u16_4x4(dst, dst_stride, d0, d1, d2, d3); + + dst += 4 * dst_stride; + s += 4 * src_stride; + height -= 4; + } while (height > 0); + } else { + do { + const int16_t *s = (const int16_t *)src; + uint16_t *d = dst; + int w = width; + + do { + int16x8_t s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11; + load_s16_8x4(s, src_stride, &s0, &s3, &s6, &s9); + load_s16_8x4(s + 8, src_stride, &s1, &s4, &s7, &s10); + load_s16_8x4(s + 16, src_stride, &s2, &s5, &s8, &s11); + + uint16x8_t d0 = + convolve12_8_2d_h(s0, s1, s2, y_filter_0_7, y_filter_4_11, offset, + shift, permute_tbl); + uint16x8_t d1 = + convolve12_8_2d_h(s3, s4, s5, y_filter_0_7, y_filter_4_11, offset, + shift, permute_tbl); + uint16x8_t d2 = + convolve12_8_2d_h(s6, s7, s8, y_filter_0_7, y_filter_4_11, offset, + shift, permute_tbl); + uint16x8_t d3 = + convolve12_8_2d_h(s9, s10, s11, y_filter_0_7, y_filter_4_11, offset, + shift, permute_tbl); + + store_u16_8x4(d, dst_stride, d0, d1, d2, d3); + + s += 8; + d += 8; + w -= 8; + } while (w != 0); + src += 4 * src_stride; + dst += 4 * dst_stride; + height -= 4; + } while (height > 0); + } +} + +static INLINE uint16x8_t convolve8_8_2d_h(int16x8_t s0[8], int16x8_t filter, + int64x2_t offset, int32x4_t shift) { + int64x2_t sum[8]; + sum[0] = aom_sdotq_s16(offset, s0[0], filter); + sum[1] = aom_sdotq_s16(offset, s0[1], filter); + sum[2] = aom_sdotq_s16(offset, s0[2], filter); + sum[3] = aom_sdotq_s16(offset, s0[3], filter); + sum[4] = aom_sdotq_s16(offset, s0[4], filter); + sum[5] = aom_sdotq_s16(offset, s0[5], filter); + sum[6] = aom_sdotq_s16(offset, s0[6], filter); + sum[7] = aom_sdotq_s16(offset, s0[7], filter); + + sum[0] = vpaddq_s64(sum[0], sum[1]); + sum[2] = vpaddq_s64(sum[2], sum[3]); + sum[4] = vpaddq_s64(sum[4], sum[5]); + sum[6] = vpaddq_s64(sum[6], sum[7]); + + int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum[0]), vmovn_s64(sum[2])); + int32x4_t sum4567 = vcombine_s32(vmovn_s64(sum[4]), vmovn_s64(sum[6])); + + sum0123 = vqrshlq_s32(sum0123, shift); + sum4567 = vqrshlq_s32(sum4567, shift); + + return vcombine_u16(vqmovun_s32(sum0123), vqmovun_s32(sum4567)); +} + +static INLINE void highbd_convolve_2d_sr_horiz_8tap_sve2( + const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride, + int width, int height, const int16_t *y_filter_ptr, + ConvolveParams *conv_params, const int x_offset) { + const int64x2_t offset = vdupq_n_s64(x_offset); + const int64x2_t offset_lo = vcombine_s64(vget_low_s64(offset), vdup_n_s64(0)); + 
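+  // aom_sdotq_s16 is assumed to accumulate four int16 products into each
+  // 64-bit lane of its accumulator, so seeding lane 0 with x_offset and
+  // lane 1 with zero means that after the pairwise vpaddq_s64 additions in
+  // convolve8_8_2d_h every output sum contains the offset exactly once.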
const int32x4_t shift = vdupq_n_s32(-conv_params->round_0); + + const int16x8_t filter = vld1q_s16(y_filter_ptr); + + do { + const int16_t *s = (const int16_t *)src; + uint16_t *d = dst; + int w = width; + + do { + int16x8_t s0[8], s1[8], s2[8], s3[8]; + load_s16_8x8(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3], + &s0[4], &s0[5], &s0[6], &s0[7]); + load_s16_8x8(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3], + &s1[4], &s1[5], &s1[6], &s1[7]); + load_s16_8x8(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3], + &s2[4], &s2[5], &s2[6], &s2[7]); + load_s16_8x8(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3], + &s3[4], &s3[5], &s3[6], &s3[7]); + + uint16x8_t d0 = convolve8_8_2d_h(s0, filter, offset_lo, shift); + uint16x8_t d1 = convolve8_8_2d_h(s1, filter, offset_lo, shift); + uint16x8_t d2 = convolve8_8_2d_h(s2, filter, offset_lo, shift); + uint16x8_t d3 = convolve8_8_2d_h(s3, filter, offset_lo, shift); + + store_u16_8x4(d, dst_stride, d0, d1, d2, d3); + + s += 8; + d += 8; + w -= 8; + } while (w != 0); + src += 4 * src_stride; + dst += 4 * dst_stride; + height -= 4; + } while (height > 0); +} + +static INLINE uint16x4_t convolve4_4_2d_h(int16x8_t s0, int16x8_t filter, + int64x2_t offset, int32x4_t shift, + uint16x8x2_t permute_tbl) { + int16x8_t permuted_samples0 = aom_tbl_s16(s0, permute_tbl.val[0]); + int16x8_t permuted_samples1 = aom_tbl_s16(s0, permute_tbl.val[1]); + + int64x2_t sum01 = aom_svdot_lane_s16(offset, permuted_samples0, filter, 0); + int64x2_t sum23 = aom_svdot_lane_s16(offset, permuted_samples1, filter, 0); + + int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23)); + sum0123 = vqrshlq_s32(sum0123, shift); + return vqmovun_s32(sum0123); +} + +static INLINE uint16x8_t convolve4_8_2d_h(int16x8_t s0[8], int16x8_t filter, + int64x2_t offset, int32x4_t shift, + uint16x8_t tbl) { + int64x2_t sum04 = aom_svdot_lane_s16(offset, s0[0], filter, 0); + int64x2_t sum15 = aom_svdot_lane_s16(offset, s0[1], filter, 0); + int64x2_t sum26 = aom_svdot_lane_s16(offset, s0[2], filter, 0); + int64x2_t sum37 = aom_svdot_lane_s16(offset, s0[3], filter, 0); + + int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum04), vmovn_s64(sum15)); + int32x4_t sum4567 = vcombine_s32(vmovn_s64(sum26), vmovn_s64(sum37)); + + sum0123 = vqrshlq_s32(sum0123, shift); + sum4567 = vqrshlq_s32(sum4567, shift); + + uint16x8_t res = vcombine_u16(vqmovun_s32(sum0123), vqmovun_s32(sum4567)); + return aom_tbl_u16(res, tbl); +} + +static INLINE void highbd_convolve_2d_sr_horiz_4tap_sve2( + const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride, + int width, int height, const int16_t *x_filter_ptr, + ConvolveParams *conv_params, const int x_offset) { + const int64x2_t offset = vdupq_n_s64(x_offset); + const int32x4_t shift = vdupq_n_s32(-conv_params->round_0); + + const int16x4_t x_filter = vld1_s16(x_filter_ptr + 2); + const int16x8_t filter = vcombine_s16(x_filter, vdup_n_s16(0)); + + if (width == 4) { + const int16_t *s = (const int16_t *)(src); + + uint16x8x2_t permute_tbl = vld1q_u16_x2(kDotProdTbl); + + do { + int16x8_t s0, s1, s2, s3; + load_s16_8x4(s, src_stride, &s0, &s1, &s2, &s3); + + uint16x4_t d0 = convolve4_4_2d_h(s0, filter, offset, shift, permute_tbl); + uint16x4_t d1 = convolve4_4_2d_h(s1, filter, offset, shift, permute_tbl); + uint16x4_t d2 = convolve4_4_2d_h(s2, filter, offset, shift, permute_tbl); + uint16x4_t d3 = convolve4_4_2d_h(s3, filter, offset, shift, permute_tbl); + + store_u16_4x4(dst, dst_stride, d0, d1, d2, d3); + + s += 4 * src_stride; + dst += 4 * 
dst_stride; + height -= 4; + } while (height > 0); + } else { + uint16x8_t idx = vld1q_u16(kDeinterleaveTbl); + + do { + const int16_t *s = (const int16_t *)(src); + uint16_t *d = dst; + int w = width; + + do { + int16x8_t s0[8], s1[8], s2[8], s3[8]; + load_s16_8x8(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3], + &s0[4], &s0[5], &s0[6], &s0[7]); + load_s16_8x8(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3], + &s1[4], &s1[5], &s1[6], &s1[7]); + load_s16_8x8(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3], + &s2[4], &s2[5], &s2[6], &s2[7]); + load_s16_8x8(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3], + &s3[4], &s3[5], &s3[6], &s3[7]); + + uint16x8_t d0 = convolve4_8_2d_h(s0, filter, offset, shift, idx); + uint16x8_t d1 = convolve4_8_2d_h(s1, filter, offset, shift, idx); + uint16x8_t d2 = convolve4_8_2d_h(s2, filter, offset, shift, idx); + uint16x8_t d3 = convolve4_8_2d_h(s3, filter, offset, shift, idx); + + store_u16_8x4(d, dst_stride, d0, d1, d2, d3); + + s += 8; + d += 8; + w -= 8; + } while (w != 0); + src += 4 * src_stride; + dst += 4 * dst_stride; + height -= 4; + } while (height > 0); + } +} + +static INLINE uint16x4_t highbd_convolve12_4_2d_v( + int16x8_t s0[2], int16x8_t s1[2], int16x8_t s2[2], int16x8_t filter_0_7, + int16x8_t filter_4_11, int32x4_t shift, int64x2_t offset, uint16x4_t max) { + int64x2_t sum01 = aom_svdot_lane_s16(offset, s0[0], filter_0_7, 0); + sum01 = aom_svdot_lane_s16(sum01, s1[0], filter_0_7, 1); + sum01 = aom_svdot_lane_s16(sum01, s2[0], filter_4_11, 1); + + int64x2_t sum23 = aom_svdot_lane_s16(offset, s0[1], filter_0_7, 0); + sum23 = aom_svdot_lane_s16(sum23, s1[1], filter_0_7, 1); + sum23 = aom_svdot_lane_s16(sum23, s2[1], filter_4_11, 1); + + int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23)); + sum0123 = vshlq_s32(sum0123, shift); + + uint16x4_t res = vqmovun_s32(sum0123); + + return vmin_u16(res, max); +} + +static INLINE void highbd_convolve_2d_sr_vert_12tap_sve2( + const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride, + int width, int height, const int16_t *y_filter_ptr, + ConvolveParams *conv_params, int bd, const int y_offset) { + const int64x2_t offset = vdupq_n_s64(y_offset); + const int32x4_t shift = vdupq_n_s32(-conv_params->round_1); + + const int16x8_t y_filter_0_7 = vld1q_s16(y_filter_ptr); + const int16x8_t y_filter_4_11 = vld1q_s16(y_filter_ptr + 4); + + uint16x8x3_t merge_block_tbl = vld1q_u16_x3(kDotProdMergeBlockTbl); + // Scale indices by size of the true vector length to avoid reading from an + // 'undefined' portion of a vector on a system with SVE vectors > 128-bit. 
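+  // Worked example for the first table, on a 128-bit vector (svcnth() == 8):
+  // val[0] = { 1, 2, 3, 0, 5, 6, 7, 4 } becomes { 1, 2, 3, 8, 5, 6, 7, 12 },
+  // i.e. keep three columns of the old transposed block and take the new
+  // fourth column from elements 0 and 4 of the second operand. On longer
+  // vectors only the svcnth() scaling differs; the same elements are chosen.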
+ uint16x8_t correction0 = + vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000000000000ULL)); + merge_block_tbl.val[0] = vaddq_u16(merge_block_tbl.val[0], correction0); + + uint16x8_t correction1 = + vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000100000000ULL)); + merge_block_tbl.val[1] = vaddq_u16(merge_block_tbl.val[1], correction1); + + uint16x8_t correction2 = + vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000100010000ULL)); + merge_block_tbl.val[2] = vaddq_u16(merge_block_tbl.val[2], correction2); + + const uint16x4_t max = vdup_n_u16((1 << bd) - 1); + + do { + int16_t *s = (int16_t *)src; + uint16_t *d = (uint16_t *)dst; + int h = height; + + int16x4_t s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA; + load_s16_4x11(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6, &s7, &s8, + &s9, &sA); + s += 11 * src_stride; + + int16x8_t s0123[2], s1234[2], s2345[2], s3456[2], s4567[2], s5678[2], + s6789[2], s789A[2]; + // This operation combines a conventional transpose and the sample permute + // required before computing the dot product. + transpose_concat_4x4(s0, s1, s2, s3, s0123); + transpose_concat_4x4(s1, s2, s3, s4, s1234); + transpose_concat_4x4(s2, s3, s4, s5, s2345); + transpose_concat_4x4(s3, s4, s5, s6, s3456); + transpose_concat_4x4(s4, s5, s6, s7, s4567); + transpose_concat_4x4(s5, s6, s7, s8, s5678); + transpose_concat_4x4(s6, s7, s8, s9, s6789); + transpose_concat_4x4(s7, s8, s9, sA, s789A); + + do { + int16x4_t sB, sC, sD, sE; + load_s16_4x4(s, src_stride, &sB, &sC, &sD, &sE); + + int16x8_t s89AB[2], s9ABC[2], sABCD[2], sBCDE[2]; + transpose_concat_4x4(sB, sC, sD, sE, sBCDE); + + // Use the above transpose and reuse data from the previous loop to get + // the rest. + aom_tbl2x2_s16(s789A, sBCDE, merge_block_tbl.val[0], s89AB); + aom_tbl2x2_s16(s789A, sBCDE, merge_block_tbl.val[1], s9ABC); + aom_tbl2x2_s16(s789A, sBCDE, merge_block_tbl.val[2], sABCD); + + uint16x4_t d0 = highbd_convolve12_4_2d_v( + s0123, s4567, s89AB, y_filter_0_7, y_filter_4_11, shift, offset, max); + uint16x4_t d1 = highbd_convolve12_4_2d_v( + s1234, s5678, s9ABC, y_filter_0_7, y_filter_4_11, shift, offset, max); + uint16x4_t d2 = highbd_convolve12_4_2d_v( + s2345, s6789, sABCD, y_filter_0_7, y_filter_4_11, shift, offset, max); + uint16x4_t d3 = highbd_convolve12_4_2d_v( + s3456, s789A, sBCDE, y_filter_0_7, y_filter_4_11, shift, offset, max); + + store_u16_4x4(d, dst_stride, d0, d1, d2, d3); + + // Prepare block for next iteration - re-using as much as possible. + // Shuffle everything up four rows. 
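+      // Note that highbd_convolve12_4_2d_v shifts with a plain, truncating
+      // vshlq_s32: rounding is still correct because the caller folds the
+      // (1 << (round_1 - 1)) term into y_offset, which seeds every
+      // accumulator above.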
+      s0123[0] = s4567[0];
+      s0123[1] = s4567[1];
+      s1234[0] = s5678[0];
+      s1234[1] = s5678[1];
+      s2345[0] = s6789[0];
+      s2345[1] = s6789[1];
+      s3456[0] = s789A[0];
+      s3456[1] = s789A[1];
+      s4567[0] = s89AB[0];
+      s4567[1] = s89AB[1];
+      s5678[0] = s9ABC[0];
+      s5678[1] = s9ABC[1];
+      s6789[0] = sABCD[0];
+      s6789[1] = sABCD[1];
+      s789A[0] = sBCDE[0];
+      s789A[1] = sBCDE[1];
+
+      s += 4 * src_stride;
+      d += 4 * dst_stride;
+      h -= 4;
+    } while (h != 0);
+    src += 4;
+    dst += 4;
+    width -= 4;
+  } while (width != 0);
+}
+
+static INLINE uint16x4_t highbd_convolve8_4_2d_v(
+    int16x8_t samples_lo[2], int16x8_t samples_hi[2], int16x8_t filter,
+    int32x4_t shift, int64x2_t offset, uint16x4_t max) {
+  int64x2_t sum01 = aom_svdot_lane_s16(offset, samples_lo[0], filter, 0);
+  sum01 = aom_svdot_lane_s16(sum01, samples_hi[0], filter, 1);
+
+  int64x2_t sum23 = aom_svdot_lane_s16(offset, samples_lo[1], filter, 0);
+  sum23 = aom_svdot_lane_s16(sum23, samples_hi[1], filter, 1);
+
+  int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+  sum0123 = vshlq_s32(sum0123, shift);
+
+  uint16x4_t res = vqmovun_s32(sum0123);
+  return vmin_u16(res, max);
+}
+
+static INLINE uint16x8_t highbd_convolve8_8_2d_v(
+    int16x8_t samples_lo[4], int16x8_t samples_hi[4], int16x8_t filter,
+    int32x4_t shift, int64x2_t offset, uint16x8_t max) {
+  int64x2_t sum01 = aom_svdot_lane_s16(offset, samples_lo[0], filter, 0);
+  sum01 = aom_svdot_lane_s16(sum01, samples_hi[0], filter, 1);
+
+  int64x2_t sum23 = aom_svdot_lane_s16(offset, samples_lo[1], filter, 0);
+  sum23 = aom_svdot_lane_s16(sum23, samples_hi[1], filter, 1);
+
+  int64x2_t sum45 = aom_svdot_lane_s16(offset, samples_lo[2], filter, 0);
+  sum45 = aom_svdot_lane_s16(sum45, samples_hi[2], filter, 1);
+
+  int64x2_t sum67 = aom_svdot_lane_s16(offset, samples_lo[3], filter, 0);
+  sum67 = aom_svdot_lane_s16(sum67, samples_hi[3], filter, 1);
+
+  int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+  int32x4_t sum4567 = vcombine_s32(vmovn_s64(sum45), vmovn_s64(sum67));
+
+  sum0123 = vshlq_s32(sum0123, shift);
+  sum4567 = vshlq_s32(sum4567, shift);
+
+  uint16x8_t res = vcombine_u16(vqmovun_s32(sum0123), vqmovun_s32(sum4567));
+  return vminq_u16(res, max);
+}
+
+void highbd_convolve_2d_sr_vert_8tap_sve2(const uint16_t *src,
+                                          ptrdiff_t src_stride, uint16_t *dst,
+                                          ptrdiff_t dst_stride, int width,
+                                          int height, const int16_t *filter_y,
+                                          ConvolveParams *conv_params, int bd,
+                                          const int y_offset) {
+  assert(width >= 4 && height >= 4);
+  const int64x2_t offset = vdupq_n_s64(y_offset);
+  const int32x4_t shift = vdupq_n_s32(-conv_params->round_1);
+  const int16x8_t y_filter = vld1q_s16(filter_y);
+
+  uint16x8x3_t merge_block_tbl = vld1q_u16_x3(kDotProdMergeBlockTbl);
+  // Scale indices by size of the true vector length to avoid reading from an
+  // 'undefined' portion of a vector on a system with SVE vectors > 128-bit.
+ uint16x8_t correction0 = + vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000000000000ULL)); + merge_block_tbl.val[0] = vaddq_u16(merge_block_tbl.val[0], correction0); + + uint16x8_t correction1 = + vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000100000000ULL)); + merge_block_tbl.val[1] = vaddq_u16(merge_block_tbl.val[1], correction1); + + uint16x8_t correction2 = + vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000100010000ULL)); + merge_block_tbl.val[2] = vaddq_u16(merge_block_tbl.val[2], correction2); + + if (width == 4) { + const uint16x4_t max = vdup_n_u16((1 << bd) - 1); + int16_t *s = (int16_t *)src; + + int16x4_t s0, s1, s2, s3, s4, s5, s6; + load_s16_4x7(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6); + s += 7 * src_stride; + + // This operation combines a conventional transpose and the sample permute + // required before computing the dot product. + int16x8_t s0123[2], s1234[2], s2345[2], s3456[2]; + transpose_concat_4x4(s0, s1, s2, s3, s0123); + transpose_concat_4x4(s1, s2, s3, s4, s1234); + transpose_concat_4x4(s2, s3, s4, s5, s2345); + transpose_concat_4x4(s3, s4, s5, s6, s3456); + + do { + int16x4_t s7, s8, s9, s10; + load_s16_4x4(s, src_stride, &s7, &s8, &s9, &s10); + + int16x8_t s4567[2], s5678[2], s6789[2], s789A[2]; + // Transpose and shuffle the 4 lines that were loaded. + transpose_concat_4x4(s7, s8, s9, s10, s789A); + + // Merge new data into block from previous iteration. + aom_tbl2x2_s16(s3456, s789A, merge_block_tbl.val[0], s4567); + aom_tbl2x2_s16(s3456, s789A, merge_block_tbl.val[1], s5678); + aom_tbl2x2_s16(s3456, s789A, merge_block_tbl.val[2], s6789); + + uint16x4_t d0 = + highbd_convolve8_4_2d_v(s0123, s4567, y_filter, shift, offset, max); + uint16x4_t d1 = + highbd_convolve8_4_2d_v(s1234, s5678, y_filter, shift, offset, max); + uint16x4_t d2 = + highbd_convolve8_4_2d_v(s2345, s6789, y_filter, shift, offset, max); + uint16x4_t d3 = + highbd_convolve8_4_2d_v(s3456, s789A, y_filter, shift, offset, max); + + store_u16_4x4(dst, dst_stride, d0, d1, d2, d3); + + // Prepare block for next iteration - re-using as much as possible. + // Shuffle everything up four rows. + s0123[0] = s4567[0]; + s0123[1] = s4567[1]; + s1234[0] = s5678[0]; + s1234[1] = s5678[1]; + s2345[0] = s6789[0]; + s2345[1] = s6789[1]; + s3456[0] = s789A[0]; + s3456[1] = s789A[1]; + + s += 4 * src_stride; + dst += 4 * dst_stride; + height -= 4; + } while (height != 0); + } else { + const uint16x8_t max = vdupq_n_u16((1 << bd) - 1); + + do { + int h = height; + int16_t *s = (int16_t *)src; + uint16_t *d = dst; + + int16x8_t s0, s1, s2, s3, s4, s5, s6; + load_s16_8x7(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6); + s += 7 * src_stride; + + // This operation combines a conventional transpose and the sample permute + // required before computing the dot product. + int16x8_t s0123[4], s1234[4], s2345[4], s3456[4]; + transpose_concat_8x4(s0, s1, s2, s3, s0123); + transpose_concat_8x4(s1, s2, s3, s4, s1234); + transpose_concat_8x4(s2, s3, s4, s5, s2345); + transpose_concat_8x4(s3, s4, s5, s6, s3456); + + do { + int16x8_t s7, s8, s9, s10; + load_s16_8x4(s, src_stride, &s7, &s8, &s9, &s10); + + int16x8_t s4567[4], s5678[4], s6789[4], s789A[4]; + // Transpose and shuffle the 4 lines that were loaded. + transpose_concat_8x4(s7, s8, s9, s10, s789A); + + // Merge new data into block from previous iteration. 
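+        // Each lookup selects, per window, which columns come from the old
+        // block (s3456) and which from the new one (s789A): s4567 keeps
+        // three old columns and one new, s5678 two and two, s6789 one and
+        // three.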
+        aom_tbl2x4_s16(s3456, s789A, merge_block_tbl.val[0], s4567);
+        aom_tbl2x4_s16(s3456, s789A, merge_block_tbl.val[1], s5678);
+        aom_tbl2x4_s16(s3456, s789A, merge_block_tbl.val[2], s6789);
+
+        uint16x8_t d0 =
+            highbd_convolve8_8_2d_v(s0123, s4567, y_filter, shift, offset, max);
+        uint16x8_t d1 =
+            highbd_convolve8_8_2d_v(s1234, s5678, y_filter, shift, offset, max);
+        uint16x8_t d2 =
+            highbd_convolve8_8_2d_v(s2345, s6789, y_filter, shift, offset, max);
+        uint16x8_t d3 =
+            highbd_convolve8_8_2d_v(s3456, s789A, y_filter, shift, offset, max);
+
+        store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+        // Prepare block for next iteration - re-using as much as possible.
+        // Shuffle everything up four rows.
+        s0123[0] = s4567[0];
+        s0123[1] = s4567[1];
+        s0123[2] = s4567[2];
+        s0123[3] = s4567[3];
+        s1234[0] = s5678[0];
+        s1234[1] = s5678[1];
+        s1234[2] = s5678[2];
+        s1234[3] = s5678[3];
+        s2345[0] = s6789[0];
+        s2345[1] = s6789[1];
+        s2345[2] = s6789[2];
+        s2345[3] = s6789[3];
+        s3456[0] = s789A[0];
+        s3456[1] = s789A[1];
+        s3456[2] = s789A[2];
+        s3456[3] = s789A[3];
+
+        s += 4 * src_stride;
+        d += 4 * dst_stride;
+        h -= 4;
+      } while (h != 0);
+      src += 8;
+      dst += 8;
+      width -= 8;
+    } while (width != 0);
+  }
+}
+
+static INLINE uint16x4_t highbd_convolve4_4_2d_v(int16x8_t samples[2],
+                                                 int16x8_t filter,
+                                                 int32x4_t shift,
+                                                 int64x2_t offset,
+                                                 uint16x4_t max) {
+  int64x2_t sum01 = aom_svdot_lane_s16(offset, samples[0], filter, 0);
+  int64x2_t sum23 = aom_svdot_lane_s16(offset, samples[1], filter, 0);
+
+  int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+  sum0123 = vshlq_s32(sum0123, shift);
+
+  uint16x4_t res = vqmovun_s32(sum0123);
+  return vmin_u16(res, max);
+}
+
+static INLINE uint16x8_t highbd_convolve4_8_2d_v(int16x8_t samples[4],
+                                                 int16x8_t filter,
+                                                 int32x4_t shift,
+                                                 int64x2_t offset,
+                                                 uint16x8_t max) {
+  int64x2_t sum01 = aom_svdot_lane_s16(offset, samples[0], filter, 0);
+  int64x2_t sum23 = aom_svdot_lane_s16(offset, samples[1], filter, 0);
+  int64x2_t sum45 = aom_svdot_lane_s16(offset, samples[2], filter, 0);
+  int64x2_t sum67 = aom_svdot_lane_s16(offset, samples[3], filter, 0);
+
+  int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+  int32x4_t sum4567 = vcombine_s32(vmovn_s64(sum45), vmovn_s64(sum67));
+
+  sum0123 = vshlq_s32(sum0123, shift);
+  sum4567 = vshlq_s32(sum4567, shift);
+
+  uint16x8_t res = vcombine_u16(vqmovun_s32(sum0123), vqmovun_s32(sum4567));
+  return vminq_u16(res, max);
+}
+
+void highbd_convolve_2d_sr_vert_4tap_sve2(const uint16_t *src,
+                                          ptrdiff_t src_stride, uint16_t *dst,
+                                          ptrdiff_t dst_stride, int width,
+                                          int height, const int16_t *filter_y,
+                                          ConvolveParams *conv_params, int bd,
+                                          const int y_offset) {
+  assert(width >= 4 && height >= 4);
+  const int64x2_t offset = vdupq_n_s64(y_offset);
+  const int32x4_t shift = vdupq_n_s32(-conv_params->round_1);
+
+  const int16x8_t y_filter =
+      vcombine_s16(vld1_s16(filter_y + 2), vdup_n_s16(0));
+
+  if (width == 4) {
+    const uint16x4_t max = vdup_n_u16((1 << bd) - 1);
+    int16_t *s = (int16_t *)(src);
+
+    int16x4_t s0, s1, s2;
+    load_s16_4x3(s, src_stride, &s0, &s1, &s2);
+    s += 3 * src_stride;
+
+    do {
+      int16x4_t s3, s4, s5, s6;
+      load_s16_4x4(s, src_stride, &s3, &s4, &s5, &s6);
+
+      // This operation combines a conventional transpose and the sample permute
+      // required before computing the dot product.
+ int16x8_t s0123[2], s1234[2], s2345[2], s3456[2]; + transpose_concat_4x4(s0, s1, s2, s3, s0123); + transpose_concat_4x4(s1, s2, s3, s4, s1234); + transpose_concat_4x4(s2, s3, s4, s5, s2345); + transpose_concat_4x4(s3, s4, s5, s6, s3456); + + uint16x4_t d0 = + highbd_convolve4_4_2d_v(s0123, y_filter, shift, offset, max); + uint16x4_t d1 = + highbd_convolve4_4_2d_v(s1234, y_filter, shift, offset, max); + uint16x4_t d2 = + highbd_convolve4_4_2d_v(s2345, y_filter, shift, offset, max); + uint16x4_t d3 = + highbd_convolve4_4_2d_v(s3456, y_filter, shift, offset, max); + + store_u16_4x4(dst, dst_stride, d0, d1, d2, d3); + + // Shuffle everything up four rows. + s0 = s4; + s1 = s5; + s2 = s6; + + s += 4 * src_stride; + dst += 4 * dst_stride; + height -= 4; + } while (height != 0); + } else { + const uint16x8_t max = vdupq_n_u16((1 << bd) - 1); + + do { + int h = height; + int16_t *s = (int16_t *)(src); + uint16_t *d = dst; + + int16x8_t s0, s1, s2; + load_s16_8x3(s, src_stride, &s0, &s1, &s2); + s += 3 * src_stride; + + do { + int16x8_t s3, s4, s5, s6; + load_s16_8x4(s, src_stride, &s3, &s4, &s5, &s6); + + // This operation combines a conventional transpose and the sample + // permute required before computing the dot product. + int16x8_t s0123[4], s1234[4], s2345[4], s3456[4]; + transpose_concat_8x4(s0, s1, s2, s3, s0123); + transpose_concat_8x4(s1, s2, s3, s4, s1234); + transpose_concat_8x4(s2, s3, s4, s5, s2345); + transpose_concat_8x4(s3, s4, s5, s6, s3456); + + uint16x8_t d0 = + highbd_convolve4_8_2d_v(s0123, y_filter, shift, offset, max); + uint16x8_t d1 = + highbd_convolve4_8_2d_v(s1234, y_filter, shift, offset, max); + uint16x8_t d2 = + highbd_convolve4_8_2d_v(s2345, y_filter, shift, offset, max); + uint16x8_t d3 = + highbd_convolve4_8_2d_v(s3456, y_filter, shift, offset, max); + + store_u16_8x4(d, dst_stride, d0, d1, d2, d3); + + // Shuffle everything up four rows. + s0 = s4; + s1 = s5; + s2 = s6; + + s += 4 * src_stride; + d += 4 * dst_stride; + h -= 4; + } while (h != 0); + src += 8; + dst += 8; + width -= 8; + } while (width != 0); + } +} + +void av1_highbd_convolve_2d_sr_sve2(const uint16_t *src, int src_stride, + uint16_t *dst, int dst_stride, int w, int h, + const InterpFilterParams *filter_params_x, + const InterpFilterParams *filter_params_y, + const int subpel_x_qn, + const int subpel_y_qn, + ConvolveParams *conv_params, int bd) { + if (w == 2 || h == 2) { + av1_highbd_convolve_2d_sr_c(src, src_stride, dst, dst_stride, w, h, + filter_params_x, filter_params_y, subpel_x_qn, + subpel_y_qn, conv_params, bd); + return; + } + + DECLARE_ALIGNED(16, uint16_t, + im_block[(MAX_SB_SIZE + MAX_FILTER_TAP) * MAX_SB_SIZE]); + const int x_filter_taps = get_filter_tap(filter_params_x, subpel_x_qn); + const int y_filter_taps = get_filter_tap(filter_params_y, subpel_y_qn); + + if (x_filter_taps == 6 || y_filter_taps == 6) { + av1_highbd_convolve_2d_sr_neon(src, src_stride, dst, dst_stride, w, h, + filter_params_x, filter_params_y, + subpel_x_qn, subpel_y_qn, conv_params, bd); + return; + } + + const int clamped_x_taps = x_filter_taps < 4 ? 4 : x_filter_taps; + const int clamped_y_taps = y_filter_taps < 4 ? 
4 : y_filter_taps; + + const int im_stride = MAX_SB_SIZE; + const int vert_offset = clamped_y_taps / 2 - 1; + const int horiz_offset = clamped_x_taps / 2 - 1; + const int x_offset = (1 << (bd + FILTER_BITS - 1)); + const int y_offset_bits = bd + 2 * FILTER_BITS - conv_params->round_0; + // The extra shim of (1 << (conv_params->round_1 - 1)) allows us to do a + // simple shift left instead of a rounding saturating shift left. + const int y_offset = + (1 << (conv_params->round_1 - 1)) - (1 << (y_offset_bits - 1)); + + const uint16_t *src_ptr = src - vert_offset * src_stride - horiz_offset; + + const int16_t *x_filter_ptr = av1_get_interp_filter_subpel_kernel( + filter_params_x, subpel_x_qn & SUBPEL_MASK); + const int16_t *y_filter_ptr = av1_get_interp_filter_subpel_kernel( + filter_params_y, subpel_y_qn & SUBPEL_MASK); + const int im_h = h + clamped_y_taps - 1; + + if (x_filter_taps > 8) { + highbd_convolve_2d_sr_horiz_12tap_sve2(src_ptr, src_stride, im_block, + im_stride, w, im_h, x_filter_ptr, + conv_params, x_offset); + + highbd_convolve_2d_sr_vert_12tap_sve2(im_block, im_stride, dst, dst_stride, + w, h, y_filter_ptr, conv_params, bd, + y_offset); + return; + } + + if (x_filter_taps <= 4) { + highbd_convolve_2d_sr_horiz_4tap_sve2(src_ptr, src_stride, im_block, + im_stride, w, im_h, x_filter_ptr, + conv_params, x_offset); + } else { + highbd_convolve_2d_sr_horiz_8tap_sve2(src_ptr, src_stride, im_block, + im_stride, w, im_h, x_filter_ptr, + conv_params, x_offset); + } + + if (y_filter_taps <= 4) { + highbd_convolve_2d_sr_vert_4tap_sve2(im_block, im_stride, dst, dst_stride, + w, h, y_filter_ptr, conv_params, bd, + y_offset); + } else { + highbd_convolve_2d_sr_vert_8tap_sve2(im_block, im_stride, dst, dst_stride, + w, h, y_filter_ptr, conv_params, bd, + y_offset); + } +} diff --git a/third_party/aom/av1/common/arm/highbd_convolve_sve2.h b/third_party/aom/av1/common/arm/highbd_convolve_sve2.h new file mode 100644 index 0000000000..05e23deef4 --- /dev/null +++ b/third_party/aom/av1/common/arm/highbd_convolve_sve2.h @@ -0,0 +1,97 @@ +/* + * Copyright (c) 2023, Alliance for Open Media. All rights reserved + * + * This source code is subject to the terms of the BSD 2 Clause License and + * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License + * was not distributed with this source code in the LICENSE file, you can + * obtain it at www.aomedia.org/license/software. If the Alliance for Open + * Media Patent License 1.0 was not distributed with this source code in the + * PATENTS file, you can obtain it at www.aomedia.org/license/patent. + */ + +#ifndef AOM_AV1_COMMON_ARM_HIGHBD_CONVOLVE_SVE2_H_ +#define AOM_AV1_COMMON_ARM_HIGHBD_CONVOLVE_SVE2_H_ + +#include + +#include "aom_dsp/arm/aom_neon_sve2_bridge.h" + +// clang-format off +DECLARE_ALIGNED(16, static const uint16_t, kDotProdMergeBlockTbl[24]) = { + // Shift left and insert new last column in transposed 4x4 block. + 1, 2, 3, 0, 5, 6, 7, 4, + // Shift left and insert two new columns in transposed 4x4 block. + 2, 3, 0, 1, 6, 7, 4, 5, + // Shift left and insert three new columns in transposed 4x4 block. 
+ 3, 0, 1, 2, 7, 4, 5, 6, +}; +// clang-format on + +static INLINE void transpose_concat_4x4(int16x4_t s0, int16x4_t s1, + int16x4_t s2, int16x4_t s3, + int16x8_t res[2]) { + // Transpose 16-bit elements and concatenate result rows as follows: + // s0: 00, 01, 02, 03 + // s1: 10, 11, 12, 13 + // s2: 20, 21, 22, 23 + // s3: 30, 31, 32, 33 + // + // res[0]: 00 10 20 30 01 11 21 31 + // res[1]: 02 12 22 32 03 13 23 33 + + int16x8_t s0q = vcombine_s16(s0, vdup_n_s16(0)); + int16x8_t s1q = vcombine_s16(s1, vdup_n_s16(0)); + int16x8_t s2q = vcombine_s16(s2, vdup_n_s16(0)); + int16x8_t s3q = vcombine_s16(s3, vdup_n_s16(0)); + + int32x4_t s01 = vreinterpretq_s32_s16(vzip1q_s16(s0q, s1q)); + int32x4_t s23 = vreinterpretq_s32_s16(vzip1q_s16(s2q, s3q)); + + int32x4x2_t s0123 = vzipq_s32(s01, s23); + + res[0] = vreinterpretq_s16_s32(s0123.val[0]); + res[1] = vreinterpretq_s16_s32(s0123.val[1]); +} + +static INLINE void transpose_concat_8x4(int16x8_t s0, int16x8_t s1, + int16x8_t s2, int16x8_t s3, + int16x8_t res[4]) { + // Transpose 16-bit elements and concatenate result rows as follows: + // s0: 00, 01, 02, 03, 04, 05, 06, 07 + // s1: 10, 11, 12, 13, 14, 15, 16, 17 + // s2: 20, 21, 22, 23, 24, 25, 26, 27 + // s3: 30, 31, 32, 33, 34, 35, 36, 37 + // + // res[0]: 00 10 20 30 01 11 21 31 + // res[1]: 02 12 22 32 03 13 23 33 + // res[2]: 04 14 24 34 05 15 25 35 + // res[3]: 06 16 26 36 07 17 27 37 + + int16x8x2_t tr01_16 = vzipq_s16(s0, s1); + int16x8x2_t tr23_16 = vzipq_s16(s2, s3); + int32x4x2_t tr01_32 = vzipq_s32(vreinterpretq_s32_s16(tr01_16.val[0]), + vreinterpretq_s32_s16(tr23_16.val[0])); + int32x4x2_t tr23_32 = vzipq_s32(vreinterpretq_s32_s16(tr01_16.val[1]), + vreinterpretq_s32_s16(tr23_16.val[1])); + + res[0] = vreinterpretq_s16_s32(tr01_32.val[0]); + res[1] = vreinterpretq_s16_s32(tr01_32.val[1]); + res[2] = vreinterpretq_s16_s32(tr23_32.val[0]); + res[3] = vreinterpretq_s16_s32(tr23_32.val[1]); +} + +static INLINE void aom_tbl2x4_s16(int16x8_t t0[4], int16x8_t t1[4], + uint16x8_t tbl, int16x8_t res[4]) { + res[0] = aom_tbl2_s16(t0[0], t1[0], tbl); + res[1] = aom_tbl2_s16(t0[1], t1[1], tbl); + res[2] = aom_tbl2_s16(t0[2], t1[2], tbl); + res[3] = aom_tbl2_s16(t0[3], t1[3], tbl); +} + +static INLINE void aom_tbl2x2_s16(int16x8_t t0[2], int16x8_t t1[2], + uint16x8_t tbl, int16x8_t res[2]) { + res[0] = aom_tbl2_s16(t0[0], t1[0], tbl); + res[1] = aom_tbl2_s16(t0[1], t1[1], tbl); +} + +#endif // AOM_AV1_COMMON_ARM_HIGHBD_CONVOLVE_SVE2_H_ diff --git a/third_party/aom/av1/common/arm/highbd_warp_plane_neon.c b/third_party/aom/av1/common/arm/highbd_warp_plane_neon.c index c6f1e3ad92..89647bc921 100644 --- a/third_party/aom/av1/common/arm/highbd_warp_plane_neon.c +++ b/third_party/aom/av1/common/arm/highbd_warp_plane_neon.c @@ -23,8 +23,8 @@ #include "config/av1_rtcd.h" #include "highbd_warp_plane_neon.h" -static INLINE int16x8_t highbd_horizontal_filter_4x1_f4(uint16x8x2_t in, int bd, - int sx, int alpha) { +static AOM_FORCE_INLINE int16x8_t +highbd_horizontal_filter_4x1_f4(uint16x8x2_t in, int bd, int sx, int alpha) { int16x8_t f[4]; load_filters_4(f, sx, alpha); @@ -57,8 +57,8 @@ static INLINE int16x8_t highbd_horizontal_filter_4x1_f4(uint16x8x2_t in, int bd, return vcombine_s16(vmovn_s32(res), vdup_n_s16(0)); } -static INLINE int16x8_t highbd_horizontal_filter_8x1_f8(uint16x8x2_t in, int bd, - int sx, int alpha) { +static AOM_FORCE_INLINE int16x8_t +highbd_horizontal_filter_8x1_f8(uint16x8x2_t in, int bd, int sx, int alpha) { int16x8_t f[8]; load_filters_8(f, sx, alpha); @@ -111,8 +111,8 @@ static 
INLINE int16x8_t highbd_horizontal_filter_8x1_f8(uint16x8x2_t in, int bd, return vcombine_s16(vmovn_s32(res0), vmovn_s32(res1)); } -static INLINE int16x8_t highbd_horizontal_filter_4x1_f1(uint16x8x2_t in, int bd, - int sx) { +static AOM_FORCE_INLINE int16x8_t +highbd_horizontal_filter_4x1_f1(uint16x8x2_t in, int bd, int sx) { int16x8_t f = load_filters_1(sx); int16x8_t rv0 = vextq_s16(vreinterpretq_s16_u16(in.val[0]), @@ -144,8 +144,8 @@ static INLINE int16x8_t highbd_horizontal_filter_4x1_f1(uint16x8x2_t in, int bd, return vcombine_s16(vmovn_s32(res), vdup_n_s16(0)); } -static INLINE int16x8_t highbd_horizontal_filter_8x1_f1(uint16x8x2_t in, int bd, - int sx) { +static AOM_FORCE_INLINE int16x8_t +highbd_horizontal_filter_8x1_f1(uint16x8x2_t in, int bd, int sx) { int16x8_t f = load_filters_1(sx); int16x8_t rv0 = vextq_s16(vreinterpretq_s16_u16(in.val[0]), @@ -197,7 +197,8 @@ static INLINE int16x8_t highbd_horizontal_filter_8x1_f1(uint16x8x2_t in, int bd, return vcombine_s16(vmovn_s32(res0), vmovn_s32(res1)); } -static INLINE int32x4_t vertical_filter_4x1_f1(const int16x8_t *tmp, int sy) { +static AOM_FORCE_INLINE int32x4_t vertical_filter_4x1_f1(const int16x8_t *tmp, + int sy) { const int16x8_t f = load_filters_1(sy); const int16x4_t f0123 = vget_low_s16(f); const int16x4_t f4567 = vget_high_s16(f); @@ -213,7 +214,8 @@ static INLINE int32x4_t vertical_filter_4x1_f1(const int16x8_t *tmp, int sy) { return m0123; } -static INLINE int32x4x2_t vertical_filter_8x1_f1(const int16x8_t *tmp, int sy) { +static AOM_FORCE_INLINE int32x4x2_t vertical_filter_8x1_f1(const int16x8_t *tmp, + int sy) { const int16x8_t f = load_filters_1(sy); const int16x4_t f0123 = vget_low_s16(f); const int16x4_t f4567 = vget_high_s16(f); @@ -238,8 +240,8 @@ static INLINE int32x4x2_t vertical_filter_8x1_f1(const int16x8_t *tmp, int sy) { return (int32x4x2_t){ { m0123, m4567 } }; } -static INLINE int32x4_t vertical_filter_4x1_f4(const int16x8_t *tmp, int sy, - int gamma) { +static AOM_FORCE_INLINE int32x4_t vertical_filter_4x1_f4(const int16x8_t *tmp, + int sy, int gamma) { int16x8_t s0, s1, s2, s3; transpose_elems_s16_4x8( vget_low_s16(tmp[0]), vget_low_s16(tmp[1]), vget_low_s16(tmp[2]), @@ -262,8 +264,8 @@ static INLINE int32x4_t vertical_filter_4x1_f4(const int16x8_t *tmp, int sy, return horizontal_add_4d_s32x4(m0123); } -static INLINE int32x4x2_t vertical_filter_8x1_f8(const int16x8_t *tmp, int sy, - int gamma) { +static AOM_FORCE_INLINE int32x4x2_t vertical_filter_8x1_f8(const int16x8_t *tmp, + int sy, int gamma) { int16x8_t s0 = tmp[0]; int16x8_t s1 = tmp[1]; int16x8_t s2 = tmp[2]; diff --git a/third_party/aom/av1/common/arm/highbd_warp_plane_neon.h b/third_party/aom/av1/common/arm/highbd_warp_plane_neon.h index 3b8982898e..48af4a707b 100644 --- a/third_party/aom/av1/common/arm/highbd_warp_plane_neon.h +++ b/third_party/aom/av1/common/arm/highbd_warp_plane_neon.h @@ -23,29 +23,31 @@ #include "av1/common/warped_motion.h" #include "config/av1_rtcd.h" -static INLINE int16x8_t highbd_horizontal_filter_4x1_f4(uint16x8x2_t in, int bd, - int sx, int alpha); +static AOM_FORCE_INLINE int16x8_t +highbd_horizontal_filter_4x1_f4(uint16x8x2_t in, int bd, int sx, int alpha); -static INLINE int16x8_t highbd_horizontal_filter_8x1_f8(uint16x8x2_t in, int bd, - int sx, int alpha); +static AOM_FORCE_INLINE int16x8_t +highbd_horizontal_filter_8x1_f8(uint16x8x2_t in, int bd, int sx, int alpha); -static INLINE int16x8_t highbd_horizontal_filter_4x1_f1(uint16x8x2_t in, int bd, - int sx); +static AOM_FORCE_INLINE int16x8_t 
+highbd_horizontal_filter_4x1_f1(uint16x8x2_t in, int bd, int sx); -static INLINE int16x8_t highbd_horizontal_filter_8x1_f1(uint16x8x2_t in, int bd, - int sx); +static AOM_FORCE_INLINE int16x8_t +highbd_horizontal_filter_8x1_f1(uint16x8x2_t in, int bd, int sx); -static INLINE int32x4_t vertical_filter_4x1_f1(const int16x8_t *tmp, int sy); +static AOM_FORCE_INLINE int32x4_t vertical_filter_4x1_f1(const int16x8_t *tmp, + int sy); -static INLINE int32x4x2_t vertical_filter_8x1_f1(const int16x8_t *tmp, int sy); +static AOM_FORCE_INLINE int32x4x2_t vertical_filter_8x1_f1(const int16x8_t *tmp, + int sy); -static INLINE int32x4_t vertical_filter_4x1_f4(const int16x8_t *tmp, int sy, - int gamma); +static AOM_FORCE_INLINE int32x4_t vertical_filter_4x1_f4(const int16x8_t *tmp, + int sy, int gamma); -static INLINE int32x4x2_t vertical_filter_8x1_f8(const int16x8_t *tmp, int sy, - int gamma); +static AOM_FORCE_INLINE int32x4x2_t vertical_filter_8x1_f8(const int16x8_t *tmp, + int sy, int gamma); -static INLINE int16x8_t load_filters_1(int ofs) { +static AOM_FORCE_INLINE int16x8_t load_filters_1(int ofs) { const int ofs0 = ROUND_POWER_OF_TWO(ofs, WARPEDDIFF_PREC_BITS); const int16_t *base = @@ -53,7 +55,8 @@ static INLINE int16x8_t load_filters_1(int ofs) { return vld1q_s16(base + ofs0 * 8); } -static INLINE void load_filters_4(int16x8_t out[], int ofs, int stride) { +static AOM_FORCE_INLINE void load_filters_4(int16x8_t out[], int ofs, + int stride) { const int ofs0 = ROUND_POWER_OF_TWO(ofs + stride * 0, WARPEDDIFF_PREC_BITS); const int ofs1 = ROUND_POWER_OF_TWO(ofs + stride * 1, WARPEDDIFF_PREC_BITS); const int ofs2 = ROUND_POWER_OF_TWO(ofs + stride * 2, WARPEDDIFF_PREC_BITS); @@ -67,7 +70,8 @@ static INLINE void load_filters_4(int16x8_t out[], int ofs, int stride) { out[3] = vld1q_s16(base + ofs3 * 8); } -static INLINE void load_filters_8(int16x8_t out[], int ofs, int stride) { +static AOM_FORCE_INLINE void load_filters_8(int16x8_t out[], int ofs, + int stride) { const int ofs0 = ROUND_POWER_OF_TWO(ofs + stride * 0, WARPEDDIFF_PREC_BITS); const int ofs1 = ROUND_POWER_OF_TWO(ofs + stride * 1, WARPEDDIFF_PREC_BITS); const int ofs2 = ROUND_POWER_OF_TWO(ofs + stride * 2, WARPEDDIFF_PREC_BITS); @@ -89,16 +93,18 @@ static INLINE void load_filters_8(int16x8_t out[], int ofs, int stride) { out[7] = vld1q_s16(base + ofs7 * 8); } -static INLINE uint16x4_t clip_pixel_highbd_vec(int32x4_t val, int bd) { +static AOM_FORCE_INLINE uint16x4_t clip_pixel_highbd_vec(int32x4_t val, + int bd) { const int limit = (1 << bd) - 1; return vqmovun_s32(vminq_s32(val, vdupq_n_s32(limit))); } -static INLINE void warp_affine_horizontal(const uint16_t *ref, int width, - int height, int stride, int p_width, - int16_t alpha, int16_t beta, int iy4, - int sx4, int ix4, int16x8_t tmp[], - int bd) { +static AOM_FORCE_INLINE void warp_affine_horizontal(const uint16_t *ref, + int width, int height, + int stride, int p_width, + int16_t alpha, int16_t beta, + int iy4, int sx4, int ix4, + int16x8_t tmp[], int bd) { const int round0 = (bd == 12) ? 
ROUND0_BITS + 2 : ROUND0_BITS; if (ix4 <= -7) { @@ -197,7 +203,7 @@ static INLINE void warp_affine_horizontal(const uint16_t *ref, int width, } } -static INLINE void highbd_vertical_filter_4x1_f4( +static AOM_FORCE_INLINE void highbd_vertical_filter_4x1_f4( uint16_t *pred, int p_stride, int bd, uint16_t *dst, int dst_stride, bool is_compound, bool do_average, bool use_dist_wtd_comp_avg, int fwd, int bwd, int16_t gamma, const int16x8_t *tmp, int i, int sy, int j) { @@ -253,7 +259,7 @@ static INLINE void highbd_vertical_filter_4x1_f4( vst1_u16(dst16, res0); } -static INLINE void highbd_vertical_filter_8x1_f8( +static AOM_FORCE_INLINE void highbd_vertical_filter_8x1_f8( uint16_t *pred, int p_stride, int bd, uint16_t *dst, int dst_stride, bool is_compound, bool do_average, bool use_dist_wtd_comp_avg, int fwd, int bwd, int16_t gamma, const int16x8_t *tmp, int i, int sy, int j) { @@ -328,7 +334,7 @@ static INLINE void highbd_vertical_filter_8x1_f8( vst1_u16(dst16 + 4, res1); } -static INLINE void warp_affine_vertical( +static AOM_FORCE_INLINE void warp_affine_vertical( uint16_t *pred, int p_width, int p_height, int p_stride, int bd, uint16_t *dst, int dst_stride, bool is_compound, bool do_average, bool use_dist_wtd_comp_avg, int fwd, int bwd, int16_t gamma, int16_t delta, @@ -354,7 +360,7 @@ static INLINE void warp_affine_vertical( } } -static INLINE void highbd_warp_affine_common( +static AOM_FORCE_INLINE void highbd_warp_affine_common( const int32_t *mat, const uint16_t *ref, int width, int height, int stride, uint16_t *pred, int p_col, int p_row, int p_width, int p_height, int p_stride, int subsampling_x, int subsampling_y, int bd, diff --git a/third_party/aom/av1/common/arm/highbd_warp_plane_sve.c b/third_party/aom/av1/common/arm/highbd_warp_plane_sve.c index 7a14f21846..87e033fd00 100644 --- a/third_party/aom/av1/common/arm/highbd_warp_plane_sve.c +++ b/third_party/aom/av1/common/arm/highbd_warp_plane_sve.c @@ -15,7 +15,7 @@ #include #include "aom_dsp/aom_dsp_common.h" -#include "aom_dsp/arm/dot_sve.h" +#include "aom_dsp/arm/aom_neon_sve_bridge.h" #include "aom_dsp/arm/mem_neon.h" #include "aom_dsp/arm/transpose_neon.h" #include "aom_ports/mem.h" @@ -24,8 +24,8 @@ #include "config/av1_rtcd.h" #include "highbd_warp_plane_neon.h" -static INLINE int16x8_t highbd_horizontal_filter_4x1_f4(uint16x8x2_t in, int bd, - int sx, int alpha) { +static AOM_FORCE_INLINE int16x8_t +highbd_horizontal_filter_4x1_f4(uint16x8x2_t in, int bd, int sx, int alpha) { int16x8_t f[4]; load_filters_4(f, sx, alpha); @@ -55,8 +55,8 @@ static INLINE int16x8_t highbd_horizontal_filter_4x1_f4(uint16x8x2_t in, int bd, return vcombine_s16(vmovn_s32(res), vdup_n_s16(0)); } -static INLINE int16x8_t highbd_horizontal_filter_8x1_f8(uint16x8x2_t in, int bd, - int sx, int alpha) { +static AOM_FORCE_INLINE int16x8_t +highbd_horizontal_filter_8x1_f8(uint16x8x2_t in, int bd, int sx, int alpha) { int16x8_t f[8]; load_filters_8(f, sx, alpha); @@ -103,8 +103,8 @@ static INLINE int16x8_t highbd_horizontal_filter_8x1_f8(uint16x8x2_t in, int bd, return vcombine_s16(vmovn_s32(res0), vmovn_s32(res1)); } -static INLINE int16x8_t highbd_horizontal_filter_4x1_f1(uint16x8x2_t in, int bd, - int sx) { +static AOM_FORCE_INLINE int16x8_t +highbd_horizontal_filter_4x1_f1(uint16x8x2_t in, int bd, int sx) { int16x8_t f = load_filters_1(sx); int16x8_t rv0 = vextq_s16(vreinterpretq_s16_u16(in.val[0]), @@ -133,8 +133,8 @@ static INLINE int16x8_t highbd_horizontal_filter_4x1_f1(uint16x8x2_t in, int bd, return vcombine_s16(vmovn_s32(res), vdup_n_s16(0)); } 
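A note on the recurring INLINE to AOM_FORCE_INLINE change running through these warp-plane hunks: the stronger hint keeps the many small per-row filter helpers inlined regardless of the compiler's own inlining heuristics. Below is a minimal sketch of how a force-inline macro of this kind is commonly defined; the macro name here is a placeholder, and libaom's actual definition lives in its own port headers and may differ.

    /* Sketch only: portable force-inline in the style of AOM_FORCE_INLINE.
     * FORCE_INLINE_SKETCH is a hypothetical name, not libaom's macro. */
    #if defined(_MSC_VER)
    #define FORCE_INLINE_SKETCH __forceinline
    #elif defined(__GNUC__)
    #define FORCE_INLINE_SKETCH inline __attribute__((always_inline))
    #else
    #define FORCE_INLINE_SKETCH inline
    #endif

It would be used the same way as in the hunks above, e.g. static FORCE_INLINE_SKETCH int16x8_t helper(...).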
-static INLINE int16x8_t highbd_horizontal_filter_8x1_f1(uint16x8x2_t in, int bd, - int sx) { +static AOM_FORCE_INLINE int16x8_t +highbd_horizontal_filter_8x1_f1(uint16x8x2_t in, int bd, int sx) { int16x8_t f = load_filters_1(sx); int16x8_t rv0 = vextq_s16(vreinterpretq_s16_u16(in.val[0]), @@ -180,7 +180,8 @@ static INLINE int16x8_t highbd_horizontal_filter_8x1_f1(uint16x8x2_t in, int bd, return vcombine_s16(vmovn_s32(res0), vmovn_s32(res1)); } -static INLINE int32x4_t vertical_filter_4x1_f1(const int16x8_t *tmp, int sy) { +static AOM_FORCE_INLINE int32x4_t vertical_filter_4x1_f1(const int16x8_t *tmp, + int sy) { const int16x8_t f = load_filters_1(sy); const int16x4_t f0123 = vget_low_s16(f); const int16x4_t f4567 = vget_high_s16(f); @@ -197,7 +198,8 @@ static INLINE int32x4_t vertical_filter_4x1_f1(const int16x8_t *tmp, int sy) { return m0123; } -static INLINE int32x4x2_t vertical_filter_8x1_f1(const int16x8_t *tmp, int sy) { +static AOM_FORCE_INLINE int32x4x2_t vertical_filter_8x1_f1(const int16x8_t *tmp, + int sy) { const int16x8_t f = load_filters_1(sy); const int16x4_t f0123 = vget_low_s16(f); const int16x4_t f4567 = vget_high_s16(f); @@ -223,8 +225,8 @@ static INLINE int32x4x2_t vertical_filter_8x1_f1(const int16x8_t *tmp, int sy) { return (int32x4x2_t){ { m0123, m4567 } }; } -static INLINE int32x4_t vertical_filter_4x1_f4(const int16x8_t *tmp, int sy, - int gamma) { +static AOM_FORCE_INLINE int32x4_t vertical_filter_4x1_f4(const int16x8_t *tmp, + int sy, int gamma) { int16x8_t s0, s1, s2, s3; transpose_elems_s16_4x8( vget_low_s16(tmp[0]), vget_low_s16(tmp[1]), vget_low_s16(tmp[2]), @@ -244,8 +246,8 @@ static INLINE int32x4_t vertical_filter_4x1_f4(const int16x8_t *tmp, int sy, return vcombine_s32(vmovn_s64(m01), vmovn_s64(m23)); } -static INLINE int32x4x2_t vertical_filter_8x1_f8(const int16x8_t *tmp, int sy, - int gamma) { +static AOM_FORCE_INLINE int32x4x2_t vertical_filter_8x1_f8(const int16x8_t *tmp, + int sy, int gamma) { int16x8_t s0 = tmp[0]; int16x8_t s1 = tmp[1]; int16x8_t s2 = tmp[2]; diff --git a/third_party/aom/av1/common/arm/warp_plane_neon.c b/third_party/aom/av1/common/arm/warp_plane_neon.c index 4723154398..546aa2965b 100644 --- a/third_party/aom/av1/common/arm/warp_plane_neon.c +++ b/third_party/aom/av1/common/arm/warp_plane_neon.c @@ -11,8 +11,8 @@ #include "warp_plane_neon.h" -static INLINE int16x8_t horizontal_filter_4x1_f4(const uint8x16_t in, int sx, - int alpha) { +static AOM_FORCE_INLINE int16x8_t horizontal_filter_4x1_f4(const uint8x16_t in, + int sx, int alpha) { const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1)); // Loading the 8 filter taps @@ -39,8 +39,8 @@ static INLINE int16x8_t horizontal_filter_4x1_f4(const uint8x16_t in, int sx, return vreinterpretq_s16_u16(res); } -static INLINE int16x8_t horizontal_filter_8x1_f8(const uint8x16_t in, int sx, - int alpha) { +static AOM_FORCE_INLINE int16x8_t horizontal_filter_8x1_f8(const uint8x16_t in, + int sx, int alpha) { const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1)); // Loading the 8 filter taps @@ -75,7 +75,8 @@ static INLINE int16x8_t horizontal_filter_8x1_f8(const uint8x16_t in, int sx, return vreinterpretq_s16_u16(res); } -static INLINE int16x8_t horizontal_filter_4x1_f1(const uint8x16_t in, int sx) { +static AOM_FORCE_INLINE int16x8_t horizontal_filter_4x1_f1(const uint8x16_t in, + int sx) { const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1)); int16x8_t f_s16 = @@ -101,7 +102,8 @@ static INLINE int16x8_t horizontal_filter_4x1_f1(const uint8x16_t in, 
int sx) { return vreinterpretq_s16_u16(res); } -static INLINE int16x8_t horizontal_filter_8x1_f1(const uint8x16_t in, int sx) { +static AOM_FORCE_INLINE int16x8_t horizontal_filter_8x1_f1(const uint8x16_t in, + int sx) { const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1)); int16x8_t f_s16 = @@ -135,8 +137,8 @@ static INLINE int16x8_t horizontal_filter_8x1_f1(const uint8x16_t in, int sx) { return vreinterpretq_s16_u16(res); } -static INLINE void vertical_filter_4x1_f1(const int16x8_t *src, int32x4_t *res, - int sy) { +static AOM_FORCE_INLINE void vertical_filter_4x1_f1(const int16x8_t *src, + int32x4_t *res, int sy) { int16x4_t s0 = vget_low_s16(src[0]); int16x4_t s1 = vget_low_s16(src[1]); int16x4_t s2 = vget_low_s16(src[2]); @@ -161,8 +163,9 @@ static INLINE void vertical_filter_4x1_f1(const int16x8_t *src, int32x4_t *res, *res = m0123; } -static INLINE void vertical_filter_4x1_f4(const int16x8_t *src, int32x4_t *res, - int sy, int gamma) { +static AOM_FORCE_INLINE void vertical_filter_4x1_f4(const int16x8_t *src, + int32x4_t *res, int sy, + int gamma) { int16x8_t s0, s1, s2, s3; transpose_elems_s16_4x8( vget_low_s16(src[0]), vget_low_s16(src[1]), vget_low_s16(src[2]), @@ -186,9 +189,10 @@ static INLINE void vertical_filter_4x1_f4(const int16x8_t *src, int32x4_t *res, *res = horizontal_add_4d_s32x4(m0123_pairs); } -static INLINE void vertical_filter_8x1_f1(const int16x8_t *src, - int32x4_t *res_low, - int32x4_t *res_high, int sy) { +static AOM_FORCE_INLINE void vertical_filter_8x1_f1(const int16x8_t *src, + int32x4_t *res_low, + int32x4_t *res_high, + int sy) { int16x8_t s0 = src[0]; int16x8_t s1 = src[1]; int16x8_t s2 = src[2]; @@ -223,10 +227,10 @@ static INLINE void vertical_filter_8x1_f1(const int16x8_t *src, *res_high = m4567; } -static INLINE void vertical_filter_8x1_f8(const int16x8_t *src, - int32x4_t *res_low, - int32x4_t *res_high, int sy, - int gamma) { +static AOM_FORCE_INLINE void vertical_filter_8x1_f8(const int16x8_t *src, + int32x4_t *res_low, + int32x4_t *res_high, int sy, + int gamma) { int16x8_t s0 = src[0]; int16x8_t s1 = src[1]; int16x8_t s2 = src[2]; diff --git a/third_party/aom/av1/common/arm/warp_plane_neon.h b/third_party/aom/av1/common/arm/warp_plane_neon.h index 5afd72f4ab..eece007ef3 100644 --- a/third_party/aom/av1/common/arm/warp_plane_neon.h +++ b/third_party/aom/av1/common/arm/warp_plane_neon.h @@ -24,32 +24,37 @@ #include "av1/common/warped_motion.h" #include "av1/common/scale.h" -static INLINE int16x8_t horizontal_filter_4x1_f4(const uint8x16_t in, int sx, - int alpha); +static AOM_FORCE_INLINE int16x8_t horizontal_filter_4x1_f4(const uint8x16_t in, + int sx, int alpha); -static INLINE int16x8_t horizontal_filter_8x1_f8(const uint8x16_t in, int sx, - int alpha); +static AOM_FORCE_INLINE int16x8_t horizontal_filter_8x1_f8(const uint8x16_t in, + int sx, int alpha); -static INLINE int16x8_t horizontal_filter_4x1_f1(const uint8x16_t in, int sx); +static AOM_FORCE_INLINE int16x8_t horizontal_filter_4x1_f1(const uint8x16_t in, + int sx); -static INLINE int16x8_t horizontal_filter_8x1_f1(const uint8x16_t in, int sx); +static AOM_FORCE_INLINE int16x8_t horizontal_filter_8x1_f1(const uint8x16_t in, + int sx); -static INLINE void vertical_filter_4x1_f1(const int16x8_t *src, int32x4_t *res, - int sy); +static AOM_FORCE_INLINE void vertical_filter_4x1_f1(const int16x8_t *src, + int32x4_t *res, int sy); -static INLINE void vertical_filter_4x1_f4(const int16x8_t *src, int32x4_t *res, - int sy, int gamma); +static AOM_FORCE_INLINE void 
vertical_filter_4x1_f4(const int16x8_t *src, + int32x4_t *res, int sy, + int gamma); -static INLINE void vertical_filter_8x1_f1(const int16x8_t *src, - int32x4_t *res_low, - int32x4_t *res_high, int sy); +static AOM_FORCE_INLINE void vertical_filter_8x1_f1(const int16x8_t *src, + int32x4_t *res_low, + int32x4_t *res_high, + int sy); -static INLINE void vertical_filter_8x1_f8(const int16x8_t *src, - int32x4_t *res_low, - int32x4_t *res_high, int sy, - int gamma); +static AOM_FORCE_INLINE void vertical_filter_8x1_f8(const int16x8_t *src, + int32x4_t *res_low, + int32x4_t *res_high, int sy, + int gamma); -static INLINE void load_filters_4(int16x8_t out[], int offset, int stride) { +static AOM_FORCE_INLINE void load_filters_4(int16x8_t out[], int offset, + int stride) { out[0] = vld1q_s16((int16_t *)(av1_warped_filter + ((offset + 0 * stride) >> WARPEDDIFF_PREC_BITS))); out[1] = vld1q_s16((int16_t *)(av1_warped_filter + ((offset + 1 * stride) >> @@ -60,7 +65,8 @@ static INLINE void load_filters_4(int16x8_t out[], int offset, int stride) { WARPEDDIFF_PREC_BITS))); } -static INLINE void load_filters_8(int16x8_t out[], int offset, int stride) { +static AOM_FORCE_INLINE void load_filters_8(int16x8_t out[], int offset, + int stride) { out[0] = vld1q_s16((int16_t *)(av1_warped_filter + ((offset + 0 * stride) >> WARPEDDIFF_PREC_BITS))); out[1] = vld1q_s16((int16_t *)(av1_warped_filter + ((offset + 1 * stride) >> @@ -79,16 +85,14 @@ static INLINE void load_filters_8(int16x8_t out[], int offset, int stride) { WARPEDDIFF_PREC_BITS))); } -static INLINE int clamp_iy(int iy, int height) { +static AOM_FORCE_INLINE int clamp_iy(int iy, int height) { return clamp(iy, 0, height - 1); } -static INLINE void warp_affine_horizontal(const uint8_t *ref, int width, - int height, int stride, int p_width, - int p_height, int16_t alpha, - int16_t beta, const int64_t x4, - const int64_t y4, const int i, - int16x8_t tmp[]) { +static AOM_FORCE_INLINE void warp_affine_horizontal( + const uint8_t *ref, int width, int height, int stride, int p_width, + int p_height, int16_t alpha, int16_t beta, const int64_t x4, + const int64_t y4, const int i, int16x8_t tmp[]) { const int bd = 8; const int reduce_bits_horiz = ROUND0_BITS; const int height_limit = AOMMIN(8, p_height - i) + 7; @@ -197,7 +201,7 @@ static INLINE void warp_affine_horizontal(const uint8_t *ref, int width, } } -static INLINE void warp_affine_vertical( +static AOM_FORCE_INLINE void warp_affine_vertical( uint8_t *pred, int p_width, int p_height, int p_stride, int is_compound, uint16_t *dst, int dst_stride, int do_average, int use_dist_wtd_comp_avg, int16_t gamma, int16_t delta, const int64_t y4, const int i, const int j, @@ -325,7 +329,7 @@ static INLINE void warp_affine_vertical( } } -static INLINE void av1_warp_affine_common( +static AOM_FORCE_INLINE void av1_warp_affine_common( const int32_t *mat, const uint8_t *ref, int width, int height, int stride, uint8_t *pred, int p_col, int p_row, int p_width, int p_height, int p_stride, int subsampling_x, int subsampling_y, diff --git a/third_party/aom/av1/common/arm/warp_plane_neon_i8mm.c b/third_party/aom/av1/common/arm/warp_plane_neon_i8mm.c index 39e3ad99f4..22a1be17b5 100644 --- a/third_party/aom/av1/common/arm/warp_plane_neon_i8mm.c +++ b/third_party/aom/av1/common/arm/warp_plane_neon_i8mm.c @@ -17,8 +17,8 @@ DECLARE_ALIGNED(16, static const uint8_t, usdot_permute_idx[48]) = { 8, 9, 10, 11, 9, 10, 11, 12, 10, 11, 12, 13, 11, 12, 13, 14 }; -static INLINE int16x8_t horizontal_filter_4x1_f4(const uint8x16_t in, int sx, 
- int alpha) { +static AOM_FORCE_INLINE int16x8_t horizontal_filter_4x1_f4(const uint8x16_t in, + int sx, int alpha) { const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1)); // Loading the 8 filter taps @@ -45,8 +45,8 @@ static INLINE int16x8_t horizontal_filter_4x1_f4(const uint8x16_t in, int sx, return vreinterpretq_s16_u16(res); } -static INLINE int16x8_t horizontal_filter_8x1_f8(const uint8x16_t in, int sx, - int alpha) { +static AOM_FORCE_INLINE int16x8_t horizontal_filter_8x1_f8(const uint8x16_t in, + int sx, int alpha) { const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1)); // Loading the 8 filter taps @@ -83,7 +83,8 @@ static INLINE int16x8_t horizontal_filter_8x1_f8(const uint8x16_t in, int sx, return vreinterpretq_s16_u16(res); } -static INLINE int16x8_t horizontal_filter_4x1_f1(const uint8x16_t in, int sx) { +static AOM_FORCE_INLINE int16x8_t horizontal_filter_4x1_f1(const uint8x16_t in, + int sx) { const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1)); int16x8_t f_s16 = @@ -112,7 +113,8 @@ static INLINE int16x8_t horizontal_filter_4x1_f1(const uint8x16_t in, int sx) { return vreinterpretq_s16_u16(res); } -static INLINE int16x8_t horizontal_filter_8x1_f1(const uint8x16_t in, int sx) { +static AOM_FORCE_INLINE int16x8_t horizontal_filter_8x1_f1(const uint8x16_t in, + int sx) { const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1)); int16x8_t f_s16 = @@ -149,8 +151,8 @@ static INLINE int16x8_t horizontal_filter_8x1_f1(const uint8x16_t in, int sx) { return vreinterpretq_s16_u16(res); } -static INLINE void vertical_filter_4x1_f1(const int16x8_t *src, int32x4_t *res, - int sy) { +static AOM_FORCE_INLINE void vertical_filter_4x1_f1(const int16x8_t *src, + int32x4_t *res, int sy) { int16x4_t s0 = vget_low_s16(src[0]); int16x4_t s1 = vget_low_s16(src[1]); int16x4_t s2 = vget_low_s16(src[2]); @@ -175,8 +177,9 @@ static INLINE void vertical_filter_4x1_f1(const int16x8_t *src, int32x4_t *res, *res = m0123; } -static INLINE void vertical_filter_4x1_f4(const int16x8_t *src, int32x4_t *res, - int sy, int gamma) { +static AOM_FORCE_INLINE void vertical_filter_4x1_f4(const int16x8_t *src, + int32x4_t *res, int sy, + int gamma) { int16x8_t s0, s1, s2, s3; transpose_elems_s16_4x8( vget_low_s16(src[0]), vget_low_s16(src[1]), vget_low_s16(src[2]), @@ -200,9 +203,10 @@ static INLINE void vertical_filter_4x1_f4(const int16x8_t *src, int32x4_t *res, *res = horizontal_add_4d_s32x4(m0123_pairs); } -static INLINE void vertical_filter_8x1_f1(const int16x8_t *src, - int32x4_t *res_low, - int32x4_t *res_high, int sy) { +static AOM_FORCE_INLINE void vertical_filter_8x1_f1(const int16x8_t *src, + int32x4_t *res_low, + int32x4_t *res_high, + int sy) { int16x8_t s0 = src[0]; int16x8_t s1 = src[1]; int16x8_t s2 = src[2]; @@ -237,10 +241,10 @@ static INLINE void vertical_filter_8x1_f1(const int16x8_t *src, *res_high = m4567; } -static INLINE void vertical_filter_8x1_f8(const int16x8_t *src, - int32x4_t *res_low, - int32x4_t *res_high, int sy, - int gamma) { +static AOM_FORCE_INLINE void vertical_filter_8x1_f8(const int16x8_t *src, + int32x4_t *res_low, + int32x4_t *res_high, int sy, + int gamma) { int16x8_t s0 = src[0]; int16x8_t s1 = src[1]; int16x8_t s2 = src[2]; diff --git a/third_party/aom/av1/common/arm/warp_plane_sve.c b/third_party/aom/av1/common/arm/warp_plane_sve.c index 8a4bf5747b..c70b066174 100644 --- a/third_party/aom/av1/common/arm/warp_plane_sve.c +++ b/third_party/aom/av1/common/arm/warp_plane_sve.c @@ -11,7 +11,7 @@ #include -#include 
"aom_dsp/arm/dot_sve.h" +#include "aom_dsp/arm/aom_neon_sve_bridge.h" #include "warp_plane_neon.h" DECLARE_ALIGNED(16, static const uint8_t, usdot_permute_idx[48]) = { @@ -20,8 +20,8 @@ DECLARE_ALIGNED(16, static const uint8_t, usdot_permute_idx[48]) = { 8, 9, 10, 11, 9, 10, 11, 12, 10, 11, 12, 13, 11, 12, 13, 14 }; -static INLINE int16x8_t horizontal_filter_4x1_f4(const uint8x16_t in, int sx, - int alpha) { +static AOM_FORCE_INLINE int16x8_t horizontal_filter_4x1_f4(const uint8x16_t in, + int sx, int alpha) { const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1)); // Loading the 8 filter taps @@ -48,8 +48,8 @@ static INLINE int16x8_t horizontal_filter_4x1_f4(const uint8x16_t in, int sx, return vreinterpretq_s16_u16(res); } -static INLINE int16x8_t horizontal_filter_8x1_f8(const uint8x16_t in, int sx, - int alpha) { +static AOM_FORCE_INLINE int16x8_t horizontal_filter_8x1_f8(const uint8x16_t in, + int sx, int alpha) { const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1)); // Loading the 8 filter taps @@ -86,7 +86,8 @@ static INLINE int16x8_t horizontal_filter_8x1_f8(const uint8x16_t in, int sx, return vreinterpretq_s16_u16(res); } -static INLINE int16x8_t horizontal_filter_4x1_f1(const uint8x16_t in, int sx) { +static AOM_FORCE_INLINE int16x8_t horizontal_filter_4x1_f1(const uint8x16_t in, + int sx) { const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1)); int16x8_t f_s16 = @@ -115,7 +116,8 @@ static INLINE int16x8_t horizontal_filter_4x1_f1(const uint8x16_t in, int sx) { return vreinterpretq_s16_u16(res); } -static INLINE int16x8_t horizontal_filter_8x1_f1(const uint8x16_t in, int sx) { +static AOM_FORCE_INLINE int16x8_t horizontal_filter_8x1_f1(const uint8x16_t in, + int sx) { const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1)); int16x8_t f_s16 = @@ -152,8 +154,8 @@ static INLINE int16x8_t horizontal_filter_8x1_f1(const uint8x16_t in, int sx) { return vreinterpretq_s16_u16(res); } -static INLINE void vertical_filter_4x1_f1(const int16x8_t *src, int32x4_t *res, - int sy) { +static AOM_FORCE_INLINE void vertical_filter_4x1_f1(const int16x8_t *src, + int32x4_t *res, int sy) { int16x4_t s0 = vget_low_s16(src[0]); int16x4_t s1 = vget_low_s16(src[1]); int16x4_t s2 = vget_low_s16(src[2]); @@ -178,8 +180,9 @@ static INLINE void vertical_filter_4x1_f1(const int16x8_t *src, int32x4_t *res, *res = m0123; } -static INLINE void vertical_filter_4x1_f4(const int16x8_t *src, int32x4_t *res, - int sy, int gamma) { +static AOM_FORCE_INLINE void vertical_filter_4x1_f4(const int16x8_t *src, + int32x4_t *res, int sy, + int gamma) { int16x8_t s0, s1, s2, s3; transpose_elems_s16_4x8( vget_low_s16(src[0]), vget_low_s16(src[1]), vget_low_s16(src[2]), @@ -200,9 +203,10 @@ static INLINE void vertical_filter_4x1_f4(const int16x8_t *src, int32x4_t *res, *res = vcombine_s32(vmovn_s64(m01), vmovn_s64(m23)); } -static INLINE void vertical_filter_8x1_f1(const int16x8_t *src, - int32x4_t *res_low, - int32x4_t *res_high, int sy) { +static AOM_FORCE_INLINE void vertical_filter_8x1_f1(const int16x8_t *src, + int32x4_t *res_low, + int32x4_t *res_high, + int sy) { int16x8_t s0 = src[0]; int16x8_t s1 = src[1]; int16x8_t s2 = src[2]; @@ -237,10 +241,10 @@ static INLINE void vertical_filter_8x1_f1(const int16x8_t *src, *res_high = m4567; } -static INLINE void vertical_filter_8x1_f8(const int16x8_t *src, - int32x4_t *res_low, - int32x4_t *res_high, int sy, - int gamma) { +static AOM_FORCE_INLINE void vertical_filter_8x1_f8(const int16x8_t *src, + int32x4_t *res_low, + 
int32x4_t *res_high, int sy, + int gamma) { int16x8_t s0 = src[0]; int16x8_t s1 = src[1]; int16x8_t s2 = src[2]; diff --git a/third_party/aom/av1/common/av1_common_int.h b/third_party/aom/av1/common/av1_common_int.h index 4c0cb99d2b..4e14c4a8be 100644 --- a/third_party/aom/av1/common/av1_common_int.h +++ b/third_party/aom/av1/common/av1_common_int.h @@ -17,7 +17,7 @@ #include "aom/internal/aom_codec_internal.h" #include "aom_dsp/flow_estimation/corner_detect.h" -#include "aom_util/aom_thread.h" +#include "aom_util/aom_pthread.h" #include "av1/common/alloccommon.h" #include "av1/common/av1_loopfilter.h" #include "av1/common/entropy.h" diff --git a/third_party/aom/av1/common/av1_rtcd_defs.pl b/third_party/aom/av1/common/av1_rtcd_defs.pl index ef999fbba2..c0831330d1 100644 --- a/third_party/aom/av1/common/av1_rtcd_defs.pl +++ b/third_party/aom/av1/common/av1_rtcd_defs.pl @@ -77,6 +77,16 @@ EOF } forward_decls qw/av1_common_forward_decls/; +# Fallbacks for Valgrind support +# For normal use, we require SSE4.1. However, 32-bit Valgrind does not support +# SSE4.1, so we include fallbacks for some critical functions to improve +# performance +$sse2_x86 = $ssse3_x86 = ''; +if ($opts{arch} eq "x86") { + $sse2_x86 = 'sse2'; + $ssse3_x86 = 'ssse3'; +} + # functions that are 64 bit only. $mmx_x86_64 = $sse2_x86_64 = $ssse3_x86_64 = $avx_x86_64 = $avx2_x86_64 = ''; if ($opts{arch} eq "x86_64") { @@ -345,7 +355,7 @@ if (aom_config("CONFIG_AV1_ENCODER") eq "yes") { #fwd txfm add_proto qw/void av1_lowbd_fwd_txfm/, "const int16_t *src_diff, tran_low_t *coeff, int diff_stride, TxfmParam *txfm_param"; - specialize qw/av1_lowbd_fwd_txfm sse2 sse4_1 avx2 neon/; + specialize qw/av1_lowbd_fwd_txfm sse4_1 avx2 neon/, $sse2_x86; add_proto qw/void av1_fwd_txfm2d_4x8/, "const int16_t *input, int32_t *output, int stride, TX_TYPE tx_type, int bd"; specialize qw/av1_fwd_txfm2d_4x8 sse4_1 neon/; @@ -436,9 +446,9 @@ if (aom_config("CONFIG_AV1_ENCODER") eq "yes") { specialize qw/av1_txb_init_levels sse4_1 avx2 neon/; add_proto qw/uint64_t av1_wedge_sse_from_residuals/, "const int16_t *r1, const int16_t *d, const uint8_t *m, int N"; - specialize qw/av1_wedge_sse_from_residuals sse2 avx2 neon/; + specialize qw/av1_wedge_sse_from_residuals sse2 avx2 neon sve/; add_proto qw/int8_t av1_wedge_sign_from_residuals/, "const int16_t *ds, const uint8_t *m, int N, int64_t limit"; - specialize qw/av1_wedge_sign_from_residuals sse2 avx2 neon/; + specialize qw/av1_wedge_sign_from_residuals sse2 avx2 neon sve/; add_proto qw/void av1_wedge_compute_delta_squares/, "int16_t *d, const int16_t *a, const int16_t *b, int N"; specialize qw/av1_wedge_compute_delta_squares sse2 avx2 neon/; @@ -521,21 +531,21 @@ add_proto qw/void cdef_copy_rect8_16bit_to_16bit/, "uint16_t *dst, int dstride, # structs as arguments, which makes the v256 type of the intrinsics # hard to support, so optimizations for this target are disabled. 
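The $sse2_x86/$ssse3_x86 variables introduced above expand to real specializations only when the target arch is 32-bit x86, so the SSSE3 kernels stay reachable under 32-bit Valgrind while 64-bit builds rely on SSE4.1 and newer. At run time the generated rtcd setup code roughly walks the detected CPU flags and keeps the last supported candidate. A self-contained sketch of that selection pattern, with hypothetical names standing in for the generated dispatch code:

    #include <stdio.h>

    typedef void (*cdef_filter_fn)(void); /* real signature elided */

    static void cdef_filter_8_0_c(void) { puts("C baseline"); }
    static void cdef_filter_8_0_ssse3(void) { puts("SSSE3"); }
    static void cdef_filter_8_0_sse4_1(void) { puts("SSE4.1"); }

    /* Stand-ins for the CPU-flag bits defined in aom_ports. */
    #define HAS_SSSE3 0x1
    #define HAS_SSE4_1 0x2

    static cdef_filter_fn cdef_filter_8_0;

    static void setup_rtcd(int flags) {
      cdef_filter_8_0 = cdef_filter_8_0_c; /* always-safe default */
      if (flags & HAS_SSSE3) cdef_filter_8_0 = cdef_filter_8_0_ssse3;
      if (flags & HAS_SSE4_1) cdef_filter_8_0 = cdef_filter_8_0_sse4_1;
    }

    int main(void) {
      /* 32-bit Valgrind reports SSSE3 but not SSE4.1, so the SSSE3
       * fallback is what gets picked. */
      setup_rtcd(HAS_SSSE3);
      cdef_filter_8_0();
      return 0;
    }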
if ($opts{config} !~ /libs-x86-win32-vs.*/) { - specialize qw/cdef_find_dir sse2 ssse3 sse4_1 avx2 neon/; - specialize qw/cdef_find_dir_dual sse2 ssse3 sse4_1 avx2 neon/; + specialize qw/cdef_find_dir sse4_1 avx2 neon/, "$ssse3_x86"; + specialize qw/cdef_find_dir_dual sse4_1 avx2 neon/, "$ssse3_x86"; - specialize qw/cdef_filter_8_0 sse2 ssse3 sse4_1 avx2 neon/; - specialize qw/cdef_filter_8_1 sse2 ssse3 sse4_1 avx2 neon/; - specialize qw/cdef_filter_8_2 sse2 ssse3 sse4_1 avx2 neon/; - specialize qw/cdef_filter_8_3 sse2 ssse3 sse4_1 avx2 neon/; + specialize qw/cdef_filter_8_0 sse4_1 avx2 neon/, "$ssse3_x86"; + specialize qw/cdef_filter_8_1 sse4_1 avx2 neon/, "$ssse3_x86"; + specialize qw/cdef_filter_8_2 sse4_1 avx2 neon/, "$ssse3_x86"; + specialize qw/cdef_filter_8_3 sse4_1 avx2 neon/, "$ssse3_x86"; - specialize qw/cdef_filter_16_0 sse2 ssse3 sse4_1 avx2 neon/; - specialize qw/cdef_filter_16_1 sse2 ssse3 sse4_1 avx2 neon/; - specialize qw/cdef_filter_16_2 sse2 ssse3 sse4_1 avx2 neon/; - specialize qw/cdef_filter_16_3 sse2 ssse3 sse4_1 avx2 neon/; + specialize qw/cdef_filter_16_0 sse4_1 avx2 neon/, "$ssse3_x86"; + specialize qw/cdef_filter_16_1 sse4_1 avx2 neon/, "$ssse3_x86"; + specialize qw/cdef_filter_16_2 sse4_1 avx2 neon/, "$ssse3_x86"; + specialize qw/cdef_filter_16_3 sse4_1 avx2 neon/, "$ssse3_x86"; - specialize qw/cdef_copy_rect8_8bit_to_16bit sse2 ssse3 sse4_1 avx2 neon/; - specialize qw/cdef_copy_rect8_16bit_to_16bit sse2 ssse3 sse4_1 avx2 neon/; + specialize qw/cdef_copy_rect8_8bit_to_16bit sse4_1 avx2 neon/, "$ssse3_x86"; + specialize qw/cdef_copy_rect8_16bit_to_16bit sse4_1 avx2 neon/, "$ssse3_x86"; } # WARPED_MOTION / GLOBAL_MOTION functions @@ -591,20 +601,20 @@ if(aom_config("CONFIG_AV1_HIGHBITDEPTH") eq "yes") { specialize qw/av1_convolve_y_sr sse2 avx2 neon/; specialize qw/av1_convolve_y_sr_intrabc neon/; specialize qw/av1_convolve_2d_scale sse4_1/; - specialize qw/av1_dist_wtd_convolve_2d sse2 ssse3 avx2 neon neon_dotprod neon_i8mm/; + specialize qw/av1_dist_wtd_convolve_2d ssse3 avx2 neon neon_dotprod neon_i8mm/; specialize qw/av1_dist_wtd_convolve_2d_copy sse2 avx2 neon/; specialize qw/av1_dist_wtd_convolve_x sse2 avx2 neon neon_dotprod neon_i8mm/; specialize qw/av1_dist_wtd_convolve_y sse2 avx2 neon/; if(aom_config("CONFIG_AV1_HIGHBITDEPTH") eq "yes") { - specialize qw/av1_highbd_dist_wtd_convolve_2d sse4_1 avx2 neon/; - specialize qw/av1_highbd_dist_wtd_convolve_x sse4_1 avx2 neon/; - specialize qw/av1_highbd_dist_wtd_convolve_y sse4_1 avx2 neon/; + specialize qw/av1_highbd_dist_wtd_convolve_2d sse4_1 avx2 neon sve2/; + specialize qw/av1_highbd_dist_wtd_convolve_x sse4_1 avx2 neon sve2/; + specialize qw/av1_highbd_dist_wtd_convolve_y sse4_1 avx2 neon sve2/; specialize qw/av1_highbd_dist_wtd_convolve_2d_copy sse4_1 avx2 neon/; - specialize qw/av1_highbd_convolve_2d_sr ssse3 avx2 neon/; + specialize qw/av1_highbd_convolve_2d_sr ssse3 avx2 neon sve2/; specialize qw/av1_highbd_convolve_2d_sr_intrabc neon/; - specialize qw/av1_highbd_convolve_x_sr ssse3 avx2 neon/; + specialize qw/av1_highbd_convolve_x_sr ssse3 avx2 neon sve2/; specialize qw/av1_highbd_convolve_x_sr_intrabc neon/; - specialize qw/av1_highbd_convolve_y_sr ssse3 avx2 neon/; + specialize qw/av1_highbd_convolve_y_sr ssse3 avx2 neon sve2/; specialize qw/av1_highbd_convolve_y_sr_intrabc neon/; specialize qw/av1_highbd_convolve_2d_scale sse4_1 neon/; } diff --git a/third_party/aom/av1/common/cdef.c b/third_party/aom/av1/common/cdef.c index 12e9545441..5cec940a8e 100644 --- a/third_party/aom/av1/common/cdef.c +++ 
b/third_party/aom/av1/common/cdef.c @@ -10,15 +10,19 @@ */ #include -#include +#include #include #include "config/aom_scale_rtcd.h" #include "aom/aom_integer.h" +#include "aom_util/aom_pthread.h" #include "av1/common/av1_common_int.h" #include "av1/common/cdef.h" #include "av1/common/cdef_block.h" +#include "av1/common/common.h" +#include "av1/common/common_data.h" +#include "av1/common/enums.h" #include "av1/common/reconinter.h" #include "av1/common/thread_common.h" @@ -92,7 +96,7 @@ void av1_cdef_copy_sb8_16_lowbd(uint16_t *const dst, int dstride, const uint8_t *src, int src_voffset, int src_hoffset, int sstride, int vsize, int hsize) { - const uint8_t *base = &src[src_voffset * sstride + src_hoffset]; + const uint8_t *base = &src[src_voffset * (ptrdiff_t)sstride + src_hoffset]; cdef_copy_rect8_8bit_to_16bit(dst, dstride, base, sstride, hsize, vsize); } @@ -101,7 +105,7 @@ void av1_cdef_copy_sb8_16_highbd(uint16_t *const dst, int dstride, int src_hoffset, int sstride, int vsize, int hsize) { const uint16_t *base = - &CONVERT_TO_SHORTPTR(src)[src_voffset * sstride + src_hoffset]; + &CONVERT_TO_SHORTPTR(src)[src_voffset * (ptrdiff_t)sstride + src_hoffset]; cdef_copy_rect8_16bit_to_16bit(dst, dstride, base, sstride, hsize, vsize); } @@ -247,7 +251,8 @@ static void cdef_prepare_fb(const AV1_COMMON *const cm, CdefBlockInfo *fb_info, static INLINE void cdef_filter_fb(CdefBlockInfo *const fb_info, int plane, uint8_t use_highbitdepth) { - int offset = fb_info->dst_stride * fb_info->roffset + fb_info->coffset; + ptrdiff_t offset = + (ptrdiff_t)fb_info->dst_stride * fb_info->roffset + fb_info->coffset; if (use_highbitdepth) { av1_cdef_filter_fb( NULL, CONVERT_TO_SHORTPTR(fb_info->dst + offset), fb_info->dst_stride, diff --git a/third_party/aom/av1/common/entropymode.h b/third_party/aom/av1/common/entropymode.h index 09cd6bd1e9..028bd21ae3 100644 --- a/third_party/aom/av1/common/entropymode.h +++ b/third_party/aom/av1/common/entropymode.h @@ -12,6 +12,7 @@ #ifndef AOM_AV1_COMMON_ENTROPYMODE_H_ #define AOM_AV1_COMMON_ENTROPYMODE_H_ +#include "aom_ports/bitops.h" #include "av1/common/entropy.h" #include "av1/common/entropymv.h" #include "av1/common/filter.h" @@ -192,13 +193,7 @@ void av1_setup_past_independence(struct AV1Common *cm); // Returns (int)ceil(log2(n)). static INLINE int av1_ceil_log2(int n) { if (n < 2) return 0; - int i = 1; - unsigned int p = 2; - while (p < (unsigned int)n) { - i++; - p = p << 1; - } - return i; + return get_msb(n - 1) + 1; } // Returns the context for palette color index at row 'r' and column 'c', diff --git a/third_party/aom/av1/common/quant_common.c b/third_party/aom/av1/common/quant_common.c index b0976287ef..58eb113370 100644 --- a/third_party/aom/av1/common/quant_common.c +++ b/third_party/aom/av1/common/quant_common.c @@ -9,10 +9,15 @@ * PATENTS file, you can obtain it at www.aomedia.org/license/patent. */ +#include "config/aom_config.h" + +#include "aom/aom_frame_buffer.h" +#include "aom_scale/yv12config.h" #include "av1/common/av1_common_int.h" #include "av1/common/blockd.h" #include "av1/common/common.h" #include "av1/common/entropy.h" +#include "av1/common/filter.h" #include "av1/common/quant_common.h" #include "av1/common/seg_common.h" @@ -274,13 +279,16 @@ const qm_val_t *av1_get_qmatrix(const CommonQuantParams *quant_params, : quant_params->gqmatrix[NUM_QM_LEVELS - 1][0][qm_tx_size]; } +#if CONFIG_QUANT_MATRIX || CONFIG_AV1_DECODER #define QM_TOTAL_SIZE 3344 // We only use wt_matrix_ref[q] and iwt_matrix_ref[q] // for q = 0, ..., NUM_QM_LEVELS - 2. 
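The av1_ceil_log2 rewrite above replaces the shift loop with get_msb(n - 1) + 1, which computes the same quantity: for n >= 2, ceil(log2(n)) equals the index of the highest set bit of n - 1, plus one. A self-contained check of that identity, using a portable stand-in for get_msb() from aom_ports/bitops.h:

    #include <assert.h>

    static int msb(unsigned int n) { /* index of the highest set bit */
      int i = 0;
      while (n >>= 1) ++i;
      return i;
    }

    static int ceil_log2(int n) {
      if (n < 2) return 0;
      return msb((unsigned int)(n - 1)) + 1;
    }

    int main(void) {
      /* e.g. n = 17: msb(16) = 4, so ceil(log2(17)) = 5. */
      assert(ceil_log2(1) == 0 && ceil_log2(2) == 1);
      assert(ceil_log2(16) == 4 && ceil_log2(17) == 5);
      return 0;
    }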
static const qm_val_t wt_matrix_ref[NUM_QM_LEVELS - 1][2][QM_TOTAL_SIZE]; static const qm_val_t iwt_matrix_ref[NUM_QM_LEVELS - 1][2][QM_TOTAL_SIZE]; +#endif void av1_qm_init(CommonQuantParams *quant_params, int num_planes) { +#if CONFIG_QUANT_MATRIX || CONFIG_AV1_DECODER for (int q = 0; q < NUM_QM_LEVELS; ++q) { for (int c = 0; c < num_planes; ++c) { int current = 0; @@ -306,6 +314,10 @@ void av1_qm_init(CommonQuantParams *quant_params, int num_planes) { } } } +#else + (void)quant_params; + (void)num_planes; +#endif // CONFIG_QUANT_MATRIX || CONFIG_AV1_DECODER } /* Provide 15 sets of quantization matrices for chroma and luma @@ -320,6 +332,8 @@ void av1_qm_init(CommonQuantParams *quant_params, int num_planes) { distances. Matrices for QM level 15 are omitted because they are not used. */ + +#if CONFIG_QUANT_MATRIX || CONFIG_AV1_DECODER static const qm_val_t iwt_matrix_ref[NUM_QM_LEVELS - 1][2][QM_TOTAL_SIZE] = { { { /* Luma */ @@ -12873,4 +12887,6 @@ static const qm_val_t wt_matrix_ref[NUM_QM_LEVELS - 1][2][QM_TOTAL_SIZE] = { 33, 33, 32, 32, 32, 32, 34, 33, 33, 33, 32, 32, 32, 32, 34, 33, 33, 33, 32, 32, 32, 32 }, }, -}; \ No newline at end of file +}; + +#endif // CONFIG_QUANT_MATRIX || CONFIG_AV1_DECODER diff --git a/third_party/aom/av1/common/reconintra.c b/third_party/aom/av1/common/reconintra.c index f68af18cb1..497863e117 100644 --- a/third_party/aom/av1/common/reconintra.c +++ b/third_party/aom/av1/common/reconintra.c @@ -1196,7 +1196,8 @@ static void build_directional_and_filter_intra_predictors( const int need_right = p_angle < 90; const int need_bottom = p_angle > 180; if (p_angle != 90 && p_angle != 180) { - const int ab_le = need_above_left ? 1 : 0; + assert(need_above_left); + const int ab_le = 1; if (need_above && need_left && (txwpx + txhpx >= 24)) { filter_intra_edge_corner(above_row, left_col); } @@ -1500,7 +1501,8 @@ static void highbd_build_directional_and_filter_intra_predictors( const int need_right = p_angle < 90; const int need_bottom = p_angle > 180; if (p_angle != 90 && p_angle != 180) { - const int ab_le = need_above_left ? 
1 : 0; + assert(need_above_left); + const int ab_le = 1; if (need_above && need_left && (txwpx + txhpx >= 24)) { highbd_filter_intra_edge_corner(above_row, left_col); } diff --git a/third_party/aom/av1/common/resize.c b/third_party/aom/av1/common/resize.c index 1b348836a5..441323ab1f 100644 --- a/third_party/aom/av1/common/resize.c +++ b/third_party/aom/av1/common/resize.c @@ -524,7 +524,7 @@ static void fill_arr_to_col(uint8_t *img, int stride, int len, uint8_t *arr) { } } -bool av1_resize_plane(const uint8_t *const input, int height, int width, +bool av1_resize_plane(const uint8_t *input, int height, int width, int in_stride, uint8_t *output, int height2, int width2, int out_stride) { int i; @@ -881,7 +881,7 @@ static void highbd_fill_arr_to_col(uint16_t *img, int stride, int len, } } -void av1_highbd_resize_plane(const uint8_t *const input, int height, int width, +void av1_highbd_resize_plane(const uint8_t *input, int height, int width, int in_stride, uint8_t *output, int height2, int width2, int out_stride, int bd) { int i; @@ -980,10 +980,9 @@ static bool highbd_upscale_normative_rect(const uint8_t *const input, } #endif // CONFIG_AV1_HIGHBITDEPTH -void av1_resize_frame420(const uint8_t *const y, int y_stride, - const uint8_t *const u, const uint8_t *const v, - int uv_stride, int height, int width, uint8_t *oy, - int oy_stride, uint8_t *ou, uint8_t *ov, +void av1_resize_frame420(const uint8_t *y, int y_stride, const uint8_t *u, + const uint8_t *v, int uv_stride, int height, int width, + uint8_t *oy, int oy_stride, uint8_t *ou, uint8_t *ov, int ouv_stride, int oheight, int owidth) { if (!av1_resize_plane(y, height, width, y_stride, oy, oheight, owidth, oy_stride)) @@ -996,10 +995,9 @@ void av1_resize_frame420(const uint8_t *const y, int y_stride, abort(); } -bool av1_resize_frame422(const uint8_t *const y, int y_stride, - const uint8_t *const u, const uint8_t *const v, - int uv_stride, int height, int width, uint8_t *oy, - int oy_stride, uint8_t *ou, uint8_t *ov, +bool av1_resize_frame422(const uint8_t *y, int y_stride, const uint8_t *u, + const uint8_t *v, int uv_stride, int height, int width, + uint8_t *oy, int oy_stride, uint8_t *ou, uint8_t *ov, int ouv_stride, int oheight, int owidth) { if (!av1_resize_plane(y, height, width, y_stride, oy, oheight, owidth, oy_stride)) @@ -1013,10 +1011,9 @@ bool av1_resize_frame422(const uint8_t *const y, int y_stride, return true; } -bool av1_resize_frame444(const uint8_t *const y, int y_stride, - const uint8_t *const u, const uint8_t *const v, - int uv_stride, int height, int width, uint8_t *oy, - int oy_stride, uint8_t *ou, uint8_t *ov, +bool av1_resize_frame444(const uint8_t *y, int y_stride, const uint8_t *u, + const uint8_t *v, int uv_stride, int height, int width, + uint8_t *oy, int oy_stride, uint8_t *ou, uint8_t *ov, int ouv_stride, int oheight, int owidth) { if (!av1_resize_plane(y, height, width, y_stride, oy, oheight, owidth, oy_stride)) @@ -1031,8 +1028,8 @@ bool av1_resize_frame444(const uint8_t *const y, int y_stride, } #if CONFIG_AV1_HIGHBITDEPTH -void av1_highbd_resize_frame420(const uint8_t *const y, int y_stride, - const uint8_t *const u, const uint8_t *const v, +void av1_highbd_resize_frame420(const uint8_t *y, int y_stride, + const uint8_t *u, const uint8_t *v, int uv_stride, int height, int width, uint8_t *oy, int oy_stride, uint8_t *ou, uint8_t *ov, int ouv_stride, int oheight, @@ -1045,8 +1042,8 @@ void av1_highbd_resize_frame420(const uint8_t *const y, int y_stride, owidth / 2, ouv_stride, bd); } -void 
av1_highbd_resize_frame422(const uint8_t *const y, int y_stride, - const uint8_t *const u, const uint8_t *const v, +void av1_highbd_resize_frame422(const uint8_t *y, int y_stride, + const uint8_t *u, const uint8_t *v, int uv_stride, int height, int width, uint8_t *oy, int oy_stride, uint8_t *ou, uint8_t *ov, int ouv_stride, int oheight, @@ -1059,8 +1056,8 @@ void av1_highbd_resize_frame422(const uint8_t *const y, int y_stride, owidth / 2, ouv_stride, bd); } -void av1_highbd_resize_frame444(const uint8_t *const y, int y_stride, - const uint8_t *const u, const uint8_t *const v, +void av1_highbd_resize_frame444(const uint8_t *y, int y_stride, + const uint8_t *u, const uint8_t *v, int uv_stride, int height, int width, uint8_t *oy, int oy_stride, uint8_t *ou, uint8_t *ov, int ouv_stride, int oheight, @@ -1126,7 +1123,7 @@ void av1_resize_and_extend_frame_c(const YV12_BUFFER_CONFIG *src, bool av1_resize_and_extend_frame_nonnormative(const YV12_BUFFER_CONFIG *src, YV12_BUFFER_CONFIG *dst, int bd, - const int num_planes) { + int num_planes) { // TODO(dkovalev): replace YV12_BUFFER_CONFIG with aom_image_t // We use AOMMIN(num_planes, MAX_MB_PLANE) instead of num_planes to quiet @@ -1246,8 +1243,7 @@ void av1_upscale_normative_and_extend_frame(const AV1_COMMON *cm, YV12_BUFFER_CONFIG *av1_realloc_and_scale_if_required( AV1_COMMON *cm, YV12_BUFFER_CONFIG *unscaled, YV12_BUFFER_CONFIG *scaled, const InterpFilter filter, const int phase, const bool use_optimized_scaler, - const bool for_psnr, const int border_in_pixels, - const int num_pyramid_levels) { + const bool for_psnr, const int border_in_pixels, const bool alloc_pyramid) { // If scaling is performed for the sole purpose of calculating PSNR, then our // target dimensions are superres upscaled width/height. Otherwise our target // dimensions are coded width/height. @@ -1267,7 +1263,7 @@ YV12_BUFFER_CONFIG *av1_realloc_and_scale_if_required( scaled, scaled_width, scaled_height, seq_params->subsampling_x, seq_params->subsampling_y, seq_params->use_highbitdepth, border_in_pixels, cm->features.byte_alignment, NULL, NULL, NULL, - num_pyramid_levels, 0)) + alloc_pyramid, 0)) aom_internal_error(cm->error, AOM_CODEC_MEM_ERROR, "Failed to allocate scaled buffer"); @@ -1363,7 +1359,7 @@ static void copy_buffer_config(const YV12_BUFFER_CONFIG *const src, // TODO(afergs): aom_ vs av1_ functions? Which can I use? // Upscale decoded image. 
void av1_superres_upscale(AV1_COMMON *cm, BufferPool *const pool, - int num_pyramid_levels) { + bool alloc_pyramid) { const int num_planes = av1_num_planes(cm); if (!av1_superres_scaled(cm)) return; const SequenceHeader *const seq_params = cm->seq_params; @@ -1378,7 +1374,7 @@ void av1_superres_upscale(AV1_COMMON *cm, BufferPool *const pool, if (aom_alloc_frame_buffer( &copy_buffer, aligned_width, cm->height, seq_params->subsampling_x, seq_params->subsampling_y, seq_params->use_highbitdepth, - AOM_BORDER_IN_PIXELS, byte_alignment, 0, 0)) + AOM_BORDER_IN_PIXELS, byte_alignment, false, 0)) aom_internal_error(cm->error, AOM_CODEC_MEM_ERROR, "Failed to allocate copy buffer for superres upscaling"); @@ -1411,7 +1407,7 @@ void av1_superres_upscale(AV1_COMMON *cm, BufferPool *const pool, cm->superres_upscaled_height, seq_params->subsampling_x, seq_params->subsampling_y, seq_params->use_highbitdepth, AOM_BORDER_IN_PIXELS, byte_alignment, fb, cb, cb_priv, - num_pyramid_levels, 0)) { + alloc_pyramid, 0)) { unlock_buffer_pool(pool); aom_internal_error( cm->error, AOM_CODEC_MEM_ERROR, @@ -1428,7 +1424,7 @@ void av1_superres_upscale(AV1_COMMON *cm, BufferPool *const pool, frame_to_show, cm->superres_upscaled_width, cm->superres_upscaled_height, seq_params->subsampling_x, seq_params->subsampling_y, seq_params->use_highbitdepth, - AOM_BORDER_IN_PIXELS, byte_alignment, num_pyramid_levels, 0)) + AOM_BORDER_IN_PIXELS, byte_alignment, alloc_pyramid, 0)) aom_internal_error( cm->error, AOM_CODEC_MEM_ERROR, "Failed to reallocate current frame buffer for superres upscaling"); diff --git a/third_party/aom/av1/common/resize.h b/third_party/aom/av1/common/resize.h index 0ba3108f72..d573a538bf 100644 --- a/third_party/aom/av1/common/resize.h +++ b/third_party/aom/av1/common/resize.h @@ -20,44 +20,41 @@ extern "C" { #endif -bool av1_resize_plane(const uint8_t *const input, int height, int width, +bool av1_resize_plane(const uint8_t *input, int height, int width, int in_stride, uint8_t *output, int height2, int width2, int out_stride); // TODO(aomedia:3228): In libaom 4.0.0, remove av1_resize_frame420 from // av1/exports_com and delete this function.
-void av1_resize_frame420(const uint8_t *const y, int y_stride, - const uint8_t *const u, const uint8_t *const v, - int uv_stride, int height, int width, uint8_t *oy, - int oy_stride, uint8_t *ou, uint8_t *ov, +void av1_resize_frame420(const uint8_t *y, int y_stride, const uint8_t *u, + const uint8_t *v, int uv_stride, int height, int width, + uint8_t *oy, int oy_stride, uint8_t *ou, uint8_t *ov, int ouv_stride, int oheight, int owidth); -bool av1_resize_frame422(const uint8_t *const y, int y_stride, - const uint8_t *const u, const uint8_t *const v, - int uv_stride, int height, int width, uint8_t *oy, - int oy_stride, uint8_t *ou, uint8_t *ov, +bool av1_resize_frame422(const uint8_t *y, int y_stride, const uint8_t *u, + const uint8_t *v, int uv_stride, int height, int width, + uint8_t *oy, int oy_stride, uint8_t *ou, uint8_t *ov, int ouv_stride, int oheight, int owidth); -bool av1_resize_frame444(const uint8_t *const y, int y_stride, - const uint8_t *const u, const uint8_t *const v, - int uv_stride, int height, int width, uint8_t *oy, - int oy_stride, uint8_t *ou, uint8_t *ov, +bool av1_resize_frame444(const uint8_t *y, int y_stride, const uint8_t *u, + const uint8_t *v, int uv_stride, int height, int width, + uint8_t *oy, int oy_stride, uint8_t *ou, uint8_t *ov, int ouv_stride, int oheight, int owidth); -void av1_highbd_resize_plane(const uint8_t *const input, int height, int width, +void av1_highbd_resize_plane(const uint8_t *input, int height, int width, int in_stride, uint8_t *output, int height2, int width2, int out_stride, int bd); -void av1_highbd_resize_frame420(const uint8_t *const y, int y_stride, - const uint8_t *const u, const uint8_t *const v, +void av1_highbd_resize_frame420(const uint8_t *y, int y_stride, + const uint8_t *u, const uint8_t *v, int uv_stride, int height, int width, uint8_t *oy, int oy_stride, uint8_t *ou, uint8_t *ov, int ouv_stride, int oheight, int owidth, int bd); -void av1_highbd_resize_frame422(const uint8_t *const y, int y_stride, - const uint8_t *const u, const uint8_t *const v, +void av1_highbd_resize_frame422(const uint8_t *y, int y_stride, + const uint8_t *u, const uint8_t *v, int uv_stride, int height, int width, uint8_t *oy, int oy_stride, uint8_t *ou, uint8_t *ov, int ouv_stride, int oheight, int owidth, int bd); -void av1_highbd_resize_frame444(const uint8_t *const y, int y_stride, - const uint8_t *const u, const uint8_t *const v, +void av1_highbd_resize_frame444(const uint8_t *y, int y_stride, + const uint8_t *u, const uint8_t *v, int uv_stride, int height, int width, uint8_t *oy, int oy_stride, uint8_t *ou, uint8_t *ov, int ouv_stride, int oheight, @@ -73,12 +70,11 @@ void av1_upscale_normative_and_extend_frame(const AV1_COMMON *cm, YV12_BUFFER_CONFIG *av1_realloc_and_scale_if_required( AV1_COMMON *cm, YV12_BUFFER_CONFIG *unscaled, YV12_BUFFER_CONFIG *scaled, const InterpFilter filter, const int phase, const bool use_optimized_scaler, - const bool for_psnr, const int border_in_pixels, - const int num_pyramid_levels); + const bool for_psnr, const int border_in_pixels, const bool alloc_pyramid); bool av1_resize_and_extend_frame_nonnormative(const YV12_BUFFER_CONFIG *src, YV12_BUFFER_CONFIG *dst, int bd, - const int num_planes); + int num_planes); // Calculates the scaled dimensions from the given original dimensions and the // resize scale denominator. 
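The pyramid-allocation plumbing in these resize/superres hunks changes from an integer level count (num_pyramid_levels) to a plain bool (alloc_pyramid): callers now only say whether a pyramid buffer should be allocated at all. A call-shape sketch for the new parameter; the signatures come from the declarations in this patch, while the helper name and its surrounding setup are assumptions for illustration.

    #include "av1/common/resize.h" /* declares av1_superres_upscale() */

    /* Hypothetical decoder-style call site: upscale after decode,
     * no scaling pyramid needed. */
    static void upscale_after_decode(AV1_COMMON *cm, BufferPool *pool) {
      av1_superres_upscale(cm, pool, /*alloc_pyramid=*/false);
    }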
@@ -95,7 +91,7 @@ void av1_calculate_scaled_superres_size(int *width, int *height, void av1_calculate_unscaled_superres_size(int *width, int *height, int denom); void av1_superres_upscale(AV1_COMMON *cm, BufferPool *const pool, - int num_pyramid_levels); + bool alloc_pyramid); // Returns 1 if a superres upscaled frame is scaled and 0 otherwise. static INLINE int av1_superres_scaled(const AV1_COMMON *cm) { diff --git a/third_party/aom/av1/common/restoration.c b/third_party/aom/av1/common/restoration.c index 0be126fa65..335fdc8c2a 100644 --- a/third_party/aom/av1/common/restoration.c +++ b/third_party/aom/av1/common/restoration.c @@ -11,20 +11,24 @@ */ #include +#include #include "config/aom_config.h" -#include "config/aom_dsp_rtcd.h" #include "config/aom_scale_rtcd.h" +#include "aom/internal/aom_codec_internal.h" #include "aom_mem/aom_mem.h" +#include "aom_dsp/aom_dsp_common.h" +#include "aom_mem/aom_mem.h" +#include "aom_ports/mem.h" +#include "aom_util/aom_pthread.h" + #include "av1/common/av1_common_int.h" +#include "av1/common/convolve.h" +#include "av1/common/enums.h" #include "av1/common/resize.h" #include "av1/common/restoration.h" #include "av1/common/thread_common.h" -#include "aom_dsp/aom_dsp_common.h" -#include "aom_mem/aom_mem.h" - -#include "aom_ports/mem.h" // The 's' values are calculated based on original 'r' and 'e' values in the // spec using GenSgrprojVtable(). @@ -115,8 +119,9 @@ void av1_loop_restoration_precal(void) { #endif } -static void extend_frame_lowbd(uint8_t *data, int width, int height, int stride, - int border_horz, int border_vert) { +static void extend_frame_lowbd(uint8_t *data, int width, int height, + ptrdiff_t stride, int border_horz, + int border_vert) { uint8_t *data_p; int i; for (i = 0; i < height; ++i) { @@ -136,7 +141,8 @@ static void extend_frame_lowbd(uint8_t *data, int width, int height, int stride, #if CONFIG_AV1_HIGHBITDEPTH static void extend_frame_highbd(uint16_t *data, int width, int height, - int stride, int border_horz, int border_vert) { + ptrdiff_t stride, int border_horz, + int border_vert) { uint16_t *data_p; int i, j; for (i = 0; i < height; ++i) { @@ -988,8 +994,10 @@ void av1_loop_restoration_filter_unit( int unit_h = limits->v_end - limits->v_start; int unit_w = limits->h_end - limits->h_start; - uint8_t *data8_tl = data8 + limits->v_start * stride + limits->h_start; - uint8_t *dst8_tl = dst8 + limits->v_start * dst_stride + limits->h_start; + uint8_t *data8_tl = + data8 + limits->v_start * (ptrdiff_t)stride + limits->h_start; + uint8_t *dst8_tl = + dst8 + limits->v_start * (ptrdiff_t)dst_stride + limits->h_start; if (unit_rtype == RESTORE_NONE) { copy_rest_unit(unit_w, unit_h, data8_tl, stride, dst8_tl, dst_stride, @@ -1074,7 +1082,8 @@ void av1_loop_restoration_filter_frame_init(AV1LrStruct *lr_ctxt, if (aom_realloc_frame_buffer( lr_ctxt->dst, frame_width, frame_height, seq_params->subsampling_x, seq_params->subsampling_y, highbd, AOM_RESTORATION_FRAME_BORDER, - cm->features.byte_alignment, NULL, NULL, NULL, 0, 0) != AOM_CODEC_OK) + cm->features.byte_alignment, NULL, NULL, NULL, false, + 0) != AOM_CODEC_OK) aom_internal_error(cm->error, AOM_CODEC_MEM_ERROR, "Failed to allocate restoration dst buffer"); @@ -1349,7 +1358,7 @@ static void save_deblock_boundary_lines( const int is_uv = plane > 0; const uint8_t *src_buf = REAL_PTR(use_highbd, frame->buffers[plane]); const int src_stride = frame->strides[is_uv] << use_highbd; - const uint8_t *src_rows = src_buf + row * src_stride; + const uint8_t *src_rows = src_buf + row * 
(ptrdiff_t)src_stride; uint8_t *bdry_buf = is_above ? boundaries->stripe_boundary_above : boundaries->stripe_boundary_below; @@ -1404,7 +1413,7 @@ static void save_cdef_boundary_lines(const YV12_BUFFER_CONFIG *frame, const int is_uv = plane > 0; const uint8_t *src_buf = REAL_PTR(use_highbd, frame->buffers[plane]); const int src_stride = frame->strides[is_uv] << use_highbd; - const uint8_t *src_rows = src_buf + row * src_stride; + const uint8_t *src_rows = src_buf + row * (ptrdiff_t)src_stride; uint8_t *bdry_buf = is_above ? boundaries->stripe_boundary_above : boundaries->stripe_boundary_below; diff --git a/third_party/aom/av1/common/thread_common.c b/third_party/aom/av1/common/thread_common.c index 45695147ff..8a137cc9f7 100644 --- a/third_party/aom/av1/common/thread_common.c +++ b/third_party/aom/av1/common/thread_common.c @@ -14,12 +14,19 @@ #include "config/aom_scale_rtcd.h" #include "aom_dsp/aom_dsp_common.h" +#include "aom_dsp/txfm_common.h" #include "aom_mem/aom_mem.h" +#include "aom_util/aom_pthread.h" +#include "aom_util/aom_thread.h" #include "av1/common/av1_loopfilter.h" +#include "av1/common/blockd.h" +#include "av1/common/cdef.h" #include "av1/common/entropymode.h" +#include "av1/common/enums.h" #include "av1/common/thread_common.h" #include "av1/common/reconinter.h" #include "av1/common/reconintra.h" +#include "av1/common/restoration.h" // Set up nsync by width. static INLINE int get_sync_range(int width) { diff --git a/third_party/aom/av1/common/thread_common.h b/third_party/aom/av1/common/thread_common.h index 675687dc98..7e681f322b 100644 --- a/third_party/aom/av1/common/thread_common.h +++ b/third_party/aom/av1/common/thread_common.h @@ -16,6 +16,7 @@ #include "av1/common/av1_loopfilter.h" #include "av1/common/cdef.h" +#include "aom_util/aom_pthread.h" #include "aom_util/aom_thread.h" #ifdef __cplusplus diff --git a/third_party/aom/av1/common/tile_common.c b/third_party/aom/av1/common/tile_common.c index b964f259b8..45a189d69a 100644 --- a/third_party/aom/av1/common/tile_common.c +++ b/third_party/aom/av1/common/tile_common.c @@ -177,46 +177,16 @@ int av1_get_sb_cols_in_tile(const AV1_COMMON *cm, const TileInfo *tile) { cm->seq_params->mib_size_log2); } -PixelRect av1_get_tile_rect(const TileInfo *tile_info, const AV1_COMMON *cm, - int is_uv) { - PixelRect r; - - // Calculate position in the Y plane - r.left = tile_info->mi_col_start * MI_SIZE; - r.right = tile_info->mi_col_end * MI_SIZE; - r.top = tile_info->mi_row_start * MI_SIZE; - r.bottom = tile_info->mi_row_end * MI_SIZE; - - // If upscaling is enabled, the tile limits need scaling to match the - // upscaled frame where the restoration units live. To do this, scale up the - // top-left and bottom-right of the tile. - if (av1_superres_scaled(cm)) { - av1_calculate_unscaled_superres_size(&r.left, &r.top, - cm->superres_scale_denominator); - av1_calculate_unscaled_superres_size(&r.right, &r.bottom, - cm->superres_scale_denominator); - } - - const int frame_w = cm->superres_upscaled_width; - const int frame_h = cm->superres_upscaled_height; - - // Make sure we don't fall off the bottom-right of the frame. 
- r.right = AOMMIN(r.right, frame_w); - r.bottom = AOMMIN(r.bottom, frame_h); - - // Convert to coordinates in the appropriate plane - const int ss_x = is_uv && cm->seq_params->subsampling_x; - const int ss_y = is_uv && cm->seq_params->subsampling_y; - - r.left = ROUND_POWER_OF_TWO(r.left, ss_x); - r.right = ROUND_POWER_OF_TWO(r.right, ss_x); - r.top = ROUND_POWER_OF_TWO(r.top, ss_y); - r.bottom = ROUND_POWER_OF_TWO(r.bottom, ss_y); - - return r; -} - -void av1_get_uniform_tile_size(const AV1_COMMON *cm, int *w, int *h) { +// Section 7.3.1 of the AV1 spec says, on pages 200-201: +// It is a requirement of bitstream conformance that the following conditions +// are met: +// ... +// * TileHeight is equal to (use_128x128_superblock ? 128 : 64) for all +// tiles (i.e. the tile is exactly one superblock high) +// * TileWidth is identical for all tiles and is an integer multiple of +// TileHeight (i.e. the tile is an integer number of superblocks wide) +// ... +bool av1_get_uniform_tile_size(const AV1_COMMON *cm, int *w, int *h) { const CommonTileParams *const tiles = &cm->tiles; if (tiles->uniform_spacing) { *w = tiles->width; @@ -226,7 +196,10 @@ void av1_get_uniform_tile_size(const AV1_COMMON *cm, int *w, int *h) { const int tile_width_sb = tiles->col_start_sb[i + 1] - tiles->col_start_sb[i]; const int tile_w = tile_width_sb * cm->seq_params->mib_size; - assert(i == 0 || tile_w == *w); // ensure all tiles have same dimension + // ensure all tiles have same dimension + if (i != 0 && tile_w != *w) { + return false; + } *w = tile_w; } @@ -234,10 +207,14 @@ void av1_get_uniform_tile_size(const AV1_COMMON *cm, int *w, int *h) { const int tile_height_sb = tiles->row_start_sb[i + 1] - tiles->row_start_sb[i]; const int tile_h = tile_height_sb * cm->seq_params->mib_size; - assert(i == 0 || tile_h == *h); // ensure all tiles have same dimension + // ensure all tiles have same dimension + if (i != 0 && tile_h != *h) { + return false; + } *h = tile_h; } } + return true; } int av1_is_min_tile_width_satisfied(const AV1_COMMON *cm) { diff --git a/third_party/aom/av1/common/tile_common.h b/third_party/aom/av1/common/tile_common.h index 5383ae940b..12228c9e94 100644 --- a/third_party/aom/av1/common/tile_common.h +++ b/third_party/aom/av1/common/tile_common.h @@ -12,13 +12,14 @@ #ifndef AOM_AV1_COMMON_TILE_COMMON_H_ #define AOM_AV1_COMMON_TILE_COMMON_H_ +#include + +#include "config/aom_config.h" + #ifdef __cplusplus extern "C" { #endif -#include "config/aom_config.h" -#include "aom_dsp/rect.h" - struct AV1Common; struct SequenceHeader; struct CommonTileParams; @@ -43,10 +44,6 @@ void av1_tile_set_col(TileInfo *tile, const struct AV1Common *cm, int col); int av1_get_sb_rows_in_tile(const struct AV1Common *cm, const TileInfo *tile); int av1_get_sb_cols_in_tile(const struct AV1Common *cm, const TileInfo *tile); -// Return the pixel extents of the given tile -PixelRect av1_get_tile_rect(const TileInfo *tile_info, - const struct AV1Common *cm, int is_uv); - // Define tile maximum width and area // There is no maximum height since height is limited by area and width limits // The minimum tile width or height is fixed at one superblock @@ -56,7 +53,9 @@ PixelRect av1_get_tile_rect(const TileInfo *tile_info, #define MAX_TILE_AREA_LEVEL_7_AND_ABOVE (4096 * 4608) #endif -void av1_get_uniform_tile_size(const struct AV1Common *cm, int *w, int *h); +// Gets the width and height (in units of MI_SIZE) of the tiles in a tile list. +// Returns true on success, false on failure. 
+bool av1_get_uniform_tile_size(const struct AV1Common *cm, int *w, int *h); void av1_get_tile_limits(struct AV1Common *const cm); void av1_calculate_tile_cols(const struct SequenceHeader *const seq_params, int cm_mi_rows, int cm_mi_cols, diff --git a/third_party/aom/av1/common/x86/cdef_block_sse2.c b/third_party/aom/av1/common/x86/cdef_block_sse2.c deleted file mode 100644 index 5ab7ffa2ff..0000000000 --- a/third_party/aom/av1/common/x86/cdef_block_sse2.c +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright (c) 2016, Alliance for Open Media. All rights reserved - * - * This source code is subject to the terms of the BSD 2 Clause License and - * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License - * was not distributed with this source code in the LICENSE file, you can - * obtain it at www.aomedia.org/license/software. If the Alliance for Open - * Media Patent License 1.0 was not distributed with this source code in the - * PATENTS file, you can obtain it at www.aomedia.org/license/patent. - */ - -#include "aom_dsp/aom_simd.h" -#define SIMD_FUNC(name) name##_sse2 -#include "av1/common/cdef_block_simd.h" - -void cdef_find_dir_dual_sse2(const uint16_t *img1, const uint16_t *img2, - int stride, int32_t *var_out_1st, - int32_t *var_out_2nd, int coeff_shift, - int *out_dir_1st_8x8, int *out_dir_2nd_8x8) { - // Process first 8x8. - *out_dir_1st_8x8 = cdef_find_dir(img1, stride, var_out_1st, coeff_shift); - - // Process second 8x8. - *out_dir_2nd_8x8 = cdef_find_dir(img2, stride, var_out_2nd, coeff_shift); -} - -void cdef_copy_rect8_8bit_to_16bit_sse2(uint16_t *dst, int dstride, - const uint8_t *src, int sstride, - int width, int height) { - int j = 0; - for (int i = 0; i < height; i++) { - for (j = 0; j < (width & ~0x7); j += 8) { - v64 row = v64_load_unaligned(&src[i * sstride + j]); - v128_store_unaligned(&dst[i * dstride + j], v128_unpack_u8_s16(row)); - } - for (; j < width; j++) { - dst[i * dstride + j] = src[i * sstride + j]; - } - } -} diff --git a/third_party/aom/av1/common/x86/cdef_block_ssse3.c b/third_party/aom/av1/common/x86/cdef_block_ssse3.c index 0fb36eb6e0..14eb6c9e31 100644 --- a/third_party/aom/av1/common/x86/cdef_block_ssse3.c +++ b/third_party/aom/av1/common/x86/cdef_block_ssse3.c @@ -9,6 +9,17 @@ * PATENTS file, you can obtain it at www.aomedia.org/license/patent. */ +// Include SSSE3 CDEF code only for 32-bit x86, to support Valgrind. +// For normal use, we require SSE4.1, so cdef_*_sse4_1 will be used instead of +// these functions. 
However, 32-bit Valgrind does not support SSE4.1, so we +// include a fallback to SSSE3 to improve performance + +#include "config/aom_config.h" + +#if !AOM_ARCH_X86 +#error "cdef_block_ssse3.c is included for compatibility with 32-bit x86 only" +#endif // !AOM_ARCH_X86 + #include "aom_dsp/aom_simd.h" #define SIMD_FUNC(name) name##_ssse3 #include "av1/common/cdef_block_simd.h" diff --git a/third_party/aom/av1/common/x86/convolve_2d_avx2.c b/third_party/aom/av1/common/x86/convolve_2d_avx2.c index 1b39a0a8d5..d4c1169cc3 100644 --- a/third_party/aom/av1/common/x86/convolve_2d_avx2.c +++ b/third_party/aom/av1/common/x86/convolve_2d_avx2.c @@ -21,13 +21,11 @@ #include "av1/common/convolve.h" -void av1_convolve_2d_sr_general_avx2(const uint8_t *src, int src_stride, - uint8_t *dst, int dst_stride, int w, int h, - const InterpFilterParams *filter_params_x, - const InterpFilterParams *filter_params_y, - const int subpel_x_qn, - const int subpel_y_qn, - ConvolveParams *conv_params) { +static void convolve_2d_sr_general_avx2( + const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, int w, + int h, const InterpFilterParams *filter_params_x, + const InterpFilterParams *filter_params_y, const int subpel_x_qn, + const int subpel_y_qn, ConvolveParams *conv_params) { if (filter_params_x->taps > 8) { const int bd = 8; int im_stride = 8, i; @@ -150,9 +148,9 @@ void av1_convolve_2d_sr_avx2( const bool use_general = (tap_x == 12 || tap_y == 12); if (use_general) { - av1_convolve_2d_sr_general_avx2(src, src_stride, dst, dst_stride, w, h, - filter_params_x, filter_params_y, - subpel_x_q4, subpel_y_q4, conv_params); + convolve_2d_sr_general_avx2(src, src_stride, dst, dst_stride, w, h, + filter_params_x, filter_params_y, subpel_x_q4, + subpel_y_q4, conv_params); } else { av1_convolve_2d_sr_specialized_avx2(src, src_stride, dst, dst_stride, w, h, filter_params_x, filter_params_y, diff --git a/third_party/aom/av1/common/x86/convolve_2d_sse2.c b/third_party/aom/av1/common/x86/convolve_2d_sse2.c index 1b85f37294..68971eacc1 100644 --- a/third_party/aom/av1/common/x86/convolve_2d_sse2.c +++ b/third_party/aom/av1/common/x86/convolve_2d_sse2.c @@ -19,12 +19,11 @@ #include "aom_dsp/x86/convolve_common_intrin.h" #include "av1/common/convolve.h" -void av1_convolve_2d_sr_12tap_sse2(const uint8_t *src, int src_stride, - uint8_t *dst, int dst_stride, int w, int h, - const InterpFilterParams *filter_params_x, - const InterpFilterParams *filter_params_y, - const int subpel_x_qn, const int subpel_y_qn, - ConvolveParams *conv_params) { +static void convolve_2d_sr_12tap_sse2( + const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, int w, + int h, const InterpFilterParams *filter_params_x, + const InterpFilterParams *filter_params_y, const int subpel_x_qn, + const int subpel_y_qn, ConvolveParams *conv_params) { const int bd = 8; DECLARE_ALIGNED(16, int16_t, @@ -231,9 +230,9 @@ void av1_convolve_2d_sr_sse2(const uint8_t *src, int src_stride, uint8_t *dst, filter_params_x, filter_params_y, subpel_x_qn, subpel_y_qn, conv_params); } else { - av1_convolve_2d_sr_12tap_sse2(src, src_stride, dst, dst_stride, w, h, - filter_params_x, filter_params_y, - subpel_x_qn, subpel_y_qn, conv_params); + convolve_2d_sr_12tap_sse2(src, src_stride, dst, dst_stride, w, h, + filter_params_x, filter_params_y, subpel_x_qn, + subpel_y_qn, conv_params); } } else { const int bd = 8; diff --git a/third_party/aom/av1/common/x86/convolve_sse2.c b/third_party/aom/av1/common/x86/convolve_sse2.c index 012e75c1ae..6383567a48 100644 --- 
a/third_party/aom/av1/common/x86/convolve_sse2.c +++ b/third_party/aom/av1/common/x86/convolve_sse2.c @@ -75,10 +75,10 @@ static INLINE __m128i convolve_hi_y(const __m128i *const s, return convolve(ss, coeffs); } -void av1_convolve_y_sr_12tap_sse2(const uint8_t *src, int src_stride, - uint8_t *dst, int dst_stride, int w, int h, - const InterpFilterParams *filter_params_y, - int subpel_y_qn) { +static void convolve_y_sr_12tap_sse2(const uint8_t *src, int src_stride, + uint8_t *dst, int dst_stride, int w, int h, + const InterpFilterParams *filter_params_y, + int subpel_y_qn) { const int fo_vert = filter_params_y->taps / 2 - 1; const uint8_t *src_ptr = src - fo_vert * src_stride; const __m128i round_const = _mm_set1_epi32((1 << FILTER_BITS) >> 1); @@ -185,8 +185,8 @@ void av1_convolve_y_sr_sse2(const uint8_t *src, int src_stride, uint8_t *dst, av1_convolve_y_sr_c(src, src_stride, dst, dst_stride, w, h, filter_params_y, subpel_y_qn); } else { - av1_convolve_y_sr_12tap_sse2(src, src_stride, dst, dst_stride, w, h, - filter_params_y, subpel_y_qn); + convolve_y_sr_12tap_sse2(src, src_stride, dst, dst_stride, w, h, + filter_params_y, subpel_y_qn); } } else { const int fo_vert = filter_params_y->taps / 2 - 1; @@ -337,11 +337,11 @@ void av1_convolve_y_sr_sse2(const uint8_t *src, int src_stride, uint8_t *dst, } } -void av1_convolve_x_sr_12tap_sse2(const uint8_t *src, int src_stride, - uint8_t *dst, int dst_stride, int w, int h, - const InterpFilterParams *filter_params_x, - int subpel_x_qn, - ConvolveParams *conv_params) { +static void convolve_x_sr_12tap_sse2(const uint8_t *src, int src_stride, + uint8_t *dst, int dst_stride, int w, int h, + const InterpFilterParams *filter_params_x, + int subpel_x_qn, + ConvolveParams *conv_params) { const int fo_horiz = filter_params_x->taps / 2 - 1; const uint8_t *src_ptr = src - fo_horiz; const int bits = FILTER_BITS - conv_params->round_0; @@ -402,8 +402,8 @@ void av1_convolve_x_sr_sse2(const uint8_t *src, int src_stride, uint8_t *dst, av1_convolve_x_sr_c(src, src_stride, dst, dst_stride, w, h, filter_params_x, subpel_x_qn, conv_params); } else { - av1_convolve_x_sr_12tap_sse2(src, src_stride, dst, dst_stride, w, h, - filter_params_x, subpel_x_qn, conv_params); + convolve_x_sr_12tap_sse2(src, src_stride, dst, dst_stride, w, h, + filter_params_x, subpel_x_qn, conv_params); } } else { const int fo_horiz = filter_params_x->taps / 2 - 1; diff --git a/third_party/aom/av1/common/x86/jnt_convolve_sse2.c b/third_party/aom/av1/common/x86/jnt_convolve_sse2.c index 8c5d9918fb..d5d2db7455 100644 --- a/third_party/aom/av1/common/x86/jnt_convolve_sse2.c +++ b/third_party/aom/av1/common/x86/jnt_convolve_sse2.c @@ -375,232 +375,3 @@ void av1_dist_wtd_convolve_y_sse2(const uint8_t *src, int src_stride, } while (j < w); } } - -void av1_dist_wtd_convolve_2d_sse2(const uint8_t *src, int src_stride, - uint8_t *dst0, int dst_stride0, int w, int h, - const InterpFilterParams *filter_params_x, - const InterpFilterParams *filter_params_y, - const int subpel_x_qn, const int subpel_y_qn, - ConvolveParams *conv_params) { - CONV_BUF_TYPE *dst = conv_params->dst; - int dst_stride = conv_params->dst_stride; - const int bd = 8; - - DECLARE_ALIGNED(16, int16_t, - im_block[(MAX_SB_SIZE + MAX_FILTER_TAP - 1) * MAX_SB_SIZE]); - int im_h = h + filter_params_y->taps - 1; - int im_stride = MAX_SB_SIZE; - int i, j; - const int fo_vert = filter_params_y->taps / 2 - 1; - const int fo_horiz = filter_params_x->taps / 2 - 1; - const int do_average = conv_params->do_average; - const int 
use_dist_wtd_comp_avg = conv_params->use_dist_wtd_comp_avg; - const uint8_t *const src_ptr = src - fo_vert * src_stride - fo_horiz; - - const __m128i zero = _mm_setzero_si128(); - - const int w0 = conv_params->fwd_offset; - const int w1 = conv_params->bck_offset; - const __m128i wt0 = _mm_set1_epi16(w0); - const __m128i wt1 = _mm_set1_epi16(w1); - const __m128i wt = _mm_unpacklo_epi16(wt0, wt1); - - const int offset_0 = - bd + 2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1; - const int offset = (1 << offset_0) + (1 << (offset_0 - 1)); - const __m128i offset_const = _mm_set1_epi16(offset); - const int rounding_shift = - 2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1; - const __m128i rounding_const = _mm_set1_epi16((1 << rounding_shift) >> 1); - - /* Horizontal filter */ - { - const int16_t *x_filter = av1_get_interp_filter_subpel_kernel( - filter_params_x, subpel_x_qn & SUBPEL_MASK); - const __m128i coeffs_x = _mm_loadu_si128((__m128i *)x_filter); - - // coeffs 0 1 0 1 2 3 2 3 - const __m128i tmp_0 = _mm_unpacklo_epi32(coeffs_x, coeffs_x); - // coeffs 4 5 4 5 6 7 6 7 - const __m128i tmp_1 = _mm_unpackhi_epi32(coeffs_x, coeffs_x); - - // coeffs 0 1 0 1 0 1 0 1 - const __m128i coeff_01 = _mm_unpacklo_epi64(tmp_0, tmp_0); - // coeffs 2 3 2 3 2 3 2 3 - const __m128i coeff_23 = _mm_unpackhi_epi64(tmp_0, tmp_0); - // coeffs 4 5 4 5 4 5 4 5 - const __m128i coeff_45 = _mm_unpacklo_epi64(tmp_1, tmp_1); - // coeffs 6 7 6 7 6 7 6 7 - const __m128i coeff_67 = _mm_unpackhi_epi64(tmp_1, tmp_1); - - const __m128i round_const = _mm_set1_epi32( - ((1 << conv_params->round_0) >> 1) + (1 << (bd + FILTER_BITS - 1))); - const __m128i round_shift = _mm_cvtsi32_si128(conv_params->round_0); - - for (i = 0; i < im_h; ++i) { - for (j = 0; j < w; j += 8) { - __m128i temp_lo, temp_hi; - const __m128i data = - _mm_loadu_si128((__m128i *)&src_ptr[i * src_stride + j]); - - const __m128i src_lo = _mm_unpacklo_epi8(data, zero); - const __m128i src_hi = _mm_unpackhi_epi8(data, zero); - - // Filter even-index pixels - const __m128i res_0 = _mm_madd_epi16(src_lo, coeff_01); - temp_lo = _mm_srli_si128(src_lo, 4); - temp_hi = _mm_slli_si128(src_hi, 12); - const __m128i src_2 = _mm_or_si128(temp_hi, temp_lo); - const __m128i res_2 = _mm_madd_epi16(src_2, coeff_23); - temp_lo = _mm_srli_si128(src_lo, 8); - temp_hi = _mm_slli_si128(src_hi, 8); - const __m128i src_4 = _mm_or_si128(temp_hi, temp_lo); - const __m128i res_4 = _mm_madd_epi16(src_4, coeff_45); - temp_lo = _mm_srli_si128(src_lo, 12); - temp_hi = _mm_slli_si128(src_hi, 4); - const __m128i src_6 = _mm_or_si128(temp_hi, temp_lo); - const __m128i res_6 = _mm_madd_epi16(src_6, coeff_67); - - __m128i res_even = _mm_add_epi32(_mm_add_epi32(res_0, res_4), - _mm_add_epi32(res_2, res_6)); - res_even = - _mm_sra_epi32(_mm_add_epi32(res_even, round_const), round_shift); - - // Filter odd-index pixels - temp_lo = _mm_srli_si128(src_lo, 2); - temp_hi = _mm_slli_si128(src_hi, 14); - const __m128i src_1 = _mm_or_si128(temp_hi, temp_lo); - const __m128i res_1 = _mm_madd_epi16(src_1, coeff_01); - temp_lo = _mm_srli_si128(src_lo, 6); - temp_hi = _mm_slli_si128(src_hi, 10); - const __m128i src_3 = _mm_or_si128(temp_hi, temp_lo); - const __m128i res_3 = _mm_madd_epi16(src_3, coeff_23); - temp_lo = _mm_srli_si128(src_lo, 10); - temp_hi = _mm_slli_si128(src_hi, 6); - const __m128i src_5 = _mm_or_si128(temp_hi, temp_lo); - const __m128i res_5 = _mm_madd_epi16(src_5, coeff_45); - temp_lo = _mm_srli_si128(src_lo, 14); - temp_hi = _mm_slli_si128(src_hi, 2); - const 
__m128i src_7 = _mm_or_si128(temp_hi, temp_lo); - const __m128i res_7 = _mm_madd_epi16(src_7, coeff_67); - - __m128i res_odd = _mm_add_epi32(_mm_add_epi32(res_1, res_5), - _mm_add_epi32(res_3, res_7)); - res_odd = - _mm_sra_epi32(_mm_add_epi32(res_odd, round_const), round_shift); - - // Pack in the column order 0, 2, 4, 6, 1, 3, 5, 7 - __m128i res = _mm_packs_epi32(res_even, res_odd); - _mm_store_si128((__m128i *)&im_block[i * im_stride + j], res); - } - } - } - - /* Vertical filter */ - { - const int16_t *y_filter = av1_get_interp_filter_subpel_kernel( - filter_params_y, subpel_y_qn & SUBPEL_MASK); - const __m128i coeffs_y = _mm_loadu_si128((__m128i *)y_filter); - - // coeffs 0 1 0 1 2 3 2 3 - const __m128i tmp_0 = _mm_unpacklo_epi32(coeffs_y, coeffs_y); - // coeffs 4 5 4 5 6 7 6 7 - const __m128i tmp_1 = _mm_unpackhi_epi32(coeffs_y, coeffs_y); - - // coeffs 0 1 0 1 0 1 0 1 - const __m128i coeff_01 = _mm_unpacklo_epi64(tmp_0, tmp_0); - // coeffs 2 3 2 3 2 3 2 3 - const __m128i coeff_23 = _mm_unpackhi_epi64(tmp_0, tmp_0); - // coeffs 4 5 4 5 4 5 4 5 - const __m128i coeff_45 = _mm_unpacklo_epi64(tmp_1, tmp_1); - // coeffs 6 7 6 7 6 7 6 7 - const __m128i coeff_67 = _mm_unpackhi_epi64(tmp_1, tmp_1); - - const __m128i round_const = _mm_set1_epi32( - ((1 << conv_params->round_1) >> 1) - - (1 << (bd + 2 * FILTER_BITS - conv_params->round_0 - 1))); - const __m128i round_shift = _mm_cvtsi32_si128(conv_params->round_1); - - for (i = 0; i < h; ++i) { - for (j = 0; j < w; j += 8) { - // Filter even-index pixels - const int16_t *data = &im_block[i * im_stride + j]; - const __m128i src_0 = - _mm_unpacklo_epi16(*(__m128i *)(data + 0 * im_stride), - *(__m128i *)(data + 1 * im_stride)); - const __m128i src_2 = - _mm_unpacklo_epi16(*(__m128i *)(data + 2 * im_stride), - *(__m128i *)(data + 3 * im_stride)); - const __m128i src_4 = - _mm_unpacklo_epi16(*(__m128i *)(data + 4 * im_stride), - *(__m128i *)(data + 5 * im_stride)); - const __m128i src_6 = - _mm_unpacklo_epi16(*(__m128i *)(data + 6 * im_stride), - *(__m128i *)(data + 7 * im_stride)); - - const __m128i res_0 = _mm_madd_epi16(src_0, coeff_01); - const __m128i res_2 = _mm_madd_epi16(src_2, coeff_23); - const __m128i res_4 = _mm_madd_epi16(src_4, coeff_45); - const __m128i res_6 = _mm_madd_epi16(src_6, coeff_67); - - const __m128i res_even = _mm_add_epi32(_mm_add_epi32(res_0, res_2), - _mm_add_epi32(res_4, res_6)); - - // Filter odd-index pixels - const __m128i src_1 = - _mm_unpackhi_epi16(*(__m128i *)(data + 0 * im_stride), - *(__m128i *)(data + 1 * im_stride)); - const __m128i src_3 = - _mm_unpackhi_epi16(*(__m128i *)(data + 2 * im_stride), - *(__m128i *)(data + 3 * im_stride)); - const __m128i src_5 = - _mm_unpackhi_epi16(*(__m128i *)(data + 4 * im_stride), - *(__m128i *)(data + 5 * im_stride)); - const __m128i src_7 = - _mm_unpackhi_epi16(*(__m128i *)(data + 6 * im_stride), - *(__m128i *)(data + 7 * im_stride)); - - const __m128i res_1 = _mm_madd_epi16(src_1, coeff_01); - const __m128i res_3 = _mm_madd_epi16(src_3, coeff_23); - const __m128i res_5 = _mm_madd_epi16(src_5, coeff_45); - const __m128i res_7 = _mm_madd_epi16(src_7, coeff_67); - - const __m128i res_odd = _mm_add_epi32(_mm_add_epi32(res_1, res_3), - _mm_add_epi32(res_5, res_7)); - - // Rearrange pixels back into the order 0 ... 
7 - const __m128i res_lo = _mm_unpacklo_epi32(res_even, res_odd); - const __m128i res_hi = _mm_unpackhi_epi32(res_even, res_odd); - - const __m128i res_lo_round = - _mm_sra_epi32(_mm_add_epi32(res_lo, round_const), round_shift); - const __m128i res_hi_round = - _mm_sra_epi32(_mm_add_epi32(res_hi, round_const), round_shift); - - const __m128i res_16b = _mm_packs_epi32(res_lo_round, res_hi_round); - const __m128i res_unsigned = _mm_add_epi16(res_16b, offset_const); - - // Accumulate values into the destination buffer - if (do_average) { - const __m128i data_ref_0 = - _mm_loadu_si128((__m128i *)(&dst[i * dst_stride + j])); - - const __m128i comp_avg_res = - comp_avg(&data_ref_0, &res_unsigned, &wt, use_dist_wtd_comp_avg); - - const __m128i round_result = convolve_rounding( - &comp_avg_res, &offset_const, &rounding_const, rounding_shift); - - const __m128i res_8 = _mm_packus_epi16(round_result, round_result); - - if (w > 4) - _mm_storel_epi64((__m128i *)(&dst0[i * dst_stride0 + j]), res_8); - else - *(int *)(&dst0[i * dst_stride0 + j]) = _mm_cvtsi128_si32(res_8); - } else { - _mm_store_si128((__m128i *)(&dst[i * dst_stride + j]), res_unsigned); - } - } - } - } -} diff --git a/third_party/aom/av1/decoder/decodeframe.c b/third_party/aom/av1/decoder/decodeframe.c index bb09347e1c..c027308ff3 100644 --- a/third_party/aom/av1/decoder/decodeframe.c +++ b/third_party/aom/av1/decoder/decodeframe.c @@ -14,20 +14,23 @@ #include #include "config/aom_config.h" -#include "config/aom_dsp_rtcd.h" #include "config/aom_scale_rtcd.h" -#include "config/av1_rtcd.h" #include "aom/aom_codec.h" +#include "aom/aom_image.h" +#include "aom/internal/aom_codec_internal.h" #include "aom_dsp/aom_dsp_common.h" #include "aom_dsp/binary_codes_reader.h" #include "aom_dsp/bitreader.h" #include "aom_dsp/bitreader_buffer.h" +#include "aom_dsp/txfm_common.h" #include "aom_mem/aom_mem.h" #include "aom_ports/aom_timer.h" #include "aom_ports/mem.h" #include "aom_ports/mem_ops.h" #include "aom_scale/aom_scale.h" +#include "aom_scale/yv12config.h" +#include "aom_util/aom_pthread.h" #include "aom_util/aom_thread.h" #if CONFIG_BITSTREAM_DEBUG || CONFIG_MISMATCH_DEBUG @@ -35,33 +38,41 @@ #endif // CONFIG_BITSTREAM_DEBUG || CONFIG_MISMATCH_DEBUG #include "av1/common/alloccommon.h" +#include "av1/common/av1_common_int.h" +#include "av1/common/blockd.h" #include "av1/common/cdef.h" #include "av1/common/cfl.h" -#if CONFIG_INSPECTION -#include "av1/decoder/inspection.h" -#endif +#include "av1/common/common_data.h" #include "av1/common/common.h" #include "av1/common/entropy.h" #include "av1/common/entropymode.h" #include "av1/common/entropymv.h" +#include "av1/common/enums.h" #include "av1/common/frame_buffers.h" #include "av1/common/idct.h" +#include "av1/common/mv.h" #include "av1/common/mvref_common.h" +#include "av1/common/obmc.h" #include "av1/common/pred_common.h" #include "av1/common/quant_common.h" #include "av1/common/reconinter.h" #include "av1/common/reconintra.h" #include "av1/common/resize.h" +#include "av1/common/restoration.h" +#include "av1/common/scale.h" #include "av1/common/seg_common.h" #include "av1/common/thread_common.h" #include "av1/common/tile_common.h" #include "av1/common/warped_motion.h" -#include "av1/common/obmc.h" + #include "av1/decoder/decodeframe.h" #include "av1/decoder/decodemv.h" #include "av1/decoder/decoder.h" #include "av1/decoder/decodetxb.h" #include "av1/decoder/detokenize.h" +#if CONFIG_INSPECTION +#include "av1/decoder/inspection.h" +#endif #define ACCT_STR __func__ @@ -1935,8 +1946,8 @@ static 
AOM_INLINE void setup_buffer_pool(AV1_COMMON *cm) { &cm->cur_frame->buf, cm->width, cm->height, seq_params->subsampling_x, seq_params->subsampling_y, seq_params->use_highbitdepth, AOM_DEC_BORDER_IN_PIXELS, cm->features.byte_alignment, - &cm->cur_frame->raw_frame_buffer, pool->get_fb_cb, pool->cb_priv, 0, - 0)) { + &cm->cur_frame->raw_frame_buffer, pool->get_fb_cb, pool->cb_priv, + false, 0)) { unlock_buffer_pool(pool); aom_internal_error(cm->error, AOM_CODEC_MEM_ERROR, "Failed to allocate frame buffer"); @@ -2293,7 +2304,11 @@ static const uint8_t *get_ls_tile_buffers( const int tile_col_size_bytes = pbi->tile_col_size_bytes; const int tile_size_bytes = pbi->tile_size_bytes; int tile_width, tile_height; - av1_get_uniform_tile_size(cm, &tile_width, &tile_height); + if (!av1_get_uniform_tile_size(cm, &tile_width, &tile_height)) { + aom_internal_error( + &pbi->error, AOM_CODEC_CORRUPT_FRAME, + "Not all the tiles in the tile list have the same size."); + } const int tile_copy_mode = ((AOMMAX(tile_width, tile_height) << MI_SIZE_LOG2) <= 256) ? 1 : 0; // Read tile column sizes for all columns (we need the last tile buffer) @@ -2302,8 +2317,16 @@ static const uint8_t *get_ls_tile_buffers( size_t tile_col_size; if (!is_last) { + if (tile_col_size_bytes > data_end - data) { + aom_internal_error(&pbi->error, AOM_CODEC_CORRUPT_FRAME, + "Not enough data to read tile_col_size"); + } tile_col_size = mem_get_varsize(data, tile_col_size_bytes); data += tile_col_size_bytes; + if (tile_col_size > (size_t)(data_end - data)) { + aom_internal_error(&pbi->error, AOM_CODEC_CORRUPT_FRAME, + "tile_col_data_end[%d] is out of bound", c); + } tile_col_data_end[c] = data + tile_col_size; } else { tile_col_size = data_end - data; @@ -3871,8 +3894,8 @@ static AOM_INLINE void read_bitdepth( #endif } -void av1_read_film_grain_params(AV1_COMMON *cm, - struct aom_read_bit_buffer *rb) { +static void read_film_grain_params(AV1_COMMON *cm, + struct aom_read_bit_buffer *rb) { aom_film_grain_t *pars = &cm->film_grain_params; const SequenceHeader *const seq_params = cm->seq_params; @@ -4040,7 +4063,7 @@ static AOM_INLINE void read_film_grain(AV1_COMMON *cm, struct aom_read_bit_buffer *rb) { if (cm->seq_params->film_grain_params_present && (cm->show_frame || cm->showable_frame)) { - av1_read_film_grain_params(cm, rb); + read_film_grain_params(cm, rb); } else { memset(&cm->film_grain_params, 0, sizeof(cm->film_grain_params)); } @@ -4768,7 +4791,7 @@ static int read_uncompressed_header(AV1Decoder *pbi, seq_params->max_frame_height, seq_params->subsampling_x, seq_params->subsampling_y, seq_params->use_highbitdepth, AOM_BORDER_IN_PIXELS, features->byte_alignment, - &buf->raw_frame_buffer, pool->get_fb_cb, pool->cb_priv, 0, + &buf->raw_frame_buffer, pool->get_fb_cb, pool->cb_priv, false, 0)) { decrease_ref_count(buf, pool); unlock_buffer_pool(pool); diff --git a/third_party/aom/av1/decoder/decodemv.h b/third_party/aom/av1/decoder/decodemv.h index 3d8629c9a5..7e77c030f8 100644 --- a/third_party/aom/av1/decoder/decodemv.h +++ b/third_party/aom/av1/decoder/decodemv.h @@ -20,6 +20,8 @@ extern "C" { #endif +int av1_neg_deinterleave(int diff, int ref, int max); + void av1_read_mode_info(AV1Decoder *const pbi, DecoderCodingBlock *dcb, aom_reader *r, int x_mis, int y_mis); diff --git a/third_party/aom/av1/decoder/decoder.c b/third_party/aom/av1/decoder/decoder.c index 32e94840be..a886ed469c 100644 --- a/third_party/aom/av1/decoder/decoder.c +++ b/third_party/aom/av1/decoder/decoder.c @@ -21,6 +21,7 @@ #include "aom_mem/aom_mem.h" #include 
"aom_ports/aom_timer.h" #include "aom_scale/aom_scale.h" +#include "aom_util/aom_pthread.h" #include "aom_util/aom_thread.h" #include "av1/common/alloccommon.h" diff --git a/third_party/aom/av1/decoder/dthread.h b/third_party/aom/av1/decoder/dthread.h index f82b9d8ccf..b0f6fda829 100644 --- a/third_party/aom/av1/decoder/dthread.h +++ b/third_party/aom/av1/decoder/dthread.h @@ -14,7 +14,6 @@ #include "config/aom_config.h" -#include "aom_util/aom_thread.h" #include "aom/internal/aom_codec_internal.h" #ifdef __cplusplus diff --git a/third_party/aom/av1/decoder/obu.c b/third_party/aom/av1/decoder/obu.c index 0e31ce9404..e0b2d87c32 100644 --- a/third_party/aom/av1/decoder/obu.c +++ b/third_party/aom/av1/decoder/obu.c @@ -367,16 +367,13 @@ static uint32_t read_one_tile_group_obu( return header_size + tg_payload_size; } -static void alloc_tile_list_buffer(AV1Decoder *pbi) { +static void alloc_tile_list_buffer(AV1Decoder *pbi, int tile_width_in_pixels, + int tile_height_in_pixels) { // The resolution of the output frame is read out from the bitstream. The data // are stored in the order of Y plane, U plane and V plane. As an example, for // image format 4:2:0, the output frame of U plane and V plane is 1/4 of the // output frame. AV1_COMMON *const cm = &pbi->common; - int tile_width, tile_height; - av1_get_uniform_tile_size(cm, &tile_width, &tile_height); - const int tile_width_in_pixels = tile_width * MI_SIZE; - const int tile_height_in_pixels = tile_height * MI_SIZE; const int output_frame_width = (pbi->output_frame_width_in_tiles_minus_1 + 1) * tile_width_in_pixels; const int output_frame_height = @@ -396,7 +393,7 @@ static void alloc_tile_list_buffer(AV1Decoder *pbi) { cm->seq_params->subsampling_y, (cm->seq_params->use_highbitdepth && (cm->seq_params->bit_depth > AOM_BITS_8)), - 0, cm->features.byte_alignment, 0, 0)) + 0, cm->features.byte_alignment, false, 0)) aom_internal_error(&pbi->error, AOM_CODEC_MEM_ERROR, "Failed to allocate the tile list output buffer"); } @@ -424,13 +421,10 @@ static void yv12_tile_copy(const YV12_BUFFER_CONFIG *src, int hstart1, return; } -static void copy_decoded_tile_to_tile_list_buffer(AV1Decoder *pbi, - int tile_idx) { +static void copy_decoded_tile_to_tile_list_buffer(AV1Decoder *pbi, int tile_idx, + int tile_width_in_pixels, + int tile_height_in_pixels) { AV1_COMMON *const cm = &pbi->common; - int tile_width, tile_height; - av1_get_uniform_tile_size(cm, &tile_width, &tile_height); - const int tile_width_in_pixels = tile_width * MI_SIZE; - const int tile_height_in_pixels = tile_height * MI_SIZE; const int ssy = cm->seq_params->subsampling_y; const int ssx = cm->seq_params->subsampling_x; const int num_planes = av1_num_planes(cm); @@ -501,13 +495,31 @@ static uint32_t read_and_decode_one_tile_list(AV1Decoder *pbi, pbi->output_frame_width_in_tiles_minus_1 = aom_rb_read_literal(rb, 8); pbi->output_frame_height_in_tiles_minus_1 = aom_rb_read_literal(rb, 8); pbi->tile_count_minus_1 = aom_rb_read_literal(rb, 16); + + // The output frame is used to store the decoded tile list. The decoded tile + // list has to fit into 1 output frame. 
+ if ((pbi->tile_count_minus_1 + 1) > + (pbi->output_frame_width_in_tiles_minus_1 + 1) * + (pbi->output_frame_height_in_tiles_minus_1 + 1)) { + pbi->error.error_code = AOM_CODEC_CORRUPT_FRAME; + return 0; + } + if (pbi->tile_count_minus_1 > MAX_TILES - 1) { pbi->error.error_code = AOM_CODEC_CORRUPT_FRAME; return 0; } + int tile_width, tile_height; + if (!av1_get_uniform_tile_size(cm, &tile_width, &tile_height)) { + pbi->error.error_code = AOM_CODEC_CORRUPT_FRAME; + return 0; + } + const int tile_width_in_pixels = tile_width * MI_SIZE; + const int tile_height_in_pixels = tile_height * MI_SIZE; + // Allocate output frame buffer for the tile list. - alloc_tile_list_buffer(pbi); + alloc_tile_list_buffer(pbi, tile_width_in_pixels, tile_height_in_pixels); uint32_t tile_list_info_bytes = 4; tile_list_payload_size += tile_list_info_bytes; @@ -558,7 +570,8 @@ static uint32_t read_and_decode_one_tile_list(AV1Decoder *pbi, assert(data <= data_end); // Copy the decoded tile to the tile list output buffer. - copy_decoded_tile_to_tile_list_buffer(pbi, tile_idx); + copy_decoded_tile_to_tile_list_buffer(pbi, tile_idx, tile_width_in_pixels, + tile_height_in_pixels); tile_idx++; } diff --git a/third_party/aom/av1/encoder/allintra_vis.c b/third_party/aom/av1/encoder/allintra_vis.c index 8dcef5fc85..87becb80ef 100644 --- a/third_party/aom/av1/encoder/allintra_vis.c +++ b/third_party/aom/av1/encoder/allintra_vis.c @@ -13,6 +13,8 @@ #include "config/aom_config.h" +#include "aom_util/aom_pthread.h" + #if CONFIG_TFLITE #include "tensorflow/lite/c/c_api.h" #include "av1/encoder/deltaq4_model.c" @@ -588,7 +590,7 @@ void av1_set_mb_wiener_variance(AV1_COMP *cpi) { &cm->cur_frame->buf, cm->width, cm->height, seq_params->subsampling_x, seq_params->subsampling_y, seq_params->use_highbitdepth, cpi->oxcf.border_in_pixels, cm->features.byte_alignment, NULL, NULL, - NULL, cpi->image_pyramid_levels, 0)) + NULL, cpi->alloc_pyramid, 0)) aom_internal_error(cm->error, AOM_CODEC_MEM_ERROR, "Failed to allocate frame buffer"); av1_alloc_mb_wiener_var_pred_buf(&cpi->common, &cpi->td); diff --git a/third_party/aom/av1/encoder/aq_cyclicrefresh.c b/third_party/aom/av1/encoder/aq_cyclicrefresh.c index f48ff11e51..1aa8dde323 100644 --- a/third_party/aom/av1/encoder/aq_cyclicrefresh.c +++ b/third_party/aom/av1/encoder/aq_cyclicrefresh.c @@ -15,6 +15,7 @@ #include "av1/common/pred_common.h" #include "av1/common/seg_common.h" #include "av1/encoder/aq_cyclicrefresh.h" +#include "av1/encoder/encoder_utils.h" #include "av1/encoder/ratectrl.h" #include "av1/encoder/segmentation.h" #include "av1/encoder/tokenize.h" @@ -295,6 +296,7 @@ static void cyclic_refresh_update_map(AV1_COMP *const cpi) { const CommonModeInfoParams *const mi_params = &cm->mi_params; CYCLIC_REFRESH *const cr = cpi->cyclic_refresh; unsigned char *const seg_map = cpi->enc_seg.map; + unsigned char *const active_map_4x4 = cpi->active_map.map; int i, block_count, bl_index, sb_rows, sb_cols, sbs_in_frame; int xmis, ymis, x, y; uint64_t sb_sad = 0; @@ -302,7 +304,12 @@ static void cyclic_refresh_update_map(AV1_COMP *const cpi) { uint64_t thresh_sad = INT64_MAX; const int mi_rows = mi_params->mi_rows, mi_cols = mi_params->mi_cols; const int mi_stride = mi_cols; - memset(seg_map, CR_SEGMENT_ID_BASE, mi_rows * mi_cols); + // Don't set seg_map to 0 if active_maps is enabled. Active_maps will set + // seg_map to either 7 or 0 (AM_SEGMENT_ID_INACTIVE/ACTIVE), and cyclic + // refresh set below (segment 1 or 2) will only be set for ACTIVE blocks. 
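
The active-map handling here, and the percent_refresh scaling added further down in av1_cyclic_refresh_update_parameters(), both key off the new rc.percent_blocks_inactive. A quick worked example of that scaling, with illustrative numbers:

/* With a base refresh of 10% and 40% of the frame marked inactive: */
int percent_refresh = 10;
int percent_blocks_inactive = 40;
percent_refresh = percent_refresh * (100 - percent_blocks_inactive) / 100;
/* percent_refresh == 6: refresh now targets only the active 60% of the
 * blocks. If the whole frame is inactive (100), this reaches 0 and
 * apply_cyclic_refresh is switched off entirely. */
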
+ if (!cpi->active_map.enabled) { + memset(seg_map, CR_SEGMENT_ID_BASE, mi_rows * mi_cols); + } sb_cols = (mi_cols + cm->seq_params->mib_size - 1) / cm->seq_params->mib_size; sb_rows = (mi_rows + cm->seq_params->mib_size - 1) / cm->seq_params->mib_size; sbs_in_frame = sb_cols * sb_rows; @@ -357,7 +364,10 @@ static void cyclic_refresh_update_map(AV1_COMP *const cpi) { // for possible boost/refresh (segment 1). The segment id may get // reset to 0 later if block gets coded anything other than low motion. // If the block_sad (sb_sad) is very low label it for refresh anyway. - if (cr->map[bl_index2] == 0 || sb_sad < thresh_sad_low) { + // If active_maps is enabled, only allow for setting on ACTIVE blocks. + if ((cr->map[bl_index2] == 0 || sb_sad < thresh_sad_low) && + (!cpi->active_map.enabled || + active_map_4x4[bl_index2] == AM_SEGMENT_ID_ACTIVE)) { sum_map += 4; } else if (cr->map[bl_index2] < 0) { cr->map[bl_index2]++; @@ -380,7 +390,8 @@ static void cyclic_refresh_update_map(AV1_COMP *const cpi) { cr->sb_index = i; if (cr->target_num_seg_blocks == 0) { // Disable segmentation, seg_map is already set to 0 above. - av1_disable_segmentation(&cm->seg); + // Don't disable if active_map is being used. + if (!cpi->active_map.enabled) av1_disable_segmentation(&cm->seg); } } @@ -423,8 +434,6 @@ void av1_cyclic_refresh_update_parameters(AV1_COMP *const cpi) { // function av1_cyclic_reset_segment_skip(). Skipping over // 4x4 will therefore have small bdrate loss (~0.2%), so // we use it only for speed > 9 for now. - // Also if loop-filter deltas is applied via segment, then - // we need to set cr->skip_over4x4 = 1. cr->skip_over4x4 = (cpi->oxcf.speed > 9) ? 1 : 0; // should we enable cyclic refresh on this frame. @@ -450,6 +459,15 @@ void av1_cyclic_refresh_update_parameters(AV1_COMP *const cpi) { else cr->percent_refresh = 10 + cr->percent_refresh_adjustment; + if (cpi->active_map.enabled) { + // Scale down the percent_refresh to target the active blocks only. + cr->percent_refresh = + cr->percent_refresh * (100 - cpi->rc.percent_blocks_inactive) / 100; + if (cr->percent_refresh == 0) { + cr->apply_cyclic_refresh = 0; + } + } + cr->max_qdelta_perc = 60; cr->time_for_refresh = 0; cr->use_block_sad_scene_det = @@ -543,10 +561,14 @@ void av1_cyclic_refresh_setup(AV1_COMP *const cpi) { if (resolution_change) av1_cyclic_refresh_reset_resize(cpi); if (!cr->apply_cyclic_refresh) { - // Set segmentation map to 0 and disable. - unsigned char *const seg_map = cpi->enc_seg.map; - memset(seg_map, 0, cm->mi_params.mi_rows * cm->mi_params.mi_cols); - av1_disable_segmentation(&cm->seg); + // Don't disable and set seg_map to 0 if active_maps is enabled, unless + // whole frame is set as inactive (since we only apply cyclic_refresh to + // active blocks). + if (!cpi->active_map.enabled || cpi->rc.percent_blocks_inactive == 100) { + unsigned char *const seg_map = cpi->enc_seg.map; + memset(seg_map, 0, cm->mi_params.mi_rows * cm->mi_params.mi_cols); + av1_disable_segmentation(&cm->seg); + } if (frame_is_intra_only(cm) || scene_change_detected || cpi->ppi->rtc_ref.bias_recovery_frame) { cr->sb_index = 0; @@ -574,9 +596,11 @@ void av1_cyclic_refresh_setup(AV1_COMP *const cpi) { cr->thresh_rate_sb = INT64_MAX; } // Set up segmentation. - // Clear down the segment map. av1_enable_segmentation(&cm->seg); - av1_clearall_segfeatures(seg); + if (!cpi->active_map.enabled) { + // Clear down the segment map, only if active_maps is not enabled. 
+ av1_clearall_segfeatures(seg); + } // Note: setting temporal_update has no effect, as the seg-map coding method // (temporal or spatial) is determined in @@ -644,6 +668,10 @@ void av1_cyclic_refresh_reset_resize(AV1_COMP *const cpi) { int av1_cyclic_refresh_disable_lf_cdef(AV1_COMP *const cpi) { CYCLIC_REFRESH *const cr = cpi->cyclic_refresh; const int qindex = cpi->common.quant_params.base_qindex; + if (cpi->active_map.enabled && + cpi->rc.percent_blocks_inactive > + cpi->sf.rt_sf.thresh_active_maps_skip_lf_cdef) + return 1; if (cpi->rc.frames_since_key > 30 && cr->percent_refresh > 0 && cr->counter_encode_maxq_scene_change > 300 / cr->percent_refresh && cpi->rc.frame_source_sad < 1000 && diff --git a/third_party/aom/av1/encoder/arm/neon/av1_error_sve.c b/third_party/aom/av1/encoder/arm/neon/av1_error_sve.c index 63aad0b785..52803a9838 100644 --- a/third_party/aom/av1/encoder/arm/neon/av1_error_sve.c +++ b/third_party/aom/av1/encoder/arm/neon/av1_error_sve.c @@ -14,7 +14,7 @@ #include "config/aom_config.h" #include "aom_dsp/aom_dsp_common.h" -#include "aom_dsp/arm/dot_sve.h" +#include "aom_dsp/arm/aom_neon_sve_bridge.h" #include "aom_dsp/arm/mem_neon.h" int64_t av1_block_error_sve(const tran_low_t *coeff, const tran_low_t *dqcoeff, diff --git a/third_party/aom/av1/encoder/arm/neon/temporal_filter_neon_dotprod.c b/third_party/aom/av1/encoder/arm/neon/temporal_filter_neon_dotprod.c index 5a52e701a2..919521fec7 100644 --- a/third_party/aom/av1/encoder/arm/neon/temporal_filter_neon_dotprod.c +++ b/third_party/aom/av1/encoder/arm/neon/temporal_filter_neon_dotprod.c @@ -23,7 +23,15 @@ #define SSE_STRIDE (BW + 4) // clang-format off +// Table used to pad the first and last columns and apply the sliding window. +DECLARE_ALIGNED(16, static const uint8_t, kLoadPad[4][16]) = { + { 2, 2, 2, 3, 4, 255, 255, 255, 255, 2, 2, 3, 4, 5, 255, 255 }, + { 255, 255, 2, 3, 4, 5, 6, 255, 255, 255, 255, 3, 4, 5, 6, 7 }, + { 0, 1, 2, 3, 4, 255, 255, 255, 255, 1, 2, 3, 4, 5, 255, 255 }, + { 255, 255, 2, 3, 4, 5, 5, 255, 255, 255, 255, 3, 4, 5, 5, 5 } +}; +// For columns that don't need to be padded it's just a simple mask. DECLARE_ALIGNED(16, static const uint8_t, kSlidingWindowMask[]) = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, @@ -56,22 +64,6 @@ static INLINE void get_abs_diff(const uint8_t *frame1, const uint32_t stride1, } while (++i < block_height); } -static INLINE uint8x16_t load_and_pad(const uint8_t *src, const uint32_t col, - const uint32_t block_width) { - uint8x8_t s = vld1_u8(src); - - if (col == 0) { - const uint8_t lane2 = vget_lane_u8(s, 2); - s = vset_lane_u8(lane2, s, 0); - s = vset_lane_u8(lane2, s, 1); - } else if (col >= block_width - 4) { - const uint8_t lane5 = vget_lane_u8(s, 5); - s = vset_lane_u8(lane5, s, 6); - s = vset_lane_u8(lane5, s, 7); - } - return vcombine_u8(s, s); -} - static void apply_temporal_filter( const uint8_t *frame, const unsigned int stride, const uint32_t block_width, const uint32_t block_height, const int *subblock_mses, @@ -84,6 +76,10 @@ static void apply_temporal_filter( uint32_t acc_5x5_neon[BH][BW]; const uint8x16x2_t vmask = vld1q_u8_x2(kSlidingWindowMask); + const uint8x16_t pad_tbl0 = vld1q_u8(kLoadPad[0]); + const uint8x16_t pad_tbl1 = vld1q_u8(kLoadPad[1]); + const uint8x16_t pad_tbl2 = vld1q_u8(kLoadPad[2]); + const uint8x16_t pad_tbl3 = vld1q_u8(kLoadPad[3]); // Traverse 4 columns at a time - first and last two columns need padding. 
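
A note on the table trick above, which is my reading of kLoadPad rather than upstream documentation: AArch64 TBL returns 0 for any out-of-range index, so the 255 entries zero a lane (replacing the old vandq_u8 masking step) while the in-range entries replicate edge pixels (replacing the old load_and_pad() lane fix-ups), all in one instruction:

uint8x8_t s = vld1_u8(src);            /* s = { s0 .. s7 }      */
uint8x16_t s_dup = vcombine_u8(s, s);  /* { s0..s7, s0..s7 }    */
uint8x16_t v = vqtbl1q_u8(s_dup, vld1q_u8(kLoadPad[0]));
/* kLoadPad[0] = { 2,2,2,3,4,255,255,255,255,2,2,3,4,5,255,255 } yields
 * v = { s2,s2,s2,s3,s4,0,0,0,0,s2,s2,s3,s4,s5,0,0 }: the left edge is
 * padded by replicating s2, and the sliding-window mask is applied by the
 * same TBL lookup. */
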
for (uint32_t col = 0; col < block_width; col += 4) { @@ -92,9 +88,18 @@ static void apply_temporal_filter( // Load, pad (for first and last two columns) and mask 3 rows from the top. for (int i = 2; i < 5; i++) { - const uint8x16_t s = load_and_pad(src, col, block_width); - vsrc[i][0] = vandq_u8(s, vmask.val[0]); - vsrc[i][1] = vandq_u8(s, vmask.val[1]); + uint8x8_t s = vld1_u8(src); + uint8x16_t s_dup = vcombine_u8(s, s); + if (col == 0) { + vsrc[i][0] = vqtbl1q_u8(s_dup, pad_tbl0); + vsrc[i][1] = vqtbl1q_u8(s_dup, pad_tbl1); + } else if (col >= block_width - 4) { + vsrc[i][0] = vqtbl1q_u8(s_dup, pad_tbl2); + vsrc[i][1] = vqtbl1q_u8(s_dup, pad_tbl3); + } else { + vsrc[i][0] = vandq_u8(s_dup, vmask.val[0]); + vsrc[i][1] = vandq_u8(s_dup, vmask.val[1]); + } src += SSE_STRIDE; } @@ -130,9 +135,18 @@ static void apply_temporal_filter( if (row <= block_height - 4) { // Load next row into the bottom of the sliding window. - uint8x16_t s = load_and_pad(src, col, block_width); - vsrc[4][0] = vandq_u8(s, vmask.val[0]); - vsrc[4][1] = vandq_u8(s, vmask.val[1]); + uint8x8_t s = vld1_u8(src); + uint8x16_t s_dup = vcombine_u8(s, s); + if (col == 0) { + vsrc[4][0] = vqtbl1q_u8(s_dup, pad_tbl0); + vsrc[4][1] = vqtbl1q_u8(s_dup, pad_tbl1); + } else if (col >= block_width - 4) { + vsrc[4][0] = vqtbl1q_u8(s_dup, pad_tbl2); + vsrc[4][1] = vqtbl1q_u8(s_dup, pad_tbl3); + } else { + vsrc[4][0] = vandq_u8(s_dup, vmask.val[0]); + vsrc[4][1] = vandq_u8(s_dup, vmask.val[1]); + } src += SSE_STRIDE; } else { // Pad the bottom 2 rows. diff --git a/third_party/aom/av1/encoder/arm/neon/wedge_utils_sve.c b/third_party/aom/av1/encoder/arm/neon/wedge_utils_sve.c new file mode 100644 index 0000000000..521601a3f3 --- /dev/null +++ b/third_party/aom/av1/encoder/arm/neon/wedge_utils_sve.c @@ -0,0 +1,92 @@ +/* + * Copyright (c) 2024, Alliance for Open Media. All rights reserved + * + * This source code is subject to the terms of the BSD 2 Clause License and + * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License + * was not distributed with this source code in the LICENSE file, you can + * obtain it at www.aomedia.org/license/software. If the Alliance for Open + * Media Patent License 1.0 was not distributed with this source code in the + * PATENTS file, you can obtain it at www.aomedia.org/license/patent. + */ + +#include +#include + +#include "aom_dsp/arm/aom_neon_sve_bridge.h" +#include "aom_dsp/arm/sum_neon.h" +#include "av1/common/reconinter.h" + +uint64_t av1_wedge_sse_from_residuals_sve(const int16_t *r1, const int16_t *d, + const uint8_t *m, int N) { + assert(N % 64 == 0); + + // Predicate pattern with first 8 elements true. + const svbool_t pattern = svptrue_pat_b16(SV_VL8); + int64x2_t sse[2] = { vdupq_n_s64(0), vdupq_n_s64(0) }; + + int i = 0; + do { + int32x4_t sum[4]; + int16x8_t sum_s16[2]; + + const int16x8_t r1_l = vld1q_s16(r1 + i); + const int16x8_t r1_h = vld1q_s16(r1 + i + 8); + const int16x8_t d_l = vld1q_s16(d + i); + const int16x8_t d_h = vld1q_s16(d + i + 8); + + // Use a zero-extending load to widen the vector elements. 
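
The line below does the load and the widening in a single predicated SVE operation. A roughly equivalent plain-NEON sequence, shown only for intuition and not as a drop-in replacement:

uint8x8_t m8 = vld1_u8(m + i);                        /* load 8 mask bytes  */
int16x8_t m16 = vreinterpretq_s16_u16(vmovl_u8(m8));  /* zero-extend to s16 */
/* svld1ub_s16(pattern, m + i) performs both steps at once for the first 8
 * elements (SV_VL8), and svget_neonq_s16() exposes the low 128 bits of the
 * SVE register as a NEON int16x8_t via the NEON-SVE bridge header. */
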
+ const int16x8_t m_l = svget_neonq_s16(svld1ub_s16(pattern, m + i)); + const int16x8_t m_h = svget_neonq_s16(svld1ub_s16(pattern, m + i + 8)); + + sum[0] = vshll_n_s16(vget_low_s16(r1_l), WEDGE_WEIGHT_BITS); + sum[1] = vshll_n_s16(vget_high_s16(r1_l), WEDGE_WEIGHT_BITS); + sum[2] = vshll_n_s16(vget_low_s16(r1_h), WEDGE_WEIGHT_BITS); + sum[3] = vshll_n_s16(vget_high_s16(r1_h), WEDGE_WEIGHT_BITS); + + sum[0] = vmlal_s16(sum[0], vget_low_s16(m_l), vget_low_s16(d_l)); + sum[1] = vmlal_s16(sum[1], vget_high_s16(m_l), vget_high_s16(d_l)); + sum[2] = vmlal_s16(sum[2], vget_low_s16(m_h), vget_low_s16(d_h)); + sum[3] = vmlal_s16(sum[3], vget_high_s16(m_h), vget_high_s16(d_h)); + + sum_s16[0] = vcombine_s16(vqmovn_s32(sum[0]), vqmovn_s32(sum[1])); + sum_s16[1] = vcombine_s16(vqmovn_s32(sum[2]), vqmovn_s32(sum[3])); + + sse[0] = aom_sdotq_s16(sse[0], sum_s16[0], sum_s16[0]); + sse[1] = aom_sdotq_s16(sse[1], sum_s16[1], sum_s16[1]); + + i += 16; + } while (i < N); + + const uint64_t csse = + (uint64_t)horizontal_add_s64x2(vaddq_s64(sse[0], sse[1])); + return ROUND_POWER_OF_TWO(csse, 2 * WEDGE_WEIGHT_BITS); +} + +int8_t av1_wedge_sign_from_residuals_sve(const int16_t *ds, const uint8_t *m, + int N, int64_t limit) { + assert(N % 16 == 0); + + // Predicate pattern with first 8 elements true. + svbool_t pattern = svptrue_pat_b16(SV_VL8); + int64x2_t acc_l = vdupq_n_s64(0); + int64x2_t acc_h = vdupq_n_s64(0); + + do { + const int16x8_t ds_l = vld1q_s16(ds); + const int16x8_t ds_h = vld1q_s16(ds + 8); + + // Use a zero-extending load to widen the vector elements. + const int16x8_t m_l = svget_neonq_s16(svld1ub_s16(pattern, m)); + const int16x8_t m_h = svget_neonq_s16(svld1ub_s16(pattern, m + 8)); + + acc_l = aom_sdotq_s16(acc_l, ds_l, m_l); + acc_h = aom_sdotq_s16(acc_h, ds_h, m_h); + + ds += 16; + m += 16; + N -= 16; + } while (N != 0); + + const int64x2_t sum = vaddq_s64(acc_l, acc_h); + return horizontal_add_s64x2(sum) > limit; +} diff --git a/third_party/aom/av1/encoder/av1_temporal_denoiser.c b/third_party/aom/av1/encoder/av1_temporal_denoiser.c index 3012df6311..d4a1625612 100644 --- a/third_party/aom/av1/encoder/av1_temporal_denoiser.c +++ b/third_party/aom/av1/encoder/av1_temporal_denoiser.c @@ -489,7 +489,7 @@ static int av1_denoiser_realloc_svc_helper(AV1_COMMON *cm, &denoiser->running_avg_y[fb_idx], cm->width, cm->height, cm->seq_params->subsampling_x, cm->seq_params->subsampling_y, cm->seq_params->use_highbitdepth, AOM_BORDER_IN_PIXELS, - cm->features.byte_alignment, 0, 0); + cm->features.byte_alignment, false, 0); if (fail) { av1_denoiser_free(denoiser); return 1; @@ -577,7 +577,7 @@ int av1_denoiser_alloc(AV1_COMMON *cm, struct SVC *svc, AV1_DENOISER *denoiser, fail = aom_alloc_frame_buffer( &denoiser->running_avg_y[i + denoiser->num_ref_frames * layer], denoise_width, denoise_height, ssx, ssy, use_highbitdepth, border, - legacy_byte_alignment, 0, 0); + legacy_byte_alignment, false, 0); if (fail) { av1_denoiser_free(denoiser); return 1; @@ -589,7 +589,7 @@ int av1_denoiser_alloc(AV1_COMMON *cm, struct SVC *svc, AV1_DENOISER *denoiser, fail = aom_alloc_frame_buffer( &denoiser->mc_running_avg_y[layer], denoise_width, denoise_height, ssx, - ssy, use_highbitdepth, border, legacy_byte_alignment, 0, 0); + ssy, use_highbitdepth, border, legacy_byte_alignment, false, 0); if (fail) { av1_denoiser_free(denoiser); return 1; @@ -600,7 +600,7 @@ int av1_denoiser_alloc(AV1_COMMON *cm, struct SVC *svc, AV1_DENOISER *denoiser, // layer. 
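
Throughout this patch the ninth argument of aom_alloc_frame_buffer() changes from a literal 0 to false, reflecting the int-to-bool migration of the pyramid field. A hedged reconstruction of the signature as implied by these call sites; the parameter names are my guess:

int aom_alloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, int width, int height,
                           int ss_x, int ss_y, int use_highbitdepth,
                           int border, int byte_alignment,
                           bool alloc_pyramid, int alloc_y_plane_only);
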
fail = aom_alloc_frame_buffer(&denoiser->last_source, width, height, ssx, ssy, use_highbitdepth, border, legacy_byte_alignment, - 0, 0); + false, 0); if (fail) { av1_denoiser_free(denoiser); return 1; diff --git a/third_party/aom/av1/encoder/bitstream.c b/third_party/aom/av1/encoder/bitstream.c index 219784fedf..9981871147 100644 --- a/third_party/aom/av1/encoder/bitstream.c +++ b/third_party/aom/av1/encoder/bitstream.c @@ -3391,8 +3391,8 @@ int av1_write_uleb_obu_size(size_t obu_header_size, size_t obu_payload_size, return AOM_CODEC_OK; } -size_t av1_obu_memmove(size_t obu_header_size, size_t obu_payload_size, - uint8_t *data) { +static size_t obu_memmove(size_t obu_header_size, size_t obu_payload_size, + uint8_t *data) { const size_t length_field_size = aom_uleb_size_in_bytes(obu_payload_size); const size_t move_dst_offset = length_field_size + obu_header_size; const size_t move_src_offset = obu_header_size; @@ -3581,7 +3581,7 @@ static void write_large_scale_tile_obu_size( *total_size += lst_obu->tg_hdr_size; const uint32_t obu_payload_size = *total_size - lst_obu->tg_hdr_size; const size_t length_field_size = - av1_obu_memmove(lst_obu->tg_hdr_size, obu_payload_size, dst); + obu_memmove(lst_obu->tg_hdr_size, obu_payload_size, dst); if (av1_write_uleb_obu_size(lst_obu->tg_hdr_size, obu_payload_size, dst) != AOM_CODEC_OK) assert(0); @@ -3806,7 +3806,7 @@ void av1_write_last_tile_info( const uint32_t obu_payload_size = (uint32_t)(*curr_tg_data_size) - obu_header_size; const size_t length_field_size = - av1_obu_memmove(obu_header_size, obu_payload_size, curr_tg_start); + obu_memmove(obu_header_size, obu_payload_size, curr_tg_start); if (av1_write_uleb_obu_size(obu_header_size, obu_payload_size, curr_tg_start) != AOM_CODEC_OK) { assert(0); @@ -4015,8 +4015,8 @@ static void write_tile_obu_size(AV1_COMP *const cpi, uint8_t *const dst, // to pack the smaller bitstream of such frames. This function computes the // number of required number of workers based on setup time overhead and job // dispatch time overhead for given tiles and available workers. 
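
The comment above describes calc_pack_bs_mt_workers() as balancing setup and job-dispatch overheads against the packing work. A hypothetical cost model for intuition only, not the actual heuristic, with all names and costs invented:

#include <stdint.h>

static int pick_workers_sketch(int64_t total_pack_cost, int num_tiles,
                               int avail_workers, int64_t setup_cost,
                               int64_t dispatch_cost) {
  int best_w = 1;
  int64_t best_total = total_pack_cost;  /* single-threaded baseline */
  for (int w = 2; w <= avail_workers; ++w) {
    /* Per-worker setup plus per-tile dispatch, against work split w ways. */
    const int64_t total =
        setup_cost * w + dispatch_cost * num_tiles + total_pack_cost / w;
    if (total < best_total) {
      best_total = total;
      best_w = w;
    }
  }
  return best_w;  /* small frames never beat the baseline, so w stays 1 */
}
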
-int calc_pack_bs_mt_workers(const TileDataEnc *tile_data, int num_tiles, - int avail_workers, bool pack_bs_mt_enabled) { +static int calc_pack_bs_mt_workers(const TileDataEnc *tile_data, int num_tiles, + int avail_workers, bool pack_bs_mt_enabled) { if (!pack_bs_mt_enabled) return 1; uint64_t frame_abs_sum_level = 0; @@ -4141,8 +4141,7 @@ static size_t av1_write_metadata_array(AV1_COMP *const cpi, uint8_t *dst) { OBU_METADATA, 0, dst); obu_payload_size = av1_write_metadata_obu(current_metadata, dst + obu_header_size); - length_field_size = - av1_obu_memmove(obu_header_size, obu_payload_size, dst); + length_field_size = obu_memmove(obu_header_size, obu_payload_size, dst); if (av1_write_uleb_obu_size(obu_header_size, obu_payload_size, dst) == AOM_CODEC_OK) { const size_t obu_size = obu_header_size + obu_payload_size; @@ -4192,7 +4191,7 @@ int av1_pack_bitstream(AV1_COMP *const cpi, uint8_t *dst, size_t *size, obu_payload_size = av1_write_sequence_header_obu(cm->seq_params, data + obu_header_size); const size_t length_field_size = - av1_obu_memmove(obu_header_size, obu_payload_size, data); + obu_memmove(obu_header_size, obu_payload_size, data); if (av1_write_uleb_obu_size(obu_header_size, obu_payload_size, data) != AOM_CODEC_OK) { return AOM_CODEC_ERROR; @@ -4217,7 +4216,7 @@ int av1_pack_bitstream(AV1_COMP *const cpi, uint8_t *dst, size_t *size, obu_payload_size = write_frame_header_obu(cpi, &cpi->td.mb.e_mbd, &saved_wb, data + obu_header_size, 1); - length_field = av1_obu_memmove(obu_header_size, obu_payload_size, data); + length_field = obu_memmove(obu_header_size, obu_payload_size, data); if (av1_write_uleb_obu_size(obu_header_size, obu_payload_size, data) != AOM_CODEC_OK) { return AOM_CODEC_ERROR; diff --git a/third_party/aom/av1/encoder/bitstream.h b/third_party/aom/av1/encoder/bitstream.h index 12e8a630db..d037039593 100644 --- a/third_party/aom/av1/encoder/bitstream.h +++ b/third_party/aom/av1/encoder/bitstream.h @@ -21,6 +21,7 @@ extern "C" { #include "av1/common/enums.h" #include "av1/encoder/level.h" #include "aom_dsp/bitwriter.h" +#include "aom_util/aom_pthread.h" struct aom_write_bit_buffer; struct AV1_COMP; diff --git a/third_party/aom/av1/encoder/block.h b/third_party/aom/av1/encoder/block.h index 33d2d8c2a0..1baf3f942e 100644 --- a/third_party/aom/av1/encoder/block.h +++ b/third_party/aom/av1/encoder/block.h @@ -1348,6 +1348,9 @@ typedef struct macroblock { //! Motion vector from superblock MV derived from int_pro_motion() in // the variance_partitioning. int_mv sb_me_mv; + //! Flag to indicate if a fixed partition should be used, only if the + // speed feature rt_sf->use_fast_fixed_part is enabled. + int sb_force_fixed_part; //! SSE of the current predictor. unsigned int pred_sse[REF_FRAMES]; //! Prediction for ML based partition. 
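
How the new sb_force_fixed_part flag is consumed, mirroring the encode_nonrd_sb() change later in this patch: the fast fixed-partition path now requires the per-superblock flag instead of the raw source-SAD grade, and never fires on intra-only frames.

if (sf->part_sf.partition_search_type == FIXED_PARTITION || seg_skip ||
    (sf->rt_sf.use_fast_fixed_part && x->sb_force_fixed_part == 1 &&
     !frame_is_intra_only(cm))) {
  /* set a fixed-size partition */
}
/* sb_force_fixed_part is initialized to 1 for each superblock in
 * encode_sb_row() and cleared in av1_source_content_sb() when the SB shows
 * high SSE without a lighting change or low sum-diff, i.e. real motion. */
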
diff --git a/third_party/aom/av1/encoder/cnn.c b/third_party/aom/av1/encoder/cnn.c index 598b362753..b019ace685 100644 --- a/third_party/aom/av1/encoder/cnn.c +++ b/third_party/aom/av1/encoder/cnn.c @@ -138,14 +138,16 @@ static bool concat_tensor(const TENSOR *src, TENSOR *dst) { return true; } -int check_tensor_equal_dims(TENSOR *t1, TENSOR *t2) { +#ifndef NDEBUG +static int check_tensor_equal_dims(TENSOR *t1, TENSOR *t2) { return (t1->width == t2->width && t1->height == t2->height); } -int check_tensor_equal_size(TENSOR *t1, TENSOR *t2) { +static int check_tensor_equal_size(TENSOR *t1, TENSOR *t2) { return (t1->channels == t2->channels && t1->width == t2->width && t1->height == t2->height); } +#endif // NDEBUG void av1_find_cnn_layer_output_size(int in_width, int in_height, const CNN_LAYER_CONFIG *layer_config, @@ -189,8 +191,8 @@ void av1_find_cnn_layer_output_size(int in_width, int in_height, } } -void find_cnn_out_channels(const CNN_LAYER_CONFIG *layer_config, - int channels_per_branch[]) { +static void find_cnn_out_channels(const CNN_LAYER_CONFIG *layer_config, + int channels_per_branch[]) { int branch = layer_config->branch; const CNN_BRANCH_CONFIG *branch_config = &layer_config->branch_config; for (int b = 0; b < CNN_MAX_BRANCHES; ++b) { diff --git a/third_party/aom/av1/encoder/encode_strategy.c b/third_party/aom/av1/encoder/encode_strategy.c index 35ca83c3f4..db77dc0e3c 100644 --- a/third_party/aom/av1/encoder/encode_strategy.c +++ b/third_party/aom/av1/encoder/encode_strategy.c @@ -712,20 +712,6 @@ int av1_get_refresh_frame_flags( } #if !CONFIG_REALTIME_ONLY -void setup_mi(AV1_COMP *const cpi, YV12_BUFFER_CONFIG *src) { - AV1_COMMON *const cm = &cpi->common; - const int num_planes = av1_num_planes(cm); - MACROBLOCK *const x = &cpi->td.mb; - MACROBLOCKD *const xd = &x->e_mbd; - - av1_setup_src_planes(x, src, 0, 0, num_planes, cm->seq_params->sb_size); - - av1_setup_block_planes(xd, cm->seq_params->subsampling_x, - cm->seq_params->subsampling_y, num_planes); - - set_mi_offsets(&cm->mi_params, xd, 0, 0); -} - // Apply temporal filtering to source frames and encode the filtered frame. // If the current frame does not require filtering, this function is identical // to av1_encode() except that tpl is not performed. @@ -819,7 +805,7 @@ static int denoise_and_encode(AV1_COMP *const cpi, uint8_t *const dest, oxcf->frm_dim_cfg.height, cm->seq_params->subsampling_x, cm->seq_params->subsampling_y, cm->seq_params->use_highbitdepth, cpi->oxcf.border_in_pixels, cm->features.byte_alignment, NULL, NULL, - NULL, cpi->image_pyramid_levels, 0); + NULL, cpi->alloc_pyramid, 0); if (ret) aom_internal_error(cm->error, AOM_CODEC_MEM_ERROR, "Failed to allocate tf_buf_second_arf"); @@ -923,7 +909,7 @@ static int denoise_and_encode(AV1_COMP *const cpi, uint8_t *const dest, if (apply_filtering && is_psnr_calc_enabled(cpi)) { cpi->source = av1_realloc_and_scale_if_required( cm, source_buffer, &cpi->scaled_source, cm->features.interp_filter, 0, - false, true, cpi->oxcf.border_in_pixels, cpi->image_pyramid_levels); + false, true, cpi->oxcf.border_in_pixels, cpi->alloc_pyramid); cpi->unscaled_source = source_buffer; } #if CONFIG_COLLECT_COMPONENT_TIMING @@ -1702,8 +1688,7 @@ int av1_encode_strategy(AV1_COMP *const cpi, size_t *const size, // This is used in rtc temporal filter case. Use true source in the PSNR // calculation. 
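
Two related rtc changes land in encode_strategy.c below: the PSNR true-source guard drops its KEY_FRAME exclusion, and the aom_yv12_copy_* helpers gain a third argument. Judging by the updated call sites, which all pass 1, that argument selects the cropped (visible) dimensions of the buffer; this is my reading, and the parameter name is an assumption:

/* Signature as implied by the call sites in this patch (use_crop is my
 * assumed name): when nonzero, the copy covers crop_width x crop_height
 * rather than the full padded plane. */
void aom_yv12_copy_y(const YV12_BUFFER_CONFIG *src_ybc,
                     YV12_BUFFER_CONFIG *dst_ybc, int use_crop);
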
- if (is_psnr_calc_enabled(cpi) && cpi->sf.rt_sf.use_rtc_tf && - cpi->common.current_frame.frame_type != KEY_FRAME) { + if (is_psnr_calc_enabled(cpi) && cpi->sf.rt_sf.use_rtc_tf) { assert(cpi->orig_source.buffer_alloc_sz > 0); cpi->source = &cpi->orig_source; } @@ -1758,9 +1743,9 @@ int av1_encode_strategy(AV1_COMP *const cpi, size_t *const size, cpi->svc.temporal_layer_id == 0 && cpi->unscaled_source->y_width == cpi->svc.source_last_TL0.y_width && cpi->unscaled_source->y_height == cpi->svc.source_last_TL0.y_height) { - aom_yv12_copy_y(cpi->unscaled_source, &cpi->svc.source_last_TL0); - aom_yv12_copy_u(cpi->unscaled_source, &cpi->svc.source_last_TL0); - aom_yv12_copy_v(cpi->unscaled_source, &cpi->svc.source_last_TL0); + aom_yv12_copy_y(cpi->unscaled_source, &cpi->svc.source_last_TL0, 1); + aom_yv12_copy_u(cpi->unscaled_source, &cpi->svc.source_last_TL0, 1); + aom_yv12_copy_v(cpi->unscaled_source, &cpi->svc.source_last_TL0, 1); } return AOM_CODEC_OK; diff --git a/third_party/aom/av1/encoder/encodeframe.c b/third_party/aom/av1/encoder/encodeframe.c index e2213a8355..a9214f77c2 100644 --- a/third_party/aom/av1/encoder/encodeframe.c +++ b/third_party/aom/av1/encoder/encodeframe.c @@ -23,7 +23,7 @@ #include "aom_dsp/binary_codes_writer.h" #include "aom_ports/mem.h" #include "aom_ports/aom_timer.h" - +#include "aom_util/aom_pthread.h" #if CONFIG_MISMATCH_DEBUG #include "aom_util/debug_util.h" #endif // CONFIG_MISMATCH_DEBUG @@ -536,8 +536,8 @@ static AOM_INLINE void encode_nonrd_sb(AV1_COMP *cpi, ThreadData *td, #endif // Set the partition if (sf->part_sf.partition_search_type == FIXED_PARTITION || seg_skip || - (sf->rt_sf.use_fast_fixed_part && - x->content_state_sb.source_sad_nonrd < kMedSad)) { + (sf->rt_sf.use_fast_fixed_part && x->sb_force_fixed_part == 1 && + !frame_is_intra_only(cm))) { // set a fixed-size partition av1_set_offsets(cpi, tile_info, x, mi_row, mi_col, sb_size); BLOCK_SIZE bsize_select = sf->part_sf.fixed_partition_size; @@ -1054,8 +1054,13 @@ static AOM_INLINE bool is_calc_src_content_needed(AV1_COMP *cpi, // The threshold is determined based on kLowSad and kHighSad threshold and // test results. - const uint64_t thresh_low = 15000; - const uint64_t thresh_high = 40000; + uint64_t thresh_low = 15000; + uint64_t thresh_high = 40000; + + if (cpi->sf.rt_sf.increase_source_sad_thresh) { + thresh_low = thresh_low << 1; + thresh_high = thresh_high << 1; + } if (avg_64x64_blk_sad > thresh_low && avg_64x64_blk_sad < thresh_high) { do_calc_src_content = false; @@ -1203,6 +1208,7 @@ static AOM_INLINE void encode_sb_row(AV1_COMP *cpi, ThreadData *td, x->sb_me_block = 0; x->sb_me_partition = 0; x->sb_me_mv.as_int = 0; + x->sb_force_fixed_part = 1; if (cpi->oxcf.mode == ALLINTRA) { x->intra_sb_rdmult_modifier = 128; @@ -1231,7 +1237,7 @@ static AOM_INLINE void encode_sb_row(AV1_COMP *cpi, ThreadData *td, // Grade the temporal variation of the sb, the grade will be used to decide // fast mode search strategy for coding blocks - grade_source_content_sb(cpi, x, tile_data, mi_row, mi_col); + if (!seg_skip) grade_source_content_sb(cpi, x, tile_data, mi_row, mi_col); // encode the superblock if (use_nonrd_mode) { @@ -2337,7 +2343,7 @@ void av1_encode_frame(AV1_COMP *cpi) { // a source or a ref frame should have an image pyramid allocated. 
// Check here so that issues can be caught early in debug mode #if !defined(NDEBUG) && !CONFIG_REALTIME_ONLY - if (cpi->image_pyramid_levels > 0) { + if (cpi->alloc_pyramid) { assert(cpi->source->y_pyramid); for (int ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) { const RefCntBuffer *const buf = get_ref_frame_buf(cm, ref_frame); diff --git a/third_party/aom/av1/encoder/encodeframe_utils.c b/third_party/aom/av1/encoder/encodeframe_utils.c index 949837184a..a8e4a88396 100644 --- a/third_party/aom/av1/encoder/encodeframe_utils.c +++ b/third_party/aom/av1/encoder/encodeframe_utils.c @@ -15,6 +15,7 @@ #include "av1/encoder/encoder.h" #include "av1/encoder/encodeframe_utils.h" +#include "av1/encoder/encoder_utils.h" #include "av1/encoder/rdopt.h" void av1_set_ssim_rdmult(const AV1_COMP *const cpi, int *errorperbit, @@ -306,6 +307,7 @@ void av1_update_state(const AV1_COMP *const cpi, ThreadData *td, // Else for cyclic refresh mode update the segment map, set the segment id // and then update the quantizer. if (cpi->oxcf.q_cfg.aq_mode == CYCLIC_REFRESH_AQ && + mi_addr->segment_id != AM_SEGMENT_ID_INACTIVE && !cpi->rc.rtc_external_ratectrl) { av1_cyclic_refresh_update_segment(cpi, x, mi_row, mi_col, bsize, ctx->rd_stats.rate, ctx->rd_stats.dist, @@ -1431,6 +1433,10 @@ void av1_source_content_sb(AV1_COMP *cpi, MACROBLOCK *x, TileDataEnc *tile_data, if ((tmp_sse - tmp_variance) < (sum_sq_thresh >> 1)) x->content_state_sb.low_sumdiff = 1; + if (tmp_sse > ((avg_source_sse_threshold_high * 7) >> 3) && + !x->content_state_sb.lighting_change && !x->content_state_sb.low_sumdiff) + x->sb_force_fixed_part = 0; + if (!cpi->sf.rt_sf.use_rtc_tf || cpi->rc.high_source_sad || cpi->rc.frame_source_sad > 20000 || cpi->svc.number_spatial_layers > 1) return; diff --git a/third_party/aom/av1/encoder/encoder.c b/third_party/aom/av1/encoder/encoder.c index fe053af5cc..1ddbfda08b 100644 --- a/third_party/aom/av1/encoder/encoder.c +++ b/third_party/aom/av1/encoder/encoder.c @@ -35,6 +35,7 @@ #include "aom_ports/aom_timer.h" #include "aom_ports/mem.h" #include "aom_scale/aom_scale.h" +#include "aom_util/aom_pthread.h" #if CONFIG_BITSTREAM_DEBUG #include "aom_util/debug_util.h" #endif // CONFIG_BITSTREAM_DEBUG @@ -152,24 +153,33 @@ int av1_set_active_map(AV1_COMP *cpi, unsigned char *new_map_16x16, int rows, unsigned char *const active_map_4x4 = cpi->active_map.map; const int mi_rows = mi_params->mi_rows; const int mi_cols = mi_params->mi_cols; - const int row_scale = mi_size_high_log2[BLOCK_16X16]; - const int col_scale = mi_size_wide_log2[BLOCK_16X16]; cpi->active_map.update = 0; - assert(mi_rows % 2 == 0); - assert(mi_cols % 2 == 0); + cpi->rc.percent_blocks_inactive = 0; + assert(mi_rows % 2 == 0 && mi_rows > 0); + assert(mi_cols % 2 == 0 && mi_cols > 0); if (new_map_16x16) { - for (int r = 0; r < (mi_rows >> row_scale); ++r) { - for (int c = 0; c < (mi_cols >> col_scale); ++c) { - const uint8_t val = new_map_16x16[r * cols + c] + int num_samples = 0; + int num_blocks_inactive = 0; + for (int r = 0; r < mi_rows; r += 4) { + for (int c = 0; c < mi_cols; c += 4) { + const uint8_t val = new_map_16x16[(r >> 2) * cols + (c >> 2)] ? 
AM_SEGMENT_ID_ACTIVE : AM_SEGMENT_ID_INACTIVE; - active_map_4x4[(2 * r + 0) * mi_cols + (c + 0)] = val; - active_map_4x4[(2 * r + 0) * mi_cols + (c + 1)] = val; - active_map_4x4[(2 * r + 1) * mi_cols + (c + 0)] = val; - active_map_4x4[(2 * r + 1) * mi_cols + (c + 1)] = val; + num_samples++; + if (val == AM_SEGMENT_ID_INACTIVE) num_blocks_inactive++; + const int row_max = AOMMIN(4, mi_rows - r); + const int col_max = AOMMIN(4, mi_cols - c); + for (int x = 0; x < row_max; ++x) { + for (int y = 0; y < col_max; ++y) { + active_map_4x4[(r + x) * mi_cols + (c + y)] = val; + } + } } } cpi->active_map.enabled = 1; + cpi->active_map.update = 1; + cpi->rc.percent_blocks_inactive = + (num_blocks_inactive * 100) / num_samples; } return 0; } @@ -943,14 +953,9 @@ void av1_change_config(struct AV1_COMP *cpi, const AV1EncoderConfig *oxcf, #if CONFIG_REALTIME_ONLY assert(!oxcf->tool_cfg.enable_global_motion); - cpi->image_pyramid_levels = 0; + cpi->alloc_pyramid = false; #else - if (oxcf->tool_cfg.enable_global_motion) { - cpi->image_pyramid_levels = - global_motion_pyr_levels[default_global_motion_method]; - } else { - cpi->image_pyramid_levels = 0; - } + cpi->alloc_pyramid = oxcf->tool_cfg.enable_global_motion; #endif // CONFIG_REALTIME_ONLY } @@ -2208,7 +2213,7 @@ void av1_set_frame_size(AV1_COMP *cpi, int width, int height) { &cm->cur_frame->buf, cm->width, cm->height, seq_params->subsampling_x, seq_params->subsampling_y, seq_params->use_highbitdepth, cpi->oxcf.border_in_pixels, cm->features.byte_alignment, NULL, NULL, - NULL, cpi->image_pyramid_levels, 0)) + NULL, cpi->alloc_pyramid, 0)) aom_internal_error(cm->error, AOM_CODEC_MEM_ERROR, "Failed to allocate frame buffer"); @@ -2389,7 +2394,10 @@ static void loopfilter_frame(AV1_COMP *cpi, AV1_COMMON *cm) { const int use_loopfilter = is_loopfilter_used(cm) && !cpi->mt_info.pipeline_lpf_mt_with_enc; - const int use_cdef = is_cdef_used(cm); + const int use_cdef = + is_cdef_used(cm) && (!cpi->active_map.enabled || + cpi->rc.percent_blocks_inactive <= + cpi->sf.rt_sf.thresh_active_maps_skip_lf_cdef); const int use_superres = av1_superres_scaled(cm); const int use_restoration = is_restoration_used(cm); @@ -2498,7 +2506,8 @@ static int encode_without_recode(AV1_COMP *cpi) { &cpi->svc.source_last_TL0, cpi->oxcf.frm_dim_cfg.width, cpi->oxcf.frm_dim_cfg.height, seq_params->subsampling_x, seq_params->subsampling_y, seq_params->use_highbitdepth, - cpi->oxcf.border_in_pixels, cm->features.byte_alignment, 0, 0)) { + cpi->oxcf.border_in_pixels, cm->features.byte_alignment, false, + 0)) { aom_internal_error(cm->error, AOM_CODEC_MEM_ERROR, "Failed to allocate buffer for source_last_TL0"); } @@ -2547,7 +2556,7 @@ static int encode_without_recode(AV1_COMP *cpi) { cpi->source = av1_realloc_and_scale_if_required( cm, unscaled, &cpi->scaled_source, filter_scaler, phase_scaler, true, - false, cpi->oxcf.border_in_pixels, cpi->image_pyramid_levels); + false, cpi->oxcf.border_in_pixels, cpi->alloc_pyramid); if (frame_is_intra_only(cm) || resize_pending != 0) { const int current_size = (cm->mi_params.mi_rows * cm->mi_params.mi_cols) >> 2; @@ -2570,7 +2579,7 @@ static int encode_without_recode(AV1_COMP *cpi) { cpi->last_source = av1_realloc_and_scale_if_required( cm, cpi->unscaled_last_source, &cpi->scaled_last_source, filter_scaler, phase_scaler, true, false, cpi->oxcf.border_in_pixels, - cpi->image_pyramid_levels); + cpi->alloc_pyramid); } if (cpi->sf.rt_sf.use_temporal_noise_estimate) { @@ -2647,12 +2656,8 @@ static int encode_without_recode(AV1_COMP *cpi) { 
av1_setup_frame(cpi); } } - - if (q_cfg->aq_mode == CYCLIC_REFRESH_AQ) { - suppress_active_map(cpi); - av1_cyclic_refresh_setup(cpi); - } av1_apply_active_map(cpi); + if (q_cfg->aq_mode == CYCLIC_REFRESH_AQ) av1_cyclic_refresh_setup(cpi); if (cm->seg.enabled) { if (!cm->seg.update_data && cm->prev_frame) { segfeatures_copy(&cm->seg, &cm->prev_frame->seg); @@ -2667,26 +2672,26 @@ static int encode_without_recode(AV1_COMP *cpi) { cm->cur_frame->seg.enabled = cm->seg.enabled; // This is for rtc temporal filtering case. - if (is_psnr_calc_enabled(cpi) && cpi->sf.rt_sf.use_rtc_tf && - cm->current_frame.frame_type != KEY_FRAME) { + if (is_psnr_calc_enabled(cpi) && cpi->sf.rt_sf.use_rtc_tf) { const SequenceHeader *seq_params = cm->seq_params; if (cpi->orig_source.buffer_alloc_sz == 0 || - cpi->last_source->y_width != cpi->source->y_width || - cpi->last_source->y_height != cpi->source->y_height) { + cpi->rc.prev_coded_width != cpi->oxcf.frm_dim_cfg.width || + cpi->rc.prev_coded_height != cpi->oxcf.frm_dim_cfg.height) { // Allocate a source buffer to store the true source for psnr calculation. if (aom_alloc_frame_buffer( &cpi->orig_source, cpi->oxcf.frm_dim_cfg.width, cpi->oxcf.frm_dim_cfg.height, seq_params->subsampling_x, seq_params->subsampling_y, seq_params->use_highbitdepth, - cpi->oxcf.border_in_pixels, cm->features.byte_alignment, 0, 0)) + cpi->oxcf.border_in_pixels, cm->features.byte_alignment, false, + 0)) aom_internal_error(cm->error, AOM_CODEC_MEM_ERROR, "Failed to allocate scaled buffer"); } - aom_yv12_copy_y(cpi->source, &cpi->orig_source); - aom_yv12_copy_u(cpi->source, &cpi->orig_source); - aom_yv12_copy_v(cpi->source, &cpi->orig_source); + aom_yv12_copy_y(cpi->source, &cpi->orig_source, 1); + aom_yv12_copy_u(cpi->source, &cpi->orig_source, 1); + aom_yv12_copy_v(cpi->source, &cpi->orig_source, 1); } #if CONFIG_COLLECT_COMPONENT_TIMING @@ -2725,9 +2730,9 @@ static int encode_without_recode(AV1_COMP *cpi) { (cm->width != cpi->unscaled_source->y_crop_width || cm->height != cpi->unscaled_source->y_crop_height)) { cpi->scaled_last_source_available = 1; - aom_yv12_copy_y(&cpi->scaled_source, &cpi->scaled_last_source); - aom_yv12_copy_u(&cpi->scaled_source, &cpi->scaled_last_source); - aom_yv12_copy_v(&cpi->scaled_source, &cpi->scaled_last_source); + aom_yv12_copy_y(&cpi->scaled_source, &cpi->scaled_last_source, 1); + aom_yv12_copy_u(&cpi->scaled_source, &cpi->scaled_last_source, 1); + aom_yv12_copy_v(&cpi->scaled_source, &cpi->scaled_last_source, 1); } #if CONFIG_COLLECT_COMPONENT_TIMING @@ -2846,7 +2851,7 @@ static int encode_with_recode_loop(AV1_COMP *cpi, size_t *size, uint8_t *dest) { } cpi->source = av1_realloc_and_scale_if_required( cm, cpi->unscaled_source, &cpi->scaled_source, EIGHTTAP_REGULAR, 0, - false, false, cpi->oxcf.border_in_pixels, cpi->image_pyramid_levels); + false, false, cpi->oxcf.border_in_pixels, cpi->alloc_pyramid); #if CONFIG_TUNE_BUTTERAUGLI if (oxcf->tune_cfg.tuning == AOM_TUNE_BUTTERAUGLI) { @@ -2866,7 +2871,7 @@ static int encode_with_recode_loop(AV1_COMP *cpi, size_t *size, uint8_t *dest) { cpi->last_source = av1_realloc_and_scale_if_required( cm, cpi->unscaled_last_source, &cpi->scaled_last_source, EIGHTTAP_REGULAR, 0, false, false, cpi->oxcf.border_in_pixels, - cpi->image_pyramid_levels); + cpi->alloc_pyramid); } int scale_references = 0; @@ -4042,7 +4047,7 @@ int av1_encode(AV1_COMP *const cpi, uint8_t *const dest, } #if CONFIG_DENOISE -static int apply_denoise_2d(AV1_COMP *cpi, YV12_BUFFER_CONFIG *sd, +static int apply_denoise_2d(AV1_COMP *cpi, const 
YV12_BUFFER_CONFIG *sd, int block_size, float noise_level, int64_t time_stamp, int64_t end_time) { AV1_COMMON *const cm = &cpi->common; @@ -4077,7 +4082,7 @@ static int apply_denoise_2d(AV1_COMP *cpi, YV12_BUFFER_CONFIG *sd, #endif int av1_receive_raw_frame(AV1_COMP *cpi, aom_enc_frame_flags_t frame_flags, - YV12_BUFFER_CONFIG *sd, int64_t time_stamp, + const YV12_BUFFER_CONFIG *sd, int64_t time_stamp, int64_t end_time) { AV1_COMMON *const cm = &cpi->common; const SequenceHeader *const seq_params = cm->seq_params; @@ -4139,8 +4144,7 @@ int av1_receive_raw_frame(AV1_COMP *cpi, aom_enc_frame_flags_t frame_flags, #endif // CONFIG_DENOISE if (av1_lookahead_push(cpi->ppi->lookahead, sd, time_stamp, end_time, - use_highbitdepth, cpi->image_pyramid_levels, - frame_flags)) { + use_highbitdepth, cpi->alloc_pyramid, frame_flags)) { aom_set_error(cm->error, AOM_CODEC_ERROR, "av1_lookahead_push() failed"); res = -1; } diff --git a/third_party/aom/av1/encoder/encoder.h b/third_party/aom/av1/encoder/encoder.h index e87ab9be1f..4de5d426ce 100644 --- a/third_party/aom/av1/encoder/encoder.h +++ b/third_party/aom/av1/encoder/encoder.h @@ -21,6 +21,7 @@ #include "config/aom_config.h" #include "aom/aomcx.h" +#include "aom_util/aom_pthread.h" #include "av1/common/alloccommon.h" #include "av1/common/av1_common_int.h" @@ -3631,10 +3632,10 @@ typedef struct AV1_COMP { unsigned int zeromv_skip_thresh_exit_part[BLOCK_SIZES_ALL]; /*! - * Number of downsampling pyramid levels to allocate for each frame + * Should we allocate a downsampling pyramid for each frame buffer? * This is currently only used for global motion */ - int image_pyramid_levels; + bool alloc_pyramid; #if CONFIG_SALIENCY_MAP /*! @@ -3808,7 +3809,7 @@ int av1_init_parallel_frame_context(const AV1_COMP_DATA *const first_cpi_data, * copy of the pointer. */ int av1_receive_raw_frame(AV1_COMP *cpi, aom_enc_frame_flags_t frame_flags, - YV12_BUFFER_CONFIG *sd, int64_t time_stamp, + const YV12_BUFFER_CONFIG *sd, int64_t time_stamp, int64_t end_time_stamp); /*!\brief Encode a frame @@ -4310,7 +4311,7 @@ static AOM_INLINE int is_psnr_calc_enabled(const AV1_COMP *cpi) { const AV1_COMMON *const cm = &cpi->common; return cpi->ppi->b_calculate_psnr && !is_stat_generation_stage(cpi) && - cm->show_frame; + cm->show_frame && !cpi->is_dropped_frame; } static INLINE int is_frame_resize_pending(const AV1_COMP *const cpi) { diff --git a/third_party/aom/av1/encoder/encoder_alloc.h b/third_party/aom/av1/encoder/encoder_alloc.h index ce48496d48..f24d4b0a10 100644 --- a/third_party/aom/av1/encoder/encoder_alloc.h +++ b/third_party/aom/av1/encoder/encoder_alloc.h @@ -439,8 +439,7 @@ static AOM_INLINE YV12_BUFFER_CONFIG *realloc_and_scale_source( &cpi->scaled_source, scaled_width, scaled_height, cm->seq_params->subsampling_x, cm->seq_params->subsampling_y, cm->seq_params->use_highbitdepth, AOM_BORDER_IN_PIXELS, - cm->features.byte_alignment, NULL, NULL, NULL, - cpi->image_pyramid_levels, 0)) + cm->features.byte_alignment, NULL, NULL, NULL, cpi->alloc_pyramid, 0)) aom_internal_error(cm->error, AOM_CODEC_MEM_ERROR, "Failed to reallocate scaled source buffer"); assert(cpi->scaled_source.y_crop_width == scaled_width); diff --git a/third_party/aom/av1/encoder/encoder_utils.c b/third_party/aom/av1/encoder/encoder_utils.c index c35873d207..1f81a530c9 100644 --- a/third_party/aom/av1/encoder/encoder_utils.c +++ b/third_party/aom/av1/encoder/encoder_utils.c @@ -9,8 +9,11 @@ * PATENTS file, you can obtain it at www.aomedia.org/license/patent. 
*/ +#include <string.h> + #include "aom/aomcx.h" +#include "av1/common/av1_common_int.h" #include "av1/encoder/bitstream.h" #include "av1/encoder/encodeframe.h" #include "av1/encoder/encoder.h" @@ -421,11 +424,13 @@ void av1_apply_active_map(AV1_COMP *cpi) { struct segmentation *const seg = &cpi->common.seg; unsigned char *const seg_map = cpi->enc_seg.map; const unsigned char *const active_map = cpi->active_map.map; - int i; assert(AM_SEGMENT_ID_ACTIVE == CR_SEGMENT_ID_BASE); - if (frame_is_intra_only(&cpi->common)) { + // Disable the active_maps on intra_only frames or if the + // input map for the current frame has no inactive blocks. + if (frame_is_intra_only(&cpi->common) || + cpi->rc.percent_blocks_inactive == 0) { cpi->active_map.enabled = 0; cpi->active_map.update = 1; } @@ -434,8 +439,7 @@ if (cpi->active_map.enabled) { const int num_mis = cpi->common.mi_params.mi_rows * cpi->common.mi_params.mi_cols; - for (i = 0; i < num_mis; ++i) - if (seg_map[i] == AM_SEGMENT_ID_ACTIVE) seg_map[i] = active_map[i]; + memcpy(seg_map, active_map, sizeof(active_map[0]) * num_mis); av1_enable_segmentation(seg); av1_enable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_SKIP); av1_enable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_ALT_LF_Y_H); @@ -725,7 +729,7 @@ void av1_scale_references(AV1_COMP *cpi, const InterpFilter filter, RefCntBuffer *ref_fb = get_ref_frame_buf(cm, ref_frame); if (aom_yv12_realloc_with_new_border( &ref_fb->buf, AOM_BORDER_IN_PIXELS, - cm->features.byte_alignment, cpi->image_pyramid_levels, + cm->features.byte_alignment, cpi->alloc_pyramid, num_planes) != 0) { aom_internal_error(cm->error, AOM_CODEC_MEM_ERROR, "Failed to allocate frame buffer"); @@ -749,7 +753,7 @@ void av1_scale_references(AV1_COMP *cpi, const InterpFilter filter, &new_fb->buf, cm->width, cm->height, cm->seq_params->subsampling_x, cm->seq_params->subsampling_y, cm->seq_params->use_highbitdepth, AOM_BORDER_IN_PIXELS, - cm->features.byte_alignment, NULL, NULL, NULL, 0, 0)) { + cm->features.byte_alignment, NULL, NULL, NULL, false, 0)) { if (force_scaling) { // Release the reference acquired in the get_free_fb() call above.
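Since av1_set_active_map() now rewrites the whole 4x4 map on every update, the merge loop removed above can collapse into a plain memcpy. A hedged sketch of the before/after semantics (one byte per mi unit assumed):

#include <string.h>

static void adopt_active_map_sketch(unsigned char *seg_map,
                                    const unsigned char *active_map,
                                    int num_mis, int wholesale) {
  if (wholesale) {
    // New behavior: adopt the active map verbatim.
    memcpy(seg_map, active_map, num_mis * sizeof(*active_map));
  } else {
    // Old behavior: only overwrite entries still holding the active id
    // (assumed to be 0, matching the CR_SEGMENT_ID_BASE assert above).
    for (int i = 0; i < num_mis; ++i)
      if (seg_map[i] == 0) seg_map[i] = active_map[i];
  }
}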
--new_fb->ref_count; @@ -1087,12 +1091,12 @@ void av1_determine_sc_tools_with_encoding(AV1_COMP *cpi, const int q_orig) { cpi->source = av1_realloc_and_scale_if_required( cm, cpi->unscaled_source, &cpi->scaled_source, cm->features.interp_filter, - 0, false, false, cpi->oxcf.border_in_pixels, cpi->image_pyramid_levels); + 0, false, false, cpi->oxcf.border_in_pixels, cpi->alloc_pyramid); if (cpi->unscaled_last_source != NULL) { cpi->last_source = av1_realloc_and_scale_if_required( cm, cpi->unscaled_last_source, &cpi->scaled_last_source, cm->features.interp_filter, 0, false, false, cpi->oxcf.border_in_pixels, - cpi->image_pyramid_levels); + cpi->alloc_pyramid); } av1_setup_frame(cpi); diff --git a/third_party/aom/av1/encoder/encodetxb.c b/third_party/aom/av1/encoder/encodetxb.c index 5fe2a497c7..701c5489fe 100644 --- a/third_party/aom/av1/encoder/encodetxb.c +++ b/third_party/aom/av1/encoder/encodetxb.c @@ -134,14 +134,14 @@ int av1_get_eob_pos_token(const int eob, int *const extra) { } #if CONFIG_ENTROPY_STATS -void av1_update_eob_context(int cdf_idx, int eob, TX_SIZE tx_size, - TX_CLASS tx_class, PLANE_TYPE plane, - FRAME_CONTEXT *ec_ctx, FRAME_COUNTS *counts, - uint8_t allow_update_cdf) { +static void update_eob_context(int cdf_idx, int eob, TX_SIZE tx_size, + TX_CLASS tx_class, PLANE_TYPE plane, + FRAME_CONTEXT *ec_ctx, FRAME_COUNTS *counts, + uint8_t allow_update_cdf) { #else -void av1_update_eob_context(int eob, TX_SIZE tx_size, TX_CLASS tx_class, - PLANE_TYPE plane, FRAME_CONTEXT *ec_ctx, - uint8_t allow_update_cdf) { +static void update_eob_context(int eob, TX_SIZE tx_size, TX_CLASS tx_class, + PLANE_TYPE plane, FRAME_CONTEXT *ec_ctx, + uint8_t allow_update_cdf) { #endif int eob_extra; const int eob_pt = av1_get_eob_pos_token(eob, &eob_extra); @@ -623,11 +623,11 @@ void av1_update_and_record_txb_context(int plane, int block, int blk_row, td->rd_counts.tx_type_used[tx_size][tx_type]++; #if CONFIG_ENTROPY_STATS - av1_update_eob_context(cdf_idx, eob, tx_size, tx_class, plane_type, ec_ctx, - td->counts, allow_update_cdf); + update_eob_context(cdf_idx, eob, tx_size, tx_class, plane_type, ec_ctx, + td->counts, allow_update_cdf); #else - av1_update_eob_context(eob, tx_size, tx_class, plane_type, ec_ctx, - allow_update_cdf); + update_eob_context(eob, tx_size, tx_class, plane_type, ec_ctx, + allow_update_cdf); #endif DECLARE_ALIGNED(16, int8_t, coeff_contexts[MAX_TX_SQUARE]); @@ -785,8 +785,8 @@ void av1_record_txb_context(int plane, int block, int blk_row, int blk_col, #if CONFIG_ENTROPY_STATS FRAME_CONTEXT *ec_ctx = xd->tile_ctx; - av1_update_eob_context(cdf_idx, eob, tx_size, tx_class, plane_type, ec_ctx, - td->counts, 0 /*allow_update_cdf*/); + update_eob_context(cdf_idx, eob, tx_size, tx_class, plane_type, ec_ctx, + td->counts, 0 /*allow_update_cdf*/); DECLARE_ALIGNED(16, int8_t, coeff_contexts[MAX_TX_SQUARE]); av1_get_nz_map_contexts(levels, scan, eob, tx_size, tx_class, diff --git a/third_party/aom/av1/encoder/ethread.c b/third_party/aom/av1/encoder/ethread.c index d6a806d504..755535ba51 100644 --- a/third_party/aom/av1/encoder/ethread.c +++ b/third_party/aom/av1/encoder/ethread.c @@ -12,6 +12,8 @@ #include #include +#include "aom_util/aom_pthread.h" + #include "av1/common/warped_motion.h" #include "av1/common/thread_common.h" @@ -1415,7 +1417,7 @@ static AOM_INLINE void sync_fpmt_workers(AV1_PRIMARY *ppi, int num_workers = ppi->p_mt_info.p_num_workers; int had_error = 0; // Points to error in the earliest display order frame in the parallel set. 
- const struct aom_internal_error_info *error; + const struct aom_internal_error_info *error = NULL; // Encoding ends. for (int i = num_workers - 1; i >= 0; --i) { @@ -2227,8 +2229,8 @@ void av1_tpl_dealloc(AV1TplRowMultiThreadSync *tpl_sync) { } // Allocate memory for tpl row synchronization. -void av1_tpl_alloc(AV1TplRowMultiThreadSync *tpl_sync, AV1_COMMON *cm, - int mb_rows) { +static void av1_tpl_alloc(AV1TplRowMultiThreadSync *tpl_sync, AV1_COMMON *cm, + int mb_rows) { tpl_sync->rows = mb_rows; #if CONFIG_MULTITHREAD { diff --git a/third_party/aom/av1/encoder/firstpass.c b/third_party/aom/av1/encoder/firstpass.c index e20b6c177e..b94a50714a 100644 --- a/third_party/aom/av1/encoder/firstpass.c +++ b/third_party/aom/av1/encoder/firstpass.c @@ -22,6 +22,7 @@ #include "aom_ports/mem.h" #include "aom_scale/aom_scale.h" #include "aom_scale/yv12config.h" +#include "aom_util/aom_pthread.h" #include "av1/common/entropymv.h" #include "av1/common/quant_common.h" diff --git a/third_party/aom/av1/encoder/global_motion.c b/third_party/aom/av1/encoder/global_motion.c index 73910de121..0ae47809c6 100644 --- a/third_party/aom/av1/encoder/global_motion.c +++ b/third_party/aom/av1/encoder/global_motion.c @@ -30,83 +30,6 @@ // Border over which to compute the global motion #define ERRORADV_BORDER 0 -/* clang-format off */ -// Error metric used for global motion evaluation. -// For 8-bit input, the pixel error used to index this table will always -// be between -255 and +255. But for 10- and 12-bit input, we use interpolation -// which means that we need to support indices of -256 and +256 as well. -// Therefore, the table is offset so that logical index 0 corresponds to -// error_measure_lut[256]. -const int error_measure_lut[513] = { - // pow 0.7 - 16384, 16384, 16339, 16294, 16249, 16204, 16158, 16113, - 16068, 16022, 15977, 15932, 15886, 15840, 15795, 15749, - 15703, 15657, 15612, 15566, 15520, 15474, 15427, 15381, - 15335, 15289, 15242, 15196, 15149, 15103, 15056, 15010, - 14963, 14916, 14869, 14822, 14775, 14728, 14681, 14634, - 14587, 14539, 14492, 14445, 14397, 14350, 14302, 14254, - 14206, 14159, 14111, 14063, 14015, 13967, 13918, 13870, - 13822, 13773, 13725, 13676, 13628, 13579, 13530, 13481, - 13432, 13383, 13334, 13285, 13236, 13187, 13137, 13088, - 13038, 12988, 12939, 12889, 12839, 12789, 12739, 12689, - 12639, 12588, 12538, 12487, 12437, 12386, 12335, 12285, - 12234, 12183, 12132, 12080, 12029, 11978, 11926, 11875, - 11823, 11771, 11719, 11667, 11615, 11563, 11511, 11458, - 11406, 11353, 11301, 11248, 11195, 11142, 11089, 11036, - 10982, 10929, 10875, 10822, 10768, 10714, 10660, 10606, - 10552, 10497, 10443, 10388, 10333, 10279, 10224, 10168, - 10113, 10058, 10002, 9947, 9891, 9835, 9779, 9723, - 9666, 9610, 9553, 9497, 9440, 9383, 9326, 9268, - 9211, 9153, 9095, 9037, 8979, 8921, 8862, 8804, - 8745, 8686, 8627, 8568, 8508, 8449, 8389, 8329, - 8269, 8208, 8148, 8087, 8026, 7965, 7903, 7842, - 7780, 7718, 7656, 7593, 7531, 7468, 7405, 7341, - 7278, 7214, 7150, 7086, 7021, 6956, 6891, 6826, - 6760, 6695, 6628, 6562, 6495, 6428, 6361, 6293, - 6225, 6157, 6089, 6020, 5950, 5881, 5811, 5741, - 5670, 5599, 5527, 5456, 5383, 5311, 5237, 5164, - 5090, 5015, 4941, 4865, 4789, 4713, 4636, 4558, - 4480, 4401, 4322, 4242, 4162, 4080, 3998, 3916, - 3832, 3748, 3663, 3577, 3490, 3402, 3314, 3224, - 3133, 3041, 2948, 2854, 2758, 2661, 2562, 2461, - 2359, 2255, 2148, 2040, 1929, 1815, 1698, 1577, - 1452, 1323, 1187, 1045, 894, 731, 550, 339, - 0, 339, 550, 731, 894, 1045, 1187, 1323, - 1452, 1577, 1698, 
1815, 1929, 2040, 2148, 2255, - 2359, 2461, 2562, 2661, 2758, 2854, 2948, 3041, - 3133, 3224, 3314, 3402, 3490, 3577, 3663, 3748, - 3832, 3916, 3998, 4080, 4162, 4242, 4322, 4401, - 4480, 4558, 4636, 4713, 4789, 4865, 4941, 5015, - 5090, 5164, 5237, 5311, 5383, 5456, 5527, 5599, - 5670, 5741, 5811, 5881, 5950, 6020, 6089, 6157, - 6225, 6293, 6361, 6428, 6495, 6562, 6628, 6695, - 6760, 6826, 6891, 6956, 7021, 7086, 7150, 7214, - 7278, 7341, 7405, 7468, 7531, 7593, 7656, 7718, - 7780, 7842, 7903, 7965, 8026, 8087, 8148, 8208, - 8269, 8329, 8389, 8449, 8508, 8568, 8627, 8686, - 8745, 8804, 8862, 8921, 8979, 9037, 9095, 9153, - 9211, 9268, 9326, 9383, 9440, 9497, 9553, 9610, - 9666, 9723, 9779, 9835, 9891, 9947, 10002, 10058, - 10113, 10168, 10224, 10279, 10333, 10388, 10443, 10497, - 10552, 10606, 10660, 10714, 10768, 10822, 10875, 10929, - 10982, 11036, 11089, 11142, 11195, 11248, 11301, 11353, - 11406, 11458, 11511, 11563, 11615, 11667, 11719, 11771, - 11823, 11875, 11926, 11978, 12029, 12080, 12132, 12183, - 12234, 12285, 12335, 12386, 12437, 12487, 12538, 12588, - 12639, 12689, 12739, 12789, 12839, 12889, 12939, 12988, - 13038, 13088, 13137, 13187, 13236, 13285, 13334, 13383, - 13432, 13481, 13530, 13579, 13628, 13676, 13725, 13773, - 13822, 13870, 13918, 13967, 14015, 14063, 14111, 14159, - 14206, 14254, 14302, 14350, 14397, 14445, 14492, 14539, - 14587, 14634, 14681, 14728, 14775, 14822, 14869, 14916, - 14963, 15010, 15056, 15103, 15149, 15196, 15242, 15289, - 15335, 15381, 15427, 15474, 15520, 15566, 15612, 15657, - 15703, 15749, 15795, 15840, 15886, 15932, 15977, 16022, - 16068, 16113, 16158, 16204, 16249, 16294, 16339, 16384, - 16384, -}; -/* clang-format on */ - int av1_is_enough_erroradvantage(double best_erroradvantage, int params_cost) { return best_erroradvantage < erroradv_tr && best_erroradvantage * params_cost < erroradv_prod_tr; @@ -541,6 +464,11 @@ int64_t av1_refine_integerized_param( } wm->wmtype = get_wmtype(wm); + // Recompute shear params for the refined model + // This should never fail, because we only ever consider warp-able models + if (!av1_get_shear_params(wm)) { + assert(0); + } return best_error; } diff --git a/third_party/aom/av1/encoder/global_motion.h b/third_party/aom/av1/encoder/global_motion.h index 8c9c60f0f5..de46a0e1f2 100644 --- a/third_party/aom/av1/encoder/global_motion.h +++ b/third_party/aom/av1/encoder/global_motion.h @@ -15,6 +15,7 @@ #include "aom/aom_integer.h" #include "aom_dsp/flow_estimation/flow_estimation.h" #include "aom_scale/yv12config.h" +#include "aom_util/aom_pthread.h" #include "aom_util/aom_thread.h" #ifdef __cplusplus @@ -97,37 +98,6 @@ void av1_compute_feature_segmentation_map(uint8_t *segment_map, int width, int height, int *inliers, int num_inliers); -extern const int error_measure_lut[513]; - -static INLINE int error_measure(int err) { - return error_measure_lut[256 + err]; -} - -#if CONFIG_AV1_HIGHBITDEPTH -static INLINE int highbd_error_measure(int err, int bd) { - const int b = bd - 8; - const int bmask = (1 << b) - 1; - const int v = (1 << b); - - // Split error into two parts and do an interpolated table lookup - // To compute the table index and interpolation value, we want to calculate - // the quotient and remainder of err / 2^b. But it is very important that - // the division must round down, and the remainder must be positive, - // ie. in the range [0, 2^b). - // - // In C, the >> and & operators do what we want, but the / and % operators - // give the wrong results for negative inputs. So we must use >> and & here. 
- // - // For example, if bd == 10 and err == -5, compare the results: - // (-5) >> 2 = -2, (-5) & 3 = 3 - // vs. (-5) / 4 = -1, (-5) % 4 = -1 - const int e1 = err >> b; - const int e2 = err & bmask; - return error_measure_lut[256 + e1] * (v - e2) + - error_measure_lut[257 + e1] * e2; -} -#endif // CONFIG_AV1_HIGHBITDEPTH - int64_t av1_segmented_frame_error(int use_hbd, int bd, const uint8_t *ref, int ref_stride, uint8_t *dst, int dst_stride, int p_width, int p_height, diff --git a/third_party/aom/av1/encoder/global_motion_facade.c b/third_party/aom/av1/encoder/global_motion_facade.c index 02a4e70ed3..687eeee18a 100644 --- a/third_party/aom/av1/encoder/global_motion_facade.c +++ b/third_party/aom/av1/encoder/global_motion_facade.c @@ -89,6 +89,7 @@ static AOM_INLINE void compute_global_motion_for_ref_frame( assert(ref_buf[frame] != NULL); int bit_depth = cpi->common.seq_params->bit_depth; GlobalMotionMethod global_motion_method = default_global_motion_method; + int downsample_level = cpi->sf.gm_sf.downsample_level; int num_refinements = cpi->sf.gm_sf.num_refinement_steps; bool mem_alloc_failed = false; @@ -99,9 +100,10 @@ static AOM_INLINE void compute_global_motion_for_ref_frame( double best_erroradv = erroradv_tr; for (TransformationType model = FIRST_GLOBAL_TRANS_TYPE; model <= LAST_GLOBAL_TRANS_TYPE; ++model) { - if (!aom_compute_global_motion( - model, cpi->source, ref_buf[frame], bit_depth, global_motion_method, - motion_models, RANSAC_NUM_MOTIONS, &mem_alloc_failed)) { + if (!aom_compute_global_motion(model, cpi->source, ref_buf[frame], + bit_depth, global_motion_method, + downsample_level, motion_models, + RANSAC_NUM_MOTIONS, &mem_alloc_failed)) { if (mem_alloc_failed) { aom_internal_error(error_info, AOM_CODEC_MEM_ERROR, "Failed to allocate global motion buffers"); @@ -115,6 +117,9 @@ static AOM_INLINE void compute_global_motion_for_ref_frame( WarpedMotionParams tmp_wm_params; av1_convert_model_to_params(motion_models[i].params, &tmp_wm_params); + // Check that the generated model is warp-able + if (!av1_get_shear_params(&tmp_wm_params)) continue; + // Skip models that we won't use (IDENTITY or TRANSLATION) // // For IDENTITY type models, we don't need to evaluate anything because @@ -151,6 +156,14 @@ static AOM_INLINE void compute_global_motion_for_ref_frame( double erroradvantage = (double)warp_error / ref_frame_error; + // Check that the model signaling cost is not too high + if (!av1_is_enough_erroradvantage( + erroradvantage, + gm_get_params_cost(&tmp_wm_params, ref_params, + cm->features.allow_high_precision_mv))) { + continue; + } + if (erroradvantage < best_erroradv) { best_erroradv = erroradvantage; // Save the wm_params modified by @@ -161,34 +174,6 @@ static AOM_INLINE void compute_global_motion_for_ref_frame( } } } - - if (!av1_get_shear_params(&cm->global_motion[frame])) - cm->global_motion[frame] = default_warp_params; - -#if 0 - // We never choose translational models, so this code is disabled - if (cm->global_motion[frame].wmtype == TRANSLATION) { - cm->global_motion[frame].wmmat[0] = - convert_to_trans_prec(cm->features.allow_high_precision_mv, - cm->global_motion[frame].wmmat[0]) * - GM_TRANS_ONLY_DECODE_FACTOR; - cm->global_motion[frame].wmmat[1] = - convert_to_trans_prec(cm->features.allow_high_precision_mv, - cm->global_motion[frame].wmmat[1]) * - GM_TRANS_ONLY_DECODE_FACTOR; - } -#endif - - if (cm->global_motion[frame].wmtype == IDENTITY) return; - - // If the best error advantage found doesn't meet the threshold for - // this motion type, revert to IDENTITY. 
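The removed highbd_error_measure() relied on a C detail that is easy to forget: on the usual arithmetic-shift platforms, >> and & give a floored quotient and a non-negative remainder for negative inputs, while / and % truncate toward zero. A tiny self-checking demo of the example from the deleted comment:

#include <assert.h>

static void floor_div_demo(void) {
  const int err = -5, b = 2, mask = (1 << b) - 1;
  // Shift and mask: floored quotient, remainder in [0, 2^b).
  assert((err >> b) == -2 && (err & mask) == 3);  // -5 == (-2) * 4 + 3
  // C division: truncation toward zero, remainder may be negative.
  assert((err / (1 << b)) == -1 && (err % (1 << b)) == -1);
}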
- if (!av1_is_enough_erroradvantage( - best_erroradv, - gm_get_params_cost(&cm->global_motion[frame], ref_params, - cm->features.allow_high_precision_mv))) { - cm->global_motion[frame] = default_warp_params; - } } // Computes global motion for the given reference frame. diff --git a/third_party/aom/av1/encoder/k_means_template.h b/third_party/aom/av1/encoder/k_means_template.h index 4be2038a6f..239029345d 100644 --- a/third_party/aom/av1/encoder/k_means_template.h +++ b/third_party/aom/av1/encoder/k_means_template.h @@ -24,6 +24,9 @@ #define RENAME_(x, y) AV1_K_MEANS_RENAME(x, y) #define RENAME(x) RENAME_(x, AV1_K_MEANS_DIM) +#define K_MEANS_RENAME_C(x, y) x##_dim##y##_c +#define RENAME_C_(x, y) K_MEANS_RENAME_C(x, y) +#define RENAME_C(x) RENAME_C_(x, AV1_K_MEANS_DIM) // Though we want to compute the smallest L2 norm, in 1 dimension, // it is equivalent to find the smallest L1 norm and then square it. @@ -41,8 +44,8 @@ static int RENAME(calc_dist)(const int16_t *p1, const int16_t *p2) { #endif } -void RENAME(av1_calc_indices)(const int16_t *data, const int16_t *centroids, - uint8_t *indices, int64_t *dist, int n, int k) { +void RENAME_C(av1_calc_indices)(const int16_t *data, const int16_t *centroids, + uint8_t *indices, int64_t *dist, int n, int k) { if (dist) { *dist = 0; } @@ -149,3 +152,6 @@ void RENAME(av1_k_means)(const int16_t *data, int16_t *centroids, } #undef RENAME_ #undef RENAME +#undef K_MEANS_RENAME_C +#undef RENAME_C_ +#undef RENAME_C diff --git a/third_party/aom/av1/encoder/lookahead.c b/third_party/aom/av1/encoder/lookahead.c index 9ef9b88675..476c91ab95 100644 --- a/third_party/aom/av1/encoder/lookahead.c +++ b/third_party/aom/av1/encoder/lookahead.c @@ -46,7 +46,7 @@ struct lookahead_ctx *av1_lookahead_init( unsigned int width, unsigned int height, unsigned int subsampling_x, unsigned int subsampling_y, int use_highbitdepth, unsigned int depth, const int border_in_pixels, int byte_alignment, int num_lap_buffers, - bool is_all_intra, int num_pyramid_levels) { + bool is_all_intra, bool alloc_pyramid) { int lag_in_frames = AOMMAX(1, depth); // For all-intra frame encoding, previous source frames are not required. 
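For reference, the renaming macros added to k_means_template.h expand as follows once the template is instantiated; the worked expansion below assumes AV1_K_MEANS_DIM is 2:

#define AV1_K_MEANS_DIM 2
#define AV1_K_MEANS_RENAME(func, dim) func##_dim##dim  // from palette.h
#define RENAME_(x, y) AV1_K_MEANS_RENAME(x, y)
#define RENAME(x) RENAME_(x, AV1_K_MEANS_DIM)
#define K_MEANS_RENAME_C(x, y) x##_dim##y##_c
#define RENAME_C_(x, y) K_MEANS_RENAME_C(x, y)
#define RENAME_C(x) RENAME_C_(x, AV1_K_MEANS_DIM)
// RENAME(av1_k_means)        -> av1_k_means_dim2
// RENAME_C(av1_calc_indices) -> av1_calc_indices_dim2_c
// Dropping the "_c" suffix from AV1_K_MEANS_RENAME leaves av1_k_means_dim2
// as the public name, while RENAME_C yields the _c-suffixed body,
// presumably so a dispatched av1_calc_indices_dim2 can fall back to it.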
@@ -82,7 +82,7 @@ struct lookahead_ctx *av1_lookahead_init( if (aom_realloc_frame_buffer( &ctx->buf[i].img, width, height, subsampling_x, subsampling_y, use_highbitdepth, border_in_pixels, byte_alignment, NULL, NULL, - NULL, num_pyramid_levels, 0)) { + NULL, alloc_pyramid, 0)) { goto fail; } } @@ -100,7 +100,7 @@ int av1_lookahead_full(const struct lookahead_ctx *ctx) { int av1_lookahead_push(struct lookahead_ctx *ctx, const YV12_BUFFER_CONFIG *src, int64_t ts_start, int64_t ts_end, int use_highbitdepth, - int num_pyramid_levels, aom_enc_frame_flags_t flags) { + bool alloc_pyramid, aom_enc_frame_flags_t flags) { int width = src->y_crop_width; int height = src->y_crop_height; int uv_width = src->uv_crop_width; @@ -124,9 +124,9 @@ int av1_lookahead_push(struct lookahead_ctx *ctx, const YV12_BUFFER_CONFIG *src, height != buf->img.y_crop_height || uv_width != buf->img.uv_crop_width || uv_height != buf->img.uv_crop_height; - larger_dimensions = width > buf->img.y_width || height > buf->img.y_height || - uv_width > buf->img.uv_width || - uv_height > buf->img.uv_height; + larger_dimensions = + width > buf->img.y_crop_width || height > buf->img.y_crop_height || + uv_width > buf->img.uv_crop_width || uv_height > buf->img.uv_crop_height; assert(!larger_dimensions || new_dimensions); if (larger_dimensions) { @@ -134,11 +134,15 @@ int av1_lookahead_push(struct lookahead_ctx *ctx, const YV12_BUFFER_CONFIG *src, memset(&new_img, 0, sizeof(new_img)); if (aom_alloc_frame_buffer(&new_img, width, height, subsampling_x, subsampling_y, use_highbitdepth, - AOM_BORDER_IN_PIXELS, 0, num_pyramid_levels, 0)) + AOM_BORDER_IN_PIXELS, 0, alloc_pyramid, 0)) return 1; aom_free_frame_buffer(&buf->img); buf->img = new_img; } else if (new_dimensions) { + buf->img.y_width = src->y_width; + buf->img.y_height = src->y_height; + buf->img.uv_width = src->uv_width; + buf->img.uv_height = src->uv_height; buf->img.y_crop_width = src->y_crop_width; buf->img.y_crop_height = src->y_crop_height; buf->img.uv_crop_width = src->uv_crop_width; @@ -146,7 +150,6 @@ int av1_lookahead_push(struct lookahead_ctx *ctx, const YV12_BUFFER_CONFIG *src, buf->img.subsampling_x = src->subsampling_x; buf->img.subsampling_y = src->subsampling_y; } - // Partial copy not implemented yet av1_copy_and_extend_frame(src, &buf->img); buf->ts_start = ts_start; diff --git a/third_party/aom/av1/encoder/lookahead.h b/third_party/aom/av1/encoder/lookahead.h index c0e6d222f5..41eca87fa3 100644 --- a/third_party/aom/av1/encoder/lookahead.h +++ b/third_party/aom/av1/encoder/lookahead.h @@ -70,7 +70,7 @@ struct lookahead_ctx *av1_lookahead_init( unsigned int width, unsigned int height, unsigned int subsampling_x, unsigned int subsampling_y, int use_highbitdepth, unsigned int depth, const int border_in_pixels, int byte_alignment, int num_lap_buffers, - bool is_all_intra, int num_pyramid_levels); + bool is_all_intra, bool alloc_pyramid); /**\brief Destroys the lookahead stage */ @@ -85,18 +85,18 @@ int av1_lookahead_full(const struct lookahead_ctx *ctx); * This function will copy the source image into a new framebuffer with * the expected stride/border. 
* - * \param[in] ctx Pointer to the lookahead context - * \param[in] src Pointer to the image to enqueue - * \param[in] ts_start Timestamp for the start of this frame - * \param[in] ts_end Timestamp for the end of this frame - * \param[in] use_highbitdepth Tell if HBD is used - * \param[in] num_pyramid_levels Number of pyramid levels to allocate - for each frame buffer - * \param[in] flags Flags set on this frame + * \param[in] ctx Pointer to the lookahead context + * \param[in] src Pointer to the image to enqueue + * \param[in] ts_start Timestamp for the start of this frame + * \param[in] ts_end Timestamp for the end of this frame + * \param[in] use_highbitdepth Tell if HBD is used + * \param[in] alloc_pyramid Whether to allocate a downsampling pyramid + * for each frame buffer + * \param[in] flags Flags set on this frame */ int av1_lookahead_push(struct lookahead_ctx *ctx, const YV12_BUFFER_CONFIG *src, int64_t ts_start, int64_t ts_end, int use_highbitdepth, - int num_pyramid_levels, aom_enc_frame_flags_t flags); + bool alloc_pyramid, aom_enc_frame_flags_t flags); /**\brief Get the next source buffer to encode * diff --git a/third_party/aom/av1/encoder/nonrd_pickmode.c b/third_party/aom/av1/encoder/nonrd_pickmode.c index f939b6d1fa..57c74f66d5 100644 --- a/third_party/aom/av1/encoder/nonrd_pickmode.c +++ b/third_party/aom/av1/encoder/nonrd_pickmode.c @@ -2357,6 +2357,10 @@ static AOM_FORCE_INLINE bool skip_inter_mode_nonrd( *ref_frame2 = NONE_FRAME; } + if (segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP) && + (*this_mode != GLOBALMV || *ref_frame != LAST_FRAME)) + return true; + if (x->sb_me_block && *ref_frame == LAST_FRAME) { // We want to make sure to test the superblock MV: // so don't skip (return false) for NEAREST_LAST or NEAR_LAST if they @@ -3241,7 +3245,8 @@ void av1_nonrd_pick_inter_mode_sb(AV1_COMP *cpi, TileDataEnc *tile_data, inter_pred_params_sr.conv_params = get_conv_params(/*do_average=*/0, AOM_PLANE_Y, xd->bd); - x->block_is_zero_sad = x->content_state_sb.source_sad_nonrd == kZeroSad; + x->block_is_zero_sad = x->content_state_sb.source_sad_nonrd == kZeroSad || + segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP); if (cpi->oxcf.tune_cfg.content == AOM_CONTENT_SCREEN && !x->force_zeromv_skip_for_blk && x->content_state_sb.source_sad_nonrd != kZeroSad && diff --git a/third_party/aom/av1/encoder/palette.c b/third_party/aom/av1/encoder/palette.c index 7f79e9596e..45b56199c6 100644 --- a/third_party/aom/av1/encoder/palette.c +++ b/third_party/aom/av1/encoder/palette.c @@ -480,7 +480,7 @@ struct ColorCount { int count; }; -int color_count_comp(const void *c1, const void *c2) { +static int color_count_comp(const void *c1, const void *c2) { const struct ColorCount *color_count1 = (const struct ColorCount *)c1; const struct ColorCount *color_count2 = (const struct ColorCount *)c2; if (color_count1->count > color_count2->count) return -1; diff --git a/third_party/aom/av1/encoder/palette.h b/third_party/aom/av1/encoder/palette.h index 7da863a0cc..30886d37ae 100644 --- a/third_party/aom/av1/encoder/palette.h +++ b/third_party/aom/av1/encoder/palette.h @@ -26,7 +26,7 @@ struct PICK_MODE_CONTEXT; struct macroblock; /*!\cond */ -#define AV1_K_MEANS_RENAME(func, dim) func##_dim##dim##_c +#define AV1_K_MEANS_RENAME(func, dim) func##_dim##dim void AV1_K_MEANS_RENAME(av1_k_means, 1)(const int16_t *data, int16_t *centroids, uint8_t *indices, int n, int k, diff --git a/third_party/aom/av1/encoder/partition_search.c b/third_party/aom/av1/encoder/partition_search.c index 
1c17b09ee1..61d49a23f2 100644 --- a/third_party/aom/av1/encoder/partition_search.c +++ b/third_party/aom/av1/encoder/partition_search.c @@ -2144,8 +2144,9 @@ static void encode_b_nonrd(const AV1_COMP *const cpi, TileDataEnc *tile_data, } if (tile_data->allow_update_cdf) update_stats(&cpi->common, td); } - if (cpi->oxcf.q_cfg.aq_mode == CYCLIC_REFRESH_AQ && mbmi->skip_txfm && - !cpi->rc.rtc_external_ratectrl && cm->seg.enabled) + if ((cpi->oxcf.q_cfg.aq_mode == CYCLIC_REFRESH_AQ || + cpi->active_map.enabled) && + mbmi->skip_txfm && !cpi->rc.rtc_external_ratectrl && cm->seg.enabled) av1_cyclic_reset_segment_skip(cpi, x, mi_row, mi_col, bsize, dry_run); // TODO(Ravi/Remya): Move this copy function to a better logical place // This function will copy the best mode information from block @@ -2254,6 +2255,8 @@ static void pick_sb_modes_nonrd(AV1_COMP *const cpi, TileDataEnc *tile_data, const AQ_MODE aq_mode = cpi->oxcf.q_cfg.aq_mode; TxfmSearchInfo *txfm_info = &x->txfm_search_info; int i; + const int seg_skip = + segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP); // This is only needed for real time/allintra row-mt enabled multi-threaded // encoding with cost update frequency set to COST_UPD_TILE/COST_UPD_OFF. @@ -2276,15 +2279,17 @@ static void pick_sb_modes_nonrd(AV1_COMP *const cpi, TileDataEnc *tile_data, } for (i = 0; i < 2; ++i) pd[i].color_index_map = ctx->color_index_map[i]; - x->force_zeromv_skip_for_blk = - get_force_zeromv_skip_flag_for_blk(cpi, x, bsize); + if (!seg_skip) { + x->force_zeromv_skip_for_blk = + get_force_zeromv_skip_flag_for_blk(cpi, x, bsize); - // Source variance may already be computed at superblock level, so no need - // to recompute, unless bsize < sb_size or source_variance is not yet set. - if (!x->force_zeromv_skip_for_blk && - (x->source_variance == UINT_MAX || bsize < cm->seq_params->sb_size)) - x->source_variance = av1_get_perpixel_variance_facade( - cpi, xd, &x->plane[0].src, bsize, AOM_PLANE_Y); + // Source variance may already be computed at superblock level, so no need + // to recompute, unless bsize < sb_size or source_variance is not yet set. + if (!x->force_zeromv_skip_for_blk && + (x->source_variance == UINT_MAX || bsize < cm->seq_params->sb_size)) + x->source_variance = av1_get_perpixel_variance_facade( + cpi, xd, &x->plane[0].src, bsize, AOM_PLANE_Y); + } // Save rdmult before it might be changed, so it can be restored later. const int orig_rdmult = x->rdmult; @@ -2305,16 +2310,13 @@ static void pick_sb_modes_nonrd(AV1_COMP *const cpi, TileDataEnc *tile_data, #if CONFIG_COLLECT_COMPONENT_TIMING start_timing(cpi, nonrd_pick_inter_mode_sb_time); #endif - if (segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) { - RD_STATS invalid_rd; - av1_invalid_rd_stats(&invalid_rd); - // TODO(kyslov): add av1_nonrd_pick_inter_mode_sb_seg_skip - av1_rd_pick_inter_mode_sb_seg_skip(cpi, tile_data, x, mi_row, mi_col, - rd_cost, bsize, ctx, - invalid_rd.rdcost); - } else { - av1_nonrd_pick_inter_mode_sb(cpi, tile_data, x, rd_cost, bsize, ctx); + if (seg_skip) { + x->force_zeromv_skip_for_blk = 1; + // TODO(marpan): Consider adding a function for nonrd: + // av1_nonrd_pick_inter_mode_sb_seg_skip(), instead of setting + // x->force_zeromv_skip flag and entering av1_nonrd_pick_inter_mode_sb().
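Together with the early return added to skip_inter_mode_nonrd() in the nonrd_pickmode.c hunk further up, the seg_skip path set up above reduces mode search on SEG_LVL_SKIP segments to GLOBALMV on LAST_FRAME. A compact restatement of that pruning rule (stand-in enums, not upstream values):

#include <stdbool.h>

enum { GLOBALMV, NEARESTMV, NEARMV };                 // stand-in mode ids
enum { LAST_FRAME = 1, GOLDEN_FRAME, ALTREF_FRAME };  // stand-in ref ids

// true -> this mode/ref pair is skipped on a SEG_LVL_SKIP segment.
static bool seg_skip_prunes(bool seg_skip, int mode, int ref) {
  return seg_skip && (mode != GLOBALMV || ref != LAST_FRAME);
}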
} + av1_nonrd_pick_inter_mode_sb(cpi, tile_data, x, rd_cost, bsize, ctx); #if CONFIG_COLLECT_COMPONENT_TIMING end_timing(cpi, nonrd_pick_inter_mode_sb_time); #endif @@ -2322,10 +2324,12 @@ static void pick_sb_modes_nonrd(AV1_COMP *const cpi, TileDataEnc *tile_data, if (cpi->sf.rt_sf.skip_cdef_sb) { // cdef_strength is initialized to 1 which means skip_cdef, and is updated // here. Check to see if skipping cdef is allowed. + // Always allow cdef_skip for seg_skip = 1. const int allow_cdef_skipping = - cpi->rc.frames_since_key > 10 && !cpi->rc.high_source_sad && - !(x->color_sensitivity[COLOR_SENS_IDX(AOM_PLANE_U)] || - x->color_sensitivity[COLOR_SENS_IDX(AOM_PLANE_V)]); + seg_skip || + (cpi->rc.frames_since_key > 10 && !cpi->rc.high_source_sad && + !(x->color_sensitivity[COLOR_SENS_IDX(AOM_PLANE_U)] || + x->color_sensitivity[COLOR_SENS_IDX(AOM_PLANE_V)])); // Find the corresponding 64x64 block. It'll be the 128x128 block if that's // the block size. diff --git a/third_party/aom/av1/encoder/partition_strategy.c b/third_party/aom/av1/encoder/partition_strategy.c index ce06313579..1d62f128c7 100644 --- a/third_party/aom/av1/encoder/partition_strategy.c +++ b/third_party/aom/av1/encoder/partition_strategy.c @@ -1761,7 +1761,7 @@ void av1_prune_partitions_by_max_min_bsize(SuperBlockEnc *sb_enc, // Decide whether to evaluate the AB partition specified by part_type based on // split and HORZ/VERT info -int evaluate_ab_partition_based_on_split( +static int evaluate_ab_partition_based_on_split( const PC_TREE *pc_tree, PARTITION_TYPE rect_part, const RD_RECT_PART_WIN_INFO *rect_part_win_info, int qindex, int split_idx1, int split_idx2) { diff --git a/third_party/aom/av1/encoder/pass2_strategy.c b/third_party/aom/av1/encoder/pass2_strategy.c index a9442ffc1a..bd8620c2be 100644 --- a/third_party/aom/av1/encoder/pass2_strategy.c +++ b/third_party/aom/av1/encoder/pass2_strategy.c @@ -158,28 +158,12 @@ static int frame_max_bits(const RATE_CONTROL *rc, return (int)max_bits; } -static const double q_pow_term[(QINDEX_RANGE >> 5) + 1] = { 0.65, 0.70, 0.75, - 0.80, 0.85, 0.90, - 0.95, 0.95, 0.95 }; -#define ERR_DIVISOR 96.0 -static double calc_correction_factor(double err_per_mb, int q) { - const double error_term = err_per_mb / ERR_DIVISOR; - const int index = q >> 5; - // Adjustment to power term based on qindex - const double power_term = - q_pow_term[index] + - (((q_pow_term[index + 1] - q_pow_term[index]) * (q % 32)) / 32.0); - assert(error_term >= 0.0); - return fclamp(pow(error_term, power_term), 0.05, 5.0); -} - // Based on history adjust expectations of bits per macroblock. static void twopass_update_bpm_factor(AV1_COMP *cpi, int rate_err_tol) { TWO_PASS *const twopass = &cpi->ppi->twopass; const PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc; // Based on recent history adjust expectations of bits per macroblock.
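Restating the widened allow_cdef_skipping condition from the partition_search.c hunk above as a standalone predicate (a sketch with the field accesses flattened into parameters):

static int allow_cdef_skipping_sketch(int seg_skip, int frames_since_key,
                                      int high_source_sad,
                                      int color_sensitivity_u,
                                      int color_sensitivity_v) {
  // seg_skip now short-circuits the content checks entirely.
  return seg_skip ||
         (frames_since_key > 10 && !high_source_sad &&
          !(color_sensitivity_u || color_sensitivity_v));
}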
- double damp_fac = AOMMAX(5.0, rate_err_tol / 10.0); double rate_err_factor = 1.0; const double adj_limit = AOMMAX(0.2, (double)(100 - rate_err_tol) / 200.0); const double min_fac = 1.0 - adj_limit; @@ -214,9 +198,7 @@ static void twopass_update_bpm_factor(AV1_COMP *cpi, int rate_err_tol) { } int err_estimate = p_rc->rate_error_estimate; - int64_t bits_left = twopass->bits_left; int64_t total_actual_bits = p_rc->total_actual_bits; - int64_t bits_off_target = p_rc->vbr_bits_off_target; double rolling_arf_group_actual_bits = (double)twopass->rolling_arf_group_actual_bits; double rolling_arf_group_target_bits = @@ -231,10 +213,6 @@ static void twopass_update_bpm_factor(AV1_COMP *cpi, int rate_err_tol) { : 0; total_actual_bits = simulate_parallel_frame ? p_rc->temp_total_actual_bits : p_rc->total_actual_bits; - bits_off_target = simulate_parallel_frame ? p_rc->temp_vbr_bits_off_target - : p_rc->vbr_bits_off_target; - bits_left = - simulate_parallel_frame ? p_rc->temp_bits_left : twopass->bits_left; rolling_arf_group_target_bits = (double)(simulate_parallel_frame ? p_rc->temp_rolling_arf_group_target_bits @@ -247,21 +225,21 @@ static void twopass_update_bpm_factor(AV1_COMP *cpi, int rate_err_tol) { : p_rc->rate_error_estimate; #endif - if (p_rc->bits_off_target && total_actual_bits > 0) { - if (cpi->ppi->lap_enabled) { - rate_err_factor = rolling_arf_group_actual_bits / - DOUBLE_DIVIDE_CHECK(rolling_arf_group_target_bits); + if ((p_rc->bits_off_target && total_actual_bits > 0) && + (rolling_arf_group_target_bits >= 1.0)) { + if (rolling_arf_group_actual_bits > rolling_arf_group_target_bits) { + double error_fraction = + (rolling_arf_group_actual_bits - rolling_arf_group_target_bits) / + rolling_arf_group_target_bits; + error_fraction = (error_fraction > 1.0) ? 1.0 : error_fraction; + rate_err_factor = 1.0 + error_fraction; } else { - rate_err_factor = 1.0 - ((double)(bits_off_target) / - AOMMAX(total_actual_bits, bits_left)); + double error_fraction = + (rolling_arf_group_target_bits - rolling_arf_group_actual_bits) / + rolling_arf_group_target_bits; + rate_err_factor = 1.0 - error_fraction; } - // Adjustment is damped if this is 1 pass with look ahead processing - // (as there are only ever a few frames of data) and for all but the first - // GOP in normal two pass. 
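The replacement logic above derives the adjustment purely from the current ARF group: overshoot scales the factor up by the error fraction (capped at 1.0), undershoot scales it down symmetrically. A sketch with worked numbers (helper name invented):

// Mirrors the branch structure above; assumes target >= 1.0.
static double rate_err_factor_sketch(double actual, double target) {
  if (actual > target) {
    double error_fraction = (actual - target) / target;
    if (error_fraction > 1.0) error_fraction = 1.0;  // cap at a 2.0x factor
    return 1.0 + error_fraction;
  }
  return 1.0 - (target - actual) / target;
}
// e.g. actual = 125000, target = 100000 -> 1.25 (25% overshoot)
//      actual =  80000, target = 100000 -> 0.80 (20% undershoot)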
- if ((twopass->bpm_factor != 1.0) || cpi->ppi->lap_enabled) { - rate_err_factor = 1.0 + ((rate_err_factor - 1.0) / damp_fac); - } rate_err_factor = AOMMAX(min_fac, AOMMIN(max_fac, rate_err_factor)); } @@ -270,36 +248,38 @@ static void twopass_update_bpm_factor(AV1_COMP *cpi, int rate_err_tol) { if ((rate_err_factor < 1.0 && err_estimate >= 0) || (rate_err_factor > 1.0 && err_estimate <= 0)) { twopass->bpm_factor *= rate_err_factor; - if (rate_err_tol >= 100) { - twopass->bpm_factor = - AOMMAX(min_fac, AOMMIN(max_fac, twopass->bpm_factor)); - } else { - twopass->bpm_factor = AOMMAX(0.1, AOMMIN(10.0, twopass->bpm_factor)); - } + twopass->bpm_factor = AOMMAX(min_fac, AOMMIN(max_fac, twopass->bpm_factor)); } } -static int qbpm_enumerator(int rate_err_tol) { - return 1200000 + ((300000 * AOMMIN(75, AOMMAX(rate_err_tol - 25, 0))) / 75); +static const double q_div_term[(QINDEX_RANGE >> 5) + 1] = { 32.0, 40.0, 46.0, + 52.0, 56.0, 60.0, + 64.0, 68.0, 72.0 }; +#define EPMB_SCALER 1250000 +static double calc_correction_factor(double err_per_mb, int q) { + double power_term = 0.90; + const int index = q >> 5; + const double divisor = + q_div_term[index] + + (((q_div_term[index + 1] - q_div_term[index]) * (q % 32)) / 32.0); + double error_term = EPMB_SCALER * pow(err_per_mb, power_term); + return error_term / divisor; } // Similar to find_qindex_by_rate() function in ratectrl.c, but includes // calculation of a correction_factor. static int find_qindex_by_rate_with_correction( int desired_bits_per_mb, aom_bit_depth_t bit_depth, double error_per_mb, - double group_weight_factor, int rate_err_tol, int best_qindex, - int worst_qindex) { + double group_weight_factor, int best_qindex, int worst_qindex) { assert(best_qindex <= worst_qindex); int low = best_qindex; int high = worst_qindex; while (low < high) { const int mid = (low + high) >> 1; - const double mid_factor = calc_correction_factor(error_per_mb, mid); + const double q_factor = calc_correction_factor(error_per_mb, mid); const double q = av1_convert_qindex_to_q(mid, bit_depth); - const int enumerator = qbpm_enumerator(rate_err_tol); - const int mid_bits_per_mb = - (int)((enumerator * mid_factor * group_weight_factor) / q); + const int mid_bits_per_mb = (int)((q_factor * group_weight_factor) / q); if (mid_bits_per_mb > desired_bits_per_mb) { low = mid + 1; @@ -359,8 +339,8 @@ static int get_twopass_worst_quality(AV1_COMP *cpi, const double av_frame_err, // content at the given rate. int q = find_qindex_by_rate_with_correction( target_norm_bits_per_mb, cpi->common.seq_params->bit_depth, - av_err_per_mb, cpi->ppi->twopass.bpm_factor, rate_err_tol, - rc->best_quality, rc->worst_quality); + av_err_per_mb, cpi->ppi->twopass.bpm_factor, rc->best_quality, + rc->worst_quality); // Restriction on active max q for constrained quality mode. if (rc_cfg->mode == AOM_CQ) q = AOMMAX(q, rc_cfg->cq_level); @@ -4235,12 +4215,13 @@ void av1_twopass_postencode_update(AV1_COMP *cpi) { twopass->kf_group_bits = AOMMAX(twopass->kf_group_bits, 0); // If the rate control is drifting consider adjustment to min or maxq. - if ((rc_cfg->mode != AOM_Q) && !cpi->rc.is_src_frame_alt_ref) { + if ((rc_cfg->mode != AOM_Q) && !cpi->rc.is_src_frame_alt_ref && + (p_rc->rolling_target_bits > 0)) { int minq_adj_limit; int maxq_adj_limit; minq_adj_limit = (rc_cfg->mode == AOM_CQ ? 
MINQ_ADJ_LIMIT_CQ : MINQ_ADJ_LIMIT); - maxq_adj_limit = rc->worst_quality - rc->active_worst_quality; + maxq_adj_limit = (rc->worst_quality - rc->active_worst_quality); // Undershoot if ((rc_cfg->under_shoot_pct < 100) && (p_rc->rolling_actual_bits < p_rc->rolling_target_bits)) { @@ -4252,8 +4233,9 @@ void av1_twopass_postencode_update(AV1_COMP *cpi) { if ((pct_error >= rc_cfg->under_shoot_pct) && (p_rc->rate_error_estimate > 0)) { twopass->extend_minq += 1; + twopass->extend_maxq -= 1; } - twopass->extend_maxq -= 1; + // Overshoot } else if ((rc_cfg->over_shoot_pct < 100) && (p_rc->rolling_actual_bits > p_rc->rolling_target_bits)) { @@ -4265,18 +4247,8 @@ void av1_twopass_postencode_update(AV1_COMP *cpi) { if ((pct_error >= rc_cfg->over_shoot_pct) && (p_rc->rate_error_estimate < 0)) { twopass->extend_maxq += 1; + twopass->extend_minq -= 1; } - twopass->extend_minq -= 1; - } else { - // Adjustment for extreme local overshoot. - // Only applies when normal adjustment above is not used (e.g. - // when threshold is set to 100). - if (rc->projected_frame_size > (2 * rc->base_frame_target) && - rc->projected_frame_size > (2 * rc->avg_frame_bandwidth)) - ++twopass->extend_maxq; - // Unwind extreme overshoot adjustment. - else if (p_rc->rolling_target_bits > p_rc->rolling_actual_bits) - --twopass->extend_maxq; } twopass->extend_minq = clamp(twopass->extend_minq, -minq_adj_limit, minq_adj_limit); diff --git a/third_party/aom/av1/encoder/pickcdef.c b/third_party/aom/av1/encoder/pickcdef.c index 232a2f9edb..ed5fa55f17 100644 --- a/third_party/aom/av1/encoder/pickcdef.c +++ b/third_party/aom/av1/encoder/pickcdef.c @@ -894,7 +894,7 @@ void av1_cdef_search(AV1_COMP *cpi) { int rdmult = cpi->td.mb.rdmult; for (int i = 0; i <= 3; i++) { if (i > max_signaling_bits) break; - int best_lev0[CDEF_MAX_STRENGTHS]; + int best_lev0[CDEF_MAX_STRENGTHS] = { 0 }; int best_lev1[CDEF_MAX_STRENGTHS] = { 0 }; const int nb_strengths = 1 << i; uint64_t tot_mse; diff --git a/third_party/aom/av1/encoder/picklpf.c b/third_party/aom/av1/encoder/picklpf.c index 9084d3f13a..a504535028 100644 --- a/third_party/aom/av1/encoder/picklpf.c +++ b/third_party/aom/av1/encoder/picklpf.c @@ -27,12 +27,25 @@ #include "av1/encoder/encoder.h" #include "av1/encoder/picklpf.h" +// AV1 loop filter applies to the whole frame according to mi_rows and mi_cols, +// which are calculated based on aligned width and aligned height. +// In addition, if super res is enabled, it copies the whole frame +// according to the aligned width and height (av1_superres_upscale()). +// So we need to copy the whole filtered region, instead of the cropped region. +// For example, input image size is: 160x90. +// Then src->y_crop_width = 160, src->y_crop_height = 90. +// The aligned frame size is: src->y_width = 160, src->y_height = 96. +// AV1 aligns the frame size to a multiple of 8 so that, if there is +// chroma subsampling, the chroma also covers an integer number of mi +// units. An mi unit is 4x4, 8 = 4 * 2, and 2 luma mi +// units correspond to 1 chroma mi unit if there is subsampling. +// See: aom_realloc_frame_buffer() in yv12config.c.
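A quick check of the sizing example in the new picklpf.c comment (illustrative helper, not upstream code):

#include <assert.h>

static int align_up_8(int v) { return (v + 7) & ~7; }

static void aligned_size_demo(void) {
  // 160x90 input: the width is already a multiple of 8, the height rounds
  // up to 96, so the filtered (aligned) region is taller than the cropped
  // region that a crop-only copy would cover.
  assert(align_up_8(160) == 160);
  assert(align_up_8(90) == 96);
}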
static void yv12_copy_plane(const YV12_BUFFER_CONFIG *src_bc, YV12_BUFFER_CONFIG *dst_bc, int plane) { switch (plane) { - case 0: aom_yv12_copy_y(src_bc, dst_bc); break; - case 1: aom_yv12_copy_u(src_bc, dst_bc); break; - case 2: aom_yv12_copy_v(src_bc, dst_bc); break; + case 0: aom_yv12_copy_y(src_bc, dst_bc, 0); break; + case 1: aom_yv12_copy_u(src_bc, dst_bc, 0); break; + case 2: aom_yv12_copy_v(src_bc, dst_bc, 0); break; default: assert(plane >= 0 && plane <= 2); break; } } @@ -311,7 +324,7 @@ void av1_pick_filter_level(const YV12_BUFFER_CONFIG *sd, AV1_COMP *cpi, &cpi->last_frame_uf, cm->width, cm->height, seq_params->subsampling_x, seq_params->subsampling_y, seq_params->use_highbitdepth, cpi->oxcf.border_in_pixels, - cm->features.byte_alignment, NULL, NULL, NULL, 0, 0)) + cm->features.byte_alignment, NULL, NULL, NULL, false, 0)) aom_internal_error(cm->error, AOM_CODEC_MEM_ERROR, "Failed to allocate last frame buffer"); diff --git a/third_party/aom/av1/encoder/pickrst.c b/third_party/aom/av1/encoder/pickrst.c index 6429064175..b0d0d0bb78 100644 --- a/third_party/aom/av1/encoder/pickrst.c +++ b/third_party/aom/av1/encoder/pickrst.c @@ -1103,6 +1103,39 @@ static INLINE int wrap_index(int i, int wiener_win) { return (i >= wiener_halfwin1 ? wiener_win - 1 - i : i); } +// Splits each w[i] into smaller components w1[i] and w2[i] such that +// w[i] = w1[i] * WIENER_TAP_SCALE_FACTOR + w2[i]. +static INLINE void split_wiener_filter_coefficients(int wiener_win, + const int32_t *w, + int32_t *w1, int32_t *w2) { + for (int i = 0; i < wiener_win; i++) { + w1[i] = w[i] / WIENER_TAP_SCALE_FACTOR; + w2[i] = w[i] - w1[i] * WIENER_TAP_SCALE_FACTOR; + assert(w[i] == w1[i] * WIENER_TAP_SCALE_FACTOR + w2[i]); + } +} + +// Calculates x * w / WIENER_TAP_SCALE_FACTOR, where +// w = w1 * WIENER_TAP_SCALE_FACTOR + w2. +// +// The multiplication x * w may overflow, so we multiply x by the components of +// w (w1 and w2) and combine the multiplication with the division. +static INLINE int64_t multiply_and_scale(int64_t x, int32_t w1, int32_t w2) { + // Let y = x * w / WIENER_TAP_SCALE_FACTOR + // = x * (w1 * WIENER_TAP_SCALE_FACTOR + w2) / WIENER_TAP_SCALE_FACTOR + const int64_t y = x * w1 + x * w2 / WIENER_TAP_SCALE_FACTOR; + // Double-check the calculation using __int128. + // TODO(wtc): Remove after 2024-04-30. 
+#if !defined(NDEBUG) && defined(__GNUC__) && defined(__LP64__) + const int32_t w = w1 * WIENER_TAP_SCALE_FACTOR + w2; + const __int128 z = (__int128)x * w / WIENER_TAP_SCALE_FACTOR; + assert(z >= INT64_MIN); + assert(z <= INT64_MAX); + assert(y == (int64_t)z); +#endif + return y; +} + // Solve linear equations to find Wiener filter tap values // Taps are output scaled by WIENER_FILT_STEP static int linsolve_wiener(int n, int64_t *A, int stride, int64_t *b, @@ -1175,10 +1208,12 @@ static int linsolve_wiener(int n, int64_t *A, int stride, int64_t *b, // Fix vector b, update vector a static AOM_INLINE void update_a_sep_sym(int wiener_win, int64_t **Mc, - int64_t **Hc, int32_t *a, int32_t *b) { + int64_t **Hc, int32_t *a, + const int32_t *b) { int i, j; int64_t S[WIENER_WIN]; int64_t A[WIENER_HALFWIN1], B[WIENER_HALFWIN1 * WIENER_HALFWIN1]; + int32_t b1[WIENER_WIN], b2[WIENER_WIN]; const int wiener_win2 = wiener_win * wiener_win; const int wiener_halfwin1 = (wiener_win >> 1) + 1; memset(A, 0, sizeof(A)); @@ -1189,16 +1224,7 @@ static AOM_INLINE void update_a_sep_sym(int wiener_win, int64_t **Mc, A[jj] += Mc[i][j] * b[i] / WIENER_TAP_SCALE_FACTOR; } } - - // b/274668506: This is the dual branch for the issue in b/272139363. The fix - // is similar. See comments in update_b_sep_sym() below. - int32_t max_b_l = 0; - for (int l = 0; l < wiener_win; ++l) { - const int32_t abs_b_l = abs(b[l]); - if (abs_b_l > max_b_l) max_b_l = abs_b_l; - } - const int scale_threshold = 128 * WIENER_TAP_SCALE_FACTOR; - const int scaler = max_b_l < scale_threshold ? 1 : 4; + split_wiener_filter_coefficients(wiener_win, b, b1, b2); for (i = 0; i < wiener_win; i++) { for (j = 0; j < wiener_win; j++) { @@ -1207,10 +1233,17 @@ static AOM_INLINE void update_a_sep_sym(int wiener_win, int64_t **Mc, const int kk = wrap_index(k, wiener_win); for (l = 0; l < wiener_win; ++l) { const int ll = wrap_index(l, wiener_win); - B[ll * wiener_halfwin1 + kk] += - Hc[j * wiener_win + i][k * wiener_win2 + l] * b[i] / - (scaler * WIENER_TAP_SCALE_FACTOR) * b[j] / - (WIENER_TAP_SCALE_FACTOR / scaler); + // Calculate + // B[ll * wiener_halfwin1 + kk] += + // Hc[j * wiener_win + i][k * wiener_win2 + l] * b[i] / + // WIENER_TAP_SCALE_FACTOR * b[j] / WIENER_TAP_SCALE_FACTOR; + // + // The last multiplication may overflow, so we combine the last + // multiplication with the last division. + const int64_t x = Hc[j * wiener_win + i][k * wiener_win2 + l] * b[i] / + WIENER_TAP_SCALE_FACTOR; + // b[j] = b1[j] * WIENER_TAP_SCALE_FACTOR + b2[j] + B[ll * wiener_halfwin1 + kk] += multiply_and_scale(x, b1[j], b2[j]); } } } @@ -1246,10 +1279,12 @@ static AOM_INLINE void update_a_sep_sym(int wiener_win, int64_t **Mc, // Fix vector a, update vector b static AOM_INLINE void update_b_sep_sym(int wiener_win, int64_t **Mc, - int64_t **Hc, int32_t *a, int32_t *b) { + int64_t **Hc, const int32_t *a, + int32_t *b) { int i, j; int64_t S[WIENER_WIN]; int64_t A[WIENER_HALFWIN1], B[WIENER_HALFWIN1 * WIENER_HALFWIN1]; + int32_t a1[WIENER_WIN], a2[WIENER_WIN]; const int wiener_win2 = wiener_win * wiener_win; const int wiener_halfwin1 = (wiener_win >> 1) + 1; memset(A, 0, sizeof(A)); @@ -1260,32 +1295,7 @@ static AOM_INLINE void update_b_sep_sym(int wiener_win, int64_t **Mc, A[ii] += Mc[i][j] * a[j] / WIENER_TAP_SCALE_FACTOR; } } - - // b/272139363: The computation, - // Hc[i * wiener_win + j][k * wiener_win2 + l] * a[k] / - // WIENER_TAP_SCALE_FACTOR * a[l] / WIENER_TAP_SCALE_FACTOR; - // may generate a signed-integer-overflow. 
Conditionally scale the terms to - // avoid a potential overflow. - // - // Hc contains accumulated correlation statistics and it is desired to leave - // as much room as possible for Hc. It was experimentally observed that the - // primary issue manifests itself with the second, a[l], multiply. For - // max_a_l < WIENER_TAP_SCALE_FACTOR the first multiply with a[k] should not - // increase dynamic range and the second multiply should hence be safe. - // Thereafter a safe scale_threshold depends on the actual operational range - // of Hc. The largest scale_threshold is expected to depend on bit-depth - // (av1_compute_stats_highbd_c() scales highbd to 8-bit) and maximum - // restoration-unit size (256), leading up to 32-bit positive numbers in Hc. - // Noting that the caller, wiener_decompose_sep_sym(), initializes a[...] - // to a range smaller than 16 bits, the scale_threshold is set as below for - // convenience. - int32_t max_a_l = 0; - for (int l = 0; l < wiener_win; ++l) { - const int32_t abs_a_l = abs(a[l]); - if (abs_a_l > max_a_l) max_a_l = abs_a_l; - } - const int scale_threshold = 128 * WIENER_TAP_SCALE_FACTOR; - const int scaler = max_a_l < scale_threshold ? 1 : 4; + split_wiener_filter_coefficients(wiener_win, a, a1, a2); for (i = 0; i < wiener_win; i++) { const int ii = wrap_index(i, wiener_win); @@ -1294,10 +1304,17 @@ static AOM_INLINE void update_b_sep_sym(int wiener_win, int64_t **Mc, int k, l; for (k = 0; k < wiener_win; ++k) { for (l = 0; l < wiener_win; ++l) { - B[jj * wiener_halfwin1 + ii] += - Hc[i * wiener_win + j][k * wiener_win2 + l] * a[k] / - (scaler * WIENER_TAP_SCALE_FACTOR) * a[l] / - (WIENER_TAP_SCALE_FACTOR / scaler); + // Calculate + // B[jj * wiener_halfwin1 + ii] += + // Hc[i * wiener_win + j][k * wiener_win2 + l] * a[k] / + // WIENER_TAP_SCALE_FACTOR * a[l] / WIENER_TAP_SCALE_FACTOR; + // + // The last multiplication may overflow, so we combine the last + // multiplication with the last division. 
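The split used above keeps everything in 64 bits: with S = WIENER_TAP_SCALE_FACTOR, a tap w is decomposed as w = w1*S + w2, so x*w/S can be computed as x*w1 + x*w2/S without ever forming x*w. A self-contained sketch with a worked check (S is a stand-in value; the demo numbers are invented):

#include <assert.h>
#include <stdint.h>

#define S 65536  // stand-in for WIENER_TAP_SCALE_FACTOR

static int64_t multiply_and_scale_sketch(int64_t x, int32_t w) {
  const int32_t w1 = w / S;       // high part
  const int32_t w2 = w - w1 * S;  // low part, so w == w1 * S + w2
  // x * w / S == x * w1 + x * w2 / S for the sign patterns exercised here,
  // and neither partial product can overflow the way a direct x * w could.
  return x * w1 + x * w2 / S;
}

static void multiply_and_scale_demo(void) {
  // w = 2 * S + 2, x = 3: direct form is 393222 / 65536 = 6 (truncated);
  // split form is 3 * 2 + (3 * 2) / 65536 = 6.
  assert(multiply_and_scale_sketch(3, 2 * S + 2) == 6);
}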
+ const int64_t x = Hc[i * wiener_win + j][k * wiener_win2 + l] * a[k] / + WIENER_TAP_SCALE_FACTOR; + // a[l] = a1[l] * WIENER_TAP_SCALE_FACTOR + a2[l] + B[jj * wiener_halfwin1 + ii] += multiply_and_scale(x, a1[l], a2[l]); } } } @@ -2050,7 +2067,7 @@ void av1_pick_filter_restoration(const YV12_BUFFER_CONFIG *src, AV1_COMP *cpi) { &cpi->trial_frame_rst, cm->superres_upscaled_width, cm->superres_upscaled_height, seq_params->subsampling_x, seq_params->subsampling_y, highbd, AOM_RESTORATION_FRAME_BORDER, - cm->features.byte_alignment, NULL, NULL, NULL, 0, 0)) + cm->features.byte_alignment, NULL, NULL, NULL, false, 0)) aom_internal_error(cm->error, AOM_CODEC_MEM_ERROR, "Failed to allocate trial restored frame buffer"); diff --git a/third_party/aom/av1/encoder/ratectrl.c b/third_party/aom/av1/encoder/ratectrl.c index df86380272..7639484df5 100644 --- a/third_party/aom/av1/encoder/ratectrl.c +++ b/third_party/aom/av1/encoder/ratectrl.c @@ -30,6 +30,7 @@ #include "av1/common/seg_common.h" #include "av1/encoder/encodemv.h" +#include "av1/encoder/encoder_utils.h" #include "av1/encoder/encode_strategy.h" #include "av1/encoder/gop_structure.h" #include "av1/encoder/random.h" @@ -405,10 +406,10 @@ void av1_primary_rc_init(const AV1EncoderConfig *oxcf, p_rc->rate_correction_factors[KF_STD] = 1.0; p_rc->bits_off_target = p_rc->starting_buffer_level; - p_rc->rolling_target_bits = - (int)(oxcf->rc_cfg.target_bandwidth / oxcf->input_cfg.init_framerate); - p_rc->rolling_actual_bits = - (int)(oxcf->rc_cfg.target_bandwidth / oxcf->input_cfg.init_framerate); + p_rc->rolling_target_bits = AOMMAX( + 1, (int)(oxcf->rc_cfg.target_bandwidth / oxcf->input_cfg.init_framerate)); + p_rc->rolling_actual_bits = AOMMAX( + 1, (int)(oxcf->rc_cfg.target_bandwidth / oxcf->input_cfg.init_framerate)); } void av1_rc_init(const AV1EncoderConfig *oxcf, RATE_CONTROL *rc) { @@ -439,6 +440,7 @@ void av1_rc_init(const AV1EncoderConfig *oxcf, RATE_CONTROL *rc) { rc->rtc_external_ratectrl = 0; rc->frame_level_fast_extra_bits = 0; rc->use_external_qp_one_pass = 0; + rc->percent_blocks_inactive = 0; } static bool check_buffer_below_thresh(AV1_COMP *cpi, int64_t buffer_level, @@ -1719,41 +1721,39 @@ static void adjust_active_best_and_worst_quality(const AV1_COMP *cpi, const AV1_COMMON *const cm = &cpi->common; const RATE_CONTROL *const rc = &cpi->rc; const PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc; - const RefreshFrameInfo *const refresh_frame = &cpi->refresh_frame; int active_best_quality = *active_best; int active_worst_quality = *active_worst; #if CONFIG_FPMT_TEST - const int simulate_parallel_frame = - cpi->ppi->gf_group.frame_parallel_level[cpi->gf_frame_index] > 0 && - cpi->ppi->fpmt_unit_test_cfg == PARALLEL_SIMULATION_ENCODE; - int extend_minq = simulate_parallel_frame ? p_rc->temp_extend_minq - : cpi->ppi->twopass.extend_minq; - int extend_maxq = simulate_parallel_frame ? p_rc->temp_extend_maxq - : cpi->ppi->twopass.extend_maxq; #endif // Extension to max or min Q if undershoot or overshoot is outside // the permitted range. if (cpi->oxcf.rc_cfg.mode != AOM_Q) { +#if CONFIG_FPMT_TEST + const int simulate_parallel_frame = + cpi->ppi->gf_group.frame_parallel_level[cpi->gf_frame_index] > 0 && + cpi->ppi->fpmt_unit_test_cfg == PARALLEL_SIMULATION_ENCODE; + const int extend_minq = simulate_parallel_frame + ? p_rc->temp_extend_minq + : cpi->ppi->twopass.extend_minq; + const int extend_maxq = simulate_parallel_frame + ? 
p_rc->temp_extend_maxq + : cpi->ppi->twopass.extend_maxq; + const RefreshFrameInfo *const refresh_frame = &cpi->refresh_frame; if (frame_is_intra_only(cm) || (!rc->is_src_frame_alt_ref && (refresh_frame->golden_frame || is_intrl_arf_boost || refresh_frame->alt_ref_frame))) { -#if CONFIG_FPMT_TEST active_best_quality -= extend_minq; active_worst_quality += (extend_maxq / 2); -#else - active_best_quality -= cpi->ppi->twopass.extend_minq / 4; - active_worst_quality += (cpi->ppi->twopass.extend_maxq / 2); -#endif } else { -#if CONFIG_FPMT_TEST active_best_quality -= extend_minq / 2; active_worst_quality += extend_maxq; + } #else - active_best_quality -= cpi->ppi->twopass.extend_minq / 4; - active_worst_quality += cpi->ppi->twopass.extend_maxq; + (void)is_intrl_arf_boost; + active_best_quality -= cpi->ppi->twopass.extend_minq / 8; + active_worst_quality += cpi->ppi->twopass.extend_maxq / 4; #endif - } } #ifndef STRICT_RC @@ -2991,6 +2991,24 @@ void av1_set_rtc_reference_structure_one_layer(AV1_COMP *cpi, int gf_update) { cpi->rt_reduce_num_ref_buffers &= (rtc_ref->ref_idx[2] < 7); } +static int set_block_is_active(unsigned char *const active_map_4x4, int mi_cols, + int mi_rows, int sbi_col, int sbi_row, int sh, + int num_4x4) { + int r = sbi_row << sh; + int c = sbi_col << sh; + const int row_max = AOMMIN(num_4x4, mi_rows - r); + const int col_max = AOMMIN(num_4x4, mi_cols - c); + // Active map is set for 16x16 blocks, so we only need to + // check one 4x4 position per 16x16 block. + for (int x = 0; x < row_max; x += 4) { + for (int y = 0; y < col_max; y += 4) { + if (active_map_4x4[(r + x) * mi_cols + (c + y)] == AM_SEGMENT_ID_ACTIVE) + return 1; + } + } + return 0; +} + /*!\brief Check for scene detection, for 1 pass real-time mode. * * Compute average source sad (temporal sad: between current source and @@ -3093,11 +3111,26 @@ static void rc_scene_detection_onepass_rt(AV1_COMP *cpi, sizeof(*cpi->src_sad_blk_64x64))); } } + const CommonModeInfoParams *const mi_params = &cpi->common.mi_params; + const int mi_cols = mi_params->mi_cols; + const int mi_rows = mi_params->mi_rows; + int sh = (cm->seq_params->sb_size == BLOCK_128X128) ? 5 : 4; + int num_4x4 = (cm->seq_params->sb_size == BLOCK_128X128) ? 32 : 16; + unsigned char *const active_map_4x4 = cpi->active_map.map; // Avoid bottom and right border. for (int sbi_row = 0; sbi_row < sb_rows - border; ++sbi_row) { for (int sbi_col = 0; sbi_col < sb_cols; ++sbi_col) { - tmp_sad = cpi->ppi->fn_ptr[bsize].sdf(src_y, src_ystride, last_src_y, - last_src_ystride); + int block_is_active = 1; + if (cpi->active_map.enabled && rc->percent_blocks_inactive > 0) { + block_is_active = set_block_is_active(active_map_4x4, mi_cols, mi_rows, + sbi_col, sbi_row, sh, num_4x4); + } + if (block_is_active) { + tmp_sad = cpi->ppi->fn_ptr[bsize].sdf(src_y, src_ystride, last_src_y, + last_src_ystride); + } else { + tmp_sad = 0; + } if (cpi->src_sad_blk_64x64 != NULL) cpi->src_sad_blk_64x64[sbi_col + sbi_row * sb_cols] = tmp_sad; if (check_light_change) { @@ -3456,8 +3489,13 @@ void av1_get_one_pass_rt_params(AV1_COMP *cpi, FRAME_TYPE *const frame_type, } } } - // Check for scene change: for SVC check on base spatial layer only.
- if (cpi->sf.rt_sf.check_scene_detection && svc->spatial_layer_id == 0) { + if (cpi->active_map.enabled && cpi->rc.percent_blocks_inactive == 100) { + rc->frame_source_sad = 0; + rc->avg_source_sad = (3 * rc->avg_source_sad + rc->frame_source_sad) >> 2; + rc->percent_blocks_with_motion = 0; + rc->high_source_sad = 0; + } else if (cpi->sf.rt_sf.check_scene_detection && + svc->spatial_layer_id == 0) { if (rc->prev_coded_width == cm->width && rc->prev_coded_height == cm->height) { rc_scene_detection_onepass_rt(cpi, frame_input); @@ -3522,6 +3560,10 @@ void av1_get_one_pass_rt_params(AV1_COMP *cpi, FRAME_TYPE *const frame_type, } } +#define CHECK_INTER_LAYER_PRED(ref_frame) \ + ((cpi->ref_frame_flags & av1_ref_frame_flag_list[ref_frame]) && \ + (av1_check_ref_is_low_spatial_res_super_frame(cpi, ref_frame))) + int av1_encodedframe_overshoot_cbr(AV1_COMP *cpi, int *q) { AV1_COMMON *const cm = &cpi->common; PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc; @@ -3532,12 +3574,26 @@ int av1_encodedframe_overshoot_cbr(AV1_COMP *cpi, int *q) { int target_bits_per_mb; double q2; int enumerator; + int inter_layer_pred_on = 0; int is_screen_content = (cpi->oxcf.tune_cfg.content == AOM_CONTENT_SCREEN); - *q = (3 * cpi->rc.worst_quality + *q) >> 2; - // For screen content use the max-q set by the user to allow for less - // overshoot on slide changes. - if (is_screen_content) *q = cpi->rc.worst_quality; cpi->cyclic_refresh->counter_encode_maxq_scene_change = 0; + if (cpi->svc.spatial_layer_id > 0) { + // For spatial layers: check if inter-layer (spatial) prediction is used + // (check if any reference being used comes from the lower spatial layer). + inter_layer_pred_on = CHECK_INTER_LAYER_PRED(LAST_FRAME) || + CHECK_INTER_LAYER_PRED(GOLDEN_FRAME) || + CHECK_INTER_LAYER_PRED(ALTREF_FRAME); + } + // If inter-layer prediction is on: we expect to pull up the quality from + // the lower spatial layer, so we can use a lower q. + if (cpi->svc.spatial_layer_id > 0 && inter_layer_pred_on) { + *q = (cpi->rc.worst_quality + *q) >> 1; + } else { + *q = (3 * cpi->rc.worst_quality + *q) >> 2; + // For screen content use the max-q set by the user to allow for less + // overshoot on slide changes. + if (is_screen_content) *q = cpi->rc.worst_quality; + } // Adjust avg_frame_qindex, buffer_level, and rate correction factors, as // these parameters will affect QP selection for subsequent frames. If they // have settled down to a very different (low QP) state, then not adjusting @@ -3566,8 +3622,10 @@ int av1_encodedframe_overshoot_cbr(AV1_COMP *cpi, int *q) { rate_correction_factor; } // For temporal layers: reset the rate control parameters across all - // temporal layers. + // temporal layers. For spatial enhancement layers, only do it when + // inter_layer_pred_on is not set.
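+ // That is: always reset for the base spatial layer; for spatial + // enhancement layers reset only when inter-layer prediction is off, + // since otherwise the lower layer is expected to pull the quality back up.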
+ if (cpi->svc.number_temporal_layers > 1 && + (cpi->svc.spatial_layer_id == 0 || inter_layer_pred_on == 0)) { SVC *svc = &cpi->svc; for (int tl = 0; tl < svc->number_temporal_layers; ++tl) { int sl = svc->spatial_layer_id; diff --git a/third_party/aom/av1/encoder/ratectrl.h b/third_party/aom/av1/encoder/ratectrl.h index 6802ad42d0..5121a909f4 100644 --- a/third_party/aom/av1/encoder/ratectrl.h +++ b/third_party/aom/av1/encoder/ratectrl.h @@ -249,6 +249,9 @@ typedef struct { // signals if number of blocks with motion is high int percent_blocks_with_motion; + // signals percentage of 16x16 blocks that are inactive, via active_maps + int percent_blocks_inactive; + // Maximum value of source sad across all blocks of frame. uint64_t max_block_source_sad; diff --git a/third_party/aom/av1/encoder/speed_features.c b/third_party/aom/av1/encoder/speed_features.c index 63d69cadc5..256b6fc9eb 100644 --- a/third_party/aom/av1/encoder/speed_features.c +++ b/third_party/aom/av1/encoder/speed_features.c @@ -1177,6 +1177,7 @@ static void set_good_speed_features_framesize_independent( sf->mv_sf.subpel_search_method = SUBPEL_TREE_PRUNED_MORE; sf->gm_sf.prune_zero_mv_with_sse = 2; + sf->gm_sf.downsample_level = 1; sf->part_sf.simple_motion_search_prune_agg = allow_screen_content_tools ? SIMPLE_AGG_LVL0 : SIMPLE_AGG_LVL2; @@ -1282,6 +1283,8 @@ static void set_good_speed_features_framesize_independent( sf->hl_sf.disable_extra_sc_testing = 1; sf->hl_sf.second_alt_ref_filtering = 0; + sf->gm_sf.downsample_level = 2; + sf->inter_sf.prune_inter_modes_based_on_tpl = boosted ? 0 : 3; sf->inter_sf.selective_ref_frame = 6; sf->inter_sf.prune_single_ref = is_boosted_arf2_bwd_type ? 0 : 2; @@ -1465,6 +1468,7 @@ static void set_rt_speed_feature_framesize_dependent(const AV1_COMP *const cpi, if (is_360p_or_larger) { sf->part_sf.fixed_partition_size = BLOCK_32X32; sf->rt_sf.use_fast_fixed_part = 1; + sf->mv_sf.subpel_force_stop = HALF_PEL; } sf->rt_sf.increase_source_sad_thresh = 1; sf->rt_sf.part_early_exit_zeromv = 2; @@ -1472,6 +1476,7 @@ static void set_rt_speed_feature_framesize_dependent(const AV1_COMP *const cpi, for (int i = 0; i < BLOCK_SIZES; ++i) { sf->rt_sf.intra_y_mode_bsize_mask_nrd[i] = INTRA_DC; } + sf->rt_sf.hybrid_intra_pickmode = 0; } // Setting for SVC, or when the ref_frame_config control is // used to set the reference structure. 
@@ -1572,13 +1577,13 @@ static void set_rt_speed_feature_framesize_dependent(const AV1_COMP *const cpi, sf->rt_sf.screen_content_cdef_filter_qindex_thresh = 80; sf->rt_sf.part_early_exit_zeromv = 1; sf->rt_sf.nonrd_aggressive_skip = 1; + sf->rt_sf.thresh_active_maps_skip_lf_cdef = 90; } if (speed >= 11) { sf->rt_sf.skip_lf_screen = 2; sf->rt_sf.skip_cdef_sb = 2; sf->rt_sf.part_early_exit_zeromv = 2; sf->rt_sf.prune_palette_nonrd = 1; - sf->rt_sf.set_zeromv_skip_based_on_source_sad = 2; sf->rt_sf.increase_color_thresh_palette = 0; } sf->rt_sf.use_nonrd_altref_frame = 0; @@ -1974,6 +1979,7 @@ static AOM_INLINE void init_gm_sf(GLOBAL_MOTION_SPEED_FEATURES *gm_sf) { gm_sf->prune_ref_frame_for_gm_search = 0; gm_sf->prune_zero_mv_with_sse = 0; gm_sf->disable_gm_search_based_on_stats = 0; + gm_sf->downsample_level = 0; gm_sf->num_refinement_steps = GM_MAX_REFINEMENT_STEPS; } @@ -2270,6 +2276,7 @@ static AOM_INLINE void init_rt_sf(REAL_TIME_SPEED_FEATURES *rt_sf) { rt_sf->part_early_exit_zeromv = 0; rt_sf->sse_early_term_inter_search = EARLY_TERM_DISABLED; rt_sf->skip_lf_screen = 0; + rt_sf->thresh_active_maps_skip_lf_cdef = 100; rt_sf->sad_based_adp_altref_lag = 0; rt_sf->partition_direct_merging = 0; rt_sf->var_part_based_on_qidx = 0; diff --git a/third_party/aom/av1/encoder/speed_features.h b/third_party/aom/av1/encoder/speed_features.h index 60c000e4f4..d59cb38a71 100644 --- a/third_party/aom/av1/encoder/speed_features.h +++ b/third_party/aom/av1/encoder/speed_features.h @@ -587,6 +587,9 @@ typedef struct GLOBAL_MOTION_SPEED_FEATURES { // GF group int disable_gm_search_based_on_stats; + // Downsampling pyramid level to use for global motion estimation + int downsample_level; + // Number of refinement steps to apply after initial model generation int num_refinement_steps; } GLOBAL_MOTION_SPEED_FEATURES; @@ -1771,6 +1774,10 @@ typedef struct REAL_TIME_SPEED_FEATURES { // where rc->high_source_sad = 0 (no slide-changes). int skip_lf_screen; + // Threshold on the active/inactive region percent to disable + // the loopfilter and cdef. Setting to 100 disables this feature. + int thresh_active_maps_skip_lf_cdef; + // For nonrd: early exit out of variance partition that sets the // block size to superblock size, and sets mode to zeromv-last skip. // 0: disabled diff --git a/third_party/aom/av1/encoder/superres_scale.c b/third_party/aom/av1/encoder/superres_scale.c index 3b47909b15..41225d55ae 100644 --- a/third_party/aom/av1/encoder/superres_scale.c +++ b/third_party/aom/av1/encoder/superres_scale.c @@ -404,7 +404,7 @@ void av1_superres_post_encode(AV1_COMP *cpi) { assert(!is_lossless_requested(&cpi->oxcf.rc_cfg)); assert(!cm->features.all_lossless); - av1_superres_upscale(cm, NULL, cpi->image_pyramid_levels); + av1_superres_upscale(cm, NULL, cpi->alloc_pyramid); // If regular resizing is occurring the source will need to be downscaled to // match the upscaled superres resolution. 
Otherwise the original source is diff --git a/third_party/aom/av1/encoder/svc_layercontext.c b/third_party/aom/av1/encoder/svc_layercontext.c index 2c99cb89b8..33da3afbd3 100644 --- a/third_party/aom/av1/encoder/svc_layercontext.c +++ b/third_party/aom/av1/encoder/svc_layercontext.c @@ -203,8 +203,10 @@ void av1_update_temporal_layer_framerate(AV1_COMP *const cpi) { } } -static AOM_INLINE bool check_ref_is_low_spatial_res_super_frame( - int ref_frame, const SVC *svc, const RTC_REF *rtc_ref) { +bool av1_check_ref_is_low_spatial_res_super_frame(AV1_COMP *const cpi, + int ref_frame) { + SVC *svc = &cpi->svc; + RTC_REF *const rtc_ref = &cpi->ppi->rtc_ref; int ref_frame_idx = rtc_ref->ref_idx[ref_frame - 1]; return rtc_ref->buffer_time_index[ref_frame_idx] == svc->current_superframe && rtc_ref->buffer_spatial_layer[ref_frame_idx] <= @@ -253,13 +255,13 @@ void av1_restore_layer_context(AV1_COMP *const cpi) { // previous spatial layer(s) at the same time (current_superframe). if (rtc_ref->set_ref_frame_config && svc->force_zero_mode_spatial_ref && cpi->sf.rt_sf.use_nonrd_pick_mode) { - if (check_ref_is_low_spatial_res_super_frame(LAST_FRAME, svc, rtc_ref)) { + if (av1_check_ref_is_low_spatial_res_super_frame(cpi, LAST_FRAME)) { svc->skip_mvsearch_last = 1; } - if (check_ref_is_low_spatial_res_super_frame(GOLDEN_FRAME, svc, rtc_ref)) { + if (av1_check_ref_is_low_spatial_res_super_frame(cpi, GOLDEN_FRAME)) { svc->skip_mvsearch_gf = 1; } - if (check_ref_is_low_spatial_res_super_frame(ALTREF_FRAME, svc, rtc_ref)) { + if (av1_check_ref_is_low_spatial_res_super_frame(cpi, ALTREF_FRAME)) { svc->skip_mvsearch_altref = 1; } } diff --git a/third_party/aom/av1/encoder/svc_layercontext.h b/third_party/aom/av1/encoder/svc_layercontext.h index 93118be2d4..d56ea77791 100644 --- a/third_party/aom/av1/encoder/svc_layercontext.h +++ b/third_party/aom/av1/encoder/svc_layercontext.h @@ -223,6 +223,21 @@ void av1_update_layer_context_change_config(struct AV1_COMP *const cpi, */ void av1_update_temporal_layer_framerate(struct AV1_COMP *const cpi); +/*!\brief Check whether a reference frame is a lower spatial layer at the + * same timestamp/superframe. + * + * \ingroup SVC + * \callgraph + * \callergraph + * + * \param[in] cpi Top level encoder structure + * \param[in] ref_frame Reference frame + * + * \return True if the ref_frame is a lower spatial layer, otherwise false. + */ +bool av1_check_ref_is_low_spatial_res_super_frame(struct AV1_COMP *const cpi, + int ref_frame); + /*!\brief Prior to encoding the frame, set the layer context, for the current layer to be encoded, to the cpi struct. * diff --git a/third_party/aom/av1/encoder/temporal_filter.c b/third_party/aom/av1/encoder/temporal_filter.c index 7d4d25de6a..e8cc145030 100644 --- a/third_party/aom/av1/encoder/temporal_filter.c +++ b/third_party/aom/av1/encoder/temporal_filter.c @@ -463,12 +463,12 @@ static void tf_build_predictor(const YV12_BUFFER_CONFIG *ref_frame, // Returns: // Nothing will be returned. But the content to which `accum` and `pred` // point will be modified. -void tf_apply_temporal_filter_self(const YV12_BUFFER_CONFIG *ref_frame, - const MACROBLOCKD *mbd, - const BLOCK_SIZE block_size, - const int mb_row, const int mb_col, - const int num_planes, uint32_t *accum, - uint16_t *count) { +static void tf_apply_temporal_filter_self(const YV12_BUFFER_CONFIG *ref_frame, + const MACROBLOCKD *mbd, + const BLOCK_SIZE block_size, + const int mb_row, const int mb_col, + const int num_planes, uint32_t *accum, + uint16_t *count) { // Block information.
const int mb_height = block_size_high[block_size]; const int mb_width = block_size_wide[block_size]; @@ -564,9 +564,10 @@ static INLINE void compute_square_diff(const uint8_t *ref, const int ref_offset, // Returns: // Nothing will be returned. But the content to which `luma_sse_sum` points // will be modified. -void compute_luma_sq_error_sum(uint32_t *square_diff, uint32_t *luma_sse_sum, - int block_height, int block_width, - int ss_x_shift, int ss_y_shift) { +static void compute_luma_sq_error_sum(uint32_t *square_diff, + uint32_t *luma_sse_sum, int block_height, + int block_width, int ss_x_shift, + int ss_y_shift) { for (int i = 0; i < block_height; ++i) { for (int j = 0; j < block_width; ++j) { for (int ii = 0; ii < (1 << ss_y_shift); ++ii) { @@ -1456,7 +1457,7 @@ bool av1_tf_info_alloc(TEMPORAL_FILTER_INFO *tf_info, const AV1_COMP *cpi) { oxcf->frm_dim_cfg.height, seq_params->subsampling_x, seq_params->subsampling_y, seq_params->use_highbitdepth, cpi->oxcf.border_in_pixels, cm->features.byte_alignment, NULL, NULL, - NULL, cpi->image_pyramid_levels, 0)) { + NULL, cpi->alloc_pyramid, 0)) { return false; } } diff --git a/third_party/aom/av1/encoder/temporal_filter.h b/third_party/aom/av1/encoder/temporal_filter.h index 6504b91b66..a40fb039b9 100644 --- a/third_party/aom/av1/encoder/temporal_filter.h +++ b/third_party/aom/av1/encoder/temporal_filter.h @@ -14,6 +14,8 @@ #include +#include "aom_util/aom_pthread.h" + #ifdef __cplusplus extern "C" { #endif diff --git a/third_party/aom/av1/encoder/tpl_model.c b/third_party/aom/av1/encoder/tpl_model.c index ca60e4981e..86f5485a26 100644 --- a/third_party/aom/av1/encoder/tpl_model.c +++ b/third_party/aom/av1/encoder/tpl_model.c @@ -19,6 +19,7 @@ #include "config/aom_scale_rtcd.h" #include "aom/aom_codec.h" +#include "aom_util/aom_pthread.h" #include "av1/common/av1_common_int.h" #include "av1/common/enums.h" @@ -193,7 +194,7 @@ void av1_setup_tpl_buffers(AV1_PRIMARY *const ppi, &tpl_data->tpl_rec_pool[frame], width, height, seq_params->subsampling_x, seq_params->subsampling_y, seq_params->use_highbitdepth, tpl_data->border_in_pixels, - byte_alignment, 0, alloc_y_plane_only)) + byte_alignment, false, alloc_y_plane_only)) aom_internal_error(&ppi->error, AOM_CODEC_MEM_ERROR, "Failed to allocate frame buffer"); } diff --git a/third_party/aom/av1/encoder/tpl_model.h b/third_party/aom/av1/encoder/tpl_model.h index bcd58216c5..0150c702f9 100644 --- a/third_party/aom/av1/encoder/tpl_model.h +++ b/third_party/aom/av1/encoder/tpl_model.h @@ -30,6 +30,7 @@ struct TPL_INFO; #include "config/aom_config.h" #include "aom_scale/yv12config.h" +#include "aom_util/aom_pthread.h" #include "av1/common/mv.h" #include "av1/common/scale.h" diff --git a/third_party/aom/av1/encoder/tune_butteraugli.c b/third_party/aom/av1/encoder/tune_butteraugli.c index 92fc4b2a92..4381af6a8b 100644 --- a/third_party/aom/av1/encoder/tune_butteraugli.c +++ b/third_party/aom/av1/encoder/tune_butteraugli.c @@ -209,7 +209,7 @@ void av1_setup_butteraugli_source(AV1_COMP *cpi) { if (dst->buffer_alloc_sz == 0) { aom_alloc_frame_buffer( dst, width, height, ss_x, ss_y, cm->seq_params->use_highbitdepth, - cpi->oxcf.border_in_pixels, cm->features.byte_alignment, 0, 0); + cpi->oxcf.border_in_pixels, cm->features.byte_alignment, false, 0); } av1_copy_and_extend_frame(cpi->source, dst); @@ -218,7 +218,7 @@ void av1_setup_butteraugli_source(AV1_COMP *cpi) { aom_alloc_frame_buffer( resized_dst, width / resize_factor, height / resize_factor, ss_x, ss_y, cm->seq_params->use_highbitdepth, 
cpi->oxcf.border_in_pixels, - cm->features.byte_alignment, 0, 0); + cm->features.byte_alignment, false, 0); } if (!av1_resize_and_extend_frame_nonnormative( cpi->source, resized_dst, bit_depth, av1_num_planes(cm))) { @@ -244,7 +244,7 @@ void av1_setup_butteraugli_rdmult_and_restore_source(AV1_COMP *cpi, double K) { aom_alloc_frame_buffer( &resized_recon, width / resize_factor, height / resize_factor, ss_x, ss_y, cm->seq_params->use_highbitdepth, cpi->oxcf.border_in_pixels, - cm->features.byte_alignment, 0, 0); + cm->features.byte_alignment, false, 0); copy_img(&cpi->common.cur_frame->buf, &resized_recon, width / resize_factor, height / resize_factor); @@ -267,12 +267,12 @@ void av1_setup_butteraugli_rdmult(AV1_COMP *cpi) { cpi->source = av1_realloc_and_scale_if_required( cm, cpi->unscaled_source, &cpi->scaled_source, cm->features.interp_filter, - 0, false, false, cpi->oxcf.border_in_pixels, cpi->image_pyramid_levels); + 0, false, false, cpi->oxcf.border_in_pixels, cpi->alloc_pyramid); if (cpi->unscaled_last_source != NULL) { cpi->last_source = av1_realloc_and_scale_if_required( cm, cpi->unscaled_last_source, &cpi->scaled_last_source, cm->features.interp_filter, 0, false, false, cpi->oxcf.border_in_pixels, - cpi->image_pyramid_levels); + cpi->alloc_pyramid); } av1_setup_butteraugli_source(cpi); diff --git a/third_party/aom/av1/encoder/tune_vmaf.c b/third_party/aom/av1/encoder/tune_vmaf.c index 4e5ffa387c..91db3db726 100644 --- a/third_party/aom/av1/encoder/tune_vmaf.c +++ b/third_party/aom/av1/encoder/tune_vmaf.c @@ -288,10 +288,10 @@ static AOM_INLINE void gaussian_blur(const int bit_depth, } } -static AOM_INLINE double cal_approx_vmaf(const AV1_COMP *const cpi, - double source_variance, - YV12_BUFFER_CONFIG *const source, - YV12_BUFFER_CONFIG *const sharpened) { +static AOM_INLINE double cal_approx_vmaf( + const AV1_COMP *const cpi, double source_variance, + const YV12_BUFFER_CONFIG *const source, + const YV12_BUFFER_CONFIG *const sharpened) { const int bit_depth = cpi->td.mb.e_mbd.bd; const bool cal_vmaf_neg = cpi->oxcf.tune_cfg.tuning == AOM_TUNE_VMAF_NEG_MAX_GAIN; @@ -305,11 +305,11 @@ static AOM_INLINE double cal_approx_vmaf(const AV1_COMP *const cpi, } static double find_best_frame_unsharp_amount_loop( - const AV1_COMP *const cpi, YV12_BUFFER_CONFIG *const source, - YV12_BUFFER_CONFIG *const blurred, YV12_BUFFER_CONFIG *const sharpened, - double best_vmaf, const double baseline_variance, - const double unsharp_amount_start, const double step_size, - const int max_loop_count, const double max_amount) { + const AV1_COMP *const cpi, const YV12_BUFFER_CONFIG *const source, + const YV12_BUFFER_CONFIG *const blurred, + const YV12_BUFFER_CONFIG *const sharpened, double best_vmaf, + const double baseline_variance, const double unsharp_amount_start, + const double step_size, const int max_loop_count, const double max_amount) { const double min_amount = 0.0; int loop_count = 0; double approx_vmaf = best_vmaf; @@ -328,13 +328,11 @@ static double find_best_frame_unsharp_amount_loop( return AOMMIN(max_amount, AOMMAX(unsharp_amount, min_amount)); } -static double find_best_frame_unsharp_amount(const AV1_COMP *const cpi, - YV12_BUFFER_CONFIG *const source, - YV12_BUFFER_CONFIG *const blurred, - const double unsharp_amount_start, - const double step_size, - const int max_loop_count, - const double max_filter_amount) { +static double find_best_frame_unsharp_amount( + const AV1_COMP *const cpi, const YV12_BUFFER_CONFIG *const source, + const YV12_BUFFER_CONFIG *const blurred, const double 
unsharp_amount_start, + const double step_size, const int max_loop_count, + const double max_filter_amount) { const AV1_COMMON *const cm = &cpi->common; const int width = source->y_width; const int height = source->y_height; @@ -343,7 +341,7 @@ static double find_best_frame_unsharp_amount(const AV1_COMP *const cpi, aom_alloc_frame_buffer( &sharpened, width, height, source->subsampling_x, source->subsampling_y, cm->seq_params->use_highbitdepth, cpi->oxcf.border_in_pixels, - cm->features.byte_alignment, 0, 0); + cm->features.byte_alignment, false, 0); const double baseline_variance = frame_average_variance(cpi, source); double unsharp_amount; @@ -376,7 +374,7 @@ static double find_best_frame_unsharp_amount(const AV1_COMP *const cpi, } void av1_vmaf_neg_preprocessing(AV1_COMP *const cpi, - YV12_BUFFER_CONFIG *const source) { + const YV12_BUFFER_CONFIG *const source) { const AV1_COMMON *const cm = &cpi->common; const int bit_depth = cpi->td.mb.e_mbd.bd; const int width = source->y_width; @@ -395,7 +393,7 @@ void av1_vmaf_neg_preprocessing(AV1_COMP *const cpi, aom_alloc_frame_buffer( &blurred, width, height, source->subsampling_x, source->subsampling_y, cm->seq_params->use_highbitdepth, cpi->oxcf.border_in_pixels, - cm->features.byte_alignment, 0, 0); + cm->features.byte_alignment, false, 0); gaussian_blur(bit_depth, source, &blurred); unsharp(cpi, source, &blurred, source, best_frame_unsharp_amount); @@ -403,7 +401,7 @@ void av1_vmaf_neg_preprocessing(AV1_COMP *const cpi, } void av1_vmaf_frame_preprocessing(AV1_COMP *const cpi, - YV12_BUFFER_CONFIG *const source) { + const YV12_BUFFER_CONFIG *const source) { const AV1_COMMON *const cm = &cpi->common; const int bit_depth = cpi->td.mb.e_mbd.bd; const int width = source->y_width; @@ -415,11 +413,11 @@ void av1_vmaf_frame_preprocessing(AV1_COMP *const cpi, aom_alloc_frame_buffer( &source_extended, width, height, source->subsampling_x, source->subsampling_y, cm->seq_params->use_highbitdepth, - cpi->oxcf.border_in_pixels, cm->features.byte_alignment, 0, 0); + cpi->oxcf.border_in_pixels, cm->features.byte_alignment, false, 0); aom_alloc_frame_buffer( &blurred, width, height, source->subsampling_x, source->subsampling_y, cm->seq_params->use_highbitdepth, cpi->oxcf.border_in_pixels, - cm->features.byte_alignment, 0, 0); + cm->features.byte_alignment, false, 0); av1_copy_and_extend_frame(source, &source_extended); gaussian_blur(bit_depth, &source_extended, &blurred); @@ -442,7 +440,7 @@ void av1_vmaf_frame_preprocessing(AV1_COMP *const cpi, } void av1_vmaf_blk_preprocessing(AV1_COMP *const cpi, - YV12_BUFFER_CONFIG *const source) { + const YV12_BUFFER_CONFIG *const source) { const AV1_COMMON *const cm = &cpi->common; const int width = source->y_width; const int height = source->y_height; @@ -455,11 +453,11 @@ void av1_vmaf_blk_preprocessing(AV1_COMP *const cpi, memset(&source_extended, 0, sizeof(source_extended)); aom_alloc_frame_buffer( &blurred, width, height, ss_x, ss_y, cm->seq_params->use_highbitdepth, - cpi->oxcf.border_in_pixels, cm->features.byte_alignment, 0, 0); + cpi->oxcf.border_in_pixels, cm->features.byte_alignment, false, 0); aom_alloc_frame_buffer(&source_extended, width, height, ss_x, ss_y, cm->seq_params->use_highbitdepth, cpi->oxcf.border_in_pixels, - cm->features.byte_alignment, 0, 0); + cm->features.byte_alignment, false, 0); av1_copy_and_extend_frame(source, &source_extended); gaussian_blur(bit_depth, &source_extended, &blurred); @@ -495,11 +493,11 @@ void av1_vmaf_blk_preprocessing(AV1_COMP *const cpi, 
aom_alloc_frame_buffer(&source_block, block_w, block_h, ss_x, ss_y, cm->seq_params->use_highbitdepth, cpi->oxcf.border_in_pixels, - cm->features.byte_alignment, 0, 0); + cm->features.byte_alignment, false, 0); aom_alloc_frame_buffer(&blurred_block, block_w, block_h, ss_x, ss_y, cm->seq_params->use_highbitdepth, cpi->oxcf.border_in_pixels, - cm->features.byte_alignment, 0, 0); + cm->features.byte_alignment, false, 0); for (int row = 0; row < num_rows; ++row) { for (int col = 0; col < num_cols; ++col) { @@ -622,7 +620,7 @@ void av1_set_mb_vmaf_rdmult_scaling(AV1_COMP *cpi) { aom_alloc_frame_buffer( &resized_source, y_width / resize_factor, y_height / resize_factor, ss_x, ss_y, cm->seq_params->use_highbitdepth, cpi->oxcf.border_in_pixels, - cm->features.byte_alignment, 0, 0); + cm->features.byte_alignment, false, 0); if (!av1_resize_and_extend_frame_nonnormative( cpi->source, &resized_source, bit_depth, av1_num_planes(cm))) { aom_internal_error(cm->error, AOM_CODEC_MEM_ERROR, @@ -643,7 +641,7 @@ void av1_set_mb_vmaf_rdmult_scaling(AV1_COMP *cpi) { aom_alloc_frame_buffer(&blurred, resized_y_width, resized_y_height, ss_x, ss_y, cm->seq_params->use_highbitdepth, cpi->oxcf.border_in_pixels, - cm->features.byte_alignment, 0, 0); + cm->features.byte_alignment, false, 0); gaussian_blur(bit_depth, &resized_source, &blurred); YV12_BUFFER_CONFIG recon; @@ -651,7 +649,7 @@ void av1_set_mb_vmaf_rdmult_scaling(AV1_COMP *cpi) { aom_alloc_frame_buffer(&recon, resized_y_width, resized_y_height, ss_x, ss_y, cm->seq_params->use_highbitdepth, cpi->oxcf.border_in_pixels, - cm->features.byte_alignment, 0, 0); + cm->features.byte_alignment, false, 0); aom_yv12_copy_frame(&resized_source, &recon, 1); VmafContext *vmaf_context; @@ -830,15 +828,15 @@ static double calc_vmaf_motion_score(const AV1_COMP *const cpi, aom_alloc_frame_buffer(&blurred_cur, y_width, y_height, ss_x, ss_y, cm->seq_params->use_highbitdepth, cpi->oxcf.border_in_pixels, - cm->features.byte_alignment, 0, 0); + cm->features.byte_alignment, false, 0); aom_alloc_frame_buffer(&blurred_last, y_width, y_height, ss_x, ss_y, cm->seq_params->use_highbitdepth, cpi->oxcf.border_in_pixels, - cm->features.byte_alignment, 0, 0); + cm->features.byte_alignment, false, 0); aom_alloc_frame_buffer(&blurred_next, y_width, y_height, ss_x, ss_y, cm->seq_params->use_highbitdepth, cpi->oxcf.border_in_pixels, - cm->features.byte_alignment, 0, 0); + cm->features.byte_alignment, false, 0); gaussian_blur(bit_depth, cur, &blurred_cur); gaussian_blur(bit_depth, last, &blurred_last); @@ -881,8 +879,8 @@ static double calc_vmaf_motion_score(const AV1_COMP *const cpi, } static AOM_INLINE void get_neighbor_frames(const AV1_COMP *const cpi, - YV12_BUFFER_CONFIG **last, - YV12_BUFFER_CONFIG **next) { + const YV12_BUFFER_CONFIG **last, + const YV12_BUFFER_CONFIG **next) { const AV1_COMMON *const cm = &cpi->common; const GF_GROUP *gf_group = &cpi->ppi->gf_group; const int src_index = @@ -920,7 +918,7 @@ int av1_get_vmaf_base_qindex(const AV1_COMP *const cpi, int current_qindex) { if (approx_sse < sse_threshold || approx_dvmaf < vmaf_threshold) { return current_qindex; } - YV12_BUFFER_CONFIG *cur_buf = cpi->source; + const YV12_BUFFER_CONFIG *cur_buf = cpi->source; if (cm->show_frame == 0) { const int src_index = gf_group->arf_src_offset[cpi->gf_frame_index]; struct lookahead_entry *cur_entry = av1_lookahead_peek( @@ -929,7 +927,7 @@ int av1_get_vmaf_base_qindex(const AV1_COMP *const cpi, int current_qindex) { } assert(cur_buf); - YV12_BUFFER_CONFIG *next_buf, *last_buf; + const 
YV12_BUFFER_CONFIG *next_buf, *last_buf; get_neighbor_frames(cpi, &last_buf, &next_buf); assert(last_buf); @@ -954,8 +952,8 @@ int av1_get_vmaf_base_qindex(const AV1_COMP *const cpi, int current_qindex) { static AOM_INLINE double cal_approx_score( AV1_COMP *const cpi, double src_variance, double new_variance, - double src_score, YV12_BUFFER_CONFIG *const src, - YV12_BUFFER_CONFIG *const recon_sharpened) { + double src_score, const YV12_BUFFER_CONFIG *const src, + const YV12_BUFFER_CONFIG *const recon_sharpened) { double score; const uint32_t bit_depth = cpi->td.mb.e_mbd.bd; const bool cal_vmaf_neg = @@ -967,11 +965,12 @@ static AOM_INLINE double cal_approx_score( static double find_best_frame_unsharp_amount_loop_neg( AV1_COMP *const cpi, double src_variance, double base_score, - YV12_BUFFER_CONFIG *const src, YV12_BUFFER_CONFIG *const recon, - YV12_BUFFER_CONFIG *const ref, YV12_BUFFER_CONFIG *const src_blurred, - YV12_BUFFER_CONFIG *const recon_blurred, - YV12_BUFFER_CONFIG *const src_sharpened, - YV12_BUFFER_CONFIG *const recon_sharpened, FULLPEL_MV *mvs, + const YV12_BUFFER_CONFIG *const src, const YV12_BUFFER_CONFIG *const recon, + const YV12_BUFFER_CONFIG *const ref, + const YV12_BUFFER_CONFIG *const src_blurred, + const YV12_BUFFER_CONFIG *const recon_blurred, + const YV12_BUFFER_CONFIG *const src_sharpened, + const YV12_BUFFER_CONFIG *const recon_sharpened, FULLPEL_MV *mvs, double best_score, const double unsharp_amount_start, const double step_size, const int max_loop_count, const double max_amount) { const double min_amount = 0.0; @@ -999,8 +998,8 @@ static double find_best_frame_unsharp_amount_loop_neg( } static double find_best_frame_unsharp_amount_neg( - AV1_COMP *const cpi, YV12_BUFFER_CONFIG *const src, - YV12_BUFFER_CONFIG *const recon, YV12_BUFFER_CONFIG *const ref, + AV1_COMP *const cpi, const YV12_BUFFER_CONFIG *const src, + const YV12_BUFFER_CONFIG *const recon, const YV12_BUFFER_CONFIG *const ref, double base_score, const double unsharp_amount_start, const double step_size, const int max_loop_count, const double max_filter_amount) { @@ -1023,18 +1022,18 @@ static double find_best_frame_unsharp_amount_neg( aom_alloc_frame_buffer(&recon_sharpened, width, height, ss_x, ss_y, cm->seq_params->use_highbitdepth, cpi->oxcf.border_in_pixels, - cm->features.byte_alignment, 0, 0); + cm->features.byte_alignment, false, 0); aom_alloc_frame_buffer(&src_sharpened, width, height, ss_x, ss_y, cm->seq_params->use_highbitdepth, cpi->oxcf.border_in_pixels, - cm->features.byte_alignment, 0, 0); + cm->features.byte_alignment, false, 0); aom_alloc_frame_buffer(&recon_blurred, width, height, ss_x, ss_y, cm->seq_params->use_highbitdepth, cpi->oxcf.border_in_pixels, - cm->features.byte_alignment, 0, 0); + cm->features.byte_alignment, false, 0); aom_alloc_frame_buffer( &src_blurred, width, height, ss_x, ss_y, cm->seq_params->use_highbitdepth, - cpi->oxcf.border_in_pixels, cm->features.byte_alignment, 0, 0); + cpi->oxcf.border_in_pixels, cm->features.byte_alignment, false, 0); gaussian_blur(bit_depth, recon, &recon_blurred); gaussian_blur(bit_depth, src, &src_blurred); @@ -1076,8 +1075,8 @@ static double find_best_frame_unsharp_amount_neg( } void av1_update_vmaf_curve(AV1_COMP *cpi) { - YV12_BUFFER_CONFIG *source = cpi->source; - YV12_BUFFER_CONFIG *recon = &cpi->common.cur_frame->buf; + const YV12_BUFFER_CONFIG *source = cpi->source; + const YV12_BUFFER_CONFIG *recon = &cpi->common.cur_frame->buf; const int bit_depth = cpi->td.mb.e_mbd.bd; const GF_GROUP *const gf_group = &cpi->ppi->gf_group; 
const int layer_depth = @@ -1099,7 +1098,7 @@ void av1_update_vmaf_curve(AV1_COMP *cpi) { } if (cpi->oxcf.tune_cfg.tuning == AOM_TUNE_VMAF_NEG_MAX_GAIN) { - YV12_BUFFER_CONFIG *last, *next; + const YV12_BUFFER_CONFIG *last, *next; get_neighbor_frames(cpi, &last, &next); double best_unsharp_amount_start = get_layer_value(cpi->vmaf_info.last_frame_unsharp_amount, layer_depth); diff --git a/third_party/aom/av1/encoder/tune_vmaf.h b/third_party/aom/av1/encoder/tune_vmaf.h index a04a29e6fe..404fd1029a 100644 --- a/third_party/aom/av1/encoder/tune_vmaf.h +++ b/third_party/aom/av1/encoder/tune_vmaf.h @@ -43,13 +43,13 @@ typedef struct { struct AV1_COMP; void av1_vmaf_blk_preprocessing(struct AV1_COMP *cpi, - YV12_BUFFER_CONFIG *source); + const YV12_BUFFER_CONFIG *source); void av1_vmaf_frame_preprocessing(struct AV1_COMP *cpi, - YV12_BUFFER_CONFIG *source); + const YV12_BUFFER_CONFIG *source); void av1_vmaf_neg_preprocessing(struct AV1_COMP *cpi, - YV12_BUFFER_CONFIG *source); + const YV12_BUFFER_CONFIG *source); void av1_set_mb_vmaf_rdmult_scaling(struct AV1_COMP *cpi); diff --git a/third_party/aom/av1/encoder/tx_search.c b/third_party/aom/av1/encoder/tx_search.c index 7292c01191..5dcc08c0ff 100644 --- a/third_party/aom/av1/encoder/tx_search.c +++ b/third_party/aom/av1/encoder/tx_search.c @@ -1109,13 +1109,11 @@ static INLINE void dist_block_tx_domain(MACROBLOCK *x, int plane, int block, *out_sse = RIGHT_SIGNED_SHIFT(this_sse, shift); } -uint16_t prune_txk_type_separ(const AV1_COMP *cpi, MACROBLOCK *x, int plane, - int block, TX_SIZE tx_size, int blk_row, - int blk_col, BLOCK_SIZE plane_bsize, int *txk_map, - int16_t allowed_tx_mask, int prune_factor, - const TXB_CTX *const txb_ctx, - int reduced_tx_set_used, int64_t ref_best_rd, - int num_sel) { +static uint16_t prune_txk_type_separ( + const AV1_COMP *cpi, MACROBLOCK *x, int plane, int block, TX_SIZE tx_size, + int blk_row, int blk_col, BLOCK_SIZE plane_bsize, int *txk_map, + int16_t allowed_tx_mask, int prune_factor, const TXB_CTX *const txb_ctx, + int reduced_tx_set_used, int64_t ref_best_rd, int num_sel) { const AV1_COMMON *cm = &cpi->common; MACROBLOCKD *xd = &x->e_mbd; @@ -1255,11 +1253,12 @@ uint16_t prune_txk_type_separ(const AV1_COMP *cpi, MACROBLOCK *x, int plane, return prune; } -uint16_t prune_txk_type(const AV1_COMP *cpi, MACROBLOCK *x, int plane, - int block, TX_SIZE tx_size, int blk_row, int blk_col, - BLOCK_SIZE plane_bsize, int *txk_map, - uint16_t allowed_tx_mask, int prune_factor, - const TXB_CTX *const txb_ctx, int reduced_tx_set_used) { +static uint16_t prune_txk_type(const AV1_COMP *cpi, MACROBLOCK *x, int plane, + int block, TX_SIZE tx_size, int blk_row, + int blk_col, BLOCK_SIZE plane_bsize, + int *txk_map, uint16_t allowed_tx_mask, + int prune_factor, const TXB_CTX *const txb_ctx, + int reduced_tx_set_used) { const AV1_COMMON *cm = &cpi->common; MACROBLOCKD *xd = &x->e_mbd; int tx_type; diff --git a/third_party/aom/av1/encoder/x86/av1_fwd_txfm_sse2.c b/third_party/aom/av1/encoder/x86/av1_fwd_txfm_sse2.c index a4def754b0..31cc37db7a 100644 --- a/third_party/aom/av1/encoder/x86/av1_fwd_txfm_sse2.c +++ b/third_party/aom/av1/encoder/x86/av1_fwd_txfm_sse2.c @@ -2638,6 +2638,11 @@ void av1_lowbd_fwd_txfm2d_16x64_sse2(const int16_t *input, int32_t *output, } } +// Include top-level function only for 32-bit x86, to support Valgrind. +// For normal use, we require SSE4.1, so av1_lowbd_fwd_txfm_sse4_1 will be used +// instead of this function. 
However, 32-bit Valgrind does not support SSE4.1, +// so we include a fallback to SSE2 to improve performance. +#if AOM_ARCH_X86 static FwdTxfm2dFunc fwd_txfm2d_func_ls[TX_SIZES_ALL] = { av1_lowbd_fwd_txfm2d_4x4_sse2, // 4x4 transform av1_lowbd_fwd_txfm2d_8x8_sse2, // 8x8 transform @@ -2671,3 +2676,4 @@ void av1_lowbd_fwd_txfm_sse2(const int16_t *src_diff, tran_low_t *coeff, fwd_txfm2d_func(src_diff, coeff, diff_stride, txfm_param->tx_type, txfm_param->bd); } +#endif // AOM_ARCH_X86 diff --git a/third_party/aom/av1/encoder/x86/cnn_avx2.c b/third_party/aom/av1/encoder/x86/cnn_avx2.c index ee93b3d5a0..9c26a56641 100644 --- a/third_party/aom/av1/encoder/x86/cnn_avx2.c +++ b/third_party/aom/av1/encoder/x86/cnn_avx2.c @@ -466,7 +466,7 @@ static INLINE void cnn_convolve_no_maxpool_padding_valid_layer2_avx2( // As per the layer config set by av1_intra_mode_cnn_partition_cnn_config, // the filter_width and filter_height are equal to 2 for layer >= 1. So // convolution happens at 2x2 for layer >= 1. -void cnn_convolve_no_maxpool_padding_valid_2x2_avx2( +static void cnn_convolve_no_maxpool_padding_valid_2x2_avx2( const float **input, int in_width, int in_height, int in_stride, const CNN_LAYER_CONFIG *const layer_config, float **output, int out_stride, int start_idx, const int cstep, const int channel_step) { -- cgit v1.2.3