Diffstat (limited to 'third_party/aom')
-rw-r--r--  third_party/aom/CHANGELOG | 19
-rw-r--r--  third_party/aom/CMakeLists.txt | 22
-rw-r--r--  third_party/aom/README.md | 26
-rw-r--r--  third_party/aom/aom/aom_encoder.h | 5
-rw-r--r--  third_party/aom/aom/aomdx.h | 7
-rw-r--r--  third_party/aom/aom/src/aom_encoder.c | 5
-rw-r--r--  third_party/aom/aom/src/aom_image.c | 2
-rw-r--r--  third_party/aom/aom_dsp/aom_dsp.cmake | 15
-rwxr-xr-x  third_party/aom/aom_dsp/aom_dsp_rtcd_defs.pl | 118
-rw-r--r--  third_party/aom/aom_dsp/arm/aom_convolve8_neon_dotprod.c | 116
-rw-r--r--  third_party/aom/aom_dsp/arm/aom_convolve8_neon_i8mm.c | 73
-rw-r--r--  third_party/aom/aom_dsp/arm/aom_filter.h | 33
-rw-r--r--  third_party/aom/aom_dsp/arm/aom_neon_sve2_bridge.h | 36
-rw-r--r--  third_party/aom/aom_dsp/arm/aom_neon_sve_bridge.h (renamed from third_party/aom/aom_dsp/arm/dot_sve.h) | 24
-rw-r--r--  third_party/aom/aom_dsp/arm/avg_sve.c | 2
-rw-r--r--  third_party/aom/aom_dsp/arm/blk_sse_sum_sve.c | 2
-rw-r--r--  third_party/aom/aom_dsp/arm/highbd_convolve8_sve.c | 681
-rw-r--r--  third_party/aom/aom_dsp/arm/highbd_sse_sve.c | 2
-rw-r--r--  third_party/aom/aom_dsp/arm/highbd_variance_sve.c | 2
-rw-r--r--  third_party/aom/aom_dsp/arm/mem_neon.h | 56
-rw-r--r--  third_party/aom/aom_dsp/arm/sum_squares_sve.c | 2
-rw-r--r--  third_party/aom/aom_dsp/flow_estimation/corner_detect.c | 44
-rw-r--r--  third_party/aom/aom_dsp/flow_estimation/corner_detect.h | 5
-rw-r--r--  third_party/aom/aom_dsp/flow_estimation/corner_match.c | 317
-rw-r--r--  third_party/aom/aom_dsp/flow_estimation/corner_match.h | 12
-rw-r--r--  third_party/aom/aom_dsp/flow_estimation/disflow.c | 36
-rw-r--r--  third_party/aom/aom_dsp/flow_estimation/disflow.h | 11
-rw-r--r--  third_party/aom/aom_dsp/flow_estimation/flow_estimation.c | 20
-rw-r--r--  third_party/aom/aom_dsp/flow_estimation/flow_estimation.h | 7
-rw-r--r--  third_party/aom/aom_dsp/flow_estimation/ransac.c | 349
-rw-r--r--  third_party/aom/aom_dsp/flow_estimation/x86/corner_match_avx2.c | 148
-rw-r--r--  third_party/aom/aom_dsp/flow_estimation/x86/corner_match_sse4.c | 171
-rw-r--r--  third_party/aom/aom_dsp/flow_estimation/x86/disflow_avx2.c | 417
-rw-r--r--  third_party/aom/aom_dsp/flow_estimation/x86/disflow_sse4.c | 424
-rw-r--r--  third_party/aom/aom_dsp/mathutils.h | 1
-rw-r--r--  third_party/aom/aom_dsp/noise_model.c | 6
-rw-r--r--  third_party/aom/aom_dsp/noise_model.h | 6
-rw-r--r--  third_party/aom/aom_dsp/pyramid.c | 181
-rw-r--r--  third_party/aom/aom_dsp/pyramid.h | 61
-rw-r--r--  third_party/aom/aom_dsp/rect.h | 35
-rw-r--r--  third_party/aom/aom_dsp/variance.c | 125
-rw-r--r--  third_party/aom/aom_dsp/x86/aom_asm_stubs.c | 34
-rw-r--r--  third_party/aom/aom_dsp/x86/aom_subpixel_8t_intrin_sse2.c | 569
-rw-r--r--  third_party/aom/aom_dsp/x86/aom_subpixel_8t_sse2.asm | 615
-rw-r--r--  third_party/aom/aom_dsp/x86/aom_subpixel_bilinear_sse2.asm | 295
-rw-r--r--  third_party/aom/aom_dsp/x86/avg_intrin_sse2.c | 2
-rw-r--r--  third_party/aom/aom_dsp/x86/fwd_txfm_impl_sse2.h | 6
-rw-r--r--  third_party/aom/aom_dsp/x86/highbd_variance_avx2.c | 63
-rw-r--r--  third_party/aom/aom_dsp/x86/highbd_variance_sse2.c | 12
-rw-r--r--  third_party/aom/aom_dsp/x86/intrapred_ssse3.c | 8
-rw-r--r--  third_party/aom/aom_dsp/x86/masked_sad4d_ssse3.c | 50
-rw-r--r--  third_party/aom/aom_dsp/x86/subpel_variance_ssse3.asm (renamed from third_party/aom/aom_dsp/x86/subpel_variance_sse2.asm) | 28
-rw-r--r--  third_party/aom/aom_dsp/x86/synonyms.h | 19
-rw-r--r--  third_party/aom/aom_dsp/x86/synonyms_avx2.h | 25
-rw-r--r--  third_party/aom/aom_dsp/x86/variance_avx2.c | 26
-rw-r--r--  third_party/aom/aom_dsp/x86/variance_impl_avx2.c | 6
-rw-r--r--  third_party/aom/aom_dsp/x86/variance_sse2.c | 16
-rw-r--r--  third_party/aom/aom_ports/aarch64_cpudetect.c | 16
-rw-r--r--  third_party/aom/aom_ports/arm.h | 2
-rw-r--r--  third_party/aom/aom_ports/mem.h | 8
-rw-r--r--  third_party/aom/aom_scale/aom_scale_rtcd.pl | 12
-rw-r--r--  third_party/aom/aom_scale/generic/yv12config.c | 34
-rw-r--r--  third_party/aom/aom_scale/generic/yv12extend.c | 42
-rw-r--r--  third_party/aom/aom_scale/yv12config.h | 31
-rw-r--r--  third_party/aom/aom_util/aom_pthread.h | 172
-rw-r--r--  third_party/aom/aom_util/aom_thread.c | 56
-rw-r--r--  third_party/aom/aom_util/aom_thread.h | 146
-rw-r--r--  third_party/aom/aom_util/aom_util.cmake | 3
-rw-r--r--  third_party/aom/apps/aomenc.c | 4
-rw-r--r--  third_party/aom/av1/av1.cmake | 22
-rw-r--r--  third_party/aom/av1/av1_cx_iface.c | 49
-rw-r--r--  third_party/aom/av1/av1_dx_iface.c | 17
-rw-r--r--  third_party/aom/av1/common/alloccommon.c | 6
-rw-r--r--  third_party/aom/av1/common/arm/highbd_compound_convolve_neon.c | 532
-rw-r--r--  third_party/aom/av1/common/arm/highbd_compound_convolve_neon.h | 293
-rw-r--r--  third_party/aom/av1/common/arm/highbd_compound_convolve_sve2.c | 1555
-rw-r--r--  third_party/aom/av1/common/arm/highbd_convolve_sve2.c | 1720
-rw-r--r--  third_party/aom/av1/common/arm/highbd_convolve_sve2.h | 97
-rw-r--r--  third_party/aom/av1/common/arm/highbd_warp_plane_neon.c | 30
-rw-r--r--  third_party/aom/av1/common/arm/highbd_warp_plane_neon.h | 60
-rw-r--r--  third_party/aom/av1/common/arm/highbd_warp_plane_sve.c | 32
-rw-r--r--  third_party/aom/av1/common/arm/warp_plane_neon.c | 38
-rw-r--r--  third_party/aom/av1/common/arm/warp_plane_neon.h | 60
-rw-r--r--  third_party/aom/av1/common/arm/warp_plane_neon_i8mm.c | 38
-rw-r--r--  third_party/aom/av1/common/arm/warp_plane_sve.c | 40
-rw-r--r--  third_party/aom/av1/common/av1_common_int.h | 2
-rw-r--r--  third_party/aom/av1/common/av1_rtcd_defs.pl | 54
-rw-r--r--  third_party/aom/av1/common/cdef.c | 13
-rw-r--r--  third_party/aom/av1/common/entropymode.h | 9
-rw-r--r--  third_party/aom/av1/common/quant_common.c | 18
-rw-r--r--  third_party/aom/av1/common/reconintra.c | 6
-rw-r--r--  third_party/aom/av1/common/resize.c | 52
-rw-r--r--  third_party/aom/av1/common/resize.h | 44
-rw-r--r--  third_party/aom/av1/common/restoration.c | 35
-rw-r--r--  third_party/aom/av1/common/thread_common.c | 7
-rw-r--r--  third_party/aom/av1/common/thread_common.h | 1
-rw-r--r--  third_party/aom/av1/common/tile_common.c | 61
-rw-r--r--  third_party/aom/av1/common/tile_common.h | 15
-rw-r--r--  third_party/aom/av1/common/x86/cdef_block_sse2.c | 40
-rw-r--r--  third_party/aom/av1/common/x86/cdef_block_ssse3.c | 11
-rw-r--r--  third_party/aom/av1/common/x86/convolve_2d_avx2.c | 18
-rw-r--r--  third_party/aom/av1/common/x86/convolve_2d_sse2.c | 17
-rw-r--r--  third_party/aom/av1/common/x86/convolve_sse2.c | 26
-rw-r--r--  third_party/aom/av1/common/x86/jnt_convolve_sse2.c | 229
-rw-r--r--  third_party/aom/av1/decoder/decodeframe.c | 49
-rw-r--r--  third_party/aom/av1/decoder/decodemv.h | 2
-rw-r--r--  third_party/aom/av1/decoder/decoder.c | 1
-rw-r--r--  third_party/aom/av1/decoder/dthread.h | 1
-rw-r--r--  third_party/aom/av1/decoder/obu.c | 41
-rw-r--r--  third_party/aom/av1/encoder/allintra_vis.c | 4
-rw-r--r--  third_party/aom/av1/encoder/aq_cyclicrefresh.c | 50
-rw-r--r--  third_party/aom/av1/encoder/arm/neon/av1_error_sve.c | 2
-rw-r--r--  third_party/aom/av1/encoder/arm/neon/temporal_filter_neon_dotprod.c | 58
-rw-r--r--  third_party/aom/av1/encoder/arm/neon/wedge_utils_sve.c | 92
-rw-r--r--  third_party/aom/av1/encoder/av1_temporal_denoiser.c | 8
-rw-r--r--  third_party/aom/av1/encoder/bitstream.c | 19
-rw-r--r--  third_party/aom/av1/encoder/bitstream.h | 1
-rw-r--r--  third_party/aom/av1/encoder/block.h | 3
-rw-r--r--  third_party/aom/av1/encoder/cnn.c | 10
-rw-r--r--  third_party/aom/av1/encoder/encode_strategy.c | 27
-rw-r--r--  third_party/aom/av1/encoder/encodeframe.c | 20
-rw-r--r--  third_party/aom/av1/encoder/encodeframe_utils.c | 6
-rw-r--r--  third_party/aom/av1/encoder/encoder.c | 94
-rw-r--r--  third_party/aom/av1/encoder/encoder.h | 9
-rw-r--r--  third_party/aom/av1/encoder/encoder_alloc.h | 3
-rw-r--r--  third_party/aom/av1/encoder/encoder_utils.c | 20
-rw-r--r--  third_party/aom/av1/encoder/encodetxb.c | 26
-rw-r--r--  third_party/aom/av1/encoder/ethread.c | 8
-rw-r--r--  third_party/aom/av1/encoder/firstpass.c | 1
-rw-r--r--  third_party/aom/av1/encoder/global_motion.c | 82
-rw-r--r--  third_party/aom/av1/encoder/global_motion.h | 32
-rw-r--r--  third_party/aom/av1/encoder/global_motion_facade.c | 47
-rw-r--r--  third_party/aom/av1/encoder/k_means_template.h | 10
-rw-r--r--  third_party/aom/av1/encoder/lookahead.c | 19
-rw-r--r--  third_party/aom/av1/encoder/lookahead.h | 20
-rw-r--r--  third_party/aom/av1/encoder/nonrd_pickmode.c | 7
-rw-r--r--  third_party/aom/av1/encoder/palette.c | 2
-rw-r--r--  third_party/aom/av1/encoder/palette.h | 2
-rw-r--r--  third_party/aom/av1/encoder/partition_search.c | 48
-rw-r--r--  third_party/aom/av1/encoder/partition_strategy.c | 2
-rw-r--r--  third_party/aom/av1/encoder/pass2_strategy.c | 100
-rw-r--r--  third_party/aom/av1/encoder/pickcdef.c | 2
-rw-r--r--  third_party/aom/av1/encoder/picklpf.c | 21
-rw-r--r--  third_party/aom/av1/encoder/pickrst.c | 111
-rw-r--r--  third_party/aom/av1/encoder/ratectrl.c | 120
-rw-r--r--  third_party/aom/av1/encoder/ratectrl.h | 3
-rw-r--r--  third_party/aom/av1/encoder/speed_features.c | 9
-rw-r--r--  third_party/aom/av1/encoder/speed_features.h | 7
-rw-r--r--  third_party/aom/av1/encoder/superres_scale.c | 2
-rw-r--r--  third_party/aom/av1/encoder/svc_layercontext.c | 12
-rw-r--r--  third_party/aom/av1/encoder/svc_layercontext.h | 15
-rw-r--r--  third_party/aom/av1/encoder/temporal_filter.c | 21
-rw-r--r--  third_party/aom/av1/encoder/temporal_filter.h | 2
-rw-r--r--  third_party/aom/av1/encoder/tpl_model.c | 3
-rw-r--r--  third_party/aom/av1/encoder/tpl_model.h | 1
-rw-r--r--  third_party/aom/av1/encoder/tune_butteraugli.c | 10
-rw-r--r--  third_party/aom/av1/encoder/tune_vmaf.c | 105
-rw-r--r--  third_party/aom/av1/encoder/tune_vmaf.h | 6
-rw-r--r--  third_party/aom/av1/encoder/tx_search.c | 23
-rw-r--r--  third_party/aom/av1/encoder/x86/av1_fwd_txfm_sse2.c | 6
-rw-r--r--  third_party/aom/av1/encoder/x86/cnn_avx2.c | 2
-rw-r--r--  third_party/aom/build/cmake/aom_config_defaults.cmake | 6
-rw-r--r--  third_party/aom/build/cmake/aom_configure.cmake | 11
-rw-r--r--  third_party/aom/build/cmake/compiler_flags.cmake | 4
-rw-r--r--  third_party/aom/build/cmake/cpu.cmake | 21
-rwxr-xr-x  third_party/aom/build/cmake/rtcd.pl | 2
-rw-r--r--  third_party/aom/doc/dev_guide/av1_encoder.dox | 28
-rw-r--r--  third_party/aom/examples/av1_dec_fuzzer.cc | 15
-rw-r--r--  third_party/aom/examples/svc_encoder_rtc.cc | 34
-rw-r--r--  third_party/aom/libs.doxy_template | 57
-rw-r--r--  third_party/aom/test/active_map_test.cc | 18
-rw-r--r--  third_party/aom/test/aom_image_test.cc | 12
-rw-r--r--  third_party/aom/test/av1_convolve_test.cc | 38
-rw-r--r--  third_party/aom/test/av1_fwd_txfm2d_test.cc | 15
-rw-r--r--  third_party/aom/test/av1_wedge_utils_test.cc | 12
-rw-r--r--  third_party/aom/test/cdef_test.cc | 72
-rw-r--r--  third_party/aom/test/convolve_test.cc | 35
-rw-r--r--  third_party/aom/test/corner_match_test.cc | 221
-rw-r--r--  third_party/aom/test/disflow_test.cc | 5
-rw-r--r--  third_party/aom/test/encode_api_test.cc | 79
-rw-r--r--  third_party/aom/test/hbd_metrics_test.cc | 8
-rw-r--r--  third_party/aom/test/level_test.cc | 14
-rw-r--r--  third_party/aom/test/quantize_func_test.cc | 9
-rw-r--r--  third_party/aom/test/resize_test.cc | 40
-rw-r--r--  third_party/aom/test/sad_test.cc | 2
-rw-r--r--  third_party/aom/test/segment_binarization_sync.cc | 11
-rw-r--r--  third_party/aom/test/sharpness_test.cc | 2
-rw-r--r--  third_party/aom/test/test.cmake | 48
-rw-r--r--  third_party/aom/test/test_libaom.cc | 1
-rw-r--r--  third_party/aom/test/variance_test.cc | 78
-rw-r--r--  third_party/aom/test/wiener_test.cc | 382
-rw-r--r--  third_party/aom/third_party/libwebm/README.libaom | 2
-rw-r--r--  third_party/aom/third_party/libwebm/mkvmuxer/mkvmuxer.cc | 102
-rw-r--r--  third_party/aom/third_party/libwebm/mkvmuxer/mkvmuxer.h | 2
-rw-r--r--  third_party/aom/third_party/libwebm/mkvmuxer/mkvmuxerutil.cc | 10
-rw-r--r--  third_party/aom/third_party/libwebm/mkvparser/mkvparser.cc | 11
196 files changed, 9029 insertions, 4956 deletions
diff --git a/third_party/aom/CHANGELOG b/third_party/aom/CHANGELOG
index b5c1afbba2..b8a3e4a6a5 100644
--- a/third_party/aom/CHANGELOG
+++ b/third_party/aom/CHANGELOG
@@ -1,3 +1,22 @@
+2024-03-08 v3.8.2
+ This release includes several bug fixes. This release is ABI
+ compatible with the last release. See
+ https://aomedia.googlesource.com/aom/+log/v3.8.1..v3.8.2 for all the
+ commits in this release.
+
+ - Bug Fixes
+ * aomedia:3523: SIGFPE in av1_twopass_postencode_update()
+ pass2_strategy.c:4261.
+ * aomedia:3535, b/317646516: Over reads in aom_convolve_copy_neon().
+ * aomedia:3543: invalid feature modifier when compiling
+ aom_dsp/arm/aom_convolve8_neon_i8mm.c on Debian 10 with arm64
+ architecture.
+ * aomedia:3545: Failed to parse configurations due to inconsistent
+ elements between two arrays "av1_ctrl_args" and "av1_arg_ctrl_map"
+ in aomenc.c.
+ * oss-fuzz:66474, b/319140742: Integer-overflow in search_wiener.
+ * Zero initialize an array in cdef search.
+
2024-01-17 v3.8.1
This release includes several bug fixes. This release is ABI
compatible with the last release. See
diff --git a/third_party/aom/CMakeLists.txt b/third_party/aom/CMakeLists.txt
index a02b220bdb..00a7e2bca9 100644
--- a/third_party/aom/CMakeLists.txt
+++ b/third_party/aom/CMakeLists.txt
@@ -59,7 +59,7 @@ endif()
#
# We set SO_FILE_VERSION = [c-a].a.r
set(LT_CURRENT 11)
-set(LT_REVISION 1)
+set(LT_REVISION 2)
set(LT_AGE 8)
math(EXPR SO_VERSION "${LT_CURRENT} - ${LT_AGE}")
set(SO_FILE_VERSION "${SO_VERSION}.${LT_AGE}.${LT_REVISION}")
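For reference, substituting the values above into the SO_FILE_VERSION = [c-a].a.r rule gives the shared object version that ships with this release (a worked example, not part of the patch):

    SO_VERSION      = LT_CURRENT - LT_AGE            = 11 - 8 = 3
    SO_FILE_VERSION = SO_VERSION.LT_AGE.LT_REVISION  = 3.8.2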
@@ -374,6 +374,7 @@ file(WRITE "${AOM_GEN_SRC_DIR}/usage_exit.c"
#
if(ENABLE_EXAMPLES OR ENABLE_TESTS OR ENABLE_TOOLS)
add_library(aom_common_app_util OBJECT ${AOM_COMMON_APP_UTIL_SOURCES})
+ add_library(aom_usage_exit OBJECT "${AOM_GEN_SRC_DIR}/usage_exit.c")
set_property(TARGET ${example} PROPERTY FOLDER examples)
if(CONFIG_AV1_DECODER)
add_library(aom_decoder_app_util OBJECT ${AOM_DECODER_APP_UTIL_SOURCES})
@@ -508,10 +509,10 @@ if(CONFIG_AV1_ENCODER)
# aom_entropy_optimizer.c won't work on macos, but dragging in all the
# helper machinery allows the link to succeed.
add_executable(aom_entropy_optimizer
- "${AOM_GEN_SRC_DIR}/usage_exit.c"
"${AOM_ROOT}/tools/aom_entropy_optimizer.c"
$<TARGET_OBJECTS:aom_common_app_util>
- $<TARGET_OBJECTS:aom_encoder_app_util>)
+ $<TARGET_OBJECTS:aom_encoder_app_util>
+ $<TARGET_OBJECTS:aom_usage_exit>)
# Maintain a list of encoder tool targets.
list(APPEND AOM_ENCODER_TOOL_TARGETS aom_entropy_optimizer)
@@ -661,12 +662,12 @@ endif()
if(ENABLE_TOOLS)
if(CONFIG_AV1_DECODER)
- add_executable(dump_obu "${AOM_GEN_SRC_DIR}/usage_exit.c"
- "${AOM_ROOT}/tools/dump_obu.cc"
+ add_executable(dump_obu "${AOM_ROOT}/tools/dump_obu.cc"
"${AOM_ROOT}/tools/obu_parser.cc"
"${AOM_ROOT}/tools/obu_parser.h"
$<TARGET_OBJECTS:aom_common_app_util>
- $<TARGET_OBJECTS:aom_decoder_app_util>)
+ $<TARGET_OBJECTS:aom_decoder_app_util>
+ $<TARGET_OBJECTS:aom_usage_exit>)
list(APPEND AOM_TOOL_TARGETS dump_obu)
list(APPEND AOM_APP_TARGETS dump_obu)
@@ -825,7 +826,8 @@ if(BUILD_SHARED_LIBS)
# Clang's AddressSanitizer documentation says "When linking shared libraries,
# the AddressSanitizer run-time is not linked, so -Wl,-z,defs may cause link
# errors (don't use it with AddressSanitizer)." See
- # https://clang.llvm.org/docs/AddressSanitizer.html#usage.
+ # https://clang.llvm.org/docs/AddressSanitizer.html#usage. Similarly, see
+ # https://clang.llvm.org/docs/MemorySanitizer.html#usage.
if(NOT WIN32
AND NOT APPLE
AND NOT (CMAKE_C_COMPILER_ID MATCHES "Clang" AND SANITIZE))
@@ -843,12 +845,6 @@ if(BUILD_SHARED_LIBS)
setup_exports_target()
endif()
-# Do not allow implicit vector type conversions on Clang builds (this is already
-# the default on GCC builds).
-if(CMAKE_C_COMPILER_ID MATCHES "Clang")
- append_compiler_flag("-flax-vector-conversions=none")
-endif()
-
# Handle user supplied compile and link flags last to ensure they're obeyed.
set_user_flags()
diff --git a/third_party/aom/README.md b/third_party/aom/README.md
index 4e2eb2756c..f81e13e9bd 100644
--- a/third_party/aom/README.md
+++ b/third_party/aom/README.md
@@ -46,17 +46,23 @@ README.md {#LREADME}
### Prerequisites {#prerequisites}
- 1. [CMake](https://cmake.org). See CMakeLists.txt for the minimum version
- required.
- 2. [Git](https://git-scm.com/).
- 3. [Perl](https://www.perl.org/).
- 4. For x86 targets, [yasm](http://yasm.tortall.net/), which is preferred, or a
- recent version of [nasm](http://www.nasm.us/). If you download yasm with
- the intention to work with Visual Studio, please download win32.exe or
- win64.exe and rename it into yasm.exe. DO NOT download or use vsyasm.exe.
- 5. Building the documentation requires
+1. [CMake](https://cmake.org). See CMakeLists.txt for the minimum version
+ required.
+2. [Git](https://git-scm.com/).
+3. A modern C compiler. gcc 6+, clang 7+, Microsoft Visual Studio 2019+ or
+ the latest version of MinGW-w64 (clang64 or ucrt toolchains) are
+ recommended. A C++ compiler is necessary to build the unit tests and some
+ features contained in the examples.
+4. [Perl](https://www.perl.org/).
+5. For x86 targets, [yasm](http://yasm.tortall.net/) or a recent version (2.14
+ or later) of [nasm](http://www.nasm.us/). (If both yasm and nasm are
+ present, yasm will be used by default. Pass -DENABLE_NASM=ON to cmake to
+ select nasm.) If you download yasm with the intention to work with Visual
+ Studio, please download win32.exe or win64.exe and rename it into yasm.exe.
+ DO NOT download or use vsyasm.exe.
+6. Building the documentation requires
[doxygen version 1.8.10 or newer](http://doxygen.org).
- 6. Emscripten builds require the portable
+7. Emscripten builds require the portable
[EMSDK](https://kripken.github.io/emscripten-site/index.html).
### Get the code {#get-the-code}
diff --git a/third_party/aom/aom/aom_encoder.h b/third_party/aom/aom/aom_encoder.h
index 6a6254dafe..9bdadd6938 100644
--- a/third_party/aom/aom/aom_encoder.h
+++ b/third_party/aom/aom/aom_encoder.h
@@ -1044,6 +1044,11 @@ aom_fixed_buf_t *aom_codec_get_global_headers(aom_codec_ctx_t *ctx);
* Interface is not an encoder interface.
* \retval #AOM_CODEC_INVALID_PARAM
* A parameter was NULL, the image format is unsupported, etc.
+ *
+ * \note
+ * `duration` is of the unsigned long type, which can be 32 or 64 bits.
+ * `duration` must be less than or equal to UINT32_MAX so that its range is
+ * independent of the size of unsigned long.
*/
aom_codec_err_t aom_codec_encode(aom_codec_ctx_t *ctx, const aom_image_t *img,
aom_codec_pts_t pts, unsigned long duration,
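The new note above caps `duration` at UINT32_MAX, and the matching check added to aom/src/aom_encoder.c (further down in this patch) rejects larger values with AOM_CODEC_INVALID_PARAM on platforms where unsigned long is 64 bits. A minimal caller-side sketch, assuming an already-initialized encoder context and input frame (illustrative only, not part of the patch):

    #include <stdint.h>
    #include "aom/aom_encoder.h"

    static aom_codec_err_t encode_one_frame(aom_codec_ctx_t *ctx,
                                            const aom_image_t *img,
                                            aom_codec_pts_t pts,
                                            uint64_t duration) {
      /* Keep duration within the documented 32-bit range so behavior does not
       * depend on the width of unsigned long. */
      if (duration > UINT32_MAX) return AOM_CODEC_INVALID_PARAM;
      return aom_codec_encode(ctx, img, pts, (unsigned long)duration,
                              /*flags=*/0);
    }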
diff --git a/third_party/aom/aom/aomdx.h b/third_party/aom/aom/aomdx.h
index 02ea19597c..2dd7bb3375 100644
--- a/third_party/aom/aom/aomdx.h
+++ b/third_party/aom/aom/aomdx.h
@@ -234,8 +234,11 @@ enum aom_dec_control_id {
*/
AV1D_GET_IMG_FORMAT,
- /*!\brief Codec control function to get the size of the tile, unsigned int*
- * parameter
+ /*!\brief Codec control function to get the width and height (in pixels) of
+ * the tiles in a tile list, unsigned int* parameter
+ *
+ * Tile width is in the high 16 bits of the output value, and tile height is
+ * in the low 16 bits of the output value.
*/
AV1D_GET_TILE_SIZE,
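The reworded documentation above defines the packing of the AV1D_GET_TILE_SIZE output: tile width in the high 16 bits, tile height in the low 16 bits. A minimal decode-side sketch that unpacks it, assuming an initialized decoder context (illustrative only, not part of the patch):

    #include "aom/aom_decoder.h"
    #include "aom/aomdx.h"

    /* Illustrative helper: returns 0 on success, with tile dimensions in
     * pixels. */
    static int get_tile_dims(aom_codec_ctx_t *ctx, unsigned int *w,
                             unsigned int *h) {
      unsigned int tile_size = 0;
      if (aom_codec_control(ctx, AV1D_GET_TILE_SIZE, &tile_size) != AOM_CODEC_OK)
        return -1;
      *w = tile_size >> 16;     /* high 16 bits: tile width */
      *h = tile_size & 0xFFFF;  /* low 16 bits: tile height */
      return 0;
    }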
diff --git a/third_party/aom/aom/src/aom_encoder.c b/third_party/aom/aom/src/aom_encoder.c
index 70e0b75bcd..f188567b94 100644
--- a/third_party/aom/aom/src/aom_encoder.c
+++ b/third_party/aom/aom/src/aom_encoder.c
@@ -23,6 +23,7 @@
#endif
#include <limits.h>
+#include <stdint.h>
#include <string.h>
#include "aom/aom_encoder.h"
@@ -178,6 +179,10 @@ aom_codec_err_t aom_codec_encode(aom_codec_ctx_t *ctx, const aom_image_t *img,
else if (img && ((img->fmt & AOM_IMG_FMT_HIGHBITDEPTH) != 0) !=
((ctx->init_flags & AOM_CODEC_USE_HIGHBITDEPTH) != 0)) {
res = AOM_CODEC_INVALID_PARAM;
+#if ULONG_MAX > UINT32_MAX
+ } else if (duration > UINT32_MAX) {
+ res = AOM_CODEC_INVALID_PARAM;
+#endif
} else {
/* Execute in a normalized floating point environment, if the platform
* requires it.
diff --git a/third_party/aom/aom/src/aom_image.c b/third_party/aom/aom/src/aom_image.c
index 8e94d5dd4f..3b1c33d056 100644
--- a/third_party/aom/aom/src/aom_image.c
+++ b/third_party/aom/aom/src/aom_image.c
@@ -41,6 +41,8 @@ static aom_image_t *img_alloc_helper(
if (img != NULL) memset(img, 0, sizeof(aom_image_t));
+ if (fmt == AOM_IMG_FMT_NONE) goto fail;
+
/* Treat align==0 like align==1 */
if (!buf_align) buf_align = 1;
diff --git a/third_party/aom/aom_dsp/aom_dsp.cmake b/third_party/aom/aom_dsp/aom_dsp.cmake
index 653f690741..de987cbd23 100644
--- a/third_party/aom/aom_dsp/aom_dsp.cmake
+++ b/third_party/aom/aom_dsp/aom_dsp.cmake
@@ -52,15 +52,12 @@ list(APPEND AOM_DSP_COMMON_SOURCES
list(APPEND AOM_DSP_COMMON_ASM_SSE2
"${AOM_ROOT}/aom_dsp/x86/aom_high_subpixel_8t_sse2.asm"
"${AOM_ROOT}/aom_dsp/x86/aom_high_subpixel_bilinear_sse2.asm"
- "${AOM_ROOT}/aom_dsp/x86/aom_subpixel_8t_sse2.asm"
- "${AOM_ROOT}/aom_dsp/x86/aom_subpixel_bilinear_sse2.asm"
"${AOM_ROOT}/aom_dsp/x86/highbd_intrapred_asm_sse2.asm"
"${AOM_ROOT}/aom_dsp/x86/intrapred_asm_sse2.asm"
"${AOM_ROOT}/aom_dsp/x86/inv_wht_sse2.asm")
list(APPEND AOM_DSP_COMMON_INTRIN_SSE2
"${AOM_ROOT}/aom_dsp/x86/aom_convolve_copy_sse2.c"
- "${AOM_ROOT}/aom_dsp/x86/aom_subpixel_8t_intrin_sse2.c"
"${AOM_ROOT}/aom_dsp/x86/aom_asm_stubs.c"
"${AOM_ROOT}/aom_dsp/x86/convolve.h"
"${AOM_ROOT}/aom_dsp/x86/convolve_sse2.h"
@@ -145,6 +142,9 @@ if(CONFIG_AV1_HIGHBITDEPTH)
"${AOM_ROOT}/aom_dsp/arm/highbd_convolve8_neon.c"
"${AOM_ROOT}/aom_dsp/arm/highbd_intrapred_neon.c"
"${AOM_ROOT}/aom_dsp/arm/highbd_loopfilter_neon.c")
+
+ list(APPEND AOM_DSP_COMMON_INTRIN_SVE
+ "${AOM_ROOT}/aom_dsp/arm/highbd_convolve8_sve.c")
endif()
if(CONFIG_AV1_DECODER)
@@ -200,7 +200,8 @@ if(CONFIG_AV1_ENCODER)
"${AOM_ROOT}/aom_dsp/flow_estimation/x86/disflow_sse4.c")
list(APPEND AOM_DSP_ENCODER_INTRIN_AVX2
- "${AOM_ROOT}/aom_dsp/flow_estimation/x86/corner_match_avx2.c")
+ "${AOM_ROOT}/aom_dsp/flow_estimation/x86/corner_match_avx2.c"
+ "${AOM_ROOT}/aom_dsp/flow_estimation/x86/disflow_avx2.c")
list(APPEND AOM_DSP_ENCODER_INTRIN_NEON
"${AOM_ROOT}/aom_dsp/flow_estimation/arm/disflow_neon.c")
@@ -208,7 +209,6 @@ if(CONFIG_AV1_ENCODER)
list(APPEND AOM_DSP_ENCODER_ASM_SSE2 "${AOM_ROOT}/aom_dsp/x86/sad4d_sse2.asm"
"${AOM_ROOT}/aom_dsp/x86/sad_sse2.asm"
- "${AOM_ROOT}/aom_dsp/x86/subpel_variance_sse2.asm"
"${AOM_ROOT}/aom_dsp/x86/subtract_sse2.asm")
list(APPEND AOM_DSP_ENCODER_ASM_SSE2_X86_64
@@ -227,6 +227,9 @@ if(CONFIG_AV1_ENCODER)
"${AOM_ROOT}/aom_dsp/x86/variance_sse2.c"
"${AOM_ROOT}/aom_dsp/x86/jnt_sad_sse2.c")
+ list(APPEND AOM_DSP_ENCODER_ASM_SSSE3
+ "${AOM_ROOT}/aom_dsp/x86/subpel_variance_ssse3.asm")
+
list(APPEND AOM_DSP_ENCODER_ASM_SSSE3_X86_64
"${AOM_ROOT}/aom_dsp/x86/fwd_txfm_ssse3_x86_64.asm"
"${AOM_ROOT}/aom_dsp/x86/quantize_ssse3_x86_64.asm")
@@ -493,6 +496,8 @@ function(setup_aom_dsp_targets)
endif()
if(HAVE_SVE)
+ add_intrinsics_object_library("${AOM_SVE_FLAG}" "sve" "aom_dsp_common"
+ "AOM_DSP_COMMON_INTRIN_SVE")
if(CONFIG_AV1_ENCODER)
add_intrinsics_object_library("${AOM_SVE_FLAG}" "sve" "aom_dsp_encoder"
"AOM_DSP_ENCODER_INTRIN_SVE")
diff --git a/third_party/aom/aom_dsp/aom_dsp_rtcd_defs.pl b/third_party/aom/aom_dsp/aom_dsp_rtcd_defs.pl
index 7bb156ac59..7e746e9cb9 100755
--- a/third_party/aom/aom_dsp/aom_dsp_rtcd_defs.pl
+++ b/third_party/aom/aom_dsp/aom_dsp_rtcd_defs.pl
@@ -498,8 +498,8 @@ add_proto qw/void aom_convolve8_horiz/, "const uint8_t *src, ptrdiff_t
add_proto qw/void aom_convolve8_vert/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
specialize qw/aom_convolve_copy neon sse2 avx2/;
-specialize qw/aom_convolve8_horiz neon neon_dotprod neon_i8mm sse2 ssse3/, "$avx2_ssse3";
-specialize qw/aom_convolve8_vert neon neon_dotprod neon_i8mm sse2 ssse3/, "$avx2_ssse3";
+specialize qw/aom_convolve8_horiz neon neon_dotprod neon_i8mm ssse3/, "$avx2_ssse3";
+specialize qw/aom_convolve8_vert neon neon_dotprod neon_i8mm ssse3/, "$avx2_ssse3";
add_proto qw/void aom_scaled_2d/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const InterpKernel *filter, int x0_q4, int x_step_q4, int y0_q4, int y_step_q4, int w, int h";
specialize qw/aom_scaled_2d ssse3 neon/;
@@ -509,10 +509,10 @@ if (aom_config("CONFIG_AV1_HIGHBITDEPTH") eq "yes") {
specialize qw/aom_highbd_convolve_copy sse2 avx2 neon/;
add_proto qw/void aom_highbd_convolve8_horiz/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bd";
- specialize qw/aom_highbd_convolve8_horiz sse2 avx2 neon/;
+ specialize qw/aom_highbd_convolve8_horiz sse2 avx2 neon sve/;
add_proto qw/void aom_highbd_convolve8_vert/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bd";
- specialize qw/aom_highbd_convolve8_vert sse2 avx2 neon/;
+ specialize qw/aom_highbd_convolve8_vert sse2 avx2 neon sve/;
}
#
@@ -1087,7 +1087,7 @@ if (aom_config("CONFIG_AV1_ENCODER") eq "yes") {
specialize qw/aom_sad_skip_16x32x4d avx2 sse2 neon neon_dotprod/;
specialize qw/aom_sad_skip_16x16x4d avx2 sse2 neon neon_dotprod/;
specialize qw/aom_sad_skip_16x8x4d avx2 sse2 neon neon_dotprod/;
- specialize qw/aom_sad_skip_16x4x4d neon neon_dotprod/;
+ specialize qw/aom_sad_skip_16x4x4d avx2 neon neon_dotprod/;
specialize qw/aom_sad_skip_8x32x4d sse2 neon/;
specialize qw/aom_sad_skip_8x16x4d sse2 neon/;
specialize qw/aom_sad_skip_8x8x4d sse2 neon/;
@@ -1116,7 +1116,7 @@ if (aom_config("CONFIG_AV1_ENCODER") eq "yes") {
specialize qw/aom_sad64x16x3d avx2 neon neon_dotprod/;
specialize qw/aom_sad32x8x3d avx2 neon neon_dotprod/;
specialize qw/aom_sad16x64x3d avx2 neon neon_dotprod/;
- specialize qw/aom_sad16x4x3d neon neon_dotprod/;
+ specialize qw/aom_sad16x4x3d avx2 neon neon_dotprod/;
specialize qw/aom_sad8x32x3d neon/;
specialize qw/aom_sad4x16x3d neon/;
@@ -1264,8 +1264,6 @@ if (aom_config("CONFIG_AV1_ENCODER") eq "yes") {
add_proto qw/int aom_vector_var/, "const int16_t *ref, const int16_t *src, int bwl";
specialize qw/aom_vector_var avx2 sse4_1 neon sve/;
- # TODO(kyslov@) bring back SSE2 by extending it to 128 block size
- #specialize qw/aom_vector_var neon sse2/;
#
# hamadard transform and satd for implmenting temporal dependency model
@@ -1357,6 +1355,11 @@ if (aom_config("CONFIG_AV1_ENCODER") eq "yes") {
specialize "aom_highbd_${bd}_mse16x8", qw/neon neon_dotprod/;
specialize "aom_highbd_${bd}_mse8x16", qw/neon neon_dotprod/;
specialize "aom_highbd_${bd}_mse8x8", qw/sse2 neon neon_dotprod/;
+ } elsif ($bd eq 10) {
+ specialize "aom_highbd_${bd}_mse16x16", qw/avx2 sse2 neon sve/;
+ specialize "aom_highbd_${bd}_mse16x8", qw/neon sve/;
+ specialize "aom_highbd_${bd}_mse8x16", qw/neon sve/;
+ specialize "aom_highbd_${bd}_mse8x8", qw/sse2 neon sve/;
} else {
specialize "aom_highbd_${bd}_mse16x16", qw/sse2 neon sve/;
specialize "aom_highbd_${bd}_mse16x8", qw/neon sve/;
@@ -1406,39 +1409,39 @@ if (aom_config("CONFIG_AV1_ENCODER") eq "yes") {
specialize qw/aom_variance4x8 sse2 neon neon_dotprod/;
specialize qw/aom_variance4x4 sse2 neon neon_dotprod/;
- specialize qw/aom_sub_pixel_variance128x128 avx2 neon sse2 ssse3/;
- specialize qw/aom_sub_pixel_variance128x64 avx2 neon sse2 ssse3/;
- specialize qw/aom_sub_pixel_variance64x128 avx2 neon sse2 ssse3/;
- specialize qw/aom_sub_pixel_variance64x64 avx2 neon sse2 ssse3/;
- specialize qw/aom_sub_pixel_variance64x32 avx2 neon sse2 ssse3/;
- specialize qw/aom_sub_pixel_variance32x64 avx2 neon sse2 ssse3/;
- specialize qw/aom_sub_pixel_variance32x32 avx2 neon sse2 ssse3/;
- specialize qw/aom_sub_pixel_variance32x16 avx2 neon sse2 ssse3/;
- specialize qw/aom_sub_pixel_variance16x32 avx2 neon sse2 ssse3/;
- specialize qw/aom_sub_pixel_variance16x16 avx2 neon sse2 ssse3/;
- specialize qw/aom_sub_pixel_variance16x8 avx2 neon sse2 ssse3/;
- specialize qw/aom_sub_pixel_variance8x16 neon sse2 ssse3/;
- specialize qw/aom_sub_pixel_variance8x8 neon sse2 ssse3/;
- specialize qw/aom_sub_pixel_variance8x4 neon sse2 ssse3/;
- specialize qw/aom_sub_pixel_variance4x8 neon sse2 ssse3/;
- specialize qw/aom_sub_pixel_variance4x4 neon sse2 ssse3/;
-
- specialize qw/aom_sub_pixel_avg_variance128x128 avx2 neon sse2 ssse3/;
- specialize qw/aom_sub_pixel_avg_variance128x64 avx2 neon sse2 ssse3/;
- specialize qw/aom_sub_pixel_avg_variance64x128 avx2 neon sse2 ssse3/;
- specialize qw/aom_sub_pixel_avg_variance64x64 avx2 neon sse2 ssse3/;
- specialize qw/aom_sub_pixel_avg_variance64x32 avx2 neon sse2 ssse3/;
- specialize qw/aom_sub_pixel_avg_variance32x64 avx2 neon sse2 ssse3/;
- specialize qw/aom_sub_pixel_avg_variance32x32 avx2 neon sse2 ssse3/;
- specialize qw/aom_sub_pixel_avg_variance32x16 avx2 neon sse2 ssse3/;
- specialize qw/aom_sub_pixel_avg_variance16x32 neon sse2 ssse3/;
- specialize qw/aom_sub_pixel_avg_variance16x16 neon sse2 ssse3/;
- specialize qw/aom_sub_pixel_avg_variance16x8 neon sse2 ssse3/;
- specialize qw/aom_sub_pixel_avg_variance8x16 neon sse2 ssse3/;
- specialize qw/aom_sub_pixel_avg_variance8x8 neon sse2 ssse3/;
- specialize qw/aom_sub_pixel_avg_variance8x4 neon sse2 ssse3/;
- specialize qw/aom_sub_pixel_avg_variance4x8 neon sse2 ssse3/;
- specialize qw/aom_sub_pixel_avg_variance4x4 neon sse2 ssse3/;
+ specialize qw/aom_sub_pixel_variance128x128 avx2 neon ssse3/;
+ specialize qw/aom_sub_pixel_variance128x64 avx2 neon ssse3/;
+ specialize qw/aom_sub_pixel_variance64x128 avx2 neon ssse3/;
+ specialize qw/aom_sub_pixel_variance64x64 avx2 neon ssse3/;
+ specialize qw/aom_sub_pixel_variance64x32 avx2 neon ssse3/;
+ specialize qw/aom_sub_pixel_variance32x64 avx2 neon ssse3/;
+ specialize qw/aom_sub_pixel_variance32x32 avx2 neon ssse3/;
+ specialize qw/aom_sub_pixel_variance32x16 avx2 neon ssse3/;
+ specialize qw/aom_sub_pixel_variance16x32 avx2 neon ssse3/;
+ specialize qw/aom_sub_pixel_variance16x16 avx2 neon ssse3/;
+ specialize qw/aom_sub_pixel_variance16x8 avx2 neon ssse3/;
+ specialize qw/aom_sub_pixel_variance8x16 neon ssse3/;
+ specialize qw/aom_sub_pixel_variance8x8 neon ssse3/;
+ specialize qw/aom_sub_pixel_variance8x4 neon ssse3/;
+ specialize qw/aom_sub_pixel_variance4x8 neon ssse3/;
+ specialize qw/aom_sub_pixel_variance4x4 neon ssse3/;
+
+ specialize qw/aom_sub_pixel_avg_variance128x128 avx2 neon ssse3/;
+ specialize qw/aom_sub_pixel_avg_variance128x64 avx2 neon ssse3/;
+ specialize qw/aom_sub_pixel_avg_variance64x128 avx2 neon ssse3/;
+ specialize qw/aom_sub_pixel_avg_variance64x64 avx2 neon ssse3/;
+ specialize qw/aom_sub_pixel_avg_variance64x32 avx2 neon ssse3/;
+ specialize qw/aom_sub_pixel_avg_variance32x64 avx2 neon ssse3/;
+ specialize qw/aom_sub_pixel_avg_variance32x32 avx2 neon ssse3/;
+ specialize qw/aom_sub_pixel_avg_variance32x16 avx2 neon ssse3/;
+ specialize qw/aom_sub_pixel_avg_variance16x32 neon ssse3/;
+ specialize qw/aom_sub_pixel_avg_variance16x16 neon ssse3/;
+ specialize qw/aom_sub_pixel_avg_variance16x8 neon ssse3/;
+ specialize qw/aom_sub_pixel_avg_variance8x16 neon ssse3/;
+ specialize qw/aom_sub_pixel_avg_variance8x8 neon ssse3/;
+ specialize qw/aom_sub_pixel_avg_variance8x4 neon ssse3/;
+ specialize qw/aom_sub_pixel_avg_variance4x8 neon ssse3/;
+ specialize qw/aom_sub_pixel_avg_variance4x4 neon ssse3/;
if (aom_config("CONFIG_REALTIME_ONLY") ne "yes") {
specialize qw/aom_variance4x16 neon neon_dotprod sse2/;
@@ -1448,18 +1451,18 @@ if (aom_config("CONFIG_AV1_ENCODER") eq "yes") {
specialize qw/aom_variance16x64 neon neon_dotprod sse2 avx2/;
specialize qw/aom_variance64x16 neon neon_dotprod sse2 avx2/;
- specialize qw/aom_sub_pixel_variance4x16 neon sse2 ssse3/;
- specialize qw/aom_sub_pixel_variance16x4 neon avx2 sse2 ssse3/;
- specialize qw/aom_sub_pixel_variance8x32 neon sse2 ssse3/;
- specialize qw/aom_sub_pixel_variance32x8 neon sse2 ssse3/;
- specialize qw/aom_sub_pixel_variance16x64 neon avx2 sse2 ssse3/;
- specialize qw/aom_sub_pixel_variance64x16 neon sse2 ssse3/;
- specialize qw/aom_sub_pixel_avg_variance4x16 neon sse2 ssse3/;
- specialize qw/aom_sub_pixel_avg_variance16x4 neon sse2 ssse3/;
- specialize qw/aom_sub_pixel_avg_variance8x32 neon sse2 ssse3/;
- specialize qw/aom_sub_pixel_avg_variance32x8 neon sse2 ssse3/;
- specialize qw/aom_sub_pixel_avg_variance16x64 neon sse2 ssse3/;
- specialize qw/aom_sub_pixel_avg_variance64x16 neon sse2 ssse3/;
+ specialize qw/aom_sub_pixel_variance4x16 neon ssse3/;
+ specialize qw/aom_sub_pixel_variance16x4 neon avx2 ssse3/;
+ specialize qw/aom_sub_pixel_variance8x32 neon ssse3/;
+ specialize qw/aom_sub_pixel_variance32x8 neon ssse3/;
+ specialize qw/aom_sub_pixel_variance16x64 neon avx2 ssse3/;
+ specialize qw/aom_sub_pixel_variance64x16 neon ssse3/;
+ specialize qw/aom_sub_pixel_avg_variance4x16 neon ssse3/;
+ specialize qw/aom_sub_pixel_avg_variance16x4 neon ssse3/;
+ specialize qw/aom_sub_pixel_avg_variance8x32 neon ssse3/;
+ specialize qw/aom_sub_pixel_avg_variance32x8 neon ssse3/;
+ specialize qw/aom_sub_pixel_avg_variance16x64 neon ssse3/;
+ specialize qw/aom_sub_pixel_avg_variance64x16 neon ssse3/;
specialize qw/aom_dist_wtd_sub_pixel_avg_variance4x16 neon ssse3/;
specialize qw/aom_dist_wtd_sub_pixel_avg_variance16x4 neon ssse3/;
@@ -1789,11 +1792,14 @@ if (aom_config("CONFIG_AV1_ENCODER") eq "yes") {
# Flow estimation library
if (aom_config("CONFIG_REALTIME_ONLY") ne "yes") {
- add_proto qw/double av1_compute_cross_correlation/, "const unsigned char *frame1, int stride1, int x1, int y1, const unsigned char *frame2, int stride2, int x2, int y2";
- specialize qw/av1_compute_cross_correlation sse4_1 avx2/;
+ add_proto qw/bool aom_compute_mean_stddev/, "const unsigned char *frame, int stride, int x, int y, double *mean, double *one_over_stddev";
+ specialize qw/aom_compute_mean_stddev sse4_1 avx2/;
+
+ add_proto qw/double aom_compute_correlation/, "const unsigned char *frame1, int stride1, int x1, int y1, double mean1, double one_over_stddev1, const unsigned char *frame2, int stride2, int x2, int y2, double mean2, double one_over_stddev2";
+ specialize qw/aom_compute_correlation sse4_1 avx2/;
add_proto qw/void aom_compute_flow_at_point/, "const uint8_t *src, const uint8_t *ref, int x, int y, int width, int height, int stride, double *u, double *v";
- specialize qw/aom_compute_flow_at_point sse4_1 neon/;
+ specialize qw/aom_compute_flow_at_point sse4_1 avx2 neon/;
}
} # CONFIG_AV1_ENCODER
diff --git a/third_party/aom/aom_dsp/arm/aom_convolve8_neon_dotprod.c b/third_party/aom/aom_dsp/arm/aom_convolve8_neon_dotprod.c
index ac0a6efd00..c82125ba17 100644
--- a/third_party/aom/aom_dsp/arm/aom_convolve8_neon_dotprod.c
+++ b/third_party/aom/aom_dsp/arm/aom_convolve8_neon_dotprod.c
@@ -267,8 +267,6 @@ void aom_convolve8_vert_neon_dotprod(const uint8_t *src, ptrdiff_t src_stride,
const int32x4_t correction = vdupq_n_s32((int32_t)vaddvq_s16(correct_tmp));
const uint8x8_t range_limit = vdup_n_u8(128);
const uint8x16x3_t merge_block_tbl = vld1q_u8_x3(dot_prod_merge_block_tbl);
- uint8x8_t t0, t1, t2, t3, t4, t5, t6;
- int8x8_t s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10;
int8x16x2_t samples_LUT;
assert((intptr_t)dst % 4 == 0);
@@ -282,46 +280,39 @@ void aom_convolve8_vert_neon_dotprod(const uint8_t *src, ptrdiff_t src_stride,
if (w == 4) {
const uint8x16_t tran_concat_tbl = vld1q_u8(dot_prod_tran_concat_tbl);
- int8x16_t s0123, s1234, s2345, s3456, s4567, s5678, s6789, s78910;
- int16x4_t d0, d1, d2, d3;
- uint8x8_t d01, d23;
+ uint8x8_t t0, t1, t2, t3, t4, t5, t6;
load_u8_8x7(src, src_stride, &t0, &t1, &t2, &t3, &t4, &t5, &t6);
src += 7 * src_stride;
/* Clamp sample range to [-128, 127] for 8-bit signed dot product. */
- s0 = vreinterpret_s8_u8(vsub_u8(t0, range_limit));
- s1 = vreinterpret_s8_u8(vsub_u8(t1, range_limit));
- s2 = vreinterpret_s8_u8(vsub_u8(t2, range_limit));
- s3 = vreinterpret_s8_u8(vsub_u8(t3, range_limit));
- s4 = vreinterpret_s8_u8(vsub_u8(t4, range_limit));
- s5 = vreinterpret_s8_u8(vsub_u8(t5, range_limit));
- s6 = vreinterpret_s8_u8(vsub_u8(t6, range_limit));
- s7 = vdup_n_s8(0);
- s8 = vdup_n_s8(0);
- s9 = vdup_n_s8(0);
+ int8x8_t s0 = vreinterpret_s8_u8(vsub_u8(t0, range_limit));
+ int8x8_t s1 = vreinterpret_s8_u8(vsub_u8(t1, range_limit));
+ int8x8_t s2 = vreinterpret_s8_u8(vsub_u8(t2, range_limit));
+ int8x8_t s3 = vreinterpret_s8_u8(vsub_u8(t3, range_limit));
+ int8x8_t s4 = vreinterpret_s8_u8(vsub_u8(t4, range_limit));
+ int8x8_t s5 = vreinterpret_s8_u8(vsub_u8(t5, range_limit));
+ int8x8_t s6 = vreinterpret_s8_u8(vsub_u8(t6, range_limit));
/* This operation combines a conventional transpose and the sample permute
* (see horizontal case) required before computing the dot product.
*/
+ int8x16_t s0123, s1234, s2345, s3456;
transpose_concat_4x4(s0, s1, s2, s3, &s0123, tran_concat_tbl);
transpose_concat_4x4(s1, s2, s3, s4, &s1234, tran_concat_tbl);
transpose_concat_4x4(s2, s3, s4, s5, &s2345, tran_concat_tbl);
transpose_concat_4x4(s3, s4, s5, s6, &s3456, tran_concat_tbl);
- transpose_concat_4x4(s4, s5, s6, s7, &s4567, tran_concat_tbl);
- transpose_concat_4x4(s5, s6, s7, s8, &s5678, tran_concat_tbl);
- transpose_concat_4x4(s6, s7, s8, s9, &s6789, tran_concat_tbl);
do {
uint8x8_t t7, t8, t9, t10;
-
load_u8_8x4(src, src_stride, &t7, &t8, &t9, &t10);
- s7 = vreinterpret_s8_u8(vsub_u8(t7, range_limit));
- s8 = vreinterpret_s8_u8(vsub_u8(t8, range_limit));
- s9 = vreinterpret_s8_u8(vsub_u8(t9, range_limit));
- s10 = vreinterpret_s8_u8(vsub_u8(t10, range_limit));
+ int8x8_t s7 = vreinterpret_s8_u8(vsub_u8(t7, range_limit));
+ int8x8_t s8 = vreinterpret_s8_u8(vsub_u8(t8, range_limit));
+ int8x8_t s9 = vreinterpret_s8_u8(vsub_u8(t9, range_limit));
+ int8x8_t s10 = vreinterpret_s8_u8(vsub_u8(t10, range_limit));
+ int8x16_t s4567, s5678, s6789, s78910;
transpose_concat_4x4(s7, s8, s9, s10, &s78910, tran_concat_tbl);
/* Merge new data into block from previous iteration. */
@@ -331,12 +322,13 @@ void aom_convolve8_vert_neon_dotprod(const uint8_t *src, ptrdiff_t src_stride,
s5678 = vqtbl2q_s8(samples_LUT, merge_block_tbl.val[1]);
s6789 = vqtbl2q_s8(samples_LUT, merge_block_tbl.val[2]);
- d0 = convolve8_4_sdot_partial(s0123, s4567, correction, filter);
- d1 = convolve8_4_sdot_partial(s1234, s5678, correction, filter);
- d2 = convolve8_4_sdot_partial(s2345, s6789, correction, filter);
- d3 = convolve8_4_sdot_partial(s3456, s78910, correction, filter);
- d01 = vqrshrun_n_s16(vcombine_s16(d0, d1), FILTER_BITS);
- d23 = vqrshrun_n_s16(vcombine_s16(d2, d3), FILTER_BITS);
+ int16x4_t d0 = convolve8_4_sdot_partial(s0123, s4567, correction, filter);
+ int16x4_t d1 = convolve8_4_sdot_partial(s1234, s5678, correction, filter);
+ int16x4_t d2 = convolve8_4_sdot_partial(s2345, s6789, correction, filter);
+ int16x4_t d3 =
+ convolve8_4_sdot_partial(s3456, s78910, correction, filter);
+ uint8x8_t d01 = vqrshrun_n_s16(vcombine_s16(d0, d1), FILTER_BITS);
+ uint8x8_t d23 = vqrshrun_n_s16(vcombine_s16(d2, d3), FILTER_BITS);
store_u8x4_strided_x2(dst + 0 * dst_stride, dst_stride, d01);
store_u8x4_strided_x2(dst + 2 * dst_stride, dst_stride, d23);
@@ -354,37 +346,30 @@ void aom_convolve8_vert_neon_dotprod(const uint8_t *src, ptrdiff_t src_stride,
} while (h != 0);
} else {
const uint8x16x2_t tran_concat_tbl = vld1q_u8_x2(dot_prod_tran_concat_tbl);
- int8x16_t s0123_lo, s0123_hi, s1234_lo, s1234_hi, s2345_lo, s2345_hi,
- s3456_lo, s3456_hi, s4567_lo, s4567_hi, s5678_lo, s5678_hi, s6789_lo,
- s6789_hi, s78910_lo, s78910_hi;
- uint8x8_t d0, d1, d2, d3;
- const uint8_t *s;
- uint8_t *d;
- int height;
do {
- height = h;
- s = src;
- d = dst;
+ int height = h;
+ const uint8_t *s = src;
+ uint8_t *d = dst;
+ uint8x8_t t0, t1, t2, t3, t4, t5, t6;
load_u8_8x7(s, src_stride, &t0, &t1, &t2, &t3, &t4, &t5, &t6);
s += 7 * src_stride;
/* Clamp sample range to [-128, 127] for 8-bit signed dot product. */
- s0 = vreinterpret_s8_u8(vsub_u8(t0, range_limit));
- s1 = vreinterpret_s8_u8(vsub_u8(t1, range_limit));
- s2 = vreinterpret_s8_u8(vsub_u8(t2, range_limit));
- s3 = vreinterpret_s8_u8(vsub_u8(t3, range_limit));
- s4 = vreinterpret_s8_u8(vsub_u8(t4, range_limit));
- s5 = vreinterpret_s8_u8(vsub_u8(t5, range_limit));
- s6 = vreinterpret_s8_u8(vsub_u8(t6, range_limit));
- s7 = vdup_n_s8(0);
- s8 = vdup_n_s8(0);
- s9 = vdup_n_s8(0);
+ int8x8_t s0 = vreinterpret_s8_u8(vsub_u8(t0, range_limit));
+ int8x8_t s1 = vreinterpret_s8_u8(vsub_u8(t1, range_limit));
+ int8x8_t s2 = vreinterpret_s8_u8(vsub_u8(t2, range_limit));
+ int8x8_t s3 = vreinterpret_s8_u8(vsub_u8(t3, range_limit));
+ int8x8_t s4 = vreinterpret_s8_u8(vsub_u8(t4, range_limit));
+ int8x8_t s5 = vreinterpret_s8_u8(vsub_u8(t5, range_limit));
+ int8x8_t s6 = vreinterpret_s8_u8(vsub_u8(t6, range_limit));
/* This operation combines a conventional transpose and the sample permute
* (see horizontal case) required before computing the dot product.
*/
+ int8x16_t s0123_lo, s0123_hi, s1234_lo, s1234_hi, s2345_lo, s2345_hi,
+ s3456_lo, s3456_hi;
transpose_concat_8x4(s0, s1, s2, s3, &s0123_lo, &s0123_hi,
tran_concat_tbl);
transpose_concat_8x4(s1, s2, s3, s4, &s1234_lo, &s1234_hi,
@@ -393,23 +378,18 @@ void aom_convolve8_vert_neon_dotprod(const uint8_t *src, ptrdiff_t src_stride,
tran_concat_tbl);
transpose_concat_8x4(s3, s4, s5, s6, &s3456_lo, &s3456_hi,
tran_concat_tbl);
- transpose_concat_8x4(s4, s5, s6, s7, &s4567_lo, &s4567_hi,
- tran_concat_tbl);
- transpose_concat_8x4(s5, s6, s7, s8, &s5678_lo, &s5678_hi,
- tran_concat_tbl);
- transpose_concat_8x4(s6, s7, s8, s9, &s6789_lo, &s6789_hi,
- tran_concat_tbl);
do {
uint8x8_t t7, t8, t9, t10;
-
load_u8_8x4(s, src_stride, &t7, &t8, &t9, &t10);
- s7 = vreinterpret_s8_u8(vsub_u8(t7, range_limit));
- s8 = vreinterpret_s8_u8(vsub_u8(t8, range_limit));
- s9 = vreinterpret_s8_u8(vsub_u8(t9, range_limit));
- s10 = vreinterpret_s8_u8(vsub_u8(t10, range_limit));
+ int8x8_t s7 = vreinterpret_s8_u8(vsub_u8(t7, range_limit));
+ int8x8_t s8 = vreinterpret_s8_u8(vsub_u8(t8, range_limit));
+ int8x8_t s9 = vreinterpret_s8_u8(vsub_u8(t9, range_limit));
+ int8x8_t s10 = vreinterpret_s8_u8(vsub_u8(t10, range_limit));
+ int8x16_t s4567_lo, s4567_hi, s5678_lo, s5678_hi, s6789_lo, s6789_hi,
+ s78910_lo, s78910_hi;
transpose_concat_8x4(s7, s8, s9, s10, &s78910_lo, &s78910_hi,
tran_concat_tbl);
@@ -426,14 +406,14 @@ void aom_convolve8_vert_neon_dotprod(const uint8_t *src, ptrdiff_t src_stride,
s5678_hi = vqtbl2q_s8(samples_LUT, merge_block_tbl.val[1]);
s6789_hi = vqtbl2q_s8(samples_LUT, merge_block_tbl.val[2]);
- d0 = convolve8_8_sdot_partial(s0123_lo, s4567_lo, s0123_hi, s4567_hi,
- correction, filter);
- d1 = convolve8_8_sdot_partial(s1234_lo, s5678_lo, s1234_hi, s5678_hi,
- correction, filter);
- d2 = convolve8_8_sdot_partial(s2345_lo, s6789_lo, s2345_hi, s6789_hi,
- correction, filter);
- d3 = convolve8_8_sdot_partial(s3456_lo, s78910_lo, s3456_hi, s78910_hi,
- correction, filter);
+ uint8x8_t d0 = convolve8_8_sdot_partial(s0123_lo, s4567_lo, s0123_hi,
+ s4567_hi, correction, filter);
+ uint8x8_t d1 = convolve8_8_sdot_partial(s1234_lo, s5678_lo, s1234_hi,
+ s5678_hi, correction, filter);
+ uint8x8_t d2 = convolve8_8_sdot_partial(s2345_lo, s6789_lo, s2345_hi,
+ s6789_hi, correction, filter);
+ uint8x8_t d3 = convolve8_8_sdot_partial(s3456_lo, s78910_lo, s3456_hi,
+ s78910_hi, correction, filter);
store_u8_8x4(d, dst_stride, d0, d1, d2, d3);
diff --git a/third_party/aom/aom_dsp/arm/aom_convolve8_neon_i8mm.c b/third_party/aom/aom_dsp/arm/aom_convolve8_neon_i8mm.c
index c314c0a192..df6e4d2ab5 100644
--- a/third_party/aom/aom_dsp/arm/aom_convolve8_neon_i8mm.c
+++ b/third_party/aom/aom_dsp/arm/aom_convolve8_neon_i8mm.c
@@ -15,7 +15,6 @@
#include <string.h>
#include "config/aom_config.h"
-#include "config/aom_dsp_rtcd.h"
#include "aom/aom_integer.h"
#include "aom_dsp/aom_dsp_common.h"
@@ -246,7 +245,6 @@ void aom_convolve8_vert_neon_i8mm(const uint8_t *src, ptrdiff_t src_stride,
int h) {
const int8x8_t filter = vmovn_s16(vld1q_s16(filter_y));
const uint8x16x3_t merge_block_tbl = vld1q_u8_x3(dot_prod_merge_block_tbl);
- uint8x8_t s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10;
uint8x16x2_t samples_LUT;
assert((intptr_t)dst % 4 == 0);
@@ -260,31 +258,25 @@ void aom_convolve8_vert_neon_i8mm(const uint8_t *src, ptrdiff_t src_stride,
if (w == 4) {
const uint8x16_t tran_concat_tbl = vld1q_u8(dot_prod_tran_concat_tbl);
- uint8x16_t s0123, s1234, s2345, s3456, s4567, s5678, s6789, s78910;
- int16x4_t d0, d1, d2, d3;
- uint8x8_t d01, d23;
+ uint8x8_t s0, s1, s2, s3, s4, s5, s6;
load_u8_8x7(src, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6);
src += 7 * src_stride;
- s7 = vdup_n_u8(0);
- s8 = vdup_n_u8(0);
- s9 = vdup_n_u8(0);
-
/* This operation combines a conventional transpose and the sample permute
* (see horizontal case) required before computing the dot product.
*/
+ uint8x16_t s0123, s1234, s2345, s3456;
transpose_concat_4x4(s0, s1, s2, s3, &s0123, tran_concat_tbl);
transpose_concat_4x4(s1, s2, s3, s4, &s1234, tran_concat_tbl);
transpose_concat_4x4(s2, s3, s4, s5, &s2345, tran_concat_tbl);
transpose_concat_4x4(s3, s4, s5, s6, &s3456, tran_concat_tbl);
- transpose_concat_4x4(s4, s5, s6, s7, &s4567, tran_concat_tbl);
- transpose_concat_4x4(s5, s6, s7, s8, &s5678, tran_concat_tbl);
- transpose_concat_4x4(s6, s7, s8, s9, &s6789, tran_concat_tbl);
do {
+ uint8x8_t s7, s8, s9, s10;
load_u8_8x4(src, src_stride, &s7, &s8, &s9, &s10);
+ uint8x16_t s4567, s5678, s6789, s78910;
transpose_concat_4x4(s7, s8, s9, s10, &s78910, tran_concat_tbl);
/* Merge new data into block from previous iteration. */
@@ -294,12 +286,12 @@ void aom_convolve8_vert_neon_i8mm(const uint8_t *src, ptrdiff_t src_stride,
s5678 = vqtbl2q_u8(samples_LUT, merge_block_tbl.val[1]);
s6789 = vqtbl2q_u8(samples_LUT, merge_block_tbl.val[2]);
- d0 = convolve8_4_usdot_partial(s0123, s4567, filter);
- d1 = convolve8_4_usdot_partial(s1234, s5678, filter);
- d2 = convolve8_4_usdot_partial(s2345, s6789, filter);
- d3 = convolve8_4_usdot_partial(s3456, s78910, filter);
- d01 = vqrshrun_n_s16(vcombine_s16(d0, d1), FILTER_BITS);
- d23 = vqrshrun_n_s16(vcombine_s16(d2, d3), FILTER_BITS);
+ int16x4_t d0 = convolve8_4_usdot_partial(s0123, s4567, filter);
+ int16x4_t d1 = convolve8_4_usdot_partial(s1234, s5678, filter);
+ int16x4_t d2 = convolve8_4_usdot_partial(s2345, s6789, filter);
+ int16x4_t d3 = convolve8_4_usdot_partial(s3456, s78910, filter);
+ uint8x8_t d01 = vqrshrun_n_s16(vcombine_s16(d0, d1), FILTER_BITS);
+ uint8x8_t d23 = vqrshrun_n_s16(vcombine_s16(d2, d3), FILTER_BITS);
store_u8x4_strided_x2(dst + 0 * dst_stride, dst_stride, d01);
store_u8x4_strided_x2(dst + 2 * dst_stride, dst_stride, d23);
@@ -317,29 +309,21 @@ void aom_convolve8_vert_neon_i8mm(const uint8_t *src, ptrdiff_t src_stride,
} while (h != 0);
} else {
const uint8x16x2_t tran_concat_tbl = vld1q_u8_x2(dot_prod_tran_concat_tbl);
- uint8x16_t s0123_lo, s0123_hi, s1234_lo, s1234_hi, s2345_lo, s2345_hi,
- s3456_lo, s3456_hi, s4567_lo, s4567_hi, s5678_lo, s5678_hi, s6789_lo,
- s6789_hi, s78910_lo, s78910_hi;
- uint8x8_t d0, d1, d2, d3;
- const uint8_t *s;
- uint8_t *d;
- int height;
do {
- height = h;
- s = src;
- d = dst;
+ int height = h;
+ const uint8_t *s = src;
+ uint8_t *d = dst;
+ uint8x8_t s0, s1, s2, s3, s4, s5, s6;
load_u8_8x7(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6);
s += 7 * src_stride;
- s7 = vdup_n_u8(0);
- s8 = vdup_n_u8(0);
- s9 = vdup_n_u8(0);
-
/* This operation combines a conventional transpose and the sample permute
* (see horizontal case) required before computing the dot product.
*/
+ uint8x16_t s0123_lo, s0123_hi, s1234_lo, s1234_hi, s2345_lo, s2345_hi,
+ s3456_lo, s3456_hi;
transpose_concat_8x4(s0, s1, s2, s3, &s0123_lo, &s0123_hi,
tran_concat_tbl);
transpose_concat_8x4(s1, s2, s3, s4, &s1234_lo, &s1234_hi,
@@ -348,16 +332,13 @@ void aom_convolve8_vert_neon_i8mm(const uint8_t *src, ptrdiff_t src_stride,
tran_concat_tbl);
transpose_concat_8x4(s3, s4, s5, s6, &s3456_lo, &s3456_hi,
tran_concat_tbl);
- transpose_concat_8x4(s4, s5, s6, s7, &s4567_lo, &s4567_hi,
- tran_concat_tbl);
- transpose_concat_8x4(s5, s6, s7, s8, &s5678_lo, &s5678_hi,
- tran_concat_tbl);
- transpose_concat_8x4(s6, s7, s8, s9, &s6789_lo, &s6789_hi,
- tran_concat_tbl);
do {
+ uint8x8_t s7, s8, s9, s10;
load_u8_8x4(s, src_stride, &s7, &s8, &s9, &s10);
+ uint8x16_t s4567_lo, s4567_hi, s5678_lo, s5678_hi, s6789_lo, s6789_hi,
+ s78910_lo, s78910_hi;
transpose_concat_8x4(s7, s8, s9, s10, &s78910_lo, &s78910_hi,
tran_concat_tbl);
@@ -374,14 +355,14 @@ void aom_convolve8_vert_neon_i8mm(const uint8_t *src, ptrdiff_t src_stride,
s5678_hi = vqtbl2q_u8(samples_LUT, merge_block_tbl.val[1]);
s6789_hi = vqtbl2q_u8(samples_LUT, merge_block_tbl.val[2]);
- d0 = convolve8_8_usdot_partial(s0123_lo, s4567_lo, s0123_hi, s4567_hi,
- filter);
- d1 = convolve8_8_usdot_partial(s1234_lo, s5678_lo, s1234_hi, s5678_hi,
- filter);
- d2 = convolve8_8_usdot_partial(s2345_lo, s6789_lo, s2345_hi, s6789_hi,
- filter);
- d3 = convolve8_8_usdot_partial(s3456_lo, s78910_lo, s3456_hi, s78910_hi,
- filter);
+ uint8x8_t d0 = convolve8_8_usdot_partial(s0123_lo, s4567_lo, s0123_hi,
+ s4567_hi, filter);
+ uint8x8_t d1 = convolve8_8_usdot_partial(s1234_lo, s5678_lo, s1234_hi,
+ s5678_hi, filter);
+ uint8x8_t d2 = convolve8_8_usdot_partial(s2345_lo, s6789_lo, s2345_hi,
+ s6789_hi, filter);
+ uint8x8_t d3 = convolve8_8_usdot_partial(s3456_lo, s78910_lo, s3456_hi,
+ s78910_hi, filter);
store_u8_8x4(d, dst_stride, d0, d1, d2, d3);
diff --git a/third_party/aom/aom_dsp/arm/aom_filter.h b/third_party/aom/aom_dsp/arm/aom_filter.h
new file mode 100644
index 0000000000..9972d064fc
--- /dev/null
+++ b/third_party/aom/aom_dsp/arm/aom_filter.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2024, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#ifndef AOM_AOM_DSP_ARM_AOM_FILTER_H_
+#define AOM_AOM_DSP_ARM_AOM_FILTER_H_
+
+#include <stdint.h>
+
+#include "config/aom_config.h"
+#include "config/aom_dsp_rtcd.h"
+
+static INLINE int get_filter_taps_convolve8(const int16_t *filter) {
+ if (filter[0] | filter[7]) {
+ return 8;
+ }
+ if (filter[1] | filter[6]) {
+ return 6;
+ }
+ if (filter[2] | filter[5]) {
+ return 4;
+ }
+ return 2;
+}
+
+#endif // AOM_AOM_DSP_ARM_AOM_FILTER_H_
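get_filter_taps_convolve8() above classifies an 8-entry kernel by which symmetric outer coefficients are non-zero, so callers can route short filters to cheaper convolution paths. A hedged dispatch sketch; the *_2tap/_4tap/_8tap kernels named below are hypothetical placeholders, not functions added by this patch:

    #include <stddef.h>
    #include <stdint.h>
    #include "aom_dsp/arm/aom_filter.h"

    /* Illustrative dispatch on the reported tap count; the three kernels are
     * assumed to be declared elsewhere. Zero outer taps contribute nothing,
     * so 6-tap filters can reuse the general 8-tap path. */
    static void convolve_horiz_dispatch(const uint8_t *src, ptrdiff_t src_stride,
                                        uint8_t *dst, ptrdiff_t dst_stride,
                                        const int16_t *filter_x, int w, int h) {
      switch (get_filter_taps_convolve8(filter_x)) {
        case 2:
          convolve_horiz_2tap(src, src_stride, dst, dst_stride, filter_x, w, h);
          break;
        case 4:
          convolve_horiz_4tap(src, src_stride, dst, dst_stride, filter_x, w, h);
          break;
        default:  /* 6 or 8 taps */
          convolve_horiz_8tap(src, src_stride, dst, dst_stride, filter_x, w, h);
          break;
      }
    }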
diff --git a/third_party/aom/aom_dsp/arm/aom_neon_sve2_bridge.h b/third_party/aom/aom_dsp/arm/aom_neon_sve2_bridge.h
new file mode 100644
index 0000000000..6e7d2d6365
--- /dev/null
+++ b/third_party/aom/aom_dsp/arm/aom_neon_sve2_bridge.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2024, Alliance for Open Media. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef AOM_AOM_DSP_ARM_AOM_NEON_SVE2_BRIDGE_H_
+#define AOM_AOM_DSP_ARM_AOM_NEON_SVE2_BRIDGE_H_
+
+#include <arm_neon_sve_bridge.h>
+
+#include "config/aom_dsp_rtcd.h"
+#include "config/aom_config.h"
+
+// We can access instructions exclusive to the SVE2 instruction set from a
+// predominantly Neon context by making use of the Neon-SVE bridge intrinsics
+// to reinterpret Neon vectors as SVE vectors - with the high part of the SVE
+// vector (if it's longer than 128 bits) being "don't care".
+
+// While sub-optimal on machines that have SVE vector length > 128-bit - as the
+// remainder of the vector is unused - this approach is still beneficial when
+// compared to a Neon-only solution.
+
+static INLINE int16x8_t aom_tbl2_s16(int16x8_t s0, int16x8_t s1,
+ uint16x8_t tbl) {
+ svint16x2_t samples = svcreate2_s16(svset_neonq_s16(svundef_s16(), s0),
+ svset_neonq_s16(svundef_s16(), s1));
+ return svget_neonq_s16(
+ svtbl2_s16(samples, svset_neonq_u16(svundef_u16(), tbl)));
+}
+
+#endif // AOM_AOM_DSP_ARM_AOM_NEON_SVE2_BRIDGE_H_
diff --git a/third_party/aom/aom_dsp/arm/dot_sve.h b/third_party/aom/aom_dsp/arm/aom_neon_sve_bridge.h
index cf49f23606..3da80e22ba 100644
--- a/third_party/aom/aom_dsp/arm/dot_sve.h
+++ b/third_party/aom/aom_dsp/arm/aom_neon_sve_bridge.h
@@ -8,16 +8,15 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef AOM_AOM_DSP_ARM_DOT_SVE_H_
-#define AOM_AOM_DSP_ARM_DOT_SVE_H_
+#ifndef AOM_AOM_DSP_ARM_AOM_NEON_SVE_BRIDGE_H_
+#define AOM_AOM_DSP_ARM_AOM_NEON_SVE_BRIDGE_H_
#include <arm_neon_sve_bridge.h>
#include "config/aom_dsp_rtcd.h"
#include "config/aom_config.h"
-// Dot product instructions operating on 16-bit input elements are exclusive to
-// the SVE instruction set. However, we can access these instructions from a
+// We can access instructions exclusive to the SVE instruction set from a
// predominantly Neon context by making use of the Neon-SVE bridge intrinsics
// to reinterpret Neon vectors as SVE vectors - with the high part of the SVE
// vector (if it's longer than 128 bits) being "don't care".
@@ -39,4 +38,19 @@ static INLINE int64x2_t aom_sdotq_s16(int64x2_t acc, int16x8_t x, int16x8_t y) {
svset_neonq_s16(svundef_s16(), y)));
}
-#endif // AOM_AOM_DSP_ARM_DOT_SVE_H_
+#define aom_svdot_lane_s16(sum, s0, f, lane) \
+ svget_neonq_s64(svdot_lane_s64(svset_neonq_s64(svundef_s64(), sum), \
+ svset_neonq_s16(svundef_s16(), s0), \
+ svset_neonq_s16(svundef_s16(), f), lane))
+
+static INLINE uint16x8_t aom_tbl_u16(uint16x8_t s, uint16x8_t tbl) {
+ return svget_neonq_u16(svtbl_u16(svset_neonq_u16(svundef_u16(), s),
+ svset_neonq_u16(svundef_u16(), tbl)));
+}
+
+static INLINE int16x8_t aom_tbl_s16(int16x8_t s, uint16x8_t tbl) {
+ return svget_neonq_s16(svtbl_s16(svset_neonq_s16(svundef_s16(), s),
+ svset_neonq_u16(svundef_u16(), tbl)));
+}
+
+#endif // AOM_AOM_DSP_ARM_AOM_NEON_SVE_BRIDGE_H_
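The renamed bridge header generalizes the earlier dot_sve.h comment: Neon vectors are lifted into SVE registers with svset_neonq, an SVE-only instruction is applied, and the result is pulled back with svget_neonq. A minimal usage sketch of two helpers provided by this header (illustrative only, not part of the patch):

    #include <arm_neon.h>
    #include "aom_dsp/arm/aom_neon_sve_bridge.h"

    /* Accumulate a 16-bit dot product into 64-bit lanes via the SVE SDOT
     * bridge helper, then reduce to a scalar with Neon. */
    static int64_t dot_product_s16x8(int16x8_t x, int16x8_t y) {
      int64x2_t acc = aom_sdotq_s16(vdupq_n_s64(0), x, y);
      return vaddvq_s64(acc);
    }

    /* Permute lanes with the SVE TBL bridge helper. Indices 0-7 stay within
     * the defined low 128 bits of the underlying SVE register, so this holds
     * for any SVE vector length. */
    static int16x8_t reverse_lanes_s16(int16x8_t s) {
      const uint16_t idx[8] = { 7, 6, 5, 4, 3, 2, 1, 0 };
      return aom_tbl_s16(s, vld1q_u16(idx));
    }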
diff --git a/third_party/aom/aom_dsp/arm/avg_sve.c b/third_party/aom/aom_dsp/arm/avg_sve.c
index bbf5a9447c..57a546501a 100644
--- a/third_party/aom/aom_dsp/arm/avg_sve.c
+++ b/third_party/aom/aom_dsp/arm/avg_sve.c
@@ -14,7 +14,7 @@
#include "config/aom_config.h"
#include "config/aom_dsp_rtcd.h"
#include "aom/aom_integer.h"
-#include "aom_dsp/arm/dot_sve.h"
+#include "aom_dsp/arm/aom_neon_sve_bridge.h"
#include "aom_dsp/arm/mem_neon.h"
#include "aom_ports/mem.h"
diff --git a/third_party/aom/aom_dsp/arm/blk_sse_sum_sve.c b/third_party/aom/aom_dsp/arm/blk_sse_sum_sve.c
index 18bdc5dbfe..f538346d8b 100644
--- a/third_party/aom/aom_dsp/arm/blk_sse_sum_sve.c
+++ b/third_party/aom/aom_dsp/arm/blk_sse_sum_sve.c
@@ -15,7 +15,7 @@
#include "config/aom_dsp_rtcd.h"
#include "config/aom_config.h"
-#include "aom_dsp/arm/dot_sve.h"
+#include "aom_dsp/arm/aom_neon_sve_bridge.h"
#include "aom_dsp/arm/mem_neon.h"
static INLINE void get_blk_sse_sum_4xh_sve(const int16_t *data, int stride,
diff --git a/third_party/aom/aom_dsp/arm/highbd_convolve8_sve.c b/third_party/aom/aom_dsp/arm/highbd_convolve8_sve.c
new file mode 100644
index 0000000000..e57c41a0b0
--- /dev/null
+++ b/third_party/aom/aom_dsp/arm/highbd_convolve8_sve.c
@@ -0,0 +1,681 @@
+/*
+ * Copyright (c) 2024, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <arm_neon.h>
+#include <assert.h>
+#include <stdint.h>
+
+#include "config/aom_config.h"
+#include "config/aom_dsp_rtcd.h"
+
+#include "aom_dsp/arm/aom_neon_sve_bridge.h"
+#include "aom_dsp/arm/aom_filter.h"
+#include "aom_dsp/arm/mem_neon.h"
+
+static INLINE uint16x4_t highbd_convolve8_4_h(int16x8_t s[4], int16x8_t filter,
+ uint16x4_t max) {
+ int64x2_t sum[4];
+
+ sum[0] = aom_sdotq_s16(vdupq_n_s64(0), s[0], filter);
+ sum[1] = aom_sdotq_s16(vdupq_n_s64(0), s[1], filter);
+ sum[2] = aom_sdotq_s16(vdupq_n_s64(0), s[2], filter);
+ sum[3] = aom_sdotq_s16(vdupq_n_s64(0), s[3], filter);
+
+ int64x2_t sum01 = vpaddq_s64(sum[0], sum[1]);
+ int64x2_t sum23 = vpaddq_s64(sum[2], sum[3]);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+
+ uint16x4_t res = vqrshrun_n_s32(sum0123, FILTER_BITS);
+ return vmin_u16(res, max);
+}
+
+static INLINE uint16x8_t highbd_convolve8_8_h(int16x8_t s[8], int16x8_t filter,
+ uint16x8_t max) {
+ int64x2_t sum[8];
+
+ sum[0] = aom_sdotq_s16(vdupq_n_s64(0), s[0], filter);
+ sum[1] = aom_sdotq_s16(vdupq_n_s64(0), s[1], filter);
+ sum[2] = aom_sdotq_s16(vdupq_n_s64(0), s[2], filter);
+ sum[3] = aom_sdotq_s16(vdupq_n_s64(0), s[3], filter);
+ sum[4] = aom_sdotq_s16(vdupq_n_s64(0), s[4], filter);
+ sum[5] = aom_sdotq_s16(vdupq_n_s64(0), s[5], filter);
+ sum[6] = aom_sdotq_s16(vdupq_n_s64(0), s[6], filter);
+ sum[7] = aom_sdotq_s16(vdupq_n_s64(0), s[7], filter);
+
+ int64x2_t sum01 = vpaddq_s64(sum[0], sum[1]);
+ int64x2_t sum23 = vpaddq_s64(sum[2], sum[3]);
+ int64x2_t sum45 = vpaddq_s64(sum[4], sum[5]);
+ int64x2_t sum67 = vpaddq_s64(sum[6], sum[7]);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+ int32x4_t sum4567 = vcombine_s32(vmovn_s64(sum45), vmovn_s64(sum67));
+
+ uint16x8_t res = vcombine_u16(vqrshrun_n_s32(sum0123, FILTER_BITS),
+ vqrshrun_n_s32(sum4567, FILTER_BITS));
+ return vminq_u16(res, max);
+}
+
+static INLINE void highbd_convolve8_horiz_8tap_sve(
+ const uint16_t *src, ptrdiff_t src_stride, uint16_t *dst,
+ ptrdiff_t dst_stride, const int16_t *filter_x, int width, int height,
+ int bd) {
+ const int16x8_t filter = vld1q_s16(filter_x);
+
+ if (width == 4) {
+ const uint16x4_t max = vdup_n_u16((1 << bd) - 1);
+ const int16_t *s = (const int16_t *)src;
+ uint16_t *d = dst;
+
+ do {
+ int16x8_t s0[4], s1[4], s2[4], s3[4];
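+      // Each row is loaded as four overlapping 8-sample windows (x offsets
+      // 0..3); window j supplies all eight taps for output pixel j.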
+ load_s16_8x4(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3]);
+ load_s16_8x4(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3]);
+ load_s16_8x4(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3]);
+ load_s16_8x4(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3]);
+
+ uint16x4_t d0 = highbd_convolve8_4_h(s0, filter, max);
+ uint16x4_t d1 = highbd_convolve8_4_h(s1, filter, max);
+ uint16x4_t d2 = highbd_convolve8_4_h(s2, filter, max);
+ uint16x4_t d3 = highbd_convolve8_4_h(s3, filter, max);
+
+ store_u16_4x4(d, dst_stride, d0, d1, d2, d3);
+
+ s += 4 * src_stride;
+ d += 4 * dst_stride;
+ height -= 4;
+ } while (height > 0);
+ } else {
+ do {
+ const uint16x8_t max = vdupq_n_u16((1 << bd) - 1);
+ const int16_t *s = (const int16_t *)src;
+ uint16_t *d = dst;
+ int w = width;
+
+ do {
+ int16x8_t s0[8], s1[8], s2[8], s3[8];
+ load_s16_8x8(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3],
+ &s0[4], &s0[5], &s0[6], &s0[7]);
+ load_s16_8x8(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3],
+ &s1[4], &s1[5], &s1[6], &s1[7]);
+ load_s16_8x8(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3],
+ &s2[4], &s2[5], &s2[6], &s2[7]);
+ load_s16_8x8(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3],
+ &s3[4], &s3[5], &s3[6], &s3[7]);
+
+ uint16x8_t d0 = highbd_convolve8_8_h(s0, filter, max);
+ uint16x8_t d1 = highbd_convolve8_8_h(s1, filter, max);
+ uint16x8_t d2 = highbd_convolve8_8_h(s2, filter, max);
+ uint16x8_t d3 = highbd_convolve8_8_h(s3, filter, max);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ s += 8;
+ d += 8;
+ w -= 8;
+ } while (w != 0);
+ src += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height > 0);
+ }
+}
+
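+// kDotProdTbl gathers four overlapping 4-sample windows from an 8-sample
+// vector, so each half of the table lets one SVE dot product compute two
+// adjacent output pixels of the 4-tap filter. kDeinterleaveTbl undoes the
+// resulting { 0, 4, 1, 5, 2, 6, 3, 7 } output ordering in the 8-wide path.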
+// clang-format off
+DECLARE_ALIGNED(16, static const uint16_t, kDotProdTbl[16]) = {
+ 0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6,
+};
+
+DECLARE_ALIGNED(16, static const uint16_t, kDeinterleaveTbl[8]) = {
+ 0, 2, 4, 6, 1, 3, 5, 7,
+};
+// clang-format on
+
+static INLINE uint16x4_t highbd_convolve4_4_h(int16x8_t s, int16x8_t filter,
+ uint16x8x2_t permute_tbl,
+ uint16x4_t max) {
+ int16x8_t permuted_samples0 = aom_tbl_s16(s, permute_tbl.val[0]);
+ int16x8_t permuted_samples1 = aom_tbl_s16(s, permute_tbl.val[1]);
+
+ int64x2_t sum0 =
+ aom_svdot_lane_s16(vdupq_n_s64(0), permuted_samples0, filter, 0);
+ int64x2_t sum1 =
+ aom_svdot_lane_s16(vdupq_n_s64(0), permuted_samples1, filter, 0);
+
+ int32x4_t res_s32 = vcombine_s32(vmovn_s64(sum0), vmovn_s64(sum1));
+ uint16x4_t res = vqrshrun_n_s32(res_s32, FILTER_BITS);
+
+ return vmin_u16(res, max);
+}
+
+static INLINE uint16x8_t highbd_convolve4_8_h(int16x8_t s[4], int16x8_t filter,
+ uint16x8_t idx, uint16x8_t max) {
+ int64x2_t sum04 = aom_svdot_lane_s16(vdupq_n_s64(0), s[0], filter, 0);
+ int64x2_t sum15 = aom_svdot_lane_s16(vdupq_n_s64(0), s[1], filter, 0);
+ int64x2_t sum26 = aom_svdot_lane_s16(vdupq_n_s64(0), s[2], filter, 0);
+ int64x2_t sum37 = aom_svdot_lane_s16(vdupq_n_s64(0), s[3], filter, 0);
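+  // sumXY holds the results for output pixels X and Y in its two 64-bit
+  // lanes; the tbl below restores linear pixel order.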
+
+ int32x4_t res0 = vcombine_s32(vmovn_s64(sum04), vmovn_s64(sum15));
+ int32x4_t res1 = vcombine_s32(vmovn_s64(sum26), vmovn_s64(sum37));
+
+ uint16x8_t res = vcombine_u16(vqrshrun_n_s32(res0, FILTER_BITS),
+ vqrshrun_n_s32(res1, FILTER_BITS));
+
+ res = aom_tbl_u16(res, idx);
+
+ return vminq_u16(res, max);
+}
+
+static INLINE void highbd_convolve8_horiz_4tap_sve(
+ const uint16_t *src, ptrdiff_t src_stride, uint16_t *dst,
+ ptrdiff_t dst_stride, const int16_t *filter_x, int width, int height,
+ int bd) {
+ const int16x8_t filter = vcombine_s16(vld1_s16(filter_x + 2), vdup_n_s16(0));
+
+ if (width == 4) {
+ const uint16x4_t max = vdup_n_u16((1 << bd) - 1);
+ uint16x8x2_t permute_tbl = vld1q_u16_x2(kDotProdTbl);
+
+ const int16_t *s = (const int16_t *)src;
+ uint16_t *d = dst;
+
+ do {
+ int16x8_t s0, s1, s2, s3;
+ load_s16_8x4(s, src_stride, &s0, &s1, &s2, &s3);
+
+ uint16x4_t d0 = highbd_convolve4_4_h(s0, filter, permute_tbl, max);
+ uint16x4_t d1 = highbd_convolve4_4_h(s1, filter, permute_tbl, max);
+ uint16x4_t d2 = highbd_convolve4_4_h(s2, filter, permute_tbl, max);
+ uint16x4_t d3 = highbd_convolve4_4_h(s3, filter, permute_tbl, max);
+
+ store_u16_4x4(d, dst_stride, d0, d1, d2, d3);
+
+ s += 4 * src_stride;
+ d += 4 * dst_stride;
+ height -= 4;
+ } while (height > 0);
+ } else {
+ const uint16x8_t max = vdupq_n_u16((1 << bd) - 1);
+ uint16x8_t idx = vld1q_u16(kDeinterleaveTbl);
+
+ do {
+ const int16_t *s = (const int16_t *)src;
+ uint16_t *d = dst;
+ int w = width;
+
+ do {
+ int16x8_t s0[4], s1[4], s2[4], s3[4];
+ load_s16_8x4(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3]);
+ load_s16_8x4(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3]);
+ load_s16_8x4(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3]);
+ load_s16_8x4(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3]);
+
+ uint16x8_t d0 = highbd_convolve4_8_h(s0, filter, idx, max);
+ uint16x8_t d1 = highbd_convolve4_8_h(s1, filter, idx, max);
+ uint16x8_t d2 = highbd_convolve4_8_h(s2, filter, idx, max);
+ uint16x8_t d3 = highbd_convolve4_8_h(s3, filter, idx, max);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ s += 8;
+ d += 8;
+ w -= 8;
+ } while (w != 0);
+ src += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height > 0);
+ }
+}
+
+void aom_highbd_convolve8_horiz_sve(const uint8_t *src8, ptrdiff_t src_stride,
+ uint8_t *dst8, ptrdiff_t dst_stride,
+ const int16_t *filter_x, int x_step_q4,
+ const int16_t *filter_y, int y_step_q4,
+ int width, int height, int bd) {
+ assert(x_step_q4 == 16);
+ assert(width >= 4 && height >= 4);
+ (void)filter_y;
+ (void)x_step_q4;
+ (void)y_step_q4;
+
+ const uint16_t *src = CONVERT_TO_SHORTPTR(src8);
+ uint16_t *dst = CONVERT_TO_SHORTPTR(dst8);
+
+ src -= SUBPEL_TAPS / 2 - 1;
+
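+  // Short (<= 4-tap) filters store their non-zero taps at filter_x[2..5], so
+  // the 4-tap path loads the filter from filter_x + 2 and starts reading the
+  // source two samples further along than the 8-tap path.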
+ if (get_filter_taps_convolve8(filter_x) <= 4) {
+ highbd_convolve8_horiz_4tap_sve(src + 2, src_stride, dst, dst_stride,
+ filter_x, width, height, bd);
+ } else {
+ highbd_convolve8_horiz_8tap_sve(src, src_stride, dst, dst_stride, filter_x,
+ width, height, bd);
+ }
+}
+
+DECLARE_ALIGNED(16, static const uint8_t, kDotProdMergeBlockTbl[48]) = {
+ // Shift left and insert new last column in transposed 4x4 block.
+ 2, 3, 4, 5, 6, 7, 16, 17, 10, 11, 12, 13, 14, 15, 24, 25,
+ // Shift left and insert two new columns in transposed 4x4 block.
+ 4, 5, 6, 7, 16, 17, 18, 19, 12, 13, 14, 15, 24, 25, 26, 27,
+ // Shift left and insert three new columns in transposed 4x4 block.
+ 6, 7, 16, 17, 18, 19, 20, 21, 14, 15, 24, 25, 26, 27, 28, 29
+};
+
+static INLINE void transpose_concat_4x4(int16x4_t s0, int16x4_t s1,
+ int16x4_t s2, int16x4_t s3,
+ int16x8_t res[2]) {
+ // Transpose 16-bit elements and concatenate result rows as follows:
+ // s0: 00, 01, 02, 03
+ // s1: 10, 11, 12, 13
+ // s2: 20, 21, 22, 23
+ // s3: 30, 31, 32, 33
+ //
+ // res[0]: 00 10 20 30 01 11 21 31
+ // res[1]: 02 12 22 32 03 13 23 33
+
+ int16x8_t s0q = vcombine_s16(s0, vdup_n_s16(0));
+ int16x8_t s1q = vcombine_s16(s1, vdup_n_s16(0));
+ int16x8_t s2q = vcombine_s16(s2, vdup_n_s16(0));
+ int16x8_t s3q = vcombine_s16(s3, vdup_n_s16(0));
+
+ int32x4_t s01 = vreinterpretq_s32_s16(vzip1q_s16(s0q, s1q));
+ int32x4_t s23 = vreinterpretq_s32_s16(vzip1q_s16(s2q, s3q));
+
+ int32x4x2_t s0123 = vzipq_s32(s01, s23);
+
+ res[0] = vreinterpretq_s16_s32(s0123.val[0]);
+ res[1] = vreinterpretq_s16_s32(s0123.val[1]);
+}
+
+static INLINE void transpose_concat_8x4(int16x8_t s0, int16x8_t s1,
+ int16x8_t s2, int16x8_t s3,
+ int16x8_t res[4]) {
+ // Transpose 16-bit elements and concatenate result rows as follows:
+ // s0: 00, 01, 02, 03, 04, 05, 06, 07
+ // s1: 10, 11, 12, 13, 14, 15, 16, 17
+ // s2: 20, 21, 22, 23, 24, 25, 26, 27
+ // s3: 30, 31, 32, 33, 34, 35, 36, 37
+ //
+ // res_lo[0]: 00 10 20 30 01 11 21 31
+ // res_lo[1]: 02 12 22 32 03 13 23 33
+ // res_hi[0]: 04 14 24 34 05 15 25 35
+ // res_hi[1]: 06 16 26 36 07 17 27 37
+
+ int16x8x2_t tr01_16 = vzipq_s16(s0, s1);
+ int16x8x2_t tr23_16 = vzipq_s16(s2, s3);
+
+ int32x4x2_t tr01_32 = vzipq_s32(vreinterpretq_s32_s16(tr01_16.val[0]),
+ vreinterpretq_s32_s16(tr23_16.val[0]));
+ int32x4x2_t tr23_32 = vzipq_s32(vreinterpretq_s32_s16(tr01_16.val[1]),
+ vreinterpretq_s32_s16(tr23_16.val[1]));
+
+ res[0] = vreinterpretq_s16_s32(tr01_32.val[0]);
+ res[1] = vreinterpretq_s16_s32(tr01_32.val[1]);
+ res[2] = vreinterpretq_s16_s32(tr23_32.val[0]);
+ res[3] = vreinterpretq_s16_s32(tr23_32.val[1]);
+}
+
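+// Apply the byte-index tables in kDotProdMergeBlockTbl to pairs of 16-bit
+// vectors, merging rows carried over from the previous iteration with the
+// newly loaded rows.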
+static INLINE void aom_tbl2x4_s16(int16x8_t t0[4], int16x8_t t1[4],
+ uint8x16_t tbl, int16x8_t res[4]) {
+ int8x16x2_t samples0 = { vreinterpretq_s8_s16(t0[0]),
+ vreinterpretq_s8_s16(t1[0]) };
+ int8x16x2_t samples1 = { vreinterpretq_s8_s16(t0[1]),
+ vreinterpretq_s8_s16(t1[1]) };
+ int8x16x2_t samples2 = { vreinterpretq_s8_s16(t0[2]),
+ vreinterpretq_s8_s16(t1[2]) };
+ int8x16x2_t samples3 = { vreinterpretq_s8_s16(t0[3]),
+ vreinterpretq_s8_s16(t1[3]) };
+
+ res[0] = vreinterpretq_s16_s8(vqtbl2q_s8(samples0, tbl));
+ res[1] = vreinterpretq_s16_s8(vqtbl2q_s8(samples1, tbl));
+ res[2] = vreinterpretq_s16_s8(vqtbl2q_s8(samples2, tbl));
+ res[3] = vreinterpretq_s16_s8(vqtbl2q_s8(samples3, tbl));
+}
+
+static INLINE void aom_tbl2x2_s16(int16x8_t t0[2], int16x8_t t1[2],
+ uint8x16_t tbl, int16x8_t res[2]) {
+ int8x16x2_t samples0 = { vreinterpretq_s8_s16(t0[0]),
+ vreinterpretq_s8_s16(t1[0]) };
+ int8x16x2_t samples1 = { vreinterpretq_s8_s16(t0[1]),
+ vreinterpretq_s8_s16(t1[1]) };
+
+ res[0] = vreinterpretq_s16_s8(vqtbl2q_s8(samples0, tbl));
+ res[1] = vreinterpretq_s16_s8(vqtbl2q_s8(samples1, tbl));
+}
+
+static INLINE uint16x4_t highbd_convolve8_4_v(int16x8_t samples_lo[2],
+ int16x8_t samples_hi[2],
+ int16x8_t filter,
+ uint16x4_t max) {
+ int64x2_t sum[2];
+
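+  // samples_lo holds source rows 0-3 (dotted with filter lane 0, taps 0-3)
+  // and samples_hi holds rows 4-7 (lane 1, taps 4-7); the two 64-bit lanes of
+  // each accumulator correspond to two adjacent output columns.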
+ sum[0] = aom_svdot_lane_s16(vdupq_n_s64(0), samples_lo[0], filter, 0);
+ sum[0] = aom_svdot_lane_s16(sum[0], samples_hi[0], filter, 1);
+
+ sum[1] = aom_svdot_lane_s16(vdupq_n_s64(0), samples_lo[1], filter, 0);
+ sum[1] = aom_svdot_lane_s16(sum[1], samples_hi[1], filter, 1);
+
+ int32x4_t res_s32 = vcombine_s32(vmovn_s64(sum[0]), vmovn_s64(sum[1]));
+
+ uint16x4_t res = vqrshrun_n_s32(res_s32, FILTER_BITS);
+
+ return vmin_u16(res, max);
+}
+
+static INLINE uint16x8_t highbd_convolve8_8_v(int16x8_t samples_lo[4],
+ int16x8_t samples_hi[4],
+ int16x8_t filter,
+ uint16x8_t max) {
+ int64x2_t sum[4];
+
+ sum[0] = aom_svdot_lane_s16(vdupq_n_s64(0), samples_lo[0], filter, 0);
+ sum[0] = aom_svdot_lane_s16(sum[0], samples_hi[0], filter, 1);
+
+ sum[1] = aom_svdot_lane_s16(vdupq_n_s64(0), samples_lo[1], filter, 0);
+ sum[1] = aom_svdot_lane_s16(sum[1], samples_hi[1], filter, 1);
+
+ sum[2] = aom_svdot_lane_s16(vdupq_n_s64(0), samples_lo[2], filter, 0);
+ sum[2] = aom_svdot_lane_s16(sum[2], samples_hi[2], filter, 1);
+
+ sum[3] = aom_svdot_lane_s16(vdupq_n_s64(0), samples_lo[3], filter, 0);
+ sum[3] = aom_svdot_lane_s16(sum[3], samples_hi[3], filter, 1);
+
+ int32x4_t res0 = vcombine_s32(vmovn_s64(sum[0]), vmovn_s64(sum[1]));
+ int32x4_t res1 = vcombine_s32(vmovn_s64(sum[2]), vmovn_s64(sum[3]));
+
+ uint16x8_t res = vcombine_u16(vqrshrun_n_s32(res0, FILTER_BITS),
+ vqrshrun_n_s32(res1, FILTER_BITS));
+
+ return vminq_u16(res, max);
+}
+
+static INLINE void highbd_convolve8_vert_8tap_sve(
+ const uint16_t *src, ptrdiff_t src_stride, uint16_t *dst,
+ ptrdiff_t dst_stride, const int16_t *filter_y, int width, int height,
+ int bd) {
+ const int16x8_t y_filter = vld1q_s16(filter_y);
+
+ uint8x16_t merge_block_tbl[3];
+ merge_block_tbl[0] = vld1q_u8(kDotProdMergeBlockTbl);
+ merge_block_tbl[1] = vld1q_u8(kDotProdMergeBlockTbl + 16);
+ merge_block_tbl[2] = vld1q_u8(kDotProdMergeBlockTbl + 32);
+
+ if (width == 4) {
+ const uint16x4_t max = vdup_n_u16((1 << bd) - 1);
+ int16_t *s = (int16_t *)src;
+
+ int16x4_t s0, s1, s2, s3, s4, s5, s6;
+ load_s16_4x7(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6);
+ s += 7 * src_stride;
+
+ // This operation combines a conventional transpose and the sample permute
+ // required before computing the dot product.
+ int16x8_t s0123[2], s1234[2], s2345[2], s3456[2];
+ transpose_concat_4x4(s0, s1, s2, s3, s0123);
+ transpose_concat_4x4(s1, s2, s3, s4, s1234);
+ transpose_concat_4x4(s2, s3, s4, s5, s2345);
+ transpose_concat_4x4(s3, s4, s5, s6, s3456);
+
+ do {
+ int16x4_t s7, s8, s9, s10;
+ load_s16_4x4(s, src_stride, &s7, &s8, &s9, &s10);
+
+ int16x8_t s4567[2], s5678[2], s6789[2], s78910[2];
+
+ // Transpose and shuffle the 4 lines that were loaded.
+ transpose_concat_4x4(s7, s8, s9, s10, s78910);
+
+ // Merge new data into block from previous iteration.
+ aom_tbl2x2_s16(s3456, s78910, merge_block_tbl[0], s4567);
+ aom_tbl2x2_s16(s3456, s78910, merge_block_tbl[1], s5678);
+ aom_tbl2x2_s16(s3456, s78910, merge_block_tbl[2], s6789);
+
+ uint16x4_t d0 = highbd_convolve8_4_v(s0123, s4567, y_filter, max);
+ uint16x4_t d1 = highbd_convolve8_4_v(s1234, s5678, y_filter, max);
+ uint16x4_t d2 = highbd_convolve8_4_v(s2345, s6789, y_filter, max);
+ uint16x4_t d3 = highbd_convolve8_4_v(s3456, s78910, y_filter, max);
+
+ store_u16_4x4(dst, dst_stride, d0, d1, d2, d3);
+
+ // Prepare block for next iteration - re-using as much as possible.
+ // Shuffle everything up four rows.
+ s0123[0] = s4567[0];
+ s0123[1] = s4567[1];
+ s1234[0] = s5678[0];
+ s1234[1] = s5678[1];
+ s2345[0] = s6789[0];
+ s2345[1] = s6789[1];
+ s3456[0] = s78910[0];
+ s3456[1] = s78910[1];
+
+ s += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height != 0);
+ } else {
+ const uint16x8_t max = vdupq_n_u16((1 << bd) - 1);
+ do {
+ int h = height;
+ int16_t *s = (int16_t *)src;
+ uint16_t *d = dst;
+
+ int16x8_t s0, s1, s2, s3, s4, s5, s6;
+ load_s16_8x7(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6);
+ s += 7 * src_stride;
+
+ // This operation combines a conventional transpose and the sample permute
+ // required before computing the dot product.
+ int16x8_t s0123[4], s1234[4], s2345[4], s3456[4];
+ transpose_concat_8x4(s0, s1, s2, s3, s0123);
+ transpose_concat_8x4(s1, s2, s3, s4, s1234);
+ transpose_concat_8x4(s2, s3, s4, s5, s2345);
+ transpose_concat_8x4(s3, s4, s5, s6, s3456);
+
+ do {
+ int16x8_t s7, s8, s9, s10;
+ load_s16_8x4(s, src_stride, &s7, &s8, &s9, &s10);
+
+ int16x8_t s4567[4], s5678[4], s6789[4], s78910[4];
+
+ // Transpose and shuffle the 4 lines that were loaded.
+ transpose_concat_8x4(s7, s8, s9, s10, s78910);
+
+ // Merge new data into block from previous iteration.
+ aom_tbl2x4_s16(s3456, s78910, merge_block_tbl[0], s4567);
+ aom_tbl2x4_s16(s3456, s78910, merge_block_tbl[1], s5678);
+ aom_tbl2x4_s16(s3456, s78910, merge_block_tbl[2], s6789);
+
+ uint16x8_t d0 = highbd_convolve8_8_v(s0123, s4567, y_filter, max);
+ uint16x8_t d1 = highbd_convolve8_8_v(s1234, s5678, y_filter, max);
+ uint16x8_t d2 = highbd_convolve8_8_v(s2345, s6789, y_filter, max);
+ uint16x8_t d3 = highbd_convolve8_8_v(s3456, s78910, y_filter, max);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ // Prepare block for next iteration - re-using as much as possible.
+ // Shuffle everything up four rows.
+ s0123[0] = s4567[0];
+ s0123[1] = s4567[1];
+ s0123[2] = s4567[2];
+ s0123[3] = s4567[3];
+
+ s1234[0] = s5678[0];
+ s1234[1] = s5678[1];
+ s1234[2] = s5678[2];
+ s1234[3] = s5678[3];
+
+ s2345[0] = s6789[0];
+ s2345[1] = s6789[1];
+ s2345[2] = s6789[2];
+ s2345[3] = s6789[3];
+
+ s3456[0] = s78910[0];
+ s3456[1] = s78910[1];
+ s3456[2] = s78910[2];
+ s3456[3] = s78910[3];
+
+ s += 4 * src_stride;
+ d += 4 * dst_stride;
+ h -= 4;
+ } while (h != 0);
+ src += 8;
+ dst += 8;
+ width -= 8;
+ } while (width != 0);
+ }
+}
+
+static INLINE uint16x4_t highbd_convolve4_4_v(int16x8_t s[2], int16x8_t filter,
+ uint16x4_t max) {
+ int64x2_t sum01 = aom_svdot_lane_s16(vdupq_n_s64(0), s[0], filter, 0);
+ int64x2_t sum23 = aom_svdot_lane_s16(vdupq_n_s64(0), s[1], filter, 0);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+ uint16x4_t res = vqrshrun_n_s32(sum0123, FILTER_BITS);
+
+ return vmin_u16(res, max);
+}
+
+static INLINE uint16x8_t highbd_convolve4_8_v(int16x8_t s[4], int16x8_t filter,
+ uint16x8_t max) {
+ int64x2_t sum01 = aom_svdot_lane_s16(vdupq_n_s64(0), s[0], filter, 0);
+ int64x2_t sum23 = aom_svdot_lane_s16(vdupq_n_s64(0), s[1], filter, 0);
+ int64x2_t sum45 = aom_svdot_lane_s16(vdupq_n_s64(0), s[2], filter, 0);
+ int64x2_t sum67 = aom_svdot_lane_s16(vdupq_n_s64(0), s[3], filter, 0);
+
+ int32x4_t s0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+ int32x4_t s4567 = vcombine_s32(vmovn_s64(sum45), vmovn_s64(sum67));
+
+ uint16x8_t res = vcombine_u16(vqrshrun_n_s32(s0123, FILTER_BITS),
+ vqrshrun_n_s32(s4567, FILTER_BITS));
+
+ return vminq_u16(res, max);
+}
+
+static INLINE void highbd_convolve8_vert_4tap_sve(
+ const uint16_t *src, ptrdiff_t src_stride, uint16_t *dst,
+ ptrdiff_t dst_stride, const int16_t *filter_y, int width, int height,
+ int bd) {
+ const int16x8_t y_filter =
+ vcombine_s16(vld1_s16(filter_y + 2), vdup_n_s16(0));
+
+ uint8x16_t merge_block_tbl[3];
+ merge_block_tbl[0] = vld1q_u8(kDotProdMergeBlockTbl);
+ merge_block_tbl[1] = vld1q_u8(kDotProdMergeBlockTbl + 16);
+ merge_block_tbl[2] = vld1q_u8(kDotProdMergeBlockTbl + 32);
+
+ if (width == 4) {
+ const uint16x4_t max = vdup_n_u16((1 << bd) - 1);
+ int16_t *s = (int16_t *)src;
+
+ int16x4_t s0, s1, s2;
+ load_s16_4x3(s, src_stride, &s0, &s1, &s2);
+ s += 3 * src_stride;
+
+ do {
+ int16x4_t s3, s4, s5, s6;
+ load_s16_4x4(s, src_stride, &s3, &s4, &s5, &s6);
+
+ // This operation combines a conventional transpose and the sample permute
+ // required before computing the dot product.
+ int16x8_t s0123[2], s1234[2], s2345[2], s3456[2];
+ transpose_concat_4x4(s0, s1, s2, s3, s0123);
+ transpose_concat_4x4(s1, s2, s3, s4, s1234);
+ transpose_concat_4x4(s2, s3, s4, s5, s2345);
+ transpose_concat_4x4(s3, s4, s5, s6, s3456);
+
+ uint16x4_t d0 = highbd_convolve4_4_v(s0123, y_filter, max);
+ uint16x4_t d1 = highbd_convolve4_4_v(s1234, y_filter, max);
+ uint16x4_t d2 = highbd_convolve4_4_v(s2345, y_filter, max);
+ uint16x4_t d3 = highbd_convolve4_4_v(s3456, y_filter, max);
+
+ store_u16_4x4(dst, dst_stride, d0, d1, d2, d3);
+
+ // Shuffle everything up four rows.
+ s0 = s4;
+ s1 = s5;
+ s2 = s6;
+
+ s += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height != 0);
+ } else {
+ const uint16x8_t max = vdupq_n_u16((1 << bd) - 1);
+ do {
+ int h = height;
+ int16_t *s = (int16_t *)src;
+ uint16_t *d = dst;
+
+ int16x8_t s0, s1, s2;
+ load_s16_8x3(s, src_stride, &s0, &s1, &s2);
+ s += 3 * src_stride;
+
+ do {
+ int16x8_t s3, s4, s5, s6;
+ load_s16_8x4(s, src_stride, &s3, &s4, &s5, &s6);
+
+ // This operation combines a conventional transpose and the sample
+ // permute required before computing the dot product.
+ int16x8_t s0123[4], s1234[4], s2345[4], s3456[4];
+ transpose_concat_8x4(s0, s1, s2, s3, s0123);
+ transpose_concat_8x4(s1, s2, s3, s4, s1234);
+ transpose_concat_8x4(s2, s3, s4, s5, s2345);
+ transpose_concat_8x4(s3, s4, s5, s6, s3456);
+
+ uint16x8_t d0 = highbd_convolve4_8_v(s0123, y_filter, max);
+ uint16x8_t d1 = highbd_convolve4_8_v(s1234, y_filter, max);
+ uint16x8_t d2 = highbd_convolve4_8_v(s2345, y_filter, max);
+ uint16x8_t d3 = highbd_convolve4_8_v(s3456, y_filter, max);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ // Shuffle everything up four rows.
+ s0 = s4;
+ s1 = s5;
+ s2 = s6;
+
+ s += 4 * src_stride;
+ d += 4 * dst_stride;
+ h -= 4;
+ } while (h != 0);
+ src += 8;
+ dst += 8;
+ width -= 8;
+ } while (width != 0);
+ }
+}
+
+void aom_highbd_convolve8_vert_sve(const uint8_t *src8, ptrdiff_t src_stride,
+ uint8_t *dst8, ptrdiff_t dst_stride,
+ const int16_t *filter_x, int x_step_q4,
+ const int16_t *filter_y, int y_step_q4,
+ int width, int height, int bd) {
+ assert(y_step_q4 == 16);
+ assert(width >= 4 && height >= 4);
+ (void)filter_x;
+ (void)y_step_q4;
+ (void)x_step_q4;
+
+ const uint16_t *src = CONVERT_TO_SHORTPTR(src8);
+ uint16_t *dst = CONVERT_TO_SHORTPTR(dst8);
+
+ src -= (SUBPEL_TAPS / 2 - 1) * src_stride;
+
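+  // As in the horizontal case, a 4-tap filter keeps its non-zero taps at
+  // filter_y[2..5], so the 4-tap path skips the first two source rows.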
+ if (get_filter_taps_convolve8(filter_y) <= 4) {
+ highbd_convolve8_vert_4tap_sve(src + 2 * src_stride, src_stride, dst,
+ dst_stride, filter_y, width, height, bd);
+ } else {
+ highbd_convolve8_vert_8tap_sve(src, src_stride, dst, dst_stride, filter_y,
+ width, height, bd);
+ }
+}
diff --git a/third_party/aom/aom_dsp/arm/highbd_sse_sve.c b/third_party/aom/aom_dsp/arm/highbd_sse_sve.c
index b267da5cfb..9ea13ab67a 100644
--- a/third_party/aom/aom_dsp/arm/highbd_sse_sve.c
+++ b/third_party/aom/aom_dsp/arm/highbd_sse_sve.c
@@ -10,7 +10,7 @@
#include <arm_neon.h>
-#include "aom_dsp/arm/dot_sve.h"
+#include "aom_dsp/arm/aom_neon_sve_bridge.h"
#include "aom_dsp/arm/mem_neon.h"
#include "config/aom_dsp_rtcd.h"
diff --git a/third_party/aom/aom_dsp/arm/highbd_variance_sve.c b/third_party/aom/aom_dsp/arm/highbd_variance_sve.c
index a2c30a1688..ad1f55e367 100644
--- a/third_party/aom/aom_dsp/arm/highbd_variance_sve.c
+++ b/third_party/aom/aom_dsp/arm/highbd_variance_sve.c
@@ -16,7 +16,7 @@
#include "config/aom_dsp_rtcd.h"
#include "aom_dsp/aom_filter.h"
-#include "aom_dsp/arm/dot_sve.h"
+#include "aom_dsp/arm/aom_neon_sve_bridge.h"
#include "aom_dsp/arm/mem_neon.h"
#include "aom_dsp/variance.h"
diff --git a/third_party/aom/aom_dsp/arm/mem_neon.h b/third_party/aom/aom_dsp/arm/mem_neon.h
index 52c7a34e3e..32a462a186 100644
--- a/third_party/aom/aom_dsp/arm/mem_neon.h
+++ b/third_party/aom/aom_dsp/arm/mem_neon.h
@@ -56,17 +56,10 @@ static INLINE uint16x8x4_t vld1q_u16_x4(const uint16_t *ptr) {
#elif defined(__GNUC__) && !defined(__clang__) // GCC 64-bit.
#if __GNUC__ < 8
-
static INLINE uint8x16x2_t vld1q_u8_x2(const uint8_t *ptr) {
uint8x16x2_t res = { { vld1q_u8(ptr + 0 * 16), vld1q_u8(ptr + 1 * 16) } };
return res;
}
-
-static INLINE uint16x8x4_t vld1q_u16_x4(const uint16_t *ptr) {
- uint16x8x4_t res = { { vld1q_u16(ptr + 0 * 8), vld1q_u16(ptr + 1 * 8),
- vld1q_u16(ptr + 2 * 8), vld1q_u16(ptr + 3 * 8) } };
- return res;
-}
#endif // __GNUC__ < 8
#if __GNUC__ < 9
@@ -76,6 +69,15 @@ static INLINE uint8x16x3_t vld1q_u8_x3(const uint8_t *ptr) {
return res;
}
#endif // __GNUC__ < 9
+
+// vld1q_u16_x4 is defined from GCC 8.5.0 onwards.
+#if ((__GNUC__ << 8) | __GNUC_MINOR__) < 0x805
+static INLINE uint16x8x4_t vld1q_u16_x4(const uint16_t *ptr) {
+ uint16x8x4_t res = { { vld1q_u16(ptr + 0 * 8), vld1q_u16(ptr + 1 * 8),
+ vld1q_u16(ptr + 2 * 8), vld1q_u16(ptr + 3 * 8) } };
+ return res;
+}
+#endif // ((__GNUC__ << 8) | __GNUC_MINOR__) < 0x805
#endif // defined(__GNUC__) && !defined(__clang__)
static INLINE void store_u8_8x2(uint8_t *s, ptrdiff_t p, const uint8x8_t s0,
@@ -457,6 +459,16 @@ static INLINE void load_s16_4x4(const int16_t *s, ptrdiff_t p,
*s3 = vld1_s16(s);
}
+static INLINE void load_s16_4x3(const int16_t *s, ptrdiff_t p,
+ int16x4_t *const s0, int16x4_t *const s1,
+ int16x4_t *const s2) {
+ *s0 = vld1_s16(s);
+ s += p;
+ *s1 = vld1_s16(s);
+ s += p;
+ *s2 = vld1_s16(s);
+}
+
static INLINE void store_u8_8x8(uint8_t *s, ptrdiff_t p, const uint8x8_t s0,
const uint8x8_t s1, const uint8x8_t s2,
const uint8x8_t s3, const uint8x8_t s4,
@@ -525,6 +537,16 @@ static INLINE void store_u16_8x8(uint16_t *s, ptrdiff_t dst_stride,
vst1q_u16(s, s7);
}
+static INLINE void store_u16_4x3(uint16_t *s, ptrdiff_t dst_stride,
+ const uint16x4_t s0, const uint16x4_t s1,
+ const uint16x4_t s2) {
+ vst1_u16(s, s0);
+ s += dst_stride;
+ vst1_u16(s, s1);
+ s += dst_stride;
+ vst1_u16(s, s2);
+}
+
static INLINE void store_u16_4x4(uint16_t *s, ptrdiff_t dst_stride,
const uint16x4_t s0, const uint16x4_t s1,
const uint16x4_t s2, const uint16x4_t s3) {
@@ -544,6 +566,16 @@ static INLINE void store_u16_8x2(uint16_t *s, ptrdiff_t dst_stride,
vst1q_u16(s, s1);
}
+static INLINE void store_u16_8x3(uint16_t *s, ptrdiff_t dst_stride,
+ const uint16x8_t s0, const uint16x8_t s1,
+ const uint16x8_t s2) {
+ vst1q_u16(s, s0);
+ s += dst_stride;
+ vst1q_u16(s, s1);
+ s += dst_stride;
+ vst1q_u16(s, s2);
+}
+
static INLINE void store_u16_8x4(uint16_t *s, ptrdiff_t dst_stride,
const uint16x8_t s0, const uint16x8_t s1,
const uint16x8_t s2, const uint16x8_t s3) {
@@ -857,6 +889,16 @@ static INLINE void load_s16_8x4(const int16_t *s, ptrdiff_t p,
*s3 = vld1q_s16(s);
}
+static INLINE void load_s16_8x3(const int16_t *s, ptrdiff_t p,
+ int16x8_t *const s0, int16x8_t *const s1,
+ int16x8_t *const s2) {
+ *s0 = vld1q_s16(s);
+ s += p;
+ *s1 = vld1q_s16(s);
+ s += p;
+ *s2 = vld1q_s16(s);
+}
+
// Load 2 sets of 4 bytes when alignment is not guaranteed.
static INLINE uint8x8_t load_unaligned_u8(const uint8_t *buf, int stride) {
uint32_t a;
diff --git a/third_party/aom/aom_dsp/arm/sum_squares_sve.c b/third_party/aom/aom_dsp/arm/sum_squares_sve.c
index 724e43859e..c7e6dfcb02 100644
--- a/third_party/aom/aom_dsp/arm/sum_squares_sve.c
+++ b/third_party/aom/aom_dsp/arm/sum_squares_sve.c
@@ -11,7 +11,7 @@
#include <arm_neon.h>
-#include "aom_dsp/arm/dot_sve.h"
+#include "aom_dsp/arm/aom_neon_sve_bridge.h"
#include "aom_dsp/arm/mem_neon.h"
#include "config/aom_dsp_rtcd.h"
diff --git a/third_party/aom/aom_dsp/flow_estimation/corner_detect.c b/third_party/aom/aom_dsp/flow_estimation/corner_detect.c
index 284d1bd7b8..44d423dcdf 100644
--- a/third_party/aom/aom_dsp/flow_estimation/corner_detect.c
+++ b/third_party/aom/aom_dsp/flow_estimation/corner_detect.c
@@ -20,6 +20,7 @@
#include "aom_dsp/aom_dsp_common.h"
#include "aom_dsp/flow_estimation/corner_detect.h"
#include "aom_mem/aom_mem.h"
+#include "aom_util/aom_pthread.h"
#include "av1/common/common.h"
#define FAST_BARRIER 18
@@ -39,11 +40,24 @@ CornerList *av1_alloc_corner_list(void) {
return corners;
}
-static bool compute_corner_list(const ImagePyramid *pyr, CornerList *corners) {
- const uint8_t *buf = pyr->layers[0].buffer;
- int width = pyr->layers[0].width;
- int height = pyr->layers[0].height;
- int stride = pyr->layers[0].stride;
+static bool compute_corner_list(const YV12_BUFFER_CONFIG *frame, int bit_depth,
+ int downsample_level, CornerList *corners) {
+ ImagePyramid *pyr = frame->y_pyramid;
+ const int layers =
+ aom_compute_pyramid(frame, bit_depth, downsample_level + 1, pyr);
+
+ if (layers < 0) {
+ return false;
+ }
+
+ // Clamp the downsampling level based on the maximum number of pyramid
+ // layers allowed for this frame size.
+ downsample_level = layers - 1;
+
+ const uint8_t *buf = pyr->layers[downsample_level].buffer;
+ int width = pyr->layers[downsample_level].width;
+ int height = pyr->layers[downsample_level].height;
+ int stride = pyr->layers[downsample_level].stride;
int *scores = NULL;
int num_corners;
@@ -53,9 +67,11 @@ static bool compute_corner_list(const ImagePyramid *pyr, CornerList *corners) {
if (num_corners <= MAX_CORNERS) {
// Use all detected corners
- if (num_corners != 0) {
- memcpy(corners->corners, frame_corners_xy,
- sizeof(*frame_corners_xy) * num_corners);
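+    // Corners were detected on a downsampled pyramid layer, so scale the
+    // coordinates back up to full-resolution frame units.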
+ for (int i = 0; i < num_corners; i++) {
+ corners->corners[2 * i + 0] =
+ frame_corners_xy[i].x * (1 << downsample_level);
+ corners->corners[2 * i + 1] =
+ frame_corners_xy[i].y * (1 << downsample_level);
}
corners->num_corners = num_corners;
} else {
@@ -85,8 +101,10 @@ static bool compute_corner_list(const ImagePyramid *pyr, CornerList *corners) {
for (int i = 0; i < num_corners; i++) {
if (scores[i] > threshold) {
assert(copied_corners < MAX_CORNERS);
- corners->corners[2 * copied_corners + 0] = frame_corners_xy[i].x;
- corners->corners[2 * copied_corners + 1] = frame_corners_xy[i].y;
+ corners->corners[2 * copied_corners + 0] =
+ frame_corners_xy[i].x * (1 << downsample_level);
+ corners->corners[2 * copied_corners + 1] =
+ frame_corners_xy[i].y * (1 << downsample_level);
copied_corners += 1;
}
}
@@ -99,7 +117,8 @@ static bool compute_corner_list(const ImagePyramid *pyr, CornerList *corners) {
return true;
}
-bool av1_compute_corner_list(const ImagePyramid *pyr, CornerList *corners) {
+bool av1_compute_corner_list(const YV12_BUFFER_CONFIG *frame, int bit_depth,
+ int downsample_level, CornerList *corners) {
assert(corners);
#if CONFIG_MULTITHREAD
@@ -107,7 +126,8 @@ bool av1_compute_corner_list(const ImagePyramid *pyr, CornerList *corners) {
#endif // CONFIG_MULTITHREAD
if (!corners->valid) {
- corners->valid = compute_corner_list(pyr, corners);
+ corners->valid =
+ compute_corner_list(frame, bit_depth, downsample_level, corners);
}
bool valid = corners->valid;
diff --git a/third_party/aom/aom_dsp/flow_estimation/corner_detect.h b/third_party/aom/aom_dsp/flow_estimation/corner_detect.h
index d05846ce5d..54d94309ed 100644
--- a/third_party/aom/aom_dsp/flow_estimation/corner_detect.h
+++ b/third_party/aom/aom_dsp/flow_estimation/corner_detect.h
@@ -18,7 +18,7 @@
#include <memory.h>
#include "aom_dsp/pyramid.h"
-#include "aom_util/aom_thread.h"
+#include "aom_util/aom_pthread.h"
#ifdef __cplusplus
extern "C" {
@@ -57,7 +57,8 @@ size_t av1_get_corner_list_size(void);
CornerList *av1_alloc_corner_list(void);
-bool av1_compute_corner_list(const ImagePyramid *pyr, CornerList *corners);
+bool av1_compute_corner_list(const YV12_BUFFER_CONFIG *frame, int bit_depth,
+ int downsample_level, CornerList *corners);
#ifndef NDEBUG
// Check if a corner list has already been computed.
diff --git a/third_party/aom/aom_dsp/flow_estimation/corner_match.c b/third_party/aom/aom_dsp/flow_estimation/corner_match.c
index dc7589a8c6..c78edb8910 100644
--- a/third_party/aom/aom_dsp/flow_estimation/corner_match.c
+++ b/third_party/aom/aom_dsp/flow_estimation/corner_match.c
@@ -17,62 +17,84 @@
#include "aom_dsp/flow_estimation/corner_detect.h"
#include "aom_dsp/flow_estimation/corner_match.h"
+#include "aom_dsp/flow_estimation/disflow.h"
#include "aom_dsp/flow_estimation/flow_estimation.h"
#include "aom_dsp/flow_estimation/ransac.h"
#include "aom_dsp/pyramid.h"
#include "aom_scale/yv12config.h"
-#define SEARCH_SZ 9
-#define SEARCH_SZ_BY2 ((SEARCH_SZ - 1) / 2)
-
#define THRESHOLD_NCC 0.75
-/* Compute var(frame) * MATCH_SZ_SQ over a MATCH_SZ by MATCH_SZ window of frame,
- centered at (x, y).
+/* Compute mean and standard deviation of pixels in a window of size
+ MATCH_SZ by MATCH_SZ centered at (x, y).
+ Store results into *mean and *one_over_stddev
+
+ Note: The output of this function is scaled by MATCH_SZ, as in
+ *mean = MATCH_SZ * <true mean> and
+ *one_over_stddev = 1 / (MATCH_SZ * <true stddev>)
+
+ Combined with the fact that we return 1/stddev rather than the standard
+ deviation itself, this allows us to completely avoid divisions in
+ aom_compute_correlation, which is much hotter than this function is.
+
+ Returns true if this feature point is usable, false otherwise.
*/
-static double compute_variance(const unsigned char *frame, int stride, int x,
- int y) {
+bool aom_compute_mean_stddev_c(const unsigned char *frame, int stride, int x,
+ int y, double *mean, double *one_over_stddev) {
int sum = 0;
int sumsq = 0;
- int var;
- int i, j;
- for (i = 0; i < MATCH_SZ; ++i)
- for (j = 0; j < MATCH_SZ; ++j) {
+ for (int i = 0; i < MATCH_SZ; ++i) {
+ for (int j = 0; j < MATCH_SZ; ++j) {
sum += frame[(i + y - MATCH_SZ_BY2) * stride + (j + x - MATCH_SZ_BY2)];
sumsq += frame[(i + y - MATCH_SZ_BY2) * stride + (j + x - MATCH_SZ_BY2)] *
frame[(i + y - MATCH_SZ_BY2) * stride + (j + x - MATCH_SZ_BY2)];
}
- var = sumsq * MATCH_SZ_SQ - sum * sum;
- return (double)var;
+ }
+ *mean = (double)sum / MATCH_SZ;
+ const double variance = sumsq - (*mean) * (*mean);
+ if (variance < MIN_FEATURE_VARIANCE) {
+ *one_over_stddev = 0.0;
+ return false;
+ }
+ *one_over_stddev = 1.0 / sqrt(variance);
+ return true;
}
-/* Compute corr(frame1, frame2) * MATCH_SZ * stddev(frame1), where the
- correlation/standard deviation are taken over MATCH_SZ by MATCH_SZ windows
- of each image, centered at (x1, y1) and (x2, y2) respectively.
+/* Compute corr(frame1, frame2) over a window of size MATCH_SZ by MATCH_SZ.
+ To save on computation, the mean and (1 divided by the) standard deviation
+ of the window in each frame are precomputed and passed into this function
+ as arguments.
*/
-double av1_compute_cross_correlation_c(const unsigned char *frame1, int stride1,
- int x1, int y1,
- const unsigned char *frame2, int stride2,
- int x2, int y2) {
+double aom_compute_correlation_c(const unsigned char *frame1, int stride1,
+ int x1, int y1, double mean1,
+ double one_over_stddev1,
+ const unsigned char *frame2, int stride2,
+ int x2, int y2, double mean2,
+ double one_over_stddev2) {
int v1, v2;
- int sum1 = 0;
- int sum2 = 0;
- int sumsq2 = 0;
int cross = 0;
- int var2, cov;
- int i, j;
- for (i = 0; i < MATCH_SZ; ++i)
- for (j = 0; j < MATCH_SZ; ++j) {
+ for (int i = 0; i < MATCH_SZ; ++i) {
+ for (int j = 0; j < MATCH_SZ; ++j) {
v1 = frame1[(i + y1 - MATCH_SZ_BY2) * stride1 + (j + x1 - MATCH_SZ_BY2)];
v2 = frame2[(i + y2 - MATCH_SZ_BY2) * stride2 + (j + x2 - MATCH_SZ_BY2)];
- sum1 += v1;
- sum2 += v2;
- sumsq2 += v2 * v2;
cross += v1 * v2;
}
- var2 = sumsq2 * MATCH_SZ_SQ - sum2 * sum2;
- cov = cross * MATCH_SZ_SQ - sum1 * sum2;
- return cov / sqrt((double)var2);
+ }
+
+ // Note: In theory, the calculations here "should" be
+ // covariance = cross / N^2 - mean1 * mean2
+ // correlation = covariance / (stddev1 * stddev2).
+ //
+ // However, because of the scaling in aom_compute_mean_stddev, the
+ // lines below actually calculate
+ // covariance * N^2 = cross - (mean1 * N) * (mean2 * N)
+ // correlation = (covariance * N^2) / ((stddev1 * N) * (stddev2 * N))
+ //
+ // i.e. we have removed the need for a division, and still end up with the
+ // correct unscaled correlation (i.e. in the range [-1, +1]).
+ double covariance = cross - mean1 * mean2;
+ double correlation = covariance * (one_over_stddev1 * one_over_stddev2);
+ return correlation;
}
static int is_eligible_point(int pointx, int pointy, int width, int height) {
@@ -87,65 +109,14 @@ static int is_eligible_distance(int point1x, int point1y, int point2x,
(point1y - point2y) * (point1y - point2y)) <= thresh * thresh;
}
-static void improve_correspondence(const unsigned char *src,
- const unsigned char *ref, int width,
- int height, int src_stride, int ref_stride,
- Correspondence *correspondences,
- int num_correspondences) {
- int i;
- for (i = 0; i < num_correspondences; ++i) {
- int x, y, best_x = 0, best_y = 0;
- double best_match_ncc = 0.0;
- // For this algorithm, all points have integer coordinates.
- // It's a little more efficient to convert them to ints once,
- // before the inner loops
- int x0 = (int)correspondences[i].x;
- int y0 = (int)correspondences[i].y;
- int rx0 = (int)correspondences[i].rx;
- int ry0 = (int)correspondences[i].ry;
- for (y = -SEARCH_SZ_BY2; y <= SEARCH_SZ_BY2; ++y) {
- for (x = -SEARCH_SZ_BY2; x <= SEARCH_SZ_BY2; ++x) {
- double match_ncc;
- if (!is_eligible_point(rx0 + x, ry0 + y, width, height)) continue;
- if (!is_eligible_distance(x0, y0, rx0 + x, ry0 + y, width, height))
- continue;
- match_ncc = av1_compute_cross_correlation(src, src_stride, x0, y0, ref,
- ref_stride, rx0 + x, ry0 + y);
- if (match_ncc > best_match_ncc) {
- best_match_ncc = match_ncc;
- best_y = y;
- best_x = x;
- }
- }
- }
- correspondences[i].rx += best_x;
- correspondences[i].ry += best_y;
- }
- for (i = 0; i < num_correspondences; ++i) {
- int x, y, best_x = 0, best_y = 0;
- double best_match_ncc = 0.0;
- int x0 = (int)correspondences[i].x;
- int y0 = (int)correspondences[i].y;
- int rx0 = (int)correspondences[i].rx;
- int ry0 = (int)correspondences[i].ry;
- for (y = -SEARCH_SZ_BY2; y <= SEARCH_SZ_BY2; ++y)
- for (x = -SEARCH_SZ_BY2; x <= SEARCH_SZ_BY2; ++x) {
- double match_ncc;
- if (!is_eligible_point(x0 + x, y0 + y, width, height)) continue;
- if (!is_eligible_distance(x0 + x, y0 + y, rx0, ry0, width, height))
- continue;
- match_ncc = av1_compute_cross_correlation(
- ref, ref_stride, rx0, ry0, src, src_stride, x0 + x, y0 + y);
- if (match_ncc > best_match_ncc) {
- best_match_ncc = match_ncc;
- best_y = y;
- best_x = x;
- }
- }
- correspondences[i].x += best_x;
- correspondences[i].y += best_y;
- }
-}
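+// Per-corner data precomputed for matching. best_match_corr is seeded with
+// THRESHOLD_NCC, so only correlations above the threshold are ever recorded
+// as candidate matches.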
+typedef struct {
+ int x;
+ int y;
+ double mean;
+ double one_over_stddev;
+ int best_match_idx;
+ double best_match_corr;
+} PointInfo;
static int determine_correspondence(const unsigned char *src,
const int *src_corners, int num_src_corners,
@@ -154,56 +125,136 @@ static int determine_correspondence(const unsigned char *src,
int width, int height, int src_stride,
int ref_stride,
Correspondence *correspondences) {
- // TODO(sarahparker) Improve this to include 2-way match
- int i, j;
+ PointInfo *src_point_info = NULL;
+ PointInfo *ref_point_info = NULL;
int num_correspondences = 0;
- for (i = 0; i < num_src_corners; ++i) {
- double best_match_ncc = 0.0;
- double template_norm;
- int best_match_j = -1;
- if (!is_eligible_point(src_corners[2 * i], src_corners[2 * i + 1], width,
- height))
+
+ src_point_info =
+ (PointInfo *)aom_calloc(num_src_corners, sizeof(*src_point_info));
+ if (!src_point_info) {
+ goto finished;
+ }
+
+ ref_point_info =
+ (PointInfo *)aom_calloc(num_ref_corners, sizeof(*ref_point_info));
+ if (!ref_point_info) {
+ goto finished;
+ }
+
+ // First pass (linear):
+ // Filter corner lists and compute per-patch means and standard deviations,
+ // for the src and ref frames independently
+ int src_point_count = 0;
+ for (int i = 0; i < num_src_corners; i++) {
+ int src_x = src_corners[2 * i];
+ int src_y = src_corners[2 * i + 1];
+ if (!is_eligible_point(src_x, src_y, width, height)) continue;
+
+ PointInfo *point = &src_point_info[src_point_count];
+ point->x = src_x;
+ point->y = src_y;
+ point->best_match_corr = THRESHOLD_NCC;
+ if (!aom_compute_mean_stddev(src, src_stride, src_x, src_y, &point->mean,
+ &point->one_over_stddev))
continue;
- for (j = 0; j < num_ref_corners; ++j) {
- double match_ncc;
- if (!is_eligible_point(ref_corners[2 * j], ref_corners[2 * j + 1], width,
- height))
- continue;
- if (!is_eligible_distance(src_corners[2 * i], src_corners[2 * i + 1],
- ref_corners[2 * j], ref_corners[2 * j + 1],
- width, height))
+ src_point_count++;
+ }
+ if (src_point_count == 0) {
+ goto finished;
+ }
+
+ int ref_point_count = 0;
+ for (int j = 0; j < num_ref_corners; j++) {
+ int ref_x = ref_corners[2 * j];
+ int ref_y = ref_corners[2 * j + 1];
+ if (!is_eligible_point(ref_x, ref_y, width, height)) continue;
+
+ PointInfo *point = &ref_point_info[ref_point_count];
+ point->x = ref_x;
+ point->y = ref_y;
+ point->best_match_corr = THRESHOLD_NCC;
+ if (!aom_compute_mean_stddev(ref, ref_stride, ref_x, ref_y, &point->mean,
+ &point->one_over_stddev))
+ continue;
+ ref_point_count++;
+ }
+ if (ref_point_count == 0) {
+ goto finished;
+ }
+
+ // Second pass (quadratic):
+ // For each pair of points, compute correlation, and use this to determine
+ // the best match of each corner, in both directions
+ for (int i = 0; i < src_point_count; ++i) {
+ PointInfo *src_point = &src_point_info[i];
+ for (int j = 0; j < ref_point_count; ++j) {
+ PointInfo *ref_point = &ref_point_info[j];
+ if (!is_eligible_distance(src_point->x, src_point->y, ref_point->x,
+ ref_point->y, width, height))
continue;
- match_ncc = av1_compute_cross_correlation(
- src, src_stride, src_corners[2 * i], src_corners[2 * i + 1], ref,
- ref_stride, ref_corners[2 * j], ref_corners[2 * j + 1]);
- if (match_ncc > best_match_ncc) {
- best_match_ncc = match_ncc;
- best_match_j = j;
+
+ double corr = aom_compute_correlation(
+ src, src_stride, src_point->x, src_point->y, src_point->mean,
+ src_point->one_over_stddev, ref, ref_stride, ref_point->x,
+ ref_point->y, ref_point->mean, ref_point->one_over_stddev);
+
+ if (corr > src_point->best_match_corr) {
+ src_point->best_match_idx = j;
+ src_point->best_match_corr = corr;
+ }
+ if (corr > ref_point->best_match_corr) {
+ ref_point->best_match_idx = i;
+ ref_point->best_match_corr = corr;
}
}
- // Note: We want to test if the best correlation is >= THRESHOLD_NCC,
- // but need to account for the normalization in
- // av1_compute_cross_correlation.
- template_norm = compute_variance(src, src_stride, src_corners[2 * i],
- src_corners[2 * i + 1]);
- if (best_match_ncc > THRESHOLD_NCC * sqrt(template_norm)) {
- correspondences[num_correspondences].x = src_corners[2 * i];
- correspondences[num_correspondences].y = src_corners[2 * i + 1];
- correspondences[num_correspondences].rx = ref_corners[2 * best_match_j];
- correspondences[num_correspondences].ry =
- ref_corners[2 * best_match_j + 1];
+ }
+
+ // Third pass (linear):
+ // Scan through source corners, generating a correspondence for each corner
+ // iff ref_best_match[src_best_match[i]] == i
+ // Then refine the generated correspondences using optical flow
+ for (int i = 0; i < src_point_count; i++) {
+ PointInfo *point = &src_point_info[i];
+
+ // Skip corners which were not matched, or which didn't find
+ // a good enough match
+ if (point->best_match_corr < THRESHOLD_NCC) continue;
+
+ PointInfo *match_point = &ref_point_info[point->best_match_idx];
+ if (match_point->best_match_idx == i) {
+ // Refine match using optical flow and store
+ const int sx = point->x;
+ const int sy = point->y;
+ const int rx = match_point->x;
+ const int ry = match_point->y;
+ double u = (double)(rx - sx);
+ double v = (double)(ry - sy);
+
+ const int patch_tl_x = sx - DISFLOW_PATCH_CENTER;
+ const int patch_tl_y = sy - DISFLOW_PATCH_CENTER;
+
+ aom_compute_flow_at_point(src, ref, patch_tl_x, patch_tl_y, width, height,
+ src_stride, &u, &v);
+
+ Correspondence *correspondence = &correspondences[num_correspondences];
+ correspondence->x = (double)sx;
+ correspondence->y = (double)sy;
+ correspondence->rx = (double)sx + u;
+ correspondence->ry = (double)sy + v;
num_correspondences++;
}
}
- improve_correspondence(src, ref, width, height, src_stride, ref_stride,
- correspondences, num_correspondences);
+
+finished:
+ aom_free(src_point_info);
+ aom_free(ref_point_info);
return num_correspondences;
}
bool av1_compute_global_motion_feature_match(
TransformationType type, YV12_BUFFER_CONFIG *src, YV12_BUFFER_CONFIG *ref,
- int bit_depth, MotionModel *motion_models, int num_motion_models,
- bool *mem_alloc_failed) {
+ int bit_depth, int downsample_level, MotionModel *motion_models,
+ int num_motion_models, bool *mem_alloc_failed) {
int num_correspondences;
Correspondence *correspondences;
ImagePyramid *src_pyramid = src->y_pyramid;
@@ -212,19 +263,19 @@ bool av1_compute_global_motion_feature_match(
CornerList *ref_corners = ref->corners;
// Precompute information we will need about each frame
- if (!aom_compute_pyramid(src, bit_depth, src_pyramid)) {
+ if (aom_compute_pyramid(src, bit_depth, 1, src_pyramid) < 0) {
*mem_alloc_failed = true;
return false;
}
- if (!av1_compute_corner_list(src_pyramid, src_corners)) {
+ if (!av1_compute_corner_list(src, bit_depth, downsample_level, src_corners)) {
*mem_alloc_failed = true;
return false;
}
- if (!aom_compute_pyramid(ref, bit_depth, ref_pyramid)) {
+ if (aom_compute_pyramid(ref, bit_depth, 1, ref_pyramid) < 0) {
*mem_alloc_failed = true;
return false;
}
- if (!av1_compute_corner_list(ref_pyramid, ref_corners)) {
+ if (!av1_compute_corner_list(ref, bit_depth, downsample_level, ref_corners)) {
*mem_alloc_failed = true;
return false;
}
diff --git a/third_party/aom/aom_dsp/flow_estimation/corner_match.h b/third_party/aom/aom_dsp/flow_estimation/corner_match.h
index 4435d2c767..77ebee2ea3 100644
--- a/third_party/aom/aom_dsp/flow_estimation/corner_match.h
+++ b/third_party/aom/aom_dsp/flow_estimation/corner_match.h
@@ -25,14 +25,20 @@
extern "C" {
#endif
-#define MATCH_SZ 13
+#define MATCH_SZ 16
#define MATCH_SZ_BY2 ((MATCH_SZ - 1) / 2)
#define MATCH_SZ_SQ (MATCH_SZ * MATCH_SZ)
+// Minimum threshold for the variance of a patch, in order for it to be
+// considered useful for matching.
+// This is evaluated against the scaled variance MATCH_SZ_SQ * sigma^2,
+// so a setting of 1 * MATCH_SZ_SQ corresponds to an unscaled variance of 1
+#define MIN_FEATURE_VARIANCE (1 * MATCH_SZ_SQ)
+
bool av1_compute_global_motion_feature_match(
TransformationType type, YV12_BUFFER_CONFIG *src, YV12_BUFFER_CONFIG *ref,
- int bit_depth, MotionModel *motion_models, int num_motion_models,
- bool *mem_alloc_failed);
+ int bit_depth, int downsample_level, MotionModel *motion_models,
+ int num_motion_models, bool *mem_alloc_failed);
#ifdef __cplusplus
}
diff --git a/third_party/aom/aom_dsp/flow_estimation/disflow.c b/third_party/aom/aom_dsp/flow_estimation/disflow.c
index 82b531c729..f511a6eb49 100644
--- a/third_party/aom/aom_dsp/flow_estimation/disflow.c
+++ b/third_party/aom/aom_dsp/flow_estimation/disflow.c
@@ -603,9 +603,9 @@ static void upscale_flow_component(double *flow, int cur_width, int cur_height,
// make sure flow_u and flow_v start at 0
static bool compute_flow_field(const ImagePyramid *src_pyr,
- const ImagePyramid *ref_pyr, FlowField *flow) {
+ const ImagePyramid *ref_pyr, int n_levels,
+ FlowField *flow) {
bool mem_status = true;
- assert(src_pyr->n_levels == ref_pyr->n_levels);
double *flow_u = flow->u;
double *flow_v = flow->v;
@@ -613,7 +613,7 @@ static bool compute_flow_field(const ImagePyramid *src_pyr,
double *tmpbuf0;
double *tmpbuf;
- if (src_pyr->n_levels < 2) {
+ if (n_levels < 2) {
// tmpbuf not needed
tmpbuf0 = NULL;
tmpbuf = NULL;
@@ -639,7 +639,7 @@ static bool compute_flow_field(const ImagePyramid *src_pyr,
// correspondences by interpolating this flow field, and then refine the
// correspondences themselves. This is both faster and gives better output
// compared to refining the flow field at level 0 and then interpolating.
- for (int level = src_pyr->n_levels - 1; level >= 1; --level) {
+ for (int level = n_levels - 1; level >= 1; --level) {
const PyramidLayer *cur_layer = &src_pyr->layers[level];
const int cur_width = cur_layer->width;
const int cur_height = cur_layer->height;
@@ -762,29 +762,31 @@ static void free_flow_field(FlowField *flow) {
// Following the convention in flow_estimation.h, the flow vectors are computed
// at fixed points in `src` and point to the corresponding locations in `ref`,
// regardless of the temporal ordering of the frames.
-bool av1_compute_global_motion_disflow(TransformationType type,
- YV12_BUFFER_CONFIG *src,
- YV12_BUFFER_CONFIG *ref, int bit_depth,
- MotionModel *motion_models,
- int num_motion_models,
- bool *mem_alloc_failed) {
+bool av1_compute_global_motion_disflow(
+ TransformationType type, YV12_BUFFER_CONFIG *src, YV12_BUFFER_CONFIG *ref,
+ int bit_depth, int downsample_level, MotionModel *motion_models,
+ int num_motion_models, bool *mem_alloc_failed) {
// Precompute information we will need about each frame
ImagePyramid *src_pyramid = src->y_pyramid;
CornerList *src_corners = src->corners;
ImagePyramid *ref_pyramid = ref->y_pyramid;
- if (!aom_compute_pyramid(src, bit_depth, src_pyramid)) {
- *mem_alloc_failed = true;
- return false;
- }
- if (!av1_compute_corner_list(src_pyramid, src_corners)) {
+
+ const int src_layers =
+ aom_compute_pyramid(src, bit_depth, DISFLOW_PYRAMID_LEVELS, src_pyramid);
+ const int ref_layers =
+ aom_compute_pyramid(ref, bit_depth, DISFLOW_PYRAMID_LEVELS, ref_pyramid);
+
+ if (src_layers < 0 || ref_layers < 0) {
*mem_alloc_failed = true;
return false;
}
- if (!aom_compute_pyramid(ref, bit_depth, ref_pyramid)) {
+ if (!av1_compute_corner_list(src, bit_depth, downsample_level, src_corners)) {
*mem_alloc_failed = true;
return false;
}
+ assert(src_layers == ref_layers);
+
const int src_width = src_pyramid->layers[0].width;
const int src_height = src_pyramid->layers[0].height;
assert(ref_pyramid->layers[0].width == src_width);
@@ -796,7 +798,7 @@ bool av1_compute_global_motion_disflow(TransformationType type,
return false;
}
- if (!compute_flow_field(src_pyramid, ref_pyramid, flow)) {
+ if (!compute_flow_field(src_pyramid, ref_pyramid, src_layers, flow)) {
*mem_alloc_failed = true;
free_flow_field(flow);
return false;
diff --git a/third_party/aom/aom_dsp/flow_estimation/disflow.h b/third_party/aom/aom_dsp/flow_estimation/disflow.h
index ef877b638c..ac3680004d 100644
--- a/third_party/aom/aom_dsp/flow_estimation/disflow.h
+++ b/third_party/aom/aom_dsp/flow_estimation/disflow.h
@@ -15,7 +15,6 @@
#include <stdbool.h>
#include "aom_dsp/flow_estimation/flow_estimation.h"
-#include "aom_dsp/rect.h"
#include "aom_scale/yv12config.h"
#ifdef __cplusplus
@@ -92,12 +91,10 @@ typedef struct {
int stride;
} FlowField;
-bool av1_compute_global_motion_disflow(TransformationType type,
- YV12_BUFFER_CONFIG *src,
- YV12_BUFFER_CONFIG *ref, int bit_depth,
- MotionModel *motion_models,
- int num_motion_models,
- bool *mem_alloc_failed);
+bool av1_compute_global_motion_disflow(
+ TransformationType type, YV12_BUFFER_CONFIG *src, YV12_BUFFER_CONFIG *ref,
+ int bit_depth, int downsample_level, MotionModel *motion_models,
+ int num_motion_models, bool *mem_alloc_failed);
#ifdef __cplusplus
}
diff --git a/third_party/aom/aom_dsp/flow_estimation/flow_estimation.c b/third_party/aom/aom_dsp/flow_estimation/flow_estimation.c
index 0f47f86f55..96624eb863 100644
--- a/third_party/aom/aom_dsp/flow_estimation/flow_estimation.c
+++ b/third_party/aom/aom_dsp/flow_estimation/flow_estimation.c
@@ -18,14 +18,6 @@
#include "aom_ports/mem.h"
#include "aom_scale/yv12config.h"
-// For each global motion method, how many pyramid levels should we allocate?
-// Note that this is a maximum, and fewer levels will be allocated if the frame
-// is not large enough to need all of the specified levels
-const int global_motion_pyr_levels[GLOBAL_MOTION_METHODS] = {
- 1, // GLOBAL_MOTION_METHOD_FEATURE_MATCH
- 16, // GLOBAL_MOTION_METHOD_DISFLOW
-};
-
// clang-format off
const double kIdentityParams[MAX_PARAMDIM] = {
0.0, 0.0, 1.0, 0.0, 0.0, 1.0
@@ -43,17 +35,17 @@ const double kIdentityParams[MAX_PARAMDIM] = {
bool aom_compute_global_motion(TransformationType type, YV12_BUFFER_CONFIG *src,
YV12_BUFFER_CONFIG *ref, int bit_depth,
GlobalMotionMethod gm_method,
- MotionModel *motion_models,
+ int downsample_level, MotionModel *motion_models,
int num_motion_models, bool *mem_alloc_failed) {
switch (gm_method) {
case GLOBAL_MOTION_METHOD_FEATURE_MATCH:
return av1_compute_global_motion_feature_match(
- type, src, ref, bit_depth, motion_models, num_motion_models,
- mem_alloc_failed);
+ type, src, ref, bit_depth, downsample_level, motion_models,
+ num_motion_models, mem_alloc_failed);
case GLOBAL_MOTION_METHOD_DISFLOW:
- return av1_compute_global_motion_disflow(type, src, ref, bit_depth,
- motion_models, num_motion_models,
- mem_alloc_failed);
+ return av1_compute_global_motion_disflow(
+ type, src, ref, bit_depth, downsample_level, motion_models,
+ num_motion_models, mem_alloc_failed);
default: assert(0 && "Unknown global motion estimation type");
}
return false;
diff --git a/third_party/aom/aom_dsp/flow_estimation/flow_estimation.h b/third_party/aom/aom_dsp/flow_estimation/flow_estimation.h
index 2dfae24980..a38b03fc4e 100644
--- a/third_party/aom/aom_dsp/flow_estimation/flow_estimation.h
+++ b/third_party/aom/aom_dsp/flow_estimation/flow_estimation.h
@@ -61,11 +61,6 @@ typedef struct {
double rx, ry;
} Correspondence;
-// For each global motion method, how many pyramid levels should we allocate?
-// Note that this is a maximum, and fewer levels will be allocated if the frame
-// is not large enough to need all of the specified levels
-extern const int global_motion_pyr_levels[GLOBAL_MOTION_METHODS];
-
// Which global motion method should we use in practice?
// Disflow is both faster and gives better results than feature matching in
// practically all cases, so we use disflow by default
@@ -85,7 +80,7 @@ extern const double kIdentityParams[MAX_PARAMDIM];
bool aom_compute_global_motion(TransformationType type, YV12_BUFFER_CONFIG *src,
YV12_BUFFER_CONFIG *ref, int bit_depth,
GlobalMotionMethod gm_method,
- MotionModel *motion_models,
+ int downsample_level, MotionModel *motion_models,
int num_motion_models, bool *mem_alloc_failed);
#ifdef __cplusplus
diff --git a/third_party/aom/aom_dsp/flow_estimation/ransac.c b/third_party/aom/aom_dsp/flow_estimation/ransac.c
index b88a07b023..7c7bebdda4 100644
--- a/third_party/aom/aom_dsp/flow_estimation/ransac.c
+++ b/third_party/aom/aom_dsp/flow_estimation/ransac.c
@@ -29,8 +29,13 @@
#define INLIER_THRESHOLD 1.25
#define INLIER_THRESHOLD_SQUARED (INLIER_THRESHOLD * INLIER_THRESHOLD)
+
+// Number of initial models to generate
#define NUM_TRIALS 20
+// Number of times to refine the best model found
+#define NUM_REFINES 5
+
// Flag to enable functions for finding TRANSLATION type models.
//
// These modes are not considered currently due to a spec bug (see comments
@@ -39,63 +44,110 @@
// but disabled, for completeness.
#define ALLOW_TRANSLATION_MODELS 0
+typedef struct {
+ int num_inliers;
+ double sse; // Sum of squared errors of inliers
+ int *inlier_indices;
+} RANSAC_MOTION;
+
////////////////////////////////////////////////////////////////////////////////
// ransac
-typedef bool (*IsDegenerateFunc)(double *p);
-typedef bool (*FindTransformationFunc)(int points, const double *points1,
- const double *points2, double *params);
-typedef void (*ProjectPointsFunc)(const double *mat, const double *points,
- double *proj, int n, int stride_points,
- int stride_proj);
+typedef bool (*FindTransformationFunc)(const Correspondence *points,
+ const int *indices, int num_indices,
+ double *params);
+typedef void (*ScoreModelFunc)(const double *mat, const Correspondence *points,
+ int num_points, RANSAC_MOTION *model);
// vtable-like structure which stores all of the information needed by RANSAC
// for a particular model type
typedef struct {
- IsDegenerateFunc is_degenerate;
FindTransformationFunc find_transformation;
- ProjectPointsFunc project_points;
+ ScoreModelFunc score_model;
+
+ // The minimum number of points which can be passed to find_transformation
+ // to generate a model.
+ //
+ // This should be set as small as possible. This is due to an observation
+ // from section 4 of "Optimal Ransac" by A. Hast, J. Nysjö and
+ // A. Marchetti (https://dspace5.zcu.cz/bitstream/11025/6869/1/Hast.pdf):
+ // using the minimum possible number of points in the initial model maximizes
+ // the chance that all of the selected points are inliers.
+ //
+ // That paper proposes a method which can deal with models which are
+ // contaminated by outliers, which helps in cases where the inlier fraction
+ // is low. However, for our purposes, global motion only gives significant
+ // gains when the inlier fraction is high.
+ //
+ // So we do not use the method from this paper, but we do find that
+ // minimizing the number of points used for initial model fitting helps
+ // make the best use of the limited number of models we consider.
int minpts;
} RansacModelInfo;
#if ALLOW_TRANSLATION_MODELS
-static void project_points_translation(const double *mat, const double *points,
- double *proj, int n, int stride_points,
- int stride_proj) {
- int i;
- for (i = 0; i < n; ++i) {
- const double x = *(points++), y = *(points++);
- *(proj++) = x + mat[0];
- *(proj++) = y + mat[1];
- points += stride_points - 2;
- proj += stride_proj - 2;
+static void score_translation(const double *mat, const Correspondence *points,
+ int num_points, RANSAC_MOTION *model) {
+ model->num_inliers = 0;
+ model->sse = 0.0;
+
+ for (int i = 0; i < num_points; ++i) {
+ const double x1 = points[i].x;
+ const double y1 = points[i].y;
+ const double x2 = points[i].rx;
+ const double y2 = points[i].ry;
+
+ const double proj_x = x1 + mat[0];
+ const double proj_y = y1 + mat[1];
+
+ const double dx = proj_x - x2;
+ const double dy = proj_y - y2;
+ const double sse = dx * dx + dy * dy;
+
+ if (sse < INLIER_THRESHOLD_SQUARED) {
+ model->inlier_indices[model->num_inliers++] = i;
+ model->sse += sse;
+ }
}
}
#endif // ALLOW_TRANSLATION_MODELS
-static void project_points_affine(const double *mat, const double *points,
- double *proj, int n, int stride_points,
- int stride_proj) {
- int i;
- for (i = 0; i < n; ++i) {
- const double x = *(points++), y = *(points++);
- *(proj++) = mat[2] * x + mat[3] * y + mat[0];
- *(proj++) = mat[4] * x + mat[5] * y + mat[1];
- points += stride_points - 2;
- proj += stride_proj - 2;
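+// Project every correspondence through the candidate affine model `mat`,
+// recording the indices and total squared error of the points that land
+// within INLIER_THRESHOLD of their matches.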
+static void score_affine(const double *mat, const Correspondence *points,
+ int num_points, RANSAC_MOTION *model) {
+ model->num_inliers = 0;
+ model->sse = 0.0;
+
+ for (int i = 0; i < num_points; ++i) {
+ const double x1 = points[i].x;
+ const double y1 = points[i].y;
+ const double x2 = points[i].rx;
+ const double y2 = points[i].ry;
+
+ const double proj_x = mat[2] * x1 + mat[3] * y1 + mat[0];
+ const double proj_y = mat[4] * x1 + mat[5] * y1 + mat[1];
+
+ const double dx = proj_x - x2;
+ const double dy = proj_y - y2;
+ const double sse = dx * dx + dy * dy;
+
+ if (sse < INLIER_THRESHOLD_SQUARED) {
+ model->inlier_indices[model->num_inliers++] = i;
+ model->sse += sse;
+ }
}
}
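Both scoring functions above apply the same inlier test; written out (using the parameter layout visible in score_affine, with (x, y) the source and (rx, ry) the reference coordinates of a Correspondence):

\[ \begin{pmatrix} x' \\ y' \end{pmatrix} = \begin{pmatrix} \mathrm{mat}[2] & \mathrm{mat}[3] \\ \mathrm{mat}[4] & \mathrm{mat}[5] \end{pmatrix} \begin{pmatrix} x \\ y \end{pmatrix} + \begin{pmatrix} \mathrm{mat}[0] \\ \mathrm{mat}[1] \end{pmatrix}, \qquad (x' - r_x)^{2} + (y' - r_y)^{2} < \mathrm{INLIER\_THRESHOLD\_SQUARED}. \]

The translation scorer is the special case in which the 2x2 block is the identity, leaving only the offset (mat[0], mat[1]).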
#if ALLOW_TRANSLATION_MODELS
-static bool find_translation(int np, const double *pts1, const double *pts2,
- double *params) {
+static bool find_translation(const Correspondence *points, const int *indices,
+ int num_indices, double *params) {
double sumx = 0;
double sumy = 0;
- for (int i = 0; i < np; ++i) {
- double dx = *(pts2++);
- double dy = *(pts2++);
- double sx = *(pts1++);
- double sy = *(pts1++);
+ for (int i = 0; i < num_indices; ++i) {
+ int index = indices[i];
+ const double sx = points[index].x;
+ const double sy = points[index].y;
+ const double dx = points[index].rx;
+ const double dy = points[index].ry;
sumx += dx - sx;
sumy += dy - sy;
@@ -111,8 +163,8 @@ static bool find_translation(int np, const double *pts1, const double *pts2,
}
#endif // ALLOW_TRANSLATION_MODELS
-static bool find_rotzoom(int np, const double *pts1, const double *pts2,
- double *params) {
+static bool find_rotzoom(const Correspondence *points, const int *indices,
+ int num_indices, double *params) {
const int n = 4; // Size of least-squares problem
double mat[4 * 4]; // Accumulator for A'A
double y[4]; // Accumulator for A'b
@@ -120,11 +172,12 @@ static bool find_rotzoom(int np, const double *pts1, const double *pts2,
double b; // Single element of b
least_squares_init(mat, y, n);
- for (int i = 0; i < np; ++i) {
- double dx = *(pts2++);
- double dy = *(pts2++);
- double sx = *(pts1++);
- double sy = *(pts1++);
+ for (int i = 0; i < num_indices; ++i) {
+ int index = indices[i];
+ const double sx = points[index].x;
+ const double sy = points[index].y;
+ const double dx = points[index].rx;
+ const double dy = points[index].ry;
a[0] = 1;
a[1] = 0;
@@ -153,8 +206,8 @@ static bool find_rotzoom(int np, const double *pts1, const double *pts2,
return true;
}
-static bool find_affine(int np, const double *pts1, const double *pts2,
- double *params) {
+static bool find_affine(const Correspondence *points, const int *indices,
+ int num_indices, double *params) {
// Note: The least squares problem for affine models is 6-dimensional,
// but it splits into two independent 3-dimensional subproblems.
// Solving these two subproblems separately and recombining at the end
@@ -174,11 +227,12 @@ static bool find_affine(int np, const double *pts1, const double *pts2,
least_squares_init(mat[0], y[0], n);
least_squares_init(mat[1], y[1], n);
- for (int i = 0; i < np; ++i) {
- double dx = *(pts2++);
- double dy = *(pts2++);
- double sx = *(pts1++);
- double sy = *(pts1++);
+ for (int i = 0; i < num_indices; ++i) {
+ int index = indices[i];
+ const double sx = points[index].x;
+ const double sy = points[index].y;
+ const double dx = points[index].rx;
+ const double dy = points[index].ry;
a[0][0] = 1;
a[0][1] = sx;
@@ -211,12 +265,6 @@ static bool find_affine(int np, const double *pts1, const double *pts2,
return true;
}
-typedef struct {
- int num_inliers;
- double sse; // Sum of squared errors of inliers
- int *inlier_indices;
-} RANSAC_MOTION;
-
// Return -1 if 'a' is a better motion, 1 if 'b' is better, 0 otherwise.
static int compare_motions(const void *arg_a, const void *arg_b) {
const RANSAC_MOTION *motion_a = (RANSAC_MOTION *)arg_a;
@@ -234,15 +282,6 @@ static bool is_better_motion(const RANSAC_MOTION *motion_a,
return compare_motions(motion_a, motion_b) < 0;
}
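The body of compare_motions() lies outside this hunk; as a rough sketch (not taken from the patch, and the exact tie-breaking in the file may differ), the ordering implied by is_better_motion() and the qsort() below is "more inliers first, ties broken by lower SSE":

static int compare_motions_sketch(const void *arg_a, const void *arg_b) {
  // Sketch only: rank by inlier count (descending), then by SSE (ascending).
  const RANSAC_MOTION *a = (const RANSAC_MOTION *)arg_a;
  const RANSAC_MOTION *b = (const RANSAC_MOTION *)arg_b;
  if (a->num_inliers > b->num_inliers) return -1;
  if (a->num_inliers < b->num_inliers) return 1;
  if (a->sse < b->sse) return -1;
  if (a->sse > b->sse) return 1;
  return 0;
}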
-static void copy_points_at_indices(double *dest, const double *src,
- const int *indices, int num_points) {
- for (int i = 0; i < num_points; ++i) {
- const int index = indices[i];
- dest[i * 2] = src[index * 2];
- dest[i * 2 + 1] = src[index * 2 + 1];
- }
-}
-
// Returns true on success, false on error
static bool ransac_internal(const Correspondence *matched_points, int npoints,
MotionModel *motion_models, int num_desired_motions,
@@ -257,10 +296,6 @@ static bool ransac_internal(const Correspondence *matched_points, int npoints,
int indices[MAX_MINPTS] = { 0 };
- double *points1, *points2;
- double *corners1, *corners2;
- double *projected_corners;
-
// Store information for the num_desired_motions best transformations found
// and the worst motion among them, as well as the motion currently under
// consideration.
@@ -271,18 +306,19 @@ static bool ransac_internal(const Correspondence *matched_points, int npoints,
// currently under consideration.
double params_this_motion[MAX_PARAMDIM];
+ // Initialize output models, as a fallback in case we can't find a model
+ for (i = 0; i < num_desired_motions; i++) {
+ memcpy(motion_models[i].params, kIdentityParams,
+ MAX_PARAMDIM * sizeof(*(motion_models[i].params)));
+ motion_models[i].num_inliers = 0;
+ }
+
if (npoints < minpts * MINPTS_MULTIPLIER || npoints == 0) {
return false;
}
int min_inliers = AOMMAX((int)(MIN_INLIER_PROB * npoints), minpts);
- points1 = (double *)aom_malloc(sizeof(*points1) * npoints * 2);
- points2 = (double *)aom_malloc(sizeof(*points2) * npoints * 2);
- corners1 = (double *)aom_malloc(sizeof(*corners1) * npoints * 2);
- corners2 = (double *)aom_malloc(sizeof(*corners2) * npoints * 2);
- projected_corners =
- (double *)aom_malloc(sizeof(*projected_corners) * npoints * 2);
motions =
(RANSAC_MOTION *)aom_calloc(num_desired_motions, sizeof(RANSAC_MOTION));
@@ -295,8 +331,7 @@ static bool ransac_internal(const Correspondence *matched_points, int npoints,
int *inlier_buffer = (int *)aom_malloc(sizeof(*inlier_buffer) * npoints *
(num_desired_motions + 1));
- if (!(points1 && points2 && corners1 && corners2 && projected_corners &&
- motions && inlier_buffer)) {
+ if (!(motions && inlier_buffer)) {
ret_val = false;
*mem_alloc_failed = true;
goto finish_ransac;
@@ -311,50 +346,22 @@ static bool ransac_internal(const Correspondence *matched_points, int npoints,
memset(&current_motion, 0, sizeof(current_motion));
current_motion.inlier_indices = inlier_buffer + num_desired_motions * npoints;
- for (i = 0; i < npoints; ++i) {
- corners1[2 * i + 0] = matched_points[i].x;
- corners1[2 * i + 1] = matched_points[i].y;
- corners2[2 * i + 0] = matched_points[i].rx;
- corners2[2 * i + 1] = matched_points[i].ry;
- }
-
for (int trial_count = 0; trial_count < NUM_TRIALS; trial_count++) {
lcg_pick(npoints, minpts, indices, &seed);
- copy_points_at_indices(points1, corners1, indices, minpts);
- copy_points_at_indices(points2, corners2, indices, minpts);
-
- if (model_info->is_degenerate(points1)) {
- continue;
- }
-
- if (!model_info->find_transformation(minpts, points1, points2,
+ if (!model_info->find_transformation(matched_points, indices, minpts,
params_this_motion)) {
continue;
}
- model_info->project_points(params_this_motion, corners1, projected_corners,
- npoints, 2, 2);
-
- current_motion.num_inliers = 0;
- double sse = 0.0;
- for (i = 0; i < npoints; ++i) {
- double dx = projected_corners[i * 2] - corners2[i * 2];
- double dy = projected_corners[i * 2 + 1] - corners2[i * 2 + 1];
- double squared_error = dx * dx + dy * dy;
-
- if (squared_error < INLIER_THRESHOLD_SQUARED) {
- current_motion.inlier_indices[current_motion.num_inliers++] = i;
- sse += squared_error;
- }
- }
+ model_info->score_model(params_this_motion, matched_points, npoints,
+ &current_motion);
if (current_motion.num_inliers < min_inliers) {
// Reject models with too few inliers
continue;
}
- current_motion.sse = sse;
if (is_better_motion(&current_motion, worst_kept_motion)) {
// This motion is better than the worst currently kept motion. Remember
// the inlier points and sse. The parameters for each kept motion
@@ -386,86 +393,98 @@ static bool ransac_internal(const Correspondence *matched_points, int npoints,
// Sort the motions, best first.
qsort(motions, num_desired_motions, sizeof(RANSAC_MOTION), compare_motions);
- // Recompute the motions using only the inliers.
+ // Refine each of the best N models using iterative estimation.
+ //
+ // The idea here is loosely based on the iterative method from
+ // "Locally Optimized RANSAC" by O. Chum, J. Matas and Josef Kittler:
+ // https://cmp.felk.cvut.cz/ftp/articles/matas/chum-dagm03.pdf
+ //
+ // However, we implement a simpler version than their proposal, and simply
+ // refit the model repeatedly until the number of inliers stops increasing,
+ // with a cap on the number of iterations to defend against edge cases which
+ // only improve very slowly.
for (i = 0; i < num_desired_motions; ++i) {
- int num_inliers = motions[i].num_inliers;
- if (num_inliers > 0) {
- assert(num_inliers >= minpts);
-
- copy_points_at_indices(points1, corners1, motions[i].inlier_indices,
- num_inliers);
- copy_points_at_indices(points2, corners2, motions[i].inlier_indices,
- num_inliers);
-
- if (!model_info->find_transformation(num_inliers, points1, points2,
- motion_models[i].params)) {
- // In the unlikely event that this model fitting fails,
- // we don't have a good fallback. So just clear the output
- // model and move on
- memcpy(motion_models[i].params, kIdentityParams,
- MAX_PARAMDIM * sizeof(*(motion_models[i].params)));
- motion_models[i].num_inliers = 0;
- continue;
+ if (motions[i].num_inliers <= 0) {
+ // Output model has already been initialized to the identity model,
+ // so just skip setup
+ continue;
+ }
+
+ bool bad_model = false;
+ for (int refine_count = 0; refine_count < NUM_REFINES; refine_count++) {
+ int num_inliers = motions[i].num_inliers;
+ assert(num_inliers >= min_inliers);
+
+ if (!model_info->find_transformation(matched_points,
+ motions[i].inlier_indices,
+ num_inliers, params_this_motion)) {
+ // In the unlikely event that this model fitting fails, we don't have a
+ // good fallback. So leave this model set to the identity model
+ bad_model = true;
+ break;
}
- // Populate inliers array
- for (int j = 0; j < num_inliers; j++) {
- int index = motions[i].inlier_indices[j];
- const Correspondence *corr = &matched_points[index];
- motion_models[i].inliers[2 * j + 0] = (int)rint(corr->x);
- motion_models[i].inliers[2 * j + 1] = (int)rint(corr->y);
+ // Score the newly generated model
+ model_info->score_model(params_this_motion, matched_points, npoints,
+ &current_motion);
+
+ // At this point, there are three possibilities:
+ // 1) If we found more inliers, keep refining.
+ // 2) If we found the same number of inliers but a lower SSE, we want to
+ // keep the new model, but further refinement is unlikely to gain much.
+ // So commit to this new model
+ // 3) It is possible, but very unlikely, that the new model will have
+ // fewer inliers. If it does happen, we probably just lost a few
+ // borderline inliers. So treat the same as case (2).
+ if (current_motion.num_inliers > motions[i].num_inliers) {
+ motions[i].num_inliers = current_motion.num_inliers;
+ motions[i].sse = current_motion.sse;
+ int *tmp = motions[i].inlier_indices;
+ motions[i].inlier_indices = current_motion.inlier_indices;
+ current_motion.inlier_indices = tmp;
+ } else {
+ // Refined model is no better, so stop
+ // This shouldn't be significantly worse than the previous model,
+ // so it's fine to use the parameters in params_this_motion.
+ // This saves us from having to cache the previous iteration's params.
+ break;
}
- motion_models[i].num_inliers = num_inliers;
- } else {
- memcpy(motion_models[i].params, kIdentityParams,
- MAX_PARAMDIM * sizeof(*(motion_models[i].params)));
- motion_models[i].num_inliers = 0;
}
+
+ if (bad_model) continue;
+
+ // Fill in output struct
+ memcpy(motion_models[i].params, params_this_motion,
+ MAX_PARAMDIM * sizeof(*motion_models[i].params));
+ for (int j = 0; j < motions[i].num_inliers; j++) {
+ int index = motions[i].inlier_indices[j];
+ const Correspondence *corr = &matched_points[index];
+ motion_models[i].inliers[2 * j + 0] = (int)rint(corr->x);
+ motion_models[i].inliers[2 * j + 1] = (int)rint(corr->y);
+ }
+ motion_models[i].num_inliers = motions[i].num_inliers;
}
finish_ransac:
aom_free(inlier_buffer);
aom_free(motions);
- aom_free(projected_corners);
- aom_free(corners2);
- aom_free(corners1);
- aom_free(points2);
- aom_free(points1);
return ret_val;
}
-static bool is_collinear3(double *p1, double *p2, double *p3) {
- static const double collinear_eps = 1e-3;
- const double v =
- (p2[0] - p1[0]) * (p3[1] - p1[1]) - (p2[1] - p1[1]) * (p3[0] - p1[0]);
- return fabs(v) < collinear_eps;
-}
-
-#if ALLOW_TRANSLATION_MODELS
-static bool is_degenerate_translation(double *p) {
- return (p[0] - p[2]) * (p[0] - p[2]) + (p[1] - p[3]) * (p[1] - p[3]) <= 2;
-}
-#endif // ALLOW_TRANSLATION_MODELS
-
-static bool is_degenerate_affine(double *p) {
- return is_collinear3(p, p + 2, p + 4);
-}
-
static const RansacModelInfo ransac_model_info[TRANS_TYPES] = {
// IDENTITY
- { NULL, NULL, NULL, 0 },
+ { NULL, NULL, 0 },
// TRANSLATION
#if ALLOW_TRANSLATION_MODELS
- { is_degenerate_translation, find_translation, project_points_translation,
- 3 },
+ { find_translation, score_translation, 1 },
#else
- { NULL, NULL, NULL, 0 },
+ { NULL, NULL, 0 },
#endif
// ROTZOOM
- { is_degenerate_affine, find_rotzoom, project_points_affine, 3 },
+ { find_rotzoom, score_affine, 2 },
// AFFINE
- { is_degenerate_affine, find_affine, project_points_affine, 3 },
+ { find_affine, score_affine, 3 },
};
// Returns true on success, false on error
diff --git a/third_party/aom/aom_dsp/flow_estimation/x86/corner_match_avx2.c b/third_party/aom/aom_dsp/flow_estimation/x86/corner_match_avx2.c
index 87c76fa13b..ff69ae75f5 100644
--- a/third_party/aom/aom_dsp/flow_estimation/x86/corner_match_avx2.c
+++ b/third_party/aom/aom_dsp/flow_estimation/x86/corner_match_avx2.c
@@ -17,64 +17,112 @@
#include "aom_ports/mem.h"
#include "aom_dsp/flow_estimation/corner_match.h"
-DECLARE_ALIGNED(16, static const uint8_t,
- byte_mask[16]) = { 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 0, 0, 0 };
-#if MATCH_SZ != 13
-#error "Need to change byte_mask in corner_match_sse4.c if MATCH_SZ != 13"
+DECLARE_ALIGNED(32, static const uint16_t, ones_array[16]) = { 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1 };
+
+#if MATCH_SZ != 16
+#error "Need to apply pixel mask in corner_match_avx2.c if MATCH_SZ != 16"
#endif
-/* Compute corr(frame1, frame2) * MATCH_SZ * stddev(frame1), where the
-correlation/standard deviation are taken over MATCH_SZ by MATCH_SZ windows
-of each image, centered at (x1, y1) and (x2, y2) respectively.
+/* Compute mean and standard deviation of pixels in a window of size
+ MATCH_SZ by MATCH_SZ centered at (x, y).
+ Store results into *mean and *one_over_stddev
+
+ Note: The output of this function is scaled by MATCH_SZ, as in
+ *mean = MATCH_SZ * <true mean> and
+ *one_over_stddev = 1 / (MATCH_SZ * <true stddev>)
+
+ Combined with the fact that we return 1/stddev rather than the standard
+ deviation itself, this allows us to completely avoid divisions in
+ aom_compute_correlation, which is much hotter than this function is.
+
+ Returns true if this feature point is usable, false otherwise.
*/
-double av1_compute_cross_correlation_avx2(const unsigned char *frame1,
- int stride1, int x1, int y1,
- const unsigned char *frame2,
- int stride2, int x2, int y2) {
- int i, stride1_i = 0, stride2_i = 0;
- __m256i temp1, sum_vec, sumsq2_vec, cross_vec, v, v1_1, v2_1;
- const __m128i mask = _mm_load_si128((__m128i *)byte_mask);
- const __m256i zero = _mm256_setzero_si256();
- __m128i v1, v2;
-
- sum_vec = zero;
- sumsq2_vec = zero;
- cross_vec = zero;
+bool aom_compute_mean_stddev_avx2(const unsigned char *frame, int stride, int x,
+ int y, double *mean,
+ double *one_over_stddev) {
+ __m256i sum_vec = _mm256_setzero_si256();
+ __m256i sumsq_vec = _mm256_setzero_si256();
+
+ frame += (y - MATCH_SZ_BY2) * stride + (x - MATCH_SZ_BY2);
+
+ for (int i = 0; i < MATCH_SZ; ++i) {
+ const __m256i v = _mm256_cvtepu8_epi16(_mm_loadu_si128((__m128i *)frame));
+
+ sum_vec = _mm256_add_epi16(sum_vec, v);
+ sumsq_vec = _mm256_add_epi32(sumsq_vec, _mm256_madd_epi16(v, v));
+
+ frame += stride;
+ }
+
+ // Reduce sum_vec and sumsq_vec into single values
+ // Start by reducing each vector to 8x32-bit values, hadd() to perform 8
+ // additions, sum vertically to do 4 more, then the last 2 in scalar code.
+ const __m256i ones = _mm256_load_si256((__m256i *)ones_array);
+ const __m256i partial_sum = _mm256_madd_epi16(sum_vec, ones);
+ const __m256i tmp_8x32 = _mm256_hadd_epi32(partial_sum, sumsq_vec);
+ const __m128i tmp_4x32 = _mm_add_epi32(_mm256_extracti128_si256(tmp_8x32, 0),
+ _mm256_extracti128_si256(tmp_8x32, 1));
+ const int sum =
+ _mm_extract_epi32(tmp_4x32, 0) + _mm_extract_epi32(tmp_4x32, 1);
+ const int sumsq =
+ _mm_extract_epi32(tmp_4x32, 2) + _mm_extract_epi32(tmp_4x32, 3);
+
+ *mean = (double)sum / MATCH_SZ;
+ const double variance = sumsq - (*mean) * (*mean);
+ if (variance < MIN_FEATURE_VARIANCE) {
+ *one_over_stddev = 0.0;
+ return false;
+ }
+ *one_over_stddev = 1.0 / sqrt(variance);
+ return true;
+}
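As a sanity check of the scaling described above (not part of the patch): with N = MATCH_SZ, the sums run over the N^2 pixels of the window, so

\[ \mathrm{mean} = \frac{\sum p}{N} = N\bar{p}, \qquad \mathrm{variance} = \sum p^{2} - \mathrm{mean}^{2} = N^{2}\Bigl(\frac{\sum p^{2}}{N^{2}} - \bar{p}^{2}\Bigr) = N^{2}\sigma^{2}, \qquad \mathrm{one\_over\_stddev} = \frac{1}{N\sigma}, \]

matching the stated *mean = MATCH_SZ * <true mean> and *one_over_stddev = 1 / (MATCH_SZ * <true stddev>). Note that the MIN_FEATURE_VARIANCE cut-off is therefore compared against the N^2-scaled variance.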
+
+/* Compute corr(frame1, frame2) over a window of size MATCH_SZ by MATCH_SZ.
+ To save on computation, the mean and (1 divided by the) standard deviation
+ of the window in each frame are precomputed and passed into this function
+ as arguments.
+*/
+double aom_compute_correlation_avx2(const unsigned char *frame1, int stride1,
+ int x1, int y1, double mean1,
+ double one_over_stddev1,
+ const unsigned char *frame2, int stride2,
+ int x2, int y2, double mean2,
+ double one_over_stddev2) {
+ __m256i cross_vec = _mm256_setzero_si256();
frame1 += (y1 - MATCH_SZ_BY2) * stride1 + (x1 - MATCH_SZ_BY2);
frame2 += (y2 - MATCH_SZ_BY2) * stride2 + (x2 - MATCH_SZ_BY2);
- for (i = 0; i < MATCH_SZ; ++i) {
- v1 = _mm_and_si128(_mm_loadu_si128((__m128i *)&frame1[stride1_i]), mask);
- v1_1 = _mm256_cvtepu8_epi16(v1);
- v2 = _mm_and_si128(_mm_loadu_si128((__m128i *)&frame2[stride2_i]), mask);
- v2_1 = _mm256_cvtepu8_epi16(v2);
+ for (int i = 0; i < MATCH_SZ; ++i) {
+ const __m256i v1 = _mm256_cvtepu8_epi16(_mm_loadu_si128((__m128i *)frame1));
+ const __m256i v2 = _mm256_cvtepu8_epi16(_mm_loadu_si128((__m128i *)frame2));
- v = _mm256_insertf128_si256(_mm256_castsi128_si256(v1), v2, 1);
- sumsq2_vec = _mm256_add_epi32(sumsq2_vec, _mm256_madd_epi16(v2_1, v2_1));
+ cross_vec = _mm256_add_epi32(cross_vec, _mm256_madd_epi16(v1, v2));
- sum_vec = _mm256_add_epi16(sum_vec, _mm256_sad_epu8(v, zero));
- cross_vec = _mm256_add_epi32(cross_vec, _mm256_madd_epi16(v1_1, v2_1));
- stride1_i += stride1;
- stride2_i += stride2;
+ frame1 += stride1;
+ frame2 += stride2;
}
- __m256i sum_vec1 = _mm256_srli_si256(sum_vec, 8);
- sum_vec = _mm256_add_epi32(sum_vec, sum_vec1);
- int sum1_acc = _mm_cvtsi128_si32(_mm256_castsi256_si128(sum_vec));
- int sum2_acc = _mm256_extract_epi32(sum_vec, 4);
-
- __m256i unp_low = _mm256_unpacklo_epi64(sumsq2_vec, cross_vec);
- __m256i unp_hig = _mm256_unpackhi_epi64(sumsq2_vec, cross_vec);
- temp1 = _mm256_add_epi32(unp_low, unp_hig);
-
- __m128i low_sumsq = _mm256_castsi256_si128(temp1);
- low_sumsq = _mm_add_epi32(low_sumsq, _mm256_extractf128_si256(temp1, 1));
- low_sumsq = _mm_add_epi32(low_sumsq, _mm_srli_epi64(low_sumsq, 32));
- int sumsq2_acc = _mm_cvtsi128_si32(low_sumsq);
- int cross_acc = _mm_extract_epi32(low_sumsq, 2);
-
- int var2 = sumsq2_acc * MATCH_SZ_SQ - sum2_acc * sum2_acc;
- int cov = cross_acc * MATCH_SZ_SQ - sum1_acc * sum2_acc;
- return cov / sqrt((double)var2);
+
+ // Sum cross_vec into a single value
+ const __m128i tmp = _mm_add_epi32(_mm256_extracti128_si256(cross_vec, 0),
+ _mm256_extracti128_si256(cross_vec, 1));
+ const int cross = _mm_extract_epi32(tmp, 0) + _mm_extract_epi32(tmp, 1) +
+ _mm_extract_epi32(tmp, 2) + _mm_extract_epi32(tmp, 3);
+
+ // Note: In theory, the calculations here "should" be
+ // covariance = cross / N^2 - mean1 * mean2
+ // correlation = covariance / (stddev1 * stddev2).
+ //
+ // However, because of the scaling in aom_compute_mean_stddev, the
+ // lines below actually calculate
+ // covariance * N^2 = cross - (mean1 * N) * (mean2 * N)
+ // correlation = (covariance * N^2) / ((stddev1 * N) * (stddev2 * N))
+ //
+ // ie. we have removed the need for a division, and still end up with the
+ // correct unscaled correlation (ie, in the range [-1, +1])
+ const double covariance = cross - mean1 * mean2;
+ const double correlation = covariance * (one_over_stddev1 * one_over_stddev2);
+ return correlation;
}
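For reference (derivation not part of the patch), with cross = sum of p1*p2 over the N^2 = MATCH_SZ^2 window, the division-free form used above is

\[ \mathrm{corr} = \frac{\frac{\mathrm{cross}}{N^{2}} - \bar{p}_1\bar{p}_2}{\sigma_1\sigma_2} = \frac{\mathrm{cross} - (N\bar{p}_1)(N\bar{p}_2)}{(N\sigma_1)(N\sigma_2)} = (\mathrm{cross} - \mathrm{mean}_1\cdot\mathrm{mean}_2)\cdot\mathrm{one\_over\_stddev}_1\cdot\mathrm{one\_over\_stddev}_2, \]

so the factors of N introduced by aom_compute_mean_stddev cancel exactly and the result remains a plain correlation in [-1, +1].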
diff --git a/third_party/aom/aom_dsp/flow_estimation/x86/corner_match_sse4.c b/third_party/aom/aom_dsp/flow_estimation/x86/corner_match_sse4.c
index b3cb5bc5fd..bff7db6d2f 100644
--- a/third_party/aom/aom_dsp/flow_estimation/x86/corner_match_sse4.c
+++ b/third_party/aom/aom_dsp/flow_estimation/x86/corner_match_sse4.c
@@ -21,84 +21,125 @@
#include "aom_ports/mem.h"
#include "aom_dsp/flow_estimation/corner_match.h"
-DECLARE_ALIGNED(16, static const uint8_t,
- byte_mask[16]) = { 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 0, 0, 0 };
-#if MATCH_SZ != 13
-#error "Need to change byte_mask in corner_match_sse4.c if MATCH_SZ != 13"
+DECLARE_ALIGNED(16, static const uint16_t, ones_array[8]) = { 1, 1, 1, 1,
+ 1, 1, 1, 1 };
+
+#if MATCH_SZ != 16
+#error "Need to apply pixel mask in corner_match_sse4.c if MATCH_SZ != 16"
#endif
-/* Compute corr(frame1, frame2) * MATCH_SZ * stddev(frame1), where the
- correlation/standard deviation are taken over MATCH_SZ by MATCH_SZ windows
- of each image, centered at (x1, y1) and (x2, y2) respectively.
+/* Compute mean and standard deviation of pixels in a window of size
+ MATCH_SZ by MATCH_SZ centered at (x, y).
+ Store results into *mean and *one_over_stddev
+
+ Note: The output of this function is scaled by MATCH_SZ, as in
+ *mean = MATCH_SZ * <true mean> and
+ *one_over_stddev = 1 / (MATCH_SZ * <true stddev>)
+
+ Combined with the fact that we return 1/stddev rather than the standard
+ deviation itself, this allows us to completely avoid divisions in
+ aom_compute_correlation, which is much hotter than this function is.
+
+ Returns true if this feature point is usable, false otherwise.
+*/
+bool aom_compute_mean_stddev_sse4_1(const unsigned char *frame, int stride,
+ int x, int y, double *mean,
+ double *one_over_stddev) {
+ // 8 16-bit partial sums of pixels
+ // Each lane sums at most 2*MATCH_SZ pixels, which can have values up to 255,
+ // and is therefore at most 2*MATCH_SZ*255, which is > 2^8 but < 2^16.
+ // Thus this value is safe to store in 16 bits.
+ __m128i sum_vec = _mm_setzero_si128();
+
+ // 8 32-bit partial sums of squares
+ __m128i sumsq_vec_l = _mm_setzero_si128();
+ __m128i sumsq_vec_r = _mm_setzero_si128();
+
+ frame += (y - MATCH_SZ_BY2) * stride + (x - MATCH_SZ_BY2);
+
+ for (int i = 0; i < MATCH_SZ; ++i) {
+ const __m128i v = _mm_loadu_si128((__m128i *)frame);
+ const __m128i v_l = _mm_cvtepu8_epi16(v);
+ const __m128i v_r = _mm_cvtepu8_epi16(_mm_srli_si128(v, 8));
+
+ sum_vec = _mm_add_epi16(sum_vec, _mm_add_epi16(v_l, v_r));
+ sumsq_vec_l = _mm_add_epi32(sumsq_vec_l, _mm_madd_epi16(v_l, v_l));
+ sumsq_vec_r = _mm_add_epi32(sumsq_vec_r, _mm_madd_epi16(v_r, v_r));
+
+ frame += stride;
+ }
+
+ // Reduce sum_vec and sumsq_vec into single values
+ // Start by reducing each vector to 4x32-bit values, hadd() to perform four
+ // additions, then perform the last two additions in scalar code.
+ const __m128i ones = _mm_load_si128((__m128i *)ones_array);
+ const __m128i partial_sum = _mm_madd_epi16(sum_vec, ones);
+ const __m128i partial_sumsq = _mm_add_epi32(sumsq_vec_l, sumsq_vec_r);
+ const __m128i tmp = _mm_hadd_epi32(partial_sum, partial_sumsq);
+ const int sum = _mm_extract_epi32(tmp, 0) + _mm_extract_epi32(tmp, 1);
+ const int sumsq = _mm_extract_epi32(tmp, 2) + _mm_extract_epi32(tmp, 3);
+
+ *mean = (double)sum / MATCH_SZ;
+ const double variance = sumsq - (*mean) * (*mean);
+ if (variance < MIN_FEATURE_VARIANCE) {
+ *one_over_stddev = 0.0;
+ return false;
+ }
+ *one_over_stddev = 1.0 / sqrt(variance);
+ return true;
+}
+
+/* Compute corr(frame1, frame2) over a window of size MATCH_SZ by MATCH_SZ.
+ To save on computation, the mean and (1 divided by the) standard deviation
+ of the window in each frame are precomputed and passed into this function
+ as arguments.
*/
-double av1_compute_cross_correlation_sse4_1(const unsigned char *frame1,
- int stride1, int x1, int y1,
- const unsigned char *frame2,
- int stride2, int x2, int y2) {
- int i;
- // 2 16-bit partial sums in lanes 0, 4 (== 2 32-bit partial sums in lanes 0,
- // 2)
- __m128i sum1_vec = _mm_setzero_si128();
- __m128i sum2_vec = _mm_setzero_si128();
- // 4 32-bit partial sums of squares
- __m128i sumsq2_vec = _mm_setzero_si128();
- __m128i cross_vec = _mm_setzero_si128();
-
- const __m128i mask = _mm_load_si128((__m128i *)byte_mask);
- const __m128i zero = _mm_setzero_si128();
+double aom_compute_correlation_sse4_1(const unsigned char *frame1, int stride1,
+ int x1, int y1, double mean1,
+ double one_over_stddev1,
+ const unsigned char *frame2, int stride2,
+ int x2, int y2, double mean2,
+ double one_over_stddev2) {
+ // 8 32-bit partial sums of products
+ __m128i cross_vec_l = _mm_setzero_si128();
+ __m128i cross_vec_r = _mm_setzero_si128();
frame1 += (y1 - MATCH_SZ_BY2) * stride1 + (x1 - MATCH_SZ_BY2);
frame2 += (y2 - MATCH_SZ_BY2) * stride2 + (x2 - MATCH_SZ_BY2);
- for (i = 0; i < MATCH_SZ; ++i) {
- const __m128i v1 =
- _mm_and_si128(_mm_loadu_si128((__m128i *)&frame1[i * stride1]), mask);
- const __m128i v2 =
- _mm_and_si128(_mm_loadu_si128((__m128i *)&frame2[i * stride2]), mask);
-
- // Using the 'sad' intrinsic here is a bit faster than adding
- // v1_l + v1_r and v2_l + v2_r, plus it avoids the need for a 16->32 bit
- // conversion step later, for a net speedup of ~10%
- sum1_vec = _mm_add_epi16(sum1_vec, _mm_sad_epu8(v1, zero));
- sum2_vec = _mm_add_epi16(sum2_vec, _mm_sad_epu8(v2, zero));
+ for (int i = 0; i < MATCH_SZ; ++i) {
+ const __m128i v1 = _mm_loadu_si128((__m128i *)frame1);
+ const __m128i v2 = _mm_loadu_si128((__m128i *)frame2);
const __m128i v1_l = _mm_cvtepu8_epi16(v1);
const __m128i v1_r = _mm_cvtepu8_epi16(_mm_srli_si128(v1, 8));
const __m128i v2_l = _mm_cvtepu8_epi16(v2);
const __m128i v2_r = _mm_cvtepu8_epi16(_mm_srli_si128(v2, 8));
- sumsq2_vec = _mm_add_epi32(
- sumsq2_vec,
- _mm_add_epi32(_mm_madd_epi16(v2_l, v2_l), _mm_madd_epi16(v2_r, v2_r)));
- cross_vec = _mm_add_epi32(
- cross_vec,
- _mm_add_epi32(_mm_madd_epi16(v1_l, v2_l), _mm_madd_epi16(v1_r, v2_r)));
+ cross_vec_l = _mm_add_epi32(cross_vec_l, _mm_madd_epi16(v1_l, v2_l));
+ cross_vec_r = _mm_add_epi32(cross_vec_r, _mm_madd_epi16(v1_r, v2_r));
+
+ frame1 += stride1;
+ frame2 += stride2;
}
- // Now we can treat the four registers (sum1_vec, sum2_vec, sumsq2_vec,
- // cross_vec)
- // as holding 4 32-bit elements each, which we want to sum horizontally.
- // We do this by transposing and then summing vertically.
- __m128i tmp_0 = _mm_unpacklo_epi32(sum1_vec, sum2_vec);
- __m128i tmp_1 = _mm_unpackhi_epi32(sum1_vec, sum2_vec);
- __m128i tmp_2 = _mm_unpacklo_epi32(sumsq2_vec, cross_vec);
- __m128i tmp_3 = _mm_unpackhi_epi32(sumsq2_vec, cross_vec);
-
- __m128i tmp_4 = _mm_unpacklo_epi64(tmp_0, tmp_2);
- __m128i tmp_5 = _mm_unpackhi_epi64(tmp_0, tmp_2);
- __m128i tmp_6 = _mm_unpacklo_epi64(tmp_1, tmp_3);
- __m128i tmp_7 = _mm_unpackhi_epi64(tmp_1, tmp_3);
-
- __m128i res =
- _mm_add_epi32(_mm_add_epi32(tmp_4, tmp_5), _mm_add_epi32(tmp_6, tmp_7));
-
- int sum1 = _mm_extract_epi32(res, 0);
- int sum2 = _mm_extract_epi32(res, 1);
- int sumsq2 = _mm_extract_epi32(res, 2);
- int cross = _mm_extract_epi32(res, 3);
-
- int var2 = sumsq2 * MATCH_SZ_SQ - sum2 * sum2;
- int cov = cross * MATCH_SZ_SQ - sum1 * sum2;
- return cov / sqrt((double)var2);
+ // Sum cross_vec into a single value
+ const __m128i tmp = _mm_add_epi32(cross_vec_l, cross_vec_r);
+ const int cross = _mm_extract_epi32(tmp, 0) + _mm_extract_epi32(tmp, 1) +
+ _mm_extract_epi32(tmp, 2) + _mm_extract_epi32(tmp, 3);
+
+ // Note: In theory, the calculations here "should" be
+ // covariance = cross / N^2 - mean1 * mean2
+ // correlation = covariance / (stddev1 * stddev2).
+ //
+ // However, because of the scaling in aom_compute_mean_stddev, the
+ // lines below actually calculate
+ // covariance * N^2 = cross - (mean1 * N) * (mean2 * N)
+ // correlation = (covariance * N^2) / ((stddev1 * N) * (stddev2 * N))
+ //
+ // ie. we have removed the need for a division, and still end up with the
+ // correct unscaled correlation (ie, in the range [-1, +1])
+ const double covariance = cross - mean1 * mean2;
+ const double correlation = covariance * (one_over_stddev1 * one_over_stddev2);
+ return correlation;
}
diff --git a/third_party/aom/aom_dsp/flow_estimation/x86/disflow_avx2.c b/third_party/aom/aom_dsp/flow_estimation/x86/disflow_avx2.c
new file mode 100644
index 0000000000..ad5a1bd7c6
--- /dev/null
+++ b/third_party/aom/aom_dsp/flow_estimation/x86/disflow_avx2.c
@@ -0,0 +1,417 @@
+/*
+ * Copyright (c) 2024, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <assert.h>
+#include <math.h>
+#include <immintrin.h>
+
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_dsp/flow_estimation/disflow.h"
+#include "aom_dsp/x86/synonyms.h"
+#include "aom_dsp/x86/synonyms_avx2.h"
+
+#include "config/aom_dsp_rtcd.h"
+
+#if DISFLOW_PATCH_SIZE != 8
+#error "Need to change disflow_avx2.c if DISFLOW_PATCH_SIZE != 8"
+#endif
+
+// Compute horizontal and vertical kernels and return them packed into a
+// register. The coefficient ordering is:
+// h0, h1, v0, v1, h2, h3, v2, v3
+// This is chosen because it takes less work than fully separating the kernels,
+// but it is separated enough that we can pick out each coefficient pair in the
+// main compute_flow_at_point function
+static INLINE __m128i compute_cubic_kernels(double u, double v) {
+ const __m128d x = _mm_set_pd(v, u);
+
+ const __m128d x2 = _mm_mul_pd(x, x);
+ const __m128d x3 = _mm_mul_pd(x2, x);
+
+ // Macro to multiply a value v by a constant coefficient c
+#define MULC(c, v) _mm_mul_pd(_mm_set1_pd(c), v)
+
+ // Compute floating-point kernel
+ // Note: To ensure results are bit-identical to the C code, we need to perform
+ // exactly the same sequence of operations here as in the C code.
+ __m128d k0 = _mm_sub_pd(_mm_add_pd(MULC(-0.5, x), x2), MULC(0.5, x3));
+ __m128d k1 =
+ _mm_add_pd(_mm_sub_pd(_mm_set1_pd(1.0), MULC(2.5, x2)), MULC(1.5, x3));
+ __m128d k2 =
+ _mm_sub_pd(_mm_add_pd(MULC(0.5, x), MULC(2.0, x2)), MULC(1.5, x3));
+ __m128d k3 = _mm_add_pd(MULC(-0.5, x2), MULC(0.5, x3));
+#undef MULC
+
+ // Integerize
+ __m128d prec = _mm_set1_pd((double)(1 << DISFLOW_INTERP_BITS));
+
+ k0 = _mm_round_pd(_mm_mul_pd(k0, prec),
+ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+ k1 = _mm_round_pd(_mm_mul_pd(k1, prec),
+ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+ k2 = _mm_round_pd(_mm_mul_pd(k2, prec),
+ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+ k3 = _mm_round_pd(_mm_mul_pd(k3, prec),
+ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+
+ const __m128i c0 = _mm_cvtpd_epi32(k0);
+ const __m128i c1 = _mm_cvtpd_epi32(k1);
+ const __m128i c2 = _mm_cvtpd_epi32(k2);
+ const __m128i c3 = _mm_cvtpd_epi32(k3);
+
+ // Rearrange results and convert down to 16 bits, giving the target output
+ // ordering
+ const __m128i c01 = _mm_unpacklo_epi32(c0, c1);
+ const __m128i c23 = _mm_unpacklo_epi32(c2, c3);
+ return _mm_packs_epi32(c01, c23);
+}
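For reference (not in the patch), the MULC expressions above evaluate the cubic interpolation weights at the fractional offset x before quantizing each to DISFLOW_INTERP_BITS fractional bits:

\[ k_0(x) = -\tfrac{1}{2}x + x^{2} - \tfrac{1}{2}x^{3}, \quad k_1(x) = 1 - \tfrac{5}{2}x^{2} + \tfrac{3}{2}x^{3}, \quad k_2(x) = \tfrac{1}{2}x + 2x^{2} - \tfrac{3}{2}x^{3}, \quad k_3(x) = -\tfrac{1}{2}x^{2} + \tfrac{1}{2}x^{3}. \]

These are the usual Catmull-Rom-style weights; they sum to 1 for every x, so a constant input is reproduced exactly (up to the per-coefficient rounding).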
+
+// Compare two regions of width x height pixels, one rooted at position
+// (x, y) in src and the other at (x + u, y + v) in ref.
+// This function computes the flow error vector b for this patch: it samples
+// the ref patch at the offset (u, v) using bicubic interpolation, takes the
+// per-pixel difference dt against the src patch, and accumulates
+// b[0] = sum(dx * dt), b[1] = sum(dy * dt).
+//
+// TODO(rachelbarker): Test speed/quality impact of using bilinear interpolation
+// instead of bicubic interpolation
+static INLINE void compute_flow_vector(const uint8_t *src, const uint8_t *ref,
+ int width, int height, int stride, int x,
+ int y, double u, double v,
+ const int16_t *dx, const int16_t *dy,
+ int *b) {
+ const __m256i zero = _mm256_setzero_si256();
+
+ // Accumulate 8 32-bit partial sums for each element of b
+ // These will be flattened at the end.
+ __m256i b0_acc = _mm256_setzero_si256();
+ __m256i b1_acc = _mm256_setzero_si256();
+
+ // Split offset into integer and fractional parts, and compute cubic
+ // interpolation kernels
+ const int u_int = (int)floor(u);
+ const int v_int = (int)floor(v);
+ const double u_frac = u - floor(u);
+ const double v_frac = v - floor(v);
+
+ const __m128i kernels = compute_cubic_kernels(u_frac, v_frac);
+
+ // Storage for intermediate values between the two convolution directions
+ // In the AVX2 implementation, this needs a dummy row at the end, because
+ // we generate 2 rows at a time but the total number of rows is odd.
+ // So we generate one more row than we actually need.
+ DECLARE_ALIGNED(32, int16_t,
+ tmp_[DISFLOW_PATCH_SIZE * (DISFLOW_PATCH_SIZE + 4)]);
+ int16_t *tmp = tmp_ + DISFLOW_PATCH_SIZE; // Offset by one row
+
+ // Clamp coordinates so that all pixels we fetch will remain within the
+ // allocated border region, but allow them to go far enough out that
+ // the border pixels' values do not change.
+ // Since we are calculating an 8x8 block, the bottom-right pixel
+ // in the block has coordinates (x0 + 7, y0 + 7). Then, the cubic
+ // interpolation has 4 taps, meaning that the output of pixel
+ // (x_w, y_w) depends on the pixels in the range
+ // ([x_w - 1, x_w + 2], [y_w - 1, y_w + 2]).
+ //
+ // Thus the most extreme coordinates which will be fetched are
+ // (x0 - 1, y0 - 1) and (x0 + 9, y0 + 9).
+ const int x0 = clamp(x + u_int, -9, width);
+ const int y0 = clamp(y + v_int, -9, height);
+
+ // Horizontal convolution
+
+ // Prepare the kernel vectors
+ // We split the kernel into two vectors with kernel indices:
+ // 0, 1, 0, 1, 0, 1, 0, 1, and
+ // 2, 3, 2, 3, 2, 3, 2, 3
+ __m256i h_kernel_01 = _mm256_broadcastd_epi32(kernels);
+ __m256i h_kernel_23 = _mm256_broadcastd_epi32(_mm_srli_si128(kernels, 8));
+
+ __m256i round_const_h = _mm256_set1_epi32(1 << (DISFLOW_INTERP_BITS - 6 - 1));
+
+ for (int i = -1; i < DISFLOW_PATCH_SIZE + 2; i += 2) {
+ const int y_w = y0 + i;
+ const uint8_t *ref_row = &ref[y_w * stride + (x0 - 1)];
+ int16_t *tmp_row = &tmp[i * DISFLOW_PATCH_SIZE];
+
+ // Load this row of pixels.
+ // For an 8x8 patch, we need to load the 8 image pixels + 3 extras,
+ // for a total of 11 pixels. Here we load 16 pixels, but only use
+ // the first 11.
+ __m256i row =
+ yy_loadu2_128((__m128i *)(ref_row + stride), (__m128i *)ref_row);
+
+ // Expand pixels to int16s
+ // We must use unpacks here, as we have one row in each 128-bit lane
+ // and want to handle each of those independently.
+ // This is in contrast to _mm256_cvtepu8_epi16(), which takes a single
+ // 128-bit input and widens it to 256 bits.
+ __m256i px_0to7_i16 = _mm256_unpacklo_epi8(row, zero);
+ __m256i px_4to10_i16 =
+ _mm256_unpacklo_epi8(_mm256_srli_si256(row, 4), zero);
+
+ // Compute first four outputs
+ // input pixels 0, 1, 1, 2, 2, 3, 3, 4
+ // * kernel 0, 1, 0, 1, 0, 1, 0, 1
+ __m256i px0 =
+ _mm256_unpacklo_epi16(px_0to7_i16, _mm256_srli_si256(px_0to7_i16, 2));
+ // input pixels 2, 3, 3, 4, 4, 5, 5, 6
+ // * kernel 2, 3, 2, 3, 2, 3, 2, 3
+ __m256i px1 = _mm256_unpacklo_epi16(_mm256_srli_si256(px_0to7_i16, 4),
+ _mm256_srli_si256(px_0to7_i16, 6));
+ // Convolve with kernel and sum 2x2 boxes to form first 4 outputs
+ __m256i sum0 = _mm256_add_epi32(_mm256_madd_epi16(px0, h_kernel_01),
+ _mm256_madd_epi16(px1, h_kernel_23));
+
+ __m256i out0 = _mm256_srai_epi32(_mm256_add_epi32(sum0, round_const_h),
+ DISFLOW_INTERP_BITS - 6);
+
+ // Compute second four outputs
+ __m256i px2 =
+ _mm256_unpacklo_epi16(px_4to10_i16, _mm256_srli_si256(px_4to10_i16, 2));
+ __m256i px3 = _mm256_unpacklo_epi16(_mm256_srli_si256(px_4to10_i16, 4),
+ _mm256_srli_si256(px_4to10_i16, 6));
+ __m256i sum1 = _mm256_add_epi32(_mm256_madd_epi16(px2, h_kernel_01),
+ _mm256_madd_epi16(px3, h_kernel_23));
+
+ // Round by just enough bits that the result is
+    // guaranteed to fit into an i16. Then the next stage can use 16 x 16 -> 32
+    // bit multiplies, which should be a fair bit faster than the
+    // 32 x 32 -> 32 bit multiplies that would otherwise be needed.
+ // This means shifting down so we have 6 extra bits, for a maximum value
+ // of +18360, which can occur if u_frac == 0.5 and the input pixels are
+ // {0, 255, 255, 0}.
+ __m256i out1 = _mm256_srai_epi32(_mm256_add_epi32(sum1, round_const_h),
+ DISFLOW_INTERP_BITS - 6);
+
+ _mm256_storeu_si256((__m256i *)tmp_row, _mm256_packs_epi32(out0, out1));
+ }
+
+ // Vertical convolution
+ const int round_bits = DISFLOW_INTERP_BITS + 6 - DISFLOW_DERIV_SCALE_LOG2;
+ __m256i round_const_v = _mm256_set1_epi32(1 << (round_bits - 1));
+
+ __m256i v_kernel_01 = _mm256_broadcastd_epi32(_mm_srli_si128(kernels, 4));
+ __m256i v_kernel_23 = _mm256_broadcastd_epi32(_mm_srli_si128(kernels, 12));
+
+ for (int i = 0; i < DISFLOW_PATCH_SIZE; i += 2) {
+ int16_t *tmp_row = &tmp[i * DISFLOW_PATCH_SIZE];
+
+ // Load 5 rows of 8 x 16-bit values, and pack into 4 registers
+ // holding rows {0, 1}, {1, 2}, {2, 3}, {3, 4}
+ __m128i row0 = _mm_loadu_si128((__m128i *)(tmp_row - DISFLOW_PATCH_SIZE));
+ __m128i row1 = _mm_loadu_si128((__m128i *)tmp_row);
+ __m128i row2 = _mm_loadu_si128((__m128i *)(tmp_row + DISFLOW_PATCH_SIZE));
+ __m128i row3 =
+ _mm_loadu_si128((__m128i *)(tmp_row + 2 * DISFLOW_PATCH_SIZE));
+ __m128i row4 =
+ _mm_loadu_si128((__m128i *)(tmp_row + 3 * DISFLOW_PATCH_SIZE));
+
+ __m256i px0 = _mm256_set_m128i(row1, row0);
+ __m256i px1 = _mm256_set_m128i(row2, row1);
+ __m256i px2 = _mm256_set_m128i(row3, row2);
+ __m256i px3 = _mm256_set_m128i(row4, row3);
+
+ // We want to calculate px0 * v_kernel[0] + px1 * v_kernel[1] + ... ,
+ // but each multiply expands its output to 32 bits. So we need to be
+ // a little clever about how we do this
+ __m256i sum0 = _mm256_add_epi32(
+ _mm256_madd_epi16(_mm256_unpacklo_epi16(px0, px1), v_kernel_01),
+ _mm256_madd_epi16(_mm256_unpacklo_epi16(px2, px3), v_kernel_23));
+ __m256i sum1 = _mm256_add_epi32(
+ _mm256_madd_epi16(_mm256_unpackhi_epi16(px0, px1), v_kernel_01),
+ _mm256_madd_epi16(_mm256_unpackhi_epi16(px2, px3), v_kernel_23));
+
+ __m256i sum0_rounded =
+ _mm256_srai_epi32(_mm256_add_epi32(sum0, round_const_v), round_bits);
+ __m256i sum1_rounded =
+ _mm256_srai_epi32(_mm256_add_epi32(sum1, round_const_v), round_bits);
+
+ __m256i warped = _mm256_packs_epi32(sum0_rounded, sum1_rounded);
+ __m128i src_pixels_u8 = xx_loadu_2x64(&src[(y + i + 1) * stride + x],
+ &src[(y + i) * stride + x]);
+ __m256i src_pixels =
+ _mm256_slli_epi16(_mm256_cvtepu8_epi16(src_pixels_u8), 3);
+
+ // Calculate delta from the target patch
+ __m256i dt = _mm256_sub_epi16(warped, src_pixels);
+
+ // Load 2x8 elements each of dx and dt, to pair with the 2x8 elements of dt
+ // that we have just computed. Then compute 2x8 partial sums of dx * dt
+ // and dy * dt, implicitly sum to give 2x4 partial sums of each, and
+ // accumulate.
+ __m256i dx_row = _mm256_loadu_si256((__m256i *)&dx[i * DISFLOW_PATCH_SIZE]);
+ __m256i dy_row = _mm256_loadu_si256((__m256i *)&dy[i * DISFLOW_PATCH_SIZE]);
+ b0_acc = _mm256_add_epi32(b0_acc, _mm256_madd_epi16(dx_row, dt));
+ b1_acc = _mm256_add_epi32(b1_acc, _mm256_madd_epi16(dy_row, dt));
+ }
+
+ // Flatten the two sets of partial sums to find the final value of b
+ // We need to set b[0] = sum(b0_acc), b[1] = sum(b1_acc).
+ // We need to do 14 additions in total; a `hadd` instruction can take care
+ // of eight of them, then a vertical sum can do four more, leaving two
+ // scalar additions.
+ __m256i partial_sum_256 = _mm256_hadd_epi32(b0_acc, b1_acc);
+ __m128i partial_sum =
+ _mm_add_epi32(_mm256_extracti128_si256(partial_sum_256, 0),
+ _mm256_extracti128_si256(partial_sum_256, 1));
+ b[0] = _mm_extract_epi32(partial_sum, 0) + _mm_extract_epi32(partial_sum, 1);
+ b[1] = _mm_extract_epi32(partial_sum, 2) + _mm_extract_epi32(partial_sum, 3);
+}
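A quick check of the +18360 bound quoted in the horizontal pass above (arithmetic only, not in the patch): at u_frac = 0.5 the two positive kernel taps are k_1(0.5) = k_2(0.5) = 0.5625, so with input pixels {0, 255, 255, 0}

\[ 255 \cdot (0.5625 + 0.5625) = 286.875, \qquad 286.875 \cdot 2^{6} = 18360 < 2^{15}, \]

which fits in an int16_t, whereas keeping 7 fractional bits (36720) would not.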
+
+// Compute the x and y gradients of the source patch in a single pass,
+// and store into dx and dy respectively.
+static INLINE void sobel_filter(const uint8_t *src, int src_stride, int16_t *dx,
+ int16_t *dy) {
+ const __m256i zero = _mm256_setzero_si256();
+
+ // Loop setup: Load the first two rows (of 10 input rows) and apply
+ // the horizontal parts of the two filters
+ __m256i row_m1_0 =
+ yy_loadu2_128((__m128i *)(src - 1), (__m128i *)(src - src_stride - 1));
+ __m256i row_m1_0_a = _mm256_unpacklo_epi8(row_m1_0, zero);
+ __m256i row_m1_0_b =
+ _mm256_unpacklo_epi8(_mm256_srli_si256(row_m1_0, 1), zero);
+ __m256i row_m1_0_c =
+ _mm256_unpacklo_epi8(_mm256_srli_si256(row_m1_0, 2), zero);
+
+ __m256i row_m1_0_hsmooth =
+ _mm256_add_epi16(_mm256_add_epi16(row_m1_0_a, row_m1_0_c),
+ _mm256_slli_epi16(row_m1_0_b, 1));
+ __m256i row_m1_0_hdiff = _mm256_sub_epi16(row_m1_0_a, row_m1_0_c);
+
+ // Main loop: For each pair of output rows (i, i+1):
+ // * Load rows (i+1, i+2) and apply both horizontal filters
+ // * Apply vertical filters and store results
+ // * Shift rows for next iteration
+ for (int i = 0; i < DISFLOW_PATCH_SIZE; i += 2) {
+ // Load rows (i+1, i+2) and apply both horizontal filters
+ const __m256i row_p1_p2 =
+ yy_loadu2_128((__m128i *)(src + (i + 2) * src_stride - 1),
+ (__m128i *)(src + (i + 1) * src_stride - 1));
+ const __m256i row_p1_p2_a = _mm256_unpacklo_epi8(row_p1_p2, zero);
+ const __m256i row_p1_p2_b =
+ _mm256_unpacklo_epi8(_mm256_srli_si256(row_p1_p2, 1), zero);
+ const __m256i row_p1_p2_c =
+ _mm256_unpacklo_epi8(_mm256_srli_si256(row_p1_p2, 2), zero);
+
+ const __m256i row_p1_p2_hsmooth =
+ _mm256_add_epi16(_mm256_add_epi16(row_p1_p2_a, row_p1_p2_c),
+ _mm256_slli_epi16(row_p1_p2_b, 1));
+ const __m256i row_p1_p2_hdiff = _mm256_sub_epi16(row_p1_p2_a, row_p1_p2_c);
+
+ // Apply vertical filters and store results
+ // dx = vertical smooth(horizontal diff(input))
+ // dy = vertical diff(horizontal smooth(input))
+ const __m256i row_0_p1_hdiff =
+ _mm256_permute2x128_si256(row_m1_0_hdiff, row_p1_p2_hdiff, 0x21);
+ const __m256i dx_row =
+ _mm256_add_epi16(_mm256_add_epi16(row_m1_0_hdiff, row_p1_p2_hdiff),
+ _mm256_slli_epi16(row_0_p1_hdiff, 1));
+ const __m256i dy_row =
+ _mm256_sub_epi16(row_m1_0_hsmooth, row_p1_p2_hsmooth);
+
+ _mm256_storeu_si256((__m256i *)(dx + i * DISFLOW_PATCH_SIZE), dx_row);
+ _mm256_storeu_si256((__m256i *)(dy + i * DISFLOW_PATCH_SIZE), dy_row);
+
+ // Shift rows for next iteration
+ // This allows a lot of work to be reused, reducing the number of
+ // horizontal filtering operations from 2*3*8 = 48 to 2*10 = 20
+ row_m1_0_hsmooth = row_p1_p2_hsmooth;
+ row_m1_0_hdiff = row_p1_p2_hdiff;
+ }
+}
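Up to sign convention, the two separable passes above ({1, 0, -1} differencing and {1, 2, 1} smoothing) combine into the familiar 3x3 Sobel kernels, written as outer products (for reference, not in the patch):

\[ D_x = \begin{pmatrix}1\\2\\1\end{pmatrix}\begin{pmatrix}1 & 0 & -1\end{pmatrix} = \begin{pmatrix}1 & 0 & -1\\ 2 & 0 & -2\\ 1 & 0 & -1\end{pmatrix}, \qquad D_y = \begin{pmatrix}1\\0\\-1\end{pmatrix}\begin{pmatrix}1 & 2 & 1\end{pmatrix} = \begin{pmatrix}1 & 2 & 1\\ 0 & 0 & 0\\ -1 & -2 & -1\end{pmatrix}. \]

The row-shifting at the end of the loop means each of the 10 input rows is horizontally filtered only once, which is where the 48 -> 20 reduction mentioned in the comment comes from.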
+
+static INLINE void compute_flow_matrix(const int16_t *dx, int dx_stride,
+ const int16_t *dy, int dy_stride,
+ double *M) {
+ __m256i acc[4] = { 0 };
+
+ for (int i = 0; i < DISFLOW_PATCH_SIZE; i += 2) {
+ __m256i dx_row = _mm256_loadu_si256((__m256i *)&dx[i * dx_stride]);
+ __m256i dy_row = _mm256_loadu_si256((__m256i *)&dy[i * dy_stride]);
+
+ acc[0] = _mm256_add_epi32(acc[0], _mm256_madd_epi16(dx_row, dx_row));
+ acc[1] = _mm256_add_epi32(acc[1], _mm256_madd_epi16(dx_row, dy_row));
+ // Don't compute acc[2], as it should be equal to acc[1]
+ acc[3] = _mm256_add_epi32(acc[3], _mm256_madd_epi16(dy_row, dy_row));
+ }
+
+ // Condense sums
+ __m256i partial_sum_0 = _mm256_hadd_epi32(acc[0], acc[1]);
+ __m256i partial_sum_1 = _mm256_hadd_epi32(acc[1], acc[3]);
+ __m256i result_256 = _mm256_hadd_epi32(partial_sum_0, partial_sum_1);
+ __m128i result = _mm_add_epi32(_mm256_extracti128_si256(result_256, 0),
+ _mm256_extracti128_si256(result_256, 1));
+
+ // Apply regularization
+ // We follow the standard regularization method of adding `k * I` before
+ // inverting. This ensures that the matrix will be invertible.
+ //
+ // Setting the regularization strength k to 1 seems to work well here, as
+ // typical values coming from the other equations are very large (1e5 to
+ // 1e6, with an upper limit of around 6e7, at the time of writing).
+ // It also preserves the property that all matrix values are whole numbers,
+ // which is convenient for integerized SIMD implementation.
+ result = _mm_add_epi32(result, _mm_set_epi32(1, 0, 0, 1));
+
+ // Convert results to doubles and store
+ _mm256_storeu_pd(M, _mm256_cvtepi32_pd(result));
+}
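In matrix form, the regularized system assembled above is (sums over the 8x8 patch; shown for reference, not in the patch)

\[ M = \begin{pmatrix}\sum d_x^{2} & \sum d_x d_y \\ \sum d_x d_y & \sum d_y^{2}\end{pmatrix} + \begin{pmatrix}1 & 0\\ 0 & 1\end{pmatrix}, \]

i.e. the structure tensor used in Lucas-Kanade-style flow plus k*I with k = 1, exactly as the comment describes.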
+
+// Try to invert the matrix M
+// Note: Due to the nature of how a least-squares matrix is constructed, all of
+// the eigenvalues will be >= 0, and therefore det M >= 0 as well.
+// The regularization term `+ k * I` further ensures that det M >= k^2.
+// As mentioned in compute_flow_matrix(), here we use k = 1, so det M >= 1.
+// So we don't have to worry about non-invertible matrices here.
+static INLINE void invert_2x2(const double *M, double *M_inv) {
+ double det = (M[0] * M[3]) - (M[1] * M[2]);
+ assert(det >= 1);
+ const double det_inv = 1 / det;
+
+ M_inv[0] = M[3] * det_inv;
+ M_inv[1] = -M[1] * det_inv;
+ M_inv[2] = -M[2] * det_inv;
+ M_inv[3] = M[0] * det_inv;
+}
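The determinant bound asserted in invert_2x2 follows directly (for reference, not in the patch): the unregularized matrix A is a sum of outer products g*g^T with g = (d_x, d_y), hence positive semi-definite with eigenvalues lambda_1, lambda_2 >= 0, and

\[ \det(A + kI) = (\lambda_1 + k)(\lambda_2 + k) \ge k^{2}, \]

so det M >= 1 for the k = 1 used here.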
+
+void aom_compute_flow_at_point_avx2(const uint8_t *src, const uint8_t *ref,
+ int x, int y, int width, int height,
+ int stride, double *u, double *v) {
+ DECLARE_ALIGNED(32, double, M[4]);
+ DECLARE_ALIGNED(32, double, M_inv[4]);
+ DECLARE_ALIGNED(32, int16_t, dx[DISFLOW_PATCH_SIZE * DISFLOW_PATCH_SIZE]);
+ DECLARE_ALIGNED(32, int16_t, dy[DISFLOW_PATCH_SIZE * DISFLOW_PATCH_SIZE]);
+ int b[2];
+
+ // Compute gradients within this patch
+ const uint8_t *src_patch = &src[y * stride + x];
+ sobel_filter(src_patch, stride, dx, dy);
+
+ compute_flow_matrix(dx, DISFLOW_PATCH_SIZE, dy, DISFLOW_PATCH_SIZE, M);
+ invert_2x2(M, M_inv);
+
+ for (int itr = 0; itr < DISFLOW_MAX_ITR; itr++) {
+ compute_flow_vector(src, ref, width, height, stride, x, y, *u, *v, dx, dy,
+ b);
+
+ // Solve flow equations to find a better estimate for the flow vector
+ // at this point
+ const double step_u = M_inv[0] * b[0] + M_inv[1] * b[1];
+ const double step_v = M_inv[2] * b[0] + M_inv[3] * b[1];
+ *u += fclamp(step_u * DISFLOW_STEP_SIZE, -2, 2);
+ *v += fclamp(step_v * DISFLOW_STEP_SIZE, -2, 2);
+
+ if (fabs(step_u) + fabs(step_v) < DISFLOW_STEP_SIZE_THRESOLD) {
+ // Stop iteration when we're close to convergence
+ break;
+ }
+ }
+}
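Each iteration above solves the 2x2 flow equations for an incremental update (for reference, not in the patch):

\[ \begin{pmatrix}\Delta u\\ \Delta v\end{pmatrix} = M^{-1} b, \qquad u \leftarrow u + \mathrm{clamp}(\Delta u \cdot \mathrm{DISFLOW\_STEP\_SIZE}, -2, 2), \quad v \leftarrow v + \mathrm{clamp}(\Delta v \cdot \mathrm{DISFLOW\_STEP\_SIZE}, -2, 2), \]

repeated up to DISFLOW_MAX_ITR times and stopped early once |Δu| + |Δv| falls below DISFLOW_STEP_SIZE_THRESOLD.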
diff --git a/third_party/aom/aom_dsp/flow_estimation/x86/disflow_sse4.c b/third_party/aom/aom_dsp/flow_estimation/x86/disflow_sse4.c
index 2c5effd638..e0a4bd040c 100644
--- a/third_party/aom/aom_dsp/flow_estimation/x86/disflow_sse4.c
+++ b/third_party/aom/aom_dsp/flow_estimation/x86/disflow_sse4.c
@@ -1,13 +1,12 @@
/*
- * Copyright (c) 2022, Alliance for Open Media. All rights reserved
+ * Copyright (c) 2024, Alliance for Open Media. All rights reserved
*
- * This source code is subject to the terms of the BSD 3-Clause Clear License
- * and the Alliance for Open Media Patent License 1.0. If the BSD 3-Clause Clear
- * License was not distributed with this source code in the LICENSE file, you
- * can obtain it at aomedia.org/license/software-license/bsd-3-c-c/. If the
- * Alliance for Open Media Patent License 1.0 was not distributed with this
- * source code in the PATENTS file, you can obtain it at
- * aomedia.org/license/patent-license/.
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
#include <assert.h>
@@ -20,46 +19,59 @@
#include "config/aom_dsp_rtcd.h"
-// Internal cross-check against C code
-// If you set this to 1 and compile in debug mode, then the outputs of the two
-// convolution stages will be checked against the plain C version of the code,
-// and an assertion will be fired if the results differ.
-#define CHECK_RESULTS 0
-
-// Note: Max sum(+ve coefficients) = 1.125 * scale
-static INLINE void get_cubic_kernel_dbl(double x, double kernel[4]) {
- // Check that the fractional position is in range.
- //
- // Note: x is calculated from, e.g., `u_frac = u - floor(u)`.
- // Mathematically, this implies that 0 <= x < 1. However, in practice it is
- // possible to have x == 1 due to floating point rounding. This is fine,
- // and we still interpolate correctly if we allow x = 1.
- assert(0 <= x && x <= 1);
-
- double x2 = x * x;
- double x3 = x2 * x;
- kernel[0] = -0.5 * x + x2 - 0.5 * x3;
- kernel[1] = 1.0 - 2.5 * x2 + 1.5 * x3;
- kernel[2] = 0.5 * x + 2.0 * x2 - 1.5 * x3;
- kernel[3] = -0.5 * x2 + 0.5 * x3;
-}
-
-static INLINE void get_cubic_kernel_int(double x, int16_t kernel[4]) {
- double kernel_dbl[4];
- get_cubic_kernel_dbl(x, kernel_dbl);
-
- kernel[0] = (int16_t)rint(kernel_dbl[0] * (1 << DISFLOW_INTERP_BITS));
- kernel[1] = (int16_t)rint(kernel_dbl[1] * (1 << DISFLOW_INTERP_BITS));
- kernel[2] = (int16_t)rint(kernel_dbl[2] * (1 << DISFLOW_INTERP_BITS));
- kernel[3] = (int16_t)rint(kernel_dbl[3] * (1 << DISFLOW_INTERP_BITS));
-}
-
-#if CHECK_RESULTS
-static INLINE int get_cubic_value_int(const int *p, const int16_t kernel[4]) {
- return kernel[0] * p[0] + kernel[1] * p[1] + kernel[2] * p[2] +
- kernel[3] * p[3];
+#if DISFLOW_PATCH_SIZE != 8
+#error "Need to change disflow_sse4.c if DISFLOW_PATCH_SIZE != 8"
+#endif
+
+// Compute horizontal and vertical kernels and return them packed into a
+// register. The coefficient ordering is:
+// h0, h1, v0, v1, h2, h3, v2, v3
+// This is chosen because it takes less work than fully separating the kernels,
+// but it is separated enough that we can pick out each coefficient pair in the
+// main compute_flow_at_point function
+static INLINE __m128i compute_cubic_kernels(double u, double v) {
+ const __m128d x = _mm_set_pd(v, u);
+
+ const __m128d x2 = _mm_mul_pd(x, x);
+ const __m128d x3 = _mm_mul_pd(x2, x);
+
+ // Macro to multiply a value v by a constant coefficient c
+#define MULC(c, v) _mm_mul_pd(_mm_set1_pd(c), v)
+
+ // Compute floating-point kernel
+ // Note: To ensure results are bit-identical to the C code, we need to perform
+ // exactly the same sequence of operations here as in the C code.
+ __m128d k0 = _mm_sub_pd(_mm_add_pd(MULC(-0.5, x), x2), MULC(0.5, x3));
+ __m128d k1 =
+ _mm_add_pd(_mm_sub_pd(_mm_set1_pd(1.0), MULC(2.5, x2)), MULC(1.5, x3));
+ __m128d k2 =
+ _mm_sub_pd(_mm_add_pd(MULC(0.5, x), MULC(2.0, x2)), MULC(1.5, x3));
+ __m128d k3 = _mm_add_pd(MULC(-0.5, x2), MULC(0.5, x3));
+#undef MULC
+
+ // Integerize
+ __m128d prec = _mm_set1_pd((double)(1 << DISFLOW_INTERP_BITS));
+
+ k0 = _mm_round_pd(_mm_mul_pd(k0, prec),
+ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+ k1 = _mm_round_pd(_mm_mul_pd(k1, prec),
+ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+ k2 = _mm_round_pd(_mm_mul_pd(k2, prec),
+ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+ k3 = _mm_round_pd(_mm_mul_pd(k3, prec),
+ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+
+ const __m128i c0 = _mm_cvtpd_epi32(k0);
+ const __m128i c1 = _mm_cvtpd_epi32(k1);
+ const __m128i c2 = _mm_cvtpd_epi32(k2);
+ const __m128i c3 = _mm_cvtpd_epi32(k3);
+
+ // Rearrange results and convert down to 16 bits, giving the target output
+ // ordering
+ const __m128i c01 = _mm_unpacklo_epi32(c0, c1);
+ const __m128i c23 = _mm_unpacklo_epi32(c2, c3);
+ return _mm_packs_epi32(c01, c23);
}
-#endif // CHECK_RESULTS
// Compare two regions of width x height pixels, one rooted at position
// (x, y) in src and the other at (x + u, y + v) in ref.
@@ -80,10 +92,6 @@ static INLINE void compute_flow_vector(const uint8_t *src, const uint8_t *ref,
// These will be flattened at the end.
__m128i b0_acc = _mm_setzero_si128();
__m128i b1_acc = _mm_setzero_si128();
-#if CHECK_RESULTS
- // Also keep a running sum using the C algorithm, for cross-checking
- int c_result[2] = { 0 };
-#endif // CHECK_RESULTS
// Split offset into integer and fractional parts, and compute cubic
// interpolation kernels
@@ -92,13 +100,11 @@ static INLINE void compute_flow_vector(const uint8_t *src, const uint8_t *ref,
const double u_frac = u - floor(u);
const double v_frac = v - floor(v);
- int16_t h_kernel[4];
- int16_t v_kernel[4];
- get_cubic_kernel_int(u_frac, h_kernel);
- get_cubic_kernel_int(v_frac, v_kernel);
+ const __m128i kernels = compute_cubic_kernels(u_frac, v_frac);
// Storage for intermediate values between the two convolution directions
- int16_t tmp_[DISFLOW_PATCH_SIZE * (DISFLOW_PATCH_SIZE + 3)];
+ DECLARE_ALIGNED(16, int16_t,
+ tmp_[DISFLOW_PATCH_SIZE * (DISFLOW_PATCH_SIZE + 3)]);
int16_t *tmp = tmp_ + DISFLOW_PATCH_SIZE; // Offset by one row
// Clamp coordinates so that all pixels we fetch will remain within the
@@ -121,8 +127,8 @@ static INLINE void compute_flow_vector(const uint8_t *src, const uint8_t *ref,
// We split the kernel into two vectors with kernel indices:
// 0, 1, 0, 1, 0, 1, 0, 1, and
// 2, 3, 2, 3, 2, 3, 2, 3
- __m128i h_kernel_01 = xx_set2_epi16(h_kernel[0], h_kernel[1]);
- __m128i h_kernel_23 = xx_set2_epi16(h_kernel[2], h_kernel[3]);
+ __m128i h_kernel_01 = _mm_set1_epi32(_mm_extract_epi32(kernels, 0));
+ __m128i h_kernel_23 = _mm_set1_epi32(_mm_extract_epi32(kernels, 2));
__m128i round_const_h = _mm_set1_epi32(1 << (DISFLOW_INTERP_BITS - 6 - 1));
@@ -141,10 +147,6 @@ static INLINE void compute_flow_vector(const uint8_t *src, const uint8_t *ref,
__m128i px_0to7_i16 = _mm_cvtepu8_epi16(row);
__m128i px_4to10_i16 = _mm_cvtepu8_epi16(_mm_srli_si128(row, 4));
- // Relevant multiply instruction
- // This multiplies pointwise, then sums in pairs.
- //_mm_madd_epi16();
-
// Compute first four outputs
// input pixels 0, 1, 1, 2, 2, 3, 3, 4
// * kernel 0, 1, 0, 1, 0, 1, 0, 1
@@ -180,43 +182,14 @@ static INLINE void compute_flow_vector(const uint8_t *src, const uint8_t *ref,
DISFLOW_INTERP_BITS - 6);
_mm_storeu_si128((__m128i *)tmp_row, _mm_packs_epi32(out0, out1));
-
-#if CHECK_RESULTS && !defined(NDEBUG)
- // Cross-check
- for (int j = 0; j < DISFLOW_PATCH_SIZE; ++j) {
- const int x_w = x0 + j;
- int arr[4];
-
- arr[0] = (int)ref[y_w * stride + (x_w - 1)];
- arr[1] = (int)ref[y_w * stride + (x_w + 0)];
- arr[2] = (int)ref[y_w * stride + (x_w + 1)];
- arr[3] = (int)ref[y_w * stride + (x_w + 2)];
-
- // Apply kernel and round, keeping 6 extra bits of precision.
- //
- // 6 is the maximum allowable number of extra bits which will avoid
- // the intermediate values overflowing an int16_t. The most extreme
- // intermediate value occurs when:
- // * The input pixels are [0, 255, 255, 0]
- // * u_frac = 0.5
- // In this case, the un-scaled output is 255 * 1.125 = 286.875.
- // As an integer with 6 fractional bits, that is 18360, which fits
- // in an int16_t. But with 7 fractional bits it would be 36720,
- // which is too large.
- const int c_value = ROUND_POWER_OF_TWO(get_cubic_value_int(arr, h_kernel),
- DISFLOW_INTERP_BITS - 6);
- (void)c_value; // Suppress warnings
- assert(tmp_row[j] == c_value);
- }
-#endif // CHECK_RESULTS
}
// Vertical convolution
const int round_bits = DISFLOW_INTERP_BITS + 6 - DISFLOW_DERIV_SCALE_LOG2;
__m128i round_const_v = _mm_set1_epi32(1 << (round_bits - 1));
- __m128i v_kernel_01 = xx_set2_epi16(v_kernel[0], v_kernel[1]);
- __m128i v_kernel_23 = xx_set2_epi16(v_kernel[2], v_kernel[3]);
+ __m128i v_kernel_01 = _mm_set1_epi32(_mm_extract_epi32(kernels, 1));
+ __m128i v_kernel_23 = _mm_set1_epi32(_mm_extract_epi32(kernels, 3));
for (int i = 0; i < DISFLOW_PATCH_SIZE; ++i) {
int16_t *tmp_row = &tmp[i * DISFLOW_PATCH_SIZE];
@@ -259,30 +232,6 @@ static INLINE void compute_flow_vector(const uint8_t *src, const uint8_t *ref,
__m128i dy_row = _mm_loadu_si128((__m128i *)&dy[i * DISFLOW_PATCH_SIZE]);
b0_acc = _mm_add_epi32(b0_acc, _mm_madd_epi16(dx_row, dt));
b1_acc = _mm_add_epi32(b1_acc, _mm_madd_epi16(dy_row, dt));
-
-#if CHECK_RESULTS
- int16_t dt_arr[8];
- memcpy(dt_arr, &dt, 8 * sizeof(*dt_arr));
- for (int j = 0; j < DISFLOW_PATCH_SIZE; ++j) {
- int16_t *p = &tmp[i * DISFLOW_PATCH_SIZE + j];
- int arr[4] = { p[-DISFLOW_PATCH_SIZE], p[0], p[DISFLOW_PATCH_SIZE],
- p[2 * DISFLOW_PATCH_SIZE] };
- const int result = get_cubic_value_int(arr, v_kernel);
-
- // Apply kernel and round.
- // This time, we have to round off the 6 extra bits which were kept
- // earlier, but we also want to keep DISFLOW_DERIV_SCALE_LOG2 extra bits
- // of precision to match the scale of the dx and dy arrays.
- const int c_warped = ROUND_POWER_OF_TWO(result, round_bits);
- const int c_src_px = src[(x + j) + (y + i) * stride] << 3;
- const int c_dt = c_warped - c_src_px;
-
- assert(dt_arr[j] == c_dt);
-
- c_result[0] += dx[i * DISFLOW_PATCH_SIZE + j] * c_dt;
- c_result[1] += dy[i * DISFLOW_PATCH_SIZE + j] * c_dt;
- }
-#endif // CHECK_RESULTS
}
// Flatten the two sets of partial sums to find the final value of b
@@ -292,156 +241,66 @@ static INLINE void compute_flow_vector(const uint8_t *src, const uint8_t *ref,
__m128i partial_sum = _mm_hadd_epi32(b0_acc, b1_acc);
b[0] = _mm_extract_epi32(partial_sum, 0) + _mm_extract_epi32(partial_sum, 1);
b[1] = _mm_extract_epi32(partial_sum, 2) + _mm_extract_epi32(partial_sum, 3);
-
-#if CHECK_RESULTS
- assert(b[0] == c_result[0]);
- assert(b[1] == c_result[1]);
-#endif // CHECK_RESULTS
}
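
Editor's note: the CHECK_RESULTS block removed above duplicated this computation in scalar form. For reference, a minimal scalar sketch of the quantity the two SIMD accumulators produce, assuming DISFLOW_PATCH_SIZE and the dx/dy layout from aom_dsp/flow_estimation/disflow.h; the helper name is illustrative, not part of the library:

    // b[0] accumulates sum(dx * dt) and b[1] accumulates sum(dy * dt) over the
    // patch, where dt is the warped reference sample minus the source sample
    // (both carrying DISFLOW_DERIV_SCALE_LOG2 extra bits of precision).
    static void compute_b_scalar(const int16_t *dx, const int16_t *dy,
                                 const int16_t *dt, int b[2]) {
      b[0] = 0;
      b[1] = 0;
      for (int i = 0; i < DISFLOW_PATCH_SIZE * DISFLOW_PATCH_SIZE; ++i) {
        b[0] += dx[i] * dt[i];
        b[1] += dy[i] * dt[i];
      }
    }
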
-static INLINE void sobel_filter_x(const uint8_t *src, int src_stride,
- int16_t *dst, int dst_stride) {
- int16_t tmp_[DISFLOW_PATCH_SIZE * (DISFLOW_PATCH_SIZE + 2)];
- int16_t *tmp = tmp_ + DISFLOW_PATCH_SIZE;
-#if CHECK_RESULTS
- const int taps = 3;
-#endif // CHECK_RESULTS
-
- // Horizontal filter
- // As the kernel is simply {1, 0, -1}, we implement this as simply
- // out[x] = image[x-1] - image[x+1]
- // rather than doing a "proper" convolution operation
- for (int y = -1; y < DISFLOW_PATCH_SIZE + 1; ++y) {
- const uint8_t *src_row = src + y * src_stride;
- int16_t *tmp_row = tmp + y * DISFLOW_PATCH_SIZE;
-
- // Load pixels and expand to 16 bits
- __m128i row = _mm_loadu_si128((__m128i *)(src_row - 1));
- __m128i px0 = _mm_cvtepu8_epi16(row);
- __m128i px2 = _mm_cvtepu8_epi16(_mm_srli_si128(row, 2));
-
- __m128i out = _mm_sub_epi16(px0, px2);
-
- // Store to intermediate array
- _mm_storeu_si128((__m128i *)tmp_row, out);
-
-#if CHECK_RESULTS
- // Cross-check
- static const int16_t h_kernel[3] = { 1, 0, -1 };
- for (int x = 0; x < DISFLOW_PATCH_SIZE; ++x) {
- int sum = 0;
- for (int k = 0; k < taps; ++k) {
- sum += h_kernel[k] * src_row[x + k - 1];
- }
- (void)sum;
- assert(tmp_row[x] == sum);
- }
-#endif // CHECK_RESULTS
- }
-
- // Vertical filter
- // Here the kernel is {1, 2, 1}, which can be implemented
- // with simple sums rather than multiplies and adds.
- // In order to minimize dependency chains, we evaluate in the order
- // (image[y - 1] + image[y + 1]) + (image[y] << 1)
- // This way, the first addition and the shift can happen in parallel
- for (int y = 0; y < DISFLOW_PATCH_SIZE; ++y) {
- const int16_t *tmp_row = tmp + y * DISFLOW_PATCH_SIZE;
- int16_t *dst_row = dst + y * dst_stride;
-
- __m128i px0 = _mm_loadu_si128((__m128i *)(tmp_row - DISFLOW_PATCH_SIZE));
- __m128i px1 = _mm_loadu_si128((__m128i *)tmp_row);
- __m128i px2 = _mm_loadu_si128((__m128i *)(tmp_row + DISFLOW_PATCH_SIZE));
-
- __m128i out =
- _mm_add_epi16(_mm_add_epi16(px0, px2), _mm_slli_epi16(px1, 1));
-
- _mm_storeu_si128((__m128i *)dst_row, out);
-
-#if CHECK_RESULTS
- static const int16_t v_kernel[3] = { 1, 2, 1 };
- for (int x = 0; x < DISFLOW_PATCH_SIZE; ++x) {
- int sum = 0;
- for (int k = 0; k < taps; ++k) {
- sum += v_kernel[k] * tmp[(y + k - 1) * DISFLOW_PATCH_SIZE + x];
- }
- (void)sum;
- assert(dst_row[x] == sum);
- }
-#endif // CHECK_RESULTS
- }
-}
-
-static INLINE void sobel_filter_y(const uint8_t *src, int src_stride,
- int16_t *dst, int dst_stride) {
- int16_t tmp_[DISFLOW_PATCH_SIZE * (DISFLOW_PATCH_SIZE + 2)];
- int16_t *tmp = tmp_ + DISFLOW_PATCH_SIZE;
-#if CHECK_RESULTS
- const int taps = 3;
-#endif // CHECK_RESULTS
-
- // Horizontal filter
- // Here the kernel is {1, 2, 1}, which can be implemented
- // with simple sums rather than multiplies and adds.
- // In order to minimize dependency chains, we evaluate in the order
- // (image[y - 1] + image[y + 1]) + (image[y] << 1)
- // This way, the first addition and the shift can happen in parallel
- for (int y = -1; y < DISFLOW_PATCH_SIZE + 1; ++y) {
- const uint8_t *src_row = src + y * src_stride;
- int16_t *tmp_row = tmp + y * DISFLOW_PATCH_SIZE;
-
- // Load pixels and expand to 16 bits
- __m128i row = _mm_loadu_si128((__m128i *)(src_row - 1));
- __m128i px0 = _mm_cvtepu8_epi16(row);
- __m128i px1 = _mm_cvtepu8_epi16(_mm_srli_si128(row, 1));
- __m128i px2 = _mm_cvtepu8_epi16(_mm_srli_si128(row, 2));
-
- __m128i out =
- _mm_add_epi16(_mm_add_epi16(px0, px2), _mm_slli_epi16(px1, 1));
-
- // Store to intermediate array
- _mm_storeu_si128((__m128i *)tmp_row, out);
-
-#if CHECK_RESULTS
- // Cross-check
- static const int16_t h_kernel[3] = { 1, 2, 1 };
- for (int x = 0; x < DISFLOW_PATCH_SIZE; ++x) {
- int sum = 0;
- for (int k = 0; k < taps; ++k) {
- sum += h_kernel[k] * src_row[x + k - 1];
- }
- (void)sum;
- assert(tmp_row[x] == sum);
- }
-#endif // CHECK_RESULTS
- }
-
- // Vertical filter
- // As the kernel is simply {1, 0, -1}, we implement this as simply
- // out[x] = image[x-1] - image[x+1]
- // rather than doing a "proper" convolution operation
- for (int y = 0; y < DISFLOW_PATCH_SIZE; ++y) {
- const int16_t *tmp_row = tmp + y * DISFLOW_PATCH_SIZE;
- int16_t *dst_row = dst + y * dst_stride;
-
- __m128i px0 = _mm_loadu_si128((__m128i *)(tmp_row - DISFLOW_PATCH_SIZE));
- __m128i px2 = _mm_loadu_si128((__m128i *)(tmp_row + DISFLOW_PATCH_SIZE));
-
- __m128i out = _mm_sub_epi16(px0, px2);
-
- _mm_storeu_si128((__m128i *)dst_row, out);
-
-#if CHECK_RESULTS
- static const int16_t v_kernel[3] = { 1, 0, -1 };
- for (int x = 0; x < DISFLOW_PATCH_SIZE; ++x) {
- int sum = 0;
- for (int k = 0; k < taps; ++k) {
- sum += v_kernel[k] * tmp[(y + k - 1) * DISFLOW_PATCH_SIZE + x];
- }
- (void)sum;
- assert(dst_row[x] == sum);
- }
-#endif // CHECK_RESULTS
+// Compute the x and y gradients of the source patch in a single pass,
+// and store into dx and dy respectively.
+static INLINE void sobel_filter(const uint8_t *src, int src_stride, int16_t *dx,
+ int16_t *dy) {
+ // Loop setup: Load the first two rows (of 10 input rows) and apply
+ // the horizontal parts of the two filters
+ __m128i row_m1 = _mm_loadu_si128((__m128i *)(src - src_stride - 1));
+ __m128i row_m1_a = _mm_cvtepu8_epi16(row_m1);
+ __m128i row_m1_b = _mm_cvtepu8_epi16(_mm_srli_si128(row_m1, 1));
+ __m128i row_m1_c = _mm_cvtepu8_epi16(_mm_srli_si128(row_m1, 2));
+
+ __m128i row_m1_hsmooth = _mm_add_epi16(_mm_add_epi16(row_m1_a, row_m1_c),
+ _mm_slli_epi16(row_m1_b, 1));
+ __m128i row_m1_hdiff = _mm_sub_epi16(row_m1_a, row_m1_c);
+
+ __m128i row = _mm_loadu_si128((__m128i *)(src - 1));
+ __m128i row_a = _mm_cvtepu8_epi16(row);
+ __m128i row_b = _mm_cvtepu8_epi16(_mm_srli_si128(row, 1));
+ __m128i row_c = _mm_cvtepu8_epi16(_mm_srli_si128(row, 2));
+
+ __m128i row_hsmooth =
+ _mm_add_epi16(_mm_add_epi16(row_a, row_c), _mm_slli_epi16(row_b, 1));
+ __m128i row_hdiff = _mm_sub_epi16(row_a, row_c);
+
+ // Main loop: For each of the 8 output rows:
+ // * Load row i+1 and apply both horizontal filters
+ // * Apply vertical filters and store results
+ // * Shift rows for next iteration
+ for (int i = 0; i < DISFLOW_PATCH_SIZE; i++) {
+ // Load row i+1 and apply both horizontal filters
+ const __m128i row_p1 =
+ _mm_loadu_si128((__m128i *)(src + (i + 1) * src_stride - 1));
+ const __m128i row_p1_a = _mm_cvtepu8_epi16(row_p1);
+ const __m128i row_p1_b = _mm_cvtepu8_epi16(_mm_srli_si128(row_p1, 1));
+ const __m128i row_p1_c = _mm_cvtepu8_epi16(_mm_srli_si128(row_p1, 2));
+
+ const __m128i row_p1_hsmooth = _mm_add_epi16(
+ _mm_add_epi16(row_p1_a, row_p1_c), _mm_slli_epi16(row_p1_b, 1));
+ const __m128i row_p1_hdiff = _mm_sub_epi16(row_p1_a, row_p1_c);
+
+ // Apply vertical filters and store results
+ // dx = vertical smooth(horizontal diff(input))
+ // dy = vertical diff(horizontal smooth(input))
+ const __m128i dx_row =
+ _mm_add_epi16(_mm_add_epi16(row_m1_hdiff, row_p1_hdiff),
+ _mm_slli_epi16(row_hdiff, 1));
+ const __m128i dy_row = _mm_sub_epi16(row_m1_hsmooth, row_p1_hsmooth);
+
+ _mm_storeu_si128((__m128i *)(dx + i * DISFLOW_PATCH_SIZE), dx_row);
+ _mm_storeu_si128((__m128i *)(dy + i * DISFLOW_PATCH_SIZE), dy_row);
+
+ // Shift rows for next iteration
+ // This allows a lot of work to be reused, reducing the number of
+ // horizontal filtering operations from 2*3*8 = 48 to 2*10 = 20
+ row_m1_hsmooth = row_hsmooth;
+ row_m1_hdiff = row_hdiff;
+ row_hsmooth = row_p1_hsmooth;
+ row_hdiff = row_p1_hdiff;
}
}
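
Editor's note: the fused routine above works because both 3x3 Sobel kernels are separable, so the horizontal parts can be computed once per input row and reused. A scalar sketch of the same decomposition, matching the sign conventions of the SIMD code (illustrative only; the library's reference path lives in disflow.c):

    // dx = vertical [1 2 1] smoothing of the horizontal difference src[x-1] - src[x+1]
    // dy = vertical difference (row above minus row below) of the horizontal
    //      smoothing src[x-1] + 2*src[x] + src[x+1]
    static void sobel_filter_scalar(const uint8_t *src, int stride, int16_t *dx,
                                    int16_t *dy) {
      for (int i = 0; i < DISFLOW_PATCH_SIZE; ++i) {
        for (int j = 0; j < DISFLOW_PATCH_SIZE; ++j) {
          const uint8_t *p = src + i * stride + j;
          const int hdiff_m1 = p[-stride - 1] - p[-stride + 1];
          const int hdiff_0 = p[-1] - p[1];
          const int hdiff_p1 = p[stride - 1] - p[stride + 1];
          const int hsmooth_m1 = p[-stride - 1] + 2 * p[-stride] + p[-stride + 1];
          const int hsmooth_p1 = p[stride - 1] + 2 * p[stride] + p[stride + 1];
          dx[i * DISFLOW_PATCH_SIZE + j] =
              (int16_t)(hdiff_m1 + 2 * hdiff_0 + hdiff_p1);
          dy[i * DISFLOW_PATCH_SIZE + j] = (int16_t)(hsmooth_m1 - hsmooth_p1);
        }
      }
    }
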
@@ -476,30 +335,6 @@ static INLINE void compute_flow_matrix(const int16_t *dx, int dx_stride,
// which is convenient for integerized SIMD implementation.
result = _mm_add_epi32(result, _mm_set_epi32(1, 0, 0, 1));
-#if CHECK_RESULTS
- int tmp[4] = { 0 };
-
- for (int i = 0; i < DISFLOW_PATCH_SIZE; i++) {
- for (int j = 0; j < DISFLOW_PATCH_SIZE; j++) {
- tmp[0] += dx[i * dx_stride + j] * dx[i * dx_stride + j];
- tmp[1] += dx[i * dx_stride + j] * dy[i * dy_stride + j];
- // Don't compute tmp[2], as it should be equal to tmp[1]
- tmp[3] += dy[i * dy_stride + j] * dy[i * dy_stride + j];
- }
- }
-
- // Apply regularization
- tmp[0] += 1;
- tmp[3] += 1;
-
- tmp[2] = tmp[1];
-
- assert(tmp[0] == _mm_extract_epi32(result, 0));
- assert(tmp[1] == _mm_extract_epi32(result, 1));
- assert(tmp[2] == _mm_extract_epi32(result, 2));
- assert(tmp[3] == _mm_extract_epi32(result, 3));
-#endif // CHECK_RESULTS
-
// Convert results to doubles and store
_mm_storeu_pd(M, _mm_cvtepi32_pd(result));
_mm_storeu_pd(M + 2, _mm_cvtepi32_pd(_mm_srli_si128(result, 8)));
@@ -525,16 +360,15 @@ static INLINE void invert_2x2(const double *M, double *M_inv) {
void aom_compute_flow_at_point_sse4_1(const uint8_t *src, const uint8_t *ref,
int x, int y, int width, int height,
int stride, double *u, double *v) {
- double M[4];
- double M_inv[4];
+ DECLARE_ALIGNED(16, double, M[4]);
+ DECLARE_ALIGNED(16, double, M_inv[4]);
+ DECLARE_ALIGNED(16, int16_t, dx[DISFLOW_PATCH_SIZE * DISFLOW_PATCH_SIZE]);
+ DECLARE_ALIGNED(16, int16_t, dy[DISFLOW_PATCH_SIZE * DISFLOW_PATCH_SIZE]);
int b[2];
- int16_t dx[DISFLOW_PATCH_SIZE * DISFLOW_PATCH_SIZE];
- int16_t dy[DISFLOW_PATCH_SIZE * DISFLOW_PATCH_SIZE];
// Compute gradients within this patch
const uint8_t *src_patch = &src[y * stride + x];
- sobel_filter_x(src_patch, stride, dx, DISFLOW_PATCH_SIZE);
- sobel_filter_y(src_patch, stride, dy, DISFLOW_PATCH_SIZE);
+ sobel_filter(src_patch, stride, dx, dy);
compute_flow_matrix(dx, DISFLOW_PATCH_SIZE, dy, DISFLOW_PATCH_SIZE, M);
invert_2x2(M, M_inv);
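
Editor's note: invert_2x2 itself is untouched by this patch (see the hunk header at -525/+360). For orientation, a standard 2x2 inverse over the regularized matrix M is sketched below; this is illustrative, not the shipped implementation:

    // M = [ sum(dx*dx)+1  sum(dx*dy)   ]   (the +1 terms are the regularization
    //     [ sum(dx*dy)    sum(dy*dy)+1 ]    added in compute_flow_matrix above)
    static void invert_2x2_sketch(const double *M, double *M_inv) {
      const double det = M[0] * M[3] - M[1] * M[2];
      // The diagonal regularization keeps det strictly positive for real inputs.
      const double det_inv = 1.0 / det;
      M_inv[0] = M[3] * det_inv;
      M_inv[1] = -M[1] * det_inv;
      M_inv[2] = -M[2] * det_inv;
      M_inv[3] = M[0] * det_inv;
    }
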
diff --git a/third_party/aom/aom_dsp/mathutils.h b/third_party/aom/aom_dsp/mathutils.h
index cbb6cf491f..26635fc4d1 100644
--- a/third_party/aom/aom_dsp/mathutils.h
+++ b/third_party/aom/aom_dsp/mathutils.h
@@ -17,7 +17,6 @@
#include <string.h>
#include "aom_dsp/aom_dsp_common.h"
-#include "aom_mem/aom_mem.h"
static const double TINY_NEAR_ZERO = 1.0E-16;
diff --git a/third_party/aom/aom_dsp/noise_model.c b/third_party/aom/aom_dsp/noise_model.c
index 065ec9a106..947dfd3c7a 100644
--- a/third_party/aom/aom_dsp/noise_model.c
+++ b/third_party/aom/aom_dsp/noise_model.c
@@ -19,6 +19,8 @@
#include "aom_dsp/noise_model.h"
#include "aom_dsp/noise_util.h"
#include "aom_mem/aom_mem.h"
+#include "aom_ports/mem.h"
+#include "aom_scale/yv12config.h"
#define kLowPolyNumParams 3
@@ -1555,7 +1557,7 @@ void aom_denoise_and_model_free(struct aom_denoise_and_model_t *ctx) {
}
static int denoise_and_model_realloc_if_necessary(
- struct aom_denoise_and_model_t *ctx, YV12_BUFFER_CONFIG *sd) {
+ struct aom_denoise_and_model_t *ctx, const YV12_BUFFER_CONFIG *sd) {
if (ctx->width == sd->y_width && ctx->height == sd->y_height &&
ctx->y_stride == sd->y_stride && ctx->uv_stride == sd->uv_stride)
return 1;
@@ -1624,7 +1626,7 @@ static int denoise_and_model_realloc_if_necessary(
// TODO(aomedia:3151): Handle a monochrome image (sd->u_buffer and sd->v_buffer
// are null pointers) correctly.
int aom_denoise_and_model_run(struct aom_denoise_and_model_t *ctx,
- YV12_BUFFER_CONFIG *sd,
+ const YV12_BUFFER_CONFIG *sd,
aom_film_grain_t *film_grain, int apply_denoise) {
const int block_size = ctx->block_size;
const int use_highbd = (sd->flags & YV12_FLAG_HIGHBITDEPTH) != 0;
diff --git a/third_party/aom/aom_dsp/noise_model.h b/third_party/aom/aom_dsp/noise_model.h
index 8228aeacfc..5b2d7efe29 100644
--- a/third_party/aom/aom_dsp/noise_model.h
+++ b/third_party/aom/aom_dsp/noise_model.h
@@ -297,14 +297,14 @@ struct aom_denoise_and_model_t;
* aom_denoise_and_model_alloc that holds some
* buffers for denoising and the current noise
* estimate.
- * \param[in,out] buf The raw input buffer to be denoised.
+ * \param[in,out] sd The raw input buffer to be denoised.
* \param[out] grain Output film grain parameters
* \param[in] apply_denoise Whether or not to apply the denoising to the
* frame that will be encoded
*/
int aom_denoise_and_model_run(struct aom_denoise_and_model_t *ctx,
- YV12_BUFFER_CONFIG *buf, aom_film_grain_t *grain,
- int apply_denoise);
+ const YV12_BUFFER_CONFIG *sd,
+ aom_film_grain_t *grain, int apply_denoise);
/*!\brief Allocates a context that can be used for denoising and noise modeling.
*
diff --git a/third_party/aom/aom_dsp/pyramid.c b/third_party/aom/aom_dsp/pyramid.c
index 324a18baea..5de001dbd5 100644
--- a/third_party/aom/aom_dsp/pyramid.c
+++ b/third_party/aom/aom_dsp/pyramid.c
@@ -12,7 +12,7 @@
#include "aom_dsp/pyramid.h"
#include "aom_mem/aom_mem.h"
#include "aom_ports/bitops.h"
-#include "aom_util/aom_thread.h"
+#include "aom_util/aom_pthread.h"
// TODO(rachelbarker): Move needed code from av1/ to aom_dsp/
#include "av1/common/resize.h"
@@ -26,18 +26,16 @@
// levels. This is counted in the size checked against the max allocation
// limit
// * Then calls aom_alloc_pyramid() to actually create the pyramid
-// * Pyramid is initially marked as invalid (no data)
-// * Whenever pyramid is needed, we check the valid flag. If set, use existing
-// data. If not set, compute full pyramid
-// * Whenever frame buffer is reused, clear the valid flag
+// * Pyramid is initially marked as containing no valid data
+// * Each pyramid layer is computed on-demand, the first time it is requested
+// * Whenever frame buffer is reused, reset the counter of filled levels.
+// This invalidates all of the existing pyramid levels.
// * Whenever frame buffer is resized, reallocate pyramid
-size_t aom_get_pyramid_alloc_size(int width, int height, int n_levels,
- bool image_is_16bit) {
- // Limit number of levels on small frames
+size_t aom_get_pyramid_alloc_size(int width, int height, bool image_is_16bit) {
+ // Allocate the maximum possible number of layers for this width and height
const int msb = get_msb(AOMMIN(width, height));
- const int max_levels = AOMMAX(msb - MIN_PYRAMID_SIZE_LOG2, 1);
- n_levels = AOMMIN(n_levels, max_levels);
+ const int n_levels = AOMMAX(msb - MIN_PYRAMID_SIZE_LOG2, 1);
size_t alloc_size = 0;
alloc_size += sizeof(ImagePyramid);
@@ -100,12 +98,10 @@ size_t aom_get_pyramid_alloc_size(int width, int height, int n_levels,
return alloc_size;
}
-ImagePyramid *aom_alloc_pyramid(int width, int height, int n_levels,
- bool image_is_16bit) {
- // Limit number of levels on small frames
+ImagePyramid *aom_alloc_pyramid(int width, int height, bool image_is_16bit) {
+ // Allocate the maximum possible number of layers for this width and height
const int msb = get_msb(AOMMIN(width, height));
- const int max_levels = AOMMAX(msb - MIN_PYRAMID_SIZE_LOG2, 1);
- n_levels = AOMMIN(n_levels, max_levels);
+ const int n_levels = AOMMAX(msb - MIN_PYRAMID_SIZE_LOG2, 1);
ImagePyramid *pyr = aom_calloc(1, sizeof(*pyr));
if (!pyr) {
@@ -118,8 +114,8 @@ ImagePyramid *aom_alloc_pyramid(int width, int height, int n_levels,
return NULL;
}
- pyr->valid = false;
- pyr->n_levels = n_levels;
+ pyr->max_levels = n_levels;
+ pyr->filled_levels = 0;
// Compute sizes and offsets for each pyramid level
// These are gathered up first, so that we can allocate all pyramid levels
@@ -248,46 +244,67 @@ static INLINE void fill_border(uint8_t *img_buf, const int width,
}
}
-// Compute coarse to fine pyramids for a frame
+// Compute downsampling pyramid for a frame
+//
+// This function will ensure that the first `n_levels` levels of the pyramid
+// are filled, unless the frame is too small to have this many levels.
+// In that case, we will fill all available levels and then stop.
+//
+// Returns the actual number of levels filled, capped at n_levels,
+// or -1 on error.
+//
// This must only be called while holding frame_pyr->mutex
-static INLINE bool fill_pyramid(const YV12_BUFFER_CONFIG *frame, int bit_depth,
- ImagePyramid *frame_pyr) {
- int n_levels = frame_pyr->n_levels;
+static INLINE int fill_pyramid(const YV12_BUFFER_CONFIG *frame, int bit_depth,
+ int n_levels, ImagePyramid *frame_pyr) {
+ int already_filled_levels = frame_pyr->filled_levels;
+
+ // This condition should already be enforced by aom_compute_pyramid
+ assert(n_levels <= frame_pyr->max_levels);
+
+ if (already_filled_levels >= n_levels) {
+ return n_levels;
+ }
+
const int frame_width = frame->y_crop_width;
const int frame_height = frame->y_crop_height;
const int frame_stride = frame->y_stride;
assert((frame_width >> n_levels) >= 0);
assert((frame_height >> n_levels) >= 0);
- PyramidLayer *first_layer = &frame_pyr->layers[0];
- if (frame->flags & YV12_FLAG_HIGHBITDEPTH) {
- // For frames stored in a 16-bit buffer, we need to downconvert to 8 bits
- assert(first_layer->width == frame_width);
- assert(first_layer->height == frame_height);
-
- uint16_t *frame_buffer = CONVERT_TO_SHORTPTR(frame->y_buffer);
- uint8_t *pyr_buffer = first_layer->buffer;
- int pyr_stride = first_layer->stride;
- for (int y = 0; y < frame_height; y++) {
- uint16_t *frame_row = frame_buffer + y * frame_stride;
- uint8_t *pyr_row = pyr_buffer + y * pyr_stride;
- for (int x = 0; x < frame_width; x++) {
- pyr_row[x] = frame_row[x] >> (bit_depth - 8);
+ if (already_filled_levels == 0) {
+ // Fill in largest level from the original image
+ PyramidLayer *first_layer = &frame_pyr->layers[0];
+ if (frame->flags & YV12_FLAG_HIGHBITDEPTH) {
+ // For frames stored in a 16-bit buffer, we need to downconvert to 8 bits
+ assert(first_layer->width == frame_width);
+ assert(first_layer->height == frame_height);
+
+ uint16_t *frame_buffer = CONVERT_TO_SHORTPTR(frame->y_buffer);
+ uint8_t *pyr_buffer = first_layer->buffer;
+ int pyr_stride = first_layer->stride;
+ for (int y = 0; y < frame_height; y++) {
+ uint16_t *frame_row = frame_buffer + y * frame_stride;
+ uint8_t *pyr_row = pyr_buffer + y * pyr_stride;
+ for (int x = 0; x < frame_width; x++) {
+ pyr_row[x] = frame_row[x] >> (bit_depth - 8);
+ }
}
+
+ fill_border(pyr_buffer, frame_width, frame_height, pyr_stride);
+ } else {
+ // For frames stored in an 8-bit buffer, we don't need to copy anything -
+ // we can just reference the original image buffer
+ first_layer->buffer = frame->y_buffer;
+ first_layer->width = frame_width;
+ first_layer->height = frame_height;
+ first_layer->stride = frame_stride;
}
- fill_border(pyr_buffer, frame_width, frame_height, pyr_stride);
- } else {
- // For frames stored in an 8-bit buffer, we need to configure the first
- // pyramid layer to point at the original image buffer
- first_layer->buffer = frame->y_buffer;
- first_layer->width = frame_width;
- first_layer->height = frame_height;
- first_layer->stride = frame_stride;
+ already_filled_levels = 1;
}
// Fill in the remaining levels through progressive downsampling
- for (int level = 1; level < n_levels; ++level) {
+ for (int level = already_filled_levels; level < n_levels; ++level) {
PyramidLayer *prev_layer = &frame_pyr->layers[level - 1];
uint8_t *prev_buffer = prev_layer->buffer;
int prev_stride = prev_layer->stride;
@@ -314,11 +331,16 @@ static INLINE bool fill_pyramid(const YV12_BUFFER_CONFIG *frame, int bit_depth,
// TODO(rachelbarker): Use optimized downsample-by-2 function
if (!av1_resize_plane(prev_buffer, this_height << 1, this_width << 1,
prev_stride, this_buffer, this_height, this_width,
- this_stride))
- return false;
+ this_stride)) {
+ // If we can't allocate memory, we'll have to terminate early
+ frame_pyr->filled_levels = n_levels;
+ return -1;
+ }
fill_border(this_buffer, this_width, this_height, this_stride);
}
- return true;
+
+ frame_pyr->filled_levels = n_levels;
+ return n_levels;
}
// Fill out a downsampling pyramid for a given frame.
@@ -327,63 +349,72 @@ static INLINE bool fill_pyramid(const YV12_BUFFER_CONFIG *frame, int bit_depth,
// regardless of the input bit depth. Additional levels are then downscaled
// by powers of 2.
//
-// For small input frames, the number of levels actually constructed
-// will be limited so that the smallest image is at least MIN_PYRAMID_SIZE
-// pixels along each side.
+// This function will ensure that the first `n_levels` levels of the pyramid
+// are filled, unless the frame is too small to have this many levels.
+// In that case, we will fill all available levels and then stop.
+// No matter how small the frame is, at least one level is guaranteed
+// to be filled.
//
-// However, if the input frame has a side of length < MIN_PYRAMID_SIZE,
-// we will still construct the top level.
-bool aom_compute_pyramid(const YV12_BUFFER_CONFIG *frame, int bit_depth,
- ImagePyramid *pyr) {
+// Returns the actual number of levels filled, capped at n_levels,
+// or -1 on error.
+int aom_compute_pyramid(const YV12_BUFFER_CONFIG *frame, int bit_depth,
+ int n_levels, ImagePyramid *pyr) {
assert(pyr);
// Per the comments in the ImagePyramid struct, we must take this mutex
- // before reading or writing the "valid" flag, and hold it while computing
- // the pyramid, to ensure proper behaviour if multiple threads call this
- // function simultaneously
+ // before reading or writing the filled_levels field, and hold it while
+ // computing any additional pyramid levels, to ensure proper behaviour
+ // when multithreading is used
#if CONFIG_MULTITHREAD
pthread_mutex_lock(&pyr->mutex);
#endif // CONFIG_MULTITHREAD
- if (!pyr->valid) {
- pyr->valid = fill_pyramid(frame, bit_depth, pyr);
+ n_levels = AOMMIN(n_levels, pyr->max_levels);
+ int result = n_levels;
+ if (pyr->filled_levels < n_levels) {
+ // Compute any missing levels that we need
+ result = fill_pyramid(frame, bit_depth, n_levels, pyr);
}
- bool valid = pyr->valid;
-
- // At this point, the pyramid is guaranteed to be valid, and can be safely
- // read from without holding the mutex any more
+ // At this point, as long as result >= 0, the requested number of pyramid
+ // levels are guaranteed to be valid, and can be safely read from without
+ // holding the mutex any further
+ assert(IMPLIES(result >= 0, pyr->filled_levels >= n_levels));
#if CONFIG_MULTITHREAD
pthread_mutex_unlock(&pyr->mutex);
#endif // CONFIG_MULTITHREAD
- return valid;
+ return result;
}
#ifndef NDEBUG
-// Check if a pyramid has already been computed.
+// Check if a pyramid has already been computed to at least n levels
// This is mostly a debug helper - as it is necessary to hold pyr->mutex
-// while reading the valid flag, we cannot just write:
-// assert(pyr->valid);
+// while reading the number of already-computed levels, we cannot just write:
+// assert(pyr->filled_levels >= n_levels);
// This function allows the check to be correctly written as:
-// assert(aom_is_pyramid_valid(pyr));
-bool aom_is_pyramid_valid(ImagePyramid *pyr) {
+// assert(aom_is_pyramid_valid(pyr, n_levels));
+//
+// Note: This deliberately does not restrict n_levels based on the maximum
+// number of permitted levels for the frame size. This allows the check to
+// catch cases where the caller forgets to handle the case where
+// max_levels is less than the requested number of levels
+bool aom_is_pyramid_valid(ImagePyramid *pyr, int n_levels) {
assert(pyr);
// Per the comments in the ImagePyramid struct, we must take this mutex
- // before reading or writing the "valid" flag, and hold it while computing
- // the pyramid, to ensure proper behaviour if multiple threads call this
- // function simultaneously
+ // before reading or writing the filled_levels field, to ensure proper
+ // behaviour when multithreading is used
#if CONFIG_MULTITHREAD
pthread_mutex_lock(&pyr->mutex);
#endif // CONFIG_MULTITHREAD
- bool valid = pyr->valid;
+ bool result = (pyr->filled_levels >= n_levels);
#if CONFIG_MULTITHREAD
pthread_mutex_unlock(&pyr->mutex);
#endif // CONFIG_MULTITHREAD
- return valid;
+ return result;
}
#endif
@@ -394,7 +425,7 @@ void aom_invalidate_pyramid(ImagePyramid *pyr) {
#if CONFIG_MULTITHREAD
pthread_mutex_lock(&pyr->mutex);
#endif // CONFIG_MULTITHREAD
- pyr->valid = false;
+ pyr->filled_levels = 0;
#if CONFIG_MULTITHREAD
pthread_mutex_unlock(&pyr->mutex);
#endif // CONFIG_MULTITHREAD
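
Editor's note: with aom_compute_pyramid now returning the number of filled levels, a hypothetical caller under the new contract might look like the following. The surrounding variable names are illustrative; the aom_* signatures are those declared in pyramid.h below:

    // Request 4 levels; small frames may legitimately yield fewer.
    const int levels = aom_compute_pyramid(frame, bit_depth, 4, pyr);
    if (levels < 0) {
      return 0;  // allocation failure while downsampling
    }
    // Only the first `levels` layers hold valid data from here on.
    assert(aom_is_pyramid_valid(pyr, levels));
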
diff --git a/third_party/aom/aom_dsp/pyramid.h b/third_party/aom/aom_dsp/pyramid.h
index 9442a1ff08..745bb7e525 100644
--- a/third_party/aom/aom_dsp/pyramid.h
+++ b/third_party/aom/aom_dsp/pyramid.h
@@ -19,7 +19,7 @@
#include "config/aom_config.h"
#include "aom_scale/yv12config.h"
-#include "aom_util/aom_thread.h"
+#include "aom_util/aom_pthread.h"
#ifdef __cplusplus
extern "C" {
@@ -57,23 +57,31 @@ typedef struct image_pyramid {
// same time
//
// Semantics:
- // * This mutex must be held whenever reading or writing the `valid` flag
+ // * This mutex must be held whenever reading or writing the
+ // `filled_levels` field
//
// * This mutex must also be held while computing the image pyramid,
// to ensure that only one thread may do so at a time.
//
- // * However, once you have read the valid flag and seen a true value,
- // it is safe to drop the mutex and read from the remaining fields.
- // This is because, once the image pyramid is computed, its contents
+ // * However, once you have read the filled_levels field and observed
+ // a value N, it is safe to drop the mutex and read from the remaining
+ // fields, including the first N pyramid levels (but no higher).
+ // Note that filled_levels must be read once and cached in a local variable
+ // in order for this to be safe - it cannot be re-read without retaking
+ // the mutex.
+ //
+ // This works because, once the image pyramid is computed, its contents
// will not be changed until the parent frame buffer is recycled,
// which will not happen until there are no more outstanding references
// to the frame buffer.
pthread_mutex_t mutex;
#endif
- // Flag indicating whether the pyramid contains valid data
- bool valid;
- // Number of allocated/filled levels in this pyramid
- int n_levels;
+ // Maximum number of levels for the given frame size
+ // We always allocate enough memory for this many levels, as the memory
+ // cost of higher levels of the pyramid is minimal.
+ int max_levels;
+ // Number of levels which currently hold valid data
+ int filled_levels;
// Pointer to allocated buffer
uint8_t *buffer_alloc;
// Data for each level
@@ -82,11 +90,9 @@ typedef struct image_pyramid {
PyramidLayer *layers;
} ImagePyramid;
-size_t aom_get_pyramid_alloc_size(int width, int height, int n_levels,
- bool image_is_16bit);
+size_t aom_get_pyramid_alloc_size(int width, int height, bool image_is_16bit);
-ImagePyramid *aom_alloc_pyramid(int width, int height, int n_levels,
- bool image_is_16bit);
+ImagePyramid *aom_alloc_pyramid(int width, int height, bool image_is_16bit);
// Fill out a downsampling pyramid for a given frame.
//
@@ -94,23 +100,28 @@ ImagePyramid *aom_alloc_pyramid(int width, int height, int n_levels,
// regardless of the input bit depth. Additional levels are then downscaled
// by powers of 2.
//
-// For small input frames, the number of levels actually constructed
-// will be limited so that the smallest image is at least MIN_PYRAMID_SIZE
-// pixels along each side.
+// This function will ensure that the first `n_levels` levels of the pyramid
+// are filled, unless the frame is too small to have this many levels.
+// In that case, we will fill all available levels and then stop.
//
-// However, if the input frame has a side of length < MIN_PYRAMID_SIZE,
-// we will still construct the top level.
-bool aom_compute_pyramid(const YV12_BUFFER_CONFIG *frame, int bit_depth,
- ImagePyramid *pyr);
+// Returns the actual number of levels filled, capped at n_levels,
+// or -1 on error.
+int aom_compute_pyramid(const YV12_BUFFER_CONFIG *frame, int bit_depth,
+ int n_levels, ImagePyramid *pyr);
#ifndef NDEBUG
-// Check if a pyramid has already been computed.
+// Check if a pyramid has already been computed to at least n levels
// This is mostly a debug helper - as it is necessary to hold pyr->mutex
-// while reading the valid flag, we cannot just write:
-// assert(pyr->valid);
+// while reading the number of already-computed levels, we cannot just write:
+// assert(pyr->filled_levels >= n_levels);
// This function allows the check to be correctly written as:
-// assert(aom_is_pyramid_valid(pyr));
-bool aom_is_pyramid_valid(ImagePyramid *pyr);
+// assert(aom_is_pyramid_valid(pyr, n_levels));
+//
+// Note: This deliberately does not restrict n_levels based on the maximum
+// number of permitted levels for the frame size. This allows the check to
+// catch cases where the caller forgets to handle the case where
+// max_levels is less than the requested number of levels
+bool aom_is_pyramid_valid(ImagePyramid *pyr, int n_levels);
#endif
// Mark a pyramid as no longer containing valid data.
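
Editor's note: the filled_levels protocol documented in the struct comment amounts to lock, read once into a local, unlock, then touch only the first N layers. A sketch, assuming CONFIG_MULTITHREAD and the ImagePyramid layout above:

    #if CONFIG_MULTITHREAD
      pthread_mutex_lock(&pyr->mutex);
    #endif
      const int n_filled = pyr->filled_levels;  // cache once while holding the mutex
    #if CONFIG_MULTITHREAD
      pthread_mutex_unlock(&pyr->mutex);
    #endif
      // Layers [0, n_filled) may now be read lock-free; re-reading
      // pyr->filled_levels later requires retaking the mutex.
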
diff --git a/third_party/aom/aom_dsp/rect.h b/third_party/aom/aom_dsp/rect.h
deleted file mode 100644
index 11bdaca979..0000000000
--- a/third_party/aom/aom_dsp/rect.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright (c) 2022, Alliance for Open Media. All rights reserved
- *
- * This source code is subject to the terms of the BSD 2 Clause License and
- * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
- * was not distributed with this source code in the LICENSE file, you can
- * obtain it at www.aomedia.org/license/software. If the Alliance for Open
- * Media Patent License 1.0 was not distributed with this source code in the
- * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
- */
-
-#ifndef AOM_AOM_DSP_RECT_H_
-#define AOM_AOM_DSP_RECT_H_
-
-#include "config/aom_config.h"
-
-#include <stdbool.h>
-
-// Struct representing a rectangle of pixels.
-// The axes are inclusive-exclusive, ie. the point (top, left) is included
-// in the rectangle but (bottom, right) is not.
-typedef struct {
- int left, right, top, bottom;
-} PixelRect;
-
-static INLINE int rect_width(const PixelRect *r) { return r->right - r->left; }
-
-static INLINE int rect_height(const PixelRect *r) { return r->bottom - r->top; }
-
-static INLINE bool is_inside_rect(const int x, const int y,
- const PixelRect *r) {
- return (r->left <= x && x < r->right) && (r->top <= y && y < r->bottom);
-}
-
-#endif // AOM_AOM_DSP_RECT_H_
diff --git a/third_party/aom/aom_dsp/variance.c b/third_party/aom/aom_dsp/variance.c
index f02c3077ae..6cdd58492a 100644
--- a/third_party/aom/aom_dsp/variance.c
+++ b/third_party/aom/aom_dsp/variance.c
@@ -10,7 +10,6 @@
*/
#include <assert.h>
#include <stdlib.h>
-#include <string.h>
#include "config/aom_config.h"
#include "config/aom_dsp_rtcd.h"
@@ -70,12 +69,10 @@ uint32_t aom_sse_odd_size(const uint8_t *a, int a_stride, const uint8_t *b,
// taps should sum to FILTER_WEIGHT. pixel_step defines whether the filter is
// applied horizontally (pixel_step = 1) or vertically (pixel_step = stride).
// It defines the offset required to move from one input to the next.
-void aom_var_filter_block2d_bil_first_pass_c(const uint8_t *a, uint16_t *b,
- unsigned int src_pixels_per_line,
- unsigned int pixel_step,
- unsigned int output_height,
- unsigned int output_width,
- const uint8_t *filter) {
+static void var_filter_block2d_bil_first_pass_c(
+ const uint8_t *a, uint16_t *b, unsigned int src_pixels_per_line,
+ unsigned int pixel_step, unsigned int output_height,
+ unsigned int output_width, const uint8_t *filter) {
unsigned int i, j;
for (i = 0; i < output_height; ++i) {
@@ -100,12 +97,10 @@ void aom_var_filter_block2d_bil_first_pass_c(const uint8_t *a, uint16_t *b,
// filter is applied horizontally (pixel_step = 1) or vertically
// (pixel_step = stride). It defines the offset required to move from one input
// to the next. Output is 8-bit.
-void aom_var_filter_block2d_bil_second_pass_c(const uint16_t *a, uint8_t *b,
- unsigned int src_pixels_per_line,
- unsigned int pixel_step,
- unsigned int output_height,
- unsigned int output_width,
- const uint8_t *filter) {
+static void var_filter_block2d_bil_second_pass_c(
+ const uint16_t *a, uint8_t *b, unsigned int src_pixels_per_line,
+ unsigned int pixel_step, unsigned int output_height,
+ unsigned int output_width, const uint8_t *filter) {
unsigned int i, j;
for (i = 0; i < output_height; ++i) {
@@ -129,19 +124,19 @@ void aom_var_filter_block2d_bil_second_pass_c(const uint16_t *a, uint8_t *b,
return *sse - (uint32_t)(((int64_t)sum * sum) / (W * H)); \
}
-#define SUBPIX_VAR(W, H) \
- uint32_t aom_sub_pixel_variance##W##x##H##_c( \
- const uint8_t *a, int a_stride, int xoffset, int yoffset, \
- const uint8_t *b, int b_stride, uint32_t *sse) { \
- uint16_t fdata3[(H + 1) * W]; \
- uint8_t temp2[H * W]; \
- \
- aom_var_filter_block2d_bil_first_pass_c(a, fdata3, a_stride, 1, H + 1, W, \
- bilinear_filters_2t[xoffset]); \
- aom_var_filter_block2d_bil_second_pass_c(fdata3, temp2, W, W, H, W, \
- bilinear_filters_2t[yoffset]); \
- \
- return aom_variance##W##x##H##_c(temp2, W, b, b_stride, sse); \
+#define SUBPIX_VAR(W, H) \
+ uint32_t aom_sub_pixel_variance##W##x##H##_c( \
+ const uint8_t *a, int a_stride, int xoffset, int yoffset, \
+ const uint8_t *b, int b_stride, uint32_t *sse) { \
+ uint16_t fdata3[(H + 1) * W]; \
+ uint8_t temp2[H * W]; \
+ \
+ var_filter_block2d_bil_first_pass_c(a, fdata3, a_stride, 1, H + 1, W, \
+ bilinear_filters_2t[xoffset]); \
+ var_filter_block2d_bil_second_pass_c(fdata3, temp2, W, W, H, W, \
+ bilinear_filters_2t[yoffset]); \
+ \
+ return aom_variance##W##x##H##_c(temp2, W, b, b_stride, sse); \
}
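
Editor's note: both passes invoked by this macro are two-tap weighted averages whose taps sum to FILTER_WEIGHT (128), per the comment on these helpers earlier in the file. A scalar sketch of a single pass, assuming the rounding shift is FILTER_BITS; the helper name is illustrative:

    static void bil_pass_sketch(const uint8_t *a, uint16_t *b,
                                unsigned int src_pixels_per_line,
                                unsigned int pixel_step, unsigned int height,
                                unsigned int width, const uint8_t *filter) {
      for (unsigned int i = 0; i < height; ++i) {
        for (unsigned int j = 0; j < width; ++j) {
          // Weighted average of two adjacent samples: horizontal when
          // pixel_step == 1, vertical when pixel_step == stride.
          b[j] = (uint16_t)ROUND_POWER_OF_TWO(
              (int)a[j] * filter[0] + (int)a[j + pixel_step] * filter[1],
              FILTER_BITS);
        }
        a += src_pixels_per_line;
        b += width;
      }
    }

The first pass runs over H + 1 rows so that the second, vertical pass has the extra row it needs below the block.
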
#define SUBPIX_AVG_VAR(W, H) \
@@ -153,10 +148,10 @@ void aom_var_filter_block2d_bil_second_pass_c(const uint16_t *a, uint8_t *b,
uint8_t temp2[H * W]; \
DECLARE_ALIGNED(16, uint8_t, temp3[H * W]); \
\
- aom_var_filter_block2d_bil_first_pass_c(a, fdata3, a_stride, 1, H + 1, W, \
- bilinear_filters_2t[xoffset]); \
- aom_var_filter_block2d_bil_second_pass_c(fdata3, temp2, W, W, H, W, \
- bilinear_filters_2t[yoffset]); \
+ var_filter_block2d_bil_first_pass_c(a, fdata3, a_stride, 1, H + 1, W, \
+ bilinear_filters_2t[xoffset]); \
+ var_filter_block2d_bil_second_pass_c(fdata3, temp2, W, W, H, W, \
+ bilinear_filters_2t[yoffset]); \
\
aom_comp_avg_pred(temp3, second_pred, W, H, temp2, W); \
\
@@ -170,10 +165,10 @@ void aom_var_filter_block2d_bil_second_pass_c(const uint16_t *a, uint8_t *b,
uint8_t temp2[H * W]; \
DECLARE_ALIGNED(16, uint8_t, temp3[H * W]); \
\
- aom_var_filter_block2d_bil_first_pass_c(a, fdata3, a_stride, 1, H + 1, W, \
- bilinear_filters_2t[xoffset]); \
- aom_var_filter_block2d_bil_second_pass_c(fdata3, temp2, W, W, H, W, \
- bilinear_filters_2t[yoffset]); \
+ var_filter_block2d_bil_first_pass_c(a, fdata3, a_stride, 1, H + 1, W, \
+ bilinear_filters_2t[xoffset]); \
+ var_filter_block2d_bil_second_pass_c(fdata3, temp2, W, W, H, W, \
+ bilinear_filters_2t[yoffset]); \
\
aom_dist_wtd_comp_avg_pred(temp3, second_pred, W, H, temp2, W, jcp_param); \
\
@@ -730,24 +725,24 @@ void aom_comp_mask_pred_c(uint8_t *comp_pred, const uint8_t *pred, int width,
}
}
-#define MASK_SUBPIX_VAR(W, H) \
- unsigned int aom_masked_sub_pixel_variance##W##x##H##_c( \
- const uint8_t *src, int src_stride, int xoffset, int yoffset, \
- const uint8_t *ref, int ref_stride, const uint8_t *second_pred, \
- const uint8_t *msk, int msk_stride, int invert_mask, \
- unsigned int *sse) { \
- uint16_t fdata3[(H + 1) * W]; \
- uint8_t temp2[H * W]; \
- DECLARE_ALIGNED(16, uint8_t, temp3[H * W]); \
- \
- aom_var_filter_block2d_bil_first_pass_c(src, fdata3, src_stride, 1, H + 1, \
- W, bilinear_filters_2t[xoffset]); \
- aom_var_filter_block2d_bil_second_pass_c(fdata3, temp2, W, W, H, W, \
- bilinear_filters_2t[yoffset]); \
- \
- aom_comp_mask_pred_c(temp3, second_pred, W, H, temp2, W, msk, msk_stride, \
- invert_mask); \
- return aom_variance##W##x##H##_c(temp3, W, ref, ref_stride, sse); \
+#define MASK_SUBPIX_VAR(W, H) \
+ unsigned int aom_masked_sub_pixel_variance##W##x##H##_c( \
+ const uint8_t *src, int src_stride, int xoffset, int yoffset, \
+ const uint8_t *ref, int ref_stride, const uint8_t *second_pred, \
+ const uint8_t *msk, int msk_stride, int invert_mask, \
+ unsigned int *sse) { \
+ uint16_t fdata3[(H + 1) * W]; \
+ uint8_t temp2[H * W]; \
+ DECLARE_ALIGNED(16, uint8_t, temp3[H * W]); \
+ \
+ var_filter_block2d_bil_first_pass_c(src, fdata3, src_stride, 1, H + 1, W, \
+ bilinear_filters_2t[xoffset]); \
+ var_filter_block2d_bil_second_pass_c(fdata3, temp2, W, W, H, W, \
+ bilinear_filters_2t[yoffset]); \
+ \
+ aom_comp_mask_pred_c(temp3, second_pred, W, H, temp2, W, msk, msk_stride, \
+ invert_mask); \
+ return aom_variance##W##x##H##_c(temp3, W, ref, ref_stride, sse); \
}
MASK_SUBPIX_VAR(4, 4)
@@ -924,19 +919,19 @@ static INLINE void obmc_variance(const uint8_t *pre, int pre_stride,
return *sse - (unsigned int)(((int64_t)sum * sum) / (W * H)); \
}
-#define OBMC_SUBPIX_VAR(W, H) \
- unsigned int aom_obmc_sub_pixel_variance##W##x##H##_c( \
- const uint8_t *pre, int pre_stride, int xoffset, int yoffset, \
- const int32_t *wsrc, const int32_t *mask, unsigned int *sse) { \
- uint16_t fdata3[(H + 1) * W]; \
- uint8_t temp2[H * W]; \
- \
- aom_var_filter_block2d_bil_first_pass_c(pre, fdata3, pre_stride, 1, H + 1, \
- W, bilinear_filters_2t[xoffset]); \
- aom_var_filter_block2d_bil_second_pass_c(fdata3, temp2, W, W, H, W, \
- bilinear_filters_2t[yoffset]); \
- \
- return aom_obmc_variance##W##x##H##_c(temp2, W, wsrc, mask, sse); \
+#define OBMC_SUBPIX_VAR(W, H) \
+ unsigned int aom_obmc_sub_pixel_variance##W##x##H##_c( \
+ const uint8_t *pre, int pre_stride, int xoffset, int yoffset, \
+ const int32_t *wsrc, const int32_t *mask, unsigned int *sse) { \
+ uint16_t fdata3[(H + 1) * W]; \
+ uint8_t temp2[H * W]; \
+ \
+ var_filter_block2d_bil_first_pass_c(pre, fdata3, pre_stride, 1, H + 1, W, \
+ bilinear_filters_2t[xoffset]); \
+ var_filter_block2d_bil_second_pass_c(fdata3, temp2, W, W, H, W, \
+ bilinear_filters_2t[yoffset]); \
+ \
+ return aom_obmc_variance##W##x##H##_c(temp2, W, wsrc, mask, sse); \
}
OBMC_VAR(4, 4)
diff --git a/third_party/aom/aom_dsp/x86/aom_asm_stubs.c b/third_party/aom/aom_dsp/x86/aom_asm_stubs.c
index b08ec2546b..6c7fdd6eb1 100644
--- a/third_party/aom/aom_dsp/x86/aom_asm_stubs.c
+++ b/third_party/aom/aom_dsp/x86/aom_asm_stubs.c
@@ -15,40 +15,6 @@
#include "aom_dsp/x86/convolve.h"
#if HAVE_SSE2
-filter8_1dfunction aom_filter_block1d16_v8_sse2;
-filter8_1dfunction aom_filter_block1d16_h8_sse2;
-filter8_1dfunction aom_filter_block1d8_v8_sse2;
-filter8_1dfunction aom_filter_block1d8_h8_sse2;
-filter8_1dfunction aom_filter_block1d4_v8_sse2;
-filter8_1dfunction aom_filter_block1d4_h8_sse2;
-filter8_1dfunction aom_filter_block1d16_v4_sse2;
-filter8_1dfunction aom_filter_block1d16_h4_sse2;
-
-filter8_1dfunction aom_filter_block1d8_h4_sse2;
-filter8_1dfunction aom_filter_block1d8_v4_sse2;
-filter8_1dfunction aom_filter_block1d4_h4_sse2;
-filter8_1dfunction aom_filter_block1d4_v4_sse2;
-
-filter8_1dfunction aom_filter_block1d16_v2_sse2;
-filter8_1dfunction aom_filter_block1d16_h2_sse2;
-filter8_1dfunction aom_filter_block1d8_v2_sse2;
-filter8_1dfunction aom_filter_block1d8_h2_sse2;
-filter8_1dfunction aom_filter_block1d4_v2_sse2;
-filter8_1dfunction aom_filter_block1d4_h2_sse2;
-
-// void aom_convolve8_horiz_sse2(const uint8_t *src, ptrdiff_t src_stride,
-// uint8_t *dst, ptrdiff_t dst_stride,
-// const int16_t *filter_x, int x_step_q4,
-// const int16_t *filter_y, int y_step_q4,
-// int w, int h);
-// void aom_convolve8_vert_sse2(const uint8_t *src, ptrdiff_t src_stride,
-// uint8_t *dst, ptrdiff_t dst_stride,
-// const int16_t *filter_x, int x_step_q4,
-// const int16_t *filter_y, int y_step_q4,
-// int w, int h);
-FUN_CONV_1D(horiz, x_step_q4, filter_x, h, src, , sse2)
-FUN_CONV_1D(vert, y_step_q4, filter_y, v, src - src_stride * 3, , sse2)
-
#if CONFIG_AV1_HIGHBITDEPTH
highbd_filter8_1dfunction aom_highbd_filter_block1d16_v8_sse2;
highbd_filter8_1dfunction aom_highbd_filter_block1d16_h8_sse2;
diff --git a/third_party/aom/aom_dsp/x86/aom_subpixel_8t_intrin_sse2.c b/third_party/aom/aom_dsp/x86/aom_subpixel_8t_intrin_sse2.c
deleted file mode 100644
index 5c36b68727..0000000000
--- a/third_party/aom/aom_dsp/x86/aom_subpixel_8t_intrin_sse2.c
+++ /dev/null
@@ -1,569 +0,0 @@
-/*
- * Copyright (c) 2018, Alliance for Open Media. All rights reserved
- *
- * This source code is subject to the terms of the BSD 2 Clause License and
- * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
- * was not distributed with this source code in the LICENSE file, you can
- * obtain it at www.aomedia.org/license/software. If the Alliance for Open
- * Media Patent License 1.0 was not distributed with this source code in the
- * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
- */
-
-#include <emmintrin.h> // SSE2
-
-#include "config/aom_dsp_rtcd.h"
-#include "aom_dsp/x86/convolve.h"
-#include "aom_ports/mem.h"
-
-void aom_filter_block1d16_h4_sse2(const uint8_t *src_ptr,
- ptrdiff_t src_pixels_per_line,
- uint8_t *output_ptr, ptrdiff_t output_pitch,
- uint32_t output_height,
- const int16_t *filter) {
- __m128i filtersReg;
- __m128i addFilterReg32;
- __m128i secondFilters, thirdFilters;
- __m128i srcRegFilt32b1_1, srcRegFilt32b1_2, srcRegFilt32b2_1,
- srcRegFilt32b2_2;
- __m128i srcReg32b1, srcReg32b2;
- unsigned int i;
- src_ptr -= 3;
- addFilterReg32 = _mm_set1_epi16(32);
- filtersReg = _mm_loadu_si128((const __m128i *)filter);
- filtersReg = _mm_srai_epi16(filtersReg, 1);
-
- // coeffs 0 1 0 1 2 3 2 3
- const __m128i tmp_0 = _mm_unpacklo_epi32(filtersReg, filtersReg);
- // coeffs 4 5 4 5 6 7 6 7
- const __m128i tmp_1 = _mm_unpackhi_epi32(filtersReg, filtersReg);
-
- secondFilters = _mm_unpackhi_epi64(tmp_0, tmp_0); // coeffs 2 3 2 3 2 3 2 3
- thirdFilters = _mm_unpacklo_epi64(tmp_1, tmp_1); // coeffs 4 5 4 5 4 5 4 5
-
- for (i = output_height; i > 0; i -= 1) {
- srcReg32b1 = _mm_loadu_si128((const __m128i *)src_ptr);
-
- __m128i ss_2 = _mm_srli_si128(srcReg32b1, 2);
- __m128i ss_4 = _mm_srli_si128(srcReg32b1, 4);
- __m128i ss_1_1 = _mm_unpacklo_epi8(ss_2, _mm_setzero_si128());
- __m128i ss_2_1 = _mm_unpacklo_epi8(ss_4, _mm_setzero_si128());
- __m128i d1 = _mm_madd_epi16(ss_1_1, secondFilters);
- __m128i d2 = _mm_madd_epi16(ss_2_1, thirdFilters);
- srcRegFilt32b1_1 = _mm_add_epi32(d1, d2);
-
- __m128i ss_1 = _mm_srli_si128(srcReg32b1, 3);
- __m128i ss_3 = _mm_srli_si128(srcReg32b1, 5);
- __m128i ss_1_2 = _mm_unpacklo_epi8(ss_1, _mm_setzero_si128());
- __m128i ss_2_2 = _mm_unpacklo_epi8(ss_3, _mm_setzero_si128());
- d1 = _mm_madd_epi16(ss_1_2, secondFilters);
- d2 = _mm_madd_epi16(ss_2_2, thirdFilters);
- srcRegFilt32b1_2 = _mm_add_epi32(d1, d2);
-
- __m128i res_lo = _mm_unpacklo_epi32(srcRegFilt32b1_1, srcRegFilt32b1_2);
- __m128i res_hi = _mm_unpackhi_epi32(srcRegFilt32b1_1, srcRegFilt32b1_2);
- srcRegFilt32b1_1 = _mm_packs_epi32(res_lo, res_hi);
-
- // reading stride of the next 16 bytes
- // (part of it was being read by earlier read)
- srcReg32b2 = _mm_loadu_si128((const __m128i *)(src_ptr + 8));
-
- ss_2 = _mm_srli_si128(srcReg32b2, 2);
- ss_4 = _mm_srli_si128(srcReg32b2, 4);
- ss_1_1 = _mm_unpacklo_epi8(ss_2, _mm_setzero_si128());
- ss_2_1 = _mm_unpacklo_epi8(ss_4, _mm_setzero_si128());
- d1 = _mm_madd_epi16(ss_1_1, secondFilters);
- d2 = _mm_madd_epi16(ss_2_1, thirdFilters);
- srcRegFilt32b2_1 = _mm_add_epi32(d1, d2);
-
- ss_1 = _mm_srli_si128(srcReg32b2, 3);
- ss_3 = _mm_srli_si128(srcReg32b2, 5);
- ss_1_2 = _mm_unpacklo_epi8(ss_1, _mm_setzero_si128());
- ss_2_2 = _mm_unpacklo_epi8(ss_3, _mm_setzero_si128());
- d1 = _mm_madd_epi16(ss_1_2, secondFilters);
- d2 = _mm_madd_epi16(ss_2_2, thirdFilters);
- srcRegFilt32b2_2 = _mm_add_epi32(d1, d2);
-
- res_lo = _mm_unpacklo_epi32(srcRegFilt32b2_1, srcRegFilt32b2_2);
- res_hi = _mm_unpackhi_epi32(srcRegFilt32b2_1, srcRegFilt32b2_2);
- srcRegFilt32b2_1 = _mm_packs_epi32(res_lo, res_hi);
-
- // shift by 6 bit each 16 bit
- srcRegFilt32b1_1 = _mm_adds_epi16(srcRegFilt32b1_1, addFilterReg32);
- srcRegFilt32b2_1 = _mm_adds_epi16(srcRegFilt32b2_1, addFilterReg32);
- srcRegFilt32b1_1 = _mm_srai_epi16(srcRegFilt32b1_1, 6);
- srcRegFilt32b2_1 = _mm_srai_epi16(srcRegFilt32b2_1, 6);
-
- // shrink to 8 bit each 16 bits, the first lane contain the first
- // convolve result and the second lane contain the second convolve result
- srcRegFilt32b1_1 = _mm_packus_epi16(srcRegFilt32b1_1, srcRegFilt32b2_1);
-
- src_ptr += src_pixels_per_line;
-
- _mm_store_si128((__m128i *)output_ptr, srcRegFilt32b1_1);
-
- output_ptr += output_pitch;
- }
-}
-
-void aom_filter_block1d16_v4_sse2(const uint8_t *src_ptr, ptrdiff_t src_pitch,
- uint8_t *output_ptr, ptrdiff_t out_pitch,
- uint32_t output_height,
- const int16_t *filter) {
- __m128i filtersReg;
- __m128i srcReg2, srcReg3, srcReg4, srcReg5, srcReg6;
- __m128i srcReg23_lo, srcReg23_hi, srcReg34_lo, srcReg34_hi;
- __m128i srcReg45_lo, srcReg45_hi, srcReg56_lo, srcReg56_hi;
- __m128i resReg23_lo, resReg34_lo, resReg45_lo, resReg56_lo;
- __m128i resReg23_hi, resReg34_hi, resReg45_hi, resReg56_hi;
- __m128i resReg23_45_lo, resReg34_56_lo, resReg23_45_hi, resReg34_56_hi;
- __m128i resReg23_45, resReg34_56;
- __m128i addFilterReg32, secondFilters, thirdFilters;
- __m128i tmp_0, tmp_1;
- unsigned int i;
- ptrdiff_t src_stride, dst_stride;
-
- addFilterReg32 = _mm_set1_epi16(32);
- filtersReg = _mm_loadu_si128((const __m128i *)filter);
- filtersReg = _mm_srai_epi16(filtersReg, 1);
-
- // coeffs 0 1 0 1 2 3 2 3
- const __m128i tmp0 = _mm_unpacklo_epi32(filtersReg, filtersReg);
- // coeffs 4 5 4 5 6 7 6 7
- const __m128i tmp1 = _mm_unpackhi_epi32(filtersReg, filtersReg);
-
- secondFilters = _mm_unpackhi_epi64(tmp0, tmp0); // coeffs 2 3 2 3 2 3 2 3
- thirdFilters = _mm_unpacklo_epi64(tmp1, tmp1); // coeffs 4 5 4 5 4 5 4 5
-
- // multiply the size of the source and destination stride by two
- src_stride = src_pitch << 1;
- dst_stride = out_pitch << 1;
-
- srcReg2 = _mm_loadu_si128((const __m128i *)(src_ptr + src_pitch * 2));
- srcReg3 = _mm_loadu_si128((const __m128i *)(src_ptr + src_pitch * 3));
- srcReg23_lo = _mm_unpacklo_epi8(srcReg2, srcReg3);
- srcReg23_hi = _mm_unpackhi_epi8(srcReg2, srcReg3);
- __m128i resReg23_lo_1 = _mm_unpacklo_epi8(srcReg23_lo, _mm_setzero_si128());
- __m128i resReg23_lo_2 = _mm_unpackhi_epi8(srcReg23_lo, _mm_setzero_si128());
- __m128i resReg23_hi_1 = _mm_unpacklo_epi8(srcReg23_hi, _mm_setzero_si128());
- __m128i resReg23_hi_2 = _mm_unpackhi_epi8(srcReg23_hi, _mm_setzero_si128());
-
- srcReg4 = _mm_loadu_si128((const __m128i *)(src_ptr + src_pitch * 4));
- srcReg34_lo = _mm_unpacklo_epi8(srcReg3, srcReg4);
- srcReg34_hi = _mm_unpackhi_epi8(srcReg3, srcReg4);
- __m128i resReg34_lo_1 = _mm_unpacklo_epi8(srcReg34_lo, _mm_setzero_si128());
- __m128i resReg34_lo_2 = _mm_unpackhi_epi8(srcReg34_lo, _mm_setzero_si128());
- __m128i resReg34_hi_1 = _mm_unpacklo_epi8(srcReg34_hi, _mm_setzero_si128());
- __m128i resReg34_hi_2 = _mm_unpackhi_epi8(srcReg34_hi, _mm_setzero_si128());
-
- for (i = output_height; i > 1; i -= 2) {
- srcReg5 = _mm_loadu_si128((const __m128i *)(src_ptr + src_pitch * 5));
-
- srcReg45_lo = _mm_unpacklo_epi8(srcReg4, srcReg5);
- srcReg45_hi = _mm_unpackhi_epi8(srcReg4, srcReg5);
-
- srcReg6 = _mm_loadu_si128((const __m128i *)(src_ptr + src_pitch * 6));
-
- srcReg56_lo = _mm_unpacklo_epi8(srcReg5, srcReg6);
- srcReg56_hi = _mm_unpackhi_epi8(srcReg5, srcReg6);
-
- // multiply 2 adjacent elements with the filter and add the result
-
- tmp_0 = _mm_madd_epi16(resReg23_lo_1, secondFilters);
- tmp_1 = _mm_madd_epi16(resReg23_lo_2, secondFilters);
- resReg23_lo = _mm_packs_epi32(tmp_0, tmp_1);
-
- tmp_0 = _mm_madd_epi16(resReg34_lo_1, secondFilters);
- tmp_1 = _mm_madd_epi16(resReg34_lo_2, secondFilters);
- resReg34_lo = _mm_packs_epi32(tmp_0, tmp_1);
-
- __m128i resReg45_lo_1 = _mm_unpacklo_epi8(srcReg45_lo, _mm_setzero_si128());
- __m128i resReg45_lo_2 = _mm_unpackhi_epi8(srcReg45_lo, _mm_setzero_si128());
- tmp_0 = _mm_madd_epi16(resReg45_lo_1, thirdFilters);
- tmp_1 = _mm_madd_epi16(resReg45_lo_2, thirdFilters);
- resReg45_lo = _mm_packs_epi32(tmp_0, tmp_1);
-
- __m128i resReg56_lo_1 = _mm_unpacklo_epi8(srcReg56_lo, _mm_setzero_si128());
- __m128i resReg56_lo_2 = _mm_unpackhi_epi8(srcReg56_lo, _mm_setzero_si128());
- tmp_0 = _mm_madd_epi16(resReg56_lo_1, thirdFilters);
- tmp_1 = _mm_madd_epi16(resReg56_lo_2, thirdFilters);
- resReg56_lo = _mm_packs_epi32(tmp_0, tmp_1);
-
- // add and saturate the results together
- resReg23_45_lo = _mm_adds_epi16(resReg23_lo, resReg45_lo);
- resReg34_56_lo = _mm_adds_epi16(resReg34_lo, resReg56_lo);
-
- // multiply 2 adjacent elements with the filter and add the result
-
- tmp_0 = _mm_madd_epi16(resReg23_hi_1, secondFilters);
- tmp_1 = _mm_madd_epi16(resReg23_hi_2, secondFilters);
- resReg23_hi = _mm_packs_epi32(tmp_0, tmp_1);
-
- tmp_0 = _mm_madd_epi16(resReg34_hi_1, secondFilters);
- tmp_1 = _mm_madd_epi16(resReg34_hi_2, secondFilters);
- resReg34_hi = _mm_packs_epi32(tmp_0, tmp_1);
-
- __m128i resReg45_hi_1 = _mm_unpacklo_epi8(srcReg45_hi, _mm_setzero_si128());
- __m128i resReg45_hi_2 = _mm_unpackhi_epi8(srcReg45_hi, _mm_setzero_si128());
- tmp_0 = _mm_madd_epi16(resReg45_hi_1, thirdFilters);
- tmp_1 = _mm_madd_epi16(resReg45_hi_2, thirdFilters);
- resReg45_hi = _mm_packs_epi32(tmp_0, tmp_1);
-
- __m128i resReg56_hi_1 = _mm_unpacklo_epi8(srcReg56_hi, _mm_setzero_si128());
- __m128i resReg56_hi_2 = _mm_unpackhi_epi8(srcReg56_hi, _mm_setzero_si128());
- tmp_0 = _mm_madd_epi16(resReg56_hi_1, thirdFilters);
- tmp_1 = _mm_madd_epi16(resReg56_hi_2, thirdFilters);
- resReg56_hi = _mm_packs_epi32(tmp_0, tmp_1);
-
- // add and saturate the results together
- resReg23_45_hi = _mm_adds_epi16(resReg23_hi, resReg45_hi);
- resReg34_56_hi = _mm_adds_epi16(resReg34_hi, resReg56_hi);
-
- // shift by 6 bit each 16 bit
- resReg23_45_lo = _mm_adds_epi16(resReg23_45_lo, addFilterReg32);
- resReg34_56_lo = _mm_adds_epi16(resReg34_56_lo, addFilterReg32);
- resReg23_45_hi = _mm_adds_epi16(resReg23_45_hi, addFilterReg32);
- resReg34_56_hi = _mm_adds_epi16(resReg34_56_hi, addFilterReg32);
- resReg23_45_lo = _mm_srai_epi16(resReg23_45_lo, 6);
- resReg34_56_lo = _mm_srai_epi16(resReg34_56_lo, 6);
- resReg23_45_hi = _mm_srai_epi16(resReg23_45_hi, 6);
- resReg34_56_hi = _mm_srai_epi16(resReg34_56_hi, 6);
-
- // shrink to 8 bit each 16 bits, the first lane contain the first
- // convolve result and the second lane contain the second convolve
- // result
- resReg23_45 = _mm_packus_epi16(resReg23_45_lo, resReg23_45_hi);
- resReg34_56 = _mm_packus_epi16(resReg34_56_lo, resReg34_56_hi);
-
- src_ptr += src_stride;
-
- _mm_store_si128((__m128i *)output_ptr, (resReg23_45));
- _mm_store_si128((__m128i *)(output_ptr + out_pitch), (resReg34_56));
-
- output_ptr += dst_stride;
-
- // save part of the registers for next strides
- resReg23_lo_1 = resReg45_lo_1;
- resReg23_lo_2 = resReg45_lo_2;
- resReg23_hi_1 = resReg45_hi_1;
- resReg23_hi_2 = resReg45_hi_2;
- resReg34_lo_1 = resReg56_lo_1;
- resReg34_lo_2 = resReg56_lo_2;
- resReg34_hi_1 = resReg56_hi_1;
- resReg34_hi_2 = resReg56_hi_2;
- srcReg4 = srcReg6;
- }
-}
-
-void aom_filter_block1d8_h4_sse2(const uint8_t *src_ptr,
- ptrdiff_t src_pixels_per_line,
- uint8_t *output_ptr, ptrdiff_t output_pitch,
- uint32_t output_height,
- const int16_t *filter) {
- __m128i filtersReg;
- __m128i addFilterReg32;
- __m128i secondFilters, thirdFilters;
- __m128i srcRegFilt32b1_1, srcRegFilt32b1_2;
- __m128i srcReg32b1;
- unsigned int i;
- src_ptr -= 3;
- addFilterReg32 = _mm_set1_epi16(32);
- filtersReg = _mm_loadu_si128((const __m128i *)filter);
- filtersReg = _mm_srai_epi16(filtersReg, 1);
-
- // coeffs 0 1 0 1 2 3 2 3
- const __m128i tmp_0 = _mm_unpacklo_epi32(filtersReg, filtersReg);
- // coeffs 4 5 4 5 6 7 6 7
- const __m128i tmp_1 = _mm_unpackhi_epi32(filtersReg, filtersReg);
-
- secondFilters = _mm_unpackhi_epi64(tmp_0, tmp_0); // coeffs 2 3 2 3 2 3 2 3
- thirdFilters = _mm_unpacklo_epi64(tmp_1, tmp_1); // coeffs 4 5 4 5 4 5 4 5
-
- for (i = output_height; i > 0; i -= 1) {
- srcReg32b1 = _mm_loadu_si128((const __m128i *)src_ptr);
-
- __m128i ss_2 = _mm_srli_si128(srcReg32b1, 2);
- __m128i ss_4 = _mm_srli_si128(srcReg32b1, 4);
- ss_2 = _mm_unpacklo_epi8(ss_2, _mm_setzero_si128());
- ss_4 = _mm_unpacklo_epi8(ss_4, _mm_setzero_si128());
- __m128i d1 = _mm_madd_epi16(ss_2, secondFilters);
- __m128i d2 = _mm_madd_epi16(ss_4, thirdFilters);
- srcRegFilt32b1_1 = _mm_add_epi32(d1, d2);
-
- __m128i ss_3 = _mm_srli_si128(srcReg32b1, 3);
- __m128i ss_5 = _mm_srli_si128(srcReg32b1, 5);
- ss_3 = _mm_unpacklo_epi8(ss_3, _mm_setzero_si128());
- ss_5 = _mm_unpacklo_epi8(ss_5, _mm_setzero_si128());
- d1 = _mm_madd_epi16(ss_3, secondFilters);
- d2 = _mm_madd_epi16(ss_5, thirdFilters);
- srcRegFilt32b1_2 = _mm_add_epi32(d1, d2);
-
- __m128i res_lo = _mm_unpacklo_epi32(srcRegFilt32b1_1, srcRegFilt32b1_2);
- __m128i res_hi = _mm_unpackhi_epi32(srcRegFilt32b1_1, srcRegFilt32b1_2);
- srcRegFilt32b1_1 = _mm_packs_epi32(res_lo, res_hi);
-
- // shift by 6 bit each 16 bit
- srcRegFilt32b1_1 = _mm_adds_epi16(srcRegFilt32b1_1, addFilterReg32);
- srcRegFilt32b1_1 = _mm_srai_epi16(srcRegFilt32b1_1, 6);
-
- // shrink to 8 bit each 16 bits, the first lane contain the first
- // convolve result and the second lane contain the second convolve result
- srcRegFilt32b1_1 = _mm_packus_epi16(srcRegFilt32b1_1, _mm_setzero_si128());
-
- src_ptr += src_pixels_per_line;
-
- _mm_storel_epi64((__m128i *)output_ptr, srcRegFilt32b1_1);
-
- output_ptr += output_pitch;
- }
-}
-
-void aom_filter_block1d8_v4_sse2(const uint8_t *src_ptr, ptrdiff_t src_pitch,
- uint8_t *output_ptr, ptrdiff_t out_pitch,
- uint32_t output_height,
- const int16_t *filter) {
- __m128i filtersReg;
- __m128i srcReg2, srcReg3, srcReg4, srcReg5, srcReg6;
- __m128i srcReg23_lo, srcReg34_lo;
- __m128i srcReg45_lo, srcReg56_lo;
- __m128i resReg23_lo, resReg34_lo, resReg45_lo, resReg56_lo;
- __m128i resReg23_45_lo, resReg34_56_lo;
- __m128i resReg23_45, resReg34_56;
- __m128i addFilterReg32, secondFilters, thirdFilters;
- __m128i tmp_0, tmp_1;
- unsigned int i;
- ptrdiff_t src_stride, dst_stride;
-
- addFilterReg32 = _mm_set1_epi16(32);
- filtersReg = _mm_loadu_si128((const __m128i *)filter);
- filtersReg = _mm_srai_epi16(filtersReg, 1);
-
- // coeffs 0 1 0 1 2 3 2 3
- const __m128i tmp0 = _mm_unpacklo_epi32(filtersReg, filtersReg);
- // coeffs 4 5 4 5 6 7 6 7
- const __m128i tmp1 = _mm_unpackhi_epi32(filtersReg, filtersReg);
-
- secondFilters = _mm_unpackhi_epi64(tmp0, tmp0); // coeffs 2 3 2 3 2 3 2 3
- thirdFilters = _mm_unpacklo_epi64(tmp1, tmp1); // coeffs 4 5 4 5 4 5 4 5
-
- // multiply the size of the source and destination stride by two
- src_stride = src_pitch << 1;
- dst_stride = out_pitch << 1;
-
- srcReg2 = _mm_loadu_si128((const __m128i *)(src_ptr + src_pitch * 2));
- srcReg3 = _mm_loadu_si128((const __m128i *)(src_ptr + src_pitch * 3));
- srcReg23_lo = _mm_unpacklo_epi8(srcReg2, srcReg3);
- __m128i resReg23_lo_1 = _mm_unpacklo_epi8(srcReg23_lo, _mm_setzero_si128());
- __m128i resReg23_lo_2 = _mm_unpackhi_epi8(srcReg23_lo, _mm_setzero_si128());
-
- srcReg4 = _mm_loadu_si128((const __m128i *)(src_ptr + src_pitch * 4));
- srcReg34_lo = _mm_unpacklo_epi8(srcReg3, srcReg4);
- __m128i resReg34_lo_1 = _mm_unpacklo_epi8(srcReg34_lo, _mm_setzero_si128());
- __m128i resReg34_lo_2 = _mm_unpackhi_epi8(srcReg34_lo, _mm_setzero_si128());
-
- for (i = output_height; i > 1; i -= 2) {
- srcReg5 = _mm_loadu_si128((const __m128i *)(src_ptr + src_pitch * 5));
- srcReg45_lo = _mm_unpacklo_epi8(srcReg4, srcReg5);
-
- srcReg6 = _mm_loadu_si128((const __m128i *)(src_ptr + src_pitch * 6));
- srcReg56_lo = _mm_unpacklo_epi8(srcReg5, srcReg6);
-
- // multiply 2 adjacent elements with the filter and add the result
-
- tmp_0 = _mm_madd_epi16(resReg23_lo_1, secondFilters);
- tmp_1 = _mm_madd_epi16(resReg23_lo_2, secondFilters);
- resReg23_lo = _mm_packs_epi32(tmp_0, tmp_1);
-
- tmp_0 = _mm_madd_epi16(resReg34_lo_1, secondFilters);
- tmp_1 = _mm_madd_epi16(resReg34_lo_2, secondFilters);
- resReg34_lo = _mm_packs_epi32(tmp_0, tmp_1);
-
- __m128i resReg45_lo_1 = _mm_unpacklo_epi8(srcReg45_lo, _mm_setzero_si128());
- __m128i resReg45_lo_2 = _mm_unpackhi_epi8(srcReg45_lo, _mm_setzero_si128());
- tmp_0 = _mm_madd_epi16(resReg45_lo_1, thirdFilters);
- tmp_1 = _mm_madd_epi16(resReg45_lo_2, thirdFilters);
- resReg45_lo = _mm_packs_epi32(tmp_0, tmp_1);
-
- __m128i resReg56_lo_1 = _mm_unpacklo_epi8(srcReg56_lo, _mm_setzero_si128());
- __m128i resReg56_lo_2 = _mm_unpackhi_epi8(srcReg56_lo, _mm_setzero_si128());
- tmp_0 = _mm_madd_epi16(resReg56_lo_1, thirdFilters);
- tmp_1 = _mm_madd_epi16(resReg56_lo_2, thirdFilters);
- resReg56_lo = _mm_packs_epi32(tmp_0, tmp_1);
-
- // add and saturate the results together
- resReg23_45_lo = _mm_adds_epi16(resReg23_lo, resReg45_lo);
- resReg34_56_lo = _mm_adds_epi16(resReg34_lo, resReg56_lo);
-
- // shift by 6 bit each 16 bit
- resReg23_45_lo = _mm_adds_epi16(resReg23_45_lo, addFilterReg32);
- resReg34_56_lo = _mm_adds_epi16(resReg34_56_lo, addFilterReg32);
- resReg23_45_lo = _mm_srai_epi16(resReg23_45_lo, 6);
- resReg34_56_lo = _mm_srai_epi16(resReg34_56_lo, 6);
-
- // shrink to 8 bit each 16 bits, the first lane contain the first
- // convolve result and the second lane contain the second convolve
- // result
- resReg23_45 = _mm_packus_epi16(resReg23_45_lo, _mm_setzero_si128());
- resReg34_56 = _mm_packus_epi16(resReg34_56_lo, _mm_setzero_si128());
-
- src_ptr += src_stride;
-
- _mm_storel_epi64((__m128i *)output_ptr, (resReg23_45));
- _mm_storel_epi64((__m128i *)(output_ptr + out_pitch), (resReg34_56));
-
- output_ptr += dst_stride;
-
- // save part of the registers for next strides
- resReg23_lo_1 = resReg45_lo_1;
- resReg23_lo_2 = resReg45_lo_2;
- resReg34_lo_1 = resReg56_lo_1;
- resReg34_lo_2 = resReg56_lo_2;
- srcReg4 = srcReg6;
- }
-}
-
-void aom_filter_block1d4_h4_sse2(const uint8_t *src_ptr,
- ptrdiff_t src_pixels_per_line,
- uint8_t *output_ptr, ptrdiff_t output_pitch,
- uint32_t output_height,
- const int16_t *filter) {
- __m128i filtersReg;
- __m128i addFilterReg32;
- __m128i secondFilters, thirdFilters;
- __m128i srcRegFilt32b1_1;
- __m128i srcReg32b1;
- unsigned int i;
- src_ptr -= 3;
- addFilterReg32 = _mm_set1_epi16(32);
- filtersReg = _mm_loadu_si128((const __m128i *)filter);
- filtersReg = _mm_srai_epi16(filtersReg, 1);
-
- // coeffs 0 1 0 1 2 3 2 3
- const __m128i tmp_0 = _mm_unpacklo_epi32(filtersReg, filtersReg);
- // coeffs 4 5 4 5 6 7 6 7
- const __m128i tmp_1 = _mm_unpackhi_epi32(filtersReg, filtersReg);
-
- secondFilters = _mm_unpackhi_epi64(tmp_0, tmp_0); // coeffs 2 3 2 3 2 3 2 3
- thirdFilters = _mm_unpacklo_epi64(tmp_1, tmp_1); // coeffs 4 5 4 5 4 5 4 5
-
- for (i = output_height; i > 0; i -= 1) {
- srcReg32b1 = _mm_loadu_si128((const __m128i *)src_ptr);
-
- __m128i ss_2 = _mm_srli_si128(srcReg32b1, 2);
- __m128i ss_3 = _mm_srli_si128(srcReg32b1, 3);
- __m128i ss_4 = _mm_srli_si128(srcReg32b1, 4);
- __m128i ss_5 = _mm_srli_si128(srcReg32b1, 5);
-
- ss_2 = _mm_unpacklo_epi8(ss_2, _mm_setzero_si128());
- ss_3 = _mm_unpacklo_epi8(ss_3, _mm_setzero_si128());
- ss_4 = _mm_unpacklo_epi8(ss_4, _mm_setzero_si128());
- ss_5 = _mm_unpacklo_epi8(ss_5, _mm_setzero_si128());
-
- __m128i ss_1_1 = _mm_unpacklo_epi32(ss_2, ss_3);
- __m128i ss_1_2 = _mm_unpacklo_epi32(ss_4, ss_5);
-
- __m128i d1 = _mm_madd_epi16(ss_1_1, secondFilters);
- __m128i d2 = _mm_madd_epi16(ss_1_2, thirdFilters);
- srcRegFilt32b1_1 = _mm_add_epi32(d1, d2);
-
- srcRegFilt32b1_1 = _mm_packs_epi32(srcRegFilt32b1_1, _mm_setzero_si128());
-
- // shift by 6 bit each 16 bit
- srcRegFilt32b1_1 = _mm_adds_epi16(srcRegFilt32b1_1, addFilterReg32);
- srcRegFilt32b1_1 = _mm_srai_epi16(srcRegFilt32b1_1, 6);
-
- // shrink to 8 bit each 16 bits, the first lane contain the first
- // convolve result and the second lane contain the second convolve result
- srcRegFilt32b1_1 = _mm_packus_epi16(srcRegFilt32b1_1, _mm_setzero_si128());
-
- src_ptr += src_pixels_per_line;
-
- *((int *)(output_ptr)) = _mm_cvtsi128_si32(srcRegFilt32b1_1);
-
- output_ptr += output_pitch;
- }
-}
-
-void aom_filter_block1d4_v4_sse2(const uint8_t *src_ptr, ptrdiff_t src_pitch,
- uint8_t *output_ptr, ptrdiff_t out_pitch,
- uint32_t output_height,
- const int16_t *filter) {
- __m128i filtersReg;
- __m128i srcReg2, srcReg3, srcReg4, srcReg5, srcReg6;
- __m128i srcReg23, srcReg34, srcReg45, srcReg56;
- __m128i resReg23_34, resReg45_56;
- __m128i resReg23_34_45_56;
- __m128i addFilterReg32, secondFilters, thirdFilters;
- __m128i tmp_0, tmp_1;
- unsigned int i;
- ptrdiff_t src_stride, dst_stride;
-
- addFilterReg32 = _mm_set1_epi16(32);
- filtersReg = _mm_loadu_si128((const __m128i *)filter);
- filtersReg = _mm_srai_epi16(filtersReg, 1);
-
- // coeffs 0 1 0 1 2 3 2 3
- const __m128i tmp0 = _mm_unpacklo_epi32(filtersReg, filtersReg);
- // coeffs 4 5 4 5 6 7 6 7
- const __m128i tmp1 = _mm_unpackhi_epi32(filtersReg, filtersReg);
-
- secondFilters = _mm_unpackhi_epi64(tmp0, tmp0); // coeffs 2 3 2 3 2 3 2 3
- thirdFilters = _mm_unpacklo_epi64(tmp1, tmp1); // coeffs 4 5 4 5 4 5 4 5
-
- // multiply the size of the source and destination stride by two
- src_stride = src_pitch << 1;
- dst_stride = out_pitch << 1;
-
- srcReg2 = _mm_loadl_epi64((const __m128i *)(src_ptr + src_pitch * 2));
- srcReg3 = _mm_loadl_epi64((const __m128i *)(src_ptr + src_pitch * 3));
- srcReg23 = _mm_unpacklo_epi8(srcReg2, srcReg3);
- __m128i resReg23 = _mm_unpacklo_epi8(srcReg23, _mm_setzero_si128());
-
- srcReg4 = _mm_loadl_epi64((const __m128i *)(src_ptr + src_pitch * 4));
- srcReg34 = _mm_unpacklo_epi8(srcReg3, srcReg4);
- __m128i resReg34 = _mm_unpacklo_epi8(srcReg34, _mm_setzero_si128());
-
- for (i = output_height; i > 1; i -= 2) {
- srcReg5 = _mm_loadl_epi64((const __m128i *)(src_ptr + src_pitch * 5));
- srcReg45 = _mm_unpacklo_epi8(srcReg4, srcReg5);
- srcReg6 = _mm_loadl_epi64((const __m128i *)(src_ptr + src_pitch * 6));
- srcReg56 = _mm_unpacklo_epi8(srcReg5, srcReg6);
-
- // multiply 2 adjacent elements with the filter and add the result
- tmp_0 = _mm_madd_epi16(resReg23, secondFilters);
- tmp_1 = _mm_madd_epi16(resReg34, secondFilters);
- resReg23_34 = _mm_packs_epi32(tmp_0, tmp_1);
-
- __m128i resReg45 = _mm_unpacklo_epi8(srcReg45, _mm_setzero_si128());
- __m128i resReg56 = _mm_unpacklo_epi8(srcReg56, _mm_setzero_si128());
-
- tmp_0 = _mm_madd_epi16(resReg45, thirdFilters);
- tmp_1 = _mm_madd_epi16(resReg56, thirdFilters);
- resReg45_56 = _mm_packs_epi32(tmp_0, tmp_1);
-
- // add and saturate the results together
- resReg23_34_45_56 = _mm_adds_epi16(resReg23_34, resReg45_56);
-
- // shift by 6 bit each 16 bit
- resReg23_34_45_56 = _mm_adds_epi16(resReg23_34_45_56, addFilterReg32);
- resReg23_34_45_56 = _mm_srai_epi16(resReg23_34_45_56, 6);
-
- // shrink to 8 bit each 16 bits, the first lane contain the first
- // convolve result and the second lane contain the second convolve
- // result
- resReg23_34_45_56 =
- _mm_packus_epi16(resReg23_34_45_56, _mm_setzero_si128());
-
- src_ptr += src_stride;
-
- *((int *)(output_ptr)) = _mm_cvtsi128_si32(resReg23_34_45_56);
- *((int *)(output_ptr + out_pitch)) =
- _mm_cvtsi128_si32(_mm_srli_si128(resReg23_34_45_56, 4));
-
- output_ptr += dst_stride;
-
- // save part of the registers for next strides
- resReg23 = resReg45;
- resReg34 = resReg56;
- srcReg4 = srcReg6;
- }
-}
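
Note on the removed intrinsics above: the aom_filter_block1d{4,8}_{h4,v4}_sse2 kernels applied only the four middle taps (coefficients 2-5) of the 8-tap kernel. The coefficients are pre-halved with _mm_srai_epi16(filtersReg, 1), the products are accumulated with _mm_madd_epi16, then 32 is added and the sum is shifted right by 6 before packing to bytes. For even taps (where the pre-halving is exact) this matches rounding the full-precision sum at FILTER_BITS = 7, roughly as in the scalar sketch below; filter4_px is a hypothetical helper, not part of the library.

    #include <stddef.h>
    #include <stdint.h>

    /* Hypothetical scalar reference for the removed 4-tap path: taps 2..5 are
     * applied to the samples around the output position, the sum is rounded at
     * FILTER_BITS = 7, then clamped to 8 bits. */
    static uint8_t filter4_px(const uint8_t *s, ptrdiff_t stride,
                              const int16_t *filter) {
      int sum = 0;
      for (int k = 2; k <= 5; ++k) sum += filter[k] * s[(k - 3) * stride];
      sum = (sum + 64) >> 7;  /* matches the halved-coefficient (+32, >>6) path
                                 when the taps are even */
      return (uint8_t)(sum < 0 ? 0 : (sum > 255 ? 255 : sum));
    }
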
diff --git a/third_party/aom/aom_dsp/x86/aom_subpixel_8t_sse2.asm b/third_party/aom/aom_dsp/x86/aom_subpixel_8t_sse2.asm
deleted file mode 100644
index 640c5b2416..0000000000
--- a/third_party/aom/aom_dsp/x86/aom_subpixel_8t_sse2.asm
+++ /dev/null
@@ -1,615 +0,0 @@
-;
-; Copyright (c) 2016, Alliance for Open Media. All rights reserved
-;
-; This source code is subject to the terms of the BSD 2 Clause License and
-; the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
-; was not distributed with this source code in the LICENSE file, you can
-; obtain it at www.aomedia.org/license/software. If the Alliance for Open
-; Media Patent License 1.0 was not distributed with this source code in the
-; PATENTS file, you can obtain it at www.aomedia.org/license/patent.
-;
-
-;
-
-
-%include "aom_ports/x86_abi_support.asm"
-
-;Note: tap3 and tap4 have to be applied and added after other taps to avoid
-;overflow.
-
-%macro GET_FILTERS_4 0
- mov rdx, arg(5) ;filter ptr
- mov rcx, 0x0400040
-
- movdqa xmm7, [rdx] ;load filters
- pshuflw xmm0, xmm7, 0b ;k0
- pshuflw xmm1, xmm7, 01010101b ;k1
- pshuflw xmm2, xmm7, 10101010b ;k2
- pshuflw xmm3, xmm7, 11111111b ;k3
- psrldq xmm7, 8
- pshuflw xmm4, xmm7, 0b ;k4
- pshuflw xmm5, xmm7, 01010101b ;k5
- pshuflw xmm6, xmm7, 10101010b ;k6
- pshuflw xmm7, xmm7, 11111111b ;k7
-
- punpcklqdq xmm0, xmm1
- punpcklqdq xmm2, xmm3
- punpcklqdq xmm5, xmm4
- punpcklqdq xmm6, xmm7
-
- movdqa k0k1, xmm0
- movdqa k2k3, xmm2
- movdqa k5k4, xmm5
- movdqa k6k7, xmm6
-
- movq xmm6, rcx
- pshufd xmm6, xmm6, 0
- movdqa krd, xmm6
-
- pxor xmm7, xmm7
- movdqa zero, xmm7
-%endm
-
-%macro APPLY_FILTER_4 1
- punpckldq xmm0, xmm1 ;two row in one register
- punpckldq xmm6, xmm7
- punpckldq xmm2, xmm3
- punpckldq xmm5, xmm4
-
- punpcklbw xmm0, zero ;unpack to word
- punpcklbw xmm6, zero
- punpcklbw xmm2, zero
- punpcklbw xmm5, zero
-
- pmullw xmm0, k0k1 ;multiply the filter factors
- pmullw xmm6, k6k7
- pmullw xmm2, k2k3
- pmullw xmm5, k5k4
-
- paddsw xmm0, xmm6 ;sum
- movdqa xmm1, xmm0
- psrldq xmm1, 8
- paddsw xmm0, xmm1
- paddsw xmm0, xmm2
- psrldq xmm2, 8
- paddsw xmm0, xmm5
- psrldq xmm5, 8
- paddsw xmm0, xmm2
- paddsw xmm0, xmm5
-
- paddsw xmm0, krd ;rounding
- psraw xmm0, 7 ;shift
- packuswb xmm0, xmm0 ;pack to byte
-
-%if %1
- movd xmm1, [rdi]
- pavgb xmm0, xmm1
-%endif
- movd [rdi], xmm0
-%endm
-
-%macro GET_FILTERS 0
- mov rdx, arg(5) ;filter ptr
- mov rsi, arg(0) ;src_ptr
- mov rdi, arg(2) ;output_ptr
- mov rcx, 0x0400040
-
- movdqa xmm7, [rdx] ;load filters
- pshuflw xmm0, xmm7, 0b ;k0
- pshuflw xmm1, xmm7, 01010101b ;k1
- pshuflw xmm2, xmm7, 10101010b ;k2
- pshuflw xmm3, xmm7, 11111111b ;k3
- pshufhw xmm4, xmm7, 0b ;k4
- pshufhw xmm5, xmm7, 01010101b ;k5
- pshufhw xmm6, xmm7, 10101010b ;k6
- pshufhw xmm7, xmm7, 11111111b ;k7
-
- punpcklwd xmm0, xmm0
- punpcklwd xmm1, xmm1
- punpcklwd xmm2, xmm2
- punpcklwd xmm3, xmm3
- punpckhwd xmm4, xmm4
- punpckhwd xmm5, xmm5
- punpckhwd xmm6, xmm6
- punpckhwd xmm7, xmm7
-
- movdqa k0, xmm0 ;store filter factors on stack
- movdqa k1, xmm1
- movdqa k2, xmm2
- movdqa k3, xmm3
- movdqa k4, xmm4
- movdqa k5, xmm5
- movdqa k6, xmm6
- movdqa k7, xmm7
-
- movq xmm6, rcx
- pshufd xmm6, xmm6, 0
- movdqa krd, xmm6 ;rounding
-
- pxor xmm7, xmm7
- movdqa zero, xmm7
-%endm
-
-%macro LOAD_VERT_8 1
- movq xmm0, [rsi + %1] ;0
- movq xmm1, [rsi + rax + %1] ;1
- movq xmm6, [rsi + rdx * 2 + %1] ;6
- lea rsi, [rsi + rax]
- movq xmm7, [rsi + rdx * 2 + %1] ;7
- movq xmm2, [rsi + rax + %1] ;2
- movq xmm3, [rsi + rax * 2 + %1] ;3
- movq xmm4, [rsi + rdx + %1] ;4
- movq xmm5, [rsi + rax * 4 + %1] ;5
-%endm
-
-%macro APPLY_FILTER_8 2
- punpcklbw xmm0, zero
- punpcklbw xmm1, zero
- punpcklbw xmm6, zero
- punpcklbw xmm7, zero
- punpcklbw xmm2, zero
- punpcklbw xmm5, zero
- punpcklbw xmm3, zero
- punpcklbw xmm4, zero
-
- pmullw xmm0, k0
- pmullw xmm1, k1
- pmullw xmm6, k6
- pmullw xmm7, k7
- pmullw xmm2, k2
- pmullw xmm5, k5
- pmullw xmm3, k3
- pmullw xmm4, k4
-
- paddsw xmm0, xmm1
- paddsw xmm0, xmm6
- paddsw xmm0, xmm7
- paddsw xmm0, xmm2
- paddsw xmm0, xmm5
- paddsw xmm0, xmm3
- paddsw xmm0, xmm4
-
- paddsw xmm0, krd ;rounding
- psraw xmm0, 7 ;shift
- packuswb xmm0, xmm0 ;pack back to byte
-%if %1
- movq xmm1, [rdi + %2]
- pavgb xmm0, xmm1
-%endif
- movq [rdi + %2], xmm0
-%endm
-
-SECTION .text
-
-;void aom_filter_block1d4_v8_sse2
-;(
-; unsigned char *src_ptr,
-; unsigned int src_pitch,
-; unsigned char *output_ptr,
-; unsigned int out_pitch,
-; unsigned int output_height,
-; short *filter
-;)
-globalsym(aom_filter_block1d4_v8_sse2)
-sym(aom_filter_block1d4_v8_sse2):
- push rbp
- mov rbp, rsp
- SHADOW_ARGS_TO_STACK 6
- SAVE_XMM 7
- push rsi
- push rdi
- push rbx
- ; end prolog
-
- ALIGN_STACK 16, rax
- sub rsp, 16 * 6
- %define k0k1 [rsp + 16 * 0]
- %define k2k3 [rsp + 16 * 1]
- %define k5k4 [rsp + 16 * 2]
- %define k6k7 [rsp + 16 * 3]
- %define krd [rsp + 16 * 4]
- %define zero [rsp + 16 * 5]
-
- GET_FILTERS_4
-
- mov rsi, arg(0) ;src_ptr
- mov rdi, arg(2) ;output_ptr
-
- movsxd rax, DWORD PTR arg(1) ;pixels_per_line
- movsxd rbx, DWORD PTR arg(3) ;out_pitch
- lea rdx, [rax + rax * 2]
- movsxd rcx, DWORD PTR arg(4) ;output_height
-
-.loop:
- movd xmm0, [rsi] ;load src: row 0
- movd xmm1, [rsi + rax] ;1
- movd xmm6, [rsi + rdx * 2] ;6
- lea rsi, [rsi + rax]
- movd xmm7, [rsi + rdx * 2] ;7
- movd xmm2, [rsi + rax] ;2
- movd xmm3, [rsi + rax * 2] ;3
- movd xmm4, [rsi + rdx] ;4
- movd xmm5, [rsi + rax * 4] ;5
-
- APPLY_FILTER_4 0
-
- lea rdi, [rdi + rbx]
- dec rcx
- jnz .loop
-
- add rsp, 16 * 6
- pop rsp
- pop rbx
- ; begin epilog
- pop rdi
- pop rsi
- RESTORE_XMM
- UNSHADOW_ARGS
- pop rbp
- ret
-
-;void aom_filter_block1d8_v8_sse2
-;(
-; unsigned char *src_ptr,
-; unsigned int src_pitch,
-; unsigned char *output_ptr,
-; unsigned int out_pitch,
-; unsigned int output_height,
-; short *filter
-;)
-globalsym(aom_filter_block1d8_v8_sse2)
-sym(aom_filter_block1d8_v8_sse2):
- push rbp
- mov rbp, rsp
- SHADOW_ARGS_TO_STACK 6
- SAVE_XMM 7
- push rsi
- push rdi
- push rbx
- ; end prolog
-
- ALIGN_STACK 16, rax
- sub rsp, 16 * 10
- %define k0 [rsp + 16 * 0]
- %define k1 [rsp + 16 * 1]
- %define k2 [rsp + 16 * 2]
- %define k3 [rsp + 16 * 3]
- %define k4 [rsp + 16 * 4]
- %define k5 [rsp + 16 * 5]
- %define k6 [rsp + 16 * 6]
- %define k7 [rsp + 16 * 7]
- %define krd [rsp + 16 * 8]
- %define zero [rsp + 16 * 9]
-
- GET_FILTERS
-
- movsxd rax, DWORD PTR arg(1) ;pixels_per_line
- movsxd rbx, DWORD PTR arg(3) ;out_pitch
- lea rdx, [rax + rax * 2]
- movsxd rcx, DWORD PTR arg(4) ;output_height
-
-.loop:
- LOAD_VERT_8 0
- APPLY_FILTER_8 0, 0
-
- lea rdi, [rdi + rbx]
- dec rcx
- jnz .loop
-
- add rsp, 16 * 10
- pop rsp
- pop rbx
- ; begin epilog
- pop rdi
- pop rsi
- RESTORE_XMM
- UNSHADOW_ARGS
- pop rbp
- ret
-
-;void aom_filter_block1d16_v8_sse2
-;(
-; unsigned char *src_ptr,
-; unsigned int src_pitch,
-; unsigned char *output_ptr,
-; unsigned int out_pitch,
-; unsigned int output_height,
-; short *filter
-;)
-globalsym(aom_filter_block1d16_v8_sse2)
-sym(aom_filter_block1d16_v8_sse2):
- push rbp
- mov rbp, rsp
- SHADOW_ARGS_TO_STACK 6
- SAVE_XMM 7
- push rsi
- push rdi
- push rbx
- ; end prolog
-
- ALIGN_STACK 16, rax
- sub rsp, 16 * 10
- %define k0 [rsp + 16 * 0]
- %define k1 [rsp + 16 * 1]
- %define k2 [rsp + 16 * 2]
- %define k3 [rsp + 16 * 3]
- %define k4 [rsp + 16 * 4]
- %define k5 [rsp + 16 * 5]
- %define k6 [rsp + 16 * 6]
- %define k7 [rsp + 16 * 7]
- %define krd [rsp + 16 * 8]
- %define zero [rsp + 16 * 9]
-
- GET_FILTERS
-
- movsxd rax, DWORD PTR arg(1) ;pixels_per_line
- movsxd rbx, DWORD PTR arg(3) ;out_pitch
- lea rdx, [rax + rax * 2]
- movsxd rcx, DWORD PTR arg(4) ;output_height
-
-.loop:
- LOAD_VERT_8 0
- APPLY_FILTER_8 0, 0
- sub rsi, rax
-
- LOAD_VERT_8 8
- APPLY_FILTER_8 0, 8
- add rdi, rbx
-
- dec rcx
- jnz .loop
-
- add rsp, 16 * 10
- pop rsp
- pop rbx
- ; begin epilog
- pop rdi
- pop rsi
- RESTORE_XMM
- UNSHADOW_ARGS
- pop rbp
- ret
-
-;void aom_filter_block1d4_h8_sse2
-;(
-; unsigned char *src_ptr,
-; unsigned int src_pixels_per_line,
-; unsigned char *output_ptr,
-; unsigned int output_pitch,
-; unsigned int output_height,
-; short *filter
-;)
-globalsym(aom_filter_block1d4_h8_sse2)
-sym(aom_filter_block1d4_h8_sse2):
- push rbp
- mov rbp, rsp
- SHADOW_ARGS_TO_STACK 6
- SAVE_XMM 7
- push rsi
- push rdi
- ; end prolog
-
- ALIGN_STACK 16, rax
- sub rsp, 16 * 6
- %define k0k1 [rsp + 16 * 0]
- %define k2k3 [rsp + 16 * 1]
- %define k5k4 [rsp + 16 * 2]
- %define k6k7 [rsp + 16 * 3]
- %define krd [rsp + 16 * 4]
- %define zero [rsp + 16 * 5]
-
- GET_FILTERS_4
-
- mov rsi, arg(0) ;src_ptr
- mov rdi, arg(2) ;output_ptr
-
- movsxd rax, DWORD PTR arg(1) ;pixels_per_line
- movsxd rdx, DWORD PTR arg(3) ;out_pitch
- movsxd rcx, DWORD PTR arg(4) ;output_height
-
-.loop:
- movdqu xmm0, [rsi - 3] ;load src
-
- movdqa xmm1, xmm0
- movdqa xmm6, xmm0
- movdqa xmm7, xmm0
- movdqa xmm2, xmm0
- movdqa xmm3, xmm0
- movdqa xmm5, xmm0
- movdqa xmm4, xmm0
-
- psrldq xmm1, 1
- psrldq xmm6, 6
- psrldq xmm7, 7
- psrldq xmm2, 2
- psrldq xmm3, 3
- psrldq xmm5, 5
- psrldq xmm4, 4
-
- APPLY_FILTER_4 0
-
- lea rsi, [rsi + rax]
- lea rdi, [rdi + rdx]
- dec rcx
- jnz .loop
-
- add rsp, 16 * 6
- pop rsp
-
- ; begin epilog
- pop rdi
- pop rsi
- RESTORE_XMM
- UNSHADOW_ARGS
- pop rbp
- ret
-
-;void aom_filter_block1d8_h8_sse2
-;(
-; unsigned char *src_ptr,
-; unsigned int src_pixels_per_line,
-; unsigned char *output_ptr,
-; unsigned int output_pitch,
-; unsigned int output_height,
-; short *filter
-;)
-globalsym(aom_filter_block1d8_h8_sse2)
-sym(aom_filter_block1d8_h8_sse2):
- push rbp
- mov rbp, rsp
- SHADOW_ARGS_TO_STACK 6
- SAVE_XMM 7
- push rsi
- push rdi
- ; end prolog
-
- ALIGN_STACK 16, rax
- sub rsp, 16 * 10
- %define k0 [rsp + 16 * 0]
- %define k1 [rsp + 16 * 1]
- %define k2 [rsp + 16 * 2]
- %define k3 [rsp + 16 * 3]
- %define k4 [rsp + 16 * 4]
- %define k5 [rsp + 16 * 5]
- %define k6 [rsp + 16 * 6]
- %define k7 [rsp + 16 * 7]
- %define krd [rsp + 16 * 8]
- %define zero [rsp + 16 * 9]
-
- GET_FILTERS
-
- movsxd rax, DWORD PTR arg(1) ;pixels_per_line
- movsxd rdx, DWORD PTR arg(3) ;out_pitch
- movsxd rcx, DWORD PTR arg(4) ;output_height
-
-.loop:
- movdqu xmm0, [rsi - 3] ;load src
-
- movdqa xmm1, xmm0
- movdqa xmm6, xmm0
- movdqa xmm7, xmm0
- movdqa xmm2, xmm0
- movdqa xmm5, xmm0
- movdqa xmm3, xmm0
- movdqa xmm4, xmm0
-
- psrldq xmm1, 1
- psrldq xmm6, 6
- psrldq xmm7, 7
- psrldq xmm2, 2
- psrldq xmm5, 5
- psrldq xmm3, 3
- psrldq xmm4, 4
-
- APPLY_FILTER_8 0, 0
-
- lea rsi, [rsi + rax]
- lea rdi, [rdi + rdx]
- dec rcx
- jnz .loop
-
- add rsp, 16 * 10
- pop rsp
-
- ; begin epilog
- pop rdi
- pop rsi
- RESTORE_XMM
- UNSHADOW_ARGS
- pop rbp
- ret
-
-;void aom_filter_block1d16_h8_sse2
-;(
-; unsigned char *src_ptr,
-; unsigned int src_pixels_per_line,
-; unsigned char *output_ptr,
-; unsigned int output_pitch,
-; unsigned int output_height,
-; short *filter
-;)
-globalsym(aom_filter_block1d16_h8_sse2)
-sym(aom_filter_block1d16_h8_sse2):
- push rbp
- mov rbp, rsp
- SHADOW_ARGS_TO_STACK 6
- SAVE_XMM 7
- push rsi
- push rdi
- ; end prolog
-
- ALIGN_STACK 16, rax
- sub rsp, 16 * 10
- %define k0 [rsp + 16 * 0]
- %define k1 [rsp + 16 * 1]
- %define k2 [rsp + 16 * 2]
- %define k3 [rsp + 16 * 3]
- %define k4 [rsp + 16 * 4]
- %define k5 [rsp + 16 * 5]
- %define k6 [rsp + 16 * 6]
- %define k7 [rsp + 16 * 7]
- %define krd [rsp + 16 * 8]
- %define zero [rsp + 16 * 9]
-
- GET_FILTERS
-
- movsxd rax, DWORD PTR arg(1) ;pixels_per_line
- movsxd rdx, DWORD PTR arg(3) ;out_pitch
- movsxd rcx, DWORD PTR arg(4) ;output_height
-
-.loop:
- movdqu xmm0, [rsi - 3] ;load src
-
- movdqa xmm1, xmm0
- movdqa xmm6, xmm0
- movdqa xmm7, xmm0
- movdqa xmm2, xmm0
- movdqa xmm5, xmm0
- movdqa xmm3, xmm0
- movdqa xmm4, xmm0
-
- psrldq xmm1, 1
- psrldq xmm6, 6
- psrldq xmm7, 7
- psrldq xmm2, 2
- psrldq xmm5, 5
- psrldq xmm3, 3
- psrldq xmm4, 4
-
- APPLY_FILTER_8 0, 0
-
- movdqu xmm0, [rsi + 5] ;load src
-
- movdqa xmm1, xmm0
- movdqa xmm6, xmm0
- movdqa xmm7, xmm0
- movdqa xmm2, xmm0
- movdqa xmm5, xmm0
- movdqa xmm3, xmm0
- movdqa xmm4, xmm0
-
- psrldq xmm1, 1
- psrldq xmm6, 6
- psrldq xmm7, 7
- psrldq xmm2, 2
- psrldq xmm5, 5
- psrldq xmm3, 3
- psrldq xmm4, 4
-
- APPLY_FILTER_8 0, 8
-
- lea rsi, [rsi + rax]
- lea rdi, [rdi + rdx]
- dec rcx
- jnz .loop
-
- add rsp, 16 * 10
- pop rsp
-
- ; begin epilog
- pop rdi
- pop rsi
- RESTORE_XMM
- UNSHADOW_ARGS
- pop rbp
- ret
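
For reference, the deleted aom_subpixel_8t_sse2.asm kernels above implement the full 8-tap path: each tap is broadcast across a register, eight pixels are multiplied per tap with pmullw, the partial sums are accumulated with saturating adds, the krd constant (0x40 = 64) is added, and the result is shifted right by 7 (psraw 7) before packing to bytes. A scalar equivalent, using a hypothetical helper name, would be:

    #include <stddef.h>
    #include <stdint.h>

    /* Hypothetical scalar reference for the removed 8-tap path (APPLY_FILTER_8):
     * eight consecutive samples, rounded with 64 and shifted by FILTER_BITS = 7,
     * then saturated to a byte. */
    static uint8_t filter8_px(const uint8_t *s, ptrdiff_t stride,
                              const int16_t *filter) {
      int sum = 0;
      for (int k = 0; k < 8; ++k) sum += filter[k] * s[k * stride];
      sum = (sum + 64) >> 7;  /* krd = 0x40, psraw 7 */
      return (uint8_t)(sum < 0 ? 0 : (sum > 255 ? 255 : sum));
    }
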
diff --git a/third_party/aom/aom_dsp/x86/aom_subpixel_bilinear_sse2.asm b/third_party/aom/aom_dsp/x86/aom_subpixel_bilinear_sse2.asm
deleted file mode 100644
index 90dd55a4be..0000000000
--- a/third_party/aom/aom_dsp/x86/aom_subpixel_bilinear_sse2.asm
+++ /dev/null
@@ -1,295 +0,0 @@
-;
-; Copyright (c) 2016, Alliance for Open Media. All rights reserved
-;
-; This source code is subject to the terms of the BSD 2 Clause License and
-; the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
-; was not distributed with this source code in the LICENSE file, you can
-; obtain it at www.aomedia.org/license/software. If the Alliance for Open
-; Media Patent License 1.0 was not distributed with this source code in the
-; PATENTS file, you can obtain it at www.aomedia.org/license/patent.
-;
-
-;
-
-%include "aom_ports/x86_abi_support.asm"
-
-%macro GET_PARAM_4 0
- mov rdx, arg(5) ;filter ptr
- mov rsi, arg(0) ;src_ptr
- mov rdi, arg(2) ;output_ptr
- mov rcx, 0x0400040
-
- movdqa xmm3, [rdx] ;load filters
- pshuflw xmm4, xmm3, 11111111b ;k3
- psrldq xmm3, 8
- pshuflw xmm3, xmm3, 0b ;k4
- punpcklqdq xmm4, xmm3 ;k3k4
-
- movq xmm3, rcx ;rounding
- pshufd xmm3, xmm3, 0
-
- pxor xmm2, xmm2
-
- movsxd rax, DWORD PTR arg(1) ;pixels_per_line
- movsxd rdx, DWORD PTR arg(3) ;out_pitch
- movsxd rcx, DWORD PTR arg(4) ;output_height
-%endm
-
-%macro APPLY_FILTER_4 1
-
- punpckldq xmm0, xmm1 ;two row in one register
- punpcklbw xmm0, xmm2 ;unpack to word
- pmullw xmm0, xmm4 ;multiply the filter factors
-
- movdqa xmm1, xmm0
- psrldq xmm1, 8
- paddsw xmm0, xmm1
-
- paddsw xmm0, xmm3 ;rounding
- psraw xmm0, 7 ;shift
- packuswb xmm0, xmm0 ;pack to byte
-
-%if %1
- movd xmm1, [rdi]
- pavgb xmm0, xmm1
-%endif
-
- movd [rdi], xmm0
- lea rsi, [rsi + rax]
- lea rdi, [rdi + rdx]
- dec rcx
-%endm
-
-%macro GET_PARAM 0
- mov rdx, arg(5) ;filter ptr
- mov rsi, arg(0) ;src_ptr
- mov rdi, arg(2) ;output_ptr
- mov rcx, 0x0400040
-
- movdqa xmm7, [rdx] ;load filters
-
- pshuflw xmm6, xmm7, 11111111b ;k3
- pshufhw xmm7, xmm7, 0b ;k4
- punpcklwd xmm6, xmm6
- punpckhwd xmm7, xmm7
-
- movq xmm4, rcx ;rounding
- pshufd xmm4, xmm4, 0
-
- pxor xmm5, xmm5
-
- movsxd rax, DWORD PTR arg(1) ;pixels_per_line
- movsxd rdx, DWORD PTR arg(3) ;out_pitch
- movsxd rcx, DWORD PTR arg(4) ;output_height
-%endm
-
-%macro APPLY_FILTER_8 1
- punpcklbw xmm0, xmm5
- punpcklbw xmm1, xmm5
-
- pmullw xmm0, xmm6
- pmullw xmm1, xmm7
- paddsw xmm0, xmm1
- paddsw xmm0, xmm4 ;rounding
- psraw xmm0, 7 ;shift
- packuswb xmm0, xmm0 ;pack back to byte
-%if %1
- movq xmm1, [rdi]
- pavgb xmm0, xmm1
-%endif
- movq [rdi], xmm0 ;store the result
-
- lea rsi, [rsi + rax]
- lea rdi, [rdi + rdx]
- dec rcx
-%endm
-
-%macro APPLY_FILTER_16 1
- punpcklbw xmm0, xmm5
- punpcklbw xmm1, xmm5
- punpckhbw xmm2, xmm5
- punpckhbw xmm3, xmm5
-
- pmullw xmm0, xmm6
- pmullw xmm1, xmm7
- pmullw xmm2, xmm6
- pmullw xmm3, xmm7
-
- paddsw xmm0, xmm1
- paddsw xmm2, xmm3
-
- paddsw xmm0, xmm4 ;rounding
- paddsw xmm2, xmm4
- psraw xmm0, 7 ;shift
- psraw xmm2, 7
- packuswb xmm0, xmm2 ;pack back to byte
-%if %1
- movdqu xmm1, [rdi]
- pavgb xmm0, xmm1
-%endif
- movdqu [rdi], xmm0 ;store the result
-
- lea rsi, [rsi + rax]
- lea rdi, [rdi + rdx]
- dec rcx
-%endm
-
-SECTION .text
-
-globalsym(aom_filter_block1d4_v2_sse2)
-sym(aom_filter_block1d4_v2_sse2):
- push rbp
- mov rbp, rsp
- SHADOW_ARGS_TO_STACK 6
- push rsi
- push rdi
- ; end prolog
-
- GET_PARAM_4
-.loop:
- movd xmm0, [rsi] ;load src
- movd xmm1, [rsi + rax]
-
- APPLY_FILTER_4 0
- jnz .loop
-
- ; begin epilog
- pop rdi
- pop rsi
- UNSHADOW_ARGS
- pop rbp
- ret
-
-globalsym(aom_filter_block1d8_v2_sse2)
-sym(aom_filter_block1d8_v2_sse2):
- push rbp
- mov rbp, rsp
- SHADOW_ARGS_TO_STACK 6
- SAVE_XMM 7
- push rsi
- push rdi
- ; end prolog
-
- GET_PARAM
-.loop:
- movq xmm0, [rsi] ;0
- movq xmm1, [rsi + rax] ;1
-
- APPLY_FILTER_8 0
- jnz .loop
-
- ; begin epilog
- pop rdi
- pop rsi
- RESTORE_XMM
- UNSHADOW_ARGS
- pop rbp
- ret
-
-globalsym(aom_filter_block1d16_v2_sse2)
-sym(aom_filter_block1d16_v2_sse2):
- push rbp
- mov rbp, rsp
- SHADOW_ARGS_TO_STACK 6
- SAVE_XMM 7
- push rsi
- push rdi
- ; end prolog
-
- GET_PARAM
-.loop:
- movdqu xmm0, [rsi] ;0
- movdqu xmm1, [rsi + rax] ;1
- movdqa xmm2, xmm0
- movdqa xmm3, xmm1
-
- APPLY_FILTER_16 0
- jnz .loop
-
- ; begin epilog
- pop rdi
- pop rsi
- RESTORE_XMM
- UNSHADOW_ARGS
- pop rbp
- ret
-
-globalsym(aom_filter_block1d4_h2_sse2)
-sym(aom_filter_block1d4_h2_sse2):
- push rbp
- mov rbp, rsp
- SHADOW_ARGS_TO_STACK 6
- push rsi
- push rdi
- ; end prolog
-
- GET_PARAM_4
-.loop:
- movdqu xmm0, [rsi] ;load src
- movdqa xmm1, xmm0
- psrldq xmm1, 1
-
- APPLY_FILTER_4 0
- jnz .loop
-
- ; begin epilog
- pop rdi
- pop rsi
- UNSHADOW_ARGS
- pop rbp
- ret
-
-globalsym(aom_filter_block1d8_h2_sse2)
-sym(aom_filter_block1d8_h2_sse2):
- push rbp
- mov rbp, rsp
- SHADOW_ARGS_TO_STACK 6
- SAVE_XMM 7
- push rsi
- push rdi
- ; end prolog
-
- GET_PARAM
-.loop:
- movdqu xmm0, [rsi] ;load src
- movdqa xmm1, xmm0
- psrldq xmm1, 1
-
- APPLY_FILTER_8 0
- jnz .loop
-
- ; begin epilog
- pop rdi
- pop rsi
- RESTORE_XMM
- UNSHADOW_ARGS
- pop rbp
- ret
-
-globalsym(aom_filter_block1d16_h2_sse2)
-sym(aom_filter_block1d16_h2_sse2):
- push rbp
- mov rbp, rsp
- SHADOW_ARGS_TO_STACK 6
- SAVE_XMM 7
- push rsi
- push rdi
- ; end prolog
-
- GET_PARAM
-.loop:
- movdqu xmm0, [rsi] ;load src
- movdqu xmm1, [rsi + 1]
- movdqa xmm2, xmm0
- movdqa xmm3, xmm1
-
- APPLY_FILTER_16 0
- jnz .loop
-
- ; begin epilog
- pop rdi
- pop rsi
- RESTORE_XMM
- UNSHADOW_ARGS
- pop rbp
- ret
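
The deleted bilinear kernels above (aom_filter_block1d{4,8,16}_{h2,v2}_sse2) use only taps 3 and 4 of the filter array (see GET_PARAM_4 / GET_PARAM), so each output sample is a two-tap weighted average with the same 64/7 rounding. A scalar sketch, with a hypothetical helper name:

    #include <stdint.h>

    /* Hypothetical scalar reference for the removed bilinear path: a two-tap
     * weighted average of neighbouring samples. The library's bilinear taps sum
     * to 128, so the rounded result already fits in a byte. */
    static uint8_t bilinear_px(uint8_t a, uint8_t b, const int16_t *filter) {
      return (uint8_t)((filter[3] * a + filter[4] * b + 64) >> 7);
    }
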
diff --git a/third_party/aom/aom_dsp/x86/avg_intrin_sse2.c b/third_party/aom/aom_dsp/x86/avg_intrin_sse2.c
index 9ab9143eee..0b552b704b 100644
--- a/third_party/aom/aom_dsp/x86/avg_intrin_sse2.c
+++ b/third_party/aom/aom_dsp/x86/avg_intrin_sse2.c
@@ -133,7 +133,7 @@ unsigned int aom_avg_8x8_sse2(const uint8_t *s, int p) {
return (avg + 32) >> 6;
}
-void calc_avg_8x8_dual_sse2(const uint8_t *s, int p, int *avg) {
+static void calc_avg_8x8_dual_sse2(const uint8_t *s, int p, int *avg) {
__m128i sum0, sum1, s0, s1, s2, s3, u0;
u0 = _mm_setzero_si128();
s0 = _mm_sad_epu8(_mm_loadu_si128((const __m128i *)(s)), u0);
diff --git a/third_party/aom/aom_dsp/x86/fwd_txfm_impl_sse2.h b/third_party/aom/aom_dsp/x86/fwd_txfm_impl_sse2.h
index 7ee8ba330e..e1db3b950c 100644
--- a/third_party/aom/aom_dsp/x86/fwd_txfm_impl_sse2.h
+++ b/third_party/aom/aom_dsp/x86/fwd_txfm_impl_sse2.h
@@ -30,6 +30,7 @@
#define SUB_EPI16 _mm_sub_epi16
#endif
+#if defined(FDCT4x4_2D_HELPER)
static void FDCT4x4_2D_HELPER(const int16_t *input, int stride, __m128i *in0,
__m128i *in1) {
// Constants
@@ -185,7 +186,9 @@ static void FDCT4x4_2D_HELPER(const int16_t *input, int stride, __m128i *in0,
}
}
}
+#endif // defined(FDCT4x4_2D_HELPER)
+#if defined(FDCT4x4_2D)
void FDCT4x4_2D(const int16_t *input, tran_low_t *output, int stride) {
// This 2D transform implements 4 vertical 1D transforms followed
// by 4 horizontal 1D transforms. The multiplies and adds are as given
@@ -205,13 +208,16 @@ void FDCT4x4_2D(const int16_t *input, tran_low_t *output, int stride) {
storeu_output(&in0, output + 0 * 4);
storeu_output(&in1, output + 2 * 4);
}
+#endif // defined(FDCT4x4_2D)
+#if defined(FDCT4x4_2D_LP)
void FDCT4x4_2D_LP(const int16_t *input, int16_t *output, int stride) {
__m128i in0, in1;
FDCT4x4_2D_HELPER(input, stride, &in0, &in1);
_mm_storeu_si128((__m128i *)(output + 0 * 4), in0);
_mm_storeu_si128((__m128i *)(output + 2 * 4), in1);
}
+#endif // defined(FDCT4x4_2D_LP)
#if CONFIG_INTERNAL_STATS
void FDCT8x8_2D(const int16_t *input, tran_low_t *output, int stride) {
diff --git a/third_party/aom/aom_dsp/x86/highbd_variance_avx2.c b/third_party/aom/aom_dsp/x86/highbd_variance_avx2.c
index b4ff91d856..21e9e8b282 100644
--- a/third_party/aom/aom_dsp/x86/highbd_variance_avx2.c
+++ b/third_party/aom/aom_dsp/x86/highbd_variance_avx2.c
@@ -618,9 +618,9 @@ static uint32_t aom_highbd_var_filter_block2d_bil_avx2(
return (var > 0) ? var : 0;
}
-void aom_highbd_calc8x8var_avx2(const uint16_t *src, int src_stride,
- const uint16_t *ref, int ref_stride,
- uint32_t *sse, int *sum) {
+static void highbd_calc8x8var_avx2(const uint16_t *src, int src_stride,
+ const uint16_t *ref, int ref_stride,
+ uint32_t *sse, int *sum) {
__m256i v_sum_d = _mm256_setzero_si256();
__m256i v_sse_d = _mm256_setzero_si256();
for (int i = 0; i < 8; i += 2) {
@@ -653,9 +653,9 @@ void aom_highbd_calc8x8var_avx2(const uint16_t *src, int src_stride,
*sse = _mm_extract_epi32(v_d, 1);
}
-void aom_highbd_calc16x16var_avx2(const uint16_t *src, int src_stride,
- const uint16_t *ref, int ref_stride,
- uint32_t *sse, int *sum) {
+static void highbd_calc16x16var_avx2(const uint16_t *src, int src_stride,
+ const uint16_t *ref, int ref_stride,
+ uint32_t *sse, int *sum) {
__m256i v_sum_d = _mm256_setzero_si256();
__m256i v_sse_d = _mm256_setzero_si256();
const __m256i one = _mm256_set1_epi16(1);
@@ -703,19 +703,19 @@ static void highbd_10_variance_avx2(const uint16_t *src, int src_stride,
*sse = (uint32_t)ROUND_POWER_OF_TWO(sse_long, 4);
}
-#define VAR_FN(w, h, block_size, shift) \
- uint32_t aom_highbd_10_variance##w##x##h##_avx2( \
- const uint8_t *src8, int src_stride, const uint8_t *ref8, \
- int ref_stride, uint32_t *sse) { \
- int sum; \
- int64_t var; \
- uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
- uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); \
- highbd_10_variance_avx2( \
- src, src_stride, ref, ref_stride, w, h, sse, &sum, \
- aom_highbd_calc##block_size##x##block_size##var_avx2, block_size); \
- var = (int64_t)(*sse) - (((int64_t)sum * sum) >> shift); \
- return (var >= 0) ? (uint32_t)var : 0; \
+#define VAR_FN(w, h, block_size, shift) \
+ uint32_t aom_highbd_10_variance##w##x##h##_avx2( \
+ const uint8_t *src8, int src_stride, const uint8_t *ref8, \
+ int ref_stride, uint32_t *sse) { \
+ int sum; \
+ int64_t var; \
+ uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
+ uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); \
+ highbd_10_variance_avx2(src, src_stride, ref, ref_stride, w, h, sse, &sum, \
+ highbd_calc##block_size##x##block_size##var_avx2, \
+ block_size); \
+ var = (int64_t)(*sse) - (((int64_t)sum * sum) >> shift); \
+ return (var >= 0) ? (uint32_t)var : 0; \
}
VAR_FN(128, 128, 16, 14)
@@ -741,6 +741,17 @@ VAR_FN(8, 32, 8, 8)
#undef VAR_FN
+unsigned int aom_highbd_10_mse16x16_avx2(const uint8_t *src8, int src_stride,
+ const uint8_t *ref8, int ref_stride,
+ unsigned int *sse) {
+ int sum;
+ uint16_t *src = CONVERT_TO_SHORTPTR(src8);
+ uint16_t *ref = CONVERT_TO_SHORTPTR(ref8);
+ highbd_10_variance_avx2(src, src_stride, ref, ref_stride, 16, 16, sse, &sum,
+ highbd_calc16x16var_avx2, 16);
+ return *sse;
+}
+
#define SSE2_HEIGHT(H) \
uint32_t aom_highbd_10_sub_pixel_variance8x##H##_sse2( \
const uint8_t *src8, int src_stride, int x_offset, int y_offset, \
@@ -749,7 +760,7 @@ VAR_FN(8, 32, 8, 8)
SSE2_HEIGHT(8)
SSE2_HEIGHT(16)
-#undef SSE2_Height
+#undef SSE2_HEIGHT
#define HIGHBD_SUBPIX_VAR(W, H) \
uint32_t aom_highbd_10_sub_pixel_variance##W##x##H##_avx2( \
@@ -782,8 +793,8 @@ HIGHBD_SUBPIX_VAR(8, 8)
#undef HIGHBD_SUBPIX_VAR
-uint64_t aom_mse_4xh_16bit_highbd_avx2(uint16_t *dst, int dstride,
- uint16_t *src, int sstride, int h) {
+static uint64_t mse_4xh_16bit_highbd_avx2(uint16_t *dst, int dstride,
+ uint16_t *src, int sstride, int h) {
uint64_t sum = 0;
__m128i reg0_4x16, reg1_4x16, reg2_4x16, reg3_4x16;
__m256i src0_8x16, src1_8x16, src_16x16;
@@ -840,8 +851,8 @@ uint64_t aom_mse_4xh_16bit_highbd_avx2(uint16_t *dst, int dstride,
return sum;
}
-uint64_t aom_mse_8xh_16bit_highbd_avx2(uint16_t *dst, int dstride,
- uint16_t *src, int sstride, int h) {
+static uint64_t mse_8xh_16bit_highbd_avx2(uint16_t *dst, int dstride,
+ uint16_t *src, int sstride, int h) {
uint64_t sum = 0;
__m256i src0_8x16, src1_8x16, src_16x16;
__m256i dst0_8x16, dst1_8x16, dst_16x16;
@@ -897,8 +908,8 @@ uint64_t aom_mse_wxh_16bit_highbd_avx2(uint16_t *dst, int dstride,
assert((w == 8 || w == 4) && (h == 8 || h == 4) &&
"w=8/4 and h=8/4 must satisfy");
switch (w) {
- case 4: return aom_mse_4xh_16bit_highbd_avx2(dst, dstride, src, sstride, h);
- case 8: return aom_mse_8xh_16bit_highbd_avx2(dst, dstride, src, sstride, h);
+ case 4: return mse_4xh_16bit_highbd_avx2(dst, dstride, src, sstride, h);
+ case 8: return mse_8xh_16bit_highbd_avx2(dst, dstride, src, sstride, h);
default: assert(0 && "unsupported width"); return -1;
}
}
diff --git a/third_party/aom/aom_dsp/x86/highbd_variance_sse2.c b/third_party/aom/aom_dsp/x86/highbd_variance_sse2.c
index e897aab645..2fc2e1c0dd 100644
--- a/third_party/aom/aom_dsp/x86/highbd_variance_sse2.c
+++ b/third_party/aom/aom_dsp/x86/highbd_variance_sse2.c
@@ -637,8 +637,8 @@ void aom_highbd_dist_wtd_comp_avg_pred_sse2(
}
}
-uint64_t aom_mse_4xh_16bit_highbd_sse2(uint16_t *dst, int dstride,
- uint16_t *src, int sstride, int h) {
+static uint64_t mse_4xh_16bit_highbd_sse2(uint16_t *dst, int dstride,
+ uint16_t *src, int sstride, int h) {
uint64_t sum = 0;
__m128i reg0_4x16, reg1_4x16;
__m128i src_8x16;
@@ -682,8 +682,8 @@ uint64_t aom_mse_4xh_16bit_highbd_sse2(uint16_t *dst, int dstride,
return sum;
}
-uint64_t aom_mse_8xh_16bit_highbd_sse2(uint16_t *dst, int dstride,
- uint16_t *src, int sstride, int h) {
+static uint64_t mse_8xh_16bit_highbd_sse2(uint16_t *dst, int dstride,
+ uint16_t *src, int sstride, int h) {
uint64_t sum = 0;
__m128i src_8x16;
__m128i dst_8x16;
@@ -728,8 +728,8 @@ uint64_t aom_mse_wxh_16bit_highbd_sse2(uint16_t *dst, int dstride,
assert((w == 8 || w == 4) && (h == 8 || h == 4) &&
"w=8/4 and h=8/4 must satisfy");
switch (w) {
- case 4: return aom_mse_4xh_16bit_highbd_sse2(dst, dstride, src, sstride, h);
- case 8: return aom_mse_8xh_16bit_highbd_sse2(dst, dstride, src, sstride, h);
+ case 4: return mse_4xh_16bit_highbd_sse2(dst, dstride, src, sstride, h);
+ case 8: return mse_8xh_16bit_highbd_sse2(dst, dstride, src, sstride, h);
default: assert(0 && "unsupported width"); return -1;
}
}
diff --git a/third_party/aom/aom_dsp/x86/intrapred_ssse3.c b/third_party/aom/aom_dsp/x86/intrapred_ssse3.c
index fd48260c6f..869f880bda 100644
--- a/third_party/aom/aom_dsp/x86/intrapred_ssse3.c
+++ b/third_party/aom/aom_dsp/x86/intrapred_ssse3.c
@@ -940,10 +940,10 @@ static AOM_FORCE_INLINE __m128i cvtepu16_epi32(__m128i x) {
return _mm_unpacklo_epi16((x), _mm_setzero_si128());
}
-void smooth_predictor_wxh(uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
- const uint8_t *LIBAOM_RESTRICT top_row,
- const uint8_t *LIBAOM_RESTRICT left_column, int width,
- int height) {
+static void smooth_predictor_wxh(uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
+ const uint8_t *LIBAOM_RESTRICT top_row,
+ const uint8_t *LIBAOM_RESTRICT left_column,
+ int width, int height) {
const uint8_t *const sm_weights_h = smooth_weights + height - 4;
const uint8_t *const sm_weights_w = smooth_weights + width - 4;
const __m128i zero = _mm_setzero_si128();
diff --git a/third_party/aom/aom_dsp/x86/masked_sad4d_ssse3.c b/third_party/aom/aom_dsp/x86/masked_sad4d_ssse3.c
index 799ce9ef44..d96a9dd23d 100644
--- a/third_party/aom/aom_dsp/x86/masked_sad4d_ssse3.c
+++ b/third_party/aom/aom_dsp/x86/masked_sad4d_ssse3.c
@@ -103,11 +103,12 @@ static INLINE void masked_sadx4d_ssse3(const uint8_t *src_ptr, int src_stride,
pred = _mm_packus_epi16(pred_l, pred_r); \
res##idx = _mm_add_epi32(res##idx, _mm_sad_epu8(pred, src));
-void aom_masked_sad8xhx4d_ssse3(const uint8_t *src_ptr, int src_stride,
- const uint8_t *ref_array[4], int a_stride,
- const uint8_t *b_ptr, int b_stride,
- const uint8_t *m_ptr, int m_stride, int height,
- int inv_mask, unsigned sad_array[4]) {
+static void masked_sad8xhx4d_ssse3(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_array[4], int a_stride,
+ const uint8_t *b_ptr, int b_stride,
+ const uint8_t *m_ptr, int m_stride,
+ int height, int inv_mask,
+ unsigned sad_array[4]) {
const uint8_t *ref0 = ref_array[0];
const uint8_t *ref1 = ref_array[1];
const uint8_t *ref2 = ref_array[2];
@@ -164,11 +165,12 @@ void aom_masked_sad8xhx4d_ssse3(const uint8_t *src_ptr, int src_stride,
pred = _mm_packus_epi16(pred, _mm_setzero_si128()); \
res##idx = _mm_add_epi32(res##idx, _mm_sad_epu8(pred, src));
-void aom_masked_sad4xhx4d_ssse3(const uint8_t *src_ptr, int src_stride,
- const uint8_t *ref_array[4], int a_stride,
- const uint8_t *b_ptr, int b_stride,
- const uint8_t *m_ptr, int m_stride, int height,
- int inv_mask, unsigned sad_array[4]) {
+static void masked_sad4xhx4d_ssse3(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_array[4], int a_stride,
+ const uint8_t *b_ptr, int b_stride,
+ const uint8_t *m_ptr, int m_stride,
+ int height, int inv_mask,
+ unsigned sad_array[4]) {
const uint8_t *ref0 = ref_array[0];
const uint8_t *ref1 = ref_array[1];
const uint8_t *ref2 = ref_array[2];
@@ -224,22 +226,22 @@ void aom_masked_sad4xhx4d_ssse3(const uint8_t *src_ptr, int src_stride,
msk_stride, m, n, inv_mask, sad_array); \
}
-#define MASKSAD8XN_SSSE3(n) \
- void aom_masked_sad8x##n##x4d_ssse3( \
- const uint8_t *src, int src_stride, const uint8_t *ref[4], \
- int ref_stride, const uint8_t *second_pred, const uint8_t *msk, \
- int msk_stride, int inv_mask, unsigned sad_array[4]) { \
- aom_masked_sad8xhx4d_ssse3(src, src_stride, ref, ref_stride, second_pred, \
- 8, msk, msk_stride, n, inv_mask, sad_array); \
+#define MASKSAD8XN_SSSE3(n) \
+ void aom_masked_sad8x##n##x4d_ssse3( \
+ const uint8_t *src, int src_stride, const uint8_t *ref[4], \
+ int ref_stride, const uint8_t *second_pred, const uint8_t *msk, \
+ int msk_stride, int inv_mask, unsigned sad_array[4]) { \
+ masked_sad8xhx4d_ssse3(src, src_stride, ref, ref_stride, second_pred, 8, \
+ msk, msk_stride, n, inv_mask, sad_array); \
}
-#define MASKSAD4XN_SSSE3(n) \
- void aom_masked_sad4x##n##x4d_ssse3( \
- const uint8_t *src, int src_stride, const uint8_t *ref[4], \
- int ref_stride, const uint8_t *second_pred, const uint8_t *msk, \
- int msk_stride, int inv_mask, unsigned sad_array[4]) { \
- aom_masked_sad4xhx4d_ssse3(src, src_stride, ref, ref_stride, second_pred, \
- 4, msk, msk_stride, n, inv_mask, sad_array); \
+#define MASKSAD4XN_SSSE3(n) \
+ void aom_masked_sad4x##n##x4d_ssse3( \
+ const uint8_t *src, int src_stride, const uint8_t *ref[4], \
+ int ref_stride, const uint8_t *second_pred, const uint8_t *msk, \
+ int msk_stride, int inv_mask, unsigned sad_array[4]) { \
+ masked_sad4xhx4d_ssse3(src, src_stride, ref, ref_stride, second_pred, 4, \
+ msk, msk_stride, n, inv_mask, sad_array); \
}
MASKSADMXN_SSSE3(128, 128)
diff --git a/third_party/aom/aom_dsp/x86/subpel_variance_sse2.asm b/third_party/aom/aom_dsp/x86/subpel_variance_ssse3.asm
index d1d8373456..f424ce01dd 100644
--- a/third_party/aom/aom_dsp/x86/subpel_variance_sse2.asm
+++ b/third_party/aom/aom_dsp/x86/subpel_variance_ssse3.asm
@@ -15,21 +15,6 @@
SECTION_RODATA
pw_8: times 8 dw 8
-bilin_filter_m_sse2: times 8 dw 16
- times 8 dw 0
- times 8 dw 14
- times 8 dw 2
- times 8 dw 12
- times 8 dw 4
- times 8 dw 10
- times 8 dw 6
- times 16 dw 8
- times 8 dw 6
- times 8 dw 10
- times 8 dw 4
- times 8 dw 12
- times 8 dw 2
- times 8 dw 14
bilin_filter_m_ssse3: times 8 db 16, 0
times 8 db 14, 2
@@ -109,9 +94,6 @@ SECTION .text
%if cpuflag(ssse3)
%define bilin_filter_m bilin_filter_m_ssse3
%define filter_idx_shift 4
-%else
-%define bilin_filter_m bilin_filter_m_sse2
-%define filter_idx_shift 5
%endif
; FIXME(rbultje) only bilinear filters use >8 registers, and ssse3 only uses
; 11, not 13, if the registers are ordered correctly. May make a minor speed
@@ -1449,21 +1431,11 @@ SECTION .text
; location in the sse/2 version, rather than duplicating that code in the
; binary.
-INIT_XMM sse2
-SUBPEL_VARIANCE 4
-SUBPEL_VARIANCE 8
-SUBPEL_VARIANCE 16
-
INIT_XMM ssse3
SUBPEL_VARIANCE 4
SUBPEL_VARIANCE 8
SUBPEL_VARIANCE 16
-INIT_XMM sse2
-SUBPEL_VARIANCE 4, 1
-SUBPEL_VARIANCE 8, 1
-SUBPEL_VARIANCE 16, 1
-
INIT_XMM ssse3
SUBPEL_VARIANCE 4, 1
SUBPEL_VARIANCE 8, 1
diff --git a/third_party/aom/aom_dsp/x86/synonyms.h b/third_party/aom/aom_dsp/x86/synonyms.h
index 6744ec51d0..74318de2e5 100644
--- a/third_party/aom/aom_dsp/x86/synonyms.h
+++ b/third_party/aom/aom_dsp/x86/synonyms.h
@@ -46,6 +46,25 @@ static INLINE __m128i xx_loadu_128(const void *a) {
return _mm_loadu_si128((const __m128i *)a);
}
+
+// _mm_loadu_si64 was introduced in GCC 9; reimplement the function
+// manually on older compilers.
+#if !defined(__clang__) && __GNUC__ < 9
+static INLINE __m128i xx_loadu_2x64(const void *hi, const void *lo) {
+ __m64 hi_, lo_;
+ memcpy(&hi_, hi, sizeof(hi_));
+ memcpy(&lo_, lo, sizeof(lo_));
+ return _mm_set_epi64(hi_, lo_);
+}
+#else
+// Load 64 bits from each of hi and lo, and pack into an SSE register.
+// Since directly loading as `int64_t`s and using _mm_set_epi64 may violate
+// the strict aliasing rule, this takes a different approach.
+static INLINE __m128i xx_loadu_2x64(const void *hi, const void *lo) {
+ return _mm_unpacklo_epi64(_mm_loadu_si64(lo), _mm_loadu_si64(hi));
+}
+#endif
+
static INLINE void xx_storel_32(void *const a, const __m128i v) {
const int val = _mm_cvtsi128_si32(v);
memcpy(a, &val, sizeof(val));
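
A possible caller for the xx_loadu_2x64() helper added above: it packs two unaligned 8-byte rows into one 128-bit register without going through an int64_t lvalue, which is what the strict-aliasing note refers to. The wrapper name below is made up for illustration.

    #include <stdint.h>

    #include "aom_dsp/x86/synonyms.h"

    /* Load one 8-pixel row from each of two adjacent rows into a single
     * __m128i: low 64 bits = row0, high 64 bits = row1. */
    static inline __m128i load_two_rows(const uint8_t *row0,
                                        const uint8_t *row1) {
      return xx_loadu_2x64(row1, row0);
    }
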
diff --git a/third_party/aom/aom_dsp/x86/synonyms_avx2.h b/third_party/aom/aom_dsp/x86/synonyms_avx2.h
index b729e5f410..7548d4d4f4 100644
--- a/third_party/aom/aom_dsp/x86/synonyms_avx2.h
+++ b/third_party/aom/aom_dsp/x86/synonyms_avx2.h
@@ -43,6 +43,16 @@ static INLINE void yy_storeu_256(void *const a, const __m256i v) {
_mm256_storeu_si256((__m256i *)a, v);
}
+// Fill an AVX register using an interleaved pair of values, i.e. set the
+// 16 channels to {a, b} repeated 8 times, using the same channel ordering
+// as when a register is stored to / loaded from memory.
+//
+// This is useful for rearranging filter kernels for use with the
+// _mm_madd_epi16 instruction.
+static INLINE __m256i yy_set2_epi16(int16_t a, int16_t b) {
+ return _mm256_setr_epi16(a, b, a, b, a, b, a, b, a, b, a, b, a, b, a, b);
+}
+
// The _mm256_set1_epi64x() intrinsic is undefined for some Visual Studio
// compilers. The following function is equivalent to _mm256_set1_epi64x()
// acting on a 32-bit integer.
@@ -61,11 +71,26 @@ static INLINE __m256i yy_set_m128i(__m128i hi, __m128i lo) {
return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), hi, 1);
}
+#define GCC_VERSION (__GNUC__ * 10000 \
+ + __GNUC_MINOR__ * 100 \
+ + __GNUC_PATCHLEVEL__)
+
+// _mm256_loadu2_m128i has been introduced in GCC 10.1
+#if !defined(__clang__) && GCC_VERSION < 101000
+static INLINE __m256i yy_loadu2_128(const void *hi, const void *lo) {
+ __m128i mhi = _mm_loadu_si128((const __m128i *)(hi));
+ __m128i mlo = _mm_loadu_si128((const __m128i *)(lo));
+ return _mm256_set_m128i(mhi, mlo);
+}
+#else
static INLINE __m256i yy_loadu2_128(const void *hi, const void *lo) {
__m128i mhi = _mm_loadu_si128((const __m128i *)(hi));
__m128i mlo = _mm_loadu_si128((const __m128i *)(lo));
return yy_set_m128i(mhi, mlo);
}
+#endif
+
+#undef GCC_VERSION
static INLINE void yy_storeu2_128(void *hi, void *lo, const __m256i a) {
_mm_storeu_si128((__m128i *)hi, _mm256_extracti128_si256(a, 1));
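
A possible use of the yy_set2_epi16() helper added above: pairing an interleaved two-tap kernel with _mm256_madd_epi16 so each 32-bit lane receives a*p0 + b*p1 when the pixels are interleaved the same way. The function name below is illustrative only.

    #include <stdint.h>
    #include <immintrin.h>

    #include "aom_dsp/x86/synonyms_avx2.h"

    /* Multiply-accumulate an interleaved {p0, p1, p0, p1, ...} vector of 16-bit
     * pixels against the two-tap kernel {a, b}, giving a*p0 + b*p1 per lane. */
    static inline __m256i two_tap_madd_avx2(__m256i pixels_interleaved,
                                            int16_t a, int16_t b) {
      return _mm256_madd_epi16(pixels_interleaved, yy_set2_epi16(a, b));
    }
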
diff --git a/third_party/aom/aom_dsp/x86/variance_avx2.c b/third_party/aom/aom_dsp/x86/variance_avx2.c
index 046d6f10f8..0f872fc392 100644
--- a/third_party/aom/aom_dsp/x86/variance_avx2.c
+++ b/third_party/aom/aom_dsp/x86/variance_avx2.c
@@ -518,8 +518,8 @@ void aom_highbd_comp_mask_pred_avx2(uint8_t *comp_pred8, const uint8_t *pred8,
}
}
-uint64_t aom_mse_4xh_16bit_avx2(uint8_t *dst, int dstride, uint16_t *src,
- int sstride, int h) {
+static uint64_t mse_4xh_16bit_avx2(uint8_t *dst, int dstride, uint16_t *src,
+ int sstride, int h) {
uint64_t sum = 0;
__m128i dst0_4x8, dst1_4x8, dst2_4x8, dst3_4x8, dst_16x8;
__m128i src0_4x16, src1_4x16, src2_4x16, src3_4x16;
@@ -575,8 +575,9 @@ uint64_t aom_mse_4xh_16bit_avx2(uint8_t *dst, int dstride, uint16_t *src,
// In src buffer, each 4x4 block in a 32x32 filter block is stored sequentially.
// Hence src_blk_stride is same as block width. Whereas dst buffer is a frame
// buffer, thus dstride is a frame level stride.
-uint64_t aom_mse_4xh_quad_16bit_avx2(uint8_t *dst, int dstride, uint16_t *src,
- int src_blk_stride, int h) {
+static uint64_t mse_4xh_quad_16bit_avx2(uint8_t *dst, int dstride,
+ uint16_t *src, int src_blk_stride,
+ int h) {
uint64_t sum = 0;
__m128i dst0_16x8, dst1_16x8, dst2_16x8, dst3_16x8;
__m256i dst0_16x16, dst1_16x16, dst2_16x16, dst3_16x16;
@@ -665,8 +666,8 @@ uint64_t aom_mse_4xh_quad_16bit_avx2(uint8_t *dst, int dstride, uint16_t *src,
return sum;
}
-uint64_t aom_mse_8xh_16bit_avx2(uint8_t *dst, int dstride, uint16_t *src,
- int sstride, int h) {
+static uint64_t mse_8xh_16bit_avx2(uint8_t *dst, int dstride, uint16_t *src,
+ int sstride, int h) {
uint64_t sum = 0;
__m128i dst0_8x8, dst1_8x8, dst3_16x8;
__m256i src0_8x16, src1_8x16, src_16x16, dst_16x16;
@@ -715,8 +716,9 @@ uint64_t aom_mse_8xh_16bit_avx2(uint8_t *dst, int dstride, uint16_t *src,
// In src buffer, each 8x8 block in a 64x64 filter block is stored sequentially.
// Hence src_blk_stride is same as block width. Whereas dst buffer is a frame
// buffer, thus dstride is a frame level stride.
-uint64_t aom_mse_8xh_dual_16bit_avx2(uint8_t *dst, int dstride, uint16_t *src,
- int src_blk_stride, int h) {
+static uint64_t mse_8xh_dual_16bit_avx2(uint8_t *dst, int dstride,
+ uint16_t *src, int src_blk_stride,
+ int h) {
uint64_t sum = 0;
__m128i dst0_16x8, dst1_16x8;
__m256i dst0_16x16, dst1_16x16;
@@ -780,8 +782,8 @@ uint64_t aom_mse_wxh_16bit_avx2(uint8_t *dst, int dstride, uint16_t *src,
assert((w == 8 || w == 4) && (h == 8 || h == 4) &&
"w=8/4 and h=8/4 must be satisfied");
switch (w) {
- case 4: return aom_mse_4xh_16bit_avx2(dst, dstride, src, sstride, h);
- case 8: return aom_mse_8xh_16bit_avx2(dst, dstride, src, sstride, h);
+ case 4: return mse_4xh_16bit_avx2(dst, dstride, src, sstride, h);
+ case 8: return mse_8xh_16bit_avx2(dst, dstride, src, sstride, h);
default: assert(0 && "unsupported width"); return -1;
}
}
@@ -795,8 +797,8 @@ uint64_t aom_mse_16xh_16bit_avx2(uint8_t *dst, int dstride, uint16_t *src,
assert((w == 8 || w == 4) && (h == 8 || h == 4) &&
"w=8/4 and h=8/4 must be satisfied");
switch (w) {
- case 4: return aom_mse_4xh_quad_16bit_avx2(dst, dstride, src, w * h, h);
- case 8: return aom_mse_8xh_dual_16bit_avx2(dst, dstride, src, w * h, h);
+ case 4: return mse_4xh_quad_16bit_avx2(dst, dstride, src, w * h, h);
+ case 8: return mse_8xh_dual_16bit_avx2(dst, dstride, src, w * h, h);
default: assert(0 && "unsupported width"); return -1;
}
}
diff --git a/third_party/aom/aom_dsp/x86/variance_impl_avx2.c b/third_party/aom/aom_dsp/x86/variance_impl_avx2.c
index 9e9e70ea01..57a1cee781 100644
--- a/third_party/aom/aom_dsp/x86/variance_impl_avx2.c
+++ b/third_party/aom/aom_dsp/x86/variance_impl_avx2.c
@@ -648,7 +648,7 @@ MAKE_SUB_PIXEL_VAR_16XH(4, 2)
#endif
#define MAKE_SUB_PIXEL_AVG_VAR_32XH(height, log2height) \
- int aom_sub_pixel_avg_variance32x##height##_imp_avx2( \
+ static int sub_pixel_avg_variance32x##height##_imp_avx2( \
const uint8_t *src, int src_stride, int x_offset, int y_offset, \
const uint8_t *dst, int dst_stride, const uint8_t *sec, int sec_stride, \
unsigned int *sse) { \
@@ -876,7 +876,7 @@ MAKE_SUB_PIXEL_VAR_16XH(4, 2)
const uint8_t *src, int src_stride, int x_offset, int y_offset, \
const uint8_t *dst, int dst_stride, unsigned int *sse, \
const uint8_t *sec_ptr) { \
- const int sum = aom_sub_pixel_avg_variance32x##height##_imp_avx2( \
+ const int sum = sub_pixel_avg_variance32x##height##_imp_avx2( \
src, src_stride, x_offset, y_offset, dst, dst_stride, sec_ptr, 32, \
sse); \
return *sse - (unsigned int)(((int64_t)sum * sum) >> (5 + log2height)); \
@@ -899,7 +899,7 @@ MAKE_SUB_PIXEL_AVG_VAR_32XH(16, 4)
const uint8_t *sec_ptr = sec; \
for (int j = 0; j < (h / hf); ++j) { \
unsigned int sse2; \
- const int se2 = aom_sub_pixel_avg_variance##wf##x##hf##_imp_avx2( \
+ const int se2 = sub_pixel_avg_variance##wf##x##hf##_imp_avx2( \
src_ptr, src_stride, x_offset, y_offset, dst_ptr, dst_stride, \
sec_ptr, w, &sse2); \
dst_ptr += hf * dst_stride; \
diff --git a/third_party/aom/aom_dsp/x86/variance_sse2.c b/third_party/aom/aom_dsp/x86/variance_sse2.c
index faec9cf73d..81b30072a5 100644
--- a/third_party/aom/aom_dsp/x86/variance_sse2.c
+++ b/third_party/aom/aom_dsp/x86/variance_sse2.c
@@ -415,7 +415,6 @@ unsigned int aom_mse16x16_sse2(const uint8_t *src, int src_stride,
DECL(8, opt); \
DECL(16, opt)
-DECLS(sse2);
DECLS(ssse3);
#undef DECLS
#undef DECL
@@ -492,7 +491,6 @@ DECLS(ssse3);
FN(4, 4, 4, 2, 2, opt, (int32_t), (int32_t))
#endif
-FNS(sse2)
FNS(ssse3)
#undef FNS
@@ -510,7 +508,6 @@ FNS(ssse3)
DECL(8, opt); \
DECL(16, opt)
-DECLS(sse2);
DECLS(ssse3);
#undef DECL
#undef DECLS
@@ -591,7 +588,6 @@ DECLS(ssse3);
FN(4, 4, 4, 2, 2, opt, (uint32_t), (int32_t))
#endif
-FNS(sse2)
FNS(ssse3)
#undef FNS
@@ -710,8 +706,8 @@ void aom_highbd_comp_mask_pred_sse2(uint8_t *comp_pred8, const uint8_t *pred8,
}
}
-uint64_t aom_mse_4xh_16bit_sse2(uint8_t *dst, int dstride, uint16_t *src,
- int sstride, int h) {
+static uint64_t mse_4xh_16bit_sse2(uint8_t *dst, int dstride, uint16_t *src,
+ int sstride, int h) {
uint64_t sum = 0;
__m128i dst0_8x8, dst1_8x8, dst_16x8;
__m128i src0_16x4, src1_16x4, src_16x8;
@@ -744,8 +740,8 @@ uint64_t aom_mse_4xh_16bit_sse2(uint8_t *dst, int dstride, uint16_t *src,
return sum;
}
-uint64_t aom_mse_8xh_16bit_sse2(uint8_t *dst, int dstride, uint16_t *src,
- int sstride, int h) {
+static uint64_t mse_8xh_16bit_sse2(uint8_t *dst, int dstride, uint16_t *src,
+ int sstride, int h) {
uint64_t sum = 0;
__m128i dst_8x8, dst_16x8;
__m128i src_16x8;
@@ -781,8 +777,8 @@ uint64_t aom_mse_wxh_16bit_sse2(uint8_t *dst, int dstride, uint16_t *src,
assert((w == 8 || w == 4) && (h == 8 || h == 4) &&
"w=8/4 and h=8/4 must satisfy");
switch (w) {
- case 4: return aom_mse_4xh_16bit_sse2(dst, dstride, src, sstride, h);
- case 8: return aom_mse_8xh_16bit_sse2(dst, dstride, src, sstride, h);
+ case 4: return mse_4xh_16bit_sse2(dst, dstride, src, sstride, h);
+ case 8: return mse_8xh_16bit_sse2(dst, dstride, src, sstride, h);
default: assert(0 && "unsupported width"); return -1;
}
}
diff --git a/third_party/aom/aom_ports/aarch64_cpudetect.c b/third_party/aom/aom_ports/aarch64_cpudetect.c
index 43d5a149c8..159e5b1008 100644
--- a/third_party/aom/aom_ports/aarch64_cpudetect.c
+++ b/third_party/aom/aom_ports/aarch64_cpudetect.c
@@ -9,8 +9,12 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
+#include "config/aom_config.h"
+
#include "arm_cpudetect.h"
+#include "aom_ports/arm.h"
+
#if defined(__APPLE__)
#include <sys/sysctl.h>
#endif
@@ -104,12 +108,18 @@ static int arm_get_cpu_caps(void) {
#define AOM_AARCH64_HWCAP_CRC32 (1 << 7)
#define AOM_AARCH64_HWCAP_ASIMDDP (1 << 20)
#define AOM_AARCH64_HWCAP_SVE (1 << 22)
+#define AOM_AARCH64_HWCAP2_SVE2 (1 << 1)
#define AOM_AARCH64_HWCAP2_I8MM (1 << 13)
static int arm_get_cpu_caps(void) {
int flags = 0;
+#if HAVE_ARM_CRC32 || HAVE_NEON_DOTPROD || HAVE_SVE
unsigned long hwcap = getauxval(AT_HWCAP);
+#endif
+#if HAVE_NEON_I8MM || HAVE_SVE2
unsigned long hwcap2 = getauxval(AT_HWCAP2);
+#endif
+
#if HAVE_NEON
flags |= HAS_NEON; // Neon is mandatory in Armv8.0-A.
#endif // HAVE_NEON
@@ -125,6 +135,9 @@ static int arm_get_cpu_caps(void) {
#if HAVE_SVE
if (hwcap & AOM_AARCH64_HWCAP_SVE) flags |= HAS_SVE;
#endif // HAVE_SVE
+#if HAVE_SVE2
+ if (hwcap2 & AOM_AARCH64_HWCAP2_SVE2) flags |= HAS_SVE2;
+#endif // HAVE_SVE2
return flags;
}
@@ -184,5 +197,8 @@ int aom_arm_cpu_caps(void) {
if (!(flags & HAS_NEON_DOTPROD)) flags &= ~HAS_SVE;
if (!(flags & HAS_NEON_I8MM)) flags &= ~HAS_SVE;
+ // Restrict flags: SVE2 assumes that FEAT_SVE is available.
+ if (!(flags & HAS_SVE)) flags &= ~HAS_SVE2;
+
return flags;
}
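
With the new HAS_SVE2 bit and the extra masking above, a caller only needs a single flag test; the sketch below assumes the usual aom_arm_cpu_caps() entry point declared in aom_ports/arm.h.

    #include "aom_ports/arm.h"

    /* Returns non-zero when SVE2 paths may be used. HAS_SVE2 already implies
     * HAS_SVE (and the Neon features) because of the masking in
     * aom_arm_cpu_caps(). */
    static int can_use_sve2(void) {
      return (aom_arm_cpu_caps() & HAS_SVE2) != 0;
    }
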
diff --git a/third_party/aom/aom_ports/arm.h b/third_party/aom/aom_ports/arm.h
index 853741d19a..a57510895b 100644
--- a/third_party/aom/aom_ports/arm.h
+++ b/third_party/aom/aom_ports/arm.h
@@ -29,6 +29,8 @@ extern "C" {
#define HAS_NEON_I8MM (1 << 3)
// Armv8.2-A optional SVE instructions, mandatory from Armv9.0-A.
#define HAS_SVE (1 << 4)
+// Armv9.0-A SVE2 instructions.
+#define HAS_SVE2 (1 << 5)
int aom_arm_cpu_caps(void);
diff --git a/third_party/aom/aom_ports/mem.h b/third_party/aom/aom_ports/mem.h
index a70ce825b1..77180068ae 100644
--- a/third_party/aom/aom_ports/mem.h
+++ b/third_party/aom/aom_ports/mem.h
@@ -24,7 +24,13 @@
#define DECLARE_ALIGNED(n, typ, val) typ val
#endif
-#if HAVE_NEON && defined(_MSC_VER)
+#if defined(__has_builtin)
+#define AOM_HAS_BUILTIN(x) __has_builtin(x)
+#else
+#define AOM_HAS_BUILTIN(x) 0
+#endif
+
+#if !AOM_HAS_BUILTIN(__builtin_prefetch) && !defined(__GNUC__)
#define __builtin_prefetch(x)
#endif
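
The new guard above only stubs out __builtin_prefetch() when the compiler neither defines __GNUC__ nor reports the builtin through __has_builtin, so call sites can stay unconditional. A hypothetical call site:

    #include <stdint.h>

    #include "aom_ports/mem.h"

    /* Prefetch the next few rows of a source block; on toolchains without the
     * builtin this compiles to nothing because of the stub macro above. */
    static void prefetch_rows(const uint8_t *src, int stride, int rows) {
      for (int i = 0; i < rows; ++i) __builtin_prefetch(src + i * stride);
    }
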
diff --git a/third_party/aom/aom_scale/aom_scale_rtcd.pl b/third_party/aom/aom_scale/aom_scale_rtcd.pl
index ae0a85687f..0d545c2f3c 100644
--- a/third_party/aom/aom_scale/aom_scale_rtcd.pl
+++ b/third_party/aom/aom_scale/aom_scale_rtcd.pl
@@ -10,6 +10,8 @@
##
sub aom_scale_forward_decls() {
print <<EOF
+#include <stdbool.h>
+
struct yv12_buffer_config;
EOF
}
@@ -26,17 +28,17 @@ if (aom_config("CONFIG_SPATIAL_RESAMPLING") eq "yes") {
add_proto qw/void aom_vertical_band_2_1_scale_i/, "unsigned char *source, int src_pitch, unsigned char *dest, int dest_pitch, unsigned int dest_width";
}
-add_proto qw/int aom_yv12_realloc_with_new_border/, "struct yv12_buffer_config *ybf, int new_border, int byte_alignment, int num_pyramid_levels, int num_planes";
+add_proto qw/int aom_yv12_realloc_with_new_border/, "struct yv12_buffer_config *ybf, int new_border, int byte_alignment, bool alloc_pyramid, int num_planes";
add_proto qw/void aom_yv12_extend_frame_borders/, "struct yv12_buffer_config *ybf, const int num_planes";
add_proto qw/void aom_yv12_copy_frame/, "const struct yv12_buffer_config *src_bc, struct yv12_buffer_config *dst_bc, const int num_planes";
-add_proto qw/void aom_yv12_copy_y/, "const struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc";
+add_proto qw/void aom_yv12_copy_y/, "const struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc, int use_crop";
-add_proto qw/void aom_yv12_copy_u/, "const struct yv12_buffer_config *src_bc, struct yv12_buffer_config *dst_bc";
+add_proto qw/void aom_yv12_copy_u/, "const struct yv12_buffer_config *src_bc, struct yv12_buffer_config *dst_bc, int use_crop";
-add_proto qw/void aom_yv12_copy_v/, "const struct yv12_buffer_config *src_bc, struct yv12_buffer_config *dst_bc";
+add_proto qw/void aom_yv12_copy_v/, "const struct yv12_buffer_config *src_bc, struct yv12_buffer_config *dst_bc, int use_crop";
add_proto qw/void aom_yv12_partial_copy_y/, "const struct yv12_buffer_config *src_ybc, int hstart1, int hend1, int vstart1, int vend1, struct yv12_buffer_config *dst_ybc, int hstart2, int vstart2";
add_proto qw/void aom_yv12_partial_coloc_copy_y/, "const struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc, int hstart, int hend, int vstart, int vend";
@@ -47,7 +49,7 @@ add_proto qw/void aom_yv12_partial_coloc_copy_v/, "const struct yv12_buffer_conf
add_proto qw/void aom_extend_frame_borders_plane_row/, "const struct yv12_buffer_config *ybf, int plane, int v_start, int v_end";
-add_proto qw/void aom_extend_frame_borders/, "struct yv12_buffer_config *ybf, const int num_planes";
+add_proto qw/void aom_extend_frame_borders/, "struct yv12_buffer_config *ybf, int num_planes";
add_proto qw/void aom_extend_frame_inner_borders/, "struct yv12_buffer_config *ybf, const int num_planes";
diff --git a/third_party/aom/aom_scale/generic/yv12config.c b/third_party/aom/aom_scale/generic/yv12config.c
index 94b400b9e0..ed35bb1acb 100644
--- a/third_party/aom/aom_scale/generic/yv12config.c
+++ b/third_party/aom/aom_scale/generic/yv12config.c
@@ -11,9 +11,12 @@
#include <assert.h>
+#include "config/aom_config.h"
+
+#include "aom/aom_image.h"
#include "aom/internal/aom_image_internal.h"
-#include "aom_dsp/pyramid.h"
#include "aom_dsp/flow_estimation/corner_detect.h"
+#include "aom_dsp/pyramid.h"
#include "aom_mem/aom_mem.h"
#include "aom_ports/mem.h"
#include "aom_scale/yv12config.h"
@@ -60,7 +63,7 @@ static int realloc_frame_buffer_aligned(
const uint64_t uvplane_size, const int aligned_width,
const int aligned_height, const int uv_width, const int uv_height,
const int uv_stride, const int uv_border_w, const int uv_border_h,
- int num_pyramid_levels, int alloc_y_plane_only) {
+ bool alloc_pyramid, int alloc_y_plane_only) {
if (ybf) {
const int aom_byte_align = (byte_alignment == 0) ? 1 : byte_alignment;
const uint64_t frame_size =
@@ -71,8 +74,8 @@ static int realloc_frame_buffer_aligned(
#if CONFIG_REALTIME_ONLY || !CONFIG_AV1_ENCODER
// We should only need an 8-bit version of the source frame if we are
// encoding in non-realtime mode
- (void)num_pyramid_levels;
- assert(num_pyramid_levels == 0);
+ (void)alloc_pyramid;
+ assert(!alloc_pyramid);
#endif // CONFIG_REALTIME_ONLY || !CONFIG_AV1_ENCODER
#if defined AOM_MAX_ALLOCABLE_MEMORY
@@ -80,9 +83,8 @@ static int realloc_frame_buffer_aligned(
uint64_t alloc_size = frame_size;
#if CONFIG_AV1_ENCODER && !CONFIG_REALTIME_ONLY
// The size of ybf->y_pyramid
- if (num_pyramid_levels > 0) {
- alloc_size += aom_get_pyramid_alloc_size(
- width, height, num_pyramid_levels, use_highbitdepth);
+ if (alloc_pyramid) {
+ alloc_size += aom_get_pyramid_alloc_size(width, height, use_highbitdepth);
alloc_size += av1_get_corner_list_size();
}
#endif // CONFIG_AV1_ENCODER && !CONFIG_REALTIME_ONLY
@@ -190,9 +192,8 @@ static int realloc_frame_buffer_aligned(
av1_free_corner_list(ybf->corners);
ybf->corners = NULL;
}
- if (num_pyramid_levels > 0) {
- ybf->y_pyramid = aom_alloc_pyramid(width, height, num_pyramid_levels,
- use_highbitdepth);
+ if (alloc_pyramid) {
+ ybf->y_pyramid = aom_alloc_pyramid(width, height, use_highbitdepth);
if (!ybf->y_pyramid) return AOM_CODEC_MEM_ERROR;
ybf->corners = av1_alloc_corner_list();
if (!ybf->corners) return AOM_CODEC_MEM_ERROR;
@@ -237,7 +238,7 @@ int aom_realloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, int width, int height,
int border, int byte_alignment,
aom_codec_frame_buffer_t *fb,
aom_get_frame_buffer_cb_fn_t cb, void *cb_priv,
- int num_pyramid_levels, int alloc_y_plane_only) {
+ bool alloc_pyramid, int alloc_y_plane_only) {
#if CONFIG_SIZE_LIMIT
if (width > DECODE_WIDTH_LIMIT || height > DECODE_HEIGHT_LIMIT)
return AOM_CODEC_MEM_ERROR;
@@ -264,21 +265,20 @@ int aom_realloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, int width, int height,
ybf, width, height, ss_x, ss_y, use_highbitdepth, border,
byte_alignment, fb, cb, cb_priv, y_stride, yplane_size, uvplane_size,
aligned_width, aligned_height, uv_width, uv_height, uv_stride,
- uv_border_w, uv_border_h, num_pyramid_levels, alloc_y_plane_only);
+ uv_border_w, uv_border_h, alloc_pyramid, alloc_y_plane_only);
}
return AOM_CODEC_MEM_ERROR;
}
int aom_alloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, int width, int height,
int ss_x, int ss_y, int use_highbitdepth, int border,
- int byte_alignment, int num_pyramid_levels,
+ int byte_alignment, bool alloc_pyramid,
int alloc_y_plane_only) {
if (ybf) {
aom_free_frame_buffer(ybf);
- return aom_realloc_frame_buffer(ybf, width, height, ss_x, ss_y,
- use_highbitdepth, border, byte_alignment,
- NULL, NULL, NULL, num_pyramid_levels,
- alloc_y_plane_only);
+ return aom_realloc_frame_buffer(
+ ybf, width, height, ss_x, ss_y, use_highbitdepth, border,
+ byte_alignment, NULL, NULL, NULL, alloc_pyramid, alloc_y_plane_only);
}
return AOM_CODEC_MEM_ERROR;
}
diff --git a/third_party/aom/aom_scale/generic/yv12extend.c b/third_party/aom/aom_scale/generic/yv12extend.c
index 5546112d40..384b72c21e 100644
--- a/third_party/aom/aom_scale/generic/yv12extend.c
+++ b/third_party/aom/aom_scale/generic/yv12extend.c
@@ -302,8 +302,10 @@ void aom_yv12_copy_frame_c(const YV12_BUFFER_CONFIG *src_bc,
}
void aom_yv12_copy_y_c(const YV12_BUFFER_CONFIG *src_ybc,
- YV12_BUFFER_CONFIG *dst_ybc) {
+ YV12_BUFFER_CONFIG *dst_ybc, int use_crop) {
int row;
+ int width = use_crop ? src_ybc->y_crop_width : src_ybc->y_width;
+ int height = use_crop ? src_ybc->y_crop_height : src_ybc->y_height;
const uint8_t *src = src_ybc->y_buffer;
uint8_t *dst = dst_ybc->y_buffer;
@@ -311,8 +313,8 @@ void aom_yv12_copy_y_c(const YV12_BUFFER_CONFIG *src_ybc,
if (src_ybc->flags & YV12_FLAG_HIGHBITDEPTH) {
const uint16_t *src16 = CONVERT_TO_SHORTPTR(src);
uint16_t *dst16 = CONVERT_TO_SHORTPTR(dst);
- for (row = 0; row < src_ybc->y_height; ++row) {
- memcpy(dst16, src16, src_ybc->y_width * sizeof(uint16_t));
+ for (row = 0; row < height; ++row) {
+ memcpy(dst16, src16, width * sizeof(uint16_t));
src16 += src_ybc->y_stride;
dst16 += dst_ybc->y_stride;
}
@@ -320,56 +322,60 @@ void aom_yv12_copy_y_c(const YV12_BUFFER_CONFIG *src_ybc,
}
#endif
- for (row = 0; row < src_ybc->y_height; ++row) {
- memcpy(dst, src, src_ybc->y_width);
+ for (row = 0; row < height; ++row) {
+ memcpy(dst, src, width);
src += src_ybc->y_stride;
dst += dst_ybc->y_stride;
}
}
void aom_yv12_copy_u_c(const YV12_BUFFER_CONFIG *src_bc,
- YV12_BUFFER_CONFIG *dst_bc) {
+ YV12_BUFFER_CONFIG *dst_bc, int use_crop) {
int row;
+ int width = use_crop ? src_bc->uv_crop_width : src_bc->uv_width;
+ int height = use_crop ? src_bc->uv_crop_height : src_bc->uv_height;
const uint8_t *src = src_bc->u_buffer;
uint8_t *dst = dst_bc->u_buffer;
#if CONFIG_AV1_HIGHBITDEPTH
if (src_bc->flags & YV12_FLAG_HIGHBITDEPTH) {
const uint16_t *src16 = CONVERT_TO_SHORTPTR(src);
uint16_t *dst16 = CONVERT_TO_SHORTPTR(dst);
- for (row = 0; row < src_bc->uv_height; ++row) {
- memcpy(dst16, src16, src_bc->uv_width * sizeof(uint16_t));
+ for (row = 0; row < height; ++row) {
+ memcpy(dst16, src16, width * sizeof(uint16_t));
src16 += src_bc->uv_stride;
dst16 += dst_bc->uv_stride;
}
return;
}
#endif
- for (row = 0; row < src_bc->uv_height; ++row) {
- memcpy(dst, src, src_bc->uv_width);
+ for (row = 0; row < height; ++row) {
+ memcpy(dst, src, width);
src += src_bc->uv_stride;
dst += dst_bc->uv_stride;
}
}
void aom_yv12_copy_v_c(const YV12_BUFFER_CONFIG *src_bc,
- YV12_BUFFER_CONFIG *dst_bc) {
+ YV12_BUFFER_CONFIG *dst_bc, int use_crop) {
int row;
+ int width = use_crop ? src_bc->uv_crop_width : src_bc->uv_width;
+ int height = use_crop ? src_bc->uv_crop_height : src_bc->uv_height;
const uint8_t *src = src_bc->v_buffer;
uint8_t *dst = dst_bc->v_buffer;
#if CONFIG_AV1_HIGHBITDEPTH
if (src_bc->flags & YV12_FLAG_HIGHBITDEPTH) {
const uint16_t *src16 = CONVERT_TO_SHORTPTR(src);
uint16_t *dst16 = CONVERT_TO_SHORTPTR(dst);
- for (row = 0; row < src_bc->uv_height; ++row) {
- memcpy(dst16, src16, src_bc->uv_width * sizeof(uint16_t));
+ for (row = 0; row < height; ++row) {
+ memcpy(dst16, src16, width * sizeof(uint16_t));
src16 += src_bc->uv_stride;
dst16 += dst_bc->uv_stride;
}
return;
}
#endif
- for (row = 0; row < src_bc->uv_height; ++row) {
- memcpy(dst, src, src_bc->uv_width);
+ for (row = 0; row < height; ++row) {
+ memcpy(dst, src, width);
src += src_bc->uv_stride;
dst += dst_bc->uv_stride;
}
@@ -491,8 +497,8 @@ void aom_yv12_partial_coloc_copy_v_c(const YV12_BUFFER_CONFIG *src_bc,
}
int aom_yv12_realloc_with_new_border_c(YV12_BUFFER_CONFIG *ybf, int new_border,
- int byte_alignment,
- int num_pyramid_levels, int num_planes) {
+ int byte_alignment, bool alloc_pyramid,
+ int num_planes) {
if (ybf) {
if (new_border == ybf->border) return 0;
YV12_BUFFER_CONFIG new_buf;
@@ -500,7 +506,7 @@ int aom_yv12_realloc_with_new_border_c(YV12_BUFFER_CONFIG *ybf, int new_border,
const int error = aom_alloc_frame_buffer(
&new_buf, ybf->y_crop_width, ybf->y_crop_height, ybf->subsampling_x,
ybf->subsampling_y, ybf->flags & YV12_FLAG_HIGHBITDEPTH, new_border,
- byte_alignment, num_pyramid_levels, 0);
+ byte_alignment, alloc_pyramid, 0);
if (error) return error;
// Copy image buffer
aom_yv12_copy_frame(ybf, &new_buf, num_planes);
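A short usage sketch for the new use_crop flag: with use_crop = 1 the copy covers only the visible y_crop_width x y_crop_height (or uv_crop_*) region, while use_crop = 0 copies the full 8-pixel-aligned plane including padding. The aom_yv12_copy_y dispatch name is taken from the RTCD prototypes updated earlier in this patch; the wrapper functions are illustrative only.

// Illustrative only.
static void copy_visible_luma(const YV12_BUFFER_CONFIG *src,
                              YV12_BUFFER_CONFIG *dst) {
  aom_yv12_copy_y(src, dst, /*use_crop=*/1);  // bounded by y_crop_width/height
}

static void copy_padded_luma(const YV12_BUFFER_CONFIG *src,
                             YV12_BUFFER_CONFIG *dst) {
  aom_yv12_copy_y(src, dst, /*use_crop=*/0);  // bounded by aligned y_width/height
}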
diff --git a/third_party/aom/aom_scale/yv12config.h b/third_party/aom/aom_scale/yv12config.h
index f192a3032e..bc05de2102 100644
--- a/third_party/aom/aom_scale/yv12config.h
+++ b/third_party/aom/aom_scale/yv12config.h
@@ -16,6 +16,8 @@
extern "C" {
#endif
+#include <stdbool.h>
+
#include "config/aom_config.h"
#include "aom/aom_codec.h"
@@ -45,18 +47,29 @@ typedef struct yv12_buffer_config {
/*!\cond */
union {
struct {
+ // The aligned frame width of luma.
+ // It is aligned to a multiple of 8:
+ // y_width = (y_crop_width + 7) & ~7
int y_width;
+ // The aligned frame width of chroma.
+ // uv_width = y_width >> subsampling_x
int uv_width;
};
int widths[2];
};
union {
struct {
+ // The aligned frame height of luma.
+ // It is aligned to a multiple of 8:
+ // y_height = (y_crop_height + 7) & ~7
int y_height;
+ // The aligned frame height of chroma.
+ // uv_height = y_height >> subsampling_y
int uv_height;
};
int heights[2];
};
+ // The frame size encoded or decoded by AV1.
union {
struct {
int y_crop_width;
@@ -139,7 +152,7 @@ typedef struct yv12_buffer_config {
// available return values.
int aom_alloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, int width, int height,
int ss_x, int ss_y, int use_highbitdepth, int border,
- int byte_alignment, int num_pyramid_levels,
+ int byte_alignment, bool alloc_pyramid,
int alloc_y_plane_only);
// Updates the yv12 buffer config with the frame buffer. |byte_alignment| must
@@ -149,15 +162,11 @@ int aom_alloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, int width, int height,
// to decode the current frame. If cb is NULL, libaom will allocate memory
// internally to decode the current frame.
//
-// If num_pyramid_levels > 0, then an image pyramid will be allocated with
-// the specified number of levels.
-//
-// Any buffer which may become a source or ref frame buffer in the encoder
-// must have num_pyramid_levels = cpi->image_pyramid_levels. This will cause
-// an image pyramid to be allocated if one is needed.
-//
-// Any other buffers (in particular, any buffers inside the decoder)
-// must have cpi->image_pyramid_levels = 0, as a pyramid is unneeded there.
+// If alloc_pyramid is true, then an image pyramid will be allocated
+// for use in global motion estimation. This is only needed if this frame
+// buffer will be used to store a source frame or a reference frame in
+// the encoder. Any other frame buffers (e.g., intermediates for filtering,
+// or any buffer in the decoder) can set alloc_pyramid = false.
//
// Returns 0 on success. Returns < 0 on failure.
int aom_realloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, int width, int height,
@@ -165,7 +174,7 @@ int aom_realloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, int width, int height,
int border, int byte_alignment,
aom_codec_frame_buffer_t *fb,
aom_get_frame_buffer_cb_fn_t cb, void *cb_priv,
- int num_pyramid_levels, int alloc_y_plane_only);
+ bool alloc_pyramid, int alloc_y_plane_only);
int aom_free_frame_buffer(YV12_BUFFER_CONFIG *ybf);
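The alignment relationships documented in the new struct comments reduce to a small amount of arithmetic; a sketch, assuming the 8-pixel luma alignment stated above:

// Illustrative only: derive aligned plane dimensions from the visible (crop)
// dimensions, as described by the comments added to yv12_buffer_config.
static int align8(int v) { return (v + 7) & ~7; }

// 1920x1080, 4:2:0 (subsampling_x = subsampling_y = 1):
//   y_width  = align8(1920) = 1920   y_height  = align8(1080) = 1080
//   uv_width = 1920 >> 1    = 960    uv_height = 1080 >> 1    = 540
// 1918x1078, 4:2:0:
//   y_width  = align8(1918) = 1920   y_height  = align8(1078) = 1080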
diff --git a/third_party/aom/aom_util/aom_pthread.h b/third_party/aom/aom_util/aom_pthread.h
new file mode 100644
index 0000000000..99deeb292a
--- /dev/null
+++ b/third_party/aom/aom_util/aom_pthread.h
@@ -0,0 +1,172 @@
+/*
+ * Copyright (c) 2024, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+//
+// pthread.h wrapper
+
+#ifndef AOM_AOM_UTIL_AOM_PTHREAD_H_
+#define AOM_AOM_UTIL_AOM_PTHREAD_H_
+
+#include "config/aom_config.h"
+
+#if CONFIG_MULTITHREAD
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if defined(_WIN32) && !HAVE_PTHREAD_H
+// Prevent leaking max/min macros.
+#undef NOMINMAX
+#define NOMINMAX
+#undef WIN32_LEAN_AND_MEAN
+#define WIN32_LEAN_AND_MEAN
+#include <process.h> // NOLINT
+#include <stddef.h> // NOLINT
+#include <windows.h> // NOLINT
+typedef HANDLE pthread_t;
+typedef int pthread_attr_t;
+typedef CRITICAL_SECTION pthread_mutex_t;
+
+#include <errno.h>
+
+#if _WIN32_WINNT < 0x0600
+#error _WIN32_WINNT must target Windows Vista / Server 2008 or newer.
+#endif
+typedef CONDITION_VARIABLE pthread_cond_t;
+
+#ifndef WINAPI_FAMILY_PARTITION
+#define WINAPI_PARTITION_DESKTOP 1
+#define WINAPI_FAMILY_PARTITION(x) x
+#endif
+
+#if !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)
+#define USE_CREATE_THREAD
+#endif
+
+//------------------------------------------------------------------------------
+// simplistic pthread emulation layer
+
+// _beginthreadex requires __stdcall
+#if defined(__GNUC__) && \
+ (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 2))
+#define THREADFN __attribute__((force_align_arg_pointer)) unsigned int __stdcall
+#else
+#define THREADFN unsigned int __stdcall
+#endif
+#define THREAD_EXIT_SUCCESS 0
+
+static INLINE int pthread_attr_init(pthread_attr_t *attr) {
+ (void)attr;
+ return 0;
+}
+
+static INLINE int pthread_attr_destroy(pthread_attr_t *attr) {
+ (void)attr;
+ return 0;
+}
+
+static INLINE int pthread_create(pthread_t *const thread,
+ const pthread_attr_t *attr,
+ unsigned int(__stdcall *start)(void *),
+ void *arg) {
+ (void)attr;
+#ifdef USE_CREATE_THREAD
+ *thread = CreateThread(NULL, /* lpThreadAttributes */
+ 0, /* dwStackSize */
+ start, arg, 0, /* dwCreationFlags */
+ NULL); /* lpThreadId */
+#else
+ *thread = (pthread_t)_beginthreadex(NULL, /* void *security */
+ 0, /* unsigned stack_size */
+ start, arg, 0, /* unsigned initflag */
+ NULL); /* unsigned *thrdaddr */
+#endif
+ if (*thread == NULL) return 1;
+ SetThreadPriority(*thread, THREAD_PRIORITY_ABOVE_NORMAL);
+ return 0;
+}
+
+static INLINE int pthread_join(pthread_t thread, void **value_ptr) {
+ (void)value_ptr;
+ return (WaitForSingleObjectEx(thread, INFINITE, FALSE /*bAlertable*/) !=
+ WAIT_OBJECT_0 ||
+ CloseHandle(thread) == 0);
+}
+
+// Mutex
+static INLINE int pthread_mutex_init(pthread_mutex_t *const mutex,
+ void *mutexattr) {
+ (void)mutexattr;
+ InitializeCriticalSectionEx(mutex, 0 /*dwSpinCount*/, 0 /*Flags*/);
+ return 0;
+}
+
+static INLINE int pthread_mutex_trylock(pthread_mutex_t *const mutex) {
+ return TryEnterCriticalSection(mutex) ? 0 : EBUSY;
+}
+
+static INLINE int pthread_mutex_lock(pthread_mutex_t *const mutex) {
+ EnterCriticalSection(mutex);
+ return 0;
+}
+
+static INLINE int pthread_mutex_unlock(pthread_mutex_t *const mutex) {
+ LeaveCriticalSection(mutex);
+ return 0;
+}
+
+static INLINE int pthread_mutex_destroy(pthread_mutex_t *const mutex) {
+ DeleteCriticalSection(mutex);
+ return 0;
+}
+
+// Condition
+static INLINE int pthread_cond_destroy(pthread_cond_t *const condition) {
+ (void)condition;
+ return 0;
+}
+
+static INLINE int pthread_cond_init(pthread_cond_t *const condition,
+ void *cond_attr) {
+ (void)cond_attr;
+ InitializeConditionVariable(condition);
+ return 0;
+}
+
+static INLINE int pthread_cond_signal(pthread_cond_t *const condition) {
+ WakeConditionVariable(condition);
+ return 0;
+}
+
+static INLINE int pthread_cond_broadcast(pthread_cond_t *const condition) {
+ WakeAllConditionVariable(condition);
+ return 0;
+}
+
+static INLINE int pthread_cond_wait(pthread_cond_t *const condition,
+ pthread_mutex_t *const mutex) {
+ int ok;
+ ok = SleepConditionVariableCS(condition, mutex, INFINITE);
+ return !ok;
+}
+#else // _WIN32
+#include <pthread.h> // NOLINT
+#define THREADFN void *
+#define THREAD_EXIT_SUCCESS NULL
+#endif
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // CONFIG_MULTITHREAD
+
+#endif // AOM_AOM_UTIL_AOM_PTHREAD_H_
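Code written against this wrapper stays identical on both paths: on Windows without pthread.h it compiles against the emulation above, elsewhere against native pthreads. A minimal sketch (CONFIG_MULTITHREAD assumed enabled; the job struct and function names are hypothetical):

#include "aom_util/aom_pthread.h"

typedef struct {
  pthread_mutex_t mutex;
  pthread_cond_t cond;
  int done;
} toy_job_t;

// Illustrative only: THREADFN expands to the correct signature on each path
// (unsigned int __stdcall on the Win32 emulation, void * with pthreads), and
// THREAD_EXIT_SUCCESS matches the corresponding return type.
static THREADFN toy_worker(void *arg) {
  toy_job_t *job = (toy_job_t *)arg;
  pthread_mutex_lock(&job->mutex);
  job->done = 1;
  pthread_cond_signal(&job->cond);
  pthread_mutex_unlock(&job->mutex);
  return THREAD_EXIT_SUCCESS;
}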
diff --git a/third_party/aom/aom_util/aom_thread.c b/third_party/aom/aom_util/aom_thread.c
index fa3b0a25e4..bdf2b7dfa6 100644
--- a/third_party/aom/aom_util/aom_thread.c
+++ b/third_party/aom/aom_util/aom_thread.c
@@ -23,8 +23,11 @@
#include <assert.h>
#include <string.h> // for memset()
+#include "config/aom_config.h"
+
#include "aom_mem/aom_mem.h"
#include "aom_ports/sanitizer.h"
+#include "aom_util/aom_pthread.h"
#include "aom_util/aom_thread.h"
#if CONFIG_MULTITHREAD
@@ -65,29 +68,30 @@ static THREADFN thread_loop(void *ptr) {
#endif
pthread_mutex_lock(&worker->impl_->mutex_);
for (;;) {
- while (worker->status_ == OK) { // wait in idling mode
+ while (worker->status_ == AVX_WORKER_STATUS_OK) { // wait in idling mode
pthread_cond_wait(&worker->impl_->condition_, &worker->impl_->mutex_);
}
- if (worker->status_ == WORK) {
- // When worker->status_ is WORK, the main thread doesn't change
- // worker->status_ and will wait until the worker changes worker->status_
- // to OK. See change_state(). So the worker can safely call execute()
- // without holding worker->impl_->mutex_. When the worker reacquires
- // worker->impl_->mutex_, worker->status_ must still be WORK.
+ if (worker->status_ == AVX_WORKER_STATUS_WORKING) {
+ // When worker->status_ is AVX_WORKER_STATUS_WORKING, the main thread
+ // doesn't change worker->status_ and will wait until the worker changes
+ // worker->status_ to AVX_WORKER_STATUS_OK. See change_state(). So the
+ // worker can safely call execute() without holding worker->impl_->mutex_.
+ // When the worker reacquires worker->impl_->mutex_, worker->status_ must
+ // still be AVX_WORKER_STATUS_WORKING.
pthread_mutex_unlock(&worker->impl_->mutex_);
execute(worker);
pthread_mutex_lock(&worker->impl_->mutex_);
- assert(worker->status_ == WORK);
- worker->status_ = OK;
+ assert(worker->status_ == AVX_WORKER_STATUS_WORKING);
+ worker->status_ = AVX_WORKER_STATUS_OK;
// signal to the main thread that we're done (for sync())
pthread_cond_signal(&worker->impl_->condition_);
} else {
- assert(worker->status_ == NOT_OK); // finish the worker
+ assert(worker->status_ == AVX_WORKER_STATUS_NOT_OK); // finish the worker
break;
}
}
pthread_mutex_unlock(&worker->impl_->mutex_);
- return THREAD_RETURN(NULL); // Thread is finished
+ return THREAD_EXIT_SUCCESS; // Thread is finished
}
// main thread state control
@@ -98,13 +102,13 @@ static void change_state(AVxWorker *const worker, AVxWorkerStatus new_status) {
if (worker->impl_ == NULL) return;
pthread_mutex_lock(&worker->impl_->mutex_);
- if (worker->status_ >= OK) {
+ if (worker->status_ >= AVX_WORKER_STATUS_OK) {
// wait for the worker to finish
- while (worker->status_ != OK) {
+ while (worker->status_ != AVX_WORKER_STATUS_OK) {
pthread_cond_wait(&worker->impl_->condition_, &worker->impl_->mutex_);
}
// assign new status and release the working thread if needed
- if (new_status != OK) {
+ if (new_status != AVX_WORKER_STATUS_OK) {
worker->status_ = new_status;
pthread_cond_signal(&worker->impl_->condition_);
}
@@ -118,21 +122,21 @@ static void change_state(AVxWorker *const worker, AVxWorkerStatus new_status) {
static void init(AVxWorker *const worker) {
memset(worker, 0, sizeof(*worker));
- worker->status_ = NOT_OK;
+ worker->status_ = AVX_WORKER_STATUS_NOT_OK;
}
static int sync(AVxWorker *const worker) {
#if CONFIG_MULTITHREAD
- change_state(worker, OK);
+ change_state(worker, AVX_WORKER_STATUS_OK);
#endif
- assert(worker->status_ <= OK);
+ assert(worker->status_ <= AVX_WORKER_STATUS_OK);
return !worker->had_error;
}
static int reset(AVxWorker *const worker) {
int ok = 1;
worker->had_error = 0;
- if (worker->status_ < OK) {
+ if (worker->status_ < AVX_WORKER_STATUS_OK) {
#if CONFIG_MULTITHREAD
worker->impl_ = (AVxWorkerImpl *)aom_calloc(1, sizeof(*worker->impl_));
if (worker->impl_ == NULL) {
@@ -164,7 +168,7 @@ static int reset(AVxWorker *const worker) {
#endif
pthread_mutex_lock(&worker->impl_->mutex_);
ok = !pthread_create(&worker->impl_->thread_, &attr, thread_loop, worker);
- if (ok) worker->status_ = OK;
+ if (ok) worker->status_ = AVX_WORKER_STATUS_OK;
pthread_mutex_unlock(&worker->impl_->mutex_);
pthread_attr_destroy(&attr);
if (!ok) {
@@ -177,12 +181,12 @@ static int reset(AVxWorker *const worker) {
return 0;
}
#else
- worker->status_ = OK;
+ worker->status_ = AVX_WORKER_STATUS_OK;
#endif
- } else if (worker->status_ > OK) {
+ } else if (worker->status_ > AVX_WORKER_STATUS_OK) {
ok = sync(worker);
}
- assert(!ok || (worker->status_ == OK));
+ assert(!ok || (worker->status_ == AVX_WORKER_STATUS_OK));
return ok;
}
@@ -194,7 +198,7 @@ static void execute(AVxWorker *const worker) {
static void launch(AVxWorker *const worker) {
#if CONFIG_MULTITHREAD
- change_state(worker, WORK);
+ change_state(worker, AVX_WORKER_STATUS_WORKING);
#else
execute(worker);
#endif
@@ -203,7 +207,7 @@ static void launch(AVxWorker *const worker) {
static void end(AVxWorker *const worker) {
#if CONFIG_MULTITHREAD
if (worker->impl_ != NULL) {
- change_state(worker, NOT_OK);
+ change_state(worker, AVX_WORKER_STATUS_NOT_OK);
pthread_join(worker->impl_->thread_, NULL);
pthread_mutex_destroy(&worker->impl_->mutex_);
pthread_cond_destroy(&worker->impl_->condition_);
@@ -211,10 +215,10 @@ static void end(AVxWorker *const worker) {
worker->impl_ = NULL;
}
#else
- worker->status_ = NOT_OK;
+ worker->status_ = AVX_WORKER_STATUS_NOT_OK;
assert(worker->impl_ == NULL);
#endif
- assert(worker->status_ == NOT_OK);
+ assert(worker->status_ == AVX_WORKER_STATUS_NOT_OK);
}
//------------------------------------------------------------------------------
diff --git a/third_party/aom/aom_util/aom_thread.h b/third_party/aom/aom_util/aom_thread.h
index ec2ea43491..92e162f121 100644
--- a/third_party/aom/aom_util/aom_thread.h
+++ b/third_party/aom/aom_util/aom_thread.h
@@ -17,157 +17,17 @@
#ifndef AOM_AOM_UTIL_AOM_THREAD_H_
#define AOM_AOM_UTIL_AOM_THREAD_H_
-#include "config/aom_config.h"
-
#ifdef __cplusplus
extern "C" {
#endif
#define MAX_NUM_THREADS 64
-#if CONFIG_MULTITHREAD
-
-#if defined(_WIN32) && !HAVE_PTHREAD_H
-// Prevent leaking max/min macros.
-#undef NOMINMAX
-#define NOMINMAX
-#undef WIN32_LEAN_AND_MEAN
-#define WIN32_LEAN_AND_MEAN
-#include <errno.h> // NOLINT
-#include <process.h> // NOLINT
-#include <windows.h> // NOLINT
-typedef HANDLE pthread_t;
-typedef int pthread_attr_t;
-typedef CRITICAL_SECTION pthread_mutex_t;
-
-#if _WIN32_WINNT < 0x0600
-#error _WIN32_WINNT must target Windows Vista / Server 2008 or newer.
-#endif
-typedef CONDITION_VARIABLE pthread_cond_t;
-
-#ifndef WINAPI_FAMILY_PARTITION
-#define WINAPI_PARTITION_DESKTOP 1
-#define WINAPI_FAMILY_PARTITION(x) x
-#endif
-
-#if !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)
-#define USE_CREATE_THREAD
-#endif
-
-//------------------------------------------------------------------------------
-// simplistic pthread emulation layer
-
-// _beginthreadex requires __stdcall
-#define THREADFN unsigned int __stdcall
-#define THREAD_RETURN(val) (unsigned int)((DWORD_PTR)val)
-
-static INLINE int pthread_attr_init(pthread_attr_t *attr) {
- (void)attr;
- return 0;
-}
-
-static INLINE int pthread_attr_destroy(pthread_attr_t *attr) {
- (void)attr;
- return 0;
-}
-
-static INLINE int pthread_create(pthread_t *const thread,
- const pthread_attr_t *attr,
- unsigned int(__stdcall *start)(void *),
- void *arg) {
- (void)attr;
-#ifdef USE_CREATE_THREAD
- *thread = CreateThread(NULL, /* lpThreadAttributes */
- 0, /* dwStackSize */
- start, arg, 0, /* dwStackSize */
- NULL); /* lpThreadId */
-#else
- *thread = (pthread_t)_beginthreadex(NULL, /* void *security */
- 0, /* unsigned stack_size */
- start, arg, 0, /* unsigned initflag */
- NULL); /* unsigned *thrdaddr */
-#endif
- if (*thread == NULL) return 1;
- SetThreadPriority(*thread, THREAD_PRIORITY_ABOVE_NORMAL);
- return 0;
-}
-
-static INLINE int pthread_join(pthread_t thread, void **value_ptr) {
- (void)value_ptr;
- return (WaitForSingleObjectEx(thread, INFINITE, FALSE /*bAlertable*/) !=
- WAIT_OBJECT_0 ||
- CloseHandle(thread) == 0);
-}
-
-// Mutex
-static INLINE int pthread_mutex_init(pthread_mutex_t *const mutex,
- void *mutexattr) {
- (void)mutexattr;
- InitializeCriticalSectionEx(mutex, 0 /*dwSpinCount*/, 0 /*Flags*/);
- return 0;
-}
-
-static INLINE int pthread_mutex_trylock(pthread_mutex_t *const mutex) {
- return TryEnterCriticalSection(mutex) ? 0 : EBUSY;
-}
-
-static INLINE int pthread_mutex_lock(pthread_mutex_t *const mutex) {
- EnterCriticalSection(mutex);
- return 0;
-}
-
-static INLINE int pthread_mutex_unlock(pthread_mutex_t *const mutex) {
- LeaveCriticalSection(mutex);
- return 0;
-}
-
-static INLINE int pthread_mutex_destroy(pthread_mutex_t *const mutex) {
- DeleteCriticalSection(mutex);
- return 0;
-}
-
-// Condition
-static INLINE int pthread_cond_destroy(pthread_cond_t *const condition) {
- (void)condition;
- return 0;
-}
-
-static INLINE int pthread_cond_init(pthread_cond_t *const condition,
- void *cond_attr) {
- (void)cond_attr;
- InitializeConditionVariable(condition);
- return 0;
-}
-
-static INLINE int pthread_cond_signal(pthread_cond_t *const condition) {
- WakeConditionVariable(condition);
- return 0;
-}
-
-static INLINE int pthread_cond_broadcast(pthread_cond_t *const condition) {
- WakeAllConditionVariable(condition);
- return 0;
-}
-
-static INLINE int pthread_cond_wait(pthread_cond_t *const condition,
- pthread_mutex_t *const mutex) {
- int ok;
- ok = SleepConditionVariableCS(condition, mutex, INFINITE);
- return !ok;
-}
-#else // _WIN32
-#include <pthread.h> // NOLINT
-#define THREADFN void *
-#define THREAD_RETURN(val) val
-#endif
-
-#endif // CONFIG_MULTITHREAD
-
// State of the worker thread object
typedef enum {
- NOT_OK = 0, // object is unusable
- OK, // ready to work
- WORK // busy finishing the current task
+ AVX_WORKER_STATUS_NOT_OK = 0, // object is unusable
+ AVX_WORKER_STATUS_OK, // ready to work
+ AVX_WORKER_STATUS_WORKING // busy finishing the current task
} AVxWorkerStatus;
// Function to be called by the worker thread. Takes two opaque pointers as
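With the prefixed status names, the worker lifecycle asserted in aom_thread.c reads as follows; a hedged sketch, assuming the AVxWorker/AVxWorkerInterface declarations and the aom_get_worker_interface() accessor from the rest of this header (my_hook and my_data are hypothetical):

// Illustrative only: init -> reset -> launch -> sync -> end.
const AVxWorkerInterface *iface = aom_get_worker_interface();
AVxWorker worker;
iface->init(&worker);           // status_ == AVX_WORKER_STATUS_NOT_OK
if (iface->reset(&worker)) {    // spawns the thread; status_ == AVX_WORKER_STATUS_OK
  worker.hook = my_hook;        // hypothetical AVxWorkerHook
  worker.data1 = my_data;       // hypothetical payload
  iface->launch(&worker);       // status_ -> AVX_WORKER_STATUS_WORKING
  if (!iface->sync(&worker)) {  // blocks until status_ returns to ..._OK
    // the hook reported an error via worker.had_error
  }
}
iface->end(&worker);            // joins; status_ == AVX_WORKER_STATUS_NOT_OK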
diff --git a/third_party/aom/aom_util/aom_util.cmake b/third_party/aom/aom_util/aom_util.cmake
index 6bf4fafc4c..d3da550485 100644
--- a/third_party/aom/aom_util/aom_util.cmake
+++ b/third_party/aom/aom_util/aom_util.cmake
@@ -13,7 +13,8 @@ if(AOM_AOM_UTIL_AOM_UTIL_CMAKE_)
endif() # AOM_AOM_UTIL_AOM_UTIL_CMAKE_
set(AOM_AOM_UTIL_AOM_UTIL_CMAKE_ 1)
-list(APPEND AOM_UTIL_SOURCES "${AOM_ROOT}/aom_util/aom_thread.c"
+list(APPEND AOM_UTIL_SOURCES "${AOM_ROOT}/aom_util/aom_pthread.h"
+ "${AOM_ROOT}/aom_util/aom_thread.c"
"${AOM_ROOT}/aom_util/aom_thread.h"
"${AOM_ROOT}/aom_util/endian_inl.h")
diff --git a/third_party/aom/apps/aomenc.c b/third_party/aom/apps/aomenc.c
index 3c9c136eed..799fb3a4f8 100644
--- a/third_party/aom/apps/aomenc.c
+++ b/third_party/aom/apps/aomenc.c
@@ -442,12 +442,12 @@ const arg_def_t *av1_ctrl_args[] = {
#endif
&g_av1_codec_arg_defs.dv_cost_upd_freq,
&g_av1_codec_arg_defs.partition_info_path,
- &g_av1_codec_arg_defs.enable_rate_guide_deltaq,
- &g_av1_codec_arg_defs.rate_distribution_info,
&g_av1_codec_arg_defs.enable_directional_intra,
&g_av1_codec_arg_defs.enable_tx_size_search,
&g_av1_codec_arg_defs.loopfilter_control,
&g_av1_codec_arg_defs.auto_intra_tools_off,
+ &g_av1_codec_arg_defs.enable_rate_guide_deltaq,
+ &g_av1_codec_arg_defs.rate_distribution_info,
NULL,
};
diff --git a/third_party/aom/av1/av1.cmake b/third_party/aom/av1/av1.cmake
index c66a748d40..32645f6065 100644
--- a/third_party/aom/av1/av1.cmake
+++ b/third_party/aom/av1/av1.cmake
@@ -262,7 +262,6 @@ list(APPEND AOM_AV1_ENCODER_SOURCES
list(APPEND AOM_AV1_COMMON_INTRIN_SSE2
"${AOM_ROOT}/av1/common/x86/av1_txfm_sse2.h"
- "${AOM_ROOT}/av1/common/x86/cdef_block_sse2.c"
"${AOM_ROOT}/av1/common/x86/cfl_sse2.c"
"${AOM_ROOT}/av1/common/x86/convolve_2d_sse2.c"
"${AOM_ROOT}/av1/common/x86/convolve_sse2.c"
@@ -272,11 +271,14 @@ list(APPEND AOM_AV1_COMMON_INTRIN_SSE2
list(APPEND AOM_AV1_COMMON_INTRIN_SSSE3
"${AOM_ROOT}/av1/common/x86/av1_inv_txfm_ssse3.c"
"${AOM_ROOT}/av1/common/x86/av1_inv_txfm_ssse3.h"
- "${AOM_ROOT}/av1/common/x86/cdef_block_ssse3.c"
"${AOM_ROOT}/av1/common/x86/cfl_ssse3.c"
"${AOM_ROOT}/av1/common/x86/jnt_convolve_ssse3.c"
"${AOM_ROOT}/av1/common/x86/resize_ssse3.c")
+# Fallbacks to support Valgrind on 32-bit x86
+list(APPEND AOM_AV1_COMMON_INTRIN_SSSE3_X86
+ "${AOM_ROOT}/av1/common/x86/cdef_block_ssse3.c")
+
list(APPEND AOM_AV1_COMMON_INTRIN_SSE4_1
"${AOM_ROOT}/av1/common/x86/av1_convolve_horiz_rs_sse4.c"
"${AOM_ROOT}/av1/common/x86/av1_convolve_scale_sse4.c"
@@ -372,7 +374,8 @@ list(APPEND AOM_AV1_ENCODER_INTRIN_NEON_DOTPROD
"${AOM_ROOT}/av1/encoder/arm/neon/temporal_filter_neon_dotprod.c")
list(APPEND AOM_AV1_ENCODER_INTRIN_SVE
- "${AOM_ROOT}/av1/encoder/arm/neon/av1_error_sve.c")
+ "${AOM_ROOT}/av1/encoder/arm/neon/av1_error_sve.c"
+ "${AOM_ROOT}/av1/encoder/arm/neon/wedge_utils_sve.c")
list(APPEND AOM_AV1_ENCODER_INTRIN_ARM_CRC32
"${AOM_ROOT}/av1/encoder/arm/crc32/hash_arm_crc32.c")
@@ -477,6 +480,10 @@ if(CONFIG_AV1_HIGHBITDEPTH)
"${AOM_ROOT}/av1/common/arm/highbd_warp_plane_neon.c"
"${AOM_ROOT}/av1/common/arm/highbd_wiener_convolve_neon.c")
+ list(APPEND AOM_AV1_COMMON_INTRIN_SVE2
+ "${AOM_ROOT}/av1/common/arm/highbd_compound_convolve_sve2.c"
+ "${AOM_ROOT}/av1/common/arm/highbd_convolve_sve2.c")
+
list(APPEND AOM_AV1_ENCODER_INTRIN_SSE2
"${AOM_ROOT}/av1/encoder/x86/highbd_block_error_intrin_sse2.c"
"${AOM_ROOT}/av1/encoder/x86/highbd_temporal_filter_sse2.c")
@@ -605,6 +612,10 @@ function(setup_av1_targets)
require_compiler_flag_nomsvc("-mssse3" NO)
add_intrinsics_object_library("-mssse3" "ssse3" "aom_av1_common"
"AOM_AV1_COMMON_INTRIN_SSSE3")
+ if(AOM_ARCH_X86)
+ add_intrinsics_object_library("-mssse3" "ssse3_x86" "aom_av1_common"
+ "AOM_AV1_COMMON_INTRIN_SSSE3_X86")
+ endif()
if(CONFIG_AV1_DECODER)
if(AOM_AV1_DECODER_INTRIN_SSSE3)
@@ -703,6 +714,11 @@ function(setup_av1_targets)
endif()
endif()
+ if(HAVE_SVE2)
+ add_intrinsics_object_library("${AOM_SVE2_FLAG}" "sve2" "aom_av1_common"
+ "AOM_AV1_COMMON_INTRIN_SVE2")
+ endif()
+
if(HAVE_VSX)
if(AOM_AV1_COMMON_INTRIN_VSX)
add_intrinsics_object_library("-mvsx -maltivec" "vsx" "aom_av1_common"
diff --git a/third_party/aom/av1/av1_cx_iface.c b/third_party/aom/av1/av1_cx_iface.c
index 9214feb4e6..2b6b1504e6 100644
--- a/third_party/aom/av1/av1_cx_iface.c
+++ b/third_party/aom/av1/av1_cx_iface.c
@@ -9,22 +9,28 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
#include <limits.h>
+#include <stdint.h>
#include <stdlib.h>
#include <string.h>
-#include "aom_mem/aom_mem.h"
#include "config/aom_config.h"
#include "config/aom_version.h"
-#include "aom_ports/mem_ops.h"
-
+#include "aom/aomcx.h"
#include "aom/aom_encoder.h"
+#include "aom/aom_external_partition.h"
+#include "aom/aom_image.h"
#include "aom/internal/aom_codec_internal.h"
-
#include "aom_dsp/flow_estimation/flow_estimation.h"
+#include "aom_mem/aom_mem.h"
+#include "aom_scale/yv12config.h"
+#include "aom_util/aom_pthread.h"
#include "av1/av1_cx_iface.h"
#include "av1/av1_iface_common.h"
+#include "av1/common/av1_common_int.h"
+#include "av1/common/enums.h"
+#include "av1/common/scale.h"
#include "av1/encoder/bitstream.h"
#include "av1/encoder/encoder.h"
#include "av1/encoder/encoder_alloc.h"
@@ -32,6 +38,7 @@
#include "av1/encoder/ethread.h"
#include "av1/encoder/external_partition.h"
#include "av1/encoder/firstpass.h"
+#include "av1/encoder/lookahead.h"
#include "av1/encoder/rc_utils.h"
#include "av1/arg_defs.h"
@@ -1836,6 +1843,11 @@ static aom_codec_err_t ctrl_set_enable_qm(aom_codec_alg_priv_t *ctx,
va_list args) {
struct av1_extracfg extra_cfg = ctx->extra_cfg;
extra_cfg.enable_qm = CAST(AV1E_SET_ENABLE_QM, args);
+#if !CONFIG_QUANT_MATRIX
+ if (extra_cfg.enable_qm) {
+ ERROR("QM can't be enabled with CONFIG_QUANT_MATRIX=0.");
+ }
+#endif
return update_extra_cfg(ctx, &extra_cfg);
}
static aom_codec_err_t ctrl_set_qm_y(aom_codec_alg_priv_t *ctx, va_list args) {
@@ -3072,11 +3084,36 @@ static aom_codec_err_t encoder_encode(aom_codec_alg_priv_t *ctx,
ctx->pts_offset = ptsvol;
ctx->pts_offset_initialized = 1;
}
+ if (ptsvol < ctx->pts_offset) {
+ aom_internal_error(&ppi->error, AOM_CODEC_INVALID_PARAM,
+ "pts is smaller than initial pts");
+ }
ptsvol -= ctx->pts_offset;
+ if (ptsvol > INT64_MAX / cpi_data.timestamp_ratio->num) {
+ aom_internal_error(
+ &ppi->error, AOM_CODEC_INVALID_PARAM,
+ "conversion of relative pts to ticks would overflow");
+ }
int64_t src_time_stamp =
timebase_units_to_ticks(cpi_data.timestamp_ratio, ptsvol);
+#if ULONG_MAX > INT64_MAX
+ if (duration > INT64_MAX) {
+ aom_internal_error(&ppi->error, AOM_CODEC_INVALID_PARAM,
+ "duration is too big");
+ }
+#endif
+ if (ptsvol > INT64_MAX - (int64_t)duration) {
+ aom_internal_error(&ppi->error, AOM_CODEC_INVALID_PARAM,
+ "relative pts + duration is too big");
+ }
+ aom_codec_pts_t pts_end = ptsvol + (int64_t)duration;
+ if (pts_end > INT64_MAX / cpi_data.timestamp_ratio->num) {
+ aom_internal_error(
+ &ppi->error, AOM_CODEC_INVALID_PARAM,
+ "conversion of relative pts + duration to ticks would overflow");
+ }
int64_t src_end_time_stamp =
- timebase_units_to_ticks(cpi_data.timestamp_ratio, ptsvol + duration);
+ timebase_units_to_ticks(cpi_data.timestamp_ratio, pts_end);
YV12_BUFFER_CONFIG sd;
res = image2yuvconfig(img, &sd);
@@ -3110,7 +3147,7 @@ static aom_codec_err_t encoder_encode(aom_codec_alg_priv_t *ctx,
subsampling_x, subsampling_y, use_highbitdepth, lag_in_frames,
src_border_in_pixels, cpi->common.features.byte_alignment,
ctx->num_lap_buffers, (cpi->oxcf.kf_cfg.key_freq_max == 0),
- cpi->image_pyramid_levels);
+ cpi->alloc_pyramid);
}
if (!ppi->lookahead)
aom_internal_error(&ppi->error, AOM_CODEC_MEM_ERROR,
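The new pts checks follow the usual check-before-multiply pattern for signed overflow; a standalone sketch of the same idea (the names and ratio layout are illustrative, not the encoder's actual types):

#include <stdint.h>

// Illustrative only: convert a relative pts to timebase ticks, refusing any
// input whose multiplication by the numerator would overflow int64_t.
static int pts_to_ticks_checked(int64_t pts, int64_t num, int64_t den,
                                int64_t *ticks) {
  if (pts < 0 || num <= 0 || den <= 0) return -1;
  if (pts > INT64_MAX / num) return -1;  // pts * num would overflow
  *ticks = pts * num / den;
  return 0;
}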
diff --git a/third_party/aom/av1/av1_dx_iface.c b/third_party/aom/av1/av1_dx_iface.c
index 3d7e132ab8..1a2dea37b6 100644
--- a/third_party/aom/av1/av1_dx_iface.c
+++ b/third_party/aom/av1/av1_dx_iface.c
@@ -19,18 +19,23 @@
#include "aom/internal/aom_image_internal.h"
#include "aom/aomdx.h"
#include "aom/aom_decoder.h"
+#include "aom/aom_image.h"
#include "aom_dsp/bitreader_buffer.h"
#include "aom_dsp/aom_dsp_common.h"
+#include "aom_ports/mem.h"
#include "aom_ports/mem_ops.h"
+#include "aom_util/aom_pthread.h"
#include "aom_util/aom_thread.h"
#include "av1/common/alloccommon.h"
+#include "av1/common/av1_common_int.h"
#include "av1/common/frame_buffers.h"
#include "av1/common/enums.h"
#include "av1/common/obu_util.h"
#include "av1/decoder/decoder.h"
#include "av1/decoder/decodeframe.h"
+#include "av1/decoder/dthread.h"
#include "av1/decoder/grain_synthesis.h"
#include "av1/decoder/obu.h"
@@ -865,7 +870,9 @@ static aom_image_t *decoder_get_frame(aom_codec_alg_priv_t *ctx,
if (pbi->ext_tile_debug && tiles->single_tile_decoding &&
pbi->dec_tile_row >= 0) {
int tile_width, tile_height;
- av1_get_uniform_tile_size(cm, &tile_width, &tile_height);
+ if (!av1_get_uniform_tile_size(cm, &tile_width, &tile_height)) {
+ return NULL;
+ }
const int tile_row = AOMMIN(pbi->dec_tile_row, tiles->rows - 1);
const int mi_row = tile_row * tile_height;
const int ssy = ctx->img.y_chroma_shift;
@@ -884,7 +891,9 @@ static aom_image_t *decoder_get_frame(aom_codec_alg_priv_t *ctx,
if (pbi->ext_tile_debug && tiles->single_tile_decoding &&
pbi->dec_tile_col >= 0) {
int tile_width, tile_height;
- av1_get_uniform_tile_size(cm, &tile_width, &tile_height);
+ if (!av1_get_uniform_tile_size(cm, &tile_width, &tile_height)) {
+ return NULL;
+ }
const int tile_col = AOMMIN(pbi->dec_tile_col, tiles->cols - 1);
const int mi_col = tile_col * tile_width;
const int ssx = ctx->img.x_chroma_shift;
@@ -1428,7 +1437,9 @@ static aom_codec_err_t ctrl_get_tile_size(aom_codec_alg_priv_t *ctx,
(FrameWorkerData *)worker->data1;
const AV1_COMMON *const cm = &frame_worker_data->pbi->common;
int tile_width, tile_height;
- av1_get_uniform_tile_size(cm, &tile_width, &tile_height);
+ if (!av1_get_uniform_tile_size(cm, &tile_width, &tile_height)) {
+ return AOM_CODEC_CORRUPT_FRAME;
+ }
*tile_size = ((tile_width * MI_SIZE) << 16) + tile_height * MI_SIZE;
return AOM_CODEC_OK;
} else {
diff --git a/third_party/aom/av1/common/alloccommon.c b/third_party/aom/av1/common/alloccommon.c
index 2a9a8beb40..e9a38c4a60 100644
--- a/third_party/aom/av1/common/alloccommon.c
+++ b/third_party/aom/av1/common/alloccommon.c
@@ -13,6 +13,8 @@
#include "config/aom_config.h"
#include "aom_mem/aom_mem.h"
+#include "aom_scale/yv12config.h"
+#include "aom_util/aom_pthread.h"
#include "av1/common/alloccommon.h"
#include "av1/common/av1_common_int.h"
@@ -20,6 +22,8 @@
#include "av1/common/cdef_block.h"
#include "av1/common/entropymode.h"
#include "av1/common/entropymv.h"
+#include "av1/common/enums.h"
+#include "av1/common/restoration.h"
#include "av1/common/thread_common.h"
int av1_get_MBs(int width, int height) {
@@ -200,7 +204,7 @@ void av1_alloc_cdef_buffers(AV1_COMMON *const cm,
const int is_num_workers_changed =
cdef_info->allocated_num_workers != num_workers;
const int is_cdef_enabled =
- cm->seq_params->enable_cdef && !cm->tiles.large_scale;
+ cm->seq_params->enable_cdef && !cm->tiles.single_tile_decoding;
// num-bufs=3 represents ping-pong buffers for top linebuf,
// followed by bottom linebuf.
diff --git a/third_party/aom/av1/common/arm/highbd_compound_convolve_neon.c b/third_party/aom/av1/common/arm/highbd_compound_convolve_neon.c
index fc03a2ee04..9247ded6bf 100644
--- a/third_party/aom/av1/common/arm/highbd_compound_convolve_neon.c
+++ b/third_party/aom/av1/common/arm/highbd_compound_convolve_neon.c
@@ -20,266 +20,9 @@
#include "aom_ports/mem.h"
#include "av1/common/convolve.h"
#include "av1/common/filter.h"
+#include "av1/common/arm/highbd_compound_convolve_neon.h"
#include "av1/common/arm/highbd_convolve_neon.h"
-#define ROUND_SHIFT 2 * FILTER_BITS - ROUND0_BITS - COMPOUND_ROUND1_BITS
-
-static INLINE void highbd_12_comp_avg_neon(const uint16_t *src_ptr,
- int src_stride, uint16_t *dst_ptr,
- int dst_stride, int w, int h,
- ConvolveParams *conv_params,
- const int offset, const int bd) {
- CONV_BUF_TYPE *ref_ptr = conv_params->dst;
- const int ref_stride = conv_params->dst_stride;
- const uint16x4_t offset_vec = vdup_n_u16(offset);
- const uint16x8_t max = vdupq_n_u16((1 << bd) - 1);
-
- if (w == 4) {
- do {
- const uint16x4_t src = vld1_u16(src_ptr);
- const uint16x4_t ref = vld1_u16(ref_ptr);
-
- uint16x4_t avg = vhadd_u16(src, ref);
- int32x4_t d0 = vreinterpretq_s32_u32(vsubl_u16(avg, offset_vec));
-
- uint16x4_t d0_u16 = vqrshrun_n_s32(d0, ROUND_SHIFT - 2);
- d0_u16 = vmin_u16(d0_u16, vget_low_u16(max));
-
- vst1_u16(dst_ptr, d0_u16);
-
- src_ptr += src_stride;
- ref_ptr += ref_stride;
- dst_ptr += dst_stride;
- } while (--h != 0);
- } else {
- do {
- int width = w;
- const uint16_t *src = src_ptr;
- const uint16_t *ref = ref_ptr;
- uint16_t *dst = dst_ptr;
- do {
- const uint16x8_t s = vld1q_u16(src);
- const uint16x8_t r = vld1q_u16(ref);
-
- uint16x8_t avg = vhaddq_u16(s, r);
- int32x4_t d0_lo =
- vreinterpretq_s32_u32(vsubl_u16(vget_low_u16(avg), offset_vec));
- int32x4_t d0_hi =
- vreinterpretq_s32_u32(vsubl_u16(vget_high_u16(avg), offset_vec));
-
- uint16x8_t d0 = vcombine_u16(vqrshrun_n_s32(d0_lo, ROUND_SHIFT - 2),
- vqrshrun_n_s32(d0_hi, ROUND_SHIFT - 2));
- d0 = vminq_u16(d0, max);
- vst1q_u16(dst, d0);
-
- src += 8;
- ref += 8;
- dst += 8;
- width -= 8;
- } while (width != 0);
-
- src_ptr += src_stride;
- ref_ptr += ref_stride;
- dst_ptr += dst_stride;
- } while (--h != 0);
- }
-}
-
-static INLINE void highbd_comp_avg_neon(const uint16_t *src_ptr, int src_stride,
- uint16_t *dst_ptr, int dst_stride,
- int w, int h,
- ConvolveParams *conv_params,
- const int offset, const int bd) {
- CONV_BUF_TYPE *ref_ptr = conv_params->dst;
- const int ref_stride = conv_params->dst_stride;
- const uint16x4_t offset_vec = vdup_n_u16(offset);
- const uint16x8_t max = vdupq_n_u16((1 << bd) - 1);
-
- if (w == 4) {
- do {
- const uint16x4_t src = vld1_u16(src_ptr);
- const uint16x4_t ref = vld1_u16(ref_ptr);
-
- uint16x4_t avg = vhadd_u16(src, ref);
- int32x4_t d0 = vreinterpretq_s32_u32(vsubl_u16(avg, offset_vec));
-
- uint16x4_t d0_u16 = vqrshrun_n_s32(d0, ROUND_SHIFT);
- d0_u16 = vmin_u16(d0_u16, vget_low_u16(max));
-
- vst1_u16(dst_ptr, d0_u16);
-
- src_ptr += src_stride;
- ref_ptr += ref_stride;
- dst_ptr += dst_stride;
- } while (--h != 0);
- } else {
- do {
- int width = w;
- const uint16_t *src = src_ptr;
- const uint16_t *ref = ref_ptr;
- uint16_t *dst = dst_ptr;
- do {
- const uint16x8_t s = vld1q_u16(src);
- const uint16x8_t r = vld1q_u16(ref);
-
- uint16x8_t avg = vhaddq_u16(s, r);
- int32x4_t d0_lo =
- vreinterpretq_s32_u32(vsubl_u16(vget_low_u16(avg), offset_vec));
- int32x4_t d0_hi =
- vreinterpretq_s32_u32(vsubl_u16(vget_high_u16(avg), offset_vec));
-
- uint16x8_t d0 = vcombine_u16(vqrshrun_n_s32(d0_lo, ROUND_SHIFT),
- vqrshrun_n_s32(d0_hi, ROUND_SHIFT));
- d0 = vminq_u16(d0, max);
- vst1q_u16(dst, d0);
-
- src += 8;
- ref += 8;
- dst += 8;
- width -= 8;
- } while (width != 0);
-
- src_ptr += src_stride;
- ref_ptr += ref_stride;
- dst_ptr += dst_stride;
- } while (--h != 0);
- }
-}
-
-static INLINE void highbd_12_dist_wtd_comp_avg_neon(
- const uint16_t *src_ptr, int src_stride, uint16_t *dst_ptr, int dst_stride,
- int w, int h, ConvolveParams *conv_params, const int offset, const int bd) {
- CONV_BUF_TYPE *ref_ptr = conv_params->dst;
- const int ref_stride = conv_params->dst_stride;
- const uint32x4_t offset_vec = vdupq_n_u32(offset);
- const uint16x8_t max = vdupq_n_u16((1 << bd) - 1);
- uint16x4_t fwd_offset = vdup_n_u16(conv_params->fwd_offset);
- uint16x4_t bck_offset = vdup_n_u16(conv_params->bck_offset);
-
- // Weighted averaging
- if (w == 4) {
- do {
- const uint16x4_t src = vld1_u16(src_ptr);
- const uint16x4_t ref = vld1_u16(ref_ptr);
-
- uint32x4_t wtd_avg = vmull_u16(ref, fwd_offset);
- wtd_avg = vmlal_u16(wtd_avg, src, bck_offset);
- wtd_avg = vshrq_n_u32(wtd_avg, DIST_PRECISION_BITS);
- int32x4_t d0 = vreinterpretq_s32_u32(vsubq_u32(wtd_avg, offset_vec));
-
- uint16x4_t d0_u16 = vqrshrun_n_s32(d0, ROUND_SHIFT - 2);
- d0_u16 = vmin_u16(d0_u16, vget_low_u16(max));
-
- vst1_u16(dst_ptr, d0_u16);
-
- src_ptr += src_stride;
- dst_ptr += dst_stride;
- ref_ptr += ref_stride;
- } while (--h != 0);
- } else {
- do {
- int width = w;
- const uint16_t *src = src_ptr;
- const uint16_t *ref = ref_ptr;
- uint16_t *dst = dst_ptr;
- do {
- const uint16x8_t s = vld1q_u16(src);
- const uint16x8_t r = vld1q_u16(ref);
-
- uint32x4_t wtd_avg0 = vmull_u16(vget_low_u16(r), fwd_offset);
- wtd_avg0 = vmlal_u16(wtd_avg0, vget_low_u16(s), bck_offset);
- wtd_avg0 = vshrq_n_u32(wtd_avg0, DIST_PRECISION_BITS);
- int32x4_t d0 = vreinterpretq_s32_u32(vsubq_u32(wtd_avg0, offset_vec));
-
- uint32x4_t wtd_avg1 = vmull_u16(vget_high_u16(r), fwd_offset);
- wtd_avg1 = vmlal_u16(wtd_avg1, vget_high_u16(s), bck_offset);
- wtd_avg1 = vshrq_n_u32(wtd_avg1, DIST_PRECISION_BITS);
- int32x4_t d1 = vreinterpretq_s32_u32(vsubq_u32(wtd_avg1, offset_vec));
-
- uint16x8_t d01 = vcombine_u16(vqrshrun_n_s32(d0, ROUND_SHIFT - 2),
- vqrshrun_n_s32(d1, ROUND_SHIFT - 2));
- d01 = vminq_u16(d01, max);
- vst1q_u16(dst, d01);
-
- src += 8;
- ref += 8;
- dst += 8;
- width -= 8;
- } while (width != 0);
- src_ptr += src_stride;
- dst_ptr += dst_stride;
- ref_ptr += ref_stride;
- } while (--h != 0);
- }
-}
-
-static INLINE void highbd_dist_wtd_comp_avg_neon(
- const uint16_t *src_ptr, int src_stride, uint16_t *dst_ptr, int dst_stride,
- int w, int h, ConvolveParams *conv_params, const int offset, const int bd) {
- CONV_BUF_TYPE *ref_ptr = conv_params->dst;
- const int ref_stride = conv_params->dst_stride;
- const uint32x4_t offset_vec = vdupq_n_u32(offset);
- const uint16x8_t max = vdupq_n_u16((1 << bd) - 1);
- uint16x4_t fwd_offset = vdup_n_u16(conv_params->fwd_offset);
- uint16x4_t bck_offset = vdup_n_u16(conv_params->bck_offset);
-
- // Weighted averaging
- if (w == 4) {
- do {
- const uint16x4_t src = vld1_u16(src_ptr);
- const uint16x4_t ref = vld1_u16(ref_ptr);
-
- uint32x4_t wtd_avg = vmull_u16(ref, fwd_offset);
- wtd_avg = vmlal_u16(wtd_avg, src, bck_offset);
- wtd_avg = vshrq_n_u32(wtd_avg, DIST_PRECISION_BITS);
- int32x4_t d0 = vreinterpretq_s32_u32(vsubq_u32(wtd_avg, offset_vec));
-
- uint16x4_t d0_u16 = vqrshrun_n_s32(d0, ROUND_SHIFT);
- d0_u16 = vmin_u16(d0_u16, vget_low_u16(max));
-
- vst1_u16(dst_ptr, d0_u16);
-
- src_ptr += src_stride;
- dst_ptr += dst_stride;
- ref_ptr += ref_stride;
- } while (--h != 0);
- } else {
- do {
- int width = w;
- const uint16_t *src = src_ptr;
- const uint16_t *ref = ref_ptr;
- uint16_t *dst = dst_ptr;
- do {
- const uint16x8_t s = vld1q_u16(src);
- const uint16x8_t r = vld1q_u16(ref);
-
- uint32x4_t wtd_avg0 = vmull_u16(vget_low_u16(r), fwd_offset);
- wtd_avg0 = vmlal_u16(wtd_avg0, vget_low_u16(s), bck_offset);
- wtd_avg0 = vshrq_n_u32(wtd_avg0, DIST_PRECISION_BITS);
- int32x4_t d0 = vreinterpretq_s32_u32(vsubq_u32(wtd_avg0, offset_vec));
-
- uint32x4_t wtd_avg1 = vmull_u16(vget_high_u16(r), fwd_offset);
- wtd_avg1 = vmlal_u16(wtd_avg1, vget_high_u16(s), bck_offset);
- wtd_avg1 = vshrq_n_u32(wtd_avg1, DIST_PRECISION_BITS);
- int32x4_t d1 = vreinterpretq_s32_u32(vsubq_u32(wtd_avg1, offset_vec));
-
- uint16x8_t d01 = vcombine_u16(vqrshrun_n_s32(d0, ROUND_SHIFT),
- vqrshrun_n_s32(d1, ROUND_SHIFT));
- d01 = vminq_u16(d01, max);
- vst1q_u16(dst, d01);
-
- src += 8;
- ref += 8;
- dst += 8;
- width -= 8;
- } while (width != 0);
- src_ptr += src_stride;
- dst_ptr += dst_stride;
- ref_ptr += ref_stride;
- } while (--h != 0);
- }
-}
-
static INLINE uint16x4_t highbd_12_convolve6_4(
const int16x4_t s0, const int16x4_t s1, const int16x4_t s2,
const int16x4_t s3, const int16x4_t s4, const int16x4_t s5,
@@ -743,9 +486,6 @@ void av1_highbd_dist_wtd_convolve_x_neon(
const int im_stride = MAX_SB_SIZE;
const int horiz_offset = filter_params_x->taps / 2 - 1;
assert(FILTER_BITS == COMPOUND_ROUND1_BITS);
- const int offset_bits = bd + 2 * FILTER_BITS - conv_params->round_0;
- const int offset_avg = (1 << (offset_bits - conv_params->round_1)) +
- (1 << (offset_bits - conv_params->round_1 - 1));
const int offset_convolve = (1 << (conv_params->round_0 - 1)) +
(1 << (bd + FILTER_BITS)) +
(1 << (bd + FILTER_BITS - 1));
@@ -768,10 +508,10 @@ void av1_highbd_dist_wtd_convolve_x_neon(
}
if (conv_params->use_dist_wtd_comp_avg) {
highbd_12_dist_wtd_comp_avg_neon(im_block, im_stride, dst, dst_stride,
- w, h, conv_params, offset_avg, bd);
+ w, h, conv_params);
} else {
highbd_12_comp_avg_neon(im_block, im_stride, dst, dst_stride, w, h,
- conv_params, offset_avg, bd);
+ conv_params);
}
} else {
if (x_filter_taps <= 6 && w != 4) {
@@ -795,10 +535,10 @@ void av1_highbd_dist_wtd_convolve_x_neon(
}
if (conv_params->use_dist_wtd_comp_avg) {
highbd_dist_wtd_comp_avg_neon(im_block, im_stride, dst, dst_stride, w,
- h, conv_params, offset_avg, bd);
+ h, conv_params, bd);
} else {
highbd_comp_avg_neon(im_block, im_stride, dst, dst_stride, w, h,
- conv_params, offset_avg, bd);
+ conv_params, bd);
}
} else {
if (x_filter_taps <= 6 && w != 4) {
@@ -971,6 +711,212 @@ static INLINE void highbd_dist_wtd_convolve_y_6tap_neon(
}
}
+static INLINE uint16x4_t highbd_12_convolve4_4(
+ const int16x4_t s0, const int16x4_t s1, const int16x4_t s2,
+ const int16x4_t s3, const int16x4_t filter, const int32x4_t offset) {
+ int32x4_t sum = vmlal_lane_s16(offset, s0, filter, 0);
+ sum = vmlal_lane_s16(sum, s1, filter, 1);
+ sum = vmlal_lane_s16(sum, s2, filter, 2);
+ sum = vmlal_lane_s16(sum, s3, filter, 3);
+
+ return vqshrun_n_s32(sum, ROUND0_BITS + 2);
+}
+
+static INLINE uint16x8_t highbd_12_convolve4_8(
+ const int16x8_t s0, const int16x8_t s1, const int16x8_t s2,
+ const int16x8_t s3, const int16x4_t filter, const int32x4_t offset) {
+ int32x4_t sum0 = vmlal_lane_s16(offset, vget_low_s16(s0), filter, 0);
+ sum0 = vmlal_lane_s16(sum0, vget_low_s16(s1), filter, 1);
+ sum0 = vmlal_lane_s16(sum0, vget_low_s16(s2), filter, 2);
+ sum0 = vmlal_lane_s16(sum0, vget_low_s16(s3), filter, 3);
+
+ int32x4_t sum1 = vmlal_lane_s16(offset, vget_high_s16(s0), filter, 0);
+ sum1 = vmlal_lane_s16(sum1, vget_high_s16(s1), filter, 1);
+ sum1 = vmlal_lane_s16(sum1, vget_high_s16(s2), filter, 2);
+ sum1 = vmlal_lane_s16(sum1, vget_high_s16(s3), filter, 3);
+
+ return vcombine_u16(vqshrun_n_s32(sum0, ROUND0_BITS + 2),
+ vqshrun_n_s32(sum1, ROUND0_BITS + 2));
+}
+
+static INLINE void highbd_12_dist_wtd_convolve_y_4tap_neon(
+ const uint16_t *src_ptr, int src_stride, uint16_t *dst_ptr, int dst_stride,
+ int w, int h, const int16_t *y_filter_ptr, const int offset) {
+ const int16x4_t y_filter = vld1_s16(y_filter_ptr + 2);
+ const int32x4_t offset_vec = vdupq_n_s32(offset);
+
+ if (w == 4) {
+ const int16_t *s = (const int16_t *)src_ptr;
+ uint16_t *d = dst_ptr;
+
+ int16x4_t s0, s1, s2;
+ load_s16_4x3(s, src_stride, &s0, &s1, &s2);
+ s += 3 * src_stride;
+
+ do {
+ int16x4_t s3, s4, s5, s6;
+ load_s16_4x4(s, src_stride, &s3, &s4, &s5, &s6);
+
+ uint16x4_t d0 =
+ highbd_12_convolve4_4(s0, s1, s2, s3, y_filter, offset_vec);
+ uint16x4_t d1 =
+ highbd_12_convolve4_4(s1, s2, s3, s4, y_filter, offset_vec);
+ uint16x4_t d2 =
+ highbd_12_convolve4_4(s2, s3, s4, s5, y_filter, offset_vec);
+ uint16x4_t d3 =
+ highbd_12_convolve4_4(s3, s4, s5, s6, y_filter, offset_vec);
+
+ store_u16_4x4(d, dst_stride, d0, d1, d2, d3);
+
+ s0 = s4;
+ s1 = s5;
+ s2 = s6;
+
+ s += 4 * src_stride;
+ d += 4 * dst_stride;
+ h -= 4;
+ } while (h != 0);
+ } else {
+ do {
+ int height = h;
+ const int16_t *s = (const int16_t *)src_ptr;
+ uint16_t *d = dst_ptr;
+
+ int16x8_t s0, s1, s2;
+ load_s16_8x3(s, src_stride, &s0, &s1, &s2);
+ s += 3 * src_stride;
+
+ do {
+ int16x8_t s3, s4, s5, s6;
+ load_s16_8x4(s, src_stride, &s3, &s4, &s5, &s6);
+
+ uint16x8_t d0 =
+ highbd_12_convolve4_8(s0, s1, s2, s3, y_filter, offset_vec);
+ uint16x8_t d1 =
+ highbd_12_convolve4_8(s1, s2, s3, s4, y_filter, offset_vec);
+ uint16x8_t d2 =
+ highbd_12_convolve4_8(s2, s3, s4, s5, y_filter, offset_vec);
+ uint16x8_t d3 =
+ highbd_12_convolve4_8(s3, s4, s5, s6, y_filter, offset_vec);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ s0 = s4;
+ s1 = s5;
+ s2 = s6;
+
+ s += 4 * src_stride;
+ d += 4 * dst_stride;
+ height -= 4;
+ } while (height != 0);
+ src_ptr += 8;
+ dst_ptr += 8;
+ w -= 8;
+ } while (w != 0);
+ }
+}
+
+static INLINE uint16x4_t highbd_convolve4_4(
+ const int16x4_t s0, const int16x4_t s1, const int16x4_t s2,
+ const int16x4_t s3, const int16x4_t filter, const int32x4_t offset) {
+ int32x4_t sum = vmlal_lane_s16(offset, s0, filter, 0);
+ sum = vmlal_lane_s16(sum, s1, filter, 1);
+ sum = vmlal_lane_s16(sum, s2, filter, 2);
+ sum = vmlal_lane_s16(sum, s3, filter, 3);
+
+ return vqshrun_n_s32(sum, ROUND0_BITS);
+}
+
+static INLINE uint16x8_t highbd_convolve4_8(
+ const int16x8_t s0, const int16x8_t s1, const int16x8_t s2,
+ const int16x8_t s3, const int16x4_t filter, const int32x4_t offset) {
+ int32x4_t sum0 = vmlal_lane_s16(offset, vget_low_s16(s0), filter, 0);
+ sum0 = vmlal_lane_s16(sum0, vget_low_s16(s1), filter, 1);
+ sum0 = vmlal_lane_s16(sum0, vget_low_s16(s2), filter, 2);
+ sum0 = vmlal_lane_s16(sum0, vget_low_s16(s3), filter, 3);
+
+ int32x4_t sum1 = vmlal_lane_s16(offset, vget_high_s16(s0), filter, 0);
+ sum1 = vmlal_lane_s16(sum1, vget_high_s16(s1), filter, 1);
+ sum1 = vmlal_lane_s16(sum1, vget_high_s16(s2), filter, 2);
+ sum1 = vmlal_lane_s16(sum1, vget_high_s16(s3), filter, 3);
+
+ return vcombine_u16(vqshrun_n_s32(sum0, ROUND0_BITS),
+ vqshrun_n_s32(sum1, ROUND0_BITS));
+}
+
+static INLINE void highbd_dist_wtd_convolve_y_4tap_neon(
+ const uint16_t *src_ptr, int src_stride, uint16_t *dst_ptr, int dst_stride,
+ int w, int h, const int16_t *y_filter_ptr, const int offset) {
+ const int16x4_t y_filter = vld1_s16(y_filter_ptr + 2);
+ const int32x4_t offset_vec = vdupq_n_s32(offset);
+
+ if (w == 4) {
+ const int16_t *s = (const int16_t *)src_ptr;
+ uint16_t *d = dst_ptr;
+
+ int16x4_t s0, s1, s2;
+ load_s16_4x3(s, src_stride, &s0, &s1, &s2);
+ s += 3 * src_stride;
+
+ do {
+ int16x4_t s3, s4, s5, s6;
+ load_s16_4x4(s, src_stride, &s3, &s4, &s5, &s6);
+
+ uint16x4_t d0 = highbd_convolve4_4(s0, s1, s2, s3, y_filter, offset_vec);
+ uint16x4_t d1 = highbd_convolve4_4(s1, s2, s3, s4, y_filter, offset_vec);
+ uint16x4_t d2 = highbd_convolve4_4(s2, s3, s4, s5, y_filter, offset_vec);
+ uint16x4_t d3 = highbd_convolve4_4(s3, s4, s5, s6, y_filter, offset_vec);
+
+ store_u16_4x4(d, dst_stride, d0, d1, d2, d3);
+
+ s0 = s4;
+ s1 = s5;
+ s2 = s6;
+
+ s += 4 * src_stride;
+ d += 4 * dst_stride;
+ h -= 4;
+ } while (h != 0);
+ } else {
+ do {
+ int height = h;
+ const int16_t *s = (const int16_t *)src_ptr;
+ uint16_t *d = dst_ptr;
+
+ int16x8_t s0, s1, s2;
+ load_s16_8x3(s, src_stride, &s0, &s1, &s2);
+ s += 3 * src_stride;
+
+ do {
+ int16x8_t s3, s4, s5, s6;
+ load_s16_8x4(s, src_stride, &s3, &s4, &s5, &s6);
+
+ uint16x8_t d0 =
+ highbd_convolve4_8(s0, s1, s2, s3, y_filter, offset_vec);
+ uint16x8_t d1 =
+ highbd_convolve4_8(s1, s2, s3, s4, y_filter, offset_vec);
+ uint16x8_t d2 =
+ highbd_convolve4_8(s2, s3, s4, s5, y_filter, offset_vec);
+ uint16x8_t d3 =
+ highbd_convolve4_8(s3, s4, s5, s6, y_filter, offset_vec);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ s0 = s4;
+ s1 = s5;
+ s2 = s6;
+
+ s += 4 * src_stride;
+ d += 4 * dst_stride;
+ height -= 4;
+ } while (height != 0);
+ src_ptr += 8;
+ dst_ptr += 8;
+ w -= 8;
+ } while (w != 0);
+ }
+}
+
static INLINE void highbd_12_dist_wtd_convolve_y_8tap_neon(
const uint16_t *src_ptr, int src_stride, uint16_t *dst_ptr, int dst_stride,
int w, int h, const int16_t *y_filter_ptr, const int offset) {
@@ -1148,9 +1094,6 @@ void av1_highbd_dist_wtd_convolve_y_neon(
const int im_stride = MAX_SB_SIZE;
const int vert_offset = filter_params_y->taps / 2 - 1;
assert(FILTER_BITS == COMPOUND_ROUND1_BITS);
- const int offset_bits = bd + 2 * FILTER_BITS - conv_params->round_0;
- const int round_offset_avg = (1 << (offset_bits - conv_params->round_1)) +
- (1 << (offset_bits - conv_params->round_1 - 1));
const int round_offset_conv = (1 << (conv_params->round_0 - 1)) +
(1 << (bd + FILTER_BITS)) +
(1 << (bd + FILTER_BITS - 1));
@@ -1162,7 +1105,11 @@ void av1_highbd_dist_wtd_convolve_y_neon(
if (bd == 12) {
if (conv_params->do_average) {
- if (y_filter_taps <= 6) {
+ if (y_filter_taps <= 4) {
+ highbd_12_dist_wtd_convolve_y_4tap_neon(
+ src + 2 * src_stride, src_stride, im_block, im_stride, w, h,
+ y_filter_ptr, round_offset_conv);
+ } else if (y_filter_taps == 6) {
highbd_12_dist_wtd_convolve_y_6tap_neon(
src + src_stride, src_stride, im_block, im_stride, w, h,
y_filter_ptr, round_offset_conv);
@@ -1173,14 +1120,17 @@ void av1_highbd_dist_wtd_convolve_y_neon(
}
if (conv_params->use_dist_wtd_comp_avg) {
highbd_12_dist_wtd_comp_avg_neon(im_block, im_stride, dst, dst_stride,
- w, h, conv_params, round_offset_avg,
- bd);
+ w, h, conv_params);
} else {
highbd_12_comp_avg_neon(im_block, im_stride, dst, dst_stride, w, h,
- conv_params, round_offset_avg, bd);
+ conv_params);
}
} else {
- if (y_filter_taps <= 6) {
+ if (y_filter_taps <= 4) {
+ highbd_12_dist_wtd_convolve_y_4tap_neon(
+ src + 2 * src_stride, src_stride, dst16, dst16_stride, w, h,
+ y_filter_ptr, round_offset_conv);
+ } else if (y_filter_taps == 6) {
highbd_12_dist_wtd_convolve_y_6tap_neon(
src + src_stride, src_stride, dst16, dst16_stride, w, h,
y_filter_ptr, round_offset_conv);
@@ -1192,7 +1142,11 @@ void av1_highbd_dist_wtd_convolve_y_neon(
}
} else {
if (conv_params->do_average) {
- if (y_filter_taps <= 6) {
+ if (y_filter_taps <= 4) {
+ highbd_dist_wtd_convolve_y_4tap_neon(src + 2 * src_stride, src_stride,
+ im_block, im_stride, w, h,
+ y_filter_ptr, round_offset_conv);
+ } else if (y_filter_taps == 6) {
highbd_dist_wtd_convolve_y_6tap_neon(src + src_stride, src_stride,
im_block, im_stride, w, h,
y_filter_ptr, round_offset_conv);
@@ -1203,13 +1157,17 @@ void av1_highbd_dist_wtd_convolve_y_neon(
}
if (conv_params->use_dist_wtd_comp_avg) {
highbd_dist_wtd_comp_avg_neon(im_block, im_stride, dst, dst_stride, w,
- h, conv_params, round_offset_avg, bd);
+ h, conv_params, bd);
} else {
highbd_comp_avg_neon(im_block, im_stride, dst, dst_stride, w, h,
- conv_params, round_offset_avg, bd);
+ conv_params, bd);
}
} else {
- if (y_filter_taps <= 6) {
+ if (y_filter_taps <= 4) {
+ highbd_dist_wtd_convolve_y_4tap_neon(src + 2 * src_stride, src_stride,
+ dst16, dst16_stride, w, h,
+ y_filter_ptr, round_offset_conv);
+ } else if (y_filter_taps == 6) {
highbd_dist_wtd_convolve_y_6tap_neon(src + src_stride, src_stride,
dst16, dst16_stride, w, h,
y_filter_ptr, round_offset_conv);
@@ -1285,18 +1243,18 @@ void av1_highbd_dist_wtd_convolve_2d_copy_neon(const uint16_t *src,
if (conv_params->use_dist_wtd_comp_avg) {
if (bd == 12) {
highbd_12_dist_wtd_comp_avg_neon(im_block, im_stride, dst, dst_stride,
- w, h, conv_params, round_offset, bd);
+ w, h, conv_params);
} else {
highbd_dist_wtd_comp_avg_neon(im_block, im_stride, dst, dst_stride, w,
- h, conv_params, round_offset, bd);
+ h, conv_params, bd);
}
} else {
if (bd == 12) {
highbd_12_comp_avg_neon(im_block, im_stride, dst, dst_stride, w, h,
- conv_params, round_offset, bd);
+ conv_params);
} else {
highbd_comp_avg_neon(im_block, im_stride, dst, dst_stride, w, h,
- conv_params, round_offset, bd);
+ conv_params, bd);
}
}
}
@@ -1949,9 +1907,6 @@ void av1_highbd_dist_wtd_convolve_2d_neon(
(1 << (bd + FILTER_BITS - 1)) + (1 << (conv_params->round_0 - 1));
const int y_offset_bits = bd + 2 * FILTER_BITS - conv_params->round_0;
const int round_offset_conv_y = (1 << y_offset_bits);
- const int round_offset_avg =
- ((1 << (y_offset_bits - conv_params->round_1)) +
- (1 << (y_offset_bits - conv_params->round_1 - 1)));
const uint16_t *src_ptr = src - vert_offset * src_stride - horiz_offset;
@@ -2012,19 +1967,18 @@ void av1_highbd_dist_wtd_convolve_2d_neon(
if (conv_params->use_dist_wtd_comp_avg) {
if (bd == 12) {
highbd_12_dist_wtd_comp_avg_neon(im_block2, im_stride, dst, dst_stride,
- w, h, conv_params, round_offset_avg,
- bd);
+ w, h, conv_params);
} else {
highbd_dist_wtd_comp_avg_neon(im_block2, im_stride, dst, dst_stride, w,
- h, conv_params, round_offset_avg, bd);
+ h, conv_params, bd);
}
} else {
if (bd == 12) {
highbd_12_comp_avg_neon(im_block2, im_stride, dst, dst_stride, w, h,
- conv_params, round_offset_avg, bd);
+ conv_params);
} else {
highbd_comp_avg_neon(im_block2, im_stride, dst, dst_stride, w, h,
- conv_params, round_offset_avg, bd);
+ conv_params, bd);
}
}
}
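The comp-avg helpers removed here (and re-added as a shared header below) all implement the same per-sample arithmetic, now deriving the compound offset from the bit depth internally instead of taking it as a parameter. A scalar sketch of the non-weighted path, assuming the FILTER_BITS, ROUND0_BITS and COMPOUND_ROUND1_BITS constants from av1/common:

// Illustrative only: scalar model of highbd_comp_avg_neon for one sample.
static uint16_t comp_avg_scalar(uint16_t src, uint16_t ref, int bd) {
  const int offset_bits = bd + 2 * FILTER_BITS - ROUND0_BITS;
  const int offset = (1 << (offset_bits - COMPOUND_ROUND1_BITS)) +
                     (1 << (offset_bits - COMPOUND_ROUND1_BITS - 1));
  const int shift = 2 * FILTER_BITS - ROUND0_BITS - COMPOUND_ROUND1_BITS;
  int32_t d = ((src + ref) >> 1) - offset;  // vhadd_u16 then vsubl_u16
  d = (d + (1 << (shift - 1))) >> shift;    // vqrshrun_n_s32 (round, narrow)
  if (d < 0) d = 0;                         // unsigned saturation
  const int max = (1 << bd) - 1;            // vminq_u16 against (1 << bd) - 1
  return (uint16_t)(d > max ? max : d);
}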
diff --git a/third_party/aom/av1/common/arm/highbd_compound_convolve_neon.h b/third_party/aom/av1/common/arm/highbd_compound_convolve_neon.h
new file mode 100644
index 0000000000..c9344f3adf
--- /dev/null
+++ b/third_party/aom/av1/common/arm/highbd_compound_convolve_neon.h
@@ -0,0 +1,293 @@
+/*
+ * Copyright (c) 2024, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <assert.h>
+#include <arm_neon.h>
+
+#include "config/aom_config.h"
+#include "config/av1_rtcd.h"
+
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_dsp/arm/mem_neon.h"
+#include "aom_ports/mem.h"
+
+#define ROUND_SHIFT 2 * FILTER_BITS - ROUND0_BITS - COMPOUND_ROUND1_BITS
+
+static INLINE void highbd_12_comp_avg_neon(const uint16_t *src_ptr,
+ int src_stride, uint16_t *dst_ptr,
+ int dst_stride, int w, int h,
+ ConvolveParams *conv_params) {
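+  // Both compound inputs carry the same rounding offset added during the
+  // convolution pass, so their average carries it once; remove it here before
+  // the final shift back to pixel range.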
+ const int offset_bits = 12 + 2 * FILTER_BITS - ROUND0_BITS - 2;
+ const int offset = (1 << (offset_bits - COMPOUND_ROUND1_BITS)) +
+ (1 << (offset_bits - COMPOUND_ROUND1_BITS - 1));
+
+ CONV_BUF_TYPE *ref_ptr = conv_params->dst;
+ const int ref_stride = conv_params->dst_stride;
+ const uint16x4_t offset_vec = vdup_n_u16((uint16_t)offset);
+ const uint16x8_t max = vdupq_n_u16((1 << 12) - 1);
+
+ if (w == 4) {
+ do {
+ const uint16x4_t src = vld1_u16(src_ptr);
+ const uint16x4_t ref = vld1_u16(ref_ptr);
+
+ uint16x4_t avg = vhadd_u16(src, ref);
+ int32x4_t d0 = vreinterpretq_s32_u32(vsubl_u16(avg, offset_vec));
+
+ uint16x4_t d0_u16 = vqrshrun_n_s32(d0, ROUND_SHIFT - 2);
+ d0_u16 = vmin_u16(d0_u16, vget_low_u16(max));
+
+ vst1_u16(dst_ptr, d0_u16);
+
+ src_ptr += src_stride;
+ ref_ptr += ref_stride;
+ dst_ptr += dst_stride;
+ } while (--h != 0);
+ } else {
+ do {
+ int width = w;
+ const uint16_t *src = src_ptr;
+ const uint16_t *ref = ref_ptr;
+ uint16_t *dst = dst_ptr;
+ do {
+ const uint16x8_t s = vld1q_u16(src);
+ const uint16x8_t r = vld1q_u16(ref);
+
+ uint16x8_t avg = vhaddq_u16(s, r);
+ int32x4_t d0_lo =
+ vreinterpretq_s32_u32(vsubl_u16(vget_low_u16(avg), offset_vec));
+ int32x4_t d0_hi =
+ vreinterpretq_s32_u32(vsubl_u16(vget_high_u16(avg), offset_vec));
+
+ uint16x8_t d0 = vcombine_u16(vqrshrun_n_s32(d0_lo, ROUND_SHIFT - 2),
+ vqrshrun_n_s32(d0_hi, ROUND_SHIFT - 2));
+ d0 = vminq_u16(d0, max);
+ vst1q_u16(dst, d0);
+
+ src += 8;
+ ref += 8;
+ dst += 8;
+ width -= 8;
+ } while (width != 0);
+
+ src_ptr += src_stride;
+ ref_ptr += ref_stride;
+ dst_ptr += dst_stride;
+ } while (--h != 0);
+ }
+}
+
+static INLINE void highbd_comp_avg_neon(const uint16_t *src_ptr, int src_stride,
+ uint16_t *dst_ptr, int dst_stride,
+ int w, int h,
+ ConvolveParams *conv_params,
+ const int bd) {
+ const int offset_bits = bd + 2 * FILTER_BITS - ROUND0_BITS;
+ const int offset = (1 << (offset_bits - COMPOUND_ROUND1_BITS)) +
+ (1 << (offset_bits - COMPOUND_ROUND1_BITS - 1));
+
+ CONV_BUF_TYPE *ref_ptr = conv_params->dst;
+ const int ref_stride = conv_params->dst_stride;
+ const uint16x4_t offset_vec = vdup_n_u16((uint16_t)offset);
+ const uint16x8_t max = vdupq_n_u16((1 << bd) - 1);
+
+ if (w == 4) {
+ do {
+ const uint16x4_t src = vld1_u16(src_ptr);
+ const uint16x4_t ref = vld1_u16(ref_ptr);
+
+ uint16x4_t avg = vhadd_u16(src, ref);
+ int32x4_t d0 = vreinterpretq_s32_u32(vsubl_u16(avg, offset_vec));
+
+ uint16x4_t d0_u16 = vqrshrun_n_s32(d0, ROUND_SHIFT);
+ d0_u16 = vmin_u16(d0_u16, vget_low_u16(max));
+
+ vst1_u16(dst_ptr, d0_u16);
+
+ src_ptr += src_stride;
+ ref_ptr += ref_stride;
+ dst_ptr += dst_stride;
+ } while (--h != 0);
+ } else {
+ do {
+ int width = w;
+ const uint16_t *src = src_ptr;
+ const uint16_t *ref = ref_ptr;
+ uint16_t *dst = dst_ptr;
+ do {
+ const uint16x8_t s = vld1q_u16(src);
+ const uint16x8_t r = vld1q_u16(ref);
+
+ uint16x8_t avg = vhaddq_u16(s, r);
+ int32x4_t d0_lo =
+ vreinterpretq_s32_u32(vsubl_u16(vget_low_u16(avg), offset_vec));
+ int32x4_t d0_hi =
+ vreinterpretq_s32_u32(vsubl_u16(vget_high_u16(avg), offset_vec));
+
+ uint16x8_t d0 = vcombine_u16(vqrshrun_n_s32(d0_lo, ROUND_SHIFT),
+ vqrshrun_n_s32(d0_hi, ROUND_SHIFT));
+ d0 = vminq_u16(d0, max);
+ vst1q_u16(dst, d0);
+
+ src += 8;
+ ref += 8;
+ dst += 8;
+ width -= 8;
+ } while (width != 0);
+
+ src_ptr += src_stride;
+ ref_ptr += ref_stride;
+ dst_ptr += dst_stride;
+ } while (--h != 0);
+ }
+}
+
+static INLINE void highbd_12_dist_wtd_comp_avg_neon(
+ const uint16_t *src_ptr, int src_stride, uint16_t *dst_ptr, int dst_stride,
+ int w, int h, ConvolveParams *conv_params) {
+ const int offset_bits = 12 + 2 * FILTER_BITS - ROUND0_BITS - 2;
+ const int offset = (1 << (offset_bits - COMPOUND_ROUND1_BITS)) +
+ (1 << (offset_bits - COMPOUND_ROUND1_BITS - 1));
+
+ CONV_BUF_TYPE *ref_ptr = conv_params->dst;
+ const int ref_stride = conv_params->dst_stride;
+ const uint32x4_t offset_vec = vdupq_n_u32(offset);
+ const uint16x8_t max = vdupq_n_u16((1 << 12) - 1);
+ uint16x4_t fwd_offset = vdup_n_u16(conv_params->fwd_offset);
+ uint16x4_t bck_offset = vdup_n_u16(conv_params->bck_offset);
+
+ // Weighted averaging
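+  // dst = (ref * fwd_offset + src * bck_offset) >> DIST_PRECISION_BITS, with
+  // the compound rounding offset removed before the final shift back to
+  // pixel range.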
+ if (w == 4) {
+ do {
+ const uint16x4_t src = vld1_u16(src_ptr);
+ const uint16x4_t ref = vld1_u16(ref_ptr);
+
+ uint32x4_t wtd_avg = vmull_u16(ref, fwd_offset);
+ wtd_avg = vmlal_u16(wtd_avg, src, bck_offset);
+ wtd_avg = vshrq_n_u32(wtd_avg, DIST_PRECISION_BITS);
+ int32x4_t d0 = vreinterpretq_s32_u32(vsubq_u32(wtd_avg, offset_vec));
+
+ uint16x4_t d0_u16 = vqrshrun_n_s32(d0, ROUND_SHIFT - 2);
+ d0_u16 = vmin_u16(d0_u16, vget_low_u16(max));
+
+ vst1_u16(dst_ptr, d0_u16);
+
+ src_ptr += src_stride;
+ dst_ptr += dst_stride;
+ ref_ptr += ref_stride;
+ } while (--h != 0);
+ } else {
+ do {
+ int width = w;
+ const uint16_t *src = src_ptr;
+ const uint16_t *ref = ref_ptr;
+ uint16_t *dst = dst_ptr;
+ do {
+ const uint16x8_t s = vld1q_u16(src);
+ const uint16x8_t r = vld1q_u16(ref);
+
+ uint32x4_t wtd_avg0 = vmull_u16(vget_low_u16(r), fwd_offset);
+ wtd_avg0 = vmlal_u16(wtd_avg0, vget_low_u16(s), bck_offset);
+ wtd_avg0 = vshrq_n_u32(wtd_avg0, DIST_PRECISION_BITS);
+ int32x4_t d0 = vreinterpretq_s32_u32(vsubq_u32(wtd_avg0, offset_vec));
+
+ uint32x4_t wtd_avg1 = vmull_u16(vget_high_u16(r), fwd_offset);
+ wtd_avg1 = vmlal_u16(wtd_avg1, vget_high_u16(s), bck_offset);
+ wtd_avg1 = vshrq_n_u32(wtd_avg1, DIST_PRECISION_BITS);
+ int32x4_t d1 = vreinterpretq_s32_u32(vsubq_u32(wtd_avg1, offset_vec));
+
+ uint16x8_t d01 = vcombine_u16(vqrshrun_n_s32(d0, ROUND_SHIFT - 2),
+ vqrshrun_n_s32(d1, ROUND_SHIFT - 2));
+ d01 = vminq_u16(d01, max);
+ vst1q_u16(dst, d01);
+
+ src += 8;
+ ref += 8;
+ dst += 8;
+ width -= 8;
+ } while (width != 0);
+ src_ptr += src_stride;
+ dst_ptr += dst_stride;
+ ref_ptr += ref_stride;
+ } while (--h != 0);
+ }
+}
+
+static INLINE void highbd_dist_wtd_comp_avg_neon(
+ const uint16_t *src_ptr, int src_stride, uint16_t *dst_ptr, int dst_stride,
+ int w, int h, ConvolveParams *conv_params, const int bd) {
+ const int offset_bits = bd + 2 * FILTER_BITS - ROUND0_BITS;
+ const int offset = (1 << (offset_bits - COMPOUND_ROUND1_BITS)) +
+ (1 << (offset_bits - COMPOUND_ROUND1_BITS - 1));
+
+ CONV_BUF_TYPE *ref_ptr = conv_params->dst;
+ const int ref_stride = conv_params->dst_stride;
+ const uint32x4_t offset_vec = vdupq_n_u32(offset);
+ const uint16x8_t max = vdupq_n_u16((1 << bd) - 1);
+ uint16x4_t fwd_offset = vdup_n_u16(conv_params->fwd_offset);
+ uint16x4_t bck_offset = vdup_n_u16(conv_params->bck_offset);
+
+ // Weighted averaging
+ if (w == 4) {
+ do {
+ const uint16x4_t src = vld1_u16(src_ptr);
+ const uint16x4_t ref = vld1_u16(ref_ptr);
+
+ uint32x4_t wtd_avg = vmull_u16(ref, fwd_offset);
+ wtd_avg = vmlal_u16(wtd_avg, src, bck_offset);
+ wtd_avg = vshrq_n_u32(wtd_avg, DIST_PRECISION_BITS);
+ int32x4_t d0 = vreinterpretq_s32_u32(vsubq_u32(wtd_avg, offset_vec));
+
+ uint16x4_t d0_u16 = vqrshrun_n_s32(d0, ROUND_SHIFT);
+ d0_u16 = vmin_u16(d0_u16, vget_low_u16(max));
+
+ vst1_u16(dst_ptr, d0_u16);
+
+ src_ptr += src_stride;
+ dst_ptr += dst_stride;
+ ref_ptr += ref_stride;
+ } while (--h != 0);
+ } else {
+ do {
+ int width = w;
+ const uint16_t *src = src_ptr;
+ const uint16_t *ref = ref_ptr;
+ uint16_t *dst = dst_ptr;
+ do {
+ const uint16x8_t s = vld1q_u16(src);
+ const uint16x8_t r = vld1q_u16(ref);
+
+ uint32x4_t wtd_avg0 = vmull_u16(vget_low_u16(r), fwd_offset);
+ wtd_avg0 = vmlal_u16(wtd_avg0, vget_low_u16(s), bck_offset);
+ wtd_avg0 = vshrq_n_u32(wtd_avg0, DIST_PRECISION_BITS);
+ int32x4_t d0 = vreinterpretq_s32_u32(vsubq_u32(wtd_avg0, offset_vec));
+
+ uint32x4_t wtd_avg1 = vmull_u16(vget_high_u16(r), fwd_offset);
+ wtd_avg1 = vmlal_u16(wtd_avg1, vget_high_u16(s), bck_offset);
+ wtd_avg1 = vshrq_n_u32(wtd_avg1, DIST_PRECISION_BITS);
+ int32x4_t d1 = vreinterpretq_s32_u32(vsubq_u32(wtd_avg1, offset_vec));
+
+ uint16x8_t d01 = vcombine_u16(vqrshrun_n_s32(d0, ROUND_SHIFT),
+ vqrshrun_n_s32(d1, ROUND_SHIFT));
+ d01 = vminq_u16(d01, max);
+ vst1q_u16(dst, d01);
+
+ src += 8;
+ ref += 8;
+ dst += 8;
+ width -= 8;
+ } while (width != 0);
+ src_ptr += src_stride;
+ dst_ptr += dst_stride;
+ ref_ptr += ref_stride;
+ } while (--h != 0);
+ }
+}
diff --git a/third_party/aom/av1/common/arm/highbd_compound_convolve_sve2.c b/third_party/aom/av1/common/arm/highbd_compound_convolve_sve2.c
new file mode 100644
index 0000000000..1d6c9b4faf
--- /dev/null
+++ b/third_party/aom/av1/common/arm/highbd_compound_convolve_sve2.c
@@ -0,0 +1,1555 @@
+/*
+ * Copyright (c) 2024, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <assert.h>
+#include <arm_neon.h>
+
+#include "config/aom_config.h"
+#include "config/av1_rtcd.h"
+
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_dsp/arm/aom_neon_sve_bridge.h"
+#include "aom_dsp/arm/aom_neon_sve2_bridge.h"
+#include "aom_dsp/arm/mem_neon.h"
+#include "aom_ports/mem.h"
+#include "av1/common/convolve.h"
+#include "av1/common/filter.h"
+#include "av1/common/filter.h"
+#include "av1/common/arm/highbd_compound_convolve_neon.h"
+#include "av1/common/arm/highbd_convolve_neon.h"
+#include "av1/common/arm/highbd_convolve_sve2.h"
+
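+// Permute indices that gather the sliding 4-sample windows consumed by the
+// 4-tap dot-product kernels below.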
+DECLARE_ALIGNED(16, static const uint16_t, kDotProdTbl[32]) = {
+ 0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6,
+ 4, 5, 6, 7, 5, 6, 7, 0, 6, 7, 0, 1, 7, 0, 1, 2,
+};
+
+static INLINE uint16x8_t highbd_12_convolve8_8_x(int16x8_t s0[8],
+ int16x8_t filter,
+ int64x2_t offset) {
+ int64x2_t sum[8];
+ sum[0] = aom_sdotq_s16(offset, s0[0], filter);
+ sum[1] = aom_sdotq_s16(offset, s0[1], filter);
+ sum[2] = aom_sdotq_s16(offset, s0[2], filter);
+ sum[3] = aom_sdotq_s16(offset, s0[3], filter);
+ sum[4] = aom_sdotq_s16(offset, s0[4], filter);
+ sum[5] = aom_sdotq_s16(offset, s0[5], filter);
+ sum[6] = aom_sdotq_s16(offset, s0[6], filter);
+ sum[7] = aom_sdotq_s16(offset, s0[7], filter);
+
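+  // Pairwise-add the per-lane partial sums so that each 64-bit lane holds a
+  // complete 8-tap dot product (plus the offset) for one output pixel.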
+ sum[0] = vpaddq_s64(sum[0], sum[1]);
+ sum[2] = vpaddq_s64(sum[2], sum[3]);
+ sum[4] = vpaddq_s64(sum[4], sum[5]);
+ sum[6] = vpaddq_s64(sum[6], sum[7]);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum[0]), vmovn_s64(sum[2]));
+ int32x4_t sum4567 = vcombine_s32(vmovn_s64(sum[4]), vmovn_s64(sum[6]));
+
+ return vcombine_u16(vqrshrun_n_s32(sum0123, ROUND0_BITS + 2),
+ vqrshrun_n_s32(sum4567, ROUND0_BITS + 2));
+}
+
+static INLINE void highbd_12_dist_wtd_convolve_x_8tap_sve2(
+ const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride,
+ int width, int height, const int16_t *x_filter_ptr) {
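+  // Keep the offset in the low 64-bit lane only, so that it contributes
+  // exactly once per output pixel after the pairwise reduction in
+  // highbd_12_convolve8_8_x.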
+ const int64x1_t offset_vec =
+ vcreate_s64((1 << (12 + FILTER_BITS)) + (1 << (12 + FILTER_BITS - 1)));
+ const int64x2_t offset_lo = vcombine_s64(offset_vec, vdup_n_s64(0));
+
+ const int16x8_t filter = vld1q_s16(x_filter_ptr);
+
+ do {
+ const int16_t *s = (const int16_t *)src;
+ uint16_t *d = dst;
+ int w = width;
+
+ do {
+ int16x8_t s0[8], s1[8], s2[8], s3[8];
+ load_s16_8x8(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3],
+ &s0[4], &s0[5], &s0[6], &s0[7]);
+ load_s16_8x8(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3],
+ &s1[4], &s1[5], &s1[6], &s1[7]);
+ load_s16_8x8(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3],
+ &s2[4], &s2[5], &s2[6], &s2[7]);
+ load_s16_8x8(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3],
+ &s3[4], &s3[5], &s3[6], &s3[7]);
+
+ uint16x8_t d0 = highbd_12_convolve8_8_x(s0, filter, offset_lo);
+ uint16x8_t d1 = highbd_12_convolve8_8_x(s1, filter, offset_lo);
+ uint16x8_t d2 = highbd_12_convolve8_8_x(s2, filter, offset_lo);
+ uint16x8_t d3 = highbd_12_convolve8_8_x(s3, filter, offset_lo);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ s += 8;
+ d += 8;
+ w -= 8;
+ } while (w != 0);
+ src += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height != 0);
+}
+
+static INLINE uint16x8_t highbd_convolve8_8_x(int16x8_t s0[8], int16x8_t filter,
+ int64x2_t offset) {
+ int64x2_t sum[8];
+ sum[0] = aom_sdotq_s16(offset, s0[0], filter);
+ sum[1] = aom_sdotq_s16(offset, s0[1], filter);
+ sum[2] = aom_sdotq_s16(offset, s0[2], filter);
+ sum[3] = aom_sdotq_s16(offset, s0[3], filter);
+ sum[4] = aom_sdotq_s16(offset, s0[4], filter);
+ sum[5] = aom_sdotq_s16(offset, s0[5], filter);
+ sum[6] = aom_sdotq_s16(offset, s0[6], filter);
+ sum[7] = aom_sdotq_s16(offset, s0[7], filter);
+
+ sum[0] = vpaddq_s64(sum[0], sum[1]);
+ sum[2] = vpaddq_s64(sum[2], sum[3]);
+ sum[4] = vpaddq_s64(sum[4], sum[5]);
+ sum[6] = vpaddq_s64(sum[6], sum[7]);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum[0]), vmovn_s64(sum[2]));
+ int32x4_t sum4567 = vcombine_s32(vmovn_s64(sum[4]), vmovn_s64(sum[6]));
+
+ return vcombine_u16(vqrshrun_n_s32(sum0123, ROUND0_BITS),
+ vqrshrun_n_s32(sum4567, ROUND0_BITS));
+}
+
+static INLINE void highbd_dist_wtd_convolve_x_8tap_sve2(
+ const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride,
+ int width, int height, const int16_t *x_filter_ptr, const int bd) {
+ const int64x1_t offset_vec =
+ vcreate_s64((1 << (bd + FILTER_BITS)) + (1 << (bd + FILTER_BITS - 1)));
+ const int64x2_t offset_lo = vcombine_s64(offset_vec, vdup_n_s64(0));
+
+ const int16x8_t filter = vld1q_s16(x_filter_ptr);
+
+ do {
+ const int16_t *s = (const int16_t *)src;
+ uint16_t *d = dst;
+ int w = width;
+
+ do {
+ int16x8_t s0[8], s1[8], s2[8], s3[8];
+ load_s16_8x8(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3],
+ &s0[4], &s0[5], &s0[6], &s0[7]);
+ load_s16_8x8(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3],
+ &s1[4], &s1[5], &s1[6], &s1[7]);
+ load_s16_8x8(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3],
+ &s2[4], &s2[5], &s2[6], &s2[7]);
+ load_s16_8x8(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3],
+ &s3[4], &s3[5], &s3[6], &s3[7]);
+
+ uint16x8_t d0 = highbd_convolve8_8_x(s0, filter, offset_lo);
+ uint16x8_t d1 = highbd_convolve8_8_x(s1, filter, offset_lo);
+ uint16x8_t d2 = highbd_convolve8_8_x(s2, filter, offset_lo);
+ uint16x8_t d3 = highbd_convolve8_8_x(s3, filter, offset_lo);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ s += 8;
+ d += 8;
+ w -= 8;
+ } while (w != 0);
+ src += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height != 0);
+}
+
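+// Restores natural pixel order after the 4-tap kernels, whose pairwise
+// reductions produce results interleaved as {0,4,1,5,2,6,3,7}.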
+// clang-format off
+DECLARE_ALIGNED(16, static const uint16_t, kDeinterleaveTbl[8]) = {
+ 0, 2, 4, 6, 1, 3, 5, 7,
+};
+// clang-format on
+
+static INLINE uint16x4_t highbd_12_convolve4_4_x(int16x8_t s0, int16x8_t filter,
+ int64x2_t offset,
+ uint16x8x2_t permute_tbl) {
+ int16x8_t permuted_samples0 = aom_tbl_s16(s0, permute_tbl.val[0]);
+ int16x8_t permuted_samples1 = aom_tbl_s16(s0, permute_tbl.val[1]);
+
+ int64x2_t sum01 = aom_svdot_lane_s16(offset, permuted_samples0, filter, 0);
+ int64x2_t sum23 = aom_svdot_lane_s16(offset, permuted_samples1, filter, 0);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+
+ return vqrshrun_n_s32(sum0123, ROUND0_BITS + 2);
+}
+
+static INLINE uint16x8_t highbd_12_convolve4_8_x(int16x8_t s0[4],
+ int16x8_t filter,
+ int64x2_t offset,
+ uint16x8_t tbl) {
+ int64x2_t sum04 = aom_svdot_lane_s16(offset, s0[0], filter, 0);
+ int64x2_t sum15 = aom_svdot_lane_s16(offset, s0[1], filter, 0);
+ int64x2_t sum26 = aom_svdot_lane_s16(offset, s0[2], filter, 0);
+ int64x2_t sum37 = aom_svdot_lane_s16(offset, s0[3], filter, 0);
+
+ int32x4_t sum0415 = vcombine_s32(vmovn_s64(sum04), vmovn_s64(sum15));
+ int32x4_t sum2637 = vcombine_s32(vmovn_s64(sum26), vmovn_s64(sum37));
+
+ uint16x8_t res = vcombine_u16(vqrshrun_n_s32(sum0415, ROUND0_BITS + 2),
+ vqrshrun_n_s32(sum2637, ROUND0_BITS + 2));
+ return aom_tbl_u16(res, tbl);
+}
+
+static INLINE void highbd_12_dist_wtd_convolve_x_4tap_sve2(
+ const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride,
+ int width, int height, const int16_t *x_filter_ptr) {
+ const int64x2_t offset =
+ vdupq_n_s64((1 << (12 + FILTER_BITS)) + (1 << (12 + FILTER_BITS - 1)));
+
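+  // The 4-tap kernel occupies taps 2..5 of the 8-tap filter array; keep it in
+  // the low 64-bit lane so aom_svdot_lane_s16 can select it with lane index 0.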
+ const int16x4_t x_filter = vld1_s16(x_filter_ptr + 2);
+ const int16x8_t filter = vcombine_s16(x_filter, vdup_n_s16(0));
+
+ if (width == 4) {
+ uint16x8x2_t permute_tbl = vld1q_u16_x2(kDotProdTbl);
+
+ const int16_t *s = (const int16_t *)(src);
+
+ do {
+ int16x8_t s0, s1, s2, s3;
+ load_s16_8x4(s, src_stride, &s0, &s1, &s2, &s3);
+
+ uint16x4_t d0 = highbd_12_convolve4_4_x(s0, filter, offset, permute_tbl);
+ uint16x4_t d1 = highbd_12_convolve4_4_x(s1, filter, offset, permute_tbl);
+ uint16x4_t d2 = highbd_12_convolve4_4_x(s2, filter, offset, permute_tbl);
+ uint16x4_t d3 = highbd_12_convolve4_4_x(s3, filter, offset, permute_tbl);
+
+ store_u16_4x4(dst, dst_stride, d0, d1, d2, d3);
+
+ s += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height != 0);
+ } else {
+ uint16x8_t idx = vld1q_u16(kDeinterleaveTbl);
+
+ do {
+ const int16_t *s = (const int16_t *)(src);
+ uint16_t *d = dst;
+ int w = width;
+
+ do {
+ int16x8_t s0[4], s1[4], s2[4], s3[4];
+ load_s16_8x4(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3]);
+ load_s16_8x4(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3]);
+ load_s16_8x4(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3]);
+ load_s16_8x4(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3]);
+
+ uint16x8_t d0 = highbd_12_convolve4_8_x(s0, filter, offset, idx);
+ uint16x8_t d1 = highbd_12_convolve4_8_x(s1, filter, offset, idx);
+ uint16x8_t d2 = highbd_12_convolve4_8_x(s2, filter, offset, idx);
+ uint16x8_t d3 = highbd_12_convolve4_8_x(s3, filter, offset, idx);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ s += 8;
+ d += 8;
+ w -= 8;
+ } while (w != 0);
+ src += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height != 0);
+ }
+}
+
+static INLINE uint16x4_t highbd_convolve4_4_x(int16x8_t s0, int16x8_t filter,
+ int64x2_t offset,
+ uint16x8x2_t permute_tbl) {
+ int16x8_t permuted_samples0 = aom_tbl_s16(s0, permute_tbl.val[0]);
+ int16x8_t permuted_samples1 = aom_tbl_s16(s0, permute_tbl.val[1]);
+
+ int64x2_t sum01 = aom_svdot_lane_s16(offset, permuted_samples0, filter, 0);
+ int64x2_t sum23 = aom_svdot_lane_s16(offset, permuted_samples1, filter, 0);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+
+ return vqrshrun_n_s32(sum0123, ROUND0_BITS);
+}
+
+static INLINE uint16x8_t highbd_convolve4_8_x(int16x8_t s0[4], int16x8_t filter,
+ int64x2_t offset,
+ uint16x8_t tbl) {
+ int64x2_t sum04 = aom_svdot_lane_s16(offset, s0[0], filter, 0);
+ int64x2_t sum15 = aom_svdot_lane_s16(offset, s0[1], filter, 0);
+ int64x2_t sum26 = aom_svdot_lane_s16(offset, s0[2], filter, 0);
+ int64x2_t sum37 = aom_svdot_lane_s16(offset, s0[3], filter, 0);
+
+ int32x4_t sum0415 = vcombine_s32(vmovn_s64(sum04), vmovn_s64(sum15));
+ int32x4_t sum2637 = vcombine_s32(vmovn_s64(sum26), vmovn_s64(sum37));
+
+ uint16x8_t res = vcombine_u16(vqrshrun_n_s32(sum0415, ROUND0_BITS),
+ vqrshrun_n_s32(sum2637, ROUND0_BITS));
+ return aom_tbl_u16(res, tbl);
+}
+
+static INLINE void highbd_dist_wtd_convolve_x_4tap_sve2(
+ const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride,
+ int width, int height, const int16_t *x_filter_ptr, const int bd) {
+ const int64x2_t offset =
+ vdupq_n_s64((1 << (bd + FILTER_BITS)) + (1 << (bd + FILTER_BITS - 1)));
+
+ const int16x4_t x_filter = vld1_s16(x_filter_ptr + 2);
+ const int16x8_t filter = vcombine_s16(x_filter, vdup_n_s16(0));
+
+ if (width == 4) {
+ uint16x8x2_t permute_tbl = vld1q_u16_x2(kDotProdTbl);
+
+ const int16_t *s = (const int16_t *)(src);
+
+ do {
+ int16x8_t s0, s1, s2, s3;
+ load_s16_8x4(s, src_stride, &s0, &s1, &s2, &s3);
+
+ uint16x4_t d0 = highbd_convolve4_4_x(s0, filter, offset, permute_tbl);
+ uint16x4_t d1 = highbd_convolve4_4_x(s1, filter, offset, permute_tbl);
+ uint16x4_t d2 = highbd_convolve4_4_x(s2, filter, offset, permute_tbl);
+ uint16x4_t d3 = highbd_convolve4_4_x(s3, filter, offset, permute_tbl);
+
+ store_u16_4x4(dst, dst_stride, d0, d1, d2, d3);
+
+ s += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height != 0);
+ } else {
+ uint16x8_t idx = vld1q_u16(kDeinterleaveTbl);
+
+ do {
+ const int16_t *s = (const int16_t *)(src);
+ uint16_t *d = dst;
+ int w = width;
+
+ do {
+ int16x8_t s0[4], s1[4], s2[4], s3[4];
+ load_s16_8x4(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3]);
+ load_s16_8x4(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3]);
+ load_s16_8x4(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3]);
+ load_s16_8x4(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3]);
+
+ uint16x8_t d0 = highbd_convolve4_8_x(s0, filter, offset, idx);
+ uint16x8_t d1 = highbd_convolve4_8_x(s1, filter, offset, idx);
+ uint16x8_t d2 = highbd_convolve4_8_x(s2, filter, offset, idx);
+ uint16x8_t d3 = highbd_convolve4_8_x(s3, filter, offset, idx);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ s += 8;
+ d += 8;
+ w -= 8;
+ } while (w != 0);
+ src += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height != 0);
+ }
+}
+
+void av1_highbd_dist_wtd_convolve_x_sve2(
+ const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride, int w,
+ int h, const InterpFilterParams *filter_params_x, const int subpel_x_qn,
+ ConvolveParams *conv_params, int bd) {
+ DECLARE_ALIGNED(16, uint16_t,
+ im_block[(MAX_SB_SIZE + MAX_FILTER_TAP) * MAX_SB_SIZE]);
+ CONV_BUF_TYPE *dst16 = conv_params->dst;
+ const int x_filter_taps = get_filter_tap(filter_params_x, subpel_x_qn);
+
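+  // 6-tap filters have no SVE2 path in this file; defer to the Neon
+  // implementation for them.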
+ if (x_filter_taps == 6) {
+ av1_highbd_dist_wtd_convolve_x_neon(src, src_stride, dst, dst_stride, w, h,
+ filter_params_x, subpel_x_qn,
+ conv_params, bd);
+ return;
+ }
+
+ int dst16_stride = conv_params->dst_stride;
+ const int im_stride = MAX_SB_SIZE;
+ const int horiz_offset = filter_params_x->taps / 2 - 1;
+ assert(FILTER_BITS == COMPOUND_ROUND1_BITS);
+
+ const int16_t *x_filter_ptr = av1_get_interp_filter_subpel_kernel(
+ filter_params_x, subpel_x_qn & SUBPEL_MASK);
+
+ src -= horiz_offset;
+
+ if (bd == 12) {
+ if (conv_params->do_average) {
+ if (x_filter_taps <= 4) {
+ highbd_12_dist_wtd_convolve_x_4tap_sve2(src + 2, src_stride, im_block,
+ im_stride, w, h, x_filter_ptr);
+ } else {
+ highbd_12_dist_wtd_convolve_x_8tap_sve2(src, src_stride, im_block,
+ im_stride, w, h, x_filter_ptr);
+ }
+
+ if (conv_params->use_dist_wtd_comp_avg) {
+ highbd_12_dist_wtd_comp_avg_neon(im_block, im_stride, dst, dst_stride,
+ w, h, conv_params);
+
+ } else {
+ highbd_12_comp_avg_neon(im_block, im_stride, dst, dst_stride, w, h,
+ conv_params);
+ }
+ } else {
+ if (x_filter_taps <= 4) {
+ highbd_12_dist_wtd_convolve_x_4tap_sve2(
+ src + 2, src_stride, dst16, dst16_stride, w, h, x_filter_ptr);
+ } else {
+ highbd_12_dist_wtd_convolve_x_8tap_sve2(
+ src, src_stride, dst16, dst16_stride, w, h, x_filter_ptr);
+ }
+ }
+ } else {
+ if (conv_params->do_average) {
+ if (x_filter_taps <= 4) {
+ highbd_dist_wtd_convolve_x_4tap_sve2(src + 2, src_stride, im_block,
+ im_stride, w, h, x_filter_ptr, bd);
+ } else {
+ highbd_dist_wtd_convolve_x_8tap_sve2(src, src_stride, im_block,
+ im_stride, w, h, x_filter_ptr, bd);
+ }
+
+ if (conv_params->use_dist_wtd_comp_avg) {
+ highbd_dist_wtd_comp_avg_neon(im_block, im_stride, dst, dst_stride, w,
+ h, conv_params, bd);
+ } else {
+ highbd_comp_avg_neon(im_block, im_stride, dst, dst_stride, w, h,
+ conv_params, bd);
+ }
+ } else {
+ if (x_filter_taps <= 4) {
+ highbd_dist_wtd_convolve_x_4tap_sve2(
+ src + 2, src_stride, dst16, dst16_stride, w, h, x_filter_ptr, bd);
+ } else {
+ highbd_dist_wtd_convolve_x_8tap_sve2(
+ src, src_stride, dst16, dst16_stride, w, h, x_filter_ptr, bd);
+ }
+ }
+ }
+}
+
+static INLINE uint16x4_t highbd_12_convolve8_4_y(int16x8_t samples_lo[2],
+ int16x8_t samples_hi[2],
+ int16x8_t filter,
+ int64x2_t offset) {
+ int64x2_t sum01 = aom_svdot_lane_s16(offset, samples_lo[0], filter, 0);
+ sum01 = aom_svdot_lane_s16(sum01, samples_hi[0], filter, 1);
+
+ int64x2_t sum23 = aom_svdot_lane_s16(offset, samples_lo[1], filter, 0);
+ sum23 = aom_svdot_lane_s16(sum23, samples_hi[1], filter, 1);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+
+ return vqrshrun_n_s32(sum0123, ROUND0_BITS + 2);
+}
+
+static INLINE uint16x8_t highbd_12_convolve8_8_y(int16x8_t samples_lo[4],
+ int16x8_t samples_hi[4],
+ int16x8_t filter,
+ int64x2_t offset) {
+ int64x2_t sum01 = aom_svdot_lane_s16(offset, samples_lo[0], filter, 0);
+ sum01 = aom_svdot_lane_s16(sum01, samples_hi[0], filter, 1);
+
+ int64x2_t sum23 = aom_svdot_lane_s16(offset, samples_lo[1], filter, 0);
+ sum23 = aom_svdot_lane_s16(sum23, samples_hi[1], filter, 1);
+
+ int64x2_t sum45 = aom_svdot_lane_s16(offset, samples_lo[2], filter, 0);
+ sum45 = aom_svdot_lane_s16(sum45, samples_hi[2], filter, 1);
+
+ int64x2_t sum67 = aom_svdot_lane_s16(offset, samples_lo[3], filter, 0);
+ sum67 = aom_svdot_lane_s16(sum67, samples_hi[3], filter, 1);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+ int32x4_t sum4567 = vcombine_s32(vmovn_s64(sum45), vmovn_s64(sum67));
+
+ return vcombine_u16(vqrshrun_n_s32(sum0123, ROUND0_BITS + 2),
+ vqrshrun_n_s32(sum4567, ROUND0_BITS + 2));
+}
+
+static INLINE void highbd_12_dist_wtd_convolve_y_8tap_sve2(
+ const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride,
+ int width, int height, const int16_t *y_filter_ptr) {
+ const int64x2_t offset =
+ vdupq_n_s64((1 << (12 + FILTER_BITS)) + (1 << (12 + FILTER_BITS - 1)));
+ const int16x8_t y_filter = vld1q_s16(y_filter_ptr);
+
+ uint16x8x3_t merge_block_tbl = vld1q_u16_x3(kDotProdMergeBlockTbl);
+  // Scale indices by the true vector length to avoid reading from an
+ // 'undefined' portion of a vector on a system with SVE vectors > 128-bit.
+ uint16x8_t correction0 =
+ vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000000000000ULL));
+ merge_block_tbl.val[0] = vaddq_u16(merge_block_tbl.val[0], correction0);
+ uint16x8_t correction1 =
+ vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000100000000ULL));
+ merge_block_tbl.val[1] = vaddq_u16(merge_block_tbl.val[1], correction1);
+
+ uint16x8_t correction2 =
+ vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000100010000ULL));
+ merge_block_tbl.val[2] = vaddq_u16(merge_block_tbl.val[2], correction2);
+
+ if (width == 4) {
+ int16_t *s = (int16_t *)src;
+ int16x4_t s0, s1, s2, s3, s4, s5, s6;
+ load_s16_4x7(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6);
+ s += 7 * src_stride;
+
+ // This operation combines a conventional transpose and the sample permute
+ // required before computing the dot product.
+ int16x8_t s0123[2], s1234[2], s2345[2], s3456[2];
+ transpose_concat_4x4(s0, s1, s2, s3, s0123);
+ transpose_concat_4x4(s1, s2, s3, s4, s1234);
+ transpose_concat_4x4(s2, s3, s4, s5, s2345);
+ transpose_concat_4x4(s3, s4, s5, s6, s3456);
+
+ do {
+ int16x4_t s7, s8, s9, s10;
+ load_s16_4x4(s, src_stride, &s7, &s8, &s9, &s10);
+
+ int16x8_t s4567[2], s5678[2], s6789[2], s789A[2];
+ // Transpose and shuffle the 4 lines that were loaded.
+ transpose_concat_4x4(s7, s8, s9, s10, s789A);
+
+ // Merge new data into block from previous iteration.
+ aom_tbl2x2_s16(s3456, s789A, merge_block_tbl.val[0], s4567);
+ aom_tbl2x2_s16(s3456, s789A, merge_block_tbl.val[1], s5678);
+ aom_tbl2x2_s16(s3456, s789A, merge_block_tbl.val[2], s6789);
+
+ uint16x4_t d0 = highbd_12_convolve8_4_y(s0123, s4567, y_filter, offset);
+ uint16x4_t d1 = highbd_12_convolve8_4_y(s1234, s5678, y_filter, offset);
+ uint16x4_t d2 = highbd_12_convolve8_4_y(s2345, s6789, y_filter, offset);
+ uint16x4_t d3 = highbd_12_convolve8_4_y(s3456, s789A, y_filter, offset);
+
+ store_u16_4x4(dst, dst_stride, d0, d1, d2, d3);
+
+ // Prepare block for next iteration - re-using as much as possible.
+ // Shuffle everything up four rows.
+ s0123[0] = s4567[0];
+ s0123[1] = s4567[1];
+ s1234[0] = s5678[0];
+ s1234[1] = s5678[1];
+ s2345[0] = s6789[0];
+ s2345[1] = s6789[1];
+ s3456[0] = s789A[0];
+ s3456[1] = s789A[1];
+
+ s += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height != 0);
+ } else {
+ do {
+ int h = height;
+ int16_t *s = (int16_t *)src;
+ uint16_t *d = dst;
+
+ int16x8_t s0, s1, s2, s3, s4, s5, s6;
+ load_s16_8x7(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6);
+ s += 7 * src_stride;
+
+ // This operation combines a conventional transpose and the sample permute
+ // required before computing the dot product.
+ int16x8_t s0123[4], s1234[4], s2345[4], s3456[4];
+ transpose_concat_8x4(s0, s1, s2, s3, s0123);
+ transpose_concat_8x4(s1, s2, s3, s4, s1234);
+ transpose_concat_8x4(s2, s3, s4, s5, s2345);
+ transpose_concat_8x4(s3, s4, s5, s6, s3456);
+
+ do {
+ int16x8_t s7, s8, s9, s10;
+ load_s16_8x4(s, src_stride, &s7, &s8, &s9, &s10);
+ int16x8_t s4567[4], s5678[4], s6789[4], s789A[4];
+
+ // Transpose and shuffle the 4 lines that were loaded.
+ transpose_concat_8x4(s7, s8, s9, s10, s789A);
+
+ // Merge new data into block from previous iteration.
+ aom_tbl2x4_s16(s3456, s789A, merge_block_tbl.val[0], s4567);
+ aom_tbl2x4_s16(s3456, s789A, merge_block_tbl.val[1], s5678);
+ aom_tbl2x4_s16(s3456, s789A, merge_block_tbl.val[2], s6789);
+
+ uint16x8_t d0 = highbd_12_convolve8_8_y(s0123, s4567, y_filter, offset);
+ uint16x8_t d1 = highbd_12_convolve8_8_y(s1234, s5678, y_filter, offset);
+ uint16x8_t d2 = highbd_12_convolve8_8_y(s2345, s6789, y_filter, offset);
+ uint16x8_t d3 = highbd_12_convolve8_8_y(s3456, s789A, y_filter, offset);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ // Prepare block for next iteration - re-using as much as possible.
+ // Shuffle everything up four rows.
+ s0123[0] = s4567[0];
+ s0123[1] = s4567[1];
+ s0123[2] = s4567[2];
+ s0123[3] = s4567[3];
+ s1234[0] = s5678[0];
+ s1234[1] = s5678[1];
+ s1234[2] = s5678[2];
+ s1234[3] = s5678[3];
+ s2345[0] = s6789[0];
+ s2345[1] = s6789[1];
+ s2345[2] = s6789[2];
+ s2345[3] = s6789[3];
+ s3456[0] = s789A[0];
+ s3456[1] = s789A[1];
+ s3456[2] = s789A[2];
+ s3456[3] = s789A[3];
+
+ s += 4 * src_stride;
+ d += 4 * dst_stride;
+ h -= 4;
+ } while (h != 0);
+ src += 8;
+ dst += 8;
+ width -= 8;
+ } while (width != 0);
+ }
+}
+
+static INLINE uint16x4_t highbd_convolve8_4_y(int16x8_t samples_lo[2],
+ int16x8_t samples_hi[2],
+ int16x8_t filter,
+ int64x2_t offset) {
+ int64x2_t sum01 = aom_svdot_lane_s16(offset, samples_lo[0], filter, 0);
+ sum01 = aom_svdot_lane_s16(sum01, samples_hi[0], filter, 1);
+
+ int64x2_t sum23 = aom_svdot_lane_s16(offset, samples_lo[1], filter, 0);
+ sum23 = aom_svdot_lane_s16(sum23, samples_hi[1], filter, 1);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+
+ return vqrshrun_n_s32(sum0123, ROUND0_BITS);
+}
+
+static INLINE uint16x8_t highbd_convolve8_8_y(int16x8_t samples_lo[4],
+ int16x8_t samples_hi[4],
+ int16x8_t filter,
+ int64x2_t offset) {
+ int64x2_t sum01 = aom_svdot_lane_s16(offset, samples_lo[0], filter, 0);
+ sum01 = aom_svdot_lane_s16(sum01, samples_hi[0], filter, 1);
+
+ int64x2_t sum23 = aom_svdot_lane_s16(offset, samples_lo[1], filter, 0);
+ sum23 = aom_svdot_lane_s16(sum23, samples_hi[1], filter, 1);
+
+ int64x2_t sum45 = aom_svdot_lane_s16(offset, samples_lo[2], filter, 0);
+ sum45 = aom_svdot_lane_s16(sum45, samples_hi[2], filter, 1);
+
+ int64x2_t sum67 = aom_svdot_lane_s16(offset, samples_lo[3], filter, 0);
+ sum67 = aom_svdot_lane_s16(sum67, samples_hi[3], filter, 1);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+ int32x4_t sum4567 = vcombine_s32(vmovn_s64(sum45), vmovn_s64(sum67));
+
+ return vcombine_u16(vqrshrun_n_s32(sum0123, ROUND0_BITS),
+ vqrshrun_n_s32(sum4567, ROUND0_BITS));
+}
+
+static INLINE void highbd_dist_wtd_convolve_y_8tap_sve2(
+ const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride,
+ int width, int height, const int16_t *y_filter_ptr, const int bd) {
+ const int64x2_t offset =
+ vdupq_n_s64((1 << (bd + FILTER_BITS)) + (1 << (bd + FILTER_BITS - 1)));
+ const int16x8_t y_filter = vld1q_s16(y_filter_ptr);
+
+ uint16x8x3_t merge_block_tbl = vld1q_u16_x3(kDotProdMergeBlockTbl);
+  // Scale indices by the true vector length to avoid reading from an
+ // 'undefined' portion of a vector on a system with SVE vectors > 128-bit.
+ uint16x8_t correction0 =
+ vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000000000000ULL));
+ merge_block_tbl.val[0] = vaddq_u16(merge_block_tbl.val[0], correction0);
+ uint16x8_t correction1 =
+ vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000100000000ULL));
+ merge_block_tbl.val[1] = vaddq_u16(merge_block_tbl.val[1], correction1);
+
+ uint16x8_t correction2 =
+ vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000100010000ULL));
+ merge_block_tbl.val[2] = vaddq_u16(merge_block_tbl.val[2], correction2);
+
+ if (width == 4) {
+ int16_t *s = (int16_t *)src;
+ int16x4_t s0, s1, s2, s3, s4, s5, s6;
+ load_s16_4x7(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6);
+ s += 7 * src_stride;
+
+ // This operation combines a conventional transpose and the sample permute
+ // required before computing the dot product.
+ int16x8_t s0123[2], s1234[2], s2345[2], s3456[2];
+ transpose_concat_4x4(s0, s1, s2, s3, s0123);
+ transpose_concat_4x4(s1, s2, s3, s4, s1234);
+ transpose_concat_4x4(s2, s3, s4, s5, s2345);
+ transpose_concat_4x4(s3, s4, s5, s6, s3456);
+
+ do {
+ int16x4_t s7, s8, s9, s10;
+ load_s16_4x4(s, src_stride, &s7, &s8, &s9, &s10);
+
+ int16x8_t s4567[2], s5678[2], s6789[2], s789A[2];
+ // Transpose and shuffle the 4 lines that were loaded.
+ transpose_concat_4x4(s7, s8, s9, s10, s789A);
+
+ // Merge new data into block from previous iteration.
+ aom_tbl2x2_s16(s3456, s789A, merge_block_tbl.val[0], s4567);
+ aom_tbl2x2_s16(s3456, s789A, merge_block_tbl.val[1], s5678);
+ aom_tbl2x2_s16(s3456, s789A, merge_block_tbl.val[2], s6789);
+
+ uint16x4_t d0 = highbd_convolve8_4_y(s0123, s4567, y_filter, offset);
+ uint16x4_t d1 = highbd_convolve8_4_y(s1234, s5678, y_filter, offset);
+ uint16x4_t d2 = highbd_convolve8_4_y(s2345, s6789, y_filter, offset);
+ uint16x4_t d3 = highbd_convolve8_4_y(s3456, s789A, y_filter, offset);
+
+ store_u16_4x4(dst, dst_stride, d0, d1, d2, d3);
+
+ // Prepare block for next iteration - re-using as much as possible.
+ // Shuffle everything up four rows.
+ s0123[0] = s4567[0];
+ s0123[1] = s4567[1];
+ s1234[0] = s5678[0];
+ s1234[1] = s5678[1];
+ s2345[0] = s6789[0];
+ s2345[1] = s6789[1];
+ s3456[0] = s789A[0];
+ s3456[1] = s789A[1];
+
+ s += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height != 0);
+ } else {
+ do {
+ int h = height;
+ int16_t *s = (int16_t *)src;
+ uint16_t *d = dst;
+
+ int16x8_t s0, s1, s2, s3, s4, s5, s6;
+ load_s16_8x7(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6);
+ s += 7 * src_stride;
+
+ // This operation combines a conventional transpose and the sample permute
+ // required before computing the dot product.
+ int16x8_t s0123[4], s1234[4], s2345[4], s3456[4];
+ transpose_concat_8x4(s0, s1, s2, s3, s0123);
+ transpose_concat_8x4(s1, s2, s3, s4, s1234);
+ transpose_concat_8x4(s2, s3, s4, s5, s2345);
+ transpose_concat_8x4(s3, s4, s5, s6, s3456);
+
+ do {
+ int16x8_t s7, s8, s9, s10;
+ load_s16_8x4(s, src_stride, &s7, &s8, &s9, &s10);
+ int16x8_t s4567[4], s5678[4], s6789[4], s789A[4];
+
+ // Transpose and shuffle the 4 lines that were loaded.
+ transpose_concat_8x4(s7, s8, s9, s10, s789A);
+
+ // Merge new data into block from previous iteration.
+ aom_tbl2x4_s16(s3456, s789A, merge_block_tbl.val[0], s4567);
+ aom_tbl2x4_s16(s3456, s789A, merge_block_tbl.val[1], s5678);
+ aom_tbl2x4_s16(s3456, s789A, merge_block_tbl.val[2], s6789);
+
+ uint16x8_t d0 = highbd_convolve8_8_y(s0123, s4567, y_filter, offset);
+ uint16x8_t d1 = highbd_convolve8_8_y(s1234, s5678, y_filter, offset);
+ uint16x8_t d2 = highbd_convolve8_8_y(s2345, s6789, y_filter, offset);
+ uint16x8_t d3 = highbd_convolve8_8_y(s3456, s789A, y_filter, offset);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ // Prepare block for next iteration - re-using as much as possible.
+ // Shuffle everything up four rows.
+ s0123[0] = s4567[0];
+ s0123[1] = s4567[1];
+ s0123[2] = s4567[2];
+ s0123[3] = s4567[3];
+ s1234[0] = s5678[0];
+ s1234[1] = s5678[1];
+ s1234[2] = s5678[2];
+ s1234[3] = s5678[3];
+ s2345[0] = s6789[0];
+ s2345[1] = s6789[1];
+ s2345[2] = s6789[2];
+ s2345[3] = s6789[3];
+ s3456[0] = s789A[0];
+ s3456[1] = s789A[1];
+ s3456[2] = s789A[2];
+ s3456[3] = s789A[3];
+
+ s += 4 * src_stride;
+ d += 4 * dst_stride;
+ h -= 4;
+ } while (h != 0);
+ src += 8;
+ dst += 8;
+ width -= 8;
+ } while (width != 0);
+ }
+}
+
+void av1_highbd_dist_wtd_convolve_y_sve2(
+ const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride, int w,
+ int h, const InterpFilterParams *filter_params_y, const int subpel_y_qn,
+ ConvolveParams *conv_params, int bd) {
+ DECLARE_ALIGNED(16, uint16_t,
+ im_block[(MAX_SB_SIZE + MAX_FILTER_TAP) * MAX_SB_SIZE]);
+ CONV_BUF_TYPE *dst16 = conv_params->dst;
+ const int y_filter_taps = get_filter_tap(filter_params_y, subpel_y_qn);
+
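+  // Only 8-tap vertical filters have an SVE2 path here; all other tap counts
+  // are handled by the Neon implementation.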
+ if (y_filter_taps != 8) {
+ av1_highbd_dist_wtd_convolve_y_neon(src, src_stride, dst, dst_stride, w, h,
+ filter_params_y, subpel_y_qn,
+ conv_params, bd);
+ return;
+ }
+
+ int dst16_stride = conv_params->dst_stride;
+ const int im_stride = MAX_SB_SIZE;
+ const int vert_offset = filter_params_y->taps / 2 - 1;
+ assert(FILTER_BITS == COMPOUND_ROUND1_BITS);
+
+ const int16_t *y_filter_ptr = av1_get_interp_filter_subpel_kernel(
+ filter_params_y, subpel_y_qn & SUBPEL_MASK);
+
+ src -= vert_offset * src_stride;
+
+ if (bd == 12) {
+ if (conv_params->do_average) {
+ highbd_12_dist_wtd_convolve_y_8tap_sve2(src, src_stride, im_block,
+ im_stride, w, h, y_filter_ptr);
+ if (conv_params->use_dist_wtd_comp_avg) {
+ highbd_12_dist_wtd_comp_avg_neon(im_block, im_stride, dst, dst_stride,
+ w, h, conv_params);
+ } else {
+ highbd_12_comp_avg_neon(im_block, im_stride, dst, dst_stride, w, h,
+ conv_params);
+ }
+ } else {
+ highbd_12_dist_wtd_convolve_y_8tap_sve2(src, src_stride, dst16,
+ dst16_stride, w, h, y_filter_ptr);
+ }
+ } else {
+ if (conv_params->do_average) {
+ highbd_dist_wtd_convolve_y_8tap_sve2(src, src_stride, im_block, im_stride,
+ w, h, y_filter_ptr, bd);
+ if (conv_params->use_dist_wtd_comp_avg) {
+ highbd_dist_wtd_comp_avg_neon(im_block, im_stride, dst, dst_stride, w,
+ h, conv_params, bd);
+ } else {
+ highbd_comp_avg_neon(im_block, im_stride, dst, dst_stride, w, h,
+ conv_params, bd);
+ }
+ } else {
+ highbd_dist_wtd_convolve_y_8tap_sve2(src, src_stride, dst16, dst16_stride,
+ w, h, y_filter_ptr, bd);
+ }
+ }
+}
+
+static INLINE void highbd_12_dist_wtd_convolve_2d_horiz_8tap_sve2(
+ const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride,
+ int width, int height, const int16_t *x_filter_ptr) {
+ const int64x2_t offset = vdupq_n_s64(1 << (12 + FILTER_BITS - 2));
+ const int16x8_t filter = vld1q_s16(x_filter_ptr);
+
+  // Only 8-tap and 4-tap vertical convolutions are used, so im_h % 4 == 3
+  // (im_h = h + taps - 1 and h is a multiple of 4). Process the block 4 rows
+  // at a time, then handle the final 3 rows separately.
+
+ do {
+ const int16_t *s = (const int16_t *)src;
+ uint16_t *d = dst;
+ int w = width;
+
+ do {
+ int16x8_t s0[8], s1[8], s2[8], s3[8];
+ load_s16_8x8(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3],
+ &s0[4], &s0[5], &s0[6], &s0[7]);
+ load_s16_8x8(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3],
+ &s1[4], &s1[5], &s1[6], &s1[7]);
+ load_s16_8x8(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3],
+ &s2[4], &s2[5], &s2[6], &s2[7]);
+ load_s16_8x8(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3],
+ &s3[4], &s3[5], &s3[6], &s3[7]);
+
+ uint16x8_t d0 = highbd_12_convolve8_8_x(s0, filter, offset);
+ uint16x8_t d1 = highbd_12_convolve8_8_x(s1, filter, offset);
+ uint16x8_t d2 = highbd_12_convolve8_8_x(s2, filter, offset);
+ uint16x8_t d3 = highbd_12_convolve8_8_x(s3, filter, offset);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ s += 8;
+ d += 8;
+ w -= 8;
+ } while (w != 0);
+ src += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height > 4);
+
+ // Process final 3 rows.
+ const int16_t *s = (const int16_t *)src;
+ do {
+ int16x8_t s0[8], s1[8], s2[8];
+ load_s16_8x8(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3], &s0[4],
+ &s0[5], &s0[6], &s0[7]);
+ load_s16_8x8(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3], &s1[4],
+ &s1[5], &s1[6], &s1[7]);
+ load_s16_8x8(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3], &s2[4],
+ &s2[5], &s2[6], &s2[7]);
+
+ uint16x8_t d0 = highbd_12_convolve8_8_x(s0, filter, offset);
+ uint16x8_t d1 = highbd_12_convolve8_8_x(s1, filter, offset);
+ uint16x8_t d2 = highbd_12_convolve8_8_x(s2, filter, offset);
+
+ store_u16_8x3(dst, dst_stride, d0, d1, d2);
+ s += 8;
+ dst += 8;
+ width -= 8;
+ } while (width != 0);
+}
+
+static INLINE void highbd_dist_wtd_convolve_2d_horiz_8tap_sve2(
+ const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride,
+ int width, int height, const int16_t *x_filter_ptr, const int bd) {
+ const int64x2_t offset = vdupq_n_s64(1 << (bd + FILTER_BITS - 2));
+ const int16x8_t filter = vld1q_s16(x_filter_ptr);
+
+  // Only 8-tap and 4-tap vertical convolutions are used, so im_h % 4 == 3
+  // (im_h = h + taps - 1 and h is a multiple of 4). Process the block 4 rows
+  // at a time, then handle the final 3 rows separately.
+
+ do {
+ const int16_t *s = (const int16_t *)src;
+ uint16_t *d = dst;
+ int w = width;
+
+ do {
+ int16x8_t s0[8], s1[8], s2[8], s3[8];
+ load_s16_8x8(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3],
+ &s0[4], &s0[5], &s0[6], &s0[7]);
+ load_s16_8x8(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3],
+ &s1[4], &s1[5], &s1[6], &s1[7]);
+ load_s16_8x8(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3],
+ &s2[4], &s2[5], &s2[6], &s2[7]);
+ load_s16_8x8(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3],
+ &s3[4], &s3[5], &s3[6], &s3[7]);
+
+ uint16x8_t d0 = highbd_convolve8_8_x(s0, filter, offset);
+ uint16x8_t d1 = highbd_convolve8_8_x(s1, filter, offset);
+ uint16x8_t d2 = highbd_convolve8_8_x(s2, filter, offset);
+ uint16x8_t d3 = highbd_convolve8_8_x(s3, filter, offset);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ s += 8;
+ d += 8;
+ w -= 8;
+ } while (w != 0);
+ src += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height > 4);
+
+ // Process final 3 rows.
+ const int16_t *s = (const int16_t *)src;
+ do {
+ int16x8_t s0[8], s1[8], s2[8];
+ load_s16_8x8(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3], &s0[4],
+ &s0[5], &s0[6], &s0[7]);
+ load_s16_8x8(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3], &s1[4],
+ &s1[5], &s1[6], &s1[7]);
+ load_s16_8x8(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3], &s2[4],
+ &s2[5], &s2[6], &s2[7]);
+
+ uint16x8_t d0 = highbd_convolve8_8_x(s0, filter, offset);
+ uint16x8_t d1 = highbd_convolve8_8_x(s1, filter, offset);
+ uint16x8_t d2 = highbd_convolve8_8_x(s2, filter, offset);
+
+ store_u16_8x3(dst, dst_stride, d0, d1, d2);
+ s += 8;
+ dst += 8;
+ width -= 8;
+ } while (width != 0);
+}
+
+static INLINE void highbd_12_dist_wtd_convolve_2d_horiz_4tap_sve2(
+ const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride,
+ int width, int height, const int16_t *x_filter_ptr) {
+ const int64x2_t offset = vdupq_n_s64(1 << (12 + FILTER_BITS - 1));
+ const int16x4_t x_filter = vld1_s16(x_filter_ptr + 2);
+ const int16x8_t filter = vcombine_s16(x_filter, vdup_n_s16(0));
+
+  // Only 8-tap and 4-tap vertical convolutions are used, so im_h % 4 == 3
+  // (im_h = h + taps - 1 and h is a multiple of 4). Process the block 4 rows
+  // at a time, then handle the final 3 rows separately.
+
+ if (width == 4) {
+ uint16x8x2_t permute_tbl = vld1q_u16_x2(kDotProdTbl);
+
+ const int16_t *s = (const int16_t *)(src);
+
+ do {
+ int16x8_t s0, s1, s2, s3;
+ load_s16_8x4(s, src_stride, &s0, &s1, &s2, &s3);
+
+ uint16x4_t d0 = highbd_12_convolve4_4_x(s0, filter, offset, permute_tbl);
+ uint16x4_t d1 = highbd_12_convolve4_4_x(s1, filter, offset, permute_tbl);
+ uint16x4_t d2 = highbd_12_convolve4_4_x(s2, filter, offset, permute_tbl);
+ uint16x4_t d3 = highbd_12_convolve4_4_x(s3, filter, offset, permute_tbl);
+
+ store_u16_4x4(dst, dst_stride, d0, d1, d2, d3);
+
+ s += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height > 4);
+
+ // Process final 3 rows.
+ int16x8_t s0, s1, s2;
+ load_s16_8x3(s, src_stride, &s0, &s1, &s2);
+
+ uint16x4_t d0 = highbd_12_convolve4_4_x(s0, filter, offset, permute_tbl);
+ uint16x4_t d1 = highbd_12_convolve4_4_x(s1, filter, offset, permute_tbl);
+ uint16x4_t d2 = highbd_12_convolve4_4_x(s2, filter, offset, permute_tbl);
+
+ store_u16_4x3(dst, dst_stride, d0, d1, d2);
+
+ } else {
+ uint16x8_t idx = vld1q_u16(kDeinterleaveTbl);
+
+ do {
+ const int16_t *s = (const int16_t *)(src);
+ uint16_t *d = dst;
+ int w = width;
+
+ do {
+ int16x8_t s0[4], s1[4], s2[4], s3[4];
+ load_s16_8x4(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3]);
+ load_s16_8x4(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3]);
+ load_s16_8x4(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3]);
+ load_s16_8x4(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3]);
+
+ uint16x8_t d0 = highbd_12_convolve4_8_x(s0, filter, offset, idx);
+ uint16x8_t d1 = highbd_12_convolve4_8_x(s1, filter, offset, idx);
+ uint16x8_t d2 = highbd_12_convolve4_8_x(s2, filter, offset, idx);
+ uint16x8_t d3 = highbd_12_convolve4_8_x(s3, filter, offset, idx);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ s += 8;
+ d += 8;
+ w -= 8;
+ } while (w != 0);
+ src += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height > 4);
+
+ // Process final 3 rows.
+ const int16_t *s = (const int16_t *)(src);
+
+ do {
+ int16x8_t s0[4], s1[4], s2[4];
+ load_s16_8x4(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3]);
+ load_s16_8x4(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3]);
+ load_s16_8x4(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3]);
+
+ uint16x8_t d0 = highbd_12_convolve4_8_x(s0, filter, offset, idx);
+ uint16x8_t d1 = highbd_12_convolve4_8_x(s1, filter, offset, idx);
+ uint16x8_t d2 = highbd_12_convolve4_8_x(s2, filter, offset, idx);
+
+ store_u16_8x3(dst, dst_stride, d0, d1, d2);
+
+ s += 8;
+ dst += 8;
+ width -= 8;
+ } while (width != 0);
+ }
+}
+
+static INLINE void highbd_dist_wtd_convolve_2d_horiz_4tap_sve2(
+ const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride,
+ int width, int height, const int16_t *x_filter_ptr, const int bd) {
+ const int64x2_t offset = vdupq_n_s64(1 << (bd + FILTER_BITS - 1));
+ const int16x4_t x_filter = vld1_s16(x_filter_ptr + 2);
+ const int16x8_t filter = vcombine_s16(x_filter, vdup_n_s16(0));
+
+  // Only 8-tap and 4-tap vertical convolutions are used, so im_h % 4 == 3
+  // (im_h = h + taps - 1 and h is a multiple of 4). Process the block 4 rows
+  // at a time, then handle the final 3 rows separately.
+
+ if (width == 4) {
+ uint16x8x2_t permute_tbl = vld1q_u16_x2(kDotProdTbl);
+
+ const int16_t *s = (const int16_t *)(src);
+
+ do {
+ int16x8_t s0, s1, s2, s3;
+ load_s16_8x4(s, src_stride, &s0, &s1, &s2, &s3);
+
+ uint16x4_t d0 = highbd_convolve4_4_x(s0, filter, offset, permute_tbl);
+ uint16x4_t d1 = highbd_convolve4_4_x(s1, filter, offset, permute_tbl);
+ uint16x4_t d2 = highbd_convolve4_4_x(s2, filter, offset, permute_tbl);
+ uint16x4_t d3 = highbd_convolve4_4_x(s3, filter, offset, permute_tbl);
+
+ store_u16_4x4(dst, dst_stride, d0, d1, d2, d3);
+
+ s += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height > 4);
+
+ // Process final 3 rows.
+ int16x8_t s0, s1, s2;
+ load_s16_8x3(s, src_stride, &s0, &s1, &s2);
+
+ uint16x4_t d0 = highbd_convolve4_4_x(s0, filter, offset, permute_tbl);
+ uint16x4_t d1 = highbd_convolve4_4_x(s1, filter, offset, permute_tbl);
+ uint16x4_t d2 = highbd_convolve4_4_x(s2, filter, offset, permute_tbl);
+
+ store_u16_4x3(dst, dst_stride, d0, d1, d2);
+ } else {
+ uint16x8_t idx = vld1q_u16(kDeinterleaveTbl);
+
+ do {
+ const int16_t *s = (const int16_t *)(src);
+ uint16_t *d = dst;
+ int w = width;
+
+ do {
+ int16x8_t s0[4], s1[4], s2[4], s3[4];
+ load_s16_8x4(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3]);
+ load_s16_8x4(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3]);
+ load_s16_8x4(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3]);
+ load_s16_8x4(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3]);
+
+ uint16x8_t d0 = highbd_convolve4_8_x(s0, filter, offset, idx);
+ uint16x8_t d1 = highbd_convolve4_8_x(s1, filter, offset, idx);
+ uint16x8_t d2 = highbd_convolve4_8_x(s2, filter, offset, idx);
+ uint16x8_t d3 = highbd_convolve4_8_x(s3, filter, offset, idx);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ s += 8;
+ d += 8;
+ w -= 8;
+ } while (w != 0);
+ src += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height > 4);
+
+ // Process final 3 rows.
+ const int16_t *s = (const int16_t *)(src);
+
+ do {
+ int16x8_t s0[4], s1[4], s2[4];
+ load_s16_8x4(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3]);
+ load_s16_8x4(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3]);
+ load_s16_8x4(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3]);
+
+ uint16x8_t d0 = highbd_convolve4_8_x(s0, filter, offset, idx);
+ uint16x8_t d1 = highbd_convolve4_8_x(s1, filter, offset, idx);
+ uint16x8_t d2 = highbd_convolve4_8_x(s2, filter, offset, idx);
+
+ store_u16_8x3(dst, dst_stride, d0, d1, d2);
+
+ s += 8;
+ dst += 8;
+ width -= 8;
+ } while (width != 0);
+ }
+}
+
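+// These vertical helpers mirror highbd_convolve8_4_y/highbd_convolve8_8_y but
+// narrow with COMPOUND_ROUND1_BITS, since they form the second pass of the
+// 2D filter.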
+static INLINE uint16x4_t highbd_convolve8_4_2d_v(int16x8_t samples_lo[2],
+ int16x8_t samples_hi[2],
+ int16x8_t filter,
+ int64x2_t offset) {
+ int64x2_t sum01 = aom_svdot_lane_s16(offset, samples_lo[0], filter, 0);
+ sum01 = aom_svdot_lane_s16(sum01, samples_hi[0], filter, 1);
+
+ int64x2_t sum23 = aom_svdot_lane_s16(offset, samples_lo[1], filter, 0);
+ sum23 = aom_svdot_lane_s16(sum23, samples_hi[1], filter, 1);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+
+ return vqrshrun_n_s32(sum0123, COMPOUND_ROUND1_BITS);
+}
+
+static INLINE uint16x8_t highbd_convolve8_8_2d_v(int16x8_t samples_lo[4],
+ int16x8_t samples_hi[4],
+ int16x8_t filter,
+ int64x2_t offset) {
+ int64x2_t sum01 = aom_svdot_lane_s16(offset, samples_lo[0], filter, 0);
+ sum01 = aom_svdot_lane_s16(sum01, samples_hi[0], filter, 1);
+
+ int64x2_t sum23 = aom_svdot_lane_s16(offset, samples_lo[1], filter, 0);
+ sum23 = aom_svdot_lane_s16(sum23, samples_hi[1], filter, 1);
+
+ int64x2_t sum45 = aom_svdot_lane_s16(offset, samples_lo[2], filter, 0);
+ sum45 = aom_svdot_lane_s16(sum45, samples_hi[2], filter, 1);
+
+ int64x2_t sum67 = aom_svdot_lane_s16(offset, samples_lo[3], filter, 0);
+ sum67 = aom_svdot_lane_s16(sum67, samples_hi[3], filter, 1);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+ int32x4_t sum4567 = vcombine_s32(vmovn_s64(sum45), vmovn_s64(sum67));
+
+ return vcombine_u16(vqrshrun_n_s32(sum0123, COMPOUND_ROUND1_BITS),
+ vqrshrun_n_s32(sum4567, COMPOUND_ROUND1_BITS));
+}
+
+static INLINE void highbd_dist_wtd_convolve_2d_vert_8tap_sve2(
+ const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride,
+ int width, int height, const int16_t *y_filter_ptr, int offset) {
+ const int16x8_t y_filter = vld1q_s16(y_filter_ptr);
+ const int64x2_t offset_s64 = vdupq_n_s64(offset);
+
+ uint16x8x3_t merge_block_tbl = vld1q_u16_x3(kDotProdMergeBlockTbl);
+  // Scale indices by the true vector length to avoid reading from an
+ // 'undefined' portion of a vector on a system with SVE vectors > 128-bit.
+ uint16x8_t correction0 =
+ vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000000000000ULL));
+ merge_block_tbl.val[0] = vaddq_u16(merge_block_tbl.val[0], correction0);
+
+ uint16x8_t correction1 =
+ vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000100000000ULL));
+ merge_block_tbl.val[1] = vaddq_u16(merge_block_tbl.val[1], correction1);
+
+ uint16x8_t correction2 =
+ vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000100010000ULL));
+ merge_block_tbl.val[2] = vaddq_u16(merge_block_tbl.val[2], correction2);
+
+ if (width == 4) {
+ int16_t *s = (int16_t *)src;
+ int16x4_t s0, s1, s2, s3, s4, s5, s6;
+ load_s16_4x7(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6);
+ s += 7 * src_stride;
+
+ // This operation combines a conventional transpose and the sample permute
+ // required before computing the dot product.
+ int16x8_t s0123[2], s1234[2], s2345[2], s3456[2];
+ transpose_concat_4x4(s0, s1, s2, s3, s0123);
+ transpose_concat_4x4(s1, s2, s3, s4, s1234);
+ transpose_concat_4x4(s2, s3, s4, s5, s2345);
+ transpose_concat_4x4(s3, s4, s5, s6, s3456);
+
+ do {
+ int16x4_t s7, s8, s9, s10;
+ load_s16_4x4(s, src_stride, &s7, &s8, &s9, &s10);
+
+ int16x8_t s4567[2], s5678[2], s6789[2], s789A[2];
+ // Transpose and shuffle the 4 lines that were loaded.
+ transpose_concat_4x4(s7, s8, s9, s10, s789A);
+
+ // Merge new data into block from previous iteration.
+ aom_tbl2x2_s16(s3456, s789A, merge_block_tbl.val[0], s4567);
+ aom_tbl2x2_s16(s3456, s789A, merge_block_tbl.val[1], s5678);
+ aom_tbl2x2_s16(s3456, s789A, merge_block_tbl.val[2], s6789);
+
+ uint16x4_t d0 =
+ highbd_convolve8_4_2d_v(s0123, s4567, y_filter, offset_s64);
+ uint16x4_t d1 =
+ highbd_convolve8_4_2d_v(s1234, s5678, y_filter, offset_s64);
+ uint16x4_t d2 =
+ highbd_convolve8_4_2d_v(s2345, s6789, y_filter, offset_s64);
+ uint16x4_t d3 =
+ highbd_convolve8_4_2d_v(s3456, s789A, y_filter, offset_s64);
+
+ store_u16_4x4(dst, dst_stride, d0, d1, d2, d3);
+
+ // Prepare block for next iteration - re-using as much as possible.
+ // Shuffle everything up four rows.
+ s0123[0] = s4567[0];
+ s0123[1] = s4567[1];
+ s1234[0] = s5678[0];
+ s1234[1] = s5678[1];
+ s2345[0] = s6789[0];
+ s2345[1] = s6789[1];
+ s3456[0] = s789A[0];
+ s3456[1] = s789A[1];
+
+ s += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height != 0);
+ } else {
+ do {
+ int h = height;
+ int16_t *s = (int16_t *)src;
+ uint16_t *d = dst;
+
+ int16x8_t s0, s1, s2, s3, s4, s5, s6;
+ load_s16_8x7(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6);
+ s += 7 * src_stride;
+
+ // This operation combines a conventional transpose and the sample permute
+ // required before computing the dot product.
+ int16x8_t s0123[4], s1234[4], s2345[4], s3456[4];
+ transpose_concat_8x4(s0, s1, s2, s3, s0123);
+ transpose_concat_8x4(s1, s2, s3, s4, s1234);
+ transpose_concat_8x4(s2, s3, s4, s5, s2345);
+ transpose_concat_8x4(s3, s4, s5, s6, s3456);
+
+ do {
+ int16x8_t s7, s8, s9, s10;
+ load_s16_8x4(s, src_stride, &s7, &s8, &s9, &s10);
+ int16x8_t s4567[4], s5678[4], s6789[4], s789A[4];
+
+ // Transpose and shuffle the 4 lines that were loaded.
+ transpose_concat_8x4(s7, s8, s9, s10, s789A);
+
+ // Merge new data into block from previous iteration.
+ aom_tbl2x4_s16(s3456, s789A, merge_block_tbl.val[0], s4567);
+ aom_tbl2x4_s16(s3456, s789A, merge_block_tbl.val[1], s5678);
+ aom_tbl2x4_s16(s3456, s789A, merge_block_tbl.val[2], s6789);
+
+ uint16x8_t d0 =
+ highbd_convolve8_8_2d_v(s0123, s4567, y_filter, offset_s64);
+ uint16x8_t d1 =
+ highbd_convolve8_8_2d_v(s1234, s5678, y_filter, offset_s64);
+ uint16x8_t d2 =
+ highbd_convolve8_8_2d_v(s2345, s6789, y_filter, offset_s64);
+ uint16x8_t d3 =
+ highbd_convolve8_8_2d_v(s3456, s789A, y_filter, offset_s64);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ // Prepare block for next iteration - re-using as much as possible.
+ // Shuffle everything up four rows.
+ s0123[0] = s4567[0];
+ s0123[1] = s4567[1];
+ s0123[2] = s4567[2];
+ s0123[3] = s4567[3];
+ s1234[0] = s5678[0];
+ s1234[1] = s5678[1];
+ s1234[2] = s5678[2];
+ s1234[3] = s5678[3];
+ s2345[0] = s6789[0];
+ s2345[1] = s6789[1];
+ s2345[2] = s6789[2];
+ s2345[3] = s6789[3];
+ s3456[0] = s789A[0];
+ s3456[1] = s789A[1];
+ s3456[2] = s789A[2];
+ s3456[3] = s789A[3];
+
+ s += 4 * src_stride;
+ d += 4 * dst_stride;
+ h -= 4;
+ } while (h != 0);
+ src += 8;
+ dst += 8;
+ width -= 8;
+ } while (width != 0);
+ }
+}
+
+static INLINE uint16x4_t highbd_convolve4_4_2d_v(
+ const int16x4_t s0, const int16x4_t s1, const int16x4_t s2,
+ const int16x4_t s3, const int16x4_t filter, const int32x4_t offset) {
+ int32x4_t sum = vmlal_lane_s16(offset, s0, filter, 0);
+ sum = vmlal_lane_s16(sum, s1, filter, 1);
+ sum = vmlal_lane_s16(sum, s2, filter, 2);
+ sum = vmlal_lane_s16(sum, s3, filter, 3);
+
+ return vqrshrun_n_s32(sum, COMPOUND_ROUND1_BITS);
+}
+
+static INLINE uint16x8_t highbd_convolve4_8_2d_v(
+ const int16x8_t s0, const int16x8_t s1, const int16x8_t s2,
+ const int16x8_t s3, const int16x4_t filter, const int32x4_t offset) {
+ int32x4_t sum0 = vmlal_lane_s16(offset, vget_low_s16(s0), filter, 0);
+ sum0 = vmlal_lane_s16(sum0, vget_low_s16(s1), filter, 1);
+ sum0 = vmlal_lane_s16(sum0, vget_low_s16(s2), filter, 2);
+ sum0 = vmlal_lane_s16(sum0, vget_low_s16(s3), filter, 3);
+
+ int32x4_t sum1 = vmlal_lane_s16(offset, vget_high_s16(s0), filter, 0);
+ sum1 = vmlal_lane_s16(sum1, vget_high_s16(s1), filter, 1);
+ sum1 = vmlal_lane_s16(sum1, vget_high_s16(s2), filter, 2);
+ sum1 = vmlal_lane_s16(sum1, vget_high_s16(s3), filter, 3);
+
+ return vcombine_u16(vqrshrun_n_s32(sum0, COMPOUND_ROUND1_BITS),
+ vqrshrun_n_s32(sum1, COMPOUND_ROUND1_BITS));
+}
+
+static INLINE void highbd_dist_wtd_convolve_2d_vert_4tap_neon(
+ const uint16_t *src_ptr, int src_stride, uint16_t *dst_ptr, int dst_stride,
+ int w, int h, const int16_t *y_filter_ptr, const int offset) {
+ const int16x4_t y_filter = vld1_s16(y_filter_ptr + 2);
+ const int32x4_t offset_vec = vdupq_n_s32(offset);
+
+ if (w == 4) {
+ const int16_t *s = (const int16_t *)src_ptr;
+ uint16_t *d = dst_ptr;
+
+ int16x4_t s0, s1, s2;
+ load_s16_4x3(s, src_stride, &s0, &s1, &s2);
+ s += 3 * src_stride;
+
+ do {
+ int16x4_t s3, s4, s5, s6;
+ load_s16_4x4(s, src_stride, &s3, &s4, &s5, &s6);
+
+ uint16x4_t d0 =
+ highbd_convolve4_4_2d_v(s0, s1, s2, s3, y_filter, offset_vec);
+ uint16x4_t d1 =
+ highbd_convolve4_4_2d_v(s1, s2, s3, s4, y_filter, offset_vec);
+ uint16x4_t d2 =
+ highbd_convolve4_4_2d_v(s2, s3, s4, s5, y_filter, offset_vec);
+ uint16x4_t d3 =
+ highbd_convolve4_4_2d_v(s3, s4, s5, s6, y_filter, offset_vec);
+
+ store_u16_4x4(d, dst_stride, d0, d1, d2, d3);
+
+ s0 = s4;
+ s1 = s5;
+ s2 = s6;
+
+ s += 4 * src_stride;
+ d += 4 * dst_stride;
+ h -= 4;
+ } while (h != 0);
+ } else {
+ do {
+ int height = h;
+ const int16_t *s = (const int16_t *)src_ptr;
+ uint16_t *d = dst_ptr;
+
+ int16x8_t s0, s1, s2;
+ load_s16_8x3(s, src_stride, &s0, &s1, &s2);
+ s += 3 * src_stride;
+
+ do {
+ int16x8_t s3, s4, s5, s6;
+ load_s16_8x4(s, src_stride, &s3, &s4, &s5, &s6);
+
+ uint16x8_t d0 =
+ highbd_convolve4_8_2d_v(s0, s1, s2, s3, y_filter, offset_vec);
+ uint16x8_t d1 =
+ highbd_convolve4_8_2d_v(s1, s2, s3, s4, y_filter, offset_vec);
+ uint16x8_t d2 =
+ highbd_convolve4_8_2d_v(s2, s3, s4, s5, y_filter, offset_vec);
+ uint16x8_t d3 =
+ highbd_convolve4_8_2d_v(s3, s4, s5, s6, y_filter, offset_vec);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ s0 = s4;
+ s1 = s5;
+ s2 = s6;
+
+ s += 4 * src_stride;
+ d += 4 * dst_stride;
+ height -= 4;
+ } while (height != 0);
+ src_ptr += 8;
+ dst_ptr += 8;
+ w -= 8;
+ } while (w != 0);
+ }
+}
+
+void av1_highbd_dist_wtd_convolve_2d_sve2(
+ const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride, int w,
+ int h, const InterpFilterParams *filter_params_x,
+ const InterpFilterParams *filter_params_y, const int subpel_x_qn,
+ const int subpel_y_qn, ConvolveParams *conv_params, int bd) {
+ DECLARE_ALIGNED(16, uint16_t,
+ im_block[(MAX_SB_SIZE + MAX_FILTER_TAP) * MAX_SB_SIZE]);
+ DECLARE_ALIGNED(16, uint16_t,
+ im_block2[(MAX_SB_SIZE + MAX_FILTER_TAP) * MAX_SB_SIZE]);
+
+ CONV_BUF_TYPE *dst16 = conv_params->dst;
+ int dst16_stride = conv_params->dst_stride;
+ const int x_filter_taps = get_filter_tap(filter_params_x, subpel_x_qn);
+ const int clamped_x_taps = x_filter_taps < 4 ? 4 : x_filter_taps;
+
+ const int y_filter_taps = get_filter_tap(filter_params_y, subpel_y_qn);
+ const int clamped_y_taps = y_filter_taps < 4 ? 4 : y_filter_taps;
+
+ if (x_filter_taps == 6 || y_filter_taps == 6) {
+ av1_highbd_dist_wtd_convolve_2d_neon(
+ src, src_stride, dst, dst_stride, w, h, filter_params_x,
+ filter_params_y, subpel_x_qn, subpel_y_qn, conv_params, bd);
+ return;
+ }
+
+ const int im_h = h + clamped_y_taps - 1;
+ const int im_stride = MAX_SB_SIZE;
+ const int vert_offset = clamped_y_taps / 2 - 1;
+ const int horiz_offset = clamped_x_taps / 2 - 1;
+ const int y_offset_bits = bd + 2 * FILTER_BITS - conv_params->round_0;
+ const int round_offset_conv_y = (1 << y_offset_bits);
+
+ const uint16_t *src_ptr = src - vert_offset * src_stride - horiz_offset;
+
+ const int16_t *x_filter_ptr = av1_get_interp_filter_subpel_kernel(
+ filter_params_x, subpel_x_qn & SUBPEL_MASK);
+ const int16_t *y_filter_ptr = av1_get_interp_filter_subpel_kernel(
+ filter_params_y, subpel_y_qn & SUBPEL_MASK);
+
+ if (bd == 12) {
+ if (x_filter_taps <= 4) {
+ highbd_12_dist_wtd_convolve_2d_horiz_4tap_sve2(
+ src_ptr, src_stride, im_block, im_stride, w, im_h, x_filter_ptr);
+ } else {
+ highbd_12_dist_wtd_convolve_2d_horiz_8tap_sve2(
+ src_ptr, src_stride, im_block, im_stride, w, im_h, x_filter_ptr);
+ }
+ } else {
+ if (x_filter_taps <= 4) {
+ highbd_dist_wtd_convolve_2d_horiz_4tap_sve2(
+ src_ptr, src_stride, im_block, im_stride, w, im_h, x_filter_ptr, bd);
+ } else {
+ highbd_dist_wtd_convolve_2d_horiz_8tap_sve2(
+ src_ptr, src_stride, im_block, im_stride, w, im_h, x_filter_ptr, bd);
+ }
+ }
+
+ if (conv_params->do_average) {
+ if (y_filter_taps <= 4) {
+ highbd_dist_wtd_convolve_2d_vert_4tap_neon(im_block, im_stride, im_block2,
+ im_stride, w, h, y_filter_ptr,
+ round_offset_conv_y);
+ } else {
+ highbd_dist_wtd_convolve_2d_vert_8tap_sve2(im_block, im_stride, im_block2,
+ im_stride, w, h, y_filter_ptr,
+ round_offset_conv_y);
+ }
+ if (conv_params->use_dist_wtd_comp_avg) {
+ if (bd == 12) {
+ highbd_12_dist_wtd_comp_avg_neon(im_block2, im_stride, dst, dst_stride,
+ w, h, conv_params);
+
+ } else {
+ highbd_dist_wtd_comp_avg_neon(im_block2, im_stride, dst, dst_stride, w,
+ h, conv_params, bd);
+ }
+ } else {
+ if (bd == 12) {
+ highbd_12_comp_avg_neon(im_block2, im_stride, dst, dst_stride, w, h,
+ conv_params);
+
+ } else {
+ highbd_comp_avg_neon(im_block2, im_stride, dst, dst_stride, w, h,
+ conv_params, bd);
+ }
+ }
+ } else {
+ if (y_filter_taps <= 4) {
+ highbd_dist_wtd_convolve_2d_vert_4tap_neon(
+ im_block, im_stride, dst16, dst16_stride, w, h, y_filter_ptr,
+ round_offset_conv_y);
+ } else {
+ highbd_dist_wtd_convolve_2d_vert_8tap_sve2(
+ im_block, im_stride, dst16, dst16_stride, w, h, y_filter_ptr,
+ round_offset_conv_y);
+ }
+ }
+}
diff --git a/third_party/aom/av1/common/arm/highbd_convolve_sve2.c b/third_party/aom/av1/common/arm/highbd_convolve_sve2.c
new file mode 100644
index 0000000000..82eb12fcea
--- /dev/null
+++ b/third_party/aom/av1/common/arm/highbd_convolve_sve2.c
@@ -0,0 +1,1720 @@
+/*
+ * Copyright (c) 2024, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <assert.h>
+#include <arm_neon.h>
+
+#include "config/aom_config.h"
+#include "config/av1_rtcd.h"
+
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_dsp/arm/aom_neon_sve_bridge.h"
+#include "aom_dsp/arm/aom_neon_sve2_bridge.h"
+#include "aom_dsp/arm/mem_neon.h"
+#include "aom_ports/mem.h"
+#include "av1/common/convolve.h"
+#include "av1/common/filter.h"
+#include "av1/common/arm/highbd_convolve_sve2.h"
+
+DECLARE_ALIGNED(16, static const uint16_t, kDotProdTbl[32]) = {
+ 0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6,
+ 4, 5, 6, 7, 5, 6, 7, 0, 6, 7, 0, 1, 7, 0, 1, 2,
+};
+
+static INLINE uint16x4_t convolve12_4_x(
+ int16x8_t s0, int16x8_t s1, int16x8_t filter_0_7, int16x8_t filter_4_11,
+ const int64x2_t offset, uint16x8x4_t permute_tbl, uint16x4_t max) {
+ int16x8_t permuted_samples[6];
+ permuted_samples[0] = aom_tbl_s16(s0, permute_tbl.val[0]);
+ permuted_samples[1] = aom_tbl_s16(s0, permute_tbl.val[1]);
+ permuted_samples[2] = aom_tbl2_s16(s0, s1, permute_tbl.val[2]);
+ permuted_samples[3] = aom_tbl2_s16(s0, s1, permute_tbl.val[3]);
+ permuted_samples[4] = aom_tbl_s16(s1, permute_tbl.val[0]);
+ permuted_samples[5] = aom_tbl_s16(s1, permute_tbl.val[1]);
+
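+  // permuted_samples[0]/[2]/[4] hold the sample windows for taps 0-3, 4-7 and
+  // 8-11 of output pixels 0 and 1; [1]/[3]/[5] hold the same for pixels 2 and
+  // 3. Taps 0-3 and 4-7 come from the two halves of filter_0_7; taps 8-11 are
+  // the upper half of filter_4_11.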
+ int64x2_t sum01 =
+ aom_svdot_lane_s16(offset, permuted_samples[0], filter_0_7, 0);
+ sum01 = aom_svdot_lane_s16(sum01, permuted_samples[2], filter_0_7, 1);
+ sum01 = aom_svdot_lane_s16(sum01, permuted_samples[4], filter_4_11, 1);
+
+ int64x2_t sum23 =
+ aom_svdot_lane_s16(offset, permuted_samples[1], filter_0_7, 0);
+ sum23 = aom_svdot_lane_s16(sum23, permuted_samples[3], filter_0_7, 1);
+ sum23 = aom_svdot_lane_s16(sum23, permuted_samples[5], filter_4_11, 1);
+
+ int32x4_t res0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+ uint16x4_t res = vqrshrun_n_s32(res0123, FILTER_BITS);
+
+ return vmin_u16(res, max);
+}
+
+static INLINE uint16x8_t convolve12_8_x(int16x8_t s0, int16x8_t s1,
+ int16x8_t s2, int16x8_t filter_0_7,
+ int16x8_t filter_4_11, int64x2_t offset,
+ uint16x8x4_t permute_tbl,
+ uint16x8_t max) {
+ int16x8_t permuted_samples[8];
+ permuted_samples[0] = aom_tbl_s16(s0, permute_tbl.val[0]);
+ permuted_samples[1] = aom_tbl_s16(s0, permute_tbl.val[1]);
+ permuted_samples[2] = aom_tbl2_s16(s0, s1, permute_tbl.val[2]);
+ permuted_samples[3] = aom_tbl2_s16(s0, s1, permute_tbl.val[3]);
+ permuted_samples[4] = aom_tbl_s16(s1, permute_tbl.val[0]);
+ permuted_samples[5] = aom_tbl_s16(s1, permute_tbl.val[1]);
+ permuted_samples[6] = aom_tbl2_s16(s1, s2, permute_tbl.val[2]);
+ permuted_samples[7] = aom_tbl2_s16(s1, s2, permute_tbl.val[3]);
+
+ int64x2_t sum01 =
+ aom_svdot_lane_s16(offset, permuted_samples[0], filter_0_7, 0);
+ sum01 = aom_svdot_lane_s16(sum01, permuted_samples[2], filter_0_7, 1);
+ sum01 = aom_svdot_lane_s16(sum01, permuted_samples[4], filter_4_11, 1);
+
+ int64x2_t sum23 =
+ aom_svdot_lane_s16(offset, permuted_samples[1], filter_0_7, 0);
+ sum23 = aom_svdot_lane_s16(sum23, permuted_samples[3], filter_0_7, 1);
+ sum23 = aom_svdot_lane_s16(sum23, permuted_samples[5], filter_4_11, 1);
+
+ int64x2_t sum45 =
+ aom_svdot_lane_s16(offset, permuted_samples[2], filter_0_7, 0);
+ sum45 = aom_svdot_lane_s16(sum45, permuted_samples[4], filter_0_7, 1);
+ sum45 = aom_svdot_lane_s16(sum45, permuted_samples[6], filter_4_11, 1);
+
+ int64x2_t sum67 =
+ aom_svdot_lane_s16(offset, permuted_samples[3], filter_0_7, 0);
+ sum67 = aom_svdot_lane_s16(sum67, permuted_samples[5], filter_0_7, 1);
+ sum67 = aom_svdot_lane_s16(sum67, permuted_samples[7], filter_4_11, 1);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+ int32x4_t sum4567 = vcombine_s32(vmovn_s64(sum45), vmovn_s64(sum67));
+
+ uint16x8_t res = vcombine_u16(vqrshrun_n_s32(sum0123, FILTER_BITS),
+ vqrshrun_n_s32(sum4567, FILTER_BITS));
+
+ return vminq_u16(res, max);
+}
+
+static INLINE void highbd_convolve_x_sr_12tap_sve2(
+ const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride,
+ int width, int height, const int16_t *y_filter_ptr,
+ ConvolveParams *conv_params, int bd) {
+  // This shim allows us to do only one rounding shift instead of two.
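+  // Adding 1 << (round_0 - 1) into the accumulator, combined with the
+  // rounding applied by vqrshrun_n_s32(..., FILTER_BITS), matches the
+  // reference two-stage rounding (shift by round_0, then by
+  // FILTER_BITS - round_0).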
+ const int64x2_t offset = vdupq_n_s64(1 << (conv_params->round_0 - 1));
+
+ const int16x8_t y_filter_0_7 = vld1q_s16(y_filter_ptr);
+ const int16x8_t y_filter_4_11 = vld1q_s16(y_filter_ptr + 4);
+
+ uint16x8x4_t permute_tbl = vld1q_u16_x4(kDotProdTbl);
+  // Scale indices by the true vector length to avoid reading from an
+  // 'undefined' portion of a vector on a system with SVE vectors > 128-bit.
+ uint16x8_t correction0 = vreinterpretq_u16_u64(vcombine_u64(
+ vdup_n_u64(0), vdup_n_u64(svcnth() * 0x0001000000000000ULL)));
+ permute_tbl.val[2] = vaddq_u16(permute_tbl.val[2], correction0);
+
+ uint16x8_t correction1 = vreinterpretq_u16_u64(
+ vcombine_u64(vdup_n_u64(svcnth() * 0x0001000100000000ULL),
+ vdup_n_u64(svcnth() * 0x0001000100010000ULL)));
+ permute_tbl.val[3] = vaddq_u16(permute_tbl.val[3], correction1);
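+  // For example, with 128-bit SVE vectors svcnth() == 8, so the wrapped
+  // indices in kDotProdTbl (..., 7, 0, 1, 2) become second-vector indices:
+  // val[2] -> { 4, 5, 6, 7, 5, 6, 7, 8 } and
+  // val[3] -> { 6, 7, 8, 9, 7, 8, 9, 10 }. On longer vectors the same lanes
+  // are offset by the larger svcnth() instead.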
+
+ if (width == 4) {
+ const uint16x4_t max = vdup_n_u16((1 << bd) - 1);
+ const int16_t *s = (const int16_t *)src;
+
+ do {
+ int16x8_t s0, s1, s2, s3, s4, s5, s6, s7;
+ load_s16_8x4(s, src_stride, &s0, &s2, &s4, &s6);
+ load_s16_8x4(s + 8, src_stride, &s1, &s3, &s5, &s7);
+
+ uint16x4_t d0 = convolve12_4_x(s0, s1, y_filter_0_7, y_filter_4_11,
+ offset, permute_tbl, max);
+ uint16x4_t d1 = convolve12_4_x(s2, s3, y_filter_0_7, y_filter_4_11,
+ offset, permute_tbl, max);
+ uint16x4_t d2 = convolve12_4_x(s4, s5, y_filter_0_7, y_filter_4_11,
+ offset, permute_tbl, max);
+ uint16x4_t d3 = convolve12_4_x(s6, s7, y_filter_0_7, y_filter_4_11,
+ offset, permute_tbl, max);
+
+ store_u16_4x4(dst, dst_stride, d0, d1, d2, d3);
+
+ s += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height != 0);
+ } else {
+ const uint16x8_t max = vdupq_n_u16((1 << bd) - 1);
+
+ do {
+ const int16_t *s = (const int16_t *)src;
+ uint16_t *d = dst;
+ int w = width;
+
+ do {
+ int16x8_t s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11;
+ load_s16_8x4(s, src_stride, &s0, &s3, &s6, &s9);
+ load_s16_8x4(s + 8, src_stride, &s1, &s4, &s7, &s10);
+ load_s16_8x4(s + 16, src_stride, &s2, &s5, &s8, &s11);
+
+ uint16x8_t d0 = convolve12_8_x(s0, s1, s2, y_filter_0_7, y_filter_4_11,
+ offset, permute_tbl, max);
+ uint16x8_t d1 = convolve12_8_x(s3, s4, s5, y_filter_0_7, y_filter_4_11,
+ offset, permute_tbl, max);
+ uint16x8_t d2 = convolve12_8_x(s6, s7, s8, y_filter_0_7, y_filter_4_11,
+ offset, permute_tbl, max);
+ uint16x8_t d3 = convolve12_8_x(s9, s10, s11, y_filter_0_7,
+ y_filter_4_11, offset, permute_tbl, max);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ s += 8;
+ d += 8;
+ w -= 8;
+ } while (w != 0);
+ src += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height != 0);
+ }
+}
+
+static INLINE uint16x8_t convolve8_8_x(int16x8_t s0[8], int16x8_t filter,
+ int64x2_t offset, uint16x8_t max) {
+ int64x2_t sum[8];
+ sum[0] = aom_sdotq_s16(offset, s0[0], filter);
+ sum[1] = aom_sdotq_s16(offset, s0[1], filter);
+ sum[2] = aom_sdotq_s16(offset, s0[2], filter);
+ sum[3] = aom_sdotq_s16(offset, s0[3], filter);
+ sum[4] = aom_sdotq_s16(offset, s0[4], filter);
+ sum[5] = aom_sdotq_s16(offset, s0[5], filter);
+ sum[6] = aom_sdotq_s16(offset, s0[6], filter);
+ sum[7] = aom_sdotq_s16(offset, s0[7], filter);
+
+ sum[0] = vpaddq_s64(sum[0], sum[1]);
+ sum[2] = vpaddq_s64(sum[2], sum[3]);
+ sum[4] = vpaddq_s64(sum[4], sum[5]);
+ sum[6] = vpaddq_s64(sum[6], sum[7]);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum[0]), vmovn_s64(sum[2]));
+ int32x4_t sum4567 = vcombine_s32(vmovn_s64(sum[4]), vmovn_s64(sum[6]));
+
+ uint16x8_t res = vcombine_u16(vqrshrun_n_s32(sum0123, FILTER_BITS),
+ vqrshrun_n_s32(sum4567, FILTER_BITS));
+
+ return vminq_u16(res, max);
+}
+
+static INLINE void highbd_convolve_x_sr_8tap_sve2(
+ const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride,
+ int width, int height, const int16_t *y_filter_ptr,
+ ConvolveParams *conv_params, int bd) {
+ const uint16x8_t max = vdupq_n_u16((1 << bd) - 1);
+  // This shim allows us to do only one rounding shift instead of two.
+ const int64_t offset = 1 << (conv_params->round_0 - 1);
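+  // aom_sdotq_s16() accumulates the two 4-tap halves of each 8-tap dot
+  // product into separate 64-bit lanes, which are summed with vpaddq_s64()
+  // in convolve8_8_x(). Keeping the offset in lane 0 only means it is added
+  // exactly once per output pixel.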
+ const int64x2_t offset_lo = vcombine_s64((int64x1_t)(offset), vdup_n_s64(0));
+
+ const int16x8_t filter = vld1q_s16(y_filter_ptr);
+
+ do {
+ const int16_t *s = (const int16_t *)src;
+ uint16_t *d = dst;
+ int w = width;
+
+ do {
+ int16x8_t s0[8], s1[8], s2[8], s3[8];
+ load_s16_8x8(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3],
+ &s0[4], &s0[5], &s0[6], &s0[7]);
+ load_s16_8x8(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3],
+ &s1[4], &s1[5], &s1[6], &s1[7]);
+ load_s16_8x8(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3],
+ &s2[4], &s2[5], &s2[6], &s2[7]);
+ load_s16_8x8(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3],
+ &s3[4], &s3[5], &s3[6], &s3[7]);
+
+ uint16x8_t d0 = convolve8_8_x(s0, filter, offset_lo, max);
+ uint16x8_t d1 = convolve8_8_x(s1, filter, offset_lo, max);
+ uint16x8_t d2 = convolve8_8_x(s2, filter, offset_lo, max);
+ uint16x8_t d3 = convolve8_8_x(s3, filter, offset_lo, max);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ s += 8;
+ d += 8;
+ w -= 8;
+ } while (w != 0);
+ src += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height != 0);
+}
+
+// clang-format off
+DECLARE_ALIGNED(16, static const uint16_t, kDeinterleaveTbl[8]) = {
+ 0, 2, 4, 6, 1, 3, 5, 7,
+};
+// clang-format on
+
+static INLINE uint16x4_t convolve4_4_x(int16x8_t s0, int16x8_t filter,
+ int64x2_t offset,
+ uint16x8x2_t permute_tbl,
+ uint16x4_t max) {
+ int16x8_t permuted_samples0 = aom_tbl_s16(s0, permute_tbl.val[0]);
+ int16x8_t permuted_samples1 = aom_tbl_s16(s0, permute_tbl.val[1]);
+
+ int64x2_t sum01 = aom_svdot_lane_s16(offset, permuted_samples0, filter, 0);
+ int64x2_t sum23 = aom_svdot_lane_s16(offset, permuted_samples1, filter, 0);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+ uint16x4_t res = vqrshrun_n_s32(sum0123, FILTER_BITS);
+
+ return vmin_u16(res, max);
+}
+
+static INLINE uint16x8_t convolve4_8_x(int16x8_t s0[4], int16x8_t filter,
+ int64x2_t offset, uint16x8_t tbl,
+ uint16x8_t max) {
+ int64x2_t sum04 = aom_svdot_lane_s16(offset, s0[0], filter, 0);
+ int64x2_t sum15 = aom_svdot_lane_s16(offset, s0[1], filter, 0);
+ int64x2_t sum26 = aom_svdot_lane_s16(offset, s0[2], filter, 0);
+ int64x2_t sum37 = aom_svdot_lane_s16(offset, s0[3], filter, 0);
+
+ int32x4_t sum0415 = vcombine_s32(vmovn_s64(sum04), vmovn_s64(sum15));
+ int32x4_t sum2637 = vcombine_s32(vmovn_s64(sum26), vmovn_s64(sum37));
+
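+  // Narrowing and recombining leaves the eight results in the order
+  // { 0, 4, 1, 5, 2, 6, 3, 7 }; the tbl lookup with kDeinterleaveTbl below
+  // restores the natural output order.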
+ uint16x8_t res = vcombine_u16(vqrshrun_n_s32(sum0415, FILTER_BITS),
+ vqrshrun_n_s32(sum2637, FILTER_BITS));
+ res = aom_tbl_u16(res, tbl);
+
+ return vminq_u16(res, max);
+}
+
+static INLINE void highbd_convolve_x_sr_4tap_sve2(
+ const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride,
+ int width, int height, const int16_t *x_filter_ptr,
+ ConvolveParams *conv_params, int bd) {
+  // This shim allows us to do only one rounding shift instead of two.
+ const int64x2_t offset = vdupq_n_s64(1 << (conv_params->round_0 - 1));
+
+ const int16x4_t x_filter = vld1_s16(x_filter_ptr + 2);
+ const int16x8_t filter = vcombine_s16(x_filter, vdup_n_s16(0));
+
+ if (width == 4) {
+ const uint16x4_t max = vdup_n_u16((1 << bd) - 1);
+ uint16x8x2_t permute_tbl = vld1q_u16_x2(kDotProdTbl);
+
+ const int16_t *s = (const int16_t *)(src);
+
+ do {
+ int16x8_t s0, s1, s2, s3;
+ load_s16_8x4(s, src_stride, &s0, &s1, &s2, &s3);
+
+ uint16x4_t d0 = convolve4_4_x(s0, filter, offset, permute_tbl, max);
+ uint16x4_t d1 = convolve4_4_x(s1, filter, offset, permute_tbl, max);
+ uint16x4_t d2 = convolve4_4_x(s2, filter, offset, permute_tbl, max);
+ uint16x4_t d3 = convolve4_4_x(s3, filter, offset, permute_tbl, max);
+
+ store_u16_4x4(dst, dst_stride, d0, d1, d2, d3);
+
+ s += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height != 0);
+ } else {
+ const uint16x8_t max = vdupq_n_u16((1 << bd) - 1);
+ uint16x8_t idx = vld1q_u16(kDeinterleaveTbl);
+
+ do {
+ const int16_t *s = (const int16_t *)(src);
+ uint16_t *d = dst;
+ int w = width;
+
+ do {
+ int16x8_t s0[4], s1[4], s2[4], s3[4];
+ load_s16_8x4(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3]);
+ load_s16_8x4(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3]);
+ load_s16_8x4(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3]);
+ load_s16_8x4(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3]);
+
+ uint16x8_t d0 = convolve4_8_x(s0, filter, offset, idx, max);
+ uint16x8_t d1 = convolve4_8_x(s1, filter, offset, idx, max);
+ uint16x8_t d2 = convolve4_8_x(s2, filter, offset, idx, max);
+ uint16x8_t d3 = convolve4_8_x(s3, filter, offset, idx, max);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ s += 8;
+ d += 8;
+ w -= 8;
+ } while (w != 0);
+ src += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height != 0);
+ }
+}
+
+void av1_highbd_convolve_x_sr_sve2(const uint16_t *src, int src_stride,
+ uint16_t *dst, int dst_stride, int w, int h,
+ const InterpFilterParams *filter_params_x,
+ const int subpel_x_qn,
+ ConvolveParams *conv_params, int bd) {
+ if (w == 2 || h == 2) {
+ av1_highbd_convolve_x_sr_c(src, src_stride, dst, dst_stride, w, h,
+ filter_params_x, subpel_x_qn, conv_params, bd);
+ return;
+ }
+
+ const int x_filter_taps = get_filter_tap(filter_params_x, subpel_x_qn);
+
+ if (x_filter_taps == 6) {
+ av1_highbd_convolve_x_sr_neon(src, src_stride, dst, dst_stride, w, h,
+ filter_params_x, subpel_x_qn, conv_params,
+ bd);
+ return;
+ }
+
+ const int horiz_offset = filter_params_x->taps / 2 - 1;
+ const int16_t *x_filter_ptr = av1_get_interp_filter_subpel_kernel(
+ filter_params_x, subpel_x_qn & SUBPEL_MASK);
+
+ src -= horiz_offset;
+
+ if (x_filter_taps == 12) {
+ highbd_convolve_x_sr_12tap_sve2(src, src_stride, dst, dst_stride, w, h,
+ x_filter_ptr, conv_params, bd);
+ return;
+ }
+
+ if (x_filter_taps == 8) {
+ highbd_convolve_x_sr_8tap_sve2(src, src_stride, dst, dst_stride, w, h,
+ x_filter_ptr, conv_params, bd);
+ return;
+ }
+
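+  // 4-tap filters are stored in taps 2-5 of the 8-tap kernel array, and the
+  // 4-tap path loads the filter from x_filter_ptr + 2, so advance the source
+  // pointer by 2 to keep the filter centered on the same samples.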
+ highbd_convolve_x_sr_4tap_sve2(src + 2, src_stride, dst, dst_stride, w, h,
+ x_filter_ptr, conv_params, bd);
+}
+
+static INLINE uint16x4_t highbd_convolve12_4_y(int16x8_t s0[2], int16x8_t s1[2],
+ int16x8_t s2[2],
+ int16x8_t filter_0_7,
+ int16x8_t filter_4_11,
+ uint16x4_t max) {
+ int64x2_t sum[2];
+
+ sum[0] = aom_svdot_lane_s16(vdupq_n_s64(0), s0[0], filter_0_7, 0);
+ sum[0] = aom_svdot_lane_s16(sum[0], s1[0], filter_0_7, 1);
+ sum[0] = aom_svdot_lane_s16(sum[0], s2[0], filter_4_11, 1);
+
+ sum[1] = aom_svdot_lane_s16(vdupq_n_s64(0), s0[1], filter_0_7, 0);
+ sum[1] = aom_svdot_lane_s16(sum[1], s1[1], filter_0_7, 1);
+ sum[1] = aom_svdot_lane_s16(sum[1], s2[1], filter_4_11, 1);
+
+ int32x4_t res_s32 = vcombine_s32(vmovn_s64(sum[0]), vmovn_s64(sum[1]));
+
+ uint16x4_t res = vqrshrun_n_s32(res_s32, FILTER_BITS);
+
+ return vmin_u16(res, max);
+}
+
+static INLINE void highbd_convolve_y_sr_12tap_sve2(
+ const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride,
+ int width, int height, const int16_t *y_filter_ptr, int bd) {
+ const int16x8_t y_filter_0_7 = vld1q_s16(y_filter_ptr);
+ const int16x8_t y_filter_4_11 = vld1q_s16(y_filter_ptr + 4);
+
+ uint16x8x3_t merge_block_tbl = vld1q_u16_x3(kDotProdMergeBlockTbl);
+  // Scale indices by the true vector length to avoid reading from an
+  // 'undefined' portion of a vector on a system with SVE vectors > 128-bit.
+ uint16x8_t correction0 =
+ vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000000000000ULL));
+ merge_block_tbl.val[0] = vaddq_u16(merge_block_tbl.val[0], correction0);
+
+ uint16x8_t correction1 =
+ vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000100000000ULL));
+ merge_block_tbl.val[1] = vaddq_u16(merge_block_tbl.val[1], correction1);
+
+ uint16x8_t correction2 =
+ vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000100010000ULL));
+ merge_block_tbl.val[2] = vaddq_u16(merge_block_tbl.val[2], correction2);
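+  // The corrections above add svcnth() to the last one, two and three 16-bit
+  // lanes of each 64-bit group, so the table indices that pull rows in from
+  // the second svtbl2 operand remain valid whatever the hardware vector
+  // length.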
+
+ const uint16x4_t max = vdup_n_u16((1 << bd) - 1);
+
+ do {
+ int16_t *s = (int16_t *)src;
+ uint16_t *d = dst;
+ int h = height;
+
+ int16x4_t s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA;
+ load_s16_4x11(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6, &s7, &s8,
+ &s9, &sA);
+ s += 11 * src_stride;
+
+ int16x8_t s0123[2], s1234[2], s2345[2], s3456[2], s4567[2], s5678[2],
+ s6789[2], s789A[2];
+ transpose_concat_4x4(s0, s1, s2, s3, s0123);
+ transpose_concat_4x4(s1, s2, s3, s4, s1234);
+ transpose_concat_4x4(s2, s3, s4, s5, s2345);
+ transpose_concat_4x4(s3, s4, s5, s6, s3456);
+ transpose_concat_4x4(s4, s5, s6, s7, s4567);
+ transpose_concat_4x4(s5, s6, s7, s8, s5678);
+ transpose_concat_4x4(s6, s7, s8, s9, s6789);
+ transpose_concat_4x4(s7, s8, s9, sA, s789A);
+
+ do {
+ int16x4_t sB, sC, sD, sE;
+ load_s16_4x4(s, src_stride, &sB, &sC, &sD, &sE);
+
+ int16x8_t s89AB[2], s9ABC[2], sABCD[2], sBCDE[2];
+ transpose_concat_4x4(sB, sC, sD, sE, sBCDE);
+
+ // Use the above transpose and reuse data from the previous loop to get
+ // the rest.
+ aom_tbl2x2_s16(s789A, sBCDE, merge_block_tbl.val[0], s89AB);
+ aom_tbl2x2_s16(s789A, sBCDE, merge_block_tbl.val[1], s9ABC);
+ aom_tbl2x2_s16(s789A, sBCDE, merge_block_tbl.val[2], sABCD);
+
+ uint16x4_t d0 = highbd_convolve12_4_y(s0123, s4567, s89AB, y_filter_0_7,
+ y_filter_4_11, max);
+ uint16x4_t d1 = highbd_convolve12_4_y(s1234, s5678, s9ABC, y_filter_0_7,
+ y_filter_4_11, max);
+ uint16x4_t d2 = highbd_convolve12_4_y(s2345, s6789, sABCD, y_filter_0_7,
+ y_filter_4_11, max);
+ uint16x4_t d3 = highbd_convolve12_4_y(s3456, s789A, sBCDE, y_filter_0_7,
+ y_filter_4_11, max);
+
+ store_u16_4x4(d, dst_stride, d0, d1, d2, d3);
+
+ // Prepare block for next iteration - re-using as much as possible.
+ // Shuffle everything up four rows.
+ s0123[0] = s4567[0];
+ s0123[1] = s4567[1];
+ s1234[0] = s5678[0];
+ s1234[1] = s5678[1];
+ s2345[0] = s6789[0];
+ s2345[1] = s6789[1];
+ s3456[0] = s789A[0];
+ s3456[1] = s789A[1];
+ s4567[0] = s89AB[0];
+ s4567[1] = s89AB[1];
+ s5678[0] = s9ABC[0];
+ s5678[1] = s9ABC[1];
+ s6789[0] = sABCD[0];
+ s6789[1] = sABCD[1];
+ s789A[0] = sBCDE[0];
+ s789A[1] = sBCDE[1];
+
+ s += 4 * src_stride;
+ d += 4 * dst_stride;
+ h -= 4;
+ } while (h != 0);
+ src += 4;
+ dst += 4;
+ width -= 4;
+ } while (width != 0);
+}
+
+static INLINE uint16x4_t highbd_convolve8_4_y(int16x8_t samples_lo[2],
+ int16x8_t samples_hi[2],
+ int16x8_t filter,
+ uint16x4_t max) {
+ int64x2_t sum01 =
+ aom_svdot_lane_s16(vdupq_n_s64(0), samples_lo[0], filter, 0);
+ sum01 = aom_svdot_lane_s16(sum01, samples_hi[0], filter, 1);
+
+ int64x2_t sum23 =
+ aom_svdot_lane_s16(vdupq_n_s64(0), samples_lo[1], filter, 0);
+ sum23 = aom_svdot_lane_s16(sum23, samples_hi[1], filter, 1);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+ uint16x4_t res = vqrshrun_n_s32(sum0123, FILTER_BITS);
+ return vmin_u16(res, max);
+}
+
+static INLINE uint16x8_t highbd_convolve8_8_y(int16x8_t samples_lo[4],
+ int16x8_t samples_hi[4],
+ int16x8_t filter,
+ uint16x8_t max) {
+ int64x2_t sum01 =
+ aom_svdot_lane_s16(vdupq_n_s64(0), samples_lo[0], filter, 0);
+ sum01 = aom_svdot_lane_s16(sum01, samples_hi[0], filter, 1);
+
+ int64x2_t sum23 =
+ aom_svdot_lane_s16(vdupq_n_s64(0), samples_lo[1], filter, 0);
+ sum23 = aom_svdot_lane_s16(sum23, samples_hi[1], filter, 1);
+
+ int64x2_t sum45 =
+ aom_svdot_lane_s16(vdupq_n_s64(0), samples_lo[2], filter, 0);
+ sum45 = aom_svdot_lane_s16(sum45, samples_hi[2], filter, 1);
+
+ int64x2_t sum67 =
+ aom_svdot_lane_s16(vdupq_n_s64(0), samples_lo[3], filter, 0);
+ sum67 = aom_svdot_lane_s16(sum67, samples_hi[3], filter, 1);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+ int32x4_t sum4567 = vcombine_s32(vmovn_s64(sum45), vmovn_s64(sum67));
+ uint16x8_t res = vcombine_u16(vqrshrun_n_s32(sum0123, FILTER_BITS),
+ vqrshrun_n_s32(sum4567, FILTER_BITS));
+ return vminq_u16(res, max);
+}
+
+void highbd_convolve_y_sr_8tap_sve2(const uint16_t *src, ptrdiff_t src_stride,
+ uint16_t *dst, ptrdiff_t dst_stride,
+ int width, int height,
+ const int16_t *filter_y, int bd) {
+  assert(width >= 4 && height >= 4);
+
+ const int16x8_t y_filter = vld1q_s16(filter_y);
+
+ uint16x8x3_t merge_block_tbl = vld1q_u16_x3(kDotProdMergeBlockTbl);
+  // Scale indices by the true vector length to avoid reading from an
+  // 'undefined' portion of a vector on a system with SVE vectors > 128-bit.
+ uint16x8_t correction0 =
+ vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000000000000ULL));
+ merge_block_tbl.val[0] = vaddq_u16(merge_block_tbl.val[0], correction0);
+
+ uint16x8_t correction1 =
+ vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000100000000ULL));
+ merge_block_tbl.val[1] = vaddq_u16(merge_block_tbl.val[1], correction1);
+
+ uint16x8_t correction2 =
+ vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000100010000ULL));
+ merge_block_tbl.val[2] = vaddq_u16(merge_block_tbl.val[2], correction2);
+
+ if (width == 4) {
+ const uint16x4_t max = vdup_n_u16((1 << bd) - 1);
+ int16_t *s = (int16_t *)src;
+
+ int16x4_t s0, s1, s2, s3, s4, s5, s6;
+ load_s16_4x7(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6);
+ s += 7 * src_stride;
+
+ // This operation combines a conventional transpose and the sample permute
+ // required before computing the dot product.
+ int16x8_t s0123[2], s1234[2], s2345[2], s3456[2];
+ transpose_concat_4x4(s0, s1, s2, s3, s0123);
+ transpose_concat_4x4(s1, s2, s3, s4, s1234);
+ transpose_concat_4x4(s2, s3, s4, s5, s2345);
+ transpose_concat_4x4(s3, s4, s5, s6, s3456);
+
+ do {
+ int16x4_t s7, s8, s9, s10;
+ load_s16_4x4(s, src_stride, &s7, &s8, &s9, &s10);
+
+ int16x8_t s4567[2], s5678[2], s6789[2], s789A[2];
+ // Transpose and shuffle the 4 lines that were loaded.
+ transpose_concat_4x4(s7, s8, s9, s10, s789A);
+
+ // Merge new data into block from previous iteration.
+ aom_tbl2x2_s16(s3456, s789A, merge_block_tbl.val[0], s4567);
+ aom_tbl2x2_s16(s3456, s789A, merge_block_tbl.val[1], s5678);
+ aom_tbl2x2_s16(s3456, s789A, merge_block_tbl.val[2], s6789);
+
+ uint16x4_t d0 = highbd_convolve8_4_y(s0123, s4567, y_filter, max);
+ uint16x4_t d1 = highbd_convolve8_4_y(s1234, s5678, y_filter, max);
+ uint16x4_t d2 = highbd_convolve8_4_y(s2345, s6789, y_filter, max);
+ uint16x4_t d3 = highbd_convolve8_4_y(s3456, s789A, y_filter, max);
+
+ store_u16_4x4(dst, dst_stride, d0, d1, d2, d3);
+
+ // Prepare block for next iteration - re-using as much as possible.
+ // Shuffle everything up four rows.
+ s0123[0] = s4567[0];
+ s0123[1] = s4567[1];
+ s1234[0] = s5678[0];
+ s1234[1] = s5678[1];
+ s2345[0] = s6789[0];
+ s2345[1] = s6789[1];
+ s3456[0] = s789A[0];
+ s3456[1] = s789A[1];
+ s += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height != 0);
+ } else {
+ const uint16x8_t max = vdupq_n_u16((1 << bd) - 1);
+
+ do {
+ int h = height;
+ int16_t *s = (int16_t *)src;
+ uint16_t *d = dst;
+
+ int16x8_t s0, s1, s2, s3, s4, s5, s6;
+ load_s16_8x7(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6);
+ s += 7 * src_stride;
+
+ // This operation combines a conventional transpose and the sample permute
+ // required before computing the dot product.
+ int16x8_t s0123[4], s1234[4], s2345[4], s3456[4];
+ transpose_concat_8x4(s0, s1, s2, s3, s0123);
+ transpose_concat_8x4(s1, s2, s3, s4, s1234);
+ transpose_concat_8x4(s2, s3, s4, s5, s2345);
+ transpose_concat_8x4(s3, s4, s5, s6, s3456);
+
+ do {
+ int16x8_t s7, s8, s9, s10;
+ load_s16_8x4(s, src_stride, &s7, &s8, &s9, &s10);
+
+ int16x8_t s4567[4], s5678[4], s6789[4], s789A[4];
+ // Transpose and shuffle the 4 lines that were loaded.
+ transpose_concat_8x4(s7, s8, s9, s10, s789A);
+
+ // Merge new data into block from previous iteration.
+ aom_tbl2x4_s16(s3456, s789A, merge_block_tbl.val[0], s4567);
+ aom_tbl2x4_s16(s3456, s789A, merge_block_tbl.val[1], s5678);
+ aom_tbl2x4_s16(s3456, s789A, merge_block_tbl.val[2], s6789);
+
+ uint16x8_t d0 = highbd_convolve8_8_y(s0123, s4567, y_filter, max);
+ uint16x8_t d1 = highbd_convolve8_8_y(s1234, s5678, y_filter, max);
+ uint16x8_t d2 = highbd_convolve8_8_y(s2345, s6789, y_filter, max);
+ uint16x8_t d3 = highbd_convolve8_8_y(s3456, s789A, y_filter, max);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ // Prepare block for next iteration - re-using as much as possible.
+ // Shuffle everything up four rows.
+ s0123[0] = s4567[0];
+ s0123[1] = s4567[1];
+ s0123[2] = s4567[2];
+ s0123[3] = s4567[3];
+ s1234[0] = s5678[0];
+ s1234[1] = s5678[1];
+ s1234[2] = s5678[2];
+ s1234[3] = s5678[3];
+ s2345[0] = s6789[0];
+ s2345[1] = s6789[1];
+ s2345[2] = s6789[2];
+ s2345[3] = s6789[3];
+ s3456[0] = s789A[0];
+ s3456[1] = s789A[1];
+ s3456[2] = s789A[2];
+ s3456[3] = s789A[3];
+
+ s += 4 * src_stride;
+ d += 4 * dst_stride;
+ h -= 4;
+ } while (h != 0);
+ src += 8;
+ dst += 8;
+ width -= 8;
+ } while (width != 0);
+ }
+}
+
+static INLINE uint16x4_t highbd_convolve4_4_y(int16x8_t samples[2],
+ int16x8_t filter,
+ uint16x4_t max) {
+ int64x2_t sum01 = aom_svdot_lane_s16(vdupq_n_s64(0), samples[0], filter, 0);
+ int64x2_t sum23 = aom_svdot_lane_s16(vdupq_n_s64(0), samples[1], filter, 0);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+ uint16x4_t res = vqrshrun_n_s32(sum0123, FILTER_BITS);
+ return vmin_u16(res, max);
+}
+
+static INLINE uint16x8_t highbd_convolve4_8_y(int16x8_t samples[4],
+ int16x8_t filter,
+ uint16x8_t max) {
+ int64x2_t sum01 = aom_svdot_lane_s16(vdupq_n_s64(0), samples[0], filter, 0);
+ int64x2_t sum23 = aom_svdot_lane_s16(vdupq_n_s64(0), samples[1], filter, 0);
+ int64x2_t sum45 = aom_svdot_lane_s16(vdupq_n_s64(0), samples[2], filter, 0);
+ int64x2_t sum67 = aom_svdot_lane_s16(vdupq_n_s64(0), samples[3], filter, 0);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+ int32x4_t sum4567 = vcombine_s32(vmovn_s64(sum45), vmovn_s64(sum67));
+ uint16x8_t res = vcombine_u16(vqrshrun_n_s32(sum0123, FILTER_BITS),
+ vqrshrun_n_s32(sum4567, FILTER_BITS));
+ return vminq_u16(res, max);
+}
+
+void highbd_convolve_y_sr_4tap_sve2(const uint16_t *src, ptrdiff_t src_stride,
+ uint16_t *dst, ptrdiff_t dst_stride,
+ int width, int height,
+ const int16_t *filter_y, int bd) {
+  assert(width >= 4 && height >= 4);
+
+ const int16x8_t y_filter =
+ vcombine_s16(vld1_s16(filter_y + 2), vdup_n_s16(0));
+
+ if (width == 4) {
+ const uint16x4_t max = vdup_n_u16((1 << bd) - 1);
+ int16_t *s = (int16_t *)src;
+
+ int16x4_t s0, s1, s2;
+ load_s16_4x3(s, src_stride, &s0, &s1, &s2);
+ s += 3 * src_stride;
+
+ do {
+ int16x4_t s3, s4, s5, s6;
+ load_s16_4x4(s, src_stride, &s3, &s4, &s5, &s6);
+
+ // This operation combines a conventional transpose and the sample permute
+ // required before computing the dot product.
+ int16x8_t s0123[2], s1234[2], s2345[2], s3456[2];
+ transpose_concat_4x4(s0, s1, s2, s3, s0123);
+ transpose_concat_4x4(s1, s2, s3, s4, s1234);
+ transpose_concat_4x4(s2, s3, s4, s5, s2345);
+ transpose_concat_4x4(s3, s4, s5, s6, s3456);
+
+ uint16x4_t d0 = highbd_convolve4_4_y(s0123, y_filter, max);
+ uint16x4_t d1 = highbd_convolve4_4_y(s1234, y_filter, max);
+ uint16x4_t d2 = highbd_convolve4_4_y(s2345, y_filter, max);
+ uint16x4_t d3 = highbd_convolve4_4_y(s3456, y_filter, max);
+
+ store_u16_4x4(dst, dst_stride, d0, d1, d2, d3);
+
+ // Shuffle everything up four rows.
+ s0 = s4;
+ s1 = s5;
+ s2 = s6;
+
+ s += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height != 0);
+ } else {
+ const uint16x8_t max = vdupq_n_u16((1 << bd) - 1);
+
+ do {
+ int h = height;
+ int16_t *s = (int16_t *)src;
+ uint16_t *d = dst;
+
+ int16x8_t s0, s1, s2;
+ load_s16_8x3(s, src_stride, &s0, &s1, &s2);
+ s += 3 * src_stride;
+
+ do {
+ int16x8_t s3, s4, s5, s6;
+ load_s16_8x4(s, src_stride, &s3, &s4, &s5, &s6);
+
+ // This operation combines a conventional transpose and the sample
+ // permute required before computing the dot product.
+ int16x8_t s0123[4], s1234[4], s2345[4], s3456[4];
+ transpose_concat_8x4(s0, s1, s2, s3, s0123);
+ transpose_concat_8x4(s1, s2, s3, s4, s1234);
+ transpose_concat_8x4(s2, s3, s4, s5, s2345);
+ transpose_concat_8x4(s3, s4, s5, s6, s3456);
+
+ uint16x8_t d0 = highbd_convolve4_8_y(s0123, y_filter, max);
+ uint16x8_t d1 = highbd_convolve4_8_y(s1234, y_filter, max);
+ uint16x8_t d2 = highbd_convolve4_8_y(s2345, y_filter, max);
+ uint16x8_t d3 = highbd_convolve4_8_y(s3456, y_filter, max);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ // Shuffle everything up four rows.
+ s0 = s4;
+ s1 = s5;
+ s2 = s6;
+
+ s += 4 * src_stride;
+ d += 4 * dst_stride;
+ h -= 4;
+ } while (h != 0);
+ src += 8;
+ dst += 8;
+ width -= 8;
+ } while (width != 0);
+ }
+}
+
+void av1_highbd_convolve_y_sr_sve2(const uint16_t *src, int src_stride,
+ uint16_t *dst, int dst_stride, int w, int h,
+ const InterpFilterParams *filter_params_y,
+ const int subpel_y_qn, int bd) {
+ if (w == 2 || h == 2) {
+ av1_highbd_convolve_y_sr_c(src, src_stride, dst, dst_stride, w, h,
+ filter_params_y, subpel_y_qn, bd);
+ return;
+ }
+ const int y_filter_taps = get_filter_tap(filter_params_y, subpel_y_qn);
+
+ if (y_filter_taps == 6) {
+ av1_highbd_convolve_y_sr_neon(src, src_stride, dst, dst_stride, w, h,
+ filter_params_y, subpel_y_qn, bd);
+ return;
+ }
+
+ const int vert_offset = filter_params_y->taps / 2 - 1;
+ const int16_t *y_filter_ptr = av1_get_interp_filter_subpel_kernel(
+ filter_params_y, subpel_y_qn & SUBPEL_MASK);
+
+ src -= vert_offset * src_stride;
+
+ if (y_filter_taps > 8) {
+ highbd_convolve_y_sr_12tap_sve2(src, src_stride, dst, dst_stride, w, h,
+ y_filter_ptr, bd);
+ return;
+ }
+
+ if (y_filter_taps == 4) {
+ highbd_convolve_y_sr_4tap_sve2(src + 2 * src_stride, src_stride, dst,
+ dst_stride, w, h, y_filter_ptr, bd);
+ return;
+ }
+
+ highbd_convolve_y_sr_8tap_sve2(src, src_stride, dst, dst_stride, w, h,
+ y_filter_ptr, bd);
+}
+
+static INLINE uint16x4_t convolve12_4_2d_h(
+ int16x8_t s0, int16x8_t s1, int16x8_t filter_0_7, int16x8_t filter_4_11,
+ const int64x2_t offset, int32x4_t shift, uint16x8x4_t permute_tbl) {
+ int16x8_t permuted_samples[6];
+ permuted_samples[0] = aom_tbl_s16(s0, permute_tbl.val[0]);
+ permuted_samples[1] = aom_tbl_s16(s0, permute_tbl.val[1]);
+ permuted_samples[2] = aom_tbl2_s16(s0, s1, permute_tbl.val[2]);
+ permuted_samples[3] = aom_tbl2_s16(s0, s1, permute_tbl.val[3]);
+ permuted_samples[4] = aom_tbl_s16(s1, permute_tbl.val[0]);
+ permuted_samples[5] = aom_tbl_s16(s1, permute_tbl.val[1]);
+
+ int64x2_t sum01 =
+ aom_svdot_lane_s16(offset, permuted_samples[0], filter_0_7, 0);
+ sum01 = aom_svdot_lane_s16(sum01, permuted_samples[2], filter_0_7, 1);
+ sum01 = aom_svdot_lane_s16(sum01, permuted_samples[4], filter_4_11, 1);
+
+ int64x2_t sum23 =
+ aom_svdot_lane_s16(offset, permuted_samples[1], filter_0_7, 0);
+ sum23 = aom_svdot_lane_s16(sum23, permuted_samples[3], filter_0_7, 1);
+ sum23 = aom_svdot_lane_s16(sum23, permuted_samples[5], filter_4_11, 1);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+ sum0123 = vqrshlq_s32(sum0123, shift);
+ return vqmovun_s32(sum0123);
+}
+
+static INLINE uint16x8_t convolve12_8_2d_h(int16x8_t s0, int16x8_t s1,
+ int16x8_t s2, int16x8_t filter_0_7,
+ int16x8_t filter_4_11,
+ int64x2_t offset, int32x4_t shift,
+ uint16x8x4_t permute_tbl) {
+ int16x8_t permuted_samples[8];
+ permuted_samples[0] = aom_tbl_s16(s0, permute_tbl.val[0]);
+ permuted_samples[1] = aom_tbl_s16(s0, permute_tbl.val[1]);
+ permuted_samples[2] = aom_tbl2_s16(s0, s1, permute_tbl.val[2]);
+ permuted_samples[3] = aom_tbl2_s16(s0, s1, permute_tbl.val[3]);
+ permuted_samples[4] = aom_tbl_s16(s1, permute_tbl.val[0]);
+ permuted_samples[5] = aom_tbl_s16(s1, permute_tbl.val[1]);
+ permuted_samples[6] = aom_tbl2_s16(s1, s2, permute_tbl.val[2]);
+ permuted_samples[7] = aom_tbl2_s16(s1, s2, permute_tbl.val[3]);
+
+ int64x2_t sum01 =
+ aom_svdot_lane_s16(offset, permuted_samples[0], filter_0_7, 0);
+ sum01 = aom_svdot_lane_s16(sum01, permuted_samples[2], filter_0_7, 1);
+ sum01 = aom_svdot_lane_s16(sum01, permuted_samples[4], filter_4_11, 1);
+
+ int64x2_t sum23 =
+ aom_svdot_lane_s16(offset, permuted_samples[1], filter_0_7, 0);
+ sum23 = aom_svdot_lane_s16(sum23, permuted_samples[3], filter_0_7, 1);
+ sum23 = aom_svdot_lane_s16(sum23, permuted_samples[5], filter_4_11, 1);
+
+ int64x2_t sum45 =
+ aom_svdot_lane_s16(offset, permuted_samples[2], filter_0_7, 0);
+ sum45 = aom_svdot_lane_s16(sum45, permuted_samples[4], filter_0_7, 1);
+ sum45 = aom_svdot_lane_s16(sum45, permuted_samples[6], filter_4_11, 1);
+
+ int64x2_t sum67 =
+ aom_svdot_lane_s16(offset, permuted_samples[3], filter_0_7, 0);
+ sum67 = aom_svdot_lane_s16(sum67, permuted_samples[5], filter_0_7, 1);
+ sum67 = aom_svdot_lane_s16(sum67, permuted_samples[7], filter_4_11, 1);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+ int32x4_t sum4567 = vcombine_s32(vmovn_s64(sum45), vmovn_s64(sum67));
+
+ sum0123 = vqrshlq_s32(sum0123, shift);
+ sum4567 = vqrshlq_s32(sum4567, shift);
+
+ return vcombine_u16(vqmovun_s32(sum0123), vqmovun_s32(sum4567));
+}
+
+static INLINE void highbd_convolve_2d_sr_horiz_12tap_sve2(
+ const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride,
+ int width, int height, const int16_t *y_filter_ptr,
+ ConvolveParams *conv_params, const int x_offset) {
+ const int64x2_t offset = vdupq_n_s64(x_offset);
+ const int32x4_t shift = vdupq_n_s32(-conv_params->round_0);
+
+ const int16x8_t y_filter_0_7 = vld1q_s16(y_filter_ptr);
+ const int16x8_t y_filter_4_11 = vld1q_s16(y_filter_ptr + 4);
+
+ uint16x8x4_t permute_tbl = vld1q_u16_x4(kDotProdTbl);
+  // Scale indices by the true vector length to avoid reading from an
+  // 'undefined' portion of a vector on a system with SVE vectors > 128-bit.
+ uint16x8_t correction0 = vreinterpretq_u16_u64(vcombine_u64(
+ vdup_n_u64(0), vdup_n_u64(svcnth() * 0x0001000000000000ULL)));
+ permute_tbl.val[2] = vaddq_u16(permute_tbl.val[2], correction0);
+
+ uint16x8_t correction1 = vreinterpretq_u16_u64(
+ vcombine_u64(vdup_n_u64(svcnth() * 0x0001000100000000ULL),
+ vdup_n_u64(svcnth() * 0x0001000100010000ULL)));
+ permute_tbl.val[3] = vaddq_u16(permute_tbl.val[3], correction1);
+
+ if (width == 4) {
+ const int16_t *s = (const int16_t *)src;
+
+ do {
+ int16x8_t s0, s1, s2, s3, s4, s5, s6, s7;
+ load_s16_8x4(s, src_stride, &s0, &s2, &s4, &s6);
+ load_s16_8x4(s + 8, src_stride, &s1, &s3, &s5, &s7);
+
+ uint16x4_t d0 = convolve12_4_2d_h(s0, s1, y_filter_0_7, y_filter_4_11,
+ offset, shift, permute_tbl);
+ uint16x4_t d1 = convolve12_4_2d_h(s2, s3, y_filter_0_7, y_filter_4_11,
+ offset, shift, permute_tbl);
+ uint16x4_t d2 = convolve12_4_2d_h(s4, s5, y_filter_0_7, y_filter_4_11,
+ offset, shift, permute_tbl);
+ uint16x4_t d3 = convolve12_4_2d_h(s6, s7, y_filter_0_7, y_filter_4_11,
+ offset, shift, permute_tbl);
+
+ store_u16_4x4(dst, dst_stride, d0, d1, d2, d3);
+
+ dst += 4 * dst_stride;
+ s += 4 * src_stride;
+ height -= 4;
+ } while (height > 0);
+ } else {
+ do {
+ const int16_t *s = (const int16_t *)src;
+ uint16_t *d = dst;
+ int w = width;
+
+ do {
+ int16x8_t s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11;
+ load_s16_8x4(s, src_stride, &s0, &s3, &s6, &s9);
+ load_s16_8x4(s + 8, src_stride, &s1, &s4, &s7, &s10);
+ load_s16_8x4(s + 16, src_stride, &s2, &s5, &s8, &s11);
+
+ uint16x8_t d0 =
+ convolve12_8_2d_h(s0, s1, s2, y_filter_0_7, y_filter_4_11, offset,
+ shift, permute_tbl);
+ uint16x8_t d1 =
+ convolve12_8_2d_h(s3, s4, s5, y_filter_0_7, y_filter_4_11, offset,
+ shift, permute_tbl);
+ uint16x8_t d2 =
+ convolve12_8_2d_h(s6, s7, s8, y_filter_0_7, y_filter_4_11, offset,
+ shift, permute_tbl);
+ uint16x8_t d3 =
+ convolve12_8_2d_h(s9, s10, s11, y_filter_0_7, y_filter_4_11, offset,
+ shift, permute_tbl);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ s += 8;
+ d += 8;
+ w -= 8;
+ } while (w != 0);
+ src += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height > 0);
+ }
+}
+
+static INLINE uint16x8_t convolve8_8_2d_h(int16x8_t s0[8], int16x8_t filter,
+ int64x2_t offset, int32x4_t shift) {
+ int64x2_t sum[8];
+ sum[0] = aom_sdotq_s16(offset, s0[0], filter);
+ sum[1] = aom_sdotq_s16(offset, s0[1], filter);
+ sum[2] = aom_sdotq_s16(offset, s0[2], filter);
+ sum[3] = aom_sdotq_s16(offset, s0[3], filter);
+ sum[4] = aom_sdotq_s16(offset, s0[4], filter);
+ sum[5] = aom_sdotq_s16(offset, s0[5], filter);
+ sum[6] = aom_sdotq_s16(offset, s0[6], filter);
+ sum[7] = aom_sdotq_s16(offset, s0[7], filter);
+
+ sum[0] = vpaddq_s64(sum[0], sum[1]);
+ sum[2] = vpaddq_s64(sum[2], sum[3]);
+ sum[4] = vpaddq_s64(sum[4], sum[5]);
+ sum[6] = vpaddq_s64(sum[6], sum[7]);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum[0]), vmovn_s64(sum[2]));
+ int32x4_t sum4567 = vcombine_s32(vmovn_s64(sum[4]), vmovn_s64(sum[6]));
+
+ sum0123 = vqrshlq_s32(sum0123, shift);
+ sum4567 = vqrshlq_s32(sum4567, shift);
+
+ return vcombine_u16(vqmovun_s32(sum0123), vqmovun_s32(sum4567));
+}
+
+static INLINE void highbd_convolve_2d_sr_horiz_8tap_sve2(
+ const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride,
+ int width, int height, const int16_t *y_filter_ptr,
+ ConvolveParams *conv_params, const int x_offset) {
+ const int64x2_t offset = vdupq_n_s64(x_offset);
+ const int64x2_t offset_lo = vcombine_s64(vget_low_s64(offset), vdup_n_s64(0));
+ const int32x4_t shift = vdupq_n_s32(-conv_params->round_0);
+
+ const int16x8_t filter = vld1q_s16(y_filter_ptr);
+
+ do {
+ const int16_t *s = (const int16_t *)src;
+ uint16_t *d = dst;
+ int w = width;
+
+ do {
+ int16x8_t s0[8], s1[8], s2[8], s3[8];
+ load_s16_8x8(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3],
+ &s0[4], &s0[5], &s0[6], &s0[7]);
+ load_s16_8x8(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3],
+ &s1[4], &s1[5], &s1[6], &s1[7]);
+ load_s16_8x8(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3],
+ &s2[4], &s2[5], &s2[6], &s2[7]);
+ load_s16_8x8(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3],
+ &s3[4], &s3[5], &s3[6], &s3[7]);
+
+ uint16x8_t d0 = convolve8_8_2d_h(s0, filter, offset_lo, shift);
+ uint16x8_t d1 = convolve8_8_2d_h(s1, filter, offset_lo, shift);
+ uint16x8_t d2 = convolve8_8_2d_h(s2, filter, offset_lo, shift);
+ uint16x8_t d3 = convolve8_8_2d_h(s3, filter, offset_lo, shift);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ s += 8;
+ d += 8;
+ w -= 8;
+ } while (w != 0);
+ src += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height > 0);
+}
+
+static INLINE uint16x4_t convolve4_4_2d_h(int16x8_t s0, int16x8_t filter,
+ int64x2_t offset, int32x4_t shift,
+ uint16x8x2_t permute_tbl) {
+ int16x8_t permuted_samples0 = aom_tbl_s16(s0, permute_tbl.val[0]);
+ int16x8_t permuted_samples1 = aom_tbl_s16(s0, permute_tbl.val[1]);
+
+ int64x2_t sum01 = aom_svdot_lane_s16(offset, permuted_samples0, filter, 0);
+ int64x2_t sum23 = aom_svdot_lane_s16(offset, permuted_samples1, filter, 0);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+ sum0123 = vqrshlq_s32(sum0123, shift);
+ return vqmovun_s32(sum0123);
+}
+
+static INLINE uint16x8_t convolve4_8_2d_h(int16x8_t s0[8], int16x8_t filter,
+ int64x2_t offset, int32x4_t shift,
+ uint16x8_t tbl) {
+ int64x2_t sum04 = aom_svdot_lane_s16(offset, s0[0], filter, 0);
+ int64x2_t sum15 = aom_svdot_lane_s16(offset, s0[1], filter, 0);
+ int64x2_t sum26 = aom_svdot_lane_s16(offset, s0[2], filter, 0);
+ int64x2_t sum37 = aom_svdot_lane_s16(offset, s0[3], filter, 0);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum04), vmovn_s64(sum15));
+ int32x4_t sum4567 = vcombine_s32(vmovn_s64(sum26), vmovn_s64(sum37));
+
+ sum0123 = vqrshlq_s32(sum0123, shift);
+ sum4567 = vqrshlq_s32(sum4567, shift);
+
+ uint16x8_t res = vcombine_u16(vqmovun_s32(sum0123), vqmovun_s32(sum4567));
+ return aom_tbl_u16(res, tbl);
+}
+
+static INLINE void highbd_convolve_2d_sr_horiz_4tap_sve2(
+ const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride,
+ int width, int height, const int16_t *x_filter_ptr,
+ ConvolveParams *conv_params, const int x_offset) {
+ const int64x2_t offset = vdupq_n_s64(x_offset);
+ const int32x4_t shift = vdupq_n_s32(-conv_params->round_0);
+
+ const int16x4_t x_filter = vld1_s16(x_filter_ptr + 2);
+ const int16x8_t filter = vcombine_s16(x_filter, vdup_n_s16(0));
+
+ if (width == 4) {
+ const int16_t *s = (const int16_t *)(src);
+
+ uint16x8x2_t permute_tbl = vld1q_u16_x2(kDotProdTbl);
+
+ do {
+ int16x8_t s0, s1, s2, s3;
+ load_s16_8x4(s, src_stride, &s0, &s1, &s2, &s3);
+
+ uint16x4_t d0 = convolve4_4_2d_h(s0, filter, offset, shift, permute_tbl);
+ uint16x4_t d1 = convolve4_4_2d_h(s1, filter, offset, shift, permute_tbl);
+ uint16x4_t d2 = convolve4_4_2d_h(s2, filter, offset, shift, permute_tbl);
+ uint16x4_t d3 = convolve4_4_2d_h(s3, filter, offset, shift, permute_tbl);
+
+ store_u16_4x4(dst, dst_stride, d0, d1, d2, d3);
+
+ s += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height > 0);
+ } else {
+ uint16x8_t idx = vld1q_u16(kDeinterleaveTbl);
+
+ do {
+ const int16_t *s = (const int16_t *)(src);
+ uint16_t *d = dst;
+ int w = width;
+
+ do {
+ int16x8_t s0[8], s1[8], s2[8], s3[8];
+ load_s16_8x8(s + 0 * src_stride, 1, &s0[0], &s0[1], &s0[2], &s0[3],
+ &s0[4], &s0[5], &s0[6], &s0[7]);
+ load_s16_8x8(s + 1 * src_stride, 1, &s1[0], &s1[1], &s1[2], &s1[3],
+ &s1[4], &s1[5], &s1[6], &s1[7]);
+ load_s16_8x8(s + 2 * src_stride, 1, &s2[0], &s2[1], &s2[2], &s2[3],
+ &s2[4], &s2[5], &s2[6], &s2[7]);
+ load_s16_8x8(s + 3 * src_stride, 1, &s3[0], &s3[1], &s3[2], &s3[3],
+ &s3[4], &s3[5], &s3[6], &s3[7]);
+
+ uint16x8_t d0 = convolve4_8_2d_h(s0, filter, offset, shift, idx);
+ uint16x8_t d1 = convolve4_8_2d_h(s1, filter, offset, shift, idx);
+ uint16x8_t d2 = convolve4_8_2d_h(s2, filter, offset, shift, idx);
+ uint16x8_t d3 = convolve4_8_2d_h(s3, filter, offset, shift, idx);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ s += 8;
+ d += 8;
+ w -= 8;
+ } while (w != 0);
+ src += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height > 0);
+ }
+}
+
+static INLINE uint16x4_t highbd_convolve12_4_2d_v(
+ int16x8_t s0[2], int16x8_t s1[2], int16x8_t s2[2], int16x8_t filter_0_7,
+ int16x8_t filter_4_11, int32x4_t shift, int64x2_t offset, uint16x4_t max) {
+ int64x2_t sum01 = aom_svdot_lane_s16(offset, s0[0], filter_0_7, 0);
+ sum01 = aom_svdot_lane_s16(sum01, s1[0], filter_0_7, 1);
+ sum01 = aom_svdot_lane_s16(sum01, s2[0], filter_4_11, 1);
+
+ int64x2_t sum23 = aom_svdot_lane_s16(offset, s0[1], filter_0_7, 0);
+ sum23 = aom_svdot_lane_s16(sum23, s1[1], filter_0_7, 1);
+ sum23 = aom_svdot_lane_s16(sum23, s2[1], filter_4_11, 1);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+ sum0123 = vshlq_s32(sum0123, shift);
+
+ uint16x4_t res = vqmovun_s32(sum0123);
+
+ return vmin_u16(res, max);
+}
+
+static INLINE void highbd_convolve_2d_sr_vert_12tap_sve2(
+ const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride,
+ int width, int height, const int16_t *y_filter_ptr,
+ ConvolveParams *conv_params, int bd, const int y_offset) {
+ const int64x2_t offset = vdupq_n_s64(y_offset);
+ const int32x4_t shift = vdupq_n_s32(-conv_params->round_1);
+
+ const int16x8_t y_filter_0_7 = vld1q_s16(y_filter_ptr);
+ const int16x8_t y_filter_4_11 = vld1q_s16(y_filter_ptr + 4);
+
+ uint16x8x3_t merge_block_tbl = vld1q_u16_x3(kDotProdMergeBlockTbl);
+  // Scale indices by the true vector length to avoid reading from an
+  // 'undefined' portion of a vector on a system with SVE vectors > 128-bit.
+ uint16x8_t correction0 =
+ vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000000000000ULL));
+ merge_block_tbl.val[0] = vaddq_u16(merge_block_tbl.val[0], correction0);
+
+ uint16x8_t correction1 =
+ vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000100000000ULL));
+ merge_block_tbl.val[1] = vaddq_u16(merge_block_tbl.val[1], correction1);
+
+ uint16x8_t correction2 =
+ vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000100010000ULL));
+ merge_block_tbl.val[2] = vaddq_u16(merge_block_tbl.val[2], correction2);
+
+ const uint16x4_t max = vdup_n_u16((1 << bd) - 1);
+
+ do {
+ int16_t *s = (int16_t *)src;
+ uint16_t *d = (uint16_t *)dst;
+ int h = height;
+
+ int16x4_t s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA;
+ load_s16_4x11(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6, &s7, &s8,
+ &s9, &sA);
+ s += 11 * src_stride;
+
+ int16x8_t s0123[2], s1234[2], s2345[2], s3456[2], s4567[2], s5678[2],
+ s6789[2], s789A[2];
+ // This operation combines a conventional transpose and the sample permute
+ // required before computing the dot product.
+ transpose_concat_4x4(s0, s1, s2, s3, s0123);
+ transpose_concat_4x4(s1, s2, s3, s4, s1234);
+ transpose_concat_4x4(s2, s3, s4, s5, s2345);
+ transpose_concat_4x4(s3, s4, s5, s6, s3456);
+ transpose_concat_4x4(s4, s5, s6, s7, s4567);
+ transpose_concat_4x4(s5, s6, s7, s8, s5678);
+ transpose_concat_4x4(s6, s7, s8, s9, s6789);
+ transpose_concat_4x4(s7, s8, s9, sA, s789A);
+
+ do {
+ int16x4_t sB, sC, sD, sE;
+ load_s16_4x4(s, src_stride, &sB, &sC, &sD, &sE);
+
+ int16x8_t s89AB[2], s9ABC[2], sABCD[2], sBCDE[2];
+ transpose_concat_4x4(sB, sC, sD, sE, sBCDE);
+
+ // Use the above transpose and reuse data from the previous loop to get
+ // the rest.
+ aom_tbl2x2_s16(s789A, sBCDE, merge_block_tbl.val[0], s89AB);
+ aom_tbl2x2_s16(s789A, sBCDE, merge_block_tbl.val[1], s9ABC);
+ aom_tbl2x2_s16(s789A, sBCDE, merge_block_tbl.val[2], sABCD);
+
+ uint16x4_t d0 = highbd_convolve12_4_2d_v(
+ s0123, s4567, s89AB, y_filter_0_7, y_filter_4_11, shift, offset, max);
+ uint16x4_t d1 = highbd_convolve12_4_2d_v(
+ s1234, s5678, s9ABC, y_filter_0_7, y_filter_4_11, shift, offset, max);
+ uint16x4_t d2 = highbd_convolve12_4_2d_v(
+ s2345, s6789, sABCD, y_filter_0_7, y_filter_4_11, shift, offset, max);
+ uint16x4_t d3 = highbd_convolve12_4_2d_v(
+ s3456, s789A, sBCDE, y_filter_0_7, y_filter_4_11, shift, offset, max);
+
+ store_u16_4x4(d, dst_stride, d0, d1, d2, d3);
+
+ // Prepare block for next iteration - re-using as much as possible.
+ // Shuffle everything up four rows.
+ s0123[0] = s4567[0];
+ s0123[1] = s4567[1];
+ s1234[0] = s5678[0];
+ s1234[1] = s5678[1];
+ s2345[0] = s6789[0];
+ s2345[1] = s6789[1];
+ s3456[0] = s789A[0];
+ s3456[1] = s789A[1];
+ s4567[0] = s89AB[0];
+ s4567[1] = s89AB[1];
+ s5678[0] = s9ABC[0];
+ s5678[1] = s9ABC[1];
+ s6789[0] = sABCD[0];
+ s6789[1] = sABCD[1];
+ s789A[0] = sBCDE[0];
+ s789A[1] = sBCDE[1];
+
+ s += 4 * src_stride;
+ d += 4 * dst_stride;
+ h -= 4;
+ } while (h != 0);
+ src += 4;
+ dst += 4;
+ width -= 4;
+ } while (width != 0);
+}
+
+static INLINE uint16x4_t highbd_convolve8_4_2d_v(
+ int16x8_t samples_lo[2], int16x8_t samples_hi[2], int16x8_t filter,
+ int32x4_t shift, int64x2_t offset, uint16x4_t max) {
+ int64x2_t sum01 = aom_svdot_lane_s16(offset, samples_lo[0], filter, 0);
+ sum01 = aom_svdot_lane_s16(sum01, samples_hi[0], filter, 1);
+
+ int64x2_t sum23 = aom_svdot_lane_s16(offset, samples_lo[1], filter, 0);
+ sum23 = aom_svdot_lane_s16(sum23, samples_hi[1], filter, 1);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+ sum0123 = vshlq_s32(sum0123, shift);
+
+ uint16x4_t res = vqmovun_s32(sum0123);
+ return vmin_u16(res, max);
+}
+
+static INLINE uint16x8_t highbd_convolve8_8_2d_v(
+ int16x8_t samples_lo[4], int16x8_t samples_hi[4], int16x8_t filter,
+ int32x4_t shift, int64x2_t offset, uint16x8_t max) {
+ int64x2_t sum01 = aom_svdot_lane_s16(offset, samples_lo[0], filter, 0);
+ sum01 = aom_svdot_lane_s16(sum01, samples_hi[0], filter, 1);
+
+ int64x2_t sum23 = aom_svdot_lane_s16(offset, samples_lo[1], filter, 0);
+ sum23 = aom_svdot_lane_s16(sum23, samples_hi[1], filter, 1);
+
+ int64x2_t sum45 = aom_svdot_lane_s16(offset, samples_lo[2], filter, 0);
+ sum45 = aom_svdot_lane_s16(sum45, samples_hi[2], filter, 1);
+
+ int64x2_t sum67 = aom_svdot_lane_s16(offset, samples_lo[3], filter, 0);
+ sum67 = aom_svdot_lane_s16(sum67, samples_hi[3], filter, 1);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+ int32x4_t sum4567 = vcombine_s32(vmovn_s64(sum45), vmovn_s64(sum67));
+
+ sum0123 = vshlq_s32(sum0123, shift);
+ sum4567 = vshlq_s32(sum4567, shift);
+
+ uint16x8_t res = vcombine_u16(vqmovun_s32(sum0123), vqmovun_s32(sum4567));
+ return vminq_u16(res, max);
+}
+
+void highbd_convolve_2d_sr_vert_8tap_sve2(const uint16_t *src,
+ ptrdiff_t src_stride, uint16_t *dst,
+ ptrdiff_t dst_stride, int width,
+ int height, const int16_t *filter_y,
+ ConvolveParams *conv_params, int bd,
+ const int y_offset) {
+  assert(width >= 4 && height >= 4);
+ const int64x2_t offset = vdupq_n_s64(y_offset);
+ const int32x4_t shift = vdupq_n_s32(-conv_params->round_1);
+ const int16x8_t y_filter = vld1q_s16(filter_y);
+
+ uint16x8x3_t merge_block_tbl = vld1q_u16_x3(kDotProdMergeBlockTbl);
+  // Scale indices by the true vector length to avoid reading from an
+  // 'undefined' portion of a vector on a system with SVE vectors > 128-bit.
+ uint16x8_t correction0 =
+ vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000000000000ULL));
+ merge_block_tbl.val[0] = vaddq_u16(merge_block_tbl.val[0], correction0);
+
+ uint16x8_t correction1 =
+ vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000100000000ULL));
+ merge_block_tbl.val[1] = vaddq_u16(merge_block_tbl.val[1], correction1);
+
+ uint16x8_t correction2 =
+ vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000100010000ULL));
+ merge_block_tbl.val[2] = vaddq_u16(merge_block_tbl.val[2], correction2);
+
+ if (width == 4) {
+ const uint16x4_t max = vdup_n_u16((1 << bd) - 1);
+ int16_t *s = (int16_t *)src;
+
+ int16x4_t s0, s1, s2, s3, s4, s5, s6;
+ load_s16_4x7(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6);
+ s += 7 * src_stride;
+
+ // This operation combines a conventional transpose and the sample permute
+ // required before computing the dot product.
+ int16x8_t s0123[2], s1234[2], s2345[2], s3456[2];
+ transpose_concat_4x4(s0, s1, s2, s3, s0123);
+ transpose_concat_4x4(s1, s2, s3, s4, s1234);
+ transpose_concat_4x4(s2, s3, s4, s5, s2345);
+ transpose_concat_4x4(s3, s4, s5, s6, s3456);
+
+ do {
+ int16x4_t s7, s8, s9, s10;
+ load_s16_4x4(s, src_stride, &s7, &s8, &s9, &s10);
+
+ int16x8_t s4567[2], s5678[2], s6789[2], s789A[2];
+ // Transpose and shuffle the 4 lines that were loaded.
+ transpose_concat_4x4(s7, s8, s9, s10, s789A);
+
+ // Merge new data into block from previous iteration.
+ aom_tbl2x2_s16(s3456, s789A, merge_block_tbl.val[0], s4567);
+ aom_tbl2x2_s16(s3456, s789A, merge_block_tbl.val[1], s5678);
+ aom_tbl2x2_s16(s3456, s789A, merge_block_tbl.val[2], s6789);
+
+ uint16x4_t d0 =
+ highbd_convolve8_4_2d_v(s0123, s4567, y_filter, shift, offset, max);
+ uint16x4_t d1 =
+ highbd_convolve8_4_2d_v(s1234, s5678, y_filter, shift, offset, max);
+ uint16x4_t d2 =
+ highbd_convolve8_4_2d_v(s2345, s6789, y_filter, shift, offset, max);
+ uint16x4_t d3 =
+ highbd_convolve8_4_2d_v(s3456, s789A, y_filter, shift, offset, max);
+
+ store_u16_4x4(dst, dst_stride, d0, d1, d2, d3);
+
+ // Prepare block for next iteration - re-using as much as possible.
+ // Shuffle everything up four rows.
+ s0123[0] = s4567[0];
+ s0123[1] = s4567[1];
+ s1234[0] = s5678[0];
+ s1234[1] = s5678[1];
+ s2345[0] = s6789[0];
+ s2345[1] = s6789[1];
+ s3456[0] = s789A[0];
+ s3456[1] = s789A[1];
+
+ s += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height != 0);
+ } else {
+ const uint16x8_t max = vdupq_n_u16((1 << bd) - 1);
+
+ do {
+ int h = height;
+ int16_t *s = (int16_t *)src;
+ uint16_t *d = dst;
+
+ int16x8_t s0, s1, s2, s3, s4, s5, s6;
+ load_s16_8x7(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6);
+ s += 7 * src_stride;
+
+ // This operation combines a conventional transpose and the sample permute
+ // required before computing the dot product.
+ int16x8_t s0123[4], s1234[4], s2345[4], s3456[4];
+ transpose_concat_8x4(s0, s1, s2, s3, s0123);
+ transpose_concat_8x4(s1, s2, s3, s4, s1234);
+ transpose_concat_8x4(s2, s3, s4, s5, s2345);
+ transpose_concat_8x4(s3, s4, s5, s6, s3456);
+
+ do {
+ int16x8_t s7, s8, s9, s10;
+ load_s16_8x4(s, src_stride, &s7, &s8, &s9, &s10);
+
+ int16x8_t s4567[4], s5678[4], s6789[4], s789A[4];
+ // Transpose and shuffle the 4 lines that were loaded.
+ transpose_concat_8x4(s7, s8, s9, s10, s789A);
+
+ // Merge new data into block from previous iteration.
+ aom_tbl2x4_s16(s3456, s789A, merge_block_tbl.val[0], s4567);
+ aom_tbl2x4_s16(s3456, s789A, merge_block_tbl.val[1], s5678);
+ aom_tbl2x4_s16(s3456, s789A, merge_block_tbl.val[2], s6789);
+
+ uint16x8_t d0 =
+ highbd_convolve8_8_2d_v(s0123, s4567, y_filter, shift, offset, max);
+ uint16x8_t d1 =
+ highbd_convolve8_8_2d_v(s1234, s5678, y_filter, shift, offset, max);
+ uint16x8_t d2 =
+ highbd_convolve8_8_2d_v(s2345, s6789, y_filter, shift, offset, max);
+ uint16x8_t d3 =
+ highbd_convolve8_8_2d_v(s3456, s789A, y_filter, shift, offset, max);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ // Prepare block for next iteration - re-using as much as possible.
+ // Shuffle everything up four rows.
+ s0123[0] = s4567[0];
+ s0123[1] = s4567[1];
+ s0123[2] = s4567[2];
+ s0123[3] = s4567[3];
+ s1234[0] = s5678[0];
+ s1234[1] = s5678[1];
+ s1234[2] = s5678[2];
+ s1234[3] = s5678[3];
+ s2345[0] = s6789[0];
+ s2345[1] = s6789[1];
+ s2345[2] = s6789[2];
+ s2345[3] = s6789[3];
+ s3456[0] = s789A[0];
+ s3456[1] = s789A[1];
+ s3456[2] = s789A[2];
+ s3456[3] = s789A[3];
+
+ s += 4 * src_stride;
+ d += 4 * dst_stride;
+ h -= 4;
+ } while (h != 0);
+ src += 8;
+ dst += 8;
+ width -= 8;
+ } while (width != 0);
+ }
+}
+
+static INLINE uint16x4_t highbd_convolve4_4_2d_v(int16x8_t samples[2],
+ int16x8_t filter,
+ int32x4_t shift,
+ int64x2_t offset,
+ uint16x4_t max) {
+ int64x2_t sum01 = aom_svdot_lane_s16(offset, samples[0], filter, 0);
+ int64x2_t sum23 = aom_svdot_lane_s16(offset, samples[1], filter, 0);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+ sum0123 = vshlq_s32(sum0123, shift);
+
+ uint16x4_t res = vqmovun_s32(sum0123);
+ return vmin_u16(res, max);
+}
+
+static INLINE uint16x8_t highbd_convolve4_8_2d_v(int16x8_t samples[4],
+ int16x8_t filter,
+ int32x4_t shift,
+ int64x2_t offset,
+ uint16x8_t max) {
+ int64x2_t sum01 = aom_svdot_lane_s16(offset, samples[0], filter, 0);
+ int64x2_t sum23 = aom_svdot_lane_s16(offset, samples[1], filter, 0);
+ int64x2_t sum45 = aom_svdot_lane_s16(offset, samples[2], filter, 0);
+ int64x2_t sum67 = aom_svdot_lane_s16(offset, samples[3], filter, 0);
+
+ int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+ int32x4_t sum4567 = vcombine_s32(vmovn_s64(sum45), vmovn_s64(sum67));
+
+ sum0123 = vshlq_s32(sum0123, shift);
+ sum4567 = vshlq_s32(sum4567, shift);
+
+ uint16x8_t res = vcombine_u16(vqmovun_s32(sum0123), vqmovun_s32(sum4567));
+ return vminq_u16(res, max);
+}
+
+void highbd_convolve_2d_sr_vert_4tap_sve2(const uint16_t *src,
+ ptrdiff_t src_stride, uint16_t *dst,
+ ptrdiff_t dst_stride, int width,
+ int height, const int16_t *filter_y,
+ ConvolveParams *conv_params, int bd,
+ const int y_offset) {
+ assert(width >= 4 && height >= 4);
+ const int64x2_t offset = vdupq_n_s64(y_offset);
+ const int32x4_t shift = vdupq_n_s32(-conv_params->round_1);
+
+ const int16x8_t y_filter =
+ vcombine_s16(vld1_s16(filter_y + 2), vdup_n_s16(0));
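+ // A 4-tap kernel keeps its non-zero coefficients in positions 2..5 of the
+ // 8-tap filter array (hence the +2 above). Only the low half of y_filter is
+ // used, via svdot lane 0, so the zeroed upper half is never selected.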
+
+ if (width == 4) {
+ const uint16x4_t max = vdup_n_u16((1 << bd) - 1);
+ int16_t *s = (int16_t *)(src);
+
+ int16x4_t s0, s1, s2;
+ load_s16_4x3(s, src_stride, &s0, &s1, &s2);
+ s += 3 * src_stride;
+
+ do {
+ int16x4_t s3, s4, s5, s6;
+ load_s16_4x4(s, src_stride, &s3, &s4, &s5, &s6);
+
+ // This operation combines a conventional transpose and the sample permute
+ // required before computing the dot product.
+ int16x8_t s0123[2], s1234[2], s2345[2], s3456[2];
+ transpose_concat_4x4(s0, s1, s2, s3, s0123);
+ transpose_concat_4x4(s1, s2, s3, s4, s1234);
+ transpose_concat_4x4(s2, s3, s4, s5, s2345);
+ transpose_concat_4x4(s3, s4, s5, s6, s3456);
+
+ uint16x4_t d0 =
+ highbd_convolve4_4_2d_v(s0123, y_filter, shift, offset, max);
+ uint16x4_t d1 =
+ highbd_convolve4_4_2d_v(s1234, y_filter, shift, offset, max);
+ uint16x4_t d2 =
+ highbd_convolve4_4_2d_v(s2345, y_filter, shift, offset, max);
+ uint16x4_t d3 =
+ highbd_convolve4_4_2d_v(s3456, y_filter, shift, offset, max);
+
+ store_u16_4x4(dst, dst_stride, d0, d1, d2, d3);
+
+ // Shuffle everything up four rows.
+ s0 = s4;
+ s1 = s5;
+ s2 = s6;
+
+ s += 4 * src_stride;
+ dst += 4 * dst_stride;
+ height -= 4;
+ } while (height != 0);
+ } else {
+ const uint16x8_t max = vdupq_n_u16((1 << bd) - 1);
+
+ do {
+ int h = height;
+ int16_t *s = (int16_t *)(src);
+ uint16_t *d = dst;
+
+ int16x8_t s0, s1, s2;
+ load_s16_8x3(s, src_stride, &s0, &s1, &s2);
+ s += 3 * src_stride;
+
+ do {
+ int16x8_t s3, s4, s5, s6;
+ load_s16_8x4(s, src_stride, &s3, &s4, &s5, &s6);
+
+ // This operation combines a conventional transpose and the sample
+ // permute required before computing the dot product.
+ int16x8_t s0123[4], s1234[4], s2345[4], s3456[4];
+ transpose_concat_8x4(s0, s1, s2, s3, s0123);
+ transpose_concat_8x4(s1, s2, s3, s4, s1234);
+ transpose_concat_8x4(s2, s3, s4, s5, s2345);
+ transpose_concat_8x4(s3, s4, s5, s6, s3456);
+
+ uint16x8_t d0 =
+ highbd_convolve4_8_2d_v(s0123, y_filter, shift, offset, max);
+ uint16x8_t d1 =
+ highbd_convolve4_8_2d_v(s1234, y_filter, shift, offset, max);
+ uint16x8_t d2 =
+ highbd_convolve4_8_2d_v(s2345, y_filter, shift, offset, max);
+ uint16x8_t d3 =
+ highbd_convolve4_8_2d_v(s3456, y_filter, shift, offset, max);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ // Shuffle everything up four rows.
+ s0 = s4;
+ s1 = s5;
+ s2 = s6;
+
+ s += 4 * src_stride;
+ d += 4 * dst_stride;
+ h -= 4;
+ } while (h != 0);
+ src += 8;
+ dst += 8;
+ width -= 8;
+ } while (width != 0);
+ }
+}
+
+void av1_highbd_convolve_2d_sr_sve2(const uint16_t *src, int src_stride,
+ uint16_t *dst, int dst_stride, int w, int h,
+ const InterpFilterParams *filter_params_x,
+ const InterpFilterParams *filter_params_y,
+ const int subpel_x_qn,
+ const int subpel_y_qn,
+ ConvolveParams *conv_params, int bd) {
+ if (w == 2 || h == 2) {
+ av1_highbd_convolve_2d_sr_c(src, src_stride, dst, dst_stride, w, h,
+ filter_params_x, filter_params_y, subpel_x_qn,
+ subpel_y_qn, conv_params, bd);
+ return;
+ }
+
+ DECLARE_ALIGNED(16, uint16_t,
+ im_block[(MAX_SB_SIZE + MAX_FILTER_TAP) * MAX_SB_SIZE]);
+ const int x_filter_taps = get_filter_tap(filter_params_x, subpel_x_qn);
+ const int y_filter_taps = get_filter_tap(filter_params_y, subpel_y_qn);
+
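+ // There is no dedicated 6-tap path in this file, so defer those filters to
+ // the Neon implementation.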
+ if (x_filter_taps == 6 || y_filter_taps == 6) {
+ av1_highbd_convolve_2d_sr_neon(src, src_stride, dst, dst_stride, w, h,
+ filter_params_x, filter_params_y,
+ subpel_x_qn, subpel_y_qn, conv_params, bd);
+ return;
+ }
+
+ const int clamped_x_taps = x_filter_taps < 4 ? 4 : x_filter_taps;
+ const int clamped_y_taps = y_filter_taps < 4 ? 4 : y_filter_taps;
+
+ const int im_stride = MAX_SB_SIZE;
+ const int vert_offset = clamped_y_taps / 2 - 1;
+ const int horiz_offset = clamped_x_taps / 2 - 1;
+ const int x_offset = (1 << (bd + FILTER_BITS - 1));
+ const int y_offset_bits = bd + 2 * FILTER_BITS - conv_params->round_0;
+ // The extra shim of (1 << (conv_params->round_1 - 1)) allows us to do a
+ // simple shift left instead of a rounding saturating shift left.
+ const int y_offset =
+ (1 << (conv_params->round_1 - 1)) - (1 << (y_offset_bits - 1));
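+ // The (1 << (round_1 - 1)) term pre-adds the round-to-nearest constant, so
+ // the vertical kernels can use vshlq_s32 with a negative shift amount (a
+ // plain truncating shift) and still round correctly.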
+
+ const uint16_t *src_ptr = src - vert_offset * src_stride - horiz_offset;
+
+ const int16_t *x_filter_ptr = av1_get_interp_filter_subpel_kernel(
+ filter_params_x, subpel_x_qn & SUBPEL_MASK);
+ const int16_t *y_filter_ptr = av1_get_interp_filter_subpel_kernel(
+ filter_params_y, subpel_y_qn & SUBPEL_MASK);
+ const int im_h = h + clamped_y_taps - 1;
+
+ if (x_filter_taps > 8) {
+ highbd_convolve_2d_sr_horiz_12tap_sve2(src_ptr, src_stride, im_block,
+ im_stride, w, im_h, x_filter_ptr,
+ conv_params, x_offset);
+
+ highbd_convolve_2d_sr_vert_12tap_sve2(im_block, im_stride, dst, dst_stride,
+ w, h, y_filter_ptr, conv_params, bd,
+ y_offset);
+ return;
+ }
+
+ if (x_filter_taps <= 4) {
+ highbd_convolve_2d_sr_horiz_4tap_sve2(src_ptr, src_stride, im_block,
+ im_stride, w, im_h, x_filter_ptr,
+ conv_params, x_offset);
+ } else {
+ highbd_convolve_2d_sr_horiz_8tap_sve2(src_ptr, src_stride, im_block,
+ im_stride, w, im_h, x_filter_ptr,
+ conv_params, x_offset);
+ }
+
+ if (y_filter_taps <= 4) {
+ highbd_convolve_2d_sr_vert_4tap_sve2(im_block, im_stride, dst, dst_stride,
+ w, h, y_filter_ptr, conv_params, bd,
+ y_offset);
+ } else {
+ highbd_convolve_2d_sr_vert_8tap_sve2(im_block, im_stride, dst, dst_stride,
+ w, h, y_filter_ptr, conv_params, bd,
+ y_offset);
+ }
+}
diff --git a/third_party/aom/av1/common/arm/highbd_convolve_sve2.h b/third_party/aom/av1/common/arm/highbd_convolve_sve2.h
new file mode 100644
index 0000000000..05e23deef4
--- /dev/null
+++ b/third_party/aom/av1/common/arm/highbd_convolve_sve2.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2023, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#ifndef AOM_AV1_COMMON_ARM_HIGHBD_CONVOLVE_SVE2_H_
+#define AOM_AV1_COMMON_ARM_HIGHBD_CONVOLVE_SVE2_H_
+
+#include <arm_neon.h>
+
+#include "aom_dsp/arm/aom_neon_sve2_bridge.h"
+
+// clang-format off
+DECLARE_ALIGNED(16, static const uint16_t, kDotProdMergeBlockTbl[24]) = {
+ // Shift left and insert new last column in transposed 4x4 block.
+ 1, 2, 3, 0, 5, 6, 7, 4,
+ // Shift left and insert two new columns in transposed 4x4 block.
+ 2, 3, 0, 1, 6, 7, 4, 5,
+ // Shift left and insert three new columns in transposed 4x4 block.
+ 3, 0, 1, 2, 7, 4, 5, 6,
+};
+// clang-format on
+
+static INLINE void transpose_concat_4x4(int16x4_t s0, int16x4_t s1,
+ int16x4_t s2, int16x4_t s3,
+ int16x8_t res[2]) {
+ // Transpose 16-bit elements and concatenate result rows as follows:
+ // s0: 00, 01, 02, 03
+ // s1: 10, 11, 12, 13
+ // s2: 20, 21, 22, 23
+ // s3: 30, 31, 32, 33
+ //
+ // res[0]: 00 10 20 30 01 11 21 31
+ // res[1]: 02 12 22 32 03 13 23 33
+
+ int16x8_t s0q = vcombine_s16(s0, vdup_n_s16(0));
+ int16x8_t s1q = vcombine_s16(s1, vdup_n_s16(0));
+ int16x8_t s2q = vcombine_s16(s2, vdup_n_s16(0));
+ int16x8_t s3q = vcombine_s16(s3, vdup_n_s16(0));
+
+ int32x4_t s01 = vreinterpretq_s32_s16(vzip1q_s16(s0q, s1q));
+ int32x4_t s23 = vreinterpretq_s32_s16(vzip1q_s16(s2q, s3q));
+
+ int32x4x2_t s0123 = vzipq_s32(s01, s23);
+
+ res[0] = vreinterpretq_s16_s32(s0123.val[0]);
+ res[1] = vreinterpretq_s16_s32(s0123.val[1]);
+}
+
+static INLINE void transpose_concat_8x4(int16x8_t s0, int16x8_t s1,
+ int16x8_t s2, int16x8_t s3,
+ int16x8_t res[4]) {
+ // Transpose 16-bit elements and concatenate result rows as follows:
+ // s0: 00, 01, 02, 03, 04, 05, 06, 07
+ // s1: 10, 11, 12, 13, 14, 15, 16, 17
+ // s2: 20, 21, 22, 23, 24, 25, 26, 27
+ // s3: 30, 31, 32, 33, 34, 35, 36, 37
+ //
+ // res[0]: 00 10 20 30 01 11 21 31
+ // res[1]: 02 12 22 32 03 13 23 33
+ // res[2]: 04 14 24 34 05 15 25 35
+ // res[3]: 06 16 26 36 07 17 27 37
+
+ int16x8x2_t tr01_16 = vzipq_s16(s0, s1);
+ int16x8x2_t tr23_16 = vzipq_s16(s2, s3);
+ int32x4x2_t tr01_32 = vzipq_s32(vreinterpretq_s32_s16(tr01_16.val[0]),
+ vreinterpretq_s32_s16(tr23_16.val[0]));
+ int32x4x2_t tr23_32 = vzipq_s32(vreinterpretq_s32_s16(tr01_16.val[1]),
+ vreinterpretq_s32_s16(tr23_16.val[1]));
+
+ res[0] = vreinterpretq_s16_s32(tr01_32.val[0]);
+ res[1] = vreinterpretq_s16_s32(tr01_32.val[1]);
+ res[2] = vreinterpretq_s16_s32(tr23_32.val[0]);
+ res[3] = vreinterpretq_s16_s32(tr23_32.val[1]);
+}
+
+static INLINE void aom_tbl2x4_s16(int16x8_t t0[4], int16x8_t t1[4],
+ uint16x8_t tbl, int16x8_t res[4]) {
+ res[0] = aom_tbl2_s16(t0[0], t1[0], tbl);
+ res[1] = aom_tbl2_s16(t0[1], t1[1], tbl);
+ res[2] = aom_tbl2_s16(t0[2], t1[2], tbl);
+ res[3] = aom_tbl2_s16(t0[3], t1[3], tbl);
+}
+
+static INLINE void aom_tbl2x2_s16(int16x8_t t0[2], int16x8_t t1[2],
+ uint16x8_t tbl, int16x8_t res[2]) {
+ res[0] = aom_tbl2_s16(t0[0], t1[0], tbl);
+ res[1] = aom_tbl2_s16(t0[1], t1[1], tbl);
+}
+
+#endif // AOM_AV1_COMMON_ARM_HIGHBD_CONVOLVE_SVE2_H_
diff --git a/third_party/aom/av1/common/arm/highbd_warp_plane_neon.c b/third_party/aom/av1/common/arm/highbd_warp_plane_neon.c
index c6f1e3ad92..89647bc921 100644
--- a/third_party/aom/av1/common/arm/highbd_warp_plane_neon.c
+++ b/third_party/aom/av1/common/arm/highbd_warp_plane_neon.c
@@ -23,8 +23,8 @@
#include "config/av1_rtcd.h"
#include "highbd_warp_plane_neon.h"
-static INLINE int16x8_t highbd_horizontal_filter_4x1_f4(uint16x8x2_t in, int bd,
- int sx, int alpha) {
+static AOM_FORCE_INLINE int16x8_t
+highbd_horizontal_filter_4x1_f4(uint16x8x2_t in, int bd, int sx, int alpha) {
int16x8_t f[4];
load_filters_4(f, sx, alpha);
@@ -57,8 +57,8 @@ static INLINE int16x8_t highbd_horizontal_filter_4x1_f4(uint16x8x2_t in, int bd,
return vcombine_s16(vmovn_s32(res), vdup_n_s16(0));
}
-static INLINE int16x8_t highbd_horizontal_filter_8x1_f8(uint16x8x2_t in, int bd,
- int sx, int alpha) {
+static AOM_FORCE_INLINE int16x8_t
+highbd_horizontal_filter_8x1_f8(uint16x8x2_t in, int bd, int sx, int alpha) {
int16x8_t f[8];
load_filters_8(f, sx, alpha);
@@ -111,8 +111,8 @@ static INLINE int16x8_t highbd_horizontal_filter_8x1_f8(uint16x8x2_t in, int bd,
return vcombine_s16(vmovn_s32(res0), vmovn_s32(res1));
}
-static INLINE int16x8_t highbd_horizontal_filter_4x1_f1(uint16x8x2_t in, int bd,
- int sx) {
+static AOM_FORCE_INLINE int16x8_t
+highbd_horizontal_filter_4x1_f1(uint16x8x2_t in, int bd, int sx) {
int16x8_t f = load_filters_1(sx);
int16x8_t rv0 = vextq_s16(vreinterpretq_s16_u16(in.val[0]),
@@ -144,8 +144,8 @@ static INLINE int16x8_t highbd_horizontal_filter_4x1_f1(uint16x8x2_t in, int bd,
return vcombine_s16(vmovn_s32(res), vdup_n_s16(0));
}
-static INLINE int16x8_t highbd_horizontal_filter_8x1_f1(uint16x8x2_t in, int bd,
- int sx) {
+static AOM_FORCE_INLINE int16x8_t
+highbd_horizontal_filter_8x1_f1(uint16x8x2_t in, int bd, int sx) {
int16x8_t f = load_filters_1(sx);
int16x8_t rv0 = vextq_s16(vreinterpretq_s16_u16(in.val[0]),
@@ -197,7 +197,8 @@ static INLINE int16x8_t highbd_horizontal_filter_8x1_f1(uint16x8x2_t in, int bd,
return vcombine_s16(vmovn_s32(res0), vmovn_s32(res1));
}
-static INLINE int32x4_t vertical_filter_4x1_f1(const int16x8_t *tmp, int sy) {
+static AOM_FORCE_INLINE int32x4_t vertical_filter_4x1_f1(const int16x8_t *tmp,
+ int sy) {
const int16x8_t f = load_filters_1(sy);
const int16x4_t f0123 = vget_low_s16(f);
const int16x4_t f4567 = vget_high_s16(f);
@@ -213,7 +214,8 @@ static INLINE int32x4_t vertical_filter_4x1_f1(const int16x8_t *tmp, int sy) {
return m0123;
}
-static INLINE int32x4x2_t vertical_filter_8x1_f1(const int16x8_t *tmp, int sy) {
+static AOM_FORCE_INLINE int32x4x2_t vertical_filter_8x1_f1(const int16x8_t *tmp,
+ int sy) {
const int16x8_t f = load_filters_1(sy);
const int16x4_t f0123 = vget_low_s16(f);
const int16x4_t f4567 = vget_high_s16(f);
@@ -238,8 +240,8 @@ static INLINE int32x4x2_t vertical_filter_8x1_f1(const int16x8_t *tmp, int sy) {
return (int32x4x2_t){ { m0123, m4567 } };
}
-static INLINE int32x4_t vertical_filter_4x1_f4(const int16x8_t *tmp, int sy,
- int gamma) {
+static AOM_FORCE_INLINE int32x4_t vertical_filter_4x1_f4(const int16x8_t *tmp,
+ int sy, int gamma) {
int16x8_t s0, s1, s2, s3;
transpose_elems_s16_4x8(
vget_low_s16(tmp[0]), vget_low_s16(tmp[1]), vget_low_s16(tmp[2]),
@@ -262,8 +264,8 @@ static INLINE int32x4_t vertical_filter_4x1_f4(const int16x8_t *tmp, int sy,
return horizontal_add_4d_s32x4(m0123);
}
-static INLINE int32x4x2_t vertical_filter_8x1_f8(const int16x8_t *tmp, int sy,
- int gamma) {
+static AOM_FORCE_INLINE int32x4x2_t vertical_filter_8x1_f8(const int16x8_t *tmp,
+ int sy, int gamma) {
int16x8_t s0 = tmp[0];
int16x8_t s1 = tmp[1];
int16x8_t s2 = tmp[2];
diff --git a/third_party/aom/av1/common/arm/highbd_warp_plane_neon.h b/third_party/aom/av1/common/arm/highbd_warp_plane_neon.h
index 3b8982898e..48af4a707b 100644
--- a/third_party/aom/av1/common/arm/highbd_warp_plane_neon.h
+++ b/third_party/aom/av1/common/arm/highbd_warp_plane_neon.h
@@ -23,29 +23,31 @@
#include "av1/common/warped_motion.h"
#include "config/av1_rtcd.h"
-static INLINE int16x8_t highbd_horizontal_filter_4x1_f4(uint16x8x2_t in, int bd,
- int sx, int alpha);
+static AOM_FORCE_INLINE int16x8_t
+highbd_horizontal_filter_4x1_f4(uint16x8x2_t in, int bd, int sx, int alpha);
-static INLINE int16x8_t highbd_horizontal_filter_8x1_f8(uint16x8x2_t in, int bd,
- int sx, int alpha);
+static AOM_FORCE_INLINE int16x8_t
+highbd_horizontal_filter_8x1_f8(uint16x8x2_t in, int bd, int sx, int alpha);
-static INLINE int16x8_t highbd_horizontal_filter_4x1_f1(uint16x8x2_t in, int bd,
- int sx);
+static AOM_FORCE_INLINE int16x8_t
+highbd_horizontal_filter_4x1_f1(uint16x8x2_t in, int bd, int sx);
-static INLINE int16x8_t highbd_horizontal_filter_8x1_f1(uint16x8x2_t in, int bd,
- int sx);
+static AOM_FORCE_INLINE int16x8_t
+highbd_horizontal_filter_8x1_f1(uint16x8x2_t in, int bd, int sx);
-static INLINE int32x4_t vertical_filter_4x1_f1(const int16x8_t *tmp, int sy);
+static AOM_FORCE_INLINE int32x4_t vertical_filter_4x1_f1(const int16x8_t *tmp,
+ int sy);
-static INLINE int32x4x2_t vertical_filter_8x1_f1(const int16x8_t *tmp, int sy);
+static AOM_FORCE_INLINE int32x4x2_t vertical_filter_8x1_f1(const int16x8_t *tmp,
+ int sy);
-static INLINE int32x4_t vertical_filter_4x1_f4(const int16x8_t *tmp, int sy,
- int gamma);
+static AOM_FORCE_INLINE int32x4_t vertical_filter_4x1_f4(const int16x8_t *tmp,
+ int sy, int gamma);
-static INLINE int32x4x2_t vertical_filter_8x1_f8(const int16x8_t *tmp, int sy,
- int gamma);
+static AOM_FORCE_INLINE int32x4x2_t vertical_filter_8x1_f8(const int16x8_t *tmp,
+ int sy, int gamma);
-static INLINE int16x8_t load_filters_1(int ofs) {
+static AOM_FORCE_INLINE int16x8_t load_filters_1(int ofs) {
const int ofs0 = ROUND_POWER_OF_TWO(ofs, WARPEDDIFF_PREC_BITS);
const int16_t *base =
@@ -53,7 +55,8 @@ static INLINE int16x8_t load_filters_1(int ofs) {
return vld1q_s16(base + ofs0 * 8);
}
-static INLINE void load_filters_4(int16x8_t out[], int ofs, int stride) {
+static AOM_FORCE_INLINE void load_filters_4(int16x8_t out[], int ofs,
+ int stride) {
const int ofs0 = ROUND_POWER_OF_TWO(ofs + stride * 0, WARPEDDIFF_PREC_BITS);
const int ofs1 = ROUND_POWER_OF_TWO(ofs + stride * 1, WARPEDDIFF_PREC_BITS);
const int ofs2 = ROUND_POWER_OF_TWO(ofs + stride * 2, WARPEDDIFF_PREC_BITS);
@@ -67,7 +70,8 @@ static INLINE void load_filters_4(int16x8_t out[], int ofs, int stride) {
out[3] = vld1q_s16(base + ofs3 * 8);
}
-static INLINE void load_filters_8(int16x8_t out[], int ofs, int stride) {
+static AOM_FORCE_INLINE void load_filters_8(int16x8_t out[], int ofs,
+ int stride) {
const int ofs0 = ROUND_POWER_OF_TWO(ofs + stride * 0, WARPEDDIFF_PREC_BITS);
const int ofs1 = ROUND_POWER_OF_TWO(ofs + stride * 1, WARPEDDIFF_PREC_BITS);
const int ofs2 = ROUND_POWER_OF_TWO(ofs + stride * 2, WARPEDDIFF_PREC_BITS);
@@ -89,16 +93,18 @@ static INLINE void load_filters_8(int16x8_t out[], int ofs, int stride) {
out[7] = vld1q_s16(base + ofs7 * 8);
}
-static INLINE uint16x4_t clip_pixel_highbd_vec(int32x4_t val, int bd) {
+static AOM_FORCE_INLINE uint16x4_t clip_pixel_highbd_vec(int32x4_t val,
+ int bd) {
const int limit = (1 << bd) - 1;
return vqmovun_s32(vminq_s32(val, vdupq_n_s32(limit)));
}
-static INLINE void warp_affine_horizontal(const uint16_t *ref, int width,
- int height, int stride, int p_width,
- int16_t alpha, int16_t beta, int iy4,
- int sx4, int ix4, int16x8_t tmp[],
- int bd) {
+static AOM_FORCE_INLINE void warp_affine_horizontal(const uint16_t *ref,
+ int width, int height,
+ int stride, int p_width,
+ int16_t alpha, int16_t beta,
+ int iy4, int sx4, int ix4,
+ int16x8_t tmp[], int bd) {
const int round0 = (bd == 12) ? ROUND0_BITS + 2 : ROUND0_BITS;
if (ix4 <= -7) {
@@ -197,7 +203,7 @@ static INLINE void warp_affine_horizontal(const uint16_t *ref, int width,
}
}
-static INLINE void highbd_vertical_filter_4x1_f4(
+static AOM_FORCE_INLINE void highbd_vertical_filter_4x1_f4(
uint16_t *pred, int p_stride, int bd, uint16_t *dst, int dst_stride,
bool is_compound, bool do_average, bool use_dist_wtd_comp_avg, int fwd,
int bwd, int16_t gamma, const int16x8_t *tmp, int i, int sy, int j) {
@@ -253,7 +259,7 @@ static INLINE void highbd_vertical_filter_4x1_f4(
vst1_u16(dst16, res0);
}
-static INLINE void highbd_vertical_filter_8x1_f8(
+static AOM_FORCE_INLINE void highbd_vertical_filter_8x1_f8(
uint16_t *pred, int p_stride, int bd, uint16_t *dst, int dst_stride,
bool is_compound, bool do_average, bool use_dist_wtd_comp_avg, int fwd,
int bwd, int16_t gamma, const int16x8_t *tmp, int i, int sy, int j) {
@@ -328,7 +334,7 @@ static INLINE void highbd_vertical_filter_8x1_f8(
vst1_u16(dst16 + 4, res1);
}
-static INLINE void warp_affine_vertical(
+static AOM_FORCE_INLINE void warp_affine_vertical(
uint16_t *pred, int p_width, int p_height, int p_stride, int bd,
uint16_t *dst, int dst_stride, bool is_compound, bool do_average,
bool use_dist_wtd_comp_avg, int fwd, int bwd, int16_t gamma, int16_t delta,
@@ -354,7 +360,7 @@ static INLINE void warp_affine_vertical(
}
}
-static INLINE void highbd_warp_affine_common(
+static AOM_FORCE_INLINE void highbd_warp_affine_common(
const int32_t *mat, const uint16_t *ref, int width, int height, int stride,
uint16_t *pred, int p_col, int p_row, int p_width, int p_height,
int p_stride, int subsampling_x, int subsampling_y, int bd,
diff --git a/third_party/aom/av1/common/arm/highbd_warp_plane_sve.c b/third_party/aom/av1/common/arm/highbd_warp_plane_sve.c
index 7a14f21846..87e033fd00 100644
--- a/third_party/aom/av1/common/arm/highbd_warp_plane_sve.c
+++ b/third_party/aom/av1/common/arm/highbd_warp_plane_sve.c
@@ -15,7 +15,7 @@
#include <arm_neon_sve_bridge.h>
#include "aom_dsp/aom_dsp_common.h"
-#include "aom_dsp/arm/dot_sve.h"
+#include "aom_dsp/arm/aom_neon_sve_bridge.h"
#include "aom_dsp/arm/mem_neon.h"
#include "aom_dsp/arm/transpose_neon.h"
#include "aom_ports/mem.h"
@@ -24,8 +24,8 @@
#include "config/av1_rtcd.h"
#include "highbd_warp_plane_neon.h"
-static INLINE int16x8_t highbd_horizontal_filter_4x1_f4(uint16x8x2_t in, int bd,
- int sx, int alpha) {
+static AOM_FORCE_INLINE int16x8_t
+highbd_horizontal_filter_4x1_f4(uint16x8x2_t in, int bd, int sx, int alpha) {
int16x8_t f[4];
load_filters_4(f, sx, alpha);
@@ -55,8 +55,8 @@ static INLINE int16x8_t highbd_horizontal_filter_4x1_f4(uint16x8x2_t in, int bd,
return vcombine_s16(vmovn_s32(res), vdup_n_s16(0));
}
-static INLINE int16x8_t highbd_horizontal_filter_8x1_f8(uint16x8x2_t in, int bd,
- int sx, int alpha) {
+static AOM_FORCE_INLINE int16x8_t
+highbd_horizontal_filter_8x1_f8(uint16x8x2_t in, int bd, int sx, int alpha) {
int16x8_t f[8];
load_filters_8(f, sx, alpha);
@@ -103,8 +103,8 @@ static INLINE int16x8_t highbd_horizontal_filter_8x1_f8(uint16x8x2_t in, int bd,
return vcombine_s16(vmovn_s32(res0), vmovn_s32(res1));
}
-static INLINE int16x8_t highbd_horizontal_filter_4x1_f1(uint16x8x2_t in, int bd,
- int sx) {
+static AOM_FORCE_INLINE int16x8_t
+highbd_horizontal_filter_4x1_f1(uint16x8x2_t in, int bd, int sx) {
int16x8_t f = load_filters_1(sx);
int16x8_t rv0 = vextq_s16(vreinterpretq_s16_u16(in.val[0]),
@@ -133,8 +133,8 @@ static INLINE int16x8_t highbd_horizontal_filter_4x1_f1(uint16x8x2_t in, int bd,
return vcombine_s16(vmovn_s32(res), vdup_n_s16(0));
}
-static INLINE int16x8_t highbd_horizontal_filter_8x1_f1(uint16x8x2_t in, int bd,
- int sx) {
+static AOM_FORCE_INLINE int16x8_t
+highbd_horizontal_filter_8x1_f1(uint16x8x2_t in, int bd, int sx) {
int16x8_t f = load_filters_1(sx);
int16x8_t rv0 = vextq_s16(vreinterpretq_s16_u16(in.val[0]),
@@ -180,7 +180,8 @@ static INLINE int16x8_t highbd_horizontal_filter_8x1_f1(uint16x8x2_t in, int bd,
return vcombine_s16(vmovn_s32(res0), vmovn_s32(res1));
}
-static INLINE int32x4_t vertical_filter_4x1_f1(const int16x8_t *tmp, int sy) {
+static AOM_FORCE_INLINE int32x4_t vertical_filter_4x1_f1(const int16x8_t *tmp,
+ int sy) {
const int16x8_t f = load_filters_1(sy);
const int16x4_t f0123 = vget_low_s16(f);
const int16x4_t f4567 = vget_high_s16(f);
@@ -197,7 +198,8 @@ static INLINE int32x4_t vertical_filter_4x1_f1(const int16x8_t *tmp, int sy) {
return m0123;
}
-static INLINE int32x4x2_t vertical_filter_8x1_f1(const int16x8_t *tmp, int sy) {
+static AOM_FORCE_INLINE int32x4x2_t vertical_filter_8x1_f1(const int16x8_t *tmp,
+ int sy) {
const int16x8_t f = load_filters_1(sy);
const int16x4_t f0123 = vget_low_s16(f);
const int16x4_t f4567 = vget_high_s16(f);
@@ -223,8 +225,8 @@ static INLINE int32x4x2_t vertical_filter_8x1_f1(const int16x8_t *tmp, int sy) {
return (int32x4x2_t){ { m0123, m4567 } };
}
-static INLINE int32x4_t vertical_filter_4x1_f4(const int16x8_t *tmp, int sy,
- int gamma) {
+static AOM_FORCE_INLINE int32x4_t vertical_filter_4x1_f4(const int16x8_t *tmp,
+ int sy, int gamma) {
int16x8_t s0, s1, s2, s3;
transpose_elems_s16_4x8(
vget_low_s16(tmp[0]), vget_low_s16(tmp[1]), vget_low_s16(tmp[2]),
@@ -244,8 +246,8 @@ static INLINE int32x4_t vertical_filter_4x1_f4(const int16x8_t *tmp, int sy,
return vcombine_s32(vmovn_s64(m01), vmovn_s64(m23));
}
-static INLINE int32x4x2_t vertical_filter_8x1_f8(const int16x8_t *tmp, int sy,
- int gamma) {
+static AOM_FORCE_INLINE int32x4x2_t vertical_filter_8x1_f8(const int16x8_t *tmp,
+ int sy, int gamma) {
int16x8_t s0 = tmp[0];
int16x8_t s1 = tmp[1];
int16x8_t s2 = tmp[2];
diff --git a/third_party/aom/av1/common/arm/warp_plane_neon.c b/third_party/aom/av1/common/arm/warp_plane_neon.c
index 4723154398..546aa2965b 100644
--- a/third_party/aom/av1/common/arm/warp_plane_neon.c
+++ b/third_party/aom/av1/common/arm/warp_plane_neon.c
@@ -11,8 +11,8 @@
#include "warp_plane_neon.h"
-static INLINE int16x8_t horizontal_filter_4x1_f4(const uint8x16_t in, int sx,
- int alpha) {
+static AOM_FORCE_INLINE int16x8_t horizontal_filter_4x1_f4(const uint8x16_t in,
+ int sx, int alpha) {
const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1));
// Loading the 8 filter taps
@@ -39,8 +39,8 @@ static INLINE int16x8_t horizontal_filter_4x1_f4(const uint8x16_t in, int sx,
return vreinterpretq_s16_u16(res);
}
-static INLINE int16x8_t horizontal_filter_8x1_f8(const uint8x16_t in, int sx,
- int alpha) {
+static AOM_FORCE_INLINE int16x8_t horizontal_filter_8x1_f8(const uint8x16_t in,
+ int sx, int alpha) {
const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1));
// Loading the 8 filter taps
@@ -75,7 +75,8 @@ static INLINE int16x8_t horizontal_filter_8x1_f8(const uint8x16_t in, int sx,
return vreinterpretq_s16_u16(res);
}
-static INLINE int16x8_t horizontal_filter_4x1_f1(const uint8x16_t in, int sx) {
+static AOM_FORCE_INLINE int16x8_t horizontal_filter_4x1_f1(const uint8x16_t in,
+ int sx) {
const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1));
int16x8_t f_s16 =
@@ -101,7 +102,8 @@ static INLINE int16x8_t horizontal_filter_4x1_f1(const uint8x16_t in, int sx) {
return vreinterpretq_s16_u16(res);
}
-static INLINE int16x8_t horizontal_filter_8x1_f1(const uint8x16_t in, int sx) {
+static AOM_FORCE_INLINE int16x8_t horizontal_filter_8x1_f1(const uint8x16_t in,
+ int sx) {
const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1));
int16x8_t f_s16 =
@@ -135,8 +137,8 @@ static INLINE int16x8_t horizontal_filter_8x1_f1(const uint8x16_t in, int sx) {
return vreinterpretq_s16_u16(res);
}
-static INLINE void vertical_filter_4x1_f1(const int16x8_t *src, int32x4_t *res,
- int sy) {
+static AOM_FORCE_INLINE void vertical_filter_4x1_f1(const int16x8_t *src,
+ int32x4_t *res, int sy) {
int16x4_t s0 = vget_low_s16(src[0]);
int16x4_t s1 = vget_low_s16(src[1]);
int16x4_t s2 = vget_low_s16(src[2]);
@@ -161,8 +163,9 @@ static INLINE void vertical_filter_4x1_f1(const int16x8_t *src, int32x4_t *res,
*res = m0123;
}
-static INLINE void vertical_filter_4x1_f4(const int16x8_t *src, int32x4_t *res,
- int sy, int gamma) {
+static AOM_FORCE_INLINE void vertical_filter_4x1_f4(const int16x8_t *src,
+ int32x4_t *res, int sy,
+ int gamma) {
int16x8_t s0, s1, s2, s3;
transpose_elems_s16_4x8(
vget_low_s16(src[0]), vget_low_s16(src[1]), vget_low_s16(src[2]),
@@ -186,9 +189,10 @@ static INLINE void vertical_filter_4x1_f4(const int16x8_t *src, int32x4_t *res,
*res = horizontal_add_4d_s32x4(m0123_pairs);
}
-static INLINE void vertical_filter_8x1_f1(const int16x8_t *src,
- int32x4_t *res_low,
- int32x4_t *res_high, int sy) {
+static AOM_FORCE_INLINE void vertical_filter_8x1_f1(const int16x8_t *src,
+ int32x4_t *res_low,
+ int32x4_t *res_high,
+ int sy) {
int16x8_t s0 = src[0];
int16x8_t s1 = src[1];
int16x8_t s2 = src[2];
@@ -223,10 +227,10 @@ static INLINE void vertical_filter_8x1_f1(const int16x8_t *src,
*res_high = m4567;
}
-static INLINE void vertical_filter_8x1_f8(const int16x8_t *src,
- int32x4_t *res_low,
- int32x4_t *res_high, int sy,
- int gamma) {
+static AOM_FORCE_INLINE void vertical_filter_8x1_f8(const int16x8_t *src,
+ int32x4_t *res_low,
+ int32x4_t *res_high, int sy,
+ int gamma) {
int16x8_t s0 = src[0];
int16x8_t s1 = src[1];
int16x8_t s2 = src[2];
diff --git a/third_party/aom/av1/common/arm/warp_plane_neon.h b/third_party/aom/av1/common/arm/warp_plane_neon.h
index 5afd72f4ab..eece007ef3 100644
--- a/third_party/aom/av1/common/arm/warp_plane_neon.h
+++ b/third_party/aom/av1/common/arm/warp_plane_neon.h
@@ -24,32 +24,37 @@
#include "av1/common/warped_motion.h"
#include "av1/common/scale.h"
-static INLINE int16x8_t horizontal_filter_4x1_f4(const uint8x16_t in, int sx,
- int alpha);
+static AOM_FORCE_INLINE int16x8_t horizontal_filter_4x1_f4(const uint8x16_t in,
+ int sx, int alpha);
-static INLINE int16x8_t horizontal_filter_8x1_f8(const uint8x16_t in, int sx,
- int alpha);
+static AOM_FORCE_INLINE int16x8_t horizontal_filter_8x1_f8(const uint8x16_t in,
+ int sx, int alpha);
-static INLINE int16x8_t horizontal_filter_4x1_f1(const uint8x16_t in, int sx);
+static AOM_FORCE_INLINE int16x8_t horizontal_filter_4x1_f1(const uint8x16_t in,
+ int sx);
-static INLINE int16x8_t horizontal_filter_8x1_f1(const uint8x16_t in, int sx);
+static AOM_FORCE_INLINE int16x8_t horizontal_filter_8x1_f1(const uint8x16_t in,
+ int sx);
-static INLINE void vertical_filter_4x1_f1(const int16x8_t *src, int32x4_t *res,
- int sy);
+static AOM_FORCE_INLINE void vertical_filter_4x1_f1(const int16x8_t *src,
+ int32x4_t *res, int sy);
-static INLINE void vertical_filter_4x1_f4(const int16x8_t *src, int32x4_t *res,
- int sy, int gamma);
+static AOM_FORCE_INLINE void vertical_filter_4x1_f4(const int16x8_t *src,
+ int32x4_t *res, int sy,
+ int gamma);
-static INLINE void vertical_filter_8x1_f1(const int16x8_t *src,
- int32x4_t *res_low,
- int32x4_t *res_high, int sy);
+static AOM_FORCE_INLINE void vertical_filter_8x1_f1(const int16x8_t *src,
+ int32x4_t *res_low,
+ int32x4_t *res_high,
+ int sy);
-static INLINE void vertical_filter_8x1_f8(const int16x8_t *src,
- int32x4_t *res_low,
- int32x4_t *res_high, int sy,
- int gamma);
+static AOM_FORCE_INLINE void vertical_filter_8x1_f8(const int16x8_t *src,
+ int32x4_t *res_low,
+ int32x4_t *res_high, int sy,
+ int gamma);
-static INLINE void load_filters_4(int16x8_t out[], int offset, int stride) {
+static AOM_FORCE_INLINE void load_filters_4(int16x8_t out[], int offset,
+ int stride) {
out[0] = vld1q_s16((int16_t *)(av1_warped_filter + ((offset + 0 * stride) >>
WARPEDDIFF_PREC_BITS)));
out[1] = vld1q_s16((int16_t *)(av1_warped_filter + ((offset + 1 * stride) >>
@@ -60,7 +65,8 @@ static INLINE void load_filters_4(int16x8_t out[], int offset, int stride) {
WARPEDDIFF_PREC_BITS)));
}
-static INLINE void load_filters_8(int16x8_t out[], int offset, int stride) {
+static AOM_FORCE_INLINE void load_filters_8(int16x8_t out[], int offset,
+ int stride) {
out[0] = vld1q_s16((int16_t *)(av1_warped_filter + ((offset + 0 * stride) >>
WARPEDDIFF_PREC_BITS)));
out[1] = vld1q_s16((int16_t *)(av1_warped_filter + ((offset + 1 * stride) >>
@@ -79,16 +85,14 @@ static INLINE void load_filters_8(int16x8_t out[], int offset, int stride) {
WARPEDDIFF_PREC_BITS)));
}
-static INLINE int clamp_iy(int iy, int height) {
+static AOM_FORCE_INLINE int clamp_iy(int iy, int height) {
return clamp(iy, 0, height - 1);
}
-static INLINE void warp_affine_horizontal(const uint8_t *ref, int width,
- int height, int stride, int p_width,
- int p_height, int16_t alpha,
- int16_t beta, const int64_t x4,
- const int64_t y4, const int i,
- int16x8_t tmp[]) {
+static AOM_FORCE_INLINE void warp_affine_horizontal(
+ const uint8_t *ref, int width, int height, int stride, int p_width,
+ int p_height, int16_t alpha, int16_t beta, const int64_t x4,
+ const int64_t y4, const int i, int16x8_t tmp[]) {
const int bd = 8;
const int reduce_bits_horiz = ROUND0_BITS;
const int height_limit = AOMMIN(8, p_height - i) + 7;
@@ -197,7 +201,7 @@ static INLINE void warp_affine_horizontal(const uint8_t *ref, int width,
}
}
-static INLINE void warp_affine_vertical(
+static AOM_FORCE_INLINE void warp_affine_vertical(
uint8_t *pred, int p_width, int p_height, int p_stride, int is_compound,
uint16_t *dst, int dst_stride, int do_average, int use_dist_wtd_comp_avg,
int16_t gamma, int16_t delta, const int64_t y4, const int i, const int j,
@@ -325,7 +329,7 @@ static INLINE void warp_affine_vertical(
}
}
-static INLINE void av1_warp_affine_common(
+static AOM_FORCE_INLINE void av1_warp_affine_common(
const int32_t *mat, const uint8_t *ref, int width, int height, int stride,
uint8_t *pred, int p_col, int p_row, int p_width, int p_height,
int p_stride, int subsampling_x, int subsampling_y,
diff --git a/third_party/aom/av1/common/arm/warp_plane_neon_i8mm.c b/third_party/aom/av1/common/arm/warp_plane_neon_i8mm.c
index 39e3ad99f4..22a1be17b5 100644
--- a/third_party/aom/av1/common/arm/warp_plane_neon_i8mm.c
+++ b/third_party/aom/av1/common/arm/warp_plane_neon_i8mm.c
@@ -17,8 +17,8 @@ DECLARE_ALIGNED(16, static const uint8_t, usdot_permute_idx[48]) = {
8, 9, 10, 11, 9, 10, 11, 12, 10, 11, 12, 13, 11, 12, 13, 14
};
-static INLINE int16x8_t horizontal_filter_4x1_f4(const uint8x16_t in, int sx,
- int alpha) {
+static AOM_FORCE_INLINE int16x8_t horizontal_filter_4x1_f4(const uint8x16_t in,
+ int sx, int alpha) {
const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1));
// Loading the 8 filter taps
@@ -45,8 +45,8 @@ static INLINE int16x8_t horizontal_filter_4x1_f4(const uint8x16_t in, int sx,
return vreinterpretq_s16_u16(res);
}
-static INLINE int16x8_t horizontal_filter_8x1_f8(const uint8x16_t in, int sx,
- int alpha) {
+static AOM_FORCE_INLINE int16x8_t horizontal_filter_8x1_f8(const uint8x16_t in,
+ int sx, int alpha) {
const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1));
// Loading the 8 filter taps
@@ -83,7 +83,8 @@ static INLINE int16x8_t horizontal_filter_8x1_f8(const uint8x16_t in, int sx,
return vreinterpretq_s16_u16(res);
}
-static INLINE int16x8_t horizontal_filter_4x1_f1(const uint8x16_t in, int sx) {
+static AOM_FORCE_INLINE int16x8_t horizontal_filter_4x1_f1(const uint8x16_t in,
+ int sx) {
const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1));
int16x8_t f_s16 =
@@ -112,7 +113,8 @@ static INLINE int16x8_t horizontal_filter_4x1_f1(const uint8x16_t in, int sx) {
return vreinterpretq_s16_u16(res);
}
-static INLINE int16x8_t horizontal_filter_8x1_f1(const uint8x16_t in, int sx) {
+static AOM_FORCE_INLINE int16x8_t horizontal_filter_8x1_f1(const uint8x16_t in,
+ int sx) {
const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1));
int16x8_t f_s16 =
@@ -149,8 +151,8 @@ static INLINE int16x8_t horizontal_filter_8x1_f1(const uint8x16_t in, int sx) {
return vreinterpretq_s16_u16(res);
}
-static INLINE void vertical_filter_4x1_f1(const int16x8_t *src, int32x4_t *res,
- int sy) {
+static AOM_FORCE_INLINE void vertical_filter_4x1_f1(const int16x8_t *src,
+ int32x4_t *res, int sy) {
int16x4_t s0 = vget_low_s16(src[0]);
int16x4_t s1 = vget_low_s16(src[1]);
int16x4_t s2 = vget_low_s16(src[2]);
@@ -175,8 +177,9 @@ static INLINE void vertical_filter_4x1_f1(const int16x8_t *src, int32x4_t *res,
*res = m0123;
}
-static INLINE void vertical_filter_4x1_f4(const int16x8_t *src, int32x4_t *res,
- int sy, int gamma) {
+static AOM_FORCE_INLINE void vertical_filter_4x1_f4(const int16x8_t *src,
+ int32x4_t *res, int sy,
+ int gamma) {
int16x8_t s0, s1, s2, s3;
transpose_elems_s16_4x8(
vget_low_s16(src[0]), vget_low_s16(src[1]), vget_low_s16(src[2]),
@@ -200,9 +203,10 @@ static INLINE void vertical_filter_4x1_f4(const int16x8_t *src, int32x4_t *res,
*res = horizontal_add_4d_s32x4(m0123_pairs);
}
-static INLINE void vertical_filter_8x1_f1(const int16x8_t *src,
- int32x4_t *res_low,
- int32x4_t *res_high, int sy) {
+static AOM_FORCE_INLINE void vertical_filter_8x1_f1(const int16x8_t *src,
+ int32x4_t *res_low,
+ int32x4_t *res_high,
+ int sy) {
int16x8_t s0 = src[0];
int16x8_t s1 = src[1];
int16x8_t s2 = src[2];
@@ -237,10 +241,10 @@ static INLINE void vertical_filter_8x1_f1(const int16x8_t *src,
*res_high = m4567;
}
-static INLINE void vertical_filter_8x1_f8(const int16x8_t *src,
- int32x4_t *res_low,
- int32x4_t *res_high, int sy,
- int gamma) {
+static AOM_FORCE_INLINE void vertical_filter_8x1_f8(const int16x8_t *src,
+ int32x4_t *res_low,
+ int32x4_t *res_high, int sy,
+ int gamma) {
int16x8_t s0 = src[0];
int16x8_t s1 = src[1];
int16x8_t s2 = src[2];
diff --git a/third_party/aom/av1/common/arm/warp_plane_sve.c b/third_party/aom/av1/common/arm/warp_plane_sve.c
index 8a4bf5747b..c70b066174 100644
--- a/third_party/aom/av1/common/arm/warp_plane_sve.c
+++ b/third_party/aom/av1/common/arm/warp_plane_sve.c
@@ -11,7 +11,7 @@
#include <arm_neon.h>
-#include "aom_dsp/arm/dot_sve.h"
+#include "aom_dsp/arm/aom_neon_sve_bridge.h"
#include "warp_plane_neon.h"
DECLARE_ALIGNED(16, static const uint8_t, usdot_permute_idx[48]) = {
@@ -20,8 +20,8 @@ DECLARE_ALIGNED(16, static const uint8_t, usdot_permute_idx[48]) = {
8, 9, 10, 11, 9, 10, 11, 12, 10, 11, 12, 13, 11, 12, 13, 14
};
-static INLINE int16x8_t horizontal_filter_4x1_f4(const uint8x16_t in, int sx,
- int alpha) {
+static AOM_FORCE_INLINE int16x8_t horizontal_filter_4x1_f4(const uint8x16_t in,
+ int sx, int alpha) {
const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1));
// Loading the 8 filter taps
@@ -48,8 +48,8 @@ static INLINE int16x8_t horizontal_filter_4x1_f4(const uint8x16_t in, int sx,
return vreinterpretq_s16_u16(res);
}
-static INLINE int16x8_t horizontal_filter_8x1_f8(const uint8x16_t in, int sx,
- int alpha) {
+static AOM_FORCE_INLINE int16x8_t horizontal_filter_8x1_f8(const uint8x16_t in,
+ int sx, int alpha) {
const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1));
// Loading the 8 filter taps
@@ -86,7 +86,8 @@ static INLINE int16x8_t horizontal_filter_8x1_f8(const uint8x16_t in, int sx,
return vreinterpretq_s16_u16(res);
}
-static INLINE int16x8_t horizontal_filter_4x1_f1(const uint8x16_t in, int sx) {
+static AOM_FORCE_INLINE int16x8_t horizontal_filter_4x1_f1(const uint8x16_t in,
+ int sx) {
const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1));
int16x8_t f_s16 =
@@ -115,7 +116,8 @@ static INLINE int16x8_t horizontal_filter_4x1_f1(const uint8x16_t in, int sx) {
return vreinterpretq_s16_u16(res);
}
-static INLINE int16x8_t horizontal_filter_8x1_f1(const uint8x16_t in, int sx) {
+static AOM_FORCE_INLINE int16x8_t horizontal_filter_8x1_f1(const uint8x16_t in,
+ int sx) {
const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1));
int16x8_t f_s16 =
@@ -152,8 +154,8 @@ static INLINE int16x8_t horizontal_filter_8x1_f1(const uint8x16_t in, int sx) {
return vreinterpretq_s16_u16(res);
}
-static INLINE void vertical_filter_4x1_f1(const int16x8_t *src, int32x4_t *res,
- int sy) {
+static AOM_FORCE_INLINE void vertical_filter_4x1_f1(const int16x8_t *src,
+ int32x4_t *res, int sy) {
int16x4_t s0 = vget_low_s16(src[0]);
int16x4_t s1 = vget_low_s16(src[1]);
int16x4_t s2 = vget_low_s16(src[2]);
@@ -178,8 +180,9 @@ static INLINE void vertical_filter_4x1_f1(const int16x8_t *src, int32x4_t *res,
*res = m0123;
}
-static INLINE void vertical_filter_4x1_f4(const int16x8_t *src, int32x4_t *res,
- int sy, int gamma) {
+static AOM_FORCE_INLINE void vertical_filter_4x1_f4(const int16x8_t *src,
+ int32x4_t *res, int sy,
+ int gamma) {
int16x8_t s0, s1, s2, s3;
transpose_elems_s16_4x8(
vget_low_s16(src[0]), vget_low_s16(src[1]), vget_low_s16(src[2]),
@@ -200,9 +203,10 @@ static INLINE void vertical_filter_4x1_f4(const int16x8_t *src, int32x4_t *res,
*res = vcombine_s32(vmovn_s64(m01), vmovn_s64(m23));
}
-static INLINE void vertical_filter_8x1_f1(const int16x8_t *src,
- int32x4_t *res_low,
- int32x4_t *res_high, int sy) {
+static AOM_FORCE_INLINE void vertical_filter_8x1_f1(const int16x8_t *src,
+ int32x4_t *res_low,
+ int32x4_t *res_high,
+ int sy) {
int16x8_t s0 = src[0];
int16x8_t s1 = src[1];
int16x8_t s2 = src[2];
@@ -237,10 +241,10 @@ static INLINE void vertical_filter_8x1_f1(const int16x8_t *src,
*res_high = m4567;
}
-static INLINE void vertical_filter_8x1_f8(const int16x8_t *src,
- int32x4_t *res_low,
- int32x4_t *res_high, int sy,
- int gamma) {
+static AOM_FORCE_INLINE void vertical_filter_8x1_f8(const int16x8_t *src,
+ int32x4_t *res_low,
+ int32x4_t *res_high, int sy,
+ int gamma) {
int16x8_t s0 = src[0];
int16x8_t s1 = src[1];
int16x8_t s2 = src[2];
diff --git a/third_party/aom/av1/common/av1_common_int.h b/third_party/aom/av1/common/av1_common_int.h
index 4c0cb99d2b..4e14c4a8be 100644
--- a/third_party/aom/av1/common/av1_common_int.h
+++ b/third_party/aom/av1/common/av1_common_int.h
@@ -17,7 +17,7 @@
#include "aom/internal/aom_codec_internal.h"
#include "aom_dsp/flow_estimation/corner_detect.h"
-#include "aom_util/aom_thread.h"
+#include "aom_util/aom_pthread.h"
#include "av1/common/alloccommon.h"
#include "av1/common/av1_loopfilter.h"
#include "av1/common/entropy.h"
diff --git a/third_party/aom/av1/common/av1_rtcd_defs.pl b/third_party/aom/av1/common/av1_rtcd_defs.pl
index ef999fbba2..c0831330d1 100644
--- a/third_party/aom/av1/common/av1_rtcd_defs.pl
+++ b/third_party/aom/av1/common/av1_rtcd_defs.pl
@@ -77,6 +77,16 @@ EOF
}
forward_decls qw/av1_common_forward_decls/;
+# Fallbacks for Valgrind support
+# For normal use, we require SSE4.1. However, 32-bit Valgrind does not support
+# SSE4.1, so we include fallbacks for some critical functions to improve
+# performance.
+$sse2_x86 = $ssse3_x86 = '';
+if ($opts{arch} eq "x86") {
+ $sse2_x86 = 'sse2';
+ $ssse3_x86 = 'ssse3';
+}
+
# functions that are 64 bit only.
$mmx_x86_64 = $sse2_x86_64 = $ssse3_x86_64 = $avx_x86_64 = $avx2_x86_64 = '';
if ($opts{arch} eq "x86_64") {
@@ -345,7 +355,7 @@ if (aom_config("CONFIG_AV1_ENCODER") eq "yes") {
#fwd txfm
add_proto qw/void av1_lowbd_fwd_txfm/, "const int16_t *src_diff, tran_low_t *coeff, int diff_stride, TxfmParam *txfm_param";
- specialize qw/av1_lowbd_fwd_txfm sse2 sse4_1 avx2 neon/;
+ specialize qw/av1_lowbd_fwd_txfm sse4_1 avx2 neon/, $sse2_x86;
add_proto qw/void av1_fwd_txfm2d_4x8/, "const int16_t *input, int32_t *output, int stride, TX_TYPE tx_type, int bd";
specialize qw/av1_fwd_txfm2d_4x8 sse4_1 neon/;
@@ -436,9 +446,9 @@ if (aom_config("CONFIG_AV1_ENCODER") eq "yes") {
specialize qw/av1_txb_init_levels sse4_1 avx2 neon/;
add_proto qw/uint64_t av1_wedge_sse_from_residuals/, "const int16_t *r1, const int16_t *d, const uint8_t *m, int N";
- specialize qw/av1_wedge_sse_from_residuals sse2 avx2 neon/;
+ specialize qw/av1_wedge_sse_from_residuals sse2 avx2 neon sve/;
add_proto qw/int8_t av1_wedge_sign_from_residuals/, "const int16_t *ds, const uint8_t *m, int N, int64_t limit";
- specialize qw/av1_wedge_sign_from_residuals sse2 avx2 neon/;
+ specialize qw/av1_wedge_sign_from_residuals sse2 avx2 neon sve/;
add_proto qw/void av1_wedge_compute_delta_squares/, "int16_t *d, const int16_t *a, const int16_t *b, int N";
specialize qw/av1_wedge_compute_delta_squares sse2 avx2 neon/;
@@ -521,21 +531,21 @@ add_proto qw/void cdef_copy_rect8_16bit_to_16bit/, "uint16_t *dst, int dstride,
# structs as arguments, which makes the v256 type of the intrinsics
# hard to support, so optimizations for this target are disabled.
if ($opts{config} !~ /libs-x86-win32-vs.*/) {
- specialize qw/cdef_find_dir sse2 ssse3 sse4_1 avx2 neon/;
- specialize qw/cdef_find_dir_dual sse2 ssse3 sse4_1 avx2 neon/;
+ specialize qw/cdef_find_dir sse4_1 avx2 neon/, "$ssse3_x86";
+ specialize qw/cdef_find_dir_dual sse4_1 avx2 neon/, "$ssse3_x86";
- specialize qw/cdef_filter_8_0 sse2 ssse3 sse4_1 avx2 neon/;
- specialize qw/cdef_filter_8_1 sse2 ssse3 sse4_1 avx2 neon/;
- specialize qw/cdef_filter_8_2 sse2 ssse3 sse4_1 avx2 neon/;
- specialize qw/cdef_filter_8_3 sse2 ssse3 sse4_1 avx2 neon/;
+ specialize qw/cdef_filter_8_0 sse4_1 avx2 neon/, "$ssse3_x86";
+ specialize qw/cdef_filter_8_1 sse4_1 avx2 neon/, "$ssse3_x86";
+ specialize qw/cdef_filter_8_2 sse4_1 avx2 neon/, "$ssse3_x86";
+ specialize qw/cdef_filter_8_3 sse4_1 avx2 neon/, "$ssse3_x86";
- specialize qw/cdef_filter_16_0 sse2 ssse3 sse4_1 avx2 neon/;
- specialize qw/cdef_filter_16_1 sse2 ssse3 sse4_1 avx2 neon/;
- specialize qw/cdef_filter_16_2 sse2 ssse3 sse4_1 avx2 neon/;
- specialize qw/cdef_filter_16_3 sse2 ssse3 sse4_1 avx2 neon/;
+ specialize qw/cdef_filter_16_0 sse4_1 avx2 neon/, "$ssse3_x86";
+ specialize qw/cdef_filter_16_1 sse4_1 avx2 neon/, "$ssse3_x86";
+ specialize qw/cdef_filter_16_2 sse4_1 avx2 neon/, "$ssse3_x86";
+ specialize qw/cdef_filter_16_3 sse4_1 avx2 neon/, "$ssse3_x86";
- specialize qw/cdef_copy_rect8_8bit_to_16bit sse2 ssse3 sse4_1 avx2 neon/;
- specialize qw/cdef_copy_rect8_16bit_to_16bit sse2 ssse3 sse4_1 avx2 neon/;
+ specialize qw/cdef_copy_rect8_8bit_to_16bit sse4_1 avx2 neon/, "$ssse3_x86";
+ specialize qw/cdef_copy_rect8_16bit_to_16bit sse4_1 avx2 neon/, "$ssse3_x86";
}
# WARPED_MOTION / GLOBAL_MOTION functions
@@ -591,20 +601,20 @@ if(aom_config("CONFIG_AV1_HIGHBITDEPTH") eq "yes") {
specialize qw/av1_convolve_y_sr sse2 avx2 neon/;
specialize qw/av1_convolve_y_sr_intrabc neon/;
specialize qw/av1_convolve_2d_scale sse4_1/;
- specialize qw/av1_dist_wtd_convolve_2d sse2 ssse3 avx2 neon neon_dotprod neon_i8mm/;
+ specialize qw/av1_dist_wtd_convolve_2d ssse3 avx2 neon neon_dotprod neon_i8mm/;
specialize qw/av1_dist_wtd_convolve_2d_copy sse2 avx2 neon/;
specialize qw/av1_dist_wtd_convolve_x sse2 avx2 neon neon_dotprod neon_i8mm/;
specialize qw/av1_dist_wtd_convolve_y sse2 avx2 neon/;
if(aom_config("CONFIG_AV1_HIGHBITDEPTH") eq "yes") {
- specialize qw/av1_highbd_dist_wtd_convolve_2d sse4_1 avx2 neon/;
- specialize qw/av1_highbd_dist_wtd_convolve_x sse4_1 avx2 neon/;
- specialize qw/av1_highbd_dist_wtd_convolve_y sse4_1 avx2 neon/;
+ specialize qw/av1_highbd_dist_wtd_convolve_2d sse4_1 avx2 neon sve2/;
+ specialize qw/av1_highbd_dist_wtd_convolve_x sse4_1 avx2 neon sve2/;
+ specialize qw/av1_highbd_dist_wtd_convolve_y sse4_1 avx2 neon sve2/;
specialize qw/av1_highbd_dist_wtd_convolve_2d_copy sse4_1 avx2 neon/;
- specialize qw/av1_highbd_convolve_2d_sr ssse3 avx2 neon/;
+ specialize qw/av1_highbd_convolve_2d_sr ssse3 avx2 neon sve2/;
specialize qw/av1_highbd_convolve_2d_sr_intrabc neon/;
- specialize qw/av1_highbd_convolve_x_sr ssse3 avx2 neon/;
+ specialize qw/av1_highbd_convolve_x_sr ssse3 avx2 neon sve2/;
specialize qw/av1_highbd_convolve_x_sr_intrabc neon/;
- specialize qw/av1_highbd_convolve_y_sr ssse3 avx2 neon/;
+ specialize qw/av1_highbd_convolve_y_sr ssse3 avx2 neon sve2/;
specialize qw/av1_highbd_convolve_y_sr_intrabc neon/;
specialize qw/av1_highbd_convolve_2d_scale sse4_1 neon/;
}
diff --git a/third_party/aom/av1/common/cdef.c b/third_party/aom/av1/common/cdef.c
index 12e9545441..5cec940a8e 100644
--- a/third_party/aom/av1/common/cdef.c
+++ b/third_party/aom/av1/common/cdef.c
@@ -10,15 +10,19 @@
*/
#include <assert.h>
-#include <math.h>
+#include <stddef.h>
#include <string.h>
#include "config/aom_scale_rtcd.h"
#include "aom/aom_integer.h"
+#include "aom_util/aom_pthread.h"
#include "av1/common/av1_common_int.h"
#include "av1/common/cdef.h"
#include "av1/common/cdef_block.h"
+#include "av1/common/common.h"
+#include "av1/common/common_data.h"
+#include "av1/common/enums.h"
#include "av1/common/reconinter.h"
#include "av1/common/thread_common.h"
@@ -92,7 +96,7 @@ void av1_cdef_copy_sb8_16_lowbd(uint16_t *const dst, int dstride,
const uint8_t *src, int src_voffset,
int src_hoffset, int sstride, int vsize,
int hsize) {
- const uint8_t *base = &src[src_voffset * sstride + src_hoffset];
+ const uint8_t *base = &src[src_voffset * (ptrdiff_t)sstride + src_hoffset];
cdef_copy_rect8_8bit_to_16bit(dst, dstride, base, sstride, hsize, vsize);
}
@@ -101,7 +105,7 @@ void av1_cdef_copy_sb8_16_highbd(uint16_t *const dst, int dstride,
int src_hoffset, int sstride, int vsize,
int hsize) {
const uint16_t *base =
- &CONVERT_TO_SHORTPTR(src)[src_voffset * sstride + src_hoffset];
+ &CONVERT_TO_SHORTPTR(src)[src_voffset * (ptrdiff_t)sstride + src_hoffset];
cdef_copy_rect8_16bit_to_16bit(dst, dstride, base, sstride, hsize, vsize);
}
@@ -247,7 +251,8 @@ static void cdef_prepare_fb(const AV1_COMMON *const cm, CdefBlockInfo *fb_info,
static INLINE void cdef_filter_fb(CdefBlockInfo *const fb_info, int plane,
uint8_t use_highbitdepth) {
- int offset = fb_info->dst_stride * fb_info->roffset + fb_info->coffset;
+ ptrdiff_t offset =
+ (ptrdiff_t)fb_info->dst_stride * fb_info->roffset + fb_info->coffset;
if (use_highbitdepth) {
av1_cdef_filter_fb(
NULL, CONVERT_TO_SHORTPTR(fb_info->dst + offset), fb_info->dst_stride,
diff --git a/third_party/aom/av1/common/entropymode.h b/third_party/aom/av1/common/entropymode.h
index 09cd6bd1e9..028bd21ae3 100644
--- a/third_party/aom/av1/common/entropymode.h
+++ b/third_party/aom/av1/common/entropymode.h
@@ -12,6 +12,7 @@
#ifndef AOM_AV1_COMMON_ENTROPYMODE_H_
#define AOM_AV1_COMMON_ENTROPYMODE_H_
+#include "aom_ports/bitops.h"
#include "av1/common/entropy.h"
#include "av1/common/entropymv.h"
#include "av1/common/filter.h"
@@ -192,13 +193,7 @@ void av1_setup_past_independence(struct AV1Common *cm);
// Returns (int)ceil(log2(n)).
static INLINE int av1_ceil_log2(int n) {
if (n < 2) return 0;
- int i = 1;
- unsigned int p = 2;
- while (p < (unsigned int)n) {
- i++;
- p = p << 1;
- }
- return i;
+ return get_msb(n - 1) + 1;
}
// Returns the context for palette color index at row 'r' and column 'c',
diff --git a/third_party/aom/av1/common/quant_common.c b/third_party/aom/av1/common/quant_common.c
index b0976287ef..58eb113370 100644
--- a/third_party/aom/av1/common/quant_common.c
+++ b/third_party/aom/av1/common/quant_common.c
@@ -9,10 +9,15 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
+#include "config/aom_config.h"
+
+#include "aom/aom_frame_buffer.h"
+#include "aom_scale/yv12config.h"
#include "av1/common/av1_common_int.h"
#include "av1/common/blockd.h"
#include "av1/common/common.h"
#include "av1/common/entropy.h"
+#include "av1/common/filter.h"
#include "av1/common/quant_common.h"
#include "av1/common/seg_common.h"
@@ -274,13 +279,16 @@ const qm_val_t *av1_get_qmatrix(const CommonQuantParams *quant_params,
: quant_params->gqmatrix[NUM_QM_LEVELS - 1][0][qm_tx_size];
}
+#if CONFIG_QUANT_MATRIX || CONFIG_AV1_DECODER
#define QM_TOTAL_SIZE 3344
// We only use wt_matrix_ref[q] and iwt_matrix_ref[q]
// for q = 0, ..., NUM_QM_LEVELS - 2.
static const qm_val_t wt_matrix_ref[NUM_QM_LEVELS - 1][2][QM_TOTAL_SIZE];
static const qm_val_t iwt_matrix_ref[NUM_QM_LEVELS - 1][2][QM_TOTAL_SIZE];
+#endif
void av1_qm_init(CommonQuantParams *quant_params, int num_planes) {
+#if CONFIG_QUANT_MATRIX || CONFIG_AV1_DECODER
for (int q = 0; q < NUM_QM_LEVELS; ++q) {
for (int c = 0; c < num_planes; ++c) {
int current = 0;
@@ -306,6 +314,10 @@ void av1_qm_init(CommonQuantParams *quant_params, int num_planes) {
}
}
}
+#else
+ (void)quant_params;
+ (void)num_planes;
+#endif // CONFIG_QUANT_MATRIX || CONFIG_AV1_DECODER
}
/* Provide 15 sets of quantization matrices for chroma and luma
@@ -320,6 +332,8 @@ void av1_qm_init(CommonQuantParams *quant_params, int num_planes) {
distances. Matrices for QM level 15 are omitted because they are
not used.
*/
+
+#if CONFIG_QUANT_MATRIX || CONFIG_AV1_DECODER
static const qm_val_t iwt_matrix_ref[NUM_QM_LEVELS - 1][2][QM_TOTAL_SIZE] = {
{
{ /* Luma */
@@ -12873,4 +12887,6 @@ static const qm_val_t wt_matrix_ref[NUM_QM_LEVELS - 1][2][QM_TOTAL_SIZE] = {
33, 33, 32, 32, 32, 32, 34, 33, 33, 33, 32, 32, 32, 32, 34, 33, 33, 33,
32, 32, 32, 32 },
},
-}; \ No newline at end of file
+};
+
+#endif // CONFIG_QUANT_MATRIX || CONFIG_AV1_DECODER
diff --git a/third_party/aom/av1/common/reconintra.c b/third_party/aom/av1/common/reconintra.c
index f68af18cb1..497863e117 100644
--- a/third_party/aom/av1/common/reconintra.c
+++ b/third_party/aom/av1/common/reconintra.c
@@ -1196,7 +1196,8 @@ static void build_directional_and_filter_intra_predictors(
const int need_right = p_angle < 90;
const int need_bottom = p_angle > 180;
if (p_angle != 90 && p_angle != 180) {
- const int ab_le = need_above_left ? 1 : 0;
+ assert(need_above_left);
+ const int ab_le = 1;
if (need_above && need_left && (txwpx + txhpx >= 24)) {
filter_intra_edge_corner(above_row, left_col);
}
@@ -1500,7 +1501,8 @@ static void highbd_build_directional_and_filter_intra_predictors(
const int need_right = p_angle < 90;
const int need_bottom = p_angle > 180;
if (p_angle != 90 && p_angle != 180) {
- const int ab_le = need_above_left ? 1 : 0;
+ assert(need_above_left);
+ const int ab_le = 1;
if (need_above && need_left && (txwpx + txhpx >= 24)) {
highbd_filter_intra_edge_corner(above_row, left_col);
}
diff --git a/third_party/aom/av1/common/resize.c b/third_party/aom/av1/common/resize.c
index 1b348836a5..441323ab1f 100644
--- a/third_party/aom/av1/common/resize.c
+++ b/third_party/aom/av1/common/resize.c
@@ -524,7 +524,7 @@ static void fill_arr_to_col(uint8_t *img, int stride, int len, uint8_t *arr) {
}
}
-bool av1_resize_plane(const uint8_t *const input, int height, int width,
+bool av1_resize_plane(const uint8_t *input, int height, int width,
int in_stride, uint8_t *output, int height2, int width2,
int out_stride) {
int i;
@@ -881,7 +881,7 @@ static void highbd_fill_arr_to_col(uint16_t *img, int stride, int len,
}
}
-void av1_highbd_resize_plane(const uint8_t *const input, int height, int width,
+void av1_highbd_resize_plane(const uint8_t *input, int height, int width,
int in_stride, uint8_t *output, int height2,
int width2, int out_stride, int bd) {
int i;
@@ -980,10 +980,9 @@ static bool highbd_upscale_normative_rect(const uint8_t *const input,
}
#endif // CONFIG_AV1_HIGHBITDEPTH
-void av1_resize_frame420(const uint8_t *const y, int y_stride,
- const uint8_t *const u, const uint8_t *const v,
- int uv_stride, int height, int width, uint8_t *oy,
- int oy_stride, uint8_t *ou, uint8_t *ov,
+void av1_resize_frame420(const uint8_t *y, int y_stride, const uint8_t *u,
+ const uint8_t *v, int uv_stride, int height, int width,
+ uint8_t *oy, int oy_stride, uint8_t *ou, uint8_t *ov,
int ouv_stride, int oheight, int owidth) {
if (!av1_resize_plane(y, height, width, y_stride, oy, oheight, owidth,
oy_stride))
@@ -996,10 +995,9 @@ void av1_resize_frame420(const uint8_t *const y, int y_stride,
abort();
}
-bool av1_resize_frame422(const uint8_t *const y, int y_stride,
- const uint8_t *const u, const uint8_t *const v,
- int uv_stride, int height, int width, uint8_t *oy,
- int oy_stride, uint8_t *ou, uint8_t *ov,
+bool av1_resize_frame422(const uint8_t *y, int y_stride, const uint8_t *u,
+ const uint8_t *v, int uv_stride, int height, int width,
+ uint8_t *oy, int oy_stride, uint8_t *ou, uint8_t *ov,
int ouv_stride, int oheight, int owidth) {
if (!av1_resize_plane(y, height, width, y_stride, oy, oheight, owidth,
oy_stride))
@@ -1013,10 +1011,9 @@ bool av1_resize_frame422(const uint8_t *const y, int y_stride,
return true;
}
-bool av1_resize_frame444(const uint8_t *const y, int y_stride,
- const uint8_t *const u, const uint8_t *const v,
- int uv_stride, int height, int width, uint8_t *oy,
- int oy_stride, uint8_t *ou, uint8_t *ov,
+bool av1_resize_frame444(const uint8_t *y, int y_stride, const uint8_t *u,
+ const uint8_t *v, int uv_stride, int height, int width,
+ uint8_t *oy, int oy_stride, uint8_t *ou, uint8_t *ov,
int ouv_stride, int oheight, int owidth) {
if (!av1_resize_plane(y, height, width, y_stride, oy, oheight, owidth,
oy_stride))
@@ -1031,8 +1028,8 @@ bool av1_resize_frame444(const uint8_t *const y, int y_stride,
}
#if CONFIG_AV1_HIGHBITDEPTH
-void av1_highbd_resize_frame420(const uint8_t *const y, int y_stride,
- const uint8_t *const u, const uint8_t *const v,
+void av1_highbd_resize_frame420(const uint8_t *y, int y_stride,
+ const uint8_t *u, const uint8_t *v,
int uv_stride, int height, int width,
uint8_t *oy, int oy_stride, uint8_t *ou,
uint8_t *ov, int ouv_stride, int oheight,
@@ -1045,8 +1042,8 @@ void av1_highbd_resize_frame420(const uint8_t *const y, int y_stride,
owidth / 2, ouv_stride, bd);
}
-void av1_highbd_resize_frame422(const uint8_t *const y, int y_stride,
- const uint8_t *const u, const uint8_t *const v,
+void av1_highbd_resize_frame422(const uint8_t *y, int y_stride,
+ const uint8_t *u, const uint8_t *v,
int uv_stride, int height, int width,
uint8_t *oy, int oy_stride, uint8_t *ou,
uint8_t *ov, int ouv_stride, int oheight,
@@ -1059,8 +1056,8 @@ void av1_highbd_resize_frame422(const uint8_t *const y, int y_stride,
owidth / 2, ouv_stride, bd);
}
-void av1_highbd_resize_frame444(const uint8_t *const y, int y_stride,
- const uint8_t *const u, const uint8_t *const v,
+void av1_highbd_resize_frame444(const uint8_t *y, int y_stride,
+ const uint8_t *u, const uint8_t *v,
int uv_stride, int height, int width,
uint8_t *oy, int oy_stride, uint8_t *ou,
uint8_t *ov, int ouv_stride, int oheight,
@@ -1126,7 +1123,7 @@ void av1_resize_and_extend_frame_c(const YV12_BUFFER_CONFIG *src,
bool av1_resize_and_extend_frame_nonnormative(const YV12_BUFFER_CONFIG *src,
YV12_BUFFER_CONFIG *dst, int bd,
- const int num_planes) {
+ int num_planes) {
// TODO(dkovalev): replace YV12_BUFFER_CONFIG with aom_image_t
// We use AOMMIN(num_planes, MAX_MB_PLANE) instead of num_planes to quiet
@@ -1246,8 +1243,7 @@ void av1_upscale_normative_and_extend_frame(const AV1_COMMON *cm,
YV12_BUFFER_CONFIG *av1_realloc_and_scale_if_required(
AV1_COMMON *cm, YV12_BUFFER_CONFIG *unscaled, YV12_BUFFER_CONFIG *scaled,
const InterpFilter filter, const int phase, const bool use_optimized_scaler,
- const bool for_psnr, const int border_in_pixels,
- const int num_pyramid_levels) {
+ const bool for_psnr, const int border_in_pixels, const bool alloc_pyramid) {
// If scaling is performed for the sole purpose of calculating PSNR, then our
// target dimensions are superres upscaled width/height. Otherwise our target
// dimensions are coded width/height.
@@ -1267,7 +1263,7 @@ YV12_BUFFER_CONFIG *av1_realloc_and_scale_if_required(
scaled, scaled_width, scaled_height, seq_params->subsampling_x,
seq_params->subsampling_y, seq_params->use_highbitdepth,
border_in_pixels, cm->features.byte_alignment, NULL, NULL, NULL,
- num_pyramid_levels, 0))
+ alloc_pyramid, 0))
aom_internal_error(cm->error, AOM_CODEC_MEM_ERROR,
"Failed to allocate scaled buffer");
@@ -1363,7 +1359,7 @@ static void copy_buffer_config(const YV12_BUFFER_CONFIG *const src,
// TODO(afergs): aom_ vs av1_ functions? Which can I use?
// Upscale decoded image.
void av1_superres_upscale(AV1_COMMON *cm, BufferPool *const pool,
- int num_pyramid_levels) {
+ bool alloc_pyramid) {
const int num_planes = av1_num_planes(cm);
if (!av1_superres_scaled(cm)) return;
const SequenceHeader *const seq_params = cm->seq_params;
@@ -1378,7 +1374,7 @@ void av1_superres_upscale(AV1_COMMON *cm, BufferPool *const pool,
if (aom_alloc_frame_buffer(
&copy_buffer, aligned_width, cm->height, seq_params->subsampling_x,
seq_params->subsampling_y, seq_params->use_highbitdepth,
- AOM_BORDER_IN_PIXELS, byte_alignment, 0, 0))
+ AOM_BORDER_IN_PIXELS, byte_alignment, false, 0))
aom_internal_error(cm->error, AOM_CODEC_MEM_ERROR,
"Failed to allocate copy buffer for superres upscaling");
@@ -1411,7 +1407,7 @@ void av1_superres_upscale(AV1_COMMON *cm, BufferPool *const pool,
cm->superres_upscaled_height, seq_params->subsampling_x,
seq_params->subsampling_y, seq_params->use_highbitdepth,
AOM_BORDER_IN_PIXELS, byte_alignment, fb, cb, cb_priv,
- num_pyramid_levels, 0)) {
+ alloc_pyramid, 0)) {
unlock_buffer_pool(pool);
aom_internal_error(
cm->error, AOM_CODEC_MEM_ERROR,
@@ -1428,7 +1424,7 @@ void av1_superres_upscale(AV1_COMMON *cm, BufferPool *const pool,
frame_to_show, cm->superres_upscaled_width,
cm->superres_upscaled_height, seq_params->subsampling_x,
seq_params->subsampling_y, seq_params->use_highbitdepth,
- AOM_BORDER_IN_PIXELS, byte_alignment, num_pyramid_levels, 0))
+ AOM_BORDER_IN_PIXELS, byte_alignment, alloc_pyramid, 0))
aom_internal_error(
cm->error, AOM_CODEC_MEM_ERROR,
"Failed to reallocate current frame buffer for superres upscaling");
diff --git a/third_party/aom/av1/common/resize.h b/third_party/aom/av1/common/resize.h
index 0ba3108f72..d573a538bf 100644
--- a/third_party/aom/av1/common/resize.h
+++ b/third_party/aom/av1/common/resize.h
@@ -20,44 +20,41 @@
extern "C" {
#endif
-bool av1_resize_plane(const uint8_t *const input, int height, int width,
+bool av1_resize_plane(const uint8_t *input, int height, int width,
int in_stride, uint8_t *output, int height2, int width2,
int out_stride);
// TODO(aomedia:3228): In libaom 4.0.0, remove av1_resize_frame420 from
// av1/exports_com and delete this function.
-void av1_resize_frame420(const uint8_t *const y, int y_stride,
- const uint8_t *const u, const uint8_t *const v,
- int uv_stride, int height, int width, uint8_t *oy,
- int oy_stride, uint8_t *ou, uint8_t *ov,
+void av1_resize_frame420(const uint8_t *y, int y_stride, const uint8_t *u,
+ const uint8_t *v, int uv_stride, int height, int width,
+ uint8_t *oy, int oy_stride, uint8_t *ou, uint8_t *ov,
int ouv_stride, int oheight, int owidth);
-bool av1_resize_frame422(const uint8_t *const y, int y_stride,
- const uint8_t *const u, const uint8_t *const v,
- int uv_stride, int height, int width, uint8_t *oy,
- int oy_stride, uint8_t *ou, uint8_t *ov,
+bool av1_resize_frame422(const uint8_t *y, int y_stride, const uint8_t *u,
+ const uint8_t *v, int uv_stride, int height, int width,
+ uint8_t *oy, int oy_stride, uint8_t *ou, uint8_t *ov,
int ouv_stride, int oheight, int owidth);
-bool av1_resize_frame444(const uint8_t *const y, int y_stride,
- const uint8_t *const u, const uint8_t *const v,
- int uv_stride, int height, int width, uint8_t *oy,
- int oy_stride, uint8_t *ou, uint8_t *ov,
+bool av1_resize_frame444(const uint8_t *y, int y_stride, const uint8_t *u,
+ const uint8_t *v, int uv_stride, int height, int width,
+ uint8_t *oy, int oy_stride, uint8_t *ou, uint8_t *ov,
int ouv_stride, int oheight, int owidth);
-void av1_highbd_resize_plane(const uint8_t *const input, int height, int width,
+void av1_highbd_resize_plane(const uint8_t *input, int height, int width,
int in_stride, uint8_t *output, int height2,
int width2, int out_stride, int bd);
-void av1_highbd_resize_frame420(const uint8_t *const y, int y_stride,
- const uint8_t *const u, const uint8_t *const v,
+void av1_highbd_resize_frame420(const uint8_t *y, int y_stride,
+ const uint8_t *u, const uint8_t *v,
int uv_stride, int height, int width,
uint8_t *oy, int oy_stride, uint8_t *ou,
uint8_t *ov, int ouv_stride, int oheight,
int owidth, int bd);
-void av1_highbd_resize_frame422(const uint8_t *const y, int y_stride,
- const uint8_t *const u, const uint8_t *const v,
+void av1_highbd_resize_frame422(const uint8_t *y, int y_stride,
+ const uint8_t *u, const uint8_t *v,
int uv_stride, int height, int width,
uint8_t *oy, int oy_stride, uint8_t *ou,
uint8_t *ov, int ouv_stride, int oheight,
int owidth, int bd);
-void av1_highbd_resize_frame444(const uint8_t *const y, int y_stride,
- const uint8_t *const u, const uint8_t *const v,
+void av1_highbd_resize_frame444(const uint8_t *y, int y_stride,
+ const uint8_t *u, const uint8_t *v,
int uv_stride, int height, int width,
uint8_t *oy, int oy_stride, uint8_t *ou,
uint8_t *ov, int ouv_stride, int oheight,
@@ -73,12 +70,11 @@ void av1_upscale_normative_and_extend_frame(const AV1_COMMON *cm,
YV12_BUFFER_CONFIG *av1_realloc_and_scale_if_required(
AV1_COMMON *cm, YV12_BUFFER_CONFIG *unscaled, YV12_BUFFER_CONFIG *scaled,
const InterpFilter filter, const int phase, const bool use_optimized_scaler,
- const bool for_psnr, const int border_in_pixels,
- const int num_pyramid_levels);
+ const bool for_psnr, const int border_in_pixels, const bool alloc_pyramid);
bool av1_resize_and_extend_frame_nonnormative(const YV12_BUFFER_CONFIG *src,
YV12_BUFFER_CONFIG *dst, int bd,
- const int num_planes);
+ int num_planes);
// Calculates the scaled dimensions from the given original dimensions and the
// resize scale denominator.
@@ -95,7 +91,7 @@ void av1_calculate_scaled_superres_size(int *width, int *height,
void av1_calculate_unscaled_superres_size(int *width, int *height, int denom);
void av1_superres_upscale(AV1_COMMON *cm, BufferPool *const pool,
- int num_pyramid_levels);
+ bool alloc_pyramid);
// Returns 1 if a superres upscaled frame is scaled and 0 otherwise.
static INLINE int av1_superres_scaled(const AV1_COMMON *cm) {
diff --git a/third_party/aom/av1/common/restoration.c b/third_party/aom/av1/common/restoration.c
index 0be126fa65..335fdc8c2a 100644
--- a/third_party/aom/av1/common/restoration.c
+++ b/third_party/aom/av1/common/restoration.c
@@ -11,20 +11,24 @@
*/
#include <math.h>
+#include <stddef.h>
#include "config/aom_config.h"
-#include "config/aom_dsp_rtcd.h"
#include "config/aom_scale_rtcd.h"
+#include "aom/internal/aom_codec_internal.h"
#include "aom_mem/aom_mem.h"
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_mem/aom_mem.h"
+#include "aom_ports/mem.h"
+#include "aom_util/aom_pthread.h"
+
#include "av1/common/av1_common_int.h"
+#include "av1/common/convolve.h"
+#include "av1/common/enums.h"
#include "av1/common/resize.h"
#include "av1/common/restoration.h"
#include "av1/common/thread_common.h"
-#include "aom_dsp/aom_dsp_common.h"
-#include "aom_mem/aom_mem.h"
-
-#include "aom_ports/mem.h"
// The 's' values are calculated based on original 'r' and 'e' values in the
// spec using GenSgrprojVtable().
@@ -115,8 +119,9 @@ void av1_loop_restoration_precal(void) {
#endif
}
-static void extend_frame_lowbd(uint8_t *data, int width, int height, int stride,
- int border_horz, int border_vert) {
+static void extend_frame_lowbd(uint8_t *data, int width, int height,
+ ptrdiff_t stride, int border_horz,
+ int border_vert) {
uint8_t *data_p;
int i;
for (i = 0; i < height; ++i) {
@@ -136,7 +141,8 @@ static void extend_frame_lowbd(uint8_t *data, int width, int height, int stride,
#if CONFIG_AV1_HIGHBITDEPTH
static void extend_frame_highbd(uint16_t *data, int width, int height,
- int stride, int border_horz, int border_vert) {
+ ptrdiff_t stride, int border_horz,
+ int border_vert) {
uint16_t *data_p;
int i, j;
for (i = 0; i < height; ++i) {
@@ -988,8 +994,10 @@ void av1_loop_restoration_filter_unit(
int unit_h = limits->v_end - limits->v_start;
int unit_w = limits->h_end - limits->h_start;
- uint8_t *data8_tl = data8 + limits->v_start * stride + limits->h_start;
- uint8_t *dst8_tl = dst8 + limits->v_start * dst_stride + limits->h_start;
+ uint8_t *data8_tl =
+ data8 + limits->v_start * (ptrdiff_t)stride + limits->h_start;
+ uint8_t *dst8_tl =
+ dst8 + limits->v_start * (ptrdiff_t)dst_stride + limits->h_start;
if (unit_rtype == RESTORE_NONE) {
copy_rest_unit(unit_w, unit_h, data8_tl, stride, dst8_tl, dst_stride,
@@ -1074,7 +1082,8 @@ void av1_loop_restoration_filter_frame_init(AV1LrStruct *lr_ctxt,
if (aom_realloc_frame_buffer(
lr_ctxt->dst, frame_width, frame_height, seq_params->subsampling_x,
seq_params->subsampling_y, highbd, AOM_RESTORATION_FRAME_BORDER,
- cm->features.byte_alignment, NULL, NULL, NULL, 0, 0) != AOM_CODEC_OK)
+ cm->features.byte_alignment, NULL, NULL, NULL, false,
+ 0) != AOM_CODEC_OK)
aom_internal_error(cm->error, AOM_CODEC_MEM_ERROR,
"Failed to allocate restoration dst buffer");
@@ -1349,7 +1358,7 @@ static void save_deblock_boundary_lines(
const int is_uv = plane > 0;
const uint8_t *src_buf = REAL_PTR(use_highbd, frame->buffers[plane]);
const int src_stride = frame->strides[is_uv] << use_highbd;
- const uint8_t *src_rows = src_buf + row * src_stride;
+ const uint8_t *src_rows = src_buf + row * (ptrdiff_t)src_stride;
uint8_t *bdry_buf = is_above ? boundaries->stripe_boundary_above
: boundaries->stripe_boundary_below;
@@ -1404,7 +1413,7 @@ static void save_cdef_boundary_lines(const YV12_BUFFER_CONFIG *frame,
const int is_uv = plane > 0;
const uint8_t *src_buf = REAL_PTR(use_highbd, frame->buffers[plane]);
const int src_stride = frame->strides[is_uv] << use_highbd;
- const uint8_t *src_rows = src_buf + row * src_stride;
+ const uint8_t *src_rows = src_buf + row * (ptrdiff_t)src_stride;
uint8_t *bdry_buf = is_above ? boundaries->stripe_boundary_above
: boundaries->stripe_boundary_below;
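
Note on the restoration.c stride changes above: casting one operand to ptrdiff_t keeps the row-offset multiplication out of 32-bit int range. A minimal sketch of the pattern, as a standalone illustrative helper rather than aom code:

    #include <stddef.h>
    #include <stdint.h>

    // With both operands as int, row * stride wraps once the product exceeds
    // INT_MAX (e.g. row = 40000, stride = 65536). Promoting one operand to
    // ptrdiff_t performs the offset arithmetic in the pointer-sized type.
    static const uint8_t *row_ptr(const uint8_t *buf, int row, int stride) {
      return buf + (ptrdiff_t)row * stride;
    }
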
diff --git a/third_party/aom/av1/common/thread_common.c b/third_party/aom/av1/common/thread_common.c
index 45695147ff..8a137cc9f7 100644
--- a/third_party/aom/av1/common/thread_common.c
+++ b/third_party/aom/av1/common/thread_common.c
@@ -14,12 +14,19 @@
#include "config/aom_scale_rtcd.h"
#include "aom_dsp/aom_dsp_common.h"
+#include "aom_dsp/txfm_common.h"
#include "aom_mem/aom_mem.h"
+#include "aom_util/aom_pthread.h"
+#include "aom_util/aom_thread.h"
#include "av1/common/av1_loopfilter.h"
+#include "av1/common/blockd.h"
+#include "av1/common/cdef.h"
#include "av1/common/entropymode.h"
+#include "av1/common/enums.h"
#include "av1/common/thread_common.h"
#include "av1/common/reconinter.h"
#include "av1/common/reconintra.h"
+#include "av1/common/restoration.h"
// Set up nsync by width.
static INLINE int get_sync_range(int width) {
diff --git a/third_party/aom/av1/common/thread_common.h b/third_party/aom/av1/common/thread_common.h
index 675687dc98..7e681f322b 100644
--- a/third_party/aom/av1/common/thread_common.h
+++ b/third_party/aom/av1/common/thread_common.h
@@ -16,6 +16,7 @@
#include "av1/common/av1_loopfilter.h"
#include "av1/common/cdef.h"
+#include "aom_util/aom_pthread.h"
#include "aom_util/aom_thread.h"
#ifdef __cplusplus
diff --git a/third_party/aom/av1/common/tile_common.c b/third_party/aom/av1/common/tile_common.c
index b964f259b8..45a189d69a 100644
--- a/third_party/aom/av1/common/tile_common.c
+++ b/third_party/aom/av1/common/tile_common.c
@@ -177,46 +177,16 @@ int av1_get_sb_cols_in_tile(const AV1_COMMON *cm, const TileInfo *tile) {
cm->seq_params->mib_size_log2);
}
-PixelRect av1_get_tile_rect(const TileInfo *tile_info, const AV1_COMMON *cm,
- int is_uv) {
- PixelRect r;
-
- // Calculate position in the Y plane
- r.left = tile_info->mi_col_start * MI_SIZE;
- r.right = tile_info->mi_col_end * MI_SIZE;
- r.top = tile_info->mi_row_start * MI_SIZE;
- r.bottom = tile_info->mi_row_end * MI_SIZE;
-
- // If upscaling is enabled, the tile limits need scaling to match the
- // upscaled frame where the restoration units live. To do this, scale up the
- // top-left and bottom-right of the tile.
- if (av1_superres_scaled(cm)) {
- av1_calculate_unscaled_superres_size(&r.left, &r.top,
- cm->superres_scale_denominator);
- av1_calculate_unscaled_superres_size(&r.right, &r.bottom,
- cm->superres_scale_denominator);
- }
-
- const int frame_w = cm->superres_upscaled_width;
- const int frame_h = cm->superres_upscaled_height;
-
- // Make sure we don't fall off the bottom-right of the frame.
- r.right = AOMMIN(r.right, frame_w);
- r.bottom = AOMMIN(r.bottom, frame_h);
-
- // Convert to coordinates in the appropriate plane
- const int ss_x = is_uv && cm->seq_params->subsampling_x;
- const int ss_y = is_uv && cm->seq_params->subsampling_y;
-
- r.left = ROUND_POWER_OF_TWO(r.left, ss_x);
- r.right = ROUND_POWER_OF_TWO(r.right, ss_x);
- r.top = ROUND_POWER_OF_TWO(r.top, ss_y);
- r.bottom = ROUND_POWER_OF_TWO(r.bottom, ss_y);
-
- return r;
-}
-
-void av1_get_uniform_tile_size(const AV1_COMMON *cm, int *w, int *h) {
+// Section 7.3.1 of the AV1 spec says, on pages 200-201:
+// It is a requirement of bitstream conformance that the following conditions
+// are met:
+// ...
+// * TileHeight is equal to (use_128x128_superblock ? 128 : 64) for all
+// tiles (i.e. the tile is exactly one superblock high)
+// * TileWidth is identical for all tiles and is an integer multiple of
+// TileHeight (i.e. the tile is an integer number of superblocks wide)
+// ...
+bool av1_get_uniform_tile_size(const AV1_COMMON *cm, int *w, int *h) {
const CommonTileParams *const tiles = &cm->tiles;
if (tiles->uniform_spacing) {
*w = tiles->width;
@@ -226,7 +196,10 @@ void av1_get_uniform_tile_size(const AV1_COMMON *cm, int *w, int *h) {
const int tile_width_sb =
tiles->col_start_sb[i + 1] - tiles->col_start_sb[i];
const int tile_w = tile_width_sb * cm->seq_params->mib_size;
- assert(i == 0 || tile_w == *w); // ensure all tiles have same dimension
+ // ensure all tiles have same dimension
+ if (i != 0 && tile_w != *w) {
+ return false;
+ }
*w = tile_w;
}
@@ -234,10 +207,14 @@ void av1_get_uniform_tile_size(const AV1_COMMON *cm, int *w, int *h) {
const int tile_height_sb =
tiles->row_start_sb[i + 1] - tiles->row_start_sb[i];
const int tile_h = tile_height_sb * cm->seq_params->mib_size;
- assert(i == 0 || tile_h == *h); // ensure all tiles have same dimension
+ // ensure all tiles have same dimension
+ if (i != 0 && tile_h != *h) {
+ return false;
+ }
*h = tile_h;
}
}
+ return true;
}
int av1_is_min_tile_width_satisfied(const AV1_COMMON *cm) {
diff --git a/third_party/aom/av1/common/tile_common.h b/third_party/aom/av1/common/tile_common.h
index 5383ae940b..12228c9e94 100644
--- a/third_party/aom/av1/common/tile_common.h
+++ b/third_party/aom/av1/common/tile_common.h
@@ -12,13 +12,14 @@
#ifndef AOM_AV1_COMMON_TILE_COMMON_H_
#define AOM_AV1_COMMON_TILE_COMMON_H_
+#include <stdbool.h>
+
+#include "config/aom_config.h"
+
#ifdef __cplusplus
extern "C" {
#endif
-#include "config/aom_config.h"
-#include "aom_dsp/rect.h"
-
struct AV1Common;
struct SequenceHeader;
struct CommonTileParams;
@@ -43,10 +44,6 @@ void av1_tile_set_col(TileInfo *tile, const struct AV1Common *cm, int col);
int av1_get_sb_rows_in_tile(const struct AV1Common *cm, const TileInfo *tile);
int av1_get_sb_cols_in_tile(const struct AV1Common *cm, const TileInfo *tile);
-// Return the pixel extents of the given tile
-PixelRect av1_get_tile_rect(const TileInfo *tile_info,
- const struct AV1Common *cm, int is_uv);
-
// Define tile maximum width and area
// There is no maximum height since height is limited by area and width limits
// The minimum tile width or height is fixed at one superblock
@@ -56,7 +53,9 @@ PixelRect av1_get_tile_rect(const TileInfo *tile_info,
#define MAX_TILE_AREA_LEVEL_7_AND_ABOVE (4096 * 4608)
#endif
-void av1_get_uniform_tile_size(const struct AV1Common *cm, int *w, int *h);
+// Gets the width and height (in units of MI_SIZE) of the tiles in a tile list.
+// Returns true on success, false on failure.
+bool av1_get_uniform_tile_size(const struct AV1Common *cm, int *w, int *h);
void av1_get_tile_limits(struct AV1Common *const cm);
void av1_calculate_tile_cols(const struct SequenceHeader *const seq_params,
int cm_mi_rows, int cm_mi_cols,
diff --git a/third_party/aom/av1/common/x86/cdef_block_sse2.c b/third_party/aom/av1/common/x86/cdef_block_sse2.c
deleted file mode 100644
index 5ab7ffa2ff..0000000000
--- a/third_party/aom/av1/common/x86/cdef_block_sse2.c
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2016, Alliance for Open Media. All rights reserved
- *
- * This source code is subject to the terms of the BSD 2 Clause License and
- * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
- * was not distributed with this source code in the LICENSE file, you can
- * obtain it at www.aomedia.org/license/software. If the Alliance for Open
- * Media Patent License 1.0 was not distributed with this source code in the
- * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
- */
-
-#include "aom_dsp/aom_simd.h"
-#define SIMD_FUNC(name) name##_sse2
-#include "av1/common/cdef_block_simd.h"
-
-void cdef_find_dir_dual_sse2(const uint16_t *img1, const uint16_t *img2,
- int stride, int32_t *var_out_1st,
- int32_t *var_out_2nd, int coeff_shift,
- int *out_dir_1st_8x8, int *out_dir_2nd_8x8) {
- // Process first 8x8.
- *out_dir_1st_8x8 = cdef_find_dir(img1, stride, var_out_1st, coeff_shift);
-
- // Process second 8x8.
- *out_dir_2nd_8x8 = cdef_find_dir(img2, stride, var_out_2nd, coeff_shift);
-}
-
-void cdef_copy_rect8_8bit_to_16bit_sse2(uint16_t *dst, int dstride,
- const uint8_t *src, int sstride,
- int width, int height) {
- int j = 0;
- for (int i = 0; i < height; i++) {
- for (j = 0; j < (width & ~0x7); j += 8) {
- v64 row = v64_load_unaligned(&src[i * sstride + j]);
- v128_store_unaligned(&dst[i * dstride + j], v128_unpack_u8_s16(row));
- }
- for (; j < width; j++) {
- dst[i * dstride + j] = src[i * sstride + j];
- }
- }
-}
diff --git a/third_party/aom/av1/common/x86/cdef_block_ssse3.c b/third_party/aom/av1/common/x86/cdef_block_ssse3.c
index 0fb36eb6e0..14eb6c9e31 100644
--- a/third_party/aom/av1/common/x86/cdef_block_ssse3.c
+++ b/third_party/aom/av1/common/x86/cdef_block_ssse3.c
@@ -9,6 +9,17 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
+// Include SSSE3 CDEF code only for 32-bit x86, to support Valgrind.
+// For normal use, we require SSE4.1, so cdef_*_sse4_1 will be used instead of
+// these functions. However, 32-bit Valgrind does not support SSE4.1, so we
+// include a fallback to SSSE3 to improve performance.
+
+#include "config/aom_config.h"
+
+#if !AOM_ARCH_X86
+#error "cdef_block_ssse3.c is included for compatibility with 32-bit x86 only"
+#endif // !AOM_ARCH_X86
+
#include "aom_dsp/aom_simd.h"
#define SIMD_FUNC(name) name##_ssse3
#include "av1/common/cdef_block_simd.h"
diff --git a/third_party/aom/av1/common/x86/convolve_2d_avx2.c b/third_party/aom/av1/common/x86/convolve_2d_avx2.c
index 1b39a0a8d5..d4c1169cc3 100644
--- a/third_party/aom/av1/common/x86/convolve_2d_avx2.c
+++ b/third_party/aom/av1/common/x86/convolve_2d_avx2.c
@@ -21,13 +21,11 @@
#include "av1/common/convolve.h"
-void av1_convolve_2d_sr_general_avx2(const uint8_t *src, int src_stride,
- uint8_t *dst, int dst_stride, int w, int h,
- const InterpFilterParams *filter_params_x,
- const InterpFilterParams *filter_params_y,
- const int subpel_x_qn,
- const int subpel_y_qn,
- ConvolveParams *conv_params) {
+static void convolve_2d_sr_general_avx2(
+ const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, int w,
+ int h, const InterpFilterParams *filter_params_x,
+ const InterpFilterParams *filter_params_y, const int subpel_x_qn,
+ const int subpel_y_qn, ConvolveParams *conv_params) {
if (filter_params_x->taps > 8) {
const int bd = 8;
int im_stride = 8, i;
@@ -150,9 +148,9 @@ void av1_convolve_2d_sr_avx2(
const bool use_general = (tap_x == 12 || tap_y == 12);
if (use_general) {
- av1_convolve_2d_sr_general_avx2(src, src_stride, dst, dst_stride, w, h,
- filter_params_x, filter_params_y,
- subpel_x_q4, subpel_y_q4, conv_params);
+ convolve_2d_sr_general_avx2(src, src_stride, dst, dst_stride, w, h,
+ filter_params_x, filter_params_y, subpel_x_q4,
+ subpel_y_q4, conv_params);
} else {
av1_convolve_2d_sr_specialized_avx2(src, src_stride, dst, dst_stride, w, h,
filter_params_x, filter_params_y,
diff --git a/third_party/aom/av1/common/x86/convolve_2d_sse2.c b/third_party/aom/av1/common/x86/convolve_2d_sse2.c
index 1b85f37294..68971eacc1 100644
--- a/third_party/aom/av1/common/x86/convolve_2d_sse2.c
+++ b/third_party/aom/av1/common/x86/convolve_2d_sse2.c
@@ -19,12 +19,11 @@
#include "aom_dsp/x86/convolve_common_intrin.h"
#include "av1/common/convolve.h"
-void av1_convolve_2d_sr_12tap_sse2(const uint8_t *src, int src_stride,
- uint8_t *dst, int dst_stride, int w, int h,
- const InterpFilterParams *filter_params_x,
- const InterpFilterParams *filter_params_y,
- const int subpel_x_qn, const int subpel_y_qn,
- ConvolveParams *conv_params) {
+static void convolve_2d_sr_12tap_sse2(
+ const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, int w,
+ int h, const InterpFilterParams *filter_params_x,
+ const InterpFilterParams *filter_params_y, const int subpel_x_qn,
+ const int subpel_y_qn, ConvolveParams *conv_params) {
const int bd = 8;
DECLARE_ALIGNED(16, int16_t,
@@ -231,9 +230,9 @@ void av1_convolve_2d_sr_sse2(const uint8_t *src, int src_stride, uint8_t *dst,
filter_params_x, filter_params_y, subpel_x_qn,
subpel_y_qn, conv_params);
} else {
- av1_convolve_2d_sr_12tap_sse2(src, src_stride, dst, dst_stride, w, h,
- filter_params_x, filter_params_y,
- subpel_x_qn, subpel_y_qn, conv_params);
+ convolve_2d_sr_12tap_sse2(src, src_stride, dst, dst_stride, w, h,
+ filter_params_x, filter_params_y, subpel_x_qn,
+ subpel_y_qn, conv_params);
}
} else {
const int bd = 8;
diff --git a/third_party/aom/av1/common/x86/convolve_sse2.c b/third_party/aom/av1/common/x86/convolve_sse2.c
index 012e75c1ae..6383567a48 100644
--- a/third_party/aom/av1/common/x86/convolve_sse2.c
+++ b/third_party/aom/av1/common/x86/convolve_sse2.c
@@ -75,10 +75,10 @@ static INLINE __m128i convolve_hi_y(const __m128i *const s,
return convolve(ss, coeffs);
}
-void av1_convolve_y_sr_12tap_sse2(const uint8_t *src, int src_stride,
- uint8_t *dst, int dst_stride, int w, int h,
- const InterpFilterParams *filter_params_y,
- int subpel_y_qn) {
+static void convolve_y_sr_12tap_sse2(const uint8_t *src, int src_stride,
+ uint8_t *dst, int dst_stride, int w, int h,
+ const InterpFilterParams *filter_params_y,
+ int subpel_y_qn) {
const int fo_vert = filter_params_y->taps / 2 - 1;
const uint8_t *src_ptr = src - fo_vert * src_stride;
const __m128i round_const = _mm_set1_epi32((1 << FILTER_BITS) >> 1);
@@ -185,8 +185,8 @@ void av1_convolve_y_sr_sse2(const uint8_t *src, int src_stride, uint8_t *dst,
av1_convolve_y_sr_c(src, src_stride, dst, dst_stride, w, h,
filter_params_y, subpel_y_qn);
} else {
- av1_convolve_y_sr_12tap_sse2(src, src_stride, dst, dst_stride, w, h,
- filter_params_y, subpel_y_qn);
+ convolve_y_sr_12tap_sse2(src, src_stride, dst, dst_stride, w, h,
+ filter_params_y, subpel_y_qn);
}
} else {
const int fo_vert = filter_params_y->taps / 2 - 1;
@@ -337,11 +337,11 @@ void av1_convolve_y_sr_sse2(const uint8_t *src, int src_stride, uint8_t *dst,
}
}
-void av1_convolve_x_sr_12tap_sse2(const uint8_t *src, int src_stride,
- uint8_t *dst, int dst_stride, int w, int h,
- const InterpFilterParams *filter_params_x,
- int subpel_x_qn,
- ConvolveParams *conv_params) {
+static void convolve_x_sr_12tap_sse2(const uint8_t *src, int src_stride,
+ uint8_t *dst, int dst_stride, int w, int h,
+ const InterpFilterParams *filter_params_x,
+ int subpel_x_qn,
+ ConvolveParams *conv_params) {
const int fo_horiz = filter_params_x->taps / 2 - 1;
const uint8_t *src_ptr = src - fo_horiz;
const int bits = FILTER_BITS - conv_params->round_0;
@@ -402,8 +402,8 @@ void av1_convolve_x_sr_sse2(const uint8_t *src, int src_stride, uint8_t *dst,
av1_convolve_x_sr_c(src, src_stride, dst, dst_stride, w, h,
filter_params_x, subpel_x_qn, conv_params);
} else {
- av1_convolve_x_sr_12tap_sse2(src, src_stride, dst, dst_stride, w, h,
- filter_params_x, subpel_x_qn, conv_params);
+ convolve_x_sr_12tap_sse2(src, src_stride, dst, dst_stride, w, h,
+ filter_params_x, subpel_x_qn, conv_params);
}
} else {
const int fo_horiz = filter_params_x->taps / 2 - 1;
diff --git a/third_party/aom/av1/common/x86/jnt_convolve_sse2.c b/third_party/aom/av1/common/x86/jnt_convolve_sse2.c
index 8c5d9918fb..d5d2db7455 100644
--- a/third_party/aom/av1/common/x86/jnt_convolve_sse2.c
+++ b/third_party/aom/av1/common/x86/jnt_convolve_sse2.c
@@ -375,232 +375,3 @@ void av1_dist_wtd_convolve_y_sse2(const uint8_t *src, int src_stride,
} while (j < w);
}
}
-
-void av1_dist_wtd_convolve_2d_sse2(const uint8_t *src, int src_stride,
- uint8_t *dst0, int dst_stride0, int w, int h,
- const InterpFilterParams *filter_params_x,
- const InterpFilterParams *filter_params_y,
- const int subpel_x_qn, const int subpel_y_qn,
- ConvolveParams *conv_params) {
- CONV_BUF_TYPE *dst = conv_params->dst;
- int dst_stride = conv_params->dst_stride;
- const int bd = 8;
-
- DECLARE_ALIGNED(16, int16_t,
- im_block[(MAX_SB_SIZE + MAX_FILTER_TAP - 1) * MAX_SB_SIZE]);
- int im_h = h + filter_params_y->taps - 1;
- int im_stride = MAX_SB_SIZE;
- int i, j;
- const int fo_vert = filter_params_y->taps / 2 - 1;
- const int fo_horiz = filter_params_x->taps / 2 - 1;
- const int do_average = conv_params->do_average;
- const int use_dist_wtd_comp_avg = conv_params->use_dist_wtd_comp_avg;
- const uint8_t *const src_ptr = src - fo_vert * src_stride - fo_horiz;
-
- const __m128i zero = _mm_setzero_si128();
-
- const int w0 = conv_params->fwd_offset;
- const int w1 = conv_params->bck_offset;
- const __m128i wt0 = _mm_set1_epi16(w0);
- const __m128i wt1 = _mm_set1_epi16(w1);
- const __m128i wt = _mm_unpacklo_epi16(wt0, wt1);
-
- const int offset_0 =
- bd + 2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1;
- const int offset = (1 << offset_0) + (1 << (offset_0 - 1));
- const __m128i offset_const = _mm_set1_epi16(offset);
- const int rounding_shift =
- 2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1;
- const __m128i rounding_const = _mm_set1_epi16((1 << rounding_shift) >> 1);
-
- /* Horizontal filter */
- {
- const int16_t *x_filter = av1_get_interp_filter_subpel_kernel(
- filter_params_x, subpel_x_qn & SUBPEL_MASK);
- const __m128i coeffs_x = _mm_loadu_si128((__m128i *)x_filter);
-
- // coeffs 0 1 0 1 2 3 2 3
- const __m128i tmp_0 = _mm_unpacklo_epi32(coeffs_x, coeffs_x);
- // coeffs 4 5 4 5 6 7 6 7
- const __m128i tmp_1 = _mm_unpackhi_epi32(coeffs_x, coeffs_x);
-
- // coeffs 0 1 0 1 0 1 0 1
- const __m128i coeff_01 = _mm_unpacklo_epi64(tmp_0, tmp_0);
- // coeffs 2 3 2 3 2 3 2 3
- const __m128i coeff_23 = _mm_unpackhi_epi64(tmp_0, tmp_0);
- // coeffs 4 5 4 5 4 5 4 5
- const __m128i coeff_45 = _mm_unpacklo_epi64(tmp_1, tmp_1);
- // coeffs 6 7 6 7 6 7 6 7
- const __m128i coeff_67 = _mm_unpackhi_epi64(tmp_1, tmp_1);
-
- const __m128i round_const = _mm_set1_epi32(
- ((1 << conv_params->round_0) >> 1) + (1 << (bd + FILTER_BITS - 1)));
- const __m128i round_shift = _mm_cvtsi32_si128(conv_params->round_0);
-
- for (i = 0; i < im_h; ++i) {
- for (j = 0; j < w; j += 8) {
- __m128i temp_lo, temp_hi;
- const __m128i data =
- _mm_loadu_si128((__m128i *)&src_ptr[i * src_stride + j]);
-
- const __m128i src_lo = _mm_unpacklo_epi8(data, zero);
- const __m128i src_hi = _mm_unpackhi_epi8(data, zero);
-
- // Filter even-index pixels
- const __m128i res_0 = _mm_madd_epi16(src_lo, coeff_01);
- temp_lo = _mm_srli_si128(src_lo, 4);
- temp_hi = _mm_slli_si128(src_hi, 12);
- const __m128i src_2 = _mm_or_si128(temp_hi, temp_lo);
- const __m128i res_2 = _mm_madd_epi16(src_2, coeff_23);
- temp_lo = _mm_srli_si128(src_lo, 8);
- temp_hi = _mm_slli_si128(src_hi, 8);
- const __m128i src_4 = _mm_or_si128(temp_hi, temp_lo);
- const __m128i res_4 = _mm_madd_epi16(src_4, coeff_45);
- temp_lo = _mm_srli_si128(src_lo, 12);
- temp_hi = _mm_slli_si128(src_hi, 4);
- const __m128i src_6 = _mm_or_si128(temp_hi, temp_lo);
- const __m128i res_6 = _mm_madd_epi16(src_6, coeff_67);
-
- __m128i res_even = _mm_add_epi32(_mm_add_epi32(res_0, res_4),
- _mm_add_epi32(res_2, res_6));
- res_even =
- _mm_sra_epi32(_mm_add_epi32(res_even, round_const), round_shift);
-
- // Filter odd-index pixels
- temp_lo = _mm_srli_si128(src_lo, 2);
- temp_hi = _mm_slli_si128(src_hi, 14);
- const __m128i src_1 = _mm_or_si128(temp_hi, temp_lo);
- const __m128i res_1 = _mm_madd_epi16(src_1, coeff_01);
- temp_lo = _mm_srli_si128(src_lo, 6);
- temp_hi = _mm_slli_si128(src_hi, 10);
- const __m128i src_3 = _mm_or_si128(temp_hi, temp_lo);
- const __m128i res_3 = _mm_madd_epi16(src_3, coeff_23);
- temp_lo = _mm_srli_si128(src_lo, 10);
- temp_hi = _mm_slli_si128(src_hi, 6);
- const __m128i src_5 = _mm_or_si128(temp_hi, temp_lo);
- const __m128i res_5 = _mm_madd_epi16(src_5, coeff_45);
- temp_lo = _mm_srli_si128(src_lo, 14);
- temp_hi = _mm_slli_si128(src_hi, 2);
- const __m128i src_7 = _mm_or_si128(temp_hi, temp_lo);
- const __m128i res_7 = _mm_madd_epi16(src_7, coeff_67);
-
- __m128i res_odd = _mm_add_epi32(_mm_add_epi32(res_1, res_5),
- _mm_add_epi32(res_3, res_7));
- res_odd =
- _mm_sra_epi32(_mm_add_epi32(res_odd, round_const), round_shift);
-
- // Pack in the column order 0, 2, 4, 6, 1, 3, 5, 7
- __m128i res = _mm_packs_epi32(res_even, res_odd);
- _mm_store_si128((__m128i *)&im_block[i * im_stride + j], res);
- }
- }
- }
-
- /* Vertical filter */
- {
- const int16_t *y_filter = av1_get_interp_filter_subpel_kernel(
- filter_params_y, subpel_y_qn & SUBPEL_MASK);
- const __m128i coeffs_y = _mm_loadu_si128((__m128i *)y_filter);
-
- // coeffs 0 1 0 1 2 3 2 3
- const __m128i tmp_0 = _mm_unpacklo_epi32(coeffs_y, coeffs_y);
- // coeffs 4 5 4 5 6 7 6 7
- const __m128i tmp_1 = _mm_unpackhi_epi32(coeffs_y, coeffs_y);
-
- // coeffs 0 1 0 1 0 1 0 1
- const __m128i coeff_01 = _mm_unpacklo_epi64(tmp_0, tmp_0);
- // coeffs 2 3 2 3 2 3 2 3
- const __m128i coeff_23 = _mm_unpackhi_epi64(tmp_0, tmp_0);
- // coeffs 4 5 4 5 4 5 4 5
- const __m128i coeff_45 = _mm_unpacklo_epi64(tmp_1, tmp_1);
- // coeffs 6 7 6 7 6 7 6 7
- const __m128i coeff_67 = _mm_unpackhi_epi64(tmp_1, tmp_1);
-
- const __m128i round_const = _mm_set1_epi32(
- ((1 << conv_params->round_1) >> 1) -
- (1 << (bd + 2 * FILTER_BITS - conv_params->round_0 - 1)));
- const __m128i round_shift = _mm_cvtsi32_si128(conv_params->round_1);
-
- for (i = 0; i < h; ++i) {
- for (j = 0; j < w; j += 8) {
- // Filter even-index pixels
- const int16_t *data = &im_block[i * im_stride + j];
- const __m128i src_0 =
- _mm_unpacklo_epi16(*(__m128i *)(data + 0 * im_stride),
- *(__m128i *)(data + 1 * im_stride));
- const __m128i src_2 =
- _mm_unpacklo_epi16(*(__m128i *)(data + 2 * im_stride),
- *(__m128i *)(data + 3 * im_stride));
- const __m128i src_4 =
- _mm_unpacklo_epi16(*(__m128i *)(data + 4 * im_stride),
- *(__m128i *)(data + 5 * im_stride));
- const __m128i src_6 =
- _mm_unpacklo_epi16(*(__m128i *)(data + 6 * im_stride),
- *(__m128i *)(data + 7 * im_stride));
-
- const __m128i res_0 = _mm_madd_epi16(src_0, coeff_01);
- const __m128i res_2 = _mm_madd_epi16(src_2, coeff_23);
- const __m128i res_4 = _mm_madd_epi16(src_4, coeff_45);
- const __m128i res_6 = _mm_madd_epi16(src_6, coeff_67);
-
- const __m128i res_even = _mm_add_epi32(_mm_add_epi32(res_0, res_2),
- _mm_add_epi32(res_4, res_6));
-
- // Filter odd-index pixels
- const __m128i src_1 =
- _mm_unpackhi_epi16(*(__m128i *)(data + 0 * im_stride),
- *(__m128i *)(data + 1 * im_stride));
- const __m128i src_3 =
- _mm_unpackhi_epi16(*(__m128i *)(data + 2 * im_stride),
- *(__m128i *)(data + 3 * im_stride));
- const __m128i src_5 =
- _mm_unpackhi_epi16(*(__m128i *)(data + 4 * im_stride),
- *(__m128i *)(data + 5 * im_stride));
- const __m128i src_7 =
- _mm_unpackhi_epi16(*(__m128i *)(data + 6 * im_stride),
- *(__m128i *)(data + 7 * im_stride));
-
- const __m128i res_1 = _mm_madd_epi16(src_1, coeff_01);
- const __m128i res_3 = _mm_madd_epi16(src_3, coeff_23);
- const __m128i res_5 = _mm_madd_epi16(src_5, coeff_45);
- const __m128i res_7 = _mm_madd_epi16(src_7, coeff_67);
-
- const __m128i res_odd = _mm_add_epi32(_mm_add_epi32(res_1, res_3),
- _mm_add_epi32(res_5, res_7));
-
- // Rearrange pixels back into the order 0 ... 7
- const __m128i res_lo = _mm_unpacklo_epi32(res_even, res_odd);
- const __m128i res_hi = _mm_unpackhi_epi32(res_even, res_odd);
-
- const __m128i res_lo_round =
- _mm_sra_epi32(_mm_add_epi32(res_lo, round_const), round_shift);
- const __m128i res_hi_round =
- _mm_sra_epi32(_mm_add_epi32(res_hi, round_const), round_shift);
-
- const __m128i res_16b = _mm_packs_epi32(res_lo_round, res_hi_round);
- const __m128i res_unsigned = _mm_add_epi16(res_16b, offset_const);
-
- // Accumulate values into the destination buffer
- if (do_average) {
- const __m128i data_ref_0 =
- _mm_loadu_si128((__m128i *)(&dst[i * dst_stride + j]));
-
- const __m128i comp_avg_res =
- comp_avg(&data_ref_0, &res_unsigned, &wt, use_dist_wtd_comp_avg);
-
- const __m128i round_result = convolve_rounding(
- &comp_avg_res, &offset_const, &rounding_const, rounding_shift);
-
- const __m128i res_8 = _mm_packus_epi16(round_result, round_result);
-
- if (w > 4)
- _mm_storel_epi64((__m128i *)(&dst0[i * dst_stride0 + j]), res_8);
- else
- *(int *)(&dst0[i * dst_stride0 + j]) = _mm_cvtsi128_si32(res_8);
- } else {
- _mm_store_si128((__m128i *)(&dst[i * dst_stride + j]), res_unsigned);
- }
- }
- }
- }
-}
diff --git a/third_party/aom/av1/decoder/decodeframe.c b/third_party/aom/av1/decoder/decodeframe.c
index bb09347e1c..c027308ff3 100644
--- a/third_party/aom/av1/decoder/decodeframe.c
+++ b/third_party/aom/av1/decoder/decodeframe.c
@@ -14,20 +14,23 @@
#include <stddef.h>
#include "config/aom_config.h"
-#include "config/aom_dsp_rtcd.h"
#include "config/aom_scale_rtcd.h"
-#include "config/av1_rtcd.h"
#include "aom/aom_codec.h"
+#include "aom/aom_image.h"
+#include "aom/internal/aom_codec_internal.h"
#include "aom_dsp/aom_dsp_common.h"
#include "aom_dsp/binary_codes_reader.h"
#include "aom_dsp/bitreader.h"
#include "aom_dsp/bitreader_buffer.h"
+#include "aom_dsp/txfm_common.h"
#include "aom_mem/aom_mem.h"
#include "aom_ports/aom_timer.h"
#include "aom_ports/mem.h"
#include "aom_ports/mem_ops.h"
#include "aom_scale/aom_scale.h"
+#include "aom_scale/yv12config.h"
+#include "aom_util/aom_pthread.h"
#include "aom_util/aom_thread.h"
#if CONFIG_BITSTREAM_DEBUG || CONFIG_MISMATCH_DEBUG
@@ -35,33 +38,41 @@
#endif // CONFIG_BITSTREAM_DEBUG || CONFIG_MISMATCH_DEBUG
#include "av1/common/alloccommon.h"
+#include "av1/common/av1_common_int.h"
+#include "av1/common/blockd.h"
#include "av1/common/cdef.h"
#include "av1/common/cfl.h"
-#if CONFIG_INSPECTION
-#include "av1/decoder/inspection.h"
-#endif
+#include "av1/common/common_data.h"
#include "av1/common/common.h"
#include "av1/common/entropy.h"
#include "av1/common/entropymode.h"
#include "av1/common/entropymv.h"
+#include "av1/common/enums.h"
#include "av1/common/frame_buffers.h"
#include "av1/common/idct.h"
+#include "av1/common/mv.h"
#include "av1/common/mvref_common.h"
+#include "av1/common/obmc.h"
#include "av1/common/pred_common.h"
#include "av1/common/quant_common.h"
#include "av1/common/reconinter.h"
#include "av1/common/reconintra.h"
#include "av1/common/resize.h"
+#include "av1/common/restoration.h"
+#include "av1/common/scale.h"
#include "av1/common/seg_common.h"
#include "av1/common/thread_common.h"
#include "av1/common/tile_common.h"
#include "av1/common/warped_motion.h"
-#include "av1/common/obmc.h"
+
#include "av1/decoder/decodeframe.h"
#include "av1/decoder/decodemv.h"
#include "av1/decoder/decoder.h"
#include "av1/decoder/decodetxb.h"
#include "av1/decoder/detokenize.h"
+#if CONFIG_INSPECTION
+#include "av1/decoder/inspection.h"
+#endif
#define ACCT_STR __func__
@@ -1935,8 +1946,8 @@ static AOM_INLINE void setup_buffer_pool(AV1_COMMON *cm) {
&cm->cur_frame->buf, cm->width, cm->height, seq_params->subsampling_x,
seq_params->subsampling_y, seq_params->use_highbitdepth,
AOM_DEC_BORDER_IN_PIXELS, cm->features.byte_alignment,
- &cm->cur_frame->raw_frame_buffer, pool->get_fb_cb, pool->cb_priv, 0,
- 0)) {
+ &cm->cur_frame->raw_frame_buffer, pool->get_fb_cb, pool->cb_priv,
+ false, 0)) {
unlock_buffer_pool(pool);
aom_internal_error(cm->error, AOM_CODEC_MEM_ERROR,
"Failed to allocate frame buffer");
@@ -2293,7 +2304,11 @@ static const uint8_t *get_ls_tile_buffers(
const int tile_col_size_bytes = pbi->tile_col_size_bytes;
const int tile_size_bytes = pbi->tile_size_bytes;
int tile_width, tile_height;
- av1_get_uniform_tile_size(cm, &tile_width, &tile_height);
+ if (!av1_get_uniform_tile_size(cm, &tile_width, &tile_height)) {
+ aom_internal_error(
+ &pbi->error, AOM_CODEC_CORRUPT_FRAME,
+ "Not all the tiles in the tile list have the same size.");
+ }
const int tile_copy_mode =
((AOMMAX(tile_width, tile_height) << MI_SIZE_LOG2) <= 256) ? 1 : 0;
// Read tile column sizes for all columns (we need the last tile buffer)
@@ -2302,8 +2317,16 @@ static const uint8_t *get_ls_tile_buffers(
size_t tile_col_size;
if (!is_last) {
+ if (tile_col_size_bytes > data_end - data) {
+ aom_internal_error(&pbi->error, AOM_CODEC_CORRUPT_FRAME,
+ "Not enough data to read tile_col_size");
+ }
tile_col_size = mem_get_varsize(data, tile_col_size_bytes);
data += tile_col_size_bytes;
+ if (tile_col_size > (size_t)(data_end - data)) {
+ aom_internal_error(&pbi->error, AOM_CODEC_CORRUPT_FRAME,
+ "tile_col_data_end[%d] is out of bound", c);
+ }
tile_col_data_end[c] = data + tile_col_size;
} else {
tile_col_size = data_end - data;
@@ -3871,8 +3894,8 @@ static AOM_INLINE void read_bitdepth(
#endif
}
-void av1_read_film_grain_params(AV1_COMMON *cm,
- struct aom_read_bit_buffer *rb) {
+static void read_film_grain_params(AV1_COMMON *cm,
+ struct aom_read_bit_buffer *rb) {
aom_film_grain_t *pars = &cm->film_grain_params;
const SequenceHeader *const seq_params = cm->seq_params;
@@ -4040,7 +4063,7 @@ static AOM_INLINE void read_film_grain(AV1_COMMON *cm,
struct aom_read_bit_buffer *rb) {
if (cm->seq_params->film_grain_params_present &&
(cm->show_frame || cm->showable_frame)) {
- av1_read_film_grain_params(cm, rb);
+ read_film_grain_params(cm, rb);
} else {
memset(&cm->film_grain_params, 0, sizeof(cm->film_grain_params));
}
@@ -4768,7 +4791,7 @@ static int read_uncompressed_header(AV1Decoder *pbi,
seq_params->max_frame_height, seq_params->subsampling_x,
seq_params->subsampling_y, seq_params->use_highbitdepth,
AOM_BORDER_IN_PIXELS, features->byte_alignment,
- &buf->raw_frame_buffer, pool->get_fb_cb, pool->cb_priv, 0,
+ &buf->raw_frame_buffer, pool->get_fb_cb, pool->cb_priv, false,
0)) {
decrease_ref_count(buf, pool);
unlock_buffer_pool(pool);
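
Note on the get_ls_tile_buffers() hardening above: each tile column size field is now checked against the remaining input before it is read, and the payload it describes is checked before the column end pointer is advanced. A self-contained sketch of the same pattern, using a hypothetical read_varsize_checked() helper (mem_get_varsize() reads the field little-endian):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    // Returns false if either the size field itself or the payload it
    // describes would run past data_end; on success advances *data past the
    // size field and stores the payload size.
    static bool read_varsize_checked(const uint8_t **data,
                                     const uint8_t *data_end, int size_bytes,
                                     size_t *payload_size) {
      if (size_bytes > data_end - *data) return false;  // size field truncated
      size_t size = 0;
      for (int i = 0; i < size_bytes; ++i) size |= (size_t)(*data)[i] << (8 * i);
      *data += size_bytes;
      if (size > (size_t)(data_end - *data)) return false;  // payload truncated
      *payload_size = size;
      return true;
    }
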
diff --git a/third_party/aom/av1/decoder/decodemv.h b/third_party/aom/av1/decoder/decodemv.h
index 3d8629c9a5..7e77c030f8 100644
--- a/third_party/aom/av1/decoder/decodemv.h
+++ b/third_party/aom/av1/decoder/decodemv.h
@@ -20,6 +20,8 @@
extern "C" {
#endif
+int av1_neg_deinterleave(int diff, int ref, int max);
+
void av1_read_mode_info(AV1Decoder *const pbi, DecoderCodingBlock *dcb,
aom_reader *r, int x_mis, int y_mis);
diff --git a/third_party/aom/av1/decoder/decoder.c b/third_party/aom/av1/decoder/decoder.c
index 32e94840be..a886ed469c 100644
--- a/third_party/aom/av1/decoder/decoder.c
+++ b/third_party/aom/av1/decoder/decoder.c
@@ -21,6 +21,7 @@
#include "aom_mem/aom_mem.h"
#include "aom_ports/aom_timer.h"
#include "aom_scale/aom_scale.h"
+#include "aom_util/aom_pthread.h"
#include "aom_util/aom_thread.h"
#include "av1/common/alloccommon.h"
diff --git a/third_party/aom/av1/decoder/dthread.h b/third_party/aom/av1/decoder/dthread.h
index f82b9d8ccf..b0f6fda829 100644
--- a/third_party/aom/av1/decoder/dthread.h
+++ b/third_party/aom/av1/decoder/dthread.h
@@ -14,7 +14,6 @@
#include "config/aom_config.h"
-#include "aom_util/aom_thread.h"
#include "aom/internal/aom_codec_internal.h"
#ifdef __cplusplus
diff --git a/third_party/aom/av1/decoder/obu.c b/third_party/aom/av1/decoder/obu.c
index 0e31ce9404..e0b2d87c32 100644
--- a/third_party/aom/av1/decoder/obu.c
+++ b/third_party/aom/av1/decoder/obu.c
@@ -367,16 +367,13 @@ static uint32_t read_one_tile_group_obu(
return header_size + tg_payload_size;
}
-static void alloc_tile_list_buffer(AV1Decoder *pbi) {
+static void alloc_tile_list_buffer(AV1Decoder *pbi, int tile_width_in_pixels,
+ int tile_height_in_pixels) {
// The resolution of the output frame is read out from the bitstream. The data
// are stored in the order of Y plane, U plane and V plane. As an example, for
// image format 4:2:0, the output frame of U plane and V plane is 1/4 of the
// output frame.
AV1_COMMON *const cm = &pbi->common;
- int tile_width, tile_height;
- av1_get_uniform_tile_size(cm, &tile_width, &tile_height);
- const int tile_width_in_pixels = tile_width * MI_SIZE;
- const int tile_height_in_pixels = tile_height * MI_SIZE;
const int output_frame_width =
(pbi->output_frame_width_in_tiles_minus_1 + 1) * tile_width_in_pixels;
const int output_frame_height =
@@ -396,7 +393,7 @@ static void alloc_tile_list_buffer(AV1Decoder *pbi) {
cm->seq_params->subsampling_y,
(cm->seq_params->use_highbitdepth &&
(cm->seq_params->bit_depth > AOM_BITS_8)),
- 0, cm->features.byte_alignment, 0, 0))
+ 0, cm->features.byte_alignment, false, 0))
aom_internal_error(&pbi->error, AOM_CODEC_MEM_ERROR,
"Failed to allocate the tile list output buffer");
}
@@ -424,13 +421,10 @@ static void yv12_tile_copy(const YV12_BUFFER_CONFIG *src, int hstart1,
return;
}
-static void copy_decoded_tile_to_tile_list_buffer(AV1Decoder *pbi,
- int tile_idx) {
+static void copy_decoded_tile_to_tile_list_buffer(AV1Decoder *pbi, int tile_idx,
+ int tile_width_in_pixels,
+ int tile_height_in_pixels) {
AV1_COMMON *const cm = &pbi->common;
- int tile_width, tile_height;
- av1_get_uniform_tile_size(cm, &tile_width, &tile_height);
- const int tile_width_in_pixels = tile_width * MI_SIZE;
- const int tile_height_in_pixels = tile_height * MI_SIZE;
const int ssy = cm->seq_params->subsampling_y;
const int ssx = cm->seq_params->subsampling_x;
const int num_planes = av1_num_planes(cm);
@@ -501,13 +495,31 @@ static uint32_t read_and_decode_one_tile_list(AV1Decoder *pbi,
pbi->output_frame_width_in_tiles_minus_1 = aom_rb_read_literal(rb, 8);
pbi->output_frame_height_in_tiles_minus_1 = aom_rb_read_literal(rb, 8);
pbi->tile_count_minus_1 = aom_rb_read_literal(rb, 16);
+
+ // The output frame is used to store the decoded tile list. The decoded tile
+ // list has to fit into 1 output frame.
+ if ((pbi->tile_count_minus_1 + 1) >
+ (pbi->output_frame_width_in_tiles_minus_1 + 1) *
+ (pbi->output_frame_height_in_tiles_minus_1 + 1)) {
+ pbi->error.error_code = AOM_CODEC_CORRUPT_FRAME;
+ return 0;
+ }
+
if (pbi->tile_count_minus_1 > MAX_TILES - 1) {
pbi->error.error_code = AOM_CODEC_CORRUPT_FRAME;
return 0;
}
+ int tile_width, tile_height;
+ if (!av1_get_uniform_tile_size(cm, &tile_width, &tile_height)) {
+ pbi->error.error_code = AOM_CODEC_CORRUPT_FRAME;
+ return 0;
+ }
+ const int tile_width_in_pixels = tile_width * MI_SIZE;
+ const int tile_height_in_pixels = tile_height * MI_SIZE;
+
// Allocate output frame buffer for the tile list.
- alloc_tile_list_buffer(pbi);
+ alloc_tile_list_buffer(pbi, tile_width_in_pixels, tile_height_in_pixels);
uint32_t tile_list_info_bytes = 4;
tile_list_payload_size += tile_list_info_bytes;
@@ -558,7 +570,8 @@ static uint32_t read_and_decode_one_tile_list(AV1Decoder *pbi,
assert(data <= data_end);
// Copy the decoded tile to the tile list output buffer.
- copy_decoded_tile_to_tile_list_buffer(pbi, tile_idx);
+ copy_decoded_tile_to_tile_list_buffer(pbi, tile_idx, tile_width_in_pixels,
+ tile_height_in_pixels);
tile_idx++;
}
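
Note on the tile-list capacity check above: the output frame holds (output_frame_width_in_tiles_minus_1 + 1) * (output_frame_height_in_tiles_minus_1 + 1) decoded tiles, so for a 2x2 grid (both syntax elements equal to 1) a bitstream declaring tile_count_minus_1 >= 4 cannot fit and is rejected as AOM_CODEC_CORRUPT_FRAME before any output buffer is allocated.
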
diff --git a/third_party/aom/av1/encoder/allintra_vis.c b/third_party/aom/av1/encoder/allintra_vis.c
index 8dcef5fc85..87becb80ef 100644
--- a/third_party/aom/av1/encoder/allintra_vis.c
+++ b/third_party/aom/av1/encoder/allintra_vis.c
@@ -13,6 +13,8 @@
#include "config/aom_config.h"
+#include "aom_util/aom_pthread.h"
+
#if CONFIG_TFLITE
#include "tensorflow/lite/c/c_api.h"
#include "av1/encoder/deltaq4_model.c"
@@ -588,7 +590,7 @@ void av1_set_mb_wiener_variance(AV1_COMP *cpi) {
&cm->cur_frame->buf, cm->width, cm->height, seq_params->subsampling_x,
seq_params->subsampling_y, seq_params->use_highbitdepth,
cpi->oxcf.border_in_pixels, cm->features.byte_alignment, NULL, NULL,
- NULL, cpi->image_pyramid_levels, 0))
+ NULL, cpi->alloc_pyramid, 0))
aom_internal_error(cm->error, AOM_CODEC_MEM_ERROR,
"Failed to allocate frame buffer");
av1_alloc_mb_wiener_var_pred_buf(&cpi->common, &cpi->td);
diff --git a/third_party/aom/av1/encoder/aq_cyclicrefresh.c b/third_party/aom/av1/encoder/aq_cyclicrefresh.c
index f48ff11e51..1aa8dde323 100644
--- a/third_party/aom/av1/encoder/aq_cyclicrefresh.c
+++ b/third_party/aom/av1/encoder/aq_cyclicrefresh.c
@@ -15,6 +15,7 @@
#include "av1/common/pred_common.h"
#include "av1/common/seg_common.h"
#include "av1/encoder/aq_cyclicrefresh.h"
+#include "av1/encoder/encoder_utils.h"
#include "av1/encoder/ratectrl.h"
#include "av1/encoder/segmentation.h"
#include "av1/encoder/tokenize.h"
@@ -295,6 +296,7 @@ static void cyclic_refresh_update_map(AV1_COMP *const cpi) {
const CommonModeInfoParams *const mi_params = &cm->mi_params;
CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
unsigned char *const seg_map = cpi->enc_seg.map;
+ unsigned char *const active_map_4x4 = cpi->active_map.map;
int i, block_count, bl_index, sb_rows, sb_cols, sbs_in_frame;
int xmis, ymis, x, y;
uint64_t sb_sad = 0;
@@ -302,7 +304,12 @@ static void cyclic_refresh_update_map(AV1_COMP *const cpi) {
uint64_t thresh_sad = INT64_MAX;
const int mi_rows = mi_params->mi_rows, mi_cols = mi_params->mi_cols;
const int mi_stride = mi_cols;
- memset(seg_map, CR_SEGMENT_ID_BASE, mi_rows * mi_cols);
+ // Don't set seg_map to 0 if active_maps is enabled. Active_maps will set
+ // seg_map to either 7 or 0 (AM_SEGMENT_ID_INACTIVE/ACTIVE), and cyclic
+ // refresh set below (segment 1 or 2) will only be set for ACTIVE blocks.
+ if (!cpi->active_map.enabled) {
+ memset(seg_map, CR_SEGMENT_ID_BASE, mi_rows * mi_cols);
+ }
sb_cols = (mi_cols + cm->seq_params->mib_size - 1) / cm->seq_params->mib_size;
sb_rows = (mi_rows + cm->seq_params->mib_size - 1) / cm->seq_params->mib_size;
sbs_in_frame = sb_cols * sb_rows;
@@ -357,7 +364,10 @@ static void cyclic_refresh_update_map(AV1_COMP *const cpi) {
// for possible boost/refresh (segment 1). The segment id may get
// reset to 0 later if block gets coded anything other than low motion.
// If the block_sad (sb_sad) is very low label it for refresh anyway.
- if (cr->map[bl_index2] == 0 || sb_sad < thresh_sad_low) {
+ // If active_maps is enabled, only allow for setting on ACTIVE blocks.
+ if ((cr->map[bl_index2] == 0 || sb_sad < thresh_sad_low) &&
+ (!cpi->active_map.enabled ||
+ active_map_4x4[bl_index2] == AM_SEGMENT_ID_ACTIVE)) {
sum_map += 4;
} else if (cr->map[bl_index2] < 0) {
cr->map[bl_index2]++;
@@ -380,7 +390,8 @@ static void cyclic_refresh_update_map(AV1_COMP *const cpi) {
cr->sb_index = i;
if (cr->target_num_seg_blocks == 0) {
// Disable segmentation, seg_map is already set to 0 above.
- av1_disable_segmentation(&cm->seg);
+ // Don't disable if active_map is being used.
+ if (!cpi->active_map.enabled) av1_disable_segmentation(&cm->seg);
}
}
@@ -423,8 +434,6 @@ void av1_cyclic_refresh_update_parameters(AV1_COMP *const cpi) {
// function av1_cyclic_reset_segment_skip(). Skipping over
// 4x4 will therefore have small bdrate loss (~0.2%), so
// we use it only for speed > 9 for now.
- // Also if loop-filter deltas is applied via segment, then
- // we need to set cr->skip_over4x4 = 1.
cr->skip_over4x4 = (cpi->oxcf.speed > 9) ? 1 : 0;
// should we enable cyclic refresh on this frame.
@@ -450,6 +459,15 @@ void av1_cyclic_refresh_update_parameters(AV1_COMP *const cpi) {
else
cr->percent_refresh = 10 + cr->percent_refresh_adjustment;
+ if (cpi->active_map.enabled) {
+ // Scale down the percent_refresh to target the active blocks only.
+ cr->percent_refresh =
+ cr->percent_refresh * (100 - cpi->rc.percent_blocks_inactive) / 100;
+ if (cr->percent_refresh == 0) {
+ cr->apply_cyclic_refresh = 0;
+ }
+ }
+
cr->max_qdelta_perc = 60;
cr->time_for_refresh = 0;
cr->use_block_sad_scene_det =
@@ -543,10 +561,14 @@ void av1_cyclic_refresh_setup(AV1_COMP *const cpi) {
if (resolution_change) av1_cyclic_refresh_reset_resize(cpi);
if (!cr->apply_cyclic_refresh) {
- // Set segmentation map to 0 and disable.
- unsigned char *const seg_map = cpi->enc_seg.map;
- memset(seg_map, 0, cm->mi_params.mi_rows * cm->mi_params.mi_cols);
- av1_disable_segmentation(&cm->seg);
+ // Don't disable and set seg_map to 0 if active_maps is enabled, unless
+ // whole frame is set as inactive (since we only apply cyclic_refresh to
+ // active blocks).
+ if (!cpi->active_map.enabled || cpi->rc.percent_blocks_inactive == 100) {
+ unsigned char *const seg_map = cpi->enc_seg.map;
+ memset(seg_map, 0, cm->mi_params.mi_rows * cm->mi_params.mi_cols);
+ av1_disable_segmentation(&cm->seg);
+ }
if (frame_is_intra_only(cm) || scene_change_detected ||
cpi->ppi->rtc_ref.bias_recovery_frame) {
cr->sb_index = 0;
@@ -574,9 +596,11 @@ void av1_cyclic_refresh_setup(AV1_COMP *const cpi) {
cr->thresh_rate_sb = INT64_MAX;
}
// Set up segmentation.
- // Clear down the segment map.
av1_enable_segmentation(&cm->seg);
- av1_clearall_segfeatures(seg);
+ if (!cpi->active_map.enabled) {
+ // Clear down the segment map, only if active_maps is not enabled.
+ av1_clearall_segfeatures(seg);
+ }
// Note: setting temporal_update has no effect, as the seg-map coding method
// (temporal or spatial) is determined in
@@ -644,6 +668,10 @@ void av1_cyclic_refresh_reset_resize(AV1_COMP *const cpi) {
int av1_cyclic_refresh_disable_lf_cdef(AV1_COMP *const cpi) {
CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
const int qindex = cpi->common.quant_params.base_qindex;
+ if (cpi->active_map.enabled &&
+ cpi->rc.percent_blocks_inactive >
+ cpi->sf.rt_sf.thresh_active_maps_skip_lf_cdef)
+ return 1;
if (cpi->rc.frames_since_key > 30 && cr->percent_refresh > 0 &&
cr->counter_encode_maxq_scene_change > 300 / cr->percent_refresh &&
cpi->rc.frame_source_sad < 1000 &&
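
Note on the active-map scaling above: percent_refresh is reduced in proportion to the active area, so with a base value of 10 and percent_blocks_inactive = 80 the scaled value is 10 * (100 - 80) / 100 = 2; if the integer division reaches 0, apply_cyclic_refresh is switched off for the frame, and the new thresh_active_maps_skip_lf_cdef check can additionally skip loop filter and CDEF when most of the frame is inactive.
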
diff --git a/third_party/aom/av1/encoder/arm/neon/av1_error_sve.c b/third_party/aom/av1/encoder/arm/neon/av1_error_sve.c
index 63aad0b785..52803a9838 100644
--- a/third_party/aom/av1/encoder/arm/neon/av1_error_sve.c
+++ b/third_party/aom/av1/encoder/arm/neon/av1_error_sve.c
@@ -14,7 +14,7 @@
#include "config/aom_config.h"
#include "aom_dsp/aom_dsp_common.h"
-#include "aom_dsp/arm/dot_sve.h"
+#include "aom_dsp/arm/aom_neon_sve_bridge.h"
#include "aom_dsp/arm/mem_neon.h"
int64_t av1_block_error_sve(const tran_low_t *coeff, const tran_low_t *dqcoeff,
diff --git a/third_party/aom/av1/encoder/arm/neon/temporal_filter_neon_dotprod.c b/third_party/aom/av1/encoder/arm/neon/temporal_filter_neon_dotprod.c
index 5a52e701a2..919521fec7 100644
--- a/third_party/aom/av1/encoder/arm/neon/temporal_filter_neon_dotprod.c
+++ b/third_party/aom/av1/encoder/arm/neon/temporal_filter_neon_dotprod.c
@@ -23,7 +23,15 @@
#define SSE_STRIDE (BW + 4)
// clang-format off
+// Table used to pad the first and last columns and apply the sliding window.
+DECLARE_ALIGNED(16, static const uint8_t, kLoadPad[4][16]) = {
+ { 2, 2, 2, 3, 4, 255, 255, 255, 255, 2, 2, 3, 4, 5, 255, 255 },
+ { 255, 255, 2, 3, 4, 5, 6, 255, 255, 255, 255, 3, 4, 5, 6, 7 },
+ { 0, 1, 2, 3, 4, 255, 255, 255, 255, 1, 2, 3, 4, 5, 255, 255 },
+ { 255, 255, 2, 3, 4, 5, 5, 255, 255, 255, 255, 3, 4, 5, 5, 5 }
+};
+// For columns that don't need to be padded it's just a simple mask.
DECLARE_ALIGNED(16, static const uint8_t, kSlidingWindowMask[]) = {
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00,
0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00,
@@ -56,22 +64,6 @@ static INLINE void get_abs_diff(const uint8_t *frame1, const uint32_t stride1,
} while (++i < block_height);
}
-static INLINE uint8x16_t load_and_pad(const uint8_t *src, const uint32_t col,
- const uint32_t block_width) {
- uint8x8_t s = vld1_u8(src);
-
- if (col == 0) {
- const uint8_t lane2 = vget_lane_u8(s, 2);
- s = vset_lane_u8(lane2, s, 0);
- s = vset_lane_u8(lane2, s, 1);
- } else if (col >= block_width - 4) {
- const uint8_t lane5 = vget_lane_u8(s, 5);
- s = vset_lane_u8(lane5, s, 6);
- s = vset_lane_u8(lane5, s, 7);
- }
- return vcombine_u8(s, s);
-}
-
static void apply_temporal_filter(
const uint8_t *frame, const unsigned int stride, const uint32_t block_width,
const uint32_t block_height, const int *subblock_mses,
@@ -84,6 +76,10 @@ static void apply_temporal_filter(
uint32_t acc_5x5_neon[BH][BW];
const uint8x16x2_t vmask = vld1q_u8_x2(kSlidingWindowMask);
+ const uint8x16_t pad_tbl0 = vld1q_u8(kLoadPad[0]);
+ const uint8x16_t pad_tbl1 = vld1q_u8(kLoadPad[1]);
+ const uint8x16_t pad_tbl2 = vld1q_u8(kLoadPad[2]);
+ const uint8x16_t pad_tbl3 = vld1q_u8(kLoadPad[3]);
// Traverse 4 columns at a time - first and last two columns need padding.
for (uint32_t col = 0; col < block_width; col += 4) {
@@ -92,9 +88,18 @@ static void apply_temporal_filter(
// Load, pad (for first and last two columns) and mask 3 rows from the top.
for (int i = 2; i < 5; i++) {
- const uint8x16_t s = load_and_pad(src, col, block_width);
- vsrc[i][0] = vandq_u8(s, vmask.val[0]);
- vsrc[i][1] = vandq_u8(s, vmask.val[1]);
+ uint8x8_t s = vld1_u8(src);
+ uint8x16_t s_dup = vcombine_u8(s, s);
+ if (col == 0) {
+ vsrc[i][0] = vqtbl1q_u8(s_dup, pad_tbl0);
+ vsrc[i][1] = vqtbl1q_u8(s_dup, pad_tbl1);
+ } else if (col >= block_width - 4) {
+ vsrc[i][0] = vqtbl1q_u8(s_dup, pad_tbl2);
+ vsrc[i][1] = vqtbl1q_u8(s_dup, pad_tbl3);
+ } else {
+ vsrc[i][0] = vandq_u8(s_dup, vmask.val[0]);
+ vsrc[i][1] = vandq_u8(s_dup, vmask.val[1]);
+ }
src += SSE_STRIDE;
}
@@ -130,9 +135,18 @@ static void apply_temporal_filter(
if (row <= block_height - 4) {
// Load next row into the bottom of the sliding window.
- uint8x16_t s = load_and_pad(src, col, block_width);
- vsrc[4][0] = vandq_u8(s, vmask.val[0]);
- vsrc[4][1] = vandq_u8(s, vmask.val[1]);
+ uint8x8_t s = vld1_u8(src);
+ uint8x16_t s_dup = vcombine_u8(s, s);
+ if (col == 0) {
+ vsrc[4][0] = vqtbl1q_u8(s_dup, pad_tbl0);
+ vsrc[4][1] = vqtbl1q_u8(s_dup, pad_tbl1);
+ } else if (col >= block_width - 4) {
+ vsrc[4][0] = vqtbl1q_u8(s_dup, pad_tbl2);
+ vsrc[4][1] = vqtbl1q_u8(s_dup, pad_tbl3);
+ } else {
+ vsrc[4][0] = vandq_u8(s_dup, vmask.val[0]);
+ vsrc[4][1] = vandq_u8(s_dup, vmask.val[1]);
+ }
src += SSE_STRIDE;
} else {
// Pad the bottom 2 rows.
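The hunks above replace the lane-by-lane load_and_pad() helper with vqtbl1q_u8 table lookups: in a NEON table lookup any index >= 16 (the 255 entries in kLoadPad) writes zero, which takes over the masking previously done with kSlidingWindowMask for the padded first/last columns, while repeated indices duplicate the edge pixels the old code set with vset_lane_u8. A minimal scalar model of that lookup semantics, with illustrative pixel values (not libaom code):

#include <stdint.h>
#include <stdio.h>

/* Scalar equivalent of vqtbl1q_u8: out-of-range indices produce 0. */
static void tbl16(const uint8_t src[16], const uint8_t idx[16],
                  uint8_t dst[16]) {
  for (int i = 0; i < 16; ++i) dst[i] = (idx[i] < 16) ? src[idx[i]] : 0;
}

int main(void) {
  /* 8 loaded pixels duplicated into both halves, as vcombine_u8(s, s) does. */
  const uint8_t s[8] = { 10, 11, 12, 13, 14, 15, 16, 17 };
  uint8_t s_dup[16];
  for (int i = 0; i < 16; ++i) s_dup[i] = s[i % 8];

  /* kLoadPad[0]: pads column 0 by repeating pixel 2 into lanes 0 and 1 and
   * zeroing the lanes the old code cleared with kSlidingWindowMask. */
  const uint8_t pad_tbl0[16] = { 2, 2, 2, 3, 4, 255, 255, 255,
                                 255, 2, 2, 3, 4, 5, 255, 255 };
  uint8_t out[16];
  tbl16(s_dup, pad_tbl0, out);
  for (int i = 0; i < 16; ++i) printf("%u ", out[i]);
  printf("\n"); /* 12 12 12 13 14 0 0 0 0 12 12 13 14 15 0 0 */
  return 0;
}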
diff --git a/third_party/aom/av1/encoder/arm/neon/wedge_utils_sve.c b/third_party/aom/av1/encoder/arm/neon/wedge_utils_sve.c
new file mode 100644
index 0000000000..521601a3f3
--- /dev/null
+++ b/third_party/aom/av1/encoder/arm/neon/wedge_utils_sve.c
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2024, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <arm_neon.h>
+#include <assert.h>
+
+#include "aom_dsp/arm/aom_neon_sve_bridge.h"
+#include "aom_dsp/arm/sum_neon.h"
+#include "av1/common/reconinter.h"
+
+uint64_t av1_wedge_sse_from_residuals_sve(const int16_t *r1, const int16_t *d,
+ const uint8_t *m, int N) {
+ assert(N % 64 == 0);
+
+ // Predicate pattern with first 8 elements true.
+ const svbool_t pattern = svptrue_pat_b16(SV_VL8);
+ int64x2_t sse[2] = { vdupq_n_s64(0), vdupq_n_s64(0) };
+
+ int i = 0;
+ do {
+ int32x4_t sum[4];
+ int16x8_t sum_s16[2];
+
+ const int16x8_t r1_l = vld1q_s16(r1 + i);
+ const int16x8_t r1_h = vld1q_s16(r1 + i + 8);
+ const int16x8_t d_l = vld1q_s16(d + i);
+ const int16x8_t d_h = vld1q_s16(d + i + 8);
+
+ // Use a zero-extending load to widen the vector elements.
+ const int16x8_t m_l = svget_neonq_s16(svld1ub_s16(pattern, m + i));
+ const int16x8_t m_h = svget_neonq_s16(svld1ub_s16(pattern, m + i + 8));
+
+ sum[0] = vshll_n_s16(vget_low_s16(r1_l), WEDGE_WEIGHT_BITS);
+ sum[1] = vshll_n_s16(vget_high_s16(r1_l), WEDGE_WEIGHT_BITS);
+ sum[2] = vshll_n_s16(vget_low_s16(r1_h), WEDGE_WEIGHT_BITS);
+ sum[3] = vshll_n_s16(vget_high_s16(r1_h), WEDGE_WEIGHT_BITS);
+
+ sum[0] = vmlal_s16(sum[0], vget_low_s16(m_l), vget_low_s16(d_l));
+ sum[1] = vmlal_s16(sum[1], vget_high_s16(m_l), vget_high_s16(d_l));
+ sum[2] = vmlal_s16(sum[2], vget_low_s16(m_h), vget_low_s16(d_h));
+ sum[3] = vmlal_s16(sum[3], vget_high_s16(m_h), vget_high_s16(d_h));
+
+ sum_s16[0] = vcombine_s16(vqmovn_s32(sum[0]), vqmovn_s32(sum[1]));
+ sum_s16[1] = vcombine_s16(vqmovn_s32(sum[2]), vqmovn_s32(sum[3]));
+
+ sse[0] = aom_sdotq_s16(sse[0], sum_s16[0], sum_s16[0]);
+ sse[1] = aom_sdotq_s16(sse[1], sum_s16[1], sum_s16[1]);
+
+ i += 16;
+ } while (i < N);
+
+ const uint64_t csse =
+ (uint64_t)horizontal_add_s64x2(vaddq_s64(sse[0], sse[1]));
+ return ROUND_POWER_OF_TWO(csse, 2 * WEDGE_WEIGHT_BITS);
+}
+
+int8_t av1_wedge_sign_from_residuals_sve(const int16_t *ds, const uint8_t *m,
+ int N, int64_t limit) {
+ assert(N % 16 == 0);
+
+ // Predicate pattern with first 8 elements true.
+ svbool_t pattern = svptrue_pat_b16(SV_VL8);
+ int64x2_t acc_l = vdupq_n_s64(0);
+ int64x2_t acc_h = vdupq_n_s64(0);
+
+ do {
+ const int16x8_t ds_l = vld1q_s16(ds);
+ const int16x8_t ds_h = vld1q_s16(ds + 8);
+
+ // Use a zero-extending load to widen the vector elements.
+ const int16x8_t m_l = svget_neonq_s16(svld1ub_s16(pattern, m));
+ const int16x8_t m_h = svget_neonq_s16(svld1ub_s16(pattern, m + 8));
+
+ acc_l = aom_sdotq_s16(acc_l, ds_l, m_l);
+ acc_h = aom_sdotq_s16(acc_h, ds_h, m_h);
+
+ ds += 16;
+ m += 16;
+ N -= 16;
+ } while (N != 0);
+
+ const int64x2_t sum = vaddq_s64(acc_l, acc_h);
+ return horizontal_add_s64x2(sum) > limit;
+}
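A scalar sketch of what av1_wedge_sse_from_residuals_sve() accumulates can make the vector code easier to follow: each term is the residual r1 scaled by 2^WEDGE_WEIGHT_BITS plus the masked difference m*d, saturated to 16 bits (the vqmovn_s32 step), then squared and summed, with the total rounded down by 2*WEDGE_WEIGHT_BITS. WEDGE_WEIGHT_BITS = 6 in libaom is assumed here; the helper name below is illustrative, not a libaom function.

#include <stdint.h>

#define WEDGE_WEIGHT_BITS 6
#define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n)-1))) >> (n))

static uint64_t wedge_sse_from_residuals_scalar(const int16_t *r1,
                                                const int16_t *d,
                                                const uint8_t *m, int N) {
  uint64_t csse = 0;
  for (int i = 0; i < N; ++i) {
    /* Same per-element term the kernel builds with vshll_n_s16 + vmlal_s16. */
    int32_t t = (int32_t)r1[i] * (1 << WEDGE_WEIGHT_BITS) + m[i] * d[i];
    /* Mirror the saturating narrow (vqmovn_s32) applied before squaring. */
    if (t > INT16_MAX) t = INT16_MAX;
    if (t < INT16_MIN) t = INT16_MIN;
    /* aom_sdotq_s16 then squares and accumulates into 64-bit lanes. */
    csse += (uint64_t)((int64_t)t * t);
  }
  return ROUND_POWER_OF_TWO(csse, 2 * WEDGE_WEIGHT_BITS);
}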
diff --git a/third_party/aom/av1/encoder/av1_temporal_denoiser.c b/third_party/aom/av1/encoder/av1_temporal_denoiser.c
index 3012df6311..d4a1625612 100644
--- a/third_party/aom/av1/encoder/av1_temporal_denoiser.c
+++ b/third_party/aom/av1/encoder/av1_temporal_denoiser.c
@@ -489,7 +489,7 @@ static int av1_denoiser_realloc_svc_helper(AV1_COMMON *cm,
&denoiser->running_avg_y[fb_idx], cm->width, cm->height,
cm->seq_params->subsampling_x, cm->seq_params->subsampling_y,
cm->seq_params->use_highbitdepth, AOM_BORDER_IN_PIXELS,
- cm->features.byte_alignment, 0, 0);
+ cm->features.byte_alignment, false, 0);
if (fail) {
av1_denoiser_free(denoiser);
return 1;
@@ -577,7 +577,7 @@ int av1_denoiser_alloc(AV1_COMMON *cm, struct SVC *svc, AV1_DENOISER *denoiser,
fail = aom_alloc_frame_buffer(
&denoiser->running_avg_y[i + denoiser->num_ref_frames * layer],
denoise_width, denoise_height, ssx, ssy, use_highbitdepth, border,
- legacy_byte_alignment, 0, 0);
+ legacy_byte_alignment, false, 0);
if (fail) {
av1_denoiser_free(denoiser);
return 1;
@@ -589,7 +589,7 @@ int av1_denoiser_alloc(AV1_COMMON *cm, struct SVC *svc, AV1_DENOISER *denoiser,
fail = aom_alloc_frame_buffer(
&denoiser->mc_running_avg_y[layer], denoise_width, denoise_height, ssx,
- ssy, use_highbitdepth, border, legacy_byte_alignment, 0, 0);
+ ssy, use_highbitdepth, border, legacy_byte_alignment, false, 0);
if (fail) {
av1_denoiser_free(denoiser);
return 1;
@@ -600,7 +600,7 @@ int av1_denoiser_alloc(AV1_COMMON *cm, struct SVC *svc, AV1_DENOISER *denoiser,
// layer.
fail = aom_alloc_frame_buffer(&denoiser->last_source, width, height, ssx, ssy,
use_highbitdepth, border, legacy_byte_alignment,
- 0, 0);
+ false, 0);
if (fail) {
av1_denoiser_free(denoiser);
return 1;
diff --git a/third_party/aom/av1/encoder/bitstream.c b/third_party/aom/av1/encoder/bitstream.c
index 219784fedf..9981871147 100644
--- a/third_party/aom/av1/encoder/bitstream.c
+++ b/third_party/aom/av1/encoder/bitstream.c
@@ -3391,8 +3391,8 @@ int av1_write_uleb_obu_size(size_t obu_header_size, size_t obu_payload_size,
return AOM_CODEC_OK;
}
-size_t av1_obu_memmove(size_t obu_header_size, size_t obu_payload_size,
- uint8_t *data) {
+static size_t obu_memmove(size_t obu_header_size, size_t obu_payload_size,
+ uint8_t *data) {
const size_t length_field_size = aom_uleb_size_in_bytes(obu_payload_size);
const size_t move_dst_offset = length_field_size + obu_header_size;
const size_t move_src_offset = obu_header_size;
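For readers unfamiliar with the helper being renamed: obu_memmove shifts the OBU payload forward by the width of its ULEB128-encoded size, opening a gap between the OBU header and the payload into which av1_write_uleb_obu_size then writes the length field. A self-contained sketch of that preparation step (function names here are illustrative, not the libaom API):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Number of bytes needed to ULEB128-encode value (7 payload bits per byte). */
static size_t uleb_size_in_bytes(uint64_t value) {
  size_t size = 0;
  do {
    ++size;
  } while ((value >>= 7) != 0);
  return size;
}

/* Shift the payload right so the length field fits after the header.
 * The caller must have reserved length_field_size extra bytes in data. */
static size_t make_room_for_length(size_t header_size, size_t payload_size,
                                   uint8_t *data) {
  const size_t length_field_size = uleb_size_in_bytes(payload_size);
  memmove(data + header_size + length_field_size, data + header_size,
          payload_size);
  return length_field_size;
}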
@@ -3581,7 +3581,7 @@ static void write_large_scale_tile_obu_size(
*total_size += lst_obu->tg_hdr_size;
const uint32_t obu_payload_size = *total_size - lst_obu->tg_hdr_size;
const size_t length_field_size =
- av1_obu_memmove(lst_obu->tg_hdr_size, obu_payload_size, dst);
+ obu_memmove(lst_obu->tg_hdr_size, obu_payload_size, dst);
if (av1_write_uleb_obu_size(lst_obu->tg_hdr_size, obu_payload_size, dst) !=
AOM_CODEC_OK)
assert(0);
@@ -3806,7 +3806,7 @@ void av1_write_last_tile_info(
const uint32_t obu_payload_size =
(uint32_t)(*curr_tg_data_size) - obu_header_size;
const size_t length_field_size =
- av1_obu_memmove(obu_header_size, obu_payload_size, curr_tg_start);
+ obu_memmove(obu_header_size, obu_payload_size, curr_tg_start);
if (av1_write_uleb_obu_size(obu_header_size, obu_payload_size,
curr_tg_start) != AOM_CODEC_OK) {
assert(0);
@@ -4015,8 +4015,8 @@ static void write_tile_obu_size(AV1_COMP *const cpi, uint8_t *const dst,
// to pack the smaller bitstream of such frames. This function computes the
// required number of workers based on setup time overhead and job
// dispatch time overhead for the given tiles and available workers.
-int calc_pack_bs_mt_workers(const TileDataEnc *tile_data, int num_tiles,
- int avail_workers, bool pack_bs_mt_enabled) {
+static int calc_pack_bs_mt_workers(const TileDataEnc *tile_data, int num_tiles,
+ int avail_workers, bool pack_bs_mt_enabled) {
if (!pack_bs_mt_enabled) return 1;
uint64_t frame_abs_sum_level = 0;
@@ -4141,8 +4141,7 @@ static size_t av1_write_metadata_array(AV1_COMP *const cpi, uint8_t *dst) {
OBU_METADATA, 0, dst);
obu_payload_size =
av1_write_metadata_obu(current_metadata, dst + obu_header_size);
- length_field_size =
- av1_obu_memmove(obu_header_size, obu_payload_size, dst);
+ length_field_size = obu_memmove(obu_header_size, obu_payload_size, dst);
if (av1_write_uleb_obu_size(obu_header_size, obu_payload_size, dst) ==
AOM_CODEC_OK) {
const size_t obu_size = obu_header_size + obu_payload_size;
@@ -4192,7 +4191,7 @@ int av1_pack_bitstream(AV1_COMP *const cpi, uint8_t *dst, size_t *size,
obu_payload_size =
av1_write_sequence_header_obu(cm->seq_params, data + obu_header_size);
const size_t length_field_size =
- av1_obu_memmove(obu_header_size, obu_payload_size, data);
+ obu_memmove(obu_header_size, obu_payload_size, data);
if (av1_write_uleb_obu_size(obu_header_size, obu_payload_size, data) !=
AOM_CODEC_OK) {
return AOM_CODEC_ERROR;
@@ -4217,7 +4216,7 @@ int av1_pack_bitstream(AV1_COMP *const cpi, uint8_t *dst, size_t *size,
obu_payload_size = write_frame_header_obu(cpi, &cpi->td.mb.e_mbd, &saved_wb,
data + obu_header_size, 1);
- length_field = av1_obu_memmove(obu_header_size, obu_payload_size, data);
+ length_field = obu_memmove(obu_header_size, obu_payload_size, data);
if (av1_write_uleb_obu_size(obu_header_size, obu_payload_size, data) !=
AOM_CODEC_OK) {
return AOM_CODEC_ERROR;
diff --git a/third_party/aom/av1/encoder/bitstream.h b/third_party/aom/av1/encoder/bitstream.h
index 12e8a630db..d037039593 100644
--- a/third_party/aom/av1/encoder/bitstream.h
+++ b/third_party/aom/av1/encoder/bitstream.h
@@ -21,6 +21,7 @@ extern "C" {
#include "av1/common/enums.h"
#include "av1/encoder/level.h"
#include "aom_dsp/bitwriter.h"
+#include "aom_util/aom_pthread.h"
struct aom_write_bit_buffer;
struct AV1_COMP;
diff --git a/third_party/aom/av1/encoder/block.h b/third_party/aom/av1/encoder/block.h
index 33d2d8c2a0..1baf3f942e 100644
--- a/third_party/aom/av1/encoder/block.h
+++ b/third_party/aom/av1/encoder/block.h
@@ -1348,6 +1348,9 @@ typedef struct macroblock {
//! Motion vector from superblock MV derived from int_pro_motion() in
// the variance_partitioning.
int_mv sb_me_mv;
+ //! Flag to indicate if a fixed partition should be used, only if the
+ // speed feature rt_sf->use_fast_fixed_part is enabled.
+ int sb_force_fixed_part;
//! SSE of the current predictor.
unsigned int pred_sse[REF_FRAMES];
//! Prediction for ML based partition.
diff --git a/third_party/aom/av1/encoder/cnn.c b/third_party/aom/av1/encoder/cnn.c
index 598b362753..b019ace685 100644
--- a/third_party/aom/av1/encoder/cnn.c
+++ b/third_party/aom/av1/encoder/cnn.c
@@ -138,14 +138,16 @@ static bool concat_tensor(const TENSOR *src, TENSOR *dst) {
return true;
}
-int check_tensor_equal_dims(TENSOR *t1, TENSOR *t2) {
+#ifndef NDEBUG
+static int check_tensor_equal_dims(TENSOR *t1, TENSOR *t2) {
return (t1->width == t2->width && t1->height == t2->height);
}
-int check_tensor_equal_size(TENSOR *t1, TENSOR *t2) {
+static int check_tensor_equal_size(TENSOR *t1, TENSOR *t2) {
return (t1->channels == t2->channels && t1->width == t2->width &&
t1->height == t2->height);
}
+#endif // NDEBUG
void av1_find_cnn_layer_output_size(int in_width, int in_height,
const CNN_LAYER_CONFIG *layer_config,
@@ -189,8 +191,8 @@ void av1_find_cnn_layer_output_size(int in_width, int in_height,
}
}
-void find_cnn_out_channels(const CNN_LAYER_CONFIG *layer_config,
- int channels_per_branch[]) {
+static void find_cnn_out_channels(const CNN_LAYER_CONFIG *layer_config,
+ int channels_per_branch[]) {
int branch = layer_config->branch;
const CNN_BRANCH_CONFIG *branch_config = &layer_config->branch_config;
for (int b = 0; b < CNN_MAX_BRANCHES; ++b) {
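The cnn.c hunk above makes the dimension-checking helpers static and fences them with #ifndef NDEBUG, since they are only referenced from assert() calls; presumably this avoids unused-function warnings in release builds, where assert() compiles away. A generic sketch of the pattern, with illustrative names:

#include <assert.h>

#ifndef NDEBUG
/* Only used inside assert(); compiled out together with it under NDEBUG. */
static int is_sorted(const int *a, int n) {
  for (int i = 1; i < n; ++i)
    if (a[i - 1] > a[i]) return 0;
  return 1;
}
#endif  // NDEBUG

void consume_sorted(const int *a, int n) {
  assert(is_sorted(a, n));
  (void)a;  /* silence unused-parameter warnings when asserts are disabled */
  (void)n;
}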
diff --git a/third_party/aom/av1/encoder/encode_strategy.c b/third_party/aom/av1/encoder/encode_strategy.c
index 35ca83c3f4..db77dc0e3c 100644
--- a/third_party/aom/av1/encoder/encode_strategy.c
+++ b/third_party/aom/av1/encoder/encode_strategy.c
@@ -712,20 +712,6 @@ int av1_get_refresh_frame_flags(
}
#if !CONFIG_REALTIME_ONLY
-void setup_mi(AV1_COMP *const cpi, YV12_BUFFER_CONFIG *src) {
- AV1_COMMON *const cm = &cpi->common;
- const int num_planes = av1_num_planes(cm);
- MACROBLOCK *const x = &cpi->td.mb;
- MACROBLOCKD *const xd = &x->e_mbd;
-
- av1_setup_src_planes(x, src, 0, 0, num_planes, cm->seq_params->sb_size);
-
- av1_setup_block_planes(xd, cm->seq_params->subsampling_x,
- cm->seq_params->subsampling_y, num_planes);
-
- set_mi_offsets(&cm->mi_params, xd, 0, 0);
-}
-
// Apply temporal filtering to source frames and encode the filtered frame.
// If the current frame does not require filtering, this function is identical
// to av1_encode() except that tpl is not performed.
@@ -819,7 +805,7 @@ static int denoise_and_encode(AV1_COMP *const cpi, uint8_t *const dest,
oxcf->frm_dim_cfg.height, cm->seq_params->subsampling_x,
cm->seq_params->subsampling_y, cm->seq_params->use_highbitdepth,
cpi->oxcf.border_in_pixels, cm->features.byte_alignment, NULL, NULL,
- NULL, cpi->image_pyramid_levels, 0);
+ NULL, cpi->alloc_pyramid, 0);
if (ret)
aom_internal_error(cm->error, AOM_CODEC_MEM_ERROR,
"Failed to allocate tf_buf_second_arf");
@@ -923,7 +909,7 @@ static int denoise_and_encode(AV1_COMP *const cpi, uint8_t *const dest,
if (apply_filtering && is_psnr_calc_enabled(cpi)) {
cpi->source = av1_realloc_and_scale_if_required(
cm, source_buffer, &cpi->scaled_source, cm->features.interp_filter, 0,
- false, true, cpi->oxcf.border_in_pixels, cpi->image_pyramid_levels);
+ false, true, cpi->oxcf.border_in_pixels, cpi->alloc_pyramid);
cpi->unscaled_source = source_buffer;
}
#if CONFIG_COLLECT_COMPONENT_TIMING
@@ -1702,8 +1688,7 @@ int av1_encode_strategy(AV1_COMP *const cpi, size_t *const size,
// This is used in rtc temporal filter case. Use true source in the PSNR
// calculation.
- if (is_psnr_calc_enabled(cpi) && cpi->sf.rt_sf.use_rtc_tf &&
- cpi->common.current_frame.frame_type != KEY_FRAME) {
+ if (is_psnr_calc_enabled(cpi) && cpi->sf.rt_sf.use_rtc_tf) {
assert(cpi->orig_source.buffer_alloc_sz > 0);
cpi->source = &cpi->orig_source;
}
@@ -1758,9 +1743,9 @@ int av1_encode_strategy(AV1_COMP *const cpi, size_t *const size,
cpi->svc.temporal_layer_id == 0 &&
cpi->unscaled_source->y_width == cpi->svc.source_last_TL0.y_width &&
cpi->unscaled_source->y_height == cpi->svc.source_last_TL0.y_height) {
- aom_yv12_copy_y(cpi->unscaled_source, &cpi->svc.source_last_TL0);
- aom_yv12_copy_u(cpi->unscaled_source, &cpi->svc.source_last_TL0);
- aom_yv12_copy_v(cpi->unscaled_source, &cpi->svc.source_last_TL0);
+ aom_yv12_copy_y(cpi->unscaled_source, &cpi->svc.source_last_TL0, 1);
+ aom_yv12_copy_u(cpi->unscaled_source, &cpi->svc.source_last_TL0, 1);
+ aom_yv12_copy_v(cpi->unscaled_source, &cpi->svc.source_last_TL0, 1);
}
return AOM_CODEC_OK;
diff --git a/third_party/aom/av1/encoder/encodeframe.c b/third_party/aom/av1/encoder/encodeframe.c
index e2213a8355..a9214f77c2 100644
--- a/third_party/aom/av1/encoder/encodeframe.c
+++ b/third_party/aom/av1/encoder/encodeframe.c
@@ -23,7 +23,7 @@
#include "aom_dsp/binary_codes_writer.h"
#include "aom_ports/mem.h"
#include "aom_ports/aom_timer.h"
-
+#include "aom_util/aom_pthread.h"
#if CONFIG_MISMATCH_DEBUG
#include "aom_util/debug_util.h"
#endif // CONFIG_MISMATCH_DEBUG
@@ -536,8 +536,8 @@ static AOM_INLINE void encode_nonrd_sb(AV1_COMP *cpi, ThreadData *td,
#endif
// Set the partition
if (sf->part_sf.partition_search_type == FIXED_PARTITION || seg_skip ||
- (sf->rt_sf.use_fast_fixed_part &&
- x->content_state_sb.source_sad_nonrd < kMedSad)) {
+ (sf->rt_sf.use_fast_fixed_part && x->sb_force_fixed_part == 1 &&
+ !frame_is_intra_only(cm))) {
// set a fixed-size partition
av1_set_offsets(cpi, tile_info, x, mi_row, mi_col, sb_size);
BLOCK_SIZE bsize_select = sf->part_sf.fixed_partition_size;
@@ -1054,8 +1054,13 @@ static AOM_INLINE bool is_calc_src_content_needed(AV1_COMP *cpi,
// The threshold is determined based on kLowSad and kHighSad threshold and
// test results.
- const uint64_t thresh_low = 15000;
- const uint64_t thresh_high = 40000;
+ uint64_t thresh_low = 15000;
+ uint64_t thresh_high = 40000;
+
+ if (cpi->sf.rt_sf.increase_source_sad_thresh) {
+ thresh_low = thresh_low << 1;
+ thresh_high = thresh_high << 1;
+ }
if (avg_64x64_blk_sad > thresh_low && avg_64x64_blk_sad < thresh_high) {
do_calc_src_content = false;
@@ -1203,6 +1208,7 @@ static AOM_INLINE void encode_sb_row(AV1_COMP *cpi, ThreadData *td,
x->sb_me_block = 0;
x->sb_me_partition = 0;
x->sb_me_mv.as_int = 0;
+ x->sb_force_fixed_part = 1;
if (cpi->oxcf.mode == ALLINTRA) {
x->intra_sb_rdmult_modifier = 128;
@@ -1231,7 +1237,7 @@ static AOM_INLINE void encode_sb_row(AV1_COMP *cpi, ThreadData *td,
// Grade the temporal variation of the sb, the grade will be used to decide
// fast mode search strategy for coding blocks
- grade_source_content_sb(cpi, x, tile_data, mi_row, mi_col);
+ if (!seg_skip) grade_source_content_sb(cpi, x, tile_data, mi_row, mi_col);
// encode the superblock
if (use_nonrd_mode) {
@@ -2337,7 +2343,7 @@ void av1_encode_frame(AV1_COMP *cpi) {
// a source or a ref frame should have an image pyramid allocated.
// Check here so that issues can be caught early in debug mode
#if !defined(NDEBUG) && !CONFIG_REALTIME_ONLY
- if (cpi->image_pyramid_levels > 0) {
+ if (cpi->alloc_pyramid) {
assert(cpi->source->y_pyramid);
for (int ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
const RefCntBuffer *const buf = get_ref_frame_buf(cm, ref_frame);
diff --git a/third_party/aom/av1/encoder/encodeframe_utils.c b/third_party/aom/av1/encoder/encodeframe_utils.c
index 949837184a..a8e4a88396 100644
--- a/third_party/aom/av1/encoder/encodeframe_utils.c
+++ b/third_party/aom/av1/encoder/encodeframe_utils.c
@@ -15,6 +15,7 @@
#include "av1/encoder/encoder.h"
#include "av1/encoder/encodeframe_utils.h"
+#include "av1/encoder/encoder_utils.h"
#include "av1/encoder/rdopt.h"
void av1_set_ssim_rdmult(const AV1_COMP *const cpi, int *errorperbit,
@@ -306,6 +307,7 @@ void av1_update_state(const AV1_COMP *const cpi, ThreadData *td,
// Else for cyclic refresh mode update the segment map, set the segment id
// and then update the quantizer.
if (cpi->oxcf.q_cfg.aq_mode == CYCLIC_REFRESH_AQ &&
+ mi_addr->segment_id != AM_SEGMENT_ID_INACTIVE &&
!cpi->rc.rtc_external_ratectrl) {
av1_cyclic_refresh_update_segment(cpi, x, mi_row, mi_col, bsize,
ctx->rd_stats.rate, ctx->rd_stats.dist,
@@ -1431,6 +1433,10 @@ void av1_source_content_sb(AV1_COMP *cpi, MACROBLOCK *x, TileDataEnc *tile_data,
if ((tmp_sse - tmp_variance) < (sum_sq_thresh >> 1))
x->content_state_sb.low_sumdiff = 1;
+ if (tmp_sse > ((avg_source_sse_threshold_high * 7) >> 3) &&
+ !x->content_state_sb.lighting_change && !x->content_state_sb.low_sumdiff)
+ x->sb_force_fixed_part = 0;
+
if (!cpi->sf.rt_sf.use_rtc_tf || cpi->rc.high_source_sad ||
cpi->rc.frame_source_sad > 20000 || cpi->svc.number_spatial_layers > 1)
return;
diff --git a/third_party/aom/av1/encoder/encoder.c b/third_party/aom/av1/encoder/encoder.c
index fe053af5cc..1ddbfda08b 100644
--- a/third_party/aom/av1/encoder/encoder.c
+++ b/third_party/aom/av1/encoder/encoder.c
@@ -35,6 +35,7 @@
#include "aom_ports/aom_timer.h"
#include "aom_ports/mem.h"
#include "aom_scale/aom_scale.h"
+#include "aom_util/aom_pthread.h"
#if CONFIG_BITSTREAM_DEBUG
#include "aom_util/debug_util.h"
#endif // CONFIG_BITSTREAM_DEBUG
@@ -152,24 +153,33 @@ int av1_set_active_map(AV1_COMP *cpi, unsigned char *new_map_16x16, int rows,
unsigned char *const active_map_4x4 = cpi->active_map.map;
const int mi_rows = mi_params->mi_rows;
const int mi_cols = mi_params->mi_cols;
- const int row_scale = mi_size_high_log2[BLOCK_16X16];
- const int col_scale = mi_size_wide_log2[BLOCK_16X16];
cpi->active_map.update = 0;
- assert(mi_rows % 2 == 0);
- assert(mi_cols % 2 == 0);
+ cpi->rc.percent_blocks_inactive = 0;
+ assert(mi_rows % 2 == 0 && mi_rows > 0);
+ assert(mi_cols % 2 == 0 && mi_cols > 0);
if (new_map_16x16) {
- for (int r = 0; r < (mi_rows >> row_scale); ++r) {
- for (int c = 0; c < (mi_cols >> col_scale); ++c) {
- const uint8_t val = new_map_16x16[r * cols + c]
+ int num_samples = 0;
+ int num_blocks_inactive = 0;
+ for (int r = 0; r < mi_rows; r += 4) {
+ for (int c = 0; c < mi_cols; c += 4) {
+ const uint8_t val = new_map_16x16[(r >> 2) * cols + (c >> 2)]
? AM_SEGMENT_ID_ACTIVE
: AM_SEGMENT_ID_INACTIVE;
- active_map_4x4[(2 * r + 0) * mi_cols + (c + 0)] = val;
- active_map_4x4[(2 * r + 0) * mi_cols + (c + 1)] = val;
- active_map_4x4[(2 * r + 1) * mi_cols + (c + 0)] = val;
- active_map_4x4[(2 * r + 1) * mi_cols + (c + 1)] = val;
+ num_samples++;
+ if (val == AM_SEGMENT_ID_INACTIVE) num_blocks_inactive++;
+ const int row_max = AOMMIN(4, mi_rows - r);
+ const int col_max = AOMMIN(4, mi_cols - c);
+ for (int x = 0; x < row_max; ++x) {
+ for (int y = 0; y < col_max; ++y) {
+ active_map_4x4[(r + x) * mi_cols + (c + y)] = val;
+ }
+ }
}
}
cpi->active_map.enabled = 1;
+ cpi->active_map.update = 1;
+ cpi->rc.percent_blocks_inactive =
+ (num_blocks_inactive * 100) / num_samples;
}
return 0;
}
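The rewritten av1_set_active_map loop above replicates each incoming 16x16-block flag into its (up to) 4x4 group of 4x4 mi units, handling frame edges with AOMMIN, and records the share of inactive blocks in rc.percent_blocks_inactive. A standalone sketch of that mapping; the segment-id values and helper name are placeholders, the real ones come from the encoder headers:

#include <stdint.h>

enum { SEG_ACTIVE = 0, SEG_INACTIVE = 1 };  /* placeholder ids */
#define AOMMIN(a, b) ((a) < (b) ? (a) : (b))

/* map_16x16 holds one byte per 16x16 block (nonzero = active) in a grid of
 * `cols` columns; map_4x4 is mi_rows x mi_cols. Requires mi_rows, mi_cols > 0.
 * Returns the percentage of inactive 16x16 blocks. */
static int expand_active_map(const uint8_t *map_16x16, int cols, int mi_rows,
                             int mi_cols, uint8_t *map_4x4) {
  int num_samples = 0, num_inactive = 0;
  for (int r = 0; r < mi_rows; r += 4) {
    for (int c = 0; c < mi_cols; c += 4) {
      const uint8_t val =
          map_16x16[(r >> 2) * cols + (c >> 2)] ? SEG_ACTIVE : SEG_INACTIVE;
      num_samples++;
      if (val == SEG_INACTIVE) num_inactive++;
      const int row_max = AOMMIN(4, mi_rows - r);
      const int col_max = AOMMIN(4, mi_cols - c);
      for (int x = 0; x < row_max; ++x)
        for (int y = 0; y < col_max; ++y)
          map_4x4[(r + x) * mi_cols + (c + y)] = val;
    }
  }
  return (num_inactive * 100) / num_samples;
}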
@@ -943,14 +953,9 @@ void av1_change_config(struct AV1_COMP *cpi, const AV1EncoderConfig *oxcf,
#if CONFIG_REALTIME_ONLY
assert(!oxcf->tool_cfg.enable_global_motion);
- cpi->image_pyramid_levels = 0;
+ cpi->alloc_pyramid = false;
#else
- if (oxcf->tool_cfg.enable_global_motion) {
- cpi->image_pyramid_levels =
- global_motion_pyr_levels[default_global_motion_method];
- } else {
- cpi->image_pyramid_levels = 0;
- }
+ cpi->alloc_pyramid = oxcf->tool_cfg.enable_global_motion;
#endif // CONFIG_REALTIME_ONLY
}
@@ -2208,7 +2213,7 @@ void av1_set_frame_size(AV1_COMP *cpi, int width, int height) {
&cm->cur_frame->buf, cm->width, cm->height, seq_params->subsampling_x,
seq_params->subsampling_y, seq_params->use_highbitdepth,
cpi->oxcf.border_in_pixels, cm->features.byte_alignment, NULL, NULL,
- NULL, cpi->image_pyramid_levels, 0))
+ NULL, cpi->alloc_pyramid, 0))
aom_internal_error(cm->error, AOM_CODEC_MEM_ERROR,
"Failed to allocate frame buffer");
@@ -2389,7 +2394,10 @@ static void loopfilter_frame(AV1_COMP *cpi, AV1_COMMON *cm) {
const int use_loopfilter =
is_loopfilter_used(cm) && !cpi->mt_info.pipeline_lpf_mt_with_enc;
- const int use_cdef = is_cdef_used(cm);
+ const int use_cdef =
+ is_cdef_used(cm) && (!cpi->active_map.enabled ||
+ cpi->rc.percent_blocks_inactive <=
+ cpi->sf.rt_sf.thresh_active_maps_skip_lf_cdef);
const int use_superres = av1_superres_scaled(cm);
const int use_restoration = is_restoration_used(cm);
@@ -2498,7 +2506,8 @@ static int encode_without_recode(AV1_COMP *cpi) {
&cpi->svc.source_last_TL0, cpi->oxcf.frm_dim_cfg.width,
cpi->oxcf.frm_dim_cfg.height, seq_params->subsampling_x,
seq_params->subsampling_y, seq_params->use_highbitdepth,
- cpi->oxcf.border_in_pixels, cm->features.byte_alignment, 0, 0)) {
+ cpi->oxcf.border_in_pixels, cm->features.byte_alignment, false,
+ 0)) {
aom_internal_error(cm->error, AOM_CODEC_MEM_ERROR,
"Failed to allocate buffer for source_last_TL0");
}
@@ -2547,7 +2556,7 @@ static int encode_without_recode(AV1_COMP *cpi) {
cpi->source = av1_realloc_and_scale_if_required(
cm, unscaled, &cpi->scaled_source, filter_scaler, phase_scaler, true,
- false, cpi->oxcf.border_in_pixels, cpi->image_pyramid_levels);
+ false, cpi->oxcf.border_in_pixels, cpi->alloc_pyramid);
if (frame_is_intra_only(cm) || resize_pending != 0) {
const int current_size =
(cm->mi_params.mi_rows * cm->mi_params.mi_cols) >> 2;
@@ -2570,7 +2579,7 @@ static int encode_without_recode(AV1_COMP *cpi) {
cpi->last_source = av1_realloc_and_scale_if_required(
cm, cpi->unscaled_last_source, &cpi->scaled_last_source, filter_scaler,
phase_scaler, true, false, cpi->oxcf.border_in_pixels,
- cpi->image_pyramid_levels);
+ cpi->alloc_pyramid);
}
if (cpi->sf.rt_sf.use_temporal_noise_estimate) {
@@ -2647,12 +2656,8 @@ static int encode_without_recode(AV1_COMP *cpi) {
av1_setup_frame(cpi);
}
}
-
- if (q_cfg->aq_mode == CYCLIC_REFRESH_AQ) {
- suppress_active_map(cpi);
- av1_cyclic_refresh_setup(cpi);
- }
av1_apply_active_map(cpi);
+ if (q_cfg->aq_mode == CYCLIC_REFRESH_AQ) av1_cyclic_refresh_setup(cpi);
if (cm->seg.enabled) {
if (!cm->seg.update_data && cm->prev_frame) {
segfeatures_copy(&cm->seg, &cm->prev_frame->seg);
@@ -2667,26 +2672,26 @@ static int encode_without_recode(AV1_COMP *cpi) {
cm->cur_frame->seg.enabled = cm->seg.enabled;
// This is for rtc temporal filtering case.
- if (is_psnr_calc_enabled(cpi) && cpi->sf.rt_sf.use_rtc_tf &&
- cm->current_frame.frame_type != KEY_FRAME) {
+ if (is_psnr_calc_enabled(cpi) && cpi->sf.rt_sf.use_rtc_tf) {
const SequenceHeader *seq_params = cm->seq_params;
if (cpi->orig_source.buffer_alloc_sz == 0 ||
- cpi->last_source->y_width != cpi->source->y_width ||
- cpi->last_source->y_height != cpi->source->y_height) {
+ cpi->rc.prev_coded_width != cpi->oxcf.frm_dim_cfg.width ||
+ cpi->rc.prev_coded_height != cpi->oxcf.frm_dim_cfg.height) {
// Allocate a source buffer to store the true source for psnr calculation.
if (aom_alloc_frame_buffer(
&cpi->orig_source, cpi->oxcf.frm_dim_cfg.width,
cpi->oxcf.frm_dim_cfg.height, seq_params->subsampling_x,
seq_params->subsampling_y, seq_params->use_highbitdepth,
- cpi->oxcf.border_in_pixels, cm->features.byte_alignment, 0, 0))
+ cpi->oxcf.border_in_pixels, cm->features.byte_alignment, false,
+ 0))
aom_internal_error(cm->error, AOM_CODEC_MEM_ERROR,
"Failed to allocate scaled buffer");
}
- aom_yv12_copy_y(cpi->source, &cpi->orig_source);
- aom_yv12_copy_u(cpi->source, &cpi->orig_source);
- aom_yv12_copy_v(cpi->source, &cpi->orig_source);
+ aom_yv12_copy_y(cpi->source, &cpi->orig_source, 1);
+ aom_yv12_copy_u(cpi->source, &cpi->orig_source, 1);
+ aom_yv12_copy_v(cpi->source, &cpi->orig_source, 1);
}
#if CONFIG_COLLECT_COMPONENT_TIMING
@@ -2725,9 +2730,9 @@ static int encode_without_recode(AV1_COMP *cpi) {
(cm->width != cpi->unscaled_source->y_crop_width ||
cm->height != cpi->unscaled_source->y_crop_height)) {
cpi->scaled_last_source_available = 1;
- aom_yv12_copy_y(&cpi->scaled_source, &cpi->scaled_last_source);
- aom_yv12_copy_u(&cpi->scaled_source, &cpi->scaled_last_source);
- aom_yv12_copy_v(&cpi->scaled_source, &cpi->scaled_last_source);
+ aom_yv12_copy_y(&cpi->scaled_source, &cpi->scaled_last_source, 1);
+ aom_yv12_copy_u(&cpi->scaled_source, &cpi->scaled_last_source, 1);
+ aom_yv12_copy_v(&cpi->scaled_source, &cpi->scaled_last_source, 1);
}
#if CONFIG_COLLECT_COMPONENT_TIMING
@@ -2846,7 +2851,7 @@ static int encode_with_recode_loop(AV1_COMP *cpi, size_t *size, uint8_t *dest) {
}
cpi->source = av1_realloc_and_scale_if_required(
cm, cpi->unscaled_source, &cpi->scaled_source, EIGHTTAP_REGULAR, 0,
- false, false, cpi->oxcf.border_in_pixels, cpi->image_pyramid_levels);
+ false, false, cpi->oxcf.border_in_pixels, cpi->alloc_pyramid);
#if CONFIG_TUNE_BUTTERAUGLI
if (oxcf->tune_cfg.tuning == AOM_TUNE_BUTTERAUGLI) {
@@ -2866,7 +2871,7 @@ static int encode_with_recode_loop(AV1_COMP *cpi, size_t *size, uint8_t *dest) {
cpi->last_source = av1_realloc_and_scale_if_required(
cm, cpi->unscaled_last_source, &cpi->scaled_last_source,
EIGHTTAP_REGULAR, 0, false, false, cpi->oxcf.border_in_pixels,
- cpi->image_pyramid_levels);
+ cpi->alloc_pyramid);
}
int scale_references = 0;
@@ -4042,7 +4047,7 @@ int av1_encode(AV1_COMP *const cpi, uint8_t *const dest,
}
#if CONFIG_DENOISE
-static int apply_denoise_2d(AV1_COMP *cpi, YV12_BUFFER_CONFIG *sd,
+static int apply_denoise_2d(AV1_COMP *cpi, const YV12_BUFFER_CONFIG *sd,
int block_size, float noise_level,
int64_t time_stamp, int64_t end_time) {
AV1_COMMON *const cm = &cpi->common;
@@ -4077,7 +4082,7 @@ static int apply_denoise_2d(AV1_COMP *cpi, YV12_BUFFER_CONFIG *sd,
#endif
int av1_receive_raw_frame(AV1_COMP *cpi, aom_enc_frame_flags_t frame_flags,
- YV12_BUFFER_CONFIG *sd, int64_t time_stamp,
+ const YV12_BUFFER_CONFIG *sd, int64_t time_stamp,
int64_t end_time) {
AV1_COMMON *const cm = &cpi->common;
const SequenceHeader *const seq_params = cm->seq_params;
@@ -4139,8 +4144,7 @@ int av1_receive_raw_frame(AV1_COMP *cpi, aom_enc_frame_flags_t frame_flags,
#endif // CONFIG_DENOISE
if (av1_lookahead_push(cpi->ppi->lookahead, sd, time_stamp, end_time,
- use_highbitdepth, cpi->image_pyramid_levels,
- frame_flags)) {
+ use_highbitdepth, cpi->alloc_pyramid, frame_flags)) {
aom_set_error(cm->error, AOM_CODEC_ERROR, "av1_lookahead_push() failed");
res = -1;
}
diff --git a/third_party/aom/av1/encoder/encoder.h b/third_party/aom/av1/encoder/encoder.h
index e87ab9be1f..4de5d426ce 100644
--- a/third_party/aom/av1/encoder/encoder.h
+++ b/third_party/aom/av1/encoder/encoder.h
@@ -21,6 +21,7 @@
#include "config/aom_config.h"
#include "aom/aomcx.h"
+#include "aom_util/aom_pthread.h"
#include "av1/common/alloccommon.h"
#include "av1/common/av1_common_int.h"
@@ -3631,10 +3632,10 @@ typedef struct AV1_COMP {
unsigned int zeromv_skip_thresh_exit_part[BLOCK_SIZES_ALL];
/*!
- * Number of downsampling pyramid levels to allocate for each frame
+ * Should we allocate a downsampling pyramid for each frame buffer?
* This is currently only used for global motion
*/
- int image_pyramid_levels;
+ bool alloc_pyramid;
#if CONFIG_SALIENCY_MAP
/*!
@@ -3808,7 +3809,7 @@ int av1_init_parallel_frame_context(const AV1_COMP_DATA *const first_cpi_data,
* copy of the pointer.
*/
int av1_receive_raw_frame(AV1_COMP *cpi, aom_enc_frame_flags_t frame_flags,
- YV12_BUFFER_CONFIG *sd, int64_t time_stamp,
+ const YV12_BUFFER_CONFIG *sd, int64_t time_stamp,
int64_t end_time_stamp);
/*!\brief Encode a frame
@@ -4310,7 +4311,7 @@ static AOM_INLINE int is_psnr_calc_enabled(const AV1_COMP *cpi) {
const AV1_COMMON *const cm = &cpi->common;
return cpi->ppi->b_calculate_psnr && !is_stat_generation_stage(cpi) &&
- cm->show_frame;
+ cm->show_frame && !cpi->is_dropped_frame;
}
static INLINE int is_frame_resize_pending(const AV1_COMP *const cpi) {
diff --git a/third_party/aom/av1/encoder/encoder_alloc.h b/third_party/aom/av1/encoder/encoder_alloc.h
index ce48496d48..f24d4b0a10 100644
--- a/third_party/aom/av1/encoder/encoder_alloc.h
+++ b/third_party/aom/av1/encoder/encoder_alloc.h
@@ -439,8 +439,7 @@ static AOM_INLINE YV12_BUFFER_CONFIG *realloc_and_scale_source(
&cpi->scaled_source, scaled_width, scaled_height,
cm->seq_params->subsampling_x, cm->seq_params->subsampling_y,
cm->seq_params->use_highbitdepth, AOM_BORDER_IN_PIXELS,
- cm->features.byte_alignment, NULL, NULL, NULL,
- cpi->image_pyramid_levels, 0))
+ cm->features.byte_alignment, NULL, NULL, NULL, cpi->alloc_pyramid, 0))
aom_internal_error(cm->error, AOM_CODEC_MEM_ERROR,
"Failed to reallocate scaled source buffer");
assert(cpi->scaled_source.y_crop_width == scaled_width);
diff --git a/third_party/aom/av1/encoder/encoder_utils.c b/third_party/aom/av1/encoder/encoder_utils.c
index c35873d207..1f81a530c9 100644
--- a/third_party/aom/av1/encoder/encoder_utils.c
+++ b/third_party/aom/av1/encoder/encoder_utils.c
@@ -9,8 +9,11 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
+#include <string.h>
+
#include "aom/aomcx.h"
+#include "av1/common/av1_common_int.h"
#include "av1/encoder/bitstream.h"
#include "av1/encoder/encodeframe.h"
#include "av1/encoder/encoder.h"
@@ -421,11 +424,13 @@ void av1_apply_active_map(AV1_COMP *cpi) {
struct segmentation *const seg = &cpi->common.seg;
unsigned char *const seg_map = cpi->enc_seg.map;
const unsigned char *const active_map = cpi->active_map.map;
- int i;
assert(AM_SEGMENT_ID_ACTIVE == CR_SEGMENT_ID_BASE);
- if (frame_is_intra_only(&cpi->common)) {
+ // Disable the active_maps on intra_only frames or if the
+ // input map for the current frame has no inactive blocks.
+ if (frame_is_intra_only(&cpi->common) ||
+ cpi->rc.percent_blocks_inactive == 0) {
cpi->active_map.enabled = 0;
cpi->active_map.update = 1;
}
@@ -434,8 +439,7 @@ void av1_apply_active_map(AV1_COMP *cpi) {
if (cpi->active_map.enabled) {
const int num_mis =
cpi->common.mi_params.mi_rows * cpi->common.mi_params.mi_cols;
- for (i = 0; i < num_mis; ++i)
- if (seg_map[i] == AM_SEGMENT_ID_ACTIVE) seg_map[i] = active_map[i];
+ memcpy(seg_map, active_map, sizeof(active_map[0]) * num_mis);
av1_enable_segmentation(seg);
av1_enable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_SKIP);
av1_enable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_ALT_LF_Y_H);
@@ -725,7 +729,7 @@ void av1_scale_references(AV1_COMP *cpi, const InterpFilter filter,
RefCntBuffer *ref_fb = get_ref_frame_buf(cm, ref_frame);
if (aom_yv12_realloc_with_new_border(
&ref_fb->buf, AOM_BORDER_IN_PIXELS,
- cm->features.byte_alignment, cpi->image_pyramid_levels,
+ cm->features.byte_alignment, cpi->alloc_pyramid,
num_planes) != 0) {
aom_internal_error(cm->error, AOM_CODEC_MEM_ERROR,
"Failed to allocate frame buffer");
@@ -749,7 +753,7 @@ void av1_scale_references(AV1_COMP *cpi, const InterpFilter filter,
&new_fb->buf, cm->width, cm->height,
cm->seq_params->subsampling_x, cm->seq_params->subsampling_y,
cm->seq_params->use_highbitdepth, AOM_BORDER_IN_PIXELS,
- cm->features.byte_alignment, NULL, NULL, NULL, 0, 0)) {
+ cm->features.byte_alignment, NULL, NULL, NULL, false, 0)) {
if (force_scaling) {
// Release the reference acquired in the get_free_fb() call above.
--new_fb->ref_count;
@@ -1087,12 +1091,12 @@ void av1_determine_sc_tools_with_encoding(AV1_COMP *cpi, const int q_orig) {
cpi->source = av1_realloc_and_scale_if_required(
cm, cpi->unscaled_source, &cpi->scaled_source, cm->features.interp_filter,
- 0, false, false, cpi->oxcf.border_in_pixels, cpi->image_pyramid_levels);
+ 0, false, false, cpi->oxcf.border_in_pixels, cpi->alloc_pyramid);
if (cpi->unscaled_last_source != NULL) {
cpi->last_source = av1_realloc_and_scale_if_required(
cm, cpi->unscaled_last_source, &cpi->scaled_last_source,
cm->features.interp_filter, 0, false, false, cpi->oxcf.border_in_pixels,
- cpi->image_pyramid_levels);
+ cpi->alloc_pyramid);
}
av1_setup_frame(cpi);
diff --git a/third_party/aom/av1/encoder/encodetxb.c b/third_party/aom/av1/encoder/encodetxb.c
index 5fe2a497c7..701c5489fe 100644
--- a/third_party/aom/av1/encoder/encodetxb.c
+++ b/third_party/aom/av1/encoder/encodetxb.c
@@ -134,14 +134,14 @@ int av1_get_eob_pos_token(const int eob, int *const extra) {
}
#if CONFIG_ENTROPY_STATS
-void av1_update_eob_context(int cdf_idx, int eob, TX_SIZE tx_size,
- TX_CLASS tx_class, PLANE_TYPE plane,
- FRAME_CONTEXT *ec_ctx, FRAME_COUNTS *counts,
- uint8_t allow_update_cdf) {
+static void update_eob_context(int cdf_idx, int eob, TX_SIZE tx_size,
+ TX_CLASS tx_class, PLANE_TYPE plane,
+ FRAME_CONTEXT *ec_ctx, FRAME_COUNTS *counts,
+ uint8_t allow_update_cdf) {
#else
-void av1_update_eob_context(int eob, TX_SIZE tx_size, TX_CLASS tx_class,
- PLANE_TYPE plane, FRAME_CONTEXT *ec_ctx,
- uint8_t allow_update_cdf) {
+static void update_eob_context(int eob, TX_SIZE tx_size, TX_CLASS tx_class,
+ PLANE_TYPE plane, FRAME_CONTEXT *ec_ctx,
+ uint8_t allow_update_cdf) {
#endif
int eob_extra;
const int eob_pt = av1_get_eob_pos_token(eob, &eob_extra);
@@ -623,11 +623,11 @@ void av1_update_and_record_txb_context(int plane, int block, int blk_row,
td->rd_counts.tx_type_used[tx_size][tx_type]++;
#if CONFIG_ENTROPY_STATS
- av1_update_eob_context(cdf_idx, eob, tx_size, tx_class, plane_type, ec_ctx,
- td->counts, allow_update_cdf);
+ update_eob_context(cdf_idx, eob, tx_size, tx_class, plane_type, ec_ctx,
+ td->counts, allow_update_cdf);
#else
- av1_update_eob_context(eob, tx_size, tx_class, plane_type, ec_ctx,
- allow_update_cdf);
+ update_eob_context(eob, tx_size, tx_class, plane_type, ec_ctx,
+ allow_update_cdf);
#endif
DECLARE_ALIGNED(16, int8_t, coeff_contexts[MAX_TX_SQUARE]);
@@ -785,8 +785,8 @@ void av1_record_txb_context(int plane, int block, int blk_row, int blk_col,
#if CONFIG_ENTROPY_STATS
FRAME_CONTEXT *ec_ctx = xd->tile_ctx;
- av1_update_eob_context(cdf_idx, eob, tx_size, tx_class, plane_type, ec_ctx,
- td->counts, 0 /*allow_update_cdf*/);
+ update_eob_context(cdf_idx, eob, tx_size, tx_class, plane_type, ec_ctx,
+ td->counts, 0 /*allow_update_cdf*/);
DECLARE_ALIGNED(16, int8_t, coeff_contexts[MAX_TX_SQUARE]);
av1_get_nz_map_contexts(levels, scan, eob, tx_size, tx_class,
diff --git a/third_party/aom/av1/encoder/ethread.c b/third_party/aom/av1/encoder/ethread.c
index d6a806d504..755535ba51 100644
--- a/third_party/aom/av1/encoder/ethread.c
+++ b/third_party/aom/av1/encoder/ethread.c
@@ -12,6 +12,8 @@
#include <assert.h>
#include <stdbool.h>
+#include "aom_util/aom_pthread.h"
+
#include "av1/common/warped_motion.h"
#include "av1/common/thread_common.h"
@@ -1415,7 +1417,7 @@ static AOM_INLINE void sync_fpmt_workers(AV1_PRIMARY *ppi,
int num_workers = ppi->p_mt_info.p_num_workers;
int had_error = 0;
// Points to error in the earliest display order frame in the parallel set.
- const struct aom_internal_error_info *error;
+ const struct aom_internal_error_info *error = NULL;
// Encoding ends.
for (int i = num_workers - 1; i >= 0; --i) {
@@ -2227,8 +2229,8 @@ void av1_tpl_dealloc(AV1TplRowMultiThreadSync *tpl_sync) {
}
// Allocate memory for tpl row synchronization.
-void av1_tpl_alloc(AV1TplRowMultiThreadSync *tpl_sync, AV1_COMMON *cm,
- int mb_rows) {
+static void av1_tpl_alloc(AV1TplRowMultiThreadSync *tpl_sync, AV1_COMMON *cm,
+ int mb_rows) {
tpl_sync->rows = mb_rows;
#if CONFIG_MULTITHREAD
{
diff --git a/third_party/aom/av1/encoder/firstpass.c b/third_party/aom/av1/encoder/firstpass.c
index e20b6c177e..b94a50714a 100644
--- a/third_party/aom/av1/encoder/firstpass.c
+++ b/third_party/aom/av1/encoder/firstpass.c
@@ -22,6 +22,7 @@
#include "aom_ports/mem.h"
#include "aom_scale/aom_scale.h"
#include "aom_scale/yv12config.h"
+#include "aom_util/aom_pthread.h"
#include "av1/common/entropymv.h"
#include "av1/common/quant_common.h"
diff --git a/third_party/aom/av1/encoder/global_motion.c b/third_party/aom/av1/encoder/global_motion.c
index 73910de121..0ae47809c6 100644
--- a/third_party/aom/av1/encoder/global_motion.c
+++ b/third_party/aom/av1/encoder/global_motion.c
@@ -30,83 +30,6 @@
// Border over which to compute the global motion
#define ERRORADV_BORDER 0
-/* clang-format off */
-// Error metric used for global motion evaluation.
-// For 8-bit input, the pixel error used to index this table will always
-// be between -255 and +255. But for 10- and 12-bit input, we use interpolation
-// which means that we need to support indices of -256 and +256 as well.
-// Therefore, the table is offset so that logical index 0 corresponds to
-// error_measure_lut[256].
-const int error_measure_lut[513] = {
- // pow 0.7
- 16384, 16384, 16339, 16294, 16249, 16204, 16158, 16113,
- 16068, 16022, 15977, 15932, 15886, 15840, 15795, 15749,
- 15703, 15657, 15612, 15566, 15520, 15474, 15427, 15381,
- 15335, 15289, 15242, 15196, 15149, 15103, 15056, 15010,
- 14963, 14916, 14869, 14822, 14775, 14728, 14681, 14634,
- 14587, 14539, 14492, 14445, 14397, 14350, 14302, 14254,
- 14206, 14159, 14111, 14063, 14015, 13967, 13918, 13870,
- 13822, 13773, 13725, 13676, 13628, 13579, 13530, 13481,
- 13432, 13383, 13334, 13285, 13236, 13187, 13137, 13088,
- 13038, 12988, 12939, 12889, 12839, 12789, 12739, 12689,
- 12639, 12588, 12538, 12487, 12437, 12386, 12335, 12285,
- 12234, 12183, 12132, 12080, 12029, 11978, 11926, 11875,
- 11823, 11771, 11719, 11667, 11615, 11563, 11511, 11458,
- 11406, 11353, 11301, 11248, 11195, 11142, 11089, 11036,
- 10982, 10929, 10875, 10822, 10768, 10714, 10660, 10606,
- 10552, 10497, 10443, 10388, 10333, 10279, 10224, 10168,
- 10113, 10058, 10002, 9947, 9891, 9835, 9779, 9723,
- 9666, 9610, 9553, 9497, 9440, 9383, 9326, 9268,
- 9211, 9153, 9095, 9037, 8979, 8921, 8862, 8804,
- 8745, 8686, 8627, 8568, 8508, 8449, 8389, 8329,
- 8269, 8208, 8148, 8087, 8026, 7965, 7903, 7842,
- 7780, 7718, 7656, 7593, 7531, 7468, 7405, 7341,
- 7278, 7214, 7150, 7086, 7021, 6956, 6891, 6826,
- 6760, 6695, 6628, 6562, 6495, 6428, 6361, 6293,
- 6225, 6157, 6089, 6020, 5950, 5881, 5811, 5741,
- 5670, 5599, 5527, 5456, 5383, 5311, 5237, 5164,
- 5090, 5015, 4941, 4865, 4789, 4713, 4636, 4558,
- 4480, 4401, 4322, 4242, 4162, 4080, 3998, 3916,
- 3832, 3748, 3663, 3577, 3490, 3402, 3314, 3224,
- 3133, 3041, 2948, 2854, 2758, 2661, 2562, 2461,
- 2359, 2255, 2148, 2040, 1929, 1815, 1698, 1577,
- 1452, 1323, 1187, 1045, 894, 731, 550, 339,
- 0, 339, 550, 731, 894, 1045, 1187, 1323,
- 1452, 1577, 1698, 1815, 1929, 2040, 2148, 2255,
- 2359, 2461, 2562, 2661, 2758, 2854, 2948, 3041,
- 3133, 3224, 3314, 3402, 3490, 3577, 3663, 3748,
- 3832, 3916, 3998, 4080, 4162, 4242, 4322, 4401,
- 4480, 4558, 4636, 4713, 4789, 4865, 4941, 5015,
- 5090, 5164, 5237, 5311, 5383, 5456, 5527, 5599,
- 5670, 5741, 5811, 5881, 5950, 6020, 6089, 6157,
- 6225, 6293, 6361, 6428, 6495, 6562, 6628, 6695,
- 6760, 6826, 6891, 6956, 7021, 7086, 7150, 7214,
- 7278, 7341, 7405, 7468, 7531, 7593, 7656, 7718,
- 7780, 7842, 7903, 7965, 8026, 8087, 8148, 8208,
- 8269, 8329, 8389, 8449, 8508, 8568, 8627, 8686,
- 8745, 8804, 8862, 8921, 8979, 9037, 9095, 9153,
- 9211, 9268, 9326, 9383, 9440, 9497, 9553, 9610,
- 9666, 9723, 9779, 9835, 9891, 9947, 10002, 10058,
- 10113, 10168, 10224, 10279, 10333, 10388, 10443, 10497,
- 10552, 10606, 10660, 10714, 10768, 10822, 10875, 10929,
- 10982, 11036, 11089, 11142, 11195, 11248, 11301, 11353,
- 11406, 11458, 11511, 11563, 11615, 11667, 11719, 11771,
- 11823, 11875, 11926, 11978, 12029, 12080, 12132, 12183,
- 12234, 12285, 12335, 12386, 12437, 12487, 12538, 12588,
- 12639, 12689, 12739, 12789, 12839, 12889, 12939, 12988,
- 13038, 13088, 13137, 13187, 13236, 13285, 13334, 13383,
- 13432, 13481, 13530, 13579, 13628, 13676, 13725, 13773,
- 13822, 13870, 13918, 13967, 14015, 14063, 14111, 14159,
- 14206, 14254, 14302, 14350, 14397, 14445, 14492, 14539,
- 14587, 14634, 14681, 14728, 14775, 14822, 14869, 14916,
- 14963, 15010, 15056, 15103, 15149, 15196, 15242, 15289,
- 15335, 15381, 15427, 15474, 15520, 15566, 15612, 15657,
- 15703, 15749, 15795, 15840, 15886, 15932, 15977, 16022,
- 16068, 16113, 16158, 16204, 16249, 16294, 16339, 16384,
- 16384,
-};
-/* clang-format on */
-
int av1_is_enough_erroradvantage(double best_erroradvantage, int params_cost) {
return best_erroradvantage < erroradv_tr &&
best_erroradvantage * params_cost < erroradv_prod_tr;
@@ -541,6 +464,11 @@ int64_t av1_refine_integerized_param(
}
wm->wmtype = get_wmtype(wm);
+ // Recompute shear params for the refined model
+ // This should never fail, because we only ever consider warp-able models
+ if (!av1_get_shear_params(wm)) {
+ assert(0);
+ }
return best_error;
}
diff --git a/third_party/aom/av1/encoder/global_motion.h b/third_party/aom/av1/encoder/global_motion.h
index 8c9c60f0f5..de46a0e1f2 100644
--- a/third_party/aom/av1/encoder/global_motion.h
+++ b/third_party/aom/av1/encoder/global_motion.h
@@ -15,6 +15,7 @@
#include "aom/aom_integer.h"
#include "aom_dsp/flow_estimation/flow_estimation.h"
#include "aom_scale/yv12config.h"
+#include "aom_util/aom_pthread.h"
#include "aom_util/aom_thread.h"
#ifdef __cplusplus
@@ -97,37 +98,6 @@ void av1_compute_feature_segmentation_map(uint8_t *segment_map, int width,
int height, int *inliers,
int num_inliers);
-extern const int error_measure_lut[513];
-
-static INLINE int error_measure(int err) {
- return error_measure_lut[256 + err];
-}
-
-#if CONFIG_AV1_HIGHBITDEPTH
-static INLINE int highbd_error_measure(int err, int bd) {
- const int b = bd - 8;
- const int bmask = (1 << b) - 1;
- const int v = (1 << b);
-
- // Split error into two parts and do an interpolated table lookup
- // To compute the table index and interpolation value, we want to calculate
- // the quotient and remainder of err / 2^b. But it is very important that
- // the division must round down, and the remainder must be positive,
- // ie. in the range [0, 2^b).
- //
- // In C, the >> and & operators do what we want, but the / and % operators
- // give the wrong results for negative inputs. So we must use >> and & here.
- //
- // For example, if bd == 10 and err == -5, compare the results:
- // (-5) >> 2 = -2, (-5) & 3 = 3
- // vs. (-5) / 4 = -1, (-5) % 4 = -1
- const int e1 = err >> b;
- const int e2 = err & bmask;
- return error_measure_lut[256 + e1] * (v - e2) +
- error_measure_lut[257 + e1] * e2;
-}
-#endif // CONFIG_AV1_HIGHBITDEPTH
-
int64_t av1_segmented_frame_error(int use_hbd, int bd, const uint8_t *ref,
int ref_stride, uint8_t *dst, int dst_stride,
int p_width, int p_height,
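The highbd_error_measure comment removed above makes a point that is easy to forget: for negative pixel errors, >> and & give the floored quotient and the non-negative remainder needed for the interpolated table lookup, whereas / and % truncate toward zero. A tiny standalone demo (arithmetic right shift of negative values is implementation-defined in C, but holds on the two's-complement targets libaom supports):

#include <stdio.h>

int main(void) {
  const int err = -5, b = 2;
  const int bmask = (1 << b) - 1;
  printf("shift/and: q=%d r=%d\n", err >> b, err & bmask);          /* -2, 3 */
  printf("div/mod:   q=%d r=%d\n", err / (1 << b), err % (1 << b)); /* -1, -1 */
  return 0;
}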
diff --git a/third_party/aom/av1/encoder/global_motion_facade.c b/third_party/aom/av1/encoder/global_motion_facade.c
index 02a4e70ed3..687eeee18a 100644
--- a/third_party/aom/av1/encoder/global_motion_facade.c
+++ b/third_party/aom/av1/encoder/global_motion_facade.c
@@ -89,6 +89,7 @@ static AOM_INLINE void compute_global_motion_for_ref_frame(
assert(ref_buf[frame] != NULL);
int bit_depth = cpi->common.seq_params->bit_depth;
GlobalMotionMethod global_motion_method = default_global_motion_method;
+ int downsample_level = cpi->sf.gm_sf.downsample_level;
int num_refinements = cpi->sf.gm_sf.num_refinement_steps;
bool mem_alloc_failed = false;
@@ -99,9 +100,10 @@ static AOM_INLINE void compute_global_motion_for_ref_frame(
double best_erroradv = erroradv_tr;
for (TransformationType model = FIRST_GLOBAL_TRANS_TYPE;
model <= LAST_GLOBAL_TRANS_TYPE; ++model) {
- if (!aom_compute_global_motion(
- model, cpi->source, ref_buf[frame], bit_depth, global_motion_method,
- motion_models, RANSAC_NUM_MOTIONS, &mem_alloc_failed)) {
+ if (!aom_compute_global_motion(model, cpi->source, ref_buf[frame],
+ bit_depth, global_motion_method,
+ downsample_level, motion_models,
+ RANSAC_NUM_MOTIONS, &mem_alloc_failed)) {
if (mem_alloc_failed) {
aom_internal_error(error_info, AOM_CODEC_MEM_ERROR,
"Failed to allocate global motion buffers");
@@ -115,6 +117,9 @@ static AOM_INLINE void compute_global_motion_for_ref_frame(
WarpedMotionParams tmp_wm_params;
av1_convert_model_to_params(motion_models[i].params, &tmp_wm_params);
+ // Check that the generated model is warp-able
+ if (!av1_get_shear_params(&tmp_wm_params)) continue;
+
// Skip models that we won't use (IDENTITY or TRANSLATION)
//
// For IDENTITY type models, we don't need to evaluate anything because
@@ -151,6 +156,14 @@ static AOM_INLINE void compute_global_motion_for_ref_frame(
double erroradvantage = (double)warp_error / ref_frame_error;
+ // Check that the model signaling cost is not too high
+ if (!av1_is_enough_erroradvantage(
+ erroradvantage,
+ gm_get_params_cost(&tmp_wm_params, ref_params,
+ cm->features.allow_high_precision_mv))) {
+ continue;
+ }
+
if (erroradvantage < best_erroradv) {
best_erroradv = erroradvantage;
// Save the wm_params modified by
@@ -161,34 +174,6 @@ static AOM_INLINE void compute_global_motion_for_ref_frame(
}
}
}
-
- if (!av1_get_shear_params(&cm->global_motion[frame]))
- cm->global_motion[frame] = default_warp_params;
-
-#if 0
- // We never choose translational models, so this code is disabled
- if (cm->global_motion[frame].wmtype == TRANSLATION) {
- cm->global_motion[frame].wmmat[0] =
- convert_to_trans_prec(cm->features.allow_high_precision_mv,
- cm->global_motion[frame].wmmat[0]) *
- GM_TRANS_ONLY_DECODE_FACTOR;
- cm->global_motion[frame].wmmat[1] =
- convert_to_trans_prec(cm->features.allow_high_precision_mv,
- cm->global_motion[frame].wmmat[1]) *
- GM_TRANS_ONLY_DECODE_FACTOR;
- }
-#endif
-
- if (cm->global_motion[frame].wmtype == IDENTITY) return;
-
- // If the best error advantage found doesn't meet the threshold for
- // this motion type, revert to IDENTITY.
- if (!av1_is_enough_erroradvantage(
- best_erroradv,
- gm_get_params_cost(&cm->global_motion[frame], ref_params,
- cm->features.allow_high_precision_mv))) {
- cm->global_motion[frame] = default_warp_params;
- }
}
// Computes global motion for the given reference frame.
diff --git a/third_party/aom/av1/encoder/k_means_template.h b/third_party/aom/av1/encoder/k_means_template.h
index 4be2038a6f..239029345d 100644
--- a/third_party/aom/av1/encoder/k_means_template.h
+++ b/third_party/aom/av1/encoder/k_means_template.h
@@ -24,6 +24,9 @@
#define RENAME_(x, y) AV1_K_MEANS_RENAME(x, y)
#define RENAME(x) RENAME_(x, AV1_K_MEANS_DIM)
+#define K_MEANS_RENAME_C(x, y) x##_dim##y##_c
+#define RENAME_C_(x, y) K_MEANS_RENAME_C(x, y)
+#define RENAME_C(x) RENAME_C_(x, AV1_K_MEANS_DIM)
// Though we want to compute the smallest L2 norm, in 1 dimension,
// it is equivalent to finding the smallest L1 norm and then squaring it.
@@ -41,8 +44,8 @@ static int RENAME(calc_dist)(const int16_t *p1, const int16_t *p2) {
#endif
}
-void RENAME(av1_calc_indices)(const int16_t *data, const int16_t *centroids,
- uint8_t *indices, int64_t *dist, int n, int k) {
+void RENAME_C(av1_calc_indices)(const int16_t *data, const int16_t *centroids,
+ uint8_t *indices, int64_t *dist, int n, int k) {
if (dist) {
*dist = 0;
}
@@ -149,3 +152,6 @@ void RENAME(av1_k_means)(const int16_t *data, int16_t *centroids,
}
#undef RENAME_
#undef RENAME
+#undef K_MEANS_RENAME_C
+#undef RENAME_C_
+#undef RENAME_C
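The net effect of the renaming above: RENAME() now expands to the dispatchable symbol while the new RENAME_C() appends the _c suffix, apparently so av1_calc_indices_dim{1,2} can be routed through RTCD to SIMD implementations while this template keeps providing the C fallback. A small compile-and-run sketch of the expansions (function name chosen for illustration):

#include <stdio.h>

#define AV1_K_MEANS_RENAME(func, dim) func##_dim##dim
#define K_MEANS_RENAME_C(x, y) x##_dim##y##_c
#define STR_(x) #x
#define STR(x) STR_(x)

int main(void) {
  puts(STR(AV1_K_MEANS_RENAME(av1_calc_indices, 1))); /* av1_calc_indices_dim1 */
  puts(STR(K_MEANS_RENAME_C(av1_calc_indices, 1)));   /* av1_calc_indices_dim1_c */
  return 0;
}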
diff --git a/third_party/aom/av1/encoder/lookahead.c b/third_party/aom/av1/encoder/lookahead.c
index 9ef9b88675..476c91ab95 100644
--- a/third_party/aom/av1/encoder/lookahead.c
+++ b/third_party/aom/av1/encoder/lookahead.c
@@ -46,7 +46,7 @@ struct lookahead_ctx *av1_lookahead_init(
unsigned int width, unsigned int height, unsigned int subsampling_x,
unsigned int subsampling_y, int use_highbitdepth, unsigned int depth,
const int border_in_pixels, int byte_alignment, int num_lap_buffers,
- bool is_all_intra, int num_pyramid_levels) {
+ bool is_all_intra, bool alloc_pyramid) {
int lag_in_frames = AOMMAX(1, depth);
// For all-intra frame encoding, previous source frames are not required.
@@ -82,7 +82,7 @@ struct lookahead_ctx *av1_lookahead_init(
if (aom_realloc_frame_buffer(
&ctx->buf[i].img, width, height, subsampling_x, subsampling_y,
use_highbitdepth, border_in_pixels, byte_alignment, NULL, NULL,
- NULL, num_pyramid_levels, 0)) {
+ NULL, alloc_pyramid, 0)) {
goto fail;
}
}
@@ -100,7 +100,7 @@ int av1_lookahead_full(const struct lookahead_ctx *ctx) {
int av1_lookahead_push(struct lookahead_ctx *ctx, const YV12_BUFFER_CONFIG *src,
int64_t ts_start, int64_t ts_end, int use_highbitdepth,
- int num_pyramid_levels, aom_enc_frame_flags_t flags) {
+ bool alloc_pyramid, aom_enc_frame_flags_t flags) {
int width = src->y_crop_width;
int height = src->y_crop_height;
int uv_width = src->uv_crop_width;
@@ -124,9 +124,9 @@ int av1_lookahead_push(struct lookahead_ctx *ctx, const YV12_BUFFER_CONFIG *src,
height != buf->img.y_crop_height ||
uv_width != buf->img.uv_crop_width ||
uv_height != buf->img.uv_crop_height;
- larger_dimensions = width > buf->img.y_width || height > buf->img.y_height ||
- uv_width > buf->img.uv_width ||
- uv_height > buf->img.uv_height;
+ larger_dimensions =
+ width > buf->img.y_crop_width || height > buf->img.y_crop_height ||
+ uv_width > buf->img.uv_crop_width || uv_height > buf->img.uv_crop_height;
assert(!larger_dimensions || new_dimensions);
if (larger_dimensions) {
@@ -134,11 +134,15 @@ int av1_lookahead_push(struct lookahead_ctx *ctx, const YV12_BUFFER_CONFIG *src,
memset(&new_img, 0, sizeof(new_img));
if (aom_alloc_frame_buffer(&new_img, width, height, subsampling_x,
subsampling_y, use_highbitdepth,
- AOM_BORDER_IN_PIXELS, 0, num_pyramid_levels, 0))
+ AOM_BORDER_IN_PIXELS, 0, alloc_pyramid, 0))
return 1;
aom_free_frame_buffer(&buf->img);
buf->img = new_img;
} else if (new_dimensions) {
+ buf->img.y_width = src->y_width;
+ buf->img.y_height = src->y_height;
+ buf->img.uv_width = src->uv_width;
+ buf->img.uv_height = src->uv_height;
buf->img.y_crop_width = src->y_crop_width;
buf->img.y_crop_height = src->y_crop_height;
buf->img.uv_crop_width = src->uv_crop_width;
@@ -146,7 +150,6 @@ int av1_lookahead_push(struct lookahead_ctx *ctx, const YV12_BUFFER_CONFIG *src,
buf->img.subsampling_x = src->subsampling_x;
buf->img.subsampling_y = src->subsampling_y;
}
- // Partial copy not implemented yet
av1_copy_and_extend_frame(src, &buf->img);
buf->ts_start = ts_start;
diff --git a/third_party/aom/av1/encoder/lookahead.h b/third_party/aom/av1/encoder/lookahead.h
index c0e6d222f5..41eca87fa3 100644
--- a/third_party/aom/av1/encoder/lookahead.h
+++ b/third_party/aom/av1/encoder/lookahead.h
@@ -70,7 +70,7 @@ struct lookahead_ctx *av1_lookahead_init(
unsigned int width, unsigned int height, unsigned int subsampling_x,
unsigned int subsampling_y, int use_highbitdepth, unsigned int depth,
const int border_in_pixels, int byte_alignment, int num_lap_buffers,
- bool is_all_intra, int num_pyramid_levels);
+ bool is_all_intra, bool alloc_pyramid);
/**\brief Destroys the lookahead stage
*/
@@ -85,18 +85,18 @@ int av1_lookahead_full(const struct lookahead_ctx *ctx);
* This function will copy the source image into a new framebuffer with
* the expected stride/border.
*
- * \param[in] ctx Pointer to the lookahead context
- * \param[in] src Pointer to the image to enqueue
- * \param[in] ts_start Timestamp for the start of this frame
- * \param[in] ts_end Timestamp for the end of this frame
- * \param[in] use_highbitdepth Tell if HBD is used
- * \param[in] num_pyramid_levels Number of pyramid levels to allocate
- for each frame buffer
- * \param[in] flags Flags set on this frame
+ * \param[in] ctx Pointer to the lookahead context
+ * \param[in] src Pointer to the image to enqueue
+ * \param[in] ts_start Timestamp for the start of this frame
+ * \param[in] ts_end Timestamp for the end of this frame
+ * \param[in] use_highbitdepth Tell if HBD is used
+ * \param[in] alloc_pyramid Whether to allocate a downsampling pyramid
+ * for each frame buffer
+ * \param[in] flags Flags set on this frame
*/
int av1_lookahead_push(struct lookahead_ctx *ctx, const YV12_BUFFER_CONFIG *src,
int64_t ts_start, int64_t ts_end, int use_highbitdepth,
- int num_pyramid_levels, aom_enc_frame_flags_t flags);
+ bool alloc_pyramid, aom_enc_frame_flags_t flags);
/**\brief Get the next source buffer to encode
*
diff --git a/third_party/aom/av1/encoder/nonrd_pickmode.c b/third_party/aom/av1/encoder/nonrd_pickmode.c
index f939b6d1fa..57c74f66d5 100644
--- a/third_party/aom/av1/encoder/nonrd_pickmode.c
+++ b/third_party/aom/av1/encoder/nonrd_pickmode.c
@@ -2357,6 +2357,10 @@ static AOM_FORCE_INLINE bool skip_inter_mode_nonrd(
*ref_frame2 = NONE_FRAME;
}
+ if (segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP) &&
+ (*this_mode != GLOBALMV || *ref_frame != LAST_FRAME))
+ return true;
+
if (x->sb_me_block && *ref_frame == LAST_FRAME) {
// We want to make sure to test the superblock MV:
// so don't skip (return false) for NEAREST_LAST or NEAR_LAST if they
@@ -3241,7 +3245,8 @@ void av1_nonrd_pick_inter_mode_sb(AV1_COMP *cpi, TileDataEnc *tile_data,
inter_pred_params_sr.conv_params =
get_conv_params(/*do_average=*/0, AOM_PLANE_Y, xd->bd);
- x->block_is_zero_sad = x->content_state_sb.source_sad_nonrd == kZeroSad;
+ x->block_is_zero_sad = x->content_state_sb.source_sad_nonrd == kZeroSad ||
+ segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP);
if (cpi->oxcf.tune_cfg.content == AOM_CONTENT_SCREEN &&
!x->force_zeromv_skip_for_blk &&
x->content_state_sb.source_sad_nonrd != kZeroSad &&
diff --git a/third_party/aom/av1/encoder/palette.c b/third_party/aom/av1/encoder/palette.c
index 7f79e9596e..45b56199c6 100644
--- a/third_party/aom/av1/encoder/palette.c
+++ b/third_party/aom/av1/encoder/palette.c
@@ -480,7 +480,7 @@ struct ColorCount {
int count;
};
-int color_count_comp(const void *c1, const void *c2) {
+static int color_count_comp(const void *c1, const void *c2) {
const struct ColorCount *color_count1 = (const struct ColorCount *)c1;
const struct ColorCount *color_count2 = (const struct ColorCount *)c2;
if (color_count1->count > color_count2->count) return -1;
diff --git a/third_party/aom/av1/encoder/palette.h b/third_party/aom/av1/encoder/palette.h
index 7da863a0cc..30886d37ae 100644
--- a/third_party/aom/av1/encoder/palette.h
+++ b/third_party/aom/av1/encoder/palette.h
@@ -26,7 +26,7 @@ struct PICK_MODE_CONTEXT;
struct macroblock;
/*!\cond */
-#define AV1_K_MEANS_RENAME(func, dim) func##_dim##dim##_c
+#define AV1_K_MEANS_RENAME(func, dim) func##_dim##dim
void AV1_K_MEANS_RENAME(av1_k_means, 1)(const int16_t *data, int16_t *centroids,
uint8_t *indices, int n, int k,
diff --git a/third_party/aom/av1/encoder/partition_search.c b/third_party/aom/av1/encoder/partition_search.c
index 1c17b09ee1..61d49a23f2 100644
--- a/third_party/aom/av1/encoder/partition_search.c
+++ b/third_party/aom/av1/encoder/partition_search.c
@@ -2144,8 +2144,9 @@ static void encode_b_nonrd(const AV1_COMP *const cpi, TileDataEnc *tile_data,
}
if (tile_data->allow_update_cdf) update_stats(&cpi->common, td);
}
- if (cpi->oxcf.q_cfg.aq_mode == CYCLIC_REFRESH_AQ && mbmi->skip_txfm &&
- !cpi->rc.rtc_external_ratectrl && cm->seg.enabled)
+ if ((cpi->oxcf.q_cfg.aq_mode == CYCLIC_REFRESH_AQ ||
+ cpi->active_map.enabled) &&
+ mbmi->skip_txfm && !cpi->rc.rtc_external_ratectrl && cm->seg.enabled)
av1_cyclic_reset_segment_skip(cpi, x, mi_row, mi_col, bsize, dry_run);
// TODO(Ravi/Remya): Move this copy function to a better logical place
// This function will copy the best mode information from block
@@ -2254,6 +2255,8 @@ static void pick_sb_modes_nonrd(AV1_COMP *const cpi, TileDataEnc *tile_data,
const AQ_MODE aq_mode = cpi->oxcf.q_cfg.aq_mode;
TxfmSearchInfo *txfm_info = &x->txfm_search_info;
int i;
+ const int seg_skip =
+ segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP);
// This is only needed for real time/allintra row-mt enabled multi-threaded
// encoding with cost update frequency set to COST_UPD_TILE/COST_UPD_OFF.
@@ -2276,15 +2279,17 @@ static void pick_sb_modes_nonrd(AV1_COMP *const cpi, TileDataEnc *tile_data,
}
for (i = 0; i < 2; ++i) pd[i].color_index_map = ctx->color_index_map[i];
- x->force_zeromv_skip_for_blk =
- get_force_zeromv_skip_flag_for_blk(cpi, x, bsize);
+ if (!seg_skip) {
+ x->force_zeromv_skip_for_blk =
+ get_force_zeromv_skip_flag_for_blk(cpi, x, bsize);
- // Source variance may be already compute at superblock level, so no need
- // to recompute, unless bsize < sb_size or source_variance is not yet set.
- if (!x->force_zeromv_skip_for_blk &&
- (x->source_variance == UINT_MAX || bsize < cm->seq_params->sb_size))
- x->source_variance = av1_get_perpixel_variance_facade(
- cpi, xd, &x->plane[0].src, bsize, AOM_PLANE_Y);
+ // Source variance may already be computed at superblock level, so no need
+ // to recompute, unless bsize < sb_size or source_variance is not yet set.
+ if (!x->force_zeromv_skip_for_blk &&
+ (x->source_variance == UINT_MAX || bsize < cm->seq_params->sb_size))
+ x->source_variance = av1_get_perpixel_variance_facade(
+ cpi, xd, &x->plane[0].src, bsize, AOM_PLANE_Y);
+ }
// Save rdmult before it might be changed, so it can be restored later.
const int orig_rdmult = x->rdmult;
@@ -2305,16 +2310,13 @@ static void pick_sb_modes_nonrd(AV1_COMP *const cpi, TileDataEnc *tile_data,
#if CONFIG_COLLECT_COMPONENT_TIMING
start_timing(cpi, nonrd_pick_inter_mode_sb_time);
#endif
- if (segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
- RD_STATS invalid_rd;
- av1_invalid_rd_stats(&invalid_rd);
- // TODO(kyslov): add av1_nonrd_pick_inter_mode_sb_seg_skip
- av1_rd_pick_inter_mode_sb_seg_skip(cpi, tile_data, x, mi_row, mi_col,
- rd_cost, bsize, ctx,
- invalid_rd.rdcost);
- } else {
- av1_nonrd_pick_inter_mode_sb(cpi, tile_data, x, rd_cost, bsize, ctx);
+ if (seg_skip) {
+ x->force_zeromv_skip_for_blk = 1;
+ // TODO(marpan): Consider adding a function for nonrd:
+ // av1_nonrd_pick_inter_mode_sb_seg_skip(), instead of setting
+ // x->force_zeromv_skip flag and entering av1_nonrd_pick_inter_mode_sb().
}
+ av1_nonrd_pick_inter_mode_sb(cpi, tile_data, x, rd_cost, bsize, ctx);
#if CONFIG_COLLECT_COMPONENT_TIMING
end_timing(cpi, nonrd_pick_inter_mode_sb_time);
#endif
@@ -2322,10 +2324,12 @@ static void pick_sb_modes_nonrd(AV1_COMP *const cpi, TileDataEnc *tile_data,
if (cpi->sf.rt_sf.skip_cdef_sb) {
// cdef_strength is initialized to 1 which means skip_cdef, and is updated
// here. Check to see if skipping cdef is allowed.
+ // Always allow cdef_skip for seg_skip = 1.
const int allow_cdef_skipping =
- cpi->rc.frames_since_key > 10 && !cpi->rc.high_source_sad &&
- !(x->color_sensitivity[COLOR_SENS_IDX(AOM_PLANE_U)] ||
- x->color_sensitivity[COLOR_SENS_IDX(AOM_PLANE_V)]);
+ seg_skip ||
+ (cpi->rc.frames_since_key > 10 && !cpi->rc.high_source_sad &&
+ !(x->color_sensitivity[COLOR_SENS_IDX(AOM_PLANE_U)] ||
+ x->color_sensitivity[COLOR_SENS_IDX(AOM_PLANE_V)]));
// Find the corresponding 64x64 block. It'll be the 128x128 block if that's
// the block size.
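The partition_search.c changes above route blocks whose segment has the SKIP feature through the zero-MV fast path (x->force_zeromv_skip_for_blk) instead of the RD seg-skip helper. For reference, a segment-level check such as segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP) boils down to a per-segment feature bitmask; below is a self-contained sketch of that predicate, with illustrative types and feature index rather than the definitions in av1/common/seg_common.h.

#include <stdint.h>
#include <stdio.h>

// Illustrative stand-ins; the real encoder uses struct segmentation and the
// SEG_LVL_* enum from av1/common/seg_common.h.
enum { SKETCH_SEG_LVL_SKIP = 6, SKETCH_MAX_SEGMENTS = 8 };

struct sketch_segmentation {
  int enabled;
  uint32_t feature_mask[SKETCH_MAX_SEGMENTS];  // one bit per feature, per segment
};

// Nonzero if `feature` is enabled for `segment_id`.
static int sketch_segfeature_active(const struct sketch_segmentation *seg,
                                    int segment_id, int feature) {
  return seg->enabled && (seg->feature_mask[segment_id] & (1u << feature));
}

int main(void) {
  struct sketch_segmentation seg = { 1, { 0 } };
  seg.feature_mask[3] |= 1u << SKETCH_SEG_LVL_SKIP;  // segment 3 is a skip segment
  // A block whose segment_id is 3 takes the zero-MV fast path; others do not.
  printf("seg 3: %d, seg 0: %d\n",
         sketch_segfeature_active(&seg, 3, SKETCH_SEG_LVL_SKIP),
         sketch_segfeature_active(&seg, 0, SKETCH_SEG_LVL_SKIP));
  return 0;
}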
diff --git a/third_party/aom/av1/encoder/partition_strategy.c b/third_party/aom/av1/encoder/partition_strategy.c
index ce06313579..1d62f128c7 100644
--- a/third_party/aom/av1/encoder/partition_strategy.c
+++ b/third_party/aom/av1/encoder/partition_strategy.c
@@ -1761,7 +1761,7 @@ void av1_prune_partitions_by_max_min_bsize(SuperBlockEnc *sb_enc,
// Decide whether to evaluate the AB partition specified by part_type based on
// split and HORZ/VERT info
-int evaluate_ab_partition_based_on_split(
+static int evaluate_ab_partition_based_on_split(
const PC_TREE *pc_tree, PARTITION_TYPE rect_part,
const RD_RECT_PART_WIN_INFO *rect_part_win_info, int qindex, int split_idx1,
int split_idx2) {
diff --git a/third_party/aom/av1/encoder/pass2_strategy.c b/third_party/aom/av1/encoder/pass2_strategy.c
index a9442ffc1a..bd8620c2be 100644
--- a/third_party/aom/av1/encoder/pass2_strategy.c
+++ b/third_party/aom/av1/encoder/pass2_strategy.c
@@ -158,28 +158,12 @@ static int frame_max_bits(const RATE_CONTROL *rc,
return (int)max_bits;
}
-static const double q_pow_term[(QINDEX_RANGE >> 5) + 1] = { 0.65, 0.70, 0.75,
- 0.80, 0.85, 0.90,
- 0.95, 0.95, 0.95 };
-#define ERR_DIVISOR 96.0
-static double calc_correction_factor(double err_per_mb, int q) {
- const double error_term = err_per_mb / ERR_DIVISOR;
- const int index = q >> 5;
- // Adjustment to power term based on qindex
- const double power_term =
- q_pow_term[index] +
- (((q_pow_term[index + 1] - q_pow_term[index]) * (q % 32)) / 32.0);
- assert(error_term >= 0.0);
- return fclamp(pow(error_term, power_term), 0.05, 5.0);
-}
-
// Based on history adjust expectations of bits per macroblock.
static void twopass_update_bpm_factor(AV1_COMP *cpi, int rate_err_tol) {
TWO_PASS *const twopass = &cpi->ppi->twopass;
const PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
// Based on recent history adjust expectations of bits per macroblock.
- double damp_fac = AOMMAX(5.0, rate_err_tol / 10.0);
double rate_err_factor = 1.0;
const double adj_limit = AOMMAX(0.2, (double)(100 - rate_err_tol) / 200.0);
const double min_fac = 1.0 - adj_limit;
@@ -214,9 +198,7 @@ static void twopass_update_bpm_factor(AV1_COMP *cpi, int rate_err_tol) {
}
int err_estimate = p_rc->rate_error_estimate;
- int64_t bits_left = twopass->bits_left;
int64_t total_actual_bits = p_rc->total_actual_bits;
- int64_t bits_off_target = p_rc->vbr_bits_off_target;
double rolling_arf_group_actual_bits =
(double)twopass->rolling_arf_group_actual_bits;
double rolling_arf_group_target_bits =
@@ -231,10 +213,6 @@ static void twopass_update_bpm_factor(AV1_COMP *cpi, int rate_err_tol) {
: 0;
total_actual_bits = simulate_parallel_frame ? p_rc->temp_total_actual_bits
: p_rc->total_actual_bits;
- bits_off_target = simulate_parallel_frame ? p_rc->temp_vbr_bits_off_target
- : p_rc->vbr_bits_off_target;
- bits_left =
- simulate_parallel_frame ? p_rc->temp_bits_left : twopass->bits_left;
rolling_arf_group_target_bits =
(double)(simulate_parallel_frame
? p_rc->temp_rolling_arf_group_target_bits
@@ -247,21 +225,21 @@ static void twopass_update_bpm_factor(AV1_COMP *cpi, int rate_err_tol) {
: p_rc->rate_error_estimate;
#endif
- if (p_rc->bits_off_target && total_actual_bits > 0) {
- if (cpi->ppi->lap_enabled) {
- rate_err_factor = rolling_arf_group_actual_bits /
- DOUBLE_DIVIDE_CHECK(rolling_arf_group_target_bits);
+ if ((p_rc->bits_off_target && total_actual_bits > 0) &&
+ (rolling_arf_group_target_bits >= 1.0)) {
+ if (rolling_arf_group_actual_bits > rolling_arf_group_target_bits) {
+ double error_fraction =
+ (rolling_arf_group_actual_bits - rolling_arf_group_target_bits) /
+ rolling_arf_group_target_bits;
+ error_fraction = (error_fraction > 1.0) ? 1.0 : error_fraction;
+ rate_err_factor = 1.0 + error_fraction;
} else {
- rate_err_factor = 1.0 - ((double)(bits_off_target) /
- AOMMAX(total_actual_bits, bits_left));
+ double error_fraction =
+ (rolling_arf_group_target_bits - rolling_arf_group_actual_bits) /
+ rolling_arf_group_target_bits;
+ rate_err_factor = 1.0 - error_fraction;
}
- // Adjustment is damped if this is 1 pass with look ahead processing
- // (as there are only ever a few frames of data) and for all but the first
- // GOP in normal two pass.
- if ((twopass->bpm_factor != 1.0) || cpi->ppi->lap_enabled) {
- rate_err_factor = 1.0 + ((rate_err_factor - 1.0) / damp_fac);
- }
rate_err_factor = AOMMAX(min_fac, AOMMIN(max_fac, rate_err_factor));
}
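The rewritten branch above drops the damped bits_off_target ratio and instead scales bpm_factor by the fractional over/undershoot of the rolling ARF-group bits, capping overshoot at +100%. A standalone sketch of the new mapping with made-up bit counts (the final clamp to min_fac/max_fac is left out):

#include <stdio.h>

// Mirrors the new rate_err_factor computation: fractional deviation of the
// rolling ARF-group actual bits from the target, overshoot capped at +100%.
static double rate_err_factor_sketch(double actual_bits, double target_bits) {
  if (target_bits < 1.0) return 1.0;
  if (actual_bits > target_bits) {
    double error_fraction = (actual_bits - target_bits) / target_bits;
    if (error_fraction > 1.0) error_fraction = 1.0;
    return 1.0 + error_fraction;
  }
  return 1.0 - (target_bits - actual_bits) / target_bits;
}

int main(void) {
  // 20% overshoot scales bpm_factor by 1.20; 20% undershoot by 0.80.
  printf("%.2f\n", rate_err_factor_sketch(120000.0, 100000.0));
  printf("%.2f\n", rate_err_factor_sketch(80000.0, 100000.0));
  return 0;
}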
@@ -270,36 +248,38 @@ static void twopass_update_bpm_factor(AV1_COMP *cpi, int rate_err_tol) {
if ((rate_err_factor < 1.0 && err_estimate >= 0) ||
(rate_err_factor > 1.0 && err_estimate <= 0)) {
twopass->bpm_factor *= rate_err_factor;
- if (rate_err_tol >= 100) {
- twopass->bpm_factor =
- AOMMAX(min_fac, AOMMIN(max_fac, twopass->bpm_factor));
- } else {
- twopass->bpm_factor = AOMMAX(0.1, AOMMIN(10.0, twopass->bpm_factor));
- }
+ twopass->bpm_factor = AOMMAX(min_fac, AOMMIN(max_fac, twopass->bpm_factor));
}
}
-static int qbpm_enumerator(int rate_err_tol) {
- return 1200000 + ((300000 * AOMMIN(75, AOMMAX(rate_err_tol - 25, 0))) / 75);
+static const double q_div_term[(QINDEX_RANGE >> 5) + 1] = { 32.0, 40.0, 46.0,
+ 52.0, 56.0, 60.0,
+ 64.0, 68.0, 72.0 };
+#define EPMB_SCALER 1250000
+static double calc_correction_factor(double err_per_mb, int q) {
+ double power_term = 0.90;
+ const int index = q >> 5;
+ const double divisor =
+ q_div_term[index] +
+ (((q_div_term[index + 1] - q_div_term[index]) * (q % 32)) / 32.0);
+ double error_term = EPMB_SCALER * pow(err_per_mb, power_term);
+ return error_term / divisor;
}
// Similar to find_qindex_by_rate() function in ratectrl.c, but includes
// calculation of a correction_factor.
static int find_qindex_by_rate_with_correction(
int desired_bits_per_mb, aom_bit_depth_t bit_depth, double error_per_mb,
- double group_weight_factor, int rate_err_tol, int best_qindex,
- int worst_qindex) {
+ double group_weight_factor, int best_qindex, int worst_qindex) {
assert(best_qindex <= worst_qindex);
int low = best_qindex;
int high = worst_qindex;
while (low < high) {
const int mid = (low + high) >> 1;
- const double mid_factor = calc_correction_factor(error_per_mb, mid);
+ const double q_factor = calc_correction_factor(error_per_mb, mid);
const double q = av1_convert_qindex_to_q(mid, bit_depth);
- const int enumerator = qbpm_enumerator(rate_err_tol);
- const int mid_bits_per_mb =
- (int)((enumerator * mid_factor * group_weight_factor) / q);
+ const int mid_bits_per_mb = (int)((q_factor * group_weight_factor) / q);
if (mid_bits_per_mb > desired_bits_per_mb) {
low = mid + 1;
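The replacement calc_correction_factor() estimates a bits-per-mb numerator as EPMB_SCALER * err_per_mb^0.9 and divides it by a value interpolated from the q_div_term table by qindex; find_qindex_by_rate_with_correction() then divides by the real q value. A sketch of the divisor interpolation with made-up inputs:

#include <math.h>
#include <stdio.h>

#define EPMB_SCALER 1250000
static const double q_div_term[9] = { 32.0, 40.0, 46.0, 52.0, 56.0,
                                      60.0, 64.0, 68.0, 72.0 };

// Same shape as the new calc_correction_factor() above: interpolate the
// divisor between the two table entries bracketing qindex q (0..255).
static double correction_factor_sketch(double err_per_mb, int q) {
  const int index = q >> 5;
  const double divisor =
      q_div_term[index] +
      (((q_div_term[index + 1] - q_div_term[index]) * (q % 32)) / 32.0);
  return EPMB_SCALER * pow(err_per_mb, 0.90) / divisor;
}

int main(void) {
  // Made-up inputs: qindex 100 -> index 3, so the divisor is interpolated
  // between 52.0 and 56.0: 52 + 4 * (100 % 32) / 32 = 52.5.
  printf("q_factor = %.1f\n", correction_factor_sketch(0.05, 100));
  return 0;
}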
@@ -359,8 +339,8 @@ static int get_twopass_worst_quality(AV1_COMP *cpi, const double av_frame_err,
// content at the given rate.
int q = find_qindex_by_rate_with_correction(
target_norm_bits_per_mb, cpi->common.seq_params->bit_depth,
- av_err_per_mb, cpi->ppi->twopass.bpm_factor, rate_err_tol,
- rc->best_quality, rc->worst_quality);
+ av_err_per_mb, cpi->ppi->twopass.bpm_factor, rc->best_quality,
+ rc->worst_quality);
// Restriction on active max q for constrained quality mode.
if (rc_cfg->mode == AOM_CQ) q = AOMMAX(q, rc_cfg->cq_level);
@@ -4235,12 +4215,13 @@ void av1_twopass_postencode_update(AV1_COMP *cpi) {
twopass->kf_group_bits = AOMMAX(twopass->kf_group_bits, 0);
// If the rate control is drifting consider adjustment to min or maxq.
- if ((rc_cfg->mode != AOM_Q) && !cpi->rc.is_src_frame_alt_ref) {
+ if ((rc_cfg->mode != AOM_Q) && !cpi->rc.is_src_frame_alt_ref &&
+ (p_rc->rolling_target_bits > 0)) {
int minq_adj_limit;
int maxq_adj_limit;
minq_adj_limit =
(rc_cfg->mode == AOM_CQ ? MINQ_ADJ_LIMIT_CQ : MINQ_ADJ_LIMIT);
- maxq_adj_limit = rc->worst_quality - rc->active_worst_quality;
+ maxq_adj_limit = (rc->worst_quality - rc->active_worst_quality);
// Undershoot
if ((rc_cfg->under_shoot_pct < 100) &&
@@ -4252,8 +4233,9 @@ void av1_twopass_postencode_update(AV1_COMP *cpi) {
if ((pct_error >= rc_cfg->under_shoot_pct) &&
(p_rc->rate_error_estimate > 0)) {
twopass->extend_minq += 1;
+ twopass->extend_maxq -= 1;
}
- twopass->extend_maxq -= 1;
+
// Overshoot
} else if ((rc_cfg->over_shoot_pct < 100) &&
(p_rc->rolling_actual_bits > p_rc->rolling_target_bits)) {
@@ -4265,18 +4247,8 @@ void av1_twopass_postencode_update(AV1_COMP *cpi) {
if ((pct_error >= rc_cfg->over_shoot_pct) &&
(p_rc->rate_error_estimate < 0)) {
twopass->extend_maxq += 1;
+ twopass->extend_minq -= 1;
}
- twopass->extend_minq -= 1;
- } else {
- // Adjustment for extreme local overshoot.
- // Only applies when normal adjustment above is not used (e.g.
- // when threshold is set to 100).
- if (rc->projected_frame_size > (2 * rc->base_frame_target) &&
- rc->projected_frame_size > (2 * rc->avg_frame_bandwidth))
- ++twopass->extend_maxq;
- // Unwind extreme overshoot adjustment.
- else if (p_rc->rolling_target_bits > p_rc->rolling_actual_bits)
- --twopass->extend_maxq;
}
twopass->extend_minq =
clamp(twopass->extend_minq, -minq_adj_limit, minq_adj_limit);
diff --git a/third_party/aom/av1/encoder/pickcdef.c b/third_party/aom/av1/encoder/pickcdef.c
index 232a2f9edb..ed5fa55f17 100644
--- a/third_party/aom/av1/encoder/pickcdef.c
+++ b/third_party/aom/av1/encoder/pickcdef.c
@@ -894,7 +894,7 @@ void av1_cdef_search(AV1_COMP *cpi) {
int rdmult = cpi->td.mb.rdmult;
for (int i = 0; i <= 3; i++) {
if (i > max_signaling_bits) break;
- int best_lev0[CDEF_MAX_STRENGTHS];
+ int best_lev0[CDEF_MAX_STRENGTHS] = { 0 };
int best_lev1[CDEF_MAX_STRENGTHS] = { 0 };
const int nb_strengths = 1 << i;
uint64_t tot_mse;
diff --git a/third_party/aom/av1/encoder/picklpf.c b/third_party/aom/av1/encoder/picklpf.c
index 9084d3f13a..a504535028 100644
--- a/third_party/aom/av1/encoder/picklpf.c
+++ b/third_party/aom/av1/encoder/picklpf.c
@@ -27,12 +27,25 @@
#include "av1/encoder/encoder.h"
#include "av1/encoder/picklpf.h"
+// AV1 loop filter applies to the whole frame according to mi_rows and mi_cols,
+// which are calculated based on the aligned width and aligned height.
+// In addition, if super res is enabled, it copies the whole frame
+// according to the aligned width and height (av1_superres_upscale()).
+// So we need to copy the whole filtered region, instead of the cropped region.
+// For example, input image size is: 160x90.
+// Then src->y_crop_width = 160, src->y_crop_height = 90.
+// The aligned frame size is: src->y_width = 160, src->y_height = 96.
+// AV1 aligns the frame size to a multiple of 8 so that, when there is
+// chroma subsampling, the chroma planes still cover an integer number of
+// mi units. An mi unit is 4x4 luma pixels, and 8 = 4 * 2: two luma mi
+// units correspond to one chroma mi unit under subsampling.
+// See: aom_realloc_frame_buffer() in yv12config.c.
static void yv12_copy_plane(const YV12_BUFFER_CONFIG *src_bc,
YV12_BUFFER_CONFIG *dst_bc, int plane) {
switch (plane) {
- case 0: aom_yv12_copy_y(src_bc, dst_bc); break;
- case 1: aom_yv12_copy_u(src_bc, dst_bc); break;
- case 2: aom_yv12_copy_v(src_bc, dst_bc); break;
+ case 0: aom_yv12_copy_y(src_bc, dst_bc, 0); break;
+ case 1: aom_yv12_copy_u(src_bc, dst_bc, 0); break;
+ case 2: aom_yv12_copy_v(src_bc, dst_bc, 0); break;
default: assert(plane >= 0 && plane <= 2); break;
}
}
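A minimal sketch of the 8-pixel alignment the new comment describes, using a local rounding helper rather than the library's own macro:

#include <stdio.h>

// Round `value` up to a multiple of 2^n, which is effectively what
// aom_realloc_frame_buffer() does for the luma dimensions (local helper).
static int align_up_pow2(int value, int n) {
  return (value + (1 << n) - 1) & ~((1 << n) - 1);
}

int main(void) {
  const int crop_w = 160, crop_h = 90;
  // Aligning to a multiple of 8 keeps both luma and subsampled chroma an
  // integer number of 4x4 mi units: 160x90 -> y_width 160, y_height 96.
  printf("aligned: %dx%d\n", align_up_pow2(crop_w, 3), align_up_pow2(crop_h, 3));
  return 0;
}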
@@ -311,7 +324,7 @@ void av1_pick_filter_level(const YV12_BUFFER_CONFIG *sd, AV1_COMP *cpi,
&cpi->last_frame_uf, cm->width, cm->height,
seq_params->subsampling_x, seq_params->subsampling_y,
seq_params->use_highbitdepth, cpi->oxcf.border_in_pixels,
- cm->features.byte_alignment, NULL, NULL, NULL, 0, 0))
+ cm->features.byte_alignment, NULL, NULL, NULL, false, 0))
aom_internal_error(cm->error, AOM_CODEC_MEM_ERROR,
"Failed to allocate last frame buffer");
diff --git a/third_party/aom/av1/encoder/pickrst.c b/third_party/aom/av1/encoder/pickrst.c
index 6429064175..b0d0d0bb78 100644
--- a/third_party/aom/av1/encoder/pickrst.c
+++ b/third_party/aom/av1/encoder/pickrst.c
@@ -1103,6 +1103,39 @@ static INLINE int wrap_index(int i, int wiener_win) {
return (i >= wiener_halfwin1 ? wiener_win - 1 - i : i);
}
+// Splits each w[i] into smaller components w1[i] and w2[i] such that
+// w[i] = w1[i] * WIENER_TAP_SCALE_FACTOR + w2[i].
+static INLINE void split_wiener_filter_coefficients(int wiener_win,
+ const int32_t *w,
+ int32_t *w1, int32_t *w2) {
+ for (int i = 0; i < wiener_win; i++) {
+ w1[i] = w[i] / WIENER_TAP_SCALE_FACTOR;
+ w2[i] = w[i] - w1[i] * WIENER_TAP_SCALE_FACTOR;
+ assert(w[i] == w1[i] * WIENER_TAP_SCALE_FACTOR + w2[i]);
+ }
+}
+
+// Calculates x * w / WIENER_TAP_SCALE_FACTOR, where
+// w = w1 * WIENER_TAP_SCALE_FACTOR + w2.
+//
+// The multiplication x * w may overflow, so we multiply x by the components of
+// w (w1 and w2) and combine the multiplication with the division.
+static INLINE int64_t multiply_and_scale(int64_t x, int32_t w1, int32_t w2) {
+ // Let y = x * w / WIENER_TAP_SCALE_FACTOR
+ // = x * (w1 * WIENER_TAP_SCALE_FACTOR + w2) / WIENER_TAP_SCALE_FACTOR
+ const int64_t y = x * w1 + x * w2 / WIENER_TAP_SCALE_FACTOR;
+ // Double-check the calculation using __int128.
+ // TODO(wtc): Remove after 2024-04-30.
+#if !defined(NDEBUG) && defined(__GNUC__) && defined(__LP64__)
+ const int32_t w = w1 * WIENER_TAP_SCALE_FACTOR + w2;
+ const __int128 z = (__int128)x * w / WIENER_TAP_SCALE_FACTOR;
+ assert(z >= INT64_MIN);
+ assert(z <= INT64_MAX);
+ assert(y == (int64_t)z);
+#endif
+ return y;
+}
+
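The split is needed because x * w can exceed the int64_t range even though x (already divided once by WIENER_TAP_SCALE_FACTOR) fits comfortably. A standalone illustration of the identity, using a stand-in scale constant and the same __int128 cross-check as the debug assert above (assumes a 64-bit GCC/Clang toolchain):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

// Stand-in for WIENER_TAP_SCALE_FACTOR; the real constant lives in pickrst.c.
#define SCALE ((int64_t)1 << 16)

int main(void) {
  // x is large enough that the direct product x * w overflows int64_t,
  // but x * w1 and x * w2 / SCALE both stay in range.
  const int64_t x = (int64_t)1 << 47;
  const int32_t w = 3 * (int32_t)SCALE + 12345;  // w = w1 * SCALE + w2
  const int32_t w1 = (int32_t)(w / SCALE);       // 3
  const int32_t w2 = (int32_t)(w - w1 * SCALE);  // 12345
  const int64_t y = x * w1 + x * w2 / SCALE;
  // Cross-check against the wide product, as the debug check above does.
  const __int128 z = (__int128)x * w / SCALE;
  assert(y == (int64_t)z);
  printf("x * w / SCALE = %lld\n", (long long)y);
  return 0;
}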
// Solve linear equations to find Wiener filter tap values
// Taps are output scaled by WIENER_FILT_STEP
static int linsolve_wiener(int n, int64_t *A, int stride, int64_t *b,
@@ -1175,10 +1208,12 @@ static int linsolve_wiener(int n, int64_t *A, int stride, int64_t *b,
// Fix vector b, update vector a
static AOM_INLINE void update_a_sep_sym(int wiener_win, int64_t **Mc,
- int64_t **Hc, int32_t *a, int32_t *b) {
+ int64_t **Hc, int32_t *a,
+ const int32_t *b) {
int i, j;
int64_t S[WIENER_WIN];
int64_t A[WIENER_HALFWIN1], B[WIENER_HALFWIN1 * WIENER_HALFWIN1];
+ int32_t b1[WIENER_WIN], b2[WIENER_WIN];
const int wiener_win2 = wiener_win * wiener_win;
const int wiener_halfwin1 = (wiener_win >> 1) + 1;
memset(A, 0, sizeof(A));
@@ -1189,16 +1224,7 @@ static AOM_INLINE void update_a_sep_sym(int wiener_win, int64_t **Mc,
A[jj] += Mc[i][j] * b[i] / WIENER_TAP_SCALE_FACTOR;
}
}
-
- // b/274668506: This is the dual branch for the issue in b/272139363. The fix
- // is similar. See comments in update_b_sep_sym() below.
- int32_t max_b_l = 0;
- for (int l = 0; l < wiener_win; ++l) {
- const int32_t abs_b_l = abs(b[l]);
- if (abs_b_l > max_b_l) max_b_l = abs_b_l;
- }
- const int scale_threshold = 128 * WIENER_TAP_SCALE_FACTOR;
- const int scaler = max_b_l < scale_threshold ? 1 : 4;
+ split_wiener_filter_coefficients(wiener_win, b, b1, b2);
for (i = 0; i < wiener_win; i++) {
for (j = 0; j < wiener_win; j++) {
@@ -1207,10 +1233,17 @@ static AOM_INLINE void update_a_sep_sym(int wiener_win, int64_t **Mc,
const int kk = wrap_index(k, wiener_win);
for (l = 0; l < wiener_win; ++l) {
const int ll = wrap_index(l, wiener_win);
- B[ll * wiener_halfwin1 + kk] +=
- Hc[j * wiener_win + i][k * wiener_win2 + l] * b[i] /
- (scaler * WIENER_TAP_SCALE_FACTOR) * b[j] /
- (WIENER_TAP_SCALE_FACTOR / scaler);
+ // Calculate
+ // B[ll * wiener_halfwin1 + kk] +=
+ // Hc[j * wiener_win + i][k * wiener_win2 + l] * b[i] /
+ // WIENER_TAP_SCALE_FACTOR * b[j] / WIENER_TAP_SCALE_FACTOR;
+ //
+ // The last multiplication may overflow, so we combine the last
+ // multiplication with the last division.
+ const int64_t x = Hc[j * wiener_win + i][k * wiener_win2 + l] * b[i] /
+ WIENER_TAP_SCALE_FACTOR;
+ // b[j] = b1[j] * WIENER_TAP_SCALE_FACTOR + b2[j]
+ B[ll * wiener_halfwin1 + kk] += multiply_and_scale(x, b1[j], b2[j]);
}
}
}
@@ -1246,10 +1279,12 @@ static AOM_INLINE void update_a_sep_sym(int wiener_win, int64_t **Mc,
// Fix vector a, update vector b
static AOM_INLINE void update_b_sep_sym(int wiener_win, int64_t **Mc,
- int64_t **Hc, int32_t *a, int32_t *b) {
+ int64_t **Hc, const int32_t *a,
+ int32_t *b) {
int i, j;
int64_t S[WIENER_WIN];
int64_t A[WIENER_HALFWIN1], B[WIENER_HALFWIN1 * WIENER_HALFWIN1];
+ int32_t a1[WIENER_WIN], a2[WIENER_WIN];
const int wiener_win2 = wiener_win * wiener_win;
const int wiener_halfwin1 = (wiener_win >> 1) + 1;
memset(A, 0, sizeof(A));
@@ -1260,32 +1295,7 @@ static AOM_INLINE void update_b_sep_sym(int wiener_win, int64_t **Mc,
A[ii] += Mc[i][j] * a[j] / WIENER_TAP_SCALE_FACTOR;
}
}
-
- // b/272139363: The computation,
- // Hc[i * wiener_win + j][k * wiener_win2 + l] * a[k] /
- // WIENER_TAP_SCALE_FACTOR * a[l] / WIENER_TAP_SCALE_FACTOR;
- // may generate a signed-integer-overflow. Conditionally scale the terms to
- // avoid a potential overflow.
- //
- // Hc contains accumulated correlation statistics and it is desired to leave
- // as much room as possible for Hc. It was experimentally observed that the
- // primary issue manifests itself with the second, a[l], multiply. For
- // max_a_l < WIENER_TAP_SCALE_FACTOR the first multiply with a[k] should not
- // increase dynamic range and the second multiply should hence be safe.
- // Thereafter a safe scale_threshold depends on the actual operational range
- // of Hc. The largest scale_threshold is expected to depend on bit-depth
- // (av1_compute_stats_highbd_c() scales highbd to 8-bit) and maximum
- // restoration-unit size (256), leading up to 32-bit positive numbers in Hc.
- // Noting that the caller, wiener_decompose_sep_sym(), initializes a[...]
- // to a range smaller than 16 bits, the scale_threshold is set as below for
- // convenience.
- int32_t max_a_l = 0;
- for (int l = 0; l < wiener_win; ++l) {
- const int32_t abs_a_l = abs(a[l]);
- if (abs_a_l > max_a_l) max_a_l = abs_a_l;
- }
- const int scale_threshold = 128 * WIENER_TAP_SCALE_FACTOR;
- const int scaler = max_a_l < scale_threshold ? 1 : 4;
+ split_wiener_filter_coefficients(wiener_win, a, a1, a2);
for (i = 0; i < wiener_win; i++) {
const int ii = wrap_index(i, wiener_win);
@@ -1294,10 +1304,17 @@ static AOM_INLINE void update_b_sep_sym(int wiener_win, int64_t **Mc,
int k, l;
for (k = 0; k < wiener_win; ++k) {
for (l = 0; l < wiener_win; ++l) {
- B[jj * wiener_halfwin1 + ii] +=
- Hc[i * wiener_win + j][k * wiener_win2 + l] * a[k] /
- (scaler * WIENER_TAP_SCALE_FACTOR) * a[l] /
- (WIENER_TAP_SCALE_FACTOR / scaler);
+ // Calculate
+ // B[jj * wiener_halfwin1 + ii] +=
+ // Hc[i * wiener_win + j][k * wiener_win2 + l] * a[k] /
+ // WIENER_TAP_SCALE_FACTOR * a[l] / WIENER_TAP_SCALE_FACTOR;
+ //
+ // The last multiplication may overflow, so we combine the last
+ // multiplication with the last division.
+ const int64_t x = Hc[i * wiener_win + j][k * wiener_win2 + l] * a[k] /
+ WIENER_TAP_SCALE_FACTOR;
+ // a[l] = a1[l] * WIENER_TAP_SCALE_FACTOR + a2[l]
+ B[jj * wiener_halfwin1 + ii] += multiply_and_scale(x, a1[l], a2[l]);
}
}
}
@@ -2050,7 +2067,7 @@ void av1_pick_filter_restoration(const YV12_BUFFER_CONFIG *src, AV1_COMP *cpi) {
&cpi->trial_frame_rst, cm->superres_upscaled_width,
cm->superres_upscaled_height, seq_params->subsampling_x,
seq_params->subsampling_y, highbd, AOM_RESTORATION_FRAME_BORDER,
- cm->features.byte_alignment, NULL, NULL, NULL, 0, 0))
+ cm->features.byte_alignment, NULL, NULL, NULL, false, 0))
aom_internal_error(cm->error, AOM_CODEC_MEM_ERROR,
"Failed to allocate trial restored frame buffer");
diff --git a/third_party/aom/av1/encoder/ratectrl.c b/third_party/aom/av1/encoder/ratectrl.c
index df86380272..7639484df5 100644
--- a/third_party/aom/av1/encoder/ratectrl.c
+++ b/third_party/aom/av1/encoder/ratectrl.c
@@ -30,6 +30,7 @@
#include "av1/common/seg_common.h"
#include "av1/encoder/encodemv.h"
+#include "av1/encoder/encoder_utils.h"
#include "av1/encoder/encode_strategy.h"
#include "av1/encoder/gop_structure.h"
#include "av1/encoder/random.h"
@@ -405,10 +406,10 @@ void av1_primary_rc_init(const AV1EncoderConfig *oxcf,
p_rc->rate_correction_factors[KF_STD] = 1.0;
p_rc->bits_off_target = p_rc->starting_buffer_level;
- p_rc->rolling_target_bits =
- (int)(oxcf->rc_cfg.target_bandwidth / oxcf->input_cfg.init_framerate);
- p_rc->rolling_actual_bits =
- (int)(oxcf->rc_cfg.target_bandwidth / oxcf->input_cfg.init_framerate);
+ p_rc->rolling_target_bits = AOMMAX(
+ 1, (int)(oxcf->rc_cfg.target_bandwidth / oxcf->input_cfg.init_framerate));
+ p_rc->rolling_actual_bits = AOMMAX(
+ 1, (int)(oxcf->rc_cfg.target_bandwidth / oxcf->input_cfg.init_framerate));
}
void av1_rc_init(const AV1EncoderConfig *oxcf, RATE_CONTROL *rc) {
@@ -439,6 +440,7 @@ void av1_rc_init(const AV1EncoderConfig *oxcf, RATE_CONTROL *rc) {
rc->rtc_external_ratectrl = 0;
rc->frame_level_fast_extra_bits = 0;
rc->use_external_qp_one_pass = 0;
+ rc->percent_blocks_inactive = 0;
}
static bool check_buffer_below_thresh(AV1_COMP *cpi, int64_t buffer_level,
@@ -1719,41 +1721,39 @@ static void adjust_active_best_and_worst_quality(const AV1_COMP *cpi,
const AV1_COMMON *const cm = &cpi->common;
const RATE_CONTROL *const rc = &cpi->rc;
const PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
- const RefreshFrameInfo *const refresh_frame = &cpi->refresh_frame;
int active_best_quality = *active_best;
int active_worst_quality = *active_worst;
#if CONFIG_FPMT_TEST
- const int simulate_parallel_frame =
- cpi->ppi->gf_group.frame_parallel_level[cpi->gf_frame_index] > 0 &&
- cpi->ppi->fpmt_unit_test_cfg == PARALLEL_SIMULATION_ENCODE;
- int extend_minq = simulate_parallel_frame ? p_rc->temp_extend_minq
- : cpi->ppi->twopass.extend_minq;
- int extend_maxq = simulate_parallel_frame ? p_rc->temp_extend_maxq
- : cpi->ppi->twopass.extend_maxq;
#endif
// Extension to max or min Q if undershoot or overshoot is outside
// the permitted range.
if (cpi->oxcf.rc_cfg.mode != AOM_Q) {
+#if CONFIG_FPMT_TEST
+ const int simulate_parallel_frame =
+ cpi->ppi->gf_group.frame_parallel_level[cpi->gf_frame_index] > 0 &&
+ cpi->ppi->fpmt_unit_test_cfg == PARALLEL_SIMULATION_ENCODE;
+ const int extend_minq = simulate_parallel_frame
+ ? p_rc->temp_extend_minq
+ : cpi->ppi->twopass.extend_minq;
+ const int extend_maxq = simulate_parallel_frame
+ ? p_rc->temp_extend_maxq
+ : cpi->ppi->twopass.extend_maxq;
+ const RefreshFrameInfo *const refresh_frame = &cpi->refresh_frame;
if (frame_is_intra_only(cm) ||
(!rc->is_src_frame_alt_ref &&
(refresh_frame->golden_frame || is_intrl_arf_boost ||
refresh_frame->alt_ref_frame))) {
-#if CONFIG_FPMT_TEST
active_best_quality -= extend_minq;
active_worst_quality += (extend_maxq / 2);
-#else
- active_best_quality -= cpi->ppi->twopass.extend_minq / 4;
- active_worst_quality += (cpi->ppi->twopass.extend_maxq / 2);
-#endif
} else {
-#if CONFIG_FPMT_TEST
active_best_quality -= extend_minq / 2;
active_worst_quality += extend_maxq;
+ }
#else
- active_best_quality -= cpi->ppi->twopass.extend_minq / 4;
- active_worst_quality += cpi->ppi->twopass.extend_maxq;
+ (void)is_intrl_arf_boost;
+ active_best_quality -= cpi->ppi->twopass.extend_minq / 8;
+ active_worst_quality += cpi->ppi->twopass.extend_maxq / 4;
#endif
- }
}
#ifndef STRICT_RC
@@ -2991,6 +2991,24 @@ void av1_set_rtc_reference_structure_one_layer(AV1_COMP *cpi, int gf_update) {
cpi->rt_reduce_num_ref_buffers &= (rtc_ref->ref_idx[2] < 7);
}
+static int set_block_is_active(unsigned char *const active_map_4x4, int mi_cols,
+ int mi_rows, int sbi_col, int sbi_row, int sh,
+ int num_4x4) {
+ int r = sbi_row << sh;
+ int c = sbi_col << sh;
+ const int row_max = AOMMIN(num_4x4, mi_rows - r);
+ const int col_max = AOMMIN(num_4x4, mi_cols - c);
+ // The active map is set at 16x16 block granularity, so we only need to
+ // check one 4x4 mi unit per 16x16 block.
+ for (int x = 0; x < row_max; x += 4) {
+ for (int y = 0; y < col_max; y += 4) {
+ if (active_map_4x4[(r + x) * mi_cols + (c + y)] == AM_SEGMENT_ID_ACTIVE)
+ return 1;
+ }
+ }
+ return 0;
+}
+
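set_block_is_active() samples one 4x4 map entry per 16x16 block of a superblock, since the active map is set at 16x16 granularity. Relatedly, rc->percent_blocks_inactive (added to ratectrl.h below) could be derived from the same map roughly as in this sketch, with illustrative map values rather than the encoder's actual computation:

#include <stdio.h>

enum { AM_ACTIVE_SKETCH = 0, AM_INACTIVE_SKETCH = 1 };  // illustrative values

// Percentage of 16x16 blocks whose top-left 4x4 map entry is inactive. The
// map holds one entry per 4x4 mi unit, mi_cols entries per row, and zero
// means active here.
static int percent_blocks_inactive_sketch(const unsigned char *map_4x4,
                                           int mi_rows, int mi_cols) {
  int total = 0, inactive = 0;
  for (int r = 0; r < mi_rows; r += 4) {
    for (int c = 0; c < mi_cols; c += 4) {
      ++total;
      if (map_4x4[r * mi_cols + c] == AM_INACTIVE_SKETCH) ++inactive;
    }
  }
  return total ? (100 * inactive) / total : 0;
}

int main(void) {
  // Toy 8x8 mi map (32x32 luma pixels, i.e. four 16x16 blocks), one inactive.
  unsigned char map[8 * 8] = { 0 };
  map[4 * 8 + 4] = AM_INACTIVE_SKETCH;  // mark the bottom-right 16x16 block
  printf("inactive: %d%%\n", percent_blocks_inactive_sketch(map, 8, 8));  // 25%
  return 0;
}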
/*!\brief Check for scene detection, for 1 pass real-time mode.
*
* Compute average source sad (temporal sad: between current source and
@@ -3093,11 +3111,26 @@ static void rc_scene_detection_onepass_rt(AV1_COMP *cpi,
sizeof(*cpi->src_sad_blk_64x64)));
}
}
+ const CommonModeInfoParams *const mi_params = &cpi->common.mi_params;
+ const int mi_cols = mi_params->mi_cols;
+ const int mi_rows = mi_params->mi_rows;
+ int sh = (cm->seq_params->sb_size == BLOCK_128X128) ? 5 : 4;
+ int num_4x4 = (cm->seq_params->sb_size == BLOCK_128X128) ? 32 : 16;
+ unsigned char *const active_map_4x4 = cpi->active_map.map;
// Avoid bottom and right border.
for (int sbi_row = 0; sbi_row < sb_rows - border; ++sbi_row) {
for (int sbi_col = 0; sbi_col < sb_cols; ++sbi_col) {
- tmp_sad = cpi->ppi->fn_ptr[bsize].sdf(src_y, src_ystride, last_src_y,
- last_src_ystride);
+ int block_is_active = 1;
+ if (cpi->active_map.enabled && rc->percent_blocks_inactive > 0) {
+ block_is_active = set_block_is_active(active_map_4x4, mi_cols, mi_rows,
+ sbi_col, sbi_row, sh, num_4x4);
+ }
+ if (block_is_active) {
+ tmp_sad = cpi->ppi->fn_ptr[bsize].sdf(src_y, src_ystride, last_src_y,
+ last_src_ystride);
+ } else {
+ tmp_sad = 0;
+ }
if (cpi->src_sad_blk_64x64 != NULL)
cpi->src_sad_blk_64x64[sbi_col + sbi_row * sb_cols] = tmp_sad;
if (check_light_change) {
@@ -3456,8 +3489,13 @@ void av1_get_one_pass_rt_params(AV1_COMP *cpi, FRAME_TYPE *const frame_type,
}
}
}
- // Check for scene change: for SVC check on base spatial layer only.
- if (cpi->sf.rt_sf.check_scene_detection && svc->spatial_layer_id == 0) {
+ if (cpi->active_map.enabled && cpi->rc.percent_blocks_inactive == 100) {
+ rc->frame_source_sad = 0;
+ rc->avg_source_sad = (3 * rc->avg_source_sad + rc->frame_source_sad) >> 2;
+ rc->percent_blocks_with_motion = 0;
+ rc->high_source_sad = 0;
+ } else if (cpi->sf.rt_sf.check_scene_detection &&
+ svc->spatial_layer_id == 0) {
if (rc->prev_coded_width == cm->width &&
rc->prev_coded_height == cm->height) {
rc_scene_detection_onepass_rt(cpi, frame_input);
@@ -3522,6 +3560,10 @@ void av1_get_one_pass_rt_params(AV1_COMP *cpi, FRAME_TYPE *const frame_type,
}
}
+#define CHECK_INTER_LAYER_PRED(ref_frame) \
+ ((cpi->ref_frame_flags & av1_ref_frame_flag_list[ref_frame]) && \
+ (av1_check_ref_is_low_spatial_res_super_frame(cpi, ref_frame)))
+
int av1_encodedframe_overshoot_cbr(AV1_COMP *cpi, int *q) {
AV1_COMMON *const cm = &cpi->common;
PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
@@ -3532,12 +3574,26 @@ int av1_encodedframe_overshoot_cbr(AV1_COMP *cpi, int *q) {
int target_bits_per_mb;
double q2;
int enumerator;
+ int inter_layer_pred_on = 0;
int is_screen_content = (cpi->oxcf.tune_cfg.content == AOM_CONTENT_SCREEN);
- *q = (3 * cpi->rc.worst_quality + *q) >> 2;
- // For screen content use the max-q set by the user to allow for less
- // overshoot on slide changes.
- if (is_screen_content) *q = cpi->rc.worst_quality;
cpi->cyclic_refresh->counter_encode_maxq_scene_change = 0;
+ if (cpi->svc.spatial_layer_id > 0) {
+ // For spatial layers: check if inter-layer (spatial) prediction is used
+ // (check if any reference being used is the lower spatial layer).
+ inter_layer_pred_on = CHECK_INTER_LAYER_PRED(LAST_FRAME) ||
+ CHECK_INTER_LAYER_PRED(GOLDEN_FRAME) ||
+ CHECK_INTER_LAYER_PRED(ALTREF_FRAME);
+ }
+ // If inter-layer prediction is on: we expect to pull up the quality from
+ // the lower spatial layer, so we can use a lower q.
+ if (cpi->svc.spatial_layer_id > 0 && inter_layer_pred_on) {
+ *q = (cpi->rc.worst_quality + *q) >> 1;
+ } else {
+ *q = (3 * cpi->rc.worst_quality + *q) >> 2;
+ // For screen content use the max-q set by the user to allow for less
+ // overshoot on slide changes.
+ if (is_screen_content) *q = cpi->rc.worst_quality;
+ }
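The two blends above push q different fractions of the way toward worst_quality on overshoot, depending on whether inter-layer prediction can pull quality up from the lower spatial layer. A tiny numeric illustration with made-up qindex values:

#include <stdio.h>

int main(void) {
  const int worst_quality = 255;  // made-up: encoder's max allowed qindex
  const int q_in = 120;           // made-up: qindex chosen before the overshoot
  const int q_inter_layer_on = (worst_quality + q_in) >> 1;       // 187
  const int q_inter_layer_off = (3 * worst_quality + q_in) >> 2;  // 221
  printf("inter-layer pred on: %d, off: %d\n", q_inter_layer_on,
         q_inter_layer_off);
  return 0;
}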
// Adjust avg_frame_qindex, buffer_level, and rate correction factors, as
// these parameters will affect QP selection for subsequent frames. If they
// have settled down to a very different (low QP) state, then not adjusting
@@ -3566,8 +3622,10 @@ int av1_encodedframe_overshoot_cbr(AV1_COMP *cpi, int *q) {
rate_correction_factor;
}
// For temporal layers: reset the rate control parameters across all
- // temporal layers.
- if (cpi->svc.number_temporal_layers > 1) {
+ // temporal layers. Only do it for spatial enhancement layers when
+ // inter_layer_pred_on is not set (off).
+ if (cpi->svc.number_temporal_layers > 1 &&
+ (cpi->svc.spatial_layer_id == 0 || inter_layer_pred_on == 0)) {
SVC *svc = &cpi->svc;
for (int tl = 0; tl < svc->number_temporal_layers; ++tl) {
int sl = svc->spatial_layer_id;
diff --git a/third_party/aom/av1/encoder/ratectrl.h b/third_party/aom/av1/encoder/ratectrl.h
index 6802ad42d0..5121a909f4 100644
--- a/third_party/aom/av1/encoder/ratectrl.h
+++ b/third_party/aom/av1/encoder/ratectrl.h
@@ -249,6 +249,9 @@ typedef struct {
// signals if number of blocks with motion is high
int percent_blocks_with_motion;
+ // signals percentage of 16x16 blocks that are inactive, via active_maps
+ int percent_blocks_inactive;
+
// Maximum value of source sad across all blocks of frame.
uint64_t max_block_source_sad;
diff --git a/third_party/aom/av1/encoder/speed_features.c b/third_party/aom/av1/encoder/speed_features.c
index 63d69cadc5..256b6fc9eb 100644
--- a/third_party/aom/av1/encoder/speed_features.c
+++ b/third_party/aom/av1/encoder/speed_features.c
@@ -1177,6 +1177,7 @@ static void set_good_speed_features_framesize_independent(
sf->mv_sf.subpel_search_method = SUBPEL_TREE_PRUNED_MORE;
sf->gm_sf.prune_zero_mv_with_sse = 2;
+ sf->gm_sf.downsample_level = 1;
sf->part_sf.simple_motion_search_prune_agg =
allow_screen_content_tools ? SIMPLE_AGG_LVL0 : SIMPLE_AGG_LVL2;
@@ -1282,6 +1283,8 @@ static void set_good_speed_features_framesize_independent(
sf->hl_sf.disable_extra_sc_testing = 1;
sf->hl_sf.second_alt_ref_filtering = 0;
+ sf->gm_sf.downsample_level = 2;
+
sf->inter_sf.prune_inter_modes_based_on_tpl = boosted ? 0 : 3;
sf->inter_sf.selective_ref_frame = 6;
sf->inter_sf.prune_single_ref = is_boosted_arf2_bwd_type ? 0 : 2;
@@ -1465,6 +1468,7 @@ static void set_rt_speed_feature_framesize_dependent(const AV1_COMP *const cpi,
if (is_360p_or_larger) {
sf->part_sf.fixed_partition_size = BLOCK_32X32;
sf->rt_sf.use_fast_fixed_part = 1;
+ sf->mv_sf.subpel_force_stop = HALF_PEL;
}
sf->rt_sf.increase_source_sad_thresh = 1;
sf->rt_sf.part_early_exit_zeromv = 2;
@@ -1472,6 +1476,7 @@ static void set_rt_speed_feature_framesize_dependent(const AV1_COMP *const cpi,
for (int i = 0; i < BLOCK_SIZES; ++i) {
sf->rt_sf.intra_y_mode_bsize_mask_nrd[i] = INTRA_DC;
}
+ sf->rt_sf.hybrid_intra_pickmode = 0;
}
// Setting for SVC, or when the ref_frame_config control is
// used to set the reference structure.
@@ -1572,13 +1577,13 @@ static void set_rt_speed_feature_framesize_dependent(const AV1_COMP *const cpi,
sf->rt_sf.screen_content_cdef_filter_qindex_thresh = 80;
sf->rt_sf.part_early_exit_zeromv = 1;
sf->rt_sf.nonrd_aggressive_skip = 1;
+ sf->rt_sf.thresh_active_maps_skip_lf_cdef = 90;
}
if (speed >= 11) {
sf->rt_sf.skip_lf_screen = 2;
sf->rt_sf.skip_cdef_sb = 2;
sf->rt_sf.part_early_exit_zeromv = 2;
sf->rt_sf.prune_palette_nonrd = 1;
- sf->rt_sf.set_zeromv_skip_based_on_source_sad = 2;
sf->rt_sf.increase_color_thresh_palette = 0;
}
sf->rt_sf.use_nonrd_altref_frame = 0;
@@ -1974,6 +1979,7 @@ static AOM_INLINE void init_gm_sf(GLOBAL_MOTION_SPEED_FEATURES *gm_sf) {
gm_sf->prune_ref_frame_for_gm_search = 0;
gm_sf->prune_zero_mv_with_sse = 0;
gm_sf->disable_gm_search_based_on_stats = 0;
+ gm_sf->downsample_level = 0;
gm_sf->num_refinement_steps = GM_MAX_REFINEMENT_STEPS;
}
@@ -2270,6 +2276,7 @@ static AOM_INLINE void init_rt_sf(REAL_TIME_SPEED_FEATURES *rt_sf) {
rt_sf->part_early_exit_zeromv = 0;
rt_sf->sse_early_term_inter_search = EARLY_TERM_DISABLED;
rt_sf->skip_lf_screen = 0;
+ rt_sf->thresh_active_maps_skip_lf_cdef = 100;
rt_sf->sad_based_adp_altref_lag = 0;
rt_sf->partition_direct_merging = 0;
rt_sf->var_part_based_on_qidx = 0;
diff --git a/third_party/aom/av1/encoder/speed_features.h b/third_party/aom/av1/encoder/speed_features.h
index 60c000e4f4..d59cb38a71 100644
--- a/third_party/aom/av1/encoder/speed_features.h
+++ b/third_party/aom/av1/encoder/speed_features.h
@@ -587,6 +587,9 @@ typedef struct GLOBAL_MOTION_SPEED_FEATURES {
// GF group
int disable_gm_search_based_on_stats;
+ // Downsampling pyramid level to use for global motion estimation
+ int downsample_level;
+
// Number of refinement steps to apply after initial model generation
int num_refinement_steps;
} GLOBAL_MOTION_SPEED_FEATURES;
@@ -1771,6 +1774,10 @@ typedef struct REAL_TIME_SPEED_FEATURES {
// where rc->high_source_sad = 0 (no slide-changes).
int skip_lf_screen;
+ // Threshold on the active/inactive region percent to disable
+ // the loopfilter and cdef. Setting to 100 disables this feature.
+ int thresh_active_maps_skip_lf_cdef;
+
// For nonrd: early exit out of variance partition that sets the
// block size to superblock size, and sets mode to zeromv-last skip.
// 0: disabled
diff --git a/third_party/aom/av1/encoder/superres_scale.c b/third_party/aom/av1/encoder/superres_scale.c
index 3b47909b15..41225d55ae 100644
--- a/third_party/aom/av1/encoder/superres_scale.c
+++ b/third_party/aom/av1/encoder/superres_scale.c
@@ -404,7 +404,7 @@ void av1_superres_post_encode(AV1_COMP *cpi) {
assert(!is_lossless_requested(&cpi->oxcf.rc_cfg));
assert(!cm->features.all_lossless);
- av1_superres_upscale(cm, NULL, cpi->image_pyramid_levels);
+ av1_superres_upscale(cm, NULL, cpi->alloc_pyramid);
// If regular resizing is occurring the source will need to be downscaled to
// match the upscaled superres resolution. Otherwise the original source is
diff --git a/third_party/aom/av1/encoder/svc_layercontext.c b/third_party/aom/av1/encoder/svc_layercontext.c
index 2c99cb89b8..33da3afbd3 100644
--- a/third_party/aom/av1/encoder/svc_layercontext.c
+++ b/third_party/aom/av1/encoder/svc_layercontext.c
@@ -203,8 +203,10 @@ void av1_update_temporal_layer_framerate(AV1_COMP *const cpi) {
}
}
-static AOM_INLINE bool check_ref_is_low_spatial_res_super_frame(
- int ref_frame, const SVC *svc, const RTC_REF *rtc_ref) {
+bool av1_check_ref_is_low_spatial_res_super_frame(AV1_COMP *const cpi,
+ int ref_frame) {
+ SVC *svc = &cpi->svc;
+ RTC_REF *const rtc_ref = &cpi->ppi->rtc_ref;
int ref_frame_idx = rtc_ref->ref_idx[ref_frame - 1];
return rtc_ref->buffer_time_index[ref_frame_idx] == svc->current_superframe &&
rtc_ref->buffer_spatial_layer[ref_frame_idx] <=
@@ -253,13 +255,13 @@ void av1_restore_layer_context(AV1_COMP *const cpi) {
// previous spatial layer(s) at the same time (current_superframe).
if (rtc_ref->set_ref_frame_config && svc->force_zero_mode_spatial_ref &&
cpi->sf.rt_sf.use_nonrd_pick_mode) {
- if (check_ref_is_low_spatial_res_super_frame(LAST_FRAME, svc, rtc_ref)) {
+ if (av1_check_ref_is_low_spatial_res_super_frame(cpi, LAST_FRAME)) {
svc->skip_mvsearch_last = 1;
}
- if (check_ref_is_low_spatial_res_super_frame(GOLDEN_FRAME, svc, rtc_ref)) {
+ if (av1_check_ref_is_low_spatial_res_super_frame(cpi, GOLDEN_FRAME)) {
svc->skip_mvsearch_gf = 1;
}
- if (check_ref_is_low_spatial_res_super_frame(ALTREF_FRAME, svc, rtc_ref)) {
+ if (av1_check_ref_is_low_spatial_res_super_frame(cpi, ALTREF_FRAME)) {
svc->skip_mvsearch_altref = 1;
}
}
diff --git a/third_party/aom/av1/encoder/svc_layercontext.h b/third_party/aom/av1/encoder/svc_layercontext.h
index 93118be2d4..d56ea77791 100644
--- a/third_party/aom/av1/encoder/svc_layercontext.h
+++ b/third_party/aom/av1/encoder/svc_layercontext.h
@@ -223,6 +223,21 @@ void av1_update_layer_context_change_config(struct AV1_COMP *const cpi,
*/
void av1_update_temporal_layer_framerate(struct AV1_COMP *const cpi);
+/*!\brief Checks whether a reference frame is a lower spatial layer from the
+ * same timestamp/superframe.
+ *
+ * \ingroup SVC
+ * \callgraph
+ * \callergraph
+ *
+ * \param[in] cpi Top level encoder structure
+ * \param[in] ref_frame Reference frame
+ *
+ * \return True if ref_frame is a lower spatial layer, otherwise false.
+ */
+bool av1_check_ref_is_low_spatial_res_super_frame(struct AV1_COMP *const cpi,
+ int ref_frame);
+
/*!\brief Prior to encoding the frame, set the layer context, for the current
layer to be encoded, to the cpi struct.
*
diff --git a/third_party/aom/av1/encoder/temporal_filter.c b/third_party/aom/av1/encoder/temporal_filter.c
index 7d4d25de6a..e8cc145030 100644
--- a/third_party/aom/av1/encoder/temporal_filter.c
+++ b/third_party/aom/av1/encoder/temporal_filter.c
@@ -463,12 +463,12 @@ static void tf_build_predictor(const YV12_BUFFER_CONFIG *ref_frame,
// Returns:
// Nothing will be returned. But the content to which `accum` and `pred`
// point will be modified.
-void tf_apply_temporal_filter_self(const YV12_BUFFER_CONFIG *ref_frame,
- const MACROBLOCKD *mbd,
- const BLOCK_SIZE block_size,
- const int mb_row, const int mb_col,
- const int num_planes, uint32_t *accum,
- uint16_t *count) {
+static void tf_apply_temporal_filter_self(const YV12_BUFFER_CONFIG *ref_frame,
+ const MACROBLOCKD *mbd,
+ const BLOCK_SIZE block_size,
+ const int mb_row, const int mb_col,
+ const int num_planes, uint32_t *accum,
+ uint16_t *count) {
// Block information.
const int mb_height = block_size_high[block_size];
const int mb_width = block_size_wide[block_size];
@@ -564,9 +564,10 @@ static INLINE void compute_square_diff(const uint8_t *ref, const int ref_offset,
// Returns:
// Nothing will be returned. But the content to which `luma_sse_sum` points
// will be modified.
-void compute_luma_sq_error_sum(uint32_t *square_diff, uint32_t *luma_sse_sum,
- int block_height, int block_width,
- int ss_x_shift, int ss_y_shift) {
+static void compute_luma_sq_error_sum(uint32_t *square_diff,
+ uint32_t *luma_sse_sum, int block_height,
+ int block_width, int ss_x_shift,
+ int ss_y_shift) {
for (int i = 0; i < block_height; ++i) {
for (int j = 0; j < block_width; ++j) {
for (int ii = 0; ii < (1 << ss_y_shift); ++ii) {
@@ -1456,7 +1457,7 @@ bool av1_tf_info_alloc(TEMPORAL_FILTER_INFO *tf_info, const AV1_COMP *cpi) {
oxcf->frm_dim_cfg.height, seq_params->subsampling_x,
seq_params->subsampling_y, seq_params->use_highbitdepth,
cpi->oxcf.border_in_pixels, cm->features.byte_alignment, NULL, NULL,
- NULL, cpi->image_pyramid_levels, 0)) {
+ NULL, cpi->alloc_pyramid, 0)) {
return false;
}
}
diff --git a/third_party/aom/av1/encoder/temporal_filter.h b/third_party/aom/av1/encoder/temporal_filter.h
index 6504b91b66..a40fb039b9 100644
--- a/third_party/aom/av1/encoder/temporal_filter.h
+++ b/third_party/aom/av1/encoder/temporal_filter.h
@@ -14,6 +14,8 @@
#include <stdbool.h>
+#include "aom_util/aom_pthread.h"
+
#ifdef __cplusplus
extern "C" {
#endif
diff --git a/third_party/aom/av1/encoder/tpl_model.c b/third_party/aom/av1/encoder/tpl_model.c
index ca60e4981e..86f5485a26 100644
--- a/third_party/aom/av1/encoder/tpl_model.c
+++ b/third_party/aom/av1/encoder/tpl_model.c
@@ -19,6 +19,7 @@
#include "config/aom_scale_rtcd.h"
#include "aom/aom_codec.h"
+#include "aom_util/aom_pthread.h"
#include "av1/common/av1_common_int.h"
#include "av1/common/enums.h"
@@ -193,7 +194,7 @@ void av1_setup_tpl_buffers(AV1_PRIMARY *const ppi,
&tpl_data->tpl_rec_pool[frame], width, height,
seq_params->subsampling_x, seq_params->subsampling_y,
seq_params->use_highbitdepth, tpl_data->border_in_pixels,
- byte_alignment, 0, alloc_y_plane_only))
+ byte_alignment, false, alloc_y_plane_only))
aom_internal_error(&ppi->error, AOM_CODEC_MEM_ERROR,
"Failed to allocate frame buffer");
}
diff --git a/third_party/aom/av1/encoder/tpl_model.h b/third_party/aom/av1/encoder/tpl_model.h
index bcd58216c5..0150c702f9 100644
--- a/third_party/aom/av1/encoder/tpl_model.h
+++ b/third_party/aom/av1/encoder/tpl_model.h
@@ -30,6 +30,7 @@ struct TPL_INFO;
#include "config/aom_config.h"
#include "aom_scale/yv12config.h"
+#include "aom_util/aom_pthread.h"
#include "av1/common/mv.h"
#include "av1/common/scale.h"
diff --git a/third_party/aom/av1/encoder/tune_butteraugli.c b/third_party/aom/av1/encoder/tune_butteraugli.c
index 92fc4b2a92..4381af6a8b 100644
--- a/third_party/aom/av1/encoder/tune_butteraugli.c
+++ b/third_party/aom/av1/encoder/tune_butteraugli.c
@@ -209,7 +209,7 @@ void av1_setup_butteraugli_source(AV1_COMP *cpi) {
if (dst->buffer_alloc_sz == 0) {
aom_alloc_frame_buffer(
dst, width, height, ss_x, ss_y, cm->seq_params->use_highbitdepth,
- cpi->oxcf.border_in_pixels, cm->features.byte_alignment, 0, 0);
+ cpi->oxcf.border_in_pixels, cm->features.byte_alignment, false, 0);
}
av1_copy_and_extend_frame(cpi->source, dst);
@@ -218,7 +218,7 @@ void av1_setup_butteraugli_source(AV1_COMP *cpi) {
aom_alloc_frame_buffer(
resized_dst, width / resize_factor, height / resize_factor, ss_x, ss_y,
cm->seq_params->use_highbitdepth, cpi->oxcf.border_in_pixels,
- cm->features.byte_alignment, 0, 0);
+ cm->features.byte_alignment, false, 0);
}
if (!av1_resize_and_extend_frame_nonnormative(
cpi->source, resized_dst, bit_depth, av1_num_planes(cm))) {
@@ -244,7 +244,7 @@ void av1_setup_butteraugli_rdmult_and_restore_source(AV1_COMP *cpi, double K) {
aom_alloc_frame_buffer(
&resized_recon, width / resize_factor, height / resize_factor, ss_x, ss_y,
cm->seq_params->use_highbitdepth, cpi->oxcf.border_in_pixels,
- cm->features.byte_alignment, 0, 0);
+ cm->features.byte_alignment, false, 0);
copy_img(&cpi->common.cur_frame->buf, &resized_recon, width / resize_factor,
height / resize_factor);
@@ -267,12 +267,12 @@ void av1_setup_butteraugli_rdmult(AV1_COMP *cpi) {
cpi->source = av1_realloc_and_scale_if_required(
cm, cpi->unscaled_source, &cpi->scaled_source, cm->features.interp_filter,
- 0, false, false, cpi->oxcf.border_in_pixels, cpi->image_pyramid_levels);
+ 0, false, false, cpi->oxcf.border_in_pixels, cpi->alloc_pyramid);
if (cpi->unscaled_last_source != NULL) {
cpi->last_source = av1_realloc_and_scale_if_required(
cm, cpi->unscaled_last_source, &cpi->scaled_last_source,
cm->features.interp_filter, 0, false, false, cpi->oxcf.border_in_pixels,
- cpi->image_pyramid_levels);
+ cpi->alloc_pyramid);
}
av1_setup_butteraugli_source(cpi);
diff --git a/third_party/aom/av1/encoder/tune_vmaf.c b/third_party/aom/av1/encoder/tune_vmaf.c
index 4e5ffa387c..91db3db726 100644
--- a/third_party/aom/av1/encoder/tune_vmaf.c
+++ b/third_party/aom/av1/encoder/tune_vmaf.c
@@ -288,10 +288,10 @@ static AOM_INLINE void gaussian_blur(const int bit_depth,
}
}
-static AOM_INLINE double cal_approx_vmaf(const AV1_COMP *const cpi,
- double source_variance,
- YV12_BUFFER_CONFIG *const source,
- YV12_BUFFER_CONFIG *const sharpened) {
+static AOM_INLINE double cal_approx_vmaf(
+ const AV1_COMP *const cpi, double source_variance,
+ const YV12_BUFFER_CONFIG *const source,
+ const YV12_BUFFER_CONFIG *const sharpened) {
const int bit_depth = cpi->td.mb.e_mbd.bd;
const bool cal_vmaf_neg =
cpi->oxcf.tune_cfg.tuning == AOM_TUNE_VMAF_NEG_MAX_GAIN;
@@ -305,11 +305,11 @@ static AOM_INLINE double cal_approx_vmaf(const AV1_COMP *const cpi,
}
static double find_best_frame_unsharp_amount_loop(
- const AV1_COMP *const cpi, YV12_BUFFER_CONFIG *const source,
- YV12_BUFFER_CONFIG *const blurred, YV12_BUFFER_CONFIG *const sharpened,
- double best_vmaf, const double baseline_variance,
- const double unsharp_amount_start, const double step_size,
- const int max_loop_count, const double max_amount) {
+ const AV1_COMP *const cpi, const YV12_BUFFER_CONFIG *const source,
+ const YV12_BUFFER_CONFIG *const blurred,
+ const YV12_BUFFER_CONFIG *const sharpened, double best_vmaf,
+ const double baseline_variance, const double unsharp_amount_start,
+ const double step_size, const int max_loop_count, const double max_amount) {
const double min_amount = 0.0;
int loop_count = 0;
double approx_vmaf = best_vmaf;
@@ -328,13 +328,11 @@ static double find_best_frame_unsharp_amount_loop(
return AOMMIN(max_amount, AOMMAX(unsharp_amount, min_amount));
}
-static double find_best_frame_unsharp_amount(const AV1_COMP *const cpi,
- YV12_BUFFER_CONFIG *const source,
- YV12_BUFFER_CONFIG *const blurred,
- const double unsharp_amount_start,
- const double step_size,
- const int max_loop_count,
- const double max_filter_amount) {
+static double find_best_frame_unsharp_amount(
+ const AV1_COMP *const cpi, const YV12_BUFFER_CONFIG *const source,
+ const YV12_BUFFER_CONFIG *const blurred, const double unsharp_amount_start,
+ const double step_size, const int max_loop_count,
+ const double max_filter_amount) {
const AV1_COMMON *const cm = &cpi->common;
const int width = source->y_width;
const int height = source->y_height;
@@ -343,7 +341,7 @@ static double find_best_frame_unsharp_amount(const AV1_COMP *const cpi,
aom_alloc_frame_buffer(
&sharpened, width, height, source->subsampling_x, source->subsampling_y,
cm->seq_params->use_highbitdepth, cpi->oxcf.border_in_pixels,
- cm->features.byte_alignment, 0, 0);
+ cm->features.byte_alignment, false, 0);
const double baseline_variance = frame_average_variance(cpi, source);
double unsharp_amount;
@@ -376,7 +374,7 @@ static double find_best_frame_unsharp_amount(const AV1_COMP *const cpi,
}
void av1_vmaf_neg_preprocessing(AV1_COMP *const cpi,
- YV12_BUFFER_CONFIG *const source) {
+ const YV12_BUFFER_CONFIG *const source) {
const AV1_COMMON *const cm = &cpi->common;
const int bit_depth = cpi->td.mb.e_mbd.bd;
const int width = source->y_width;
@@ -395,7 +393,7 @@ void av1_vmaf_neg_preprocessing(AV1_COMP *const cpi,
aom_alloc_frame_buffer(
&blurred, width, height, source->subsampling_x, source->subsampling_y,
cm->seq_params->use_highbitdepth, cpi->oxcf.border_in_pixels,
- cm->features.byte_alignment, 0, 0);
+ cm->features.byte_alignment, false, 0);
gaussian_blur(bit_depth, source, &blurred);
unsharp(cpi, source, &blurred, source, best_frame_unsharp_amount);
@@ -403,7 +401,7 @@ void av1_vmaf_neg_preprocessing(AV1_COMP *const cpi,
}
void av1_vmaf_frame_preprocessing(AV1_COMP *const cpi,
- YV12_BUFFER_CONFIG *const source) {
+ const YV12_BUFFER_CONFIG *const source) {
const AV1_COMMON *const cm = &cpi->common;
const int bit_depth = cpi->td.mb.e_mbd.bd;
const int width = source->y_width;
@@ -415,11 +413,11 @@ void av1_vmaf_frame_preprocessing(AV1_COMP *const cpi,
aom_alloc_frame_buffer(
&source_extended, width, height, source->subsampling_x,
source->subsampling_y, cm->seq_params->use_highbitdepth,
- cpi->oxcf.border_in_pixels, cm->features.byte_alignment, 0, 0);
+ cpi->oxcf.border_in_pixels, cm->features.byte_alignment, false, 0);
aom_alloc_frame_buffer(
&blurred, width, height, source->subsampling_x, source->subsampling_y,
cm->seq_params->use_highbitdepth, cpi->oxcf.border_in_pixels,
- cm->features.byte_alignment, 0, 0);
+ cm->features.byte_alignment, false, 0);
av1_copy_and_extend_frame(source, &source_extended);
gaussian_blur(bit_depth, &source_extended, &blurred);
@@ -442,7 +440,7 @@ void av1_vmaf_frame_preprocessing(AV1_COMP *const cpi,
}
void av1_vmaf_blk_preprocessing(AV1_COMP *const cpi,
- YV12_BUFFER_CONFIG *const source) {
+ const YV12_BUFFER_CONFIG *const source) {
const AV1_COMMON *const cm = &cpi->common;
const int width = source->y_width;
const int height = source->y_height;
@@ -455,11 +453,11 @@ void av1_vmaf_blk_preprocessing(AV1_COMP *const cpi,
memset(&source_extended, 0, sizeof(source_extended));
aom_alloc_frame_buffer(
&blurred, width, height, ss_x, ss_y, cm->seq_params->use_highbitdepth,
- cpi->oxcf.border_in_pixels, cm->features.byte_alignment, 0, 0);
+ cpi->oxcf.border_in_pixels, cm->features.byte_alignment, false, 0);
aom_alloc_frame_buffer(&source_extended, width, height, ss_x, ss_y,
cm->seq_params->use_highbitdepth,
cpi->oxcf.border_in_pixels,
- cm->features.byte_alignment, 0, 0);
+ cm->features.byte_alignment, false, 0);
av1_copy_and_extend_frame(source, &source_extended);
gaussian_blur(bit_depth, &source_extended, &blurred);
@@ -495,11 +493,11 @@ void av1_vmaf_blk_preprocessing(AV1_COMP *const cpi,
aom_alloc_frame_buffer(&source_block, block_w, block_h, ss_x, ss_y,
cm->seq_params->use_highbitdepth,
cpi->oxcf.border_in_pixels,
- cm->features.byte_alignment, 0, 0);
+ cm->features.byte_alignment, false, 0);
aom_alloc_frame_buffer(&blurred_block, block_w, block_h, ss_x, ss_y,
cm->seq_params->use_highbitdepth,
cpi->oxcf.border_in_pixels,
- cm->features.byte_alignment, 0, 0);
+ cm->features.byte_alignment, false, 0);
for (int row = 0; row < num_rows; ++row) {
for (int col = 0; col < num_cols; ++col) {
@@ -622,7 +620,7 @@ void av1_set_mb_vmaf_rdmult_scaling(AV1_COMP *cpi) {
aom_alloc_frame_buffer(
&resized_source, y_width / resize_factor, y_height / resize_factor, ss_x,
ss_y, cm->seq_params->use_highbitdepth, cpi->oxcf.border_in_pixels,
- cm->features.byte_alignment, 0, 0);
+ cm->features.byte_alignment, false, 0);
if (!av1_resize_and_extend_frame_nonnormative(
cpi->source, &resized_source, bit_depth, av1_num_planes(cm))) {
aom_internal_error(cm->error, AOM_CODEC_MEM_ERROR,
@@ -643,7 +641,7 @@ void av1_set_mb_vmaf_rdmult_scaling(AV1_COMP *cpi) {
aom_alloc_frame_buffer(&blurred, resized_y_width, resized_y_height, ss_x,
ss_y, cm->seq_params->use_highbitdepth,
cpi->oxcf.border_in_pixels,
- cm->features.byte_alignment, 0, 0);
+ cm->features.byte_alignment, false, 0);
gaussian_blur(bit_depth, &resized_source, &blurred);
YV12_BUFFER_CONFIG recon;
@@ -651,7 +649,7 @@ void av1_set_mb_vmaf_rdmult_scaling(AV1_COMP *cpi) {
aom_alloc_frame_buffer(&recon, resized_y_width, resized_y_height, ss_x, ss_y,
cm->seq_params->use_highbitdepth,
cpi->oxcf.border_in_pixels,
- cm->features.byte_alignment, 0, 0);
+ cm->features.byte_alignment, false, 0);
aom_yv12_copy_frame(&resized_source, &recon, 1);
VmafContext *vmaf_context;
@@ -830,15 +828,15 @@ static double calc_vmaf_motion_score(const AV1_COMP *const cpi,
aom_alloc_frame_buffer(&blurred_cur, y_width, y_height, ss_x, ss_y,
cm->seq_params->use_highbitdepth,
cpi->oxcf.border_in_pixels,
- cm->features.byte_alignment, 0, 0);
+ cm->features.byte_alignment, false, 0);
aom_alloc_frame_buffer(&blurred_last, y_width, y_height, ss_x, ss_y,
cm->seq_params->use_highbitdepth,
cpi->oxcf.border_in_pixels,
- cm->features.byte_alignment, 0, 0);
+ cm->features.byte_alignment, false, 0);
aom_alloc_frame_buffer(&blurred_next, y_width, y_height, ss_x, ss_y,
cm->seq_params->use_highbitdepth,
cpi->oxcf.border_in_pixels,
- cm->features.byte_alignment, 0, 0);
+ cm->features.byte_alignment, false, 0);
gaussian_blur(bit_depth, cur, &blurred_cur);
gaussian_blur(bit_depth, last, &blurred_last);
@@ -881,8 +879,8 @@ static double calc_vmaf_motion_score(const AV1_COMP *const cpi,
}
static AOM_INLINE void get_neighbor_frames(const AV1_COMP *const cpi,
- YV12_BUFFER_CONFIG **last,
- YV12_BUFFER_CONFIG **next) {
+ const YV12_BUFFER_CONFIG **last,
+ const YV12_BUFFER_CONFIG **next) {
const AV1_COMMON *const cm = &cpi->common;
const GF_GROUP *gf_group = &cpi->ppi->gf_group;
const int src_index =
@@ -920,7 +918,7 @@ int av1_get_vmaf_base_qindex(const AV1_COMP *const cpi, int current_qindex) {
if (approx_sse < sse_threshold || approx_dvmaf < vmaf_threshold) {
return current_qindex;
}
- YV12_BUFFER_CONFIG *cur_buf = cpi->source;
+ const YV12_BUFFER_CONFIG *cur_buf = cpi->source;
if (cm->show_frame == 0) {
const int src_index = gf_group->arf_src_offset[cpi->gf_frame_index];
struct lookahead_entry *cur_entry = av1_lookahead_peek(
@@ -929,7 +927,7 @@ int av1_get_vmaf_base_qindex(const AV1_COMP *const cpi, int current_qindex) {
}
assert(cur_buf);
- YV12_BUFFER_CONFIG *next_buf, *last_buf;
+ const YV12_BUFFER_CONFIG *next_buf, *last_buf;
get_neighbor_frames(cpi, &last_buf, &next_buf);
assert(last_buf);
@@ -954,8 +952,8 @@ int av1_get_vmaf_base_qindex(const AV1_COMP *const cpi, int current_qindex) {
static AOM_INLINE double cal_approx_score(
AV1_COMP *const cpi, double src_variance, double new_variance,
- double src_score, YV12_BUFFER_CONFIG *const src,
- YV12_BUFFER_CONFIG *const recon_sharpened) {
+ double src_score, const YV12_BUFFER_CONFIG *const src,
+ const YV12_BUFFER_CONFIG *const recon_sharpened) {
double score;
const uint32_t bit_depth = cpi->td.mb.e_mbd.bd;
const bool cal_vmaf_neg =
@@ -967,11 +965,12 @@ static AOM_INLINE double cal_approx_score(
static double find_best_frame_unsharp_amount_loop_neg(
AV1_COMP *const cpi, double src_variance, double base_score,
- YV12_BUFFER_CONFIG *const src, YV12_BUFFER_CONFIG *const recon,
- YV12_BUFFER_CONFIG *const ref, YV12_BUFFER_CONFIG *const src_blurred,
- YV12_BUFFER_CONFIG *const recon_blurred,
- YV12_BUFFER_CONFIG *const src_sharpened,
- YV12_BUFFER_CONFIG *const recon_sharpened, FULLPEL_MV *mvs,
+ const YV12_BUFFER_CONFIG *const src, const YV12_BUFFER_CONFIG *const recon,
+ const YV12_BUFFER_CONFIG *const ref,
+ const YV12_BUFFER_CONFIG *const src_blurred,
+ const YV12_BUFFER_CONFIG *const recon_blurred,
+ const YV12_BUFFER_CONFIG *const src_sharpened,
+ const YV12_BUFFER_CONFIG *const recon_sharpened, FULLPEL_MV *mvs,
double best_score, const double unsharp_amount_start,
const double step_size, const int max_loop_count, const double max_amount) {
const double min_amount = 0.0;
@@ -999,8 +998,8 @@ static double find_best_frame_unsharp_amount_loop_neg(
}
static double find_best_frame_unsharp_amount_neg(
- AV1_COMP *const cpi, YV12_BUFFER_CONFIG *const src,
- YV12_BUFFER_CONFIG *const recon, YV12_BUFFER_CONFIG *const ref,
+ AV1_COMP *const cpi, const YV12_BUFFER_CONFIG *const src,
+ const YV12_BUFFER_CONFIG *const recon, const YV12_BUFFER_CONFIG *const ref,
double base_score, const double unsharp_amount_start,
const double step_size, const int max_loop_count,
const double max_filter_amount) {
@@ -1023,18 +1022,18 @@ static double find_best_frame_unsharp_amount_neg(
aom_alloc_frame_buffer(&recon_sharpened, width, height, ss_x, ss_y,
cm->seq_params->use_highbitdepth,
cpi->oxcf.border_in_pixels,
- cm->features.byte_alignment, 0, 0);
+ cm->features.byte_alignment, false, 0);
aom_alloc_frame_buffer(&src_sharpened, width, height, ss_x, ss_y,
cm->seq_params->use_highbitdepth,
cpi->oxcf.border_in_pixels,
- cm->features.byte_alignment, 0, 0);
+ cm->features.byte_alignment, false, 0);
aom_alloc_frame_buffer(&recon_blurred, width, height, ss_x, ss_y,
cm->seq_params->use_highbitdepth,
cpi->oxcf.border_in_pixels,
- cm->features.byte_alignment, 0, 0);
+ cm->features.byte_alignment, false, 0);
aom_alloc_frame_buffer(
&src_blurred, width, height, ss_x, ss_y, cm->seq_params->use_highbitdepth,
- cpi->oxcf.border_in_pixels, cm->features.byte_alignment, 0, 0);
+ cpi->oxcf.border_in_pixels, cm->features.byte_alignment, false, 0);
gaussian_blur(bit_depth, recon, &recon_blurred);
gaussian_blur(bit_depth, src, &src_blurred);
@@ -1076,8 +1075,8 @@ static double find_best_frame_unsharp_amount_neg(
}
void av1_update_vmaf_curve(AV1_COMP *cpi) {
- YV12_BUFFER_CONFIG *source = cpi->source;
- YV12_BUFFER_CONFIG *recon = &cpi->common.cur_frame->buf;
+ const YV12_BUFFER_CONFIG *source = cpi->source;
+ const YV12_BUFFER_CONFIG *recon = &cpi->common.cur_frame->buf;
const int bit_depth = cpi->td.mb.e_mbd.bd;
const GF_GROUP *const gf_group = &cpi->ppi->gf_group;
const int layer_depth =
@@ -1099,7 +1098,7 @@ void av1_update_vmaf_curve(AV1_COMP *cpi) {
}
if (cpi->oxcf.tune_cfg.tuning == AOM_TUNE_VMAF_NEG_MAX_GAIN) {
- YV12_BUFFER_CONFIG *last, *next;
+ const YV12_BUFFER_CONFIG *last, *next;
get_neighbor_frames(cpi, &last, &next);
double best_unsharp_amount_start =
get_layer_value(cpi->vmaf_info.last_frame_unsharp_amount, layer_depth);
diff --git a/third_party/aom/av1/encoder/tune_vmaf.h b/third_party/aom/av1/encoder/tune_vmaf.h
index a04a29e6fe..404fd1029a 100644
--- a/third_party/aom/av1/encoder/tune_vmaf.h
+++ b/third_party/aom/av1/encoder/tune_vmaf.h
@@ -43,13 +43,13 @@ typedef struct {
struct AV1_COMP;
void av1_vmaf_blk_preprocessing(struct AV1_COMP *cpi,
- YV12_BUFFER_CONFIG *source);
+ const YV12_BUFFER_CONFIG *source);
void av1_vmaf_frame_preprocessing(struct AV1_COMP *cpi,
- YV12_BUFFER_CONFIG *source);
+ const YV12_BUFFER_CONFIG *source);
void av1_vmaf_neg_preprocessing(struct AV1_COMP *cpi,
- YV12_BUFFER_CONFIG *source);
+ const YV12_BUFFER_CONFIG *source);
void av1_set_mb_vmaf_rdmult_scaling(struct AV1_COMP *cpi);
diff --git a/third_party/aom/av1/encoder/tx_search.c b/third_party/aom/av1/encoder/tx_search.c
index 7292c01191..5dcc08c0ff 100644
--- a/third_party/aom/av1/encoder/tx_search.c
+++ b/third_party/aom/av1/encoder/tx_search.c
@@ -1109,13 +1109,11 @@ static INLINE void dist_block_tx_domain(MACROBLOCK *x, int plane, int block,
*out_sse = RIGHT_SIGNED_SHIFT(this_sse, shift);
}
-uint16_t prune_txk_type_separ(const AV1_COMP *cpi, MACROBLOCK *x, int plane,
- int block, TX_SIZE tx_size, int blk_row,
- int blk_col, BLOCK_SIZE plane_bsize, int *txk_map,
- int16_t allowed_tx_mask, int prune_factor,
- const TXB_CTX *const txb_ctx,
- int reduced_tx_set_used, int64_t ref_best_rd,
- int num_sel) {
+static uint16_t prune_txk_type_separ(
+ const AV1_COMP *cpi, MACROBLOCK *x, int plane, int block, TX_SIZE tx_size,
+ int blk_row, int blk_col, BLOCK_SIZE plane_bsize, int *txk_map,
+ int16_t allowed_tx_mask, int prune_factor, const TXB_CTX *const txb_ctx,
+ int reduced_tx_set_used, int64_t ref_best_rd, int num_sel) {
const AV1_COMMON *cm = &cpi->common;
MACROBLOCKD *xd = &x->e_mbd;
@@ -1255,11 +1253,12 @@ uint16_t prune_txk_type_separ(const AV1_COMP *cpi, MACROBLOCK *x, int plane,
return prune;
}
-uint16_t prune_txk_type(const AV1_COMP *cpi, MACROBLOCK *x, int plane,
- int block, TX_SIZE tx_size, int blk_row, int blk_col,
- BLOCK_SIZE plane_bsize, int *txk_map,
- uint16_t allowed_tx_mask, int prune_factor,
- const TXB_CTX *const txb_ctx, int reduced_tx_set_used) {
+static uint16_t prune_txk_type(const AV1_COMP *cpi, MACROBLOCK *x, int plane,
+ int block, TX_SIZE tx_size, int blk_row,
+ int blk_col, BLOCK_SIZE plane_bsize,
+ int *txk_map, uint16_t allowed_tx_mask,
+ int prune_factor, const TXB_CTX *const txb_ctx,
+ int reduced_tx_set_used) {
const AV1_COMMON *cm = &cpi->common;
MACROBLOCKD *xd = &x->e_mbd;
int tx_type;
diff --git a/third_party/aom/av1/encoder/x86/av1_fwd_txfm_sse2.c b/third_party/aom/av1/encoder/x86/av1_fwd_txfm_sse2.c
index a4def754b0..31cc37db7a 100644
--- a/third_party/aom/av1/encoder/x86/av1_fwd_txfm_sse2.c
+++ b/third_party/aom/av1/encoder/x86/av1_fwd_txfm_sse2.c
@@ -2638,6 +2638,11 @@ void av1_lowbd_fwd_txfm2d_16x64_sse2(const int16_t *input, int32_t *output,
}
}
+// Include top-level function only for 32-bit x86, to support Valgrind.
+// For normal use, we require SSE4.1, so av1_lowbd_fwd_txfm_sse4_1 will be used
+// instead of this function. However, 32-bit Valgrind does not support SSE4.1,
+// so we include a fallback to SSE2 to improve performance.
+#if AOM_ARCH_X86
static FwdTxfm2dFunc fwd_txfm2d_func_ls[TX_SIZES_ALL] = {
av1_lowbd_fwd_txfm2d_4x4_sse2, // 4x4 transform
av1_lowbd_fwd_txfm2d_8x8_sse2, // 8x8 transform
@@ -2671,3 +2676,4 @@ void av1_lowbd_fwd_txfm_sse2(const int16_t *src_diff, tran_low_t *coeff,
fwd_txfm2d_func(src_diff, coeff, diff_stride, txfm_param->tx_type,
txfm_param->bd);
}
+#endif // AOM_ARCH_X86
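The Valgrind note above boils down to a per-transform-size dispatch table that is only compiled when the SSE2 fallback can actually be selected. A minimal sketch of that pattern follows; the type, enum and function names are illustrative stand-ins, not the actual libaom declarations.

/* Sketch of a per-tx-size dispatch table (illustrative names only). */
#include <stddef.h>
#include <stdint.h>

typedef void (*SkFwdTxfmFunc)(const int16_t *input, int32_t *output,
                              int stride);

enum { SK_TX_4X4, SK_TX_8X8, SK_TX_16X16, SK_TX_SIZES };

static void sk_fwd_4x4(const int16_t *in, int32_t *out, int stride) {
  (void)in; (void)out; (void)stride; /* a real entry would transform the block */
}
static void sk_fwd_8x8(const int16_t *in, int32_t *out, int stride) {
  (void)in; (void)out; (void)stride;
}
static void sk_fwd_16x16(const int16_t *in, int32_t *out, int stride) {
  (void)in; (void)out; (void)stride;
}

/* Table indexed by transform size, compiled only when the fallback path can
   actually be chosen (cf. the AOM_ARCH_X86 guard above). */
static const SkFwdTxfmFunc sk_fwd_table[SK_TX_SIZES] = { sk_fwd_4x4, sk_fwd_8x8,
                                                         sk_fwd_16x16 };

static void sk_fwd_txfm(int tx_size, const int16_t *src, int32_t *coeff,
                        int stride) {
  const SkFwdTxfmFunc fn = sk_fwd_table[tx_size];
  if (fn != NULL) fn(src, coeff, stride); /* some sizes may have no entry */
}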
diff --git a/third_party/aom/av1/encoder/x86/cnn_avx2.c b/third_party/aom/av1/encoder/x86/cnn_avx2.c
index ee93b3d5a0..9c26a56641 100644
--- a/third_party/aom/av1/encoder/x86/cnn_avx2.c
+++ b/third_party/aom/av1/encoder/x86/cnn_avx2.c
@@ -466,7 +466,7 @@ static INLINE void cnn_convolve_no_maxpool_padding_valid_layer2_avx2(
// As per the layer config set by av1_intra_mode_cnn_partition_cnn_config,
// the filter_width and filter_height are equal to 2 for layer >= 1. So
// convolution happens at 2x2 for layer >= 1.
-void cnn_convolve_no_maxpool_padding_valid_2x2_avx2(
+static void cnn_convolve_no_maxpool_padding_valid_2x2_avx2(
const float **input, int in_width, int in_height, int in_stride,
const CNN_LAYER_CONFIG *const layer_config, float **output, int out_stride,
int start_idx, const int cstep, const int channel_step) {
diff --git a/third_party/aom/build/cmake/aom_config_defaults.cmake b/third_party/aom/build/cmake/aom_config_defaults.cmake
index da7de4b0f4..980dfb9327 100644
--- a/third_party/aom/build/cmake/aom_config_defaults.cmake
+++ b/third_party/aom/build/cmake/aom_config_defaults.cmake
@@ -37,6 +37,7 @@ set_aom_detect_var(HAVE_NEON_DOTPROD 0
set_aom_detect_var(HAVE_NEON_I8MM 0
"Enables Armv8.2-A Neon i8mm intrinsics optimizations.")
set_aom_detect_var(HAVE_SVE 0 "Enables Armv8.2-A SVE intrinsics optimizations.")
+set_aom_detect_var(HAVE_SVE2 0 "Enables Armv9-A SVE2 intrinsics optimizations.")
# PPC feature flags.
set_aom_detect_var(HAVE_VSX 0 "Enables VSX optimizations.")
@@ -84,6 +85,9 @@ set_aom_config_var(CONFIG_AV1_TEMPORAL_DENOISING 0
set_aom_config_var(CONFIG_MULTITHREAD 1 "Multithread support.")
set_aom_config_var(CONFIG_OS_SUPPORT 0 "Internal flag.")
set_aom_config_var(CONFIG_PIC 0 "Build with PIC enabled.")
+set_aom_config_var(CONFIG_QUANT_MATRIX 1
+ "Build with quantization matrices for AV1 encoder."
+ "AV1 decoder is always built with quantization matrices.")
set_aom_config_var(CONFIG_REALTIME_ONLY 0
"Build for RTC-only. See aomcx.h for all disabled features.")
set_aom_config_var(CONFIG_RUNTIME_CPU_DETECT 1 "Runtime CPU detection support.")
@@ -209,6 +213,8 @@ set_aom_option_var(
"Enables Armv8.2-A Neon i8mm optimizations on AArch64 targets." ON)
set_aom_option_var(ENABLE_SVE
"Enables Armv8.2-A SVE optimizations on AArch64 targets." ON)
+set_aom_option_var(ENABLE_SVE2
+ "Enables Armv9-A SVE2 optimizations on AArch64 targets." ON)
# VSX intrinsics flags.
set_aom_option_var(ENABLE_VSX "Enables VSX optimizations on PowerPC targets."
diff --git a/third_party/aom/build/cmake/aom_configure.cmake b/third_party/aom/build/cmake/aom_configure.cmake
index 917e7cac5d..304d90d1e1 100644
--- a/third_party/aom/build/cmake/aom_configure.cmake
+++ b/third_party/aom/build/cmake/aom_configure.cmake
@@ -320,6 +320,10 @@ else()
# minimum supported C++ version. If Clang is using this Standard Library
# implementation, it cannot target C++11.
require_cxx_flag_nomsvc("-std=c++14" YES)
+ elseif(CYGWIN AND CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
+ # The GNU C++ compiler in Cygwin needs the -std=gnu++11 flag to make the
+ # POSIX function declarations visible in the Standard C Library headers.
+ require_cxx_flag_nomsvc("-std=gnu++11" YES)
else()
require_cxx_flag_nomsvc("-std=c++11" YES)
endif()
@@ -393,6 +397,13 @@ else()
endif()
add_compiler_flag_if_supported("-D_LARGEFILE_SOURCE")
add_compiler_flag_if_supported("-D_FILE_OFFSET_BITS=64")
+
+ # Do not allow implicit vector type conversions on Clang builds (this is
+ # already the default on GCC builds).
+ if(CMAKE_C_COMPILER_ID MATCHES "Clang")
+ # Clang 8.0.1 (in Cygwin) doesn't support -flax-vector-conversions=none.
+ add_compiler_flag_if_supported("-flax-vector-conversions=none")
+ endif()
endif()
# Prior to r23, or with ANDROID_USE_LEGACY_TOOLCHAIN_FILE set,
diff --git a/third_party/aom/build/cmake/compiler_flags.cmake b/third_party/aom/build/cmake/compiler_flags.cmake
index f008b964f5..3afcd50b5c 100644
--- a/third_party/aom/build/cmake/compiler_flags.cmake
+++ b/third_party/aom/build/cmake/compiler_flags.cmake
@@ -176,11 +176,11 @@ function(require_cxx_flag cxx_flag update_cxx_flags)
endif()
unset(HAVE_CXX_FLAG CACHE)
- message("Checking C compiler flag support for: " ${cxx_flag})
+ message("Checking C++ compiler flag support for: " ${cxx_flag})
check_cxx_compiler_flag("${cxx_flag}" HAVE_CXX_FLAG)
if(NOT HAVE_CXX_FLAG)
message(
- FATAL_ERROR "${PROJECT_NAME} requires support for C flag: ${cxx_flag}.")
+ FATAL_ERROR "${PROJECT_NAME} requires support for C++ flag: ${cxx_flag}.")
endif()
if(NOT "${AOM_EXE_LINKER_FLAGS}" STREQUAL "")
diff --git a/third_party/aom/build/cmake/cpu.cmake b/third_party/aom/build/cmake/cpu.cmake
index a9b7a67070..489dbcbf44 100644
--- a/third_party/aom/build/cmake/cpu.cmake
+++ b/third_party/aom/build/cmake/cpu.cmake
@@ -14,11 +14,12 @@ if("${AOM_TARGET_CPU}" STREQUAL "arm64")
set(AOM_ARCH_AARCH64 1)
set(RTCD_ARCH_ARM "yes")
- set(ARM64_FLAVORS "NEON;ARM_CRC32;NEON_DOTPROD;NEON_I8MM;SVE")
+ set(ARM64_FLAVORS "NEON;ARM_CRC32;NEON_DOTPROD;NEON_I8MM;SVE;SVE2")
set(AOM_ARM_CRC32_DEFAULT_FLAG "-march=armv8-a+crc")
set(AOM_NEON_DOTPROD_DEFAULT_FLAG "-march=armv8.2-a+dotprod")
set(AOM_NEON_I8MM_DEFAULT_FLAG "-march=armv8.2-a+dotprod+i8mm")
set(AOM_SVE_DEFAULT_FLAG "-march=armv8.2-a+dotprod+i8mm+sve")
+ set(AOM_SVE2_DEFAULT_FLAG "-march=armv9-a+sve2") # SVE2 is a v9-only feature
# Check that the compiler flag to enable each flavor is supported by the
# compiler. This may not be the case for new architecture features on old
@@ -26,16 +27,27 @@ if("${AOM_TARGET_CPU}" STREQUAL "arm64")
foreach(flavor ${ARM64_FLAVORS})
if(ENABLE_${flavor} AND NOT DEFINED AOM_${flavor}_FLAG)
set(AOM_${flavor}_FLAG "${AOM_${flavor}_DEFAULT_FLAG}")
+ string(TOLOWER "${flavor}" flavor_lower)
+
+ # Do not use check_c_compiler_flag here since the regex used to match
+ # against stderr does not recognise the "invalid feature modifier" error
+ # produced by certain versions of GCC, leading to the feature being
+ # incorrectly marked as available.
+ set(OLD_CMAKE_REQURED_FLAGS ${CMAKE_REQUIRED_FLAGS})
+ set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} ${AOM_${flavor}_FLAG}")
unset(FLAG_SUPPORTED)
- check_c_compiler_flag("${AOM_${flavor}_FLAG}" FLAG_SUPPORTED)
+ aom_check_source_compiles("arm_feature_flag_${flavor_lower}_available"
+ "static void function(void) {}" FLAG_SUPPORTED)
+ set(CMAKE_REQUIRED_FLAGS ${OLD_CMAKE_REQURED_FLAGS})
+
if(NOT ${FLAG_SUPPORTED})
set(ENABLE_${flavor} 0)
endif()
endif()
endforeach()
- # SVE requires that the Neon-SVE bridge header is also available.
- if(ENABLE_SVE)
+ # SVE and SVE2 require that the Neon-SVE bridge header is also available.
+ if(ENABLE_SVE OR ENABLE_SVE2)
set(OLD_CMAKE_REQURED_FLAGS ${CMAKE_REQUIRED_FLAGS})
set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} ${AOM_SVE_FLAG}")
aom_check_source_compiles("arm_neon_sve_bridge_available" "
@@ -47,6 +59,7 @@ if("${AOM_TARGET_CPU}" STREQUAL "arm64")
set(CMAKE_REQUIRED_FLAGS ${OLD_CMAKE_REQURED_FLAGS})
if(HAVE_SVE_HEADERS EQUAL 0)
set(ENABLE_SVE 0)
+ set(ENABLE_SVE2 0)
endif()
endif()
diff --git a/third_party/aom/build/cmake/rtcd.pl b/third_party/aom/build/cmake/rtcd.pl
index 1cf52f076c..f4a70842d0 100755
--- a/third_party/aom/build/cmake/rtcd.pl
+++ b/third_party/aom/build/cmake/rtcd.pl
@@ -392,7 +392,7 @@ if ($opts{arch} eq 'x86') {
@ALL_ARCHS = filter(qw/neon/);
arm;
} elsif ($opts{arch} eq 'arm64' ) {
- @ALL_ARCHS = filter(qw/neon arm_crc32 neon_dotprod neon_i8mm sve/);
+ @ALL_ARCHS = filter(qw/neon arm_crc32 neon_dotprod neon_i8mm sve sve2/);
@REQUIRES = filter(qw/neon/);
&require(@REQUIRES);
arm;
diff --git a/third_party/aom/doc/dev_guide/av1_encoder.dox b/third_party/aom/doc/dev_guide/av1_encoder.dox
index 0f7e8f87e2..a40b58933b 100644
--- a/third_party/aom/doc/dev_guide/av1_encoder.dox
+++ b/third_party/aom/doc/dev_guide/av1_encoder.dox
@@ -1313,6 +1313,34 @@ Related functions:
All the related functions are listed in \ref coefficient_coding.
+\section architecture_simd SIMD usage
+
+In order to efficiently encode video on modern platforms, it is necessary to
+implement optimized versions of many core encoding and decoding functions using
+architecture-specific SIMD instructions.
+
+Functions which have optimized implementations will have multiple variants
+in the code, each suffixed with the name of the appropriate instruction set.
+There will additionally be an `_c` version, which acts as a reference
+implementation which the SIMD variants can be tested against.
+
+As different machines with the same nominal architecture may support different
+subsets of SIMD instructions, we have dynamic CPU detection logic which chooses
+the appropriate functions to use at run time. This process is handled by
+`build/cmake/rtcd.pl`, with function definitions in the files
+`*_rtcd_defs.pl` elsewhere in the codebase.
+
+Currently SIMD is supported on the following platforms:
+
+- x86: Requires SSE4.1 or above
+
+- Arm: Requires Neon (Armv7-A and above)
+
+We aim to provide implementations of all performance-critical functions which
+are compatible with the instruction sets listed above. Additional SIMD
+extensions (e.g. AVX on x86, SVE on Arm) are also used to provide even
+greater performance where available.
+
*/
/*!\defgroup encoder_algo Encoder Algorithm
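The dev-guide text added above describes the split between a _c reference implementation and arch-specific SIMD variants, selected at run time by the rtcd machinery. The sketch below shows that dispatch idea under assumed names; the real function pointers and CPU-flag query are generated by build/cmake/rtcd.pl from the *_rtcd_defs.pl files, not hand-written like this.

/* Sketch of the _c reference / run-time dispatch split (assumed names). */
#include <stdint.h>
#include <stdlib.h>

static unsigned sk_sad_c(const uint8_t *a, const uint8_t *b, int n) {
  unsigned sad = 0;
  for (int i = 0; i < n; ++i) sad += (unsigned)abs(a[i] - b[i]);
  return sad;
}

/* An optimized variant would live in an arch-specific file and be tested
   against sk_sad_c; here it simply forwards to the reference. */
static unsigned sk_sad_simd(const uint8_t *a, const uint8_t *b, int n) {
  return sk_sad_c(a, b, n);
}

/* The dispatch pointer starts at the reference implementation ... */
static unsigned (*sk_sad)(const uint8_t *, const uint8_t *, int) = sk_sad_c;

/* ... and is upgraded once, at start-up, based on what the CPU reports. */
static void sk_rtcd_init(int cpu_has_simd) {
  if (cpu_has_simd) sk_sad = sk_sad_simd;
}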
diff --git a/third_party/aom/examples/av1_dec_fuzzer.cc b/third_party/aom/examples/av1_dec_fuzzer.cc
index 9b9a0b9cb6..e9388b7062 100644
--- a/third_party/aom/examples/av1_dec_fuzzer.cc
+++ b/third_party/aom/examples/av1_dec_fuzzer.cc
@@ -34,6 +34,14 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
return 0;
}
+ // Abusing the four unused bytes at the end of the IVF file header as a source
+ // of random bits.
+ unsigned int tile_mode = (data[IVF_FILE_HDR_SZ - 1] & 2) != 0;
+ unsigned int ext_tile_debug = (data[IVF_FILE_HDR_SZ - 1] & 4) != 0;
+ unsigned int is_annexb = (data[IVF_FILE_HDR_SZ - 1] & 8) != 0;
+ int output_all_layers = (data[IVF_FILE_HDR_SZ - 1] & 0x10) != 0;
+ int operating_point = data[IVF_FILE_HDR_SZ - 2] & 0x1F;
+
aom_codec_iface_t *codec_interface = aom_codec_av1_dx();
aom_codec_ctx_t codec;
// Set thread count in the range [1, 64].
@@ -42,6 +50,13 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
if (aom_codec_dec_init(&codec, codec_interface, &cfg, 0)) {
return 0;
}
+ AOM_CODEC_CONTROL_TYPECHECKED(&codec, AV1_SET_TILE_MODE, tile_mode);
+ AOM_CODEC_CONTROL_TYPECHECKED(&codec, AV1D_EXT_TILE_DEBUG, ext_tile_debug);
+ AOM_CODEC_CONTROL_TYPECHECKED(&codec, AV1D_SET_IS_ANNEXB, is_annexb);
+ AOM_CODEC_CONTROL_TYPECHECKED(&codec, AV1D_SET_OUTPUT_ALL_LAYERS,
+ output_all_layers);
+ AOM_CODEC_CONTROL_TYPECHECKED(&codec, AV1D_SET_OPERATING_POINT,
+ operating_point);
data += IVF_FILE_HDR_SZ;
size -= IVF_FILE_HDR_SZ;
diff --git a/third_party/aom/examples/svc_encoder_rtc.cc b/third_party/aom/examples/svc_encoder_rtc.cc
index 2c041081e5..c751e9868c 100644
--- a/third_party/aom/examples/svc_encoder_rtc.cc
+++ b/third_party/aom/examples/svc_encoder_rtc.cc
@@ -1442,6 +1442,35 @@ static int qindex_to_quantizer(int qindex) {
return 63;
}
+static void set_active_map(const aom_codec_enc_cfg_t *cfg,
+ aom_codec_ctx_t *codec, int frame_cnt) {
+ aom_active_map_t map = { 0, 0, 0 };
+
+ map.rows = (cfg->g_h + 15) / 16;
+ map.cols = (cfg->g_w + 15) / 16;
+
+ map.active_map = (uint8_t *)malloc(map.rows * map.cols);
+ if (!map.active_map) die("Failed to allocate active map");
+
+ // Example map for testing.
+ for (unsigned int i = 0; i < map.rows; ++i) {
+ for (unsigned int j = 0; j < map.cols; ++j) {
+ int index = map.cols * i + j;
+ map.active_map[index] = 1;
+ if (frame_cnt < 300) {
+ if (i < map.rows / 2 && j < map.cols / 2) map.active_map[index] = 0;
+ } else if (frame_cnt >= 300) {
+ if (i < map.rows / 2 && j >= map.cols / 2) map.active_map[index] = 0;
+ }
+ }
+ }
+
+ if (aom_codec_control(codec, AOME_SET_ACTIVEMAP, &map))
+ die_codec(codec, "Failed to set active map");
+
+ free(map.active_map);
+}
+
int main(int argc, const char **argv) {
AppInput app_input;
AvxVideoWriter *outfile[AOM_MAX_LAYERS] = { NULL };
@@ -1494,6 +1523,9 @@ int main(int argc, const char **argv) {
// Flag to test setting speed per layer.
const int test_speed_per_layer = 0;
+ // Flag for testing active maps.
+ const int test_active_maps = 0;
+
/* Setup default input stream settings */
app_input.input_ctx.framerate.numerator = 30;
app_input.input_ctx.framerate.denominator = 1;
@@ -1874,6 +1906,8 @@ int main(int argc, const char **argv) {
}
}
+ if (test_active_maps) set_active_map(&cfg, &codec, frame_cnt);
+
// Do the layer encode.
aom_usec_timer_start(&timer);
if (aom_codec_encode(&codec, frame_avail ? &raw : NULL, pts, 1, flags))
diff --git a/third_party/aom/libs.doxy_template b/third_party/aom/libs.doxy_template
index ba77751a50..01da81ac0c 100644
--- a/third_party/aom/libs.doxy_template
+++ b/third_party/aom/libs.doxy_template
@@ -1219,15 +1219,6 @@ HTML_COLORSTYLE_SAT = 100
HTML_COLORSTYLE_GAMMA = 80
-# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
-# page will contain the date and time when the page was generated. Setting this
-# to YES can help to show when doxygen was last run and thus if the
-# documentation is up to date.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_TIMESTAMP = NO
-
# If the HTML_DYNAMIC_MENUS tag is set to YES then the generated HTML
# documentation will contain a main index with vertical navigation menus that
# are dynamically created via Javascript. If disabled, the navigation index will
@@ -1509,17 +1500,6 @@ EXT_LINKS_IN_WINDOW = NO
FORMULA_FONTSIZE = 10
-# Use the FORMULA_TRANSPARENT tag to determine whether or not the images
-# generated for formulas are transparent PNGs. Transparent PNGs are not
-# supported properly for IE 6.0, but are supported on all modern browsers.
-#
-# Note that when changing this option you need to delete any form_*.png files in
-# the HTML output directory before the changes have effect.
-# The default value is: YES.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-FORMULA_TRANSPARENT = YES
-
# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see
# https://www.mathjax.org) which uses client side Javascript for the rendering
# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX
@@ -1820,14 +1800,6 @@ LATEX_HIDE_INDICES = NO
LATEX_BIB_STYLE = plain
-# If the LATEX_TIMESTAMP tag is set to YES then the footer of each generated
-# page will contain the date and time when the page was generated. Setting this
-# to NO can help when comparing the output of multiple runs.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-LATEX_TIMESTAMP = NO
-
# The LATEX_EMOJI_DIRECTORY tag is used to specify the (relative or absolute)
# path from which the emoji images will be read. If a relative path is entered,
# it will be relative to the LATEX_OUTPUT directory. If left blank the
@@ -2167,23 +2139,6 @@ HAVE_DOT = NO
DOT_NUM_THREADS = 0
-# When you want a differently looking font in the dot files that doxygen
-# generates you can specify the font name using DOT_FONTNAME. You need to make
-# sure dot is able to find the font, which can be done by putting it in a
-# standard location or by setting the DOTFONTPATH environment variable or by
-# setting DOT_FONTPATH to the directory containing the font.
-# The default value is: Helvetica.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-DOT_FONTNAME = Helvetica
-
-# The DOT_FONTSIZE tag can be used to set the size (in points) of the font of
-# dot graphs.
-# Minimum value: 4, maximum value: 24, default value: 10.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-DOT_FONTSIZE = 10
-
# By default doxygen will tell dot to use the default font as specified with
# DOT_FONTNAME. If you specify a different font using DOT_FONTNAME you can set
# the path where dot can find it using this tag.
@@ -2401,18 +2356,6 @@ DOT_GRAPH_MAX_NODES = 50
MAX_DOT_GRAPH_DEPTH = 0
-# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
-# background. This is disabled by default, because dot on Windows does not seem
-# to support this out of the box.
-#
-# Warning: Depending on the platform used, enabling this option may lead to
-# badly anti-aliased labels on the edges of a graph (i.e. they become hard to
-# read).
-# The default value is: NO.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-DOT_TRANSPARENT = NO
-
# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output
# files in one run (i.e. multiple -o and -T options on the command line). This
# makes dot run faster, but since only newer versions of dot (>1.8.10) support
diff --git a/third_party/aom/test/active_map_test.cc b/third_party/aom/test/active_map_test.cc
index 979ee6b8b3..de16541281 100644
--- a/third_party/aom/test/active_map_test.cc
+++ b/third_party/aom/test/active_map_test.cc
@@ -19,8 +19,10 @@
namespace {
+// Params: test mode, speed, aq_mode and screen_content mode.
class ActiveMapTest
- : public ::libaom_test::CodecTestWith2Params<libaom_test::TestMode, int>,
+ : public ::libaom_test::CodecTestWith4Params<libaom_test::TestMode, int,
+ int, int>,
public ::libaom_test::EncoderTest {
protected:
static const int kWidth = 208;
@@ -32,6 +34,8 @@ class ActiveMapTest
void SetUp() override {
InitializeConfig(GET_PARAM(1));
cpu_used_ = GET_PARAM(2);
+ aq_mode_ = GET_PARAM(3);
+ screen_mode_ = GET_PARAM(4);
}
void PreEncodeFrameHook(::libaom_test::VideoSource *video,
@@ -41,6 +45,9 @@ class ActiveMapTest
encoder->Control(AV1E_SET_ALLOW_WARPED_MOTION, 0);
encoder->Control(AV1E_SET_ENABLE_GLOBAL_MOTION, 0);
encoder->Control(AV1E_SET_ENABLE_OBMC, 0);
+ encoder->Control(AV1E_SET_AQ_MODE, aq_mode_);
+ encoder->Control(AV1E_SET_TUNE_CONTENT, screen_mode_);
+ if (screen_mode_) encoder->Control(AV1E_SET_ENABLE_PALETTE, 1);
} else if (video->frame() == 3) {
aom_active_map_t map = aom_active_map_t();
/* clang-format off */
@@ -79,19 +86,22 @@ class ActiveMapTest
cfg_.g_pass = AOM_RC_ONE_PASS;
cfg_.rc_end_usage = AOM_CBR;
cfg_.kf_max_dist = 90000;
- ::libaom_test::I420VideoSource video("hantro_odd.yuv", kWidth, kHeight, 30,
- 1, 0, 20);
+ ::libaom_test::I420VideoSource video("hantro_odd.yuv", kWidth, kHeight, 100,
+ 1, 0, 100);
ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
}
int cpu_used_;
+ int aq_mode_;
+ int screen_mode_;
};
TEST_P(ActiveMapTest, Test) { DoTest(); }
AV1_INSTANTIATE_TEST_SUITE(ActiveMapTest,
::testing::Values(::libaom_test::kRealTime),
- ::testing::Range(5, 9));
+ ::testing::Range(5, 12), ::testing::Values(0, 3),
+ ::testing::Values(0, 1));
} // namespace
diff --git a/third_party/aom/test/aom_image_test.cc b/third_party/aom/test/aom_image_test.cc
index ad48e73e3d..03f4373f35 100644
--- a/third_party/aom/test/aom_image_test.cc
+++ b/third_party/aom/test/aom_image_test.cc
@@ -47,6 +47,16 @@ TEST(AomImageTest, AomImgSetRectOverflow) {
0);
}
+TEST(AomImageTest, AomImgAllocNone) {
+ const int kWidth = 128;
+ const int kHeight = 128;
+
+ aom_image_t img;
+ aom_img_fmt_t format = AOM_IMG_FMT_NONE;
+ unsigned int align = 32;
+ ASSERT_EQ(aom_img_alloc(&img, format, kWidth, kHeight, align), nullptr);
+}
+
TEST(AomImageTest, AomImgAllocNv12) {
const int kWidth = 128;
const int kHeight = 128;
@@ -54,7 +64,7 @@ TEST(AomImageTest, AomImgAllocNv12) {
aom_image_t img;
aom_img_fmt_t format = AOM_IMG_FMT_NV12;
unsigned int align = 32;
- EXPECT_NE(aom_img_alloc(&img, format, kWidth, kHeight, align), nullptr);
+ EXPECT_EQ(aom_img_alloc(&img, format, kWidth, kHeight, align), &img);
EXPECT_EQ(img.stride[AOM_PLANE_U], img.stride[AOM_PLANE_Y]);
EXPECT_EQ(img.stride[AOM_PLANE_V], 0);
EXPECT_EQ(img.planes[AOM_PLANE_V], nullptr);
diff --git a/third_party/aom/test/av1_convolve_test.cc b/third_party/aom/test/av1_convolve_test.cc
index 5bbac21803..b2392276cc 100644
--- a/third_party/aom/test/av1_convolve_test.cc
+++ b/third_party/aom/test/av1_convolve_test.cc
@@ -631,6 +631,11 @@ INSTANTIATE_TEST_SUITE_P(NEON, AV1ConvolveXHighbdTest,
BuildHighbdParams(av1_highbd_convolve_x_sr_neon));
#endif
+#if HAVE_SVE2
+INSTANTIATE_TEST_SUITE_P(SVE2, AV1ConvolveXHighbdTest,
+ BuildHighbdParams(av1_highbd_convolve_x_sr_sve2));
+#endif
+
/////////////////////////////////////////////////////////////////
// Single reference convolve-x IntraBC functions (high bit-depth)
/////////////////////////////////////////////////////////////////
@@ -998,6 +1003,11 @@ INSTANTIATE_TEST_SUITE_P(NEON, AV1ConvolveYHighbdTest,
BuildHighbdParams(av1_highbd_convolve_y_sr_neon));
#endif
+#if HAVE_SVE2
+INSTANTIATE_TEST_SUITE_P(SVE2, AV1ConvolveYHighbdTest,
+ BuildHighbdParams(av1_highbd_convolve_y_sr_sve2));
+#endif
+
/////////////////////////////////////////////////////////////////
// Single reference convolve-y IntraBC functions (high bit-depth)
/////////////////////////////////////////////////////////////////
@@ -1523,6 +1533,11 @@ INSTANTIATE_TEST_SUITE_P(NEON, AV1Convolve2DHighbdTest,
BuildHighbdParams(av1_highbd_convolve_2d_sr_neon));
#endif
+#if HAVE_SVE2
+INSTANTIATE_TEST_SUITE_P(SVE2, AV1Convolve2DHighbdTest,
+ BuildHighbdParams(av1_highbd_convolve_2d_sr_sve2));
+#endif
+
//////////////////////////////////////////////////////////////////
// Single reference convolve-2d IntraBC functions (high bit-depth)
//////////////////////////////////////////////////////////////////
@@ -1943,6 +1958,12 @@ INSTANTIATE_TEST_SUITE_P(
BuildHighbdLumaParams(av1_highbd_dist_wtd_convolve_x_neon));
#endif
+#if HAVE_SVE2
+INSTANTIATE_TEST_SUITE_P(
+ SVE2, AV1ConvolveXHighbdCompoundTest,
+ BuildHighbdLumaParams(av1_highbd_dist_wtd_convolve_x_sve2));
+#endif
+
#endif // CONFIG_AV1_HIGHBITDEPTH
////////////////////////////////////////////////
@@ -2023,6 +2044,12 @@ INSTANTIATE_TEST_SUITE_P(
BuildHighbdLumaParams(av1_highbd_dist_wtd_convolve_y_neon));
#endif
+#if HAVE_SVE2
+INSTANTIATE_TEST_SUITE_P(
+ SVE2, AV1ConvolveYHighbdCompoundTest,
+ BuildHighbdLumaParams(av1_highbd_dist_wtd_convolve_y_sve2));
+#endif
+
#endif // CONFIG_AV1_HIGHBITDEPTH
//////////////////////////////////////////////////////
@@ -2312,11 +2339,6 @@ TEST_P(AV1Convolve2DCompoundTest, RunTest) { RunTest(); }
INSTANTIATE_TEST_SUITE_P(C, AV1Convolve2DCompoundTest,
BuildLowbdLumaParams(av1_dist_wtd_convolve_2d_c));
-#if HAVE_SSE2
-INSTANTIATE_TEST_SUITE_P(SSE2, AV1Convolve2DCompoundTest,
- BuildLowbdLumaParams(av1_dist_wtd_convolve_2d_sse2));
-#endif
-
#if HAVE_SSSE3
INSTANTIATE_TEST_SUITE_P(SSSE3, AV1Convolve2DCompoundTest,
BuildLowbdLumaParams(av1_dist_wtd_convolve_2d_ssse3));
@@ -2442,6 +2464,12 @@ INSTANTIATE_TEST_SUITE_P(
BuildHighbdLumaParams(av1_highbd_dist_wtd_convolve_2d_neon));
#endif
+#if HAVE_SVE2
+INSTANTIATE_TEST_SUITE_P(
+ SVE2, AV1Convolve2DHighbdCompoundTest,
+ BuildHighbdLumaParams(av1_highbd_dist_wtd_convolve_2d_sve2));
+#endif
+
#endif // CONFIG_AV1_HIGHBITDEPTH
} // namespace
diff --git a/third_party/aom/test/av1_fwd_txfm2d_test.cc b/third_party/aom/test/av1_fwd_txfm2d_test.cc
index 2ed5d94db3..4a5a634545 100644
--- a/third_party/aom/test/av1_fwd_txfm2d_test.cc
+++ b/third_party/aom/test/av1_fwd_txfm2d_test.cc
@@ -443,7 +443,7 @@ using ::testing::Combine;
using ::testing::Values;
using ::testing::ValuesIn;
-#if HAVE_SSE2
+#if AOM_ARCH_X86 && HAVE_SSE2
static TX_SIZE fwd_txfm_for_sse2[] = {
TX_4X4,
TX_8X8,
@@ -469,15 +469,14 @@ static TX_SIZE fwd_txfm_for_sse2[] = {
INSTANTIATE_TEST_SUITE_P(SSE2, AV1FwdTxfm2dTest,
Combine(ValuesIn(fwd_txfm_for_sse2),
Values(av1_lowbd_fwd_txfm_sse2)));
-#endif // HAVE_SSE2
+#endif // AOM_ARCH_X86 && HAVE_SSE2
#if HAVE_SSE4_1
-static TX_SIZE fwd_txfm_for_sse41[] = {
- TX_4X4,
- TX_64X64,
- TX_32X64,
- TX_64X32,
-};
+static TX_SIZE fwd_txfm_for_sse41[] = { TX_4X4, TX_8X8, TX_16X16, TX_32X32,
+ TX_64X64, TX_4X8, TX_8X4, TX_8X16,
+ TX_16X8, TX_16X32, TX_32X16, TX_32X64,
+ TX_64X32, TX_4X16, TX_16X4, TX_8X32,
+ TX_32X8, TX_16X64, TX_64X16 };
INSTANTIATE_TEST_SUITE_P(SSE4_1, AV1FwdTxfm2dTest,
Combine(ValuesIn(fwd_txfm_for_sse41),
diff --git a/third_party/aom/test/av1_wedge_utils_test.cc b/third_party/aom/test/av1_wedge_utils_test.cc
index 1055ff35b2..2234561b7d 100644
--- a/third_party/aom/test/av1_wedge_utils_test.cc
+++ b/third_party/aom/test/av1_wedge_utils_test.cc
@@ -408,4 +408,16 @@ INSTANTIATE_TEST_SUITE_P(
av1_wedge_compute_delta_squares_avx2)));
#endif // HAVE_AVX2
+#if HAVE_SVE
+INSTANTIATE_TEST_SUITE_P(
+ SVE, WedgeUtilsSSEOptTest,
+ ::testing::Values(TestFuncsFSSE(av1_wedge_sse_from_residuals_c,
+ av1_wedge_sse_from_residuals_sve)));
+
+INSTANTIATE_TEST_SUITE_P(
+ SVE, WedgeUtilsSignOptTest,
+ ::testing::Values(TestFuncsFSign(av1_wedge_sign_from_residuals_c,
+ av1_wedge_sign_from_residuals_sve)));
+#endif // HAVE_SVE
+
} // namespace
diff --git a/third_party/aom/test/cdef_test.cc b/third_party/aom/test/cdef_test.cc
index ad54407ca7..ac0591f6a8 100644
--- a/third_party/aom/test/cdef_test.cc
+++ b/third_party/aom/test/cdef_test.cc
@@ -614,7 +614,7 @@ TEST_P(CDEFCopyRect16to16Test, TestSIMDNoMismatch) {
using std::make_tuple;
-#if (HAVE_SSE2 || HAVE_SSSE3 || HAVE_SSE4_1 || HAVE_AVX2 || HAVE_NEON)
+#if ((AOM_ARCH_X86 && HAVE_SSSE3) || HAVE_SSE4_1 || HAVE_AVX2 || HAVE_NEON)
static const CdefFilterBlockFunctions kCdefFilterFuncC[] = {
{ &cdef_filter_8_0_c, &cdef_filter_8_1_c, &cdef_filter_8_2_c,
&cdef_filter_8_3_c }
@@ -626,50 +626,7 @@ static const CdefFilterBlockFunctions kCdefFilterHighbdFuncC[] = {
};
#endif
-#if HAVE_SSE2
-static const CdefFilterBlockFunctions kCdefFilterFuncSse2[] = {
- { &cdef_filter_8_0_sse2, &cdef_filter_8_1_sse2, &cdef_filter_8_2_sse2,
- &cdef_filter_8_3_sse2 }
-};
-
-static const CdefFilterBlockFunctions kCdefFilterHighbdFuncSse2[] = {
- { &cdef_filter_16_0_sse2, &cdef_filter_16_1_sse2, &cdef_filter_16_2_sse2,
- &cdef_filter_16_3_sse2 }
-};
-
-INSTANTIATE_TEST_SUITE_P(
- SSE2, CDEFBlockTest,
- ::testing::Combine(::testing::ValuesIn(kCdefFilterFuncSse2),
- ::testing::ValuesIn(kCdefFilterFuncC),
- ::testing::Values(BLOCK_4X4, BLOCK_4X8, BLOCK_8X4,
- BLOCK_8X8),
- ::testing::Range(0, 16), ::testing::Values(8)));
-INSTANTIATE_TEST_SUITE_P(
- SSE2, CDEFBlockHighbdTest,
- ::testing::Combine(::testing::ValuesIn(kCdefFilterHighbdFuncSse2),
- ::testing::ValuesIn(kCdefFilterHighbdFuncC),
- ::testing::Values(BLOCK_4X4, BLOCK_4X8, BLOCK_8X4,
- BLOCK_8X8),
- ::testing::Range(0, 16), ::testing::Range(10, 13, 2)));
-INSTANTIATE_TEST_SUITE_P(SSE2, CDEFFindDirTest,
- ::testing::Values(make_tuple(&cdef_find_dir_sse2,
- &cdef_find_dir_c)));
-INSTANTIATE_TEST_SUITE_P(SSE2, CDEFFindDirDualTest,
- ::testing::Values(make_tuple(&cdef_find_dir_dual_sse2,
- &cdef_find_dir_dual_c)));
-
-INSTANTIATE_TEST_SUITE_P(
- SSE2, CDEFCopyRect8to16Test,
- ::testing::Values(make_tuple(&cdef_copy_rect8_8bit_to_16bit_c,
- &cdef_copy_rect8_8bit_to_16bit_sse2)));
-
-INSTANTIATE_TEST_SUITE_P(
- SSE2, CDEFCopyRect16to16Test,
- ::testing::Values(make_tuple(&cdef_copy_rect8_16bit_to_16bit_c,
- &cdef_copy_rect8_16bit_to_16bit_sse2)));
-#endif
-
-#if HAVE_SSSE3
+#if AOM_ARCH_X86 && HAVE_SSSE3
static const CdefFilterBlockFunctions kCdefFilterFuncSsse3[] = {
{ &cdef_filter_8_0_ssse3, &cdef_filter_8_1_ssse3, &cdef_filter_8_2_ssse3,
&cdef_filter_8_3_ssse3 }
@@ -843,30 +800,7 @@ INSTANTIATE_TEST_SUITE_P(
#endif
// Test speed for all supported architectures
-#if HAVE_SSE2
-INSTANTIATE_TEST_SUITE_P(
- SSE2, CDEFSpeedTest,
- ::testing::Combine(::testing::ValuesIn(kCdefFilterFuncSse2),
- ::testing::ValuesIn(kCdefFilterFuncC),
- ::testing::Values(BLOCK_4X4, BLOCK_4X8, BLOCK_8X4,
- BLOCK_8X8),
- ::testing::Range(0, 16), ::testing::Values(8)));
-INSTANTIATE_TEST_SUITE_P(
- SSE2, CDEFSpeedHighbdTest,
- ::testing::Combine(::testing::ValuesIn(kCdefFilterHighbdFuncSse2),
- ::testing::ValuesIn(kCdefFilterHighbdFuncC),
- ::testing::Values(BLOCK_4X4, BLOCK_4X8, BLOCK_8X4,
- BLOCK_8X8),
- ::testing::Range(0, 16), ::testing::Values(10)));
-INSTANTIATE_TEST_SUITE_P(SSE2, CDEFFindDirSpeedTest,
- ::testing::Values(make_tuple(&cdef_find_dir_sse2,
- &cdef_find_dir_c)));
-INSTANTIATE_TEST_SUITE_P(SSE2, CDEFFindDirDualSpeedTest,
- ::testing::Values(make_tuple(&cdef_find_dir_dual_sse2,
- &cdef_find_dir_dual_c)));
-#endif
-
-#if HAVE_SSSE3
+#if AOM_ARCH_X86 && HAVE_SSSE3
INSTANTIATE_TEST_SUITE_P(
SSSE3, CDEFSpeedTest,
::testing::Combine(::testing::ValuesIn(kCdefFilterFuncSsse3),
diff --git a/third_party/aom/test/convolve_test.cc b/third_party/aom/test/convolve_test.cc
index c97f814057..cab590927b 100644
--- a/third_party/aom/test/convolve_test.cc
+++ b/third_party/aom/test/convolve_test.cc
@@ -773,6 +773,17 @@ WRAP(convolve8_vert_neon, 10)
WRAP(convolve8_horiz_neon, 12)
WRAP(convolve8_vert_neon, 12)
#endif // HAVE_NEON
+
+#if HAVE_SVE
+WRAP(convolve8_horiz_sve, 8)
+WRAP(convolve8_vert_sve, 8)
+
+WRAP(convolve8_horiz_sve, 10)
+WRAP(convolve8_vert_sve, 10)
+
+WRAP(convolve8_horiz_sve, 12)
+WRAP(convolve8_vert_sve, 12)
+#endif // HAVE_SVE
#endif // CONFIG_AV1_HIGHBITDEPTH
#undef WRAP
@@ -832,12 +843,6 @@ const ConvolveParam kArrayHighbdConvolve_sse2[] = {
INSTANTIATE_TEST_SUITE_P(SSE2, HighbdConvolveTest,
::testing::ValuesIn(kArrayHighbdConvolve_sse2));
#endif
-const ConvolveFunctions convolve8_sse2(aom_convolve8_horiz_sse2,
- aom_convolve8_vert_sse2, 0);
-const ConvolveParam kArrayConvolve_sse2[] = { ALL_SIZES(convolve8_sse2) };
-
-INSTANTIATE_TEST_SUITE_P(SSE2, LowbdConvolveTest,
- ::testing::ValuesIn(kArrayConvolve_sse2));
#endif
#if HAVE_SSSE3
@@ -919,4 +924,22 @@ INSTANTIATE_TEST_SUITE_P(NEON_I8MM, LowbdConvolveTest,
::testing::ValuesIn(kArray_Convolve8_neon_i8mm));
#endif // HAVE_NEON_I8MM
+#if HAVE_SVE
+#if CONFIG_AV1_HIGHBITDEPTH
+const ConvolveFunctions wrap_convolve8_sve(wrap_convolve8_horiz_sve_8,
+ wrap_convolve8_vert_sve_8, 8);
+const ConvolveFunctions wrap_convolve10_sve(wrap_convolve8_horiz_sve_10,
+ wrap_convolve8_vert_sve_10, 10);
+const ConvolveFunctions wrap_convolve12_sve(wrap_convolve8_horiz_sve_12,
+ wrap_convolve8_vert_sve_12, 12);
+const ConvolveParam kArray_HighbdConvolve8_sve[] = {
+ ALL_SIZES_64(wrap_convolve8_sve), ALL_SIZES_64(wrap_convolve10_sve),
+ ALL_SIZES_64(wrap_convolve12_sve)
+};
+
+INSTANTIATE_TEST_SUITE_P(SVE, HighbdConvolveTest,
+ ::testing::ValuesIn(kArray_HighbdConvolve8_sve));
+#endif
+#endif // HAVE_SVE
+
} // namespace
diff --git a/third_party/aom/test/corner_match_test.cc b/third_party/aom/test/corner_match_test.cc
index 9733732180..895c8ad7d3 100644
--- a/third_party/aom/test/corner_match_test.cc
+++ b/third_party/aom/test/corner_match_test.cc
@@ -27,13 +27,19 @@ namespace AV1CornerMatch {
using libaom_test::ACMRandom;
-typedef double (*ComputeCrossCorrFunc)(const unsigned char *im1, int stride1,
- int x1, int y1, const unsigned char *im2,
- int stride2, int x2, int y2);
+typedef bool (*ComputeMeanStddevFunc)(const unsigned char *frame, int stride,
+ int x, int y, double *mean,
+ double *one_over_stddev);
+typedef double (*ComputeCorrFunc)(const unsigned char *frame1, int stride1,
+ int x1, int y1, double mean1,
+ double one_over_stddev1,
+ const unsigned char *frame2, int stride2,
+ int x2, int y2, double mean2,
+ double one_over_stddev2);
using std::make_tuple;
using std::tuple;
-typedef tuple<int, ComputeCrossCorrFunc> CornerMatchParam;
+typedef tuple<int, ComputeMeanStddevFunc, ComputeCorrFunc> CornerMatchParam;
class AV1CornerMatchTest : public ::testing::TestWithParam<CornerMatchParam> {
public:
@@ -41,8 +47,11 @@ class AV1CornerMatchTest : public ::testing::TestWithParam<CornerMatchParam> {
void SetUp() override;
protected:
- void RunCheckOutput(int run_times);
- ComputeCrossCorrFunc target_func;
+ void GenerateInput(uint8_t *input1, uint8_t *input2, int w, int h, int mode);
+ void RunCheckOutput();
+ void RunSpeedTest();
+ ComputeMeanStddevFunc target_compute_mean_stddev_func;
+ ComputeCorrFunc target_compute_corr_func;
libaom_test::ACMRandom rnd_;
};
@@ -51,14 +60,31 @@ GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(AV1CornerMatchTest);
AV1CornerMatchTest::~AV1CornerMatchTest() = default;
void AV1CornerMatchTest::SetUp() {
rnd_.Reset(ACMRandom::DeterministicSeed());
- target_func = GET_PARAM(1);
+ target_compute_mean_stddev_func = GET_PARAM(1);
+ target_compute_corr_func = GET_PARAM(2);
}
-void AV1CornerMatchTest::RunCheckOutput(int run_times) {
+void AV1CornerMatchTest::GenerateInput(uint8_t *input1, uint8_t *input2, int w,
+ int h, int mode) {
+ if (mode == 0) {
+ for (int i = 0; i < h; ++i)
+ for (int j = 0; j < w; ++j) {
+ input1[i * w + j] = rnd_.Rand8();
+ input2[i * w + j] = rnd_.Rand8();
+ }
+ } else if (mode == 1) {
+ for (int i = 0; i < h; ++i)
+ for (int j = 0; j < w; ++j) {
+ int v = rnd_.Rand8();
+ input1[i * w + j] = v;
+ input2[i * w + j] = (v / 2) + (rnd_.Rand8() & 15);
+ }
+ }
+}
+
+void AV1CornerMatchTest::RunCheckOutput() {
const int w = 128, h = 128;
- const int num_iters = 10000;
- int i, j;
- aom_usec_timer ref_timer, test_timer;
+ const int num_iters = 1000;
std::unique_ptr<uint8_t[]> input1(new (std::nothrow) uint8_t[w * h]);
std::unique_ptr<uint8_t[]> input2(new (std::nothrow) uint8_t[w * h]);
@@ -69,76 +95,139 @@ void AV1CornerMatchTest::RunCheckOutput(int run_times) {
// i) Random data, should have correlation close to 0
// ii) Linearly related data + noise, should have correlation close to 1
int mode = GET_PARAM(0);
- if (mode == 0) {
- for (i = 0; i < h; ++i)
- for (j = 0; j < w; ++j) {
- input1[i * w + j] = rnd_.Rand8();
- input2[i * w + j] = rnd_.Rand8();
- }
- } else if (mode == 1) {
- for (i = 0; i < h; ++i)
- for (j = 0; j < w; ++j) {
- int v = rnd_.Rand8();
- input1[i * w + j] = v;
- input2[i * w + j] = (v / 2) + (rnd_.Rand8() & 15);
- }
+ GenerateInput(&input1[0], &input2[0], w, h, mode);
+
+ for (int i = 0; i < num_iters; ++i) {
+ int x1 = MATCH_SZ_BY2 + rnd_.PseudoUniform(w + 1 - MATCH_SZ);
+ int y1 = MATCH_SZ_BY2 + rnd_.PseudoUniform(h + 1 - MATCH_SZ);
+ int x2 = MATCH_SZ_BY2 + rnd_.PseudoUniform(w + 1 - MATCH_SZ);
+ int y2 = MATCH_SZ_BY2 + rnd_.PseudoUniform(h + 1 - MATCH_SZ);
+
+ double c_mean1, c_one_over_stddev1, c_mean2, c_one_over_stddev2;
+ bool c_valid1 = aom_compute_mean_stddev_c(input1.get(), w, x1, y1, &c_mean1,
+ &c_one_over_stddev1);
+ bool c_valid2 = aom_compute_mean_stddev_c(input2.get(), w, x2, y2, &c_mean2,
+ &c_one_over_stddev2);
+
+ double simd_mean1, simd_one_over_stddev1, simd_mean2, simd_one_over_stddev2;
+ bool simd_valid1 = target_compute_mean_stddev_func(
+ input1.get(), w, x1, y1, &simd_mean1, &simd_one_over_stddev1);
+ bool simd_valid2 = target_compute_mean_stddev_func(
+ input2.get(), w, x2, y2, &simd_mean2, &simd_one_over_stddev2);
+
+ // Run the correlation calculation even if one of the "valid" flags is
+ // false, i.e. if one of the patches doesn't have enough variance. This is
+ // safe because any potential division by 0 is caught in
+ // aom_compute_mean_stddev(), and one_over_stddev is set to 0 instead.
+ // This causes aom_compute_correlation() to return 0, without causing a
+ // division by 0.
+ const double c_corr = aom_compute_correlation_c(
+ input1.get(), w, x1, y1, c_mean1, c_one_over_stddev1, input2.get(), w,
+ x2, y2, c_mean2, c_one_over_stddev2);
+ const double simd_corr = target_compute_corr_func(
+ input1.get(), w, x1, y1, c_mean1, c_one_over_stddev1, input2.get(), w,
+ x2, y2, c_mean2, c_one_over_stddev2);
+
+ ASSERT_EQ(simd_valid1, c_valid1);
+ ASSERT_EQ(simd_valid2, c_valid2);
+ ASSERT_EQ(simd_mean1, c_mean1);
+ ASSERT_EQ(simd_one_over_stddev1, c_one_over_stddev1);
+ ASSERT_EQ(simd_mean2, c_mean2);
+ ASSERT_EQ(simd_one_over_stddev2, c_one_over_stddev2);
+ ASSERT_EQ(simd_corr, c_corr);
}
+}
- for (i = 0; i < num_iters; ++i) {
- int x1 = MATCH_SZ_BY2 + rnd_.PseudoUniform(w - 2 * MATCH_SZ_BY2);
- int y1 = MATCH_SZ_BY2 + rnd_.PseudoUniform(h - 2 * MATCH_SZ_BY2);
- int x2 = MATCH_SZ_BY2 + rnd_.PseudoUniform(w - 2 * MATCH_SZ_BY2);
- int y2 = MATCH_SZ_BY2 + rnd_.PseudoUniform(h - 2 * MATCH_SZ_BY2);
-
- double res_c = av1_compute_cross_correlation_c(input1.get(), w, x1, y1,
- input2.get(), w, x2, y2);
- double res_simd =
- target_func(input1.get(), w, x1, y1, input2.get(), w, x2, y2);
-
- if (run_times > 1) {
- aom_usec_timer_start(&ref_timer);
- for (j = 0; j < run_times; j++) {
- av1_compute_cross_correlation_c(input1.get(), w, x1, y1, input2.get(),
- w, x2, y2);
- }
- aom_usec_timer_mark(&ref_timer);
- const int elapsed_time_c =
- static_cast<int>(aom_usec_timer_elapsed(&ref_timer));
+void AV1CornerMatchTest::RunSpeedTest() {
+ const int w = 16, h = 16;
+ const int num_iters = 1000000;
+ aom_usec_timer ref_timer, test_timer;
- aom_usec_timer_start(&test_timer);
- for (j = 0; j < run_times; j++) {
- target_func(input1.get(), w, x1, y1, input2.get(), w, x2, y2);
- }
- aom_usec_timer_mark(&test_timer);
- const int elapsed_time_simd =
- static_cast<int>(aom_usec_timer_elapsed(&test_timer));
-
- printf(
- "c_time=%d \t simd_time=%d \t "
- "gain=%d\n",
- elapsed_time_c, elapsed_time_simd,
- (elapsed_time_c / elapsed_time_simd));
- } else {
- ASSERT_EQ(res_simd, res_c);
- }
+ std::unique_ptr<uint8_t[]> input1(new (std::nothrow) uint8_t[w * h]);
+ std::unique_ptr<uint8_t[]> input2(new (std::nothrow) uint8_t[w * h]);
+ ASSERT_NE(input1, nullptr);
+ ASSERT_NE(input2, nullptr);
+
+ // Test the two extreme cases:
+ // i) Random data, should have correlation close to 0
+ // ii) Linearly related data + noise, should have correlation close to 1
+ int mode = GET_PARAM(0);
+ GenerateInput(&input1[0], &input2[0], w, h, mode);
+
+ // Time aom_compute_mean_stddev()
+ double c_mean1, c_one_over_stddev1, c_mean2, c_one_over_stddev2;
+ aom_usec_timer_start(&ref_timer);
+ for (int i = 0; i < num_iters; i++) {
+ aom_compute_mean_stddev_c(input1.get(), w, 0, 0, &c_mean1,
+ &c_one_over_stddev1);
+ aom_compute_mean_stddev_c(input2.get(), w, 0, 0, &c_mean2,
+ &c_one_over_stddev2);
+ }
+ aom_usec_timer_mark(&ref_timer);
+ int elapsed_time_c = static_cast<int>(aom_usec_timer_elapsed(&ref_timer));
+
+ double simd_mean1, simd_one_over_stddev1, simd_mean2, simd_one_over_stddev2;
+ aom_usec_timer_start(&test_timer);
+ for (int i = 0; i < num_iters; i++) {
+ target_compute_mean_stddev_func(input1.get(), w, 0, 0, &simd_mean1,
+ &simd_one_over_stddev1);
+ target_compute_mean_stddev_func(input2.get(), w, 0, 0, &simd_mean2,
+ &simd_one_over_stddev2);
+ }
+ aom_usec_timer_mark(&test_timer);
+ int elapsed_time_simd = static_cast<int>(aom_usec_timer_elapsed(&test_timer));
+
+ printf(
+ "aom_compute_mean_stddev(): c_time=%6d simd_time=%6d "
+ "gain=%.3f\n",
+ elapsed_time_c, elapsed_time_simd,
+ (elapsed_time_c / (double)elapsed_time_simd));
+
+ // Time aom_compute_correlation
+ aom_usec_timer_start(&ref_timer);
+ for (int i = 0; i < num_iters; i++) {
+ aom_compute_correlation_c(input1.get(), w, 0, 0, c_mean1,
+ c_one_over_stddev1, input2.get(), w, 0, 0,
+ c_mean2, c_one_over_stddev2);
+ }
+ aom_usec_timer_mark(&ref_timer);
+ elapsed_time_c = static_cast<int>(aom_usec_timer_elapsed(&ref_timer));
+
+ aom_usec_timer_start(&test_timer);
+ for (int i = 0; i < num_iters; i++) {
+ target_compute_corr_func(input1.get(), w, 0, 0, c_mean1, c_one_over_stddev1,
+ input2.get(), w, 0, 0, c_mean2,
+ c_one_over_stddev2);
}
+ aom_usec_timer_mark(&test_timer);
+ elapsed_time_simd = static_cast<int>(aom_usec_timer_elapsed(&test_timer));
+
+ printf(
+ "aom_compute_correlation(): c_time=%6d simd_time=%6d "
+ "gain=%.3f\n",
+ elapsed_time_c, elapsed_time_simd,
+ (elapsed_time_c / (double)elapsed_time_simd));
}
-TEST_P(AV1CornerMatchTest, CheckOutput) { RunCheckOutput(1); }
-TEST_P(AV1CornerMatchTest, DISABLED_Speed) { RunCheckOutput(100000); }
+TEST_P(AV1CornerMatchTest, CheckOutput) { RunCheckOutput(); }
+TEST_P(AV1CornerMatchTest, DISABLED_Speed) { RunSpeedTest(); }
#if HAVE_SSE4_1
INSTANTIATE_TEST_SUITE_P(
SSE4_1, AV1CornerMatchTest,
- ::testing::Values(make_tuple(0, &av1_compute_cross_correlation_sse4_1),
- make_tuple(1, &av1_compute_cross_correlation_sse4_1)));
+ ::testing::Values(make_tuple(0, &aom_compute_mean_stddev_sse4_1,
+ &aom_compute_correlation_sse4_1),
+ make_tuple(1, &aom_compute_mean_stddev_sse4_1,
+ &aom_compute_correlation_sse4_1)));
#endif
#if HAVE_AVX2
INSTANTIATE_TEST_SUITE_P(
AVX2, AV1CornerMatchTest,
- ::testing::Values(make_tuple(0, &av1_compute_cross_correlation_avx2),
- make_tuple(1, &av1_compute_cross_correlation_avx2)));
+ ::testing::Values(make_tuple(0, &aom_compute_mean_stddev_avx2,
+ &aom_compute_correlation_avx2),
+ make_tuple(1, &aom_compute_mean_stddev_avx2,
+ &aom_compute_correlation_avx2)));
#endif
} // namespace AV1CornerMatch
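The reworked test above exercises the new split between aom_compute_mean_stddev() and aom_compute_correlation(), where a flat patch reports one_over_stddev = 0 so the later correlation evaluates to 0 without dividing by zero. The sketch below illustrates that guard; the patch size, variance threshold and function name are assumptions, and the real libaom constants may differ.

/* Sketch of the mean / 1-over-stddev precomputation with a zero-variance
   guard (illustrative constants and names). */
#include <math.h>
#include <stdbool.h>

#define SK_MATCH_SZ 16

static bool sk_mean_stddev(const unsigned char *p, int stride, double *mean,
                           double *one_over_stddev) {
  int sum = 0, sumsq = 0;
  for (int i = 0; i < SK_MATCH_SZ; ++i)
    for (int j = 0; j < SK_MATCH_SZ; ++j) {
      const int v = p[i * stride + j];
      sum += v;
      sumsq += v * v;
    }
  const int n = SK_MATCH_SZ * SK_MATCH_SZ;
  *mean = (double)sum / n;
  const double var = (double)sumsq / n - *mean * *mean;
  if (var < 1e-6) {        /* Flat patch: no usable variance. */
    *one_over_stddev = 0;  /* Correlation built from this factor becomes 0. */
    return false;
  }
  *one_over_stddev = 1.0 / sqrt(var);
  return true;
}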
diff --git a/third_party/aom/test/disflow_test.cc b/third_party/aom/test/disflow_test.cc
index 124c9a96c7..4f004480e2 100644
--- a/third_party/aom/test/disflow_test.cc
+++ b/third_party/aom/test/disflow_test.cc
@@ -114,6 +114,11 @@ INSTANTIATE_TEST_SUITE_P(SSE4_1, ComputeFlowTest,
::testing::Values(aom_compute_flow_at_point_sse4_1));
#endif
+#if HAVE_AVX2
+INSTANTIATE_TEST_SUITE_P(AVX2, ComputeFlowTest,
+ ::testing::Values(aom_compute_flow_at_point_avx2));
+#endif
+
#if HAVE_NEON
INSTANTIATE_TEST_SUITE_P(NEON, ComputeFlowTest,
::testing::Values(aom_compute_flow_at_point_neon));
diff --git a/third_party/aom/test/encode_api_test.cc b/third_party/aom/test/encode_api_test.cc
index 605743f9be..a7d5b3aa3c 100644
--- a/third_party/aom/test/encode_api_test.cc
+++ b/third_party/aom/test/encode_api_test.cc
@@ -10,6 +10,8 @@
*/
#include <cassert>
+#include <climits>
+#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <tuple>
@@ -556,6 +558,83 @@ TEST(EncodeAPI, Buganizer310457427) {
encoder.Encode(false);
}
+TEST(EncodeAPI, PtsSmallerThanInitialPts) {
+ // Initialize libaom encoder.
+ aom_codec_iface_t *const iface = aom_codec_av1_cx();
+ aom_codec_ctx_t enc;
+ aom_codec_enc_cfg_t cfg;
+
+ ASSERT_EQ(aom_codec_enc_config_default(iface, &cfg, AOM_USAGE_REALTIME),
+ AOM_CODEC_OK);
+
+ cfg.g_w = 1280;
+ cfg.g_h = 720;
+ cfg.rc_target_bitrate = 1000;
+
+ ASSERT_EQ(aom_codec_enc_init(&enc, iface, &cfg, 0), AOM_CODEC_OK);
+
+ // Create input image.
+ aom_image_t *const image =
+ CreateGrayImage(AOM_IMG_FMT_I420, cfg.g_w, cfg.g_h);
+ ASSERT_NE(image, nullptr);
+
+ // Encode frame.
+ ASSERT_EQ(aom_codec_encode(&enc, image, 12, 1, 0), AOM_CODEC_OK);
+ ASSERT_EQ(aom_codec_encode(&enc, image, 13, 1, 0), AOM_CODEC_OK);
+ // pts (10) is smaller than the initial pts (12).
+ ASSERT_EQ(aom_codec_encode(&enc, image, 10, 1, 0), AOM_CODEC_INVALID_PARAM);
+
+ // Free resources.
+ aom_img_free(image);
+ aom_codec_destroy(&enc);
+}
+
+TEST(EncodeAPI, PtsOrDurationTooBig) {
+ // Initialize libaom encoder.
+ aom_codec_iface_t *const iface = aom_codec_av1_cx();
+ aom_codec_ctx_t enc;
+ aom_codec_enc_cfg_t cfg;
+
+ ASSERT_EQ(aom_codec_enc_config_default(iface, &cfg, AOM_USAGE_REALTIME),
+ AOM_CODEC_OK);
+
+ cfg.g_w = 1280;
+ cfg.g_h = 720;
+ cfg.rc_target_bitrate = 1000;
+
+ ASSERT_EQ(aom_codec_enc_init(&enc, iface, &cfg, 0), AOM_CODEC_OK);
+
+ // Create input image.
+ aom_image_t *const image =
+ CreateGrayImage(AOM_IMG_FMT_I420, cfg.g_w, cfg.g_h);
+ ASSERT_NE(image, nullptr);
+
+ // Encode frame.
+ ASSERT_EQ(aom_codec_encode(&enc, image, 0, 1, 0), AOM_CODEC_OK);
+ // pts, when converted to ticks, is too big.
+ ASSERT_EQ(aom_codec_encode(&enc, image, INT64_MAX / 1000000 + 1, 1, 0),
+ AOM_CODEC_INVALID_PARAM);
+#if ULONG_MAX > INT64_MAX
+ // duration is too big.
+ ASSERT_EQ(aom_codec_encode(&enc, image, 0, (1ul << 63), 0),
+ AOM_CODEC_INVALID_PARAM);
+ // pts + duration is too big.
+ ASSERT_EQ(aom_codec_encode(&enc, image, 1, INT64_MAX, 0),
+ AOM_CODEC_INVALID_PARAM);
+#endif
+ // pts + duration, when converted to ticks, is too big.
+#if ULONG_MAX > INT64_MAX
+ ASSERT_EQ(aom_codec_encode(&enc, image, 0, 0x1c0a0a1a3232, 0),
+ AOM_CODEC_INVALID_PARAM);
+#endif
+ ASSERT_EQ(aom_codec_encode(&enc, image, INT64_MAX / 1000000, 1, 0),
+ AOM_CODEC_INVALID_PARAM);
+
+ // Free resources.
+ aom_img_free(image);
+ aom_codec_destroy(&enc);
+}
+
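The PtsOrDurationTooBig test probes the boundary at INT64_MAX / 1000000, i.e. the point where scaling a presentation timestamp into ticks would overflow a signed 64-bit value. A small sketch of such an overflow guard, assuming a hypothetical helper name and tick rate:

/* Overflow-safe guard before scaling a pts into ticks (assumed names). */
#include <stdint.h>

#define SK_TICKS_PER_SEC 1000000

static int sk_pts_fits_in_ticks(int64_t pts) {
  /* pts * SK_TICKS_PER_SEC must stay representable as int64_t, which is
     exactly the INT64_MAX / 1000000 boundary the test checks. */
  if (pts > INT64_MAX / SK_TICKS_PER_SEC) return 0;
  if (pts < INT64_MIN / SK_TICKS_PER_SEC) return 0;
  return 1;
}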
class EncodeAPIParameterized
: public testing::TestWithParam<std::tuple<
/*usage=*/unsigned int, /*speed=*/int, /*aq_mode=*/unsigned int>> {};
diff --git a/third_party/aom/test/hbd_metrics_test.cc b/third_party/aom/test/hbd_metrics_test.cc
index 303d580c4a..71c816f1cc 100644
--- a/third_party/aom/test/hbd_metrics_test.cc
+++ b/third_party/aom/test/hbd_metrics_test.cc
@@ -112,10 +112,10 @@ class HBDMetricsTestBase {
memset(&hbd_src, 0, sizeof(hbd_src));
memset(&hbd_dst, 0, sizeof(hbd_dst));
- aom_alloc_frame_buffer(&lbd_src, width, height, 1, 1, 0, 32, 16, 0, 0);
- aom_alloc_frame_buffer(&lbd_dst, width, height, 1, 1, 0, 32, 16, 0, 0);
- aom_alloc_frame_buffer(&hbd_src, width, height, 1, 1, 1, 32, 16, 0, 0);
- aom_alloc_frame_buffer(&hbd_dst, width, height, 1, 1, 1, 32, 16, 0, 0);
+ aom_alloc_frame_buffer(&lbd_src, width, height, 1, 1, 0, 32, 16, false, 0);
+ aom_alloc_frame_buffer(&lbd_dst, width, height, 1, 1, 0, 32, 16, false, 0);
+ aom_alloc_frame_buffer(&hbd_src, width, height, 1, 1, 1, 32, 16, false, 0);
+ aom_alloc_frame_buffer(&hbd_dst, width, height, 1, 1, 1, 32, 16, false, 0);
memset(lbd_src.buffer_alloc, kPixFiller, lbd_src.buffer_alloc_sz);
while (i < lbd_src.buffer_alloc_sz) {
diff --git a/third_party/aom/test/level_test.cc b/third_party/aom/test/level_test.cc
index a7c26d2305..6d59f45272 100644
--- a/third_party/aom/test/level_test.cc
+++ b/third_party/aom/test/level_test.cc
@@ -135,12 +135,12 @@ TEST_P(LevelTest, TestLevelMonitoringLowBitrate) {
// To save run time, we only test speed 4.
if (cpu_used_ == 4) {
libaom_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288,
- 30, 1, 0, 40);
+ 30, 1, 0, 30);
target_level_ = kLevelKeepStats;
cfg_.rc_target_bitrate = 1000;
- cfg_.g_limit = 40;
+ cfg_.g_limit = 30;
ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
- ASSERT_EQ(level_[0], 0);
+ ASSERT_LE(level_[0], 0);
}
}
@@ -148,12 +148,12 @@ TEST_P(LevelTest, TestLevelMonitoringHighBitrate) {
// To save run time, we only test speed 4.
if (cpu_used_ == 4) {
libaom_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288,
- 30, 1, 0, 40);
+ 30, 1, 0, 30);
target_level_ = kLevelKeepStats;
cfg_.rc_target_bitrate = 4000;
- cfg_.g_limit = 40;
+ cfg_.g_limit = 30;
ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
- ASSERT_EQ(level_[0], 4);
+ ASSERT_LE(level_[0], 4);
}
}
@@ -166,7 +166,7 @@ TEST_P(LevelTest, TestTargetLevel0) {
target_level_ = target_level;
cfg_.rc_target_bitrate = 4000;
ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
- ASSERT_EQ(level_[0], target_level);
+ ASSERT_LE(level_[0], target_level);
}
}
diff --git a/third_party/aom/test/quantize_func_test.cc b/third_party/aom/test/quantize_func_test.cc
index 328d5b10df..61f26ea57f 100644
--- a/third_party/aom/test/quantize_func_test.cc
+++ b/third_party/aom/test/quantize_func_test.cc
@@ -19,6 +19,7 @@
#include "config/av1_rtcd.h"
#include "aom/aom_codec.h"
+#include "aom_dsp/txfm_common.h"
#include "aom_ports/aom_timer.h"
#include "av1/encoder/encoder.h"
#include "av1/common/scan.h"
@@ -482,9 +483,9 @@ const QuantizeParam<LPQuantizeFunc> kLPQParamArrayAvx2[] = {
make_tuple(&av1_quantize_lp_c, &av1_quantize_lp_avx2,
static_cast<TX_SIZE>(TX_16X16), TYPE_FP, AOM_BITS_8),
make_tuple(&av1_quantize_lp_c, &av1_quantize_lp_avx2,
- static_cast<TX_SIZE>(TX_32X32), TYPE_FP, AOM_BITS_8),
+ static_cast<TX_SIZE>(TX_8X8), TYPE_FP, AOM_BITS_8),
make_tuple(&av1_quantize_lp_c, &av1_quantize_lp_avx2,
- static_cast<TX_SIZE>(TX_64X64), TYPE_FP, AOM_BITS_8)
+ static_cast<TX_SIZE>(TX_4X4), TYPE_FP, AOM_BITS_8)
};
INSTANTIATE_TEST_SUITE_P(AVX2, LowPrecisionQuantizeTest,
@@ -704,9 +705,9 @@ const QuantizeParam<LPQuantizeFunc> kLPQParamArrayNEON[] = {
make_tuple(av1_quantize_lp_c, av1_quantize_lp_neon,
static_cast<TX_SIZE>(TX_16X16), TYPE_FP, AOM_BITS_8),
make_tuple(av1_quantize_lp_c, av1_quantize_lp_neon,
- static_cast<TX_SIZE>(TX_32X32), TYPE_FP, AOM_BITS_8),
+ static_cast<TX_SIZE>(TX_8X8), TYPE_FP, AOM_BITS_8),
make_tuple(av1_quantize_lp_c, av1_quantize_lp_neon,
- static_cast<TX_SIZE>(TX_64X64), TYPE_FP, AOM_BITS_8)
+ static_cast<TX_SIZE>(TX_4X4), TYPE_FP, AOM_BITS_8)
};
INSTANTIATE_TEST_SUITE_P(NEON, LowPrecisionQuantizeTest,
diff --git a/third_party/aom/test/resize_test.cc b/third_party/aom/test/resize_test.cc
index 755d4e3d02..a84a4654a8 100644
--- a/third_party/aom/test/resize_test.cc
+++ b/third_party/aom/test/resize_test.cc
@@ -15,7 +15,6 @@
#include "aom/aomcx.h"
#include "aom_dsp/aom_dsp_common.h"
#include "av1/encoder/encoder.h"
-#include "common/tools_common.h"
#include "third_party/googletest/src/googletest/include/gtest/gtest.h"
#include "test/codec_factory.h"
#include "test/encode_test_driver.h"
@@ -690,6 +689,45 @@ TEST_P(ResizeRealtimeTest, TestExternalResizeWorks) {
}
}
+TEST_P(ResizeRealtimeTest, TestExternalResizeWorksUsePSNR) {
+ ResizingVideoSource video;
+ video.flag_codec_ = 1;
+ change_bitrate_ = false;
+ set_scale_mode_ = false;
+ set_scale_mode2_ = false;
+ set_scale_mode3_ = false;
+ mismatch_psnr_ = 0.0;
+ mismatch_nframes_ = 0;
+ init_flags_ = AOM_CODEC_USE_PSNR;
+ cfg_.rc_dropframe_thresh = 30;
+ DefaultConfig();
+ // Test external resizing with start resolution equal to
+ // 1. kInitialWidth and kInitialHeight
+ // 2. down-scaled kInitialWidth and kInitialHeight
+ for (int i = 0; i < 2; i++) {
+ video.change_start_resln_ = static_cast<bool>(i);
+
+ ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
+
+ // Check we decoded the same number of frames as we attempted to encode
+ ASSERT_EQ(frame_info_list_.size(), video.limit());
+ for (const auto &info : frame_info_list_) {
+ const unsigned int frame = static_cast<unsigned>(info.pts);
+ unsigned int expected_w;
+ unsigned int expected_h;
+ ScaleForFrameNumber(frame, kInitialWidth, kInitialHeight,
+ video.flag_codec_, video.change_start_resln_,
+ &expected_w, &expected_h);
+ EXPECT_EQ(expected_w, info.w)
+ << "Frame " << frame << " had unexpected width";
+ EXPECT_EQ(expected_h, info.h)
+ << "Frame " << frame << " had unexpected height";
+ EXPECT_EQ(static_cast<unsigned int>(0), GetMismatchFrames());
+ }
+ frame_info_list_.clear();
+ }
+}
+
// Verify the dynamic resizer behavior for real time, 1 pass CBR mode.
// Run at low bitrate, with resize_allowed = 1, and verify that we get
// one resize down event.
diff --git a/third_party/aom/test/sad_test.cc b/third_party/aom/test/sad_test.cc
index 521274863c..64cf8006be 100644
--- a/third_party/aom/test/sad_test.cc
+++ b/third_party/aom/test/sad_test.cc
@@ -3202,6 +3202,7 @@ const SadSkipMxNx4Param skip_x4d_avx2_tests[] = {
make_tuple(32, 8, &aom_sad_skip_32x8x4d_avx2, -1),
make_tuple(16, 64, &aom_sad_skip_16x64x4d_avx2, -1),
+ make_tuple(16, 4, &aom_sad_skip_16x4x4d_avx2, -1),
#endif
};
@@ -3294,6 +3295,7 @@ const SadMxNx4Param x3d_avx2_tests[] = {
#if !CONFIG_REALTIME_ONLY
make_tuple(32, 8, &aom_sad32x8x3d_avx2, -1),
make_tuple(64, 16, &aom_sad64x16x3d_avx2, -1),
+ make_tuple(16, 4, &aom_sad16x4x3d_avx2, -1),
#endif // !CONFIG_REALTIME_ONLY
#if CONFIG_AV1_HIGHBITDEPTH
diff --git a/third_party/aom/test/segment_binarization_sync.cc b/third_party/aom/test/segment_binarization_sync.cc
index bd8cf11410..108e66a838 100644
--- a/third_party/aom/test/segment_binarization_sync.cc
+++ b/third_party/aom/test/segment_binarization_sync.cc
@@ -10,15 +10,14 @@
*/
#include "third_party/googletest/src/googletest/include/gtest/gtest.h"
+
+#include "av1/common/seg_common.h"
+#include "av1/decoder/decodemv.h"
+#include "av1/encoder/bitstream.h"
#include "test/acm_random.h"
using libaom_test::ACMRandom;
-extern "C" {
-int av1_neg_interleave(int x, int ref, int max);
-int av1_neg_deinterleave(int diff, int ref, int max);
-}
-
namespace {
struct Segment {
@@ -28,8 +27,6 @@ struct Segment {
};
Segment GenerateSegment(int seed) {
- static const int MAX_SEGMENTS = 8;
-
ACMRandom rnd_(seed);
Segment segment;
diff --git a/third_party/aom/test/sharpness_test.cc b/third_party/aom/test/sharpness_test.cc
index 64465c88eb..054fbcc660 100644
--- a/third_party/aom/test/sharpness_test.cc
+++ b/third_party/aom/test/sharpness_test.cc
@@ -30,7 +30,7 @@ const std::unordered_map<
kPsnrThreshold = { { static_cast<int>(::libaom_test::kTwoPassGood),
{ { 2, { { 2, 37.6 }, { 5, 37.6 } } },
{ 4, { { 2, 37.5 }, { 5, 37.5 } } },
- { 6, { { 2, 37.5 }, { 5, 37.5 } } } } },
+ { 6, { { 2, 37.4 }, { 5, 37.4 } } } } },
{ static_cast<int>(::libaom_test::kAllIntra),
{ { 3, { { 2, 42.2 }, { 5, 42.2 } } },
{ 6, { { 2, 41.8 }, { 4, 41.9 }, { 5, 41.9 } } },
diff --git a/third_party/aom/test/test.cmake b/third_party/aom/test/test.cmake
index ce94a5a657..e2f5da570d 100644
--- a/third_party/aom/test/test.cmake
+++ b/third_party/aom/test/test.cmake
@@ -28,8 +28,7 @@ function(add_to_libaom_test_srcs src_list_name)
set(AOM_TEST_SOURCE_VARS "${AOM_TEST_SOURCE_VARS}" PARENT_SCOPE)
endfunction()
-list(APPEND AOM_UNIT_TEST_WRAPPER_SOURCES "${AOM_GEN_SRC_DIR}/usage_exit.c"
- "${AOM_ROOT}/test/test_libaom.cc")
+list(APPEND AOM_UNIT_TEST_WRAPPER_SOURCES "${AOM_ROOT}/test/test_libaom.cc")
add_to_libaom_test_srcs(AOM_UNIT_TEST_WRAPPER_SOURCES)
list(APPEND AOM_UNIT_TEST_COMMON_SOURCES
@@ -102,7 +101,7 @@ add_to_libaom_test_srcs(AOM_UNIT_TEST_ENCODER_SOURCES)
list(APPEND AOM_ENCODE_PERF_TEST_SOURCES "${AOM_ROOT}/test/encode_perf_test.cc")
list(APPEND AOM_UNIT_TEST_WEBM_SOURCES "${AOM_ROOT}/test/webm_video_source.h")
add_to_libaom_test_srcs(AOM_UNIT_TEST_WEBM_SOURCES)
-list(APPEND AOM_TEST_INTRA_PRED_SPEED_SOURCES "${AOM_GEN_SRC_DIR}/usage_exit.c"
+list(APPEND AOM_TEST_INTRA_PRED_SPEED_SOURCES
"${AOM_ROOT}/test/test_intra_pred_speed.cc")
if(CONFIG_AV1_DECODER)
@@ -277,24 +276,24 @@ if(NOT BUILD_SHARED_LIBS)
list(APPEND AOM_UNIT_TEST_COMMON_SOURCES
"${AOM_ROOT}/test/coding_path_sync.cc")
endif()
- if(CONFIG_REALTIME_ONLY)
- list(REMOVE_ITEM AOM_UNIT_TEST_COMMON_SOURCES
- "${AOM_ROOT}/test/altref_test.cc"
- "${AOM_ROOT}/test/av1_encoder_parms_get_to_decoder.cc"
- "${AOM_ROOT}/test/av1_ext_tile_test.cc"
- "${AOM_ROOT}/test/cnn_test.cc"
- "${AOM_ROOT}/test/decode_multithreaded_test.cc"
- "${AOM_ROOT}/test/error_resilience_test.cc"
- "${AOM_ROOT}/test/kf_test.cc"
- "${AOM_ROOT}/test/lossless_test.cc"
- "${AOM_ROOT}/test/sb_multipass_test.cc"
- "${AOM_ROOT}/test/sb_qp_sweep_test.cc"
- "${AOM_ROOT}/test/selfguided_filter_test.cc"
- "${AOM_ROOT}/test/screen_content_test.cc"
- "${AOM_ROOT}/test/still_picture_test.cc"
- "${AOM_ROOT}/test/tile_independence_test.cc"
- "${AOM_ROOT}/test/tpl_model_test.cc")
- endif()
+ endif()
+ if(CONFIG_REALTIME_ONLY)
+ list(REMOVE_ITEM AOM_UNIT_TEST_COMMON_SOURCES
+ "${AOM_ROOT}/test/altref_test.cc"
+ "${AOM_ROOT}/test/av1_encoder_parms_get_to_decoder.cc"
+ "${AOM_ROOT}/test/av1_ext_tile_test.cc"
+ "${AOM_ROOT}/test/cnn_test.cc"
+ "${AOM_ROOT}/test/decode_multithreaded_test.cc"
+ "${AOM_ROOT}/test/error_resilience_test.cc"
+ "${AOM_ROOT}/test/kf_test.cc"
+ "${AOM_ROOT}/test/lossless_test.cc"
+ "${AOM_ROOT}/test/sb_multipass_test.cc"
+ "${AOM_ROOT}/test/sb_qp_sweep_test.cc"
+ "${AOM_ROOT}/test/selfguided_filter_test.cc"
+ "${AOM_ROOT}/test/screen_content_test.cc"
+ "${AOM_ROOT}/test/still_picture_test.cc"
+ "${AOM_ROOT}/test/tile_independence_test.cc"
+ "${AOM_ROOT}/test/tpl_model_test.cc")
endif()
if(CONFIG_FPMT_TEST AND (NOT CONFIG_REALTIME_ONLY))
@@ -462,6 +461,7 @@ function(setup_aom_test_targets)
add_executable(test_libaom ${AOM_UNIT_TEST_WRAPPER_SOURCES}
$<TARGET_OBJECTS:aom_common_app_util>
+ $<TARGET_OBJECTS:aom_usage_exit>
$<TARGET_OBJECTS:test_aom_common>)
set_property(TARGET test_libaom PROPERTY FOLDER ${AOM_IDE_TEST_FOLDER})
list(APPEND AOM_APP_TARGETS test_libaom)
@@ -484,9 +484,9 @@ function(setup_aom_test_targets)
endif()
if(NOT BUILD_SHARED_LIBS)
- add_executable(test_intra_pred_speed
- ${AOM_TEST_INTRA_PRED_SPEED_SOURCES}
- $<TARGET_OBJECTS:aom_common_app_util>)
+ add_executable(test_intra_pred_speed ${AOM_TEST_INTRA_PRED_SPEED_SOURCES}
+ $<TARGET_OBJECTS:aom_common_app_util>
+ $<TARGET_OBJECTS:aom_usage_exit>)
set_property(TARGET test_intra_pred_speed
PROPERTY FOLDER ${AOM_IDE_TEST_FOLDER})
target_link_libraries(test_intra_pred_speed ${AOM_LIB_LINK_TYPE} aom
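The test.cmake hunks stop compiling the generated ${AOM_GEN_SRC_DIR}/usage_exit.c into test_libaom and test_intra_pred_speed and instead link $<TARGET_OBJECTS:aom_usage_exit>, an object library carrying a single shared definition. usage_exit() is a hook: libaom's tool helpers declare it and call it on bad arguments, but leave the definition to each final binary. A minimal sketch of that declared-hook pattern (everything except the usage_exit name is illustrative, and this is not the aom source):

    #include <cstdio>
    #include <cstdlib>

    // tools_common-style declaration: helpers call this hook after printing
    // usage text, but deliberately leave its definition to the final binary.
    extern "C" void usage_exit(void);

    static void die_with_usage(const char *msg) {
      std::fprintf(stderr, "%s\n", msg);
      usage_exit();  // does not return
    }

    // Test-binary definition: nothing to print, just abort the run. Factoring
    // this one definition into its own object library lets every test
    // executable reuse it instead of recompiling a generated copy.
    extern "C" void usage_exit(void) { std::exit(EXIT_FAILURE); }

    int main(int argc, char **argv) {
      if (argc < 2) die_with_usage("missing argument");
      std::printf("ok: %s\n", argv[1]);
      return 0;
    }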
diff --git a/third_party/aom/test/test_libaom.cc b/third_party/aom/test/test_libaom.cc
index fbd7f2e380..26abbb0a06 100644
--- a/third_party/aom/test/test_libaom.cc
+++ b/third_party/aom/test/test_libaom.cc
@@ -62,6 +62,7 @@ int main(int argc, char **argv) {
if (!(caps & HAS_NEON_DOTPROD)) append_negative_gtest_filter("NEON_DOTPROD");
if (!(caps & HAS_NEON_I8MM)) append_negative_gtest_filter("NEON_I8MM");
if (!(caps & HAS_SVE)) append_negative_gtest_filter("SVE");
+ if (!(caps & HAS_SVE2)) append_negative_gtest_filter("SVE2");
#elif AOM_ARCH_ARM
const int caps = aom_arm_cpu_caps();
if (!(caps & HAS_NEON)) append_negative_gtest_filter("NEON");
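The new HAS_SVE2 line extends the existing scheme: when the host CPU lacks an extension, test suites tagged with that extension are skipped through a negative GoogleTest filter. A standalone sketch of composing such a filter string (this shows generic --gtest_filter syntax, not the aom append_negative_gtest_filter() helper):

    #include <cstdio>
    #include <string>
    #include <vector>

    // Build a --gtest_filter value that runs everything except suites whose
    // names contain one of the unsupported-extension tags.
    static std::string BuildFilter(const std::vector<std::string> &unsupported) {
      std::string negatives;
      for (const std::string &tag : unsupported) {
        if (!negatives.empty()) negatives += ':';
        negatives += "*" + tag + "*";
      }
      std::string filter = "*";                           // positive part: run all
      if (!negatives.empty()) filter += "-" + negatives;  // '-' starts the skip list
      return filter;
    }

    int main() {
      // e.g. a CPU without I8MM or SVE2 support:
      std::printf("--gtest_filter=%s\n",
                  BuildFilter({"NEON_I8MM", "SVE2"}).c_str());
      // Prints: --gtest_filter=*-*NEON_I8MM*:*SVE2*
      return 0;
    }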
diff --git a/third_party/aom/test/variance_test.cc b/third_party/aom/test/variance_test.cc
index e31f8f820c..261c080028 100644
--- a/third_party/aom/test/variance_test.cc
+++ b/third_party/aom/test/variance_test.cc
@@ -2785,64 +2785,6 @@ const GetSseSumParamsDual kArrayGetSseSum16x16Dual_sse2[] = {
INSTANTIATE_TEST_SUITE_P(SSE2, GetSseSum16x16DualTest,
::testing::ValuesIn(kArrayGetSseSum16x16Dual_sse2));
-const SubpelVarianceParams kArraySubpelVariance_sse2[] = {
- SubpelVarianceParams(7, 7, &aom_sub_pixel_variance128x128_sse2, 0),
- SubpelVarianceParams(7, 6, &aom_sub_pixel_variance128x64_sse2, 0),
- SubpelVarianceParams(6, 7, &aom_sub_pixel_variance64x128_sse2, 0),
- SubpelVarianceParams(6, 6, &aom_sub_pixel_variance64x64_sse2, 0),
- SubpelVarianceParams(6, 5, &aom_sub_pixel_variance64x32_sse2, 0),
- SubpelVarianceParams(5, 6, &aom_sub_pixel_variance32x64_sse2, 0),
- SubpelVarianceParams(5, 5, &aom_sub_pixel_variance32x32_sse2, 0),
- SubpelVarianceParams(5, 4, &aom_sub_pixel_variance32x16_sse2, 0),
- SubpelVarianceParams(4, 5, &aom_sub_pixel_variance16x32_sse2, 0),
- SubpelVarianceParams(4, 4, &aom_sub_pixel_variance16x16_sse2, 0),
- SubpelVarianceParams(4, 3, &aom_sub_pixel_variance16x8_sse2, 0),
- SubpelVarianceParams(3, 4, &aom_sub_pixel_variance8x16_sse2, 0),
- SubpelVarianceParams(3, 3, &aom_sub_pixel_variance8x8_sse2, 0),
- SubpelVarianceParams(3, 2, &aom_sub_pixel_variance8x4_sse2, 0),
- SubpelVarianceParams(2, 3, &aom_sub_pixel_variance4x8_sse2, 0),
- SubpelVarianceParams(2, 2, &aom_sub_pixel_variance4x4_sse2, 0),
-#if !CONFIG_REALTIME_ONLY
- SubpelVarianceParams(6, 4, &aom_sub_pixel_variance64x16_sse2, 0),
- SubpelVarianceParams(4, 6, &aom_sub_pixel_variance16x64_sse2, 0),
- SubpelVarianceParams(5, 3, &aom_sub_pixel_variance32x8_sse2, 0),
- SubpelVarianceParams(3, 5, &aom_sub_pixel_variance8x32_sse2, 0),
- SubpelVarianceParams(4, 2, &aom_sub_pixel_variance16x4_sse2, 0),
- SubpelVarianceParams(2, 4, &aom_sub_pixel_variance4x16_sse2, 0),
-#endif
-};
-INSTANTIATE_TEST_SUITE_P(SSE2, AvxSubpelVarianceTest,
- ::testing::ValuesIn(kArraySubpelVariance_sse2));
-
-const SubpelAvgVarianceParams kArraySubpelAvgVariance_sse2[] = {
- SubpelAvgVarianceParams(7, 7, &aom_sub_pixel_avg_variance128x128_sse2, 0),
- SubpelAvgVarianceParams(7, 6, &aom_sub_pixel_avg_variance128x64_sse2, 0),
- SubpelAvgVarianceParams(6, 7, &aom_sub_pixel_avg_variance64x128_sse2, 0),
- SubpelAvgVarianceParams(6, 6, &aom_sub_pixel_avg_variance64x64_sse2, 0),
- SubpelAvgVarianceParams(6, 5, &aom_sub_pixel_avg_variance64x32_sse2, 0),
- SubpelAvgVarianceParams(5, 6, &aom_sub_pixel_avg_variance32x64_sse2, 0),
- SubpelAvgVarianceParams(5, 5, &aom_sub_pixel_avg_variance32x32_sse2, 0),
- SubpelAvgVarianceParams(5, 4, &aom_sub_pixel_avg_variance32x16_sse2, 0),
- SubpelAvgVarianceParams(4, 5, &aom_sub_pixel_avg_variance16x32_sse2, 0),
- SubpelAvgVarianceParams(4, 4, &aom_sub_pixel_avg_variance16x16_sse2, 0),
- SubpelAvgVarianceParams(4, 3, &aom_sub_pixel_avg_variance16x8_sse2, 0),
- SubpelAvgVarianceParams(3, 4, &aom_sub_pixel_avg_variance8x16_sse2, 0),
- SubpelAvgVarianceParams(3, 3, &aom_sub_pixel_avg_variance8x8_sse2, 0),
- SubpelAvgVarianceParams(3, 2, &aom_sub_pixel_avg_variance8x4_sse2, 0),
- SubpelAvgVarianceParams(2, 3, &aom_sub_pixel_avg_variance4x8_sse2, 0),
- SubpelAvgVarianceParams(2, 2, &aom_sub_pixel_avg_variance4x4_sse2, 0),
-#if !CONFIG_REALTIME_ONLY
- SubpelAvgVarianceParams(6, 4, &aom_sub_pixel_avg_variance64x16_sse2, 0),
- SubpelAvgVarianceParams(4, 6, &aom_sub_pixel_avg_variance16x64_sse2, 0),
- SubpelAvgVarianceParams(5, 3, &aom_sub_pixel_avg_variance32x8_sse2, 0),
- SubpelAvgVarianceParams(3, 5, &aom_sub_pixel_avg_variance8x32_sse2, 0),
- SubpelAvgVarianceParams(4, 2, &aom_sub_pixel_avg_variance16x4_sse2, 0),
- SubpelAvgVarianceParams(2, 4, &aom_sub_pixel_avg_variance4x16_sse2, 0),
-#endif
-};
-INSTANTIATE_TEST_SUITE_P(SSE2, AvxSubpelAvgVarianceTest,
- ::testing::ValuesIn(kArraySubpelAvgVariance_sse2));
-
#if CONFIG_AV1_HIGHBITDEPTH
#if HAVE_SSE2
INSTANTIATE_TEST_SUITE_P(
@@ -2852,6 +2794,15 @@ INSTANTIATE_TEST_SUITE_P(
MseHBDWxHParams(2, 3, &aom_mse_wxh_16bit_highbd_sse2, 10),
MseHBDWxHParams(2, 2, &aom_mse_wxh_16bit_highbd_sse2,
10)));
+
+INSTANTIATE_TEST_SUITE_P(
+ SSE2, AvxHBDMseTest,
+ ::testing::Values(MseParams(4, 4, &aom_highbd_12_mse16x16_sse2, 12),
+ MseParams(3, 3, &aom_highbd_12_mse8x8_sse2, 12),
+ MseParams(4, 4, &aom_highbd_10_mse16x16_sse2, 10),
+ MseParams(3, 3, &aom_highbd_10_mse8x8_sse2, 10),
+ MseParams(4, 4, &aom_highbd_8_mse16x16_sse2, 8),
+ MseParams(3, 3, &aom_highbd_8_mse8x8_sse2, 8)));
#endif // HAVE_SSE2
#if HAVE_SSE4_1
INSTANTIATE_TEST_SUITE_P(
@@ -2878,14 +2829,11 @@ INSTANTIATE_TEST_SUITE_P(
12)));
#endif // HAVE_SSE4_1
+#if HAVE_AVX2
INSTANTIATE_TEST_SUITE_P(
- SSE2, AvxHBDMseTest,
- ::testing::Values(MseParams(4, 4, &aom_highbd_12_mse16x16_sse2, 12),
- MseParams(3, 3, &aom_highbd_12_mse8x8_sse2, 12),
- MseParams(4, 4, &aom_highbd_10_mse16x16_sse2, 10),
- MseParams(3, 3, &aom_highbd_10_mse8x8_sse2, 10),
- MseParams(4, 4, &aom_highbd_8_mse16x16_sse2, 8),
- MseParams(3, 3, &aom_highbd_8_mse8x8_sse2, 8)));
+ AVX2, AvxHBDMseTest,
+ ::testing::Values(MseParams(4, 4, &aom_highbd_10_mse16x16_avx2, 10)));
+#endif // HAVE_AVX2
const VarianceParams kArrayHBDVariance_sse2[] = {
VarianceParams(7, 7, &aom_highbd_12_variance128x128_sse2, 12),
diff --git a/third_party/aom/test/wiener_test.cc b/third_party/aom/test/wiener_test.cc
index 7eb6372aaa..b995c84d8f 100644
--- a/third_party/aom/test/wiener_test.cc
+++ b/third_party/aom/test/wiener_test.cc
@@ -1075,6 +1075,233 @@ TEST(SearchWienerTest, 12bitSignedIntegerOverflowInUpdateBSepSym) {
EXPECT_EQ(aom_codec_destroy(&enc), AOM_CODEC_OK);
}
+// A test that reproduces crbug.com/oss-fuzz/66474: signed integer overflow in
+// update_b_sep_sym().
+TEST(SearchWienerTest, 12bitSignedIntegerOverflowInUpdateBSepSym2) {
+ constexpr int kWidth = 510;
+ constexpr int kHeight = 3;
+ static const uint16_t buffer[kWidth * kHeight] = {
+ // Y plane:
+ 2136, 4095, 0, 0, 0, 4095, 4095, 0, 4095, 4095, 329, 0,
+ 4095, 0, 4095, 2587, 0, 0, 0, 4095, 0, 0, 0, 0,
+ 4095, 0, 4095, 878, 0, 4095, 0, 4095, 1474, 0, 573, 0,
+ 2401, 0, 1663, 4095, 0, 9, 3381, 0, 1084, 0, 270, 0,
+ 4095, 4095, 4095, 3992, 4095, 2047, 0, 0, 0, 4095, 41, 0,
+ 2726, 279, 0, 0, 4095, 0, 0, 1437, 0, 4095, 4095, 0,
+ 0, 0, 4095, 1683, 183, 3976, 3052, 0, 4095, 0, 0, 0,
+ 4095, 4095, 1882, 4095, 0, 4095, 83, 4095, 0, 4095, 0, 0,
+ 4095, 4095, 0, 0, 1637, 4095, 0, 4095, 0, 4095, 4095, 4095,
+ 0, 4095, 197, 4095, 563, 0, 3696, 3073, 3670, 0, 4095, 4095,
+ 0, 0, 0, 4095, 0, 0, 0, 0, 4095, 4095, 0, 0,
+ 0, 3539, 3468, 0, 2856, 3880, 0, 0, 1350, 2358, 4095, 802,
+ 4051, 0, 4095, 4095, 4095, 1677, 4095, 1135, 0, 4095, 0, 0,
+ 0, 618, 4095, 4095, 4095, 0, 2080, 4095, 0, 0, 1917, 0,
+ 0, 4095, 1937, 2835, 4095, 4095, 4095, 4095, 0, 4095, 4095, 3938,
+ 1707, 0, 0, 0, 4095, 448, 4095, 0, 1000, 2481, 3408, 0,
+ 0, 4095, 0, 3176, 0, 4095, 0, 4095, 4095, 4095, 0, 160,
+ 222, 1134, 4095, 4095, 0, 3539, 4095, 569, 3364, 0, 4095, 3687,
+ 0, 4095, 0, 0, 473, 0, 0, 4095, 298, 0, 3126, 4095,
+ 3854, 424, 0, 0, 4095, 3893, 0, 0, 175, 2774, 0, 4095,
+ 0, 2661, 950, 4095, 0, 1553, 0, 4095, 0, 4095, 4095, 2767,
+ 3630, 799, 255, 0, 4095, 0, 0, 4095, 2375, 0, 0, 0,
+ 0, 4095, 4095, 0, 0, 0, 1404, 4095, 4095, 4095, 4095, 2317,
+ 4095, 1227, 2205, 775, 0, 4095, 0, 0, 797, 1125, 736, 1773,
+ 2996, 4095, 2822, 4095, 4095, 0, 0, 0, 919, 0, 968, 3426,
+ 2702, 2613, 3647, 0, 0, 4095, 4095, 129, 4095, 0, 0, 4095,
+ 0, 0, 3632, 0, 3275, 123, 4095, 1566, 0, 0, 0, 1609,
+ 0, 1466, 4095, 577, 4095, 4095, 0, 4095, 1103, 1103, 4095, 0,
+ 1909, 0, 4095, 0, 4095, 4095, 227, 0, 4095, 2168, 4095, 374,
+ 4095, 4095, 4095, 0, 0, 0, 4095, 2066, 4095, 4095, 1475, 0,
+ 1959, 673, 4095, 0, 4095, 4095, 4095, 1142, 0, 464, 1819, 2033,
+ 4095, 0, 2212, 4095, 4095, 3961, 0, 4095, 0, 2838, 0, 4095,
+ 4095, 4095, 4095, 0, 3796, 3379, 2208, 0, 4095, 4095, 1943, 478,
+ 3573, 4095, 1763, 0, 0, 4095, 4095, 4095, 4095, 2061, 3346, 4095,
+ 0, 0, 4095, 0, 4095, 4095, 4095, 3738, 4095, 4095, 0, 4095,
+ 0, 425, 0, 0, 0, 927, 0, 0, 1814, 966, 4095, 0,
+ 0, 3185, 570, 3883, 2932, 0, 1413, 4095, 4095, 4095, 4095, 2477,
+ 2270, 4095, 2531, 4095, 1936, 3110, 99, 3936, 4095, 1315, 4095, 0,
+ 4095, 3564, 4095, 0, 0, 2797, 4095, 0, 1598, 0, 0, 3064,
+ 3526, 4095, 4095, 0, 3473, 3661, 0, 2388, 0, 4095, 639, 4095,
+ 0, 4095, 2390, 3715, 4095, 0, 0, 0, 740, 4095, 1432, 0,
+ 0, 0, 4057, 0, 0, 757, 4095, 4095, 0, 1437, 0, 0,
+ 4095, 0, 0, 0, 0, 0, 272, 4095, 4095, 4095, 2175, 4058,
+ 0, 4095, 4095, 4095, 3959, 3535, 0, 4095, 0, 0, 4095, 4095,
+ 4095, 4095, 0, 0, 4095, 4095, 4095, 3440, 3811, 0, 4095, 4095,
+ 4095, 4095, 0, 4095, 3193, 3674, 2819, 4095, 4095, 4048, 0, 0,
+ 4037, 4095, 3110, 4095, 1003, 0, 3650, 4095, 4095, 3154, 0, 1274,
+ 2192, 4095, 0, 4095, 0, 2814, 981, 370, 1407, 0, 4095, 1518,
+ 4095, 0, 0, 0, 0, 4095, 1577, 0, 4095, 0, 2607, 4095,
+ 3583, 0, 0, 4095, 1983, 1498, 4095, 4095, 2645, 4095, 4095, 3480,
+ 2587, 4095, 0, 0, 0, 0, 4095, 0, 4095, 4095, 0, 284,
+ 3973, 0, 0, 3677, 2463, 4095, 1338, 0, 4095, 0, 0, 4095,
+ 212, 2000, 4095, 4095, 0, 4095, 3780, 2039, 4095, 2453, 4095, 2050,
+ 2660, 1, 3839, 5, 1, 505, 809, 2907, 0, 0, 0, 1421,
+ 4095, 0, 0, 4095, 4095, 4095, 552, 0, 0, 4095, 3056, 0,
+ 0, 0, 0, 0, 4095, 0, 3386, 0, 0, 0, 4095, 0,
+ 0, 3404, 2702, 3534, 4095, 3562, 0, 4095, 4095, 150, 4095, 0,
+ 0, 3599, 4095, 4095, 0, 0, 0, 4095, 4095, 2093, 4095, 3753,
+ 3754, 4095, 0, 4095, 2733, 4095, 4095, 0, 0, 4095, 0, 0,
+ 0, 1496, 4095, 2366, 2936, 2494, 4095, 744, 1173, 4095, 0, 0,
+ 0, 1966, 4095, 4095, 0, 178, 3254, 4095, 4095, 995, 4095, 2083,
+ 0, 2639, 4095, 3422, 4095, 4095, 4095, 0, 842, 4095, 4095, 552,
+ 3681, 4095, 0, 1075, 2631, 554, 0, 0, 4095, 0, 0, 0,
+ 4095, 4095, 0, 0, 0, 2234, 0, 1098, 4095, 3164, 4095, 0,
+ 2748, 0, 0, 0, 4095, 4095, 4095, 1724, 891, 3496, 3964, 4095,
+ 0, 0, 1923, 4095, 4095, 4095, 3118, 0, 0, 0, 4095, 4095,
+ 0, 0, 3856, 4095, 0, 0, 4095, 4095, 2647, 0, 2089, 4095,
+ 471, 0, 4095, 0, 0, 0, 4095, 0, 1263, 2969, 289, 0,
+ 0, 4095, 289, 0, 0, 2965, 0, 0, 3280, 2279, 4091, 5,
+ 512, 1776, 4, 2046, 3994, 1, 4095, 898, 4095, 0, 0, 0,
+ 0, 4095, 0, 4095, 4095, 1930, 0, 0, 3725, 4095, 4095, 0,
+ 2593, 4095, 0, 4095, 984, 0, 4095, 2388, 0, 0, 4095, 4095,
+ 3341, 4095, 0, 2787, 0, 831, 2978, 4095, 0, 0, 0, 4095,
+ 1624, 4095, 1054, 1039, 0, 89, 3565, 0, 4095, 468, 0, 4095,
+ 4095, 0, 4095, 4095, 0, 3907, 0, 0, 0, 0, 0, 0,
+ 4095, 1898, 2178, 4095, 0, 3708, 2825, 0, 4095, 0, 4095, 4095,
+ 0, 0, 811, 1078, 0, 4095, 0, 3478, 0, 0, 1127, 0,
+ 504, 4095, 4095, 2006, 4095, 0, 2666, 1172, 4095, 4095, 4095, 4095,
+ 4095, 0, 199, 4095, 0, 2355, 2650, 2961, 0, 0, 0, 4095,
+ 4095, 0, 4095, 0, 4095, 1477, 0, 0, 1946, 0, 3352, 1988,
+ 0, 0, 2321, 4095, 0, 4095, 3367, 0, 0, 4095, 4095, 1946,
+ 0, 4034, 0, 0, 4095, 4095, 0, 0, 0, 0, 4095, 973,
+ 1734, 3966, 4095, 0, 3780, 1242, 0, 4095, 1301, 0, 1513, 4095,
+ 1079, 4095, 0, 0, 1316, 4095, 4095, 675, 2713, 2006, 4095, 4095,
+ 0, 0, 4095, 4095, 0, 3542, 4095, 0, 2365, 130, 4095, 2919,
+ 0, 4095, 3434, 0, 905, 4095, 673, 4095, 4095, 0, 3923, 293,
+ 4095, 213, 4095, 4095, 1334, 4095, 0, 3317, 0, 0, 0, 4095,
+ 4095, 4095, 2598, 2010, 0, 0, 3507, 0, 0, 0, 489, 0,
+ 0, 1782, 2681, 3303, 4095, 4095, 1955, 4095, 4095, 4095, 203, 1973,
+ 4095, 4020, 0, 4095, 1538, 0, 373, 1934, 4095, 0, 4095, 2244,
+ 4095, 1936, 4095, 640, 0, 4095, 0, 0, 0, 3653, 4095, 1966,
+ 4095, 4095, 4095, 4095, 0, 4095, 843, 0, 4095, 4095, 4095, 1646,
+ 4095, 0, 0, 4095, 4095, 4095, 2164, 0, 0, 0, 2141, 4095,
+ 0, 903, 4095, 4095, 0, 624, 4095, 792, 0, 0, 0, 0,
+ 0, 0, 0, 4095, 0, 4095, 4095, 2466, 0, 3631, 0, 4095,
+ 4095, 4095, 0, 941, 4095, 4095, 1609, 4095, 4095, 0, 0, 2398,
+ 4095, 4095, 2579, 0, 4020, 3485, 0, 0, 4095, 0, 4095, 0,
+ 3158, 2355, 0, 4095, 4095, 4095, 0, 0, 4095, 0, 0, 4095,
+ 475, 2272, 1010, 0, 0, 4095, 0, 0, 4095, 841, 4095, 4095,
+ 4095, 4095, 0, 4095, 0, 1046, 4095, 1738, 708, 4095, 0, 4095,
+ 4095, 0, 4095, 4095, 0, 4095, 4095, 0, 0, 0, 4032, 0,
+ 2679, 0, 1564, 0, 0, 0, 659, 1915, 4095, 3682, 0, 3660,
+ 4095, 723, 1383, 2499, 1353, 4095, 0, 3898, 2322, 3798, 4095, 0,
+ 444, 2277, 3729, 4095, 4095, 4095, 3054, 387, 3309, 4048, 3793, 2842,
+ 2087, 0, 3274, 2454, 518, 0, 4095, 0, 4095, 4095, 3358, 4095,
+ 2083, 2105, 0, 0, 0, 1125, 2636, 0, 0, 0, 0, 736,
+ 0, 349, 0, 4095, 2031, 4095, 992, 0, 4095, 3284, 4095, 214,
+ 3692, 4010, 402, 0, 0, 3776, 4095, 4095, 4095, 4095, 803, 2095,
+ 3864, 4095, 3323, 0, 0, 361, 1634, 0, 983, 0, 1181, 4095,
+ 1791, 4095, 367, 792, 4095, 4095, 3315, 3149, 4095, 62, 4095, 1791,
+ 3708, 2030, 4095, 1237, 0, 4095, 4095, 0, 0, 0, 0, 4095,
+ 1902, 2257, 4095, 4095, 0, 0, 2929, 4095, 0, 4095, 2356, 4095,
+ 2877, 1296, 4095, 0, 0, 0, 1310, 1968, 820, 4095, 4095, 4095,
+ 4095, 4095, 0, 0, 4095, 4095, 4095, 2897, 1787, 2218, 0, 129,
+ 4095, 4095, 0, 4095, 2331, 4095, 4095, 3192, 4095, 1744, 755, 0,
+ 1905, 0, 4095, 4095, 4095, 0, 0, 4095, 4095, 4095, 0, 0,
+ 0, 1467, 266, 1719, 4095, 729, 4095, 4095, 2647, 3543, 3388, 3326,
+ 4095, 0, 4095, 4095, 4095, 1416, 4095, 2131, 810, 0, 0, 4095,
+ 4095, 1250, 0, 0, 4095, 2722, 1493, 4095, 0, 4095, 0, 2895,
+ 0, 3847, 0, 2078, 0, 0, 0, 4095, 4095, 4095, 4095, 0,
+ 4095, 2651, 4095, 4095, 351, 2675, 4095, 0, 858, 0, 0, 0,
+ 816, 4095, 0, 4095, 0, 3842, 1990, 593, 0, 0, 3992, 4095,
+ 4095, 0, 4095, 1314, 4095, 4095, 1864, 2561, 4095, 1339, 0, 4095,
+ 2201, 4095, 0, 1403, 0, 0, 4095, 4095, 4095, 0, 0, 0,
+ 0, 0, 0, 577, 4095, 995, 2534, 827, 1431, 4095, 4095, 778,
+ 1405, 0, 0, 4095, 0, 4095, 1327, 4095, 0, 2725, 3351, 3937,
+ 741, 0, 2690, 2849, 4095, 4095, 2151, 0, 4095, 0, 4095, 4095,
+ 4095, 1342, 142, 1920, 1007, 2001
+ };
+ unsigned char *img_data =
+ reinterpret_cast<unsigned char *>(const_cast<uint16_t *>(buffer));
+
+ aom_image_t img;
+ EXPECT_EQ(&img, aom_img_wrap(&img, AOM_IMG_FMT_I42016, kWidth, kHeight, 1,
+ img_data));
+ img.cp = AOM_CICP_CP_UNSPECIFIED;
+ img.tc = AOM_CICP_TC_UNSPECIFIED;
+ img.mc = AOM_CICP_MC_UNSPECIFIED;
+ img.monochrome = 1;
+ img.csp = AOM_CSP_UNKNOWN;
+ img.range = AOM_CR_FULL_RANGE;
+ img.planes[1] = img.planes[2] = nullptr;
+ img.stride[1] = img.stride[2] = 0;
+
+ aom_codec_iface_t *iface = aom_codec_av1_cx();
+ aom_codec_enc_cfg_t cfg;
+ EXPECT_EQ(AOM_CODEC_OK,
+ aom_codec_enc_config_default(iface, &cfg, AOM_USAGE_GOOD_QUALITY));
+ cfg.rc_end_usage = AOM_Q;
+ cfg.g_profile = 2;
+ cfg.g_bit_depth = AOM_BITS_12;
+ cfg.g_input_bit_depth = 12;
+ cfg.g_w = kWidth;
+ cfg.g_h = kHeight;
+ cfg.g_lag_in_frames = 0;
+ cfg.g_threads = 53;
+ cfg.monochrome = 1;
+ cfg.rc_min_quantizer = 22;
+ cfg.rc_max_quantizer = 30;
+ aom_codec_ctx_t enc;
+ EXPECT_EQ(AOM_CODEC_OK,
+ aom_codec_enc_init(&enc, iface, &cfg, AOM_CODEC_USE_HIGHBITDEPTH));
+ EXPECT_EQ(AOM_CODEC_OK, aom_codec_control(&enc, AOME_SET_CQ_LEVEL, 26));
+ EXPECT_EQ(AOM_CODEC_OK, aom_codec_control(&enc, AV1E_SET_TILE_ROWS, 3));
+ EXPECT_EQ(AOM_CODEC_OK, aom_codec_control(&enc, AOME_SET_CPUUSED, 6));
+ EXPECT_EQ(AOM_CODEC_OK,
+ aom_codec_control(&enc, AV1E_SET_COLOR_RANGE, AOM_CR_FULL_RANGE));
+ EXPECT_EQ(AOM_CODEC_OK,
+ aom_codec_control(&enc, AOME_SET_TUNING, AOM_TUNE_SSIM));
+
+ // Encode frame
+ EXPECT_EQ(AOM_CODEC_OK, aom_codec_encode(&enc, &img, 0, 1, 0));
+ aom_codec_iter_t iter = nullptr;
+ const aom_codec_cx_pkt_t *pkt = aom_codec_get_cx_data(&enc, &iter);
+ ASSERT_NE(pkt, nullptr);
+ EXPECT_EQ(pkt->kind, AOM_CODEC_CX_FRAME_PKT);
+ // pkt->data.frame.flags is 0x1f0011.
+ EXPECT_EQ(pkt->data.frame.flags & AOM_FRAME_IS_KEY, AOM_FRAME_IS_KEY);
+ pkt = aom_codec_get_cx_data(&enc, &iter);
+ EXPECT_EQ(pkt, nullptr);
+
+ // Encode frame
+ EXPECT_EQ(AOM_CODEC_OK,
+ aom_codec_encode(&enc, &img, 0, 1, AOM_EFLAG_FORCE_KF));
+ iter = nullptr;
+ pkt = aom_codec_get_cx_data(&enc, &iter);
+ ASSERT_NE(pkt, nullptr);
+ EXPECT_EQ(pkt->kind, AOM_CODEC_CX_FRAME_PKT);
+ // pkt->data.frame.flags is 0x1f0011.
+ EXPECT_EQ(pkt->data.frame.flags & AOM_FRAME_IS_KEY, AOM_FRAME_IS_KEY);
+ pkt = aom_codec_get_cx_data(&enc, &iter);
+ EXPECT_EQ(pkt, nullptr);
+
+ // Encode frame
+ EXPECT_EQ(AOM_CODEC_OK, aom_codec_encode(&enc, &img, 0, 1, 0));
+ iter = nullptr;
+ pkt = aom_codec_get_cx_data(&enc, &iter);
+ ASSERT_NE(pkt, nullptr);
+ EXPECT_EQ(pkt->kind, AOM_CODEC_CX_FRAME_PKT);
+ pkt = aom_codec_get_cx_data(&enc, &iter);
+ EXPECT_EQ(pkt, nullptr);
+
+ // Encode frame
+ EXPECT_EQ(AOM_CODEC_OK, aom_codec_encode(&enc, &img, 0, 1, 0));
+ iter = nullptr;
+ pkt = aom_codec_get_cx_data(&enc, &iter);
+ ASSERT_NE(pkt, nullptr);
+ EXPECT_EQ(pkt->kind, AOM_CODEC_CX_FRAME_PKT);
+ pkt = aom_codec_get_cx_data(&enc, &iter);
+ EXPECT_EQ(pkt, nullptr);
+
+ // Flush encoder
+ EXPECT_EQ(AOM_CODEC_OK, aom_codec_encode(&enc, nullptr, 0, 1, 0));
+ iter = nullptr;
+ pkt = aom_codec_get_cx_data(&enc, &iter);
+ EXPECT_EQ(pkt, nullptr);
+
+ EXPECT_EQ(AOM_CODEC_OK, aom_codec_destroy(&enc));
+}
+
// A test that reproduces b/272139363: signed integer overflow in
// update_b_sep_sym().
TEST(SearchWienerTest, 10bitSignedIntegerOverflowInUpdateBSepSym) {
@@ -1164,6 +1391,161 @@ TEST(SearchWienerTest, 10bitSignedIntegerOverflowInUpdateBSepSym) {
EXPECT_EQ(AOM_CODEC_OK, aom_codec_destroy(&enc));
}
+// A test that reproduces b/319140742: signed integer overflow in
+// update_b_sep_sym().
+TEST(SearchWienerTest, 10bitSignedIntegerOverflowInUpdateBSepSym2) {
+ constexpr int kWidth = 326;
+ constexpr int kHeight = 3;
+ static const uint16_t buffer[kWidth * kHeight] = {
+ // Y plane:
+ 1023, 1023, 0, 1023, 1023, 0, 623, 0, 0, 1023, 1023, 0,
+ 0, 0, 0, 523, 1023, 2, 0, 0, 863, 1023, 1023, 409,
+ 7, 1023, 0, 409, 1023, 0, 579, 1023, 1023, 1023, 0, 0,
+ 1023, 1023, 446, 1023, 1023, 0, 0, 1023, 0, 0, 829, 1023,
+ 0, 1023, 939, 0, 0, 23, 1022, 990, 1023, 0, 0, 4,
+ 0, 299, 0, 0, 1023, 1023, 629, 688, 1023, 1023, 266, 1023,
+ 865, 0, 413, 0, 267, 0, 0, 69, 1023, 866, 1023, 885,
+ 0, 762, 330, 382, 0, 1023, 1023, 734, 504, 899, 119, 0,
+ 378, 1011, 0, 0, 1023, 364, 0, 1023, 1023, 462, 1023, 0,
+ 504, 1023, 1023, 0, 695, 1023, 57, 1023, 1023, 362, 0, 0,
+ 0, 0, 1023, 1023, 387, 12, 929, 1023, 0, 194, 1023, 0,
+ 1023, 505, 0, 1023, 1023, 1023, 1023, 1023, 0, 0, 676, 0,
+ 6, 683, 70, 0, 0, 1023, 226, 1023, 320, 758, 0, 0,
+ 648, 1023, 867, 550, 630, 960, 1023, 1023, 1023, 0, 0, 822,
+ 0, 0, 0, 1023, 1011, 1023, 1023, 0, 0, 15, 30, 0,
+ 1023, 1023, 0, 0, 0, 84, 954, 1023, 933, 416, 333, 323,
+ 0, 0, 1023, 355, 1023, 176, 1023, 1023, 886, 87, 1023, 0,
+ 1023, 1023, 1023, 562, 0, 1023, 1023, 354, 0, 0, 1023, 0,
+ 86, 0, 0, 1023, 0, 1023, 192, 0, 1023, 0, 1023, 0,
+ 0, 0, 735, 1023, 1023, 1023, 0, 372, 988, 131, 1023, 1023,
+ 0, 1023, 1023, 1023, 1023, 970, 1023, 1023, 248, 757, 665, 330,
+ 223, 273, 0, 274, 1023, 0, 1023, 613, 786, 1023, 792, 0,
+ 390, 282, 0, 1023, 0, 1023, 0, 1023, 1023, 1023, 614, 993,
+ 135, 737, 662, 0, 1023, 524, 970, 1023, 0, 906, 1023, 1023,
+ 959, 1023, 1023, 1023, 1023, 836, 838, 0, 0, 0, 0, 0,
+ 1023, 917, 492, 290, 1023, 1023, 817, 1023, 0, 0, 588, 410,
+ 419, 0, 1023, 1023, 178, 0, 0, 563, 775, 977, 1023, 1023,
+ 0, 1023, 0, 370, 434, 1023, 963, 587, 0, 0, 1023, 1023,
+ 1023, 1023, 1023, 1023, 619, 0, 1023, 352, 1023, 0, 0, 0,
+ 133, 557, 36, 1023, 1023, 1023, 0, 469, 1023, 1023, 0, 900,
+ 59, 841, 1023, 886, 0, 193, 126, 263, 119, 629, 0, 1023,
+ 0, 1023, 0, 0, 478, 0, 1023, 63, 1023, 0, 0, 0,
+ 0, 0, 0, 0, 1023, 888, 1023, 905, 646, 0, 0, 1023,
+ 752, 1023, 1023, 0, 1023, 0, 0, 648, 1023, 0, 0, 838,
+ 0, 321, 1023, 475, 0, 215, 867, 1023, 0, 1023, 1023, 624,
+ 417, 1023, 426, 0, 0, 960, 1020, 839, 687, 1023, 161, 1023,
+ 1023, 1023, 1023, 968, 0, 95, 430, 0, 132, 1023, 1023, 113,
+ 0, 1023, 1023, 606, 1023, 0, 0, 31, 1023, 1023, 0, 180,
+ 140, 654, 1023, 1023, 1023, 1023, 1023, 779, 1023, 0, 0, 1023,
+ 1023, 1023, 0, 1023, 0, 0, 1023, 963, 723, 536, 1023, 0,
+ 0, 0, 337, 812, 0, 0, 0, 428, 48, 0, 321, 205,
+ 0, 587, 799, 272, 5, 1023, 322, 0, 761, 0, 749, 1023,
+ 0, 0, 1023, 1023, 1023, 1023, 242, 402, 98, 0, 1023, 884,
+ 219, 1023, 0, 1023, 0, 0, 0, 106, 1023, 0, 1023, 414,
+ 1023, 0, 1023, 619, 0, 0, 973, 854, 82, 1023, 1023, 1023,
+ 0, 1023, 1023, 0, 0, 588, 433, 0, 0, 961, 0, 0,
+ 0, 917, 859, 461, 455, 68, 1023, 409, 1023, 821, 1023, 487,
+ 1023, 0, 717, 0, 613, 0, 0, 840, 932, 782, 1023, 1023,
+ 576, 1023, 0, 1023, 1023, 187, 876, 162, 0, 1023, 1023, 946,
+ 873, 0, 0, 953, 0, 537, 0, 0, 1023, 193, 807, 756,
+ 0, 0, 1023, 732, 1023, 1023, 1023, 0, 0, 1023, 1023, 1023,
+ 1023, 1023, 119, 0, 0, 90, 1023, 0, 1023, 0, 0, 0,
+ 1023, 366, 1023, 655, 0, 58, 1023, 1023, 8, 1023, 1023, 24,
+ 1023, 103, 0, 0, 1023, 919, 1023, 566, 1023, 0, 0, 480,
+ 1023, 1023, 0, 0, 807, 0, 1023, 0, 273, 412, 632, 1023,
+ 1023, 1023, 10, 633, 1023, 692, 978, 0, 0, 1023, 1023, 1023,
+ 25, 494, 215, 0, 148, 1023, 840, 118, 1023, 1023, 999, 1023,
+ 1023, 1023, 0, 0, 1023, 435, 894, 0, 1023, 1023, 168, 1023,
+ 1023, 211, 1023, 1023, 656, 1023, 0, 0, 0, 744, 238, 1023,
+ 0, 196, 907, 0, 0, 0, 838, 726, 1023, 1023, 1023, 0,
+ 0, 0, 1023, 0, 1023, 1023, 1023, 0, 1023, 0, 0, 0,
+ 323, 1023, 1023, 0, 1023, 0, 0, 925, 582, 1023, 0, 685,
+ 1023, 661, 464, 0, 0, 0, 1023, 0, 807, 0, 1023, 1023,
+ 1023, 100, 0, 1023, 302, 1023, 1023, 1023, 616, 0, 1023, 0,
+ 0, 377, 1023, 1023, 1023, 0, 1023, 555, 1023, 784, 0, 0,
+ 1023, 0, 0, 1023, 755, 0, 839, 1023, 0, 0, 0, 1023,
+ 1023, 1023, 0, 1023, 413, 0, 1023, 1023, 384, 0, 823, 797,
+ 1023, 0, 1023, 0, 0, 1023, 1023, 1023, 1023, 0, 1023, 39,
+ 0, 473, 299, 0, 0, 1023, 567, 1023, 1023, 0, 0, 1023,
+ 650, 1023, 41, 1023, 0, 1023, 0, 1023, 0, 1023, 0, 0,
+ 444, 1023, 23, 0, 503, 97, 0, 1023, 0, 890, 59, 578,
+ 0, 201, 1023, 672, 1023, 593, 1023, 599, 213, 1023, 1023, 1023,
+ 986, 1023, 335, 1023, 457, 0, 888, 1023, 1023, 97, 308, 259,
+ 813, 1023, 1023, 1023, 0, 1023, 798, 907, 105, 0, 1023, 0,
+ 1023, 1023, 0, 970, 518, 0, 635, 0, 634, 329, 1023, 430,
+ 0, 17, 1023, 1023, 1023, 0, 0, 407, 1023, 1023, 0, 1023,
+ 0, 0, 0, 0, 1023, 1023, 1023, 402, 1023, 0, 0, 101,
+ 1023, 1023, 1023, 1023, 1023, 1023, 425, 791, 1023, 1023, 961, 0,
+ 0, 1023, 474, 1023, 1023, 1023, 1023, 468, 1023, 1023, 0, 1023,
+ 215, 0, 1023, 1023, 334, 463, 286, 1023, 0, 1023, 0, 1023,
+ 270, 401, 0, 0, 1023, 0, 794, 0, 0, 0, 1023, 0,
+ 1023, 172, 317, 905, 950, 0
+ };
+ unsigned char *img_data =
+ reinterpret_cast<unsigned char *>(const_cast<uint16_t *>(buffer));
+
+ aom_image_t img;
+ EXPECT_EQ(&img, aom_img_wrap(&img, AOM_IMG_FMT_I42016, kWidth, kHeight, 1,
+ img_data));
+ img.cp = AOM_CICP_CP_UNSPECIFIED;
+ img.tc = AOM_CICP_TC_UNSPECIFIED;
+ img.mc = AOM_CICP_MC_UNSPECIFIED;
+ img.monochrome = 1;
+ img.csp = AOM_CSP_UNKNOWN;
+ img.range = AOM_CR_FULL_RANGE;
+ img.planes[1] = img.planes[2] = nullptr;
+ img.stride[1] = img.stride[2] = 0;
+
+ aom_codec_iface_t *iface = aom_codec_av1_cx();
+ aom_codec_enc_cfg_t cfg;
+ EXPECT_EQ(AOM_CODEC_OK,
+ aom_codec_enc_config_default(iface, &cfg, AOM_USAGE_GOOD_QUALITY));
+ cfg.rc_end_usage = AOM_Q;
+ cfg.g_profile = 0;
+ cfg.g_bit_depth = AOM_BITS_10;
+ cfg.g_input_bit_depth = 10;
+ cfg.g_w = kWidth;
+ cfg.g_h = kHeight;
+ cfg.g_threads = 6;
+ cfg.monochrome = 1;
+ cfg.rc_min_quantizer = 54;
+ cfg.rc_max_quantizer = 62;
+ aom_codec_ctx_t enc;
+ EXPECT_EQ(AOM_CODEC_OK,
+ aom_codec_enc_init(&enc, iface, &cfg, AOM_CODEC_USE_HIGHBITDEPTH));
+ EXPECT_EQ(AOM_CODEC_OK, aom_codec_control(&enc, AOME_SET_CQ_LEVEL, 58));
+ EXPECT_EQ(AOM_CODEC_OK, aom_codec_control(&enc, AV1E_SET_TILE_ROWS, 1));
+ EXPECT_EQ(AOM_CODEC_OK, aom_codec_control(&enc, AOME_SET_CPUUSED, 6));
+ EXPECT_EQ(AOM_CODEC_OK,
+ aom_codec_control(&enc, AV1E_SET_COLOR_RANGE, AOM_CR_FULL_RANGE));
+ EXPECT_EQ(AOM_CODEC_OK,
+ aom_codec_control(&enc, AOME_SET_TUNING, AOM_TUNE_SSIM));
+
+ // Encode frame
+ EXPECT_EQ(AOM_CODEC_OK, aom_codec_encode(&enc, &img, 0, 1, 0));
+ aom_codec_iter_t iter = nullptr;
+ const aom_codec_cx_pkt_t *pkt = aom_codec_get_cx_data(&enc, &iter);
+ ASSERT_EQ(pkt, nullptr);
+
+ // Flush encoder
+ EXPECT_EQ(AOM_CODEC_OK, aom_codec_encode(&enc, nullptr, 0, 1, 0));
+ iter = nullptr;
+ pkt = aom_codec_get_cx_data(&enc, &iter);
+ ASSERT_NE(pkt, nullptr);
+ EXPECT_EQ(pkt->kind, AOM_CODEC_CX_FRAME_PKT);
+ // pkt->data.frame.flags is 0x1f0011.
+ EXPECT_EQ(pkt->data.frame.flags & AOM_FRAME_IS_KEY, AOM_FRAME_IS_KEY);
+ pkt = aom_codec_get_cx_data(&enc, &iter);
+ EXPECT_EQ(pkt, nullptr);
+
+ EXPECT_EQ(AOM_CODEC_OK, aom_codec_encode(&enc, nullptr, 0, 1, 0));
+ iter = nullptr;
+ pkt = aom_codec_get_cx_data(&enc, &iter);
+ EXPECT_EQ(pkt, nullptr);
+
+ EXPECT_EQ(AOM_CODEC_OK, aom_codec_destroy(&enc));
+}
+
// A test that reproduces b/277121724: signed integer overflow in
// update_b_sep_sym().
TEST(SearchWienerTest, 8bitSignedIntegerOverflowInUpdateBSepSym) {
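Both new SearchWienerTest cases above follow the same recipe as the earlier overflow regressions: wrap a short, hand-picked high-bit-depth monochrome buffer with aom_img_wrap(), encode it at speed 6 with AOM_TUNE_SSIM, and let sanitizer builds flag any recurrence of the overflow inside update_b_sep_sym(). The hazard itself is generic: sums of products of 12-bit samples quickly exceed what a 32-bit signed accumulator can hold. A standalone sketch of that accumulation hazard (not the aom code; the counts and the tap value are illustrative):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const int kSamples = 1 << 18;   // a couple hundred thousand samples
      const int32_t sample = 4095;    // maximum 12-bit value
      const int32_t tap = 128;        // illustrative filter weight

      // 64-bit accumulation is safe for this range.
      int64_t acc64 = 0;
      for (int i = 0; i < kSamples; ++i) acc64 += int64_t{sample} * sample * tap;

      // A single term, 4095 * 4095 * 128 = 2,146,435,200, is already just under
      // INT32_MAX, so a 32-bit accumulator overflows (undefined behaviour) as
      // soon as the second term is added.
      std::printf("per-term product = %lld\n",
                  static_cast<long long>(int64_t{sample} * sample * tap));
      std::printf("64-bit total     = %lld\n", static_cast<long long>(acc64));
      std::printf("INT32_MAX        = %d\n", INT32_MAX);
      return 0;
    }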
diff --git a/third_party/aom/third_party/libwebm/README.libaom b/third_party/aom/third_party/libwebm/README.libaom
index ee350a523a..1eb0ce9a94 100644
--- a/third_party/aom/third_party/libwebm/README.libaom
+++ b/third_party/aom/third_party/libwebm/README.libaom
@@ -1,5 +1,5 @@
URL: https://chromium.googlesource.com/webm/libwebm
-Version: 1930e3ca23b007f3ff11d98a570077be6201957e
+Version: affd7f4d9644aa2b65981fa6c7616400be760e6e
License: BSD
License File: LICENSE.TXT
diff --git a/third_party/aom/third_party/libwebm/mkvmuxer/mkvmuxer.cc b/third_party/aom/third_party/libwebm/mkvmuxer/mkvmuxer.cc
index faaf0165f4..21e51be474 100644
--- a/third_party/aom/third_party/libwebm/mkvmuxer/mkvmuxer.cc
+++ b/third_party/aom/third_party/libwebm/mkvmuxer/mkvmuxer.cc
@@ -65,7 +65,8 @@ bool StrCpy(const char* src, char** dst_ptr) {
if (dst == NULL)
return false;
- strcpy(dst, src); // NOLINT
+ memcpy(dst, src, size - 1);
+ dst[size - 1] = '\0';
return true;
}
@@ -919,11 +920,8 @@ void Track::set_codec_id(const char* codec_id) {
const size_t length = strlen(codec_id) + 1;
codec_id_ = new (std::nothrow) char[length]; // NOLINT
if (codec_id_) {
-#ifdef _MSC_VER
- strcpy_s(codec_id_, length, codec_id);
-#else
- strcpy(codec_id_, codec_id);
-#endif
+ memcpy(codec_id_, codec_id, length - 1);
+ codec_id_[length - 1] = '\0';
}
}
}
@@ -936,11 +934,8 @@ void Track::set_language(const char* language) {
const size_t length = strlen(language) + 1;
language_ = new (std::nothrow) char[length]; // NOLINT
if (language_) {
-#ifdef _MSC_VER
- strcpy_s(language_, length, language);
-#else
- strcpy(language_, language);
-#endif
+ memcpy(language_, language, length - 1);
+ language_[length - 1] = '\0';
}
}
}
@@ -952,11 +947,8 @@ void Track::set_name(const char* name) {
const size_t length = strlen(name) + 1;
name_ = new (std::nothrow) char[length]; // NOLINT
if (name_) {
-#ifdef _MSC_VER
- strcpy_s(name_, length, name);
-#else
- strcpy(name_, name);
-#endif
+ memcpy(name_, name, length - 1);
+ name_[length - 1] = '\0';
}
}
}
@@ -1559,11 +1551,8 @@ void VideoTrack::set_colour_space(const char* colour_space) {
const size_t length = strlen(colour_space) + 1;
colour_space_ = new (std::nothrow) char[length]; // NOLINT
if (colour_space_) {
-#ifdef _MSC_VER
- strcpy_s(colour_space_, length, colour_space);
-#else
- strcpy(colour_space_, colour_space);
-#endif
+ memcpy(colour_space_, colour_space, length - 1);
+ colour_space_[length - 1] = '\0';
}
}
}
@@ -2856,13 +2845,13 @@ bool SeekHead::AddSeekEntry(uint32_t id, uint64_t pos) {
uint32_t SeekHead::GetId(int index) const {
if (index < 0 || index >= kSeekEntryCount)
- return UINT_MAX;
+ return UINT32_MAX;
return seek_entry_id_[index];
}
uint64_t SeekHead::GetPosition(int index) const {
if (index < 0 || index >= kSeekEntryCount)
- return ULLONG_MAX;
+ return UINT64_MAX;
return seek_entry_pos_[index];
}
@@ -2896,7 +2885,7 @@ SegmentInfo::SegmentInfo()
muxing_app_(NULL),
timecode_scale_(1000000ULL),
writing_app_(NULL),
- date_utc_(LLONG_MIN),
+ date_utc_(INT64_MIN),
duration_pos_(-1) {}
SegmentInfo::~SegmentInfo() {
@@ -2927,11 +2916,8 @@ bool SegmentInfo::Init() {
if (!muxing_app_)
return false;
-#ifdef _MSC_VER
- strcpy_s(muxing_app_, app_len, temp);
-#else
- strcpy(muxing_app_, temp);
-#endif
+ memcpy(muxing_app_, temp, app_len - 1);
+ muxing_app_[app_len - 1] = '\0';
set_writing_app(temp);
if (!writing_app_)
@@ -2974,7 +2960,7 @@ bool SegmentInfo::Write(IMkvWriter* writer) {
if (duration_ > 0.0)
size +=
EbmlElementSize(libwebm::kMkvDuration, static_cast<float>(duration_));
- if (date_utc_ != LLONG_MIN)
+ if (date_utc_ != INT64_MIN)
size += EbmlDateElementSize(libwebm::kMkvDateUTC);
size += EbmlElementSize(libwebm::kMkvMuxingApp, muxing_app_);
size += EbmlElementSize(libwebm::kMkvWritingApp, writing_app_);
@@ -2999,7 +2985,7 @@ bool SegmentInfo::Write(IMkvWriter* writer) {
return false;
}
- if (date_utc_ != LLONG_MIN)
+ if (date_utc_ != INT64_MIN)
WriteEbmlDateElement(writer, libwebm::kMkvDateUTC, date_utc_);
if (!WriteEbmlElement(writer, libwebm::kMkvMuxingApp, muxing_app_))
@@ -3022,11 +3008,8 @@ void SegmentInfo::set_muxing_app(const char* app) {
if (!temp_str)
return;
-#ifdef _MSC_VER
- strcpy_s(temp_str, length, app);
-#else
- strcpy(temp_str, app);
-#endif
+ memcpy(temp_str, app, length - 1);
+ temp_str[length - 1] = '\0';
delete[] muxing_app_;
muxing_app_ = temp_str;
@@ -3040,11 +3023,8 @@ void SegmentInfo::set_writing_app(const char* app) {
if (!temp_str)
return;
-#ifdef _MSC_VER
- strcpy_s(temp_str, length, app);
-#else
- strcpy(temp_str, app);
-#endif
+ memcpy(temp_str, app, length - 1);
+ temp_str[length - 1] = '\0';
delete[] writing_app_;
writing_app_ = temp_str;
@@ -3628,19 +3608,17 @@ bool Segment::SetChunking(bool chunking, const char* filename) {
if (chunking_ && !strcmp(filename, chunking_base_name_))
return true;
- const size_t name_length = strlen(filename) + 1;
- char* const temp = new (std::nothrow) char[name_length]; // NOLINT
+ const size_t filename_length = strlen(filename);
+ char* const temp = new (std::nothrow) char[filename_length + 1]; // NOLINT
if (!temp)
return false;
-#ifdef _MSC_VER
- strcpy_s(temp, name_length, filename);
-#else
- strcpy(temp, filename);
-#endif
+ memcpy(temp, filename, filename_length);
+ temp[filename_length] = '\0';
delete[] chunking_base_name_;
chunking_base_name_ = temp;
+ // From this point, strlen(chunking_base_name_) == filename_length
if (!UpdateChunkName("chk", &chunk_name_))
return false;
@@ -3666,18 +3644,16 @@ bool Segment::SetChunking(bool chunking, const char* filename) {
if (!chunk_writer_cluster_->Open(chunk_name_))
return false;
- const size_t header_length = strlen(filename) + strlen(".hdr") + 1;
+ const size_t hdr_length = strlen(".hdr");
+ const size_t header_length = filename_length + hdr_length + 1;
char* const header = new (std::nothrow) char[header_length]; // NOLINT
if (!header)
return false;
-#ifdef _MSC_VER
- strcpy_s(header, header_length - strlen(".hdr"), chunking_base_name_);
- strcat_s(header, header_length, ".hdr");
-#else
- strcpy(header, chunking_base_name_);
- strcat(header, ".hdr");
-#endif
+ memcpy(header, chunking_base_name_, filename_length);
+ memcpy(&header[filename_length], ".hdr", hdr_length);
+ header[filename_length + hdr_length] = '\0';
+
if (!chunk_writer_header_->Open(header)) {
delete[] header;
return false;
@@ -4022,18 +3998,16 @@ bool Segment::UpdateChunkName(const char* ext, char** name) const {
snprintf(ext_chk, sizeof(ext_chk), "_%06d.%s", chunk_count_, ext);
#endif
- const size_t length = strlen(chunking_base_name_) + strlen(ext_chk) + 1;
+ const size_t chunking_base_name_length = strlen(chunking_base_name_);
+ const size_t ext_chk_length = strlen(ext_chk);
+ const size_t length = chunking_base_name_length + ext_chk_length + 1;
char* const str = new (std::nothrow) char[length]; // NOLINT
if (!str)
return false;
-#ifdef _MSC_VER
- strcpy_s(str, length - strlen(ext_chk), chunking_base_name_);
- strcat_s(str, length, ext_chk);
-#else
- strcpy(str, chunking_base_name_);
- strcat(str, ext_chk);
-#endif
+ memcpy(str, chunking_base_name_, chunking_base_name_length);
+ memcpy(&str[chunking_base_name_length], ext_chk, ext_chk_length);
+ str[chunking_base_name_length + ext_chk_length] = '\0';
delete[] * name;
*name = str;
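Throughout mkvmuxer.cc the strcpy()/strcpy_s() and strcat()/strcat_s() pairs are replaced with memcpy() of a precomputed length plus an explicit terminator, which removes the _MSC_VER special-casing and keeps every copy bounded by the size that was just allocated. A minimal sketch of the copy-and-concatenate idiom (the helper names are illustrative, not libwebm API):

    #include <cstdio>
    #include <cstring>
    #include <new>

    // Duplicate a string the way the rewritten setters do: size the buffer from
    // strlen(), memcpy() exactly that many bytes, then terminate explicitly.
    static char *DupString(const char *src) {
      const std::size_t length = std::strlen(src) + 1;
      char *dst = new (std::nothrow) char[length];
      if (dst == nullptr) return nullptr;
      std::memcpy(dst, src, length - 1);
      dst[length - 1] = '\0';
      return dst;
    }

    // Concatenate base + suffix with two bounded copies, mirroring the
    // base-plus-extension construction in Segment::SetChunking() and
    // Segment::UpdateChunkName().
    static char *ConcatStrings(const char *base, const char *suffix) {
      const std::size_t base_len = std::strlen(base);
      const std::size_t suffix_len = std::strlen(suffix);
      char *dst = new (std::nothrow) char[base_len + suffix_len + 1];
      if (dst == nullptr) return nullptr;
      std::memcpy(dst, base, base_len);
      std::memcpy(dst + base_len, suffix, suffix_len);
      dst[base_len + suffix_len] = '\0';
      return dst;
    }

    int main() {
      char *name = DupString("chunk_base");
      if (name == nullptr) return 1;
      char *header = ConcatStrings(name, ".hdr");
      if (header == nullptr) { delete[] name; return 1; }
      std::printf("%s / %s\n", name, header);  // chunk_base / chunk_base.hdr
      delete[] header;
      delete[] name;
      return 0;
    }

The behaviour matches the strcpy()-based code for well-formed inputs; the gain is that the copy length is derived once and reused, so the buffer size and the copy can no longer drift apart.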
diff --git a/third_party/aom/third_party/libwebm/mkvmuxer/mkvmuxer.h b/third_party/aom/third_party/libwebm/mkvmuxer/mkvmuxer.h
index 8602d82325..2c4bb9e93e 100644
--- a/third_party/aom/third_party/libwebm/mkvmuxer/mkvmuxer.h
+++ b/third_party/aom/third_party/libwebm/mkvmuxer/mkvmuxer.h
@@ -1481,7 +1481,7 @@ class SegmentInfo {
uint64_t timecode_scale_;
// Initially set to libwebm-%d.%d.%d.%d, major, minor, build, revision.
char* writing_app_;
- // LLONG_MIN when DateUTC is not set.
+ // INT64_MIN when DateUTC is not set.
int64_t date_utc_;
// The file position of the duration element.
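The comment change in mkvmuxer.h tracks the source: the "DateUTC not set" sentinel, like SeekHead's missing-entry markers, is now spelled with the limit macro belonging to the field's declared fixed-width type rather than with LLONG_MIN, UINT_MAX and ULLONG_MAX. A small sketch of why the fixed-width macros are the natural sentinels for fixed-width fields (the struct is illustrative, not the libwebm classes):

    #include <cstdint>
    #include <limits>

    // Sentinels spelled with the fixed-width macros always match the declared
    // field types, however wide int, long and long long happen to be on the ABI.
    struct MuxerState {
      uint32_t seek_entry_id = UINT32_MAX;   // "no entry written yet"
      uint64_t seek_entry_pos = UINT64_MAX;  // "position unknown"
      int64_t date_utc = INT64_MIN;          // "DateUTC not set"
    };

    static_assert(UINT32_MAX == std::numeric_limits<uint32_t>::max(),
                  "sentinel matches uint32_t");
    static_assert(UINT64_MAX == std::numeric_limits<uint64_t>::max(),
                  "sentinel matches uint64_t");
    static_assert(INT64_MIN == std::numeric_limits<int64_t>::min(),
                  "sentinel matches int64_t");

    int main() { return MuxerState().date_utc == INT64_MIN ? 0 : 1; }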
diff --git a/third_party/aom/third_party/libwebm/mkvmuxer/mkvmuxerutil.cc b/third_party/aom/third_party/libwebm/mkvmuxer/mkvmuxerutil.cc
index 300b155797..f538310e21 100644
--- a/third_party/aom/third_party/libwebm/mkvmuxer/mkvmuxerutil.cc
+++ b/third_party/aom/third_party/libwebm/mkvmuxer/mkvmuxerutil.cc
@@ -607,22 +607,18 @@ uint64 WriteVoidElement(IMkvWriter* writer, uint64 size) {
void GetVersion(int32* major, int32* minor, int32* build, int32* revision) {
*major = 0;
*minor = 3;
- *build = 1;
+ *build = 3;
*revision = 0;
}
uint64 MakeUID(unsigned int* seed) {
uint64 uid = 0;
-#ifdef __MINGW32__
- srand(*seed);
-#endif
-
for (int i = 0; i < 7; ++i) { // avoid problems with 8-byte values
uid <<= 8;
// TODO(fgalligan): Move random number generation to platform specific code.
-#ifdef _MSC_VER
+#ifdef _WIN32
(void)seed;
const int32 nn = rand();
#elif __ANDROID__
@@ -634,8 +630,6 @@ uint64 MakeUID(unsigned int* seed) {
close(fd);
}
const int32 nn = temp_num;
-#elif defined __MINGW32__
- const int32 nn = rand();
#else
const int32 nn = rand_r(seed);
#endif
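MakeUID() now routes MSVC and MinGW through the same #ifdef _WIN32 branch, dropping the MinGW-only srand() call and the duplicated rand() fallback, while the remaining non-Android POSIX targets keep rand_r(). A standalone sketch of the resulting platform-conditional byte loop (names are illustrative; this is not the libwebm function):

    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>

    // Assemble a 56-bit identifier one random byte at a time, mirroring the
    // seven-iteration loop in the hunk above (7 bytes rather than 8 to avoid
    // problems with full 8-byte values).
    static uint64_t MakeId(unsigned int *seed) {
      uint64_t id = 0;
      for (int i = 0; i < 7; ++i) {
        id <<= 8;
    #ifdef _WIN32
        (void)seed;                  // MSVC and MinGW both take this branch now
        const int n = std::rand();
    #else
        const int n = rand_r(seed);  // POSIX reentrant RNG on the other targets
    #endif
        id |= static_cast<uint64_t>(n) & 0xff;
      }
      return id;
    }

    int main() {
      unsigned int seed = 1;
      std::printf("%llu\n", static_cast<unsigned long long>(MakeId(&seed)));
      return 0;
    }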
diff --git a/third_party/aom/third_party/libwebm/mkvparser/mkvparser.cc b/third_party/aom/third_party/libwebm/mkvparser/mkvparser.cc
index 868afcb3ed..eddbc7eb50 100644
--- a/third_party/aom/third_party/libwebm/mkvparser/mkvparser.cc
+++ b/third_party/aom/third_party/libwebm/mkvparser/mkvparser.cc
@@ -55,7 +55,7 @@ Type* SafeArrayAlloc(unsigned long long num_elements,
void GetVersion(int& major, int& minor, int& build, int& revision) {
major = 1;
minor = 1;
- build = 1;
+ build = 3;
revision = 0;
}
@@ -246,7 +246,8 @@ long UnserializeFloat(IMkvReader* pReader, long long pos, long long size_,
if (size == 4) {
union {
float f;
- unsigned long ff;
+ uint32_t ff;
+ static_assert(sizeof(float) == sizeof(uint32_t), "");
};
ff = 0;
@@ -264,7 +265,8 @@ long UnserializeFloat(IMkvReader* pReader, long long pos, long long size_,
} else {
union {
double d;
- unsigned long long dd;
+ uint64_t dd;
+ static_assert(sizeof(double) == sizeof(uint64_t), "");
};
dd = 0;
@@ -4569,7 +4571,8 @@ int Track::Info::CopyStr(char* Info::*str, Info& dst_) const {
if (dst == NULL)
return -1;
- strcpy(dst, src);
+ memcpy(dst, src, len);
+ dst[len] = '\0';
return 0;
}
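The mkvparser.cc hunks give the float and double read paths fixed-width union members with static_asserts (the old unsigned long member is 64-bit on LP64 targets, so it did not even match float's size there), and Track::Info::CopyStr() adopts the same memcpy-plus-terminator idiom as the muxer. A standalone sketch of assembling a big-endian 32-bit float the way UnserializeFloat() accumulates its bytes, but using memcpy for the final bit reinterpretation rather than the anonymous union (the function name is illustrative):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // EBML floats are stored big-endian: fold the bytes into a fixed-width
    // integer, then reinterpret the bits as a float.
    static float ReadBigEndianFloat32(const unsigned char bytes[4]) {
      static_assert(sizeof(float) == sizeof(uint32_t), "float must be 32 bits");
      uint32_t bits = 0;
      for (int i = 0; i < 4; ++i) {
        bits <<= 8;
        bits |= bytes[i];
      }
      float result;
      std::memcpy(&result, &bits, sizeof(result));  // portable bit reinterpretation
      return result;
    }

    int main() {
      const unsigned char one[4] = { 0x3f, 0x80, 0x00, 0x00 };  // 1.0f, big-endian
      std::printf("%f\n", ReadBigEndianFloat32(one));           // prints 1.000000
      return 0;
    }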